stream.go
  1. /*
  2. *
  3. * Copyright 2014 gRPC authors.
  4. *
  5. * Licensed under the Apache License, Version 2.0 (the "License");
  6. * you may not use this file except in compliance with the License.
  7. * You may obtain a copy of the License at
  8. *
  9. * http://www.apache.org/licenses/LICENSE-2.0
  10. *
  11. * Unless required by applicable law or agreed to in writing, software
  12. * distributed under the License is distributed on an "AS IS" BASIS,
  13. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. * See the License for the specific language governing permissions and
  15. * limitations under the License.
  16. *
  17. */
  18. package grpc
  19. import (
  20. "context"
  21. "errors"
  22. "io"
  23. "math"
  24. "strconv"
  25. "sync"
  26. "time"
  27. "golang.org/x/net/trace"
  28. "google.golang.org/grpc/balancer"
  29. "google.golang.org/grpc/codes"
  30. "google.golang.org/grpc/encoding"
  31. "google.golang.org/grpc/internal/balancerload"
  32. "google.golang.org/grpc/internal/binarylog"
  33. "google.golang.org/grpc/internal/channelz"
  34. "google.golang.org/grpc/internal/grpcrand"
  35. "google.golang.org/grpc/internal/grpcutil"
  36. "google.golang.org/grpc/internal/transport"
  37. "google.golang.org/grpc/metadata"
  38. "google.golang.org/grpc/peer"
  39. "google.golang.org/grpc/stats"
  40. "google.golang.org/grpc/status"
  41. )
  42. // StreamHandler defines the handler called by gRPC server to complete the
  43. // execution of a streaming RPC. If a StreamHandler returns an error, it
  44. // should be produced by the status package, or else gRPC will use
  45. // codes.Unknown as the status code and err.Error() as the status message
  46. // of the RPC.
  47. type StreamHandler func(srv interface{}, stream ServerStream) error
  48. // StreamDesc represents a streaming RPC service's method specification.
  49. type StreamDesc struct {
  50. StreamName string
  51. Handler StreamHandler
  52. // At least one of these is true.
  53. ServerStreams bool
  54. ClientStreams bool
  55. }
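// A hedged sketch of how generated code typically fills in a StreamDesc for a
// bidirectional-streaming method (the service, method, and handler names are
// hypothetical, not part of this file):
//
//	grpc.StreamDesc{
//		StreamName:    "BidiEcho",
//		Handler:       _Echo_BidiEcho_Handler,
//		ServerStreams: true,
//		ClientStreams: true,
//	}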
  56. // Stream defines the common interface a client or server stream has to satisfy.
  57. //
  58. // Deprecated: See ClientStream and ServerStream documentation instead.
  59. type Stream interface {
  60. // Deprecated: See ClientStream and ServerStream documentation instead.
  61. Context() context.Context
  62. // Deprecated: See ClientStream and ServerStream documentation instead.
  63. SendMsg(m interface{}) error
  64. // Deprecated: See ClientStream and ServerStream documentation instead.
  65. RecvMsg(m interface{}) error
  66. }
  67. // ClientStream defines the client-side behavior of a streaming RPC.
  68. //
  69. // All errors returned from ClientStream methods are compatible with the
  70. // status package.
  71. type ClientStream interface {
  72. // Header returns the header metadata received from the server if there
  73. // is any. It blocks if the metadata is not ready to read.
  74. Header() (metadata.MD, error)
  75. // Trailer returns the trailer metadata from the server, if there is any.
  76. // It must only be called after stream.CloseAndRecv has returned, or
  77. // stream.Recv has returned a non-nil error (including io.EOF).
  78. Trailer() metadata.MD
  79. // CloseSend closes the send direction of the stream. It closes the stream
  80. // when a non-nil error is encountered. It is also not safe to call CloseSend
  81. // concurrently with SendMsg.
  82. CloseSend() error
  83. // Context returns the context for this stream.
  84. //
  85. // It should not be called until after Header or RecvMsg has returned. Once
  86. // called, subsequent client-side retries are disabled.
  87. Context() context.Context
  88. // SendMsg is generally called by generated code. On error, SendMsg aborts
  89. // the stream. If the error was generated by the client, the status is
  90. // returned directly; otherwise, io.EOF is returned and the status of
  91. // the stream may be discovered using RecvMsg.
  92. //
  93. // SendMsg blocks until:
  94. // - There is sufficient flow control to schedule m with the transport, or
  95. // - The stream is done, or
  96. // - The stream breaks.
  97. //
  98. // SendMsg does not wait until the message is received by the server. An
  99. // untimely stream closure may result in lost messages. To ensure delivery,
  100. // users should ensure the RPC completed successfully using RecvMsg.
  101. //
  102. // It is safe to have a goroutine calling SendMsg and another goroutine
  103. // calling RecvMsg on the same stream at the same time, but it is not safe
  104. // to call SendMsg on the same stream in different goroutines. It is also
  105. // not safe to call CloseSend concurrently with SendMsg.
  106. SendMsg(m interface{}) error
  107. // RecvMsg blocks until it receives a message into m or the stream is
  108. // done. It returns io.EOF when the stream completes successfully. On
  109. // any other error, the stream is aborted and the error contains the RPC
  110. // status.
  111. //
  112. // It is safe to have a goroutine calling SendMsg and another goroutine
  113. // calling RecvMsg on the same stream at the same time, but it is not
  114. // safe to call RecvMsg on the same stream in different goroutines.
  115. RecvMsg(m interface{}) error
  116. }
  117. // NewStream creates a new Stream for the client side. This is typically
  118. // called by generated code. ctx is used for the lifetime of the stream.
  119. //
  120. // To ensure resources are not leaked due to the stream returned, one of the following
  121. // actions must be performed:
  122. //
  123. // 1. Call Close on the ClientConn.
  124. // 2. Cancel the context provided.
  125. // 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
  126. // client-streaming RPC, for instance, might use the helper function
  127. // CloseAndRecv (note that CloseSend does not Recv, therefore is not
  128. // guaranteed to release all resources).
  129. // 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
  130. //
  131. // If none of the above happen, a goroutine and a context will be leaked, and grpc
  132. // will not call the optionally-configured stats handler with a stats.End message.
  133. func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
  134. // allow interceptor to see all applicable call options, which means those
  135. // configured as defaults from dial option as well as per-call options
  136. opts = combine(cc.dopts.callOptions, opts)
  137. if cc.dopts.streamInt != nil {
  138. return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
  139. }
  140. return newClientStream(ctx, desc, cc, method, opts...)
  141. }
  142. // NewClientStream is a wrapper for ClientConn.NewStream.
  143. func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
  144. return cc.NewStream(ctx, desc, method, opts...)
  145. }
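// A hedged usage sketch from a caller's package, following the lifetime rules
// documented on NewStream above (the method name and the req/resp message
// values are hypothetical; some error handling is elided):
//
//	desc := &grpc.StreamDesc{ClientStreams: true, ServerStreams: true}
//	cs, err := cc.NewStream(ctx, desc, "/echo.Echo/BidiEcho")
//	if err != nil {
//		return err
//	}
//	if err := cs.SendMsg(req); err != nil {
//		return err // client-generated error; otherwise io.EOF, see RecvMsg
//	}
//	if err := cs.CloseSend(); err != nil {
//		return err
//	}
//	for {
//		if err := cs.RecvMsg(resp); err != nil {
//			if err == io.EOF {
//				break // stream completed successfully
//			}
//			return err // a non-nil, non-EOF error releases stream resources
//		}
//	}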
  146. func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
  147. if channelz.IsOn() {
  148. cc.incrCallsStarted()
  149. defer func() {
  150. if err != nil {
  151. cc.incrCallsFailed()
  152. }
  153. }()
  154. }
  155. c := defaultCallInfo()
  156. // Provide an opportunity for the first RPC to see the first service config
  157. // provided by the resolver.
  158. if err := cc.waitForResolvedAddrs(ctx); err != nil {
  159. return nil, err
  160. }
  161. mc := cc.GetMethodConfig(method)
  162. if mc.WaitForReady != nil {
  163. c.failFast = !*mc.WaitForReady
  164. }
  165. // Possible context leak:
  166. // The cancel function for the child context we create will only be called
  167. // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
  168. // an error is generated by SendMsg.
  169. // https://github.com/grpc/grpc-go/issues/1818.
  170. var cancel context.CancelFunc
  171. if mc.Timeout != nil && *mc.Timeout >= 0 {
  172. ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
  173. } else {
  174. ctx, cancel = context.WithCancel(ctx)
  175. }
  176. defer func() {
  177. if err != nil {
  178. cancel()
  179. }
  180. }()
  181. for _, o := range opts {
  182. if err := o.before(c); err != nil {
  183. return nil, toRPCErr(err)
  184. }
  185. }
  186. c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
  187. c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
  188. if err := setCallInfoCodec(c); err != nil {
  189. return nil, err
  190. }
  191. callHdr := &transport.CallHdr{
  192. Host: cc.authority,
  193. Method: method,
  194. ContentSubtype: c.contentSubtype,
  195. }
  196. // Set our outgoing compression according to the UseCompressor CallOption, if
  197. // set. In that case, also find the compressor from the encoding package.
  198. // Otherwise, use the compressor configured by the WithCompressor DialOption,
  199. // if set.
  200. var cp Compressor
  201. var comp encoding.Compressor
  202. if ct := c.compressorType; ct != "" {
  203. callHdr.SendCompress = ct
  204. if ct != encoding.Identity {
  205. comp = encoding.GetCompressor(ct)
  206. if comp == nil {
  207. return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
  208. }
  209. }
  210. } else if cc.dopts.cp != nil {
  211. callHdr.SendCompress = cc.dopts.cp.Type()
  212. cp = cc.dopts.cp
  213. }
  214. if c.creds != nil {
  215. callHdr.Creds = c.creds
  216. }
  217. var trInfo *traceInfo
  218. if EnableTracing {
  219. trInfo = &traceInfo{
  220. tr: trace.New("grpc.Sent."+methodFamily(method), method),
  221. firstLine: firstLine{
  222. client: true,
  223. },
  224. }
  225. if deadline, ok := ctx.Deadline(); ok {
  226. trInfo.firstLine.deadline = time.Until(deadline)
  227. }
  228. trInfo.tr.LazyLog(&trInfo.firstLine, false)
  229. ctx = trace.NewContext(ctx, trInfo.tr)
  230. }
  231. ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp)
  232. sh := cc.dopts.copts.StatsHandler
  233. var beginTime time.Time
  234. if sh != nil {
  235. ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
  236. beginTime = time.Now()
  237. begin := &stats.Begin{
  238. Client: true,
  239. BeginTime: beginTime,
  240. FailFast: c.failFast,
  241. }
  242. sh.HandleRPC(ctx, begin)
  243. }
  244. cs := &clientStream{
  245. callHdr: callHdr,
  246. ctx: ctx,
  247. methodConfig: &mc,
  248. opts: opts,
  249. callInfo: c,
  250. cc: cc,
  251. desc: desc,
  252. codec: c.codec,
  253. cp: cp,
  254. comp: comp,
  255. cancel: cancel,
  256. beginTime: beginTime,
  257. firstAttempt: true,
  258. }
  259. if !cc.dopts.disableRetry {
  260. cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
  261. }
  262. cs.binlog = binarylog.GetMethodLogger(method)
  263. // Only this initial attempt has stats/tracing.
  264. // TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
  265. if err := cs.newAttemptLocked(sh, trInfo); err != nil {
  266. cs.finish(err)
  267. return nil, err
  268. }
  269. op := func(a *csAttempt) error { return a.newStream() }
  270. if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
  271. cs.finish(err)
  272. return nil, err
  273. }
  274. if cs.binlog != nil {
  275. md, _ := metadata.FromOutgoingContext(ctx)
  276. logEntry := &binarylog.ClientHeader{
  277. OnClientSide: true,
  278. Header: md,
  279. MethodName: method,
  280. Authority: cs.cc.authority,
  281. }
  282. if deadline, ok := ctx.Deadline(); ok {
  283. logEntry.Timeout = time.Until(deadline)
  284. if logEntry.Timeout < 0 {
  285. logEntry.Timeout = 0
  286. }
  287. }
  288. cs.binlog.Log(logEntry)
  289. }
  290. if desc != unaryStreamDesc {
  291. // Listen on cc and stream contexts to clean up when the user closes the
  292. // ClientConn or cancels the stream context. In all other cases, an error
  293. // should already be injected into the recv buffer by the transport, which
  294. // the client will eventually receive, and then we will cancel the stream's
  295. // context in clientStream.finish.
  296. go func() {
  297. select {
  298. case <-cc.ctx.Done():
  299. cs.finish(ErrClientConnClosing)
  300. case <-ctx.Done():
  301. cs.finish(toRPCErr(ctx.Err()))
  302. }
  303. }()
  304. }
  305. return cs, nil
  306. }
  307. // newAttemptLocked creates a new attempt with a transport.
  308. // If it succeeds, then it replaces clientStream's attempt with this new attempt.
  309. func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) {
  310. newAttempt := &csAttempt{
  311. cs: cs,
  312. dc: cs.cc.dopts.dc,
  313. statsHandler: sh,
  314. trInfo: trInfo,
  315. }
  316. defer func() {
  317. if retErr != nil {
  318. // This attempt is not set in the clientStream, so its finish method won't
  319. // be called. Call it here for stats and tracing in case they are not
  320. // nil.
  321. newAttempt.finish(retErr)
  322. }
  323. }()
  324. if err := cs.ctx.Err(); err != nil {
  325. return toRPCErr(err)
  326. }
  327. ctx := cs.ctx
  328. if cs.cc.parsedTarget.Scheme == "xds" {
  329. // Add extra metadata (metadata that will be added by transport) to context
  330. // so the balancer can see them.
  331. ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs(
  332. "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
  333. ))
  334. }
  335. t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method)
  336. if err != nil {
  337. return err
  338. }
  339. if trInfo != nil {
  340. trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
  341. }
  342. newAttempt.t = t
  343. newAttempt.done = done
  344. cs.attempt = newAttempt
  345. return nil
  346. }
  347. func (a *csAttempt) newStream() error {
  348. cs := a.cs
  349. cs.callHdr.PreviousAttempts = cs.numRetries
  350. s, err := a.t.NewStream(cs.ctx, cs.callHdr)
  351. if err != nil {
  352. if _, ok := err.(transport.PerformedIOError); ok {
  353. // Return without converting to an RPC error so retry code can
  354. // inspect.
  355. return err
  356. }
  357. return toRPCErr(err)
  358. }
  359. cs.attempt.s = s
  360. cs.attempt.p = &parser{r: s}
  361. return nil
  362. }
  363. // clientStream implements a client side Stream.
  364. type clientStream struct {
  365. callHdr *transport.CallHdr
  366. opts []CallOption
  367. callInfo *callInfo
  368. cc *ClientConn
  369. desc *StreamDesc
  370. codec baseCodec
  371. cp Compressor
  372. comp encoding.Compressor
  373. cancel context.CancelFunc // cancels all attempts
  374. sentLast bool // sent an end stream
  375. beginTime time.Time
  376. methodConfig *MethodConfig
  377. ctx context.Context // the application's context, wrapped by stats/tracing
  378. retryThrottler *retryThrottler // The throttler active when the RPC began.
  379. binlog *binarylog.MethodLogger // Binary logger, can be nil.
  380. // serverHeaderBinlogged is a boolean for whether the server header has been
  381. // logged. The server header will be logged the first time one of the
  382. // following happens: stream.Header() or stream.Recv().
  383. //
  384. // It's only read and used by Recv() and Header(), so it doesn't need to be
  385. // synchronized.
  386. serverHeaderBinlogged bool
  387. mu sync.Mutex
  388. firstAttempt bool // if true, transparent retry is valid
  389. numRetries int // exclusive of transparent retry attempt(s)
  390. numRetriesSincePushback int // retries since pushback; to reset backoff
  391. finished bool // TODO: replace with atomic cmpxchg or sync.Once?
  392. // attempt is the active client stream attempt.
  393. // The only place it is written is the newAttemptLocked method, and that method never writes nil.
  394. // So attempt can be nil only inside the newClientStream function, when the clientStream is first created.
  395. // One of the first things done after the clientStream's creation is to call newAttemptLocked, which
  396. // either assigns a non-nil value to attempt or returns an error. If an error is returned from
  397. // newAttemptLocked, then newClientStream calls finish on the clientStream and returns. So the finish
  398. // method is the only place where we need to check whether attempt is nil.
  399. attempt *csAttempt
  400. // TODO(hedging): hedging will have multiple attempts simultaneously.
  401. committed bool // active attempt committed for retry?
  402. buffer []func(a *csAttempt) error // operations to replay on retry
  403. bufferSize int // current size of buffer
  404. }
  405. // csAttempt implements a single transport stream attempt within a
  406. // clientStream.
  407. type csAttempt struct {
  408. cs *clientStream
  409. t transport.ClientTransport
  410. s *transport.Stream
  411. p *parser
  412. done func(balancer.DoneInfo)
  413. finished bool
  414. dc Decompressor
  415. decomp encoding.Compressor
  416. decompSet bool
  417. mu sync.Mutex // guards trInfo.tr
  418. // trInfo may be nil (if EnableTracing is false).
  419. // trInfo.tr is set when created (if EnableTracing is true),
  420. // and cleared when the finish method is called.
  421. trInfo *traceInfo
  422. statsHandler stats.Handler
  423. }
  424. func (cs *clientStream) commitAttemptLocked() {
  425. cs.committed = true
  426. cs.buffer = nil
  427. }
  428. func (cs *clientStream) commitAttempt() {
  429. cs.mu.Lock()
  430. cs.commitAttemptLocked()
  431. cs.mu.Unlock()
  432. }
  433. // shouldRetry returns nil if the RPC should be retried; otherwise it returns
  434. // the error that should be returned by the operation.
  435. func (cs *clientStream) shouldRetry(err error) error {
  436. unprocessed := false
  437. if cs.attempt.s == nil {
  438. pioErr, ok := err.(transport.PerformedIOError)
  439. if ok {
  440. // Unwrap error.
  441. err = toRPCErr(pioErr.Err)
  442. } else {
  443. unprocessed = true
  444. }
  445. if !ok && !cs.callInfo.failFast {
  446. // In the event of a non-IO operation error from NewStream, we
  447. // never attempted to write anything to the wire, so we can retry
  448. // indefinitely for non-fail-fast RPCs.
  449. return nil
  450. }
  451. }
  452. if cs.finished || cs.committed {
  453. // RPC is finished or committed; cannot retry.
  454. return err
  455. }
  456. // Wait for the trailers.
  457. if cs.attempt.s != nil {
  458. <-cs.attempt.s.Done()
  459. unprocessed = cs.attempt.s.Unprocessed()
  460. }
  461. if cs.firstAttempt && unprocessed {
  462. // First attempt, stream unprocessed: transparently retry.
  463. return nil
  464. }
  465. if cs.cc.dopts.disableRetry {
  466. return err
  467. }
  468. pushback := 0
  469. hasPushback := false
  470. if cs.attempt.s != nil {
  471. if !cs.attempt.s.TrailersOnly() {
  472. return err
  473. }
  474. // TODO(retry): Move down if the spec changes to not check server pushback
  475. // before considering this a failure for throttling.
  476. sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
  477. if len(sps) == 1 {
  478. var e error
  479. if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
  480. channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
  481. cs.retryThrottler.throttle() // This counts as a failure for throttling.
  482. return err
  483. }
  484. hasPushback = true
  485. } else if len(sps) > 1 {
  486. channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
  487. cs.retryThrottler.throttle() // This counts as a failure for throttling.
  488. return err
  489. }
  490. }
  491. var code codes.Code
  492. if cs.attempt.s != nil {
  493. code = cs.attempt.s.Status().Code()
  494. } else {
  495. code = status.Convert(err).Code()
  496. }
  497. rp := cs.methodConfig.retryPolicy
  498. if rp == nil || !rp.retryableStatusCodes[code] {
  499. return err
  500. }
  501. // Note: the ordering here is important; we count this as a failure
  502. // only if the code matched a retryable code.
  503. if cs.retryThrottler.throttle() {
  504. return err
  505. }
  506. if cs.numRetries+1 >= rp.maxAttempts {
  507. return err
  508. }
  509. var dur time.Duration
  510. if hasPushback {
  511. dur = time.Millisecond * time.Duration(pushback)
  512. cs.numRetriesSincePushback = 0
  513. } else {
  514. fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback))
  515. cur := float64(rp.initialBackoff) * fact
  516. if max := float64(rp.maxBackoff); cur > max {
  517. cur = max
  518. }
  519. dur = time.Duration(grpcrand.Int63n(int64(cur)))
  520. cs.numRetriesSincePushback++
  521. }
  522. // TODO(dfawley): we could eagerly fail here if dur puts us past the
  523. // deadline, but unsure if it is worth doing.
  524. t := time.NewTimer(dur)
  525. select {
  526. case <-t.C:
  527. cs.numRetries++
  528. return nil
  529. case <-cs.ctx.Done():
  530. t.Stop()
  531. return status.FromContextError(cs.ctx.Err()).Err()
  532. }
  533. }
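// A hedged worked example of the backoff computation above: with
// initialBackoff = 100ms, backoffMultiplier = 2, maxBackoff = 1s and
// numRetriesSincePushback = 3, cur = 100ms * 2^3 = 800ms, and the retry is
// delayed by a random duration drawn uniformly from [0, 800ms). A single valid
// grpc-retry-pushback-ms trailer value overrides this computation and resets
// numRetriesSincePushback to 0.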
  534. // Returns nil if a retry was performed and succeeded; error otherwise.
  535. func (cs *clientStream) retryLocked(lastErr error) error {
  536. for {
  537. cs.attempt.finish(lastErr)
  538. if err := cs.shouldRetry(lastErr); err != nil {
  539. cs.commitAttemptLocked()
  540. return err
  541. }
  542. cs.firstAttempt = false
  543. if err := cs.newAttemptLocked(nil, nil); err != nil {
  544. return err
  545. }
  546. if lastErr = cs.replayBufferLocked(); lastErr == nil {
  547. return nil
  548. }
  549. }
  550. }
  551. func (cs *clientStream) Context() context.Context {
  552. cs.commitAttempt()
  553. // No need to lock before using attempt, since we know it is committed and
  554. // cannot change.
  555. return cs.attempt.s.Context()
  556. }
  557. func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
  558. cs.mu.Lock()
  559. for {
  560. if cs.committed {
  561. cs.mu.Unlock()
  562. return op(cs.attempt)
  563. }
  564. a := cs.attempt
  565. cs.mu.Unlock()
  566. err := op(a)
  567. cs.mu.Lock()
  568. if a != cs.attempt {
  569. // We started another attempt already.
  570. continue
  571. }
  572. if err == io.EOF {
  573. <-a.s.Done()
  574. }
  575. if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
  576. onSuccess()
  577. cs.mu.Unlock()
  578. return err
  579. }
  580. if err := cs.retryLocked(err); err != nil {
  581. cs.mu.Unlock()
  582. return err
  583. }
  584. }
  585. }
  586. func (cs *clientStream) Header() (metadata.MD, error) {
  587. var m metadata.MD
  588. err := cs.withRetry(func(a *csAttempt) error {
  589. var err error
  590. m, err = a.s.Header()
  591. return toRPCErr(err)
  592. }, cs.commitAttemptLocked)
  593. if err != nil {
  594. cs.finish(err)
  595. return nil, err
  596. }
  597. if cs.binlog != nil && !cs.serverHeaderBinlogged {
  598. // Only log if binary log is on and header has not been logged.
  599. logEntry := &binarylog.ServerHeader{
  600. OnClientSide: true,
  601. Header: m,
  602. PeerAddr: nil,
  603. }
  604. if peer, ok := peer.FromContext(cs.Context()); ok {
  605. logEntry.PeerAddr = peer.Addr
  606. }
  607. cs.binlog.Log(logEntry)
  608. cs.serverHeaderBinlogged = true
  609. }
  610. return m, err
  611. }
  612. func (cs *clientStream) Trailer() metadata.MD {
  613. // On RPC failure, we never need to retry, because correct usage requires that
  614. // RecvMsg() has returned a non-nil error before this function may be called.
  615. // We would have retried earlier if necessary.
  616. //
  617. // Commit the attempt anyway, just in case users are not following those
  618. // directions -- it will prevent races and should not meaningfully impact
  619. // performance.
  620. cs.commitAttempt()
  621. if cs.attempt.s == nil {
  622. return nil
  623. }
  624. return cs.attempt.s.Trailer()
  625. }
  626. func (cs *clientStream) replayBufferLocked() error {
  627. a := cs.attempt
  628. for _, f := range cs.buffer {
  629. if err := f(a); err != nil {
  630. return err
  631. }
  632. }
  633. return nil
  634. }
  635. func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
  636. // Note: we will still buffer if retry is disabled (for transparent retries).
  637. if cs.committed {
  638. return
  639. }
  640. cs.bufferSize += sz
  641. if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
  642. cs.commitAttemptLocked()
  643. return
  644. }
  645. cs.buffer = append(cs.buffer, op)
  646. }
  647. func (cs *clientStream) SendMsg(m interface{}) (err error) {
  648. defer func() {
  649. if err != nil && err != io.EOF {
  650. // Call finish on the client stream for errors generated by this SendMsg
  651. // call, as these indicate problems created by this client. (Transport
  652. // errors are converted to an io.EOF error in csAttempt.sendMsg; the real
  653. // error will be returned from RecvMsg eventually in that case, or be
  654. // retried.)
  655. cs.finish(err)
  656. }
  657. }()
  658. if cs.sentLast {
  659. return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
  660. }
  661. if !cs.desc.ClientStreams {
  662. cs.sentLast = true
  663. }
  664. // load hdr, payload, data
  665. hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
  666. if err != nil {
  667. return err
  668. }
  669. // TODO(dfawley): should we be checking len(data) instead?
  670. if len(payload) > *cs.callInfo.maxSendMessageSize {
  671. return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
  672. }
  673. msgBytes := data // Store the pointer before setting to nil. For binary logging.
  674. op := func(a *csAttempt) error {
  675. err := a.sendMsg(m, hdr, payload, data)
  676. // nil out the message and data when replaying; they are only needed for
  677. // stats, which are disabled for subsequent attempts.
  678. m, data = nil, nil
  679. return err
  680. }
  681. err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
  682. if cs.binlog != nil && err == nil {
  683. cs.binlog.Log(&binarylog.ClientMessage{
  684. OnClientSide: true,
  685. Message: msgBytes,
  686. })
  687. }
  688. return
  689. }
  690. func (cs *clientStream) RecvMsg(m interface{}) error {
  691. if cs.binlog != nil && !cs.serverHeaderBinlogged {
  692. // Call Header() to binary log header if it's not already logged.
  693. cs.Header()
  694. }
  695. var recvInfo *payloadInfo
  696. if cs.binlog != nil {
  697. recvInfo = &payloadInfo{}
  698. }
  699. err := cs.withRetry(func(a *csAttempt) error {
  700. return a.recvMsg(m, recvInfo)
  701. }, cs.commitAttemptLocked)
  702. if cs.binlog != nil && err == nil {
  703. cs.binlog.Log(&binarylog.ServerMessage{
  704. OnClientSide: true,
  705. Message: recvInfo.uncompressedBytes,
  706. })
  707. }
  708. if err != nil || !cs.desc.ServerStreams {
  709. // err != nil or non-server-streaming indicates end of stream.
  710. cs.finish(err)
  711. if cs.binlog != nil {
  712. // finish will not log Trailer. Log Trailer here.
  713. logEntry := &binarylog.ServerTrailer{
  714. OnClientSide: true,
  715. Trailer: cs.Trailer(),
  716. Err: err,
  717. }
  718. if logEntry.Err == io.EOF {
  719. logEntry.Err = nil
  720. }
  721. if peer, ok := peer.FromContext(cs.Context()); ok {
  722. logEntry.PeerAddr = peer.Addr
  723. }
  724. cs.binlog.Log(logEntry)
  725. }
  726. }
  727. return err
  728. }
  729. func (cs *clientStream) CloseSend() error {
  730. if cs.sentLast {
  731. // TODO: return an error and finish the stream instead, due to API misuse?
  732. return nil
  733. }
  734. cs.sentLast = true
  735. op := func(a *csAttempt) error {
  736. a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
  737. // Always return nil; io.EOF is the only error that might make sense
  738. // instead, but there is no need to signal the client to call RecvMsg
  739. // as the only use left for the stream after CloseSend is to call
  740. // RecvMsg. This also matches historical behavior.
  741. return nil
  742. }
  743. cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
  744. if cs.binlog != nil {
  745. cs.binlog.Log(&binarylog.ClientHalfClose{
  746. OnClientSide: true,
  747. })
  748. }
  749. // We never return an error here; see the rationale in the op comment above.
  750. return nil
  751. }
  752. func (cs *clientStream) finish(err error) {
  753. if err == io.EOF {
  754. // Ending a stream with EOF indicates a success.
  755. err = nil
  756. }
  757. cs.mu.Lock()
  758. if cs.finished {
  759. cs.mu.Unlock()
  760. return
  761. }
  762. cs.finished = true
  763. cs.commitAttemptLocked()
  764. if cs.attempt != nil {
  765. cs.attempt.finish(err)
  766. // The after functions all rely upon having a stream.
  767. if cs.attempt.s != nil {
  768. for _, o := range cs.opts {
  769. o.after(cs.callInfo, cs.attempt)
  770. }
  771. }
  772. }
  773. cs.mu.Unlock()
  774. // For binary logging: only log cancel in finish (it could be caused by the RPC
  775. // ctx being canceled or the ClientConn being closed). The trailer will be logged in RecvMsg.
  776. //
  777. // Only one of cancel or trailer needs to be logged. In the cases where
  778. // users don't call RecvMsg, users must have already canceled the RPC.
  779. if cs.binlog != nil && status.Code(err) == codes.Canceled {
  780. cs.binlog.Log(&binarylog.Cancel{
  781. OnClientSide: true,
  782. })
  783. }
  784. if err == nil {
  785. cs.retryThrottler.successfulRPC()
  786. }
  787. if channelz.IsOn() {
  788. if err != nil {
  789. cs.cc.incrCallsFailed()
  790. } else {
  791. cs.cc.incrCallsSucceeded()
  792. }
  793. }
  794. cs.cancel()
  795. }
  796. func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
  797. cs := a.cs
  798. if a.trInfo != nil {
  799. a.mu.Lock()
  800. if a.trInfo.tr != nil {
  801. a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
  802. }
  803. a.mu.Unlock()
  804. }
  805. if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
  806. if !cs.desc.ClientStreams {
  807. // For non-client-streaming RPCs, we return nil instead of EOF on error
  808. // because the generated code requires it. finish is not called; RecvMsg()
  809. // will call it with the stream's status independently.
  810. return nil
  811. }
  812. return io.EOF
  813. }
  814. if a.statsHandler != nil {
  815. a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now()))
  816. }
  817. if channelz.IsOn() {
  818. a.t.IncrMsgSent()
  819. }
  820. return nil
  821. }
  822. func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
  823. cs := a.cs
  824. if a.statsHandler != nil && payInfo == nil {
  825. payInfo = &payloadInfo{}
  826. }
  827. if !a.decompSet {
  828. // Block until we receive headers containing received message encoding.
  829. if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
  830. if a.dc == nil || a.dc.Type() != ct {
  831. // No configured decompressor, or it does not match the incoming
  832. // message encoding; attempt to find a registered compressor that does.
  833. a.dc = nil
  834. a.decomp = encoding.GetCompressor(ct)
  835. }
  836. } else {
  837. // No compression is used; disable our decompressor.
  838. a.dc = nil
  839. }
  840. // Only initialize this state once per stream.
  841. a.decompSet = true
  842. }
  843. err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
  844. if err != nil {
  845. if err == io.EOF {
  846. if statusErr := a.s.Status().Err(); statusErr != nil {
  847. return statusErr
  848. }
  849. return io.EOF // indicates successful end of stream.
  850. }
  851. return toRPCErr(err)
  852. }
  853. if a.trInfo != nil {
  854. a.mu.Lock()
  855. if a.trInfo.tr != nil {
  856. a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
  857. }
  858. a.mu.Unlock()
  859. }
  860. if a.statsHandler != nil {
  861. a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{
  862. Client: true,
  863. RecvTime: time.Now(),
  864. Payload: m,
  865. // TODO truncate large payload.
  866. Data: payInfo.uncompressedBytes,
  867. WireLength: payInfo.wireLength,
  868. Length: len(payInfo.uncompressedBytes),
  869. })
  870. }
  871. if channelz.IsOn() {
  872. a.t.IncrMsgRecv()
  873. }
  874. if cs.desc.ServerStreams {
  875. // Subsequent messages should be received by subsequent RecvMsg calls.
  876. return nil
  877. }
  878. // Special handling for non-server-stream rpcs.
  879. // This recv expects EOF or errors, so we don't collect inPayload.
  880. err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
  881. if err == nil {
  882. return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
  883. }
  884. if err == io.EOF {
  885. return a.s.Status().Err() // non-server streaming Recv returns nil on success
  886. }
  887. return toRPCErr(err)
  888. }
  889. func (a *csAttempt) finish(err error) {
  890. a.mu.Lock()
  891. if a.finished {
  892. a.mu.Unlock()
  893. return
  894. }
  895. a.finished = true
  896. if err == io.EOF {
  897. // Ending a stream with EOF indicates a success.
  898. err = nil
  899. }
  900. var tr metadata.MD
  901. if a.s != nil {
  902. a.t.CloseStream(a.s, err)
  903. tr = a.s.Trailer()
  904. }
  905. if a.done != nil {
  906. br := false
  907. if a.s != nil {
  908. br = a.s.BytesReceived()
  909. }
  910. a.done(balancer.DoneInfo{
  911. Err: err,
  912. Trailer: tr,
  913. BytesSent: a.s != nil,
  914. BytesReceived: br,
  915. ServerLoad: balancerload.Parse(tr),
  916. })
  917. }
  918. if a.statsHandler != nil {
  919. end := &stats.End{
  920. Client: true,
  921. BeginTime: a.cs.beginTime,
  922. EndTime: time.Now(),
  923. Trailer: tr,
  924. Error: err,
  925. }
  926. a.statsHandler.HandleRPC(a.cs.ctx, end)
  927. }
  928. if a.trInfo != nil && a.trInfo.tr != nil {
  929. if err == nil {
  930. a.trInfo.tr.LazyPrintf("RPC: [OK]")
  931. } else {
  932. a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
  933. a.trInfo.tr.SetError()
  934. }
  935. a.trInfo.tr.Finish()
  936. a.trInfo.tr = nil
  937. }
  938. a.mu.Unlock()
  939. }
  940. // newNonRetryClientStream creates a ClientStream with the specified transport, on the
  941. // given addrConn.
  942. //
  943. // It's expected that the given transport is either the same one in addrConn, or
  944. // is already closed. To avoid a race, the transport is specified separately, instead
  945. // of using ac.transport.
  946. //
  947. // Main differences between this and ClientConn.NewStream:
  948. // - no retry
  949. // - no service config (or wait for service config)
  950. // - no tracing or stats
  951. func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
  952. if t == nil {
  953. // TODO: return RPC error here?
  954. return nil, errors.New("transport provided is nil")
  955. }
  956. // defaultCallInfo contains unnecessary info (e.g. failFast, maxRetryRPCBufferSize), so we just initialize an empty struct.
  957. c := &callInfo{}
  958. // Possible context leak:
  959. // The cancel function for the child context we create will only be called
  960. // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
  961. // an error is generated by SendMsg.
  962. // https://github.com/grpc/grpc-go/issues/1818.
  963. ctx, cancel := context.WithCancel(ctx)
  964. defer func() {
  965. if err != nil {
  966. cancel()
  967. }
  968. }()
  969. for _, o := range opts {
  970. if err := o.before(c); err != nil {
  971. return nil, toRPCErr(err)
  972. }
  973. }
  974. c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
  975. c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
  976. if err := setCallInfoCodec(c); err != nil {
  977. return nil, err
  978. }
  979. callHdr := &transport.CallHdr{
  980. Host: ac.cc.authority,
  981. Method: method,
  982. ContentSubtype: c.contentSubtype,
  983. }
  984. // Set our outgoing compression according to the UseCompressor CallOption, if
  985. // set. In that case, also find the compressor from the encoding package.
  986. // Otherwise, use the compressor configured by the WithCompressor DialOption,
  987. // if set.
  988. var cp Compressor
  989. var comp encoding.Compressor
  990. if ct := c.compressorType; ct != "" {
  991. callHdr.SendCompress = ct
  992. if ct != encoding.Identity {
  993. comp = encoding.GetCompressor(ct)
  994. if comp == nil {
  995. return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
  996. }
  997. }
  998. } else if ac.cc.dopts.cp != nil {
  999. callHdr.SendCompress = ac.cc.dopts.cp.Type()
  1000. cp = ac.cc.dopts.cp
  1001. }
  1002. if c.creds != nil {
  1003. callHdr.Creds = c.creds
  1004. }
  1005. // Use a special addrConnStream to avoid retry.
  1006. as := &addrConnStream{
  1007. callHdr: callHdr,
  1008. ac: ac,
  1009. ctx: ctx,
  1010. cancel: cancel,
  1011. opts: opts,
  1012. callInfo: c,
  1013. desc: desc,
  1014. codec: c.codec,
  1015. cp: cp,
  1016. comp: comp,
  1017. t: t,
  1018. }
  1019. s, err := as.t.NewStream(as.ctx, as.callHdr)
  1020. if err != nil {
  1021. err = toRPCErr(err)
  1022. return nil, err
  1023. }
  1024. as.s = s
  1025. as.p = &parser{r: s}
  1026. ac.incrCallsStarted()
  1027. if desc != unaryStreamDesc {
  1028. // Listen on cc and stream contexts to clean up when the user closes the
  1029. // ClientConn or cancels the stream context. In all other cases, an error
  1030. // should already be injected into the recv buffer by the transport, which
  1031. // the client will eventually receive, and then we will cancel the stream's
  1032. // context in clientStream.finish.
  1033. go func() {
  1034. select {
  1035. case <-ac.ctx.Done():
  1036. as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
  1037. case <-ctx.Done():
  1038. as.finish(toRPCErr(ctx.Err()))
  1039. }
  1040. }()
  1041. }
  1042. return as, nil
  1043. }
  1044. type addrConnStream struct {
  1045. s *transport.Stream
  1046. ac *addrConn
  1047. callHdr *transport.CallHdr
  1048. cancel context.CancelFunc
  1049. opts []CallOption
  1050. callInfo *callInfo
  1051. t transport.ClientTransport
  1052. ctx context.Context
  1053. sentLast bool
  1054. desc *StreamDesc
  1055. codec baseCodec
  1056. cp Compressor
  1057. comp encoding.Compressor
  1058. decompSet bool
  1059. dc Decompressor
  1060. decomp encoding.Compressor
  1061. p *parser
  1062. mu sync.Mutex
  1063. finished bool
  1064. }
  1065. func (as *addrConnStream) Header() (metadata.MD, error) {
  1066. m, err := as.s.Header()
  1067. if err != nil {
  1068. as.finish(toRPCErr(err))
  1069. }
  1070. return m, err
  1071. }
  1072. func (as *addrConnStream) Trailer() metadata.MD {
  1073. return as.s.Trailer()
  1074. }
  1075. func (as *addrConnStream) CloseSend() error {
  1076. if as.sentLast {
  1077. // TODO: return an error and finish the stream instead, due to API misuse?
  1078. return nil
  1079. }
  1080. as.sentLast = true
  1081. as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
  1082. // Always return nil; io.EOF is the only error that might make sense
  1083. // instead, but there is no need to signal the client to call RecvMsg
  1084. // as the only use left for the stream after CloseSend is to call
  1085. // RecvMsg. This also matches historical behavior.
  1086. return nil
  1087. }
  1088. func (as *addrConnStream) Context() context.Context {
  1089. return as.s.Context()
  1090. }
  1091. func (as *addrConnStream) SendMsg(m interface{}) (err error) {
  1092. defer func() {
  1093. if err != nil && err != io.EOF {
  1094. // Call finish on the client stream for errors generated by this SendMsg
  1095. // call, as these indicate problems created by this client. (Transport
  1096. // errors are converted to an io.EOF error in csAttempt.sendMsg; the real
  1097. // error will be returned from RecvMsg eventually in that case, or be
  1098. // retried.)
  1099. as.finish(err)
  1100. }
  1101. }()
  1102. if as.sentLast {
  1103. return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
  1104. }
  1105. if !as.desc.ClientStreams {
  1106. as.sentLast = true
  1107. }
  1108. // load hdr, payload, data
  1109. hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
  1110. if err != nil {
  1111. return err
  1112. }
  1113. // TODO(dfawley): should we be checking len(data) instead?
  1114. if len(payld) > *as.callInfo.maxSendMessageSize {
  1115. return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
  1116. }
  1117. if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
  1118. if !as.desc.ClientStreams {
  1119. // For non-client-streaming RPCs, we return nil instead of EOF on error
  1120. // because the generated code requires it. finish is not called; RecvMsg()
  1121. // will call it with the stream's status independently.
  1122. return nil
  1123. }
  1124. return io.EOF
  1125. }
  1126. if channelz.IsOn() {
  1127. as.t.IncrMsgSent()
  1128. }
  1129. return nil
  1130. }
  1131. func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
  1132. defer func() {
  1133. if err != nil || !as.desc.ServerStreams {
  1134. // err != nil or non-server-streaming indicates end of stream.
  1135. as.finish(err)
  1136. }
  1137. }()
  1138. if !as.decompSet {
  1139. // Block until we receive headers containing received message encoding.
  1140. if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
  1141. if as.dc == nil || as.dc.Type() != ct {
  1142. // No configured decompressor, or it does not match the incoming
  1143. // message encoding; attempt to find a registered compressor that does.
  1144. as.dc = nil
  1145. as.decomp = encoding.GetCompressor(ct)
  1146. }
  1147. } else {
  1148. // No compression is used; disable our decompressor.
  1149. as.dc = nil
  1150. }
  1151. // Only initialize this state once per stream.
  1152. as.decompSet = true
  1153. }
  1154. err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
  1155. if err != nil {
  1156. if err == io.EOF {
  1157. if statusErr := as.s.Status().Err(); statusErr != nil {
  1158. return statusErr
  1159. }
  1160. return io.EOF // indicates successful end of stream.
  1161. }
  1162. return toRPCErr(err)
  1163. }
  1164. if channelz.IsOn() {
  1165. as.t.IncrMsgRecv()
  1166. }
  1167. if as.desc.ServerStreams {
  1168. // Subsequent messages should be received by subsequent RecvMsg calls.
  1169. return nil
  1170. }
  1171. // Special handling for non-server-stream rpcs.
  1172. // This recv expects EOF or errors, so we don't collect inPayload.
  1173. err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
  1174. if err == nil {
  1175. return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
  1176. }
  1177. if err == io.EOF {
  1178. return as.s.Status().Err() // non-server streaming Recv returns nil on success
  1179. }
  1180. return toRPCErr(err)
  1181. }
  1182. func (as *addrConnStream) finish(err error) {
  1183. as.mu.Lock()
  1184. if as.finished {
  1185. as.mu.Unlock()
  1186. return
  1187. }
  1188. as.finished = true
  1189. if err == io.EOF {
  1190. // Ending a stream with EOF indicates a success.
  1191. err = nil
  1192. }
  1193. if as.s != nil {
  1194. as.t.CloseStream(as.s, err)
  1195. }
  1196. if err != nil {
  1197. as.ac.incrCallsFailed()
  1198. } else {
  1199. as.ac.incrCallsSucceeded()
  1200. }
  1201. as.cancel()
  1202. as.mu.Unlock()
  1203. }
  1204. // ServerStream defines the server-side behavior of a streaming RPC.
  1205. //
  1206. // All errors returned from ServerStream methods are compatible with the
  1207. // status package.
  1208. type ServerStream interface {
  1209. // SetHeader sets the header metadata. It may be called multiple times.
  1210. // When called multiple times, all the provided metadata will be merged.
  1211. // All the metadata will be sent out when one of the following happens:
  1212. // - ServerStream.SendHeader() is called;
  1213. // - The first response is sent out;
  1214. // - An RPC status is sent out (error or success).
  1215. SetHeader(metadata.MD) error
  1216. // SendHeader sends the header metadata.
  1217. // The provided md and headers set by SetHeader() will be sent.
  1218. // It fails if called multiple times.
  1219. SendHeader(metadata.MD) error
  1220. // SetTrailer sets the trailer metadata which will be sent with the RPC status.
  1221. // When called more than once, all the provided metadata will be merged.
  1222. SetTrailer(metadata.MD)
  1223. // Context returns the context for this stream.
  1224. Context() context.Context
  1225. // SendMsg sends a message. On error, SendMsg aborts the stream and the
  1226. // error is returned directly.
  1227. //
  1228. // SendMsg blocks until:
  1229. // - There is sufficient flow control to schedule m with the transport, or
  1230. // - The stream is done, or
  1231. // - The stream breaks.
  1232. //
  1233. // SendMsg does not wait until the message is received by the client. An
  1234. // untimely stream closure may result in lost messages.
  1235. //
  1236. // It is safe to have a goroutine calling SendMsg and another goroutine
  1237. // calling RecvMsg on the same stream at the same time, but it is not safe
  1238. // to call SendMsg on the same stream in different goroutines.
  1239. SendMsg(m interface{}) error
  1240. // RecvMsg blocks until it receives a message into m or the stream is
  1241. // done. It returns io.EOF when the client has performed a CloseSend. On
  1242. // any non-EOF error, the stream is aborted and the error contains the
  1243. // RPC status.
  1244. //
  1245. // It is safe to have a goroutine calling SendMsg and another goroutine
  1246. // calling RecvMsg on the same stream at the same time, but it is not
  1247. // safe to call RecvMsg on the same stream in different goroutines.
  1248. RecvMsg(m interface{}) error
  1249. }
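// A hedged handler sketch (user code, not part of this file): an echo-style
// bidirectional-streaming handler matching the StreamHandler signature;
// pb.Msg is a hypothetical message type.
//
//	func bidiEchoHandler(srv interface{}, stream grpc.ServerStream) error {
//		for {
//			m := new(pb.Msg)
//			if err := stream.RecvMsg(m); err != nil {
//				if err == io.EOF {
//					return nil // client called CloseSend; end the RPC with an OK status
//				}
//				return err
//			}
//			if err := stream.SendMsg(m); err != nil {
//				return err
//			}
//		}
//	}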
  1250. // serverStream implements a server side Stream.
  1251. type serverStream struct {
  1252. ctx context.Context
  1253. t transport.ServerTransport
  1254. s *transport.Stream
  1255. p *parser
  1256. codec baseCodec
  1257. cp Compressor
  1258. dc Decompressor
  1259. comp encoding.Compressor
  1260. decomp encoding.Compressor
  1261. maxReceiveMessageSize int
  1262. maxSendMessageSize int
  1263. trInfo *traceInfo
  1264. statsHandler stats.Handler
  1265. binlog *binarylog.MethodLogger
  1266. // serverHeaderBinlogged indicates whether the server header has been logged. It
  1267. // will be logged the first time one of the following happens: stream.SendHeader()
  1268. // or stream.Send().
  1269. //
  1270. // It's only checked in send and sendHeader, so it doesn't need to be
  1271. // synchronized.
  1272. serverHeaderBinlogged bool
  1273. mu sync.Mutex // protects trInfo.tr after the service handler runs.
  1274. }
  1275. func (ss *serverStream) Context() context.Context {
  1276. return ss.ctx
  1277. }
  1278. func (ss *serverStream) SetHeader(md metadata.MD) error {
  1279. if md.Len() == 0 {
  1280. return nil
  1281. }
  1282. return ss.s.SetHeader(md)
  1283. }
  1284. func (ss *serverStream) SendHeader(md metadata.MD) error {
  1285. err := ss.t.WriteHeader(ss.s, md)
  1286. if ss.binlog != nil && !ss.serverHeaderBinlogged {
  1287. h, _ := ss.s.Header()
  1288. ss.binlog.Log(&binarylog.ServerHeader{
  1289. Header: h,
  1290. })
  1291. ss.serverHeaderBinlogged = true
  1292. }
  1293. return err
  1294. }
  1295. func (ss *serverStream) SetTrailer(md metadata.MD) {
  1296. if md.Len() == 0 {
  1297. return
  1298. }
  1299. ss.s.SetTrailer(md)
  1300. }
  1301. func (ss *serverStream) SendMsg(m interface{}) (err error) {
  1302. defer func() {
  1303. if ss.trInfo != nil {
  1304. ss.mu.Lock()
  1305. if ss.trInfo.tr != nil {
  1306. if err == nil {
  1307. ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
  1308. } else {
  1309. ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
  1310. ss.trInfo.tr.SetError()
  1311. }
  1312. }
  1313. ss.mu.Unlock()
  1314. }
  1315. if err != nil && err != io.EOF {
  1316. st, _ := status.FromError(toRPCErr(err))
  1317. ss.t.WriteStatus(ss.s, st)
  1318. // A non-user-specified status was sent out. This should be an error
  1319. // case (such as a server-side cancellation).
  1320. //
  1321. // This is not handled specifically now. The user will return a final
  1322. // status from the service handler, and we will log that error instead.
  1323. // This behavior is similar to an interceptor.
  1324. }
  1325. if channelz.IsOn() && err == nil {
  1326. ss.t.IncrMsgSent()
  1327. }
  1328. }()
  1329. // load hdr, payload, data
  1330. hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
  1331. if err != nil {
  1332. return err
  1333. }
  1334. // TODO(dfawley): should we be checking len(data) instead?
  1335. if len(payload) > ss.maxSendMessageSize {
  1336. return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
  1337. }
  1338. if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
  1339. return toRPCErr(err)
  1340. }
  1341. if ss.binlog != nil {
  1342. if !ss.serverHeaderBinlogged {
  1343. h, _ := ss.s.Header()
  1344. ss.binlog.Log(&binarylog.ServerHeader{
  1345. Header: h,
  1346. })
  1347. ss.serverHeaderBinlogged = true
  1348. }
  1349. ss.binlog.Log(&binarylog.ServerMessage{
  1350. Message: data,
  1351. })
  1352. }
  1353. if ss.statsHandler != nil {
  1354. ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
  1355. }
  1356. return nil
  1357. }
  1358. func (ss *serverStream) RecvMsg(m interface{}) (err error) {
  1359. defer func() {
  1360. if ss.trInfo != nil {
  1361. ss.mu.Lock()
  1362. if ss.trInfo.tr != nil {
  1363. if err == nil {
  1364. ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
  1365. } else if err != io.EOF {
  1366. ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
  1367. ss.trInfo.tr.SetError()
  1368. }
  1369. }
  1370. ss.mu.Unlock()
  1371. }
  1372. if err != nil && err != io.EOF {
  1373. st, _ := status.FromError(toRPCErr(err))
  1374. ss.t.WriteStatus(ss.s, st)
  1375. // A non-user-specified status was sent out. This should be an error
  1376. // case (such as a server-side cancellation).
  1377. //
  1378. // This is not handled specifically now. The user will return a final
  1379. // status from the service handler, and we will log that error instead.
  1380. // This behavior is similar to an interceptor.
  1381. }
  1382. if channelz.IsOn() && err == nil {
  1383. ss.t.IncrMsgRecv()
  1384. }
  1385. }()
  1386. var payInfo *payloadInfo
  1387. if ss.statsHandler != nil || ss.binlog != nil {
  1388. payInfo = &payloadInfo{}
  1389. }
  1390. if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
  1391. if err == io.EOF {
  1392. if ss.binlog != nil {
  1393. ss.binlog.Log(&binarylog.ClientHalfClose{})
  1394. }
  1395. return err
  1396. }
  1397. if err == io.ErrUnexpectedEOF {
  1398. err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
  1399. }
  1400. return toRPCErr(err)
  1401. }
  1402. if ss.statsHandler != nil {
  1403. ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
  1404. RecvTime: time.Now(),
  1405. Payload: m,
  1406. // TODO truncate large payload.
  1407. Data: payInfo.uncompressedBytes,
  1408. WireLength: payInfo.wireLength,
  1409. Length: len(payInfo.uncompressedBytes),
  1410. })
  1411. }
  1412. if ss.binlog != nil {
  1413. ss.binlog.Log(&binarylog.ClientMessage{
  1414. Message: payInfo.uncompressedBytes,
  1415. })
  1416. }
  1417. return nil
  1418. }
  1419. // MethodFromServerStream returns the method string for the input stream.
  1420. // The returned string is in the format of "/service/method".
  1421. func MethodFromServerStream(stream ServerStream) (string, bool) {
  1422. return Method(stream.Context())
  1423. }
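// A hedged example (user code): a handler or stream interceptor might log the
// method name as
//
//	if m, ok := grpc.MethodFromServerStream(stream); ok {
//		log.Printf("serving %s", m)
//	}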
  1424. // prepareMsg returns the hdr, payload and data
  1425. // using the compressors passed, or taken directly from the
  1426. // passed PreparedMsg if m is one.
  1427. func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
  1428. if preparedMsg, ok := m.(*PreparedMsg); ok {
  1429. return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
  1430. }
  1431. // The input interface is not a prepared msg.
  1432. // Marshal and compress the data at this point.
  1433. data, err = encode(codec, m)
  1434. if err != nil {
  1435. return nil, nil, nil, err
  1436. }
  1437. compData, err := compress(data, cp, comp)
  1438. if err != nil {
  1439. return nil, nil, nil, err
  1440. }
  1441. hdr, payload = msgHeader(data, compData)
  1442. return hdr, payload, data, nil
  1443. }