/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

//go:generate ./regenerate.sh

// Package grpclb defines a grpclb balancer.
//
// To install the grpclb balancer, import this package as:
//
//	import _ "google.golang.org/grpc/balancer/grpclb"
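//
// The blank import runs this package's init function, which registers the
// balancer under the name "grpclb". The balancer is then selected for a
// ClientConn whose resolver returns balancer (GRPCLB-type) addresses for the
// target.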
package grpclb

import (
	"context"
	"errors"
	"strconv"
	"strings"
	"sync"
	"time"

	durationpb "github.com/golang/protobuf/ptypes/duration"
	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
	lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal"
	"google.golang.org/grpc/internal/backoff"
	"google.golang.org/grpc/resolver"
)

const (
	lbTokeyKey             = "lb-token"
	defaultFallbackTimeout = 10 * time.Second
	grpclbName             = "grpclb"
)

var (
	// defaultBackoffConfig configures the backoff strategy that's used when the
	// init handshake in the RPC is unsuccessful. It's not for the clientconn
	// reconnect backoff.
	//
	// It has the same value as the default grpc.DefaultBackoffConfig.
	//
	// TODO: make backoff configurable.
	defaultBackoffConfig = backoff.Exponential{
		MaxDelay: 120 * time.Second,
	}
	errServerTerminatedConnection = errors.New("grpclb: failed to recv server list: server terminated connection")
)
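
// convertDuration converts a protobuf Duration into a time.Duration. A nil
// Duration converts to zero. For example, a Duration with Seconds: 1 and
// Nanos: 500000000 converts to 1500 * time.Millisecond.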
func convertDuration(d *durationpb.Duration) time.Duration {
	if d == nil {
		return 0
	}
	return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
}

// Client API for the LoadBalancer service.
// Mostly copied from the generated pb.go file to avoid a circular dependency.
type loadBalancerClient struct {
	cc *grpc.ClientConn
}

func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (*balanceLoadClientStream, error) {
	desc := &grpc.StreamDesc{
		StreamName:    "BalanceLoad",
		ServerStreams: true,
		ClientStreams: true,
	}
	stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
	if err != nil {
		return nil, err
	}
	x := &balanceLoadClientStream{stream}
	return x, nil
}

type balanceLoadClientStream struct {
	grpc.ClientStream
}

func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
	return x.ClientStream.SendMsg(m)
}

func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
	m := new(lbpb.LoadBalanceResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
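
// A sketch of how this client and its stream are typically driven (the actual
// call sites live elsewhere in this package; lbClient and req below are
// illustrative names, not identifiers defined in this file):
//
//	stream, err := lbClient.BalanceLoad(ctx)
//	if err != nil {
//		// handle error
//	}
//	if err := stream.Send(req); err != nil {
//		// handle error
//	}
//	resp, err := stream.Recv()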

func init() {
	balancer.Register(newLBBuilder())
}

// newLBBuilder creates a builder for grpclb.
func newLBBuilder() balancer.Builder {
	return newLBBuilderWithFallbackTimeout(defaultFallbackTimeout)
}

// newLBBuilderWithFallbackTimeout creates a grpclb builder with the given
// fallbackTimeout. If no response is received from the remote balancer within
// fallbackTimeout, the backend addresses from the resolved address list will be
// used.
//
// Only call this function when a non-default fallback timeout is needed.
func newLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder {
	return &lbBuilder{
		fallbackTimeout: fallbackTimeout,
	}
}

// newLBBuilderWithPickFirst creates a grpclb builder with pick-first.
func newLBBuilderWithPickFirst() balancer.Builder {
	return &lbBuilder{
		usePickFirst: true,
	}
}

type lbBuilder struct {
	fallbackTimeout time.Duration
	// TODO: delete this when balancer can handle service config. This should be
	// updated by service config.
	usePickFirst bool // Use roundrobin or pickfirst for backends.
}

func (b *lbBuilder) Name() string {
	return grpclbName
}

func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
	// This generates a manual resolver builder with a random scheme. This
	// scheme will be used to dial to remote LB, so we can send filtered address
	// updates to remote LB ClientConn using this manual resolver.
	scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36)
	r := &lbManualResolver{scheme: scheme, ccb: cc}
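
	// cc.Target() may carry a scheme prefix. For example, a hypothetical
	// target "dns:///lb.example.com:443" is split on ":///" below, yielding
	// "lb.example.com:443"; targets without that separator are used as-is.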
	var target string
	targetSplitted := strings.Split(cc.Target(), ":///")
	if len(targetSplitted) < 2 {
		target = cc.Target()
	} else {
		target = targetSplitted[1]
	}

	lb := &lbBalancer{
		cc:              newLBCacheClientConn(cc),
		target:          target,
		opt:             opt,
		usePickFirst:    b.usePickFirst,
		fallbackTimeout: b.fallbackTimeout,
		doneCh:          make(chan struct{}),

		manualResolver: r,
		subConns:       make(map[resolver.Address]balancer.SubConn),
		scStates:       make(map[balancer.SubConn]connectivity.State),
		picker:         &errPicker{err: balancer.ErrNoSubConnAvailable},
		clientStats:    newRPCStats(),
		backoff:        defaultBackoffConfig, // TODO: make backoff configurable.
	}

	var err error
	if opt.CredsBundle != nil {
		lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer)
		if err != nil {
			grpclog.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err)
		}
		lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer)
		if err != nil {
			grpclog.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err)
		}
	}

	return lb
}

type lbBalancer struct {
	cc     *lbCacheClientConn
	target string
	opt    balancer.BuildOptions

	usePickFirst bool

	// grpclbClientConnCreds is the creds bundle to be used to connect to grpclb
	// servers. If it's nil, use the TransportCredentials from BuildOptions
	// instead.
	grpclbClientConnCreds credentials.Bundle
	// grpclbBackendCreds is the creds bundle to be used for addresses that are
	// returned by grpclb server. If it's nil, don't set anything when creating
	// SubConns.
	grpclbBackendCreds credentials.Bundle

	fallbackTimeout time.Duration
	doneCh          chan struct{}

	// manualResolver is used in the remote LB ClientConn inside grpclb. When
	// resolved address updates are received by grpclb, filtered updates will be
	// sent to the remote LB ClientConn through this resolver.
	manualResolver *lbManualResolver
	// The ClientConn to talk to the remote balancer.
	ccRemoteLB *grpc.ClientConn
	// backoff for calling remote balancer.
	backoff backoff.Strategy

	// Support client side load reporting. Each picker gets a reference to this,
	// and will update its content.
	clientStats *rpcStats

	mu sync.Mutex // guards everything following.
	// The full server list including drops, used to check if the newly received
	// serverList contains anything new. Each generated picker will also have
	// a reference to this list to do the first layer pick.
	fullServerList []*lbpb.Server
	// All backend addresses, with metadata set to nil. This list contains all
	// backend addresses in the same order and with the same duplicates as in
	// serverlist. When generating the picker, a SubConn slice with the same
	// order but with only READY SCs will be generated.
	backendAddrs []resolver.Address
	// Roundrobin functionalities.
	state    connectivity.State
	subConns map[resolver.Address]balancer.SubConn   // Used to new/remove SubConn.
	scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
	picker   balancer.Picker
	// Support fallback to resolved backend addresses if there's no response
	// from remote balancer within fallbackTimeout.
	remoteBalancerConnected bool
	serverListReceived      bool
	inFallback              bool
	// resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set
	// when resolved address updates are received, and read in the goroutine
	// handling fallback.
	resolvedBackendAddrs []resolver.Address
}

// regeneratePicker takes a snapshot of the balancer, and generates a picker
// from it. The picker
//  - always returns ErrTransientFailure if the balancer is in TransientFailure,
//  - does two layer roundrobin pick otherwise.
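//
// "Two layer" here means: the first layer walks the full server list
// (including drop entries) to decide whether a pick should be dropped, and the
// second layer round-robins over the READY SubConns for picks that are not
// dropped.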
//
// Caller must hold lb.mu.
func (lb *lbBalancer) regeneratePicker(resetDrop bool) {
	if lb.state == connectivity.TransientFailure {
		lb.picker = &errPicker{err: balancer.ErrTransientFailure}
		return
	}

	if lb.state == connectivity.Connecting {
		lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
		return
	}

	var readySCs []balancer.SubConn
	if lb.usePickFirst {
		for _, sc := range lb.subConns {
			readySCs = append(readySCs, sc)
			break
		}
	} else {
		for _, a := range lb.backendAddrs {
			if sc, ok := lb.subConns[a]; ok {
				if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
					readySCs = append(readySCs, sc)
				}
			}
		}
	}

	if len(readySCs) <= 0 {
		// If there are no ready SubConns, always re-pick. This is to avoid
		// drops unless at least one SubConn is ready. Otherwise we may drop
		// more often than we want because of drops + re-picks (which become
		// re-drops).
		//
		// This doesn't seem to be necessary after the connecting check above.
		// Kept for safety.
		lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
		return
	}

	if lb.inFallback {
		lb.picker = newRRPicker(readySCs)
		return
	}

	if resetDrop {
		lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats)
		return
	}

	prevLBPicker, ok := lb.picker.(*lbPicker)
	if !ok {
		lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats)
		return
	}
	prevLBPicker.updateReadySCs(readySCs)
}

// aggregateSubConnStates calculates the aggregated state of the SubConns in
// lb.subConns. These SubConns are the subconns in use (when switching between
// fallback and grpclb). lb.scStates contains states for all SubConns,
// including those in cache (SubConns are cached for 10 seconds after being
// removed).
//
// The aggregated state is:
//  - If at least one SubConn is Ready, the aggregated state is Ready;
//  - Else if at least one SubConn is Connecting, the aggregated state is Connecting;
//  - Else the aggregated state is TransientFailure.
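//
// For example, SubConn states {Connecting, Ready} aggregate to Ready, {Idle,
// Connecting} aggregate to Connecting, and {Idle, TransientFailure} aggregate
// to TransientFailure.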
func (lb *lbBalancer) aggregateSubConnStates() connectivity.State {
	var numConnecting uint64
	for _, sc := range lb.subConns {
		if state, ok := lb.scStates[sc]; ok {
			switch state {
			case connectivity.Ready:
				return connectivity.Ready
			case connectivity.Connecting:
				numConnecting++
			}
		}
	}
	if numConnecting > 0 {
		return connectivity.Connecting
	}
	return connectivity.TransientFailure
}

func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
	if grpclog.V(2) {
		grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
	}
	lb.mu.Lock()
	defer lb.mu.Unlock()

	oldS, ok := lb.scStates[sc]
	if !ok {
		if grpclog.V(2) {
			grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
		}
		return
	}
	lb.scStates[sc] = s
	switch s {
	case connectivity.Idle:
		sc.Connect()
	case connectivity.Shutdown:
		// When an address was removed by the resolver, the balancer called
		// RemoveSubConn but kept the sc's state in scStates. Remove state for
		// this sc here.
		delete(lb.scStates, sc)
	}
	// Force regenerate picker if
	//  - this sc became ready from not-ready
	//  - this sc became not-ready from ready
	lb.updateStateAndPicker((oldS == connectivity.Ready) != (s == connectivity.Ready), false)

	// Enter fallback when the aggregated state is not Ready and the connection
	// to the remote balancer is lost.
	if lb.state != connectivity.Ready {
		if !lb.inFallback && !lb.remoteBalancerConnected {
			// Enter fallback.
			lb.refreshSubConns(lb.resolvedBackendAddrs, false)
		}
	}
}

// updateStateAndPicker re-calculates the aggregated state, and regenerates the
// picker if the overall state has changed.
//
// If forceRegeneratePicker is true, the picker is always regenerated.
func (lb *lbBalancer) updateStateAndPicker(forceRegeneratePicker bool, resetDrop bool) {
	oldAggrState := lb.state
	lb.state = lb.aggregateSubConnStates()
	// Regenerate picker when one of the following happens:
	//  - the caller wants to regenerate
	//  - the aggregated state changed
	if forceRegeneratePicker || (lb.state != oldAggrState) {
		lb.regeneratePicker(resetDrop)
	}

	lb.cc.UpdateBalancerState(lb.state, lb.picker)
}

// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to using
// the resolved backends (backends received from the resolver, not from the
// remote balancer) if no connection to a remote balancer was successful.
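//
// With the default configuration this means: if no server list has been
// received within defaultFallbackTimeout (10 seconds) of the first resolver
// update, the balancer serves picks from the resolver-provided backend
// addresses until the remote balancer starts responding.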
func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) {
	timer := time.NewTimer(fallbackTimeout)
	defer timer.Stop()
	select {
	case <-timer.C:
	case <-lb.doneCh:
		return
	}
	lb.mu.Lock()
	if lb.inFallback || lb.serverListReceived {
		lb.mu.Unlock()
		return
	}
	// Enter fallback.
	lb.refreshSubConns(lb.resolvedBackendAddrs, false)
	lb.mu.Unlock()
}

// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB
// clientConn. The remoteLB clientConn will handle creating/removing remoteLB
// connections.
func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
	if grpclog.V(2) {
		grpclog.Infof("lbBalancer: handleResolvedResult: %+v", addrs)
	}
	if len(addrs) <= 0 {
		return
	}

	var remoteBalancerAddrs, backendAddrs []resolver.Address
	for _, a := range addrs {
		if a.Type == resolver.GRPCLB {
			a.Type = resolver.Backend
			remoteBalancerAddrs = append(remoteBalancerAddrs, a)
		} else {
			backendAddrs = append(backendAddrs, a)
		}
	}

	if lb.ccRemoteLB == nil {
		if len(remoteBalancerAddrs) <= 0 {
			grpclog.Errorf("grpclb: no remote balancer address is available, should never happen")
			return
		}
		// First time receiving resolved addresses, create a cc to remote
		// balancers.
		lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName)
		// Start the fallback goroutine.
		go lb.fallbackToBackendsAfter(lb.fallbackTimeout)
	}

	// cc to remote balancers uses lb.manualResolver. Send the updated remote
	// balancer addresses to it through manualResolver.
	lb.manualResolver.UpdateState(resolver.State{Addresses: remoteBalancerAddrs})

	lb.mu.Lock()
	lb.resolvedBackendAddrs = backendAddrs
	if lb.inFallback {
		// This means we received a new list of resolved backends, and we are
		// still in fallback mode. Need to update the list of backends we are
		// using to the new list of backends.
		lb.refreshSubConns(lb.resolvedBackendAddrs, false)
	}
	lb.mu.Unlock()
}

func (lb *lbBalancer) Close() {
	select {
	case <-lb.doneCh:
		return
	default:
	}
	close(lb.doneCh)
	if lb.ccRemoteLB != nil {
		lb.ccRemoteLB.Close()
	}
	lb.cc.close()
}