les: make clientPool.connectedBias configurable (#21305)
This commit is contained in:
parent 3a52c4dcf2
commit 4366c45e4e

Changed files: les/api.go (12 lines), les/clientpool.go, les/clientpool_test.go
les/api.go
@@ -202,6 +202,18 @@ func (api *PrivateLightServerAPI) SetDefaultParams(params map[string]interface{}
 	return err
 }
 
+// SetConnectedBias sets the connection bias, which is applied to already connected clients
+// so that an already connected client won't be kicked out very soon, ensuring all
+// connected clients have enough time to request or sync some data.
+// If the given `bias` is negative (illegal), an error is returned.
+func (api *PrivateLightServerAPI) SetConnectedBias(bias time.Duration) error {
+	if bias < time.Duration(0) {
+		return fmt.Errorf("bias illegal: %v less than 0", bias)
+	}
+	api.server.clientPool.setConnectedBias(bias)
+	return nil
+}
+
 // Benchmark runs a request performance benchmark with a given set of measurement setups
 // in multiple passes specified by passCount. The measurement time for each setup in each
 // pass is specified in milliseconds by length.
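Not part of the commit: a minimal sketch of exercising the new endpoint from Go over a node's IPC socket. It assumes the method is exposed as les_setConnectedBias (the les namespace used for the other PrivateLightServerAPI methods); the IPC path is a placeholder.

package main

import (
	"log"
	"time"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder IPC path; point this at the light server's geth.ipc.
	client, err := rpc.Dial("/path/to/geth.ipc")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// time.Duration is an int64 nanosecond count, so it marshals as a plain JSON number.
	if err := client.Call(nil, "les_setConnectedBias", time.Minute); err != nil {
		log.Fatal(err)
	}
}

Since SetConnectedBias rejects negative durations, the call above only succeeds for biases >= 0.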
les/clientpool.go
@@ -42,15 +42,7 @@ const (
 	persistCumulativeTimeRefresh = time.Minute * 5  // refresh period of the cumulative running time persistence
 	posBalanceCacheLimit         = 8192             // the maximum number of cached items in positive balance queue
 	negBalanceCacheLimit         = 8192             // the maximum number of cached items in negative balance queue
-
-	// connectedBias is applied to already connected clients So that
-	// already connected client won't be kicked out very soon and we
-	// can ensure all connected clients can have enough time to request
-	// or sync some data.
-	//
-	// todo(rjl493456442) make it configurable. It can be the option of
-	// free trial time!
-	connectedBias = time.Minute * 3
+	defaultConnectedBias         = time.Minute * 3  // the default connectedBias used in clientPool
 )
 
 // clientPool implements a client database that assigns a priority to each client
@@ -94,7 +86,7 @@ type clientPool struct {
 	freeClientCap     uint64         // The capacity value of each free client
 	startTime         mclock.AbsTime // The timestamp at which the clientpool started running
 	cumulativeTime    int64          // The cumulative running time of clientpool at the start point.
-	disableBias       bool           // Disable connection bias(used in testing)
+	connectedBias     time.Duration  // The connection bias. 0 disables the bias (used in testing)
 }
 
 // clientPoolPeer represents a client peer in the pool.
@@ -171,6 +163,7 @@ func newClientPool(db ethdb.Database, freeClientCap uint64, clock mclock.Clock,
 		startTime:      clock.Now(),
 		cumulativeTime: ndb.getCumulativeTime(),
 		stopCh:         make(chan struct{}),
+		connectedBias:  defaultConnectedBias,
 	}
 	// If the negative balance of free client is even lower than 1,
 	// delete this entry.
@@ -279,11 +272,7 @@ func (f *clientPool) connect(peer clientPoolPeer, capacity uint64) bool {
 			newCount--
 			return newCapacity > f.capLimit || newCount > f.connLimit
 		})
-		bias := connectedBias
-		if f.disableBias {
-			bias = 0
-		}
-		if newCapacity > f.capLimit || newCount > f.connLimit || (e.balanceTracker.estimatedPriority(now+mclock.AbsTime(bias), false)-kickPriority) > 0 {
+		if newCapacity > f.capLimit || newCount > f.connLimit || (e.balanceTracker.estimatedPriority(now+mclock.AbsTime(f.connectedBias), false)-kickPriority) > 0 {
 			for _, c := range kickList {
 				f.connectedQueue.Push(c)
 			}
@@ -371,6 +360,16 @@ func (f *clientPool) setDefaultFactors(posFactors, negFactors priceFactors) {
 	f.defaultNegFactors = negFactors
 }
 
+// setConnectedBias sets the connection bias, which is applied to already connected clients
+// so that an already connected client won't be kicked out very soon and all connected
+// clients have enough time to request or sync some data.
+func (f *clientPool) setConnectedBias(bias time.Duration) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	f.connectedBias = bias
+}
+
 // dropClient removes a client from the connected queue and finalizes its balance.
 // If kick is true then it also initiates the disconnection.
 func (f *clientPool) dropClient(e *clientInfo, now mclock.AbsTime, kick bool) {
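For illustration only, a hypothetical read accessor sketched to show the locking convention setConnectedBias establishes; the commit itself does not need it, since connect reads f.connectedBias while already holding the pool lock.

// connectedBiasValue returns the current bias under the pool lock.
// Hypothetical helper, not part of this commit.
func (f *clientPool) connectedBiasValue() time.Duration {
	f.lock.Lock()
	defer f.lock.Unlock()
	return f.connectedBias
}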
les/clientpool_test.go
@@ -91,7 +91,7 @@ func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomD
 		}
 		pool = newClientPool(db, 1, &clock, disconnFn)
 	)
-	pool.disableBias = true
+	pool.setConnectedBias(0)
 	pool.setLimits(connLimit, uint64(connLimit))
 	pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
 
@@ -248,7 +248,7 @@ func TestPaidClientKickedOut(t *testing.T) {
 		clock.Run(time.Millisecond)
 	}
 	clock.Run(time.Second)
-	clock.Run(connectedBias)
+	clock.Run(defaultConnectedBias)
 	if !pool.connect(poolTestPeer(11), 0) {
 		t.Fatalf("Free client should be accectped")
 	}
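A further test could cover the new setter directly. The sketch below is hypothetical and assumes the helpers visible in les/clientpool_test.go (mclock.Simulated, a memory database, the newClientPool signature shown above, and clientPool.stop):

// TestSetConnectedBias is a hypothetical addition, not part of this commit.
func TestSetConnectedBias(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := newClientPool(db, 1, &clock, func(id enode.ID) {})
	defer pool.stop()

	if pool.connectedBias != defaultConnectedBias {
		t.Fatalf("default bias mismatch: have %v, want %v", pool.connectedBias, defaultConnectedBias)
	}
	pool.setConnectedBias(time.Minute)
	if pool.connectedBias != time.Minute {
		t.Fatalf("bias not updated: have %v, want %v", pool.connectedBias, time.Minute)
	}
}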