@@ -1431,10 +1431,6 @@ func (js *jetStream) monitorCluster() {
 			aq.recycle(&ces)
 
 		case isLeader = <-lch:
-			// For meta layer synchronize everyone to our state on becoming leader.
-			if isLeader && n.ApplyQ().len() == 0 {
-				n.SendSnapshot(js.metaSnapshot())
-			}
 			// Process the change.
 			js.processLeaderChange(isLeader)
 			if isLeader {
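With the snapshot-on-election block removed, the case above reduces to receiving the new leadership state from lch and handing it to processLeaderChange. For readers unfamiliar with the pattern, here is a minimal, self-contained Go sketch of a monitor loop driven by a leadership channel; monitor, lch, and quit are illustrative stand-ins, not the server's actual wiring:

package main

import (
	"fmt"
	"time"
)

// monitor consumes leadership changes from lch until quit is closed,
// mirroring the select-driven loop shape in monitorCluster above.
func monitor(lch <-chan bool, quit <-chan struct{}) {
	for {
		select {
		case isLeader := <-lch:
			// Process the change.
			fmt.Println("leader:", isLeader)
		case <-quit:
			return
		}
	}
}

func main() {
	lch := make(chan bool, 1)
	quit := make(chan struct{})
	go monitor(lch, quit)
	lch <- true
	time.Sleep(10 * time.Millisecond) // give the goroutine time to print
	close(quit)
}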
@@ -2129,8 +2125,32 @@ func (js *jetStream) createRaftGroup(accName string, rg *raftGroup, storage Stor
 	}
 
 	// Check if we already have this assigned.
+retry:
 	if node := s.lookupRaftNode(rg.Name); node != nil {
+		if node.State() == Closed {
+			// We're waiting for this node to finish shutting down before we replace it.
+			js.mu.Unlock()
+			node.WaitForStop()
+			js.mu.Lock()
+			goto retry
+		}
 		s.Debugf("JetStream cluster already has raft group %q assigned", rg.Name)
+		// Check and see if the group has the same peers. If not then we
+		// will update the known peers, which will send a peerstate if leader.
+		groupPeerIDs := append([]string{}, rg.Peers...)
+		var samePeers bool
+		if nodePeers := node.Peers(); len(rg.Peers) == len(nodePeers) {
+			nodePeerIDs := make([]string, 0, len(nodePeers))
+			for _, n := range nodePeers {
+				nodePeerIDs = append(nodePeerIDs, n.ID)
+			}
+			slices.Sort(groupPeerIDs)
+			slices.Sort(nodePeerIDs)
+			samePeers = slices.Equal(groupPeerIDs, nodePeerIDs)
+		}
+		if !samePeers {
+			node.UpdateKnownPeers(groupPeerIDs)
+		}
 		rg.node = node
 		js.mu.Unlock()
 		return nil
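The new peer check compares the assigned group's peer IDs against the node's current peers as order-insensitive sets, and only calls UpdateKnownPeers when they differ. A minimal standalone sketch of that comparison using the standard slices package (Go 1.21+); samePeerIDs is an illustrative helper, not a function in the server:

package main

import (
	"fmt"
	"slices"
)

// samePeerIDs reports whether two peer ID lists hold exactly the same
// members regardless of order. Inputs are copied before sorting so the
// caller's slices are left untouched.
func samePeerIDs(groupPeers, nodePeers []string) bool {
	if len(groupPeers) != len(nodePeers) {
		return false
	}
	a := append([]string{}, groupPeers...)
	b := append([]string{}, nodePeers...)
	slices.Sort(a)
	slices.Sort(b)
	return slices.Equal(a, b)
}

func main() {
	fmt.Println(samePeerIDs([]string{"S1", "S2", "S3"}, []string{"S3", "S1", "S2"})) // true
	fmt.Println(samePeerIDs([]string{"S1", "S2"}, []string{"S1", "S4"}))             // false
}

Sorting copies keeps the check O(n log n) without mutating rg.Peers itself, which matters in the diff above because the sorted copy is what gets handed to UpdateKnownPeers.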
@@ -8959,17 +8979,6 @@ func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) {
 	// mset.store never changes after being set, don't need lock.
 	mset.store.FastState(&state)
 
-	// Reset notion of first if this request wants sequences before our starting sequence
-	// and we would have nothing to send. If we have partial messages still need to send skips for those.
-	// We will keep sreq's first sequence to not create sequence mismatches on the follower, but we extend the last to our current state.
-	if sreq.FirstSeq < state.FirstSeq && state.FirstSeq > sreq.LastSeq {
-		s.Debugf("Catchup for stream '%s > %s' resetting request first sequence from %d to %d",
-			mset.account(), mset.name(), sreq.FirstSeq, state.FirstSeq)
-		if state.LastSeq > sreq.LastSeq {
-			sreq.LastSeq = state.LastSeq
-		}
-	}
-
 	// Setup sequences to walk through.
 	seq, last := sreq.FirstSeq, sreq.LastSeq
 	mset.setCatchupPeer(sreq.Peer, last-seq)
@@ -9133,25 +9142,10 @@ func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) {
 			if drOk && dr.First > 0 {
 				sendDR()
 			}
-			// Check for a condition where our state's first is now past the last that we could have sent.
-			// If so reset last and continue sending.
-			var state StreamState
-			mset.mu.RLock()
-			mset.store.FastState(&state)
-			mset.mu.RUnlock()
-			if last < state.FirstSeq {
-				last = state.LastSeq
-			}
-			// Recheck our exit condition.
-			if seq == last {
-				if drOk && dr.First > 0 {
-					sendDR()
-				}
-				s.Noticef("Catchup for stream '%s > %s' complete", mset.account(), mset.name())
-				// EOF
-				s.sendInternalMsgLocked(sendSubject, _EMPTY_, nil, nil)
-				return false
-			}
+			s.Noticef("Catchup for stream '%s > %s' complete", mset.account(), mset.name())
+			// EOF
+			s.sendInternalMsgLocked(sendSubject, _EMPTY_, nil, nil)
+			return false
 		}
 		select {
 		case <-remoteQuitCh:
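After this simplification the sender no longer re-reads store state to extend last; once seq reaches last it logs completion and publishes an empty message as the EOF marker. A rough standalone sketch of that protocol shape; runCatchupSketch and send are simplified placeholders, not the server's API:

package main

import "fmt"

// runCatchupSketch sends every sequence in [first, last], then an empty
// payload as the EOF marker, matching the simplified exit path above.
func runCatchupSketch(first, last uint64, send func(payload []byte)) {
	for seq := first; seq <= last; seq++ {
		send([]byte(fmt.Sprintf("msg %d", seq)))
	}
	// EOF: an empty message tells the follower the catchup is complete.
	send(nil)
}

func main() {
	runCatchupSketch(1, 3, func(p []byte) {
		if len(p) == 0 {
			fmt.Println("EOF")
			return
		}
		fmt.Println(string(p))
	})
}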