device: use channel close to shut down and drain outbound channel
This is a similar treatment to the handling of the encryption channel
found a few commits ago: use the closing of the channel to manage
goroutine lifetime and shutdown. It is considerably simpler because
there is only a single writer.

Signed-off-by: Josh Bleecher Snyder <josh@tailscale.com>
parent 63066ce406
commit 2832e96339
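The pattern in play, sketched minimally before the diff (the channel, element type, and goroutine roles below are illustrative stand-ins, not wireguard-go's actual types): the sole writer closes the channel when it is finished, and the reader's range loop doubles as both the processing loop and the shutdown drain.

	// close_drain_sketch.go: a minimal sketch of close-driven shutdown
	// with a single writer. Names here (outbound, producer/consumer)
	// are illustrative, not wireguard-go's.
	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		outbound := make(chan int, 16)
		var stopped sync.WaitGroup
		stopped.Add(1)

		// Consumer: ranging over the channel processes every element
		// queued before the close, then exits. No stop channel, ok
		// check, or separate drain loop is needed.
		go func() {
			defer stopped.Done()
			for elem := range outbound {
				fmt.Println("sent", elem)
			}
		}()

		// Producer: the sole writer. Because there is exactly one
		// writer, it can safely close the channel when done; with
		// multiple writers, a close would risk a send-on-closed-channel
		// panic.
		for i := 0; i < 3; i++ {
			outbound <- i
		}
		close(outbound) // no more writes to this channel

		stopped.Wait() // consumer has drained and exited
	}

Contrast this with the code removed below, which needed a stop channel, an ok check on each receive, and a separate best-effort drain loop in a defer.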
@@ -17,7 +17,7 @@ import (
 )
 
 const (
-	PeerRoutineNumber = 3
+	PeerRoutineNumber = 2
 )
 
 type Peer struct {
@@ -287,7 +287,6 @@ func (peer *Peer) Stop() {
 
 	peer.queue.Lock()
 	close(peer.queue.nonce)
-	close(peer.queue.outbound)
 	close(peer.queue.inbound)
 	peer.queue.Unlock()
 
@@ -372,6 +372,7 @@ func (peer *Peer) RoutineNonce() {
 		logDebug.Println(peer, "- Routine: nonce worker - stopped")
 		peer.queue.packetInNonceQueueIsAwaitingKey.Set(false)
 		device.queue.encryption.wg.Done() // no more writes from us
+		close(peer.queue.outbound) // no more writes to this channel
 		peer.routines.stopping.Done()
 	}()
 
@@ -545,46 +546,26 @@ func (peer *Peer) RoutineSequentialSender() {
 	logDebug := device.log.Debug
 	logError := device.log.Error
 
-	defer func() {
-		for {
-			select {
-			case elem, ok := <-peer.queue.outbound:
-				if ok {
-					elem.Lock()
-					if !elem.IsDropped() {
-						device.PutMessageBuffer(elem.buffer)
-						elem.Drop()
-					}
-					device.PutOutboundElement(elem)
-				}
-			default:
-				goto out
-			}
-		}
-	out:
-		logDebug.Println(peer, "- Routine: sequential sender - stopped")
-		peer.routines.stopping.Done()
-	}()
+	defer logDebug.Println(peer, "- Routine: sequential sender - stopped")
 
 	logDebug.Println(peer, "- Routine: sequential sender - started")
 
-	for {
-		select {
-
-		case <-peer.routines.stop:
-			return
-
-		case elem, ok := <-peer.queue.outbound:
-
-			if !ok {
-				return
-			}
-
-			elem.Lock()
-			if elem.IsDropped() {
-				device.PutOutboundElement(elem)
-				continue
-			}
+	for elem := range peer.queue.outbound {
+		elem.Lock()
+		if elem.IsDropped() {
+			device.PutOutboundElement(elem)
+			continue
+		}
+		if !peer.isRunning.Get() {
+			// peer has been stopped; return re-usable elems to the shared pool.
+			// This is an optimization only. It is possible for the peer to be stopped
+			// immediately after this check, in which case, elem will get processed.
+			// The timers and SendBuffer code are resilient to a few stragglers.
+			// TODO(josharian): rework peer shutdown order to ensure
+			// that we never accidentally keep timers alive longer than necessary.
+			device.PutMessageBuffer(elem.buffer)
+			device.PutOutboundElement(elem)
+			continue
+		}
 
 		peer.timersAnyAuthenticatedPacketTraversal()
 		peer.timersAnyAuthenticatedPacketSent()
@@ -604,5 +585,4 @@ func (peer *Peer) RoutineSequentialSender() {
 
-			peer.keepKeyFreshSending()
-		}
-	}
+		peer.keepKeyFreshSending()
+	}
 }