Improved readability of send/receive code
parent 89d0045214
commit f212795e51
@@ -128,7 +128,7 @@ func (device *Device) RoutineReceiveIncomming() {
 
 		// read next datagram
 
-		size, raddr, err := conn.ReadFromUDP(buffer[:]) // Blocks sometimes
+		size, raddr, err := conn.ReadFromUDP(buffer[:])
 
 		if err != nil {
 			break
@@ -222,7 +222,7 @@ func (device *Device) RoutineReceiveIncomming() {
 	}
 }
 
 func (device *Device) RoutineDecryption() {
-	var elem *QueueInboundElement
 	var nonce [chacha20poly1305.NonceSize]byte
 
 	logDebug := device.log.Debug
@@ -230,10 +230,11 @@ func (device *Device) RoutineDecryption() {
 
 	for {
 		select {
-		case elem = <-device.queue.decryption:
 		case <-device.signal.stop:
+			logDebug.Println("Routine, decryption worker, stopped")
 			return
-		}
+		case elem := <-device.queue.decryption:
 
 			// check if dropped
 
@@ -248,7 +249,6 @@ func (device *Device) RoutineDecryption() {
 
 			// decrypt with key-pair
 
-			var err error
 			copy(nonce[4:], counter)
 			elem.counter = binary.LittleEndian.Uint64(counter)
 			elem.keyPair.receive.mutex.RLock()
@@ -256,6 +256,7 @@ func (device *Device) RoutineDecryption() {
 				// very unlikely (the key was deleted during queuing)
 				elem.Drop()
 			} else {
+				var err error
 				elem.packet, err = elem.keyPair.receive.aead.Open(
 					elem.buffer[:0],
 					nonce[:],
@@ -266,14 +267,14 @@ func (device *Device) RoutineDecryption() {
 					elem.Drop()
 				}
 			}
 
 			elem.keyPair.receive.mutex.RUnlock()
 			elem.mutex.Unlock()
 		}
+	}
 }
 
 /* Handles incomming packets related to handshake
- *
- *
 */
 func (device *Device) RoutineHandshake() {
 
@@ -473,7 +474,6 @@ func (device *Device) RoutineHandshake() {
 }
 
 func (peer *Peer) RoutineSequentialReceiver() {
-	var elem *QueueInboundElement
 
 	device := peer.device
 
@@ -483,17 +483,17 @@ func (peer *Peer) RoutineSequentialReceiver() {
 	logDebug.Println("Routine, sequential receiver, started for peer", peer.id)
 
 	for {
-		// wait for decryption
 
 		select {
 		case <-peer.signal.stop:
+			logDebug.Println("Routine, sequential receiver, stopped for peer", peer.id)
 			return
-		case elem = <-peer.queue.inbound:
-		}
+		case elem := <-peer.queue.inbound:
 
+			// wait for decryption
 
 			elem.mutex.Lock()
 
-			// process packet
-
 			if elem.IsDropped() {
 				continue
 			}
@@ -597,4 +597,5 @@ func (peer *Peer) RoutineSequentialReceiver() {
 			logError.Println("Failed to write packet to TUN device:", err)
 		}
 	}
+	}
 }
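The core of the change on the receive side is visible in the decryption and sequential-receiver hunks: instead of assigning the received element to a variable declared outside the loop and then falling through the select, each worker now does its processing inside the receive case and logs before returning from the stop case. Below is a minimal standalone sketch of that select pattern, using made-up channel and function names rather than the WireGuard types.

// Sketch only: a worker that processes items inside the receive case and
// exits cleanly from the stop case. Names are illustrative, not the
// repository's own identifiers.
package main

import "fmt"

func worker(work <-chan int, stop <-chan struct{}, done chan<- struct{}) {
	defer close(done)
	for {
		select {
		case <-stop:
			fmt.Println("worker stopped")
			return

		case elem := <-work:
			// all processing happens inside the case, so elem is
			// scoped to a single iteration of the loop
			fmt.Println("processed element", elem)
		}
	}
}

func main() {
	work := make(chan int)
	stop := make(chan struct{})
	done := make(chan struct{})
	go worker(work, stop, done)
	work <- 1
	work <- 2
	close(stop)
	<-done
}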
src/send.go | 81
@@ -35,7 +35,7 @@ type QueueOutboundElement struct {
 	dropped int32
 	mutex   sync.Mutex
 	buffer  *[MaxMessageSize]byte // slice holding the packet data
-	packet  []byte                // slice of "data" (always!)
+	packet  []byte                // slice of "buffer" (always!)
 	nonce   uint64                // nonce for encryption
 	keyPair *KeyPair              // key-pair for encryption
 	peer    *Peer                 // related peer
@@ -52,11 +52,6 @@ func (peer *Peer) FlushNonceQueue() {
 	}
 }
 
-var (
-	ErrorNoEndpoint   = errors.New("No known endpoint for peer")
-	ErrorNoConnection = errors.New("No UDP socket for device")
-)
-
 func (device *Device) NewOutboundElement() *QueueOutboundElement {
 	return &QueueOutboundElement{
 		dropped: AtomicFalse,
@@ -118,14 +113,13 @@ func (peer *Peer) SendBuffer(buffer []byte) (int, error) {
 	defer peer.mutex.RUnlock()
 
 	endpoint := peer.endpoint
-	conn := peer.device.net.conn
-
 	if endpoint == nil {
-		return 0, ErrorNoEndpoint
+		return 0, errors.New("No known endpoint for peer")
 	}
 
+	conn := peer.device.net.conn
 	if conn == nil {
-		return 0, ErrorNoConnection
+		return 0, errors.New("No UDP socket for device")
 	}
 
 	return conn.WriteToUDP(buffer, endpoint)
@@ -189,16 +183,6 @@ func (device *Device) RoutineReadFromTUN() {
 			continue
 		}
 
-		// check if known endpoint (drop early)
-
-		peer.mutex.RLock()
-		if peer.endpoint == nil {
-			peer.mutex.RUnlock()
-			logDebug.Println("No known endpoint for peer", peer.String())
-			continue
-		}
-		peer.mutex.RUnlock()
-
 		// insert into nonce/pre-handshake queue
 
 		signalSend(peer.signal.handshakeReset)
@@ -211,86 +195,61 @@ func (device *Device) RoutineReadFromTUN() {
 * Then assigns nonces to packets sequentially
 * and creates "work" structs for workers
 *
-* TODO: Avoid dynamic allocation of work queue elements
-*
 * Obs. A single instance per peer
 */
 func (peer *Peer) RoutineNonce() {
 	var keyPair *KeyPair
-	var elem *QueueOutboundElement
 
 	device := peer.device
 	logDebug := device.log.Debug
 	logDebug.Println("Routine, nonce worker, started for peer", peer.String())
 
-	func() {
 
 	for {
 	NextPacket:
 
-		// wait for packet
-
-		if elem == nil {
 		select {
-		case elem = <-peer.queue.nonce:
 		case <-peer.signal.stop:
 			return
-		}
-		}
+		case elem := <-peer.queue.nonce:
 
 			// wait for key pair
 
 			for {
-				select {
-				case <-peer.signal.newKeyPair:
-				default:
-				}
-
 				keyPair = peer.keyPairs.Current()
 				if keyPair != nil && keyPair.sendNonce < RejectAfterMessages {
 					if time.Now().Sub(keyPair.created) < RejectAfterTime {
 						break
					}
 				}
 
 				signalSend(peer.signal.handshakeBegin)
 				logDebug.Println("Awaiting key-pair for", peer.String())
 
 				select {
 				case <-peer.signal.newKeyPair:
-					logDebug.Println("Key-pair negotiated for", peer.String())
-					goto NextPacket
 
 				case <-peer.signal.flushNonceQueue:
 					logDebug.Println("Clearing queue for", peer.String())
 					peer.FlushNonceQueue()
-					elem = nil
 					goto NextPacket
 
 				case <-peer.signal.stop:
 					return
 				}
 			}
 
-			// process current packet
+			// populate work element
 
-			if elem != nil {
-
-				// create work element
-
-				elem.keyPair = keyPair
-				elem.nonce = atomic.AddUint64(&keyPair.sendNonce, 1) - 1
-				elem.dropped = AtomicFalse
 			elem.peer = peer
+			elem.nonce = atomic.AddUint64(&keyPair.sendNonce, 1) - 1
+			elem.keyPair = keyPair
+			elem.dropped = AtomicFalse
 			elem.mutex.Lock()
 
 			// add to parallel and sequential queue
 
 			addToEncryptionQueue(device.queue.encryption, elem)
 			addToOutboundQueue(peer.queue.outbound, elem)
-			elem = nil
 			}
 		}
-	}()
 }
 
 /* Encrypts the elements in the queue
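The nonce-assignment line retained by the hunk above relies on atomic.AddUint64 returning the post-increment value; subtracting one yields the counter value this packet owns, so concurrent callers can never be handed the same nonce twice. A small self-contained sketch of that behaviour, with illustrative names only:

// Sketch only: atomic.AddUint64(&counter, 1) - 1 hands each caller the
// previous counter value, giving unique, sequential nonces under concurrency.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var sendNonce uint64 // starts at 0, like a fresh key-pair counter
	var wg sync.WaitGroup
	seen := make([]uint64, 4)

	for i := range seen {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			seen[i] = atomic.AddUint64(&sendNonce, 1) - 1
		}(i)
	}
	wg.Wait()
	fmt.Println(seen) // some permutation of [0 1 2 3]; no value repeats
}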
@@ -300,7 +259,6 @@ func (peer *Peer) RoutineNonce() {
 */
 func (device *Device) RoutineEncryption() {
 
-	var elem *QueueOutboundElement
 	var nonce [chacha20poly1305.NonceSize]byte
 
 	logDebug := device.log.Debug
@@ -311,11 +269,11 @@ func (device *Device) RoutineEncryption() {
 		// fetch next element
 
 		select {
-		case elem = <-device.queue.encryption:
 		case <-device.signal.stop:
 			logDebug.Println("Routine, encryption worker, stopped")
 			return
-		}
+		case elem := <-device.queue.encryption:
 
 			// check if dropped
 
@@ -335,15 +293,14 @@ func (device *Device) RoutineEncryption() {
 			binary.LittleEndian.PutUint32(fieldReceiver, elem.keyPair.remoteIndex)
 			binary.LittleEndian.PutUint64(fieldNonce, elem.nonce)
 
-			// pad content to MTU size
+			// pad content to multiple of 16
 
 			mtu := int(atomic.LoadInt32(&device.tun.mtu))
-			pad := len(elem.packet) % PaddingMultiple
-			if pad > 0 {
-				for i := 0; i < PaddingMultiple-pad && len(elem.packet) < mtu; i++ {
+			rem := len(elem.packet) % PaddingMultiple
+			if rem > 0 {
+				for i := 0; i < PaddingMultiple-rem && len(elem.packet) < mtu; i++ {
 					elem.packet = append(elem.packet, 0)
 				}
-				// TODO: How good is this code
 			}
 
 			// encrypt content (append to header)
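The renamed pad/rem variable in the hunk above implements a simple rule: append zero bytes until the packet length is a multiple of PaddingMultiple (16 here), but never let padding push the packet past the MTU. A short sketch of that rule using a hypothetical helper, not the repository's actual function:

// Sketch only: pad a packet to the next multiple of the padding size,
// capped at the MTU. paddingMultiple and pad are illustrative names.
package main

import "fmt"

const paddingMultiple = 16

func pad(packet []byte, mtu int) []byte {
	rem := len(packet) % paddingMultiple
	if rem > 0 {
		for i := 0; i < paddingMultiple-rem && len(packet) < mtu; i++ {
			packet = append(packet, 0)
		}
	}
	return packet
}

func main() {
	fmt.Println(len(pad(make([]byte, 21), 1420)))   // 32: rounded up to a multiple of 16
	fmt.Println(len(pad(make([]byte, 32), 1420)))   // 32: already aligned, unchanged
	fmt.Println(len(pad(make([]byte, 1415), 1420))) // 1420: padding stops at the MTU
}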
@@ -361,13 +318,14 @@ func (device *Device) RoutineEncryption() {
 				nil,
 			)
 			}
-			elem.keyPair.send.mutex.RUnlock()
 			elem.mutex.Unlock()
+			elem.keyPair.send.mutex.RUnlock()
 
 			// refresh key if necessary
 
 			elem.peer.KeepKeyFreshSending()
 		}
+	}
 }
 
 /* Sequentially reads packets from queue and sends to endpoint
@@ -399,6 +357,7 @@ func (peer *Peer) RoutineSequentialSender() {
 		_, err := peer.SendBuffer(elem.packet)
 		device.PutMessageBuffer(elem.buffer)
 		if err != nil {
+			logDebug.Println("Failed to send authenticated packet to peer", peer.String())
 			continue
 		}
 		atomic.AddUint64(&peer.stats.txBytes, length)