// wireguard-go/send.go
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2017-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
package main
import (
"encoding/binary"
"golang.org/x/crypto/chacha20poly1305"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
"net"
"sync"
"sync/atomic"
"time"
)
/* Outbound flow
 *
 * 1. TUN queue
 * 2. Routing (sequential)
 * 3. Nonce assignment (sequential)
 * 4. Encryption (parallel)
 * 5. Transmission (sequential)
 *
 * The functions in this file occur (roughly) in the order in
 * which the packets are processed.
 *
 * Locking, Producers and Consumers
 *
 * The order of packets (per peer) must be maintained,
 * but encryption of packets happens out-of-order:
 *
 * The sequential consumers will attempt to take the lock,
 * workers release the lock when they have completed work (encryption) on the packet.
 *
 * If the element is inserted into the "encryption queue",
 * the content is preceded by enough "junk" to contain the transport header
 * (to allow the construction of transport messages in-place)
 */
// QueueOutboundElement carries one outbound packet through the
// nonce -> encryption -> sequential-sender pipeline.
type QueueOutboundElement struct {
	dropped int32                 // atomic flag (AtomicTrue/AtomicFalse): element was evicted from a full queue
	mutex   sync.Mutex            // held while encryption is in flight; unlocked to release the element to the sender
	buffer  *[MaxMessageSize]byte // slice holding the packet data
	packet  []byte                // slice of "buffer" (always!)
	nonce   uint64                // nonce for encryption
	keyPair *KeyPair              // key-pair for encryption
	peer    *Peer                 // related peer
}
func (peer *Peer) flushNonceQueue() {
elems := len(peer.queue.nonce)
for i := 0; i < elems; i++ {
select {
case <-peer.queue.nonce:
default:
return
}
}
2017-06-27 15:33:06 +00:00
}
// NewOutboundElement fetches a message buffer from the device pool and
// wraps it in a fresh, not-dropped outbound queue element.
func (device *Device) NewOutboundElement() *QueueOutboundElement {
	elem := new(QueueOutboundElement)
	elem.dropped = AtomicFalse
	elem.buffer = device.pool.messageBuffers.Get().(*[MaxMessageSize]byte)
	return elem
}
// Drop marks the element as discarded; the encryption and sending
// workers check this flag and skip dropped elements.
func (elem *QueueOutboundElement) Drop() {
	atomic.StoreInt32(&elem.dropped, AtomicTrue)
}
// IsDropped reports whether Drop has been called on the element.
func (elem *QueueOutboundElement) IsDropped() bool {
	return atomic.LoadInt32(&elem.dropped) == AtomicTrue
}
// addToOutboundQueue inserts element into queue. If the queue is full
// it evicts the oldest queued element (marking it dropped) to make
// room, then retries until the insert succeeds.
func addToOutboundQueue(
	queue chan *QueueOutboundElement,
	element *QueueOutboundElement,
) {
	for {
		// fast path: room available
		select {
		case queue <- element:
			return
		default:
		}
		// queue full: drop the oldest element, if any, and retry
		select {
		case old := <-queue:
			old.Drop()
		default:
		}
	}
}
func addToEncryptionQueue(
queue chan *QueueOutboundElement,
element *QueueOutboundElement,
) {
for {
select {
case queue <- element:
return
default:
select {
case old := <-queue:
2017-08-25 12:53:23 +00:00
// drop & release to potential consumer
2017-07-08 21:51:26 +00:00
old.Drop()
old.mutex.Unlock()
default:
}
}
}
}
/* Reads packets from the TUN and inserts
 * into nonce queue for peer
 *
 * Obs. Single instance per TUN device
 */
func (device *Device) RoutineReadFromTUN() {

	elem := device.NewOutboundElement()

	logDebug := device.log.Debug
	logError := device.log.Error

	defer func() {
		logDebug.Println("Routine: TUN reader - stopped")
	}()

	logDebug.Println("Routine: TUN reader - started")

	for {

		// read packet; the offset reserves room in the buffer for the
		// transport header so the message can later be built in-place
		offset := MessageTransportHeaderSize
		size, err := device.tun.device.Read(elem.buffer[:], offset)
		if err != nil {
			// a TUN read error is fatal for the whole device
			logError.Println("Failed to read packet from TUN device:", err)
			device.Close()
			return
		}

		if size == 0 || size > MaxContentSize {
			continue
		}

		elem.packet = elem.buffer[offset : offset+size]

		// lookup peer by destination address (IP version from first nibble)
		var peer *Peer
		switch elem.packet[0] >> 4 {
		case ipv4.Version:
			if len(elem.packet) < ipv4.HeaderLen {
				continue
			}
			dst := elem.packet[IPv4offsetDst : IPv4offsetDst+net.IPv4len]
			peer = device.routing.table.LookupIPv4(dst)

		case ipv6.Version:
			if len(elem.packet) < ipv6.HeaderLen {
				continue
			}
			dst := elem.packet[IPv6offsetDst : IPv6offsetDst+net.IPv6len]
			peer = device.routing.table.LookupIPv6(dst)

		default:
			logDebug.Println("Received packet with unknown IP version")
		}

		if peer == nil {
			continue
		}

		// insert into nonce/pre-handshake queue; only then hand off the
		// element and allocate a fresh one (otherwise elem is reused for
		// the next read)
		if peer.isRunning.Get() {
			peer.event.handshakePushDeadline.Fire()
			addToOutboundQueue(peer.queue.nonce, elem)
			elem = device.NewOutboundElement()
		}
	}
}
/* Queues packets when there is no handshake.
 * Then assigns nonces to packets sequentially
 * and creates "work" structs for workers
 *
 * Obs. A single instance per peer
 */
func (peer *Peer) RoutineNonce() {
	var keyPair *KeyPair

	device := peer.device
	logDebug := device.log.Debug

	defer func() {
		peer.routines.stopping.Done()
		logDebug.Println(peer, ": Routine: nonce worker - stopped")
	}()

	peer.routines.starting.Done()
	logDebug.Println(peer, ": Routine: nonce worker - started")

	for {
	NextPacket:
		select {
		case <-peer.routines.stop.Wait():
			return

		case elem, ok := <-peer.queue.nonce:
			if !ok {
				return
			}

			// wait for a usable key pair: one that exists, has nonces
			// left (RejectAfterMessages), and is young enough
			// (RejectAfterTime); otherwise trigger a handshake and block
			for {
				peer.event.newKeyPair.Clear()
				keyPair = peer.keyPairs.Current()
				if keyPair != nil && keyPair.sendNonce < RejectAfterMessages {
					if time.Now().Sub(keyPair.created) < RejectAfterTime {
						break
					}
				}
				peer.event.handshakeBegin.Fire()
				logDebug.Println(peer, ": Awaiting key-pair")

				select {
				case <-peer.event.newKeyPair.C:
					logDebug.Println(peer, ": Obtained awaited key-pair")
				case <-peer.signal.flushNonceQueue:
					// queue is being flushed; abandon this element
					goto NextPacket
				case <-peer.routines.stop.Wait():
					return
				}
			}

			// populate work element; sendNonce is reserved atomically so
			// each element gets a unique, monotonically increasing nonce
			elem.peer = peer
			elem.nonce = atomic.AddUint64(&keyPair.sendNonce, 1) - 1
			elem.keyPair = keyPair
			elem.dropped = AtomicFalse
			// lock is held until an encryption worker finishes the element
			elem.mutex.Lock()

			// add to parallel (encryption) and sequential (outbound) queue
			addToEncryptionQueue(device.queue.encryption, elem)
			addToOutboundQueue(peer.queue.outbound, elem)
		}
	}
}
/* Encrypts the elements in the queue
 * and marks them for sequential consumption (by releasing the mutex)
 *
 * Obs. One instance per core
 */
func (device *Device) RoutineEncryption() {

	var nonce [chacha20poly1305.NonceSize]byte

	logDebug := device.log.Debug

	defer func() {
		device.state.stopping.Done()
		logDebug.Println("Routine: encryption worker - stopped")
	}()

	logDebug.Println("Routine: encryption worker - started")

	for {
		// fetch next element
		select {
		case <-device.signal.stop.Wait():
			return

		case elem, ok := <-device.queue.encryption:
			if !ok {
				return
			}

			// check if dropped (evicted while queued); its mutex was
			// already unlocked by addToEncryptionQueue in that case
			if elem.IsDropped() {
				continue
			}

			// populate header fields in the space reserved ahead of the
			// packet content (in-place transport message construction)
			header := elem.buffer[:MessageTransportHeaderSize]

			fieldType := header[0:4]
			fieldReceiver := header[4:8]
			fieldNonce := header[8:16]

			binary.LittleEndian.PutUint32(fieldType, MessageTransportType)
			binary.LittleEndian.PutUint32(fieldReceiver, elem.keyPair.remoteIndex)
			binary.LittleEndian.PutUint64(fieldNonce, elem.nonce)

			// pad content to multiple of 16, but never beyond the MTU
			mtu := int(atomic.LoadInt32(&device.tun.mtu))
			rem := len(elem.packet) % PaddingMultiple
			if rem > 0 {
				for i := 0; i < PaddingMultiple-rem && len(elem.packet) < mtu; i++ {
					elem.packet = append(elem.packet, 0)
				}
			}

			// encrypt content (appended after header) and release the
			// element to the sequential sender by unlocking its mutex
			binary.LittleEndian.PutUint64(nonce[4:], elem.nonce)
			elem.packet = elem.keyPair.send.Seal(
				header,
				nonce[:],
				elem.packet,
				nil,
			)
			elem.mutex.Unlock()
		}
	}
}
/* Sequentially reads packets from queue and sends to endpoint
 *
 * Obs. Single instance per peer.
 * The routine terminates then the outbound queue is closed.
 */
func (peer *Peer) RoutineSequentialSender() {

	device := peer.device

	logDebug := device.log.Debug

	defer func() {
		peer.routines.stopping.Done()
		logDebug.Println(peer, ": Routine: sequential sender - stopped")
	}()

	logDebug.Println(peer, ": Routine: sequential sender - started")

	peer.routines.starting.Done()

	for {
		select {
		case <-peer.routines.stop.Wait():
			return

		case elem, ok := <-peer.queue.outbound:
			if !ok {
				return
			}

			// blocks until an encryption worker unlocks the element,
			// preserving per-peer packet order
			elem.mutex.Lock()
			if elem.IsDropped() {
				continue
			}

			// send message and return buffer to pool
			length := uint64(len(elem.packet))
			err := peer.SendBuffer(elem.packet)
			device.PutMessageBuffer(elem.buffer)
			if err != nil {
				logDebug.Println("Failed to send authenticated packet to peer", peer)
				continue
			}
			atomic.AddUint64(&peer.stats.txBytes, length)

			// update timers; keepalives do not count as data traffic
			peer.event.anyAuthenticatedPacketTraversal.Fire()
			if len(elem.packet) != MessageKeepaliveSize {
				peer.event.dataSent.Fire()
			}
			peer.KeepKeyFreshSending()
		}
	}
}