wireguard-go/src/send.go

package main

import (
	"encoding/binary"
	"net"
	"sync"

	"golang.org/x/crypto/chacha20poly1305"
)

/* Handles the outbound flow
 *
 * 1. TUN queue
 * 2. Routing (sequential)
 * 3. Nonce assignment (sequential)
 * 4. Encryption (parallel)
 * 5. Transmission (sequential)
 *
 * The order of packets (per peer) is maintained.
 * The functions in this file occur (roughly) in the order packets are processed.
 */
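
// Roughly, RoutineReadFromTUN covers stages 1 and 2 (reading and routing),
// RoutineNonce covers stage 3, RoutineEncryption covers stage 4 and
// RoutineSequential covers stage 5 (transmission to the peer endpoint).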

/* A work unit
 *
 * The sequential consumer will attempt to take the lock;
 * workers release the lock when they have completed work on the packet.
 */
type QueueOutboundElement struct {
	mutex   sync.Mutex
	packet  []byte
	nonce   uint64
	keyPair *KeyPair
}
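
// The mutex is locked by RoutineNonce when an element is created and
// released by a RoutineEncryption worker once the packet has been sealed
// (or dropped); RoutineSequential blocks on Lock to consume elements in order.
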
func (peer *Peer) FlushNonceQueue() {
	elems := len(peer.queue.nonce)
	for i := 0; i < elems; i++ {
		select {
		case <-peer.queue.nonce:
		default:
			return
		}
	}
}

func (peer *Peer) InsertOutbound(elem *QueueOutboundElement) {
	for {
		select {
		case peer.queue.outbound <- elem:
			return
		default:
			select {
			case <-peer.queue.outbound:
			default:
			}
		}
	}
}

/* Reads packets from the TUN and inserts
 * into nonce queue for peer
 *
 * Obs. Single instance per TUN device
 */
func (device *Device) RoutineReadFromTUN(tun TUNDevice) {
	device.log.Debug.Println("Routine, TUN Reader: started")
	for {
		// read packet
		device.log.Debug.Println("Read")
		packet := make([]byte, 1<<16) // TODO: Fix & avoid dynamic allocation
		size, err := tun.Read(packet)
		if err != nil {
			device.log.Error.Println("Failed to read packet from TUN device:", err)
			continue
		}
		packet = packet[:size]
		if len(packet) < IPv4headerSize {
			device.log.Error.Println("Packet too short, length:", len(packet))
			continue
		}

		// lookup peer

		var peer *Peer
		switch packet[0] >> 4 {
		case IPv4version:
			dst := packet[IPv4offsetDst : IPv4offsetDst+net.IPv4len]
			peer = device.routingTable.LookupIPv4(dst)
			device.log.Debug.Println("New IPv4 packet:", packet, dst)

		case IPv6version:
			dst := packet[IPv6offsetDst : IPv6offsetDst+net.IPv6len]
			peer = device.routingTable.LookupIPv6(dst)
			device.log.Debug.Println("New IPv6 packet:", packet, dst)

		default:
			device.log.Debug.Println("Received packet with unknown IP version")
			continue
		}

		if peer == nil {
			device.log.Debug.Println("No peer configured for IP")
			continue
		}
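
		// If the nonce (pre-handshake) queue is full, the oldest queued
		// packet is dropped to make room for the new one.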
		// insert into nonce/pre-handshake queue
		for {
			select {
			case peer.queue.nonce <- packet:
			default:
				select {
				case <-peer.queue.nonce:
				default:
				}
				continue
			}
			break
		}
	}
}

/* Queues packets when there is no handshake.
 * Then assigns nonces to packets sequentially
 * and creates "work" structs for workers
 *
 * TODO: Avoid dynamic allocation of work queue elements
 *
 * Obs. A single instance per peer
 */
func (peer *Peer) RoutineNonce() {
	var packet []byte
	var keyPair *KeyPair

	for {
		// wait for packet
		if packet == nil {
			select {
			case packet = <-peer.queue.nonce:
			case <-peer.signal.stopSending:
				close(peer.queue.outbound)
				return
			}
		}
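
		// Without a current key pair the routine asks for a handshake and
		// waits; a flush signal discards the queued packets instead.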
		// wait for key pair
		for keyPair == nil {
			peer.signal.newHandshake <- true
			select {
			case <-peer.keyPairs.newKeyPair:
				keyPair = peer.keyPairs.Current()
				continue
			case <-peer.signal.flushNonceQueue:
				peer.FlushNonceQueue()
				packet = nil
				continue
			case <-peer.signal.stopSending:
				close(peer.queue.outbound)
				return
			}
		}

		// process current packet
		if packet != nil {

			// create work element
			work := new(QueueOutboundElement) // TODO: profile, maybe use pool
			work.keyPair = keyPair
			work.packet = packet
			work.nonce = keyPair.sendNonce
			work.mutex.Lock()

			packet = nil
			keyPair.sendNonce++

			// drop packets until there is space
			func() {
				for {
					select {
					case peer.device.queue.encryption <- work:
						return
					default:
						drop := <-peer.device.queue.encryption
						drop.packet = nil
						drop.mutex.Unlock()
					}
				}
			}()
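
			// The element is handed to the per-peer outbound queue while still
			// locked, so RoutineSequential blocks until encryption completes.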
			peer.queue.outbound <- work
		}
	}
}

/* Encrypts the elements in the queue
 * and marks them for sequential consumption (by releasing the mutex)
 *
 * Obs. One instance per core
 */
func (device *Device) RoutineEncryption() {
	var nonce [chacha20poly1305.NonceSize]byte
	for work := range device.queue.encryption {

		// pad packet
		padding := device.mtu - len(work.packet)
		if padding < 0 {
			// drop
			work.packet = nil
			work.mutex.Unlock()
			continue
		}
		for n := 0; n < padding; n++ {
			work.packet = append(work.packet, 0)
		}
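
		// The 8-byte send counter fills the last 8 bytes of the 12-byte
		// ChaCha20-Poly1305 nonce; the leading 4 bytes are left zero.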

		// encrypt
		binary.LittleEndian.PutUint64(nonce[4:], work.nonce)
		work.packet = work.keyPair.send.Seal(
			work.packet[:0],
			nonce[:],
			work.packet,
			nil,
		)
		work.mutex.Unlock()
	}
}

/* Sequentially reads packets from queue and sends to endpoint
 *
 * Obs. Single instance per peer.
 * The routine terminates when the outbound queue is closed.
 */
func (peer *Peer) RoutineSequential() {
	for work := range peer.queue.outbound {
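		// Lock blocks until the encryption worker has released the element,
		// so packets are sent in the order their nonces were assigned.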
		work.mutex.Lock()
		func() {
			peer.mutex.RLock()
			defer peer.mutex.RUnlock()
			if work.packet == nil {
				return
			}
			if peer.endpoint == nil {
				return
			}
			_, err := peer.device.conn.WriteToUDP(work.packet, peer.endpoint)
			if err != nil {
				peer.device.log.Error.Println("Failed to send packet:", err)
			}
			peer.timer.sendKeepalive.Reset(peer.persistentKeepaliveInterval)
		}()
		work.mutex.Unlock()
	}
}