2019-02-04 16:29:52 +00:00
|
|
|
/* SPDX-License-Identifier: MIT
|
|
|
|
*
|
|
|
|
* Copyright (C) 2018-2019 WireGuard LLC. All Rights Reserved.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package tun
|
|
|
|
|
|
|
|
import (
|
|
|
|
"errors"
|
2019-05-02 22:11:59 +00:00
|
|
|
"fmt"
|
2019-02-04 16:29:52 +00:00
|
|
|
"os"
|
2019-07-11 08:35:47 +00:00
|
|
|
"sync/atomic"
|
2019-03-20 20:45:40 +00:00
|
|
|
"time"
|
2019-02-22 15:16:14 +00:00
|
|
|
"unsafe"
|
2019-02-04 16:29:52 +00:00
|
|
|
|
|
|
|
"golang.org/x/sys/windows"
|
2019-07-05 05:54:25 +00:00
|
|
|
|
2019-02-19 17:49:24 +00:00
|
|
|
"golang.zx2c4.com/wireguard/tun/wintun"
|
2019-02-04 16:29:52 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// rateMeasurementGranularity is the length of one throughput measurement
	// period, in nanoseconds (half a second).
	rateMeasurementGranularity = uint64((time.Second / 2) / time.Nanosecond)
	// spinloopRateThreshold is the measured throughput, in bytes per second,
	// above which Read busy-spins for the next packet instead of sleeping on
	// the TailMoved event.
	spinloopRateThreshold = 800000000 / 8 // 800mbps
	// spinloopDuration is the maximum time Read spins before falling back to
	// a kernel wait, in nanoseconds.
	spinloopDuration = uint64(time.Millisecond / 80 / time.Nanosecond) // ~1gbit/s
)
|
|
|
|
|
2019-08-18 09:49:37 +00:00
|
|
|
// rateJuggler tracks the recent packet throughput of the device so that Read
// can decide whether busy-spinning for the next packet is worthwhile.
// All fields are accessed atomically; see update().
type rateJuggler struct {
	current       uint64 // most recently computed rate, in bytes per second
	nextByteCount uint64 // bytes accumulated during the current measurement period
	nextStartTime int64  // nanotime() at which the current period started
	changing      int32  // CAS guard so only one goroutine rolls the period over
}
|
|
|
|
|
2019-02-28 23:05:57 +00:00
|
|
|
// NativeTun is the Windows TUN device, backed by a Wintun interface and a
// pair of shared rings (Send is consumed by Read, Receive is produced into
// by Write).
type NativeTun struct {
	wt        *wintun.Interface
	handle    windows.Handle // handle returned by registering the rings with the driver
	close     bool           // set by Close(); polled by Read()/Write()
	rings     wintun.RingDescriptor
	events    chan Event
	errors    chan error
	forcedMTU int // MTU reported by MTU(); set via ForceMTU
	rate      rateJuggler
}
|
|
|
|
|
2019-08-29 16:00:44 +00:00
|
|
|
// WintunPool is the Wintun adapter pool in which WireGuard interfaces are
// looked up and created.
const WintunPool = wintun.Pool("WireGuard")
|
|
|
|
|
2019-08-18 09:49:37 +00:00
|
|
|
// procyield spins the CPU for the given number of cycles; linked directly to
// the Go runtime's implementation for use in Read's spinloop.
//go:linkname procyield runtime.procyield
func procyield(cycles uint32)
|
|
|
|
|
|
|
|
// nanotime returns the runtime's monotonic clock reading, in nanoseconds;
// linked directly to the Go runtime's implementation.
//go:linkname nanotime runtime.nanotime
func nanotime() int64
|
|
|
|
|
2019-03-04 13:27:16 +00:00
|
|
|
//
// CreateTUN creates a Wintun interface with the given name. Should a Wintun
// interface with the same name exist, it is reused.
//
func CreateTUN(ifname string) (Device, error) {
	// Delegate with no requested GUID; one is chosen by the system.
	return CreateTUNWithRequestedGUID(ifname, nil)
}
|
|
|
|
|
|
|
|
//
|
2019-08-29 18:20:40 +00:00
|
|
|
// CreateTUNWithRequestedGUID creates a Wintun interface with the given name and
|
|
|
|
// a requested GUID. Should a Wintun interface with the same name exist, it is reused.
|
2019-06-09 17:20:17 +00:00
|
|
|
//
|
2019-06-10 21:33:40 +00:00
|
|
|
func CreateTUNWithRequestedGUID(ifname string, requestedGUID *windows.GUID) (Device, error) {
|
2019-05-02 22:11:59 +00:00
|
|
|
var err error
|
2019-08-29 18:20:40 +00:00
|
|
|
var wt *wintun.Interface
|
2019-05-02 22:11:59 +00:00
|
|
|
|
2019-05-09 08:11:15 +00:00
|
|
|
// Does an interface with this name already exist?
|
2019-08-29 16:00:44 +00:00
|
|
|
wt, err = WintunPool.GetInterface(ifname)
|
2019-05-17 14:06:02 +00:00
|
|
|
if err == nil {
|
|
|
|
// If so, we delete it, in case it has weird residual configuration.
|
2019-06-10 09:02:18 +00:00
|
|
|
_, err = wt.DeleteInterface()
|
2019-02-06 21:30:14 +00:00
|
|
|
if err != nil {
|
2019-05-17 14:06:02 +00:00
|
|
|
return nil, fmt.Errorf("Unable to delete already existing Wintun interface: %v", err)
|
2019-02-06 21:30:14 +00:00
|
|
|
}
|
2019-02-07 21:02:51 +00:00
|
|
|
}
|
2019-08-29 23:42:28 +00:00
|
|
|
wt, _, err = WintunPool.CreateInterface(ifname, requestedGUID)
|
2019-03-31 08:17:11 +00:00
|
|
|
if err != nil {
|
2019-05-17 14:06:02 +00:00
|
|
|
return nil, fmt.Errorf("Unable to create Wintun interface: %v", err)
|
2019-03-31 08:17:11 +00:00
|
|
|
}
|
2019-02-07 21:02:51 +00:00
|
|
|
|
2019-07-11 08:35:47 +00:00
|
|
|
tun := &NativeTun{
|
2019-03-18 08:42:00 +00:00
|
|
|
wt: wt,
|
2019-07-23 09:45:48 +00:00
|
|
|
handle: windows.InvalidHandle,
|
2019-06-10 21:33:40 +00:00
|
|
|
events: make(chan Event, 10),
|
2019-03-18 08:42:00 +00:00
|
|
|
errors: make(chan error, 1),
|
2019-05-16 08:33:47 +00:00
|
|
|
forcedMTU: 1500,
|
2019-07-11 08:35:47 +00:00
|
|
|
}
|
|
|
|
|
2019-08-29 18:47:16 +00:00
|
|
|
err = tun.rings.Init()
|
2019-07-11 08:35:47 +00:00
|
|
|
if err != nil {
|
2019-07-19 11:51:56 +00:00
|
|
|
tun.Close()
|
2019-08-29 18:47:16 +00:00
|
|
|
return nil, fmt.Errorf("Error creating events: %v", err)
|
2019-07-11 08:35:47 +00:00
|
|
|
}
|
|
|
|
|
2019-08-29 18:47:16 +00:00
|
|
|
tun.handle, err = tun.wt.Register(&tun.rings)
|
2019-07-11 08:35:47 +00:00
|
|
|
if err != nil {
|
2019-07-19 11:51:56 +00:00
|
|
|
tun.Close()
|
|
|
|
return nil, fmt.Errorf("Error registering rings: %v", err)
|
2019-07-11 08:35:47 +00:00
|
|
|
}
|
2019-07-19 11:51:56 +00:00
|
|
|
return tun, nil
|
2019-02-20 12:12:08 +00:00
|
|
|
}
|
|
|
|
|
2019-02-28 23:05:57 +00:00
|
|
|
func (tun *NativeTun) Name() (string, error) {
|
2019-08-29 18:20:40 +00:00
|
|
|
return tun.wt.Name()
|
2019-02-04 16:29:52 +00:00
|
|
|
}
|
|
|
|
|
2019-02-28 23:05:57 +00:00
|
|
|
// File returns nil: this device is not backed by an *os.File.
func (tun *NativeTun) File() *os.File {
	return nil
}
|
|
|
|
|
2019-06-10 21:33:40 +00:00
|
|
|
// Events returns the channel on which device events are delivered.
// The channel is closed by Close().
func (tun *NativeTun) Events() chan Event {
	return tun.events
}
|
|
|
|
|
2019-02-28 23:05:57 +00:00
|
|
|
// Close tears the device down: it flags the device as closed, wakes any
// reader blocked in Read, closes the driver registration handle and the
// rings, deletes the Wintun interface, and closes the events channel.
// The teardown order is deliberate; NOTE(review): tun.close is a plain bool
// also read by Read/Write without synchronization — relies on the documented
// single-threaded Read/Write assumption.
func (tun *NativeTun) Close() error {
	tun.close = true
	if tun.rings.Send.TailMoved != 0 {
		windows.SetEvent(tun.rings.Send.TailMoved) // wake the reader if it's sleeping
	}
	if tun.handle != windows.InvalidHandle {
		windows.CloseHandle(tun.handle)
	}
	tun.rings.Close()
	var err error
	if tun.wt != nil {
		// Only the interface-deletion error is reported to the caller.
		_, err = tun.wt.DeleteInterface()
	}
	close(tun.events)
	return err
}
|
|
|
|
|
2019-02-28 23:05:57 +00:00
|
|
|
func (tun *NativeTun) MTU() (int, error) {
|
2019-05-16 08:33:47 +00:00
|
|
|
return tun.forcedMTU, nil
|
2019-03-13 08:52:32 +00:00
|
|
|
}
|
|
|
|
|
2019-06-06 21:00:15 +00:00
|
|
|
// TODO: This is a temporary hack. We really need to be monitoring the interface in real time and adapting to MTU changes.

// ForceMTU overrides the value subsequently reported by MTU().
func (tun *NativeTun) ForceMTU(mtu int) {
	tun.forcedMTU = mtu
}
|
|
|
|
|
2019-07-11 08:35:47 +00:00
|
|
|
// Note: Read() and Write() assume the caller comes only from a single thread; there's no locking.

// Read copies the next packet from the send ring into buff at offset and
// returns its size. If the ring is empty it either busy-spins (when the
// recent throughput is high enough that another packet is likely imminent)
// or blocks on the ring's TailMoved event. Returns os.ErrClosed once the
// device is closed or the ring indices are out of range.
func (tun *NativeTun) Read(buff []byte, offset int) (int, error) {
retry:
	// Drain any pending asynchronous error first (non-blocking).
	select {
	case err := <-tun.errors:
		return 0, err
	default:
	}
	if tun.close {
		return 0, os.ErrClosed
	}

	// Head is where we (the consumer) read from; an out-of-range value
	// means the ring is no longer usable.
	buffHead := atomic.LoadUint32(&tun.rings.Send.Ring.Head)
	if buffHead >= wintun.PacketCapacity {
		return 0, os.ErrClosed
	}

	// Spin only while the measured rate is above the threshold and the
	// measurement is recent (within two granularity periods).
	start := nanotime()
	shouldSpin := atomic.LoadUint64(&tun.rate.current) >= spinloopRateThreshold && uint64(start-atomic.LoadInt64(&tun.rate.nextStartTime)) <= rateMeasurementGranularity*2
	var buffTail uint32
	for {
		buffTail = atomic.LoadUint32(&tun.rings.Send.Ring.Tail)
		if buffHead != buffTail {
			break // a packet is available
		}
		if tun.close {
			return 0, os.ErrClosed
		}
		if !shouldSpin || uint64(nanotime()-start) >= spinloopDuration {
			// Give up spinning: sleep until the producer moves the tail,
			// then restart from the top (errors/close may have changed).
			windows.WaitForSingleObject(tun.rings.Send.TailMoved, windows.INFINITE)
			goto retry
		}
		procyield(1)
	}
	if buffTail >= wintun.PacketCapacity {
		return 0, os.ErrClosed
	}

	// Validate that the occupied region holds at least a packet header.
	buffContent := tun.rings.Send.Ring.Wrap(buffTail - buffHead)
	if buffContent < uint32(unsafe.Sizeof(wintun.PacketHeader{})) {
		return 0, errors.New("incomplete packet header in send ring")
	}

	packet := (*wintun.Packet)(unsafe.Pointer(&tun.rings.Send.Ring.Data[buffHead]))
	if packet.Size > wintun.PacketSizeMax {
		return 0, errors.New("packet too big in send ring")
	}

	// Packets are stored aligned; the whole aligned record must fit in the
	// occupied region.
	alignedPacketSize := wintun.PacketAlign(uint32(unsafe.Sizeof(wintun.PacketHeader{})) + packet.Size)
	if alignedPacketSize > buffContent {
		return 0, errors.New("incomplete packet in send ring")
	}

	copy(buff[offset:], packet.Data[:packet.Size])
	// Publish the new head only after the data has been copied out.
	buffHead = tun.rings.Send.Ring.Wrap(buffHead + alignedPacketSize)
	atomic.StoreUint32(&tun.rings.Send.Ring.Head, buffHead)
	tun.rate.update(uint64(packet.Size))
	return int(packet.Size), nil
}
|
2019-02-07 03:08:05 +00:00
|
|
|
|
2019-03-21 20:43:04 +00:00
|
|
|
// Flush is a no-op: Write commits each packet to the receive ring immediately.
func (tun *NativeTun) Flush() error {
	return nil
}
|
2019-03-21 20:43:04 +00:00
|
|
|
|
2019-07-11 08:35:47 +00:00
|
|
|
// Write copies the packet in buff[offset:] into the receive ring and signals
// the driver if it is waiting. If the ring has insufficient space the packet
// is silently dropped and (0, nil) is returned. Returns os.ErrClosed once the
// device is closed or the ring indices are out of range.
func (tun *NativeTun) Write(buff []byte, offset int) (int, error) {
	if tun.close {
		return 0, os.ErrClosed
	}

	packetSize := uint32(len(buff) - offset)
	tun.rate.update(uint64(packetSize))
	// Records in the ring are aligned: header plus payload, rounded up.
	alignedPacketSize := wintun.PacketAlign(uint32(unsafe.Sizeof(wintun.PacketHeader{})) + packetSize)

	// Out-of-range head/tail means the ring is no longer usable.
	buffHead := atomic.LoadUint32(&tun.rings.Receive.Ring.Head)
	if buffHead >= wintun.PacketCapacity {
		return 0, os.ErrClosed
	}

	buffTail := atomic.LoadUint32(&tun.rings.Receive.Ring.Tail)
	if buffTail >= wintun.PacketCapacity {
		return 0, os.ErrClosed
	}

	// Free space, keeping one alignment unit of slack so tail never catches
	// up to head exactly (which would read as "empty").
	buffSpace := tun.rings.Receive.Ring.Wrap(buffHead - buffTail - wintun.PacketAlignment)
	if alignedPacketSize > buffSpace {
		return 0, nil // Dropping when ring is full.
	}

	packet := (*wintun.Packet)(unsafe.Pointer(&tun.rings.Receive.Ring.Data[buffTail]))
	packet.Size = packetSize
	copy(packet.Data[:packetSize], buff[offset:])
	// Publish the new tail only after the packet contents are in place.
	atomic.StoreUint32(&tun.rings.Receive.Ring.Tail, tun.rings.Receive.Ring.Wrap(buffTail+alignedPacketSize))
	// Only signal the event when the consumer declared itself asleep.
	if atomic.LoadInt32(&tun.rings.Receive.Ring.Alertable) != 0 {
		windows.SetEvent(tun.rings.Receive.TailMoved)
	}
	return int(packetSize), nil
}
|
2019-02-28 23:11:12 +00:00
|
|
|
|
2019-08-29 18:20:40 +00:00
|
|
|
// LUID returns the Windows interface instance ID (LUID) of the underlying
// Wintun interface.
func (tun *NativeTun) LUID() uint64 {
	return tun.wt.LUID()
}
|
2019-07-11 08:35:47 +00:00
|
|
|
|
2019-08-18 09:49:37 +00:00
|
|
|
func (rate *rateJuggler) update(packetLen uint64) {
|
|
|
|
now := nanotime()
|
|
|
|
total := atomic.AddUint64(&rate.nextByteCount, packetLen)
|
|
|
|
period := uint64(now - atomic.LoadInt64(&rate.nextStartTime))
|
|
|
|
if period >= rateMeasurementGranularity {
|
|
|
|
if !atomic.CompareAndSwapInt32(&rate.changing, 0, 1) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
atomic.StoreInt64(&rate.nextStartTime, now)
|
|
|
|
atomic.StoreUint64(&rate.current, total*uint64(time.Second/time.Nanosecond)/period)
|
|
|
|
atomic.StoreUint64(&rate.nextByteCount, 0)
|
|
|
|
atomic.StoreInt32(&rate.changing, 0)
|
|
|
|
}
|
|
|
|
}
|