Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

UID2-4391 Major refactor to improve performance and maintainability #7

Open
wants to merge 4 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,5 +3,9 @@ cmake_minimum_required (VERSION 3.8)
project ("vsock-bridge")

set(CMAKE_CXX_FLAGS_DEBUG "-ggdb")
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED True)

enable_testing ()

add_subdirectory ("vsock-bridge")
30 changes: 26 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# Vsock Proxy

Vsock Proxy to proxy TCP connection to vsock and vise versa.
Vsock Proxy to proxy TCP connection to vsock and vice versa.

This is intended for UID2 traffic forwarding between host and AWS Nitro Enclaves.

Expand All @@ -11,6 +11,7 @@ mkdir uid2-aws-enclave-vsockproxy/build
cd uid2-aws-enclave-vsockproxy/build
cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo
make
make test
```

## How to use
Expand All @@ -24,12 +25,33 @@ http-service:
service: direct
listen: tcp://0.0.0.0:80
connect: vsock://42:8080

sockx-proxy:
service: direct
listen: vsock://3:3305
connect: tcp://127.0.0.1:3305

tcp-to-tcp:
service: direct
listen: tcp://127.0.0.1:4000
connect: tcp://10.10.10.10:4001
```

Start vsockpx
This configuration file instructs the proxy to:
- listen on all IPv4 addresses on TCP port 80 and forward connections to vsock address 42:8080;
- listen on vsock address 3:3305 and forward connections to localhost (IPv4) TCP port 3305;
- listen on localhost (IPv4) TCP port 4000 and forward connections to 10.10.10.10 TCP port 4001.

Start vsock-bridge:

```
./vsockpx --config config.notyaml
./vsock-bridge --config config.notyaml
```

Traffic hitting host:80 port will be forwarded to vsock address 42:8080.
Run `./vsock-bridge -h` to get details for other supported command line options.

## Logging

In daemon mode the proxy logs to the system log (syslog, with ident `vsockpx`). In frontend mode logs go to stdout.

The log level can be configured through command line option `--log-level`.
2 changes: 0 additions & 2 deletions vsock-bridge/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,4 @@
cmake_minimum_required (VERSION 3.8)

add_subdirectory (src)

enable_testing ()
add_subdirectory (test)
222 changes: 32 additions & 190 deletions vsock-bridge/include/buffer.h
Original file line number Diff line number Diff line change
@@ -1,222 +1,64 @@
#pragma once

#include "logger.h"

#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <list>
#include <memory>
#include <stdexcept>
#include <vector>

#include <unistd.h>

namespace vsockio
{
struct MemoryBlock
{
	// Fixed-size byte buffer, optionally owned by a MemoryArena pool.
	// A null _region marks an overflow block allocated outside any pool.
	MemoryBlock(int size, class MemoryArena* owner)
		: _startPtr(std::make_unique<uint8_t[]>(size)), _region(owner) {}

	// Pointer to the byte at position x within this block's storage.
	uint8_t* offset(int x) const { return _startPtr.get() + x; }

	std::unique_ptr<uint8_t[]> _startPtr;
	class MemoryArena* _region;
};

struct MemoryArena
{
std::vector<MemoryBlock> _blocks;
std::list<MemoryBlock*> _handles;
uint32_t _blockSizeInBytes = 0;
bool _initialized = false;

MemoryArena() = default;

void init(int blockSize, int numBlocks)
{
if (_initialized) throw;

Logger::instance->Log(Logger::INFO, "Thread-local memory arena init: blockSize=", blockSize, ", numBlocks=", numBlocks);

_blockSizeInBytes = blockSize;

for (int i = 0; i < numBlocks; i++)
{
_blocks.emplace_back(blockSize, this);
}

for (int i = 0; i < numBlocks; i++)
{
_handles.push_back(&_blocks[i]);
}

_initialized = true;
}

MemoryBlock* get()
{
if (!_handles.empty())
{
auto mb = _handles.front();
_handles.pop_front();
return mb;
}
else
{
return new MemoryBlock(_blockSizeInBytes, nullptr);
}
}

void put(MemoryBlock* mb)
{
if (mb->_region == this)
{
_handles.push_front(mb);
}
else if (mb->_region == nullptr)
{
delete mb;
}
else
{
throw;
}
}

int blockSize() const { return _blockSizeInBytes; }
};

struct Buffer
{
constexpr static int MAX_PAGES = 20;
int _pageCount;
int _cursor;
int _size;
int _pageSize;
MemoryBlock* _pages[MAX_PAGES];
MemoryArena* _arena;

explicit Buffer(MemoryArena* arena) : _arena(arena), _pageCount{ 0 }, _cursor{ 0 }, _size{ 0 }, _pageSize(arena->blockSize()) {}

Buffer(Buffer&& b) : _arena(b._arena), _pageCount(b._pageCount), _cursor(b._cursor), _size(b._size), _pageSize(b._arena->blockSize())
{
for (int i = 0; i < _pageCount; i++)
{
_pages[i] = b._pages[i];
}
b._pageCount = 0; // prevent _pages being destructed by old object
}
// Use the default minimum socket send buffer size on Linux.
static constexpr int BUFFER_SIZE = 4096;
scong-ttd marked this conversation as resolved.
Show resolved Hide resolved
std::array<std::uint8_t, BUFFER_SIZE> _data;
std::uint8_t* _head = _data.data();
std::uint8_t* _tail = _data.data();

Buffer(const Buffer&) = delete;
Buffer& operator=(const Buffer&) = delete;
std::uint8_t* head() const
{
return _head;
}

~Buffer()
std::uint8_t* tail() const
{
for (int i = 0; i < _pageCount; i++)
{
_arena->put(_pages[i]);
}
return _tail;
}

uint8_t* tail() const
{
return offset(_size);
}
bool hasRemainingCapacity() const
{
return _tail < _data.end();
}

int remainingCapacity() const
{
return capacity() - _size;
return _data.end() - _tail;
}

void produce(int size)
{
_size += size;
}
int remainingDataSize() const
{
return _tail - _head;
}

bool ensureCapacity()
{
return remainingCapacity() > 0 || tryNewPage();
}

uint8_t* head() const
{
return offset(_cursor);
}

int headLimit() const
void produce(int size)
{
return std::min(pageLimit(_cursor), _size - _cursor);
assert(remainingCapacity() >= size);
_tail += size;
}

void consume(int size)
{
_cursor += size;
}

bool tryNewPage()
{
if (_pageCount >= MAX_PAGES) return false;
_pages[_pageCount++] = _arena->get();
return true;
}

uint8_t* offset(int x) const
{
return _pages[x / _pageSize]->offset(x % _pageSize);
}

int capacity() const
{
return _pageCount * _pageSize;
}

int pageLimit(int x) const
{
return _pageSize - (x % _pageSize);
}

int cursor() const
{
return _cursor;
assert(remainingDataSize() >= size);
_head += size;
}

int size() const
{
return _size;
}

bool empty() const
{
return _size <= 0;
}
void reset()
{
_head = _tail = _data.data();
}

bool consumed() const
{
return _cursor >= _size;
}
};

struct BufferManager
{
thread_local static MemoryArena* arena;

static std::unique_ptr<Buffer> getBuffer()
{
auto b = std::make_unique<Buffer>(arena);
b->tryNewPage();
return b;
}

static std::unique_ptr<Buffer> getEmptyBuffer()
{
return std::make_unique<Buffer>(arena);
return _head >= _tail;
}
};


}
}
Loading