types.go

package nimbusdb

import (
	"errors"
	"fmt"
	"os"
	"sync"
	"time"

	flock "github.com/gofrs/flock"
	"github.com/segmentio/ksuid"
)
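
// Byte-size units built with iota: each step left-shifts by another
// 10 bits, so KB = 1<<10, MB = 1<<20, and so on up to EB = 1<<60.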
const (
	_  = iota
	KB int64 = 1 << (10 * iota)
	MB
	GB
	TB
	PB
	EB
)
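
// Datafile naming conventions and size thresholds. Reading the names:
// active datafiles end in .dfile, hint files in .hfile, and inactive
// (rotated) datafiles in .idfile; DatafileThreshold presumably caps the
// active datafile's size before it is rotated.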
const (
ActiveKeyValueEntryDatafileSuffix = ".dfile"
KeyValueEntryHintfileSuffix = ".hfile"
InactiveKeyValueEntryDataFileSuffix = ".idfile"
SwapFilePattern = "*.swp"
TempDataFilePattern = "*.dfile"
TempInactiveDataFilePattern = "*.idfile"
DefaultDataDir = "nimbusdb"
FlockSuffix = "flock"
DatafileThreshold = 1 * MB
BlockSize = 32 * KB
)
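
// Sizes and end-offsets of the fixed fields in a key-value entry header.
// The sizes suggest a 36-byte static header laid out as
//
//	crc (5) | delete flag (1) | tstamp (10) | key size (10) | value size (10)
//
// and each *Offset constant appears to mark the first byte after its field
// (CrcOffset = 5 is where the delete flag begins, and so on).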
const (
	CrcSize         int64 = 5
	DeleteFlagSize        = 1
	TstampSize            = 10
	KeySizeSize           = 10
	ValueSizeSize         = 10
	StaticChunkSize       = CrcSize + DeleteFlagSize + TstampSize + KeySizeSize + ValueSizeSize

	CrcOffset        int64 = 5
	DeleteFlagOffset       = 6
	TstampOffset           = 16
	KeySizeOffset          = 26
	ValueSizeOffset        = 36

	BTreeDegree int = 10
)
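
// With the values above, TotalStaticChunkSize works out to
// 16 + 26 + 36 + 6 + 5 + 36 = 125 bytes.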
const (
	TotalStaticChunkSize int64 = TstampOffset + KeySizeOffset + ValueSizeOffset + DeleteFlagOffset + CrcOffset + StaticChunkSize
)
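
// Sentinel errors returned by database operations; callers can match them
// with errors.Is.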
var (
	ERROR_KEY_NOT_FOUND               = errors.New("key expired or does not exist")
	ERROR_NO_ACTIVE_FILE_OPENED       = errors.New("no file opened for writing")
	ERROR_NO_ACTIVE_MERGE_FILE_OPENED = errors.New("no merge file opened for writing")
	ERROR_OFFSET_EXCEEDED_FILE_SIZE   = errors.New("offset exceeded file size")
	ERROR_CANNOT_READ_FILE            = errors.New("error reading file")
	ERROR_KEY_VALUE_SIZE_EXCEEDED     = fmt.Errorf("exceeded limit of %d bytes", BlockSize)
	ERROR_CRC_DOES_NOT_MATCH          = errors.New("crc does not match. corrupted datafile")
	ERROR_DB_CLOSED                   = errors.New("database is closed")
	ERROR_DATA_FILE_READER_NOT_CLOSED = errors.New("data file reader is not closed")
	ERROR_DATA_FILE_WRITER_NOT_CLOSED = errors.New("data file writer is not closed")
	ERROR_DATA_FILE_READER_NOT_OPEN   = errors.New("data file reader is not open")
	ERROR_DATA_FILE_WRITER_NOT_OPEN   = errors.New("data file writer is not open")
	ERROR_DIRPATH_ALREADY_IN_USE      = errors.New("dirpath already in use")
)

var (
	ERROR_BATCH_CLOSED              = errors.New("batch is closed")
	ERROR_CANNOT_CLOSE_CLOSED_BATCH = errors.New("cannot close closed batch")
)

const (
	KEY_EXPIRES_IN_DEFAULT = 168 * time.Hour // 1 week

	DELETED_FLAG_BYTE_VALUE  = byte(0x31) // ASCII '1'
	DELETED_FLAG_SET_VALUE   = byte(0x01)
	DELETED_FLAG_UNSET_VALUE = byte(0x00)

	LRU_SIZE = 50
	LRU_TTL  = 24 * time.Hour

	EXIT_NOT_OK = 0
	EXIT_OK     = 1

	INITIAL_SEGMENT_OFFSET         = 0
	INITIAL_KEY_VALUE_ENTRY_OFFSET = 0
)
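
// Options configures a Db instance. Path is the directory that holds the
// datafiles; ShouldWatch and WatchQueueSize presumably enable and size a
// queue of key-update events; Flock guards the directory against concurrent
// use (see ERROR_DIRPATH_ALREADY_IN_USE). A minimal sketch from a caller's
// side, with illustrative values:
//
//	opts := &nimbusdb.Options{Path: "/tmp/nimbusdb", ShouldWatch: false}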
type Options struct {
	Path           string
	ShouldWatch    bool
	WatchQueueSize int
	Flock          *flock.Flock
}
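
// KeyValuePair is the unit of a write: a key, an arbitrary value, and an
// optional TTL (a zero Ttl presumably falls back to KEY_EXPIRES_IN_DEFAULT).
// For example:
//
//	pair := &nimbusdb.KeyValuePair{Key: []byte("hello"), Value: []byte("world"), Ttl: time.Hour}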
type KeyValuePair struct {
	Key   []byte
	Value interface{}
	Ttl   time.Duration
}
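
// KeyDirValue is the in-memory index entry for a key, pointing at the
// latest value on disk: its file path, block number, offset, size, and
// write timestamp.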
type KeyDirValue struct {
	offset      int64
	blockNumber int64
	size        int64
	path        string
	tstamp      int64
}
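
// ActiveKeyValueOffset records where an entry starts and ends within the
// active datafile.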
type ActiveKeyValueOffset struct {
	Startoffset int64
	Endoffset   int64
}

// Segment represents an entire datafile, divided into Blocks of size
// BlockSize (32 KB). A file pointer is kept open for reading; closed
// reflects the state of that file pointer.
type Segment struct {
	closed             bool
	currentBlockNumber int64
	currentBlockOffset int64
	path               string
	blocks             map[int64]*BlockOffsetPair
	writer             *os.File
}
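
// Batch buffers writes in writeQueue so they can be committed together.
// id (a KSUID) identifies the batch; batchlock appears to serialize the
// batch's lifecycle, while mu guards the queue itself.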
type Batch struct {
	id         ksuid.KSUID
	db         *Db
	closed     bool
	batchlock  sync.Mutex
	mu         sync.RWMutex
	writeQueue []*KeyValuePair
}

// BlockOffsetPair holds a Block's metadata: its start and end offsets and
// the path of the file that contains it.
type BlockOffsetPair struct {
	startOffset int64
	endOffset   int64
	filePath    string
}