
Commit d5ad40c: Merge branch 'release/v1.0.0-rc.1'
2 parents: 0c0ec17 + 586538c


58 files changed (+2204 -1145 lines)

CHANGELOG.md (+35 -1)
@@ -2,6 +2,40 @@
 All notable changes to this project will be documented in this file.
 This project adheres to [Semantic Versioning](http://semver.org/).

+## v1.0.0-RC.1 - 2015-06-07
+In an attempt to make this library more "idiomatic", some functions have been renamed; see below for the full list of changes.
+
+### Added
+- Added more documentation.
+- Added `Shards`, `Replicas` and `PrimaryReplicaTag` optional arguments in `TableCreateOpts`.
+- Added `MultiGroup` and `MultiGroupByIndex`, which are equivalent to running `group` with the `multi` optional argument set to true.
+
+### Changed
+- Renamed `Db` to `DB`.
+- Renamed `DbCreate` to `DBCreate`.
+- Renamed `DbDrop` to `DBDrop`.
+- Renamed `RqlConnectionError` to `RQLConnectionError`.
+- Renamed `RqlDriverError` to `RQLDriverError`.
+- Renamed `RqlClientError` to `RQLClientError`.
+- Renamed `RqlRuntimeError` to `RQLRuntimeError`.
+- Renamed `RqlCompileError` to `RQLCompileError`.
+- Renamed `Js` to `JS`.
+- Renamed `Json` to `JSON`.
+- Renamed `Http` to `HTTP`.
+- Renamed `GeoJson` to `GeoJSON`.
+- Renamed `ToGeoJson` to `ToGeoJSON`.
+- Renamed `WriteChanges` to `ChangeResponse`; this is now a general type and can be used when dealing with changefeeds.
+- Removed the depth limit when encoding values using `Expr`.
+
+### Fixed
+- Fixed issue causing inconsistent results when unmarshaling a query response into structs (#192)
+- Fixed issue causing errors when closing a changefeed cursor (#191)
+- Fixed issue causing nodes to remain unhealthy when host discovery is disabled (#195)
+
+### Removed
+- Removed `CacheSize` and `DataCenter` optional arguments in `TableCreateOpts`.
+- Removed `CacheSize` optional argument from `InsertOpts`.
+
 ## v0.7.2 - 2015-05-05
 ### Added
 - Added support for connecting to a server using TLS (#179)
@@ -133,7 +167,7 @@ Internal Changes

 - Updated the driver to support RethinkDB v1.14 (#116)
 - Added the Binary data type
-- Added the Binary command which takes a `[]byte`, `io.Reader` or `bytes.Buffer{}` as an argument.
+- Added the Binary command which takes a `[]byte` or `bytes.Buffer{}` as an argument.
 - Added the `BinaryFormat` optional argument to `RunOpts`
 - Added the `GroupFormat` optional argument to `RunOpts`
 - Added the `ArrayLimit` optional argument to `RunOpts`
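
To make the renames above concrete, here is a minimal migration sketch. It is not part of this commit; the connection address, the database name `test`, the table name `users` and the `TableCreateOpts` values are placeholders.

```go
package main

import (
	"log"

	r "github.com/dancannon/gorethink"
)

func main() {
	// Placeholder address; point this at your own server.
	session, err := r.Connect(r.ConnectOpts{Address: "localhost:28015"})
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// Before v1.0.0-RC.1 this was written r.Db("test").TableCreate(...).
	// The initialisms are now upper-cased, and the new Shards/Replicas
	// optional arguments in TableCreateOpts can be set here.
	_, err = r.DB("test").TableCreate("users", r.TableCreateOpts{
		Shards:   1,
		Replicas: 1,
	}).RunWrite(session)
	if err != nil {
		log.Fatal(err)
	}

	// r.Json, r.Http and r.GeoJson follow the same pattern and become
	// r.JSON, r.HTTP and r.GeoJSON; the error types use the RQL* prefix
	// (for example RQLRuntimeError).
}
```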

README.md (+3 -17)
@@ -7,7 +7,7 @@
 [Go](http://golang.org/) driver for [RethinkDB](http://www.rethinkdb.com/)


-Current version: v0.7.2 (RethinkDB v2.0)
+Current version: v1.0.0-RC.1 (RethinkDB v2.0)

 Please note that this version of the driver only supports versions of RethinkDB using the v0.4 protocol (any version of RethinkDB older than 2.0 will not work).

@@ -89,7 +89,7 @@ When `DiscoverHosts` is true any nodes are added to the cluster after the initial

 This library is based on the official drivers so the code on the [API](http://www.rethinkdb.com/api/) page should require very few changes to work.

-To view full documentation for the query functions, check the [GoDoc](http://godoc.org/github.com/dancannon/gorethink#Term)
+To view full documentation for the query functions, check the [API reference](https://github.com/dancannon/gorethink/wiki/Go-ReQL-command-reference) or [GoDoc](http://godoc.org/github.com/dancannon/gorethink#Term)

 Slice Expr Example
 ```go
@@ -205,20 +205,6 @@ Field int `gorethink:"myName,omitempty"`
 Field int `gorethink:",omitempty"`
 ```

-Alternatively you can implement the FieldMapper interface by providing the FieldMap function, which returns a map of strings in the form `"FieldName": "NewName"`. For example:
-
-```go
-type A struct {
-	Field int
-}
-
-func (a A) FieldMap() map[string]string {
-	return map[string]string{
-		"Field": "myName",
-	}
-}
-```
-
 ## Benchmarks

 Everyone wants their project's benchmarks to be speedy. And while we know that RethinkDB and the gorethink driver are quite fast, our primary goal is for our benchmarks to be correct. They are designed to give you, the user, an accurate picture of writes per second (w/s). If you come up with an accurate test that meets this aim, please submit a pull request.
@@ -253,7 +239,7 @@ BenchmarkSequentialSoftWritesParallel10 10000 263

 ## Examples

-View other examples on the [wiki](https://github.com/dancannon/gorethink/wiki/Examples).
+Many functions have examples viewable in the GoDoc; alternatively, see some more fully featured examples on the [wiki](https://github.com/dancannon/gorethink/wiki/Examples).

 ## License
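
With the FieldMapper example removed, struct tags remain the documented way to rename fields. The sketch below illustrates that remaining approach; the `Hero` type, the `examples`/`heroes` names and the `insertHero` helper are illustrative only, not taken from the README.

```go
// Assumes: import r "github.com/dancannon/gorethink" and an open *r.Session.

// Field names are controlled with gorethink struct tags.
type Hero struct {
	ID   string `gorethink:"id,omitempty"` // stored as "id", omitted when empty
	Name string `gorethink:"name"`         // stored as "name"
}

// insertHero writes a Hero using the tag-based field mapping.
func insertHero(session *r.Session, h Hero) error {
	_, err := r.DB("examples").Table("heroes").Insert(h).RunWrite(session)
	return err
}
```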

benchmarks_test.go (+14 -18)
@@ -8,10 +8,6 @@ import (
 	"time"
 )

-var bSess *Session
-var bDbName string
-var bTableName string
-
 func BenchmarkBatch200RandomWrites(b *testing.B) {

 	var term Term
@@ -28,10 +24,10 @@ func BenchmarkBatch200RandomWrites(b *testing.B) {
 	}

 	// Insert the new item into the database
-	term = Table(bTableName).Insert(data)
+	term = DB("benchmarks").Table("benchmarks").Insert(data)

 	// Insert the new item into the database
-	_, err := term.RunWrite(bSess, RunOpts{
+	_, err := term.RunWrite(session, RunOpts{
 		MinBatchRows: 200,
 		MaxBatchRows: 200,
 	})
@@ -61,10 +57,10 @@ func BenchmarkBatch200RandomWritesParallel10(b *testing.B) {
 	}

 	// Insert the new item into the database
-	term = Table(bTableName).Insert(data)
+	term = DB("benchmarks").Table("benchmarks").Insert(data)

 	// Insert the new item into the database
-	_, err := term.RunWrite(bSess, RunOpts{
+	_, err := term.RunWrite(session, RunOpts{
 		MinBatchRows: 200,
 		MaxBatchRows: 200,
 	})
@@ -98,10 +94,10 @@ func BenchmarkBatch200SoftRandomWritesParallel10(b *testing.B) {
 	}

 	// Insert the new item into the database
-	term = Table(bTableName).Insert(data, opts)
+	term = DB("benchmarks").Table("benchmarks").Insert(data, opts)

 	// Insert the new item into the database
-	_, err := term.RunWrite(bSess, RunOpts{
+	_, err := term.RunWrite(session, RunOpts{
 		MinBatchRows: 200,
 		MaxBatchRows: 200,
 	})
@@ -121,7 +117,7 @@ func BenchmarkRandomWrites(b *testing.B) {
 			"customer_id": strconv.FormatInt(r.Int63(), 10),
 		}
 		// Insert the new item into the database
-		_, err := Table(bTableName).Insert(data).RunWrite(bSess)
+		_, err := DB("benchmarks").Table("benchmarks").Insert(data).RunWrite(session)
 		if err != nil {
 			b.Errorf("insert failed [%s] ", err)
 		}
@@ -141,7 +137,7 @@ func BenchmarkRandomWritesParallel10(b *testing.B) {
 			"customer_id": strconv.FormatInt(r.Int63(), 10),
 		}
 		// Insert the new item into the database
-		_, err := Table(bTableName).Insert(data).RunWrite(bSess)
+		_, err := DB("benchmarks").Table("benchmarks").Insert(data).RunWrite(session)
 		if err != nil {
 			b.Errorf("insert failed [%s] ", err)
 		}
@@ -158,7 +154,7 @@ func BenchmarkRandomSoftWrites(b *testing.B) {
 		}
 		// Insert the new item into the database
 		opts := InsertOpts{Durability: "soft"}
-		_, err := Table(bTableName).Insert(data, opts).RunWrite(bSess)
+		_, err := DB("benchmarks").Table("benchmarks").Insert(data, opts).RunWrite(session)
 		if err != nil {
 			b.Errorf("insert failed [%s] ", err)
 		}
@@ -180,7 +176,7 @@ func BenchmarkRandomSoftWritesParallel10(b *testing.B) {

 			// Insert the new item into the database
 			opts := InsertOpts{Durability: "soft"}
-			_, err := Table(bTableName).Insert(data, opts).RunWrite(bSess)
+			_, err := DB("benchmarks").Table("benchmarks").Insert(data, opts).RunWrite(session)
 			if err != nil {
 				b.Errorf("insert failed [%s] ", err)
 			}
@@ -199,7 +195,7 @@ func BenchmarkSequentialWrites(b *testing.B) {
 		}

 		// Insert the new item into the database
-		_, err := Table(bTableName).Insert(data).RunWrite(bSess)
+		_, err := DB("benchmarks").Table("benchmarks").Insert(data).RunWrite(session)
 		if err != nil {
 			b.Errorf("insert failed [%s] ", err)
 			return
@@ -226,7 +222,7 @@ func BenchmarkSequentialWritesParallel10(b *testing.B) {
 			}

 			// Insert the new item into the database
-			_, err := Table(bTableName).Insert(data).RunWrite(bSess)
+			_, err := DB("benchmarks").Table("benchmarks").Insert(data).RunWrite(session)
 			if err != nil {
 				b.Errorf("insert failed [%s] ", err)
 				return
@@ -248,7 +244,7 @@ func BenchmarkSequentialSoftWrites(b *testing.B) {
 		}

 		// Insert the new item into the database
-		_, err := Table(bTableName).Insert(data, opts).RunWrite(bSess)
+		_, err := Table("benchmarks").Insert(data, opts).RunWrite(session)
 		if err != nil {
 			b.Errorf("insert failed [%s] ", err)
 			return
@@ -277,7 +273,7 @@ func BenchmarkSequentialSoftWritesParallel10(b *testing.B) {
 			opts := InsertOpts{Durability: "soft"}

 			// Insert the new item into the database
-			_, err := Table(bTableName).Insert(data, opts).RunWrite(bSess)
+			_, err := Table("benchmarks").Insert(data, opts).RunWrite(session)
 			if err != nil {
 				b.Errorf("insert failed [%s] ", err)
 				return
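
Every hunk in this file follows the same pattern: the package-level `bSess`/`bDbName`/`bTableName` variables are replaced with the shared `session` and a fixed `benchmarks` database and table. The condensed sketch below is not one of the committed benchmarks; it assumes the same in-package setup, the `testing` and `strconv` imports, and the `benchmarks` table created elsewhere in the suite.

```go
// A condensed sketch in the style of the updated benchmarks; it assumes the
// package-level session and the "benchmarks" database/table set up by the
// test suite, which are not shown here.
func BenchmarkSoftWritesSketch(b *testing.B) {
	opts := InsertOpts{Durability: "soft"}

	for i := 0; i < b.N; i++ {
		data := map[string]interface{}{
			"customer_id": strconv.Itoa(i),
		}

		// Insert the new item into the database, using the same RunOpts
		// batch sizing as the benchmarks above.
		_, err := DB("benchmarks").Table("benchmarks").Insert(data, opts).RunWrite(session, RunOpts{
			MinBatchRows: 200,
			MaxBatchRows: 200,
		})
		if err != nil {
			b.Errorf("insert failed [%s]", err)
		}
	}
}
```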

cluster.go (+9 -22)
@@ -135,7 +135,7 @@ func (c *Cluster) listenForNodeChanges() error {
 	}

 	cursor, err := node.Query(newQuery(
-		Db("rethinkdb").Table("server_status").Changes(),
+		DB("rethinkdb").Table("server_status").Changes(),
 		map[string]interface{}{},
 		c.opts,
 	))
@@ -197,7 +197,7 @@ func (c *Cluster) connectNodes(hosts []Host) {
 		defer conn.Close()

 		_, cursor, err := conn.Query(newQuery(
-			Db("rethinkdb").Table("server_status"),
+			DB("rethinkdb").Table("server_status"),
 			map[string]interface{}{},
 			c.opts,
 		))
@@ -206,27 +206,14 @@ func (c *Cluster) connectNodes(hosts []Host) {
 			continue
 		}

-		if c.opts.DiscoverHosts {
-			var results []nodeStatus
-			err = cursor.All(&results)
-			if err != nil {
-				continue
-			}
+		var results []nodeStatus
+		err = cursor.All(&results)
+		if err != nil {
+			continue
+		}

-			for _, result := range results {
-				node, err := c.connectNodeWithStatus(result)
-				if err == nil {
-					if _, ok := nodeSet[node.ID]; !ok {
-						log.WithFields(logrus.Fields{
-							"id":   node.ID,
-							"host": node.Host.String(),
-						}).Debug("Connected to node")
-						nodeSet[node.ID] = node
-					}
-				}
-			}
-		} else {
-			node, err := c.connectNode(host.String(), []Host{host})
+		for _, result := range results {
+			node, err := c.connectNodeWithStatus(result)
 			if err == nil {
 				if _, ok := nodeSet[node.ID]; !ok {
 					log.WithFields(logrus.Fields{
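
With this change `connectNodes` always reads the `rethinkdb.server_status` system table and connects each reported node, rather than falling back to a single direct connection when `DiscoverHosts` is off; this is the fix for the unhealthy-nodes issue (#195). For reference, the same system table can also be read through the public API. The sketch below is not driver code; it assumes `import r "github.com/dancannon/gorethink"`, the `fmt` import, an open session, and the standard `server_status` fields.

```go
// A sketch using the public query API rather than the driver internals above.
func printServerStatus(session *r.Session) error {
	cursor, err := r.DB("rethinkdb").Table("server_status").Run(session)
	if err != nil {
		return err
	}
	defer cursor.Close()

	var servers []map[string]interface{}
	if err := cursor.All(&servers); err != nil {
		return err
	}
	for _, s := range servers {
		fmt.Println(s["id"], s["name"]) // standard server_status fields
	}
	return nil
}
```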

cluster_integration_test.go (+33 -2)
@@ -11,9 +11,10 @@ import (

 func (s *RethinkSuite) TestClusterDetectNewNode(c *test.C) {
 	session, err := Connect(ConnectOpts{
+		Addresses:           []string{url, url2},
 		DiscoverHosts:       true,
 		NodeRefreshInterval: time.Second,
-	}, url, url2)
+	})
 	c.Assert(err, test.IsNil)

 	t := time.NewTimer(time.Second * 30)
@@ -31,13 +32,43 @@ func (s *RethinkSuite) TestClusterDetectNewNode(c *test.C) {
 	}
 }

+func (s *RethinkSuite) TestClusterRecoverAfterNoNodes(c *test.C) {
+	session, err := Connect(ConnectOpts{
+		Addresses:           []string{url, url2},
+		DiscoverHosts:       true,
+		NodeRefreshInterval: time.Second,
+	})
+	c.Assert(err, test.IsNil)
+
+	t := time.NewTimer(time.Second * 30)
+	hasHadZeroNodes := false
+	for {
+		select {
+		// Fail if deadline has passed
+		case <-t.C:
+			c.Fatal("No node was added to the cluster")
+		default:
+			// Check if there are no nodes
+			if len(session.cluster.GetNodes()) == 0 {
+				hasHadZeroNodes = true
+			}
+
+			// Pass if another node was added
+			if len(session.cluster.GetNodes()) >= 1 && hasHadZeroNodes {
+				return
+			}
+		}
+	}
+}
+
 func (s *RethinkSuite) TestClusterNodeHealth(c *test.C) {
 	session, err := Connect(ConnectOpts{
+		Addresses:           []string{url, url2, url3},
 		DiscoverHosts:       true,
 		NodeRefreshInterval: time.Second,
 		MaxIdle:             50,
 		MaxOpen:             200,
-	}, url, url2, url3)
+	})
 	c.Assert(err, test.IsNil)

 	attempts := 0
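
These tests also pick up the connection-options change: seed hosts now go in `ConnectOpts.Addresses` instead of being passed as extra arguments to `Connect`. A minimal sketch of the new call outside the test suite follows; the host:port values are placeholders.

```go
// Assumes: import r "github.com/dancannon/gorethink" and "time"; the
// addresses below stand in for real cluster nodes.
func connectCluster() (*r.Session, error) {
	return r.Connect(r.ConnectOpts{
		Addresses:           []string{"node1:28015", "node2:28015"},
		DiscoverHosts:       true,
		NodeRefreshInterval: time.Second,
	})
}
```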
