
Fix bug in queuer

master
Philip O'Toole 2 years ago
parent 523f11ac0f
commit d52946aac1

@@ -1,7 +1,7 @@
 # Queued Writes API
 > :warning: **This functionality was introduced in version 7.5. It does not exist in earlier releases.**
-rqlite exposes a special API, which will queue up write-requests and execute them in bulk. This allows clients to send multiple distinct requests to a rqlite node, and rqlite do the batching and bulk insert for the client, without the client doing any extra work. This functionality is best illustrated by an example, showing two requests being queued.
+rqlite exposes a special API, which will queue up write-requests and execute them in bulk. This allows clients to send multiple distinct requests to a rqlite node, and have rqlite automatically do the batching and bulk insert for the client, without the client doing any extra work. This functionality is best illustrated by an example, showing two requests being queued.
 ```bash
 curl -XPOST 'localhost:4001/db/execute/queue/_default' -H "Content-Type: application/json" -d '[
     ["INSERT INTO foo(name) VALUES(?)", "fiona"],
@@ -13,8 +13,10 @@ curl -XPOST 'localhost:4001/db/execute/queue/_default' -H "Content-Type: applica
 ```
 rqlite will merge these requests, and execute them as though they had both been contained in a single request. For the same reason that using the [Bulk API](https://github.com/rqlite/rqlite/blob/master/DOC/BULK.md) results in much higher write performance, using the _Queued Writes_ API will also result in much higher write performance.
-The behaviour of the queue rqlite uses to batch the requests is configurable at launch time.
+The behaviour of the queue rqlite uses to batch the requests is configurable at rqlite launch time. Pass `-h` to `rqlited` to list all configuration options.
 ## Caveats
-Because the API returns immediately after queue the requests **but before the data is commited to the SQLite database** there is a risk of data loss in the event the node crashes before queued data is persisted. Like most databases there is a trade-off to be made between write-performance and durability. In addition if the API returns HTTP 200 OK, that simply acknowledges that the data has been queued correctly. It does not indicate that the SQL statements will be applied successfully to the database. Be sure to check the node's logs if you have any concerns about failed queued writes.
+Because the API returns immediately after queuing the requests **but before the data is committed to the SQLite database** there is a risk of data loss in the event the node crashes before queued data is persisted.
+Like most databases there is a trade-off to be made between write-performance and durability. In addition, when the API returns `HTTP 200 OK`, that simply acknowledges that the data has been queued correctly. It does not indicate that the SQL statements will actually be applied successfully to the database. Be sure to check the node's logs if you have any concerns about failed queued writes.
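
To make the merge behaviour described in the hunk above concrete: once both queued requests have been batched, the effect is as if the client had sent a single request to the standard Bulk API endpoint itself. A sketch of that equivalent request follows; the second statement's value is illustrative, since the diff view truncates the original example.
```bash
# Roughly what rqlite executes after merging the two queued requests:
# one bulk request against the ordinary execute endpoint.
# The value "declan" is a stand-in for the second queued request's data.
curl -XPOST 'localhost:4001/db/execute' -H "Content-Type: application/json" -d '[
    ["INSERT INTO foo(name) VALUES(?)", "fiona"],
    ["INSERT INTO foo(name) VALUES(?)", "declan"]
]'
```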

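The updated doc line above points at `rqlited -h` for the queue's configuration. One quick way to pick out the relevant options, assuming (an assumption, since the flag names aren't shown in this diff) that they mention the queue:
```bash
# List all rqlited options and filter for queue-related settings.
# The grep pattern assumes the relevant flag names contain "queue".
rqlited -h 2>&1 | grep -i queue
```
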
@@ -76,7 +76,7 @@ func (q *Queue) run() {
 	timer := time.NewTimer(q.timeout)
 	timer.Stop()
-	writeFn := func(stmts []*command.Statement) {
+	writeFn := func() {
 		newStmts := make([]*command.Statement, len(stmts))
 		copy(newStmts, stmts)
 		q.sendCh <- newStmts
@@ -93,12 +93,12 @@ func (q *Queue) run() {
 				timer.Reset(q.timeout)
 			}
 			if len(stmts) >= q.batchSize {
-				writeFn(stmts)
+				writeFn()
 			}
 		case <-timer.C:
-			writeFn(stmts)
+			writeFn()
 		case <-q.flush:
-			writeFn(stmts)
+			writeFn()
 		case <-q.done:
 			timer.Stop()
 			return
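
On its own, changing `writeFn(stmts)` to a zero-argument closure doesn't alter what is copied and sent; the value of the change, judging from the new tests added below, is that a closure can also mutate the captured `stmts` (for instance, resetting it after a hand-off), which a function parameter cannot do to the caller's variable. A self-contained sketch of that distinction, using illustrative names rather than rqlite's actual code:
```go
package main

import "fmt"

func main() {
	sent := [][]string{}
	batch := []string{"INSERT INTO foo(name) VALUES('fiona')"}

	// Parameter form: stmts is a copy of the slice header, so the
	// function cannot reset the caller's batch variable.
	sendParam := func(stmts []string) {
		out := make([]string, len(stmts))
		copy(out, stmts)
		sent = append(sent, out)
		stmts = nil // resets only the local parameter
	}
	sendParam(batch)
	fmt.Println(len(batch)) // 1: caller's batch is unchanged

	// Closure form: captures batch itself, so it can both copy-and-send
	// and reset it, preventing a later timer flush from resending.
	sendClosure := func() {
		out := make([]string, len(batch))
		copy(out, batch)
		sent = append(sent, out)
		batch = nil
	}
	sendClosure()
	fmt.Println(len(batch)) // 0: a second flush has nothing stale to send
	fmt.Println(len(sent))  // 2: both batches were handed off exactly once
}
```
With the parameter form, any reset of the outer slice has to happen at every call site; the closure centralises it in one place.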

@@ -104,3 +104,82 @@ func Test_NewQueueWriteTimeout(t *testing.T) {
 		t.Fatalf("timed out waiting for statement")
 	}
 }
+
+// Test_NewQueueWriteTimeoutMulti ensures that the timer expiring
+// twice in a row works fine.
+func Test_NewQueueWriteTimeoutMulti(t *testing.T) {
+	q := New(1024, 10, 1*time.Second)
+	defer q.Close()
+
+	if err := q.Write(testStmt); err != nil {
+		t.Fatalf("failed to write: %s", err.Error())
+	}
+	select {
+	case stmts := <-q.C:
+		if len(stmts) != 1 {
+			t.Fatalf("received wrong length slice")
+		}
+		if stmts[0].Sql != "SELECT * FROM foo" {
+			t.Fatalf("received wrong SQL")
+		}
+	case <-time.NewTimer(5 * time.Second).C:
+		t.Fatalf("timed out waiting for first statement")
+	}
+
+	if err := q.Write(testStmt); err != nil {
+		t.Fatalf("failed to write: %s", err.Error())
+	}
+	select {
+	case stmts := <-q.C:
+		if len(stmts) != 1 {
+			t.Fatalf("received wrong length slice")
+		}
+		if stmts[0].Sql != "SELECT * FROM foo" {
+			t.Fatalf("received wrong SQL")
+		}
+	case <-time.NewTimer(5 * time.Second).C:
+		t.Fatalf("timed out waiting for second statement")
+	}
+}
+
+// Test_NewQueueWriteTimeoutBatch ensures that the timer expiring,
+// followed by a batch, works fine.
+func Test_NewQueueWriteTimeoutBatch(t *testing.T) {
+	q := New(1024, 2, 2*time.Second)
+	defer q.Close()
+
+	if err := q.Write(testStmt); err != nil {
+		t.Fatalf("failed to write: %s", err.Error())
+	}
+	select {
+	case stmts := <-q.C:
+		if len(stmts) != 1 {
+			t.Fatalf("received wrong length slice")
+		}
+		if stmts[0].Sql != "SELECT * FROM foo" {
+			t.Fatalf("received wrong SQL")
+		}
+	case <-time.NewTimer(5 * time.Second).C:
+		t.Fatalf("timed out waiting for statement")
+	}
+
+	if err := q.Write(testStmt); err != nil {
+		t.Fatalf("failed to write: %s", err.Error())
+	}
+	if err := q.Write(testStmt); err != nil {
+		t.Fatalf("failed to write: %s", err.Error())
+	}
+	select {
+	case stmts := <-q.C:
+		if len(stmts) != 2 {
+			t.Fatalf("received wrong length slice")
+		}
+		if stmts[0].Sql != "SELECT * FROM foo" {
+			t.Fatalf("received wrong SQL")
+		}
+	case <-time.NewTimer(500 * time.Millisecond).C:
+		// Should happen before the timeout expires.
+		t.Fatalf("timed out waiting for statement")
+	}
+}
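
Taken together, the new tests pin down the queue's observable contract: `New(capacity, batchSize, timeout)` builds it (the first argument looks like a capacity, though the hunk doesn't show its definition), `Write` enqueues, and batches arrive on `q.C` either once `batchSize` statements accumulate or once `timeout` fires, whichever comes first. Below is a self-contained toy that mimics that contract for illustration only; none of it is rqlite's actual code, and the string payloads stand in for `*command.Statement` values.
```go
package main

import (
	"fmt"
	"time"
)

// Queue is a toy batching queue: items written to it are delivered
// on C either when batchSize is reached or when timeout expires.
type Queue struct {
	writeCh chan string
	C       chan []string
	done    chan struct{}
}

func New(batchSize int, timeout time.Duration) *Queue {
	q := &Queue{
		writeCh: make(chan string, 1024),
		C:       make(chan []string, 16),
		done:    make(chan struct{}),
	}
	go q.run(batchSize, timeout)
	return q
}

func (q *Queue) Write(s string) { q.writeCh <- s }
func (q *Queue) Close()         { close(q.done) }

func (q *Queue) run(batchSize int, timeout time.Duration) {
	timer := time.NewTimer(timeout)
	timer.Stop()
	var batch []string
	flush := func() {
		if len(batch) == 0 {
			return
		}
		out := make([]string, len(batch))
		copy(out, batch)
		q.C <- out
		batch = nil // reset so a later timer flush resends nothing stale
		timer.Stop()
	}
	for {
		select {
		case s := <-q.writeCh:
			if len(batch) == 0 {
				timer.Reset(timeout) // first item starts the clock
			}
			batch = append(batch, s)
			if len(batch) >= batchSize {
				flush()
			}
		case <-timer.C:
			flush()
		case <-q.done:
			timer.Stop()
			return
		}
	}
}

func main() {
	q := New(2, 100*time.Millisecond)
	defer q.Close()

	// One write: delivered by timeout, as in Test_NewQueueWriteTimeoutMulti.
	q.Write("stmt-1")
	fmt.Println(<-q.C) // after ~100ms: [stmt-1]

	// Two writes: delivered by batch size, as in Test_NewQueueWriteTimeoutBatch.
	q.Write("stmt-2")
	q.Write("stmt-3")
	fmt.Println(<-q.C) // immediately: [stmt-2 stmt-3]
}
```
Run directly, the toy prints `[stmt-1]` after roughly the timeout, then `[stmt-2 stmt-3]` immediately, matching the two behaviours the tests exercise.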
