
Merge pull request #1440 from rqlite/litefs-snapshotting-iter

Compacting WAL rewriter
master
Philip O'Toole 10 months ago committed by GitHub
commit 0a2646e2d5

.gitignore

@@ -11,6 +11,9 @@ rqbench
**/rqbench
!**/rqbench/
walr
**/walr
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a

@@ -50,6 +50,7 @@ When officially released 8.0 will support (mostly) seamless upgrades from the 7.
- [PR #1426](https://github.com/rqlite/rqlite/pull/1426): 'go mod' updates, including moving to Raft 1.6.
- [PR #1430](https://github.com/rqlite/rqlite/pull/1430): Check that any supplied Join addresses are not HTTP servers.
- [PR #1437](https://github.com/rqlite/rqlite/pull/1437), [PR #1438](https://github.com/rqlite/rqlite/pull/1438), [PR #1439](https://github.com/rqlite/rqlite/pull/1439): Actually timeout if needed during `nodes/` access. Fixes [issue #1435](https://github.com/rqlite/rqlite/issues/1435). Thanks @dwco-z
- [PR #1440](https://github.com/rqlite/rqlite/pull/1440): Add a Compacting WAL rewriter. Thanks @benbjohnson.
## 7.21.4 (July 8th 2023)
### Implementation changes and bug fixes

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,147 @@
package wal
import (
"errors"
"io"
"sort"
)
var (
// ErrOpenTransaction is returned when the final frame in the WAL file is not a committing frame.
ErrOpenTransaction = errors.New("open transaction at end of WAL file")
)
type cFrame struct {
Pgno uint32
Commit uint32
Offset int64
}
type cFrames []*cFrame
// cFrames is sortable by frame offset.
func (c cFrames) Len() int { return len(c) }
func (c cFrames) Less(i, j int) bool { return c[i].Offset < c[j].Offset }
func (c cFrames) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
// CompactingScanner implements WALIterator to iterate over frames in a WAL file.
// It also compacts the WAL, with Next() returning only the most recent frame for each page.
// This Scanner requires that the final frame in the WAL file is a committing frame.
// It will return an error at creation time if this is not the case.
type CompactingScanner struct {
readSeeker io.ReadSeeker
walReader *Reader
header *WALHeader
cIdx int
frames cFrames
}
// NewCompactingScanner creates a new CompactingScanner with the given io.ReadSeeker.
func NewCompactingScanner(r io.ReadSeeker) (*CompactingScanner, error) {
walReader := NewReader(r)
err := walReader.ReadHeader()
if err != nil {
return nil, err
}
hdr := &WALHeader{
Magic: walReader.magic,
Version: WALSupportedVersion,
PageSize: walReader.PageSize(),
Seq: walReader.seq,
Salt1: walReader.salt1,
Salt2: walReader.salt2,
Checksum1: walReader.chksum1,
Checksum2: walReader.chksum2,
}
s := &CompactingScanner{
readSeeker: r,
walReader: walReader,
header: hdr,
}
if err := s.scan(); err != nil {
return nil, err
}
return s, nil
}
// Header returns the header of the WAL file.
func (c *CompactingScanner) Header() (*WALHeader, error) {
return c.header, nil
}
// Next reads the next frame from the WAL file.
func (c *CompactingScanner) Next() (*Frame, error) {
if c.cIdx >= len(c.frames) {
return nil, io.EOF
}
frame := &Frame{
Pgno: c.frames[c.cIdx].Pgno,
Commit: c.frames[c.cIdx].Commit,
Data: make([]byte, c.header.PageSize),
}
if _, err := c.readSeeker.Seek(c.frames[c.cIdx].Offset+WALFrameHeaderSize, io.SeekStart); err != nil {
return nil, err
}
if _, err := io.ReadFull(c.readSeeker, frame.Data); err != nil {
return nil, err
}
c.cIdx++
return frame, nil
}
func (c *CompactingScanner) scan() error {
waitingForCommit := false
txFrames := make(map[uint32]*cFrame)
frames := make(map[uint32]*cFrame)
buf := make([]byte, c.header.PageSize)
for {
pgno, commit, err := c.walReader.ReadFrame(buf)
if err == io.EOF {
break
} else if err != nil {
return err
}
frame := &cFrame{
Pgno: pgno,
Commit: commit,
Offset: c.walReader.Offset(),
}
// Save latest frame information for each page.
txFrames[pgno] = frame
// If this is not a committing frame, continue to next frame.
if commit == 0 {
waitingForCommit = true
continue
}
waitingForCommit = false
// At the end of each transaction, copy frame information to main map.
for k, v := range txFrames {
frames[k] = v
}
txFrames = make(map[uint32]*cFrame)
}
if waitingForCommit {
return ErrOpenTransaction
}
// Now we have the latest version of each frame. Next we need to sort
// them by offset so we return them in the correct order.
c.frames = make(cFrames, 0, len(frames))
for _, frame := range frames {
c.frames = append(c.frames, frame)
}
sort.Sort(c.frames)
return nil
}
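For illustration, here is a minimal sketch of how the scanner might be driven from outside the package, assuming a WAL file at the hypothetical path test.db-wal (assumed usage, not part of this change):

package main

import (
    "fmt"
    "io"
    "os"

    "github.com/rqlite/rqlite/db/wal"
)

func main() {
    f, err := os.Open("test.db-wal") // hypothetical WAL path
    if err != nil {
        panic(err)
    }
    defer f.Close()

    // NewCompactingScanner pre-scans the WAL and fails with ErrOpenTransaction
    // if the file does not end on a committing frame.
    s, err := wal.NewCompactingScanner(f)
    if err != nil {
        panic(err)
    }

    // Next() yields only the most recent frame for each page, in offset order.
    for {
        frame, err := s.Next()
        if err == io.EOF {
            break
        } else if err != nil {
            panic(err)
        }
        fmt.Println("pgno:", frame.Pgno, "commit:", frame.Commit, "bytes:", len(frame.Data))
    }
}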

@@ -0,0 +1,96 @@
package wal
import (
"bytes"
"io"
"os"
"testing"
)
func Test_CompactingScanner_Scan(t *testing.T) {
b, err := os.ReadFile("testdata/wal-reader/ok/wal")
if err != nil {
t.Fatal(err)
}
s, err := NewCompactingScanner(bytes.NewReader(b))
if err != nil {
t.Fatal(err)
}
for i, expF := range []struct {
pgno uint32
commit uint32
dataLowIdx int
dataHighIdx int
}{
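// Offsets follow from the 32-byte WAL header, 24-byte frame headers and
// 4096-byte pages: frame 1's page data spans bytes [56, 4152), frame 2's
// [4176, 8272) and frame 3's [8296, 12392).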
{1, 0, 56, 4152},
//{2, 2, 4176, 8272}, skipped by the Compactor.
{2, 2, 8296, 12392},
} {
f, err := s.Next()
if err != nil {
t.Fatal(err)
}
if f.Pgno != expF.pgno {
t.Fatalf("expected pgno %d, got %d", expF.pgno, f.Pgno)
}
if f.Commit != expF.commit {
t.Fatalf("expected commit %d, got %d", expF.commit, f.Commit)
}
if len(f.Data) != 4096 {
t.Fatalf("expected data length 4096, got %d", len(f.Data))
}
if !bytes.Equal(f.Data, b[expF.dataLowIdx:expF.dataHighIdx]) {
t.Fatalf("page data mismatch on test %d", i)
}
}
_, err = s.Next()
if err != io.EOF {
t.Fatalf("expected EOF, got %v", err)
}
}
func Test_CompactingScanner_Scan_Commit0(t *testing.T) {
b, err := os.ReadFile("testdata/compacting-scanner/commit-0/wal")
if err != nil {
t.Fatal(err)
}
s, err := NewCompactingScanner(bytes.NewReader(b))
if err != nil {
t.Fatal(err)
}
for _, expF := range []struct {
pgno uint32
commit uint32
}{
// {1,0}, skipped by the Compactor.
// {2,2}, skipped by the Compactor.
{1, 0},
{2, 0},
{3, 0},
{4, 0},
{5, 0},
// {6,6}, skipped by the Compactor.
{6, 6},
} {
f, err := s.Next()
if err != nil {
t.Fatal(err)
}
if f.Pgno != expF.pgno {
t.Fatalf("expected pgno %d, got %d", expF.pgno, f.Pgno)
}
if f.Commit != expF.commit {
t.Fatalf("expected commit %d, got %d", expF.commit, f.Commit)
}
}
_, err = s.Next()
if err != io.EOF {
t.Fatalf("expected EOF, got %v", err)
}
}

@@ -0,0 +1,57 @@
package wal
import (
"io"
)
// FullScanner implements WALIterator to iterate over all frames in a WAL file.
type FullScanner struct {
reader *Reader
header *WALHeader
}
// NewFullScanner creates a new FullScanner with the given io.Reader.
func NewFullScanner(r io.Reader) (*FullScanner, error) {
wr := NewReader(r)
err := wr.ReadHeader()
if err != nil {
return nil, err
}
hdr := &WALHeader{
Magic: wr.magic,
Version: WALSupportedVersion,
PageSize: wr.PageSize(),
Seq: wr.seq,
Salt1: wr.salt1,
Salt2: wr.salt2,
Checksum1: wr.chksum1,
Checksum2: wr.chksum2,
}
return &FullScanner{
reader: wr,
header: hdr,
}, nil
}
// Header returns the header of the WAL file.
func (f *FullScanner) Header() (*WALHeader, error) {
return f.header, nil
}
// Next reads the next frame from the WAL file.
func (f *FullScanner) Next() (*Frame, error) {
data := make([]byte, f.reader.PageSize())
pgno, commit, err := f.reader.ReadFrame(data)
if err != nil {
return nil, err
}
frame := &Frame{
Pgno: pgno,
Commit: commit,
Data: data,
}
return frame, nil
}
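Since FullScanner and CompactingScanner both satisfy the WALIterator interface defined later in this change, the usage sketch shown after the CompactingScanner above could equally be driven by NewFullScanner, in which case every frame is returned, including superseded versions of a page.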

@@ -0,0 +1,53 @@
package wal
import (
"bytes"
"io"
"os"
"testing"
)
func Test_FullScanner_Scan(t *testing.T) {
b, err := os.ReadFile("testdata/wal-reader/ok/wal")
if err != nil {
t.Fatal(err)
}
s, err := NewFullScanner(bytes.NewReader(b))
if err != nil {
t.Fatal(err)
}
for i, expF := range []struct {
pgno uint32
commit uint32
dataLowIdx int
dataHighIdx int
}{
{1, 0, 56, 4152},
{2, 2, 4176, 8272},
{2, 2, 8296, 12392},
} {
f, err := s.Next()
if err != nil {
t.Fatal(err)
}
if f.Pgno != expF.pgno {
t.Fatalf("expected pgno %d, got %d", expF.pgno, f.Pgno)
}
if f.Commit != expF.commit {
t.Fatalf("expected commit %d, got %d", expF.commit, f.Commit)
}
if len(f.Data) != 4096 {
t.Fatalf("expected data length 4096, got %d", len(f.Data))
}
if !bytes.Equal(f.Data, b[expF.dataLowIdx:expF.dataHighIdx]) {
t.Fatalf("page data mismatch on test %d", i)
}
}
_, err = s.Next()
if err != io.EOF {
t.Fatalf("expected EOF, got %v", err)
}
}

@@ -0,0 +1,161 @@
package wal
import (
"encoding/binary"
"fmt"
"io"
)
// SQLite constants
const (
WALHeaderSize = 32
WALFrameHeaderSize = 24
WALSupportedVersion = 3007000
)
// Reader wraps an io.Reader and parses SQLite WAL frames.
//
// This reader verifies the salt & checksum integrity while it reads. It does
// not enforce transaction boundaries (i.e. it may return uncommitted frames).
// It is the responsibility of the caller to handle this.
//
// Reader has been copied from the litefs source code. Many thanks to the
// authors of that software. See https://github.com/superfly/litefs for more
// details.
type Reader struct {
r io.Reader
frameN int
magic uint32
bo binary.ByteOrder
pageSize uint32
seq uint32
salt1, salt2 uint32
chksum1, chksum2 uint32
}
// NewReader returns a new instance of Reader.
func NewReader(r io.Reader) *Reader {
return &Reader{r: r}
}
// PageSize returns the page size from the header. Must call ReadHeader() first.
func (r *Reader) PageSize() uint32 { return r.pageSize }
// Offset returns the file offset of the last read frame.
// Returns zero if no frames have been read.
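// With the 4096-byte pages used in the tests below, that is 32 after the
// first frame has been read, 4152 after the second, and 8272 after the third.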
func (r *Reader) Offset() int64 {
if r.frameN == 0 {
return 0
}
return WALHeaderSize + ((int64(r.frameN) - 1) * (WALFrameHeaderSize + int64(r.pageSize)))
}
// ReadHeader reads the WAL header into the reader. Returns io.EOF if WAL is invalid.
func (r *Reader) ReadHeader() error {
// If we have a partial WAL, then mark WAL as done.
hdr := make([]byte, WALHeaderSize)
if _, err := io.ReadFull(r.r, hdr); err == io.ErrUnexpectedEOF {
return io.EOF
} else if err != nil {
return err
}
// Determine byte order of checksums.
r.magic = binary.BigEndian.Uint32(hdr[0:])
switch r.magic {
case 0x377f0682:
r.bo = binary.LittleEndian
case 0x377f0683:
r.bo = binary.BigEndian
default:
return fmt.Errorf("invalid wal header magic: %x", r.magic)
}
// If the header checksum doesn't match then we may have failed with
// a partial WAL header write during checkpointing.
chksum1 := binary.BigEndian.Uint32(hdr[24:])
chksum2 := binary.BigEndian.Uint32(hdr[28:])
if v0, v1 := WALChecksum(r.bo, 0, 0, hdr[:24]); v0 != chksum1 || v1 != chksum2 {
return io.EOF
}
// Verify version is correct.
if version := binary.BigEndian.Uint32(hdr[4:]); version != WALSupportedVersion {
return fmt.Errorf("unsupported wal version: %d", version)
}
r.pageSize = binary.BigEndian.Uint32(hdr[8:])
r.seq = binary.BigEndian.Uint32(hdr[12:])
r.salt1 = binary.BigEndian.Uint32(hdr[16:])
r.salt2 = binary.BigEndian.Uint32(hdr[20:])
r.chksum1, r.chksum2 = chksum1, chksum2
return nil
}
// ReadFrame reads the next frame from the WAL and returns the page number and
// commit field (the database size in pages for committing frames, zero otherwise).
// Returns io.EOF at the end of the valid WAL.
func (r *Reader) ReadFrame(data []byte) (pgno, commit uint32, err error) {
if len(data) != int(r.pageSize) {
return 0, 0, fmt.Errorf("WALReader.ReadFrame(): buffer size (%d) must match page size (%d)", len(data), r.pageSize)
}
// Read WAL frame header.
hdr := make([]byte, WALFrameHeaderSize)
if _, err := io.ReadFull(r.r, hdr); err == io.ErrUnexpectedEOF {
return 0, 0, io.EOF
} else if err != nil {
return 0, 0, err
}
// Read WAL page data.
if _, err := io.ReadFull(r.r, data); err == io.ErrUnexpectedEOF {
return 0, 0, io.EOF
} else if err != nil {
return 0, 0, err
}
// Verify salt matches the salt in the header.
salt1 := binary.BigEndian.Uint32(hdr[8:])
salt2 := binary.BigEndian.Uint32(hdr[12:])
if r.salt1 != salt1 || r.salt2 != salt2 {
return 0, 0, io.EOF
}
// Verify the checksum is valid.
chksum1 := binary.BigEndian.Uint32(hdr[16:])
chksum2 := binary.BigEndian.Uint32(hdr[20:])
r.chksum1, r.chksum2 = WALChecksum(r.bo, r.chksum1, r.chksum2, hdr[:8]) // frame header
r.chksum1, r.chksum2 = WALChecksum(r.bo, r.chksum1, r.chksum2, data) // frame data
if r.chksum1 != chksum1 || r.chksum2 != chksum2 {
return 0, 0, io.EOF
}
pgno = binary.BigEndian.Uint32(hdr[0:])
commit = binary.BigEndian.Uint32(hdr[4:])
r.frameN++
return pgno, commit, nil
}
// WALChecksum computes a running SQLite WAL checksum over a byte slice.
func WALChecksum(bo binary.ByteOrder, s0, s1 uint32, b []byte) (uint32, uint32) {
assert(len(b)%8 == 0, "misaligned checksum byte slice")
// Iterate over 8-byte units and compute checksum.
for i := 0; i < len(b); i += 8 {
s0 += bo.Uint32(b[i:]) + s1
s1 += bo.Uint32(b[i+4:]) + s0
}
return s0, s1
}
func assert(condition bool, msg string) {
if !condition {
panic("assertion failed: " + msg)
}
}
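As a rough illustration of the checksum scheme, the header's own checksum can be recomputed with WALChecksum, mirroring what ReadHeader does internally (a sketch only, with a hypothetical WAL path):

package main

import (
    "encoding/binary"
    "fmt"
    "io"
    "os"

    "github.com/rqlite/rqlite/db/wal"
)

func main() {
    f, err := os.Open("test.db-wal") // hypothetical WAL path
    if err != nil {
        panic(err)
    }
    defer f.Close()

    hdr := make([]byte, wal.WALHeaderSize)
    if _, err := io.ReadFull(f, hdr); err != nil {
        panic(err)
    }

    // The magic number selects the byte order used for checksumming.
    var bo binary.ByteOrder = binary.LittleEndian
    if binary.BigEndian.Uint32(hdr[0:]) == 0x377f0683 {
        bo = binary.BigEndian
    }

    // The header checksum is seeded with (0, 0) and covers the first 24 header bytes.
    s0, s1 := wal.WALChecksum(bo, 0, 0, hdr[:24])
    ok := s0 == binary.BigEndian.Uint32(hdr[24:]) && s1 == binary.BigEndian.Uint32(hdr[28:])
    fmt.Println("header checksum valid:", ok)
}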

@@ -0,0 +1,242 @@
package wal
import (
"bytes"
"io"
"os"
"testing"
)
func TestReader(t *testing.T) {
t.Run("OK", func(t *testing.T) {
buf := make([]byte, 4096)
b, err := os.ReadFile("testdata/wal-reader/ok/wal")
if err != nil {
t.Fatal(err)
}
// Initialize reader with header info.
r := NewReader(bytes.NewReader(b))
if err := r.ReadHeader(); err != nil {
t.Fatal(err)
} else if got, want := r.PageSize(), uint32(4096); got != want {
t.Fatalf("PageSize()=%d, want %d", got, want)
} else if got, want := r.Offset(), int64(0); got != want {
t.Fatalf("Offset()=%d, want %d", got, want)
}
// Read first frame.
if pgno, commit, err := r.ReadFrame(buf); err != nil {
t.Fatal(err)
} else if got, want := pgno, uint32(1); got != want {
t.Fatalf("pgno=%d, want %d", got, want)
} else if got, want := commit, uint32(0); got != want {
t.Fatalf("commit=%d, want %d", got, want)
} else if !bytes.Equal(buf, b[56:4152]) {
t.Fatal("page data mismatch")
} else if got, want := r.Offset(), int64(32); got != want {
t.Fatalf("Offset()=%d, want %d", got, want)
}
// Read second frame. End of transaction.
if pgno, commit, err := r.ReadFrame(buf); err != nil {
t.Fatal(err)
} else if got, want := pgno, uint32(2); got != want {
t.Fatalf("pgno=%d, want %d", got, want)
} else if got, want := commit, uint32(2); got != want {
t.Fatalf("commit=%d, want %d", got, want)
} else if !bytes.Equal(buf, b[4176:8272]) {
t.Fatal("page data mismatch")
} else if got, want := r.Offset(), int64(4152); got != want {
t.Fatalf("Offset()=%d, want %d", got, want)
}
// Read third frame.
if pgno, commit, err := r.ReadFrame(buf); err != nil {
t.Fatal(err)
} else if got, want := pgno, uint32(2); got != want {
t.Fatalf("pgno=%d, want %d", got, want)
} else if got, want := commit, uint32(2); got != want {
t.Fatalf("commit=%d, want %d", got, want)
} else if !bytes.Equal(buf, b[8296:12392]) {
t.Fatal("page data mismatch")
} else if got, want := r.Offset(), int64(8272); got != want {
t.Fatalf("Offset()=%d, want %d", got, want)
}
if _, _, err := r.ReadFrame(buf); err != io.EOF {
t.Fatalf("unexpected error: %s", err)
}
})
t.Run("SaltMismatch", func(t *testing.T) {
buf := make([]byte, 4096)
b, err := os.ReadFile("testdata/wal-reader/salt-mismatch/wal")
if err != nil {
t.Fatal(err)
}
// Initialize reader with header info.
r := NewReader(bytes.NewReader(b))
if err := r.ReadHeader(); err != nil {
t.Fatal(err)
} else if got, want := r.PageSize(), uint32(4096); got != want {
t.Fatalf("PageSize()=%d, want %d", got, want)
} else if got, want := r.Offset(), int64(0); got != want {
t.Fatalf("Offset()=%d, want %d", got, want)
}
// Read first frame.
if pgno, commit, err := r.ReadFrame(buf); err != nil {
t.Fatal(err)
} else if got, want := pgno, uint32(1); got != want {
t.Fatalf("pgno=%d, want %d", got, want)
} else if got, want := commit, uint32(0); got != want {
t.Fatalf("commit=%d, want %d", got, want)
} else if !bytes.Equal(buf, b[56:4152]) {
t.Fatal("page data mismatch")
}
// Read second frame. Salt has been altered so it doesn't match header.
if _, _, err := r.ReadFrame(buf); err != io.EOF {
t.Fatalf("unexpected error: %s", err)
}
})
t.Run("FrameChecksumMismatch", func(t *testing.T) {
buf := make([]byte, 4096)
b, err := os.ReadFile("testdata/wal-reader/frame-checksum-mismatch/wal")
if err != nil {
t.Fatal(err)
}
// Initialize reader with header info.
r := NewReader(bytes.NewReader(b))
if err := r.ReadHeader(); err != nil {
t.Fatal(err)
} else if got, want := r.PageSize(), uint32(4096); got != want {
t.Fatalf("PageSize()=%d, want %d", got, want)
} else if got, want := r.Offset(), int64(0); got != want {
t.Fatalf("Offset()=%d, want %d", got, want)
}
// Read first frame.
if pgno, commit, err := r.ReadFrame(buf); err != nil {
t.Fatal(err)
} else if got, want := pgno, uint32(1); got != want {
t.Fatalf("pgno=%d, want %d", got, want)
} else if got, want := commit, uint32(0); got != want {
t.Fatalf("commit=%d, want %d", got, want)
} else if !bytes.Equal(buf, b[56:4152]) {
t.Fatal("page data mismatch")
}
// Read second frame. Checksum has been altered so it doesn't match.
if _, _, err := r.ReadFrame(buf); err != io.EOF {
t.Fatalf("unexpected error: %s", err)
}
})
t.Run("ZeroLength", func(t *testing.T) {
r := NewReader(bytes.NewReader(nil))
if err := r.ReadHeader(); err != io.EOF {
t.Fatalf("unexpected error: %#v", err)
}
})
t.Run("PartialHeader", func(t *testing.T) {
r := NewReader(bytes.NewReader(make([]byte, 10)))
if err := r.ReadHeader(); err != io.EOF {
t.Fatalf("unexpected error: %#v", err)
}
})
t.Run("BadMagic", func(t *testing.T) {
r := NewReader(bytes.NewReader(make([]byte, 32)))
if err := r.ReadHeader(); err == nil || err.Error() != `invalid wal header magic: 0` {
t.Fatalf("unexpected error: %#v", err)
}
})
t.Run("BadHeaderChecksum", func(t *testing.T) {
data := []byte{
0x37, 0x7f, 0x06, 0x83, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
r := NewReader(bytes.NewReader(data))
if err := r.ReadHeader(); err != io.EOF {
t.Fatalf("unexpected error: %#v", err)
}
})
t.Run("BadHeaderVersion", func(t *testing.T) {
data := []byte{
0x37, 0x7f, 0x06, 0x83, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x15, 0x7b, 0x20, 0x92, 0xbb, 0xf8, 0x34, 0x1d}
r := NewReader(bytes.NewReader(data))
if err := r.ReadHeader(); err == nil || err.Error() != `unsupported wal version: 1` {
t.Fatalf("unexpected error: %#v", err)
}
})
t.Run("ErrBufferSize", func(t *testing.T) {
b, err := os.ReadFile("testdata/wal-reader/ok/wal")
if err != nil {
t.Fatal(err)
}
// Initialize reader with header info.
r := NewReader(bytes.NewReader(b))
if err := r.ReadHeader(); err != nil {
t.Fatal(err)
}
if _, _, err := r.ReadFrame(make([]byte, 512)); err == nil || err.Error() != `WALReader.ReadFrame(): buffer size (512) must match page size (4096)` {
t.Fatalf("unexpected error: %#v", err)
}
})
t.Run("ErrPartialFrameHeader", func(t *testing.T) {
b, err := os.ReadFile("testdata/wal-reader/ok/wal")
if err != nil {
t.Fatal(err)
}
r := NewReader(bytes.NewReader(b[:40]))
if err := r.ReadHeader(); err != nil {
t.Fatal(err)
} else if _, _, err := r.ReadFrame(make([]byte, 4096)); err != io.EOF {
t.Fatalf("unexpected error: %#v", err)
}
})
t.Run("ErrFrameHeaderOnly", func(t *testing.T) {
b, err := os.ReadFile("testdata/wal-reader/ok/wal")
if err != nil {
t.Fatal(err)
}
r := NewReader(bytes.NewReader(b[:56]))
if err := r.ReadHeader(); err != nil {
t.Fatal(err)
} else if _, _, err := r.ReadFrame(make([]byte, 4096)); err != io.EOF {
t.Fatalf("unexpected error: %#v", err)
}
})
t.Run("ErrPartialFrameData", func(t *testing.T) {
b, err := os.ReadFile("testdata/wal-reader/ok/wal")
if err != nil {
t.Fatal(err)
}
r := NewReader(bytes.NewReader(b[:1000]))
if err := r.ReadHeader(); err != nil {
t.Fatal(err)
} else if _, _, err := r.ReadFrame(make([]byte, 4096)); err != io.EOF {
t.Fatalf("unexpected error: %#v", err)
}
})
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

@@ -0,0 +1,68 @@
package main
import (
"flag"
"fmt"
"io"
"os"
"github.com/rqlite/rqlite/db/wal"
)
const name = `walr`
const desc = `walr is a tool for displaying information about WAL files.`
func init() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "\n%s\n\n", desc)
fmt.Fprintf(os.Stderr, "Usage: %s <Path to WAL file>\n", name)
flag.PrintDefaults()
}
}
func main() {
flag.Parse()
if flag.NArg() == 0 {
flag.Usage()
os.Exit(1)
}
walPath := flag.Args()[0]
walFD, err := os.Open(walPath)
if err != nil {
fmt.Println("failed to open WAL file:", err)
os.Exit(1)
}
r := wal.NewReader(walFD)
if err := r.ReadHeader(); err != nil {
fmt.Println("failed to read WAL header:", err)
os.Exit(1)
}
fmt.Println("WAL page size:", r.PageSize())
nFrames := 0
nCommits := 0
uniquePgs := make(map[uint32]struct{})
buf := make([]byte, r.PageSize())
for {
pgno, commit, err := r.ReadFrame(buf)
if err == io.EOF {
break
} else if err != nil {
fmt.Println("failed to read WAL frame:", err)
os.Exit(1)
}
fmt.Println("pgno:", pgno, "commit:", commit)
uniquePgs[pgno] = struct{}{}
nFrames++
if commit != 0 {
nCommits++
}
}
fmt.Printf("Found %d WAL frames, %d unique pages, %d commit frames\n", nFrames, len(uniquePgs), nCommits)
}
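Assuming this file is what builds the walr binary added to .gitignore above, pointing it at a WAL file (for example, walr /path/to/db.db-wal) prints the page size, the page number and commit value of every frame, and then summary counts of frames, unique pages and committing frames.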

@@ -0,0 +1,147 @@
package wal
import (
"encoding/binary"
"fmt"
"io"
)
// WALHeader represents the header of a WAL file.
type WALHeader struct {
Magic uint32
Version uint32
PageSize uint32
Seq uint32
Salt1 uint32
Salt2 uint32
Checksum1 uint32
Checksum2 uint32
}
// Frame points to a single WAL frame in a WAL file.
type Frame struct {
Pgno uint32
Commit uint32
Data []byte
}
// WALIterator defines the interface for WAL frame iteration.
type WALIterator interface {
Header() (*WALHeader, error)
Next() (*Frame, error)
}
// Writer is used to write a WAL file.
type Writer struct {
r WALIterator
rHeader *WALHeader
chksum1, chksum2 uint32
bo binary.ByteOrder
}
// NewWriter returns a new Writer.
func NewWriter(r WALIterator) (*Writer, error) {
w := &Writer{
r: r,
}
rh, err := w.r.Header()
if err != nil {
return nil, err
}
w.rHeader = rh
w.chksum1, w.chksum2 = w.rHeader.Checksum1, w.rHeader.Checksum2
switch magic := w.rHeader.Magic; magic {
case 0x377f0682:
w.bo = binary.LittleEndian
case 0x377f0683:
w.bo = binary.BigEndian
default:
return nil, fmt.Errorf("invalid wal header magic: %x", magic)
}
return w, nil
}
// WriteTo writes the frames from the WALIterator to the given io.Writer.
func (w *Writer) WriteTo(ww io.Writer) (n int64, retErr error) {
nn, err := w.writeWALHeader(ww)
if err != nil {
return nn, err
}
n += nn
for {
frame, err := w.r.Next()
if err != nil {
if err == io.EOF {
break // No more frames!
}
return n, err
}
if nn, err = w.writeFrame(ww, frame); err != nil {
return n + nn, err
}
n += nn
}
return n, nil
}
func (w *Writer) writeWALHeader(ww io.Writer) (n int64, err error) {
rHeader, err := w.r.Header()
if err != nil {
return 0, err
}
wHeader := make([]byte, WALHeaderSize)
binary.BigEndian.PutUint32(wHeader[0:], rHeader.Magic)
binary.BigEndian.PutUint32(wHeader[4:], rHeader.Version)
// Database page size
binary.BigEndian.PutUint32(wHeader[8:], rHeader.PageSize)
// Checkpoint sequence number
binary.BigEndian.PutUint32(wHeader[12:], rHeader.Seq)
// Salt values, reusing the original salt values.
binary.BigEndian.PutUint32(wHeader[16:], rHeader.Salt1)
binary.BigEndian.PutUint32(wHeader[20:], rHeader.Salt2)
// Checksum of header
w.chksum1, w.chksum2 = rHeader.Checksum1, rHeader.Checksum2
binary.BigEndian.PutUint32(wHeader[24:], w.chksum1)
binary.BigEndian.PutUint32(wHeader[28:], w.chksum2)
// Write the header to the new WAL file.
nn, err := ww.Write(wHeader)
return int64(nn), err
}
func (w *Writer) writeFrame(ww io.Writer, frame *Frame) (n int64, err error) {
frmHdr := make([]byte, WALFrameHeaderSize)
// Calculate the frame header.
binary.BigEndian.PutUint32(frmHdr[0:], frame.Pgno)
binary.BigEndian.PutUint32(frmHdr[4:], frame.Commit)
binary.BigEndian.PutUint32(frmHdr[8:], w.rHeader.Salt1)
binary.BigEndian.PutUint32(frmHdr[12:], w.rHeader.Salt2)
// Checksum of frame header: "...the first 8 bytes..."
w.chksum1, w.chksum2 = WALChecksum(w.bo, w.chksum1, w.chksum2, frmHdr[:8])
// Update checksum using frame data: "...the content of all frames up to and including the current frame."
w.chksum1, w.chksum2 = WALChecksum(w.bo, w.chksum1, w.chksum2, frame.Data)
binary.BigEndian.PutUint32(frmHdr[16:], w.chksum1)
binary.BigEndian.PutUint32(frmHdr[20:], w.chksum2)
// Write the frame header and data.
nn, err := ww.Write(frmHdr)
if err != nil {
return n + int64(nn), err
}
n += int64(nn)
nn, err = ww.Write(frame.Data)
return n + int64(nn), err
}
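To show how the scanner and writer are meant to fit together, here is a minimal sketch that rewrites a source WAL into a compacted copy; the file names are assumptions for illustration only:

package main

import (
    "os"

    "github.com/rqlite/rqlite/db/wal"
)

func main() {
    src, err := os.Open("src.db-wal") // hypothetical source WAL
    if err != nil {
        panic(err)
    }
    defer src.Close()

    dst, err := os.Create("compacted.db-wal") // hypothetical destination WAL
    if err != nil {
        panic(err)
    }
    defer dst.Close()

    // The CompactingScanner yields only the latest frame for each page...
    s, err := wal.NewCompactingScanner(src)
    if err != nil {
        panic(err)
    }

    // ...and the Writer re-emits them, re-chaining the frame checksums as it goes.
    w, err := wal.NewWriter(s)
    if err != nil {
        panic(err)
    }
    if _, err := w.WriteTo(dst); err != nil {
        panic(err)
    }
}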

@@ -0,0 +1,230 @@
package wal
import (
"bytes"
"database/sql"
"fmt"
"io"
"os"
"testing"
_ "github.com/rqlite/go-sqlite3"
"github.com/rqlite/rqlite/random"
)
func Test_Writer_FullScanner(t *testing.T) {
b, err := os.ReadFile("testdata/wal-reader/ok/wal")
if err != nil {
t.Fatal(err)
}
s, err := NewFullScanner(bytes.NewReader(b))
if err != nil {
t.Fatal(err)
}
// Simply reading every frame and writing it back to a buffer should
// result in the same bytes as the original WAL file.
var buf bytes.Buffer
w, err := NewWriter(s)
if err != nil {
t.Fatal(err)
}
n, err := w.WriteTo(&buf)
if err != nil {
t.Fatal(err)
}
if n != int64(len(b)) {
t.Fatalf("expected to write %d bytes, wrote %d", len(b), n)
}
if !bytes.Equal(b, buf.Bytes()) {
t.Fatal("writer did not write the same bytes as the reader")
}
}
func Test_Writer_FullScanner_LargeWAL(t *testing.T) {
conn, path := mustCreateWAL(t, 128*1024)
defer conn.Close()
b, err := os.ReadFile(path)
if err != nil {
t.Fatal(err)
}
t.Log("WAL size:", len(b))
s, err := NewFullScanner(bytes.NewReader(b))
if err != nil {
t.Fatal(err)
}
// Simply reading every frame and writing it back to a buffer should
// result in the same bytes as the original WAL file.
var buf bytes.Buffer
w, err := NewWriter(s)
if err != nil {
t.Fatal(err)
}
n, err := w.WriteTo(&buf)
if err != nil {
t.Fatal(err)
}
if n != int64(len(b)) {
t.Fatalf("expected to write %d bytes, wrote %d", len(b), n)
}
if !bytes.Equal(b, buf.Bytes()) {
t.Fatal("writer did not write the same bytes as the reader")
}
}
// Test_Writer_CompactingScanner tests the CompactingScanner by repeatedly
// using it to copy a WAL file, and using that WAL file to apply changes to a
// second database. The second database should end up identical to the first in
// terms of SQL query results.
func Test_Writer_CompactingScanner(t *testing.T) {
srcDir := t.TempDir()
srcDSN := fmt.Sprintf("file:%s", srcDir+"/src.db?_journal_mode=WAL&_synchronous=OFF")
srcDB := srcDir + "/src.db"
srcWAL := srcDir + "/src.db-wal"
srcConn, err := sql.Open("sqlite3", srcDSN)
if err != nil {
t.Fatal(err)
}
defer srcConn.Close()
mustExec(srcConn, "PRAGMA wal_autocheckpoint=0")
destDir := t.TempDir()
destDSN := fmt.Sprintf("file:%s", destDir+"/dest.db")
destDB := destDir + "/dest.db"
destWAL := destDir + "/dest.db-wal"
mustExec(srcConn, "CREATE TABLE foo (id INTEGER PRIMARY KEY, name TEXT)")
// Copy the src database to the dest database to seed the process.
mustCopyFile(destDB, srcDB)
writeRows := func(db *sql.DB, n int) {
// Write n random rows to the given database; this data will land in its WAL.
for i := 0; i < n; i++ {
mustExec(db, fmt.Sprintf(`INSERT INTO foo (name) VALUES ('%s')`, random.String()))
}
mustExec(db, "PRAGMA wal_checkpoint(FULL)")
}
copyAndCompactWAL := func(sWAL, dWAL string) {
// Copy the src WAL to the dest WAL using the CompactingScanner.
srcF, err := os.Open(sWAL)
if err != nil {
t.Fatal(err)
}
defer srcF.Close()
destF, err := os.Create(dWAL)
if err != nil {
t.Fatal(err)
}
defer destF.Close()
s, err := NewCompactingScanner(srcF)
if err != nil {
t.Fatal(err)
}
w, err := NewWriter(s)
if err != nil {
t.Fatal(err)
}
_, err = w.WriteTo(destF)
if err != nil {
t.Fatal(err)
}
}
checkRowCount := func(dsn string, n int) {
db, err := sql.Open("sqlite3", dsn)
if err != nil {
t.Fatal(err)
}
defer db.Close()
rows, err := db.Query("SELECT COUNT(*) FROM foo")
if err != nil {
t.Fatal(err)
}
defer rows.Close()
var count int
for rows.Next() {
if err := rows.Scan(&count); err != nil {
t.Fatal(err)
}
}
if count != n {
t.Fatalf("expected %d rows, got %d", n, count)
}
}
writeRows(srcConn, 1000)
copyAndCompactWAL(srcWAL, destWAL)
checkRowCount(srcDSN, 1000)
checkRowCount(destDSN, 1000)
writeRows(srcConn, 1000)
copyAndCompactWAL(srcWAL, destWAL)
checkRowCount(srcDSN, 2000)
checkRowCount(destDSN, 2000)
writeRows(srcConn, 1000)
copyAndCompactWAL(srcWAL, destWAL)
checkRowCount(srcDSN, 3000)
checkRowCount(destDSN, 3000)
}
func mustExec(db *sql.DB, query string) {
if _, err := db.Exec(query); err != nil {
panic(err)
}
}
func mustCreateWAL(t *testing.T, size int) (*sql.DB, string) {
dir := t.TempDir()
rwDSN := fmt.Sprintf("file:%s", dir+"/test.db")
rwDB, err := sql.Open("sqlite3", rwDSN)
if err != nil {
panic(err)
}
mustExec(rwDB, "PRAGMA journal_mode=WAL")
mustExec(rwDB, "PRAGMA wal_autocheckpoint=0")
mustExec(rwDB, "PRAGMA synchronous=OFF")
mustExec(rwDB, "CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT)")
for {
for i := 0; i < 10; i++ {
mustExec(rwDB, "INSERT INTO test (name) VALUES ('name')")
}
// Stop once the WAL file has grown to at least the requested size.
if fi, err := os.Stat(dir + "/test.db-wal"); err == nil && fi.Size() >= int64(size) {
break
}
}
return rwDB, dir + "/test.db-wal"
}
func mustCopyFile(dst, src string) {
srcF, err := os.Open(src)
if err != nil {
panic(err)
}
defer srcF.Close()
dstF, err := os.Create(dst)
if err != nil {
panic(err)
}
defer dstF.Close()
_, err = io.Copy(dstF, srcF)
if err != nil {
panic(err)
}
}