summaryrefslogtreecommitdiff
path: root/src/pkg/index
diff options
context:
space:
mode:
Diffstat (limited to 'src/pkg/index')
-rw-r--r--src/pkg/index/suffixarray/qsufsort.go38
-rw-r--r--src/pkg/index/suffixarray/suffixarray.go150
-rw-r--r--src/pkg/index/suffixarray/suffixarray_test.go40
3 files changed, 153 insertions, 75 deletions
diff --git a/src/pkg/index/suffixarray/qsufsort.go b/src/pkg/index/suffixarray/qsufsort.go
index f4ec3a103..c69be43c2 100644
--- a/src/pkg/index/suffixarray/qsufsort.go
+++ b/src/pkg/index/suffixarray/qsufsort.go
@@ -26,7 +26,7 @@ package suffixarray
import "sort"
-func qsufsort(data []byte) []int32 {
+func qsufsort(data []byte) []int {
// initial sorting by first byte of suffix
sa := sortedByFirstByte(data)
if len(sa) < 2 {
@@ -39,20 +39,20 @@ func qsufsort(data []byte) []int32 {
// the index starts 1-ordered
sufSortable := &suffixSortable{sa: sa, inv: inv, h: 1}
- for int(sa[0]) > -len(sa) { // until all suffixes are one big sorted group
+ for sa[0] > -len(sa) { // until all suffixes are one big sorted group
// The suffixes are h-ordered, make them 2*h-ordered
pi := 0 // pi is first position of first group
sl := 0 // sl is negated length of sorted groups
for pi < len(sa) {
- if s := int(sa[pi]); s < 0 { // if pi starts sorted group
+ if s := sa[pi]; s < 0 { // if pi starts sorted group
pi -= s // skip over sorted group
sl += s // add negated length to sl
} else { // if pi starts unsorted group
if sl != 0 {
- sa[pi+sl] = int32(sl) // combine sorted groups before pi
+ sa[pi+sl] = sl // combine sorted groups before pi
sl = 0
}
- pk := int(inv[s]) + 1 // pk-1 is last position of unsorted group
+ pk := inv[s] + 1 // pk-1 is last position of unsorted group
sufSortable.sa = sa[pi:pk]
sort.Sort(sufSortable)
sufSortable.updateGroups(pi)
@@ -60,19 +60,19 @@ func qsufsort(data []byte) []int32 {
}
}
if sl != 0 { // if the array ends with a sorted group
- sa[pi+sl] = int32(sl) // combine sorted groups at end of sa
+ sa[pi+sl] = sl // combine sorted groups at end of sa
}
sufSortable.h *= 2 // double sorted depth
}
for i := range sa { // reconstruct suffix array from inverse
- sa[inv[i]] = int32(i)
+ sa[inv[i]] = i
}
return sa
}
-func sortedByFirstByte(data []byte) []int32 {
+func sortedByFirstByte(data []byte) []int {
// total byte counts
var count [256]int
for _, b := range data {
@@ -84,17 +84,17 @@ func sortedByFirstByte(data []byte) []int32 {
count[b], sum = sum, count[b]+sum
}
// iterate through bytes, placing index into the correct spot in sa
- sa := make([]int32, len(data))
+ sa := make([]int, len(data))
for i, b := range data {
- sa[count[b]] = int32(i)
+ sa[count[b]] = i
count[b]++
}
return sa
}
-func initGroups(sa []int32, data []byte) []int32 {
+func initGroups(sa []int, data []byte) []int {
// label contiguous same-letter groups with the same group number
- inv := make([]int32, len(data))
+ inv := make([]int, len(data))
prevGroup := len(sa) - 1
groupByte := data[sa[prevGroup]]
for i := len(sa) - 1; i >= 0; i-- {
@@ -105,7 +105,7 @@ func initGroups(sa []int32, data []byte) []int32 {
groupByte = b
prevGroup = i
}
- inv[sa[i]] = int32(prevGroup)
+ inv[sa[i]] = prevGroup
if prevGroup == 0 {
sa[0] = -1
}
@@ -120,9 +120,9 @@ func initGroups(sa []int32, data []byte) []int32 {
if data[sa[i]] == lastByte && s == -1 {
s = i
}
- if int(sa[i]) == len(sa)-1 {
+ if sa[i] == len(sa)-1 {
sa[i], sa[s] = sa[s], sa[i]
- inv[sa[s]] = int32(s)
+ inv[sa[s]] = s
sa[s] = -1 // mark it as an isolated sorted group
break
}
@@ -132,9 +132,9 @@ func initGroups(sa []int32, data []byte) []int32 {
}
type suffixSortable struct {
- sa []int32
- inv []int32
- h int32
+ sa []int
+ inv []int
+ h int
buf []int // common scratch space
}
@@ -158,7 +158,7 @@ func (x *suffixSortable) updateGroups(offset int) {
prev := 0
for _, b := range bounds {
for i := prev; i < b; i++ {
- x.inv[x.sa[i]] = int32(offset + b - 1)
+ x.inv[x.sa[i]] = offset + b - 1
}
if b-prev == 1 {
x.sa[prev] = -1
diff --git a/src/pkg/index/suffixarray/suffixarray.go b/src/pkg/index/suffixarray/suffixarray.go
index cff7daa9d..c59ae6eef 100644
--- a/src/pkg/index/suffixarray/suffixarray.go
+++ b/src/pkg/index/suffixarray/suffixarray.go
@@ -18,17 +18,16 @@ package suffixarray
import (
"bytes"
- "exp/regexp"
- "gob"
+ "encoding/binary"
"io"
- "os"
+ "regexp"
"sort"
)
// Index implements a suffix array for fast substring search.
type Index struct {
data []byte
- sa []int32 // suffix array for data; len(sa) == len(data)
+ sa []int // suffix array for data; len(sa) == len(data)
}
// New creates a new Index for data.
@@ -37,72 +36,125 @@ func New(data []byte) *Index {
return &Index{data, qsufsort(data)}
}
-// Read and Write slice the data into successive portions of length gobN,
-// so gob can allocate smaller buffers for its I/O.
-const gobN = 1 << 16 // slightly better than say 1 << 20 (BenchmarkSaveRestore)
+// writeInt writes an int x to w using buf to buffer the write.
+func writeInt(w io.Writer, buf []byte, x int) error {
+ binary.PutVarint(buf, int64(x))
+ _, err := w.Write(buf[0:binary.MaxVarintLen64])
+ return err
+}
+
+// readInt reads an int x from r using buf to buffer the read and returns x.
+func readInt(r io.Reader, buf []byte) (int, error) {
+ _, err := io.ReadFull(r, buf[0:binary.MaxVarintLen64]) // ok to continue with error
+ x, _ := binary.Varint(buf)
+ return int(x), err
+}
+
+// writeSlice writes data[:n] to w and returns n.
+// It uses buf to buffer the write.
+func writeSlice(w io.Writer, buf []byte, data []int) (n int, err error) {
+ // encode as many elements as fit into buf
+ p := binary.MaxVarintLen64
+ for ; n < len(data) && p+binary.MaxVarintLen64 <= len(buf); n++ {
+ p += binary.PutUvarint(buf[p:], uint64(data[n]))
+ }
+
+ // update buffer size
+ binary.PutVarint(buf, int64(p))
+
+ // write buffer
+ _, err = w.Write(buf[0:p])
+ return
+}
+
+// readSlice reads data[:n] from r and returns n.
+// It uses buf to buffer the read.
+func readSlice(r io.Reader, buf []byte, data []int) (n int, err error) {
+ // read buffer size
+ var size int
+ size, err = readInt(r, buf)
+ if err != nil {
+ return
+ }
+
+ // read buffer w/o the size
+ if _, err = io.ReadFull(r, buf[binary.MaxVarintLen64:size]); err != nil {
+ return
+ }
+
+ // decode as many elements as present in buf
+ for p := binary.MaxVarintLen64; p < size; n++ {
+ x, w := binary.Uvarint(buf[p:])
+ data[n] = int(x)
+ p += w
+ }
+
+ return
+}
+
+const bufSize = 16 << 10 // reasonable for BenchmarkSaveRestore
// Read reads the index from r into x; x must not be nil.
-func (x *Index) Read(r io.Reader) os.Error {
- d := gob.NewDecoder(r)
- var n int
- if err := d.Decode(&n); err != nil {
+func (x *Index) Read(r io.Reader) error {
+ // buffer for all reads
+ buf := make([]byte, bufSize)
+
+ // read length
+ n, err := readInt(r, buf)
+ if err != nil {
return err
}
+
+ // allocate space
if 2*n < cap(x.data) || cap(x.data) < n {
// new data is significantly smaller or larger than
// existing buffers - allocate new ones
x.data = make([]byte, n)
- x.sa = make([]int32, n)
+ x.sa = make([]int, n)
} else {
// re-use existing buffers
x.data = x.data[0:n]
x.sa = x.sa[0:n]
}
- for i := 0; i < n; {
- j := i + gobN
- if j > n {
- j = n
- }
- // data holds next piece of x.data; its length is updated by Decode
- data := x.data[i:j]
- if err := d.Decode(&data); err != nil {
- return err
- }
- if len(data) != j-i {
- return os.NewError("suffixarray.Read: inconsistent data format")
- }
- // sa holds next piece of x.data; its length is updated by Decode
- sa := x.sa[i:j]
- if err := d.Decode(&sa); err != nil {
+
+ // read data
+ if _, err := io.ReadFull(r, x.data); err != nil {
+ return err
+ }
+
+ // read index
+ for sa := x.sa; len(sa) > 0; {
+ n, err := readSlice(r, buf, sa)
+ if err != nil {
return err
}
- if len(sa) != j-i {
- return os.NewError("suffixarray.Read: inconsistent data format")
- }
- i = j
+ sa = sa[n:]
}
return nil
}
// Write writes the index x to w.
-func (x *Index) Write(w io.Writer) os.Error {
- e := gob.NewEncoder(w)
- n := len(x.data)
- if err := e.Encode(n); err != nil {
+func (x *Index) Write(w io.Writer) error {
+ // buffer for all writes
+ buf := make([]byte, bufSize)
+
+ // write length
+ if err := writeInt(w, buf, len(x.data)); err != nil {
return err
}
- for i := 0; i < n; {
- j := i + gobN
- if j > n {
- j = n
- }
- if err := e.Encode(x.data[i:j]); err != nil {
- return err
- }
- if err := e.Encode(x.sa[i:j]); err != nil {
+
+ // write data
+ if _, err := w.Write(x.data); err != nil {
+ return err
+ }
+
+ // write index
+ for sa := x.sa; len(sa) > 0; {
+ n, err := writeSlice(w, buf, sa)
+ if err != nil {
return err
}
- i = j
+ sa = sa[n:]
}
return nil
}
@@ -120,7 +172,7 @@ func (x *Index) at(i int) []byte {
// lookupAll returns a slice into the matching region of the index.
// The runtime is O(log(N)*len(s)).
-func (x *Index) lookupAll(s []byte) []int32 {
+func (x *Index) lookupAll(s []byte) []int {
// find matching suffix index range [i:j]
// find the first index where s would be the prefix
i := sort.Search(len(x.sa), func(i int) bool { return bytes.Compare(x.at(i), s) >= 0 })
@@ -144,9 +196,7 @@ func (x *Index) Lookup(s []byte, n int) (result []int) {
// 0 <= n <= len(matches)
if n > 0 {
result = make([]int, n)
- for i, x := range matches[0:n] {
- result[i] = int(x)
- }
+ copy(result, matches)
}
}
return
diff --git a/src/pkg/index/suffixarray/suffixarray_test.go b/src/pkg/index/suffixarray/suffixarray_test.go
index 9b4d89f42..df3e449d3 100644
--- a/src/pkg/index/suffixarray/suffixarray_test.go
+++ b/src/pkg/index/suffixarray/suffixarray_test.go
@@ -6,8 +6,8 @@ package suffixarray
import (
"bytes"
- "exp/regexp"
- "rand"
+ "math/rand"
+ "regexp"
"sort"
"strings"
"testing"
@@ -230,11 +230,13 @@ func equal(x, y *Index) bool {
return true
}
-func testSaveRestore(t *testing.T, tc *testCase, x *Index) {
+// returns the serialized index size
+func testSaveRestore(t *testing.T, tc *testCase, x *Index) int {
var buf bytes.Buffer
if err := x.Write(&buf); err != nil {
t.Errorf("failed writing index %s (%s)", tc.name, err)
}
+ size := buf.Len()
var y Index
if err := y.Read(&buf); err != nil {
t.Errorf("failed reading index %s (%s)", tc.name, err)
@@ -242,6 +244,7 @@ func testSaveRestore(t *testing.T, tc *testCase, x *Index) {
if !equal(x, &y) {
t.Errorf("restored index doesn't match saved index %s", tc.name)
}
+ return size
}
func TestIndex(t *testing.T) {
@@ -257,16 +260,41 @@ func TestIndex(t *testing.T) {
}
}
+// Of all possible inputs, the random bytes have the least amount of substring
+// repetition, and the repeated bytes have the most. For most algorithms,
+// the running time of every input will be between these two.
+func benchmarkNew(b *testing.B, random bool) {
+ b.StopTimer()
+ data := make([]byte, 1e6)
+ if random {
+ for i := range data {
+ data[i] = byte(rand.Intn(256))
+ }
+ }
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ New(data)
+ }
+}
+
+func BenchmarkNewIndexRandom(b *testing.B) {
+ benchmarkNew(b, true)
+}
+func BenchmarkNewIndexRepeat(b *testing.B) {
+ benchmarkNew(b, false)
+}
+
func BenchmarkSaveRestore(b *testing.B) {
b.StopTimer()
r := rand.New(rand.NewSource(0x5a77a1)) // guarantee always same sequence
- data := make([]byte, 10<<20) // 10MB index data
+ data := make([]byte, 10<<20) // 10MB of data to index
for i := range data {
data[i] = byte(r.Intn(256))
}
x := New(data)
- testSaveRestore(nil, nil, x) // verify correctness
- buf := bytes.NewBuffer(make([]byte, len(data))) // avoid frequent growing
+ size := testSaveRestore(nil, nil, x) // verify correctness
+ buf := bytes.NewBuffer(make([]byte, size)) // avoid growing
+ b.SetBytes(int64(size))
b.StartTimer()
for i := 0; i < b.N; i++ {
x.Write(buf)