Diffstat (limited to 'src/pkg/index/suffixarray')
-rw-r--r-- | src/pkg/index/suffixarray/qsufsort.go          | 44
-rw-r--r-- | src/pkg/index/suffixarray/suffixarray.go       | 86
-rw-r--r-- | src/pkg/index/suffixarray/suffixarray_test.go  | 50
3 files changed, 152 insertions, 28 deletions
diff --git a/src/pkg/index/suffixarray/qsufsort.go b/src/pkg/index/suffixarray/qsufsort.go
index 30c110442..f4ec3a103 100644
--- a/src/pkg/index/suffixarray/qsufsort.go
+++ b/src/pkg/index/suffixarray/qsufsort.go
@@ -26,7 +26,7 @@ package suffixarray
 
 import "sort"
 
-func qsufsort(data []byte) []int {
+func qsufsort(data []byte) []int32 {
     // initial sorting by first byte of suffix
     sa := sortedByFirstByte(data)
     if len(sa) < 2 {
@@ -37,22 +37,22 @@ func qsufsort(data []byte) []int {
     inv := initGroups(sa, data)
 
     // the index starts 1-ordered
-    sufSortable := &suffixSortable{sa, inv, 1}
+    sufSortable := &suffixSortable{sa: sa, inv: inv, h: 1}
 
-    for sa[0] > -len(sa) { // until all suffixes are one big sorted group
+    for int(sa[0]) > -len(sa) { // until all suffixes are one big sorted group
         // The suffixes are h-ordered, make them 2*h-ordered
         pi := 0 // pi is first position of first group
         sl := 0 // sl is negated length of sorted groups
         for pi < len(sa) {
-            if s := sa[pi]; s < 0 { // if pi starts sorted group
+            if s := int(sa[pi]); s < 0 { // if pi starts sorted group
                 pi -= s // skip over sorted group
                 sl += s // add negated length to sl
             } else { // if pi starts unsorted group
                 if sl != 0 {
-                    sa[pi+sl] = sl // combine sorted groups before pi
+                    sa[pi+sl] = int32(sl) // combine sorted groups before pi
                     sl = 0
                 }
-                pk := inv[s] + 1 // pk-1 is last position of unsorted group
+                pk := int(inv[s]) + 1 // pk-1 is last position of unsorted group
                 sufSortable.sa = sa[pi:pk]
                 sort.Sort(sufSortable)
                 sufSortable.updateGroups(pi)
@@ -60,19 +60,19 @@ func qsufsort(data []byte) []int {
             }
         }
         if sl != 0 { // if the array ends with a sorted group
-            sa[pi+sl] = sl // combine sorted groups at end of sa
+            sa[pi+sl] = int32(sl) // combine sorted groups at end of sa
         }
         sufSortable.h *= 2 // double sorted depth
     }
 
     for i := range sa { // reconstruct suffix array from inverse
-        sa[inv[i]] = i
+        sa[inv[i]] = int32(i)
     }
     return sa
 }
 
-func sortedByFirstByte(data []byte) []int {
+func sortedByFirstByte(data []byte) []int32 {
     // total byte counts
     var count [256]int
     for _, b := range data {
@@ -84,17 +84,17 @@ func sortedByFirstByte(data []byte) []int {
         count[b], sum = sum, count[b]+sum
     }
     // iterate through bytes, placing index into the correct spot in sa
-    sa := make([]int, len(data))
+    sa := make([]int32, len(data))
     for i, b := range data {
-        sa[count[b]] = i
+        sa[count[b]] = int32(i)
         count[b]++
     }
     return sa
 }
 
-func initGroups(sa []int, data []byte) []int {
+func initGroups(sa []int32, data []byte) []int32 {
     // label contiguous same-letter groups with the same group number
-    inv := make([]int, len(data))
+    inv := make([]int32, len(data))
     prevGroup := len(sa) - 1
     groupByte := data[sa[prevGroup]]
     for i := len(sa) - 1; i >= 0; i-- {
@@ -105,7 +105,7 @@ func initGroups(sa []int, data []byte) []int {
             groupByte = b
             prevGroup = i
         }
-        inv[sa[i]] = prevGroup
+        inv[sa[i]] = int32(prevGroup)
         if prevGroup == 0 {
             sa[0] = -1
         }
@@ -120,9 +120,9 @@ func initGroups(sa []int, data []byte) []int {
             if data[sa[i]] == lastByte && s == -1 {
                 s = i
             }
-            if sa[i] == len(sa)-1 {
+            if int(sa[i]) == len(sa)-1 {
                 sa[i], sa[s] = sa[s], sa[i]
-                inv[sa[s]] = s
+                inv[sa[s]] = int32(s)
                 sa[s] = -1 // mark it as an isolated sorted group
                 break
             }
@@ -132,9 +132,10 @@ func initGroups(sa []int, data []byte) []int {
 }
 
 type suffixSortable struct {
-    sa  []int
-    inv []int
-    h   int
+    sa  []int32
+    inv []int32
+    h   int32
+    buf []int // common scratch space
 }
 
 func (x *suffixSortable) Len() int { return len(x.sa) }
@@ -142,7 +143,7 @@ func (x *suffixSortable) Less(i, j int) bool { return x.inv[x.sa[i]+x.h] < x.inv[x.sa[j]+x.h] }
 func (x *suffixSortable) Swap(i, j int) { x.sa[i], x.sa[j] = x.sa[j], x.sa[i] }
 
 func (x *suffixSortable) updateGroups(offset int) {
-    bounds := make([]int, 0, 4)
+    bounds := x.buf[0:0]
     group := x.inv[x.sa[0]+x.h]
     for i := 1; i < len(x.sa); i++ {
         if g := x.inv[x.sa[i]+x.h]; g > group {
@@ -151,12 +152,13 @@ func (x *suffixSortable) updateGroups(offset int) {
         }
     }
     bounds = append(bounds, len(x.sa))
+    x.buf = bounds
 
     // update the group numberings after all new groups are determined
     prev := 0
     for _, b := range bounds {
         for i := prev; i < b; i++ {
-            x.inv[x.sa[i]] = offset + b - 1
+            x.inv[x.sa[i]] = int32(offset + b - 1)
         }
         if b-prev == 1 {
             x.sa[prev] = -1
diff --git a/src/pkg/index/suffixarray/suffixarray.go b/src/pkg/index/suffixarray/suffixarray.go
index 82e98d2ef..cff7daa9d 100644
--- a/src/pkg/index/suffixarray/suffixarray.go
+++ b/src/pkg/index/suffixarray/suffixarray.go
@@ -18,14 +18,17 @@ package suffixarray
 
 import (
     "bytes"
-    "regexp"
+    "exp/regexp"
+    "gob"
+    "io"
+    "os"
     "sort"
 )
 
 // Index implements a suffix array for fast substring search.
 type Index struct {
     data []byte
-    sa   []int // suffix array for data
+    sa   []int32 // suffix array for data; len(sa) == len(data)
 }
 
 // New creates a new Index for data.
@@ -34,6 +37,76 @@ func New(data []byte) *Index {
     return &Index{data, qsufsort(data)}
 }
 
+// Read and Write slice the data into successive portions of length gobN,
+// so gob can allocate smaller buffers for its I/O.
+const gobN = 1 << 16 // slightly better than say 1 << 20 (BenchmarkSaveRestore)
+
+// Read reads the index from r into x; x must not be nil.
+func (x *Index) Read(r io.Reader) os.Error {
+    d := gob.NewDecoder(r)
+    var n int
+    if err := d.Decode(&n); err != nil {
+        return err
+    }
+    if 2*n < cap(x.data) || cap(x.data) < n {
+        // new data is significantly smaller or larger then
+        // existing buffers - allocate new ones
+        x.data = make([]byte, n)
+        x.sa = make([]int32, n)
+    } else {
+        // re-use existing buffers
+        x.data = x.data[0:n]
+        x.sa = x.sa[0:n]
+    }
+    for i := 0; i < n; {
+        j := i + gobN
+        if j > n {
+            j = n
+        }
+        // data holds next piece of x.data; its length is updated by Decode
+        data := x.data[i:j]
+        if err := d.Decode(&data); err != nil {
+            return err
+        }
+        if len(data) != j-i {
+            return os.NewError("suffixarray.Read: inconsistent data format")
+        }
+        // sa holds next piece of x.data; its length is updated by Decode
+        sa := x.sa[i:j]
+        if err := d.Decode(&sa); err != nil {
+            return err
+        }
+        if len(sa) != j-i {
+            return os.NewError("suffixarray.Read: inconsistent data format")
+        }
+        i = j
+    }
+    return nil
+}
+
+// Write writes the index x to w.
+func (x *Index) Write(w io.Writer) os.Error {
+    e := gob.NewEncoder(w)
+    n := len(x.data)
+    if err := e.Encode(n); err != nil {
+        return err
+    }
+    for i := 0; i < n; {
+        j := i + gobN
+        if j > n {
+            j = n
+        }
+        if err := e.Encode(x.data[i:j]); err != nil {
+            return err
+        }
+        if err := e.Encode(x.sa[i:j]); err != nil {
+            return err
+        }
+        i = j
+    }
+    return nil
+}
+
 // Bytes returns the data over which the index was created.
 // It must not be modified.
 //
@@ -47,7 +120,7 @@ func (x *Index) at(i int) []byte {
 
 // lookupAll returns a slice into the matching region of the index.
 // The runtime is O(log(N)*len(s)).
-func (x *Index) lookupAll(s []byte) []int {
+func (x *Index) lookupAll(s []byte) []int32 {
     // find matching suffix index range [i:j]
     // find the first index where s would be the prefix
     i := sort.Search(len(x.sa), func(i int) bool { return bytes.Compare(x.at(i), s) >= 0 })
@@ -65,12 +138,15 @@ func (x *Index) lookupAll(s []byte) []int {
 func (x *Index) Lookup(s []byte, n int) (result []int) {
     if len(s) > 0 && n != 0 {
         matches := x.lookupAll(s)
-        if len(matches) < n || n < 0 {
+        if n < 0 || len(matches) < n {
             n = len(matches)
         }
+        // 0 <= n <= len(matches)
         if n > 0 {
             result = make([]int, n)
-            copy(result, matches)
+            for i, x := range matches[0:n] {
+                result[i] = int(x)
+            }
         }
     }
     return
diff --git a/src/pkg/index/suffixarray/suffixarray_test.go b/src/pkg/index/suffixarray/suffixarray_test.go
index 023748500..9b4d89f42 100644
--- a/src/pkg/index/suffixarray/suffixarray_test.go
+++ b/src/pkg/index/suffixarray/suffixarray_test.go
@@ -6,7 +6,8 @@ package suffixarray
 
 import (
     "bytes"
-    "regexp"
+    "exp/regexp"
+    "rand"
     "sort"
     "strings"
     "testing"
@@ -213,7 +214,33 @@ func (a *index) at(i int) []byte { return a.data[a.sa[i]:] }
 
 func testConstruction(t *testing.T, tc *testCase, x *Index) {
     if !sort.IsSorted((*index)(x)) {
-        t.Errorf("testConstruction failed %s", tc.name)
+        t.Errorf("failed testConstruction %s", tc.name)
+    }
+}
+
+func equal(x, y *Index) bool {
+    if !bytes.Equal(x.data, y.data) {
+        return false
+    }
+    for i, j := range x.sa {
+        if j != y.sa[i] {
+            return false
+        }
+    }
+    return true
+}
+
+func testSaveRestore(t *testing.T, tc *testCase, x *Index) {
+    var buf bytes.Buffer
+    if err := x.Write(&buf); err != nil {
+        t.Errorf("failed writing index %s (%s)", tc.name, err)
+    }
+    var y Index
+    if err := y.Read(&buf); err != nil {
+        t.Errorf("failed reading index %s (%s)", tc.name, err)
+    }
+    if !equal(x, &y) {
+        t.Errorf("restored index doesn't match saved index %s", tc.name)
     }
 }
 
@@ -221,6 +248,7 @@ func TestIndex(t *testing.T) {
     for _, tc := range testCases {
         x := New([]byte(tc.source))
         testConstruction(t, &tc, x)
+        testSaveRestore(t, &tc, x)
         testLookups(t, &tc, x, 0)
         testLookups(t, &tc, x, 1)
         testLookups(t, &tc, x, 10)
@@ -228,3 +256,21 @@ func TestIndex(t *testing.T) {
         testLookups(t, &tc, x, 100)
         testLookups(t, &tc, x, -1)
     }
 }
+
+func BenchmarkSaveRestore(b *testing.B) {
+    b.StopTimer()
+    r := rand.New(rand.NewSource(0x5a77a1)) // guarantee always same sequence
+    data := make([]byte, 10<<20)            // 10MB index data
+    for i := range data {
+        data[i] = byte(r.Intn(256))
+    }
+    x := New(data)
+    testSaveRestore(nil, nil, x)                     // verify correctness
+    buf := bytes.NewBuffer(make([]byte, len(data))) // avoid frequent growing
+    b.StartTimer()
+    for i := 0; i < b.N; i++ {
+        x.Write(buf)
+        var y Index
+        y.Read(buf)
+    }
+}
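
A rough usage sketch (not part of the commit): it builds an index, saves it with the new Write method, restores it with Read, and queries the restored copy. It assumes the pre-Go1 API shown in the diff (Write/Read return os.Error, bytes.Buffer as the io.Writer/io.Reader); the "banana"/"an" strings are arbitrary illustration data.

package main

import (
    "bytes"
    "fmt"
    "index/suffixarray"
)

func main() {
    // Build an index over some sample data.
    x := suffixarray.New([]byte("banana"))

    // Serialize the index into an in-memory buffer with the new Write method.
    var buf bytes.Buffer
    if err := x.Write(&buf); err != nil {
        fmt.Println("write failed:", err)
        return
    }

    // Restore it into a fresh Index value with Read.
    var y suffixarray.Index
    if err := y.Read(&buf); err != nil {
        fmt.Println("read failed:", err)
        return
    }

    // Lookup still returns []int, so callers are unaffected by the
    // internal switch from []int to []int32.
    fmt.Println(y.Lookup([]byte("an"), -1)) // indices 1 and 3, in unspecified order
}

Because Lookup copies the matching int32 entries into a fresh []int, the exported API is unchanged while the index itself stores half as much data per entry.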