Diffstat:
 libgo/go/compress/flate/deflate_test.go | 429 ++++++++++++++++++++++++++++---
 1 file changed, 414 insertions(+), 15 deletions(-)
diff --git a/libgo/go/compress/flate/deflate_test.go b/libgo/go/compress/flate/deflate_test.go
index 72bc6652c8..fbea761721 100644
--- a/libgo/go/compress/flate/deflate_test.go
+++ b/libgo/go/compress/flate/deflate_test.go
@@ -6,11 +6,13 @@ package flate
import (
"bytes"
+ "errors"
"fmt"
"internal/testenv"
"io"
"io/ioutil"
"reflect"
+ "runtime/debug"
"sync"
"testing"
)
@@ -42,10 +44,10 @@ var deflateTests = []*deflateTest{
{[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 0,
[]byte{0, 8, 0, 247, 255, 17, 17, 17, 17, 17, 17, 17, 17, 1, 0, 0, 255, 255},
},
- {[]byte{}, 1, []byte{1, 0, 0, 255, 255}},
- {[]byte{0x11}, 1, []byte{18, 4, 4, 0, 0, 255, 255}},
- {[]byte{0x11, 0x12}, 1, []byte{18, 20, 2, 4, 0, 0, 255, 255}},
- {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 1, []byte{18, 132, 2, 64, 0, 0, 0, 255, 255}},
+ {[]byte{}, 2, []byte{1, 0, 0, 255, 255}},
+ {[]byte{0x11}, 2, []byte{18, 4, 4, 0, 0, 255, 255}},
+ {[]byte{0x11, 0x12}, 2, []byte{18, 20, 2, 4, 0, 0, 255, 255}},
+ {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 2, []byte{18, 132, 2, 64, 0, 0, 0, 255, 255}},
{[]byte{}, 9, []byte{1, 0, 0, 255, 255}},
{[]byte{0x11}, 9, []byte{18, 4, 4, 0, 0, 255, 255}},
{[]byte{0x11, 0x12}, 9, []byte{18, 20, 2, 4, 0, 0, 255, 255}},
@@ -80,6 +82,32 @@ func largeDataChunk() []byte {
return result
}
+func TestBulkHash4(t *testing.T) {
+ for _, x := range deflateTests {
+ y := x.out
+ if len(y) < minMatchLength {
+ continue
+ }
+ y = append(y, y...)
+ for j := 4; j < len(y); j++ {
+ y := y[:j]
+ dst := make([]uint32, len(y)-minMatchLength+1)
+ for i := range dst {
+ dst[i] = uint32(i + 100)
+ }
+ bulkHash4(y, dst)
+ for i, got := range dst {
+ want := hash4(y[i:])
+ if got != want && got == uint32(i)+100 {
+ t.Errorf("Len:%d Index:%d, want 0x%08x but not modified", len(y), i, want)
+ } else if got != want {
+ t.Errorf("Len:%d Index:%d, got 0x%08x want:0x%08x", len(y), i, got, want)
+ }
+ }
+ }
+ }
+}
+
func TestDeflate(t *testing.T) {
for _, h := range deflateTests {
var buf bytes.Buffer
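
The check in the new TestBulkHash4 pins down the contract of the unexported bulkHash4: every dst[i] must equal hash4 applied at offset i. A minimal reference sketch of that contract, assuming only what the test itself asserts (bulkHash4Reference is an illustrative name, not part of the package):

	// Illustrative reference: bulkHash4 must fill dst exactly as this loop
	// would, hashing the four (minMatchLength) bytes at each position.
	func bulkHash4Reference(b []byte, dst []uint32) {
		for i := range dst {
			dst[i] = hash4(b[i:])
		}
	}
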
@@ -91,7 +119,7 @@ func TestDeflate(t *testing.T) {
w.Write(h.in)
w.Close()
if !bytes.Equal(buf.Bytes(), h.out) {
- t.Errorf("Deflate(%d, %x) = %x, want %x", h.level, h.in, buf.Bytes(), h.out)
+ t.Errorf("Deflate(%d, %x) = \n%#v, want \n%#v", h.level, h.in, buf.Bytes(), h.out)
}
}
}
@@ -247,7 +275,7 @@ func testSync(t *testing.T, level int, input []byte, name string) {
// not necessarily the case: the write Flush may emit
// some extra framing bits that are not necessary
// to process to obtain the first half of the uncompressed
- // data.  The test ran correctly most of the time, because
+ // data. The test ran correctly most of the time, because
// the background goroutine had usually read even
// those extra bits by now, but it's not a useful thing to
// check.
@@ -289,6 +317,9 @@ func testToFromWithLevelAndLimit(t *testing.T, level int, input []byte, name str
t.Errorf("level: %d, len(compress(data)) = %d > limit = %d", level, buffer.Len(), limit)
return
}
+ if limit > 0 {
+ t.Logf("level: %d, size:%.2f%%, %d b\n", level, float64(buffer.Len()*100)/float64(limit), buffer.Len())
+ }
r := NewReader(&buffer)
out, err := ioutil.ReadAll(r)
if err != nil {
@@ -303,15 +334,18 @@ func testToFromWithLevelAndLimit(t *testing.T, level int, input []byte, name str
testSync(t, level, input, name)
}
-func testToFromWithLimit(t *testing.T, input []byte, name string, limit [10]int) {
+func testToFromWithLimit(t *testing.T, input []byte, name string, limit [11]int) {
for i := 0; i < 10; i++ {
testToFromWithLevelAndLimit(t, i, input, name, limit[i])
}
+ // Test HuffmanCompression
+ testToFromWithLevelAndLimit(t, -2, input, name, limit[10])
}
func TestDeflateInflate(t *testing.T) {
+ t.Parallel()
for i, h := range deflateInflateTests {
- testToFromWithLimit(t, h.in, fmt.Sprintf("#%d", i), [10]int{})
+ testToFromWithLimit(t, h.in, fmt.Sprintf("#%d", i), [11]int{})
}
}
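
The level -2 passed above is the value of flate's HuffmanOnly constant (the comment in the hunk calls it HuffmanCompression). For reference, the exported compression levels in compress/flate are:

	const (
		NoCompression      = 0
		BestSpeed          = 1
		BestCompression    = 9
		DefaultCompression = -1
		HuffmanOnly        = -2 // entropy coding only, no LZ77 matching
	)
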
@@ -327,23 +361,24 @@ func TestReverseBits(t *testing.T) {
type deflateInflateStringTest struct {
filename string
label string
- limit [10]int
+ limit [11]int
}
var deflateInflateStringTests = []deflateInflateStringTest{
{
"../testdata/e.txt",
"2.718281828...",
- [...]int{100018, 50650, 50960, 51150, 50930, 50790, 50790, 50790, 50790, 50790},
+ [...]int{100018, 50650, 50960, 51150, 50930, 50790, 50790, 50790, 50790, 50790, 43683},
},
{
"../testdata/Mark.Twain-Tom.Sawyer.txt",
"Mark.Twain-Tom.Sawyer",
- [...]int{407330, 187598, 180361, 172974, 169160, 163476, 160936, 160506, 160295, 160295},
+ [...]int{407330, 187598, 180361, 172974, 169160, 163476, 160936, 160506, 160295, 160295, 233460},
},
}
func TestDeflateInflateString(t *testing.T) {
+ t.Parallel()
if testing.Short() && testenv.Builder() == "" {
t.Skip("skipping in short mode")
}
@@ -431,6 +466,7 @@ func TestRegression2508(t *testing.T) {
}
func TestWriterReset(t *testing.T) {
+ t.Parallel()
for level := 0; level <= 9; level++ {
if testing.Short() && level > 1 {
break
@@ -457,6 +493,18 @@ func TestWriterReset(t *testing.T) {
// DeepEqual doesn't compare functions.
w.d.fill, wref.d.fill = nil, nil
w.d.step, wref.d.step = nil, nil
+ w.d.bulkHasher, wref.d.bulkHasher = nil, nil
+ w.d.bestSpeed, wref.d.bestSpeed = nil, nil
+ // hashMatch is always overwritten when used.
+ copy(w.d.hashMatch[:], wref.d.hashMatch[:])
+ if len(w.d.tokens) != 0 {
+ t.Errorf("level %d Writer not reset after Reset. %d tokens were present", level, len(w.d.tokens))
+ }
+ // As long as the length is 0, we don't care about the content.
+ w.d.tokens = wref.d.tokens
+
+ // We don't care if there are values in the window, as long as d.index is 0.
+ w.d.window = wref.d.window
if !reflect.DeepEqual(w, wref) {
t.Errorf("level %d Writer not reset after Reset", level)
}
@@ -481,7 +529,7 @@ func testResetOutput(t *testing.T, newWriter func(w io.Writer) (*Writer, error))
w.Write(b)
}
w.Close()
- out1 := buf.String()
+ out1 := buf.Bytes()
buf2 := new(bytes.Buffer)
w.Reset(buf2)
@@ -489,10 +537,361 @@ func testResetOutput(t *testing.T, newWriter func(w io.Writer) (*Writer, error))
w.Write(b)
}
w.Close()
- out2 := buf2.String()
+ out2 := buf2.Bytes()
- if out1 != out2 {
- t.Errorf("got %q, expected %q", out2, out1)
+ if len(out1) != len(out2) {
+ t.Errorf("got %d, expected %d bytes", len(out2), len(out1))
+ return
+ }
+ if !bytes.Equal(out1, out2) {
+ mm := 0
+ for i, b := range out1[:len(out2)] {
+ if b != out2[i] {
+ t.Errorf("mismatch index %d: %#02x, expected %#02x", i, out2[i], b)
+ }
+ mm++
+ if mm == 10 {
+ t.Fatal("Stopping")
+ }
+ }
}
t.Logf("got %d bytes", len(out1))
}
+
+// TestBestSpeed tests that round-tripping through deflate and then inflate
+// recovers the original input. The Write sizes are near the thresholds in the
+// compressor.encSpeed method (0, 16, 128), as well as near maxStoreBlockSize
+// (65535).
+func TestBestSpeed(t *testing.T) {
+ t.Parallel()
+ abc := make([]byte, 128)
+ for i := range abc {
+ abc[i] = byte(i)
+ }
+ abcabc := bytes.Repeat(abc, 131072/len(abc))
+ var want []byte
+
+ testCases := [][]int{
+ {65536, 0},
+ {65536, 1},
+ {65536, 1, 256},
+ {65536, 1, 65536},
+ {65536, 14},
+ {65536, 15},
+ {65536, 16},
+ {65536, 16, 256},
+ {65536, 16, 65536},
+ {65536, 127},
+ {65536, 128},
+ {65536, 128, 256},
+ {65536, 128, 65536},
+ {65536, 129},
+ {65536, 65536, 256},
+ {65536, 65536, 65536},
+ }
+
+ for i, tc := range testCases {
+ for _, firstN := range []int{1, 65534, 65535, 65536, 65537, 131072} {
+ tc[0] = firstN
+ outer:
+ for _, flush := range []bool{false, true} {
+ buf := new(bytes.Buffer)
+ want = want[:0]
+
+ w, err := NewWriter(buf, BestSpeed)
+ if err != nil {
+ t.Errorf("i=%d, firstN=%d, flush=%t: NewWriter: %v", i, firstN, flush, err)
+ continue
+ }
+ for _, n := range tc {
+ want = append(want, abcabc[:n]...)
+ if _, err := w.Write(abcabc[:n]); err != nil {
+ t.Errorf("i=%d, firstN=%d, flush=%t: Write: %v", i, firstN, flush, err)
+ continue outer
+ }
+ if !flush {
+ continue
+ }
+ if err := w.Flush(); err != nil {
+ t.Errorf("i=%d, firstN=%d, flush=%t: Flush: %v", i, firstN, flush, err)
+ continue outer
+ }
+ }
+ if err := w.Close(); err != nil {
+ t.Errorf("i=%d, firstN=%d, flush=%t: Close: %v", i, firstN, flush, err)
+ continue
+ }
+
+ r := NewReader(buf)
+ got, err := ioutil.ReadAll(r)
+ if err != nil {
+ t.Errorf("i=%d, firstN=%d, flush=%t: ReadAll: %v", i, firstN, flush, err)
+ continue
+ }
+ r.Close()
+
+ if !bytes.Equal(got, want) {
+ t.Errorf("i=%d, firstN=%d, flush=%t: corruption during deflate-then-inflate", i, firstN, flush)
+ continue
+ }
+ }
+ }
+ }
+}
+
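
The comment on TestBestSpeed names the thresholds 0, 16, and 128 in compressor.encSpeed. A hypothetical sketch of a dispatch consistent with those thresholds (the function and its exact boundary conditions are illustrative guesses, not the actual encSpeed code):

	// Illustrative only: tiny blocks are cheapest stored raw, mid-sized ones
	// benefit from Huffman coding alone, and larger ones from LZ77 matching.
	func encSpeedSketch(block []byte) string {
		switch {
		case len(block) == 0:
			return "skip"
		case len(block) <= 16:
			return "stored"
		case len(block) < 128:
			return "huffman-only"
		default:
			return "best-speed"
		}
	}
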
+var errIO = errors.New("IO error")
+
+// failWriter fails with errIO exactly at the nth call to Write.
+type failWriter struct{ n int }
+
+func (w *failWriter) Write(b []byte) (int, error) {
+ w.n--
+ if w.n == -1 {
+ return 0, errIO
+ }
+ return len(b), nil
+}
+
+func TestWriterPersistentError(t *testing.T) {
+ t.Parallel()
+ d, err := ioutil.ReadFile("../testdata/Mark.Twain-Tom.Sawyer.txt")
+ if err != nil {
+ t.Fatalf("ReadFile: %v", err)
+ }
+ d = d[:10000] // Keep this test short
+
+ zw, err := NewWriter(nil, DefaultCompression)
+ if err != nil {
+ t.Fatalf("NewWriter: %v", err)
+ }
+
+ // Sweep over the threshold at which an error is returned.
+ // The variable i makes it such that the ith call to failWriter.Write will
+ // return errIO. Since failWriter errors are not persistent, we must ensure
+ // that flate.Writer errors are persistent.
+ for i := 0; i < 1000; i++ {
+ fw := &failWriter{i}
+ zw.Reset(fw)
+
+ _, werr := zw.Write(d)
+ cerr := zw.Close()
+ if werr != errIO && werr != nil {
+ t.Errorf("test %d, mismatching Write error: got %v, want %v", i, werr, errIO)
+ }
+ if cerr != errIO && fw.n < 0 {
+ t.Errorf("test %d, mismatching Close error: got %v, want %v", i, cerr, errIO)
+ }
+ if fw.n >= 0 {
+ // At this point, the failure threshold was sufficiently high
+ // that we wrote the whole stream without any errors.
+ return
+ }
+ }
+}
+
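
The property exercised above is that a single failure from the underlying writer must latch inside the flate.Writer, even though failWriter itself succeeds again on the next call. The usual shape of such a guarantee is a sticky error; a minimal sketch of the pattern (stickyWriter is illustrative, not the package's actual bookkeeping):

	type stickyWriter struct {
		w   io.Writer
		err error
	}

	func (s *stickyWriter) Write(p []byte) (int, error) {
		if s.err != nil {
			return 0, s.err // every later call repeats the first failure
		}
		n, err := s.w.Write(p)
		s.err = err // latch the first error
		return n, err
	}
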
+func TestBestSpeedMatch(t *testing.T) {
+ t.Parallel()
+ cases := []struct {
+ previous, current []byte
+ t, s, want int32
+ }{{
+ previous: []byte{0, 0, 0, 1, 2},
+ current: []byte{3, 4, 5, 0, 1, 2, 3, 4, 5},
+ t: -3,
+ s: 3,
+ want: 6,
+ }, {
+ previous: []byte{0, 0, 0, 1, 2},
+ current: []byte{2, 4, 5, 0, 1, 2, 3, 4, 5},
+ t: -3,
+ s: 3,
+ want: 3,
+ }, {
+ previous: []byte{0, 0, 0, 1, 1},
+ current: []byte{3, 4, 5, 0, 1, 2, 3, 4, 5},
+ t: -3,
+ s: 3,
+ want: 2,
+ }, {
+ previous: []byte{0, 0, 0, 1, 2},
+ current: []byte{2, 2, 2, 2, 1, 2, 3, 4, 5},
+ t: -1,
+ s: 0,
+ want: 4,
+ }, {
+ previous: []byte{0, 0, 0, 1, 2, 3, 4, 5, 2, 2},
+ current: []byte{2, 2, 2, 2, 1, 2, 3, 4, 5},
+ t: -7,
+ s: 4,
+ want: 5,
+ }, {
+ previous: []byte{9, 9, 9, 9, 9},
+ current: []byte{2, 2, 2, 2, 1, 2, 3, 4, 5},
+ t: -1,
+ s: 0,
+ want: 0,
+ }, {
+ previous: []byte{9, 9, 9, 9, 9},
+ current: []byte{9, 2, 2, 2, 1, 2, 3, 4, 5},
+ t: 0,
+ s: 1,
+ want: 0,
+ }, {
+ previous: []byte{},
+ current: []byte{9, 2, 2, 2, 1, 2, 3, 4, 5},
+ t: -5,
+ s: 1,
+ want: 0,
+ }, {
+ previous: []byte{},
+ current: []byte{9, 2, 2, 2, 1, 2, 3, 4, 5},
+ t: -1,
+ s: 1,
+ want: 0,
+ }, {
+ previous: []byte{},
+ current: []byte{2, 2, 2, 2, 1, 2, 3, 4, 5},
+ t: 0,
+ s: 1,
+ want: 3,
+ }, {
+ previous: []byte{3, 4, 5},
+ current: []byte{3, 4, 5},
+ t: -3,
+ s: 0,
+ want: 3,
+ }, {
+ previous: make([]byte, 1000),
+ current: make([]byte, 1000),
+ t: -1000,
+ s: 0,
+ want: maxMatchLength - 4,
+ }, {
+ previous: make([]byte, 200),
+ current: make([]byte, 500),
+ t: -200,
+ s: 0,
+ want: maxMatchLength - 4,
+ }, {
+ previous: make([]byte, 200),
+ current: make([]byte, 500),
+ t: 0,
+ s: 1,
+ want: maxMatchLength - 4,
+ }, {
+ previous: make([]byte, maxMatchLength-4),
+ current: make([]byte, 500),
+ t: -(maxMatchLength - 4),
+ s: 0,
+ want: maxMatchLength - 4,
+ }, {
+ previous: make([]byte, 200),
+ current: make([]byte, 500),
+ t: -200,
+ s: 400,
+ want: 100,
+ }, {
+ previous: make([]byte, 10),
+ current: make([]byte, 500),
+ t: 200,
+ s: 400,
+ want: 100,
+ }}
+ for i, c := range cases {
+ e := deflateFast{prev: c.previous}
+ got := e.matchLen(c.s, c.t, c.current)
+ if got != c.want {
+ t.Errorf("Test %d: match length, want %d, got %d", i, c.want, got)
+ }
+ }
+}
+
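
Reading the table: matchLen(s, t, current) counts how many bytes from current[s:] match the reference sequence starting at offset t, where a negative t indexes that many bytes back from the end of the previous block and a match may run onward from there into the current block; the count never passes the end of current and is capped at maxMatchLength-4. A reference implementation inferred from the cases above (a sketch, not the package's code):

	func matchLenRef(prev, cur []byte, s, t int32) int32 {
		n := int32(0)
		for int(s+n) < len(cur) && n < maxMatchLength-4 {
			var c byte
			if p := t + n; p >= 0 {
				c = cur[p] // reference position inside the current block
			} else if q := int32(len(prev)) + p; q >= 0 {
				c = prev[q] // still inside the previous block
			} else {
				break // before the start of the previous block: no match
			}
			if cur[s+n] != c {
				break
			}
			n++
		}
		return n
	}
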
+func TestBestSpeedMaxMatchOffset(t *testing.T) {
+ t.Parallel()
+ const abc, xyz = "abcdefgh", "stuvwxyz"
+ for _, matchBefore := range []bool{false, true} {
+ for _, extra := range []int{0, inputMargin - 1, inputMargin, inputMargin + 1, 2 * inputMargin} {
+ for offsetAdj := -5; offsetAdj <= +5; offsetAdj++ {
+ report := func(desc string, err error) {
+ t.Errorf("matchBefore=%t, extra=%d, offsetAdj=%d: %s%v",
+ matchBefore, extra, offsetAdj, desc, err)
+ }
+
+ offset := maxMatchOffset + offsetAdj
+
+ // Make src a []byte of the form
+ // "%s%s%s%s%s" % (abc, zeros0, xyzMaybe, abc, zeros1)
+ // where:
+ // zeros0 is approximately maxMatchOffset zeros.
+ // xyzMaybe is either xyz or the empty string.
+ // zeros1 is between 0 and 30 zeros.
+ // The difference between the two abc's will be offset, which
+ // is maxMatchOffset plus or minus a small adjustment.
+ src := make([]byte, offset+len(abc)+extra)
+ copy(src, abc)
+ if !matchBefore {
+ copy(src[offset-len(xyz):], xyz)
+ }
+ copy(src[offset:], abc)
+
+ buf := new(bytes.Buffer)
+ w, err := NewWriter(buf, BestSpeed)
+ if err != nil {
+ report("NewWriter: ", err)
+ continue
+ }
+ if _, err := w.Write(src); err != nil {
+ report("Write: ", err)
+ continue
+ }
+ if err := w.Close(); err != nil {
+ report("Writer.Close: ", err)
+ continue
+ }
+
+ r := NewReader(buf)
+ dst, err := ioutil.ReadAll(r)
+ r.Close()
+ if err != nil {
+ report("ReadAll: ", err)
+ continue
+ }
+
+ if !bytes.Equal(dst, src) {
+ report("", fmt.Errorf("bytes differ after round-tripping"))
+ continue
+ }
+ }
+ }
+ }
+}
+
+func TestMaxStackSize(t *testing.T) {
+ // This test must not run in parallel with other tests as debug.SetMaxStack
+ // affects all goroutines.
+ n := debug.SetMaxStack(1 << 16)
+ defer debug.SetMaxStack(n)
+
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ b := make([]byte, 1<<20)
+ for level := HuffmanOnly; level <= BestCompression; level++ {
+ // Run in separate goroutine to increase probability of stack regrowth.
+ wg.Add(1)
+ go func(level int) {
+ defer wg.Done()
+ zw, err := NewWriter(ioutil.Discard, level)
+ if err != nil {
+ t.Errorf("level %d, NewWriter() = %v, want nil", level, err)
+ }
+ if n, err := zw.Write(b); n != len(b) || err != nil {
+ t.Errorf("level %d, Write() = (%d, %v), want (%d, nil)", level, n, err, len(b))
+ }
+ if err := zw.Close(); err != nil {
+ t.Errorf("level %d, Close() = %v, want nil", level, err)
+ }
+ zw.Reset(ioutil.Discard)
+ }(level)
+ }
+}
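
Since these tests exercise unexported identifiers (hash4, bulkHash4, deflateFast, maxMatchOffset), they compile only inside the flate package itself and run with the ordinary standard-library invocation, e.g. go test compress/flate.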