pkg/detach: fix broken Copy() detach sequence
The code could only handle the detach sequence when it read one byte at
a time. This is obviously not correct and led to some issues for my
automated tests in my podman PR[1], where I added automated tests for
detaching: how many bytes a single read returns is undefined and depends
on the input side and kernel scheduling.
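
To make that concrete, a minimal sketch (illustrative only, not taken
from the commit): a bytes.Buffer hands back everything in a single Read,
so matching that only triggers on one-byte reads never fires.

	// illustrative: the detach keys arrive inside one multi-byte read
	src := bytes.NewBufferString("hello" + "AB")
	dst := &bytes.Buffer{}
	_, err := Copy(dst, src, []byte{'A', 'B'})
	// old code: writes "helloAB" and returns nil, the detach is missed
	// new code: writes "hello" and returns ErrDetach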

This is a large rework that makes the code check for the key sequence
across the entire buffer. That is of course more work, but it has to
happen for this to work correctly, as sketched below.
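
For example (a sketch using the customReader test helper added below),
the sequence can also be split across reads; a partial match at the end
of one read has to be held back and then either completed or flushed by
the next read:

	// force 1-byte reads so the keys span two reads
	src := newCustomReader(bytes.NewBuffer([]byte("hiAB")), 1)
	dst := &bytes.Buffer{}
	written, err := Copy(dst, src, []byte{'A', 'B'})
	// err == ErrDetach, written == 2, dst contains only "hi":
	// the pending 'A' is buffered, never written, and completed by 'B'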

I guess the only reason this was never noticed is that users normally
detach manually by typing, which is much slower than automated input and
thus likely delivers the bytes one at a time.

I added a new test to actually confirm the behavior, and to ensure it
works with various read sizes I made it a fuzz test. I had this running
for a while and did not spot any issues there, while the old code
already fails the simple test cases.

[1] containers/podman#25083

Signed-off-by: Paul Holzinger <[email protected]>
Luap99 committed Jan 23, 2025
1 parent 34a90af commit ab34bb7
Showing 2 changed files with 183 additions and 31 deletions.
89 changes: 58 additions & 31 deletions pkg/detach/copy.go
@@ -1,6 +1,7 @@
 package detach
 
 import (
+	"bytes"
 	"errors"
 	"io"
 )
@@ -11,47 +12,73 @@ var ErrDetach = errors.New("detached from container")
 
 // Copy is similar to io.Copy but support a detach key sequence to break out.
 func Copy(dst io.Writer, src io.Reader, keys []byte) (written int64, err error) {
+	// if no key sequence we can use the fast std lib implementation
+	if len(keys) == 0 {
+		return io.Copy(dst, src)
+	}
 	buf := make([]byte, 32*1024)
+	tmpKeyBuf := make([]byte, 0, len(keys))
+outer:
 	for {
 		nr, er := src.Read(buf)
 		if nr > 0 {
-			preservBuf := []byte{}
-			for i, key := range keys {
-				preservBuf = append(preservBuf, buf[0:nr]...)
-				if nr != 1 || buf[0] != key {
-					break
-				}
-				if i == len(keys)-1 {
-					return 0, ErrDetach
-				}
-				nr, er = src.Read(buf)
-			}
-			var nw int
-			var ew error
-			if len(preservBuf) > 0 {
-				nw, ew = dst.Write(preservBuf)
-				nr = len(preservBuf)
-			} else {
-				nw, ew = dst.Write(buf[0:nr])
-			}
-			if nw > 0 {
-				written += int64(nw)
-			}
-			if ew != nil {
-				err = ew
-				break
-			}
-			if nr != nw {
-				err = io.ErrShortWrite
-				break
+			// previous key buffer: check if this read continues a
+			// partial key sequence match from the last read
+			if len(tmpKeyBuf) > 0 {
+				bytesToCheck := min(nr, len(keys)-len(tmpKeyBuf))
+				if bytes.Equal(buf[:bytesToCheck], keys[len(tmpKeyBuf):len(tmpKeyBuf)+bytesToCheck]) {
+					if len(tmpKeyBuf)+bytesToCheck == len(keys) {
+						// we are done
+						return written, ErrDetach
+					}
+					tmpKeyBuf = append(tmpKeyBuf, buf[:bytesToCheck]...)
+					continue outer
+				}
+
+				// No match, write buffered keys now
+				nw, ew := dst.Write(tmpKeyBuf)
+				if ew != nil {
+					return written, ew
+				}
+				written += int64(nw)
+				tmpKeyBuf = tmpKeyBuf[:0]
+			}
+
+			// check for the full key sequence anywhere in the buffer
+			readMinusKeys := nr - len(keys)
+			for i := 0; i <= readMinusKeys; i++ {
+				if bytes.Equal(buf[i:i+len(keys)], keys) {
+					if i > 0 {
+						nw, ew := dst.Write(buf[:i])
+						if ew != nil {
+							return written, ew
+						}
+						written += int64(nw)
+					}
+					return written, ErrDetach
+				}
+			}
+
+			// check for a partial key sequence at the end of the buffer,
+			// it must be kept back until the next read resolves it
+			for i := max(readMinusKeys, 0); i < nr; i++ {
+				if bytes.Equal(buf[i:nr], keys[:nr-i]) {
+					nw, ew := dst.Write(buf[:i])
+					if ew != nil {
+						return written, ew
+					}
+					written += int64(nw)
+					tmpKeyBuf = append(tmpKeyBuf, buf[i:nr]...)
+					continue outer
+				}
+			}
+
+			nw, ew := dst.Write(buf[:nr])
+			if ew != nil {
+				return written, ew
+			}
+			written += int64(nw)
 		}
+
 		if er != nil {
-			if er != io.EOF {
-				err = er
+			if er == io.EOF {
+				return written, nil
 			}
-			break
+			return written, er
 		}
 	}
-	return written, err
 }
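
For context, a minimal caller sketch; the import path and the
ctrl-p,ctrl-q (16, 17) key sequence are assumptions for illustration,
not taken from this commit:

	package main

	import (
		"errors"
		"fmt"
		"os"

		"github.com/containers/common/pkg/detach" // assumed import path
	)

	func main() {
		// Copy behaves like io.Copy until the key sequence shows up in src.
		written, err := detach.Copy(os.Stdout, os.Stdin, []byte{16, 17})
		if errors.Is(err, detach.ErrDetach) {
			fmt.Fprintf(os.Stderr, "detached after %d bytes\n", written)
			return
		}
		if err != nil {
			fmt.Fprintln(os.Stderr, "copy:", err)
		}
	}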
125 changes: 125 additions & 0 deletions pkg/detach/copy_test.go
@@ -0,0 +1,125 @@
package detach

import (
	"bytes"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

var (
	smallBytes = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	bigBytes   = []byte(strings.Repeat("0F", 32*1024+30))
)

func newCustomReader(buf *bytes.Buffer, readsize uint) *customReader {
	return &customReader{
		inner:    buf,
		readsize: readsize,
	}
}

type customReader struct {
	inner    *bytes.Buffer
	readsize uint
}

func (c *customReader) Read(p []byte) (n int, err error) {
	return c.inner.Read(p[:min(int(c.readsize), len(p))])
}

func FuzzCopy(f *testing.F) {
	for _, i := range []uint{1, 2, 3, 5, 10, 100, 200, 1000, 1024, 32 * 1024} {
		f.Add(i)
	}

	f.Fuzz(func(t *testing.T, readSize uint) {
		// 0 is not a valid read size
		if readSize == 0 {
			t.Skip()
		}

		tests := []struct {
			name         string
			from         []byte
			expected     []byte
			expectDetach bool
			keys         []byte
		}{
			{
				name:     "small copy",
				from:     smallBytes,
				expected: smallBytes,
				keys:     nil,
			},
			{
				name:     "small copy with detach keys",
				from:     smallBytes,
				expected: smallBytes,
				keys:     []byte{'A', 'B'},
			},
			{
				name:     "big copy",
				from:     bigBytes,
				expected: bigBytes,
				keys:     nil,
			},
			{
				name:     "big copy with detach keys",
				from:     bigBytes,
				expected: bigBytes,
				keys:     []byte{'A', 'B'},
			},
			{
				name:         "simple detach 1 key",
				from:         append(smallBytes, 'A'),
				expected:     smallBytes,
				expectDetach: true,
				keys:         []byte{'A'},
			},
			{
				name:         "simple detach 2 keys",
				from:         append(smallBytes, 'A', 'B'),
				expected:     smallBytes,
				expectDetach: true,
				keys:         []byte{'A', 'B'},
			},
			{
				name:         "detach early",
				from:         append(smallBytes, 'A', 'B', 'B', 'A'),
				expected:     smallBytes,
				expectDetach: true,
				keys:         []byte{'A', 'B'},
			},
			{
				name:         "detach with partial match",
				from:         append(smallBytes, 'A', 'A', 'A', 'B'),
				expected:     append(smallBytes, 'A', 'A'),
				expectDetach: true,
				keys:         []byte{'A', 'B'},
			},
			{
				name:         "big buffer detach with partial match",
				from:         append(bigBytes, 'A', 'A', 'A', 'B'),
				expected:     append(bigBytes, 'A', 'A'),
				expectDetach: true,
				keys:         []byte{'A', 'B'},
			},
		}
		for _, tt := range tests {
			t.Run(tt.name, func(t *testing.T) {
				dst := &bytes.Buffer{}
				src := newCustomReader(bytes.NewBuffer(tt.from), readSize)
				written, err := Copy(dst, src, tt.keys)
				if tt.expectDetach {
					assert.ErrorIs(t, err, ErrDetach)
				} else {
					assert.NoError(t, err)
				}
				assert.Equal(t, dst.Len(), int(written), "bytes written matches buffer")
				assert.Equal(t, tt.expected, dst.Bytes(), "buffer matches")
			})
		}
	})
}
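
The table-driven cases run against the seed corpus under plain go test,
and the target can be fuzzed continuously; both commands below use stock
Go tooling (the package path is assumed from the file layout):

	go test -run FuzzCopy ./pkg/detach
	go test -fuzz=FuzzCopy -fuzztime=2m ./pkg/detach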
