Skip to content

Commit

Permalink
cmd/sszgen, tests: generate sizes for offsets too, ugh
Browse files Browse the repository at this point in the history
  • Loading branch information
karalabe committed Jul 19, 2024
1 parent 382957c commit 8fc30f4
Show file tree
Hide file tree
Showing 22 changed files with 235 additions and 233 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

Package `ssz` provides a zero-allocation, opinionated toolkit for working with Ethereum's [Simple Serialize (SSZ)](https://github.com/ethereum/consensus-specs/blob/dev/ssz/simple-serialize.md) format through Go. The focus is on code maintainability, only secondarily striving towards raw performance.

***Please note, this repository is a work in progress. The API is unstable and breaking changes will regularly be made. Hashing is not yet implemented. Do not depend on this in publicly available modules.***
***Please note, this repository is a work in progress. The API is unstable and breaking changes will regularly be made. Do not depend on this in publicly available modules.***

*This package is heavily inspired by the code generated by — and contained within — [`fastssz`](https://github.com/ferranbt/fastssz)!*

Expand Down
14 changes: 7 additions & 7 deletions cmd/sszgen/opset.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ func (p *parseContext) resolveBitlistOpset(tags *sizeTag) (opset, error) {
}
return &opsetDynamic{
"SizeSliceOfBits({{.Field}})",
"DefineSliceOfBitsOffset({{.Codec}}, &{{.Field}})",
fmt.Sprintf("DefineSliceOfBitsOffset({{.Codec}}, &{{.Field}}, %d)", tags.limit[0]), // inject bit-cap directly
fmt.Sprintf("DefineSliceOfBitsContent({{.Codec}}, &{{.Field}}, %d)", tags.limit[0]), // inject bit-cap directly
"EncodeSliceOfBitsOffset({{.Codec}}, &{{.Field}})",
fmt.Sprintf("EncodeSliceOfBitsContent({{.Codec}}, &{{.Field}}, %d)", tags.limit[0]), // inject bit-cap directly
Expand Down Expand Up @@ -233,7 +233,7 @@ func (p *parseContext) resolveSliceOpset(typ types.Type, tags *sizeTag) (opset,
}
return &opsetDynamic{
"SizeDynamicBytes({{.Field}})",
"DefineDynamicBytesOffset({{.Codec}}, &{{.Field}})",
"DefineDynamicBytesOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
"DefineDynamicBytesContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
"EncodeDynamicBytesOffset({{.Codec}}, &{{.Field}})",
"EncodeDynamicBytesContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
Expand Down Expand Up @@ -268,7 +268,7 @@ func (p *parseContext) resolveSliceOpset(typ types.Type, tags *sizeTag) (opset,
}
return &opsetDynamic{
"SizeSliceOfUint64s({{.Field}})",
"DefineSliceOfUint64sOffset({{.Codec}}, &{{.Field}})",
"DefineSliceOfUint64sOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
"DefineSliceOfUint64sContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
"EncodeSliceOfUint64sOffset({{.Codec}}, &{{.Field}})",
"EncodeSliceOfUint64sContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
Expand All @@ -290,7 +290,7 @@ func (p *parseContext) resolveSliceOpset(typ types.Type, tags *sizeTag) (opset,
}
return &opsetDynamic{
"SizeSliceOfStaticObjects({{.Field}})",
"DefineSliceOfStaticObjectsOffset({{.Codec}}, &{{.Field}})",
"DefineSliceOfStaticObjectsOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
"DefineSliceOfStaticObjectsContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
"EncodeSliceOfStaticObjectsOffset({{.Codec}}, &{{.Field}})",
"EncodeSliceOfStaticObjectsContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
Expand All @@ -308,7 +308,7 @@ func (p *parseContext) resolveSliceOpset(typ types.Type, tags *sizeTag) (opset,
}
return &opsetDynamic{
"SizeSliceOfDynamicObjects({{.Field}})",
"DefineSliceOfDynamicObjectsOffset({{.Codec}}, &{{.Field}})",
"DefineSliceOfDynamicObjectsOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
"DefineSliceOfDynamicObjectsContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
"EncodeSliceOfDynamicObjectsOffset({{.Codec}}, &{{.Field}})",
"EncodeSliceOfDynamicObjectsContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
Expand Down Expand Up @@ -364,7 +364,7 @@ func (p *parseContext) resolveSliceOfArrayOpset(typ types.Type, innerSize int, t
}
return &opsetDynamic{
"SizeSliceOfStaticBytes({{.Field}})",
"DefineSliceOfStaticBytesOffset({{.Codec}}, &{{.Field}})",
"DefineSliceOfStaticBytesOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
"DefineSliceOfStaticBytesContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
"EncodeSliceOfStaticBytesOffset({{.Codec}}, &{{.Field}})",
"EncodeSliceOfStaticBytesContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})",
Expand Down Expand Up @@ -399,7 +399,7 @@ func (p *parseContext) resolveSliceOfSliceOpset(typ types.Type, tags *sizeTag) (
}
return &opsetDynamic{
"SizeSliceOfDynamicBytes({{.Field}})",
"DefineSliceOfDynamicBytesOffset({{.Codec}}, &{{.Field}})",
"DefineSliceOfDynamicBytesOffset({{.Codec}}, &{{.Field}}, {{.MaxItems}}, {{.MaxSize}})",
"DefineSliceOfDynamicBytesContent({{.Codec}}, &{{.Field}}, {{.MaxItems}}, {{.MaxSize}})",
"EncodeSliceOfDynamicBytesOffset({{.Codec}}, &{{.Field}})",
"EncodeSliceOfDynamicBytesContent({{.Codec}}, &{{.Field}}, {{.MaxItems}}, {{.MaxSize}})",
Expand Down
2 changes: 1 addition & 1 deletion example_static_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ func ExampleEncodeStaticObject() {
if err := ssz.EncodeToStream(out, new(Withdrawal)); err != nil {
panic(err)
}
hash := ssz.MerkleizeSequential(new(Withdrawal))
hash := ssz.HashSequential(new(Withdrawal))

fmt.Printf("ssz: %#x\nhash: %#x\n", out, hash)
// Output:
Expand Down
16 changes: 9 additions & 7 deletions ssz.go
Original file line number Diff line number Diff line change
Expand Up @@ -198,9 +198,10 @@ func DecodeFromBytes(blob []byte, obj Object) error {
return err
}

// MerkleizeSequential computes the ssz merkle root of the object on a single
// thread. This is useful for processing small objects reliably.
func MerkleizeSequential(obj Object) [32]byte {
// HashSequential computes the ssz merkle root of the object on a single thread.
// This is useful for processing small objects with stable runtime and O(1) GC
// guarantees.
func HashSequential(obj Object) [32]byte {
codec := hasherPool.Get().(*Codec)
defer hasherPool.Put(codec)
defer codec.has.Reset()
Expand All @@ -215,10 +216,11 @@ func MerkleizeSequential(obj Object) [32]byte {
return codec.has.chunks[0]
}

// MerkleizeConcurrent computes the ssz merkle root of the object on concurrent
// threads. This is useful for processing large objects, but may place a bigger
// load on your CPU and GC.
func MerkleizeConcurrent(obj Object) [32]byte {
// HashConcurrent computes the ssz merkle root of the object on potentially multiple
// concurrent threads (iff some data segments are large enough to be worth it). This
// is useful for processing large objects, but will place a bigger load on your CPU
// and GC; and might be more variable timing wise depending on other load.
func HashConcurrent(obj Object) [32]byte {
codec := hasherPool.Get().(*Codec)
defer hasherPool.Put(codec)
defer codec.has.Reset()
Expand Down
24 changes: 12 additions & 12 deletions tests/consensus_specs_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -208,11 +208,11 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string,
if size := ssz.Size(obj); size != uint32(len(inSSZ)) {
t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ))
}
hash := ssz.MerkleizeSequential(obj)
hash := ssz.HashSequential(obj)
if fmt.Sprintf("%#x", hash) != inRoot.Root {
t.Fatalf("sequential merkle root mismatch: have %#x, want %s", hash, inRoot.Root)
}
hash = ssz.MerkleizeConcurrent(obj)
hash = ssz.HashConcurrent(obj)
if fmt.Sprintf("%#x", hash) != inRoot.Root {
t.Fatalf("concurrent merkle root mismatch: have %#x, want %s", hash, inRoot.Root)
}
Expand Down Expand Up @@ -336,7 +336,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k
b.ResetTimer()

for i := 0; i < b.N; i++ {
ssz.MerkleizeSequential(obj)
ssz.HashSequential(obj)
}
})
b.Run(fmt.Sprintf("%s/merkleize-concurrent", kind), func(b *testing.B) {
Expand All @@ -349,7 +349,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k
b.ResetTimer()

for i := 0; i < b.N; i++ {
ssz.MerkleizeConcurrent(obj)
ssz.HashConcurrent(obj)
}
})
}
Expand Down Expand Up @@ -546,8 +546,8 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string)
t.Fatalf("failed to decode buffer: %v", err)
}
// Sanity check that hashing and size retrieval works
hash1 := ssz.MerkleizeSequential(obj)
hash2 := ssz.MerkleizeConcurrent(obj)
hash1 := ssz.HashSequential(obj)
hash2 := ssz.HashConcurrent(obj)
if hash1 != hash2 {
t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2)
}
Expand All @@ -574,8 +574,8 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string)
t.Fatalf("failed to decode stream: %v", err)
}
// Sanity check that hashing and size retrieval works
hash1 := ssz.MerkleizeSequential(obj)
hash2 := ssz.MerkleizeConcurrent(obj)
hash1 := ssz.HashSequential(obj)
hash2 := ssz.HashConcurrent(obj)
if hash1 != hash2 {
t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2)
}
Expand Down Expand Up @@ -605,8 +605,8 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string)
t.Fatalf("re-encoded stream from used object mismatch: have %x, want %x, common prefix %d, have left %x, want left %x",
blob, inSSZ, len(prefix), blob.Bytes()[len(prefix):], inSSZ[len(prefix):])
}
hash1 := ssz.MerkleizeSequential(obj)
hash2 := ssz.MerkleizeConcurrent(obj)
hash1 := ssz.HashSequential(obj)
hash2 := ssz.HashConcurrent(obj)
if hash1 != hash2 {
t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2)
}
Expand All @@ -630,8 +630,8 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string)
t.Fatalf("re-encoded buffer from used object mismatch: have %x, want %x, common prefix %d, have left %x, want left %x",
blob, inSSZ, len(prefix), bin[len(prefix):], inSSZ[len(prefix):])
}
hash1 = ssz.MerkleizeSequential(obj)
hash2 = ssz.MerkleizeConcurrent(obj)
hash1 = ssz.HashSequential(obj)
hash2 = ssz.HashConcurrent(obj)
if hash1 != hash2 {
t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2)
}
Expand Down
6 changes: 3 additions & 3 deletions tests/manual_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ func BenchmarkMainnetState(b *testing.B) {
if err := ssz.DecodeFromBytes(blob, obj); err != nil {
panic(err)
}
hash := ssz.MerkleizeSequential(obj)
hash := ssz.HashSequential(obj)

b.Run(fmt.Sprintf("beacon-state/%d-bytes/encode", len(blob)), func(b *testing.B) {
b.SetBytes(int64(len(blob)))
Expand Down Expand Up @@ -49,7 +49,7 @@ func BenchmarkMainnetState(b *testing.B) {
b.ResetTimer()

for i := 0; i < b.N; i++ {
if ssz.MerkleizeSequential(obj) != hash {
if ssz.HashSequential(obj) != hash {
panic("hash mismatch")
}
}
Expand All @@ -60,7 +60,7 @@ func BenchmarkMainnetState(b *testing.B) {
b.ResetTimer()

for i := 0; i < b.N; i++ {
if ssz.MerkleizeConcurrent(obj) != hash {
if ssz.HashConcurrent(obj) != hash {
panic("hash mismatch")
}
}
Expand Down
6 changes: 3 additions & 3 deletions tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading

0 comments on commit 8fc30f4

Please sign in to comment.