From 5215e0272cd7ca6416fab93959cea26d23127708 Mon Sep 17 00:00:00 2001 From: Fufu Date: Mon, 24 Jun 2024 10:52:32 +0800 Subject: [PATCH] sync: xsync v3.2.0 --- xsync/README.md | 2 +- xsync/export_mapof_test.go | 6 ++-- xsync/map.go | 65 +++++++++++++++++++++++++++++++------- xsync/map_test.go | 39 +++++++++++++++++++++-- xsync/mapof.go | 33 +++++++++++++------ xsync/mapof_test.go | 43 +++++++++++++++++++++++-- xsync/rbmutex_test.go | 1 + 7 files changed, 158 insertions(+), 31 deletions(-) diff --git a/xsync/README.md b/xsync/README.md index 6911fe1..7af99bc 100644 --- a/xsync/README.md +++ b/xsync/README.md @@ -1,6 +1,6 @@ # 标准库 `sync` 扩展包 -*forked from puzpuzpuz/xsync v20240226 v3.1.0* +*forked from puzpuzpuz/xsync v20240622 v3.2.0* ## 改动: diff --git a/xsync/export_mapof_test.go b/xsync/export_mapof_test.go index 073175c..0ede5bf 100644 --- a/xsync/export_mapof_test.go +++ b/xsync/export_mapof_test.go @@ -15,9 +15,9 @@ func CollectMapOfStats[K comparable, V any](m *MapOf[K, V]) MapStats { return MapStats{m.stats()} } -func NewMapOfPresizedWithHasher[K comparable, V any]( +func NewMapOfWithHasher[K comparable, V any]( hasher func(K, uint64) uint64, - sizeHint int, + options ...func(*MapConfig), ) *MapOf[K, V] { - return newMapOfPresized[K, V](hasher, sizeHint) + return newMapOf[K, V](hasher, options...) } diff --git a/xsync/map.go b/xsync/map.go index e86b180..92d73ac 100644 --- a/xsync/map.go +++ b/xsync/map.go @@ -75,6 +75,7 @@ type Map struct { resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications) table unsafe.Pointer // *mapTable minTableLen int + growOnly bool } type mapTable struct { @@ -118,31 +119,70 @@ type rangeEntry struct { value unsafe.Pointer } -// NewMap creates a new Map instance. -func NewMap() *Map { - return NewMapPresized(defaultMinMapTableLen * entriesPerMapBucket) +// MapConfig defines configurable Map/MapOf options. +type MapConfig struct { + sizeHint int + growOnly bool } -// NewMapPresized creates a new Map instance with capacity enough to hold -// sizeHint entries. The capacity is treated as the minimal capacity -// meaning that the underlying hash table will never shrink to -// a smaller capacity. If sizeHint is zero or negative, the value +// WithPresize configures new Map/MapOf instance with capacity enough +// to hold sizeHint entries. The capacity is treated as the minimal +// capacity meaning that the underlying hash table will never shrink +// to a smaller capacity. If sizeHint is zero or negative, the value // is ignored. -func NewMapPresized(sizeHint int) *Map { +func WithPresize(sizeHint int) func(*MapConfig) { + return func(c *MapConfig) { + c.sizeHint = sizeHint + } +} + +// WithGrowOnly configures new Map/MapOf instance to be grow-only. +// This means that the underlying hash table grows in capacity when +// new keys are added, but does not shrink when keys are deleted. +// The only exception to this rule is the Clear method which +// shrinks the hash table back to the initial capacity. +func WithGrowOnly() func(*MapConfig) { + return func(c *MapConfig) { + c.growOnly = true + } +} + +// NewMap creates a new Map instance configured with the given +// options. 
+func NewMap(options ...func(*MapConfig)) *Map { + c := &MapConfig{ + sizeHint: defaultMinMapTableLen * entriesPerMapBucket, + } + for _, o := range options { + o(c) + } + m := &Map{} m.resizeCond = *sync.NewCond(&m.resizeMu) var table *mapTable - if sizeHint <= defaultMinMapTableLen*entriesPerMapBucket { + if c.sizeHint <= defaultMinMapTableLen*entriesPerMapBucket { table = newMapTable(defaultMinMapTableLen) } else { - tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket)) + tableLen := nextPowOf2(uint32(c.sizeHint / entriesPerMapBucket)) table = newMapTable(int(tableLen)) } m.minTableLen = len(table.buckets) + m.growOnly = c.growOnly atomic.StorePointer(&m.table, unsafe.Pointer(table)) return m } +// NewMapPresized creates a new Map instance with capacity enough to hold +// sizeHint entries. The capacity is treated as the minimal capacity +// meaning that the underlying hash table will never shrink to +// a smaller capacity. If sizeHint is zero or negative, the value +// is ignored. +// +// Deprecated: use NewMap in combination with WithPresize. +func NewMapPresized(sizeHint int) *Map { + return NewMap(WithPresize(sizeHint)) +} + func newMapTable(minTableLen int) *mapTable { buckets := make([]bucketPadded, minTableLen) counterLen := minTableLen >> 10 @@ -473,8 +513,9 @@ func (m *Map) resize(knownTable *mapTable, hint mapResizeHint) { knownTableLen := len(knownTable.buckets) // Fast path for shrink attempts. if hint == mapShrinkHint { - shrinkThreshold := int64((knownTableLen * entriesPerMapBucket) / mapShrinkFraction) - if knownTableLen == m.minTableLen || knownTable.sumSize() > shrinkThreshold { + if m.growOnly || + m.minTableLen == knownTableLen || + knownTable.sumSize() > int64((knownTableLen*entriesPerMapBucket)/mapShrinkFraction) { return } } diff --git a/xsync/map_test.go b/xsync/map_test.go index c609255..a8fdea9 100644 --- a/xsync/map_test.go +++ b/xsync/map_test.go @@ -580,14 +580,17 @@ func assertMapCapacity(t *testing.T, m *Map, expectedCap int) { func TestNewMapPresized(t *testing.T) { assertMapCapacity(t, NewMap(), DefaultMinMapTableCap) assertMapCapacity(t, NewMapPresized(1000), 1536) + assertMapCapacity(t, NewMap(WithPresize(1000)), 1536) assertMapCapacity(t, NewMapPresized(0), DefaultMinMapTableCap) + assertMapCapacity(t, NewMap(WithPresize(0)), DefaultMinMapTableCap) assertMapCapacity(t, NewMapPresized(-1), DefaultMinMapTableCap) + assertMapCapacity(t, NewMap(WithPresize(-1)), DefaultMinMapTableCap) } func TestNewMapPresized_DoesNotShrinkBelowMinTableLen(t *testing.T) { const minTableLen = 1024 const numEntries = minTableLen * EntriesPerMapBucket - m := NewMapPresized(numEntries) + m := NewMap(WithPresize(numEntries)) for i := 0; i < numEntries; i++ { m.Store(strconv.Itoa(i), i) } @@ -607,6 +610,38 @@ func TestNewMapPresized_DoesNotShrinkBelowMinTableLen(t *testing.T) { } } +func TestNewMapGrowOnly_OnlyShrinksOnClear(t *testing.T) { + const minTableLen = 128 + const numEntries = minTableLen * EntriesPerMapBucket + m := NewMap(WithPresize(numEntries), WithGrowOnly()) + + stats := CollectMapStats(m) + initialTableLen := stats.RootBuckets + + for i := 0; i < 2*numEntries; i++ { + m.Store(strconv.Itoa(i), i) + } + stats = CollectMapStats(m) + maxTableLen := stats.RootBuckets + if maxTableLen <= minTableLen { + t.Fatalf("table did not grow: %d", maxTableLen) + } + + for i := 0; i < numEntries; i++ { + m.Delete(strconv.Itoa(int(i))) + } + stats = CollectMapStats(m) + if stats.RootBuckets != maxTableLen { + t.Fatalf("table length was different from the expected: %d", 
stats.RootBuckets) + } + + m.Clear() + stats = CollectMapStats(m) + if stats.RootBuckets != initialTableLen { + t.Fatalf("table length was different from the initial: %d", stats.RootBuckets) + } +} + func TestMapResize(t *testing.T) { const numEntries = 100_000 m := NewMap() @@ -1222,7 +1257,7 @@ func BenchmarkMapStandard_NoWarmUp(b *testing.B) { func BenchmarkMap_WarmUp(b *testing.B) { for _, bc := range benchmarkCases { b.Run(bc.name, func(b *testing.B) { - m := NewMapPresized(benchmarkNumEntries) + m := NewMap(WithPresize(benchmarkNumEntries)) for i := 0; i < benchmarkNumEntries; i++ { m.Store(benchmarkKeyPrefix+strconv.Itoa(i), i) } diff --git a/xsync/mapof.go b/xsync/mapof.go index 57448e9..222370c 100644 --- a/xsync/mapof.go +++ b/xsync/mapof.go @@ -36,6 +36,7 @@ type MapOf[K comparable, V any] struct { table unsafe.Pointer // *mapOfTable hasher func(K, uint64) uint64 minTableLen int + growOnly bool } type mapOfTable[K comparable, V any] struct { @@ -68,9 +69,10 @@ type entryOf[K comparable, V any] struct { value V } -// NewMapOf creates a new MapOf instance. -func NewMapOf[K comparable, V any]() *MapOf[K, V] { - return NewMapOfPresized[K, V](defaultMinMapTableLen * entriesPerMapBucket) +// NewMapOf creates a new MapOf instance configured with the given +// options. +func NewMapOf[K comparable, V any](options ...func(*MapConfig)) *MapOf[K, V] { + return newMapOf[K, V](makeHasher[K](), options...) } // NewMapOfPresized creates a new MapOf instance with capacity enough @@ -78,25 +80,35 @@ func NewMapOf[K comparable, V any]() *MapOf[K, V] { // meaning that the underlying hash table will never shrink to // a smaller capacity. If sizeHint is zero or negative, the value // is ignored. +// +// Deprecated: use NewMapOf in combination with WithPresize. func NewMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] { - return newMapOfPresized[K, V](makeHasher[K](), sizeHint) + return NewMapOf[K, V](WithPresize(sizeHint)) } -func newMapOfPresized[K comparable, V any]( +func newMapOf[K comparable, V any]( hasher func(K, uint64) uint64, - sizeHint int, + options ...func(*MapConfig), ) *MapOf[K, V] { + c := &MapConfig{ + sizeHint: defaultMinMapTableLen * entriesPerMapBucket, + } + for _, o := range options { + o(c) + } + m := &MapOf[K, V]{} m.resizeCond = *sync.NewCond(&m.resizeMu) m.hasher = hasher var table *mapOfTable[K, V] - if sizeHint <= defaultMinMapTableLen*entriesPerMapBucket { + if c.sizeHint <= defaultMinMapTableLen*entriesPerMapBucket { table = newMapOfTable[K, V](defaultMinMapTableLen) } else { - tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket)) + tableLen := nextPowOf2(uint32(c.sizeHint / entriesPerMapBucket)) table = newMapOfTable[K, V](int(tableLen)) } m.minTableLen = len(table.buckets) + m.growOnly = c.growOnly atomic.StorePointer(&m.table, unsafe.Pointer(table)) return m } @@ -426,8 +438,9 @@ func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) { knownTableLen := len(knownTable.buckets) // Fast path for shrink attempts. 
if hint == mapShrinkHint { - shrinkThreshold := int64((knownTableLen * entriesPerMapBucket) / mapShrinkFraction) - if knownTableLen == m.minTableLen || knownTable.sumSize() > shrinkThreshold { + if m.growOnly || + m.minTableLen == knownTableLen || + knownTable.sumSize() > int64((knownTableLen*entriesPerMapBucket)/mapShrinkFraction) { return } } diff --git a/xsync/mapof_test.go b/xsync/mapof_test.go index a1ba45b..617807d 100644 --- a/xsync/mapof_test.go +++ b/xsync/mapof_test.go @@ -272,11 +272,11 @@ func TestMapOfStore_StructKeys_StructValues(t *testing.T) { func TestMapOfStore_HashCodeCollisions(t *testing.T) { const numEntries = 1000 - m := NewMapOfPresizedWithHasher[int, int](func(i int, _ uint64) uint64 { + m := NewMapOfWithHasher[int, int](func(i int, _ uint64) uint64 { // We intentionally use an awful hash function here to make sure // that the map copes with key collisions. return 42 - }, numEntries) + }, WithPresize(numEntries)) for i := 0; i < numEntries; i++ { m.Store(i, i) } @@ -618,16 +618,21 @@ func assertMapOfCapacity[K comparable, V any](t *testing.T, m *MapOf[K, V], expe func TestNewMapOfPresized(t *testing.T) { assertMapOfCapacity(t, NewMapOf[string, string](), DefaultMinMapTableCap) assertMapOfCapacity(t, NewMapOfPresized[string, string](0), DefaultMinMapTableCap) + assertMapOfCapacity(t, NewMapOf[string, string](WithPresize(0)), DefaultMinMapTableCap) assertMapOfCapacity(t, NewMapOfPresized[string, string](-100), DefaultMinMapTableCap) + assertMapOfCapacity(t, NewMapOf[string, string](WithPresize(-100)), DefaultMinMapTableCap) assertMapOfCapacity(t, NewMapOfPresized[string, string](500), 768) + assertMapOfCapacity(t, NewMapOf[string, string](WithPresize(500)), 768) assertMapOfCapacity(t, NewMapOfPresized[int, int](1_000_000), 1_572_864) + assertMapOfCapacity(t, NewMapOf[int, int](WithPresize(1_000_000)), 1_572_864) assertMapOfCapacity(t, NewMapOfPresized[point, point](100), 192) + assertMapOfCapacity(t, NewMapOf[point, point](WithPresize(100)), 192) } func TestNewMapOfPresized_DoesNotShrinkBelowMinTableLen(t *testing.T) { const minTableLen = 1024 const numEntries = minTableLen * EntriesPerMapBucket - m := NewMapOfPresized[int, int](numEntries) + m := NewMapOf[int, int](WithPresize(numEntries)) for i := 0; i < numEntries; i++ { m.Store(i, i) } @@ -647,6 +652,38 @@ func TestNewMapOfPresized_DoesNotShrinkBelowMinTableLen(t *testing.T) { } } +func TestNewMapOfGrowOnly_OnlyShrinksOnClear(t *testing.T) { + const minTableLen = 128 + const numEntries = minTableLen * EntriesPerMapBucket + m := NewMapOf[int, int](WithPresize(numEntries), WithGrowOnly()) + + stats := CollectMapOfStats(m) + initialTableLen := stats.RootBuckets + + for i := 0; i < 2*numEntries; i++ { + m.Store(i, i) + } + stats = CollectMapOfStats(m) + maxTableLen := stats.RootBuckets + if maxTableLen <= minTableLen { + t.Fatalf("table did not grow: %d", maxTableLen) + } + + for i := 0; i < numEntries; i++ { + m.Delete(i) + } + stats = CollectMapOfStats(m) + if stats.RootBuckets != maxTableLen { + t.Fatalf("table length was different from the expected: %d", stats.RootBuckets) + } + + m.Clear() + stats = CollectMapOfStats(m) + if stats.RootBuckets != initialTableLen { + t.Fatalf("table length was different from the initial: %d", stats.RootBuckets) + } +} + func TestMapOfResize(t *testing.T) { const numEntries = 100_000 m := NewMapOf[string, int]() diff --git a/xsync/rbmutex_test.go b/xsync/rbmutex_test.go index 914e83a..24009d8 100644 --- a/xsync/rbmutex_test.go +++ b/xsync/rbmutex_test.go @@ -20,6 +20,7 @@ func 
TestRBMutexSerialReader(t *testing.T) { var rtokens [numIters]*RToken for i := 0; i < numIters; i++ { rtokens[i] = mu.RLock() + } for i := 0; i < numIters; i++ { mu.RUnlock(rtokens[i])
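
The snippet below is an illustrative sketch and not part of the patch: it shows how the option-based constructors introduced in this change (NewMap/NewMapOf with WithPresize and WithGrowOnly) replace the now-deprecated NewMapPresized/NewMapOfPresized. It is written as an in-package example test in the style of the test files above; the file name and example function name are hypothetical.

// example_options_test.go (hypothetical file name, assumed to live in package xsync)
package xsync

import "fmt"

// ExampleNewMapOf_options exercises the functional options added in v3.2.0.
// WithPresize reserves capacity for roughly 1000 entries up front, and
// WithGrowOnly keeps the underlying table from shrinking on Delete; per the
// WithGrowOnly documentation, only Clear returns it to the initial capacity.
func ExampleNewMapOf_options() {
	// Equivalent to the deprecated NewMapOfPresized[string, int](1000),
	// plus grow-only behaviour.
	m := NewMapOf[string, int](WithPresize(1000), WithGrowOnly())

	m.Store("answer", 42)
	if v, ok := m.Load("answer"); ok {
		fmt.Println(v)
	}

	m.Delete("answer") // table keeps its grown capacity
	m.Clear()          // shrinks back to the initial capacity
	// Output: 42
}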