concurrent-map
concurrent-map copied to clipboard
performance: Why use concurrency in Keys at all?
// Keys returns a snapshot of every key currently stored in the map.
//
// A background goroutine fans out one worker per shard; each worker
// streams its shard's keys into a buffered channel while the caller
// concurrently drains the channel into the result slice. The buffer is
// sized to the current element count so workers rarely block.
func (m ConcurrentMap[V]) Keys() []string {
	total := m.Count()
	ch := make(chan string, total)

	go func() {
		var wg sync.WaitGroup
		wg.Add(SHARD_COUNT)
		for _, s := range m {
			// One goroutine per shard; the shard's read lock is held
			// only while its own keys are being sent.
			go func(s *ConcurrentMapShared[V]) {
				defer wg.Done()
				s.RLock()
				defer s.RUnlock()
				for k := range s.items {
					ch <- k
				}
			}(s)
		}
		wg.Wait()
		close(ch) // sender closes: signals the drain loop below to stop
	}()

	// Drain the channel into the result slice.
	keys := make([]string, 0, total)
	for k := range ch {
		keys = append(keys, k)
	}
	return keys
}
// Keys2 returns all keys as []string, collecting shards in parallel.
//
// BUG FIX: the original version appended to the shared keys slice from
// SHARD_COUNT goroutines with no synchronization — a data race on both
// the slice header and its backing array that can lose keys, duplicate
// them, or corrupt memory (and fails under `go test -race`). Each worker
// now gathers its shard's keys into a private batch while holding only
// the shard's read lock, then appends the batch under a mutex, keeping
// the critical section short.
func (m ConcurrentMap[V]) Keys2() []string {
	keys := make([]string, 0, m.Count())
	var mu sync.Mutex // guards keys
	wg := sync.WaitGroup{}
	wg.Add(SHARD_COUNT)
	for _, shard := range m {
		go func(shard *ConcurrentMapShared[V]) {
			defer wg.Done()
			shard.RLock()
			batch := make([]string, 0, len(shard.items))
			for key := range shard.items {
				batch = append(batch, key)
			}
			shard.RUnlock()
			mu.Lock()
			keys = append(keys, batch...)
			mu.Unlock()
		}(shard)
	}
	wg.Wait()
	return keys
}
// Keys3 returns all keys as []string by walking the shards sequentially.
//
// No goroutines, no channels: one result slice is allocated up front and
// each shard is visited in turn under its read lock. This is the most
// GC-friendly variant (a single allocation for the result).
func (m ConcurrentMap[V]) Keys3() []string {
	out := make([]string, 0, m.Count())
	for _, s := range m {
		s.RLock()
		for k := range s.items {
			out = append(out, k)
		}
		s.RUnlock()
	}
	return out
}
// BenchmarkKeys compares the three Keys implementations on a map
// pre-populated with 10000 entries.
//
// FIX: b.ReportAllocs on the parent benchmark does not propagate to
// sub-benchmarks created with b.Run — each sub-benchmark is a fresh
// *testing.B — so each one must call it itself (or run with -benchmem).
// The parent-level ResetTimer was also dropped: sub-benchmarks time
// themselves independently, so it had no effect on the reported numbers.
func BenchmarkKeys(b *testing.B) {
	m := New[Animal]()
	// Insert 10000 elements; this setup is outside every timed region.
	for i := 0; i < 10000; i++ {
		m.Set(strconv.Itoa(i), Animal{strconv.Itoa(i)})
	}
	b.Run("Keys", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			m.Keys()
		}
	})
	b.Run("Keys2", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			m.Keys2()
		}
	})
	b.Run("Keys3", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			m.Keys3()
		}
	})
}
BenchmarkKeys
BenchmarkKeys/Keys
BenchmarkKeys/Keys-8 1406 859256 ns/op 329637 B/op 69 allocs/op
BenchmarkKeys/Keys2
BenchmarkKeys/Keys2-8 12532 96416 ns/op 165675 B/op 67 allocs/op
BenchmarkKeys/Keys3
BenchmarkKeys/Keys3-8 10000 114136 ns/op 163840 B/op 1 allocs/op
I really don't see what the advantage of the current channel-based Keys method is — why not just use the WaitGroup-based solution? It's not the absolute fastest, but it's GC-friendly. (Note, however, that Keys2 as written appends to a shared slice from multiple goroutines without a lock, which is a data race — it needs a mutex before it can replace Keys.)