skipmap
Performance review
I ran some benchmarks and got the results below. SkipMap appears to have the worst performance of the four maps. Am I setting up the tests incorrectly? The benchmark code follows the results.
goos: windows
goarch: amd64
pkg: source/core
cpu: Intel(R) Core(TM) i7-9700 CPU @ 3.00GHz
BenchmarkHaxMapReadsOnly
BenchmarkHaxMapReadsOnly-8                170182              6550 ns/op               0 B/op          0 allocs/op
BenchmarkHaxMapReadsWithWrites
BenchmarkHaxMapReadsWithWrites-8          172702              6914 ns/op             751 B/op         93 allocs/op
BenchmarkSkipMapReadsOnly
BenchmarkSkipMapReadsOnly-8                33459             32374 ns/op               0 B/op          0 allocs/op
BenchmarkSkipMapReadsWithWrites
BenchmarkSkipMapReadsWithWrites-8          21088             58494 ns/op             152 B/op         19 allocs/op
BenchmarkGoSyncMapReadsOnly
BenchmarkGoSyncMapReadsOnly-8              70982             17184 ns/op               0 B/op          0 allocs/op
BenchmarkGoSyncMapReadsWithWrites
BenchmarkGoSyncMapReadsWithWrites-8        58191             22223 ns/op            4809 B/op        446 allocs/op
BenchmarkCornelkMapReadsOnly
BenchmarkCornelkMapReadsOnly-8            192178              6747 ns/op               0 B/op          0 allocs/op
BenchmarkCornelkMapReadsWithWrites
BenchmarkCornelkMapReadsWithWrites-8      154560              8351 ns/op            1004 B/op        125 allocs/op
PASS
package main
import (
"github.com/alphadose/haxmap"
"github.com/cornelk/hashmap"
"github.com/zhangyunhao116/skipmap"
"sync"
"sync/atomic"
"testing"
)
const (
epochs uintptr = 1 << 12
mapSize = 256
)
func setupHaxMap() *haxmap.HashMap[uintptr, uintptr] {
m := haxmap.New[uintptr, uintptr](mapSize)
for i := uintptr(0); i < epochs; i++ {
m.Set(i, i)
}
return m
}
func setupGoSyncMap() *sync.Map {
m := &sync.Map{}
for i := uintptr(0); i < epochs; i++ {
m.Store(i, i)
}
return m
}
func setupCornelkMap(b *testing.B) *hashmap.Map[uintptr, uintptr] {
m := hashmap.NewSized[uintptr, uintptr](mapSize)
for i := uintptr(0); i < epochs; i++ {
m.Set(i, i)
}
return m
}
type anyskipmap[T any] interface {
Store(key T, value any)
Load(key T) (any, bool)
Delete(key T) bool
LoadAndDelete(key T) (any, bool)
LoadOrStore(key T, value any) (any, bool)
LoadOrStoreLazy(key T, f func() any) (any, bool)
Range(f func(key T, value any) bool)
Len() int
}
func setupSkipMap() *skipmap.OrderedMap[uintptr, uintptr] {
m := skipmap.New[uintptr, uintptr]()
//hashmap.NewSized[uintptr, uintptr](mapSize)
for i := uintptr(0); i < epochs; i++ {
m.Store(i, i)
}
return m
}
func BenchmarkHaxMapReadsOnly(b *testing.B) {
m := setupHaxMap()
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
for i := uintptr(0); i < epochs; i++ {
j, _ := m.Get(i)
if j != i {
b.Fail()
}
}
}
})
}
func BenchmarkHaxMapReadsWithWrites(b *testing.B) {
m := setupHaxMap()
var writer uintptr
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
// use 1 thread as writer
if atomic.CompareAndSwapUintptr(&writer, 0, 1) {
for pb.Next() {
for i := uintptr(0); i < epochs; i++ {
m.Set(i, i)
}
}
} else {
for pb.Next() {
for i := uintptr(0); i < epochs; i++ {
j, _ := m.Get(i)
if j != i {
b.Fail()
}
}
}
}
})
}
func BenchmarkSkipMapReadsOnly(b *testing.B) {
m := setupSkipMap()
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
for i := uintptr(0); i < epochs; i++ {
j, _ := m.Load(i)
if j != i {
b.Fail()
}
}
}
})
}
func BenchmarkSkipMapReadsWithWrites(b *testing.B) {
m := setupSkipMap()
var writer uintptr
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
// use 1 thread as writer
if atomic.CompareAndSwapUintptr(&writer, 0, 1) {
for pb.Next() {
for i := uintptr(0); i < epochs; i++ {
m.Store(i, i)
}
}
} else {
for pb.Next() {
for i := uintptr(0); i < epochs; i++ {
j, _ := m.Load(i)
if j != i {
b.Fail()
}
}
}
}
})
}
func BenchmarkGoSyncMapReadsOnly(b *testing.B) {
m := setupGoSyncMap()
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
for i := uintptr(0); i < epochs; i++ {
j, _ := m.Load(i)
if j != i {
b.Fail()
}
}
}
})
}
func BenchmarkGoSyncMapReadsWithWrites(b *testing.B) {
m := setupGoSyncMap()
var writer uintptr
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
// use 1 thread as writer
if atomic.CompareAndSwapUintptr(&writer, 0, 1) {
for pb.Next() {
for i := uintptr(0); i < epochs; i++ {
m.Store(i, i)
}
}
} else {
for pb.Next() {
for i := uintptr(0); i < epochs; i++ {
j, _ := m.Load(i)
if j != i {
b.Fail()
}
}
}
}
})
}
func BenchmarkCornelkMapReadsOnly(b *testing.B) {
m := setupCornelkMap(b)
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
for i := uintptr(0); i < epochs; i++ {
j, _ := m.Get(i)
if j != i {
b.Fail()
}
}
}
})
}
func BenchmarkCornelkMapReadsWithWrites(b *testing.B) {
m := setupCornelkMap(b)
var writer uintptr
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
// use 1 thread as writer
if atomic.CompareAndSwapUintptr(&writer, 0, 1) {
for pb.Next() {
for i := uintptr(0); i < epochs; i++ {
m.Set(i, i)
}
}
} else {
for pb.Next() {
for i := uintptr(0); i < epochs; i++ {
j, _ := m.Get(i)
if j != i {
b.Fail()
}
}
}
}
})
}
I suspect that using the typed API would improve skipmap's performance. Wrapping skipmap behind an interface such as the type anyskipmap[T any] interface above would also reduce performance, since every operation then pays for interface dispatch.
(From the readme.md) Note that generic APIs are always slower than typed APIs, but are more suitable for some scenarios such as functional programming.
e.g. New[string,int] is ~2x slower than NewString[int], and NewFunc[string,int](func(a, b string) bool { return a < b }) is 1~2x slower than NewString[int]. Performance ranking: NewString[int] > New[string,int] > NewFunc[string,int](func(a, b string) bool { return a < b })
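To make that concrete, here is a minimal sketch of the three constructor flavors ranked above, using the exact examples from the readme quote. It assumes the typed and func variants expose the same Store/Load methods as the generic OrderedMap (the anyskipmap interface in the test code suggests they do):

package main

import (
	"fmt"

	"github.com/zhangyunhao116/skipmap"
)

func main() {
	// Generic constructor: works for any ordered key type;
	// per the readme, ~2x slower than the specialized NewString.
	generic := skipmap.New[string, int]()
	generic.Store("a", 1)

	// Typed-key constructor: specialized string comparison, the fastest variant.
	typed := skipmap.NewString[int]()
	typed.Store("a", 1)

	// Custom comparator: every key comparison is an indirect function call;
	// per the readme, 1~2x slower than NewString.
	custom := skipmap.NewFunc[string, int](func(a, b string) bool { return a < b })
	custom.Store("a", 1)

	v, ok := typed.Load("a")
	fmt.Println(v, ok) // 1 true
}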
Can you propose a change to the submitted test that will exercise skipmap optimally?
We can use the benchmark from the previous version for the typed API (https://github.com/zhangyunhao116/skipmap/blob/c799de92afe46bec79c666d531ab1037664af058/skipmap_bench_test.go).
The current benchmark covers the generic version. To avoid keeping too much test code in the repo, the typed-API benchmarks were removed for now :)
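A minimal sketch of what that could look like against the current module, meant to sit next to the benchmarks above and reusing their epochs constant. It assumes a typed NewUint64[valueT] constructor analogous to the NewString[int] mentioned in the readme; the older commit linked above exposes a non-generic skipmap.NewUint64() instead, so treat the exact constructor name as an assumption and adjust it to whichever version you benchmark:

func BenchmarkSkipMapTypedReadsOnly(b *testing.B) {
	// Typed-key constructor; assumed to exist alongside NewString[valueT]
	// in the generic module (the linked older commit uses skipmap.NewUint64()).
	m := skipmap.NewUint64[uintptr]()
	for i := uint64(0); i < uint64(epochs); i++ {
		m.Store(i, uintptr(i))
	}
	b.ReportAllocs()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := uint64(0); i < uint64(epochs); i++ {
				j, _ := m.Load(i)
				if j != uintptr(i) {
					b.Fail()
				}
			}
		}
	})
}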
skipmap is a sorted map; are the other two sorted maps as well?
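For context on what "sorted" means here: skipmap's Range visits keys in ascending order, while the hash-map-based candidates above do not order entries by key. A minimal sketch, assuming the generic OrderedMap's Range callback takes typed key/value parameters (as the anyskipmap interface above suggests, with any replaced by the value type):

package main

import (
	"fmt"

	"github.com/zhangyunhao116/skipmap"
)

func main() {
	m := skipmap.New[uintptr, uintptr]()
	for _, k := range []uintptr{42, 7, 19} {
		m.Store(k, k*10)
	}
	// Keys come back in ascending order: 7, 19, 42.
	m.Range(func(key, value uintptr) bool {
		fmt.Println(key, value)
		return true
	})
}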