Why sync.Map
First of all, plain Go maps are not thread-safe in concurrent scenarios, so they must be locked around every map operation. Atomic operations are naturally cheaper than locking, which is why the Go authors introduced sync.Map.
Code sample
// Demonstrates that sync.Map is safe for concurrent use: two goroutines
// store into the same map simultaneously without a data race, and Range
// then prints every key/value pair.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var syncMap sync.Map
	var wg sync.WaitGroup

	wg.Add(2)
	go func() {
		// Done must run AFTER the store; otherwise Wait could return
		// before the store has happened.
		defer wg.Done()
		syncMap.Store("1", 1)
	}()
	go func() {
		defer wg.Done()
		syncMap.Store("2", 2)
	}()
	wg.Wait()

	syncMap.Range(func(key, value interface{}) bool {
		fmt.Println(key, value)
		return true // keep iterating
	})
}
In the example above, two goroutines operate on the sync.Map at the same time; there is no concurrency panic and the output is correct.
Source analysis
Before looking at the core code, let’s take a look at the definitions of the related constructs
type Map struct {
mu Mutex // Dirty map operations require locks
read atomic.Value // Read only map, where logical deletion is done by changing the state of the pointer in read
// A dirty map is a pocket with more key/value pairs than a read map
dirty map[interface{}]*entry
misses int // When the data is read, the number of misses is first picked from the read map, and when the dirty map fails, the number of misses is increased by 1
}
// Read the corresponding stored data
type readOnly struct {
m map[interface{}]*entry
amended bool This field is true if the dirty map stores more data than the read map
}
// expunged indicates that the current key has been deleted from the dirty map
var expunged = unsafe.Pointer(new(interface{}))
The entry structure contains a pointer to the value in the map
type entry struct {
// p points to the interface{} value stored for the entry.
//
// If p == nil, the entry has been deleted and m.dirty == nil.
//
// If p == expunged, the entry has been deleted, m.dirty ! = nil, and the entry
// is missing from m.dirty.
//
// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
/ /! = nil, in m.dirty[key].
//
// An entry can be deleted by atomic replacement with nil: when m.dirty is
// next created, it will atomically replace nil with expunged and leave
// m.dirty[key] unset.
//
// An entry's associated value can be updated by atomic replacement, provided
// p ! = expunged. If p == expunged, an entry's associated value can be updated
// only after first setting m.dirty[key] = e so that lookups using the dirty
// map find the entry.
p unsafe.Pointer // *interface{}
}
Let's take a look at the three core operations in sync.Map: Store, Load, and Delete.
Store (storage)
func (m *Map) Store(key, value interface{}) {
// First read from the read map. If a key exists and the value is not in expunged state, the store succeeds
read, _ := m.read.Load().(readOnly)
if e, ok := read.m[key]; ok && e.tryStore(&value) {
return
}
// Otherwise, the dirty map needs to be operated on. Since dirty is a normal map, it needs to be locked
m.mu.Lock()
// If the current key exists in read Map and is in expunged state, dirty map! = nil
// && The key-value pair does not exist in the dirty map, first change the state to nil, then put it in the Dirty map, then
// Perform the assignment
read, _ = m.read.Load().(readOnly)
if e, ok := read.m[key]; ok {
if e.unexpungeLocked() {
// The entry was previously expunged, which implies that there is a
// non-nil dirty map and this entry is not in it.
m.dirty[key] = e
}
e.storeLocked(&value)
// If the current key exists in the dirty map, it is stored directly
} else if e, ok := m.dirty[key]; ok {
e.storeLocked(&value)
} else {
// If the current key does not exist in the dirty map or in the read map, if the dirty map is nil, the read map will be used
// Give all non-nil and expunged key-value pairs in the dirty map, and then put the new key-value pairs in the Dirty Map
if! read.amended {// We're adding the first new key to the dirty map.
// Make sure it is allocated and mark the read-only map as incomplete.
m.dirtyLocked()
m.read.Store(readOnly{m: read.m, amended: true})
}
m.dirty[key] = newEntry(value)
}
m.mu.Unlock()
}
// Try to store entry in read Map, return false if entry status is expunged, otherwise CAS operation is performed
func (e *entry) tryStore(i *interface{}) bool {
for {
p := atomic.LoadPointer(&e.p)
if p == expunged {
return false
}
if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
return true}}}// Replace the expunged state with nil
func (e *entry) unexpungeLocked(a) (wasExpunged bool) {
return atomic.CompareAndSwapPointer(&e.p, expunged, nil)}// Store related values
func (e *entry) storeLocked(i *interface{}) {
atomic.StorePointer(&e.p, unsafe.Pointer(i))
}
// Put all non-nil and expunged key-value pairs in the read map into the dirty map
func (m *Map) dirtyLocked(a) {
ifm.dirty ! =nil {
return
}
read, _ := m.read.Load().(readOnly)
m.dirty = make(map[interface{}]*entry, len(read.m))
for k, e := range read.m {
if! e.tryExpungeLocked() { m.dirty[k] = e } } }func (e *entry) tryExpungeLocked(a) (isExpunged bool) {
p := atomic.LoadPointer(&e.p)
for p == nil {
if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
return true
}
p = atomic.LoadPointer(&e.p)
}
return p == expunged
}
Load (Load)
// Load returns the value stored in the map for a key, or nil if no value
// is present. The ok result indicates whether a value was found.
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
	// Fast path: lock-free lookup in the read snapshot.
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	// Only fall through to dirty if the key is absent AND dirty may hold
	// extra keys (amended).
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we
		// were blocked on m.mu. (If further loads of the same key will not
		// miss, it's not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss:
			// this key will take the slow path until the dirty map is
			// promoted to the read map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}

// missLocked counts a read-map miss; once misses reaches the size of the
// dirty map, dirty is promoted wholesale to the read snapshot.
func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

// load extracts the value held by the entry, unless the entry has been
// deleted (nil) or expunged.
func (e *entry) load() (value interface{}, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*interface{})(p), true
}
Copy the code
Delete (Delete)
func (m *Map) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
The delete operation is similar to the load operation. The key is first retrieved from the read map
read, _ := m.read.Load().(readOnly)
e, ok := read.m[key]
// If there is no dirty map and the dirty map is not empty, find the dirty map
if! ok && read.amended { m.mu.Lock()// Double check to prevent the key from being added during the lock process
read, _ = m.read.Load().(readOnly)
e, ok = read.m[key]
// If not, delete the dirty map and increase the count of miss
if! ok && read.amended { e, ok = m.dirty[key]delete(m.dirty, key)
// Regardless of whether the entry was present, record a miss: this key
// will take the slow path until the dirty map is promoted to the read
// map.
m.missLocked()
}
m.mu.Unlock()
}
// Set the state of this value to nil to indicate that it has been deleted
if ok {
return e.delete()}return nil.false
}
// Delete deletes the value for a key.
func (m *Map) Delete(key interface{}) {
m.LoadAndDelete(key)
}
/ / delete the entry
func (e *entry) delete(a) (value interface{}, ok bool) {
for {
p := atomic.LoadPointer(&e.p)
if p == nil || p == expunged {
return nil.false
}
if atomic.CompareAndSwapPointer(&e.p, p, nil) {
return* (*interface{})(p), true}}}Copy the code
Because creating the dirty map copies the key/value pairs of the read map, write-heavy workloads perform poorly; sync.Map is therefore best suited to scenarios with many reads and few writes.