// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
|
|
|
|
|
|
|
|
import (
|
|
|
|
"runtime/internal/sys"
|
|
|
|
"unsafe"
|
|
|
|
)
|
|
|
|
|
// For gccgo, use go:linkname to export compiler-called functions.
//
//go:linkname mapaccess1_fast64
//go:linkname mapaccess2_fast64
//go:linkname mapassign_fast64
//go:linkname mapassign_fast64ptr
//go:linkname mapdelete_fast64
|
|
|
// mapaccess1_fast64 is the specialized map lookup for maps with 8-byte
// keys. It returns a pointer to the element for key, or a pointer to the
// shared zero value if the key is not present (it never returns nil).
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	// A nil or empty map cannot contain the key; return the zero value.
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	// Detect a concurrent writer (best-effort; not a full race detector).
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		// While growing, the entry may still live in the old bucket array.
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				// Not yet evacuated: the authoritative data is in the old bucket.
				b = oldb
			}
		}
	}
	// Scan the bucket and its overflow chain; keys are stored as a packed
	// array of 8-byte values immediately after the tophash array.
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
|
|
|
|
|
|
|
|
// mapaccess2_fast64 is like mapaccess1_fast64 but additionally reports
// whether the key was present, implementing the two-result comma-ok form
// of a map index expression for maps with 8-byte keys.
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	// A nil or empty map cannot contain the key.
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	// Detect a concurrent writer (best-effort; not a full race detector).
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		// While growing, the entry may still live in the old bucket array.
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				// Not yet evacuated: the authoritative data is in the old bucket.
				b = oldb
			}
		}
	}
	// Scan the bucket and its overflow chain; keys are stored as a packed
	// array of 8-byte values immediately after the tophash array.
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
|
|
|
|
|
|
|
|
// mapassign_fast64 is the specialized insert for maps with 8-byte keys.
// It returns a pointer to the element slot for key, creating the entry
// (and growing the map if needed) when the key is absent; the caller
// stores the element value through the returned pointer.
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		// Evacuate the old bucket we are about to touch before using it.
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	// insertb/inserti track the first free slot found while scanning,
	// which is reused if the key turns out not to be present.
	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				// Remember the first empty slot as the insertion point.
				if insertb == nil {
					insertb = b
					inserti = i
				}
				// emptyRest means no entries follow in this bucket or its
				// overflow chain, so the key cannot be further along.
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			// Key already present: just return its element slot.
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	// Re-check the write flag: a concurrent writer would have cleared it.
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
|
|
|
|
|
|
|
|
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
|
|
|
|
if h == nil {
|
|
|
|
panic(plainError("assignment to entry in nil map"))
|
|
|
|
}
|
|
|
|
if raceenabled {
|
|
|
|
callerpc := getcallerpc()
|
|
|
|
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
|
|
|
|
}
|
|
|
|
if h.flags&hashWriting != 0 {
|
|
|
|
throw("concurrent map writes")
|
|
|
|
}
|
compiler, runtime, reflect: generate hash functions only for map keys
Right now we generate hash functions for all types, just in case they
are used as map keys. That's a lot of wasted effort and binary size
for types which will never be used as a map key. Instead, generate
hash functions only for types that we know are map keys.
Just doing that is a bit too simple, since maps with an interface type
as a key might have to hash any concrete key type that implements that
interface. So for that case, implement hashing of such types at
runtime (instead of with generated code). It will be slower, but only
for maps with interface types as keys, and maybe only a bit slower as
the aeshash time probably dominates the dispatch time.
Reorg where we keep the equals and hash functions. Move the hash function
from the key type to the map type, saving a field in every non-map type.
That leaves only one function in the alg structure, so get rid of that and
just keep the equal function in the type descriptor itself.
While we're here, reorganize the rtype struct to more closely match
the gc version.
This is the gofrontend version of https://golang.org/cl/191198.
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/212843
From-SVN: r279848
2020-01-02 22:55:32 +01:00
|
|
|
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
|
2018-09-24 23:46:21 +02:00
|
|
|
|
compiler, runtime, reflect: generate hash functions only for map keys
Right now we generate hash functions for all types, just in case they
are used as map keys. That's a lot of wasted effort and binary size
for types which will never be used as a map key. Instead, generate
hash functions only for types that we know are map keys.
Just doing that is a bit too simple, since maps with an interface type
as a key might have to hash any concrete key type that implements that
interface. So for that case, implement hashing of such types at
runtime (instead of with generated code). It will be slower, but only
for maps with interface types as keys, and maybe only a bit slower as
the aeshash time probably dominates the dispatch time.
Reorg where we keep the equals and hash functions. Move the hash function
from the key type to the map type, saving a field in every non-map type.
That leaves only one function in the alg structure, so get rid of that and
just keep the equal function in the type descriptor itself.
While we're here, reorganize the rtype struct to more closely match
the gc version.
This is the gofrontend version of https://golang.org/cl/191198.
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/212843
From-SVN: r279848
2020-01-02 22:55:32 +01:00
|
|
|
// Set hashWriting after calling t.hasher for consistency with mapassign.
|
2019-01-18 20:04:36 +01:00
|
|
|
h.flags ^= hashWriting
|
2018-09-24 23:46:21 +02:00
|
|
|
|
|
|
|
if h.buckets == nil {
|
|
|
|
h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
|
|
|
|
}
|
|
|
|
|
|
|
|
again:
|
|
|
|
bucket := hash & bucketMask(h.B)
|
|
|
|
if h.growing() {
|
|
|
|
growWork_fast64(t, h, bucket)
|
|
|
|
}
|
|
|
|
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
|
|
|
|
|
|
|
|
var insertb *bmap
|
|
|
|
var inserti uintptr
|
|
|
|
var insertk unsafe.Pointer
|
|
|
|
|
2019-01-18 20:04:36 +01:00
|
|
|
bucketloop:
|
2018-09-24 23:46:21 +02:00
|
|
|
for {
|
|
|
|
for i := uintptr(0); i < bucketCnt; i++ {
|
2019-01-18 20:04:36 +01:00
|
|
|
if isEmpty(b.tophash[i]) {
|
2018-09-24 23:46:21 +02:00
|
|
|
if insertb == nil {
|
|
|
|
insertb = b
|
|
|
|
inserti = i
|
|
|
|
}
|
2019-01-18 20:04:36 +01:00
|
|
|
if b.tophash[i] == emptyRest {
|
|
|
|
break bucketloop
|
|
|
|
}
|
2018-09-24 23:46:21 +02:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
|
|
|
|
if k != key {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
insertb = b
|
|
|
|
inserti = i
|
|
|
|
goto done
|
|
|
|
}
|
|
|
|
ovf := b.overflow(t)
|
|
|
|
if ovf == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
b = ovf
|
|
|
|
}
|
|
|
|
|
|
|
|
// Did not find mapping for key. Allocate new cell & add entry.
|
|
|
|
|
|
|
|
// If we hit the max load factor or we have too many overflow buckets,
|
|
|
|
// and we're not already in the middle of growing, start growing.
|
|
|
|
if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
|
|
|
|
hashGrow(t, h)
|
|
|
|
goto again // Growing the table invalidates everything, so try again
|
|
|
|
}
|
|
|
|
|
|
|
|
if insertb == nil {
|
|
|
|
// all current buckets are full, allocate a new one.
|
|
|
|
insertb = h.newoverflow(t, b)
|
|
|
|
inserti = 0 // not necessary, but avoids needlessly spilling inserti
|
|
|
|
}
|
|
|
|
insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
|
|
|
|
|
|
|
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
|
|
|
|
// store new key at insert position
|
|
|
|
*(*unsafe.Pointer)(insertk) = key
|
|
|
|
|
|
|
|
h.count++
|
|
|
|
|
|
|
|
done:
|
2019-09-06 20:12:46 +02:00
|
|
|
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
|
2018-09-24 23:46:21 +02:00
|
|
|
if h.flags&hashWriting == 0 {
|
|
|
|
throw("concurrent map writes")
|
|
|
|
}
|
|
|
|
h.flags &^= hashWriting
|
2019-09-06 20:12:46 +02:00
|
|
|
return elem
|
2018-09-24 23:46:21 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// mapdelete_fast64 is the specialized delete for maps with 8-byte keys.
// Deleting a missing key is a no-op. After clearing the entry it
// back-propagates the emptyRest tophash marker so future lookups can stop
// scanning early.
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	// Deleting from a nil or empty map is a no-op.
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		// Evacuate the old bucket we are about to touch before using it.
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.ptrdata != 0 {
				memclrHasPointers(k, t.key.size)
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				// Last slot: the next entry, if any, is in the overflow bucket.
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			// Walk backwards converting trailing emptyOne slots to emptyRest.
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					// (Overflow chains are singly linked, so rescan from the start.)
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			break search
		}
	}

	// Re-check the write flag: a concurrent writer would have cleared it.
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
|
|
|
|
|
|
|
|
// growWork_fast64 performs a bounded amount of incremental evacuation
// work during a map grow, called on the write paths (assign/delete)
// before they touch bucket.
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}
|
|
|
|
|
|
|
|
// evacuate_fast64 moves the entries of old bucket oldbucket (and its
// overflow chain) into the new bucket array, splitting them between the
// "x" (same index) and "y" (index + newbit) destinations when the table
// doubled in size. Tophash slots in the old bucket are overwritten with
// evacuation markers so readers can tell the bucket has moved.
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*8)
		}

		// Walk the old bucket and its overflow chain, moving each live entry.
		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					// Destination bucket is full; chain on an overflow bucket.
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.ptrdata != 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	// Advance the global evacuation progress marker if we just finished
	// the bucket it points at.
	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
|