// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// GOMAXPROCS=10 go test

package sync_test

import (
	"fmt"
	"runtime"
	. "sync" // dot import so RWMutex and Locker can be referenced unqualified
	"sync/atomic"
	"testing"
)

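// parallelReader takes a read lock on m, reports it on clocked, waits for a
// signal on cunlock, then releases the read lock and reports on cdone.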
func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) {
	m.RLock()
	clocked <- true
	<-cunlock
	m.RUnlock()
	cdone <- true
}

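// doTestParallelReaders checks that numReaders goroutines can hold the read
// lock simultaneously under the given GOMAXPROCS setting.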
func doTestParallelReaders(numReaders, gomaxprocs int) {
	runtime.GOMAXPROCS(gomaxprocs)
	var m RWMutex
	clocked := make(chan bool)
	cunlock := make(chan bool)
	cdone := make(chan bool)
	for i := 0; i < numReaders; i++ {
		go parallelReader(&m, clocked, cunlock, cdone)
	}
	// Wait for all parallel RLock()s to succeed.
	for i := 0; i < numReaders; i++ {
		<-clocked
	}
	for i := 0; i < numReaders; i++ {
		cunlock <- true
	}
	// Wait for the goroutines to finish.
	for i := 0; i < numReaders; i++ {
		<-cdone
	}
}

func TestParallelReaders(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
	doTestParallelReaders(1, 4)
	doTestParallelReaders(3, 4)
	doTestParallelReaders(4, 2)
}

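// reader repeatedly takes the read lock and uses the shared activity counter
// to verify that no writer is active while the read lock is held.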
func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
	for i := 0; i < num_iterations; i++ {
		rwm.RLock()
		n := atomic.AddInt32(activity, 1)
		if n < 1 || n >= 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		// Spin briefly to simulate work done under the read lock.
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -1)
		rwm.RUnlock()
	}
	cdone <- true
}

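// writer repeatedly takes the write lock and uses the shared activity counter
// to verify that it holds the lock exclusively.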
func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
	for i := 0; i < num_iterations; i++ {
		rwm.Lock()
		n := atomic.AddInt32(activity, 10000)
		if n != 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		// Spin briefly to simulate work done under the write lock.
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -10000)
		rwm.Unlock()
	}
	cdone <- true
}

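// HammerRWMutex runs two writers and numReaders readers concurrently against
// a single RWMutex, each for num_iterations iterations.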
func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) {
	runtime.GOMAXPROCS(gomaxprocs)
	// Number of active readers + 10000 * number of active writers.
	var activity int32
	var rwm RWMutex
	cdone := make(chan bool)
	go writer(&rwm, num_iterations, &activity, cdone)
	var i int
	for i = 0; i < numReaders/2; i++ {
		go reader(&rwm, num_iterations, &activity, cdone)
	}
	go writer(&rwm, num_iterations, &activity, cdone)
	for ; i < numReaders; i++ {
		go reader(&rwm, num_iterations, &activity, cdone)
	}
	// Wait for the 2 writers and all readers to finish.
	for i := 0; i < 2+numReaders; i++ {
		<-cdone
	}
}

func TestRWMutex(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
	n := 1000
	if testing.Short() {
		n = 5
	}
	HammerRWMutex(1, 1, n)
	HammerRWMutex(1, 3, n)
	HammerRWMutex(1, 10, n)
	HammerRWMutex(4, 1, n)
	HammerRWMutex(4, 3, n)
	HammerRWMutex(4, 10, n)
	HammerRWMutex(10, 1, n)
	HammerRWMutex(10, 3, n)
	HammerRWMutex(10, 10, n)
	HammerRWMutex(10, 5, n)
}

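// TestRLocker checks that the Locker returned by RLocker takes read locks:
// multiple read locks may be held at once, a held read lock blocks Lock, and
// a held write lock blocks further read locks.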
func TestRLocker(t *testing.T) {
	var wl RWMutex
	var rl Locker
	wlocked := make(chan bool, 1)
	rlocked := make(chan bool, 1)
	rl = wl.RLocker()
	n := 10
	go func() {
		for i := 0; i < n; i++ {
			rl.Lock()
			rl.Lock()
			rlocked <- true
			wl.Lock()
			wlocked <- true
		}
	}()
	for i := 0; i < n; i++ {
		<-rlocked
		rl.Unlock()
		select {
		case <-wlocked:
			t.Fatal("RLocker() didn't read-lock it")
		default:
		}
		rl.Unlock()
		<-wlocked
		select {
		case <-rlocked:
			t.Fatal("RLocker() didn't respect the write lock")
		default:
		}
		wl.Unlock()
	}
}

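// BenchmarkRWMutexUncontended measures lock/unlock throughput when every
// goroutine uses its own padded RWMutex, so there is no contention between
// goroutines (the padding is meant to avoid false sharing).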
func BenchmarkRWMutexUncontended(b *testing.B) {
	type PaddedRWMutex struct {
		RWMutex
		pad [32]uint32
	}
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, procs)
	for p := 0; p < procs; p++ {
		go func() {
			var rwm PaddedRWMutex
			for atomic.AddInt32(&N, -1) >= 0 {
				runtime.Gosched()
				for g := 0; g < CallsPerSched; g++ {
					rwm.RLock()
					rwm.RLock()
					rwm.RUnlock()
					rwm.RUnlock()
					rwm.Lock()
					rwm.Unlock()
				}
			}
			c <- true
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}

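// benchmarkRWMutex measures a shared, contended RWMutex: every writeRatio-th
// operation takes the write lock, the rest take the read lock and do
// localWork iterations of dummy work while holding it.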
func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, procs)
	var rwm RWMutex
	for p := 0; p < procs; p++ {
		go func() {
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				runtime.Gosched()
				for g := 0; g < CallsPerSched; g++ {
					foo++
					if foo%writeRatio == 0 {
						rwm.Lock()
						rwm.Unlock()
					} else {
						rwm.RLock()
						for i := 0; i != localWork; i++ {
							foo *= 2
							foo /= 2
						}
						rwm.RUnlock()
					}
				}
			}
			// The sent value depends on foo so the dummy work is not optimized away.
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}

func BenchmarkRWMutexWrite100(b *testing.B) {
	benchmarkRWMutex(b, 0, 100)
}

func BenchmarkRWMutexWrite10(b *testing.B) {
	benchmarkRWMutex(b, 0, 10)
}

func BenchmarkRWMutexWorkWrite100(b *testing.B) {
	benchmarkRWMutex(b, 100, 100)
}

func BenchmarkRWMutexWorkWrite10(b *testing.B) {
	benchmarkRWMutex(b, 100, 10)
}