add dependencies
src/paxos-shardkv/client.go (new file, 165 lines)
@@ -0,0 +1,165 @@
package shardkv

import "shardmaster"
import "net/rpc"
import "time"
import "sync"
import "fmt"
import "crypto/rand"
import "math/big"

type Clerk struct {
	mu     sync.Mutex // one RPC at a time
	sm     *shardmaster.Clerk
	config shardmaster.Config
	// You'll have to modify Clerk.
}

func nrand() int64 {
	max := big.NewInt(int64(1) << 62)
	bigx, _ := rand.Int(rand.Reader, max)
	x := bigx.Int64()
	return x
}
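// (editor's note) nrand() yields a 62-bit random value; it is
// typically used to give each Clerk or request a unique ID, e.g.
// for the duplicate-detection fields you may add in common.go.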
func MakeClerk(shardmasters []string) *Clerk {
	ck := new(Clerk)
	ck.sm = shardmaster.MakeClerk(shardmasters)
	// You'll have to modify MakeClerk.
	return ck
}

//
// call() sends an RPC to the rpcname handler on server srv
// with arguments args, waits for the reply, and leaves the
// reply in reply. the reply argument should be a pointer
// to a reply structure.
//
// the return value is true if the server responded, and false
// if call() was not able to contact the server. in particular,
// the reply's contents are only valid if call() returned true.
//
// you should assume that call() will return an
// error after a while if the server is dead.
// don't provide your own time-out mechanism.
//
// please use call() to send all RPCs, in client.go and server.go.
// please don't change this function.
//
func call(srv string, rpcname string,
	args interface{}, reply interface{}) bool {
	c, errx := rpc.Dial("unix", srv)
	if errx != nil {
		return false
	}
	defer c.Close()

	err := c.Call(rpcname, args, reply)
	if err == nil {
		return true
	}

	fmt.Println(err)
	return false
}

//
// which shard is a key in?
// please use this function,
// and please do not change it.
//
func key2shard(key string) int {
	shard := 0
	if len(key) > 0 {
		shard = int(key[0])
	}
	shard %= shardmaster.NShards
	return shard
}
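// (editor's note) Worked example, assuming shardmaster.NShards == 10
// as in these labs: key2shard("apple") looks only at the first byte;
// 'a' is 97, and 97 % 10 == 7, so "apple" lives in shard 7. The
// empty key maps to shard 0.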
//
// fetch the current value for a key.
// returns "" if the key does not exist.
// keeps trying forever in the face of all other errors.
//
func (ck *Clerk) Get(key string) string {
	ck.mu.Lock()
	defer ck.mu.Unlock()

	// You'll have to modify Get().

	for {
		shard := key2shard(key)

		gid := ck.config.Shards[shard]

		servers, ok := ck.config.Groups[gid]

		if ok {
			// try each server in the shard's replication group.
			for _, srv := range servers {
				args := &GetArgs{}
				args.Key = key
				var reply GetReply
				ok := call(srv, "ShardKV.Get", args, &reply)
				if ok && (reply.Err == OK || reply.Err == ErrNoKey) {
					return reply.Value
				}
				if ok && (reply.Err == ErrWrongGroup) {
					break
				}
			}
		}

		time.Sleep(100 * time.Millisecond)

		// ask master for a new configuration.
		ck.config = ck.sm.Query(-1)
	}
}

// send a Put or Append request.
func (ck *Clerk) PutAppend(key string, value string, op string) {
	ck.mu.Lock()
	defer ck.mu.Unlock()

	// You'll have to modify PutAppend().

	for {
		shard := key2shard(key)

		gid := ck.config.Shards[shard]

		servers, ok := ck.config.Groups[gid]

		if ok {
			// try each server in the shard's replication group.
			for _, srv := range servers {
				args := &PutAppendArgs{}
				args.Key = key
				args.Value = value
				args.Op = op
				var reply PutAppendReply
				ok := call(srv, "ShardKV.PutAppend", args, &reply)
				if ok && reply.Err == OK {
					return
				}
				if ok && (reply.Err == ErrWrongGroup) {
					break
				}
			}
		}

		time.Sleep(100 * time.Millisecond)

		// ask master for a new configuration.
		ck.config = ck.sm.Query(-1)
	}
}

func (ck *Clerk) Put(key string, value string) {
	ck.PutAppend(key, value, "Put")
}

func (ck *Clerk) Append(key string, value string) {
	ck.PutAppend(key, value, "Append")
}
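A minimal usage sketch of the Clerk API (editor's note, not part of the skeleton; masterports stands for the Unix-socket names of running shardmaster servers, as set up in test_test.go):

	ck := MakeClerk(masterports)
	ck.Put("a", "x")    // install a value
	ck.Append("a", "b") // "x" becomes "xb"
	v := ck.Get("a")    // v == "xb"; a missing key yields ""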
src/paxos-shardkv/common.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package shardkv

//
// Sharded key/value server.
// Lots of replica groups, each running op-at-a-time paxos.
// Shardmaster decides which group serves each shard.
// Shardmaster may change shard assignment from time to time.
//
// You will have to modify these definitions.
//

const (
	OK            = "OK"
	ErrNoKey      = "ErrNoKey"
	ErrWrongGroup = "ErrWrongGroup"
)

type Err string

type PutAppendArgs struct {
	Key   string
	Value string
	Op    string // "Put" or "Append"
	// You'll have to add definitions here.
	// Field names must start with capital letters,
	// otherwise RPC will break.
}

type PutAppendReply struct {
	Err Err
}

type GetArgs struct {
	Key string
	// You'll have to add definitions here.
}

type GetReply struct {
	Err   Err
	Value string
}
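One common way to fill the "add definitions here" placeholders is to tag every request so a retried RPC can be recognized as a duplicate. A sketch (editor's note; the Client and Seq names are hypothetical, not prescribed by the assignment):

	type PutAppendArgs struct {
		Key    string
		Value  string
		Op     string // "Put" or "Append"
		Client int64  // unique Clerk ID, e.g. from nrand()
		Seq    int    // per-client sequence number; capitalized so net/rpc marshals it
	}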
src/paxos-shardkv/server.go (new file, 166 lines)
@@ -0,0 +1,166 @@
package shardkv

import "net"
import "fmt"
import "net/rpc"
import "log"
import "time"
import "paxos"
import "sync"
import "sync/atomic"
import "os"
import "syscall"
import "encoding/gob"
import "math/rand"
import "shardmaster"

const Debug = 0

func DPrintf(format string, a ...interface{}) (n int, err error) {
	if Debug > 0 {
		log.Printf(format, a...)
	}
	return
}
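// (editor's note) Typical use: raise Debug above 0 during debugging
// and call, say, DPrintf("ShardKV %v-%v: got %v\n", kv.gid, kv.me, args).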
type Op struct {
	// Your definitions here.
}

type ShardKV struct {
	mu         sync.Mutex
	l          net.Listener
	me         int
	dead       int32 // for testing
	unreliable int32 // for testing
	sm         *shardmaster.Clerk
	px         *paxos.Paxos

	gid int64 // my replica group ID

	// Your definitions here.
}

func (kv *ShardKV) Get(args *GetArgs, reply *GetReply) error {
	// Your code here.
	return nil
}

// RPC handler for client Put and Append requests
func (kv *ShardKV) PutAppend(args *PutAppendArgs, reply *PutAppendReply) error {
	// Your code here.
	return nil
}

//
// Ask the shardmaster if there's a new configuration;
// if so, re-configure.
//
func (kv *ShardKV) tick() {
}
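// (editor's note) One possible shape for tick(), not prescribed by
// the skeleton: ask kv.sm.Query(current.Num + 1) for the next
// configuration, fetch the shards this group gains from their old
// owners, then adopt it. Advancing one configuration at a time keeps
// shard handoff well ordered. (The current-config field is yours to add.)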
// tell the server to shut itself down.
// please don't change these two functions.
func (kv *ShardKV) kill() {
	atomic.StoreInt32(&kv.dead, 1)
	kv.l.Close()
	kv.px.Kill()
}

// call this to find out if the server is dead.
func (kv *ShardKV) isdead() bool {
	return atomic.LoadInt32(&kv.dead) != 0
}

// please do not change these two functions.
func (kv *ShardKV) Setunreliable(what bool) {
	if what {
		atomic.StoreInt32(&kv.unreliable, 1)
	} else {
		atomic.StoreInt32(&kv.unreliable, 0)
	}
}

func (kv *ShardKV) isunreliable() bool {
	return atomic.LoadInt32(&kv.unreliable) != 0
}

//
// Start a shardkv server.
// gid is the ID of the server's replica group.
// shardmasters[] contains the ports of the
// servers that implement the shardmaster.
// servers[] contains the ports of the servers
// in this replica group.
// me is the index of this server in servers[].
//
func StartServer(gid int64, shardmasters []string,
	servers []string, me int) *ShardKV {
	gob.Register(Op{})

	kv := new(ShardKV)
	kv.me = me
	kv.gid = gid
	kv.sm = shardmaster.MakeClerk(shardmasters)

	// Your initialization code here.
	// Don't call Join().

	rpcs := rpc.NewServer()
	rpcs.Register(kv)

	kv.px = paxos.Make(servers, me, rpcs)

	os.Remove(servers[me])
	l, e := net.Listen("unix", servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	kv.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	go func() {
		for kv.isdead() == false {
			conn, err := kv.l.Accept()
			if err == nil && kv.isdead() == false {
				if kv.isunreliable() && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if kv.isunreliable() && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && kv.isdead() == false {
				fmt.Printf("ShardKV(%v) accept: %v\n", me, err.Error())
				kv.kill()
			}
		}
	}()

	go func() {
		for kv.isdead() == false {
			kv.tick()
			time.Sleep(250 * time.Millisecond)
		}
	}()

	return kv
}
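The Get/PutAppend handlers and tick() above are intentionally empty. For orientation, here is the usual agreement pattern from the earlier kvpaxos lab (editor's sketch, not the assignment's answer; it assumes px.Start(seq, v) proposes v for log slot seq, px.Status(seq) returns (paxos.Decided, value) once a slot is agreed, and px.Done(seq) lets Paxos forget applied slots; the kv.seq field, the Op.ID field, and the applyOp helper are hypothetical additions):

	func (kv *ShardKV) agree(op Op) {
		for {
			seq := kv.seq // next unapplied log slot (hypothetical field)
			kv.px.Start(seq, op)

			// wait, with backoff, until slot seq is decided.
			to := 10 * time.Millisecond
			for {
				fate, v := kv.px.Status(seq)
				if fate == paxos.Decided {
					decided := v.(Op)
					kv.applyOp(decided) // apply to local state (hypothetical)
					kv.seq++
					kv.px.Done(seq)
					if decided.ID == op.ID { // assumes Op carries a unique ID
						return
					}
					break // another op won this slot; propose again
				}
				time.Sleep(to)
				if to < time.Second {
					to *= 2
				}
			}
		}
	}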
src/paxos-shardkv/test_test.go (new file, 360 lines)
@@ -0,0 +1,360 @@
package shardkv

import "testing"
import "shardmaster"
import "runtime"
import "strconv"
import "os"
import "time"
import "fmt"
import "sync/atomic"
import "math/rand"

// information about the servers of one replica group.
type tGroup struct {
	gid     int64
	servers []*ShardKV
	ports   []string
}

// information about all the servers of a k/v cluster.
type tCluster struct {
	t           *testing.T
	masters     []*shardmaster.ShardMaster
	mck         *shardmaster.Clerk
	masterports []string
	groups      []*tGroup
}

func port(tag string, host int) string {
	s := "/var/tmp/824-"
	s += strconv.Itoa(os.Getuid()) + "/"
	os.Mkdir(s, 0777)
	s += "skv-"
	s += strconv.Itoa(os.Getpid()) + "-"
	s += tag + "-"
	s += strconv.Itoa(host)
	return s
}

//
// start a k/v replica server thread.
//
func (tc *tCluster) start1(gi int, si int, unreliable bool) {
	s := StartServer(tc.groups[gi].gid, tc.masterports, tc.groups[gi].ports, si)
	tc.groups[gi].servers[si] = s
	s.Setunreliable(unreliable)
}

func (tc *tCluster) cleanup() {
	for gi := 0; gi < len(tc.groups); gi++ {
		g := tc.groups[gi]
		for si := 0; si < len(g.servers); si++ {
			if g.servers[si] != nil {
				g.servers[si].kill()
			}
		}
	}

	for i := 0; i < len(tc.masters); i++ {
		if tc.masters[i] != nil {
			tc.masters[i].Kill()
		}
	}
}

func (tc *tCluster) shardclerk() *shardmaster.Clerk {
	return shardmaster.MakeClerk(tc.masterports)
}

func (tc *tCluster) clerk() *Clerk {
	return MakeClerk(tc.masterports)
}

func (tc *tCluster) join(gi int) {
	tc.mck.Join(tc.groups[gi].gid, tc.groups[gi].ports)
}

func (tc *tCluster) leave(gi int) {
	tc.mck.Leave(tc.groups[gi].gid)
}

func setup(t *testing.T, tag string, unreliable bool) *tCluster {
	runtime.GOMAXPROCS(4)

	const nmasters = 3
	const ngroups = 3   // replica groups
	const nreplicas = 3 // servers per group

	tc := &tCluster{}
	tc.t = t
	tc.masters = make([]*shardmaster.ShardMaster, nmasters)
	tc.masterports = make([]string, nmasters)

	for i := 0; i < nmasters; i++ {
		tc.masterports[i] = port(tag+"m", i)
	}
	for i := 0; i < nmasters; i++ {
		tc.masters[i] = shardmaster.StartServer(tc.masterports, i)
	}
	tc.mck = tc.shardclerk()

	tc.groups = make([]*tGroup, ngroups)

	for i := 0; i < ngroups; i++ {
		tc.groups[i] = &tGroup{}
		tc.groups[i].gid = int64(i + 100)
		tc.groups[i].servers = make([]*ShardKV, nreplicas)
		tc.groups[i].ports = make([]string, nreplicas)
		for j := 0; j < nreplicas; j++ {
			tc.groups[i].ports[j] = port(tag+"s", (i*nreplicas)+j)
		}
		for j := 0; j < nreplicas; j++ {
			tc.start1(i, j, unreliable)
		}
	}

	return tc
}
func TestBasic(t *testing.T) {
	tc := setup(t, "basic", false)
	defer tc.cleanup()

	fmt.Printf("Test: Basic Join/Leave ...\n")

	tc.join(0)

	ck := tc.clerk()

	ck.Put("a", "x")
	ck.Append("a", "b")
	if ck.Get("a") != "xb" {
		t.Fatalf("Get got wrong value")
	}

	keys := make([]string, 10)
	vals := make([]string, len(keys))
	for i := 0; i < len(keys); i++ {
		keys[i] = strconv.Itoa(rand.Int())
		vals[i] = strconv.Itoa(rand.Int())
		ck.Put(keys[i], vals[i])
	}

	// are keys still there after joins?
	for g := 1; g < len(tc.groups); g++ {
		tc.join(g)
		time.Sleep(1 * time.Second)
		for i := 0; i < len(keys); i++ {
			v := ck.Get(keys[i])
			if v != vals[i] {
				t.Fatalf("joining; wrong value; g=%v k=%v wanted=%v got=%v",
					g, keys[i], vals[i], v)
			}
			vals[i] = strconv.Itoa(rand.Int())
			ck.Put(keys[i], vals[i])
		}
	}

	// are keys still there after leaves?
	for g := 0; g < len(tc.groups)-1; g++ {
		tc.leave(g)
		time.Sleep(1 * time.Second)
		for i := 0; i < len(keys); i++ {
			v := ck.Get(keys[i])
			if v != vals[i] {
				t.Fatalf("leaving; wrong value; g=%v k=%v wanted=%v got=%v",
					g, keys[i], vals[i], v)
			}
			vals[i] = strconv.Itoa(rand.Int())
			ck.Put(keys[i], vals[i])
		}
	}

	fmt.Printf(" ... Passed\n")
}
func TestMove(t *testing.T) {
	tc := setup(t, "move", false)
	defer tc.cleanup()

	fmt.Printf("Test: Shards really move ...\n")

	tc.join(0)

	ck := tc.clerk()

	// insert one key per shard
	for i := 0; i < shardmaster.NShards; i++ {
		ck.Put(string(rune('0'+i)), string(rune('0'+i)))
	}

	// add group 1.
	tc.join(1)
	time.Sleep(5 * time.Second)

	// check that keys are still there.
	for i := 0; i < shardmaster.NShards; i++ {
		if ck.Get(string(rune('0'+i))) != string(rune('0'+i)) {
			t.Fatalf("missing key/value")
		}
	}

	// remove sockets from group 0.
	for _, port := range tc.groups[0].ports {
		os.Remove(port)
	}

	count := int32(0)
	for i := 0; i < shardmaster.NShards; i++ {
		go func(me int) {
			myck := tc.clerk()
			v := myck.Get(string(rune('0' + me)))
			if v == string(rune('0'+me)) {
				atomic.AddInt32(&count, 1) // atomic add; no extra lock needed
			} else {
				// Errorf, not Fatalf: FailNow must not be called
				// from a goroutine other than the test's.
				t.Errorf("Get(%v) yielded %v\n", me, v)
			}
		}(i)
	}

	time.Sleep(10 * time.Second)

	ccc := atomic.LoadInt32(&count)
	if ccc > shardmaster.NShards/3 && ccc < 2*(shardmaster.NShards/3) {
		fmt.Printf(" ... Passed\n")
	} else {
		t.Fatalf("%v keys worked after killing 1/2 of groups; wanted about %v",
			ccc, shardmaster.NShards/2)
	}
}
func TestLimp(t *testing.T) {
	tc := setup(t, "limp", false)
	defer tc.cleanup()

	fmt.Printf("Test: Reconfiguration with some dead replicas ...\n")

	tc.join(0)

	ck := tc.clerk()

	ck.Put("a", "b")
	if ck.Get("a") != "b" {
		t.Fatalf("got wrong value")
	}

	// kill one server from each replica group.
	for gi := 0; gi < len(tc.groups); gi++ {
		sa := tc.groups[gi].servers
		ns := len(sa)
		sa[rand.Int()%ns].kill()
	}

	keys := make([]string, 10)
	vals := make([]string, len(keys))
	for i := 0; i < len(keys); i++ {
		keys[i] = strconv.Itoa(rand.Int())
		vals[i] = strconv.Itoa(rand.Int())
		ck.Put(keys[i], vals[i])
	}

	// are keys still there after joins?
	for g := 1; g < len(tc.groups); g++ {
		tc.join(g)
		time.Sleep(1 * time.Second)
		for i := 0; i < len(keys); i++ {
			v := ck.Get(keys[i])
			if v != vals[i] {
				t.Fatalf("joining; wrong value; g=%v k=%v wanted=%v got=%v",
					g, keys[i], vals[i], v)
			}
			vals[i] = strconv.Itoa(rand.Int())
			ck.Put(keys[i], vals[i])
		}
	}

	// are keys still there after leaves?
	for gi := 0; gi < len(tc.groups)-1; gi++ {
		tc.leave(gi)
		time.Sleep(2 * time.Second)
		g := tc.groups[gi]
		for i := 0; i < len(g.servers); i++ {
			g.servers[i].kill()
		}
		for i := 0; i < len(keys); i++ {
			v := ck.Get(keys[i])
			if v != vals[i] {
				// report the group index, not the struct, in the message.
				t.Fatalf("leaving; wrong value; g=%v k=%v wanted=%v got=%v",
					gi, keys[i], vals[i], v)
			}
			vals[i] = strconv.Itoa(rand.Int())
			ck.Put(keys[i], vals[i])
		}
	}

	fmt.Printf(" ... Passed\n")
}
func doConcurrent(t *testing.T, unreliable bool) {
	tc := setup(t, "concurrent-"+strconv.FormatBool(unreliable), unreliable)
	defer tc.cleanup()

	for i := 0; i < len(tc.groups); i++ {
		tc.join(i)
	}

	const npara = 11
	var ca [npara]chan bool
	for i := 0; i < npara; i++ {
		ca[i] = make(chan bool)
		go func(me int) {
			ok := true
			defer func() { ca[me] <- ok }()
			ck := tc.clerk()
			mymck := tc.shardclerk()
			key := strconv.Itoa(me)
			last := ""
			for iters := 0; iters < 3; iters++ {
				nv := strconv.Itoa(rand.Int())
				ck.Append(key, nv)
				last = last + nv
				v := ck.Get(key)
				if v != last {
					ok = false
					// Errorf, not Fatalf: FailNow must not be called
					// from a goroutine other than the test's.
					t.Errorf("Get(%v) expected %v got %v\n", key, last, v)
				}

				gi := rand.Int() % len(tc.groups)
				gid := tc.groups[gi].gid
				mymck.Move(rand.Int()%shardmaster.NShards, gid)

				time.Sleep(time.Duration(rand.Int()%30) * time.Millisecond)
			}
		}(i)
	}

	for i := 0; i < npara; i++ {
		x := <-ca[i]
		if x == false {
			t.Fatalf("something is wrong")
		}
	}
}
func TestConcurrent(t *testing.T) {
	fmt.Printf("Test: Concurrent Put/Get/Move ...\n")
	doConcurrent(t, false)
	fmt.Printf(" ... Passed\n")
}

func TestConcurrentUnreliable(t *testing.T) {
	fmt.Printf("Test: Concurrent Put/Get/Move (unreliable) ...\n")
	doConcurrent(t, true)
	fmt.Printf(" ... Passed\n")
}
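(editor's note) These tests assume the old course layout, where src/ is on GOPATH so that the plain import paths "shardmaster" and "paxos" resolve; with that in place, `cd src/paxos-shardkv && go test` runs the whole suite, and `go test -run TestBasic` runs a single test.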