libf 2 years ago
parent
revision
229c4bf6dc

+ 59 - 0
achord/README.md

@@ -0,0 +1,59 @@
+# Chord
+[WIP]
+An implementation of the Chord protocol from the paper below.
+
+# Paper
+https://pdos.csail.mit.edu/papers/ton:chord/paper-ton.pdf
+
+## Example Usage
+
+```go
+package main
+
+import (
+	"log"
+	"os"
+	"os/signal"
+	"time"
+
+	chord "trial/achord"
+	"trial/achord/models"
+)
+
+func createNode(id string, addr string, joinNode *models.Node) (*chord.Node, error) {
+
+	cnf := chord.DefaultConfig()
+	cnf.Id = id
+	cnf.Addr = addr
+	cnf.Timeout = 10 * time.Millisecond
+	cnf.MaxIdle = 100 * time.Millisecond
+
+	n, err := chord.NewNode(cnf, joinNode)
+	return n, err
+}
+
+
+func main() {
+
+	joinNode := chord.NewInode("1", "0.0.0.0:8001")
+
+	h, err := createNode("8", "0.0.0.0:8003", joinNode)
+	if err != nil {
+		log.Fatalln(err)
+		return
+	}
+
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt)
+	<-c
+	h.Stop()
+}
+```
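+
+Note: the join node passed to `createNode` must already be running; start `examples/node1` (which bootstraps the ring on `0.0.0.0:8001`) first. `NewNode` returns `ERR_NODE_EXISTS` if a node with the same ID is already on the ring.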
+
+
+# References
+The following implementation helped a lot in designing this code base:
+https://github.com/r-medina/gmaj
+
+# TODO
+- Add more test cases
+- Add Prometheus metrics

+ 14 - 0
achord/examples/README.md

@@ -0,0 +1,14 @@
+# Chord
+Running the code
+
+
+## Example Usage
+
+Run the three nodes in separate terminal windows:
+
+```sh
+go run node-1.go
+go run node-2.go
+go run node-3.go
+```
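+
+node-1 bootstraps the ring on `0.0.0.0:8001`; node-2 joins on `0.0.0.0:8002` and writes a new key/value pair every second; node-3 joins on `0.0.0.0:8003`.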

+ 44 - 0
achord/examples/node1/node-1.go

@@ -0,0 +1,44 @@
+package main
+
+import (
+	"log"
+	"math/big"
+	"os"
+	"os/signal"
+	"time"
+	"trial/achord/models"
+
+	chord "trial/achord"
+)
+
+func createNode(id string, addr string, sister *models.Node) (*chord.Node, error) {
+
+	cnf := chord.DefaultConfig()
+	cnf.Id = id
+	cnf.Addr = addr
+	cnf.Timeout = 10 * time.Millisecond
+	cnf.MaxIdle = 100 * time.Millisecond
+
+	n, err := chord.NewNode(cnf, sister)
+	return n, err
+}
+
+func createID(id string) []byte {
+	val := big.NewInt(0)
+	val.SetString(id, 10)
+	return val.Bytes()
+}
+
+func main() {
+
+	h, err := createNode("1", "0.0.0.0:8001", nil)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt)
+	<-time.After(10 * time.Second)
+	<-c
+	h.Stop()
+
+}

+ 70 - 0
achord/examples/node2/node-2.go

@@ -0,0 +1,70 @@
+package main
+
+import (
+	"fmt"
+	"log"
+	"math/big"
+	"os"
+	"os/signal"
+	"strconv"
+	"time"
+	chord "trial/achord"
+	"trial/achord/models"
+)
+
+func createNode(id string, addr string, sister *models.Node) (*chord.Node, error) {
+
+	cnf := chord.DefaultConfig()
+	cnf.Id = id
+	cnf.Addr = addr
+	cnf.Timeout = 10 * time.Millisecond
+	cnf.MaxIdle = 100 * time.Millisecond
+
+	n, err := chord.NewNode(cnf, sister)
+	return n, err
+}
+
+func createID(id string) []byte {
+	val := big.NewInt(0)
+	val.SetString(id, 10)
+	return val.Bytes()
+}
+
+func main() {
+
+	id1 := "1"
+	sister := chord.NewInode(id1, "0.0.0.0:8001")
+
+	h, err := createNode("4", "0.0.0.0:8002", sister)
+	if err != nil {
+		log.Fatalln(err)
+		return
+	}
+
+	shut := make(chan bool)
+	var count int
+	go func() {
+		ticker := time.NewTicker(1 * time.Second)
+		for {
+			select {
+			case <-ticker.C:
+				count++
+				key := strconv.Itoa(count)
+				value := fmt.Sprintf(`{"graph_id" : %d, "nodes" : ["node-%d","node-%d","node-%d"]}`, count, count+1, count+2, count+3)
+				sErr := h.Set(key, value)
+				if sErr != nil {
+					log.Println("err: ", sErr)
+				}
+			case <-shut:
+				ticker.Stop()
+				return
+			}
+		}
+	}()
+
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt)
+	<-c
+	shut <- true
+	h.Stop()
+}

+ 47 - 0
achord/examples/node3/node-3.go

@@ -0,0 +1,47 @@
+package main
+
+import (
+	"log"
+	"math/big"
+	"os"
+	"os/signal"
+	chord "trial/achord"
+	"trial/achord/models"
+
+	"time"
+)
+
+func createNode(id string, addr string, sister *models.Node) (*chord.Node, error) {
+
+	cnf := chord.DefaultConfig()
+	cnf.Id = id
+	cnf.Addr = addr
+	cnf.Timeout = 10 * time.Millisecond
+	cnf.MaxIdle = 100 * time.Millisecond
+
+	n, err := chord.NewNode(cnf, sister)
+	return n, err
+}
+
+func createID(id string) []byte {
+	val := big.NewInt(0)
+	val.SetString(id, 10)
+	return val.Bytes()
+}
+
+func main() {
+
+	joinNode := chord.NewInode("1", "0.0.0.0:8001")
+
+	h, err := createNode("8", "0.0.0.0:8003", joinNode)
+	if err != nil {
+		log.Fatalln(err)
+		return
+	}
+
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt)
+	<-c
+	h.Stop()
+}

+ 84 - 0
achord/finger.go

@@ -0,0 +1,84 @@
+package chord
+
+import (
+	"fmt"
+	"math/big"
+	"trial/achord/models"
+)
+
+type fingerTable []*fingerEntry
+
+func newFingerTable(node *models.Node, m int) fingerTable {
+	ft := make([]*fingerEntry, m)
+	for i := range ft {
+		ft[i] = newFingerEntry(fingerID(node.Id, i, m), node)
+	}
+
+	return ft
+}
+
+// fingerEntry represents a single finger table entry
+type fingerEntry struct {
+	Id   []byte       // ID hash of (n + 2^i) mod (2^m)
+	Node *models.Node // first node that succeeds Id
+}
+
+// newFingerEntry returns an allocated new finger entry with the attributes set
+func newFingerEntry(id []byte, node *models.Node) *fingerEntry {
+	return &fingerEntry{
+		Id:   id,
+		Node: node,
+	}
+}
+
+// Computes the offset by (n + 2^i) mod (2^m)
+func fingerID(n []byte, i int, m int) []byte {
+
+	// Convert the ID to a bigint
+	idInt := (&big.Int{}).SetBytes(n)
+
+	// Get the offset
+	two := big.NewInt(2)
+	offset := big.Int{}
+	offset.Exp(two, big.NewInt(int64(i)), nil)
+
+	// Sum
+	sum := big.Int{}
+	sum.Add(idInt, &offset)
+
+	// Compute 2^m, the size of the identifier ring
+	ring := big.Int{}
+	ring.Exp(two, big.NewInt(int64(m)), nil)
+
+	// Wrap the sum around the ring
+	idInt.Mod(&sum, &ring)
+
+	return idInt.Bytes()
+}
+
+// called periodically. refreshes finger table entries.
+// next stores the index of the next finger to fix.
+func (n *Node) fixFinger(next int) int {
+	nextHash := fingerID(n.Id, next, n.cnf.HashSize)
+	succ, err := n.findSuccessor(nextHash)
+	nextNum := (next + 1) % n.cnf.HashSize
+	if err != nil || succ == nil {
+		fmt.Println("error: ", err, succ)
+		fmt.Printf("finger lookup failed %x %x \n", n.Id, nextHash)
+		// TODO: Check how to handle retry, passing ahead for now
+		return nextNum
+	}
+
+	finger := newFingerEntry(nextHash, succ)
+	n.ftMtx.Lock()
+	n.fingerTable[next] = finger
+	n.ftMtx.Unlock()
+
+	return nextNum
+}
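
For intuition, here is a minimal, self-contained sketch (hypothetical, not part of this package) of the same (n + 2^i) mod 2^m computation on a toy 3-bit ring; node 1's fingers land on 2, 3 and 5:

```go
package main

import (
	"fmt"
	"math/big"
)

// fingerID mirrors achord/finger.go: (n + 2^i) mod 2^m.
func fingerID(n []byte, i, m int) []byte {
	idInt := new(big.Int).SetBytes(n)
	offset := new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(i)), nil)
	sum := new(big.Int).Add(idInt, offset)
	ring := new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(m)), nil)
	return new(big.Int).Mod(sum, ring).Bytes()
}

func main() {
	n := big.NewInt(1).Bytes() // node 1 in a 2^3 = 8 slot ring
	for i := 0; i < 3; i++ {
		fmt.Println(new(big.Int).SetBytes(fingerID(n, i, 3))) // 2, 3, 5
	}
}
```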

+ 94 - 0
achord/finger_test.go

@@ -0,0 +1,94 @@
+package chord
+
+import (
+	"crypto/sha1"
+	"fmt"
+	"math/big"
+	"reflect"
+	"testing"
+	"trial/achord/models"
+)
+
+func TestNewFingerTable(t *testing.T) {
+	g := newFingerTable(NewInode("8", "0.0.0.0:8003"), sha1.New().Size())
+	for i, j := range g {
+		fmt.Printf("%d, %x, %x\n", i, j.Id, j.Node.Id)
+	}
+}
+
+func TestNewFingerEntry(t *testing.T) {
+	hashSize := sha1.New().Size() * 8
+	id := GetHashID("0.0.0.0:8083")
+	xInt := (&big.Int{}).SetBytes(id)
+	for i := 0; i < 100; i++ {
+		nextHash := fingerID(id, i, hashSize)
+		aInt := (&big.Int{}).SetBytes(nextHash)
+
+		fmt.Printf("%d, %d %d\n", xInt, aInt, hashSize)
+	}
+}
+
+func Test_newFingerTable(t *testing.T) {
+	type args struct {
+		node *models.Node
+		m    int
+	}
+	tests := []struct {
+		name string
+		args args
+		want fingerTable
+	}{
+		// TODO: Add test cases.
+		// {"1", args{NewInode("8", "0.0.0.0:8083"), 1}, fingerTable},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := newFingerTable(tt.args.node, tt.args.m); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("newFingerTable() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func Test_newFingerEntry(t *testing.T) {
+	type args struct {
+		id   []byte
+		node *models.Node
+	}
+	tests := []struct {
+		name string
+		args args
+		want *fingerEntry
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := newFingerEntry(tt.args.id, tt.args.node); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("newFingerEntry() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func Test_fingerID(t *testing.T) {
+	type args struct {
+		n []byte
+		i int
+		m int
+	}
+	tests := []struct {
+		name string
+		args args
+		want []byte
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := fingerID(tt.args.n, tt.args.i, tt.args.m); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("fingerID() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

File diffs are limited because there are too many.
+ 1041 - 0
achord/models/chord.pb.go


+ 92 - 0
achord/models/chord.proto

@@ -0,0 +1,92 @@
+syntax = "proto3";
+
+package models;
+
+// Chord is the service for inter-node communication.
+// This has all the RPC functions needed to maintain
+// a Chord cluster.
+service Chord {
+    // GetPredecessor returns the node believed to be the current predecessor.
+    rpc GetPredecessor(ER) returns (Node);
+    // GetSuccessor returns the node believed to be the current successor.
+    rpc GetSuccessor(ER) returns (Node);
+    // Notify notifies Chord that Node thinks it is our predecessor. This has
+    // the potential to initiate the transferring of keys.
+    rpc Notify(Node) returns (ER);
+    // FindSuccessor finds the node that succeeds ID. May initiate RPC calls to
+    // other nodes.
+    rpc FindSuccessor(ID) returns (Node);
+    // CheckPredecessor checks whether the predecessor has failed.
+    rpc CheckPredecessor(ID) returns (ER);
+    // SetPredecessor sets the predecessor for a node.
+    rpc SetPredecessor(Node) returns (ER);
+    // SetSuccessor sets the successor for a node.
+    rpc SetSuccessor(Node) returns (ER);
+
+    // XGet returns the value in the Chord ring for the given key.
+    rpc XGet(GetRequest) returns (GetResponse);
+    // XSet writes a key/value pair to the Chord ring.
+    rpc XSet(SetRequest) returns (SetResponse);
+    // XDelete removes the value in the Chord ring for the given key.
+    rpc XDelete(DeleteRequest) returns (DeleteResponse);
+    // XMultiDelete removes the values for the given keys from the Chord ring.
+    rpc XMultiDelete(MultiDeleteRequest) returns (DeleteResponse);
+    // XRequestKeys returns the key/value pairs in the given range from the Chord ring.
+    rpc XRequestKeys(RequestKeysRequest) returns (RequestKeysResponse);
+
+}
+
+
+// Node contains a node ID and address.
+message Node {
+    bytes id = 1;
+    string addr = 2;
+}
+
+message ER {}
+
+message ID {
+    bytes id = 1;
+}
+
+
+message GetRequest {
+    string key = 1;
+}
+
+message GetResponse {
+    bytes value = 1;
+}
+
+message SetRequest {
+    string key = 1;
+    string value = 2;
+}
+
+message SetResponse {}
+
+
+message DeleteRequest {
+    string key = 1;
+}
+
+message DeleteResponse {
+}
+
+message MultiDeleteRequest {
+    repeated string keys = 1;
+}
+
+message RequestKeysRequest {
+    bytes from = 1;
+    bytes to = 2;
+}
+
+message KV {
+    string key = 1;
+    string value = 2;
+}
+
+message RequestKeysResponse {
+    repeated KV values = 1;
+}
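
Any running node can be queried directly over this interface. A minimal client sketch against the generated `models` package; the address and the raw ID bytes are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"time"
	"trial/achord/models"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// Dial a node that is already part of the ring (placeholder address).
	conn, err := grpc.Dial("0.0.0.0:8001", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalln(err)
	}
	defer conn.Close()

	client := models.NewChordClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Ask which node is responsible for this (placeholder) ID.
	succ, err := client.FindSuccessor(ctx, &models.ID{Id: []byte{42}})
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("successor: %x @ %s\n", succ.Id, succ.Addr)
}
```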

+ 678 - 0
achord/node.go

@@ -0,0 +1,678 @@
+package chord
+
+import (
+	"crypto/sha1"
+	"fmt"
+	"hash"
+	"math/big"
+	"sync"
+	"time"
+	"trial/achord/models"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+)
+
+func DefaultConfig() *Config {
+	n := &Config{
+		Hash:     sha1.New,
+		DialOpts: make([]grpc.DialOption, 0, 5),
+	}
+	n.HashSize = n.Hash().Size() * 8
+
+	n.DialOpts = append(n.DialOpts,
+		grpc.WithBlock(),
+		grpc.WithTimeout(5*time.Second),
+		grpc.FailOnNonTempDialError(true),
+		grpc.WithInsecure(),
+	)
+	return n
+}
+
+type Config struct {
+	Id   string
+	Addr string
+
+	ServerOpts []grpc.ServerOption
+	DialOpts   []grpc.DialOption
+
+	Hash     func() hash.Hash // Hash function to use
+	HashSize int
+
+	StabilizeMin time.Duration // Minimum stabilization time
+	StabilizeMax time.Duration // Maximum stabilization time
+
+	Timeout time.Duration
+	MaxIdle time.Duration
+}
+
+func (c *Config) Validate() error {
+	// TODO: HashSize should not be less than the hash function's output size
+	return nil
+}
+
+func NewInode(id string, addr string) *models.Node {
+	h := sha1.New()
+	if _, err := h.Write([]byte(id)); err != nil {
+		return nil
+	}
+	val := h.Sum(nil)
+
+	return &models.Node{
+		Id:   val,
+		Addr: addr,
+	}
+}
+
+/*
+NewNode creates a new Chord node. Returns an error if a node
+with the same ID already exists in the Chord ring.
+*/
+func NewNode(cnf *Config, joinNode *models.Node) (*Node, error) {
+	if err := cnf.Validate(); err != nil {
+		return nil, err
+	}
+	node := &Node{
+		Node:       new(models.Node),
+		shutdownCh: make(chan struct{}),
+		cnf:        cnf,
+		storage:    NewMapStore(cnf.Hash),
+	}
+
+	var nID string
+	if cnf.Id != "" {
+		nID = cnf.Id
+	} else {
+		nID = cnf.Addr
+	}
+	id, err := node.hashKey(nID)
+	if err != nil {
+		return nil, err
+	}
+	aInt := (&big.Int{}).SetBytes(id)
+
+	fmt.Printf("new node id %d\n", aInt)
+
+	node.Node.Id = id
+	node.Node.Addr = cnf.Addr
+
+	// Populate finger table
+	node.fingerTable = newFingerTable(node.Node, cnf.HashSize)
+
+	// Start RPC server
+	transport, err := NewGrpcTransport(cnf)
+	if err != nil {
+		return nil, err
+	}
+
+	node.transport = transport
+
+	models.RegisterChordServer(transport.server, node)
+
+	node.transport.Start()
+
+	if err := node.join(joinNode); err != nil {
+		return nil, err
+	}
+
+	// Periodically stabilize the node.
+	go func() {
+		ticker := time.NewTicker(1 * time.Second)
+		for {
+			select {
+			case <-ticker.C:
+				node.stabilize()
+			case <-node.shutdownCh:
+				ticker.Stop()
+				return
+			}
+		}
+	}()
+
+	// Periodically fix finger tables.
+	go func() {
+		next := 0
+		ticker := time.NewTicker(100 * time.Millisecond)
+		for {
+			select {
+			case <-ticker.C:
+				next = node.fixFinger(next)
+			case <-node.shutdownCh:
+				ticker.Stop()
+				return
+			}
+		}
+	}()
+
+	// Periodically check whether the predecessor has failed.
+	go func() {
+		ticker := time.NewTicker(10 * time.Second)
+		for {
+			select {
+			case <-ticker.C:
+				node.checkPredecessor()
+			case <-node.shutdownCh:
+				ticker.Stop()
+				return
+			}
+		}
+	}()
+
+	return node, nil
+}
+
+type Node struct {
+	*models.Node
+
+	cnf *Config
+
+	predecessor *models.Node
+	predMtx     sync.RWMutex
+
+	successor *models.Node
+	succMtx   sync.RWMutex
+
+	shutdownCh chan struct{}
+
+	fingerTable fingerTable
+	ftMtx       sync.RWMutex
+
+	storage Storage
+	stMtx   sync.RWMutex
+
+	transport Transport
+	tsMtx     sync.RWMutex
+
+	lastStabilized time.Time
+}
+
+func (n *Node) hashKey(key string) ([]byte, error) {
+	h := n.cnf.Hash()
+	if _, err := h.Write([]byte(key)); err != nil {
+		return nil, err
+	}
+	val := h.Sum(nil)
+	return val, nil
+}
+
+func (n *Node) join(joinNode *models.Node) error {
+	// Join this node to the same Chord ring as joinNode, after
+	// asking the ring whether our ID already exists on it.
+	var entryNode *models.Node
+	if joinNode != nil {
+		remoteNode, err := n.findSuccessorRPC(joinNode, n.Id)
+		if err != nil {
+			return err
+		}
+
+		if isEqual(remoteNode.Id, n.Id) {
+			return ERR_NODE_EXISTS
+		}
+		entryNode = joinNode
+	} else {
+		entryNode = n.Node
+	}
+
+	succ, err := n.findSuccessorRPC(entryNode, n.Id)
+	if err != nil {
+		return err
+	}
+	n.succMtx.Lock()
+	n.successor = succ
+	n.succMtx.Unlock()
+
+	return nil
+}
+
+/*
+	Public storage implementation
+*/
+
+func (n *Node) Find(key string) (*models.Node, error) {
+	return n.locate(key)
+}
+
+func (n *Node) Get(key string) ([]byte, error) {
+	return n.get(key)
+}
+func (n *Node) Set(key, value string) error {
+	return n.set(key, value)
+}
+func (n *Node) Delete(key string) error {
+	return n.delete(key)
+}
+
+/*
+Finds the node for the key
+*/
+func (n *Node) locate(key string) (*models.Node, error) {
+	id, err := n.hashKey(key)
+	if err != nil {
+		return nil, err
+	}
+	succ, err := n.findSuccessor(id)
+	return succ, err
+}
+
+func (n *Node) get(key string) ([]byte, error) {
+	node, err := n.locate(key)
+	if err != nil {
+		return nil, err
+	}
+	val, err := n.getKeyRPC(node, key)
+	if err != nil {
+		return nil, err
+	}
+	return val.Value, nil
+}
+
+func (n *Node) set(key, value string) error {
+	node, err := n.locate(key)
+	if err != nil {
+		return err
+	}
+	err = n.setKeyRPC(node, key, value)
+	return err
+}
+
+func (n *Node) delete(key string) error {
+	node, err := n.locate(key)
+	if err != nil {
+		return err
+	}
+	err = n.deleteKeyRPC(node, key)
+	return err
+}
+
+func (n *Node) transferKeys(pred, succ *models.Node) {
+
+	keys, err := n.requestKeys(pred, succ)
+	if len(keys) > 0 {
+		fmt.Println("transferring: ", keys, err)
+	}
+	delKeyList := make([]string, 0, 10)
+	// store the keys in current node
+	for _, item := range keys {
+		if item == nil {
+			continue
+		}
+		n.storage.Set(item.Key, item.Value)
+		delKeyList = append(delKeyList, item.Key)
+	}
+	// delete the keys from the successor node, as current node
+	// is responsible for the keys
+	if len(delKeyList) > 0 {
+		n.deleteKeys(succ, delKeyList)
+	}
+
+}
+
+func (n *Node) moveKeysFromLocal(pred, succ *models.Node) {
+
+	keys, err := n.storage.Between(pred.Id, succ.Id)
+	if len(keys) > 0 {
+		fmt.Println("transferring: ", keys, succ, err)
+	}
+	delKeyList := make([]string, 0, 10)
+	// store the keys in current node
+	for _, item := range keys {
+		if item == nil {
+			continue
+		}
+		err := n.setKeyRPC(succ, item.Key, item.Value)
+		if err != nil {
+			fmt.Println("error transferring key: ", item.Key, succ.Addr)
+		}
+		delKeyList = append(delKeyList, item.Key)
+	}
+	// delete the keys from the successor node, as current node
+	// is responsible for the keys
+	if len(delKeyList) > 0 {
+		n.deleteKeys(succ, delKeyList)
+	}
+
+}
+
+func (n *Node) deleteKeys(node *models.Node, keys []string) error {
+	return n.deleteKeysRPC(node, keys)
+}
+
+// When a new node joins, it requests keys from its successor
+func (n *Node) requestKeys(pred, succ *models.Node) ([]*models.KV, error) {
+
+	if isEqual(n.Id, succ.Id) {
+		return nil, nil
+	}
+	return n.requestKeysRPC(
+		succ, pred.Id, n.Id,
+	)
+}
+
+/*
+Fig 5 implementation of find_successor.
+First check whether the key lies between this node and its successor;
+if not, forward the lookup through the ring.
+*/
+func (n *Node) findSuccessor(id []byte) (*models.Node, error) {
+	// Check if lock is needed throughout the process
+	n.succMtx.RLock()
+	defer n.succMtx.RUnlock()
+	curr := n.Node
+	succ := n.successor
+
+	if succ == nil {
+		return curr, nil
+	}
+
+	var err error
+
+	if betweenRightIncl(id, curr.Id, succ.Id) {
+		return succ, nil
+	} else {
+		pred := n.closestPrecedingNode(id)
+		/*
+			NOT SURE ABOUT THIS, RECHECK from the paper!
+			If the preceding node and the current node are the same,
+			store the key on this node.
+		*/
+
+		if isEqual(pred.Id, n.Id) {
+			succ, err = n.getSuccessorRPC(pred)
+			if err != nil {
+				return nil, err
+			}
+			if succ == nil {
+				// not able to wrap around, current node is the successor
+				return pred, nil
+			}
+			return succ, nil
+		}
+
+		succ, err := n.findSuccessorRPC(pred, id)
+		// fmt.Println("successor to closest node ", succ, err)
+		if err != nil {
+			return nil, err
+		}
+		if succ == nil {
+			// not able to wrap around, current node is the successor
+			return curr, nil
+		}
+		return succ, nil
+
+	}
+}
+
+// Fig 5 implementation for closest_preceding_node
+func (n *Node) closestPrecedingNode(id []byte) *models.Node {
+	n.predMtx.RLock()
+	defer n.predMtx.RUnlock()
+
+	curr := n.Node
+
+	m := len(n.fingerTable) - 1
+	for i := m; i >= 0; i-- {
+		f := n.fingerTable[i]
+		if f == nil || f.Node == nil {
+			continue
+		}
+		if between(f.Id, curr.Id, id) {
+			return f.Node
+		}
+	}
+	return curr
+}
+
+/*
+	Periodic functions implementation
+*/
+
+func (n *Node) stabilize() {
+
+	n.succMtx.RLock()
+	succ := n.successor
+	if succ == nil {
+		n.succMtx.RUnlock()
+		return
+	}
+	n.succMtx.RUnlock()
+
+	x, err := n.getPredecessorRPC(succ)
+	if err != nil || x == nil {
+		fmt.Println("error getting predecessor, ", err, x)
+		return
+	}
+	if x.Id != nil && between(x.Id, n.Id, succ.Id) {
+		n.succMtx.Lock()
+		n.successor = x
+		n.succMtx.Unlock()
+	}
+	n.notifyRPC(succ, n.Node)
+}
+
+func (n *Node) checkPredecessor() {
+	// implement using rpc func
+	n.predMtx.RLock()
+	pred := n.predecessor
+	n.predMtx.RUnlock()
+
+	if pred != nil {
+		err := n.transport.CheckPredecessor(pred)
+		if err != nil {
+			fmt.Println("predecessor failed!", err)
+			n.predMtx.Lock()
+			n.predecessor = nil
+			n.predMtx.Unlock()
+		}
+	}
+}
+
+/*
+	RPC callers implementation
+*/
+
+// getSuccessorRPC gets the successor of a remote node.
+func (n *Node) getSuccessorRPC(node *models.Node) (*models.Node, error) {
+	return n.transport.GetSuccessor(node)
+}
+
+// setSuccessorRPC sets the successor of a given node.
+func (n *Node) setSuccessorRPC(node *models.Node, succ *models.Node) error {
+	return n.transport.SetSuccessor(node, succ)
+}
+
+// findSuccessorRPC finds the successor node of a given ID in the entire ring.
+func (n *Node) findSuccessorRPC(node *models.Node, id []byte) (*models.Node, error) {
+	return n.transport.FindSuccessor(node, id)
+}
+
+// getPredecessorRPC gets the predecessor of a remote node.
+func (n *Node) getPredecessorRPC(node *models.Node) (*models.Node, error) {
+	return n.transport.GetPredecessor(node)
+}
+
+// setPredecessorRPC sets the predecessor of a given node.
+func (n *Node) setPredecessorRPC(node *models.Node, pred *models.Node) error {
+	return n.transport.SetPredecessor(node, pred)
+}
+
+// notifyRPC notifies a remote node that pred is its predecessor.
+func (n *Node) notifyRPC(node, pred *models.Node) error {
+	return n.transport.Notify(node, pred)
+}
+
+func (n *Node) getKeyRPC(node *models.Node, key string) (*models.GetResponse, error) {
+	return n.transport.GetKey(node, key)
+}
+func (n *Node) setKeyRPC(node *models.Node, key, value string) error {
+	return n.transport.SetKey(node, key, value)
+}
+func (n *Node) deleteKeyRPC(node *models.Node, key string) error {
+	return n.transport.DeleteKey(node, key)
+}
+
+func (n *Node) requestKeysRPC(
+	node *models.Node, from []byte, to []byte,
+) ([]*models.KV, error) {
+	return n.transport.RequestKeys(node, from, to)
+}
+
+func (n *Node) deleteKeysRPC(
+	node *models.Node, keys []string,
+) error {
+	return n.transport.DeleteKeys(node, keys)
+}
+
+/*
+	RPC interface implementation
+*/
+
+// GetSuccessor gets the successor of the node.
+func (n *Node) GetSuccessor(ctx context.Context, r *models.ER) (*models.Node, error) {
+	n.succMtx.RLock()
+	succ := n.successor
+	n.succMtx.RUnlock()
+	if succ == nil {
+		return emptyNode, nil
+	}
+
+	return succ, nil
+}
+
+// SetSuccessor sets the successor on the node.
+func (n *Node) SetSuccessor(ctx context.Context, succ *models.Node) (*models.ER, error) {
+	n.succMtx.Lock()
+	n.successor = succ
+	n.succMtx.Unlock()
+	return emptyRequest, nil
+}
+
+// SetPredecessor sets the predecessor on the node.
+func (n *Node) SetPredecessor(ctx context.Context, pred *models.Node) (*models.ER, error) {
+	n.predMtx.Lock()
+	n.predecessor = pred
+	n.predMtx.Unlock()
+	return emptyRequest, nil
+}
+
+func (n *Node) FindSuccessor(ctx context.Context, id *models.ID) (*models.Node, error) {
+	succ, err := n.findSuccessor(id.Id)
+	if err != nil {
+		return nil, err
+	}
+
+	if succ == nil {
+		return nil, ERR_NO_SUCCESSOR
+	}
+
+	return succ, nil
+
+}
+
+func (n *Node) CheckPredecessor(ctx context.Context, id *models.ID) (*models.ER, error) {
+	return emptyRequest, nil
+}
+
+func (n *Node) GetPredecessor(ctx context.Context, r *models.ER) (*models.Node, error) {
+	n.predMtx.RLock()
+	pred := n.predecessor
+	n.predMtx.RUnlock()
+	if pred == nil {
+		return emptyNode, nil
+	}
+	return pred, nil
+}
+
+func (n *Node) Notify(ctx context.Context, node *models.Node) (*models.ER, error) {
+	n.predMtx.Lock()
+	defer n.predMtx.Unlock()
+	var prevPredNode *models.Node
+
+	pred := n.predecessor
+	if pred == nil || between(node.Id, pred.Id, n.Id) {
+		// fmt.Println("setting predecessor", n.Id, node.Id)
+		if n.predecessor != nil {
+			prevPredNode = n.predecessor
+		}
+		n.predecessor = node
+
+		// transfer keys from parent node
+		if prevPredNode != nil {
+			if between(n.predecessor.Id, prevPredNode.Id, n.Id) {
+				n.transferKeys(prevPredNode, n.predecessor)
+			}
+		}
+
+	}
+
+	return emptyRequest, nil
+}
+
+func (n *Node) XGet(ctx context.Context, req *models.GetRequest) (*models.GetResponse, error) {
+	n.stMtx.RLock()
+	defer n.stMtx.RUnlock()
+	val, err := n.storage.Get(req.Key)
+	if err != nil {
+		return emptyGetResponse, err
+	}
+	return &models.GetResponse{Value: val}, nil
+}
+
+func (n *Node) XSet(ctx context.Context, req *models.SetRequest) (*models.SetResponse, error) {
+	n.stMtx.Lock()
+	defer n.stMtx.Unlock()
+	fmt.Println("setting key on ", n.Node.Addr, req.Key, req.Value)
+	err := n.storage.Set(req.Key, req.Value)
+	return emptySetResponse, err
+}
+
+func (n *Node) XDelete(ctx context.Context, req *models.DeleteRequest) (*models.DeleteResponse, error) {
+	n.stMtx.Lock()
+	defer n.stMtx.Unlock()
+	err := n.storage.Delete(req.Key)
+	return emptyDeleteResponse, err
+}
+
+func (n *Node) XRequestKeys(ctx context.Context, req *models.RequestKeysRequest) (*models.RequestKeysResponse, error) {
+	n.stMtx.RLock()
+	defer n.stMtx.RUnlock()
+	val, err := n.storage.Between(req.From, req.To)
+	if err != nil {
+		return emptyRequestKeysResponse, err
+	}
+	return &models.RequestKeysResponse{Values: val}, nil
+}
+
+func (n *Node) XMultiDelete(ctx context.Context, req *models.MultiDeleteRequest) (*models.DeleteResponse, error) {
+	n.stMtx.Lock()
+	defer n.stMtx.Unlock()
+	err := n.storage.MDelete(req.Keys...)
+	return emptyDeleteResponse, err
+}
+
+func (n *Node) Stop() {
+	close(n.shutdownCh)
+
+	// Notify successor to change its predecessor pointer to our predecessor.
+	// Do nothing if we are our own successor (i.e. we are the only node in the
+	// ring).
+	n.succMtx.RLock()
+	succ := n.successor
+	n.succMtx.RUnlock()
+
+	n.predMtx.RLock()
+	pred := n.predecessor
+	n.predMtx.RUnlock()
+
+	if succ != nil && pred != nil && n.Node.Addr != succ.Addr {
+		n.moveKeysFromLocal(pred, succ)
+		predErr := n.setPredecessorRPC(succ, pred)
+		succErr := n.setSuccessorRPC(pred, succ)
+		fmt.Println("stop errors: ", predErr, succErr)
+	}
+
+	n.transport.Stop()
+}
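
For a sense of the public storage API (`Find`/`Get`/`Set`/`Delete` route each key to the node whose ID succeeds hash(key)), here is a minimal sketch, assuming a bootstrap node is already running on `0.0.0.0:8001` as in the examples; the ID and address below are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"time"

	chord "trial/achord"
)

func main() {
	cnf := chord.DefaultConfig()
	cnf.Id = "9"              // placeholder ID
	cnf.Addr = "0.0.0.0:8009" // placeholder address
	cnf.Timeout = 10 * time.Millisecond
	cnf.MaxIdle = 100 * time.Millisecond

	join := chord.NewInode("1", "0.0.0.0:8001")
	n, err := chord.NewNode(cnf, join)
	if err != nil {
		log.Fatalln(err)
	}
	defer n.Stop()

	// Write a key, then read it back; both calls are routed to
	// whichever node's ID succeeds hash(key) on the ring.
	if err := n.Set("hello", "world"); err != nil {
		log.Fatalln(err)
	}
	val, err := n.Get("hello")
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(string(val)) // world
}
```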

+ 81 - 0
achord/storage.go

@@ -0,0 +1,81 @@
+package chord
+
+import (
+	"hash"
+	"trial/achord/models"
+)
+
+type Storage interface {
+	Get(string) ([]byte, error)
+	Set(string, string) error
+	Delete(string) error
+	Between([]byte, []byte) ([]*models.KV, error)
+	MDelete(...string) error
+}
+
+func NewMapStore(hashFunc func() hash.Hash) Storage {
+	return &mapStore{
+		data: make(map[string]string),
+		Hash: hashFunc,
+	}
+}
+
+type mapStore struct {
+	data map[string]string
+	Hash func() hash.Hash // Hash function to use
+
+}
+
+func (a *mapStore) hashKey(key string) ([]byte, error) {
+	h := a.Hash()
+	if _, err := h.Write([]byte(key)); err != nil {
+		return nil, err
+	}
+	val := h.Sum(nil)
+	return val, nil
+}
+
+func (a *mapStore) Get(key string) ([]byte, error) {
+	val, ok := a.data[key]
+	if !ok {
+		return nil, ERR_KEY_NOT_FOUND
+	}
+	return []byte(val), nil
+}
+
+func (a *mapStore) Set(key, value string) error {
+	a.data[key] = value
+	return nil
+}
+
+func (a *mapStore) Delete(key string) error {
+	delete(a.data, key)
+	return nil
+}
+
+func (a *mapStore) Between(from []byte, to []byte) ([]*models.KV, error) {
+	vals := make([]*models.KV, 0, 10)
+	for k, v := range a.data {
+		hashedKey, err := a.hashKey(k)
+		if err != nil {
+			continue
+		}
+		if betweenRightIncl(hashedKey, from, to) {
+			pair := &models.KV{
+				Key:   k,
+				Value: v,
+			}
+			vals = append(vals, pair)
+		}
+	}
+	return vals, nil
+}
+
+func (a *mapStore) MDelete(keys ...string) error {
+	for _, k := range keys {
+		delete(a.data, k)
+	}
+	return nil
+}
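
The map store can be exercised on its own; note it is not synchronized internally (Node guards it with stMtx). A minimal sketch:

```go
package main

import (
	"crypto/sha1"
	"fmt"

	chord "trial/achord"
)

func main() {
	s := chord.NewMapStore(sha1.New)

	// Plain key/value operations; keys are only hashed by Between,
	// which selects the pairs whose hashed key falls in (from, to].
	s.Set("graph:1", `{"graph_id": 1}`)
	val, err := s.Get("graph:1")
	fmt.Println(string(val), err) // {"graph_id": 1} <nil>

	s.Delete("graph:1")
	if _, err := s.Get("graph:1"); err != nil {
		fmt.Println(err) // key not found
	}
}
```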

+ 223 - 0
achord/storage_test.go

@@ -0,0 +1,223 @@
+package chord
+
+import (
+	"hash"
+	"reflect"
+	"testing"
+
+	"trial/achord/models"
+)
+
+func TestNewMapStore(t *testing.T) {
+	type args struct {
+		hashFunc func() hash.Hash
+	}
+	tests := []struct {
+		name string
+		args args
+		want Storage
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := NewMapStore(tt.args.hashFunc); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("NewMapStore() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func Test_mapStore_hashKey(t *testing.T) {
+	type fields struct {
+		data map[string]string
+		Hash func() hash.Hash
+	}
+	type args struct {
+		key string
+	}
+	tests := []struct {
+		name    string
+		fields  fields
+		args    args
+		want    []byte
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			a := &mapStore{
+				data: tt.fields.data,
+				Hash: tt.fields.Hash,
+			}
+			got, err := a.hashKey(tt.args.key)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("mapStore.hashKey() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("mapStore.hashKey() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func Test_mapStore_Get(t *testing.T) {
+	type fields struct {
+		data map[string]string
+		Hash func() hash.Hash
+	}
+	type args struct {
+		key string
+	}
+	tests := []struct {
+		name    string
+		fields  fields
+		args    args
+		want    []byte
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			a := &mapStore{
+				data: tt.fields.data,
+				Hash: tt.fields.Hash,
+			}
+			got, err := a.Get(tt.args.key)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("mapStore.Get() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("mapStore.Get() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func Test_mapStore_Set(t *testing.T) {
+	type fields struct {
+		data map[string]string
+		Hash func() hash.Hash
+	}
+	type args struct {
+		key   string
+		value string
+	}
+	tests := []struct {
+		name    string
+		fields  fields
+		args    args
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			a := &mapStore{
+				data: tt.fields.data,
+				Hash: tt.fields.Hash,
+			}
+			if err := a.Set(tt.args.key, tt.args.value); (err != nil) != tt.wantErr {
+				t.Errorf("mapStore.Set() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func Test_mapStore_Delete(t *testing.T) {
+	type fields struct {
+		data map[string]string
+		Hash func() hash.Hash
+	}
+	type args struct {
+		key string
+	}
+	tests := []struct {
+		name    string
+		fields  fields
+		args    args
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			a := &mapStore{
+				data: tt.fields.data,
+				Hash: tt.fields.Hash,
+			}
+			if err := a.Delete(tt.args.key); (err != nil) != tt.wantErr {
+				t.Errorf("mapStore.Delete() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func Test_mapStore_Between(t *testing.T) {
+	type fields struct {
+		data map[string]string
+		Hash func() hash.Hash
+	}
+	type args struct {
+		from []byte
+		to   []byte
+	}
+	tests := []struct {
+		name    string
+		fields  fields
+		args    args
+		want    []*models.KV
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			a := &mapStore{
+				data: tt.fields.data,
+				Hash: tt.fields.Hash,
+			}
+			got, err := a.Between(tt.args.from, tt.args.to)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("mapStore.Between() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("mapStore.Between() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func Test_mapStore_MDelete(t *testing.T) {
+	type fields struct {
+		data map[string]string
+		Hash func() hash.Hash
+	}
+	type args struct {
+		keys []string
+	}
+	tests := []struct {
+		name    string
+		fields  fields
+		args    args
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			a := &mapStore{
+				data: tt.fields.data,
+				Hash: tt.fields.Hash,
+			}
+			if err := a.MDelete(tt.args.keys...); (err != nil) != tt.wantErr {
+				t.Errorf("mapStore.MDelete() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}

+ 380 - 0
achord/transport.go

@@ -0,0 +1,380 @@
+package chord
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+	"trial/achord/models"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+)
+
+var (
+	emptyNode                = &models.Node{}
+	emptyRequest             = &models.ER{}
+	emptyGetResponse         = &models.GetResponse{}
+	emptySetResponse         = &models.SetResponse{}
+	emptyDeleteResponse      = &models.DeleteResponse{}
+	emptyRequestKeysResponse = &models.RequestKeysResponse{}
+)
+
+func Dial(addr string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	return grpc.Dial(addr, opts...)
+}
+
+/*
+Transport enables a node to talk to the other nodes in
+the ring
+*/
+type Transport interface {
+	Start() error
+	Stop() error
+
+	//RPC
+	GetSuccessor(*models.Node) (*models.Node, error)
+	FindSuccessor(*models.Node, []byte) (*models.Node, error)
+	GetPredecessor(*models.Node) (*models.Node, error)
+	Notify(*models.Node, *models.Node) error
+	CheckPredecessor(*models.Node) error
+	SetPredecessor(*models.Node, *models.Node) error
+	SetSuccessor(*models.Node, *models.Node) error
+
+	//Storage
+	GetKey(*models.Node, string) (*models.GetResponse, error)
+	SetKey(*models.Node, string, string) error
+	DeleteKey(*models.Node, string) error
+	RequestKeys(*models.Node, []byte, []byte) ([]*models.KV, error)
+	DeleteKeys(*models.Node, []string) error
+}
+
+type GrpcTransport struct {
+	config *Config
+
+	timeout time.Duration
+	maxIdle time.Duration
+
+	sock *net.TCPListener
+
+	pool    map[string]*grpcConn
+	poolMtx sync.RWMutex
+
+	server *grpc.Server
+
+	shutdown int32
+}
+
+func NewGrpcTransport(config *Config) (*GrpcTransport, error) {
+
+	addr := config.Addr
+	// Try to start the listener
+	listener, err := net.Listen("tcp", addr)
+	if err != nil {
+		return nil, err
+	}
+
+	pool := make(map[string]*grpcConn)
+
+	// Setup the transport
+	grp := &GrpcTransport{
+		sock:    listener.(*net.TCPListener),
+		timeout: config.Timeout,
+		maxIdle: config.MaxIdle,
+		pool:    pool,
+		config:  config,
+	}
+
+	grp.server = grpc.NewServer(config.ServerOpts...)
+
+	// Done
+	return grp, nil
+}
+
+type grpcConn struct {
+	addr       string
+	client     models.ChordClient
+	conn       *grpc.ClientConn
+	lastActive time.Time
+}
+
+func (g *grpcConn) Close() {
+	g.conn.Close()
+}
+
+func (g *GrpcTransport) registerNode(node *Node) {
+	models.RegisterChordServer(g.server, node)
+}
+
+func (g *GrpcTransport) GetServer() *grpc.Server {
+	return g.server
+}
+
+// Gets an outbound connection to a host
+func (g *GrpcTransport) getConn(
+	addr string,
+) (models.ChordClient, error) {
+
+	g.poolMtx.RLock()
+
+	if atomic.LoadInt32(&g.shutdown) == 1 {
+		g.poolMtx.RUnlock()
+		return nil, fmt.Errorf("TCP transport is shut down")
+	}
+
+	cc, ok := g.pool[addr]
+	g.poolMtx.RUnlock()
+	if ok {
+		return cc.client, nil
+	}
+
+	var conn *grpc.ClientConn
+	var err error
+	conn, err = Dial(addr, g.config.DialOpts...)
+	if err != nil {
+		return nil, err
+	}
+
+	client := models.NewChordClient(conn)
+	cc = &grpcConn{addr, client, conn, time.Now()}
+	g.poolMtx.Lock()
+	if g.pool == nil {
+		g.poolMtx.Unlock()
+		return nil, errors.New("must instantiate node before using")
+	}
+	g.pool[addr] = cc
+	g.poolMtx.Unlock()
+
+	return client, nil
+}
+
+func (g *GrpcTransport) Start() error {
+	// Start RPC server
+	go g.listen()
+
+	// Reap old connections
+	go g.reapOld()
+
+	return nil
+
+}
+
+// Returns an outbound TCP connection to the pool
+func (g *GrpcTransport) returnConn(o *grpcConn) {
+	// Update the last active time
+	o.lastActive = time.Now()
+
+	// Push back into the pool
+	g.poolMtx.Lock()
+	defer g.poolMtx.Unlock()
+	if atomic.LoadInt32(&g.shutdown) == 1 {
+		o.conn.Close()
+		return
+	}
+	g.pool[o.addr] = o
+}
+
+// Shutdown the TCP transport
+func (g *GrpcTransport) Stop() error {
+	atomic.StoreInt32(&g.shutdown, 1)
+
+	// Close all the connections
+	g.poolMtx.Lock()
+
+	g.server.Stop()
+	for _, conn := range g.pool {
+		conn.Close()
+	}
+	g.pool = nil
+
+	g.poolMtx.Unlock()
+
+	return nil
+}
+
+// Closes old outbound connections
+func (g *GrpcTransport) reapOld() {
+	ticker := time.NewTicker(60 * time.Second)
+	defer ticker.Stop()
+
+	for {
+		if atomic.LoadInt32(&g.shutdown) == 1 {
+			return
+		}
+		<-ticker.C
+		g.reap()
+	}
+}
+
+func (g *GrpcTransport) reap() {
+	g.poolMtx.Lock()
+	defer g.poolMtx.Unlock()
+	for host, conn := range g.pool {
+		if time.Since(conn.lastActive) > g.maxIdle {
+			conn.Close()
+			delete(g.pool, host)
+		}
+	}
+}
+
+// Listens for inbound connections
+func (g *GrpcTransport) listen() {
+	g.server.Serve(g.sock)
+}
+
+// GetSuccessor gets the successor of a remote node.
+func (g *GrpcTransport) GetSuccessor(node *models.Node) (*models.Node, error) {
+	client, err := g.getConn(node.Addr)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), g.timeout)
+	defer cancel()
+	return client.GetSuccessor(ctx, emptyRequest)
+}
+
+// FindSuccessor asks a remote node for the successor of the given ID.
+func (g *GrpcTransport) FindSuccessor(node *models.Node, id []byte) (*models.Node, error) {
+	client, err := g.getConn(node.Addr)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), g.timeout)
+	defer cancel()
+	return client.FindSuccessor(ctx, &models.ID{Id: id})
+}
+
+// GetPredecessor gets the predecessor of a remote node.
+func (g *GrpcTransport) GetPredecessor(node *models.Node) (*models.Node, error) {
+	client, err := g.getConn(node.Addr)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), g.timeout)
+	defer cancel()
+	return client.GetPredecessor(ctx, emptyRequest)
+}
+
+func (g *GrpcTransport) SetPredecessor(node *models.Node, pred *models.Node) error {
+	client, err := g.getConn(node.Addr)
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), g.timeout)
+	defer cancel()
+	_, err = client.SetPredecessor(ctx, pred)
+	return err
+}
+
+func (g *GrpcTransport) SetSuccessor(node *models.Node, succ *models.Node) error {
+	client, err := g.getConn(node.Addr)
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), g.timeout)
+	defer cancel()
+	_, err = client.SetSuccessor(ctx, succ)
+	return err
+}
+
+func (g *GrpcTransport) Notify(node, pred *models.Node) error {
+	client, err := g.getConn(node.Addr)
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), g.timeout)
+	defer cancel()
+	_, err = client.Notify(ctx, pred)
+	return err
+
+}
+
+func (g *GrpcTransport) CheckPredecessor(node *models.Node) error {
+	client, err := g.getConn(node.Addr)
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), g.timeout)
+	defer cancel()
+	_, err = client.CheckPredecessor(ctx, &models.ID{Id: node.Id})
+	return err
+}
+
+func (g *GrpcTransport) GetKey(node *models.Node, key string) (*models.GetResponse, error) {
+	client, err := g.getConn(node.Addr)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), g.timeout)
+	defer cancel()
+	return client.XGet(ctx, &models.GetRequest{Key: key})
+}
+
+func (g *GrpcTransport) SetKey(node *models.Node, key, value string) error {
+	client, err := g.getConn(node.Addr)
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), g.timeout)
+	defer cancel()
+	_, err = client.XSet(ctx, &models.SetRequest{Key: key, Value: value})
+	return err
+}
+
+func (g *GrpcTransport) DeleteKey(node *models.Node, key string) error {
+	client, err := g.getConn(node.Addr)
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), g.timeout)
+	defer cancel()
+	_, err = client.XDelete(ctx, &models.DeleteRequest{Key: key})
+	return err
+}
+
+func (g *GrpcTransport) RequestKeys(node *models.Node, from, to []byte) ([]*models.KV, error) {
+	client, err := g.getConn(node.Addr)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), g.timeout)
+	defer cancel()
+	val, err := client.XRequestKeys(
+		ctx, &models.RequestKeysRequest{From: from, To: to},
+	)
+	if err != nil {
+		return nil, err
+	}
+	return val.Values, nil
+}
+
+func (g *GrpcTransport) DeleteKeys(node *models.Node, keys []string) error {
+	client, err := g.getConn(node.Addr)
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), g.timeout)
+	defer cancel()
+	_, err = client.XMultiDelete(
+		ctx, &models.MultiDeleteRequest{Keys: keys},
+	)
+	return err
+}
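
Outbound connections are pooled per address: `getConn` reuses a live `grpcConn` when one exists, `reapOld` closes connections that have been idle longer than `MaxIdle`, and `Stop` flips the shutdown flag before draining the pool so no new connections are handed out.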

+ 56 - 0
achord/util.go

@@ -0,0 +1,56 @@
+package chord
+
+import (
+	"bytes"
+	"crypto/sha1"
+	"errors"
+	"math/rand"
+	"time"
+)
+
+var (
+	ERR_NO_SUCCESSOR  = errors.New("cannot find successor")
+	ERR_NODE_EXISTS   = errors.New("node with id already exists")
+	ERR_KEY_NOT_FOUND = errors.New("key not found")
+)
+
+func isEqual(a, b []byte) bool {
+	return bytes.Equal(a, b)
+}
+
+func isPowerOfTwo(num int) bool {
+	return (num != 0) && ((num & (num - 1)) == 0)
+}
+
+func randStabilize(min, max time.Duration) time.Duration {
+	r := rand.Float64()
+	return time.Duration((r * float64(max-min)) + float64(min))
+}
+
+// betweenRightIncl checks if key is in (a, b], i.e. between a and b, right inclusive
+func betweenRightIncl(key, a, b []byte) bool {
+	return between(key, a, b) || bytes.Equal(key, b)
+}
+
+// between checks if key is in (a, b), i.e. strictly between two IDs,
+// accounting for the interval wrapping past zero when a > b
+func between(key, a, b []byte) bool {
+	switch bytes.Compare(a, b) {
+	case 1: // the interval wraps around the ring
+		return bytes.Compare(a, key) == -1 || bytes.Compare(b, key) == 1
+	case -1:
+		return bytes.Compare(a, key) == -1 && bytes.Compare(b, key) == 1
+	case 0:
+		return !bytes.Equal(a, key)
+	}
+	return false
+}
+
+// For testing
+func GetHashID(key string) []byte {
+	h := sha1.New()
+	if _, err := h.Write([]byte(key)); err != nil {
+		return nil
+	}
+	val := h.Sum(nil)
+	return val
+}
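
The subtle case is the interval wrapping past zero on the ring (a > b). A self-contained sketch mirroring `between` above:

```go
package main

import (
	"bytes"
	"fmt"
)

// Strict (a, b) interval test on the ring, mirroring util.go.
func between(key, a, b []byte) bool {
	switch bytes.Compare(a, b) {
	case 1: // the interval wraps around the ring
		return bytes.Compare(a, key) == -1 || bytes.Compare(b, key) == 1
	case -1:
		return bytes.Compare(a, key) == -1 && bytes.Compare(b, key) == 1
	case 0:
		return !bytes.Equal(a, key)
	}
	return false
}

func main() {
	a, b := []byte{0xF0}, []byte{0x10}       // a > b: the interval wraps past zero
	fmt.Println(between([]byte{0xFF}, a, b)) // true: after a, before the wrap
	fmt.Println(between([]byte{0x05}, a, b)) // true: after the wrap, before b
	fmt.Println(between([]byte{0x80}, a, b)) // false: outside the interval
}
```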

+ 142 - 0
achord/util_test.go

@@ -0,0 +1,142 @@
+package chord
+
+import (
+	"strconv"
+	"testing"
+	"time"
+)
+
+func Test_isEqual(t *testing.T) {
+	type args struct {
+		a []byte
+		b []byte
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := isEqual(tt.args.a, tt.args.b); got != tt.want {
+				t.Errorf("isEqual() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func Test_isPowerOfTwo(t *testing.T) {
+	type args struct {
+		num int
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := isPowerOfTwo(tt.args.num); got != tt.want {
+				t.Errorf("isPowerOfTwo() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func Test_randStabilize(t *testing.T) {
+	type args struct {
+		min time.Duration
+		max time.Duration
+	}
+	tests := []struct {
+		name string
+		args args
+		want time.Duration
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := randStabilize(tt.args.min, tt.args.max); got != tt.want {
+				t.Errorf("randStabilize() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestRL(t *testing.T) {
+	t.Parallel()
+
+	min := GetHashID("0.0.0.0:8081")
+	max := GetHashID("0.0.0.0:8083")
+	for i := 2; i < 100; i++ {
+		val := strconv.Itoa(i)
+		key := GetHashID(val)
+		if got := betweenRightIncl(key, min, max); got != true {
+			t.Errorf("betweenRightIncl() %s %x = %v, want %v", val, key, got, true)
+		}
+	}
+}
+
+func Test_betweenRightIncl(t *testing.T) {
+	t.Parallel()
+
+	type args struct {
+		key []byte
+		a   []byte
+		b   []byte
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{"1", args{[]byte{1, 0, 0, 0}, []byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}}, true},
+		{"2", args{[]byte{1, 1, 1, 1}, []byte{1, 1, 1, 0}, []byte{1, 1, 1, 1}}, true},
+		{"3", args{[]byte{1, 1, 1, 1, 1}, []byte{0}, []byte{1, 1, 1, 1}}, false},
+		{"4", args{[]byte{1, 1, 1, 1, 1}, []byte{0}, []byte{1, 1, 1, 1, 1, 1}}, true},
+		{
+			"5",
+			args{
+				[]byte{4, 40, 171},
+				[]byte{53, 106, 25, 43, 121, 19, 176, 76, 84, 87, 77, 24, 194, 141, 70, 230, 57, 84, 40, 171},
+				[]byte{4, 40, 171},
+			},
+			true,
+		},
+		{"6", args{GetHashID("11"), GetHashID("1"), GetHashID("20")}, true},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := betweenRightIncl(tt.args.key, tt.args.a, tt.args.b); got != tt.want {
+				t.Errorf("betweenRightIncl() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func Test_between(t *testing.T) {
+	type args struct {
+		key []byte
+		a   []byte
+		b   []byte
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := between(tt.args.key, tt.args.a, tt.args.b); got != tt.want {
+				t.Errorf("between() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

chord/chord.go → go-chord/chord.go


+ 1 - 1
chord/chord_test/chord_test.go

@@ -4,7 +4,7 @@ import (
 	"runtime"
 	"testing"
 	"time"
-	"trial/chord"
+	"trial/go-chord"
 )
 
 type MultiLocalTrans struct {

+ 1 - 1
chord/chord_test/iter_closest_test.go

@@ -3,7 +3,7 @@ package chord_test
 import (
 	"math/big"
 	"testing"
-	"trial/chord"
+	"trial/go-chord"
 )
 
 func TestNextClosest(t *testing.T) {

+ 4 - 3
chord/chord_test/net_test.go

@@ -4,16 +4,17 @@ import (
 	"fmt"
 	"testing"
 	"time"
-	"trial/chord"
+	"trial/go-chord"
+	"trial/go-chord/tcptransport"
 )
 
-func prepRing(port int) (*chord.Config, *chord.TCPTransport, error) {
+func prepRing(port int) (*chord.Config, *tcptransport.TCPTransport, error) {
 	listen := fmt.Sprintf("localhost:%d", port)
 	conf := chord.DefaultConfig(listen)
 	conf.StabilizeMin = time.Duration(15 * time.Millisecond)
 	conf.StabilizeMax = time.Duration(45 * time.Millisecond)
 	timeout := time.Duration(20 * time.Millisecond)
-	trans, err := chord.InitTCPTransport(listen, timeout)
+	trans, err := tcptransport.InitTCPTransport(listen, timeout)
 	if err != nil {
 		return nil, nil, err
 	}

+ 1 - 1
chord/chord_test/ring_test.go

@@ -6,7 +6,7 @@ import (
 	"sort"
 	"testing"
 	"time"
-	"trial/chord"
+	"trial/go-chord"
 )
 
 type MockDelegate struct {

+ 1 - 1
chord/chord_test/transport_test.go

@@ -3,7 +3,7 @@ package chord_test
 import (
 	"bytes"
 	"testing"
-	"trial/chord"
+	"trial/go-chord"
 )
 
 type MockVnodeRPC struct {

+ 1 - 1
chord/chord_test/util_test.go

@@ -4,7 +4,7 @@ import (
 	"errors"
 	"testing"
 	"time"
-	"trial/chord"
+	"trial/go-chord"
 )
 
 func TestRandStabilize(t *testing.T) {

+ 1 - 1
chord/chord_test/vnode_test.go

@@ -6,7 +6,7 @@ import (
 	"sort"
 	"testing"
 	"time"
-	"trial/chord"
+	"trial/go-chord"
 )
 
 func makeVnode() *chord.LocalVnode {

chord/iter_closest.go → go-chord/iter_closest.go


chord/ring.go → go-chord/ring.go


+ 28 - 27
chord/net.go

@@ -1,4 +1,4 @@
-package chord
+package tcptransport
 
 import (
 	"encoding/gob"
@@ -8,6 +8,7 @@ import (
 	"sync"
 	"sync/atomic"
 	"time"
+	"trial/go-chord"
 )
 
 /*
@@ -25,7 +26,7 @@ type TCPTransport struct {
 	timeout  time.Duration
 	maxIdle  time.Duration
 	lock     sync.RWMutex
-	local    map[string]*localRPC
+	local    map[string]*chord.LocalRPC
 	inbound  map[*net.TCPConn]struct{}
 	poolLock sync.Mutex
 	pool     map[string][]*tcpOutConn
@@ -63,23 +64,23 @@ type tcpBodyString struct {
 	S string
 }
 type tcpBodyVnode struct {
-	Vn *Vnode
+	Vn *chord.Vnode
 }
 type tcpBodyTwoVnode struct {
-	Target *Vnode
-	Vn     *Vnode
+	Target *chord.Vnode
+	Vn     *chord.Vnode
 }
 type tcpBodyFindSuc struct {
-	Target *Vnode
+	Target *chord.Vnode
 	Num    int
 	Key    []byte
 }
 type tcpBodyVnodeError struct {
-	Vnode *Vnode
+	Vnode *chord.Vnode
 	Err   error
 }
 type tcpBodyVnodeListError struct {
-	Vnodes []*Vnode
+	Vnodes []*chord.Vnode
 	Err    error
 }
 type tcpBodyBoolError struct {
@@ -97,7 +98,7 @@ func InitTCPTransport(listen string, timeout time.Duration) (*TCPTransport, erro
 	}
 
 	// allocate maps
-	local := make(map[string]*localRPC)
+	local := make(map[string]*chord.LocalRPC)
 	inbound := make(map[*net.TCPConn]struct{})
 	pool := make(map[string][]*tcpOutConn)
 
@@ -123,13 +124,13 @@ func InitTCPTransport(listen string, timeout time.Duration) (*TCPTransport, erro
 }
 
 // Checks for a local vnode
-func (t *TCPTransport) get(vn *Vnode) (VnodeRPC, bool) {
+func (t *TCPTransport) get(vn *chord.Vnode) (chord.VnodeRPC, bool) {
 	key := vn.String()
 	t.lock.RLock()
 	defer t.lock.RUnlock()
 	w, ok := t.local[key]
 	if ok {
-		return w.obj, ok
+		return w.Obj, ok
 	} else {
 		return nil, ok
 	}
@@ -200,7 +201,7 @@ func (t *TCPTransport) setupConn(c *net.TCPConn) {
 }
 
 // Gets a list of the Vnodes on the box
-func (t *TCPTransport) ListVnodes(host string) ([]*Vnode, error) {
+func (t *TCPTransport) ListVnodes(host string) ([]*chord.Vnode, error) {
 	// Get a conn
 	out, err := t.getConn(host)
 	if err != nil {
@@ -208,7 +209,7 @@ func (t *TCPTransport) ListVnodes(host string) ([]*Vnode, error) {
 	}
 
 	// Response channels
-	respChan := make(chan []*Vnode, 1)
+	respChan := make(chan []*chord.Vnode, 1)
 	errChan := make(chan error, 1)
 
 	go func() {
@@ -250,7 +251,7 @@ func (t *TCPTransport) ListVnodes(host string) ([]*Vnode, error) {
 }
 
 // Ping a Vnode, check for liveness
-func (t *TCPTransport) Ping(vn *Vnode) (bool, error) {
+func (t *TCPTransport) Ping(vn *chord.Vnode) (bool, error) {
 	// Get a conn
 	out, err := t.getConn(vn.Host)
 	if err != nil {
@@ -301,14 +302,14 @@ func (t *TCPTransport) Ping(vn *Vnode) (bool, error) {
 }
 
 // Request a nodes predecessor
-func (t *TCPTransport) GetPredecessor(vn *Vnode) (*Vnode, error) {
+func (t *TCPTransport) GetPredecessor(vn *chord.Vnode) (*chord.Vnode, error) {
 	// Get a conn
 	out, err := t.getConn(vn.Host)
 	if err != nil {
 		return nil, err
 	}
 
-	respChan := make(chan *Vnode, 1)
+	respChan := make(chan *chord.Vnode, 1)
 	errChan := make(chan error, 1)
 
 	go func() {
@@ -351,14 +352,14 @@ func (t *TCPTransport) GetPredecessor(vn *Vnode) (*Vnode, error) {
 }
 
 // Notify our successor of ourselves
-func (t *TCPTransport) Notify(target, self *Vnode) ([]*Vnode, error) {
+func (t *TCPTransport) Notify(target, self *chord.Vnode) ([]*chord.Vnode, error) {
 	// Get a conn
 	out, err := t.getConn(target.Host)
 	if err != nil {
 		return nil, err
 	}
 
-	respChan := make(chan []*Vnode, 1)
+	respChan := make(chan []*chord.Vnode, 1)
 	errChan := make(chan error, 1)
 
 	go func() {
@@ -401,14 +402,14 @@ func (t *TCPTransport) Notify(target, self *Vnode) ([]*Vnode, error) {
 }
 
 // Find a successor
-func (t *TCPTransport) FindSuccessors(vn *Vnode, n int, k []byte) ([]*Vnode, error) {
+func (t *TCPTransport) FindSuccessors(vn *chord.Vnode, n int, k []byte) ([]*chord.Vnode, error) {
 	// Get a conn
 	out, err := t.getConn(vn.Host)
 	if err != nil {
 		return nil, err
 	}
 
-	respChan := make(chan []*Vnode, 1)
+	respChan := make(chan []*chord.Vnode, 1)
 	errChan := make(chan error, 1)
 
 	go func() {
@@ -451,7 +452,7 @@ func (t *TCPTransport) FindSuccessors(vn *Vnode, n int, k []byte) ([]*Vnode, err
 }
 
 // Clears a predecessor if it matches a given vnode. Used to leave.
-func (t *TCPTransport) ClearPredecessor(target, self *Vnode) error {
+func (t *TCPTransport) ClearPredecessor(target, self *chord.Vnode) error {
 	// Get a conn
 	out, err := t.getConn(target.Host)
 	if err != nil {
@@ -501,7 +502,7 @@ func (t *TCPTransport) ClearPredecessor(target, self *Vnode) error {
 }
 
 // Instructs a node to skip a given successor. Used to leave.
-func (t *TCPTransport) SkipSuccessor(target, self *Vnode) error {
+func (t *TCPTransport) SkipSuccessor(target, self *chord.Vnode) error {
 	// Get a conn
 	out, err := t.getConn(target.Host)
 	if err != nil {
@@ -551,10 +552,10 @@ func (t *TCPTransport) SkipSuccessor(target, self *Vnode) error {
 }
 
 // Register for RPC callbacks
-func (t *TCPTransport) Register(v *Vnode, o VnodeRPC) {
+func (t *TCPTransport) Register(v *chord.Vnode, o chord.VnodeRPC) {
 	key := v.String()
 	t.lock.Lock()
-	t.local[key] = &localRPC{v, o}
+	t.local[key] = &chord.LocalRPC{v, o}
 	t.lock.Unlock()
 }
 
@@ -685,12 +686,12 @@ func (t *TCPTransport) handleConn(conn *net.TCPConn) {
 			}
 
 			// Generate all the local clients
-			res := make([]*Vnode, 0, len(t.local))
+			res := make([]*chord.Vnode, 0, len(t.local))
 
 			// Build list
 			t.lock.RLock()
 			for _, v := range t.local {
-				res = append(res, v.vnode)
+				res = append(res, v.Vnode)
 			}
 			t.lock.RUnlock()
 
@@ -810,7 +811,7 @@ func (t *TCPTransport) handleConn(conn *net.TCPConn) {
 }
 
 // Trims the slice to remove nil elements
-func trimSlice(vn []*Vnode) []*Vnode {
+func trimSlice(vn []*chord.Vnode) []*chord.Vnode {
 	if vn == nil {
 		return vn
 	}

+ 8 - 8
chord/transport.go

@@ -6,9 +6,9 @@ import (
 )
 
 // Wraps vnode and object
-type localRPC struct {
-	vnode *Vnode
-	obj   VnodeRPC
+type LocalRPC struct {
+	*Vnode
+	Obj VnodeRPC
 }
 
 // LocalTransport is used to provides fast routing to Vnodes running
@@ -18,7 +18,7 @@ type LocalTransport struct {
 	host   string
 	Remote Transport
 	lock   sync.RWMutex
-	Local  map[string]*localRPC
+	Local  map[string]*LocalRPC
 }
 
 // Creates a local transport to wrap a remote transport
@@ -28,7 +28,7 @@ func InitLocalTransport(remote Transport) Transport {
 		remote = &BlackholeTransport{}
 	}
 
-	local := make(map[string]*localRPC)
+	local := make(map[string]*LocalRPC)
 	return &LocalTransport{Remote: remote, Local: local}
 }
 
@@ -39,7 +39,7 @@ func (lt *LocalTransport) get(vn *Vnode) (VnodeRPC, bool) {
 	defer lt.lock.RUnlock()
 	w, ok := lt.Local[key]
 	if ok {
-		return w.obj, ok
+		return w.Obj, ok
 	} else {
 		return nil, ok
 	}
@@ -54,7 +54,7 @@ func (lt *LocalTransport) ListVnodes(host string) ([]*Vnode, error) {
 		// Build list
 		lt.lock.RLock()
 		for _, v := range lt.Local {
-			res = append(res, v.vnode)
+			res = append(res, v.Vnode)
 		}
 		lt.lock.RUnlock()
 
@@ -148,7 +148,7 @@ func (lt *LocalTransport) Register(v *Vnode, o VnodeRPC) {
 	key := v.String()
 	lt.lock.Lock()
 	lt.host = v.Host
-	lt.Local[key] = &localRPC{v, o}
+	lt.Local[key] = &LocalRPC{v, o}
 	lt.lock.Unlock()
 
 	// Register with remote transport

chord/util.go → go-chord/util.go


chord/vnode.go → go-chord/vnode.go