From 01d75e9fa74978c8fba1609a15abac41a50d7456 Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Mon, 5 Aug 2024 19:20:22 +1000 Subject: [PATCH 01/15] peer: implement HasBlock sync: test PeerGetCommonAncestor --- core/nakamoto/netpeer.go | 24 ++++- core/nakamoto/node_test.go | 103 ---------------------- core/nakamoto/sync.go | 10 +-- core/nakamoto/sync_test.go | 174 ++++++++++++++++++++++++++++++++++++- core/nakamoto/types.go | 2 +- 5 files changed, 202 insertions(+), 111 deletions(-) diff --git a/core/nakamoto/netpeer.go b/core/nakamoto/netpeer.go index 02436b2..cc5b778 100644 --- a/core/nakamoto/netpeer.go +++ b/core/nakamoto/netpeer.go @@ -50,6 +50,7 @@ type PeerCore struct { OnGetTip func(msg GetTipMessage) (BlockHeader, error) OnSyncGetTipAtDepth func(msg SyncGetTipAtDepthMessage) (SyncGetTipAtDepthReply, error) OnSyncGetData func(msg SyncGetBlockDataMessage) (SyncGetBlockDataReply, error) + OnHasBlock func(msg HasBlockMessage) (bool, error) peerLogger log.Logger } @@ -213,6 +214,27 @@ func NewPeerCore(config PeerConfig) *PeerCore { return reply, nil }) + p.server.RegisterMesageHandler("has_block", func(message []byte) (interface{}, error) { + var msg HasBlockMessage + if err := json.Unmarshal(message, &msg); err != nil { + return nil, err + } + + if p.OnHasBlock == nil { + return nil, fmt.Errorf("HasBlock callback not set") + } + + has, err := p.OnHasBlock(msg) + if err != nil { + return nil, err + } + + return HasBlockReply{ + Type: "has_block", + Has: has, + }, nil + }) + p.server.RegisterMesageHandler("gossip_peers", func(message []byte) (interface{}, error) { var msg GossipPeersMessage if err := json.Unmarshal(message, &msg); err != nil { @@ -411,7 +433,7 @@ func (p *PeerCore) SyncGetBlockData(peer Peer, fromBlock [32]byte, heights core. 
func (p *PeerCore) HasBlock(peer Peer, blockhash [32]byte) (bool, error) { msg := HasBlockMessage{ Type: "has_block", - BlockHash: fmt.Sprintf("%x", blockhash), + BlockHash: blockhash, } res, err := SendMessageToPeer(peer.Addr, msg, &p.peerLogger) if err != nil { diff --git a/core/nakamoto/node_test.go b/core/nakamoto/node_test.go index 8d198dc..8a9ec59 100644 --- a/core/nakamoto/node_test.go +++ b/core/nakamoto/node_test.go @@ -3,7 +3,6 @@ package nakamoto import ( "encoding/binary" "encoding/json" - "math" "testing" "time" @@ -220,108 +219,6 @@ func TestNodeSyncMissingBlocks(t *testing.T) { assert.Equal(tip1, tip2) } -// One part of the block sync algorithm is determining the common ancestor of two chains: -// -// Chain 1: the chain we have on our local node. -// Chain 2: the chain of a remote peer who has a more recent tip. -// -// We determine the common ancestor in order to download the most minimal set of block headers required to sync to the latest tip. -// There are a few approaches to this: -// - naive approach: download all headers from the tip to the remote peer's genesis block, and then compare the headers to find the common ancestor. This is O(N) where N is the length of the longest chain. -// - naive approach 2: send the peer the block we have at (height - 6), which is according to Nakamoto's calculations, "probabilistically final" and unlikely to be reorg-ed. Ask them if they have this block, and if so, sync the remaining 6 blocks. This fails when there is ongoing volatile reorgs, as well as doesn't work for a full sync. -// - slightly less naive approach: send the peer "checkpoints" at a regular interval. So for the full list of block hashes, we send H/I where I is the interval size, and use this to sync. This is O(H/I). -// - slightly slightly less naive approach: send the peer a list of "checkpoints" at exponentially decreasing intervals. This is smart since the finality of a block increases exponentially with the number of confirmations. 
This is O(H/log(H)). -// - the most efficient approach. Interactively binary search with the node. At each step of the binary search, we split their view of the chain hash list in half, and ask them if they have the block at the midpoint. -// -// Let me explain the binary search. -// <------------------------> our view -// <------------------------> their view -// n=1 -// <------------|-----------> their view -// <------------------|-----> their view -// <---------------|--------> their view -// At each iteration we ask: do you have a block at height/2 with this hash? -// - if the answer is yes, we move to the right half. -// - if the answer is no, we move to the left half. -// We continue until the length of our search space = 1. -// -// Now for some modelling. -// Finding the common ancestor is O(log N). Each message is (blockhash [32]byte, height uint64). Message size is 40 bytes. -// Total networking cost is O(40 * log N), bitcoin's chain height is 850585, O(40 * log 850585) = O(40 * 20) = O(800) bytes. -// Less than 1KB of data to find common ancestor. -func TestInteractiveBinarySearchFindCommonAncestor(t *testing.T) { - local_chainhashes := [][32]byte{} - remote_chainhashes := [][32]byte{} - - // Populate blockhashes for test. - for i := 0; i < 100; i++ { - local_chainhashes = append(local_chainhashes, uint64To32ByteArray(uint64(i))) - remote_chainhashes = append(remote_chainhashes, uint64To32ByteArray(uint64(i))) - } - // Set remote to branch at block height 90. - for i := 90; i < 100; i++ { - remote_chainhashes[i] = uint64To32ByteArray(uint64(i + 1000)) - } - - // Print both for debugging. - t.Logf("Local chainhashes:\n") - for _, x := range local_chainhashes { - t.Logf("%x", x) - } - t.Logf("\n") - t.Logf("Remote chainhashes:\n") - for _, x := range remote_chainhashes { - t.Logf("%x", x) - } - t.Logf("\n") - - // Peer method. 
- hasBlockhash := func(blockhash [32]byte) bool { - for _, x := range remote_chainhashes { - if x == blockhash { - return true - } - } - return false - } - - // - // Find the common ancestor. - // - - // This is a classical binary search algorithm. - floor := 0 - ceil := len(local_chainhashes) - n_iterations := 0 - - for (floor + 1) < ceil { - guess_idx := (floor + ceil) / 2 - guess_value := local_chainhashes[guess_idx] - - t.Logf("Iteration %d: floor=%d, ceil=%d, guess_idx=%d, guess_value=%x", n_iterations, floor, ceil, guess_idx, guess_value) - n_iterations += 1 - - // Send our tip's blockhash - // Peer responds with "SEEN" or "NOT SEEN" - // If "SEEN", we move to the right half. - // If "NOT SEEN", we move to the left half. - if hasBlockhash(guess_value) { - // Move to the right half. - floor = guess_idx - } else { - // Move to the left half. - ceil = guess_idx - } - } - - ancestor := local_chainhashes[floor] - t.Logf("Common ancestor: %x", ancestor) - t.Logf("Found in %d iterations.", n_iterations) - - expectedIterations := math.Ceil(math.Log2(float64(len(local_chainhashes)))) - t.Logf("Expected iterations: %f", expectedIterations) -} - func uint64To32ByteArray(num uint64) [32]byte { var arr [32]byte binary.BigEndian.PutUint64(arr[24:], num) // Store the uint64 in the last 8 bytes of the array diff --git a/core/nakamoto/sync.go b/core/nakamoto/sync.go index dfe8dbe..dffb6e8 100644 --- a/core/nakamoto/sync.go +++ b/core/nakamoto/sync.go @@ -396,7 +396,7 @@ func (n *Node) sync_getBestTipFromPeers() [32]byte { // Computes the common ancestor of our local canonical chain and a remote peer's canonical chain through an interactive binary search. // O(log N * query_size). -func (n *Node) sync_computeCommonAncestorWithPeer(peer Peer, local_chainhashes *[][32]byte) [32]byte { +func GetPeerCommonAncestor(localPeer *PeerCore, remotePeer Peer, local_chainhashes *[][32]byte) (ancestor [32]byte, nIterations int, err error) { syncLog := NewLogger("node", "sync") // 6a. 
Compute the common ancestor (interactive binary search). @@ -416,10 +416,10 @@ func (n *Node) sync_computeCommonAncestorWithPeer(peer Peer, local_chainhashes * // Peer responds with "SEEN" or "NOT SEEN" // If "SEEN", we move to the right half. // If "NOT SEEN", we move to the left half. - has, err := n.Peer.HasBlock(peer, guess_value) + has, err := localPeer.HasBlock(remotePeer, guess_value) if err != nil { syncLog.Printf("Failed to get block from peer: %s\n", err) - continue + return [32]byte{}, 0, err } if has { // Move to the right half. @@ -430,8 +430,8 @@ func (n *Node) sync_computeCommonAncestorWithPeer(peer Peer, local_chainhashes * } } - ancestor := (*local_chainhashes)[floor] + ancestor = (*local_chainhashes)[floor] syncLog.Printf("Common ancestor: %x", ancestor) syncLog.Printf("Found in %d iterations.", n_iterations) - return ancestor + return ancestor, n_iterations, nil } diff --git a/core/nakamoto/sync_test.go b/core/nakamoto/sync_test.go index 8bc81c7..72a0e91 100644 --- a/core/nakamoto/sync_test.go +++ b/core/nakamoto/sync_test.go @@ -3,6 +3,7 @@ package nakamoto import ( "context" "fmt" + "math" "testing" "time" @@ -470,7 +471,6 @@ func TestSyncSyncDownloadDataHeaders(t *testing.T) { func TestSyncSync(t *testing.T) { // After getting the tips, then we need to divide them into work units. - assert := assert.New(t) peers := setupTestNetwork(t) @@ -530,5 +530,177 @@ func TestSyncSync(t *testing.T) { assertIntEqual(t, 0, downloaded2) downloaded3 := node3.Sync() assertIntEqual(t, 0, downloaded3) +} + + +// One part of the block sync algorithm is determining the common ancestor of two chains: +// +// Chain 1: the chain we have on our local node. +// Chain 2: the chain of a remote peer who has a more recent tip. +// +// We determine the common ancestor in order to download the most minimal set of block headers required to sync to the latest tip. 
+// There are a few approaches to this: +// - naive approach: download all headers from the tip to the remote peer's genesis block, and then compare the headers to find the common ancestor. This is O(N) where N is the length of the longest chain. +// - naive approach 2: send the peer the block we have at (height - 6), which is according to Nakamoto's calculations, "probabilistically final" and unlikely to be reorg-ed. Ask them if they have this block, and if so, sync the remaining 6 blocks. This fails when there is ongoing volatile reorgs, as well as doesn't work for a full sync. +// - slightly less naive approach: send the peer "checkpoints" at a regular interval. So for the full list of block hashes, we send H/I where I is the interval size, and use this to sync. This is O(H/I). +// - slightly slightly less naive approach: send the peer a list of "checkpoints" at exponentially decreasing intervals. This is smart since the finality of a block increases exponentially with the number of confirmations. This is O(H/log(H)). +// - the most efficient approach. Interactively binary search with the node. At each step of the binary search, we split their view of the chain hash list in half, and ask them if they have the block at the midpoint. +// +// Let me explain the binary search. +// <------------------------> our view +// <------------------------> their view +// n=1 +// <------------|-----------> their view +// <------------------|-----> their view +// <---------------|--------> their view +// At each iteration we ask: do you have a block at height/2 with this hash? +// - if the answer is yes, we move to the right half. +// - if the answer is no, we move to the left half. +// We continue until the length of our search space = 1. +// +// Now for some modelling. +// Finding the common ancestor is O(log N). Each message is (blockhash [32]byte, height uint64). Message size is 40 bytes. 
+// Total networking cost is O(40 * log N), bitcoin's chain height is 850585, O(40 * log 850585) = O(40 * 20) = O(800) bytes. +// Less than 1KB of data to find common ancestor. +func TestInteractiveBinarySearchFindCommonAncestor(t *testing.T) { + local_chainhashes := [][32]byte{} + remote_chainhashes := [][32]byte{} + + // Populate blockhashes for test. + for i := 0; i < 100; i++ { + local_chainhashes = append(local_chainhashes, uint64To32ByteArray(uint64(i))) + remote_chainhashes = append(remote_chainhashes, uint64To32ByteArray(uint64(i))) + } + // Set remote to branch at block height 90. + for i := 90; i < 100; i++ { + remote_chainhashes[i] = uint64To32ByteArray(uint64(i + 1000)) + } + // Print both for debugging. + t.Logf("Local chainhashes:\n") + for _, x := range local_chainhashes { + t.Logf("%x", x) + } + t.Logf("\n") + t.Logf("Remote chainhashes:\n") + for _, x := range remote_chainhashes { + t.Logf("%x", x) + } + t.Logf("\n") + + // Peer method. + hasBlockhash := func(blockhash [32]byte) bool { + for _, x := range remote_chainhashes { + if x == blockhash { + return true + } + } + return false + } + + // + // Find the common ancestor. + // + + // This is a classical binary search algorithm. + floor := 0 + ceil := len(local_chainhashes) + n_iterations := 0 + + for (floor + 1) < ceil { + guess_idx := (floor + ceil) / 2 + guess_value := local_chainhashes[guess_idx] + + t.Logf("Iteration %d: floor=%d, ceil=%d, guess_idx=%d, guess_value=%x", n_iterations, floor, ceil, guess_idx, guess_value) + n_iterations += 1 + + // Send our tip's blockhash + // Peer responds with "SEEN" or "NOT SEEN" + // If "SEEN", we move to the right half. + // If "NOT SEEN", we move to the left half. + if hasBlockhash(guess_value) { + // Move to the right half. + floor = guess_idx + } else { + // Move to the left half. 
+ ceil = guess_idx + } + } + + ancestor := local_chainhashes[floor] + t.Logf("Common ancestor: %x", ancestor) + t.Logf("Found in %d iterations.", n_iterations) + + expectedIterations := math.Ceil(math.Log2(float64(len(local_chainhashes)))) + t.Logf("Expected iterations: %f", expectedIterations) } + +func TestGetPeerCommonAncestor(t *testing.T) { + local_chainhashes := [][32]byte{} + remote_chainhashes := [][32]byte{} + + // Populate blockhashes for test. + for i := 0; i < 100; i++ { + local_chainhashes = append(local_chainhashes, uint64To32ByteArray(uint64(i))) + remote_chainhashes = append(remote_chainhashes, uint64To32ByteArray(uint64(i))) + } + // Set remote to branch at block height 90. + for i := 90; i < 100; i++ { + remote_chainhashes[i] = uint64To32ByteArray(uint64(i + 1000)) + } + + // Print both for debugging. + t.Logf("Local chainhashes:\n") + for _, x := range local_chainhashes { + t.Logf("%x", x) + } + t.Logf("\n") + t.Logf("Remote chainhashes:\n") + for _, x := range remote_chainhashes { + t.Logf("%x", x) + } + t.Logf("\n") + + // Peer mock. + peer1 := NewPeerCore(PeerConfig{ipAddress: "127.0.0.1", port: getRandomPort()}) + peer2 := NewPeerCore(PeerConfig{ipAddress: "127.0.0.1", port: getRandomPort()}) + + go peer1.Start() + go peer2.Start() + + // Wait for peers online. + waitForPeersOnline([]*PeerCore{peer1, peer2}) + + // Bootstrap. + peer1.Bootstrap([]string{peer2.GetLocalAddr()}) + + peer2.OnHasBlock = func(msg HasBlockMessage) (bool, error) { + blockhash := msg.BlockHash + for _, x := range remote_chainhashes { + if x == blockhash { + return true, nil + } + } + return false, nil + } + + // + // Find the common ancestor. 
+ // + + remotePeer := peer1.GetPeers()[0] + ancestor, n_iterations, err := GetPeerCommonAncestor(peer1, remotePeer, &local_chainhashes) + if err != nil { + t.Fatalf("Error finding common ancestor: %s", err) + } + t.Logf("Common ancestor: %x", ancestor) + t.Logf("Found in %d iterations.", n_iterations) + + expectedIterations := math.Ceil(math.Log2(float64(len(local_chainhashes)))) + t.Logf("Expected iterations: %f", expectedIterations) + + // Now we assert the common ancestor. + assert := assert.New(t) + assert.Equal(local_chainhashes[89], ancestor) + assertIntEqual(t, int(expectedIterations), n_iterations) +} \ No newline at end of file diff --git a/core/nakamoto/types.go b/core/nakamoto/types.go index 0a11cd5..6861ed6 100644 --- a/core/nakamoto/types.go +++ b/core/nakamoto/types.go @@ -96,7 +96,7 @@ type GetBlocksReply struct { // has_block type HasBlockMessage struct { Type string `json:"type"` // "have_block" - BlockHash string `json:"blockHash"` + BlockHash [32]byte `json:"blockHash"` } type HasBlockReply struct { From 027dda9eb9a32c8fc7943160e9e624e78b8f0282 Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Mon, 5 Aug 2024 20:15:39 +1000 Subject: [PATCH 02/15] sync: begin mapping out sync divergent branch test --- core/nakamoto/sync_test.go | 108 ++++++++++++++++++++++++++++++++++++- 1 file changed, 107 insertions(+), 1 deletion(-) diff --git a/core/nakamoto/sync_test.go b/core/nakamoto/sync_test.go index 72a0e91..188159a 100644 --- a/core/nakamoto/sync_test.go +++ b/core/nakamoto/sync_test.go @@ -372,7 +372,6 @@ func TestSyncScheduleDownloadWork1(t *testing.T) { func TestSyncSyncDownloadDataHeaders(t *testing.T) { // After getting the tips, then we need to divide them into work units. 
- assert := assert.New(t) peers := setupTestNetwork(t) @@ -703,4 +702,111 @@ func TestGetPeerCommonAncestor(t *testing.T) { assert := assert.New(t) assert.Equal(local_chainhashes[89], ancestor) assertIntEqual(t, int(expectedIterations), n_iterations) +} + + + +// Sync scenarios: +// +// SCENARIO 1 +// =========== +// DESCRIPTION: local tip is behind remote tip, same branch. remote tip is heavier. +// NETWORK STATE: +// node1: a -> b -> c -> d -> e (work=100) +// node2: a -> b -> c -> d -> e -> f -> g (work=150) +// +// SCENARIO 2 +// =========== +// DESCRIPTION: local tip is behind remote tip, fork branch. remote branch is heavier. +// NETWORK STATE: +// node1: a -> b -> ca -> da -> ea (work=100) +// node2: a -> b -> cb -> db -> eb (work=105) +// +// SCENARIO 3 +// =========== +// DESCRIPTION: local tip is behind remote tip, fork branch. local branch is heavier. +// NETWORK STATE: +// node1: a -> b -> ca -> da -> ea (work=105) +// node2: a -> b -> cb -> db -> eb (work=100) +// + + +func printBlockchainView(t *testing.T, label string, dag *BlockDAG) { + // Print the entire hash chain according to node1. + hashlist, err := dag.GetLongestChainHashList(dag.FullTip.Hash, dag.FullTip.Height+10) + if err != nil { + t.Fatalf("Error getting longest chain: %s", err) + } + t.Logf("") + t.Logf("Longest chain (%s):", label) + for i, hash := range hashlist { + t.Logf("Block #%d: %x", i, hash) + } + t.Logf("") +} + +func TestSyncRemoteForkBranchRemoteHeavier(t *testing.T) { + assert := assert.New(t) + peers := setupTestNetwork(t) + + node1 := peers[0] + node2 := peers[1] + + // Then we check the tips. + tip1 := node1.Dag.FullTip + tip2 := node2.Dag.FullTip + + // Print the height of the tip. + t.Logf("Tip 1 height: %d", tip1.Height) + t.Logf("Tip 2 height: %d", tip2.Height) + + // Check that the tips are the same. + assert.Equal(tip1.HashStr(), tip2.HashStr()) + + // Node 1 mines 15 blocks, gossips with node 2 + node1.Miner.Start(15) + + // Wait for nodes [1,2] to sync. 
+ err := waitForNodesToSyncSameTip([]*Node{node1, node2}) + assert.Nil(err) + + // Disable nodes syncing. + node1.Peer.OnNewBlock = nil + node2.Peer.OnNewBlock = nil + + // Node 1 mines 5 blocks on alternative chain. + // Node 2 mines 7 blocks on alternative chain. + node1.Miner.Start(1) + node2.Miner.Start(5) + + // Assert state. + tip1 = node1.Dag.FullTip + tip2 = node2.Dag.FullTip + assertIntEqual(t, 16, tip1.Height) + assertIntEqual(t, 20, tip2.Height) + assert.NotEqual(tip1.HashStr(), tip2.HashStr()) + + // Now print both hash chains. + printBlockchainView(t, "Node 1", node1.Dag) + printBlockchainView(t, "Node 2", node2.Dag) + + // Now sync node 2 to node 1. + // Get the heavier tip. + // nodes := []*Node{node1, node2} + tips := []Block{tip1, tip2} + var heavierTipIndex int = -1 + if tips[0].AccumulatedWork.Cmp(&tips[1].AccumulatedWork) == -1 { + heavierTipIndex = 1 + } else if tips[1].AccumulatedWork.Cmp(&tips[0].AccumulatedWork) == -1 { + heavierTipIndex = 0 + } else if tips[0].AccumulatedWork.Cmp(&tips[1].AccumulatedWork) == 0 { + t.Errorf("Tips have the same work. 
Re-run test.") + } + t.Logf("Heavier tip index: %d", heavierTipIndex) + assertIntEqual(t, 1, heavierTipIndex) + + // The common ancestor should be + + + } \ No newline at end of file From 93cbb7d9410978937947f3cd44e24e47a549bafb Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Wed, 7 Aug 2024 12:55:59 +1000 Subject: [PATCH 03/15] sync: test getBestTipFromPeers sync: move sync_search into own function --- core/bittorrent_trackers.go | 4 +- core/nakamoto/netpeer_server.go | 6 +- core/nakamoto/sync.go | 232 ++++++++++++++------------------ core/nakamoto/sync_test.go | 43 +++++- 4 files changed, 147 insertions(+), 138 deletions(-) diff --git a/core/bittorrent_trackers.go b/core/bittorrent_trackers.go index 4c2e718..6933be0 100644 --- a/core/bittorrent_trackers.go +++ b/core/bittorrent_trackers.go @@ -11,7 +11,7 @@ import ( "crypto/rand" "encoding/hex" "fmt" - "io/ioutil" + "io" "log" "net/http" "net/url" @@ -81,7 +81,7 @@ func addPeerToSwarm(peerID string, infoHash string, port int) error { } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/core/nakamoto/netpeer_server.go b/core/nakamoto/netpeer_server.go index f55e382..7932421 100644 --- a/core/nakamoto/netpeer_server.go +++ b/core/nakamoto/netpeer_server.go @@ -5,7 +5,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "log" "net/http" "sort" @@ -88,7 +88,7 @@ func (s *PeerServer) inboxHandler(w http.ResponseWriter, r *http.Request) { return } - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { http.Error(w, "Failed to read request body", http.StatusBadRequest) return @@ -168,7 +168,7 @@ func SendMessageToPeer(peerUrl string, message any, log *log.Logger) ([]byte, er defer resp.Body.Close() // Read response. 
- body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("failed to read response: %v", err) } diff --git a/core/nakamoto/sync.go b/core/nakamoto/sync.go index dffb6e8..5008eda 100644 --- a/core/nakamoto/sync.go +++ b/core/nakamoto/sync.go @@ -2,7 +2,7 @@ package nakamoto import ( "fmt" - "math/big" + "slices" "sync" "github.com/liamzebedee/tinychain-go/core" @@ -212,82 +212,6 @@ func (n *Node) Sync() int { // The sync algorithm is a greedy iterative search. // We continue downloading block headers from a peer until we reach their tip. - // TODO handle peers joining. - WINDOW_SIZE := 2048 - - // Greedily searches the block DAG from a tip hash, downloading headers in parallel from peers from all subbranches up to a depth. - // The depth is referred to as the "window size", and is a constant value of 2048 blocks. - search := func(currentTipHash [32]byte) int { - // 1. Get the tips from all our peers and bucket them. - peersTips, err := n.getPeerTips(currentTipHash, uint64(WINDOW_SIZE), 1) - if err != nil { - n.syncLog.Printf("Failed to get peer tips: %s\n", err) - return 0 - } - - // 2. For each tip, download a window of headers and ingest them. - downloaded := 0 - for _, peers := range peersTips { - // 2a. Identify heights. - heights := core.NewBitset(WINDOW_SIZE) - for i := 0; i < WINDOW_SIZE; i++ { - heights.Insert(i) - } - - // 2b. Download headers. - headers, _, err := n.SyncDownloadData(currentTipHash, *heights, peers, true, false) - if err != nil { - n.syncLog.Printf("Failed to download headers: %s\n", err) - continue - } - - // 2c. Validate headers. - // Sanity-check: verify we have all the headers for the heights in order. TODO. - headers2 := orderValidateHeaders(currentTipHash, headers) - - // 2d. Ingest headers. - for _, header := range headers2 { - err := n.Dag.IngestHeader(header) - if err != nil { - // Skip. We will not be able to download the bodies. 
- continue - } - - downloaded += 1 - } - - n.syncLog.Printf("Downloaded %d headers\n", downloaded) - - // Now get the bodies. - // Filter through missing bodies for headers. - heights2 := core.NewBitset(WINDOW_SIZE) - for i, _ := range headers2 { - heights2.Insert(i) - } - _, bodies, err := n.SyncDownloadData(currentTipHash, *heights2, peers, false, true) - if err != nil { - n.syncLog.Printf("Failed to download bodies: %s\n", err) - continue - } - - // Print the bdoeis and exit. - n.syncLog.Printf("Downloaded bodies n=%d\n", len(bodies)) - - // 2d. Ingest bodies. - for i, body := range bodies { - err := n.Dag.IngestBlockBody(body) - if err != nil { - // Skip. We will not be able to download the bodies. - n.syncLog.Printf("Failed to ingest body %d: %s\n", i, err) - continue - } - } - } - - // 3. Return the number of headers downloaded. - return downloaded - } - currentTip, err := n.Dag.GetLatestHeadersTip() if err != nil { n.syncLog.Printf("Failed to get latest tip: %s\n", err) @@ -298,7 +222,7 @@ func (n *Node) Sync() int { for { // Search for headers from current tip. - downloaded := search(currentTip.Hash) + downloaded := n.sync_search(currentTip.Hash) totalSynced += downloaded // Exit when there are no more headers to download. @@ -313,21 +237,93 @@ func (n *Node) Sync() int { return totalSynced } +func (n *Node) sync_search(baseBlock [32]byte) int { + // The size of the window we are searching. + WINDOW_SIZE := 2048 + + // Greedily searches the block DAG from a tip hash, downloading headers in parallel from peers from all subbranches up to a depth. + // The depth is referred to as the "window size", and is a constant value of 2048 blocks. + // 1. Get the tips from all our peers and bucket them. + peersTips, err := n.getPeerTips(baseBlock, uint64(WINDOW_SIZE), 1) + if err != nil { + n.syncLog.Printf("Failed to get peer tips: %s\n", err) + return 0 + } + + // 2. For each tip, download a window of headers and ingest them. 
+ downloaded := 0 + for _, peers := range peersTips { + // 2a. Identify heights. + heights := core.NewBitset(WINDOW_SIZE) + for i := 0; i < WINDOW_SIZE; i++ { + heights.Insert(i) + } + + // 2b. Download headers. + headers, _, err := n.SyncDownloadData(baseBlock, *heights, peers, true, false) + if err != nil { + n.syncLog.Printf("Failed to download headers: %s\n", err) + continue + } + + // 2c. Validate headers. + // Sanity-check: verify we have all the headers for the heights in order. TODO. + headers2 := orderValidateHeaders(baseBlock, headers) + + // 2d. Ingest headers. + for _, header := range headers2 { + err := n.Dag.IngestHeader(header) + if err != nil { + // Skip. We will not be able to download the bodies. + continue + } + + downloaded += 1 + } + + n.syncLog.Printf("Downloaded %d headers\n", downloaded) + + // Now get the bodies. + // Filter through missing bodies for headers. + heights2 := core.NewBitset(WINDOW_SIZE) + for i, _ := range headers2 { + heights2.Insert(i) + } + _, bodies, err := n.SyncDownloadData(baseBlock, *heights2, peers, false, true) + if err != nil { + n.syncLog.Printf("Failed to download bodies: %s\n", err) + continue + } + + // Print the bdoeis and exit. + n.syncLog.Printf("Downloaded bodies n=%d\n", len(bodies)) + + // 2d. Ingest bodies. + for i, body := range bodies { + err := n.Dag.IngestBlockBody(body) + if err != nil { + // Skip. We will not be able to download the bodies. + n.syncLog.Printf("Failed to ingest body %d: %s\n", i, err) + continue + } + } + } + + // 3. Return the number of headers downloaded. + return downloaded +} + // Contacts all our peers in parallel, gets the block header of their tip, and returns the best tip based on total work. -func (n *Node) sync_getBestTipFromPeers() [32]byte { +func (n *Node) sync_getBestTipFromPeers(peers []Peer) (BlockHeader, error) { syncLog := NewLogger("node", "sync") - - // 1. Contact all our peers. - // 2. Get their current tips in parallel. + + // 1. 
Contact all our peers and get their current tips in parallel. syncLog.Printf("Getting tips from %d peers...\n", len(n.Peer.peers)) - var wg sync.WaitGroup - - tips := make([]BlockHeader, 0) tipsChan := make(chan BlockHeader, len(n.Peer.peers)) - // timeout := time.After(5 * time.Second) + tips := make([]BlockHeader, 0) - for _, peer := range n.Peer.peers { + for _, peer := range peers { wg.Add(1) go func(peer Peer) { defer wg.Done() @@ -341,57 +337,29 @@ func (n *Node) sync_getBestTipFromPeers() [32]byte { }(peer) } - go func() { - wg.Wait() - close(tipsChan) - }() - - // TODO WIP - // for { - // select { - // case tip, ok := <-tipsChan: - // if !ok { - // break - // } - // tips = append(tips, tip) - // case <-timeout: - // syncLog.Printf("Timed out getting tips from peers\n") - // } - // } + wg.Wait() + close(tipsChan) + for tip := range tipsChan { + tips = append(tips, tip) + } syncLog.Printf("Received %d tips\n", len(tips)) if len(tips) == 0 { syncLog.Printf("No tips received. Exiting sync.\n") - return [32]byte{} // TODO, should return error - } - - // 3. Sort the tips by max(work). - // 4. Reduce the tips to (tip, work, num_peers). - // 5. Choose the tip with the highest work and the most peers mining on it. - numPeersOnTip := make(map[[32]byte]int) - tipWork := make(map[[32]byte]*big.Int) - - highestWork := big.NewInt(0) - bestTipHash := [32]byte{} - - for _, tip := range tips { - hash := tip.BlockHash() - // TODO embed difficulty into block header so we can verify POW. - work := CalculateWork(Bytes32ToBigInt(hash)) - - // -1 if x < y - // highestWork < work - if highestWork.Cmp(work) == -1 { - highestWork = work - bestTipHash = hash - } - - numPeersOnTip[hash] += 1 - tipWork[hash] = work + return BlockHeader{}, fmt.Errorf("No tips received") } - syncLog.Printf("Best tip: %s\n", bestTipHash) - return bestTipHash + // 2. Sort the tips by max(work). 
+ slices.SortFunc(tips, func(bh1, bh2 BlockHeader) int { + w1 := CalculateWork(Bytes32ToBigInt(bh1.BlockHash())) + w2 := CalculateWork(Bytes32ToBigInt(bh2.BlockHash())) + return w1.Cmp(w2) + }) + + // 3. Return the best tip. + bestTip := tips[len(tips)-1] + syncLog.Printf("Best tip: %x\n", bestTip.BlockHash()) + return bestTip, nil } // Computes the common ancestor of our local canonical chain and a remote peer's canonical chain through an interactive binary search. diff --git a/core/nakamoto/sync_test.go b/core/nakamoto/sync_test.go index 188159a..92bbc2a 100644 --- a/core/nakamoto/sync_test.go +++ b/core/nakamoto/sync_test.go @@ -806,7 +806,48 @@ func TestSyncRemoteForkBranchRemoteHeavier(t *testing.T) { assertIntEqual(t, 1, heavierTipIndex) // The common ancestor should be +} + + +func TestSyncGetBestTipFromPeers(t *testing.T) { + assert := assert.New(t) + peers := setupTestNetwork(t) + + node1 := peers[0] + node2 := peers[1] + node3 := peers[2] + // Base case: all tips are the same. + bestTip, err := node1.sync_getBestTipFromPeers(node1.Peer.peers) + assert.Nil(err) + assert.Equal(node1.Dag.FullTip.HashStr(), bestTip.BlockHashStr()) + assert.Equal(node2.Dag.FullTip.HashStr(), bestTip.BlockHashStr()) + assert.Equal(node3.Dag.FullTip.HashStr(), bestTip.BlockHashStr()) + // Now we test the case where one peer has a different tip. + // Node 1 mines 15 blocks, gossips with node 2 + node1.Miner.Start(5) + node2.Miner.Start(10) + + // node2 should have best tip. + bestTip, err = node1.sync_getBestTipFromPeers(node1.Peer.peers) + assert.Nil(err) + assert.Equal(node2.Dag.FullTip.HashStr(), bestTip.BlockHashStr()) +} -} \ No newline at end of file +// Sync process: +// 1. Ask all peers for tips +// 2. Choose the tip with highest amount of work +// 3. Find the common ancestor +// 4. Sync from this base block + + +// Sync needs to distinguish between two scenarios: +// 1) live sync: the node is syncing in real-time with the network. 
+// 2) cold sync: the node is syncing from a cold start, and needs to download all blocks from the network. +// what changes in each scenario? +// - live sync: +// -- we validate timestamps +// -- we download just one branch +// - cold sync +// -- we download all branches From c4ae7d91be08485c90b1d4828edd3c561392363b Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Sat, 4 Jan 2025 21:51:57 +1100 Subject: [PATCH 04/15] docs --- core/nakamoto/block.go | 1 + core/nakamoto/genesis.go | 8 +++---- core/nakamoto/miner.go | 35 +++++++++++++++++++++++++---- core/nakamoto/state_machine_test.go | 23 ------------------- core/nakamoto/types.go | 1 + 5 files changed, 37 insertions(+), 31 deletions(-) diff --git a/core/nakamoto/block.go b/core/nakamoto/block.go index 89d36a5..623acc4 100644 --- a/core/nakamoto/block.go +++ b/core/nakamoto/block.go @@ -185,6 +185,7 @@ func (b *RawBlock) Envelope() []byte { if err != nil { panic(err) } + // TODO: sanity check NumTransactions/TransactionsMerkleRoot matches against what's in Transactions. err = binary.Write(buf, binary.BigEndian, b.Nonce) if err != nil { panic(err) diff --git a/core/nakamoto/genesis.go b/core/nakamoto/genesis.go index b6a74ec..a75983d 100644 --- a/core/nakamoto/genesis.go +++ b/core/nakamoto/genesis.go @@ -7,16 +7,16 @@ import ( // The Nakamoto consensus configuration, pertaining to difficulty readjustment, genesis block, and block size. type ConsensusConfig struct { - // The length of an epoch. + // The length of a difficulty epoch in blocks. EpochLengthBlocks uint64 `json:"epoch_length_blocks"` - // The target block production rate in terms of 1 epoch. + // The target length of one epoch in milliseconds. TargetEpochLengthMillis uint64 `json:"target_epoch_length_millis"` - // Genesis difficulty target. + // The difficulty of the genesis block. GenesisDifficulty big.Int `json:"genesis_difficulty"` - // The genesis parent block hash. + // The parent block hash for the genesis block. 
This is a special case, as the genesis block has a parent we don't know the preimage for. GenesisParentBlockHash [32]byte `json:"genesis_block_hash"` // Maximum block size. diff --git a/core/nakamoto/miner.go b/core/nakamoto/miner.go index e1069db..a1d7902 100644 --- a/core/nakamoto/miner.go +++ b/core/nakamoto/miner.go @@ -13,6 +13,33 @@ import ( ) // The Miner is responsible for solving the Hashcash proof-of-work puzzle. +// +// The operation of the miner is as follows: +// 1. Begin the miner thread. +// 2. Generate a new POW puzzle: +// a. Create the block template. +// i. Get the current tip and set block.parent_hash to the tip's hash. +// ii. Construct the coinbase transaction with our miner wallet. +// iii. Get the block's body (transactions) using `Miner.GetBlockBody`. This is used to connect the mempool. +// iv. Compute the transaction merkle root. +// b. Compute the difficulty target for mining. +// 3. Begin mining the puzzle: +// a. Send the puzzle to the miner thread. +// b. The miner thread will mine the puzzle until a solution is found. +// i. Increment the nonce. +// ii. Hash the block. +// iii. Check if the guess (hash) is less than the target. +// iv. If the guess is less than the target, the puzzle is solved. Send the solution back to the main thread. +// v. If a new puzzle is received, stop mining the current puzzle and start mining the new puzzle. +// 4. When a solution to the puzzle is found, the miner thread will send the solution back to the main thread. +// 5. The main thread will: +// a. Set the nonce in the block to the solution. +// b. Call `Miner.OnBlockSolution` with the block. +// c. Increment the number of blocks mined. +// d. If the maximum number of blocks to mine has been reached, stop the miner. +// e. Otherwise, generate a new puzzle and send it to the miner thread. 
+// + type Miner struct { dag BlockDAG CoinbaseWallet *core.Wallet @@ -54,7 +81,7 @@ func MakeCoinbaseTx(wallet *core.Wallet, amount uint64) RawTransaction { ToPubkey: wallet.PubkeyBytes(), Amount: amount, Fee: 0, - Nonce: 0, + Nonce: randomNonce(), } envelope := tx.Envelope() sig, err := wallet.Sign(envelope) @@ -181,12 +208,12 @@ func (miner *Miner) MakeNewPuzzle() POWPuzzle { Transactions: blockBody, Graffiti: miner.GraffitiTag, } + + // Compute the transaction merkle root. raw.TransactionsMerkleRoot = GetMerkleRootForTxs(raw.Transactions) - // Mine the POW solution. + // Compute the difficulty target for mining, which may involve recomputing the difficulty (epoch change). curr_height := current_tip.Height + 1 - - // First get the right epoch. var difficulty big.Int epoch, err := miner.dag.GetEpochForBlockHash(current_tip.Hash) if err != nil { diff --git a/core/nakamoto/state_machine_test.go b/core/nakamoto/state_machine_test.go index 4635f83..b8c04ad 100644 --- a/core/nakamoto/state_machine_test.go +++ b/core/nakamoto/state_machine_test.go @@ -27,29 +27,6 @@ func newStateDB() *sql.DB { } func TestStateMachineIdea(t *testing.T) { - // Basically the state machine works as so: - // - we deliver a rawtransaction - // - we call statemachine.transition - // - there are state snapshots - // a state snapshot is simply the full state of the system at a block - // - there are state diffs - // a state diff is the difference between two state snapshots - // how do we compute a state diff between two state snapshots? - // - what is state? - // (account, balance) pairs - // a state diff is simply a set of effects we apply to get the new state - // rather than manually engineer this, we can compute manual state diffs using diff or something similar. 
- // iterate over the state namespaces: - // - state_accounts -> hash(leaf) -> hash(account ++ balance) - // iterate over all of the leaves, and compute the diff: - // - additions - // - deletions - // maybe the state is more like: - // create table state_accounts (account text, balance int) - // StateAccountLeaf { Account string, Balance int } - // (leaf StateAccountLeaf) Bytes() []byte { ... } - // - db := newStateDB() wallets := getTestingWallets(t) stateMachine, err := NewStateMachine(db) diff --git a/core/nakamoto/types.go b/core/nakamoto/types.go index 0a11cd5..6e533d9 100644 --- a/core/nakamoto/types.go +++ b/core/nakamoto/types.go @@ -34,6 +34,7 @@ func GetIdForEpoch(startBlockHash [32]byte, startHeight uint64) string { } // The epoch unique ID is the height ++ startblockhash. +// e.g. 1000_0ab1c2d3... is the epoch starting at height 1000 with start block hash 0ab1c2d3... func (e *Epoch) GetId() string { return GetIdForEpoch(e.StartBlockHash, e.StartHeight) } From 0269c07458b6af57d01d7b76d3951fb9eb1bf27e Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Sat, 4 Jan 2025 21:56:36 +1100 Subject: [PATCH 05/15] doc coinbase genesis tx --- core/nakamoto/genesis.go | 3 +++ core/nakamoto/genesis_test.go | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/core/nakamoto/genesis.go b/core/nakamoto/genesis.go index a75983d..4f920a8 100644 --- a/core/nakamoto/genesis.go +++ b/core/nakamoto/genesis.go @@ -30,6 +30,9 @@ type ConsensusConfig struct { // These tests have been marked with the comment string find:GENESIS-BLOCK-ASSERTS so you can find them easily. func GetRawGenesisBlockFromConfig(consensus ConsensusConfig) RawBlock { txs := []RawTransaction{ + // Genesis coinbase transaction. + // Run `go test ./... -count=1 -v -run TestCreateGenesisCoinbaseTx` to generate a genesis coinbase tx. + // This will output a coinbase transaction with a valid signature. 
RawTransaction{ Version: 1, Sig: [64]byte{0x86, 0xaf, 0x5f, 0x4b, 0x76, 0xea, 0x1c, 0xd2, 0xfb, 0xd4, 0x0f, 0xec, 0x93, 0x90, 0x70, 0x58, 0x47, 0xa1, 0x36, 0xb2, 0xc7, 0x0d, 0x10, 0x7b, 0xdd, 0x3e, 0x92, 0x27, 0xfd, 0xcb, 0x5e, 0xbb, 0x1c, 0x50, 0x0e, 0xfa, 0x02, 0x6a, 0x30, 0x44, 0x71, 0x15, 0xcc, 0x97, 0xf4, 0x15, 0x7f, 0x56, 0xd3, 0x3d, 0xb3, 0x30, 0xd6, 0x66, 0x06, 0xbb, 0xc1, 0x02, 0xae, 0x41, 0x39, 0xdb, 0x67, 0x93}, diff --git a/core/nakamoto/genesis_test.go b/core/nakamoto/genesis_test.go index eefb3e9..f09f924 100644 --- a/core/nakamoto/genesis_test.go +++ b/core/nakamoto/genesis_test.go @@ -9,6 +9,11 @@ import ( "github.com/stretchr/testify/assert" ) +func TestDumpGenesisBlock(t *testing.T) { + // Dump genesis block to JSON. + +} + func TestGetRawGenesisBlockFromConfig(t *testing.T) { assert := assert.New(t) @@ -54,7 +59,7 @@ func formatByteArrayDynamic(b []byte) string { return out } -func TestWalletCreateSignTransferTx(t *testing.T) { +func TestCreateGenesisCoinbaseTx(t *testing.T) { wallet, err := core.CreateRandomWallet() if err != nil { panic(err) From 4128091f517184ecf31611e13bd3869c3b385146 Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Sat, 4 Jan 2025 22:00:12 +1100 Subject: [PATCH 06/15] mempool: connect to miner (wip) node: submit new txs to mempool (wip) --- core/nakamoto/mempool.go | 17 +++++++++++++++++ core/nakamoto/netpeer.go | 7 +++++-- core/nakamoto/node.go | 41 +++++++++++++++++++++++++++++++--------- 3 files changed, 54 insertions(+), 11 deletions(-) diff --git a/core/nakamoto/mempool.go b/core/nakamoto/mempool.go index f735014..4451157 100644 --- a/core/nakamoto/mempool.go +++ b/core/nakamoto/mempool.go @@ -113,3 +113,20 @@ func (m *Mempool) GetFeeStatistics() FeeStatistics { return stats } + +// Creates a bundle from the mempool. 
+func (m *Mempool) GetBundle(max uint) []RawTransaction { + if max == 0 { + return []RawTransaction{} + } + + bundle := []RawTransaction{} + for _, tx := range m.txs { + bundle = append(bundle, *tx) + if uint(len(bundle)) == max { + break + } + } + + return bundle +} diff --git a/core/nakamoto/netpeer.go b/core/nakamoto/netpeer.go index 02436b2..ae29dda 100644 --- a/core/nakamoto/netpeer.go +++ b/core/nakamoto/netpeer.go @@ -45,7 +45,7 @@ type PeerCore struct { GossipPeersIntervalSeconds int OnNewBlock func(block RawBlock) - OnNewTransaction func(tx RawTransaction) + OnNewTransaction func(tx RawTransaction) error OnGetBlocks func(msg GetBlocksMessage) ([][]byte, error) OnGetTip func(msg GetTipMessage) (BlockHeader, error) OnSyncGetTipAtDepth func(msg SyncGetTipAtDepthMessage) (SyncGetTipAtDepthReply, error) @@ -130,7 +130,10 @@ func NewPeerCore(config PeerConfig) *PeerCore { // Call the OnNewTransaction callback. if p.OnNewTransaction != nil { - p.OnNewTransaction(msg.RawTransaction) + err = p.OnNewTransaction(msg.RawTransaction) + if err != nil { + return nil, err + } } return nil, nil }) diff --git a/core/nakamoto/node.go b/core/nakamoto/node.go index 7f83a0b..50a38d5 100644 --- a/core/nakamoto/node.go +++ b/core/nakamoto/node.go @@ -11,6 +11,7 @@ type Node struct { Miner *Miner Peer *PeerCore StateMachine1 *StateMachine + Mempool *Mempool log *log.Logger syncLog *log.Logger stateLog *log.Logger @@ -22,11 +23,14 @@ func NewNode(dag *BlockDAG, miner *Miner, peer *PeerCore) *Node { panic(err) } + mempool := NewMempool() + n := &Node{ Dag: dag, Miner: miner, Peer: peer, StateMachine1: stateMachine, + Mempool: mempool, log: NewLogger("node", ""), syncLog: NewLogger("node", "sync"), stateLog: NewLogger("node", "state"), @@ -181,8 +185,6 @@ func (n *Node) setup() { // Recompute the state after a new tip. n.Dag.OnNewFullTip = func(new_tip Block, prev_tip Block) { // 1. Rebuild state. - // 2. Regenerate current mempool. 
- n.stateLog.Printf("rebuild-state\n") start := time.Now() @@ -194,6 +196,33 @@ func (n *Node) setup() { duration := time.Since(start) n.stateLog.Printf("rebuild-state completed duration=%s n_blocks=%d\n", duration.String(), n.Dag.FullTip.Height) + + // 2. Regenerate current mempool. Tx set should not include any txs that are in the chain. + // 3. Restart miner. + // TODO. + } + + // Listen for new transactions and add them to the mempool. + n.Peer.OnNewTransaction = func(tx RawTransaction) error { + n.log.Printf("New transaction gossip from peer: tx=%x\n", tx.Hash()) + + // Submit transaction to mempool. + err := n.Mempool.SubmitTx(tx) + if err != nil { + n.log.Printf("Failed to add transaction to mempool: %s\n", err) + // If the tx was rejected (e.g. fee too low), return the error. + return err + } + + return nil + } + + // Connect the miner to the mempool. + n.Miner.GetBlockBody = func() []RawTransaction { + // Get the mempool transactions. + var MAX_BUNDLE_SIZE uint = 8192 - 1 // minus the miner's coinbase tx + bundle := n.Mempool.GetBundle(MAX_BUNDLE_SIZE) + return bundle } // When we get a tx, add it to the mempool. @@ -209,14 +238,8 @@ func (n *Node) setup() { // - Reinsert any transcations that were included in blocks that were orphaned, to a maximum depth of 1 day of blocks (144 blocks). O(144) // - Revalidate the tx set. O(K). // c. Begin mining on the new tip. - - // When we get new transaction, add it to mempool. - n.Peer.OnNewTransaction = func(tx RawTransaction) { - // Add transaction to mempool. - // TODO. - } - // Load peers from cache. 
+ networkStore, err := LoadDataStore[NetworkStore](n.Dag.db, "network") if err != nil { n.log.Printf("Failed to load network store: %s\n", err) From 6cae7ec1387863ca57118d633ed20296cbfdc5e3 Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Sat, 4 Jan 2025 23:20:03 +1100 Subject: [PATCH 07/15] add project mangagement state --- PROGRESS | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ README.md | 2 +- TODO | 32 +++++++++++++--------- 3 files changed, 102 insertions(+), 14 deletions(-) create mode 100644 PROGRESS diff --git a/PROGRESS b/PROGRESS new file mode 100644 index 0000000..706c148 --- /dev/null +++ b/PROGRESS @@ -0,0 +1,82 @@ + +COMPONENTS +========== + +block +db +netpeer_server +netpeer +pow +tokenomics +tx +wallet +merkle-tree + 100% + +genesis + 90% + + need genesis block serialised as json + need multiple genesis blocks / network configs potentially + +blockdag + 80% + + needs refactoring some methods + needs timestamp calculation (subjective) in there + needs parallel sig verification + needs(?) to verify tx validity + +state-machine + 50% + + needs either UXTO model or account nonces to prevent duplicate txs + needs proper testing + +node + 50% + + needs to sync properly on divergent branches + needs to restart miner on new full tip + needs to revalidate mempool on new full tip + need to manage miner so it doesnt run in headers mode? + needs to discover other nodes via dns seed or something + +cli + 10% + + needs light mode to just follow chain + needs wallet to send/receive txs + +sync + 50% + + needs simplification pass + needs proper testing + +sync-downloader + 50% + + needs fixing since it fails weirdly? + +explorer + 90% + + entire state recomputed after each block. 
inefficient + coins showed in full integer, no decimalisation yet + + +PROJECT +======= + +testnet 1 + 100% + + done + +post-testnet 1 pass + 50% + + need to clean up comments and docs + need to clean up repo - delete old files + diff --git a/README.md b/README.md index 1de4145..a657156 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ Taking inspiration from projects like SQLite, TempleOS, Cosmos/Tendermint, Linux tinychain is in-development. Currently we can run a node, mine blocks, ingest them into a DAG, create and sign transactions, run a state machine, build the UXTO set from processing transactions, connect to peers and gossip. -In progress: state synchronisation, user wallet API's. +We ran our first testnet back in Aug 2024 in sprint 1. Now we are refining in preparation for testnet 2. See [PROGRESS](./PROGRESS) for the open state of different subcomponents. ## Install. diff --git a/TODO b/TODO index bfa0411..d670803 100644 --- a/TODO +++ b/TODO @@ -1,17 +1,4 @@ -DAG - - [x] test IngestHeader - [x] test IngestBody - test updates full tip / headers tip - -Parallel download - - [x] implement a really basic version which downloads in parallel. basically a dumb bittorrent - [x] make sure it can work with peers joining/leaving - [x] probably try to do it without channels just to start with (simpler mental model) - - Currently: - rename Sign(msg) to accept a sighash, and then add a sighash method to tx.go - why is block body not being matched with block header? @@ -21,6 +8,25 @@ Currently: sync_test.go:361: Error ingesting body: Block header not found for txs merkle root. +mempool + listen for new txs + put them in mempool + inside node/miner? + get a bundle from mempool + mine on it + +adjust miner + restart mechanism to mine on new tip + how does it do it now? 
+ documentation on the functionality + + +state machine + remove sanity check for blockreward/iscoinbase + add proper tests + + + Sync / search From cd36e80e668add0e927a502bd29f1eb67a0ca52f Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Mon, 6 Jan 2025 17:32:31 +1100 Subject: [PATCH 08/15] miner: add restart methods node: restart miner on new full tip --- core/nakamoto/blockdag_client.go | 1 + core/nakamoto/mempool.go | 5 +++ core/nakamoto/miner.go | 75 +++++++++++++++++++++++--------- core/nakamoto/node.go | 4 +- 4 files changed, 64 insertions(+), 21 deletions(-) diff --git a/core/nakamoto/blockdag_client.go b/core/nakamoto/blockdag_client.go index fd280cb..ccf5dd5 100644 --- a/core/nakamoto/blockdag_client.go +++ b/core/nakamoto/blockdag_client.go @@ -295,6 +295,7 @@ func (dag *BlockDAG) GetLatestFullTip() (Block, error) { -- Case 2: Blocks without transactions. -- If a block has no transactions, then it is fully downloaded and is considered for the "full tip". + -- TODO: A block NEVER has 0 transactions! There is always the coinbase. SELECT b.hash, b.acc_work FROM blocks b WHERE b.num_transactions = 0 diff --git a/core/nakamoto/mempool.go b/core/nakamoto/mempool.go index 4451157..61d9b4d 100644 --- a/core/nakamoto/mempool.go +++ b/core/nakamoto/mempool.go @@ -130,3 +130,8 @@ func (m *Mempool) GetBundle(max uint) []RawTransaction { return bundle } + +// Regenerates the mempool from the current state of the chain, removing transactions that are already in the chain. +func (m *Mempool) Regenerate() { + // TODO. +} diff --git a/core/nakamoto/miner.go b/core/nakamoto/miner.go index a1d7902..7298fc1 100644 --- a/core/nakamoto/miner.go +++ b/core/nakamoto/miner.go @@ -46,6 +46,10 @@ type Miner struct { IsRunning bool GraffitiTag [32]byte + // Miner state. + stopCh chan bool + puzzles chan POWPuzzle + // Mutex. 
mutex sync.Mutex @@ -99,7 +103,13 @@ type POWPuzzle struct { solution big.Int } -func (miner *Miner) MineWithStatus(hashrateChannel chan float64, solutionChannel chan POWPuzzle, puzzleChannel chan POWPuzzle) (big.Int, error) { +// Miner thread: +// States: +// - waiting for puzzle +// - mining puzzle +// - puzzle solved +// - restart on new puzzle +func mineWithStatus(log *log.Logger, hashrates chan float64, solutions chan POWPuzzle, puzzles chan POWPuzzle, stopCh chan bool) (big.Int, error) { // Execute in 3s increments. lastHashrateMeasurement := Timestamp() numHashes := 0 @@ -118,7 +128,7 @@ func (miner *Miner) MineWithStatus(hashrateChannel chan float64, solutionChannel now := Timestamp() duration := now - lastHashrateMeasurement hashrate := float64(numHashes) / float64(duration/1000) - hashrateChannel <- hashrate + hashrates <- hashrate numHashes = 0 lastHashrateMeasurement = now } @@ -127,12 +137,12 @@ func (miner *Miner) MineWithStatus(hashrateChannel chan float64, solutionChannel // Routine: Mine. for { var i uint64 = 0 - miner.log.Println("Waiting for new puzzle") - puzzle := <-puzzleChannel + log.Println("Waiting for new puzzle") + puzzle := <-puzzles block := puzzle.block nonce := puzzle.startNonce target := puzzle.target - miner.log.Printf("New puzzle block=%s target=%s\n", block.HashStr(), target.String()) + log.Printf("New puzzle block=%s target=%s\n", block.HashStr(), target.String()) // Loop: mine 1 hash. for { @@ -151,21 +161,24 @@ func (miner *Miner) MineWithStatus(hashrateChannel chan float64, solutionChannel // Check solution: hash < target. if guess.Cmp(&target) == -1 { - miner.log.Printf("Puzzle solved: iterations=%d\n", i) + log.Printf("Puzzle solved: iterations=%d\n", i) puzzle.solution = nonce - solutionChannel <- puzzle + solutions <- puzzle break } // Check if new puzzle has been received. 
select { - case newPuzzle := <-puzzleChannel: + case newPuzzle := <-puzzles: puzzle = newPuzzle block = puzzle.block nonce = puzzle.startNonce target = puzzle.target - miner.log.Printf("New puzzle block=%s target=%s\n", block.HashStr(), target.String()) + log.Printf("New puzzle block=%s target=%s\n", block.HashStr(), target.String()) + case <-stopCh: + log.Println("Stopping miner") + return nonce, nil default: // Do nothing. } @@ -234,6 +247,25 @@ func (miner *Miner) MakeNewPuzzle() POWPuzzle { return puzzle } +func (miner *Miner) Stop() { + miner.mutex.Lock() + if !miner.IsRunning { + miner.log.Printf("Miner not running") + miner.mutex.Unlock() + return + } + miner.IsRunning = false + miner.mutex.Unlock() + + miner.log.Println("Sent stop signal to miner") + miner.stopCh <- true +} + +// Send new puzzle to miner thread, based off the latest full tip. +func (miner *Miner) RestartWithNewPuzzle() { + miner.puzzles <- miner.MakeNewPuzzle() +} + func (miner *Miner) Start(mineMaxBlocks int64) []RawBlock { miner.mutex.Lock() if miner.IsRunning { @@ -243,26 +275,29 @@ func (miner *Miner) Start(mineMaxBlocks int64) []RawBlock { miner.IsRunning = true miner.mutex.Unlock() - // The next tip channel. - // next_tip := make(chan Block) - // block_solutions := make(chan Block) - hashrateChannel := make(chan float64, 1) - puzzleChannel := make(chan POWPuzzle, 1) - solutionChannel := make(chan POWPuzzle, 1) + // Set miner thread state. + hashrates := make(chan float64, 1) + puzzles := make(chan POWPuzzle, 1) + solutions := make(chan POWPuzzle, 1) + stopCh := make(chan bool, 1) + + // Set miner state. 
+ miner.stopCh = stopCh + miner.puzzles = puzzles - go miner.MineWithStatus(hashrateChannel, solutionChannel, puzzleChannel) + go mineWithStatus(miner.log, hashrates, solutions, puzzles, stopCh) var blocksMined int64 = 0 mined := []RawBlock{} - puzzleChannel <- miner.MakeNewPuzzle() + puzzles <- miner.MakeNewPuzzle() for { select { - case hashrate := <-hashrateChannel: + case hashrate := <-hashrates: // Print iterations using commas. p := message.NewPrinter(language.English) miner.log.Printf(p.Sprintf("Hashrate: %.2f H/s\n", hashrate)) - case puzzle := <-solutionChannel: + case puzzle := <-solutions: miner.log.Println("Received solution") raw := puzzle.block @@ -288,7 +323,7 @@ func (miner *Miner) Start(mineMaxBlocks int64) []RawBlock { miner.log.Println("Making new puzzle") miner.log.Println("New puzzle ready") - puzzleChannel <- miner.MakeNewPuzzle() + puzzles <- miner.MakeNewPuzzle() } } } diff --git a/core/nakamoto/node.go b/core/nakamoto/node.go index 50a38d5..3ae70a4 100644 --- a/core/nakamoto/node.go +++ b/core/nakamoto/node.go @@ -198,8 +198,10 @@ func (n *Node) setup() { n.stateLog.Printf("rebuild-state completed duration=%s n_blocks=%d\n", duration.String(), n.Dag.FullTip.Height) // 2. Regenerate current mempool. Tx set should not include any txs that are in the chain. + n.Mempool.Regenerate() + // 3. Restart miner. - // TODO. + n.Miner.RestartWithNewPuzzle() } // Listen for new transactions and add them to the mempool. 
From 6d6a022c3407f4dbefc0dc8dce660d5d33db2828 Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Sun, 19 Jan 2025 18:11:50 +1100 Subject: [PATCH 09/15] chain: add BlockchainNetwork as an instance of the chain config --- cli/cmd/explorer.go | 5 +++-- cli/cmd/node.go | 38 +++----------------------------------- core/nakamoto/chain.go | 34 ++++++++++++++++++++++++++++++++++ core/nakamoto/utils.go | 4 ++++ 4 files changed, 44 insertions(+), 37 deletions(-) create mode 100644 core/nakamoto/chain.go diff --git a/cli/cmd/explorer.go b/cli/cmd/explorer.go index 15fb04e..c62c38e 100644 --- a/cli/cmd/explorer.go +++ b/cli/cmd/explorer.go @@ -1,6 +1,7 @@ package cmd import ( + "github.com/liamzebedee/tinychain-go/core/nakamoto" "github.com/liamzebedee/tinychain-go/explorer" "github.com/urfave/cli/v2" @@ -15,8 +16,8 @@ func RunExplorer(cmdCtx *cli.Context) error { dbPath := cmdCtx.String("db") // DAG. - networks := getNetworks() - dag, _, _ := newBlockdag(dbPath, networks["testnet1"]) + networks := nakamoto.GetNetworks() + dag, _, _ := newBlockdag(dbPath, networks["testnet1"].ConsensusConfig) // Handle process signals. 
c := make(chan os.Signal, 1) diff --git a/cli/cmd/node.go b/cli/cmd/node.go index 85e9215..1745fc2 100644 --- a/cli/cmd/node.go +++ b/cli/cmd/node.go @@ -7,9 +7,7 @@ import ( "github.com/urfave/cli/v2" "database/sql" - "encoding/hex" "fmt" - "math/big" "net/url" "os" "os/signal" @@ -26,36 +24,6 @@ func (m *MockStateMachine) VerifyTx(tx nakamoto.RawTransaction) error { return nil } -func getNetworks() map[string]nakamoto.ConsensusConfig { - genesis_difficulty := new(big.Int) - genesis_difficulty.SetString("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16) - - // https://serhack.me/articles/story-behind-alternative-genesis-block-bitcoin/ ;) - genesisBlockHash_, err := hex.DecodeString("000006b15d1327d67e971d1de9116bd60a3a01556c91b6ebaa416ebc0cfaa646") - if err != nil { - panic(err) - } - genesisBlockHash_[0] += 1 - - genesisBlockHash := [32]byte{} - copy(genesisBlockHash[:], genesisBlockHash_) - - network_testnet1 := nakamoto.ConsensusConfig{ - EpochLengthBlocks: 10, - TargetEpochLengthMillis: 1000 * 60, // 1min, 1 block every 10s - GenesisDifficulty: *genesis_difficulty, - GenesisParentBlockHash: genesisBlockHash, - MaxBlockSizeBytes: 2 * 1024 * 1024, // 2MB - } - - networks := map[string]nakamoto.ConsensusConfig{ - "testnet1": network_testnet1, - "terrydavis": network_testnet1, - } - - return networks -} - func newBlockdag(dbPath string, conf nakamoto.ConsensusConfig) (nakamoto.BlockDAG, nakamoto.ConsensusConfig, *sql.DB) { // TODO validate connection string. fmt.Println("database path: ", dbPath) @@ -120,8 +88,8 @@ func RunNode(cmdCtx *cli.Context) error { } // DAG. 
- networks := getNetworks() - conf, ok := networks[network] + networks := nakamoto.GetNetworks() + net, ok := networks[network] if !ok { availableNetworks := []string{} for k := range networks { @@ -130,7 +98,7 @@ func RunNode(cmdCtx *cli.Context) error { fmt.Printf("Available networks: %s\n", strings.Join(availableNetworks, ", ")) return fmt.Errorf("Unknown network: %s", network) } - dag, _, db := newBlockdag(dbPath, conf) + dag, _, db := newBlockdag(dbPath, net.ConsensusConfig) // Miner. minerWallet, err := getMinerWallet(db) diff --git a/core/nakamoto/chain.go b/core/nakamoto/chain.go new file mode 100644 index 0000000..f983dcb --- /dev/null +++ b/core/nakamoto/chain.go @@ -0,0 +1,34 @@ +package nakamoto + +// A unique instance of the blockchain. +type BlockchainNetwork struct { + GenesisBlock RawBlock + ConsensusConfig ConsensusConfig + Name string +} + +func NewBlockchainNetwork(name string, conf ConsensusConfig) BlockchainNetwork { + return BlockchainNetwork{ + Name: name, + GenesisBlock: GetRawGenesisBlockFromConfig(conf), + ConsensusConfig: conf, + } +} + +func GetNetworks() map[string]BlockchainNetwork { + network_testnet1 := NewBlockchainNetwork("testnet1", ConsensusConfig{ + EpochLengthBlocks: 10, + TargetEpochLengthMillis: 1000 * 60, // 1min, 1 block every 10s + GenesisDifficulty: HexStringToBigInt("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + // https://serhack.me/articles/story-behind-alternative-genesis-block-bitcoin/ ;) + GenesisParentBlockHash: HexStringToBytes32("000006b15d1327d67e971d1de9116bd60a3a01556c91b6ebaa416ebc0cfaa647"), // +1 for testnet + MaxBlockSizeBytes: 2 * 1024 * 1024, // 2MB + }) + + networks := map[string]BlockchainNetwork{ + "testnet1": network_testnet1, + "terrydavis": network_testnet1, + } + + return networks +} diff --git a/core/nakamoto/utils.go b/core/nakamoto/utils.go index 0a955e8..cae0283 100644 --- a/core/nakamoto/utils.go +++ b/core/nakamoto/utils.go @@ -48,6 +48,10 @@ func HexStringToBytes32(s 
string) [32]byte { return fbuf } +func HexStringToBigInt(s string) big.Int { + return Bytes32ToBigInt(HexStringToBytes32(s)) +} + func StringToBytes32(s string) [32]byte { var b [32]byte copy(b[:], s) From addd8e05fc4f9e379bd42274a190b5e5bf61a789 Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Sun, 19 Jan 2025 18:12:56 +1100 Subject: [PATCH 10/15] refactor: cosmetic updates --- core/nakamoto/blockdag_test.go | 92 +++++++++++++++++++++++------ core/nakamoto/genesis_test.go | 5 +- core/nakamoto/miner_test.go | 20 ++----- core/nakamoto/state_machine_test.go | 20 ++----- core/nakamoto/sync.go | 14 ++--- core/nakamoto/tokenomics.go | 2 +- 6 files changed, 90 insertions(+), 63 deletions(-) diff --git a/core/nakamoto/blockdag_test.go b/core/nakamoto/blockdag_test.go index 49390af..6e3b2b1 100644 --- a/core/nakamoto/blockdag_test.go +++ b/core/nakamoto/blockdag_test.go @@ -57,13 +57,10 @@ func newBlockdag() (BlockDAG, ConsensusConfig, *sql.DB, RawBlock) { stateMachine := newMockStateMachine() - genesis_difficulty := new(big.Int) - genesis_difficulty.SetString("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16) - conf := ConsensusConfig{ EpochLengthBlocks: 5, TargetEpochLengthMillis: 2000, - GenesisDifficulty: *genesis_difficulty, + GenesisDifficulty: HexStringToBigInt("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // https://serhack.me/articles/story-behind-alternative-genesis-block-bitcoin/ ;) GenesisParentBlockHash: HexStringToBytes32("000006b15d1327d67e971d1de9116bd60a3a01556c91b6ebaa416ebc0cfaa646"), MaxBlockSizeBytes: 2 * 1024 * 1024, // 2MB @@ -745,23 +742,13 @@ func newBlockdagLongEpoch() (BlockDAG, ConsensusConfig, *sql.DB) { stateMachine := newMockStateMachine() - genesis_difficulty := new(big.Int) - genesis_difficulty.SetString("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16) - - // https://serhack.me/articles/story-behind-alternative-genesis-block-bitcoin/ ;) - genesisBlockHash_, err := 
hex.DecodeString("000006b15d1327d67e971d1de9116bd60a3a01556c91b6ebaa416ebc0cfaa646") - if err != nil { - panic(err) - } - genesisBlockHash := [32]byte{} - copy(genesisBlockHash[:], genesisBlockHash_) - conf := ConsensusConfig{ EpochLengthBlocks: 20000, TargetEpochLengthMillis: 1000, - GenesisDifficulty: *genesis_difficulty, - GenesisParentBlockHash: genesisBlockHash, - MaxBlockSizeBytes: 2 * 1024 * 1024, // 2MB + GenesisDifficulty: HexStringToBigInt("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + // https://serhack.me/articles/story-behind-alternative-genesis-block-bitcoin/ ;) + GenesisParentBlockHash: HexStringToBytes32("000006b15d1327d67e971d1de9116bd60a3a01556c91b6ebaa416ebc0cfaa646"), + MaxBlockSizeBytes: 2 * 1024 * 1024, // 2MB } blockdag, err := NewBlockDAGFromDB(db, stateMachine, conf) @@ -1198,3 +1185,72 @@ func TestDagIngestBody(t *testing.T) { // Ingest body. // Updates both full and header tip. } + +func TestDagBlockTimestampRules(t *testing.T) { + assert := assert.New(t) + blockdag, _, _, genesisBlock := newBlockdag() + + assert.True(true) + assert.NotNil(blockdag) + assert.NotNil(genesisBlock) + + // 2. Verify timestamp is within bounds. + // 2a. Verify monotonic - parent.timestamp < block.timestamp. + // 2b. Verify not in future - block.timestamp < (now + now*1.05) + // 2c. Verify not in past (if we are in live mode) - + // - if tip is fresh, that is, the tip's age is within statistical bounds for expecting a new block from the network + // - ie. 0 < tip.age < avg_block_time(6 blocks) + // - we can expect a new block soon, then we verify the tip is within the real clock time + // - if we are uncertain when we will be reconnected to the network, due to a split, then we must disable the safety check. + // + + // The block timestamp follows the following validation constraints: + // - monotonic: parentblock.timestamp < block.timestamp + // + // Sync mode: + // - just believe every block received. ensure monotonicity. 
ensure the increase is bound by 2 weeks at max. + // but what if there is war? who cares. just put it in for now. + // + // Live mode? + // - if the parent block timestamp is within 1d of the current system clock + // - if the current block timestamp is within 1d of + + // Attacks: + // + // 1. Make the timestamp super far in the future. Manipulate the difficulty epoch so the length/span is only + // one block long. + // CAVEAT: this doesn't matter. Accumulated work determines heaviest chain. Just choose heavier chain. Cannot outpace the majority. + // + // 2. Reuse the same block? + // Impossible. Hash challenges changes each block. + // + // 3. Submit a block solution but adjust the timestamp to be before previous block. + // MITIGATED. + // + // 4. Submit a block solution but adjust the timestamp to be on the next difficulty boundary. Continually trigger new difficulty epochs? + // This doesn't work since the epoch is computed based on a span of blocks / height. + // + // 5. Manipulate the timestamp as to increase the block production rate for the next epoch, so we get more rewards. + // Other nodes would have to collude to do this. + // What's the effective solution here? + // + // How do we know when we are in live mode? + // 1. receive a block + // 2. if it's on our branch, check our tip + // 3. if the tip is "fresh" (ie. ) + // tip.age = now() - tip.timestamp + // if tip.age < 6 blocks: fresh = true else false + // what is the statistic for liveness of the network? + // block production rate for the past 120 blocks + // + // the timestamp cannot be more than 15mins in future + // + // if the tip is fresh we are still in live mode. + // else we revert to sync mode. + // in live mode we verify that the timestamp on blocks is recent. + // otherwise we just have to trust that we are receiving the longest chain and we are expecting more blocks. + // + // we may have undergone a network split. 
+ // + +} diff --git a/core/nakamoto/genesis_test.go b/core/nakamoto/genesis_test.go index f09f924..c101dd2 100644 --- a/core/nakamoto/genesis_test.go +++ b/core/nakamoto/genesis_test.go @@ -17,13 +17,10 @@ func TestDumpGenesisBlock(t *testing.T) { func TestGetRawGenesisBlockFromConfig(t *testing.T) { assert := assert.New(t) - genesis_difficulty := new(big.Int) - genesis_difficulty.SetString("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16) - conf := ConsensusConfig{ EpochLengthBlocks: 5, TargetEpochLengthMillis: 2000, - GenesisDifficulty: *genesis_difficulty, + GenesisDifficulty: HexStringToBigInt("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // https://serhack.me/articles/story-behind-alternative-genesis-block-bitcoin/ ;) GenesisParentBlockHash: HexStringToBytes32("000006b15d1327d67e971d1de9116bd60a3a01556c91b6ebaa416ebc0cfaa646"), MaxBlockSizeBytes: 2 * 1024 * 1024, // 2MB diff --git a/core/nakamoto/miner_test.go b/core/nakamoto/miner_test.go index ab661b3..5e43020 100644 --- a/core/nakamoto/miner_test.go +++ b/core/nakamoto/miner_test.go @@ -2,8 +2,6 @@ package nakamoto import ( "database/sql" - "encoding/hex" - "math/big" "testing" "github.com/liamzebedee/tinychain-go/core" @@ -22,23 +20,13 @@ func newBlockdagForMiner() (BlockDAG, ConsensusConfig, *sql.DB) { stateMachine := newMockStateMachine() - genesis_difficulty := new(big.Int) - genesis_difficulty.SetString("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16) - - // https://serhack.me/articles/story-behind-alternative-genesis-block-bitcoin/ ;) - genesisBlockHash_, err := hex.DecodeString("000006b15d1327d67e971d1de9116bd60a3a01556c91b6ebaa416ebc0cfaa646") - if err != nil { - panic(err) - } - genesisBlockHash := [32]byte{} - copy(genesisBlockHash[:], genesisBlockHash_) - conf := ConsensusConfig{ EpochLengthBlocks: 5, TargetEpochLengthMillis: 1000, - GenesisDifficulty: *genesis_difficulty, - GenesisParentBlockHash: genesisBlockHash, - 
MaxBlockSizeBytes: 2 * 1024 * 1024, // 2MB + GenesisDifficulty: HexStringToBigInt("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + // https://serhack.me/articles/story-behind-alternative-genesis-block-bitcoin/ ;) + GenesisParentBlockHash: HexStringToBytes32("000006b15d1327d67e971d1de9116bd60a3a01556c91b6ebaa416ebc0cfaa647"), + MaxBlockSizeBytes: 2 * 1024 * 1024, // 2MB } blockdag, err := NewBlockDAGFromDB(db, stateMachine, conf) diff --git a/core/nakamoto/state_machine_test.go b/core/nakamoto/state_machine_test.go index b8c04ad..2ac5d45 100644 --- a/core/nakamoto/state_machine_test.go +++ b/core/nakamoto/state_machine_test.go @@ -2,8 +2,6 @@ package nakamoto import ( "database/sql" - "encoding/hex" - "math/big" "testing" "github.com/liamzebedee/tinychain-go/core" @@ -326,23 +324,13 @@ func newBlockdagForStateMachine() (BlockDAG, ConsensusConfig, *sql.DB) { stateMachine := newMockStateMachine() - genesis_difficulty := new(big.Int) - genesis_difficulty.SetString("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16) - - // https://serhack.me/articles/story-behind-alternative-genesis-block-bitcoin/ ;) - genesisBlockHash_, err := hex.DecodeString("000006b15d1327d67e971d1de9116bd60a3a01556c91b6ebaa416ebc0cfaa646") - if err != nil { - panic(err) - } - genesisBlockHash := [32]byte{} - copy(genesisBlockHash[:], genesisBlockHash_) - conf := ConsensusConfig{ EpochLengthBlocks: 5, TargetEpochLengthMillis: 1000, - GenesisDifficulty: *genesis_difficulty, - GenesisParentBlockHash: genesisBlockHash, - MaxBlockSizeBytes: 2 * 1024 * 1024, // 2MB + GenesisDifficulty: HexStringToBigInt("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + // https://serhack.me/articles/story-behind-alternative-genesis-block-bitcoin/ ;) + GenesisParentBlockHash: HexStringToBytes32("000006b15d1327d67e971d1de9116bd60a3a01556c91b6ebaa416ebc0cfaa646"), + MaxBlockSizeBytes: 2 * 1024 * 1024, // 2MB } blockdag, err := NewBlockDAGFromDB(db, stateMachine, 
conf) diff --git a/core/nakamoto/sync.go b/core/nakamoto/sync.go index dfe8dbe..680f4fe 100644 --- a/core/nakamoto/sync.go +++ b/core/nakamoto/sync.go @@ -235,7 +235,7 @@ func (n *Node) Sync() int { } // 2b. Download headers. - headers, _, err := n.SyncDownloadData(currentTipHash, *heights, peers, true, false) + headersUnsafe, _, err := n.SyncDownloadData(currentTipHash, *heights, peers, true, false) if err != nil { n.syncLog.Printf("Failed to download headers: %s\n", err) continue @@ -243,10 +243,10 @@ func (n *Node) Sync() int { // 2c. Validate headers. // Sanity-check: verify we have all the headers for the heights in order. TODO. - headers2 := orderValidateHeaders(currentTipHash, headers) + headers := orderValidateHeaders(currentTipHash, headersUnsafe) // 2d. Ingest headers. - for _, header := range headers2 { + for _, header := range headers { err := n.Dag.IngestHeader(header) if err != nil { // Skip. We will not be able to download the bodies. @@ -258,10 +258,9 @@ func (n *Node) Sync() int { n.syncLog.Printf("Downloaded %d headers\n", downloaded) - // Now get the bodies. - // Filter through missing bodies for headers. + // 2e. Download bodies. heights2 := core.NewBitset(WINDOW_SIZE) - for i, _ := range headers2 { + for i, _ := range headers { heights2.Insert(i) } _, bodies, err := n.SyncDownloadData(currentTipHash, *heights2, peers, false, true) @@ -270,10 +269,9 @@ func (n *Node) Sync() int { continue } - // Print the bdoeis and exit. n.syncLog.Printf("Downloaded bodies n=%d\n", len(bodies)) - // 2d. Ingest bodies. + // 2f. Ingest bodies. for i, body := range bodies { err := n.Dag.IngestBlockBody(body) if err != nil { diff --git a/core/nakamoto/tokenomics.go b/core/nakamoto/tokenomics.go index 14f82e2..b2fe7d0 100644 --- a/core/nakamoto/tokenomics.go +++ b/core/nakamoto/tokenomics.go @@ -9,7 +9,7 @@ import ( // TODO URGENT: implement this in integer arithmetic to avoid precision differences causing consensus faults. 
func GetBlockReward(blockHeight int) uint64 { initialReward := 50.0 - halvingInterval := 210000 + halvingInterval := 210_000 // Calculate the number of halvings numHalvings := blockHeight / halvingInterval From 0b95025ba1ce6127d2efba012ac59a1f022acadf Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Sun, 19 Jan 2025 18:27:08 +1100 Subject: [PATCH 11/15] blockdag: begin coding timestamp validation --- core/nakamoto/blockdag.go | 43 ++++++++++++++++++++++++++++++++++++--- docs/missing.md | 21 +++++++++++++++++++ 2 files changed, 61 insertions(+), 3 deletions(-) diff --git a/core/nakamoto/blockdag.go b/core/nakamoto/blockdag.go index 84e9d0c..5e8459f 100644 --- a/core/nakamoto/blockdag.go +++ b/core/nakamoto/blockdag.go @@ -8,6 +8,7 @@ import ( "log" "math/big" "sync" + "time" "github.com/liamzebedee/tinychain-go/core" _ "github.com/mattn/go-sqlite3" @@ -43,9 +44,14 @@ type BlockDAG struct { // The "full node" tip. This is the tip of the heaviest chain of full blocks. FullTip Block - // OnNewTip handler. + // Triggered on a new headers tip (light sync). OnNewHeadersTip func(tip Block, prevTip Block) - OnNewFullTip func(tip Block, prevTip Block) + + // Triggered on a new full tip (fully-synced). + OnNewFullTip func(tip Block, prevTip Block) + + // Get the current time according to the system clock. + ClockTime func() uint64 log *log.Logger } @@ -72,6 +78,29 @@ func NewBlockDAGFromDB(db *sql.DB, stateMachine StateMachineInterface, consensus return dag, nil } +func (dag *BlockDAG) getClockTime() uint64 { + if dag.ClockTime != nil { + return dag.ClockTime() + } + + now := time.Now() + milliseconds := now.UnixMilli() + return uint64(milliseconds) +} + +func (dag *BlockDAG) IsTipFresh(tip Block) bool { + age := dag.getClockTime() - tip.Timestamp + + // sanity-check. + if age < 0 { + panic("tip.Timestamp is from future") + } + + isFresh := age < 6 + // TODO + return isFresh +} + // Initalises the block DAG with the genesis block. 
func (dag *BlockDAG) initialiseBlockDAG() error { genesisBlock := GetRawGenesisBlockFromConfig(dag.consensus) @@ -215,7 +244,6 @@ func (dag *BlockDAG) UpdateTip() error { // Validation rules for blocks: // 1. Verify parent is known. // 2. Verify timestamp is within bounds. -// TODO: subjectivity. // 3. Verify num transactions is the same as the length of the transactions list. // 4a. Verify coinbase transcation is present. // 4b. Verify transactions are valid. @@ -394,6 +422,14 @@ func (dag *BlockDAG) IngestBlockBody(body []RawTransaction) error { raw.Transactions = body // 2. Verify timestamp is within bounds. + // 2a. Verify monotonic - parent.timestamp < block.timestamp. + // 2b. Verify not in future - block.timestamp < (now + now*1.05) + // 2c. Verify not in past (if we are in live mode) - + // - if tip is fresh, that is, the tip's age is within statistical bounds for expecting a new block from the network + // - ie. 0 < tip.age < avg_block_time(6 blocks) + // - we can expect a new block soon, then we verify the tip is within the real clock time + // - if we are uncertain when we will be reconnected to the network, due to a split, then we must disable the safety check. + // // TODO: subjectivity. // 3. Verify num transactions is the same as the length of the transactions list. @@ -406,6 +442,7 @@ func (dag *BlockDAG) IngestBlockBody(body []RawTransaction) error { if len(raw.Transactions) < 1 { return fmt.Errorf("Missing coinbase tx.") } + // 4b. Verify transactions. // TODO: We can parallelise this. // This is one of the most expensive operations of the blockchain node. diff --git a/docs/missing.md b/docs/missing.md index 41f8e07..3a67f13 100644 --- a/docs/missing.md +++ b/docs/missing.md @@ -17,3 +17,24 @@ I had some trouble implementing this. 
https://blog.bitmex.com/bitcoins-block-timestamp-protection-rules/ +> When a Bitcoin block is produced there are essentially two times involved: +> +> The timestamp in the block header, put there by the miner +> The actual time the block was produced. +> +> As it happens, there are some incentives for miners to lie about the time. For instance, nefarious miners could add a timestamp which is in the future. For example, if a block took 10 minutes to produce, miners could claim it took them 15 minutes, by adding a timestamp 5 minutes into the future. If this pattern of adding 5 minutes is continued throughout a two week difficulty adjustment period, it would look like the average block time was 15 minutes, when in reality it was shorter than this. The difficulty could then adjust downwards in the next period, increasing mining revenue due to faster block times. Of course, the problem with this approach is that the Bitcoin clock continues to move further and further out of line with the real time. +> +> To resolve or mitigate the above issue, Bitcoin has two mechanisms to protect against miners manipulating the timestamp: +> +> **Median Past Time (MPT) Rule** – The timestamp must be further forwards than the median of the last eleven blocks. The median of eleven blocks implies six blocks could be re-organised and time would still not move backwards, which one can argue is reasonably consistent with the example, provided in Meni Rosenfeld’s 2012 paper, that six confirmations are necessary to decrease the probability of success below 0.1%, for an attacker with 10% of the network hashrate. +> +> **Future Block Time Rule** – The timestamp cannot be more than 2 hours in the future based on the MAX_FUTURE_BLOCK_TIME constant, relative to the median time from the node’s peers. The maximum allowed gap between the time provided by the nodes and the local system clock is 90 minutes, another safeguard. 
It should be noted that unlike the MPT rule above, this is not a full consensus rule. Blocks with a timestamp too far in the future are not invalid, they can become valid as time moves forwards. +> +> Rule number one ensures that the blockchain continues to move forwards in time and rule number two ensures that the chain does not move too far forwards. These time protection rules are not perfect, for example miners could still move the timestamps forward by producing timestamps in the future, within a two week period, however the impact of this would be limited. +> +> $ 2 hours / 2 weeks = 0.6% $ +> +> As the above ratio illustrates, since two hours is only a small fraction of two weeks, the impact this manipulation has on network reliability and mining profitability may be limited. This is the equivalent of a reduction in the time between blocks from 10 minutes to 9 minutes and 54 seconds, in the two weeks after the difficulty adjustment. In addition to this, it is only a one-off change, as once the two-hour time shift has occurred, it cannot occur again, without first going backwards. At the same time, the miner may want to include a margin of safety before shifting forwards two hours, to reduce the risk of the block being rejected by the network. +> +> + From d6e937974e2f37c4274a97778b5de1074071e166 Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Sun, 19 Jan 2025 18:27:15 +1100 Subject: [PATCH 12/15] doc: uxtreeo --- docs/idea-uxtreeo.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 docs/idea-uxtreeo.md diff --git a/docs/idea-uxtreeo.md b/docs/idea-uxtreeo.md new file mode 100644 index 0000000..5b5b744 --- /dev/null +++ b/docs/idea-uxtreeo.md @@ -0,0 +1,21 @@ +Idea: UTXO +========== + +The state of a Bitcoin blockchain is the UTXO set - the set of unspent transaction outputs.
Each UTXO is a "one-use" piece of state - a transaction consumes inputs (UTXO's) and sends their value to different outputs (which is where UTXO's are created). + +There are two implications to this model: + + 1. Nodes must store an entire UTXO set. + 2. In an account-based model, nodes must check for transaction uniqueness via account nonces. + +Here is an idea for simplifying this: + + 1. Each block commits to a sparse merkle tree containing the state. + 2. The state consists of UTXO's which are identified by key and value. + 3. It is simple for light clients to verify the validity of a UTXO based on the current state of this tree, since sparse merkle trees allow for inclusion/exclusion proofs. + 4. But this incurs a large cost - in order to spend a UTXO, users must produce a leaf proof - which is O(log N) leaves in space. This times 8192 txs (the full size of a 2MB block?) means quite a lot more data storage. Each hash is 32 bytes, so at least 16*32 is a lot of bytes added. + 5. So - what we can do. Nodes can prove these leaves into a ZK proof - thus reducing the cost down to O(log N) where N is the number of computational steps - and close to constant size if proof aggregation is used. + +https://eprint.iacr.org/2019/611 + +https://bitcoinops.org/en/topics/utreexo/ \ No newline at end of file From 437e2fa94cd87cb00ce9544df51a1506da8fd599 Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Wed, 22 Jan 2025 22:16:03 +1100 Subject: [PATCH 13/15] design: light sync --- docs/light-sync.txt | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 docs/light-sync.txt diff --git a/docs/light-sync.txt b/docs/light-sync.txt new file mode 100644 index 0000000..134a6ef --- /dev/null +++ b/docs/light-sync.txt @@ -0,0 +1,29 @@ +The sync algorithm is so simple. + +For each peer: +1. Trade tips. +2. Determine highest common ancestor. +3. Sync headers from there.
+ + +light-sync(other): + var myself, other : Peer + + # test-case: test if we are synced. + # O(1) + tip = other.get_tip + if dag.has(tip): + return; + + # fast-case: test if we are within 12 blocks. + tips = other.get_tips(-6) + local_chain = get_path(tip, -6) + if intersects(tips, local_chain): + download the last 6 headers and then we're synced + return; + + # base-case: interactive bisect highest common ancestor (binary search). + base_block = find_highest_common_ancestor(other, tip) + download_headers_from(base_block) + + From 49f5cbfb909ab49a76228e5cefa11193b24c1dbd Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Wed, 22 Jan 2025 22:28:15 +1100 Subject: [PATCH 14/15] docs: cleanup --- docs/concepts.md | 3 + docs/differences-from-bitcoin.md | 2 +- docs/{state-sync.md => history-sync.md} | 0 docs/{ => ideas}/chat.md | 0 .../{ => ideas}/realtime-zk-proving-rollup.md | 0 docs/{idea-uxtreeo.md => ideas/uxtreeo.md} | 0 docs/learnings.md | 4 +- docs/philosophy.md | 18 - docs/testing.md | 3 +- docs/tinychain-legacy/.gitignore | 3 - docs/tinychain-legacy/Pipfile | 20 - docs/tinychain-legacy/Pipfile.lock | 518 ------------------ docs/tinychain-legacy/README.md | 113 ---- docs/tinychain-legacy/docs/bitcoin.md | 89 --- docs/tinychain-legacy/docs/bitcoin.rs | 157 ------ docs/tinychain-legacy/docs/missing.md | 10 - docs/tinychain-legacy/docs/philosophy.md | 13 - docs/tinychain-legacy/pyproject.toml | 18 - docs/tinychain-legacy/scripts/bf2.py | 12 - docs/tinychain-legacy/scripts/loc.sh | 3 - .../scripts/make-sample-tx.py | 13 - docs/tinychain-legacy/scripts/sz.py | 74 --- docs/tinychain-legacy/src/__init__.py | 0 docs/ugly-parts.md | 3 +- 24 files changed, 9 insertions(+), 1067 deletions(-) rename docs/{state-sync.md => history-sync.md} (100%) rename docs/{ => ideas}/chat.md (100%) rename docs/{ => ideas}/realtime-zk-proving-rollup.md (100%) rename docs/{idea-uxtreeo.md => ideas/uxtreeo.md} (100%) delete mode 100644 docs/philosophy.md delete mode 100644
docs/tinychain-legacy/.gitignore delete mode 100644 docs/tinychain-legacy/Pipfile delete mode 100644 docs/tinychain-legacy/Pipfile.lock delete mode 100644 docs/tinychain-legacy/README.md delete mode 100644 docs/tinychain-legacy/docs/bitcoin.md delete mode 100644 docs/tinychain-legacy/docs/bitcoin.rs delete mode 100644 docs/tinychain-legacy/docs/missing.md delete mode 100644 docs/tinychain-legacy/docs/philosophy.md delete mode 100644 docs/tinychain-legacy/pyproject.toml delete mode 100644 docs/tinychain-legacy/scripts/bf2.py delete mode 100755 docs/tinychain-legacy/scripts/loc.sh delete mode 100644 docs/tinychain-legacy/scripts/make-sample-tx.py delete mode 100644 docs/tinychain-legacy/scripts/sz.py delete mode 100644 docs/tinychain-legacy/src/__init__.py diff --git a/docs/concepts.md b/docs/concepts.md index 94fd547..48ab393 100644 --- a/docs/concepts.md +++ b/docs/concepts.md @@ -23,6 +23,7 @@ This is an index of the concepts you will encounter throughout a Nakamoto blockc - Height / Depth. - Heaviest chain / accumulated work. - Sync. + - History vs. state sync. - Greedy search. Window sizing. - Bit sets. - Parallel downloads / chunking. @@ -41,6 +42,8 @@ This is an index of the concepts you will encounter throughout a Nakamoto blockc - Mining. - Block template - header and bundle. - Nonce. + - Puzzles. Guess. Solution. + - Hashrate. - Peers and networking. - Messages. - RPC. diff --git a/docs/differences-from-bitcoin.md b/docs/differences-from-bitcoin.md index b03e509..f637ce7 100644 --- a/docs/differences-from-bitcoin.md +++ b/docs/differences-from-bitcoin.md @@ -9,10 +9,10 @@ Design simplifications: Differences: * Transactions do not have a VM environment. - * The state model is not based on UXTO's or accounts. Tinychain computes state like an account-based chain, in that it stores an `account -> balance` mapping. But internally, it stores its state as state leafs - which are more similar to unique UXTO's than in Ethereum's model of accounts. 
* Bitcoin features protection against quantum computing attacks, since coins are locked to a preimage of a public key (RIPEMD(SHA256(pubkey))) using P2PKH rather than locked to a public key itself. Missing efficiencies: * The difficulty target is represented as `[32]bytes`; it is uncompressed. There is no `nBits` or custom mantissa. * Transaction signatures are in their uncompressed ECDSA form. They are `[65]bytes`, which includes the ECDSA signature type of `0x4`. There is no ECDSA signature recovery. + * Pubkeys are used in their raw form instead of their hash as in Bitcoin. diff --git a/docs/state-sync.md b/docs/history-sync.md similarity index 100% rename from docs/state-sync.md rename to docs/history-sync.md diff --git a/docs/chat.md b/docs/ideas/chat.md similarity index 100% rename from docs/chat.md rename to docs/ideas/chat.md diff --git a/docs/realtime-zk-proving-rollup.md b/docs/ideas/realtime-zk-proving-rollup.md similarity index 100% rename from docs/realtime-zk-proving-rollup.md rename to docs/ideas/realtime-zk-proving-rollup.md diff --git a/docs/idea-uxtreeo.md b/docs/ideas/uxtreeo.md similarity index 100% rename from docs/idea-uxtreeo.md rename to docs/ideas/uxtreeo.md diff --git a/docs/learnings.md b/docs/learnings.md index e6a1ecf..7b8cc8c 100644 --- a/docs/learnings.md +++ b/docs/learnings.md @@ -9,4 +9,6 @@ * there are lots of things I didn't anticipate: * signature caching * `blocks_transactions` table. Originally thought transactions.block was a one-to-one. Derp, it's many-to-many. - * to check if a block is fully synced, we need to + * It was at this point I learnt, there is no way to implement forwards iteration for the GetPath function. + + Why? Because you cannot know in the middle of a chain whether you are on the heaviest chain. Because the accumulated work may be low for the (n+1)th block, but then peak in the (n+2)th block. 
diff --git a/docs/philosophy.md b/docs/philosophy.md deleted file mode 100644 index 9e09c77..0000000 --- a/docs/philosophy.md +++ /dev/null @@ -1,18 +0,0 @@ -lol let's write down why this code is so based: - -1. minimum lines of code possible. this is the ultimate test of intelligence. intelligence is compression and prediction in one. can you produce a world model which is the smallest most accurate thing? this is what good code looks like - the smallest most functional thing. - -2. > "Show me your [code] and conceal your [data structures], and I shall continue to be mystified. Show me your [data structures], and I won't usually need your [code]; it'll be obvious." - -3. functions should only do 1 big thing. you can do "integrate all the other things" or you can do "this one particular thing" - -4. cut through the verbiage and just name things after what they really after - - RPC - just a way of invoking methods on objects across the network - wire protocol - how we encode the RPC (HTTP+JSON) - protocol - the abstract machine which we run the protocol on - -5. discover the core primitives through distillation - - state sync - iterative greedy search from a single base node in a block DAG - diff --git a/docs/testing.md b/docs/testing.md index c3b284e..a1b266a 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -85,8 +85,7 @@ There are a couple things to keep in mind while testing: Tinychain is implemented in Go, which means we use goroutines for parallelism/concurrency. We have to manually implement checks to wait for nodes to resolve to the state we are testing for; since `n(goroutines) > n(cpu's)`, we never have "true parallelism" as there are always multiple goroutines on a single CPU. However, for testing purposes, we can achieve something approximately similar to the theoretical desired design. - - **Bind to localhost**. When we create a test network, we create a set of nodes which each listen on a random open system port. 
It's important that the node listens on the `localhost` or `127.0.0.1` hostname - this is the lowest permission host. If we listen on `0.0.0.0` on macOS, then it can trigger permissions popups (for fuck's sake, I wish Steve was still here). - + - **Bind to localhost**. When we create a test network, we create a set of nodes which each listen on a random open system port. It's important that the node listens on the `localhost` or `127.0.0.1` hostname - this is the lowest permission host. If we listen on `0.0.0.0` on macOS, then it can trigger permissions popups (I wish Steve was still here). There are multiple helpers designed to construct test networks: diff --git a/docs/tinychain-legacy/.gitignore b/docs/tinychain-legacy/.gitignore deleted file mode 100644 index 0a3d954..0000000 --- a/docs/tinychain-legacy/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -__pycache__ -*.db -*.log diff --git a/docs/tinychain-legacy/Pipfile b/docs/tinychain-legacy/Pipfile deleted file mode 100644 index 047ec88..0000000 --- a/docs/tinychain-legacy/Pipfile +++ /dev/null @@ -1,20 +0,0 @@ -[[source]] -url = "https://pypi.org/simple" -verify_ssl = true -name = "pypi" - -[packages] -pyparsing = "*" -ecdsa = "*" -pyyaml = "*" -flask = "*" -requests = "*" -peewee = "*" -sqlalchemy = "*" -tabulate = "*" -colored-traceback = "*" - -[dev-packages] - -[requires] -python_version = "3.9" diff --git a/docs/tinychain-legacy/Pipfile.lock b/docs/tinychain-legacy/Pipfile.lock deleted file mode 100644 index 9b9cc05..0000000 --- a/docs/tinychain-legacy/Pipfile.lock +++ /dev/null @@ -1,518 +0,0 @@ -{ - "_meta": { - "hash": { - "sha256": "c01596eaca5732c460a942e5e1833bc025ddc6e3ed5480d6793350ae5e4d0f19" - }, - "pipfile-spec": 6, - "requires": { - "python_version": "3.9" - }, - "sources": [ - { - "name": "pypi", - "url": "https://pypi.org/simple", - "verify_ssl": true - } - ] - }, - "default": { - "blinker": { - "hashes": [ - "sha256:c3f865d4d54db7abc53758a01601cf343fe55b84c1de4e3fa910e420b438d5b9", - 
"sha256:e6820ff6fa4e4d1d8e2747c2283749c3f547e4fee112b98555cdcdae32996182" - ], - "markers": "python_version >= '3.8'", - "version": "==1.7.0" - }, - "certifi": { - "hashes": [ - "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1", - "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474" - ], - "markers": "python_version >= '3.6'", - "version": "==2023.11.17" - }, - "charset-normalizer": { - "hashes": [ - "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027", - "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087", - "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786", - "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8", - "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09", - "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185", - "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574", - "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e", - "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519", - "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898", - "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269", - "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3", - "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f", - "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6", - "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8", - "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a", - "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73", - "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc", - "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714", - "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2", - 
"sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc", - "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce", - "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d", - "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e", - "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6", - "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269", - "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96", - "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d", - "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a", - "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4", - "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77", - "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d", - "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0", - "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed", - "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068", - "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac", - "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25", - "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8", - "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab", - "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26", - "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2", - "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db", - "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f", - "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5", - "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99", - "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c", 
- "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d", - "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811", - "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa", - "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a", - "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03", - "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b", - "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04", - "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c", - "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001", - "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458", - "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389", - "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99", - "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985", - "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537", - "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238", - "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f", - "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d", - "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796", - "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a", - "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143", - "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8", - "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c", - "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5", - "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5", - "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711", - 
"sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4", - "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6", - "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c", - "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7", - "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4", - "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b", - "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae", - "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12", - "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c", - "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae", - "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8", - "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887", - "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b", - "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4", - "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f", - "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5", - "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33", - "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519", - "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561" - ], - "markers": "python_full_version >= '3.7.0'", - "version": "==3.3.2" - }, - "click": { - "hashes": [ - "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", - "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de" - ], - "markers": "python_version >= '3.7'", - "version": "==8.1.7" - }, - "colored-traceback": { - "hashes": [ - "sha256:6da7ce2b1da869f6bb54c927b415b95727c4bb6d9a84c4615ea77d9872911b05", - "sha256:f76c21a4b4c72e9e09763d4d1b234afc469c88693152a763ad6786467ef9e79f" - ], - 
"index": "pypi", - "version": "==0.3.0" - }, - "ecdsa": { - "hashes": [ - "sha256:190348041559e21b22a1d65cee485282ca11a6f81d503fddb84d5017e9ed1e49", - "sha256:80600258e7ed2f16b9aa1d7c295bd70194109ad5a30fdee0eaeefef1d4c559dd" - ], - "index": "pypi", - "version": "==0.18.0" - }, - "flask": { - "hashes": [ - "sha256:6489f51bb3666def6f314e15f19d50a1869a19ae0e8c9a3641ffe66c77d42403", - "sha256:ca631a507f6dfe6c278ae20112cea3ff54ff2216390bf8880f6b035a5354af13" - ], - "index": "pypi", - "version": "==3.0.1" - }, - "greenlet": { - "hashes": [ - "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67", - "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6", - "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257", - "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4", - "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676", - "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61", - "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc", - "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca", - "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7", - "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728", - "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305", - "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6", - "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379", - "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414", - "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04", - "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a", - "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf", - "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491", - 
"sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559", - "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e", - "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274", - "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb", - "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b", - "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9", - "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b", - "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be", - "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506", - "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405", - "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113", - "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f", - "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5", - "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230", - "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d", - "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f", - "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a", - "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e", - "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61", - "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6", - "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d", - "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71", - "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22", - "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2", - "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3", - "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067", 
- "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc", - "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881", - "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3", - "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e", - "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac", - "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53", - "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0", - "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b", - "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83", - "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41", - "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c", - "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf", - "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da", - "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33" - ], - "markers": "platform_machine == 'aarch64' or (platform_machine == 'ppc64le' or (platform_machine == 'x86_64' or (platform_machine == 'amd64' or (platform_machine == 'AMD64' or (platform_machine == 'win32' or platform_machine == 'WIN32')))))", - "version": "==3.0.3" - }, - "idna": { - "hashes": [ - "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca", - "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f" - ], - "markers": "python_version >= '3.5'", - "version": "==3.6" - }, - "importlib-metadata": { - "hashes": [ - "sha256:4805911c3a4ec7c3966410053e9ec6a1fecd629117df5adee56dfc9432a1081e", - "sha256:f238736bb06590ae52ac1fab06a3a9ef1d8dce2b7a35b5ab329371d6c8f5d2cc" - ], - "markers": "python_version < '3.10'", - "version": "==7.0.1" - }, - "itsdangerous": { - "hashes": [ - "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44", - 
"sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a" - ], - "markers": "python_version >= '3.7'", - "version": "==2.1.2" - }, - "jinja2": { - "hashes": [ - "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa", - "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90" - ], - "markers": "python_version >= '3.7'", - "version": "==3.1.3" - }, - "markupsafe": { - "hashes": [ - "sha256:0042d6a9880b38e1dd9ff83146cc3c9c18a059b9360ceae207805567aacccc69", - "sha256:0c26f67b3fe27302d3a412b85ef696792c4a2386293c53ba683a89562f9399b0", - "sha256:0fbad3d346df8f9d72622ac71b69565e621ada2ce6572f37c2eae8dacd60385d", - "sha256:15866d7f2dc60cfdde12ebb4e75e41be862348b4728300c36cdf405e258415ec", - "sha256:1c98c33ffe20e9a489145d97070a435ea0679fddaabcafe19982fe9c971987d5", - "sha256:21e7af8091007bf4bebf4521184f4880a6acab8df0df52ef9e513d8e5db23411", - "sha256:23984d1bdae01bee794267424af55eef4dfc038dc5d1272860669b2aa025c9e3", - "sha256:31f57d64c336b8ccb1966d156932f3daa4fee74176b0fdc48ef580be774aae74", - "sha256:3583a3a3ab7958e354dc1d25be74aee6228938312ee875a22330c4dc2e41beb0", - "sha256:36d7626a8cca4d34216875aee5a1d3d654bb3dac201c1c003d182283e3205949", - "sha256:396549cea79e8ca4ba65525470d534e8a41070e6b3500ce2414921099cb73e8d", - "sha256:3a66c36a3864df95e4f62f9167c734b3b1192cb0851b43d7cc08040c074c6279", - "sha256:3aae9af4cac263007fd6309c64c6ab4506dd2b79382d9d19a1994f9240b8db4f", - "sha256:3ab3a886a237f6e9c9f4f7d272067e712cdb4efa774bef494dccad08f39d8ae6", - "sha256:47bb5f0142b8b64ed1399b6b60f700a580335c8e1c57f2f15587bd072012decc", - "sha256:49a3b78a5af63ec10d8604180380c13dcd870aba7928c1fe04e881d5c792dc4e", - "sha256:4df98d4a9cd6a88d6a585852f56f2155c9cdb6aec78361a19f938810aa020954", - "sha256:5045e892cfdaecc5b4c01822f353cf2c8feb88a6ec1c0adef2a2e705eef0f656", - "sha256:5244324676254697fe5c181fc762284e2c5fceeb1c4e3e7f6aca2b6f107e60dc", - "sha256:54635102ba3cf5da26eb6f96c4b8c53af8a9c0d97b64bdcb592596a6255d8518", - 
"sha256:54a7e1380dfece8847c71bf7e33da5d084e9b889c75eca19100ef98027bd9f56", - "sha256:55d03fea4c4e9fd0ad75dc2e7e2b6757b80c152c032ea1d1de487461d8140efc", - "sha256:698e84142f3f884114ea8cf83e7a67ca8f4ace8454e78fe960646c6c91c63bfa", - "sha256:6aa5e2e7fc9bc042ae82d8b79d795b9a62bd8f15ba1e7594e3db243f158b5565", - "sha256:7653fa39578957bc42e5ebc15cf4361d9e0ee4b702d7d5ec96cdac860953c5b4", - "sha256:765f036a3d00395a326df2835d8f86b637dbaf9832f90f5d196c3b8a7a5080cb", - "sha256:78bc995e004681246e85e28e068111a4c3f35f34e6c62da1471e844ee1446250", - "sha256:7a07f40ef8f0fbc5ef1000d0c78771f4d5ca03b4953fc162749772916b298fc4", - "sha256:8b570a1537367b52396e53325769608f2a687ec9a4363647af1cded8928af959", - "sha256:987d13fe1d23e12a66ca2073b8d2e2a75cec2ecb8eab43ff5624ba0ad42764bc", - "sha256:9896fca4a8eb246defc8b2a7ac77ef7553b638e04fbf170bff78a40fa8a91474", - "sha256:9e9e3c4020aa2dc62d5dd6743a69e399ce3de58320522948af6140ac959ab863", - "sha256:a0b838c37ba596fcbfca71651a104a611543077156cb0a26fe0c475e1f152ee8", - "sha256:a4d176cfdfde84f732c4a53109b293d05883e952bbba68b857ae446fa3119b4f", - "sha256:a76055d5cb1c23485d7ddae533229039b850db711c554a12ea64a0fd8a0129e2", - "sha256:a76cd37d229fc385738bd1ce4cba2a121cf26b53864c1772694ad0ad348e509e", - "sha256:a7cc49ef48a3c7a0005a949f3c04f8baa5409d3f663a1b36f0eba9bfe2a0396e", - "sha256:abf5ebbec056817057bfafc0445916bb688a255a5146f900445d081db08cbabb", - "sha256:b0fe73bac2fed83839dbdbe6da84ae2a31c11cfc1c777a40dbd8ac8a6ed1560f", - "sha256:b6f14a9cd50c3cb100eb94b3273131c80d102e19bb20253ac7bd7336118a673a", - "sha256:b83041cda633871572f0d3c41dddd5582ad7d22f65a72eacd8d3d6d00291df26", - "sha256:b835aba863195269ea358cecc21b400276747cc977492319fd7682b8cd2c253d", - "sha256:bf1196dcc239e608605b716e7b166eb5faf4bc192f8a44b81e85251e62584bd2", - "sha256:c669391319973e49a7c6230c218a1e3044710bc1ce4c8e6eb71f7e6d43a2c131", - "sha256:c7556bafeaa0a50e2fe7dc86e0382dea349ebcad8f010d5a7dc6ba568eaaa789", - "sha256:c8f253a84dbd2c63c19590fa86a032ef3d8cc18923b8049d91bcdeeb2581fbf6", 
- "sha256:d18b66fe626ac412d96c2ab536306c736c66cf2a31c243a45025156cc190dc8a", - "sha256:d5291d98cd3ad9a562883468c690a2a238c4a6388ab3bd155b0c75dd55ece858", - "sha256:d5c31fe855c77cad679b302aabc42d724ed87c043b1432d457f4976add1c2c3e", - "sha256:d6e427c7378c7f1b2bef6a344c925b8b63623d3321c09a237b7cc0e77dd98ceb", - "sha256:dac1ebf6983148b45b5fa48593950f90ed6d1d26300604f321c74a9ca1609f8e", - "sha256:de8153a7aae3835484ac168a9a9bdaa0c5eee4e0bc595503c95d53b942879c84", - "sha256:e1a0d1924a5013d4f294087e00024ad25668234569289650929ab871231668e7", - "sha256:e7902211afd0af05fbadcc9a312e4cf10f27b779cf1323e78d52377ae4b72bea", - "sha256:e888ff76ceb39601c59e219f281466c6d7e66bd375b4ec1ce83bcdc68306796b", - "sha256:f06e5a9e99b7df44640767842f414ed5d7bedaaa78cd817ce04bbd6fd86e2dd6", - "sha256:f6be2d708a9d0e9b0054856f07ac7070fbe1754be40ca8525d5adccdbda8f475", - "sha256:f9917691f410a2e0897d1ef99619fd3f7dd503647c8ff2475bf90c3cf222ad74", - "sha256:fc1a75aa8f11b87910ffd98de62b29d6520b6d6e8a3de69a70ca34dea85d2a8a", - "sha256:fe8512ed897d5daf089e5bd010c3dc03bb1bdae00b35588c49b98268d4a01e00" - ], - "markers": "python_version >= '3.7'", - "version": "==2.1.4" - }, - "peewee": { - "hashes": [ - "sha256:3a56967f28a43ca7a4287f4803752aeeb1a57a08dee2e839b99868181dfb5df8" - ], - "index": "pypi", - "version": "==3.17.0" - }, - "pygments": { - "hashes": [ - "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c", - "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367" - ], - "markers": "python_version >= '3.7'", - "version": "==2.17.2" - }, - "pyparsing": { - "hashes": [ - "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb", - "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db" - ], - "index": "pypi", - "version": "==3.1.1" - }, - "pyyaml": { - "hashes": [ - "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5", - "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc", - 
"sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df", - "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741", - "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206", - "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27", - "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595", - "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62", - "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98", - "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696", - "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290", - "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9", - "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d", - "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6", - "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867", - "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47", - "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486", - "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6", - "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3", - "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007", - "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938", - "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0", - "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c", - "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735", - "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d", - "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28", - "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4", - "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", 
- "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", - "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef", - "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", - "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", - "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", - "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0", - "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515", - "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c", - "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c", - "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924", - "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34", - "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43", - "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859", - "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673", - "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54", - "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a", - "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b", - "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab", - "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa", - "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c", - "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585", - "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d", - "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f" - ], - "index": "pypi", - "version": "==6.0.1" - }, - "requests": { - "hashes": [ - "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f", - 
"sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1" - ], - "index": "pypi", - "version": "==2.31.0" - }, - "six": { - "hashes": [ - "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", - "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.16.0" - }, - "sqlalchemy": { - "hashes": [ - "sha256:0d3cab3076af2e4aa5693f89622bef7fa770c6fec967143e4da7508b3dceb9b9", - "sha256:0dacf67aee53b16f365c589ce72e766efaabd2b145f9de7c917777b575e3659d", - "sha256:10331f129982a19df4284ceac6fe87353ca3ca6b4ca77ff7d697209ae0a5915e", - "sha256:14a6f68e8fc96e5e8f5647ef6cda6250c780612a573d99e4d881581432ef1669", - "sha256:1b1180cda6df7af84fe72e4530f192231b1f29a7496951db4ff38dac1687202d", - "sha256:29049e2c299b5ace92cbed0c1610a7a236f3baf4c6b66eb9547c01179f638ec5", - "sha256:342d365988ba88ada8af320d43df4e0b13a694dbd75951f537b2d5e4cb5cd002", - "sha256:420362338681eec03f53467804541a854617faed7272fe71a1bfdb07336a381e", - "sha256:4344d059265cc8b1b1be351bfb88749294b87a8b2bbe21dfbe066c4199541ebd", - "sha256:4f7a7d7fcc675d3d85fbf3b3828ecd5990b8d61bd6de3f1b260080b3beccf215", - "sha256:555651adbb503ac7f4cb35834c5e4ae0819aab2cd24857a123370764dc7d7e24", - "sha256:59a21853f5daeb50412d459cfb13cb82c089ad4c04ec208cd14dddd99fc23b39", - "sha256:5fdd402169aa00df3142149940b3bf9ce7dde075928c1886d9a1df63d4b8de62", - "sha256:605b6b059f4b57b277f75ace81cc5bc6335efcbcc4ccb9066695e515dbdb3900", - "sha256:665f0a3954635b5b777a55111ababf44b4fc12b1f3ba0a435b602b6387ffd7cf", - "sha256:6f9e2e59cbcc6ba1488404aad43de005d05ca56e069477b33ff74e91b6319735", - "sha256:736ea78cd06de6c21ecba7416499e7236a22374561493b456a1f7ffbe3f6cdb4", - "sha256:74b080c897563f81062b74e44f5a72fa44c2b373741a9ade701d5f789a10ba23", - "sha256:75432b5b14dc2fff43c50435e248b45c7cdadef73388e5610852b95280ffd0e9", - "sha256:75f99202324383d613ddd1f7455ac908dca9c2dd729ec8584c9541dd41822a2c", - 
"sha256:790f533fa5c8901a62b6fef5811d48980adeb2f51f1290ade8b5e7ba990ba3de", - "sha256:798f717ae7c806d67145f6ae94dc7c342d3222d3b9a311a784f371a4333212c7", - "sha256:7c88f0c7dcc5f99bdb34b4fd9b69b93c89f893f454f40219fe923a3a2fd11625", - "sha256:7d505815ac340568fd03f719446a589162d55c52f08abd77ba8964fbb7eb5b5f", - "sha256:84daa0a2055df9ca0f148a64fdde12ac635e30edbca80e87df9b3aaf419e144a", - "sha256:87d91043ea0dc65ee583026cb18e1b458d8ec5fc0a93637126b5fc0bc3ea68c4", - "sha256:87f6e732bccd7dcf1741c00f1ecf33797383128bd1c90144ac8adc02cbb98643", - "sha256:884272dcd3ad97f47702965a0e902b540541890f468d24bd1d98bcfe41c3f018", - "sha256:8b8cb63d3ea63b29074dcd29da4dc6a97ad1349151f2d2949495418fd6e48db9", - "sha256:91f7d9d1c4dd1f4f6e092874c128c11165eafcf7c963128f79e28f8445de82d5", - "sha256:a2c69a7664fb2d54b8682dd774c3b54f67f84fa123cf84dda2a5f40dcaa04e08", - "sha256:a3be4987e3ee9d9a380b66393b77a4cd6d742480c951a1c56a23c335caca4ce3", - "sha256:a86b4240e67d4753dc3092d9511886795b3c2852abe599cffe108952f7af7ac3", - "sha256:aa9373708763ef46782d10e950b49d0235bfe58facebd76917d3f5cbf5971aed", - "sha256:b64b183d610b424a160b0d4d880995e935208fc043d0302dd29fee32d1ee3f95", - "sha256:b801154027107461ee992ff4b5c09aa7cc6ec91ddfe50d02bca344918c3265c6", - "sha256:bb209a73b8307f8fe4fe46f6ad5979649be01607f11af1eb94aa9e8a3aaf77f0", - "sha256:bc8b7dabe8e67c4832891a5d322cec6d44ef02f432b4588390017f5cec186a84", - "sha256:c51db269513917394faec5e5c00d6f83829742ba62e2ac4fa5c98d58be91662f", - "sha256:c55731c116806836a5d678a70c84cb13f2cedba920212ba7dcad53260997666d", - "sha256:cf18ff7fc9941b8fc23437cc3e68ed4ebeff3599eec6ef5eebf305f3d2e9a7c2", - "sha256:d24f571990c05f6b36a396218f251f3e0dda916e0c687ef6fdca5072743208f5", - "sha256:db854730a25db7c956423bb9fb4bdd1216c839a689bf9cc15fada0a7fb2f4570", - "sha256:dc55990143cbd853a5d038c05e79284baedf3e299661389654551bd02a6a68d7", - "sha256:e607cdd99cbf9bb80391f54446b86e16eea6ad309361942bf88318bcd452363c", - "sha256:ecf6d4cda1f9f6cb0b45803a01ea7f034e2f1aed9475e883410812d9f9e3cfcf", 
- "sha256:f2a159111a0f58fb034c93eeba211b4141137ec4b0a6e75789ab7a3ef3c7e7e3", - "sha256:f37c0caf14b9e9b9e8f6dbc81bc56db06acb4363eba5a633167781a48ef036ed", - "sha256:f5693145220517b5f42393e07a6898acdfe820e136c98663b971906120549da5" - ], - "index": "pypi", - "version": "==2.0.25" - }, - "tabulate": { - "hashes": [ - "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c", - "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f" - ], - "index": "pypi", - "version": "==0.9.0" - }, - "typing-extensions": { - "hashes": [ - "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783", - "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd" - ], - "markers": "python_version >= '3.8'", - "version": "==4.9.0" - }, - "urllib3": { - "hashes": [ - "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3", - "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54" - ], - "markers": "python_version >= '3.8'", - "version": "==2.1.0" - }, - "werkzeug": { - "hashes": [ - "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc", - "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10" - ], - "markers": "python_version >= '3.8'", - "version": "==3.0.1" - }, - "zipp": { - "hashes": [ - "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31", - "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0" - ], - "markers": "python_version >= '3.8'", - "version": "==3.17.0" - } - }, - "develop": {} -} diff --git a/docs/tinychain-legacy/README.md b/docs/tinychain-legacy/README.md deleted file mode 100644 index 0306080..0000000 --- a/docs/tinychain-legacy/README.md +++ /dev/null @@ -1,113 +0,0 @@ -tinychain -========= - -**an ultralight blockchain core, written in Python.** - -tinychain is the smallest implementation of a blockchain (BFT replicated state machine) you will ever find. 
It reimplements the full bitcoin consensus (Nakamoto consensus) with a custom VM based on Brainfuck. - -1366 lines of code so far. inspired by [geohot's tinygrad](https://github.com/geohot/tinygrad). - - * cryptography - * transactions - * consensus - Nakamoto POW - * VM’s - Brainfuck - * state machine - * gas markets - * protocol, RPC, and P2P networking - -Let the devs do what they do best - building cool stuff. - -| **Area** | **Description** | **Status** | -|--------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------| -| VM | [Brainfuck](https://en.wikipedia.org/wiki/Brainfuck) smart contracts | ✅⚠️ 60% Done | -| Consensus | Bitcoin / Nakamoto / POW with ZK-friendly hash function | ⚠️ WIP | -| Tokenomics | Ethereum-like - native token + fixed exchange rate to gas | ✅ Done | -| Cryptography | ECDSA wallets, SECP256k1 curve (same as BTC), SHA-2 256 bit hash function | ✅ Done | -| Networking | P2P and RPC servers both use HTTP, gossip network architecture | ⚠️ WIP | -| ZK proofs | ZK for compression of tinychain. Use either [groth16](https://github.com/erhant/zkbrainfuck) or [halo2](https://github.com/cryptape/ckb-bf-zkvm) SNARK proofs for brainfuck. TBA we will rework consensus/crypto to use SNARK-friendly tech (MiMC/Poseidon hash function, SNARK-friendly signature scheme) | | - -## Install. - -Requirements: - - * Python 3. - * `pipenv`, `pip` or something like it. - -Instructions: - -```sh -# Install dependencies. -pipenv install -pipenv shell -``` - -## Usage. - -Demo is a work-in-progress: - -```py -# Run two nodes which will mine and sync. -PYTHONPATH=./src python3 src/tinychain/consensus/bitcoin.py -PYTHONPATH=./src python3 src/tinychain/consensus/bitcoin.py -``` - -## Why? 
- -It takes too long to digest the architecture of any modern blockchain like Ethereum, Optimism, etc. - -geohot took PyTorch and distilled it into >10,000 LOC. let's do the same for a blockchain. - -maybe we'll learn some things along the way. - -## What is a blockchain? - -It's really quite an interesting combination of many things. - - * a blockchain is a P2P system based on a programmable database - * users can run programs on this database - * they run these programs by cryptographically signing transactions - * users pay nodes in tokens for running the network - * how is the cost of running transactions measured? - * the programs run inside a VM, which has a metering facility for resource usage in terms of computation and storage - * the unit of account for metering is called gas - * gas is bought in an algorithmic market for the blockchain's native token. This is usually implemented as a "gas price auction" - * the order in which these transactions are run is determined according to a consensus algorithm. - * the consensus algorithm elects a node which is given the power to decide the sequence of transactions for that epoch - * bitcoin uses proof-of-work, meaning that the more CPU's you have, the more likely you are to become the leader - * given the sequence of transactions, we can run the state machine - * the state machine bundles the VM, with a shared context of accounts and their gas credits - * and this is all bundled together in the node, which provides facilities for querying the state of database - -The goal of this project is to elucidate the primitives throughout this invention, in a very simple way, so you can experiment and play with different VM's and code. - -## Roadmap. - - - [x] VM - - [ ] smart contracts - - [x] wallet - - [x] transactions - - [ ] CLI - - [x] state machine - - [x] sequencer - - [x] accounts / gas - - [ ] node - - [ ] consensus - - [ ] networking - -See `node.py` for design. - -## Feature set. 
- - - **VM** and **state machine model**. Brainfuck is used as the programming runtime. It includes its own gas metering - 1 gas for computation, 3 gas for writing to memory. There is no in-built object model for now - there is only the Brainfuck VM, and its memory. Any program can write to memory and overwrite other Brainfuck. - - - **Gas market / tokenomics**. Like Ethereum, this chain has a token and an internal unit of account called gas. There is no progressive gas auctions (PGA's) yet - for now it is a fixed exchange rate (see `gas_market.py`). - - - **Consensus**. Bitcoin/Nakamoto consensus is currently being implemented, meaning the network runs via proof-of-work. In future, I hope to implement Tendermint proof-of-stake (see `consensus/tendermint.py` for more) with the token being staked actually hosted on an Ethereum network (L1/L2). - - - **Cryptography**. ECDSA is used for public-private cryptography, with the SECP256k1 curve (used by Bitcoin) and the SHA-2 hash function with size set to 256 bits. - - - **Networking**. The P2P protocol and node API server both run over HTTP. This was easy. - -``` -curl -X GET http://0.0.0.0:5100/api/machine_eval -H "Content-Type: application/json" -d '{"from_acc":"","to_acc":"","data":"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++."}' -``` diff --git a/docs/tinychain-legacy/docs/bitcoin.md b/docs/tinychain-legacy/docs/bitcoin.md deleted file mode 100644 index 2e2e324..0000000 --- a/docs/tinychain-legacy/docs/bitcoin.md +++ /dev/null @@ -1,89 +0,0 @@ -# Nakamoto consensus. - -Bitcoin consensus is really quite simple. - -``` -Block - Prev block hash - Transactions - Nonce - -Hashcash - a work algorithm, where you can measure CPU expenditure. - -Longest chain consensus -Longest = chain with the most accumulated work - -What is work? 
-Accumulated hashpower (measured by hashcash solutions) that follows the difficulty readjustment schedule - -Difficulty readjustment - in order to regulate the block production of the network, -the hashcash difficulty target is adjusted every epoch to target an epoch length of 2 weeks - -Epochs are defined every N blocks -Each block includes a timestamp, which is regulated by the network to be close to real clock time - - -Our algorithm for running this is really simple: -- maintain a block DAG structure -- each block has a parent and a nullable child -- each block has its height (depth in DAG path) -- each block has its accumulated difficulty stored with it -- each block has its index - -The "tip" of the chain refers to the block with the most accumulated difficulty - this is the longest chain rule - -There are only 3 routines: -- mine - produce blocks, gossip them -- verify_block - verify the POW solution, the transactions and their signatures -- ingest_block - add the block to the DAG -``` - - -## Bitcoin architecture. - -``` -Miner thread --> works on mining next block --> mine(block) takes a block, which includes existing details like difficulty target --> can be restarted --> start_mining: - takes the current mempool (txs) - and the current tip (block) - and mines until we get a signal to stop - then restarts - -Before we mine, we figure out what the current difficulty is. - -When we receive a new block: -- download any parents until we reach a common block -- then verify the earliest block to the latest -- we also recompute the difficulty for the epochs ingested -- when this is all done, we restart the miner on the freshest tip. 
- -When we mine a new block: -- broadcast the block -- mine on this new tip -``` - -Finally networking: - -``` -ConsensusEngine calls protocol to gossip blocks -Node uses ConsensusEngine to Sequence transactions -Node uses transaction Sequence to run replicated State Machine -Protocol uses Wire protocol to communicate over JSON-RPC HTTPS -Node uses Protocol to receive user requests to send transcations -Node receives user transactions and gossips them over network - - -This doesn't even do fee markets yet. But it should soon. - -``` - - -And then on top of this- the launch: - -``` -People will download the "brainnode" software - -``` \ No newline at end of file diff --git a/docs/tinychain-legacy/docs/bitcoin.rs b/docs/tinychain-legacy/docs/bitcoin.rs deleted file mode 100644 index 299cf0c..0000000 --- a/docs/tinychain-legacy/docs/bitcoin.rs +++ /dev/null @@ -1,157 +0,0 @@ -use crate::block::{Block, BlockHash}; -pub mod block; - -use std::fs; -use std::cmp; -use sha2::{Sha256, Digest}; -use std::time::{SystemTime}; -use ethereum_types::{U256}; - -const DICT_PATH: &str = "data/dict.txt"; - -// Returns the base-n representation of a number according to an -// alphabet specified in `symbols`. The length of symbols is the -// "base". -// -// e.g. symbols = ["0","1"] is the binary numeral system. 
-// -fn to_digits(num: u32, symbols: &Vec<&str>) -> String { - let base = u32::try_from(symbols.len()).unwrap(); - let mut digits = String::new(); - let mut n = num; - - while n > 0 { - let i = n.rem_euclid(base); - if !digits.is_empty() { - digits += " "; - } - digits += symbols[usize::try_from(i).unwrap()]; - n /= base; - } - - digits -} - -fn sha256_digest(content: &String) -> U256 { - let mut hasher = Sha256::new(); - hasher.update(content.clone()); - let digest = hasher.finalize(); - return U256::from(digest.as_slice()); -} - -// fn current_time() -> u64 { -// match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) { -// Ok(n) => n.as_secs(), -// Err(_) => panic!("SystemTime before UNIX EPOCH!"), -// } -// } - -fn solve_pow(target: U256, block: &Block, dict: &Vec<&str>) -> u32 { - let mut nonce_idx: u32 = 0; - let _base = dict.len(); - - let mut block2 = block.clone(); - - loop { - block2.nonce = nonce_idx; - - // Build the outrage string. - // Convert nonce number into alphabet of the outrage wordlist. - // let outrage = to_digits(nonce_idx, &dict).as_bytes(); - - // Compute SHA256 digest. - let mut hasher = Sha256::new(); - let buf = serde_json::to_vec(&block2).unwrap(); - hasher.update(buf); - let digest = hasher.finalize(); - - // Convert to U256 number. - let guess = U256::from(digest.as_slice()); - - if guess < target { - // println!("solved target={} value={} dist={} nonce=\"{}\"", target, guess, target - guess, outrage); - // target = target / 2; - return nonce_idx - } - - nonce_idx += 1; - } -} - -fn main() { - let word_dict = fs::read_to_string(DICT_PATH).expect("Unable to read file"); - let dict: Vec<&str> = word_dict.lines().collect(); - let NULL_HASH: U256 = U256::from(0); - - println!("Loaded dictionary of {} words", dict.len()); - - // - // Mining loop. - // - - // This implements a proof-of-work algorithm, whereby the miner - // searches for a value (`guess`) that is less than a `target`. 
- // The lower the target, the higher the difficulty involved in the search process. - // Unlike the usual PoW algorithms, the input content to the hash function is human-readable outrage propaganda. - // A nonce is generated, which is used to index into a dictionary of outrage content, and then thereby - // hashed. - let genesis_block = Block { - prev_block_hash: NULL_HASH, - number: 0, - dict_hash: sha256_digest(&word_dict.to_string()), - nonce: 0 - }; - let mut prev_block = genesis_block; - let mut target: U256 = U256::from_dec_str("4567192616659071619386515177772132323232230222220222226193865124364247891968").unwrap(); - - let EPOCH_LENGTH = 5; - let EPOCH_TARGET_TIMESPAN_SECONDS = 5; - let mut epoch_start_block_mined_at = SystemTime::now(); - - - println!("Starting miner..."); - println!(" difficulty = {}", target); - println!(); - - loop { - let mut pending_block = Block { - prev_block_hash: prev_block.block_hash(), - number: prev_block.number + 1, - dict_hash: sha256_digest(&word_dict.to_string()), - nonce: 0 - }; - - let guess = solve_pow(target, &pending_block, &dict); - let outrage = to_digits(guess, &dict); - println!("solved target={} value={} dist={} nonce=\"{}\"", target, guess, target - guess, outrage); - - // Seal block. - pending_block.nonce = guess; - pending_block.prev_block_hash = prev_block.block_hash(); - - prev_block = pending_block; - println!("mined block #{} hash={}\n", prev_block.number, prev_block.block_hash()); - - // Update difficulty/target. 
- if prev_block.number % EPOCH_LENGTH == 0 { - // 5 seconds for epoch of 5 blocks - - let mut timespan = SystemTime::now().duration_since(epoch_start_block_mined_at).unwrap().as_secs(); - if timespan < EPOCH_TARGET_TIMESPAN_SECONDS/4 { - timespan = EPOCH_TARGET_TIMESPAN_SECONDS/4; - } - if timespan > EPOCH_TARGET_TIMESPAN_SECONDS*4 { - timespan = EPOCH_TARGET_TIMESPAN_SECONDS*4; - } - - let epoch = prev_block.number / EPOCH_LENGTH; - let factor = timespan / EPOCH_TARGET_TIMESPAN_SECONDS; - target = target * timespan / EPOCH_TARGET_TIMESPAN_SECONDS; - - println!("epoch #{}", epoch); - println!("adjusting difficulty timespan={}s factor={}", timespan, (timespan as f64) / (EPOCH_TARGET_TIMESPAN_SECONDS as f64)); - - epoch_start_block_mined_at = SystemTime::now(); - } - } -} diff --git a/docs/tinychain-legacy/docs/missing.md b/docs/tinychain-legacy/docs/missing.md deleted file mode 100644 index 2414dfa..0000000 --- a/docs/tinychain-legacy/docs/missing.md +++ /dev/null @@ -1,10 +0,0 @@ -Missing pieces -============== - -## ECDSA public key recovery - -In Ethereum/Bitcoin, we can recover the public key from a signature. - -The way this works - ECDSA public keys have two points (R1, R2), and on each point there is an (X,Y). For any given signature, you can recover two public keys - since ECDSA is based on squaring and there are two solutions to the square root (+ve and -ve). By encoding a value when you create the signature which specifies the parity (positive/negative) of one of these points, you can reliably recover the correct public key from the signature. - -I had some trouble implementing this. \ No newline at end of file diff --git a/docs/tinychain-legacy/docs/philosophy.md b/docs/tinychain-legacy/docs/philosophy.md deleted file mode 100644 index 79669ba..0000000 --- a/docs/tinychain-legacy/docs/philosophy.md +++ /dev/null @@ -1,13 +0,0 @@ -lol let's write down why this code is so based: - -1. minimum lines of code possible. 
this is the ultimate test of intelligence. intelligence is compression and prediction in one. can you produce a world model which is the smallest most accurate thing? this is what good code looks like - the smallest most functional thing. - -2. > "Show me your [code] and conceal your [data structures], and I shall continue to be mystified. Show me your [data structures], and I won't usually need your [code]; it'll be obvious." - -3. functions should only do 1 big thing. you can do "integrate all the other things" or you can do "this one particular thing" - -4. cut through the verbiage and just name things after what they really after - - RPC - just a way of invoking methods on objects across the network - wire protocol - how we encode the RPC (HTTP+JSON) - protocol - the abstract machine which we run the protocol on \ No newline at end of file diff --git a/docs/tinychain-legacy/pyproject.toml b/docs/tinychain-legacy/pyproject.toml deleted file mode 100644 index 356f27f..0000000 --- a/docs/tinychain-legacy/pyproject.toml +++ /dev/null @@ -1,18 +0,0 @@ -[project] -name = "tinychain" -version = "0.0.1" -authors = [ - { name="Example Author", email="author@example.com" }, -] -description = "A small example package" -readme = "README.md" -requires-python = ">=3.8" -classifiers = [ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: MIT License", - "Operating System :: OS Independent", -] - -[project.urls] -Homepage = "https://github.com/pypa/sampleproject" -Issues = "https://github.com/pypa/sampleproject/issues" \ No newline at end of file diff --git a/docs/tinychain-legacy/scripts/bf2.py b/docs/tinychain-legacy/scripts/bf2.py deleted file mode 100644 index f949ed3..0000000 --- a/docs/tinychain-legacy/scripts/bf2.py +++ /dev/null @@ -1,12 +0,0 @@ -s = """ -++ -[>+<-] ->[-[>+x++<-]x+>[<+>-]<] -""" - -print( - s - .replace("temp0", ">>>") - .replace("temp1", ">>") - .replace("x", ">") -) diff --git a/docs/tinychain-legacy/scripts/loc.sh 
b/docs/tinychain-legacy/scripts/loc.sh deleted file mode 100755 index 0bfd738..0000000 --- a/docs/tinychain-legacy/scripts/loc.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -LOC=`find . -type f -name "*.py" -exec cat {} + | grep -v '^ *#' | grep -v '^\s*$' | wc -l` -echo $LOC lines of code diff --git a/docs/tinychain-legacy/scripts/make-sample-tx.py b/docs/tinychain-legacy/scripts/make-sample-tx.py deleted file mode 100644 index ad04d87..0000000 --- a/docs/tinychain-legacy/scripts/make-sample-tx.py +++ /dev/null @@ -1,13 +0,0 @@ - -msg = 'so long and thanks for all the fish' -def generate_bf_program(input_string): - bf_program = '' - bf_program += '[-]' # Clear the current cell - - for char in input_string: - bf_program += '+' * ord(char) + '.>' # Increment the value, print character, move to the next cell - - bf_program += '[-]' # Clear the last cell - - return bf_program -print(generate_bf_program(msg)) \ No newline at end of file diff --git a/docs/tinychain-legacy/scripts/sz.py b/docs/tinychain-legacy/scripts/sz.py deleted file mode 100644 index 3e1669b..0000000 --- a/docs/tinychain-legacy/scripts/sz.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python3 -import os, sys -import token -import tokenize -import itertools -from tabulate import tabulate - -TOKEN_WHITELIST = [token.OP, token.NAME, token.NUMBER, token.STRING] - -def gen_stats(base_path="."): - table = [] - for path, _, files in os.walk(os.path.join(base_path, "src")): - for name in files: - if not name.endswith(".py"): continue - filepath = os.path.join(path, name) - relfilepath = os.path.relpath(filepath, base_path) - with tokenize.open(filepath) as file_: - tokens = [t for t in tokenize.generate_tokens(file_.readline) if t.type in TOKEN_WHITELIST] - token_count, line_count = len(tokens), len(set([x for t in tokens for x in range(t.start[0], t.end[0]+1)])) - if line_count == 0 or token_count == 0: continue - table.append([relfilepath, line_count, token_count/line_count]) - return table - -def 
gen_diff(table_old, table_new): - table = [] - files_new = set([x[0] for x in table_new]) - files_old = set([x[0] for x in table_old]) - added, deleted, unchanged = files_new - files_old, files_old - files_new, files_new & files_old - if added: - for file in added: - file_stat = [stats for stats in table_new if file in stats] - table.append([file_stat[0][0], file_stat[0][1], file_stat[0][1]-0, file_stat[0][2], file_stat[0][2]-0]) - if deleted: - for file in deleted: - file_stat = [stats for stats in table_old if file in stats] - table.append([file_stat[0][0], 0, 0 - file_stat[0][1], 0, 0-file_stat[0][2]]) - if unchanged: - for file in unchanged: - file_stat_old = [stats for stats in table_old if file in stats] - file_stat_new = [stats for stats in table_new if file in stats] - if file_stat_new[0][1]-file_stat_old[0][1] != 0 or file_stat_new[0][2]-file_stat_old[0][2] != 0: - table.append([file_stat_new[0][0], file_stat_new[0][1], file_stat_new[0][1]-file_stat_old[0][1], file_stat_new[0][2], - file_stat_new[0][2]-file_stat_old[0][2]]) - return table - -def display_diff(diff): return "+"+str(diff) if diff > 0 else str(diff) - -if __name__ == "__main__": - if len(sys.argv) == 3: - headers = ["Name", "Lines", "Diff", "Tokens/Line", "Diff"] - table = gen_diff(gen_stats(sys.argv[1]), gen_stats(sys.argv[2])) - elif len(sys.argv) == 2: - headers = ["Name", "Lines", "Tokens/Line"] - table = gen_stats(sys.argv[1]) - else: - headers = ["Name", "Lines", "Tokens/Line"] - table = gen_stats(".") - - if table: - if len(sys.argv) == 3: - print("### Changes") - print("```") - print(tabulate([headers] + sorted(table, key=lambda x: -x[1]), headers="firstrow", intfmt=(..., "d", "+d"), - floatfmt=(..., ..., ..., ".1f", "+.1f"))+"\n") - print(f"\ntotal lines changes: {display_diff(sum([x[2] for x in table]))}") - print("```") - else: - print(tabulate([headers] + sorted(table, key=lambda x: -x[1]), headers="firstrow", floatfmt=".1f")+"\n") - for dir_name, group in 
itertools.groupby(sorted([(x[0].rsplit("/", 1)[0], x[1], x[2]) for x in table]), key=lambda x:x[0]): - print(f"{dir_name:30s} : {sum([x[1] for x in group]):6d}") - total_lines = sum([x[1] for x in table]) - print(f"\ntotal line count: {total_lines}") - max_line_count = int(os.getenv("MAX_LINE_COUNT", "-1")) - assert max_line_count == -1 or total_lines < max_line_count, f"OVER {max_line_count} LINES" diff --git a/docs/tinychain-legacy/src/__init__.py b/docs/tinychain-legacy/src/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/docs/ugly-parts.md b/docs/ugly-parts.md index 704708c..c5d58d4 100644 --- a/docs/ugly-parts.md +++ b/docs/ugly-parts.md @@ -2,5 +2,4 @@ Ugly parts ========== * Boilerplate: - * The `netpeer.go` generally has a lot of repeated code for implementing RPC methods, message handlers, deserialisation. - * \ No newline at end of file + * The `netpeer.go` generally has a lot of repeated code for implementing RPC methods, message handlers, deserialisation. \ No newline at end of file From e0fecdd2287796d3f317e9238308e42b2ecdba38 Mon Sep 17 00:00:00 2001 From: liamzebedee Date: Wed, 22 Jan 2025 22:28:26 +1100 Subject: [PATCH 15/15] doc: DESIGN --- DESIGN.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 DESIGN.md diff --git a/DESIGN.md b/DESIGN.md new file mode 100644 index 0000000..20bd6d5 --- /dev/null +++ b/DESIGN.md @@ -0,0 +1,18 @@ +Design +====== + +## Methodology. + + 1. Make it work. + 2. Simplify / delete. + 3. Optimize. Make it efficient/fast. + +## Philosophy. + +1. Minimum lines of code possible. This is the ultimate test of intelligence. Intelligence is compression and prediction in one. Can you produce a world model which is the smallest most accurate thing? This is what good code looks like - the smallest most functional thing. + +This has many advantages. Fewest lines of code for same functionality. Purer more functional code. More auditable. 
More extensible more easily without additional abstraction loading. + +2. Discover the core primitives through distillation. + +3. > "Show me your [code] and conceal your [data structures], and I shall continue to be mystified. Show me your [data structures], and I won't usually need your [code]; it'll be obvious." \ No newline at end of file