[Storage] Refactor index execution result #8005
base: master
@@ -39,7 +39,6 @@ import (
 	read_execution_state "github.com/onflow/flow-go/cmd/util/cmd/read-execution-state"
 	read_hotstuff "github.com/onflow/flow-go/cmd/util/cmd/read-hotstuff/cmd"
 	read_protocol_state "github.com/onflow/flow-go/cmd/util/cmd/read-protocol-state/cmd"
-	index_er "github.com/onflow/flow-go/cmd/util/cmd/reindex/cmd"
 	rollback_executed_height "github.com/onflow/flow-go/cmd/util/cmd/rollback-executed-height/cmd"
 	run_script "github.com/onflow/flow-go/cmd/util/cmd/run-script"
 	"github.com/onflow/flow-go/cmd/util/cmd/snapshot"

@@ -111,7 +110,6 @@ func addCommands() {
 	rootCmd.AddCommand(leaders.Cmd)
 	rootCmd.AddCommand(epochs.RootCmd)
 	rootCmd.AddCommand(edbs.RootCmd)
-	rootCmd.AddCommand(index_er.RootCmd)
 	rootCmd.AddCommand(rollback_executed_height.Cmd)
 	rootCmd.AddCommand(read_execution_state.Cmd)
 	rootCmd.AddCommand(snapshot.Cmd)

Review comment (on the removal of index_er.RootCmd): This reindex tool is no longer useful after this PR. Since we no longer allow multiple execution results to be stored for the same block, there are no longer multiple results to choose from when reindexing.
@@ -125,6 +125,7 @@ func (s *Suite) SetupTest() {
 	s.receipts = new(storagemock.ExecutionReceipts)
 	s.transactions = new(storagemock.Transactions)
 	s.results = new(storagemock.ExecutionResults)
+	s.results.On("BatchIndex", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()

Review comment: would be great to verify that the lock is held

 	collectionsToMarkFinalized := stdmap.NewTimes(100)
 	collectionsToMarkExecuted := stdmap.NewTimes(100)
 	blocksToMarkExecuted := stdmap.NewTimes(100)
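One way to address the reviewer's request is sketched below. This is only a sketch under assumptions: it supposes that the first argument passed to BatchIndex is a lock context exposing a hypothetical HoldsLock(name string) method (the real lock-manager API in flow-go may differ), and it uses testify's Run hook to inspect the captured arguments instead of discarding them with mock.Anything.

```go
// Sketch only: lockContext, resultsMock, and the lock name
// "index_execution_result" are hypothetical placeholders, not the real flow-go types.
package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// lockContext stands in for whatever lock/proof type BatchIndex receives.
type lockContext interface {
	HoldsLock(name string) bool
}

// resultsMock is a minimal hand-written stand-in for storagemock.ExecutionResults.
type resultsMock struct{ mock.Mock }

func (m *resultsMock) BatchIndex(lctx lockContext, batch any, blockID, resultID string) error {
	return m.Called(lctx, batch, blockID, resultID).Error(0)
}

func TestBatchIndexHoldsLock(t *testing.T) {
	results := new(resultsMock)

	results.
		On("BatchIndex", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
		Run(func(args mock.Arguments) {
			// Inspect the lock-context argument instead of ignoring it.
			lctx, ok := args.Get(0).(lockContext)
			require.True(t, ok, "first argument should be a lock context")
			require.True(t, lctx.HoldsLock("index_execution_result"),
				"BatchIndex must be called while holding the indexing lock")
		}).
		Return(nil)

	// ... exercise the code under test here; every BatchIndex call will be checked.
}
```

With this pattern, any BatchIndex call made without the expected lock held fails the test instead of being silently accepted.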
@@ -208,6 +209,8 @@ func (s *Suite) initEngineAndSyncer() (*Engine, *collections.Syncer, *collection
 		s.net,
 		s.proto.state,
 		s.me,
+		s.lockManager,
+		s.db,
 		s.blocks,
 		s.results,
 		s.receipts,
@@ -297,7 +300,7 @@ func (s *Suite) TestOnFinalizedBlockSingle() {
 	}

 	// expect that the block storage is indexed with each of the collection guarantee
-	s.blocks.On("IndexBlockContainingCollectionGuarantees", block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once()
+	s.blocks.On("BatchIndexBlockContainingCollectionGuarantees", mock.Anything, mock.Anything, block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once()

Review comment: would be great to verify that the lock is held

 	for _, seal := range block.Payload.Seals {
 		s.results.On("Index", seal.BlockID, seal.ResultID).Return(nil).Once()
 	}
@@ -324,7 +327,7 @@ func (s *Suite) TestOnFinalizedBlockSingle() {
 	// assert that the block was retrieved and all collections were requested
 	s.headers.AssertExpectations(s.T())
 	s.request.AssertNumberOfCalls(s.T(), "EntityByID", len(block.Payload.Guarantees))
-	s.results.AssertNumberOfCalls(s.T(), "Index", len(block.Payload.Seals))
+	s.results.AssertNumberOfCalls(s.T(), "BatchIndex", len(block.Payload.Seals))
 }

 // TestOnFinalizedBlockSeveralBlocksAhead checks OnFinalizedBlock with a block several blocks newer than the last block processed
@@ -384,7 +387,7 @@ func (s *Suite) TestOnFinalizedBlockSeveralBlocksAhead() {

 	// expected all new blocks after last block processed
 	for _, block := range blocks {
-		s.blocks.On("IndexBlockContainingCollectionGuarantees", block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once()
+		s.blocks.On("BatchIndexBlockContainingCollectionGuarantees", mock.Anything, mock.Anything, block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once()

Review comment: same: would be great to verify that the lock is held

 		for _, cg := range block.Payload.Guarantees {
 			s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Run(func(args mock.Arguments) {
@@ -412,9 +415,9 @@ func (s *Suite) TestOnFinalizedBlockSeveralBlocksAhead() {
 	}

 	s.headers.AssertExpectations(s.T())
-	s.blocks.AssertNumberOfCalls(s.T(), "IndexBlockContainingCollectionGuarantees", newBlocksCount)
+	s.blocks.AssertNumberOfCalls(s.T(), "BatchIndexBlockContainingCollectionGuarantees", newBlocksCount)
 	s.request.AssertNumberOfCalls(s.T(), "EntityByID", expectedEntityByIDCalls)
-	s.results.AssertNumberOfCalls(s.T(), "Index", expectedIndexCalls)
+	s.results.AssertNumberOfCalls(s.T(), "BatchIndex", expectedIndexCalls)
 }

 // TestExecutionReceiptsAreIndexed checks that execution receipts are properly indexed
@@ -517,9 +520,9 @@ func (s *Suite) TestCollectionSyncing() {

 	// setup finalized block indexer mocks
 	guaranteeIDs := []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))
-	s.blocks.On("IndexBlockContainingCollectionGuarantees", block.ID(), guaranteeIDs).Return(nil).Once()
+	s.blocks.On("BatchIndexBlockContainingCollectionGuarantees", mock.Anything, mock.Anything, block.ID(), guaranteeIDs).Return(nil).Once()
 	for _, seal := range payload.Seals {
-		s.results.On("Index", seal.BlockID, seal.ResultID).Return(nil).Once()
+		s.results.On("BatchIndex", mock.Anything, mock.Anything, seal.BlockID, seal.ResultID).Return(nil).Once()
 	}

 	// initialize the engine using the initial finalized block
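Taken together, the updated expectations imply a batch-oriented indexing API: each call now takes two extra leading arguments, matched here with mock.Anything, which appear to be a lock context and a batch writer, followed by the identifiers being indexed. Below is a rough sketch of that shape; every type and function name in it is hypothetical and inferred only from these mocks, not taken from the actual flow-go storage interfaces.

```go
// Sketch: how a finalized-block indexer might use the batch-style API implied
// by the mocks above. All names here are hypothetical placeholders.
package example

// Placeholder types standing in for the real flow-go / lock / storage types.
type (
	Identifier [32]byte
	Seal       struct{ BlockID, ResultID Identifier }
	LockProof  interface{ HoldsLock(name string) bool }
	Batch      interface{}
)

type ExecutionResults interface {
	// BatchIndex adds the blockID -> resultID index entry to an existing batch
	// instead of committing its own write, and takes the caller's lock proof.
	BatchIndex(lctx LockProof, batch Batch, blockID, resultID Identifier) error
}

type Blocks interface {
	BatchIndexBlockContainingCollectionGuarantees(lctx LockProof, batch Batch, blockID Identifier, guaranteeIDs []Identifier) error
}

// indexFinalizedBlock groups all index writes for one finalized block into a
// single batch, mirroring the call pattern the updated tests expect.
func indexFinalizedBlock(lctx LockProof, batch Batch, blocks Blocks, results ExecutionResults,
	blockID Identifier, guaranteeIDs []Identifier, seals []Seal) error {

	if err := blocks.BatchIndexBlockContainingCollectionGuarantees(lctx, batch, blockID, guaranteeIDs); err != nil {
		return err
	}
	for _, seal := range seals {
		if err := results.BatchIndex(lctx, batch, seal.BlockID, seal.ResultID); err != nil {
			return err
		}
	}
	return nil
}
```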
Comment: Instead of reusing the same lock, I think we should acquire a lock that is specific to the data we are updating. That's why I changed this to use a new lock.
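A minimal sketch of that idea, assuming a lock manager that hands out locks by name (the manager, method names, and lock names below are hypothetical, not the actual flow-go API): the indexing path acquires a lock dedicated to execution-result indexing instead of reusing a broader shared lock.

```go
// Sketch of data-specific locking: a lock manager hands out one named lock per
// kind of data being updated, so execution-result indexing does not contend
// with unrelated writes. All names below are hypothetical.
package example

import "sync"

// LockManager hands out one mutex per named resource.
type LockManager struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

func NewLockManager() *LockManager {
	return &LockManager{locks: make(map[string]*sync.Mutex)}
}

// Acquire locks the named lock and returns a release function.
func (m *LockManager) Acquire(name string) (release func()) {
	m.mu.Lock()
	l, ok := m.locks[name]
	if !ok {
		l = &sync.Mutex{}
		m.locks[name] = l
	}
	m.mu.Unlock()

	l.Lock()
	return l.Unlock
}

// Hypothetical lock names: one per kind of data being updated.
const (
	LockIndexExecutionResult = "storage.index_execution_result"
	LockInsertBlock          = "storage.insert_block"
)

func indexResultExample(mgr *LockManager, index func() error) error {
	release := mgr.Acquire(LockIndexExecutionResult) // data-specific lock, not a shared global one
	defer release()
	return index()
}
```

Keeping locks scoped to the specific data being written narrows contention and makes it clearer which writes a given lock actually protects.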