Skip to content
This repository was archived by the owner on Nov 6, 2020. It is now read-only.

Commit 620c354

Browse files
author
adria0
committed
Merge branch 'master' of github.com:openethereum/openethereum into adria0/fix/tests
2 parents 78e358d + 28207a8 commit 620c354

File tree

15 files changed

+75
-55
lines changed

15 files changed

+75
-55
lines changed

.github/workflows/build-test.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,9 @@ on:
99
jobs:
1010
build-tests:
1111
name: Test and Build
12+
env:
13+
SCCACHE_CACHE_SIZE: "1G"
14+
SCCACHE_IDLE_TIMEOUT: 0
1215
strategy:
1316
matrix:
1417
platform:

.github/workflows/build.yml

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,9 @@ on:
1010
jobs:
1111
build:
1212
name: Build Release
13+
env:
14+
SCCACHE_CACHE_SIZE: "1G"
15+
SCCACHE_IDLE_TIMEOUT: 0
1316
strategy:
1417
matrix:
1518
platform:
@@ -89,4 +92,4 @@ jobs:
8992
path: artifacts/
9093
- name: Prepare build directory for cache
9194
shell: bash
92-
run: bash scripts/actions/clean-target.sh
95+
run: bash scripts/actions/clean-target.sh

.github/workflows/check.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,9 @@ jobs:
1010
check:
1111
name: Check
1212
runs-on: ubuntu-16.04
13+
env:
14+
SCCACHE_CACHE_SIZE: "1G"
15+
SCCACHE_IDLE_TIMEOUT: 0
1316
steps:
1417
- name: Checkout sources
1518
uses: actions/checkout@master

ethcore/snapshot/src/lib.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -435,6 +435,8 @@ impl StateRebuilder {
435435
}
436436
}
437437

438+
let batch = self.db.drain_transaction_overlay()?;
439+
self.db.backing().write(batch)?;
438440
Ok(())
439441
}
440442

parity/db/rocksdb/migration.rs

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -186,7 +186,10 @@ fn migrate_database(version: u32, db_path: &Path, mut migrations: MigrationManag
186186

187187
// completely in-place migration leads to the paths being equal.
188188
// in that case, no need to shuffle directories.
189-
if temp_path == db_path { return Ok(()) }
189+
if temp_path == db_path {
190+
trace!(target: "migrate", "In-place migration ran; leaving old database in place.");
191+
return Ok(())
192+
}
190193

191194
// create backup
192195
fs::rename(&db_path, &backup_path)?;

scripts/actions/build-windows.sh

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,8 @@
11
#!/bin/bash
22
set -e # fail on any error
33
set -u # treat unset variables as error
4-
5-
# NOTE: Enables the aes-ni instructions for RustCrypto dependency.
6-
# If you change this please remember to also update .cargo/config
4+
# NOTE: Enables the aes-ni instructions for RustCrypto dependency.
5+
# If you change this please remember to also update .cargo/config
76
export RUSTFLAGS=" -Ctarget-feature=+aes,+sse2,+ssse3 -Ctarget-feature=+crt-static -Clink-arg=-s"
87

98
echo "_____ Build OpenEthereum and tools _____"

scripts/actions/install-sccache.ps1

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,5 @@
11
#!/usr/bin/env pwsh
22
$os=$args[0]
3-
$SCCACHE_CACHE_SIZE="1G"
4-
$SCCACHE_IDLE_TIMEOUT=0
53
$version="0.2.12"
64
echo "Current OS:" $os
75
switch ($os){

scripts/docker/alpine/Dockerfile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ RUN apk add --no-cache \
1111
linux-headers \
1212
perl \
1313
rust \
14+
git \
1415
clang-dev \
1516
llvm-dev
1617

util/journaldb/src/archivedb.rs

Lines changed: 6 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -142,9 +142,8 @@ impl JournalDB for ArchiveDB {
142142
Ok(0)
143143
}
144144

145-
fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
146-
let mut inserts = 0usize;
147-
let mut deletes = 0usize;
145+
fn drain_transaction_overlay(&mut self) -> io::Result<DBTransaction> {
146+
let mut batch = DBTransaction::new();
148147

149148
for i in self.overlay.drain() {
150149
let (key, (value, rc)) = i;
@@ -153,19 +152,17 @@ impl JournalDB for ArchiveDB {
153152
return Err(error_key_already_exists(&key));
154153
}
155154
batch.put(self.column, key.as_bytes(), &value);
156-
inserts += 1;
157155
}
158156
if rc < 0 {
159157
assert!(rc == -1);
160158
if self.backing.get(self.column, key.as_bytes())?.is_none() {
161159
return Err(error_negatively_reference_hash(&key));
162160
}
163161
batch.delete(self.column, key.as_bytes());
164-
deletes += 1;
165162
}
166163
}
167164

168-
Ok((inserts + deletes) as u32)
165+
Ok(batch)
169166
}
170167

171168
fn latest_era(&self) -> Option<u64> { self.latest_era }
@@ -209,7 +206,7 @@ mod tests {
209206
use hash_db::{HashDB, EMPTY_PREFIX};
210207
use super::*;
211208
use kvdb_memorydb;
212-
use crate::{JournalDB, inject_batch, commit_batch};
209+
use crate::{JournalDB, drain_overlay, commit_batch};
213210

214211
#[test]
215212
fn insert_same_in_fork() {
@@ -463,11 +460,11 @@ mod tests {
463460
fn inject() {
464461
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
465462
let key = jdb.insert(EMPTY_PREFIX, b"dog");
466-
inject_batch(&mut jdb).unwrap();
463+
drain_overlay(&mut jdb).unwrap();
467464

468465
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
469466
jdb.remove(&key, EMPTY_PREFIX);
470-
inject_batch(&mut jdb).unwrap();
467+
drain_overlay(&mut jdb).unwrap();
471468

472469
assert!(jdb.get(&key, EMPTY_PREFIX).is_none());
473470
}

util/journaldb/src/earlymergedb.rs

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -474,11 +474,9 @@ impl JournalDB for EarlyMergeDB {
474474
Ok(0)
475475
}
476476

477-
fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
478-
let mut ops = 0;
477+
fn drain_transaction_overlay(&mut self) -> io::Result<DBTransaction> {
478+
let mut batch = DBTransaction::new();
479479
for (key, (value, rc)) in self.overlay.drain() {
480-
if rc != 0 { ops += 1 }
481-
482480
match rc {
483481
0 => {}
484482
1 => {
@@ -497,7 +495,7 @@ impl JournalDB for EarlyMergeDB {
497495
}
498496
}
499497

500-
Ok(ops)
498+
Ok(batch)
501499
}
502500

503501
fn consolidate(&mut self, with: super::MemoryDB) {
@@ -529,7 +527,7 @@ mod tests {
529527
use hash_db::{HashDB, EMPTY_PREFIX};
530528
use super::*;
531529
use kvdb_memorydb;
532-
use crate::{inject_batch, commit_batch};
530+
use crate::{drain_overlay, commit_batch};
533531

534532
#[test]
535533
fn insert_same_in_fork() {
@@ -1050,11 +1048,11 @@ mod tests {
10501048
fn inject() {
10511049
let mut jdb = new_db();
10521050
let key = jdb.insert(EMPTY_PREFIX, b"dog");
1053-
inject_batch(&mut jdb).unwrap();
1051+
drain_overlay(&mut jdb).unwrap();
10541052

10551053
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
10561054
jdb.remove(&key, EMPTY_PREFIX);
1057-
inject_batch(&mut jdb).unwrap();
1055+
drain_overlay(&mut jdb).unwrap();
10581056

10591057
assert!(jdb.get(&key, EMPTY_PREFIX).is_none());
10601058
}

util/journaldb/src/lib.rs

Lines changed: 6 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -72,11 +72,8 @@ pub trait JournalDB: HashDB<KeccakHasher, DBValue> {
7272
/// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions
7373
/// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated.
7474
///
75-
/// Any keys or values inserted or deleted must be completely independent of those affected
76-
/// by any previous `commit` operations. Essentially, this means that `inject` can be used
77-
/// either to restore a state to a fresh database, or to insert data which may only be journalled
78-
/// from this point onwards.
79-
fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32>;
75+
/// Returns a transaction to be committed.
76+
fn drain_transaction_overlay(&mut self) -> io::Result<DBTransaction>;
8077

8178
/// State data query
8279
fn state(&self, _id: &H256) -> Option<Bytes>;
@@ -213,12 +210,11 @@ pub fn new_memory_db() -> MemoryDB {
213210
MemoryDB::from_null_node(&rlp::NULL_RLP, rlp::NULL_RLP.as_ref().into())
214211
}
215212

216-
#[cfg(test)]
217213
/// Inject all changes in a single batch.
218-
pub fn inject_batch(jdb: &mut dyn JournalDB) -> io::Result<u32> {
219-
let mut batch = jdb.backing().transaction();
220-
let res = jdb.inject(&mut batch)?;
221-
jdb.backing().write(batch).map(|_| res).map_err(Into::into)
214+
#[cfg(test)]
215+
pub fn drain_overlay(jdb: &mut dyn JournalDB) -> io::Result<()> {
216+
let batch = jdb.drain_transaction_overlay()?;
217+
jdb.backing().write(batch).map_err(Into::into)
222218
}
223219

224220
/// Commit all changes in a single batch

util/journaldb/src/overlayrecentdb.rs

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -397,11 +397,9 @@ impl JournalDB for OverlayRecentDB {
397397
Ok(ops as u32)
398398
}
399399

400-
fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
401-
let mut ops = 0;
400+
fn drain_transaction_overlay(&mut self) -> io::Result<DBTransaction> {
401+
let mut batch = DBTransaction::new();
402402
for (key, (value, rc)) in self.transaction_overlay.drain() {
403-
if rc != 0 { ops += 1 }
404-
405403
match rc {
406404
0 => {}
407405
_ if rc > 0 => {
@@ -417,7 +415,7 @@ impl JournalDB for OverlayRecentDB {
417415
}
418416
}
419417

420-
Ok(ops)
418+
Ok(batch)
421419
}
422420

423421
fn state(&self, key: &H256) -> Option<Bytes> {
@@ -507,7 +505,7 @@ mod tests {
507505
use super::*;
508506
use hash_db::{HashDB, EMPTY_PREFIX};
509507
use kvdb_memorydb;
510-
use crate::{JournalDB, inject_batch, commit_batch};
508+
use crate::{JournalDB, drain_overlay, commit_batch};
511509

512510
fn new_db() -> OverlayRecentDB {
513511
let backing = Arc::new(kvdb_memorydb::create(1));
@@ -1026,11 +1024,11 @@ mod tests {
10261024
fn inject() {
10271025
let mut jdb = new_db();
10281026
let key = jdb.insert(EMPTY_PREFIX, b"dog");
1029-
inject_batch(&mut jdb).unwrap();
1027+
drain_overlay(&mut jdb).unwrap();
10301028

10311029
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
10321030
jdb.remove(&key, EMPTY_PREFIX);
1033-
inject_batch(&mut jdb).unwrap();
1031+
drain_overlay(&mut jdb).unwrap();
10341032

10351033
assert!(jdb.get(&key, EMPTY_PREFIX).is_none());
10361034
}

util/journaldb/src/refcounteddb.rs

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -193,12 +193,13 @@ impl JournalDB for RefCountedDB {
193193
Ok(r)
194194
}
195195

196-
fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
196+
fn drain_transaction_overlay(&mut self) -> io::Result<DBTransaction> {
197197
self.inserts.clear();
198198
for remove in self.removes.drain(..) {
199199
self.forward.remove(&remove, EMPTY_PREFIX);
200200
}
201-
self.forward.commit_to_batch(batch)
201+
let mut batch = DBTransaction::new();
202+
self.forward.commit_to_batch(&mut batch).map(|_| batch)
202203
}
203204

204205
fn consolidate(&mut self, mut with: super::MemoryDB) {
@@ -224,7 +225,7 @@ mod tests {
224225
use hash_db::{HashDB, EMPTY_PREFIX};
225226
use super::*;
226227
use kvdb_memorydb;
227-
use crate::{JournalDB, inject_batch, commit_batch};
228+
use crate::{JournalDB, drain_overlay, commit_batch};
228229

229230
fn new_db() -> RefCountedDB {
230231
let backing = Arc::new(kvdb_memorydb::create(1));
@@ -338,11 +339,11 @@ mod tests {
338339
fn inject() {
339340
let mut jdb = new_db();
340341
let key = jdb.insert(EMPTY_PREFIX, b"dog");
341-
inject_batch(&mut jdb).unwrap();
342+
drain_overlay(&mut jdb).unwrap();
342343

343344
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
344345
jdb.remove(&key, EMPTY_PREFIX);
345-
inject_batch(&mut jdb).unwrap();
346+
drain_overlay(&mut jdb).unwrap();
346347

347348
assert!(jdb.get(&key, EMPTY_PREFIX).is_none());
348349
}

util/migration-rocksdb/src/lib.rs

Lines changed: 24 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ use std::path::{Path, PathBuf};
2121
use std::sync::Arc;
2222
use std::{fs, io, error};
2323

24-
use log::{info, trace};
24+
use log::{info, trace, warn};
2525
use kvdb::DBTransaction;
2626
use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig};
2727

@@ -97,10 +97,12 @@ pub trait Migration {
9797
/// Whether this migration alters any existing columns.
9898
/// if not, then column families will simply be added and `migrate` will never be called.
9999
fn alters_existing(&self) -> bool { true }
100+
/// Whether this migration deletes data in any of the existing columns.
101+
fn deletes_existing(&self) -> bool { false }
100102
/// Version of the database after the migration.
101103
fn version(&self) -> u32;
102104
/// Migrate a source to a destination.
103-
fn migrate(&mut self, source: Arc<Database>, config: &Config, destination: &mut Database, col: u32) -> io::Result<()>;
105+
fn migrate(&mut self, source: Arc<Database>, config: &Config, destination: Option<&mut Database>, col: u32) -> io::Result<()>;
104106
}
105107

106108
/// A simple migration over key-value pairs of a single column.
@@ -123,8 +125,15 @@ impl<T: SimpleMigration> Migration for T {
123125

124126
fn version(&self) -> u32 { SimpleMigration::version(self) }
125127

126-
fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: u32) -> io::Result<()> {
128+
fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: Option<&mut Database>, col: u32) -> io::Result<()> {
127129
let migration_needed = col == SimpleMigration::migrated_column_index(self);
130+
let dest = match dest {
131+
None => {
132+
warn!(target: "migration", "No destination db provided. No changes made.");
133+
return Ok(());
134+
}
135+
Some(dest) => dest,
136+
};
128137
let mut batch = Batch::new(config, col);
129138

130139
for (key, value) in source.iter(col) {
@@ -156,7 +165,7 @@ impl Migration for ChangeColumns {
156165
fn columns(&self) -> u32 { self.post_columns }
157166
fn alters_existing(&self) -> bool { false }
158167
fn version(&self) -> u32 { self.version }
159-
fn migrate(&mut self, _: Arc<Database>, _: &Config, _: &mut Database, _: u32) -> io::Result<()> {
168+
fn migrate(&mut self, _: Arc<Database>, _: &Config, _: Option<&mut Database>, _: u32) -> io::Result<()> {
160169
Ok(())
161170
}
162171
}
@@ -170,10 +179,11 @@ pub struct VacuumAccountsBloom {
170179
impl Migration for VacuumAccountsBloom {
171180
fn pre_columns(&self) -> u32 { self.columns }
172181
fn columns(&self) -> u32 { self.columns }
173-
fn alters_existing(&self) -> bool { true }
182+
fn alters_existing(&self) -> bool { false }
183+
fn deletes_existing(&self) -> bool { true }
174184
fn version(&self) -> u32 { self.version }
175185

176-
fn migrate(&mut self, db: Arc<Database>, _config: &Config, _dest: &mut Database, col: u32) -> io::Result<()> {
186+
fn migrate(&mut self, db: Arc<Database>, _config: &Config, _dest: Option<&mut Database>, col: u32) -> io::Result<()> {
177187
if col != self.column_to_vacuum {
178188
return Ok(())
179189
}
@@ -300,7 +310,7 @@ impl Manager {
300310
let mut new_db = Database::open(&db_config, temp_path_str)?;
301311

302312
for col in 0..current_columns {
303-
migration.migrate(cur_db.clone(), &config, &mut new_db, col)?
313+
migration.migrate(cur_db.clone(), &config, Some(&mut new_db), col)?
304314
}
305315

306316
// next iteration, we will migrate from this db into the other temp.
@@ -309,6 +319,11 @@ impl Manager {
309319

310320
// remove the other temporary migration database.
311321
let _ = fs::remove_dir_all(temp_idx.path(&db_root));
322+
} else if migration.deletes_existing() {
323+
// Migration deletes data in an existing column.
324+
for col in 0..db_config.columns {
325+
migration.migrate(cur_db.clone(), &config, None, col)?
326+
}
312327
} else {
313328
// migrations which simply add or remove column families.
314329
// we can do this in-place.
@@ -322,6 +337,8 @@ impl Manager {
322337
}
323338
}
324339
}
340+
// If `temp_path` is different from `old_path` we will shuffle database
341+
// directories and delete the old paths.
325342
Ok(temp_path)
326343
}
327344

0 commit comments

Comments (0)