|
8 | 8 | // option. This file may not be copied, modified, or distributed
|
9 | 9 | // except according to those terms.
|
10 | 10 |
|
11 |
| -use std::collections::HashMap; |
| 11 | +//! This contains a build plan that is created during the Cargo build routine |
| 12 | +//! and stored afterwards, which can be later queried, given a list of dirty |
| 13 | +//! files, to retrieve a queue of compiler calls to be invoked (including |
| 14 | +//! appropriate arguments and env variables). |
| 15 | +//! The underlying structure is a dependency graph between simplified units |
| 16 | +//! (package id and crate target kind), as opposed to Cargo units (package with |
| 17 | +//! a target info, including crate target kind, profile and host/target kind). |
| 18 | +//! This will be used for a quick check recompilation and does not aim to |
| 19 | +//! reimplement all the intricacies of Cargo. |
| 20 | +//! The unit dependency graph in Cargo also distinguishes between compiling the |
| 21 | +//! build script and running it and collecting the build script output to modify |
| 22 | +//! the subsequent compilations etc. Since build script executions (not building) |
| 23 | +//! are not exposed via `Executor` trait in Cargo, we simply coalesce every unit |
| 24 | +//! with the same package and crate target kind (e.g. both building and running
| 25 | +//! build scripts). |
| 26 | +
|
| 27 | +use std::collections::{HashMap, HashSet}; |
12 | 28 | use std::fmt;
|
13 | 29 |
|
14 | 30 | use cargo::core::{PackageId, Profile, Target, TargetKind};
|
15 | 31 | use cargo::ops::{Kind, Unit, Context};
|
16 | 32 | use cargo::util::{CargoResult, ProcessBuilder};
|
17 | 33 |
|
18 |
| -pub type DependencyGraph = HashMap<OwnedUnit, Vec<OwnedUnit>>; |
| 34 | +/// Main key type by which `Unit`s will be distinguished in the build plan. |
| 35 | +pub type UnitKey = (PackageId, TargetKind); |
19 | 36 | /// Holds the information how exactly the build will be performed for a given
|
20 | 37 | /// workspace with given, specified features.
|
21 |
| -/// **TODO:** Use it to schedule an analysis build instead of relying on Cargo |
22 |
| -/// invocations. |
23 | 38 | pub struct Plan {
|
24 |
| - pub dep_graph: DependencyGraph, |
25 |
| - /// We don't make a distinction between Units with different Profiles, |
26 |
| - /// as we're practically interested in bin, lib and (built, not run) |
27 |
| - /// build scripts for each package, because for these we can run `rustc` job |
28 |
| - pub compiler_jobs: HashMap<(PackageId, TargetKind), ProcessBuilder>, |
| 39 | + // Stores the full Cargo `Unit` data for the first processed unit with a given key.
| 40 | + pub units: HashMap<UnitKey, OwnedUnit>, |
| 41 | + // Main dependency graph between the simplified units. |
| 42 | + pub dep_graph: HashMap<UnitKey, HashSet<UnitKey>>, |
| 43 | + /// Reverse dependency graph that's used to construct a dirty compiler call queue. |
| 44 | + pub rev_dep_graph: HashMap<UnitKey, HashSet<UnitKey>>, |
| 45 | + /// Cached compiler calls used when creating a compiler call queue. |
| 46 | + pub compiler_jobs: HashMap<UnitKey, ProcessBuilder>, |
29 | 47 | }
|
30 | 48 |
|
31 | 49 | impl Plan {
|
32 | 50 | pub fn new() -> Plan {
|
33 | 51 | Plan {
|
| 52 | + units: HashMap::new(), |
34 | 53 | dep_graph: HashMap::new(),
|
| 54 | + rev_dep_graph: HashMap::new(), |
35 | 55 | compiler_jobs: HashMap::new(),
|
36 | 56 | }
|
37 | 57 | }
|
38 | 58 |
|
| 59 | + pub fn clear(&mut self) { |
| 60 | + *self = Plan::new(); |
| 61 | + } |
39 | 62 |
|
40 |
| - /// Cache a given compiler invocation in `ProcessBuilder` for a given `PackageId` |
41 |
| - /// and `TargetKind` in `Target`, to be used when processing cached build plan |
| 63 | + /// Cache a given compiler invocation in `ProcessBuilder` for a given |
| 64 | + /// `PackageId` and `TargetKind` in `Target`, to be used when processing |
| 65 | + /// cached build plan. |
42 | 66 | pub fn cache_compiler_job(&mut self, id: &PackageId, target: &Target, cmd: &ProcessBuilder) {
|
43 | 67 | let pkg_key = (id.clone(), target.kind().clone());
|
44 | 68 | self.compiler_jobs.insert(pkg_key, cmd.clone());
|
45 | 69 | }
|
46 | 70 |
|
47 | 71 | /// Emplace a given `Unit`, along with its `Unit` dependencies (recursively)
|
48 |
| - /// into dependency graph |
| 72 | + /// into the dependency graph. |
49 | 73 | #[allow(dead_code)]
|
50 | 74 | pub fn emplace_dep(&mut self, unit: &Unit, cx: &Context) -> CargoResult<()> {
|
51 | 75 | let null_filter = |_unit: &Unit| { true };
|
52 | 76 | self.emplace_dep_with_filter(unit, cx, &null_filter)
|
53 | 77 | }
|
54 | 78 |
|
55 | 79 | /// Emplace a given `Unit`, along with its `Unit` dependencies (recursively)
|
56 |
| - /// into dependency graph as long as the passed `Unit` isn't filtered out by |
57 |
| - /// the `filter` closure. |
| 80 | + /// into the dependency graph as long as the passed `Unit` isn't filtered |
| 81 | + /// out by the `filter` closure. |
58 | 82 | pub fn emplace_dep_with_filter<Filter>(&mut self,
|
59 | 83 | unit: &Unit,
|
60 | 84 | cx: &Context,
|
61 | 85 | filter: &Filter) -> CargoResult<()>
|
62 | 86 | where Filter: Fn(&Unit) -> bool {
|
63 |
| - // We might not want certain deps to be added transitively (e.g. when |
64 |
| - // creating only a sub-dep-graph, limiting the scope to the workspace) |
65 |
| - if filter(unit) == false { return Ok(()); } |
66 |
| - |
67 |
| - let key: OwnedUnit = unit.into(); |
68 |
| - // Process only those units, which are not yet in the dep graph |
69 |
| - if let None = self.dep_graph.get(&key) { |
70 |
| - let units = cx.dep_targets(unit)?; |
71 |
| - let dep_keys: Vec<OwnedUnit> = units |
72 |
| - .iter() |
73 |
| - .map(|x| x.into()) |
74 |
| - .collect(); |
75 |
| - self.dep_graph.insert(key, dep_keys); |
76 |
| - // Recursively process other remaining dependencies. |
77 |
| - // TODO: Should we be careful about blowing the stack and do it |
78 |
| - // iteratively instead? |
79 |
| - for unit in units { |
80 |
| - self.emplace_dep_with_filter(&unit, cx, filter)?; |
81 |
| - } |
| 87 | + if !filter(unit) { return Ok(()); } |
| 88 | + |
| 89 | + let key = key_from_unit(unit); |
| 90 | + self.units.entry(key.clone()).or_insert(unit.into()); |
| 91 | + // Process only those units, which are not yet in the dep graph. |
| 92 | + if self.dep_graph.get(&key).is_some() { return Ok(()); } |
| 93 | + |
| 94 | + // Keep all the additional Unit information for a given unit (it's
| 95 | + // worth remembering that the units are only discriminated by a
| 96 | + // pair of (PackageId, TargetKind), so only the first occurrence is saved).
| 97 | + self.units.insert(key.clone(), unit.into()); |
| 98 | + |
| 99 | + // Fetch and insert relevant unit dependencies to the forward dep graph. |
| 100 | + let units = cx.dep_targets(unit)?; |
| 101 | + let dep_keys: HashSet<UnitKey> = units.iter() |
| 102 | + // We might not want certain deps to be added transitively (e.g. |
| 103 | + // when creating only a sub-dep-graph, limiting the scope). |
| 104 | + .filter(|unit| filter(unit)) |
| 105 | + .map(key_from_unit) |
| 106 | + // Units can depend on others with different Targets or Profiles |
| 107 | + // (e.g. different `run_custom_build`) despite having the same UnitKey. |
| 108 | + // We coalesce them here while creating the UnitKey dep graph. |
| 109 | + .filter(|dep| key != *dep) |
| 110 | + .collect(); |
| 111 | + self.dep_graph.insert(key.clone(), dep_keys.clone()); |
| 112 | + |
| 113 | + // We also need to track reverse dependencies here, as it's needed |
| 114 | + // to quickly construct a work sub-graph from a set of dirty units. |
| 115 | + self.rev_dep_graph.entry(key.clone()).or_insert(HashSet::new()); |
| 116 | + for unit in dep_keys { |
| 117 | + let revs = self.rev_dep_graph.entry(unit).or_insert(HashSet::new()); |
| 118 | + revs.insert(key.clone()); |
| 119 | + } |
| 120 | + |
| 121 | + // Recursively process other remaining forward dependencies. |
| 122 | + for unit in units { |
| 123 | + self.emplace_dep_with_filter(&unit, cx, filter)?; |
82 | 124 | }
|
83 | 125 | Ok(())
|
84 | 126 | }
|
| 127 | +} |
85 | 128 |
|
86 |
| - pub fn clear(&mut self) { |
87 |
| - self.dep_graph.clear(); |
88 |
| - self.compiler_jobs.clear(); |
89 |
| - } |
| 129 | +fn key_from_unit(unit: &Unit) -> UnitKey { |
| 130 | + (unit.pkg.package_id().clone(), unit.target.kind().clone()) |
90 | 131 | }
|
91 | 132 |
|
92 |
| -impl fmt::Debug for Plan { |
93 |
| - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
94 |
| - f.write_str("Dep graph:\n")?; |
95 |
| - for (key, deps) in &self.dep_graph { |
96 |
| - f.write_str(&format!("{:?}\n", key))?; |
| 133 | +macro_rules! print_dep_graph { |
| 134 | + ($name: expr, $graph: expr, $f: expr) => { |
| 135 | + $f.write_str(&format!("{}:\n", $name))?; |
| 136 | + for (key, deps) in &$graph { |
| 137 | + $f.write_str(&format!("{:?}\n", key))?; |
97 | 138 | for dep in deps {
|
98 |
| - f.write_str(&format!("- {:?}\n", dep))?; |
| 139 | + $f.write_str(&format!("- {:?}\n", dep))?; |
99 | 140 | }
|
100 | 141 | }
|
| 142 | + } |
| 143 | +} |
| 144 | + |
| 145 | +impl fmt::Debug for Plan { |
| 146 | + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
| 147 | + f.write_str(&format!("Units: {:?}\n", self.units))?; |
| 148 | + print_dep_graph!("Dependency graph", self.dep_graph, f); |
| 149 | + print_dep_graph!("Reverse dependency graph", self.rev_dep_graph, f); |
101 | 150 | f.write_str(&format!("Compiler jobs: {:?}\n", self.compiler_jobs))?;
|
102 | 151 | Ok(())
|
103 | 152 | }
|
|
0 commit comments