Commit 8ba6732

Remove unnecessary clones with clippy (#12197)
* Remove unnecessary clones with clippy

  This is an automated change done with

  ```
  cargo clippy --fix -- -Aclippy::all -Wclippy::redundant_clone
  cargo fmt # manually fix a few new clippy errors introduced by clippy itself
  ```

  Note: it doesn't remove all unnecessary clones, because the command reported an error and backed out of the changes for the `common` crate.

* Prevent clippy from removing an intentional clone

  clippy can be run with `--fix`, and then it won't obey the code comment instructing it not to delete the clone.

* Remove unnecessary clones pointed out by clippy

  Change code as instructed by

  ```
  cargo clippy --fix -- -Aclippy::all -Wclippy::redundant_clone
  ```

  where clippy didn't apply the suggested changes by itself.
1 parent 6ffb1f6 commit 8ba6732
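For readers unfamiliar with the lint: `clippy::redundant_clone` fires when a value is cloned but the original is never used again afterwards, so the clone can be replaced by a move. A minimal sketch of the kind of rewrite the commands above automate (the names here are illustrative, not taken from this commit):

```rust
fn consume(s: String) -> usize {
    s.len()
}

fn main() {
    let file_path = String::from("/tmp/fifo");

    // Before: flagged by `cargo clippy -- -Aclippy::all -Wclippy::redundant_clone`,
    // because `file_path` is never used again after this call:
    //     let len = consume(file_path.clone());

    // After: what `--fix` rewrites it to, moving the value instead of cloning it.
    let len = consume(file_path);
    assert_eq!(len, 9);
}
```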

File tree

56 files changed: +162 -204 lines


datafusion-examples/examples/advanced_parquet_index.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -300,7 +300,7 @@ impl IndexTableProvider {
         // analyze the predicate. In a real system, using
         // `PruningPredicate::prune` would likely be easier to do.
         let pruning_predicate =
-            PruningPredicate::try_new(Arc::clone(predicate), self.schema().clone())?;
+            PruningPredicate::try_new(Arc::clone(predicate), self.schema())?;

         // The PruningPredicate's guarantees must all be satisfied in order for
         // the predicate to possibly evaluate to true.
```
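A note on why the deleted `.clone()` was redundant: `self.schema()` here already returns an owned `SchemaRef` (an `Arc<Schema>`), so cloning the return value only bumped the reference count of a temporary. A minimal sketch of that shape, using a stand-in type rather than the real `Schema`:

```rust
use std::sync::Arc;

// Stand-in for a table provider whose `schema()` returns an owned Arc.
struct Provider {
    schema: Arc<str>, // stand-in for Arc<Schema> (SchemaRef)
}

impl Provider {
    // Callers receive their own handle; no further clone is needed.
    fn schema(&self) -> Arc<str> {
        Arc::clone(&self.schema)
    }
}

fn main() {
    let provider = Provider { schema: Arc::from("c0: Int64") };
    let schema = provider.schema(); // already owned
    // `provider.schema().clone()` would be the redundant form clippy flags.
    assert_eq!(Arc::strong_count(&schema), 2); // one in `provider`, one here
}
```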

datafusion-examples/examples/file_stream_provider.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -100,7 +100,7 @@ mod non_windows {
     ) {
         // Timeout for a long period of BrokenPipe error
         let broken_pipe_timeout = Duration::from_secs(10);
-        let sa = file_path.clone();
+        let sa = file_path;
         // Spawn a new thread to write to the FIFO file
         #[allow(clippy::disallowed_methods)] // spawn allowed only in tests
         tasks.spawn_blocking(move || {
```

datafusion/common/src/dfschema.rs

Lines changed: 1 addition & 2 deletions
```diff
@@ -1242,10 +1242,9 @@ mod tests {
     #[test]
     fn into() {
         // Demonstrate how to convert back and forth between Schema, SchemaRef, DFSchema, and DFSchemaRef
-        let metadata = test_metadata();
         let arrow_schema = Schema::new_with_metadata(
             vec![Field::new("c0", DataType::Int64, true)],
-            metadata.clone(),
+            test_metadata(),
         );
         let arrow_schema_ref = Arc::new(arrow_schema.clone());
```

datafusion/common/src/file_options/parquet_writer.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -565,7 +565,7 @@ mod tests {
                 column_options_with_non_defaults(&parquet_options),
             )]
             .into(),
-            key_value_metadata: [(key.clone(), value.clone())].into(),
+            key_value_metadata: [(key, value)].into(),
         };

         let writer_props = WriterPropertiesBuilder::try_from(&table_parquet_opts)
```

datafusion/common/src/scalar/mod.rs

Lines changed: 4 additions & 4 deletions
```diff
@@ -4905,7 +4905,7 @@ mod tests {
         let data_type =
             DataType::List(Arc::new(Field::new("item", DataType::Int32, true)));

-        assert_eq!(non_null_list_scalar.data_type(), data_type.clone());
+        assert_eq!(non_null_list_scalar.data_type(), data_type);
         assert_eq!(null_list_scalar.data_type(), data_type);
     }

@@ -5582,13 +5582,13 @@ mod tests {

         // Define list-of-structs scalars

-        let nl0_array = ScalarValue::iter_to_array(vec![s0.clone(), s1.clone()]).unwrap();
+        let nl0_array = ScalarValue::iter_to_array(vec![s0, s1.clone()]).unwrap();
         let nl0 = ScalarValue::List(Arc::new(array_into_list_array_nullable(nl0_array)));

-        let nl1_array = ScalarValue::iter_to_array(vec![s2.clone()]).unwrap();
+        let nl1_array = ScalarValue::iter_to_array(vec![s2]).unwrap();
         let nl1 = ScalarValue::List(Arc::new(array_into_list_array_nullable(nl1_array)));

-        let nl2_array = ScalarValue::iter_to_array(vec![s1.clone()]).unwrap();
+        let nl2_array = ScalarValue::iter_to_array(vec![s1]).unwrap();
         let nl2 = ScalarValue::List(Arc::new(array_into_list_array_nullable(nl2_array)));

         // iter_to_array for list-of-struct
```

datafusion/common/src/stats.rs

Lines changed: 1 addition & 0 deletions
```diff
@@ -557,6 +557,7 @@ mod tests {
         let precision: Precision<ScalarValue> =
             Precision::Exact(ScalarValue::Int64(Some(42)));
         // Clippy would complain about this if it were Copy
+        #[allow(clippy::redundant_clone)]
         let p2 = precision.clone();
         assert_eq!(precision, p2);
     }
```
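This hunk is the second bullet of the commit message in action: the pre-existing `// Clippy would complain...` comment cannot stop `cargo clippy --fix` from rewriting the line, but an `#[allow(clippy::redundant_clone)]` attribute can, because it keeps the lint from firing at all. A minimal sketch of the pattern, with a stand-in type rather than the real `Precision`:

```rust
// Stand-in type: Clone but deliberately not Copy, so `.clone()` is a real call.
#[derive(Clone, PartialEq, Debug)]
struct Precision(i64);

fn main() {
    let precision = Precision(42);
    // The clone is the point of the test; the attribute keeps automated
    // `cargo clippy --fix` runs from rewriting it into a move.
    #[allow(clippy::redundant_clone)]
    let p2 = precision.clone();
    assert_eq!(precision, p2);
}
```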

datafusion/core/src/datasource/listing/helpers.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -282,7 +282,7 @@ async fn prune_partitions(
         Default::default(),
     )?;

-    let batch = RecordBatch::try_new(schema.clone(), arrays)?;
+    let batch = RecordBatch::try_new(schema, arrays)?;

     // TODO: Plumb this down
     let props = ExecutionProps::new();
```

datafusion/core/src/datasource/listing/table.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -1016,7 +1016,7 @@ impl ListingTable {
             .collected_statistics
             .get_with_extra(&part_file.object_meta.location, &part_file.object_meta)
         {
-            Some(statistics) => Ok(statistics.clone()),
+            Some(statistics) => Ok(statistics),
             None => {
                 let statistics = self
                     .options
```

datafusion/core/src/datasource/physical_plan/file_groups.rs

Lines changed: 2 additions & 5 deletions
```diff
@@ -394,7 +394,7 @@ mod test {
     #[test]
     fn repartition_empty_file_only() {
         let partitioned_file_empty = pfile("empty", 0);
-        let file_group = vec![vec![partitioned_file_empty.clone()]];
+        let file_group = vec![vec![partitioned_file_empty]];

         let partitioned_files = FileGroupPartitioner::new()
             .with_target_partitions(4)
@@ -817,10 +817,7 @@ mod test {
             .with_preserve_order_within_groups(true)
             .repartition_file_groups(&file_groups);

-        assert_partitioned_files(
-            repartitioned.clone(),
-            repartitioned_preserving_sort.clone(),
-        );
+        assert_partitioned_files(repartitioned.clone(), repartitioned_preserving_sort);
         repartitioned
     }
 }
```

datafusion/core/src/datasource/physical_plan/file_scan_config.rs

Lines changed: 2 additions & 2 deletions
```diff
@@ -908,7 +908,7 @@ mod tests {
             schema.clone(),
             Some(vec![0, 3, 5, schema.fields().len()]),
             Statistics::new_unknown(&schema),
-            to_partition_cols(partition_cols.clone()),
+            to_partition_cols(partition_cols),
         )
         .projected_file_schema();

@@ -941,7 +941,7 @@ mod tests {
             schema.clone(),
             None,
             Statistics::new_unknown(&schema),
-            to_partition_cols(partition_cols.clone()),
+            to_partition_cols(partition_cols),
         )
         .projected_file_schema();
```

datafusion/core/src/datasource/schema_adapter.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -369,7 +369,7 @@ mod tests {
         let f1 = Field::new("id", DataType::Int32, true);
         let f2 = Field::new("extra_column", DataType::Utf8, true);

-        let schema = Arc::new(Schema::new(vec![f1.clone(), f2.clone()]));
+        let schema = Arc::new(Schema::new(vec![f1, f2]));

         let extra_column = Arc::new(StringArray::from(vec!["foo"]));
         let mut new_columns = batch.columns().to_vec();
```

datafusion/core/src/execution/context/mod.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -1165,7 +1165,7 @@ impl SessionContext {
         // check schema uniqueness
         let mut batches = batches.into_iter().peekable();
         let schema = if let Some(batch) = batches.peek() {
-            batch.schema().clone()
+            batch.schema()
         } else {
             Arc::new(Schema::empty())
         };
```

datafusion/core/src/physical_optimizer/enforce_distribution.rs

Lines changed: 11 additions & 12 deletions
```diff
@@ -3908,7 +3908,7 @@ pub(crate) mod tests {
         let alias = vec![("a".to_string(), "a".to_string())];
         let plan_parquet =
             aggregate_exec_with_alias(parquet_exec_multiple(), alias.clone());
-        let plan_csv = aggregate_exec_with_alias(csv_exec_multiple(), alias.clone());
+        let plan_csv = aggregate_exec_with_alias(csv_exec_multiple(), alias);

         let expected_parquet = [
             "AggregateExec: mode=FinalPartitioned, gby=[a@0 as a], aggr=[]",
@@ -3934,7 +3934,7 @@ pub(crate) mod tests {
         let alias = vec![("a".to_string(), "a".to_string())];
         let plan_parquet =
             aggregate_exec_with_alias(parquet_exec_multiple(), alias.clone());
-        let plan_csv = aggregate_exec_with_alias(csv_exec_multiple(), alias.clone());
+        let plan_csv = aggregate_exec_with_alias(csv_exec_multiple(), alias);

         let expected_parquet = [
             "AggregateExec: mode=FinalPartitioned, gby=[a@0 as a], aggr=[]",
@@ -3964,7 +3964,7 @@ pub(crate) mod tests {
             options: SortOptions::default(),
         }];
         let plan_parquet = limit_exec(sort_exec(sort_key.clone(), parquet_exec(), false));
-        let plan_csv = limit_exec(sort_exec(sort_key.clone(), csv_exec(), false));
+        let plan_csv = limit_exec(sort_exec(sort_key, csv_exec(), false));

         let expected_parquet = &[
             "GlobalLimitExec: skip=0, fetch=100",
@@ -4000,8 +4000,7 @@ pub(crate) mod tests {
             parquet_exec(),
             false,
         )));
-        let plan_csv =
-            limit_exec(filter_exec(sort_exec(sort_key.clone(), csv_exec(), false)));
+        let plan_csv = limit_exec(filter_exec(sort_exec(sort_key, csv_exec(), false)));

         let expected_parquet = &[
             "GlobalLimitExec: skip=0, fetch=100",
@@ -4042,7 +4041,7 @@ pub(crate) mod tests {
         );
         let plan_csv = aggregate_exec_with_alias(
             limit_exec(filter_exec(limit_exec(csv_exec()))),
-            alias.clone(),
+            alias,
         );

         let expected_parquet = &[
@@ -4126,7 +4125,7 @@ pub(crate) mod tests {
         );
         let plan_csv = sort_preserving_merge_exec(
             sort_key.clone(),
-            csv_exec_with_sort(vec![sort_key.clone()]),
+            csv_exec_with_sort(vec![sort_key]),
         );

         // parallelization is not beneficial for SortPreservingMerge
@@ -4154,7 +4153,7 @@ pub(crate) mod tests {
             union_exec(vec![parquet_exec_with_sort(vec![sort_key.clone()]); 2]);
         let input_csv = union_exec(vec![csv_exec_with_sort(vec![sort_key.clone()]); 2]);
         let plan_parquet = sort_preserving_merge_exec(sort_key.clone(), input_parquet);
-        let plan_csv = sort_preserving_merge_exec(sort_key.clone(), input_csv);
+        let plan_csv = sort_preserving_merge_exec(sort_key, input_csv);

         // should not repartition (union doesn't benefit from increased parallelism)
         // should not sort (as the data was already sorted)
@@ -4224,8 +4223,8 @@ pub(crate) mod tests {
             ("c".to_string(), "c2".to_string()),
         ];
         let proj_parquet = projection_exec_with_alias(
-            parquet_exec_with_sort(vec![sort_key.clone()]),
-            alias_pairs.clone(),
+            parquet_exec_with_sort(vec![sort_key]),
+            alias_pairs,
         );
         let sort_key_after_projection = vec![PhysicalSortExpr {
             expr: col("c2", &proj_parquet.schema()).unwrap(),
@@ -4560,7 +4559,7 @@ pub(crate) mod tests {
         }];
         let alias = vec![("a".to_string(), "a".to_string())];
         let input = parquet_exec_with_sort(vec![sort_key]);
-        let physical_plan = aggregate_exec_with_alias(input, alias.clone());
+        let physical_plan = aggregate_exec_with_alias(input, alias);

         let expected = &[
             "AggregateExec: mode=FinalPartitioned, gby=[a@0 as a], aggr=[]",
@@ -4584,7 +4583,7 @@ pub(crate) mod tests {
         let alias = vec![("a".to_string(), "a".to_string())];
         let input = parquet_exec_multiple_sorted(vec![sort_key]);
         let aggregate = aggregate_exec_with_alias(input, alias.clone());
-        let physical_plan = aggregate_exec_with_alias(aggregate, alias.clone());
+        let physical_plan = aggregate_exec_with_alias(aggregate, alias);

         let expected = &[
             "AggregateExec: mode=FinalPartitioned, gby=[a@0 as a], aggr=[]",
```

datafusion/core/src/physical_optimizer/join_selection.rs

Lines changed: 10 additions & 11 deletions
```diff
@@ -908,7 +908,7 @@ mod tests_statistical {
         );

         let optimized_join = JoinSelection::new()
-            .optimize(join.clone(), &ConfigOptions::new())
+            .optimize(join, &ConfigOptions::new())
             .unwrap();

         let swapping_projection = optimized_join
@@ -964,7 +964,7 @@ mod tests_statistical {
         );

         let optimized_join = JoinSelection::new()
-            .optimize(join.clone(), &ConfigOptions::new())
+            .optimize(join, &ConfigOptions::new())
             .unwrap();

         let swapped_join = optimized_join
@@ -1140,7 +1140,7 @@ mod tests_statistical {
         );

         let optimized_join = JoinSelection::new()
-            .optimize(join.clone(), &ConfigOptions::new())
+            .optimize(join, &ConfigOptions::new())
             .unwrap();

         let swapped_join = optimized_join
@@ -1180,7 +1180,7 @@ mod tests_statistical {
         );

         let optimized_join = JoinSelection::new()
-            .optimize(join.clone(), &ConfigOptions::new())
+            .optimize(join, &ConfigOptions::new())
             .unwrap();

         let swapping_projection = optimized_join
@@ -1356,7 +1356,7 @@ mod tests_statistical {
             Arc::new(Column::new_with_schema("small_col", &small.schema()).unwrap()) as _,
         )];
         check_join_partition_mode(
-            big.clone(),
+            big,
             small.clone(),
             join_on,
             true,
@@ -1380,8 +1380,8 @@ mod tests_statistical {
             Arc::new(Column::new_with_schema("small_col", &small.schema()).unwrap()) as _,
         )];
         check_join_partition_mode(
-            empty.clone(),
-            small.clone(),
+            empty,
+            small,
             join_on,
             true,
             PartitionMode::CollectLeft,
@@ -1424,7 +1424,7 @@ mod tests_statistical {
             Arc::new(Column::new_with_schema("big_col", &big.schema()).unwrap()) as _,
         )];
         check_join_partition_mode(
-            bigger.clone(),
+            bigger,
             big.clone(),
             join_on,
             true,
@@ -1472,7 +1472,7 @@ mod tests_statistical {
         );

         let optimized_join = JoinSelection::new()
-            .optimize(join.clone(), &ConfigOptions::new())
+            .optimize(join, &ConfigOptions::new())
             .unwrap();

         if !is_swapped {
@@ -1913,8 +1913,7 @@ mod hash_join_tests {
             false,
         )?);

-        let optimized_join_plan =
-            hash_join_swap_subrule(join.clone(), &ConfigOptions::new())?;
+        let optimized_join_plan = hash_join_swap_subrule(join, &ConfigOptions::new())?;

         // If swap did happen
         let projection_added = optimized_join_plan.as_any().is::<ProjectionExec>();
```

datafusion/core/src/physical_optimizer/projection_pushdown.rs

Lines changed: 6 additions & 12 deletions
```diff
@@ -1692,12 +1692,9 @@ mod tests {
         ]));
         Arc::new(
             CsvExec::builder(
-                FileScanConfig::new(
-                    ObjectStoreUrl::parse("test:///").unwrap(),
-                    schema.clone(),
-                )
-                .with_file(PartitionedFile::new("x".to_string(), 100))
-                .with_projection(Some(vec![0, 1, 2, 3, 4])),
+                FileScanConfig::new(ObjectStoreUrl::parse("test:///").unwrap(), schema)
+                    .with_file(PartitionedFile::new("x".to_string(), 100))
+                    .with_projection(Some(vec![0, 1, 2, 3, 4])),
             )
             .with_has_header(false)
             .with_delimeter(0)
@@ -1719,12 +1716,9 @@ mod tests {
         ]));
         Arc::new(
             CsvExec::builder(
-                FileScanConfig::new(
-                    ObjectStoreUrl::parse("test:///").unwrap(),
-                    schema.clone(),
-                )
-                .with_file(PartitionedFile::new("x".to_string(), 100))
-                .with_projection(Some(vec![3, 2, 1])),
+                FileScanConfig::new(ObjectStoreUrl::parse("test:///").unwrap(), schema)
+                    .with_file(PartitionedFile::new("x".to_string(), 100))
+                    .with_projection(Some(vec![3, 2, 1])),
             )
             .with_has_header(false)
             .with_delimeter(0)
```

datafusion/core/src/physical_optimizer/pruning.rs

Lines changed: 0 additions & 1 deletion
```diff
@@ -1369,7 +1369,6 @@ fn build_predicate_expression(
             let change_expr = in_list
                 .list()
                 .iter()
-                .cloned()
                 .map(|e| {
                     Arc::new(phys_expr::BinaryExpr::new(
                         in_list.expr().clone(),
```
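The pattern in this hunk generalizes: an up-front `.cloned()` copies every element before `map` even runs, which is wasted work when the closure only reads the element or clones just the parts it needs itself. A small self-contained sketch of the before/after (the data here is illustrative):

```rust
use std::sync::Arc;

fn main() {
    let list: Vec<Arc<str>> = vec![Arc::from("a"), Arc::from("bc")];

    // Before: `.cloned()` bumps every Arc's refcount just so `map` owns it.
    let lens_before: Vec<usize> = list.iter().cloned().map(|e| e.len()).collect();

    // After: map over references; no refcount traffic at all.
    let lens_after: Vec<usize> = list.iter().map(|e| e.len()).collect();

    assert_eq!(lens_before, lens_after);
}
```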

datafusion/core/src/physical_planner.rs

Lines changed: 2 additions & 2 deletions
```diff
@@ -701,7 +701,7 @@ impl DefaultPhysicalPlanner {
                 let initial_aggr = Arc::new(AggregateExec::try_new(
                     AggregateMode::Partial,
                     groups.clone(),
-                    aggregates.clone(),
+                    aggregates,
                     filters.clone(),
                     input_exec,
                     physical_input_schema.clone(),
@@ -2569,7 +2569,7 @@ mod tests {

     impl NoOpExecutionPlan {
         fn new(schema: SchemaRef) -> Self {
-            let cache = Self::compute_properties(schema.clone());
+            let cache = Self::compute_properties(schema);
             Self { cache }
         }
```

datafusion/core/tests/fuzz_cases/join_fuzz.rs

Lines changed: 4 additions & 6 deletions
```diff
@@ -350,12 +350,10 @@ impl JoinFuzzTestCase {
     fn left_right(&self) -> (Arc<MemoryExec>, Arc<MemoryExec>) {
         let schema1 = self.input1[0].schema();
         let schema2 = self.input2[0].schema();
-        let left = Arc::new(
-            MemoryExec::try_new(&[self.input1.clone()], schema1.clone(), None).unwrap(),
-        );
-        let right = Arc::new(
-            MemoryExec::try_new(&[self.input2.clone()], schema2.clone(), None).unwrap(),
-        );
+        let left =
+            Arc::new(MemoryExec::try_new(&[self.input1.clone()], schema1, None).unwrap());
+        let right =
+            Arc::new(MemoryExec::try_new(&[self.input2.clone()], schema2, None).unwrap());
         (left, right)
     }
```
