// nextflow.config
manifest {
    name = 'FLUFLO'
    author = 'Jessica M Caleta'
    homePage = 'https://github.com/j3551ca/fluflo'
    description = 'Phylogenetics in Nextflow'
    mainScript = 'main.nf'
    version = '0.4.0'
}
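//manifest fields are exposed to main.nf at runtime (e.g. workflow.manifest.version),
//which is presumably what the --version flag below reports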
//pipeline parameters
params {
    //help message
    help = null
    //version number
    version = null
    //conda env local cache
    conda_cache = null
    //directory containing config/ & data/ folders
    work_dir = "/path/to/data/"
    //reference for alignment
    ref = "${params.work_dir}/config/Ref.gb"
    //reference annotation
    ref_anno = "NO_FILE"
    //use a specific root for the tree
    root_name = false
    //IQ-TREE bootstrap/iteration option (e.g. -n 10 for FAST mode, -b 1000 for
    //nonparametric bootstrap, -B 1000 for ultrafast bootstrap); additional
    //IQ-TREE options such as --keep-ident can also be appended here
    bootstrap = "-n 10"
    //nucleotide substitution model used for the IQ-TREE build
    sub_model = "GTR+I+R"
    //input sequences
    seqs = "${params.work_dir}/data/sequences.fasta"
    //metadata of input sequences
    meta = "${params.work_dir}/data/metadata.csv"
    //strains to exclude
    drop_strains = "${params.work_dir}/config/dropped_strains.txt"
    //colors used in the final auspice visualization
    colors = "${params.work_dir}/config/colors.csv"
    //latitudes and longitudes
    lat_long = "${params.work_dir}/config/lat_longs.csv"
    //details for the auspice visualization
    auspice = "${params.work_dir}/config/auspice_config.json"
    //divergence units used when refining the phylogeny
    divergence_units = "mutations"
    //value for the AUGUR_RECURSION_LIMIT environment variable
    recursion_limit = 10000
}
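//any of the parameters above can be overridden on the command line at launch;
//an illustrative invocation (paths and values here are placeholders, not defaults):
//  nextflow run main.nf -profile conda --work_dir /path/to/run --bootstrap "-B 1000"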
//run the pipeline on different execution systems (e.g. AWS Batch, SLURM,
//Sun Grid Engine) by modifying the process scope of this config file:
process {
    withName: align {
        cpus = 28
    }
    withName: tree {
        cpus = 28
    }
    // penv = 'smp'
    // executor = 'sge'
    // memory = '30 GB'
}
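//as a sketch of the executor settings mentioned above, a SLURM setup could look
//like this (the queue name is a placeholder for a partition on your own cluster):
//process {
//    executor = 'slurm'
//    queue = 'general'
//    memory = '30 GB'
//}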
profiles {
    conda {
        process.conda = "${projectDir}/environments/ENV.yml"
        conda.createTimeout = '1 h'
        if (params.conda_cache) {
            conda.cacheDir = params.conda_cache
        }
    }
    low_mem {
        process {
            memory = '8 GB'
        }
    }
    medium_mem {
        process {
            memory = '16 GB'
        }
    }
    high_mem {
        process {
            memory = '64 GB'
        }
    }
}
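//profiles can be combined at runtime with a comma-separated list (no spaces),
//e.g. -profile conda,high_mem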
//html report showing a breakdown of time taken to execute the workflow
timeline {
    enabled = true
    file = "${params.work_dir}/reports/fluflo_timeline.html"
}
//html report of cpu/memory usage
report {
    enabled = true
    file = "${params.work_dir}/reports/fluflo_usage.html"
}
//dag of the fluflo workflow
dag {
    enabled = true
    file = "${params.work_dir}/reports/fluflo_dag.html"
}
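//note: depending on the Nextflow version, the DAG can also be written in other
//formats by changing the file extension (e.g. fluflo_dag.dot); image formats such
//as .png require Graphviz to be installed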