@@ -12,7 +12,10 @@ use crate::{
         bgp::BgpAnalyser,
         KrillResult,
     },
-    constants::{SCHEDULER_INTERVAL_RENEW_MINS, SCHEDULER_INTERVAL_REPUBLISH_MINS, SCHEDULER_USE_JITTER_CAS_THRESHOLD},
+    constants::{
+        SCHEDULER_INTERVAL_RENEW_MINS, SCHEDULER_INTERVAL_REPUBLISH_MINS, SCHEDULER_RESYNC_REPO_CAS_THRESHOLD,
+        SCHEDULER_USE_JITTER_CAS_THRESHOLD,
+    },
     daemon::{
         ca::CaManager,
         config::Config,
@@ -146,20 +149,28 @@ impl Scheduler {

         debug!("Adding tasks at start up");

-        let use_jitter = cas.len() >= SCHEDULER_USE_JITTER_CAS_THRESHOLD;
+        let too_many_cas_resync_parent = cas.len() >= SCHEDULER_USE_JITTER_CAS_THRESHOLD;
+        let too_many_cas_resync_repo = cas.len() >= SCHEDULER_RESYNC_REPO_CAS_THRESHOLD;

         for summary in cas {
             let ca = self.ca_manager.get_ca(summary.handle()).await?;

-            debug!("Adding tasks for CA {}, using jitter: {}", ca.handle(), use_jitter);
+            let too_many_parents = ca.nr_parents() >= self.config.ca_refresh_parents_batch_size;

             // Plan a regular sync for each parent. Spread these out if there
             // are too many CAs or parents for a CA. In cases where there are only
             // a handful of CAs/parents, this 'ca_refresh_start_up' will be 'now'.
             //
             // Note: users can change the priority to 'now' by using the 'bulk' functions.
-            let too_many_parents = ca.nr_parents() >= self.config.ca_refresh_parents_batch_size;
-            if !use_jitter && too_many_parents {
+            let use_parent_sync_jitter = too_many_cas_resync_parent || too_many_parents;
+
+            debug!(
+                "Adding tasks for CA {}, using jitter: {}",
+                ca.handle(),
+                use_parent_sync_jitter
+            );
+
+            if !too_many_cas_resync_parent && too_many_parents {
                 debug!(
                     "Will force jitter for sync between CA {} and parents. Nr of parents ({}) exceeds batch size ({})",
                     ca.handle(),
@@ -172,18 +183,19 @@ impl Scheduler {
                 self.tasks.sync_parent(
                     ca.handle().clone(),
                     parent.clone(),
-                    self.config.ca_refresh_start_up(use_jitter || too_many_parents),
+                    self.config.ca_refresh_start_up(use_parent_sync_jitter),
                 );
             }

-            // Plan a sync with the repo. In case we only have a handful of CAs
-            // then the result is that the sync is scheduled asap. Otherwise
-            // spread the load.
-            // Note: if circumstances dictate a sync before it's planned, e.g.
-            // because ROAs are changed, then it will be rescheduled accordingly.
-            // Note: users can override using the 'bulk' functions.
-            self.tasks
-                .sync_repo(ca.handle().clone(), self.config.ca_refresh_start_up(use_jitter));
+            // Plan a sync with the repo. But only in case we only have a handful
+            // of CAs.
+            //
+            // Note: if circumstances dictate a sync e.g. because ROAs are changed,
+            // then it will be scheduled accordingly. Furthermore, users can use the
+            // 'bulk' function to explicitly force schedule a sync.
+            if !too_many_cas_resync_repo {
+                self.tasks.sync_repo(ca.handle().clone(), now());
+            }

             // If suspension is enabled then plan a task for it. Since this is
             // a cheap no-op in most cases, we do not need jitter. If we do not
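The start-up decisions this patch introduces can be summarized outside of Krill's own types. The sketch below is an illustrative, free-standing restatement of the two checks (jittered parent sync, immediate repository sync); the constant values, the start_up_plan function, and its signature are assumptions made for this example and are not Krill's actual API.

// Minimal sketch of the start-up decisions in this patch. The constant
// values are placeholders for illustration; the real values live in
// Krill's constants module.
const SCHEDULER_USE_JITTER_CAS_THRESHOLD: usize = 50; // assumed value
const SCHEDULER_RESYNC_REPO_CAS_THRESHOLD: usize = 5; // assumed value

/// Returns (use_parent_sync_jitter, sync_repo_now) for one CA at start up.
fn start_up_plan(nr_cas: usize, nr_parents: usize, parents_batch_size: usize) -> (bool, bool) {
    // Jitter the parent sync when there are many CAs overall, or when this
    // CA has at least a full batch worth of parents.
    let too_many_cas_resync_parent = nr_cas >= SCHEDULER_USE_JITTER_CAS_THRESHOLD;
    let too_many_parents = nr_parents >= parents_batch_size;
    let use_parent_sync_jitter = too_many_cas_resync_parent || too_many_parents;

    // Only schedule an immediate repository sync when the total number of CAs
    // is below the resync threshold; otherwise rely on event-driven syncs
    // (e.g. ROA changes) or an explicit 'bulk' request.
    let sync_repo_now = nr_cas < SCHEDULER_RESYNC_REPO_CAS_THRESHOLD;

    (use_parent_sync_jitter, sync_repo_now)
}

fn main() {
    // Example: 100 CAs in total, a CA with 3 parents, batch size 25.
    let (jitter, repo_now) = start_up_plan(100, 3, 25);
    assert!(jitter); // many CAs, so parent syncs are spread out
    assert!(!repo_now); // many CAs, so no immediate repo sync at start up
}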