@@ -2911,8 +2911,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx];

 	/* if a merge has already been setup, then proceed with that first */
-	if (bfqq->new_bfqq)
-		return bfqq->new_bfqq;
+	new_bfqq = bfqq->new_bfqq;
+	if (new_bfqq) {
+		while (new_bfqq->new_bfqq)
+			new_bfqq = new_bfqq->new_bfqq;
+		return new_bfqq;
+	}

 	/*
 	 * Check delayed stable merge for rotational or non-queueing
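A note on the hunk above: bfqq->new_bfqq links queues into a merge chain, so returning the immediate successor could hand back a queue that has itself already been merged into another one. The new loop walks to the tail of the chain instead. Below is a minimal standalone sketch of that tail walk, using a simplified stand-in struct rather than the real bfq_queue:

```c
#include <assert.h>
#include <stddef.h>

/* Simplified stand-in for bfq_queue: only the merge-chain link. */
struct queue {
	struct queue *new_bfqq;	/* next queue in the merge chain, or NULL */
};

/* Follow the chain until a queue with no successor is reached. */
static struct queue *merge_chain_tail(struct queue *q)
{
	while (q->new_bfqq)
		q = q->new_bfqq;
	return q;
}

int main(void)
{
	struct queue a = { NULL }, b = { NULL }, c = { NULL };

	a.new_bfqq = &b;	/* a was merged into b ... */
	b.new_bfqq = &c;	/* ... and b was later merged into c */

	assert(merge_chain_tail(&a) == &c);	/* the old one-hop code returned b */
	return 0;
}
```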
@@ -3093,8 +3097,8 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
 }

-static void
-bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq, struct bfq_queue *new_bfqq)
+void bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq,
+			    struct bfq_queue *new_bfqq)
 {
 	if (cur_bfqq->entity.parent &&
 	    cur_bfqq->entity.parent->last_bfqq_created == cur_bfqq)
@@ -3125,10 +3129,12 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 	bfq_put_queue(bfqq);
 }

-static void
-bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
-		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd,
+					 struct bfq_io_cq *bic,
+					 struct bfq_queue *bfqq)
 {
+	struct bfq_queue *new_bfqq = bfqq->new_bfqq;
+
 	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
 		     (unsigned long)new_bfqq->pid);
 	/* Save weight raising and idle window of the merged queues */
@@ -3222,6 +3228,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 	bfq_reassign_last_bfqq(bfqq, new_bfqq);

 	bfq_release_process_ref(bfqd, bfqq);
+
+	return new_bfqq;
 }

 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
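With the two hunks above, bfq_merge_bfqqs() performs a single merge hop (bfqq into bfqq->new_bfqq) and returns the merge target, so callers can collapse a whole chain step by step, running the per-hop bookkeeping (state saving, last_bfqq_created reassignment, reference release) for every intermediate queue. A hedged sketch of the resulting caller pattern, with merge_one_step() as a hypothetical stand-in for the real function:

```c
#include <assert.h>
#include <stddef.h>

struct queue {
	struct queue *new_bfqq;	/* merge-chain link */
};

/*
 * Hypothetical stand-in for bfq_merge_bfqqs(): do the bookkeeping for
 * one merge hop (elided here) and return the queue that q merged into.
 */
static struct queue *merge_one_step(struct queue *q)
{
	/* ... save state, hand over references, reassign last_bfqq ... */
	return q->new_bfqq;
}

int main(void)
{
	struct queue a = { NULL }, b = { NULL }, c = { NULL };
	struct queue *bfqq = &a, *new_bfqq = &c;	/* c is the chain tail */

	a.new_bfqq = &b;
	b.new_bfqq = &c;

	/* The loop shape used by the callers updated in this patch. */
	while (bfqq != new_bfqq)
		bfqq = merge_one_step(bfqq);

	assert(bfqq == &c);
	return 0;
}
```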
@@ -3257,14 +3265,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
 		 * fulfilled, i.e., bic can be redirected to new_bfqq
 		 * and bfqq can be put.
 		 */
-		bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
-				new_bfqq);
-		/*
-		 * If we get here, bio will be queued into new_queue,
-		 * so use new_bfqq to decide whether bio and rq can be
-		 * merged.
-		 */
-		bfqq = new_bfqq;
+		while (bfqq != new_bfqq)
+			bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq);

 		/*
 		 * Change also bqfd->bio_bfqq, as
@@ -5701,9 +5703,7 @@ bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	 * state before killing it.
 	 */
 	bfqq->bic = bic;
-	bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
-
-	return new_bfqq;
+	return bfq_merge_bfqqs(bfqd, bic, bfqq);
 }

 /*
@@ -6158,6 +6158,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
 	bool waiting, idle_timer_disabled = false;

 	if (new_bfqq) {
+		struct bfq_queue *old_bfqq = bfqq;
 		/*
 		 * Release the request's reference to the old bfqq
 		 * and make sure one is taken to the shared queue.
@@ -6174,18 +6175,18 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
 		 * new_bfqq.
 		 */
 		if (bic_to_bfqq(RQ_BIC(rq), true,
-				bfq_actuator_index(bfqd, rq->bio)) == bfqq)
-			bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
-					bfqq, new_bfqq);
+				bfq_actuator_index(bfqd, rq->bio)) == bfqq) {
+			while (bfqq != new_bfqq)
+				bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq);
+		}

-		bfq_clear_bfqq_just_created(bfqq);
+		bfq_clear_bfqq_just_created(old_bfqq);
 		/*
 		 * rq is about to be enqueued into new_bfqq,
 		 * release rq reference on bfqq
 		 */
-		bfq_put_queue(bfqq);
+		bfq_put_queue(old_bfqq);
 		rq->elv.priv[1] = new_bfqq;
-		bfqq = new_bfqq;
 	}

 	bfq_update_io_thinktime(bfqd, bfqq);
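In the two __bfq_insert_request hunks above, the merge loop now advances bfqq itself, so the queue the request was originally attached to is saved in old_bfqq first: the just_created flag must be cleared on, and the request's reference dropped from, that original queue rather than the merge target. A small sketch of why the saved pointer matters, with simplified refcounting and hypothetical field names:

```c
#include <assert.h>
#include <stddef.h>

struct queue {
	int ref;		/* simplified reference count */
	int just_created;
	struct queue *new_bfqq;	/* merge-chain link */
};

static void put_queue(struct queue *q)
{
	assert(q->ref > 0);	/* dropping the wrong queue would underflow */
	q->ref--;
}

int main(void)
{
	struct queue a = { .ref = 2, .just_created = 1 };
	struct queue b = { .ref = 1 };
	struct queue *bfqq = &a, *new_bfqq = &b;
	struct queue *old_bfqq = bfqq;	/* saved before the loop, as in the patch */

	a.new_bfqq = &b;

	while (bfqq != new_bfqq)	/* leaves bfqq pointing at b */
		bfqq = bfqq->new_bfqq;

	/* Bookkeeping targets the original queue, not the merge target. */
	old_bfqq->just_created = 0;
	put_queue(old_bfqq);

	assert(a.ref == 1 && b.ref == 1);
	return 0;
}
```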
@@ -6723,7 +6724,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
 {
 	bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");

-	if (bfqq_process_refs(bfqq) == 1) {
+	if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) {
 		bfqq->pid = current->pid;
 		bfq_clear_bfqq_coop(bfqq);
 		bfq_clear_bfqq_split_coop(bfqq);
@@ -6738,11 +6739,10 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
 	return NULL;
 }

-static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
-						   struct bfq_io_cq *bic,
-						   struct bio *bio,
-						   bool split, bool is_sync,
-						   bool *new_queue)
+static struct bfq_queue *
+__bfq_get_bfqq_handle_split(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+			    struct bio *bio, bool split, bool is_sync,
+			    bool *new_queue)
 {
 	unsigned int act_idx = bfq_actuator_index(bfqd, bio);
 	struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync, act_idx);
@@ -6821,6 +6821,84 @@ static void bfq_prepare_request(struct request *rq)
 	rq->elv.priv[0] = rq->elv.priv[1] = NULL;
 }

+static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq)
+{
+	struct bfq_queue *new_bfqq = bfqq->new_bfqq;
+	struct bfq_queue *waker_bfqq = bfqq->waker_bfqq;
+
+	if (!waker_bfqq)
+		return NULL;
+
+	while (new_bfqq) {
+		if (new_bfqq == waker_bfqq) {
+			/*
+			 * If waker_bfqq is in the merge chain, and current
+			 * is the only process, waker_bfqq can be freed.
+			 */
+			if (bfqq_process_refs(waker_bfqq) == 1)
+				return NULL;
+			break;
+		}
+
+		new_bfqq = new_bfqq->new_bfqq;
+	}
+
+	return waker_bfqq;
+}
+
+static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
+						   struct bfq_io_cq *bic,
+						   struct bio *bio,
+						   unsigned int idx,
+						   bool is_sync)
+{
+	struct bfq_queue *waker_bfqq;
+	struct bfq_queue *bfqq;
+	bool new_queue = false;
+
+	bfqq = __bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
+					   &new_queue);
+	if (unlikely(new_queue))
+		return bfqq;
+
+	/* If the queue was seeky for too long, break it apart. */
+	if (!bfq_bfqq_coop(bfqq) || !bfq_bfqq_split_coop(bfqq) ||
+	    bic->bfqq_data[idx].stably_merged)
+		return bfqq;
+
+	waker_bfqq = bfq_waker_bfqq(bfqq);
+
+	/* Update bic before losing reference to bfqq */
+	if (bfq_bfqq_in_large_burst(bfqq))
+		bic->bfqq_data[idx].saved_in_large_burst = true;
+
+	bfqq = bfq_split_bfqq(bic, bfqq);
+	if (bfqq) {
+		bfq_bfqq_resume_state(bfqq, bfqd, bic, true);
+		return bfqq;
+	}
+
+	bfqq = __bfq_get_bfqq_handle_split(bfqd, bic, bio, true, is_sync, NULL);
+	if (unlikely(bfqq == &bfqd->oom_bfqq))
+		return bfqq;
+
+	bfq_bfqq_resume_state(bfqq, bfqd, bic, false);
+	bfqq->waker_bfqq = waker_bfqq;
+	bfqq->tentative_waker_bfqq = NULL;
+
+	/*
+	 * If the waker queue disappears, then new_bfqq->waker_bfqq must be
+	 * reset. So insert new_bfqq into the woken_list of the waker.
+	 * See bfq_check_waker for details.
+	 */
+	if (waker_bfqq)
+		hlist_add_head(&bfqq->woken_list_node,
+			       &bfqq->waker_bfqq->woken_list);
+
+	return bfqq;
+}
+
 /*
  * If needed, init rq, allocate bfq data structures associated with
  * rq, and increment reference counters in the destination bfq_queue
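The bfq_waker_bfqq() helper added above guards against keeping a waker pointer that is about to die: if the waker sits inside bfqq's own merge chain and the current process holds the only process reference to it, the upcoming split would leave a dangling waker_bfqq. A standalone sketch of that chain-membership check, with simplified types and a plain integer standing in for bfqq_process_refs():

```c
#include <assert.h>
#include <stddef.h>

struct queue {
	int process_refs;		/* stand-in for bfqq_process_refs() */
	struct queue *new_bfqq;		/* merge-chain link */
	struct queue *waker_bfqq;	/* queue that wakes this one, if any */
};

/* Return the waker only if a pointer to it will remain valid. */
static struct queue *safe_waker(struct queue *bfqq)
{
	struct queue *cur = bfqq->new_bfqq;
	struct queue *waker = bfqq->waker_bfqq;

	if (!waker)
		return NULL;

	for (; cur; cur = cur->new_bfqq) {
		if (cur == waker) {
			/* In the chain; a sole ref means it will be freed. */
			if (waker->process_refs == 1)
				return NULL;
			break;
		}
	}
	return waker;
}

int main(void)
{
	struct queue w = { .process_refs = 1 };
	struct queue q = { .new_bfqq = &w, .waker_bfqq = &w };

	assert(safe_waker(&q) == NULL);	/* sole reference: drop the waker */

	w.process_refs = 2;
	assert(safe_waker(&q) == &w);	/* still referenced elsewhere: keep it */
	return 0;
}
```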
@@ -6852,8 +6930,6 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
 	struct bfq_io_cq *bic;
 	const int is_sync = rq_is_sync(rq);
 	struct bfq_queue *bfqq;
-	bool new_queue = false;
-	bool bfqq_already_existing = false, split = false;
 	unsigned int a_idx = bfq_actuator_index(bfqd, bio);

 	if (unlikely(!rq->elv.icq))
@@ -6870,54 +6946,9 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
 		return RQ_BFQQ(rq);

 	bic = icq_to_bic(rq->elv.icq);
-
 	bfq_check_ioprio_change(bic, bio);
-
 	bfq_bic_update_cgroup(bic, bio);
-
-	bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
-					 &new_queue);
-
-	if (likely(!new_queue)) {
-		/* If the queue was seeky for too long, break it apart. */
-		if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
-		    !bic->bfqq_data[a_idx].stably_merged) {
-			struct bfq_queue *old_bfqq = bfqq;
-
-			/* Update bic before losing reference to bfqq */
-			if (bfq_bfqq_in_large_burst(bfqq))
-				bic->bfqq_data[a_idx].saved_in_large_burst =
-					true;
-
-			bfqq = bfq_split_bfqq(bic, bfqq);
-			split = true;
-
-			if (!bfqq) {
-				bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
-								 true, is_sync,
-								 NULL);
-				if (unlikely(bfqq == &bfqd->oom_bfqq))
-					bfqq_already_existing = true;
-			} else
-				bfqq_already_existing = true;
-
-			if (!bfqq_already_existing) {
-				bfqq->waker_bfqq = old_bfqq->waker_bfqq;
-				bfqq->tentative_waker_bfqq = NULL;
-
-				/*
-				 * If the waker queue disappears, then
-				 * new_bfqq->waker_bfqq must be
-				 * reset. So insert new_bfqq into the
-				 * woken_list of the waker. See
-				 * bfq_check_waker for details.
-				 */
-				if (bfqq->waker_bfqq)
-					hlist_add_head(&bfqq->woken_list_node,
-						       &bfqq->waker_bfqq->woken_list);
-			}
-		}
-	}
+	bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, a_idx, is_sync);

 	bfqq_request_allocated(bfqq);
 	bfqq->ref++;
@@ -6935,18 +6966,8 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
 	 * resume its state.
 	 */
 	if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq &&
-	    bfqq_process_refs(bfqq) == 1) {
+	    bfqq_process_refs(bfqq) == 1)
 		bfqq->bic = bic;
-		if (split) {
-			/*
-			 * The queue has just been split from a shared
-			 * queue: restore the idle window and the
-			 * possible weight raising period.
-			 */
-			bfq_bfqq_resume_state(bfqq, bfqd, bic,
-					      bfqq_already_existing);
-		}
-	}

 	/*
 	 * Consider bfqq as possibly belonging to a burst of newly