@@ -20,15 +20,13 @@ import (
 	"sync"
 	"sync/atomic"

-	"golang.org/x/sys/cpu"
-
 	"github.com/matrixorigin/matrixone/pkg/fileservice/fscache"
+	"golang.org/x/sys/cpu"
 )

 const numShards = 256

 // Cache implements an in-memory cache with FIFO-based eviction
-// it's mostly like the S3-fifo, only without the ghost queue part
 type Cache[K comparable, V any] struct {
 	capacity  fscache.CapacityFunc
 	capacity1 fscache.CapacityFunc
@@ -51,15 +49,18 @@ type Cache[K comparable, V any] struct {
 	queue1 Queue[*_CacheItem[K, V]]
 	used2  int64
 	queue2 Queue[*_CacheItem[K, V]]
+	ghostSize int64
+	ghost     Queue[*_CacheItem[K, V]]

 	capacityCut atomic.Int64
 }

 type _CacheItem[K comparable, V any] struct {
-	key   K
-	value V
-	size  int64
-	count atomic.Int32
+	key     K
+	value   V
+	valueOK bool
+	size    int64
+	count   atomic.Int32
 }

 func (c *_CacheItem[K, V]) inc() {
@@ -101,6 +102,7 @@ func New[K comparable, V any](
 		itemQueue: make(chan *_CacheItem[K, V], runtime.GOMAXPROCS(0)*2),
 		queue1:    *NewQueue[*_CacheItem[K, V]](),
 		queue2:    *NewQueue[*_CacheItem[K, V]](),
+		ghost:     *NewQueue[*_CacheItem[K, V]](),
 		keyShardFunc: keyShardFunc,
 		postSet:      postSet,
 		postGet:      postGet,
@@ -116,16 +118,37 @@ func (c *Cache[K, V]) set(ctx context.Context, key K, value V, size int64) *_Cac
 	shard := &c.shards[c.keyShardFunc(key)%numShards]
 	shard.Lock()
 	defer shard.Unlock()
-	_, ok := shard.values[key]
+
+	oldItem, ok := shard.values[key]
 	if ok {
-		// existed
+
+		// ghost item
+		if !oldItem.valueOK {
+			// insert new item
+			item := &_CacheItem[K, V]{
+				key:     key,
+				value:   value,
+				valueOK: true,
+				size:    size,
+			}
+			item.count.Store(oldItem.count.Load())
+			// replacing the oldItem. oldItem will be evicted from ghost queue eventually.
+			shard.values[key] = item
+			if c.postSet != nil {
+				c.postSet(ctx, key, value, size)
+			}
+			return item
+		}
+
+		// existed and value ok, skip set
 		return nil
 	}

 	item := &_CacheItem[K, V]{
-		key:   key,
-		value: value,
-		size:  size,
+		key:     key,
+		value:   value,
+		valueOK: true,
+		size:    size,
 	}
 	shard.values[key] = item
 	if c.postSet != nil {
@@ -137,6 +160,7 @@ func (c *Cache[K, V]) set(ctx context.Context, key K, value V, size int64) *_Cac
 
 func (c *Cache[K, V]) Set(ctx context.Context, key K, value V, size int64) {
 	if item := c.set(ctx, key, value, size); item != nil {
+		// item inserted, enqueue
 		c.enqueue(item)
 		c.Evict(ctx, nil, 0)
 	}
@@ -175,19 +199,33 @@ func (c *Cache[K, V]) enqueue(item *_CacheItem[K, V]) {
 }

 func (c *Cache[K, V]) Get(ctx context.Context, key K) (value V, ok bool) {
+	var item *_CacheItem[K, V]
+	defer func() {
+		// item ok, increase count
+		if item != nil {
+			item.inc()
+		}
+	}()
+
 	shard := &c.shards[c.keyShardFunc(key)%numShards]
 	shard.Lock()
-	var item *_CacheItem[K, V]
+	defer shard.Unlock()
+
 	item, ok = shard.values[key]
 	if !ok {
-		shard.Unlock()
+		// not exist
+		return
+	}
+
+	// ghost item
+	if !item.valueOK {
+		ok = false
 		return
 	}
+
 	if c.postGet != nil {
 		c.postGet(ctx, item.key, item.value, item.size)
 	}
-	shard.Unlock()
-	item.inc()
 	return item.value, true
 }
 
@@ -200,11 +238,9 @@ func (c *Cache[K, V]) Delete(ctx context.Context, key K) {
 		return
 	}
 	delete(shard.values, key)
-	// key deleted, call postEvict
-	if c.postEvict != nil {
-		c.postEvict(ctx, item.key, item.value, item.size)
-	}
-	// queues will be update in evict
+	c.purgeItemValue(ctx, item)
+	// we do not update queues here, to reduce cost
+	// deleted item in queue will be evicted eventually.
 }

 func (c *Cache[K, V]) Evict(ctx context.Context, done chan int64, capacityCut int64) {
@@ -279,27 +315,15 @@ func (c *Cache[K, V]) evict1(ctx context.Context) {
 			c.used1 -= item.size
 			c.used2 += item.size
 		} else {
-			// evict
-			c.deleteItem(ctx, item)
+			// put ghost
+			c.enqueueGhost(ctx, item)
+			c.evictGhost()
 			c.used1 -= item.size
 			return
 		}
 	}
 }

-func (c *Cache[K, V]) deleteItem(ctx context.Context, item *_CacheItem[K, V]) {
-	shard := &c.shards[c.keyShardFunc(item.key)%numShards]
-	shard.Lock()
-	defer shard.Unlock()
-	if _, ok := shard.values[item.key]; ok {
-		delete(shard.values, item.key)
-		// key deleted, call postEvict
-		if c.postEvict != nil {
-			c.postEvict(ctx, item.key, item.value, item.size)
-		}
-	}
-}
-
 func (c *Cache[K, V]) evict2(ctx context.Context) {
 	// queue 2
 	for {
@@ -313,10 +337,55 @@ func (c *Cache[K, V]) evict2(ctx context.Context) {
 			c.queue2.enqueue(item)
 			item.dec()
 		} else {
-			// evict
-			c.deleteItem(ctx, item)
+			// put ghost
+			c.enqueueGhost(ctx, item)
+			c.evictGhost()
 			c.used2 -= item.size
 			return
 		}
 	}
 }
+
+func (c *Cache[K, V]) enqueueGhost(ctx context.Context, item *_CacheItem[K, V]) {
+	c.ghost.enqueue(item)
+	c.ghostSize += item.size
+
+	shard := &c.shards[c.keyShardFunc(item.key)%numShards]
+	shard.Lock()
+	defer shard.Unlock()
+	c.purgeItemValue(ctx, item)
+}
+
+func (c *Cache[K, V]) purgeItemValue(ctx context.Context, item *_CacheItem[K, V]) {
+	if !item.valueOK {
+		return
+	}
+	if c.postEvict != nil {
+		c.postEvict(ctx, item.key, item.value, item.size)
+	}
+	item.valueOK = false
+	var zero V
+	item.value = zero
+}
+
+func (c *Cache[K, V]) evictGhost() {
+	ghostCapacity := c.capacity() - c.capacity1() // same as queue2 capacity
+	for c.ghostSize > ghostCapacity {
+		item, ok := c.ghost.dequeue()
+		if !ok {
+			break
+		}
+		c.ghostSize -= item.size
+		c.deleteItem(item)
+	}
+}
+
+func (c *Cache[K, V]) deleteItem(item *_CacheItem[K, V]) {
+	shard := &c.shards[c.keyShardFunc(item.key)%numShards]
+	shard.Lock()
+	defer shard.Unlock()
+	// item may be replaced in set, check before delete
+	if shard.values[item.key] == item {
+		delete(shard.values, item.key)
+	}
+}