|
20 | 20 |
|
21 | 21 | #include "gennvm.h"
|
22 | 22 |
|
| 23 | +static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len) |
| 24 | +{ |
| 25 | + struct gen_nvm *gn = dev->mp; |
| 26 | + struct gennvm_area *area, *prev, *next; |
| 27 | + sector_t begin = 0; |
| 28 | + sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9; |
| 29 | + |
| 30 | + if (len > max_sectors) |
| 31 | + return -EINVAL; |
| 32 | + |
| 33 | + area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL); |
| 34 | + if (!area) |
| 35 | + return -ENOMEM; |
| 36 | + |
| 37 | + prev = NULL; |
| 38 | + |
| 39 | + spin_lock(&dev->lock); |
| 40 | + list_for_each_entry(next, &gn->area_list, list) { |
| 41 | + if (begin + len > next->begin) { |
| 42 | + begin = next->end; |
| 43 | + prev = next; |
| 44 | + continue; |
| 45 | + } |
| 46 | + break; |
| 47 | + } |
| 48 | + |
| 49 | + if ((begin + len) > max_sectors) { |
| 50 | + spin_unlock(&dev->lock); |
| 51 | + kfree(area); |
| 52 | + return -EINVAL; |
| 53 | + } |
| 54 | + |
| 55 | + area->begin = *lba = begin; |
| 56 | + area->end = begin + len; |
| 57 | + |
| 58 | + if (prev) /* insert into sorted order */ |
| 59 | + list_add(&area->list, &prev->list); |
| 60 | + else |
| 61 | + list_add(&area->list, &gn->area_list); |
| 62 | + spin_unlock(&dev->lock); |
| 63 | + |
| 64 | + return 0; |
| 65 | +} |
| 66 | + |
| 67 | +static void gennvm_put_area(struct nvm_dev *dev, sector_t begin) |
| 68 | +{ |
| 69 | + struct gen_nvm *gn = dev->mp; |
| 70 | + struct gennvm_area *area; |
| 71 | + |
| 72 | + spin_lock(&dev->lock); |
| 73 | + list_for_each_entry(area, &gn->area_list, list) { |
| 74 | + if (area->begin != begin) |
| 75 | + continue; |
| 76 | + |
| 77 | + list_del(&area->list); |
| 78 | + spin_unlock(&dev->lock); |
| 79 | + kfree(area); |
| 80 | + return; |
| 81 | + } |
| 82 | + spin_unlock(&dev->lock); |
| 83 | +} |
| 84 | + |
23 | 85 | static void gennvm_blocks_free(struct nvm_dev *dev)
|
24 | 86 | {
|
25 | 87 | struct gen_nvm *gn = dev->mp;
|
@@ -195,7 +257,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
|
195 | 257 | }
|
196 | 258 | }
|
197 | 259 |
|
198 |
| - if (dev->ops->get_l2p_tbl) { |
| 260 | + if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) { |
199 | 261 | ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
|
200 | 262 | gennvm_block_map, dev);
|
201 | 263 | if (ret) {
|
@@ -229,6 +291,7 @@ static int gennvm_register(struct nvm_dev *dev)
|
229 | 291 |
|
230 | 292 | gn->dev = dev;
|
231 | 293 | gn->nr_luns = dev->nr_luns;
|
| 294 | + INIT_LIST_HEAD(&gn->area_list); |
232 | 295 | dev->mp = gn;
|
233 | 296 |
|
234 | 297 | ret = gennvm_luns_init(dev, gn);
|
@@ -419,10 +482,23 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
|
419 | 482 | return nvm_erase_ppa(dev, &addr, 1);
|
420 | 483 | }
|
421 | 484 |
|
/*
 * Atomically claim exclusive ownership of LUN @lunid.
 *
 * Returns 0 if the LUN was free and is now reserved, non-zero if it was
 * already taken (test_and_set_bit() returns the bit's previous value).
 */
static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
{
	return test_and_set_bit(lunid, dev->lun_map);
}
| 489 | + |
/*
 * Drop a reservation taken via gennvm_reserve_lun().
 *
 * Warns if the LUN bit was not actually set, which would indicate an
 * unbalanced reserve/release pairing by the caller.
 */
static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
{
	WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
}
| 494 | + |
422 | 495 | static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
|
423 | 496 | {
|
424 | 497 | struct gen_nvm *gn = dev->mp;
|
425 | 498 |
|
| 499 | + if (unlikely(lunid >= dev->nr_luns)) |
| 500 | + return NULL; |
| 501 | + |
426 | 502 | return &gn->luns[lunid].vlun;
|
427 | 503 | }
|
428 | 504 |
|
@@ -464,7 +540,13 @@ static struct nvmm_type gennvm = {
|
464 | 540 | .erase_blk = gennvm_erase_blk,
|
465 | 541 |
|
466 | 542 | .get_lun = gennvm_get_lun,
|
| 543 | + .reserve_lun = gennvm_reserve_lun, |
| 544 | + .release_lun = gennvm_release_lun, |
467 | 545 | .lun_info_print = gennvm_lun_info_print,
|
| 546 | + |
| 547 | + .get_area = gennvm_get_area, |
| 548 | + .put_area = gennvm_put_area, |
| 549 | + |
468 | 550 | };
|
469 | 551 |
|
470 | 552 | static int __init gennvm_module_init(void)
|
|
0 commit comments