@@ -24,23 +24,13 @@ static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
 static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
                                    gpa_t addr, int len)
 {
-        struct kvm_coalesced_mmio_zone *zone;
-        int i;
-
-        /* is it in a batchable area ? */
-
-        for (i = 0; i < dev->nb_zones; i++) {
-                zone = &dev->zone[i];
-
-                /* (addr,len) is fully included in
-                 * (zone->addr, zone->size)
-                 */
+        /* is it in a batchable area ?
+         * (addr,len) is fully included in
+         * (zone->addr, zone->size)
+         */
 
-                if (zone->addr <= addr &&
-                    addr + len <= zone->addr + zone->size)
-                        return 1;
-        }
-        return 0;
+        return (dev->zone.addr <= addr &&
+                addr + len <= dev->zone.addr + dev->zone.size);
 }
 
 static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
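Note: the range check above collapses to a single comparison because, after this change, each registered zone is its own kvm_io_device on the MMIO bus instead of an index into a fixed per-device array. The per-zone structure itself is defined in the companion header change, which is not part of this hunk; inferred purely from how the fields are used in this file, it presumably looks roughly like the sketch below.

/* Sketch only: inferred from usage in this file (dev->list, dev->dev,
 * dev->kvm, dev->zone); the authoritative definition lives in
 * virt/kvm/coalesced_mmio.h and may differ in detail. */
struct kvm_coalesced_mmio_dev {
        struct list_head list;                /* linked into kvm->coalesced_zones */
        struct kvm_io_device dev;             /* registered on KVM_MMIO_BUS */
        struct kvm *kvm;                      /* owning VM, for kvm->ring_lock */
        struct kvm_coalesced_mmio_zone zone;  /* the single batchable range */
};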
@@ -73,10 +63,10 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
         if (!coalesced_mmio_in_range(dev, addr, len))
                 return -EOPNOTSUPP;
 
-        spin_lock(&dev->lock);
+        spin_lock(&dev->kvm->ring_lock);
 
         if (!coalesced_mmio_has_room(dev)) {
-                spin_unlock(&dev->lock);
+                spin_unlock(&dev->kvm->ring_lock);
                 return -EOPNOTSUPP;
         }
 
@@ -87,14 +77,16 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
         memcpy(ring->coalesced_mmio[ring->last].data, val, len);
         smp_wmb();
         ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
-        spin_unlock(&dev->lock);
+        spin_unlock(&dev->kvm->ring_lock);
         return 0;
 }
 
 static void coalesced_mmio_destructor(struct kvm_io_device *this)
 {
         struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 
+        list_del(&dev->list);
+
         kfree(dev);
 }
 
@@ -105,39 +97,25 @@ static const struct kvm_io_device_ops coalesced_mmio_ops = {
 
 int kvm_coalesced_mmio_init(struct kvm *kvm)
 {
-        struct kvm_coalesced_mmio_dev *dev;
         struct page *page;
         int ret;
 
         ret = -ENOMEM;
         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
         if (!page)
                 goto out_err;
-        kvm->coalesced_mmio_ring = page_address(page);
 
-        ret = -ENOMEM;
-        dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
-        if (!dev)
-                goto out_free_page;
-        spin_lock_init(&dev->lock);
-        kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
-        dev->kvm = kvm;
-        kvm->coalesced_mmio_dev = dev;
-
-        mutex_lock(&kvm->slots_lock);
-        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
-        mutex_unlock(&kvm->slots_lock);
-        if (ret < 0)
-                goto out_free_dev;
+        ret = 0;
+        kvm->coalesced_mmio_ring = page_address(page);
 
-        return ret;
+        /*
+         * We're using this spinlock to sync access to the coalesced ring.
+         * The list doesn't need its own lock since device registration and
+         * unregistration should only happen when kvm->slots_lock is held.
+         */
+        spin_lock_init(&kvm->ring_lock);
+        INIT_LIST_HEAD(&kvm->coalesced_zones);
 
-out_free_dev:
-        kvm->coalesced_mmio_dev = NULL;
-        kfree(dev);
-out_free_page:
-        kvm->coalesced_mmio_ring = NULL;
-        __free_page(page);
 out_err:
         return ret;
 }
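kvm_coalesced_mmio_init() now only allocates the shared ring page and sets up synchronization state. The spinlock and list head it initializes are presumably new fields added to struct kvm by the companion kvm_host.h change (not shown here); a rough sketch, with names taken from their use above:

/* Assumed additions to struct kvm; exact placement and neighbouring
 * fields are not visible in this diff. */
struct kvm {
        /* ... existing fields ... */
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;  /* pre-existing, shared with userspace */
        spinlock_t ring_lock;                  /* new: serializes producers into the ring */
        struct list_head coalesced_zones;      /* new: one kvm_coalesced_mmio_dev per zone */
        /* ... */
};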
@@ -151,51 +129,49 @@ void kvm_coalesced_mmio_free(struct kvm *kvm)
 int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
                                          struct kvm_coalesced_mmio_zone *zone)
 {
-        struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
+        int ret;
+        struct kvm_coalesced_mmio_dev *dev;
 
-        if (dev == NULL)
-                return -ENXIO;
+        dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
+        if (!dev)
+                return -ENOMEM;
+
+        kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
+        dev->kvm = kvm;
+        dev->zone = *zone;
 
         mutex_lock(&kvm->slots_lock);
-        if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
-                mutex_unlock(&kvm->slots_lock);
-                return -ENOBUFS;
-        }
+        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+        if (ret < 0)
+                goto out_free_dev;
+        list_add_tail(&dev->list, &kvm->coalesced_zones);
+        mutex_unlock(&kvm->slots_lock);
 
-        dev->zone[dev->nb_zones] = *zone;
-        dev->nb_zones++;
+        return ret;
 
+out_free_dev:
         mutex_unlock(&kvm->slots_lock);
+
+        kfree(dev);
+
+        if (dev == NULL)
+                return -ENXIO;
+
         return 0;
 }
 
 int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
                                            struct kvm_coalesced_mmio_zone *zone)
 {
-        int i;
-        struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
-        struct kvm_coalesced_mmio_zone *z;
-
-        if (dev == NULL)
-                return -ENXIO;
+        struct kvm_coalesced_mmio_dev *dev, *tmp;
 
         mutex_lock(&kvm->slots_lock);
 
-        i = dev->nb_zones;
-        while (i) {
-                z = &dev->zone[i - 1];
-
-                /* unregister all zones
-                 * included in (zone->addr, zone->size)
-                 */
-
-                if (zone->addr <= z->addr &&
-                    z->addr + z->size <= zone->addr + zone->size) {
-                        dev->nb_zones--;
-                        *z = dev->zone[dev->nb_zones];
+        list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
+                if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+                        kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+                        kvm_iodevice_destructor(&dev->dev);
                 }
-                i--;
-        }
 
         mutex_unlock(&kvm->slots_lock);
 
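For context on how these ioctls are consumed: userspace registers a guest-physical range with KVM_REGISTER_COALESCED_MMIO on the VM fd and later replays the batched writes out of the shared ring page (the page mmap()ed at KVM_COALESCED_MMIO_PAGE_OFFSET of the vcpu mapping). A minimal sketch, assuming vm_fd, the mapped ring, and the page size are already available; handle_mmio_write() is a hypothetical VMM callback, not a KVM API, and error handling is omitted.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Register a zone for write coalescing. After this patch, each call
 * creates one kvm_coalesced_mmio_dev on the MMIO bus. */
static void register_zone(int vm_fd, __u64 addr, __u32 size)
{
        struct kvm_coalesced_mmio_zone zone;

        memset(&zone, 0, sizeof(zone));
        zone.addr = addr;
        zone.size = size;
        ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
}

/* Drain entries queued by coalesced_mmio_write() above. */
static void drain_ring(struct kvm_coalesced_mmio_ring *ring, long page_size,
                       void (*handle_mmio_write)(__u64 addr, const void *data, __u32 len))
{
        /* Same bound the kernel uses for KVM_COALESCED_MMIO_MAX. */
        __u32 max = (page_size - sizeof(*ring)) / sizeof(struct kvm_coalesced_mmio);

        while (ring->first != ring->last) {
                struct kvm_coalesced_mmio *ent = &ring->coalesced_mmio[ring->first];

                handle_mmio_write(ent->phys_addr, ent->data, ent->len);
                ring->first = (ring->first + 1) % max;
        }
}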