This patch changes coalesced mmio to create one mmio device per
zone instead of handling all zones in one device.
Doing so lets us take advantage of the existing locking on the io bus and
prevents a race between coalesced mmio registration/unregistration and
in-range lookups.
Suggested-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
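---

For context, the userspace ABI is untouched by this change: a zone is still
registered with KVM_REGISTER_COALESCED_MMIO on the VM fd, and batched writes
are drained from the shared ring page inside the vcpu mmap. Below is a
minimal, illustrative sketch of that contract (not part of this patch; the
helper names register_zone and drain_ring are made up here, error handling is
elided, and the ring capacity is recomputed locally because the UAPI macro
KVM_COALESCED_MMIO_MAX depends on PAGE_SIZE):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Ask KVM to batch guest writes to [addr, addr + size) instead of exiting. */
static int register_zone(int vm_fd, __u64 addr, __u32 size)
{
        struct kvm_coalesced_mmio_zone zone = { .addr = addr, .size = size };

        return ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
}

/*
 * Replay whatever the kernel queued. 'run' is the mapping of the vcpu fd
 * (sized via KVM_GET_VCPU_MMAP_SIZE, so it covers the coalesced page at
 * KVM_COALESCED_MMIO_PAGE_OFFSET).
 */
static void drain_ring(void *run, long page_size)
{
        struct kvm_coalesced_mmio_ring *ring = (void *)((char *)run +
                        KVM_COALESCED_MMIO_PAGE_OFFSET * page_size);
        __u32 max = (page_size - sizeof(*ring)) /
                        sizeof(struct kvm_coalesced_mmio);

        while (ring->first != ring->last) {
                struct kvm_coalesced_mmio *e = &ring->coalesced_mmio[ring->first];

                /* ...replay e->data[0..e->len) at guest address e->phys_addr... */

                ring->first = (ring->first + 1) % max;
        }
}
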
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ ... @@
 */
struct kvm_io_bus {
int dev_count;
-#define NR_IOBUS_DEVS 200
+#define NR_IOBUS_DEVS 300
struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
@@ ... @@ struct kvm {
struct kvm_arch arch;
atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
+ spinlock_t ring_lock;
+ struct list_head coalesced_zones;
#endif

struct mutex irq_lock;
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ ... @@
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
gpa_t addr, int len)
{
- struct kvm_coalesced_mmio_zone *zone;
- int i;
-
- /* is it in a batchable area ? */
-
- for (i = 0; i < dev->nb_zones; i++) {
- zone = &dev->zone[i];
-
- /* (addr,len) is fully included in
- * (zone->addr, zone->size)
- */
+ /* is it in a batchable area ?
+ * (addr,len) is fully included in
+ * (zone->addr, zone->size)
+ */
- if (zone->addr <= addr &&
- addr + len <= zone->addr + zone->size)
- return 1;
- }
- return 0;
+ return (dev->zone.addr <= addr &&
+ addr + len <= dev->zone.addr + dev->zone.size);
}

static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)

@@ ... @@ static int coalesced_mmio_write(struct kvm_io_device *this,
if (!coalesced_mmio_in_range(dev, addr, len))
return -EOPNOTSUPP;

- spin_lock(&dev->lock);
+ spin_lock(&dev->kvm->ring_lock);

if (!coalesced_mmio_has_room(dev)) {
- spin_unlock(&dev->lock);
+ spin_unlock(&dev->kvm->ring_lock);
return -EOPNOTSUPP;
}

memcpy(ring->coalesced_mmio[ring->last].data, val, len);
smp_wmb();
ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
- spin_unlock(&dev->lock);
+ spin_unlock(&dev->kvm->ring_lock);
return 0;
}
@@ ... @@ static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

+ list_del(&dev->list);
+
kfree(dev);
}
@@ ... @@
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
- struct kvm_coalesced_mmio_dev *dev;
struct page *page;
int ret;

ret = -ENOMEM;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
goto out_err;

- kvm->coalesced_mmio_ring = page_address(page);
-
- ret = -ENOMEM;
- dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
- if (!dev)
- goto out_free_page;
- spin_lock_init(&dev->lock);
- kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
- dev->kvm = kvm;
- kvm->coalesced_mmio_dev = dev;
-
- mutex_lock(&kvm->slots_lock);
- ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
- mutex_unlock(&kvm->slots_lock);
- if (ret < 0)
- goto out_free_dev;
-
- return ret;
+ ret = 0;
+ kvm->coalesced_mmio_ring = page_address(page);
+
+ /*
+ * We're using this spinlock to sync access to the coalesced ring.
+ * The list doesn't need its own lock since device registration and
+ * unregistration should only happen when kvm->slots_lock is held.
+ */
+ spin_lock_init(&kvm->ring_lock);
+ INIT_LIST_HEAD(&kvm->coalesced_zones);

-out_free_dev:
- kvm->coalesced_mmio_dev = NULL;
- kfree(dev);
-out_free_page:
- kvm->coalesced_mmio_ring = NULL;
- __free_page(page);
out_err:
return ret;
}
@@ ... @@
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone)
{
- struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
+ int ret;
+ struct kvm_coalesced_mmio_dev *dev;
- if (dev == NULL)
- return -ENXIO;
+ dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
+ dev->kvm = kvm;
+ dev->zone = *zone;
mutex_lock(&kvm->slots_lock);
- if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
- mutex_unlock(&kvm->slots_lock);
- return -ENOBUFS;
- }
-
- dev->zone[dev->nb_zones] = *zone;
- dev->nb_zones++;
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+ if (ret < 0)
+ goto out_free_dev;
+ list_add_tail(&dev->list, &kvm->coalesced_zones);
mutex_unlock(&kvm->slots_lock);

return 0;
+
+out_free_dev:
+ mutex_unlock(&kvm->slots_lock);
+ kfree(dev);
+ return ret;
}
@@ ... @@
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone)
{
- int i;
- struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
- struct kvm_coalesced_mmio_zone *z;
-
- if (dev == NULL)
- return -ENXIO;
+ struct kvm_coalesced_mmio_dev *dev, *tmp;
mutex_lock(&kvm->slots_lock);

- i = dev->nb_zones;
- while (i) {
- z = &dev->zone[i - 1];
-
- /* unregister all zones
- * included in (zone->addr, zone->size)
- */
-
- if (zone->addr <= z->addr &&
- z->addr + z->size <= zone->addr + zone->size) {
- dev->nb_zones--;
- *z = dev->zone[dev->nb_zones];
- }
- i--;
- }
+ list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
+ if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+ kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+ kvm_iodevice_destructor(&dev->dev);
+ }

mutex_unlock(&kvm->slots_lock);

return 0;
}
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ ... @@
-#define KVM_COALESCED_MMIO_ZONE_MAX 100
-
struct kvm_coalesced_mmio_dev {
+ struct list_head list;
struct kvm_io_device dev;
struct kvm *kvm;
- spinlock_t lock;
- int nb_zones;
- struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
+ struct kvm_coalesced_mmio_zone zone;
};

int kvm_coalesced_mmio_init(struct kvm *kvm);
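
A note on the unregister path above: kvm_iodevice_destructor() ends up in
coalesced_mmio_destructor(), which list_del()s and kfree()s the node being
visited, so the walk must use list_for_each_entry_safe() rather than plain
list_for_each_entry(). The same idiom in isolation (zone_dev and
remove_matching are illustrative names, not the kernel's):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct zone_dev {
        struct list_head list;
        /* per-zone state would live here */
};

/* Remove and free every entry accepted by match(). The _safe variant
 * caches the next node up front, so freeing the current one is fine. */
static void remove_matching(struct list_head *zones,
                            bool (*match)(struct zone_dev *))
{
        struct zone_dev *dev, *tmp;

        list_for_each_entry_safe(dev, tmp, zones, list) {
                if (match(dev)) {
                        list_del(&dev->list);
                        kfree(dev);
                }
        }
}

As the comment added in kvm_coalesced_mmio_init() says, no list lock is taken
here because registration and unregistration both run under kvm->slots_lock;
only the ring, which vcpus append to on the hot path, needs the new ring_lock.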