/*
 * Balloon kthread, wait_woken() variant: sleeps on the config_change wait
 * queue until the target size changes, a stats request arrives, or the
 * thread is stopped/frozen, then inflates or deflates towards the target.
 */
static int balloon(void *_vballoon)
{
	struct virtio_balloon *vb = _vballoon;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	set_freezable();
	while (!kthread_should_stop()) {
		s64 diff;

		try_to_freeze();

		add_wait_queue(&vb->config_change, &wait);
		for (;;) {
			if ((diff = towards_target(vb)) != 0 ||
			    vb->need_stats_update ||
			    kthread_should_stop() ||
			    freezing(current))
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		}
		remove_wait_queue(&vb->config_change, &wait);

		if (vb->need_stats_update)
			stats_handle_request(vb);
		if (diff > 0)
			fill_balloon(vb, diff);
		else if (diff < 0)
			leak_balloon(vb, -diff);
		update_balloon_size(vb);

		/*
		 * For large balloon changes, we could spend a lot of time
		 * and always have work to do.  Be nice if preempt disabled.
		 */
		cond_resched();
	}
	return 0;
}
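/*
 * Config-change callback.  Queues the size-update work item when the
 * target differs from the current balloon size and, if free page hinting
 * was negotiated, reads the host's command id to start or stop free page
 * reporting.
 */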
static void virtballoon_changed(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;
	unsigned long flags;
	s64 diff = towards_target(vb);

	if (diff) {
		spin_lock_irqsave(&vb->stop_update_lock, flags);
		if (!vb->stop_update)
			queue_work(system_freezable_wq,
				   &vb->update_balloon_size_work);
		spin_unlock_irqrestore(&vb->stop_update_lock, flags);
	}

	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		virtio_cread(vdev, struct virtio_balloon_config,
			     free_page_report_cmd_id, &vb->cmd_id_received);
		if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
			/* Pass ULONG_MAX to give back all the free pages */
			return_free_pages_to_mm(vb, ULONG_MAX);
		} else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
			   vb->cmd_id_received !=
				virtio32_to_cpu(vdev, vb->cmd_id_active)) {
			spin_lock_irqsave(&vb->stop_update_lock, flags);
			if (!vb->stop_update) {
				queue_work(vb->balloon_wq,
					   &vb->report_free_page_work);
			}
			spin_unlock_irqrestore(&vb->stop_update_lock, flags);
		}
	}
}
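/*
 * Balloon kthread, wait_event_interruptible() variant of the loop above,
 * with a cond_resched() between iterations.
 */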
static int balloon(void *_vballoon)
{
	struct virtio_balloon *vb = _vballoon;

	set_freezable();
	while (!kthread_should_stop()) {
		s64 diff;

		try_to_freeze();
		wait_event_interruptible(vb->config_change,
					 (diff = towards_target(vb)) != 0
					 || vb->need_stats_update
					 || kthread_should_stop()
					 || freezing(current));
		if (vb->need_stats_update)
			stats_handle_request(vb);
		if (diff > 0)
			fill_balloon(vb, diff);
		else if (diff < 0)
			leak_balloon(vb, -diff);
		update_balloon_size(vb);

		/*
		 * For large balloon changes, we could spend a lot of time
		 * and always have work to do.  Be nice if preempt disabled.
		 */
		cond_resched();
	}
	return 0;
}
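/* Re-initialize the virtqueues after resume and re-inflate to the target. */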
static int virtballoon_restore(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;
	int ret;

	ret = init_vqs(vdev->priv);
	if (ret)
		return ret;

	fill_balloon(vb, towards_target(vb));
	update_balloon_size(vb);
	return 0;
}
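/*
 * Workqueue-based counterpart of the balloon kthread: performs one
 * inflate/deflate step towards the target and requeues itself until the
 * target is reached.
 */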
static void update_balloon_size_func(struct work_struct *work)
{
	struct virtio_balloon *vb;
	s64 diff;

	vb = container_of(work, struct virtio_balloon,
			  update_balloon_size_work);
	diff = towards_target(vb);

	if (diff > 0)
		diff -= fill_balloon(vb, diff);
	else if (diff < 0)
		diff += leak_balloon(vb, -diff);
	update_balloon_size(vb);

	if (diff)
		queue_work(system_freezable_wq, work);
}
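/*
 * Variant of the balloon kthread without the cond_resched() throttle
 * between iterations.
 */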
static int balloon(void *_vballoon)
{
	struct virtio_balloon *vb = _vballoon;

	set_freezable();
	while (!kthread_should_stop()) {
		s64 diff;

		try_to_freeze();
		wait_event_interruptible(vb->config_change,
					 (diff = towards_target(vb)) != 0
					 || vb->need_stats_update
					 || kthread_should_stop()
					 || freezing(current));
		if (vb->need_stats_update)
			stats_handle_request(vb);
		if (diff > 0)
			fill_balloon(vb, diff);
		else if (diff < 0)
			leak_balloon(vb, -diff);
		update_balloon_size(vb);
	}
	return 0;
}
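/*
 * Probe variant supporting free page hinting
 * (VIRTIO_BALLOON_F_FREE_PAGE_HINT) and page poison reporting; registers
 * a shrinker when VIRTIO_BALLOON_F_DEFLATE_ON_OOM is negotiated.
 */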
static int virtballoon_probe(struct virtio_device *vdev)
{
	struct virtio_balloon *vb;
	__u32 poison_val;
	int err;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	vdev->priv = vb = kzalloc(sizeof(*vb), GFP_KERNEL);
	if (!vb) {
		err = -ENOMEM;
		goto out;
	}

	INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
	INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
	spin_lock_init(&vb->stop_update_lock);
	mutex_init(&vb->balloon_lock);
	init_waitqueue_head(&vb->acked);
	vb->vdev = vdev;

	balloon_devinfo_init(&vb->vb_dev_info);

	err = init_vqs(vb);
	if (err)
		goto out_free_vb;

#ifdef CONFIG_BALLOON_COMPACTION
	balloon_mnt = kern_mount(&balloon_fs);
	if (IS_ERR(balloon_mnt)) {
		err = PTR_ERR(balloon_mnt);
		goto out_del_vqs;
	}

	vb->vb_dev_info.migratepage = virtballoon_migratepage;
	vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
	if (IS_ERR(vb->vb_dev_info.inode)) {
		err = PTR_ERR(vb->vb_dev_info.inode);
		kern_unmount(balloon_mnt);
		goto out_del_vqs;
	}
	vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
#endif
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		/*
		 * There is always one entry reserved for cmd id, so the ring
		 * size needs to be at least two to report free page hints.
		 */
		if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
			err = -ENOSPC;
			goto out_del_vqs;
		}
		vb->balloon_wq = alloc_workqueue("balloon-wq",
					WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
		if (!vb->balloon_wq) {
			err = -ENOMEM;
			goto out_del_vqs;
		}
		INIT_WORK(&vb->report_free_page_work, report_free_page_func);
		vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP;
		vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
						  VIRTIO_BALLOON_CMD_ID_STOP);
		vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
						  VIRTIO_BALLOON_CMD_ID_STOP);
		vb->num_free_page_blocks = 0;
		spin_lock_init(&vb->free_page_list_lock);
		INIT_LIST_HEAD(&vb->free_page_list);
		if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
			memset(&poison_val, PAGE_POISON, sizeof(poison_val));
			virtio_cwrite(vb->vdev, struct virtio_balloon_config,
				      poison_val, &poison_val);
		}
	}
	/*
	 * We continue to use VIRTIO_BALLOON_F_DEFLATE_ON_OOM to decide if a
	 * shrinker needs to be registered to relieve memory pressure.
	 */
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
		err = virtio_balloon_register_shrinker(vb);
		if (err)
			goto out_del_balloon_wq;
	}
	virtio_device_ready(vdev);

	if (towards_target(vb))
		virtballoon_changed(vdev);
	return 0;

out_del_balloon_wq:
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		destroy_workqueue(vb->balloon_wq);
out_del_vqs:
	vdev->config->del_vqs(vdev);
out_free_vb:
	kfree(vb);
out:
	return err;
}
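/*
 * Probe variant without free page hinting; memory pressure is handled via
 * an OOM notifier rather than a shrinker.
 */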
static int virtballoon_probe(struct virtio_device *vdev)
{
	struct virtio_balloon *vb;
	int err;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
	if (!vb) {
		err = -ENOMEM;
		goto out;
	}

	INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
	INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
	spin_lock_init(&vb->stop_update_lock);
	vb->stop_update = false;
	vb->num_pages = 0;
	mutex_init(&vb->balloon_lock);
	init_waitqueue_head(&vb->acked);
	vb->vdev = vdev;

	balloon_devinfo_init(&vb->vb_dev_info);

	err = init_vqs(vb);
	if (err)
		goto out_free_vb;

	vb->nb.notifier_call = virtballoon_oom_notify;
	vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
	err = register_oom_notifier(&vb->nb);
	if (err < 0)
		goto out_del_vqs;

#ifdef CONFIG_BALLOON_COMPACTION
	balloon_mnt = kern_mount(&balloon_fs);
	if (IS_ERR(balloon_mnt)) {
		err = PTR_ERR(balloon_mnt);
		unregister_oom_notifier(&vb->nb);
		goto out_del_vqs;
	}

	vb->vb_dev_info.migratepage = virtballoon_migratepage;
	vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
	if (IS_ERR(vb->vb_dev_info.inode)) {
		err = PTR_ERR(vb->vb_dev_info.inode);
		kern_unmount(balloon_mnt);
		unregister_oom_notifier(&vb->nb);
		vb->vb_dev_info.inode = NULL;
		goto out_del_vqs;
	}
	vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
#endif

	virtio_device_ready(vdev);

	if (towards_target(vb))
		virtballoon_changed(vdev);
	return 0;

out_del_vqs:
	vdev->config->del_vqs(vdev);
out_free_vb:
	kfree(vb);
out:
	return err;
}