void mdss_mdp_clk_ctrl(int enable, int isr)
{
	static atomic_t clk_ref = ATOMIC_INIT(0);
	static DEFINE_MUTEX(clk_ctrl_lock);
	int force_off = 0;

	pr_debug("clk enable=%d isr=%d clk_ref=%d\n", enable, isr,
			atomic_read(&clk_ref));
	/*
	 * It is assumed that if isr == true then enable == OFF.
	 * If enable were ON while isr == true, the user context could turn
	 * off the clocks while the interrupt is switching the power ON.
	 */
	WARN_ON(isr == true && enable);

	if (enable == MDP_BLOCK_POWER_ON) {
		atomic_inc(&clk_ref);
	} else if (!atomic_add_unless(&clk_ref, -1, 0)) {
		if (enable == MDP_BLOCK_MASTER_OFF) {
			pr_debug("master power-off req\n");
			force_off = 1;
		} else {
			WARN(1, "too many mdp clock off calls\n");
		}
	}

	WARN_ON(enable == MDP_BLOCK_MASTER_OFF && !force_off);

	if (isr) {
		/* no references left: queue delayed work to turn off the clocks */
		if (mdss_res->clk_ena && !atomic_read(&clk_ref))
			queue_delayed_work(mdss_res->clk_ctrl_wq,
					   &mdss_res->clk_ctrl_worker,
					   mdss_res->timeout);
	} else {
		mutex_lock(&clk_ctrl_lock);
		if (delayed_work_pending(&mdss_res->clk_ctrl_worker))
			cancel_delayed_work(&mdss_res->clk_ctrl_worker);

		if (atomic_read(&clk_ref)) {
			mdss_mdp_clk_ctrl_update(true);
		} else if (mdss_res->clk_ena) {
			mutex_lock(&mdp_suspend_mutex);
			if (force_off || mdss_res->suspend) {
				mdss_mdp_clk_ctrl_update(false);
			} else {
				/* queue delayed work to turn off mdp power */
				queue_delayed_work(mdss_res->clk_ctrl_wq,
						   &mdss_res->clk_ctrl_worker,
						   mdss_res->timeout);
			}
			mutex_unlock(&mdp_suspend_mutex);
		}
		mutex_unlock(&clk_ctrl_lock);
	}
}
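A minimal caller sketch for the helper above, hedged: MDP_BLOCK_POWER_OFF is assumed from the driver's headers (only the POWER_ON and MASTER_OFF values appear in the snippet itself), and the caller name is made up for illustration.

/*
 * Hypothetical caller: every power-on reference taken from user context must
 * be balanced by a power-off, and only the non-ISR path may request power-on.
 */
static void example_mdp_register_access(void)
{
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);	/* clk_ref++ */
	/* ... program MDP registers here ... */
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);	/* clk_ref-- */
}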
static int boxer_panel_enable(struct omap_dss_device *dssdev)
{
	if (atomic_add_unless(&boxer_panel_is_enabled, 1, 1)) {
		boxer_panel_dssdev = dssdev;
		queue_work(boxer_panel_wq, &boxer_panel_work);
	}
 
	return 0;
}
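A hedged sketch of the disable path implied but not shown above: because atomic_add_unless(&boxer_panel_is_enabled, 1, 1) only lets the counter go from 0 to 1 once, a matching disable handler would have to reset it before a later enable can queue the work again. The function body below is hypothetical, not taken from the driver.

/* Hypothetical counterpart; not part of the original driver snippet. */
static void boxer_panel_disable(struct omap_dss_device *dssdev)
{
	flush_work(&boxer_panel_work);		/* let a queued enable finish */
	atomic_set(&boxer_panel_is_enabled, 0);	/* re-arm the one-shot gate */
}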
Example #3
static void hdmi_hotplug_detect_worker(struct work_struct *work)
{
	struct hpd_worker_data *d = container_of(work, typeof(*d), dwork.work);
	int state = atomic_read(&d->state);
// LGE_CHANGE_S [[email protected]] 2012-04-03
	struct omap_dss_device *dssdev = get_hdmi_device();
// LGE_CHANGE_E [[email protected]] 2012-04-03

	HDMIDBG("in hpd work %d, state=%d\n", state, dssdev->state);
	pr_err("in hpd work %d, state=%d\n", state, dssdev->state);
	if (dssdev == NULL)
		return;

	mutex_lock(&hdmi.hdmi_lock);
	if (state == HPD_STATE_OFF) {
		switch_set_state(&hdmi.hpd_switch, 0);
		if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
			mutex_unlock(&hdmi.hdmi_lock);
			dssdev->driver->disable(dssdev);
			omapdss_hdmi_enable_s3d(false);
			mutex_lock(&hdmi.hdmi_lock);
		}
		goto done;
	} else {
		if (state == HPD_STATE_START) {
			mutex_unlock(&hdmi.hdmi_lock);
			dssdev->driver->enable(dssdev);
			mutex_lock(&hdmi.hdmi_lock);
		} else if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
			   hdmi.hpd_switch.state) {
			/* powered down after enable - skip EDID read */
			goto done;
		} else if (hdmi_read_edid(&dssdev->panel.timings)) {
			/* get monspecs from edid */
			hdmi_get_monspecs(&dssdev->panel.monspecs);
			pr_info("panel size %d by %d\n",
					dssdev->panel.monspecs.max_x,
					dssdev->panel.monspecs.max_y);
			HDMIDBG("panel size %d by %d\n",
					dssdev->panel.monspecs.max_x,
					dssdev->panel.monspecs.max_y);
			dssdev->panel.width_in_um =
					dssdev->panel.monspecs.max_x * 10000;
			dssdev->panel.height_in_um =
					dssdev->panel.monspecs.max_y * 10000;
			switch_set_state(&hdmi.hpd_switch, 1);
			goto done;
		} else if (state == HPD_STATE_EDID_TRYLAST) {
			pr_info("Failed to read EDID after %d times. Giving up.\n",
					state - HPD_STATE_START);
			goto done;
		}
		/*
		 * Advance the retry state and re-arm the worker, unless the
		 * hot-plug state has meanwhile dropped back to HPD_STATE_OFF.
		 */
		if (atomic_add_unless(&d->state, 1, HPD_STATE_OFF))
			queue_delayed_work(my_workq, &d->dwork, msecs_to_jiffies(60));
	}
done:
	mutex_unlock(&hdmi.hdmi_lock);
}
static int snapshot_open(struct inode *inode, struct file *filp)
{
	struct snapshot_data *data;
	int error;

	lock_system_sleep();

	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
		error = -EBUSY;
		goto Unlock;
	}

	if ((filp->f_flags & O_ACCMODE) == O_RDWR) {
		atomic_inc(&snapshot_device_available);
		error = -ENOSYS;
		goto Unlock;
	}
	if (create_basic_memory_bitmaps()) {
		atomic_inc(&snapshot_device_available);
		error = -ENOMEM;
		goto Unlock;
	}
	nonseekable_open(inode, filp);
	data = &snapshot_state;
	filp->private_data = data;
	memset(&data->handle, 0, sizeof(struct snapshot_handle));
	if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
		
		data->swap = swsusp_resume_device ?
			swap_type_of(swsusp_resume_device, 0, NULL) : -1;
		data->mode = O_RDONLY;
		error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
		if (error)
			pm_notifier_call_chain(PM_POST_HIBERNATION);
	} else {
		wait_for_device_probe();
		scsi_complete_async_scans();

		data->swap = -1;
		data->mode = O_WRONLY;
		error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
		if (error)
			pm_notifier_call_chain(PM_POST_RESTORE);
	}
	if (error) {
		free_basic_memory_bitmaps();
		atomic_inc(&snapshot_device_available);
	}
	data->frozen = 0;
	data->ready = 0;
	data->platform_support = 0;

 Unlock:
	unlock_system_sleep();

	return error;
}
Example #5
int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;
	rt_spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	rt_spin_unlock(lock);
	return 0;
}
Example #6
File: pb173.c Project: thorgrin/pb173
int my_open(struct inode *inode, struct file *filp)
{
	if ((filp->f_mode & FMODE_WRITE) != 0 &&
			atomic_add_unless(&my_opened, 1, 1) == 0) {
		printk(KERN_INFO "Device is already opened for writing.\n");
		return -EBUSY;
	}

	printk(KERN_INFO "Device opened.\n");
	return 0;
}
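A sketch of the matching release handler; the my_release name and log text are hypothetical. The writer slot claimed via atomic_add_unless(&my_opened, 1, 1) has to be returned on close, otherwise the device could never be opened for writing again.

/* Hypothetical counterpart to my_open() above. */
int my_release(struct inode *inode, struct file *filp)
{
	if ((filp->f_mode & FMODE_WRITE) != 0)
		atomic_dec(&my_opened);		/* give the writer slot back */

	printk(KERN_INFO "Device closed.\n");
	return 0;
}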
Example #7
/*
 * isp_stat_buf_process - Process statistic buffers.
 * @buf_state: indicates whether the buffer is ready to be processed. It's
 *	       necessary because the histogram needs to copy the data from
 *	       internal memory before being able to process the buffer.
 */
static int isp_stat_buf_process(struct ispstat *stat, int buf_state)
{
	int ret = STAT_NO_BUF;

	/* A previously flagged error consumes this buffer instead of queueing it. */
	if (!atomic_add_unless(&stat->buf_err, -1, 0) &&
	    buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) {
		ret = isp_stat_buf_queue(stat);
		isp_stat_buf_next(stat);
	}

	return ret;
}
Example #8
int m5mols_wait_interrupt(struct v4l2_subdev *sd, u8 irq_mask, u32 timeout)
{
	struct m5mols_info *info = to_m5mols(sd);

	int ret = wait_event_interruptible_timeout(info->irq_waitq,
				atomic_add_unless(&info->irq_done, -1, 0),
				msecs_to_jiffies(timeout));
	if (ret <= 0)
		return ret ? ret : -ETIMEDOUT;

	return m5mols_busy_wait(sd, SYSTEM_INT_FACTOR, irq_mask,
				M5MOLS_I2C_RDY_WAIT_FL | irq_mask, -1);
}
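A hedged sketch of the producer side this wait depends on (handler name hypothetical): the sensor's interrupt handler publishes the completion that the wake condition above consumes with atomic_add_unless(&info->irq_done, -1, 0).

/* Hypothetical IRQ handler pairing with m5mols_wait_interrupt() above. */
static irqreturn_t m5mols_irq_handler_sketch(int irq, void *data)
{
	struct m5mols_info *info = data;

	atomic_set(&info->irq_done, 1);		/* publish the completion */
	wake_up_interruptible(&info->irq_waitq);
	return IRQ_HANDLED;
}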
Example #9
/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
 *		spin_lock(&lock);
 *		return 1;
 *	}
 *	return 0;
 *
 * because the spin-lock and the decrement must be
 * "atomic".
 */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}
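For context, a typical caller pattern for this primitive, via the atomic_dec_and_lock() wrapper; the object type, list, and lock names below are illustrative, not from the source above.

struct obj {
	atomic_t refcount;
	struct list_head node;
};

static LIST_HEAD(obj_list);
static DEFINE_SPINLOCK(obj_list_lock);

/* Drop a reference; on the last put, unlink and free under the list lock. */
static void obj_put(struct obj *o)
{
	if (!atomic_dec_and_lock(&o->refcount, &obj_list_lock))
		return;			/* other references remain */

	list_del(&o->node);		/* last ref: unlink while locked */
	spin_unlock(&obj_list_lock);
	kfree(o);
}

The atomic_add_unless(atomic, -1, 1) fast path is what lets the common case (count still above 1) avoid taking the lock at all.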
Example #10
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}
Example #11
/*
 * This function runs when the userspace helper
 * sets the Lunix:TNG line discipline on a TTY.
 */
static int lunix_ldisc_open(struct tty_struct *tty)
{
    if (!capable(CAP_SYS_ADMIN))
        return -EPERM;

    /* Can only be associated with a single TTY */
    if (!atomic_add_unless(&lunix_disc_available, -1, 0))
        return -EBUSY;

    tty->receive_room = 65536; /* No flow control, FIXME */

    //debug("lunix ldisc associated with TTY %s\n", tty->name);
    return 0;
}
Example #12
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
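And the mutex flavour in use, again with illustrative names: the shape is identical to the spinlock variant, but the caller is left holding a mutex and may therefore sleep while tearing the object down.

struct my_dev {
	atomic_t users;
	struct list_head list;
};

static LIST_HEAD(dev_list);
static DEFINE_MUTEX(dev_list_mutex);

/* Drop a user; on the last put, unlink and free while holding the mutex. */
static void my_dev_put(struct my_dev *d)
{
	if (!atomic_dec_and_mutex_lock(&d->users, &dev_list_mutex))
		return;			/* still in use elsewhere */

	list_del(&d->list);		/* last user: remove from the list */
	mutex_unlock(&dev_list_mutex);
	kfree(d);
}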
Example #13
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		
		mutex_unlock(lock);
		return 0;
	}
	
	return 1;
}
Example #14
void decon_te_irq_handler(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	u32 val;

	if (!test_bit(BIT_CLKS_ENABLED, &ctx->enabled))
		return;

	if (atomic_add_unless(&ctx->win_updated, -1, 0)) {
		/* a window update is pending: fire the software trigger */
		val = readl(ctx->addr + DECON_TRIGCON);
		val |= TRIGCON_SWTRIGCMD;
		writel(val, ctx->addr + DECON_TRIGCON);
	}

	drm_crtc_handle_vblank(&ctx->crtc->base);
}
Example #15
static int  fl2440adc_release(struct inode *inodp, struct file *filp)
{
    int channel;

    DPRINTK("File operations release...\n");
    channel = MINOR(inodp->i_rdev);
    mutex_lock(&adc_dev.mutex);

    if (!atomic_add_unless(&adc_dev.client_ref[channel], -1, 0))
    {
        s3c_adc_release(adc_dev.client[channel]);
        adc_dev.client[channel] = NULL;
    }
    mutex_unlock(&adc_dev.mutex);

    return 0;
}
Example #16
void intel_context_unpin(struct intel_context *ce)
{
	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
		return;

	/* We may be called from inside intel_context_pin() to evict another */
	intel_context_get(ce);
	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);

	if (likely(atomic_dec_and_test(&ce->pin_count))) {
		ce->ops->unpin(ce);

		i915_gem_context_put(ce->gem_context);
		intel_context_put(ce);
	}

	mutex_unlock(&ce->pin_mutex);
	intel_context_put(ce);
}
Example #17
static ssize_t
signal_read(struct file * filp,
            char __user * buffer,
            size_t        length,
            loff_t      * offset)
{
    struct xpmem_thread_group * seg_tg;
    struct xpmem_segment      * seg;
    xpmem_segid_t               segid;
    unsigned long               irqs;
    int                         err;

    if (length != sizeof(unsigned long))
        return -EINVAL;

    segid = (xpmem_segid_t)filp->private_data;

    seg_tg = xpmem_tg_ref_by_segid(segid);
    if (IS_ERR(seg_tg))
        return PTR_ERR(seg_tg);

    seg = xpmem_seg_ref_by_segid(seg_tg, segid);
    if (IS_ERR(seg)) {
        xpmem_tg_deref(seg_tg);
        return PTR_ERR(seg);
    }

    /* Only ack if there are pending notifications */
    err  = (atomic_add_unless(&(seg->irq_count), -1, 0) == 0);
    irqs = atomic_read(&(seg->irq_count));

    xpmem_seg_deref(seg);
    xpmem_tg_deref(seg_tg);

    if (err)
	return -ENODEV;

    if (copy_to_user(buffer, &irqs, sizeof(unsigned long))) 
        return -EFAULT;

    return length;
}
Example #18
static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;

	/* Consume one flow-control credit; drop the frame if none remain. */
	if (pn_flow_safe(pn->tx_fc) &&
	    !atomic_add_unless(&pn->tx_credits, -1, 0)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	skb_push(skb, 3);
	skb_reset_transport_header(skb);
	ph = pnp_hdr(skb);
	ph->utid = 0;
	ph->message_id = PNS_PIPE_DATA;
	ph->pipe_handle = pn->pipe_handle;

	return pn_skb_send(sk, skb, &pipe_srv);
}
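A hedged sketch of the replenish side of this flow control (function name hypothetical): the credit consumed per frame above is granted back when the peer signals new credits, after which blocked writers can be woken.

/* Hypothetical credit-grant path pairing with pipe_skb_send() above. */
static void pipe_grant_credits_sketch(struct sock *sk, u8 credits)
{
	struct pep_sock *pn = pep_sk(sk);

	atomic_add(credits, &pn->tx_credits);	/* top up the send budget */
	sk->sk_write_space(sk);			/* wake any blocked writer */
}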
Example #19
void nvmap_unpin_ids(struct nvmap_client *client,
		     unsigned int nr, const unsigned long *ids)
{
	unsigned int i;
	int do_wake = 0;

	for (i = 0; i < nr; i++) {
		struct nvmap_handle_ref *ref;

		if (!ids[i])
			continue;

		nvmap_ref_lock(client);
		ref = _nvmap_validate_id_locked(client, ids[i]);
		if (ref) {
			struct nvmap_handle *h = ref->handle;
			int e = atomic_add_unless(&ref->pin, -1, 0);

			nvmap_ref_unlock(client);

			if (!e) {
				nvmap_err(client, "%s unpinning unpinned "
					  "handle %08lx\n",
					  current->group_leader->comm, ids[i]);
			} else {
				do_wake |= handle_unpin(client, h, false);
			}
		} else {
			nvmap_ref_unlock(client);
			if (client->super)
				do_wake |= handle_unpin_noref(client, ids[i]);
			else
				nvmap_err(client, "%s unpinning invalid "
					  "handle %08lx\n",
					  current->group_leader->comm, ids[i]);
		}
	}

	if (do_wake)
		wake_up(&client->share->pin_wait);
}
Example #20
/**
 * pvrdma_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 * @udata: user data blob
 *
 * @return: the ib_ah pointer on success, otherwise errno.
 */
struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
			       struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_ah *ah;
	const struct ib_global_route *grh;
	u8 port_num = rdma_ah_get_port_num(ah_attr);

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
		return ERR_PTR(-EINVAL);

	grh = rdma_ah_read_grh(ah_attr);
	if ((ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE)  ||
	    rdma_is_multicast_addr((struct in6_addr *)grh->dgid.raw))
		return ERR_PTR(-EINVAL);

	/* Reserve an AH slot; fail if the device's max_ah cap is reached. */
	if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah))
		return ERR_PTR(-ENOMEM);

	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah) {
		atomic_dec(&dev->num_ahs);
		return ERR_PTR(-ENOMEM);
	}

	ah->av.port_pd = to_vpd(pd)->pd_handle | (port_num << 24);
	ah->av.src_path_bits = rdma_ah_get_path_bits(ah_attr);
	ah->av.src_path_bits |= 0x80;
	ah->av.gid_index = grh->sgid_index;
	ah->av.hop_limit = grh->hop_limit;
	ah->av.sl_tclass_flowlabel = (grh->traffic_class << 20) |
				      grh->flow_label;
	memcpy(ah->av.dgid, grh->dgid.raw, 16);
	memcpy(ah->av.dmac, ah_attr->roce.dmac, ETH_ALEN);

	ah->ibah.device = pd->device;
	ah->ibah.pd = pd;
	ah->ibah.uobject = NULL;

	return &ah->ibah;
}
Example #21
/**
 * Get a handle for a DMM transaction
 */
static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
{
	struct dmm_txn *txn = NULL;
	struct refill_engine *engine = NULL;
	int ret;
	unsigned long flags;


	/* wait until an engine is available */
	ret = wait_event_interruptible(omap_dmm->engine_queue,
		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
	if (ret)
		return ERR_PTR(ret);

	/* grab an idle engine */
	spin_lock_irqsave(&list_lock, flags);
	if (!list_empty(&dmm->idle_head)) {
		engine = list_entry(dmm->idle_head.next, struct refill_engine,
					idle_node);
		list_del(&engine->idle_node);
	}
Example #22
//====================================================================
static void __exit undelete_exit(void) {
    int i;

    DBG("Unloading module undelete");

    // Release references to super blocks.
    for (i = 0; i < num_super_blocks; ++i) {
        if (!atomic_add_unless(&super_block_map[i]->s_active, -1, 1)) {
            return;
        }
    }

    // Free the super block map.
    if (super_block_map) {
        kfree(super_block_map);
    }

    // Unregister our character devices
    unregister_character_devices();

    // Make sure to free allocated device numbers
    unregister_chrdev_region(undelete_devnode, num_super_blocks);
}
Example #23
File: hibernate.c Project: mdamt/linux
/**
 * software_resume - Resume from a saved hibernation image.
 *
 * This routine is called as a late initcall, when all devices have been
 * discovered and initialized already.
 *
 * The image reading code is called to see if there is a hibernation image
 * available for reading.  If that is the case, devices are quiesced and the
 * contents of memory is restored from the saved image.
 *
 * If this is successful, control reappears in the restored target kernel in
 * hibernation_snapshot() which returns to hibernate().  Otherwise, the routine
 * attempts to recover gracefully and make the kernel return to the normal mode
 * of operation.
 */
static int software_resume(void)
{
	int error, nr_calls = 0;

	/*
	 * If the user said "noresume".. bail out early.
	 */
	if (noresume || !hibernation_available())
		return 0;

	/*
	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
	 * is configured into the kernel. Since the regular hibernate
	 * trigger path is via sysfs which takes a buffer mutex before
	 * calling hibernate functions (which take pm_mutex) this can
	 * cause lockdep to complain about a possible ABBA deadlock
	 * which cannot happen since we're in the boot code here and
	 * sysfs can't be invoked yet. Therefore, we use a subclass
	 * here to avoid lockdep complaining.
	 */
	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);

	if (swsusp_resume_device)
		goto Check_image;

	if (!strlen(resume_file)) {
		error = -ENOENT;
		goto Unlock;
	}

	pr_debug("Checking hibernation image partition %s\n", resume_file);

	if (resume_delay) {
		pr_info("Waiting %dsec before reading resume device ...\n",
			resume_delay);
		ssleep(resume_delay);
	}

	/* Check if the device is there */
	swsusp_resume_device = name_to_dev_t(resume_file);

	/*
	 * name_to_dev_t() cannot verify the partition if resume_file is in
	 * integer format (e.g. major:minor).
	 */
	if (isdigit(resume_file[0]) && resume_wait) {
		int partno;
		while (!get_gendisk(swsusp_resume_device, &partno))
			msleep(10);
	}

	if (!swsusp_resume_device) {
		/*
		 * Some device discovery might still be in progress; we need
		 * to wait for this to finish.
		 */
		wait_for_device_probe();

		if (resume_wait) {
			while ((swsusp_resume_device = name_to_dev_t(resume_file)) == 0)
				msleep(10);
			async_synchronize_full();
		}

		swsusp_resume_device = name_to_dev_t(resume_file);
		if (!swsusp_resume_device) {
			error = -ENODEV;
			goto Unlock;
		}
	}

 Check_image:
	pr_debug("Hibernation image partition %d:%d present\n",
		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));

	pr_debug("Looking for hibernation image.\n");
	error = swsusp_check();
	if (error)
		goto Unlock;

	/* The snapshot device should not be opened while we're running */
	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
		error = -EBUSY;
		swsusp_close(FMODE_READ);
		goto Unlock;
	}

	pm_prepare_console();
	error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
	if (error) {
		nr_calls--;
		goto Close_Finish;
	}

	pr_debug("Preparing processes for restore.\n");
	error = freeze_processes();
	if (error)
		goto Close_Finish;
	error = load_image_and_restore();
	thaw_processes();
 Finish:
	__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
	pm_restore_console();
	atomic_inc(&snapshot_device_available);
	/* For success case, the suspend path will release the lock */
 Unlock:
	mutex_unlock(&pm_mutex);
	pr_debug("Hibernation image not present or could not be loaded.\n");
	return error;
 Close_Finish:
	swsusp_close(FMODE_READ);
	goto Finish;
}
Example #24
File: hibernate.c Project: mdamt/linux
/**
 * hibernate - Carry out system hibernation, including saving the image.
 */
int hibernate(void)
{
	int error, nr_calls = 0;
	bool snapshot_test = false;

	if (!hibernation_available()) {
		pr_debug("Hibernation not available.\n");
		return -EPERM;
	}

	lock_system_sleep();
	/* The snapshot device should not be opened while we're running */
	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
		error = -EBUSY;
		goto Unlock;
	}

	pm_prepare_console();
	error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
	if (error) {
		nr_calls--;
		goto Exit;
	}

	pr_info("Syncing filesystems ... \n");
	sys_sync();
	pr_info("done.\n");

	error = freeze_processes();
	if (error)
		goto Exit;

	lock_device_hotplug();
	/* Allocate memory management structures */
	error = create_basic_memory_bitmaps();
	if (error)
		goto Thaw;

	error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
	if (error || freezer_test_done)
		goto Free_bitmaps;

	if (in_suspend) {
		unsigned int flags = 0;

		if (hibernation_mode == HIBERNATION_PLATFORM)
			flags |= SF_PLATFORM_MODE;
		if (nocompress)
			flags |= SF_NOCOMPRESS_MODE;
		else
			flags |= SF_CRC32_MODE;

		pr_debug("Writing image.\n");
		error = swsusp_write(flags);
		swsusp_free();
		if (!error) {
			if (hibernation_mode == HIBERNATION_TEST_RESUME)
				snapshot_test = true;
			else
				power_down();
		}
		in_suspend = 0;
		pm_restore_gfp_mask();
	} else {
		pr_debug("Image restored successfully.\n");
	}

 Free_bitmaps:
	free_basic_memory_bitmaps();
 Thaw:
	unlock_device_hotplug();
	if (snapshot_test) {
		pr_debug("Checking hibernation image\n");
		error = swsusp_check();
		if (!error)
			error = load_image_and_restore();
	}
	thaw_processes();

	/* Don't bother checking whether freezer_test_done is true */
	freezer_test_done = false;
 Exit:
	__pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL);
	pm_restore_console();
	atomic_inc(&snapshot_device_available);
 Unlock:
	unlock_system_sleep();
	return error;
}
Example #25
int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
	u64 tb_cache_size)
{
	int result;
	u64 tb_size;

	BUG_ON(!lpm_priv);
	BUG_ON(tb_type != PS3_LPM_TB_TYPE_NONE
		&& tb_type != PS3_LPM_TB_TYPE_INTERNAL);

	if (tb_type == PS3_LPM_TB_TYPE_NONE && tb_cache)
		dev_dbg(sbd_core(), "%s:%u: bad in vals\n", __func__, __LINE__);

	if (!atomic_add_unless(&lpm_priv->open, 1, 1)) {
		dev_dbg(sbd_core(), "%s:%u: busy\n", __func__, __LINE__);
		return -EBUSY;
	}

	

	if (tb_type == PS3_LPM_TB_TYPE_NONE) {
		lpm_priv->tb_cache_size = 0;
		lpm_priv->tb_cache_internal = NULL;
		lpm_priv->tb_cache = NULL;
	} else if (tb_cache) {
		if (tb_cache != (void *)_ALIGN_UP((unsigned long)tb_cache, 128)
			|| tb_cache_size != _ALIGN_UP(tb_cache_size, 128)) {
			dev_err(sbd_core(), "%s:%u: unaligned tb_cache\n",
				__func__, __LINE__);
			result = -EINVAL;
			goto fail_align;
		}
		lpm_priv->tb_cache_size = tb_cache_size;
		lpm_priv->tb_cache_internal = NULL;
		lpm_priv->tb_cache = tb_cache;
	} else {
		lpm_priv->tb_cache_size = PS3_LPM_DEFAULT_TB_CACHE_SIZE;
		lpm_priv->tb_cache_internal = kzalloc(
			lpm_priv->tb_cache_size + 127, GFP_KERNEL);
		if (!lpm_priv->tb_cache_internal) {
			dev_err(sbd_core(), "%s:%u: alloc internal tb_cache "
				"failed\n", __func__, __LINE__);
			result = -ENOMEM;
			goto fail_malloc;
		}
		lpm_priv->tb_cache = (void *)_ALIGN_UP(
			(unsigned long)lpm_priv->tb_cache_internal, 128);
	}

	result = lv1_construct_lpm(lpm_priv->node_id, tb_type, 0, 0,
				ps3_mm_phys_to_lpar(__pa(lpm_priv->tb_cache)),
				lpm_priv->tb_cache_size, &lpm_priv->lpm_id,
				&lpm_priv->outlet_id, &tb_size);

	if (result) {
		dev_err(sbd_core(), "%s:%u: lv1_construct_lpm failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		result = -EINVAL;
		goto fail_construct;
	}

	lpm_priv->shadow.pm_control = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.pm_start_stop = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.group_control = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.debug_bus_control = PS3_LPM_SHADOW_REG_INIT;

	dev_dbg(sbd_core(), "%s:%u: lpm_id 0x%llx, outlet_id 0x%llx, "
		"tb_size 0x%llx\n", __func__, __LINE__, lpm_priv->lpm_id,
		lpm_priv->outlet_id, tb_size);

	return 0;

fail_construct:
	kfree(lpm_priv->tb_cache_internal);
	lpm_priv->tb_cache_internal = NULL;
fail_malloc:
fail_align:
	atomic_dec(&lpm_priv->open);
	return result;
}
Example #26
/**
 * pvrdma_create_srq - create shared receive queue
 * @pd: protection domain
 * @init_attr: shared receive queue attributes
 * @udata: user data
 *
 * @return: the ib_srq pointer on success, otherwise returns an errno.
 */
struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
				 struct ib_srq_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct pvrdma_srq *srq = NULL;
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
	struct pvrdma_create_srq_resp srq_resp = {0};
	struct pvrdma_create_srq ucmd;
	unsigned long flags;
	int ret;

	if (!udata) {
		/* No support for kernel clients. */
		dev_warn(&dev->pdev->dev,
			 "no shared receive queue support for kernel client\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (init_attr->srq_type != IB_SRQT_BASIC) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue type %d not supported\n",
			 init_attr->srq_type);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->attr.max_wr  > dev->dsr->caps.max_srq_wr ||
	    init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue size invalid\n");
		return ERR_PTR(-EINVAL);
	}

	/* Reserve an SRQ slot; fail if the device's max_srq cap is reached. */
	if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
		return ERR_PTR(-ENOMEM);

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = -ENOMEM;
		goto err_srq;
	}

	spin_lock_init(&srq->lock);
	refcount_set(&srq->refcnt, 1);
	init_completion(&srq->free);

	dev_dbg(&dev->pdev->dev,
		"create shared receive queue from user space\n");

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		ret = -EFAULT;
		goto err_srq;
	}

	srq->umem = ib_umem_get(pd->uobject->context,
				ucmd.buf_addr,
				ucmd.buf_size, 0, 0);
	if (IS_ERR(srq->umem)) {
		ret = PTR_ERR(srq->umem);
		goto err_srq;
	}

	srq->npages = ib_umem_page_count(srq->umem);

	if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev,
			 "overflow pages in shared receive queue\n");
		ret = -EINVAL;
		goto err_umem;
	}

	ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
	cmd->srq_type = init_attr->srq_type;
	cmd->nchunks = srq->npages;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->attrs.max_wr = init_attr->attr.max_wr;
	cmd->attrs.max_sge = init_attr->attr.max_sge;
	cmd->attrs.srq_limit = init_attr->attr.srq_limit;
	cmd->pdir_dma = srq->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create shared receive queue, error: %d\n",
			 ret);
		goto err_page_dir;
	}

	srq->srq_handle = resp->srqn;
	srq_resp.srqn = resp->srqn;
	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	/* Copy udata back. */
	if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
		pvrdma_destroy_srq(&srq->ibsrq);
		return ERR_PTR(-EINVAL);
	}

	return &srq->ibsrq;

err_page_dir:
	pvrdma_page_dir_cleanup(dev, &srq->pdir);
err_umem:
	ib_umem_release(srq->umem);
err_srq:
	kfree(srq);
	atomic_dec(&dev->num_srqs);

	return ERR_PTR(ret);
}
Example #27
static int software_resume(void)
{
	int error;
	unsigned int flags;

	/*
	 * If the user said "noresume".. bail out early.
	 */
	if (noresume)
		return 0;

	/*
	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
	 * is configured into the kernel. Since the regular hibernate
	 * trigger path is via sysfs which takes a buffer mutex before
	 * calling hibernate functions (which take pm_mutex) this can
	 * cause lockdep to complain about a possible ABBA deadlock
	 * which cannot happen since we're in the boot code here and
	 * sysfs can't be invoked yet. Therefore, we use a subclass
	 * here to avoid lockdep complaining.
	 */
	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);

	if (swsusp_resume_device)
		goto Check_image;

	if (!strlen(resume_file)) {
		error = -ENOENT;
		goto Unlock;
	}

	pr_debug("PM: Checking image partition %s\n", resume_file);

	/* Check if the device is there */
	swsusp_resume_device = name_to_dev_t(resume_file);
	if (!swsusp_resume_device) {
		/*
		 * Some device discovery might still be in progress; we need
		 * to wait for this to finish.
		 */
		wait_for_device_probe();
		/*
		 * We can't depend on SCSI devices being available after loading
		 * one of their modules until scsi_complete_async_scans() is
		 * called and the resume device usually is a SCSI one.
		 */
		scsi_complete_async_scans();

		swsusp_resume_device = name_to_dev_t(resume_file);
		if (!swsusp_resume_device) {
			error = -ENODEV;
			goto Unlock;
		}
	}

 Check_image:
	pr_debug("PM: Resume from partition %d:%d\n",
		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));

	pr_debug("PM: Checking hibernation image.\n");
	error = swsusp_check();
	if (error)
		goto Unlock;

	/* The snapshot device should not be opened while we're running */
	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
		error = -EBUSY;
		swsusp_close(FMODE_READ);
		goto Unlock;
	}

	pm_prepare_console();
	error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
	if (error)
		goto close_finish;

	error = usermodehelper_disable();
	if (error)
		goto close_finish;

	error = create_basic_memory_bitmaps();
	if (error)
		goto close_finish;

	pr_debug("PM: Preparing processes for restore.\n");
	error = prepare_processes();
	if (error) {
		swsusp_close(FMODE_READ);
		goto Done;
	}

	pr_debug("PM: Reading hibernation image.\n");

	error = swsusp_read(&flags);
	swsusp_close(FMODE_READ);
	if (!error)
		hibernation_restore(flags & SF_PLATFORM_MODE);

	printk(KERN_ERR "PM: Restore failed, recovering.\n");
	swsusp_free();
	thaw_processes();
 Done:
	free_basic_memory_bitmaps();
	usermodehelper_enable();
 Finish:
	pm_notifier_call_chain(PM_POST_RESTORE);
	pm_restore_console();
	atomic_inc(&snapshot_device_available);
	/* For success case, the suspend path will release the lock */
 Unlock:
	mutex_unlock(&pm_mutex);
	pr_debug("PM: Resume from disk failed.\n");
	return error;
close_finish:
	swsusp_close(FMODE_READ);
	goto Finish;
}
Example #28
int hibernate(void)
{
	int error;

	mutex_lock(&pm_mutex);
	/* The snapshot device should not be opened while we're running */
	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
		error = -EBUSY;
		goto Unlock;
	}

	pm_prepare_console();
	error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
	if (error)
		goto Exit;

	error = usermodehelper_disable();
	if (error)
		goto Exit;

	/* Allocate memory management structures */
	error = create_basic_memory_bitmaps();
	if (error)
		goto Exit;

	printk(KERN_INFO "PM: Syncing filesystems ... ");
	sys_sync();
	printk("done.\n");

	error = prepare_processes();
	if (error)
		goto Finish;

	if (hibernation_test(TEST_FREEZER))
		goto Thaw;

	if (hibernation_testmode(HIBERNATION_TESTPROC))
		goto Thaw;

	error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
	if (error)
		goto Thaw;

	if (in_suspend) {
		unsigned int flags = 0;

		if (hibernation_mode == HIBERNATION_PLATFORM)
			flags |= SF_PLATFORM_MODE;
		pr_debug("PM: writing image.\n");
		error = swsusp_write(flags);
		swsusp_free();
		if (!error)
			power_down();
	} else {
		pr_debug("PM: Image restored successfully.\n");
	}

 Thaw:
	thaw_processes();
 Finish:
	free_basic_memory_bitmaps();
	usermodehelper_enable();
 Exit:
	pm_notifier_call_chain(PM_POST_HIBERNATION);
	pm_restore_console();
	atomic_inc(&snapshot_device_available);
 Unlock:
	mutex_unlock(&pm_mutex);
	return error;
}
Example #29
File: disk.c Project: maraz/linux-2.6
static int software_resume(void)
{
	int error;
	unsigned int flags;

	/*
	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
	 * is configured into the kernel. Since the regular hibernate
	 * trigger path is via sysfs which takes a buffer mutex before
	 * calling hibernate functions (which take pm_mutex) this can
	 * cause lockdep to complain about a possible ABBA deadlock
	 * which cannot happen since we're in the boot code here and
	 * sysfs can't be invoked yet. Therefore, we use a subclass
	 * here to avoid lockdep complaining.
	 */
	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);
	if (!swsusp_resume_device) {
		if (!strlen(resume_file)) {
			mutex_unlock(&pm_mutex);
			return -ENOENT;
		}
		swsusp_resume_device = name_to_dev_t(resume_file);
		pr_debug("PM: Resume from partition %s\n", resume_file);
	} else {
		pr_debug("PM: Resume from partition %d:%d\n",
				MAJOR(swsusp_resume_device),
				MINOR(swsusp_resume_device));
	}

	if (noresume) {
		/**
		 * FIXME: If noresume is specified, we need to find the
		 * partition and reset it back to normal swap space.
		 */
		mutex_unlock(&pm_mutex);
		return 0;
	}

	pr_debug("PM: Checking hibernation image.\n");
	error = swsusp_check();
	if (error)
		goto Unlock;

	/* The snapshot device should not be opened while we're running */
	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
		error = -EBUSY;
		goto Unlock;
	}

	pm_prepare_console();
	error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
	if (error)
		goto Finish;

	error = create_basic_memory_bitmaps();
	if (error)
		goto Finish;

	pr_debug("PM: Preparing processes for restore.\n");
	error = prepare_processes();
	if (error) {
		swsusp_close();
		goto Done;
	}

	pr_debug("PM: Reading hibernation image.\n");

	error = swsusp_read(&flags);
	if (!error)
		hibernation_restore(flags & SF_PLATFORM_MODE);

	printk(KERN_ERR "PM: Restore failed, recovering.\n");
	swsusp_free();
	thaw_processes();
 Done:
	free_basic_memory_bitmaps();
 Finish:
	pm_notifier_call_chain(PM_POST_RESTORE);
	pm_restore_console();
	atomic_inc(&snapshot_device_available);
	/* For success case, the suspend path will release the lock */
 Unlock:
	mutex_unlock(&pm_mutex);
	pr_debug("PM: Resume from disk failed.\n");
	return error;
}
Example #30
/**
 * hibernate - Carry out system hibernation, including saving the image.
 */
int hibernate(void)
{
	int error;

	lock_system_sleep();
	/* The snapshot device should not be opened while we're running */
	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
		error = -EBUSY;
		goto Unlock;
	}

	pm_prepare_console();
	error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
	if (error)
		goto Exit;

	/* Allocate memory management structures */
	error = create_basic_memory_bitmaps();
	if (error)
		goto Exit;

	printk(KERN_INFO "PM: Syncing filesystems ... ");
	sys_sync();
	printk("done.\n");

	error = freeze_processes();
	if (error)
		goto Free_bitmaps;

	error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
	if (error)
		goto Thaw;
	if (freezer_test_done) {
		freezer_test_done = false;
		goto Thaw;
	}

	if (in_suspend) {
		unsigned int flags = 0;

		if (hibernation_mode == HIBERNATION_PLATFORM)
			flags |= SF_PLATFORM_MODE;
		if (nocompress)
			flags |= SF_NOCOMPRESS_MODE;
		else
			flags |= SF_CRC32_MODE;

		pr_debug("PM: writing image.\n");
		error = swsusp_write(flags);
		swsusp_free();
		if (!error)
			power_down();
		in_suspend = 0;
		pm_restore_gfp_mask();
	} else {
		pr_debug("PM: Image restored successfully.\n");
	}

 Thaw:
	thaw_processes();
 Free_bitmaps:
	free_basic_memory_bitmaps();
 Exit:
	pm_notifier_call_chain(PM_POST_HIBERNATION);
	pm_restore_console();
	atomic_inc(&snapshot_device_available);
 Unlock:
	unlock_system_sleep();
	return error;
}