Example 1
static int powernv_flash_async_op(struct mtd_info *mtd, enum flash_op op,
		loff_t offset, size_t len, size_t *retlen, u_char *buf)
{
	struct powernv_flash *info = (struct powernv_flash *)mtd->priv;
	struct device *dev = &mtd->dev;
	int token;
	struct opal_msg msg;
	int rc;

	dev_dbg(dev, "%s(op=%d, offset=0x%llx, len=%zu)\n",
			__func__, op, offset, len);

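	/* Grab an async token that OPAL will use to signal completion of the request. */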
	token = opal_async_get_token_interruptible();
	if (token < 0) {
		if (token != -ERESTARTSYS)
			dev_err(dev, "Failed to get an async token\n");

		return token;
	}

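	/*
	 * Kick off the requested flash operation. A successful
	 * submission returns OPAL_ASYNC_COMPLETION.
	 */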
	switch (op) {
	case FLASH_OP_READ:
		rc = opal_flash_read(info->id, offset, __pa(buf), len, token);
		break;
	case FLASH_OP_WRITE:
		rc = opal_flash_write(info->id, offset, __pa(buf), len, token);
		break;
	case FLASH_OP_ERASE:
		rc = opal_flash_erase(info->id, offset, len, token);
		break;
	default:
		BUG_ON(1);
	}

	if (rc != OPAL_ASYNC_COMPLETION) {
		dev_err(dev, "opal_flash_async_op(op=%d) failed (rc %d)\n",
				op, rc);
		opal_async_release_token(token);
		return -EIO;
	}

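	/* Wait (uninterruptibly) for the async response, then drop the token. */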
	rc = opal_async_wait_response(token, &msg);
	opal_async_release_token(token);
	if (rc) {
		dev_err(dev, "opal async wait failed (rc %d)\n", rc);
		return -EIO;
	}

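	/* The OPAL status of the operation is carried in the completion message. */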
	rc = be64_to_cpu(msg.params[1]);
	if (rc == OPAL_SUCCESS) {
		rc = 0;
		if (retlen)
			*retlen = len;
	} else {
		rc = -EIO;
	}

	return rc;
}
Example 2
/*
 * Don't return -ERESTARTSYS if we can't get a token, the MTD core
 * might have split up the call from userspace and called into the
 * driver more than once, we'll already have done some amount of work.
 */
static int powernv_flash_async_op(struct mtd_info *mtd, enum flash_op op,
		loff_t offset, size_t len, size_t *retlen, u_char *buf)
{
	struct powernv_flash *info = (struct powernv_flash *)mtd->priv;
	struct device *dev = &mtd->dev;
	int token;
	struct opal_msg msg;
	int rc;

	dev_dbg(dev, "%s(op=%d, offset=0x%llx, len=%zu)\n",
			__func__, op, offset, len);

	token = opal_async_get_token_interruptible();
	if (token < 0) {
		if (token != -ERESTARTSYS)
			dev_err(dev, "Failed to get an async token\n");
		else
			token = -EINTR;
		return token;
	}

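	/*
	 * Submit the operation to OPAL. OPAL_ASYNC_COMPLETION means
	 * the result will be delivered via the async response.
	 */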
	switch (op) {
	case FLASH_OP_READ:
		rc = opal_flash_read(info->id, offset, __pa(buf), len, token);
		break;
	case FLASH_OP_WRITE:
		rc = opal_flash_write(info->id, offset, __pa(buf), len, token);
		break;
	case FLASH_OP_ERASE:
		rc = opal_flash_erase(info->id, offset, len, token);
		break;
	default:
		WARN_ON_ONCE(1);
		opal_async_release_token(token);
		return -EIO;
	}

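	/* The request is in flight; wait for OPAL's async response. */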
	if (rc == OPAL_ASYNC_COMPLETION) {
		rc = opal_async_wait_response_interruptible(token, &msg);
		if (rc) {
			/*
			 * If we return the mtd core will free the
			 * buffer we've just passed to OPAL but OPAL
			 * will continue to read or write from that
			 * memory.
			 * It may be tempting to ultimately return 0
			 * if we're doing a read or a write since we
			 * are going to end up waiting until OPAL is
			 * done. However, because the MTD core sends
			 * us the userspace request in chunks, we need
			 * it to know we've been interrupted.
			 */
			rc = -EINTR;
			if (opal_async_wait_response(token, &msg))
				dev_err(dev, "opal_async_wait_response() failed\n");
			goto out;
		}
		rc = opal_get_async_rc(msg);
	}

	/*
	 * OPAL does mutual exclusion on the flash, it will return
	 * OPAL_BUSY.
	 * During firmware updates by the service processor OPAL may
	 * be (temporarily) prevented from accessing the flash, in
	 * this case OPAL will also return OPAL_BUSY.
	 * Both cases aren't errors exactly but the flash could have
	 * changed, userspace should be informed.
	 */
	if (rc != OPAL_SUCCESS && rc != OPAL_BUSY)
		dev_err(dev, "opal_flash_async_op(op=%d) failed (rc %d)\n",
				op, rc);

	if (rc == OPAL_SUCCESS && retlen)
		*retlen = len;

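	/* Translate the OPAL status into a Linux errno for the MTD core. */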
	rc = opal_error_code(rc);
out:
	opal_async_release_token(token);
	return rc;
}