Example #1
File: dir.c Project: AK101111/linux
static struct dentry *ovl_clear_empty(struct dentry *dentry,
				      struct list_head *list)
{
	struct dentry *workdir = ovl_workdir(dentry);
	struct inode *wdir;
	struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
	struct inode *udir = upperdir->d_inode;
	struct path upperpath;
	struct dentry *upper;
	struct dentry *opaquedir;
	struct kstat stat;
	int err;

	/* Check workdir before dereferencing it for wdir below */
	if (WARN_ON(!workdir))
		return ERR_PTR(-EROFS);

	wdir = workdir->d_inode;

	err = ovl_lock_rename_workdir(workdir, upperdir);
	if (err)
		goto out;

	ovl_path_upper(dentry, &upperpath);
	err = vfs_getattr(&upperpath, &stat);
	if (err)
		goto out_unlock;

	err = -ESTALE;
	if (!S_ISDIR(stat.mode))
		goto out_unlock;
	upper = upperpath.dentry;
	if (upper->d_parent->d_inode != udir)
		goto out_unlock;

	opaquedir = ovl_lookup_temp(workdir, dentry);
	err = PTR_ERR(opaquedir);
	if (IS_ERR(opaquedir))
		goto out_unlock;

	err = ovl_create_real(wdir, opaquedir, &stat, NULL, NULL, true);
	if (err)
		goto out_dput;

	err = ovl_copy_xattr(upper, opaquedir);
	if (err)
		goto out_cleanup;

	err = ovl_set_opaque(opaquedir);
	if (err)
		goto out_cleanup;

	inode_lock(opaquedir->d_inode);
	err = ovl_set_attr(opaquedir, &stat);
	inode_unlock(opaquedir->d_inode);
	if (err)
		goto out_cleanup;

	err = ovl_do_rename(wdir, opaquedir, udir, upper, RENAME_EXCHANGE);
	if (err)
		goto out_cleanup;

	ovl_cleanup_whiteouts(upper, list);
	ovl_cleanup(wdir, upper);
	unlock_rename(workdir, upperdir);

	/* dentry's upper doesn't match now, get rid of it */
	d_drop(dentry);

	return opaquedir;

out_cleanup:
	ovl_cleanup(wdir, opaquedir);
out_dput:
	dput(opaquedir);
out_unlock:
	unlock_rename(workdir, upperdir);
out:
	return ERR_PTR(err);
}
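
Note: the heavy lifting in ovl_clear_empty() is the RENAME_EXCHANGE step, which atomically swaps the freshly prepared opaque directory in the workdir with the populated upper directory. The same primitive is reachable from userspace via renameat2(2); below is a minimal, hedged sketch (invoked through syscall(2) in case the C library lacks a wrapper; the paths are placeholders):

#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RENAME_EXCHANGE
#define RENAME_EXCHANGE (1 << 1)	/* from <linux/fs.h> */
#endif

int main(void)
{
	/* Atomically exchange two directory entries; both must exist. */
	if (syscall(SYS_renameat2, AT_FDCWD, "dir_a",
		    AT_FDCWD, "dir_b", RENAME_EXCHANGE) < 0) {
		perror("renameat2");
		return 1;
	}
	return 0;
}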
Example #2
/*
 * Starting point for SD card init.
 */
int mmc_attach_sd(struct mmc_host *host)
{
	int err;
	u32 ocr;
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	int retries;
#endif

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (err)
		return err;

	mmc_sd_attach_bus_ops(host);
	if (host->ocr_avail_sd)
		host->ocr_avail = host->ocr_avail_sd;

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		mmc_go_idle(host);

		err = mmc_spi_read_ocr(host, 0, &ocr);
		if (err)
			goto err;
	}

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		printk(KERN_WARNING "%s: card claims to support voltages "
		       "below the defined range. These will be ignored.\n",
		       mmc_hostname(host));
		ocr &= ~0x7F;
	}

	if ((ocr & MMC_VDD_165_195) &&
	    !(host->ocr_avail_sd & MMC_VDD_165_195)) {
		printk(KERN_WARNING "%s: SD card claims to support the "
		       "incompletely defined 'low voltage range'. This "
		       "will be ignored.\n", mmc_hostname(host));
		ocr &= ~MMC_VDD_165_195;
	}

	host->ocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage(s) of the card(s)?
	 */
	if (!host->ocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	retries = 5;
	while (retries) {
		err = mmc_sd_init_card(host, host->ocr, NULL);
		if (err) {
			retries--;
			continue;
		}
		break;
	}

	if (!retries) {
		printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
		       mmc_hostname(host), err);
		goto err;
	}
#else
	err = mmc_sd_init_card(host, host->ocr, NULL);
	if (err)
		goto err;
#endif

	mmc_release_host(host);
	err = mmc_add_card(host->card);
	mmc_claim_host(host);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	mmc_release_host(host);
	mmc_remove_card(host->card);
	host->card = NULL;
	mmc_claim_host(host);
err:
	mmc_detach_bus(host);

	printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
		mmc_hostname(host), err);

	return err;
}
Example #3
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
				  u16 timeout)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN_ON(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= STA_TID_NUM) ||
	    !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
	    (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
		return -EINVAL;

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
		return -EINVAL;

	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA sessions blocked. "
		       "Denying BA session request\n");
#endif
		return -EINVAL;
	}

	/*
	 * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
	 * member of an IBSS, and has no other existing Block Ack agreement
	 * with the recipient STA, then the initiating STA shall transmit a
	 * Probe Request frame to the recipient STA and shall not transmit an
	 * ADDBA Request frame unless it receives a Probe Response frame
	 * from the recipient within dot11ADDBAFailureTimeout.
	 *
	 * The probe request mechanism for ADDBA is currently not implemented,
	 * but we only build up Block Ack session with HT STAs. This information
	 * is set when we receive a bss info from a probe response or a beacon.
	 */
	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
	    !sta->sta.ht_cap.ht_supported) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - IBSS STA %pM"
		       "does not advertise HT support\n", pubsta->addr);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	/*
	 * if we have tried more than HT_AGG_BURST_RETRIES times we
	 * will spread our requests in time to avoid stalling connection
	 * for too long
	 */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
			HT_AGG_RETRIES_PERIOD)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - "
		       "waiting a grace period after %d failed requests "
		       "on tid %u\n",
		       sta->ampdu_mlme.addba_req_num[tid], tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	/* check if the TID is not in aggregation flow already */
	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - session is not "
				 "idle on tid %u\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	tid_tx->timeout = timeout;

	/* response timer */
	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer(&tid_tx->addba_resp_timer);

	/* tx timer */
	tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
	tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer(&tid_tx->session_timer);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/*
	 * Finally, assign it to the start array; the work item will
	 * collect it and move it to the normal array.
	 */
	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
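
Note: ieee80211_start_tx_ba_session() is the mac80211 API a driver (or its rate-control path) calls once it decides A-MPDU aggregation to a peer is worthwhile. A hedged sketch of a typical call site follows; the driver name is hypothetical and 5000 is just an example ADDBA timeout in TUs (0 would mean no timeout):

#include <net/mac80211.h>

/* Hypothetical hook in a driver's TX path: once enough traffic flows to
 * a peer on this TID, ask mac80211 to negotiate a BA session.  -EBUSY is
 * expected while a request is pending or being retried, so don't log it. */
static void mydrv_maybe_start_ampdu(struct ieee80211_sta *sta, u16 tid)
{
	int ret = ieee80211_start_tx_ba_session(sta, tid, 5000);

	if (ret && ret != -EBUSY)
		pr_debug("mydrv: BA request for tid %u failed: %d\n", tid, ret);
}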
static int idescsi_eh_reset (struct scsi_cmnd *cmd)
{
	struct request *req;
	idescsi_scsi_t *scsi  = scsihost_to_idescsi(cmd->device->host);
	ide_drive_t    *drive = scsi->drive;
	int             ready = 0;
	int             ret   = SUCCESS;

	/*
	 * In idescsi_eh_reset we forcefully remove the command from the ide
	 * subsystem and reset the device.
	 */

	if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
		printk (KERN_WARNING "ide-scsi: reset called for %lu\n", cmd->serial_number);

	if (!drive) {
		printk (KERN_WARNING "ide-scsi: Drive not set in idescsi_eh_reset\n");
		WARN_ON(1);
		return FAILED;
	}

	spin_lock_irq(cmd->device->host->host_lock);
	spin_lock(&ide_lock);

	if (!scsi->pc || (req = scsi->pc->rq) != HWGROUP(drive)->rq || !HWGROUP(drive)->handler) {
		printk (KERN_WARNING "ide-scsi: No active request in idescsi_eh_reset\n");
		spin_unlock(&ide_lock);
		spin_unlock_irq(cmd->device->host->host_lock);
		return FAILED;
	}

	/* kill current request */
	if (__blk_end_request(req, -EIO, 0))
		BUG();
	if (blk_sense_request(req))
		kfree(scsi->pc->buf);
	kfree(scsi->pc);
	scsi->pc = NULL;
	blk_put_request(req);

	/* now nuke the drive queue */
	while ((req = elv_next_request(drive->queue))) {
		if (__blk_end_request(req, -EIO, 0))
			BUG();
	}

	HWGROUP(drive)->rq = NULL;
	HWGROUP(drive)->handler = NULL;
	HWGROUP(drive)->busy = 1;		/* will set this to zero when ide reset finished */
	spin_unlock(&ide_lock);

	ide_do_reset(drive);

	/*
	 * ide_do_reset starts a polling handler which restarts itself every
	 * 50ms until the reset finishes.
	 */

	do {
		spin_unlock_irq(cmd->device->host->host_lock);
		msleep(50);
		spin_lock_irq(cmd->device->host->host_lock);
	} while (HWGROUP(drive)->handler);

	ready = drive_is_ready(drive);
	HWGROUP(drive)->busy--;
	if (!ready) {
		printk (KERN_ERR "ide-scsi: reset failed!\n");
		ret = FAILED;
	}

	spin_unlock_irq(cmd->device->host->host_lock);
	return ret;
}
static int cardhu_panel_enable(void)
{
	int ret;
	printk("Check cardhu_panel_enable \n");

	if (tegra3_get_project_id() == 0x4) {
		if (gpio_get_value(TEGRA_GPIO_PI6) == 0) {	/* Panel is Panasonic */
			printk("Check panel is panasonic \n");
			if (cardhu_lvds_vdd_bl == NULL) {
				cardhu_lvds_vdd_bl = regulator_get(NULL, "vdd_backlight");
				if (WARN_ON(IS_ERR(cardhu_lvds_vdd_bl)))
					pr_err("%s: couldn't get regulator vdd_backlight: %ld\n",
					__func__, PTR_ERR(cardhu_lvds_vdd_bl));
				else
					regulator_enable(cardhu_lvds_vdd_bl);
			}

			ret = gpio_direction_output(TEGRA_GPIO_PU5, 1);
			if (ret < 0) {
				printk("Check can not pull high TEGRA_GPIO_PU5 \n");
				gpio_free(TEGRA_GPIO_PU5);
				return ret;
			}
		} else {					/* Panel is hydis */
			printk("Check panel is hydis \n");
			gpio_set_value(TEGRA_GPIO_PH3, 0);
			ret = gpio_direction_output(TEGRA_GPIO_PU5, 0);
			if (ret < 0) {
				printk("Check can not pull low TEGRA_GPIO_PU5 \n");
				gpio_free(TEGRA_GPIO_PU5);
				return ret;
			}
		}
		mdelay(5);
	}

	if (tegra3_get_project_id() == 0x4) {
		if (cardhu_lvds_reg == NULL) {
			cardhu_lvds_reg = regulator_get(NULL, "vdd_lvds");
			if (WARN_ON(IS_ERR(cardhu_lvds_reg)))
				pr_err("%s: couldn't get regulator vdd_lvds: %ld\n",
					__func__, PTR_ERR(cardhu_lvds_reg));
			else
				regulator_enable(cardhu_lvds_reg);
		}
	}

	if (cardhu_lvds_vdd_panel == NULL) {
		cardhu_lvds_vdd_panel = regulator_get(NULL, "vdd_lcd_panel");
		if (WARN_ON(IS_ERR(cardhu_lvds_vdd_panel)))
			pr_err("%s: couldn't get regulator vdd_lcd_panel: %ld\n",
				__func__, PTR_ERR(cardhu_lvds_vdd_panel));
		else
			regulator_enable(cardhu_lvds_vdd_panel);
	}
	msleep(20);

	if (tegra3_get_project_id() == 0x4) {
		printk("Check power on/off for bridge IC \n");
		ret = gpio_direction_output(TEGRA_GPIO_PBB3, 1);
		if (ret < 0) {
			printk("Check can not pull high TEGRA_GPIO_PBB3 \n");
			gpio_free(TEGRA_GPIO_PBB3);
			return ret;
		}

		ret = gpio_direction_output(TEGRA_GPIO_PC6, 1);
		if (ret < 0) {
			printk("Check can not pull high TF700T_1.8V(TEGRA_GPIO_PC6) \n");
			gpio_free(TEGRA_GPIO_PC6);
			return ret;
		}

		mdelay(10);

		ret = gpio_direction_output(TEGRA_GPIO_PX0, 1);
		if (ret < 0) {
			printk("Check can not pull high TF700T_I2C_Switch(TEGRA_GPIO_PX0) \n");
			gpio_free(TEGRA_GPIO_PX0);
			return ret;
		}
		mdelay(10);
	}
	if (tegra3_get_project_id() == 0x4) {
		if (display_board_info.board_id == BOARD_DISPLAY_PM313) {
			/* lvds configuration */
			gpio_set_value(pm313_R_FDE, 1);
			gpio_set_value(pm313_R_FB, 1);
			gpio_set_value(pm313_MODE0, 1);
			gpio_set_value(pm313_MODE1, 0);
			gpio_set_value(pm313_BPP, PM313_LVDS_PANEL_BPP);

			/* FIXME : it may require more or less delay for latching
			  values correctly before enabling RGB2LVDS */
			mdelay(100);
			gpio_set_value(pm313_lvds_shutdown, 1);
		} else {
			gpio_set_value(e1247_pm269_lvds_shutdown, 1);
		}

		ret = gpio_direction_output(TEGRA_GPIO_PD2, 1);
		if (ret < 0) {
			printk("Check can not pull high TF700T_OSC(TEGRA_GPIO_PD2) \n");
			gpio_free(TEGRA_GPIO_PD2);
			return ret;
		}
		msleep(10);
	}
	return 0;
}
Example #6
static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
		 int signr, sigset_t *set, unsigned long handler,
		 int ctx_has_vsx_region)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
#endif
	unsigned long msr = regs->msr;
	long err = 0;

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);

	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &current->thread.vr_state,
				      33 * sizeof(vector128));
		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
	flush_fp_to_thread(current);
	/* copy fpr regs and fpscr */
	err |= copy_fpr_to_user(&sc->fp_regs, current);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace.  Update v_regs to point after the
	 * VMX data.
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		__giveup_vsx(current);
		v_regs += ELF_NVRREG;
		err |= copy_vsx_to_user(v_regs, current);
		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_regs contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
	err |= __put_user(&sc->gp_regs, &sc->regs);
	WARN_ON(!FULL_REGS(regs));
	err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |=  __put_user(set->sig[0], &sc->oldmask);

	return err;
}
Example #7
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | MMC_CARD_SECTOR_ADDR, &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;

		if (card->ext_csd.sectors && (rocr & MMC_CARD_SECTOR_ADDR))
			mmc_card_set_blockaddr(card);
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 1);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Activate wide bus (if supported).
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		unsigned ext_csd_bit, bus_width;

		if ((host->caps & MMC_CAP_8_BIT_DATA) &&
				!(mmc_bustest(host, card, MMC_BUS_WIDTH_8))) {
			pr_debug("Setting the bus width to 8 bit\n");
			ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
			bus_width = MMC_BUS_WIDTH_8;
		} else if (!(mmc_bustest(host, card, MMC_BUS_WIDTH_4))) {
			pr_debug("Setting the bus width to 4 bit\n");
			ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
			bus_width = MMC_BUS_WIDTH_4;
		} else {
			pr_debug("Setting the bus width to 1 bit\n");
			ext_csd_bit = EXT_CSD_BUS_WIDTH_1;
			bus_width = MMC_BUS_WIDTH_1;
		}
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bit);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to bus width %d "
			       "failed\n", mmc_hostname(card->host),
			       1 << bus_width);
			err = 0;
		} else {
			mmc_set_bus_width(card->host, bus_width);
		}
	}

	if (!oldcard)
		host->card = card;

	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:

	return err;
}
/**
 * mei_ioctl_connect_client - the connect to fw client IOCTL function
 *
 * @file: private data of the file object
 * @data: IOCTL connect data, input and output parameters
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_ioctl_connect_client(struct file *file,
			struct mei_connect_client_data *data)
{
	struct mei_device *dev;
	struct mei_client *client;
	struct mei_cl *cl;
	int i;
	int rets;

	cl = file->private_data;
	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto end;
	}

	if (cl->state != MEI_FILE_INITIALIZING &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		rets = -EBUSY;
		goto end;
	}

	/* find ME client we're trying to connect to */
	i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
	if (i < 0 || dev->me_clients[i].props.fixed_address) {
		dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n",
				&data->in_client_uuid);
		rets = -ENODEV;
		goto end;
	}

	cl->me_client_id = dev->me_clients[i].client_id;
	cl->state = MEI_FILE_CONNECTING;

	dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
			cl->me_client_id);
	dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
			dev->me_clients[i].props.protocol_version);
	dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
			dev->me_clients[i].props.max_msg_length);

	/* if we're connecting to amthif client then we will use the
	 * existing connection
	 */
	if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
		dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
		if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
			rets = -ENODEV;
			goto end;
		}
		mei_cl_unlink(cl);

		kfree(cl);
		cl = NULL;
		dev->iamthif_open_count++;
		file->private_data = &dev->iamthif_cl;

		client = &data->out_client_properties;
		client->max_msg_length =
			dev->me_clients[i].props.max_msg_length;
		client->protocol_version =
			dev->me_clients[i].props.protocol_version;
		rets = dev->iamthif_cl.status;

		goto end;
	}


	/* prepare the output buffer */
	client = &data->out_client_properties;
	client->max_msg_length = dev->me_clients[i].props.max_msg_length;
	client->protocol_version = dev->me_clients[i].props.protocol_version;
	dev_dbg(&dev->pdev->dev, "Can connect?\n");


	rets = mei_cl_connect(cl, file);

end:
	return rets;
}
/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	switch (cmd) {
	case IOCTL_MEI_CONNECT_CLIENT:
		dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
		connect_data = kzalloc(sizeof(struct mei_connect_client_data),
			GFP_KERNEL);

		if (!connect_data) {
			rets = -ENOMEM;
			goto out;
		}

		dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
		if (copy_from_user(connect_data, (char __user *)data,
			sizeof(struct mei_connect_client_data))) {

			dev_err(&dev->pdev->dev,
				"failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}

		rets = mei_ioctl_connect_client(file, connect_data);

		/* Error out if this failed */
		if (rets) {
			dev_err(&dev->pdev->dev,
				"failed mei_ioctl_connect_client\n");
			goto out;
		}

		/* if all is ok, copying the data back to user. */
		dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
		if (copy_to_user((char __user *)data, connect_data,
			sizeof(struct mei_connect_client_data))) {

			dev_err(&dev->pdev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;
		}
		break;
	case IOCTL_MEI_SETUP_DMA_BUF:
		dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_SETUP_DMA_BUF\n");
		rets = -EOPNOTSUPP;
		break;
	case IOCTL_MEI_UNSET_DMA_BUF:
		dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_UNSET_DMA_BUF\n");
		rets = -EOPNOTSUPP;
		break;
	default:
		dev_err(&dev->pdev->dev, ": unsupported ioctl %d.\n", cmd);
		rets = -EINVAL;
		break;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}
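
Note: from userspace the connect path above is driven through the MEI character device using the uapi definitions in <linux/mei.h>. A minimal, hedged sketch follows; the device node name varies by kernel version and the 16-byte client UUID is a placeholder you must replace with a real firmware client's UUID:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mei.h>

int main(void)
{
	struct mei_connect_client_data data;
	unsigned char uuid[16] = { 0 };	/* placeholder client UUID */
	char buf[128];
	ssize_t n;
	int fd;

	fd = open("/dev/mei", O_RDWR);	/* "/dev/mei0" on newer kernels */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&data, 0, sizeof(data));
	memcpy(&data.in_client_uuid, uuid, sizeof(uuid));

	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data) < 0) {
		perror("IOCTL_MEI_CONNECT_CLIENT");
		close(fd);
		return 1;
	}
	printf("connected, max_msg_length=%u\n",
	       data.out_client_properties.max_msg_length);

	/* request/response round trip; the message format is client specific */
	if (write(fd, "ping", 4) < 0)
		perror("write");
	n = read(fd, buf, sizeof(buf));
	if (n >= 0)
		printf("got %zd bytes\n", n);

	close(fd);
	return 0;
}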
Example #10
/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int rets;
	int err;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;


	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_read(dev, file, ubuf, length, offset);
		goto out;
	}

	if (cl->read_cb) {
		cb = cl->read_cb;
		/* read what's left */
		if (cb->buf_idx > *offset)
			goto copy_buffer;
		/* offset is beyond buf_idx, we have no more data, return 0 */
		if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
			rets = 0;
			goto free;
		}
		/* Offset needs to be cleaned for contiguous reads */
		if (cb->buf_idx == 0 && *offset > 0) {
			dev_dbg(&dev->pdev->dev, "idx = 0 offset = %lld\n", (unsigned long long)*offset);
			*offset = 0;
		}
	} else if (*offset > 0) {
		dev_dbg(&dev->pdev->dev, "offset = %lld\n", (unsigned long long)*offset);
		*offset = 0;
	}

	err = mei_cl_read_start(cl, length);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
			!waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
				MEI_READ_COMPLETE == cl->reading_state ||
				mei_cl_is_transitioning(cl))) {

			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (mei_cl_is_transitioning(cl)) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "buf.size = %d buf.idx= %ld\n",
	    cb->response_buffer.size, cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		dev_err(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = mei_cl_find_read_cb(cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->list);
	mei_io_cb_free(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
Example #11
/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	id = mei_me_cl_by_id(dev, cl->me_client_id);
	if (id < 0) {
		rets = -ENODEV;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (length > dev->me_clients[id].props.max_msg_length) {
		rets = -EFBIG;
		goto out;
	}

	if (cl->state != MEI_FILE_CONNECTED) {
		dev_err(&dev->pdev->dev, "host client = %d,  is not connected to ME client = %d",
			cl->host_client_id, cl->me_client_id);
		rets = -ENODEV;
		goto out;
	}
	if (cl == &dev->iamthif_cl) {
		write_cb = mei_amthif_find_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
				mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->list);
				mei_io_cb_free(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = mei_cl_find_read_cb(cl);
		if (write_cb) {
			list_del(&write_cb->list);
			mei_io_cb_free(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
		}
	} else if (cl->reading_state == MEI_IDLE)
		*offset = 0;


	write_cb = mei_io_cb_init(cl, file);
	if (!write_cb) {
		dev_err(&dev->pdev->dev, "write cb allocation failed\n");
		rets = -ENOMEM;
		goto out;
	}
	rets = mei_io_cb_alloc_req_buf(write_cb, length);
	if (rets)
		goto out;

	rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
	if (rets) {
		dev_err(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_write(dev, write_cb);

		if (rets) {
			dev_err(&dev->pdev->dev,
				"amthif write failed with status = %d\n", rets);
			goto out;
		}
		mutex_unlock(&dev->device_lock);
		return length;
	}

	rets = mei_cl_write(cl, write_cb, false);
out:
	mutex_unlock(&dev->device_lock);
	if (rets < 0)
		mei_io_cb_free(write_cb);
	return rets;
}
static void dormant_restore_addnl_reg(void)
{
	int i;
	u32 reg_val, val1, val2;
	u32 insurance = 0;

	/* Allow write access to the CCU registers */
	writel_relaxed(0xA5A501, (KONA_PROC_CLK_VA +
	       KPROC_CLK_MGR_REG_WR_ACCESS_OFFSET));

	for (i = 0; i < ARRAY_SIZE(addnl_save_reg_list); i++) {
		/* Restore the saved data */
		writel_relaxed(addnl_save_reg_list[i][1],
			       addnl_save_reg_list[i][0]);

		if (addnl_save_reg_list[i][0] == (KONA_PROC_CLK_VA +
		    KPROC_CLK_MGR_REG_TGTMASK_DBG1_OFFSET)) {

			/* Finished restoring all the A9 CCU registers
			 * lock the state machine so writing the GO bit
			 * would not cause issues with the state machine
			 */
			writel_relaxed(readl(KONA_PROC_CLK_VA +
			       KPROC_CLK_MGR_REG_LVM_EN_OFFSET) |
			       KPROC_CLK_MGR_REG_LVM_EN_POLICY_CONFIG_EN_MASK,
			       KONA_PROC_CLK_VA +
			       KPROC_CLK_MGR_REG_LVM_EN_OFFSET);

			/* Wait for HW confirmation of policy lock */
			do {
				udelay(1);
				reg_val = readl(KONA_PROC_CLK_VA +
					KPROC_CLK_MGR_REG_LVM_EN_OFFSET);
				insurance++;
			} while ((reg_val &
			KPROC_CLK_MGR_REG_LVM_EN_POLICY_CONFIG_EN_MASK) &&
			insurance < 10000);
			WARN_ON(insurance >= 10000);

			/* Write the go bit to trigger the frequency change */
			writel_relaxed(KPROC_CLK_MGR_REG_POLICY_CTL_GO_AC_MASK |
			       KPROC_CLK_MGR_REG_POLICY_CTL_GO_MASK,
			       KONA_PROC_CLK_VA +
			       KPROC_CLK_MGR_REG_POLICY_CTL_OFFSET);
		}
	}

	/* Wait until the new frequency takes effect */
	do {
		udelay(1);
		val1 =
		    readl(KONA_PROC_CLK_VA +
			  KPROC_CLK_MGR_REG_POLICY_CTL_OFFSET) &
			  KPROC_CLK_MGR_REG_POLICY_CTL_GO_MASK;
		val2 =
		    readl(KONA_PROC_CLK_VA +
			  KPROC_CLK_MGR_REG_POLICY_CTL_OFFSET) &
			  KPROC_CLK_MGR_REG_POLICY_CTL_GO_MASK;
	} while (val1 | val2);
	/* Lock CCU registers */
	writel_relaxed(0xA5A500, (KONA_PROC_CLK_VA +
	       KPROC_CLK_MGR_REG_WR_ACCESS_OFFSET));

}
Example #13
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int    payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	
	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	
	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	
	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frags(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	
	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &fq->q.net->mem);

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &fq->q.net->mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
Example #14
File: dir.c Project: AK101111/linux
static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
				    struct kstat *stat, const char *link,
				    struct dentry *hardlink)
{
	struct dentry *workdir = ovl_workdir(dentry);
	struct inode *wdir;
	struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
	struct inode *udir = upperdir->d_inode;
	struct dentry *upper;
	struct dentry *newdentry;
	int err;

	/* Check workdir before dereferencing it for wdir below */
	if (WARN_ON(!workdir))
		return -EROFS;

	wdir = workdir->d_inode;

	err = ovl_lock_rename_workdir(workdir, upperdir);
	if (err)
		goto out;

	newdentry = ovl_lookup_temp(workdir, dentry);
	err = PTR_ERR(newdentry);
	if (IS_ERR(newdentry))
		goto out_unlock;

	upper = lookup_one_len(dentry->d_name.name, upperdir,
			       dentry->d_name.len);
	err = PTR_ERR(upper);
	if (IS_ERR(upper))
		goto out_dput;

	err = ovl_create_real(wdir, newdentry, stat, link, hardlink, true);
	if (err)
		goto out_dput2;

	/*
	 * mode could have been mutilated due to umask (e.g. sgid directory)
	 */
	if (!hardlink &&
	    !S_ISLNK(stat->mode) && newdentry->d_inode->i_mode != stat->mode) {
		struct iattr attr = {
			.ia_valid = ATTR_MODE,
			.ia_mode = stat->mode,
		};
		inode_lock(newdentry->d_inode);
		err = notify_change(newdentry, &attr, NULL);
		inode_unlock(newdentry->d_inode);
		if (err)
			goto out_cleanup;
	}

	if (!hardlink && S_ISDIR(stat->mode)) {
		err = ovl_set_opaque(newdentry);
		if (err)
			goto out_cleanup;

		err = ovl_do_rename(wdir, newdentry, udir, upper,
				    RENAME_EXCHANGE);
		if (err)
			goto out_cleanup;

		ovl_cleanup(wdir, upper);
	} else {
		err = ovl_do_rename(wdir, newdentry, udir, upper, 0);
		if (err)
			goto out_cleanup;
	}
	ovl_instantiate(dentry, inode, newdentry, !!hardlink);
	newdentry = NULL;
out_dput2:
	dput(upper);
out_dput:
	dput(newdentry);
out_unlock:
	unlock_rename(workdir, upperdir);
out:
	return err;

out_cleanup:
	ovl_cleanup(wdir, newdentry);
	goto out_dput2;
}

static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
			      struct kstat *stat, const char *link,
			      struct dentry *hardlink)
{
	int err;
	const struct cred *old_cred;
	struct cred *override_cred;

	err = ovl_copy_up(dentry->d_parent);
	if (err)
		return err;

	old_cred = ovl_override_creds(dentry->d_sb);
	err = -ENOMEM;
	override_cred = prepare_creds();
	if (override_cred) {
		override_cred->fsuid = inode->i_uid;
		override_cred->fsgid = inode->i_gid;
		put_cred(override_creds(override_cred));
		put_cred(override_cred);

		if (!ovl_dentry_is_opaque(dentry))
			err = ovl_create_upper(dentry, inode, stat, link,
						hardlink);
		else
			err = ovl_create_over_whiteout(dentry, inode, stat,
							link, hardlink);
	}
	revert_creds(old_cred);
	if (!err) {
		struct inode *realinode = d_inode(ovl_dentry_upper(dentry));

		WARN_ON(inode->i_mode != realinode->i_mode);
		WARN_ON(!uid_eq(inode->i_uid, realinode->i_uid));
		WARN_ON(!gid_eq(inode->i_gid, realinode->i_gid));
	}
	return err;
}
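
Note: the prepare_creds()/override_creds()/revert_creds() sequence in ovl_create_or_link() is the stock kernel pattern for temporarily acting under a different fsuid/fsgid (the overlayfs code compresses it by dropping the returned old creds immediately, because ovl_override_creds()/revert_creds() already bracket the whole function). The textbook form of the pattern, as a hedged sketch with a hypothetical helper:

#include <linux/cred.h>

/* Hypothetical helper: run a filesystem operation with fsuid/fsgid
 * temporarily overridden, then restore the caller's credentials. */
static int do_as_user(kuid_t uid, kgid_t gid)
{
	const struct cred *old_cred;
	struct cred *new_cred;

	new_cred = prepare_creds();
	if (!new_cred)
		return -ENOMEM;

	new_cred->fsuid = uid;
	new_cred->fsgid = gid;
	old_cred = override_creds(new_cred);

	/* ... perform the operation as uid/gid here ... */

	revert_creds(old_cred);
	put_cred(new_cred);	/* drop the reference from prepare_creds() */
	return 0;
}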
static void __lpss_ssp_write_priv(struct driver_data *drv_data,
				  unsigned offset, u32 value)
{
	WARN_ON(!drv_data->lpss_base);
	writel(value, drv_data->lpss_base + offset);
}
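
Note: for context, the matching read-side accessor in the same driver follows the identical guard-then-MMIO pattern; this is sketched from memory of the upstream spi-pxa2xx source, so treat the details as approximate:

static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
{
	WARN_ON(!drv_data->lpss_base);
	return readl(drv_data->lpss_base + offset);
}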
Example #16
static int recover_dentry(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = f2fs_iget(inode->i_sb, pino);
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out_err;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino)) {
		clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
		goto out_unmap_put;
	}
	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode);
	if (err)
		goto out_err;

	if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
		iput(dir);
	} else {
		add_dirty_dir_inode(dir);
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
	}

	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out_err:
	iput(dir);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
/*
 * Trapped FP/ASIMD access.
 */
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
	/* TODO: implement lazy context saving/restoring */
	WARN_ON(1);
}
Example #18
/*==========================================================================*
 * Name:         smp_send_reschedule
 *
 * Description:  This routine requests other CPU to execute rescheduling.
 *               1.Send 'RESCHEDULE_IPI' to other CPU.
 *                 Request other CPU to execute 'smp_reschedule_interrupt()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_id - Target CPU ID
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_reschedule(int cpu_id)
{
	WARN_ON(cpu_is_offline(cpu_id));
	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
}
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu;
	int exthdrlen;
	int dst_exthdrlen;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;

	if (flags&MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa */
		}
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		if (rt->dst.flags & DST_XFRM_TUNNEL)
			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
		else
			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		exthdrlen = (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	} else {
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		dst_exthdrlen = 0;
		mtu = cork->fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/* For UDP, check if TX timestamp is enabled */
	if (sk->sk_type == SOCK_DGRAM)
		sock_tx_timestamp(sk, &tx_flags);

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
					   sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		err = ip6_ufo_append_data(sk, getfrag, from, length,
					  hh_len, fragheaderlen,
					  transhdrlen, mtu, flags, rt);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (skb == NULL || skb_prev == NULL)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    np->pmtudisc ==
						    IPV6_PMTUDISC_PROBE);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else {
					/* Only the initial fragment
					 * is time stamped.
					 */
					tx_flags = 0;
				}
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			if (sk->sk_type == SOCK_DGRAM)
				skb_shinfo(skb)->tx_flags = tx_flags;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			struct page_frag *pfrag = sk_page_frag(sk);

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
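
Note: ip6_append_data() is what accumulates a corked datagram across multiple sends; the easiest way to exercise it from userspace is MSG_MORE on an IPv6 UDP socket, where each flagged send appends to the pending packet and the first unflagged send flushes it as one datagram. A minimal sketch (the loopback address and port are placeholders):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in6 dst;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&dst, 0, sizeof(dst));
	dst.sin6_family = AF_INET6;
	dst.sin6_port = htons(12345);		/* placeholder port */
	inet_pton(AF_INET6, "::1", &dst.sin6_addr);
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("connect");
		return 1;
	}

	/* Appended to the same pending datagram by ip6_append_data()... */
	send(fd, "hello, ", 7, MSG_MORE);
	/* ...and this send without MSG_MORE transmits it as one packet. */
	send(fd, "world", 5, 0);

	close(fd);
	return 0;
}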
Example #20
static void __exit vmcp_exit(void)
{
	WARN_ON(misc_deregister(&vmcp_dev) != 0);
	debug_unregister(vmcp_debug);
	printk(KERN_INFO "z/VM CP interface unloaded.\n");
}
Example #21
/*
 * Starting point for MMC card init.
 */
int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
{
	int err;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	mmc_attach_bus_ops(host);

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		printk(KERN_WARNING "%s: card claims to support voltages "
		       "below the defined range. These will be ignored.\n",
		       mmc_hostname(host));
		ocr &= ~0x7F;
	}

	host->ocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage of the card?
	 */
	if (!host->ocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, host->ocr, NULL);
	if (err)
		goto err;

	mmc_release_host(host);

	err = mmc_add_card(host->card);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	mmc_remove_card(host->card);
	host->card = NULL;
	mmc_claim_host(host);
err:
	mmc_detach_bus(host);
	mmc_release_host(host);

	printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
		mmc_hostname(host), err);

	return err;
}
Example #22
static int sdio_irq_thread(void *_host)
{
	struct mmc_host *host = _host;
	struct sched_param param = { .sched_priority = 1 };
	unsigned long period, idle_period;
	int ret;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/*
	 * We want to allow for SDIO cards to work even on non SDIO
	 * aware hosts.  One thing that a non SDIO aware host cannot do
	 * is asynchronous notification of pending SDIO card interrupts,
	 * hence we poll for them in that case.
	 */
	idle_period = msecs_to_jiffies(10);
	period = (host->caps & MMC_CAP_SDIO_IRQ) ?
		MAX_SCHEDULE_TIMEOUT : idle_period;

	pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
		 mmc_hostname(host), period);

	do {
		/*
		 * We claim the host here on the driver's behalf for a couple
		 * of reasons:
		 *
		 * 1) it is already needed to retrieve the CCCR_INTx;
		 * 2) we want the driver(s) to clear the IRQ condition ASAP;
		 * 3) we need to control the abort condition locally.
		 *
		 * Just like traditional hard IRQ handlers, we expect SDIO
		 * IRQ handlers to be quick and to the point, so that the
		 * holding of the host lock does not cover too much work
		 * that doesn't require that lock to be held.
		 */
		ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
		if (ret)
			break;
		ret = process_sdio_pending_irqs(host);
		mmc_release_host(host);

		/*
		 * Give other threads a chance to run in the presence of
		 * errors.
		 */
		if (ret < 0) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule_timeout(HZ);
			set_current_state(TASK_RUNNING);
		}

		/*
		 * Adaptive polling frequency based on the assumption
		 * that an interrupt will be closely followed by more.
		 * This has a substantial benefit for network devices.
		 */
		if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
			if (ret > 0)
				period /= 2;
			else {
				period++;
				if (period > idle_period)
					period = idle_period;
			}
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (host->caps & MMC_CAP_SDIO_IRQ) {
			mmc_host_clk_hold(host);
			host->sdio_irq_pending = false;
			host->ops->enable_sdio_irq(host, 1);
			mmc_host_clk_release(host);
		}
		if (!kthread_should_stop())
			schedule_timeout(period);
		set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	if (host->caps & MMC_CAP_SDIO_IRQ) {
		mmc_host_clk_hold(host);
		host->ops->enable_sdio_irq(host, 0);
		mmc_host_clk_release(host);
	}

	pr_debug("%s: IRQ thread exiting with code %d\n",
		 mmc_hostname(host), ret);

	return ret;
}

static int sdio_card_irq_get(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);

	if (!host->sdio_irqs++) {
		atomic_set(&host->sdio_irq_thread_abort, 0);
		host->sdio_irq_thread =
			kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
				mmc_hostname(host));
		if (IS_ERR(host->sdio_irq_thread)) {
			int err = PTR_ERR(host->sdio_irq_thread);
			host->sdio_irqs--;
			return err;
		}
	}

	return 0;
}

static int sdio_card_irq_put(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);
	BUG_ON(host->sdio_irqs < 1);

	if (!--host->sdio_irqs) {
		atomic_set(&host->sdio_irq_thread_abort, 1);
		kthread_stop(host->sdio_irq_thread);
	}

	return 0;
}

/* If there is only 1 function registered set sdio_single_irq */
static void sdio_single_irq_set(struct mmc_card *card)
{
	struct sdio_func *func;
	int i;

	card->sdio_single_irq = NULL;
	if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
	    card->host->sdio_irqs == 1)
		for (i = 0; i < card->sdio_funcs; i++) {
			func = card->sdio_func[i];
			if (func && func->irq_handler) {
				card->sdio_single_irq = func;
				break;
			}
		}
}

/**
 *	sdio_claim_irq - claim the IRQ for a SDIO function
 *	@func: SDIO function
 *	@handler: IRQ handler callback
 *
 *	Claim and activate the IRQ for the given SDIO function. The provided
 *	handler will be called when that IRQ is asserted.  The host is always
 *	claimed already when the handler is called so the handler must not
 *	call sdio_claim_host() nor sdio_release_host().
 */
int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
	int ret;
	unsigned char reg;

	BUG_ON(!func);
	BUG_ON(!func->card);

	pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));

	if (func->irq_handler) {
		pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
		return -EBUSY;
	}

	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
	if (ret)
		return ret;

	reg |= 1 << func->num;

	reg |= 1; /* Master interrupt enable */

	ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
	if (ret)
		return ret;

	func->irq_handler = handler;
	ret = sdio_card_irq_get(func->card);
	if (ret)
		func->irq_handler = NULL;
	sdio_single_irq_set(func->card);

	return ret;
}
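
Note: a hedged sketch of the caller's side of sdio_claim_irq(): an SDIO function driver claims the IRQ from its probe while holding the host, matching the locking contract in the kerneldoc above. The driver names are hypothetical:

#include <linux/mmc/sdio_func.h>

/* Called with the host already claimed -- must not claim/release it. */
static void mydrv_irq_handler(struct sdio_func *func)
{
	/* read and clear the card's interrupt status registers here */
}

static int mydrv_probe(struct sdio_func *func,
		       const struct sdio_device_id *id)
{
	int ret;

	sdio_claim_host(func);
	ret = sdio_enable_func(func);
	if (ret)
		goto out;

	ret = sdio_claim_irq(func, mydrv_irq_handler);
	if (ret)
		sdio_disable_func(func);
out:
	sdio_release_host(func);
	return ret;
}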
int __init cardhu_panel_init(void)
{
	int err;
	struct resource __maybe_unused *res;

	tegra_get_board_info(&board_info);
	tegra_get_display_board_info(&display_board_info);

#if defined(CONFIG_TEGRA_NVMAP)
	cardhu_carveouts[1].base = tegra_carveout_start;
	cardhu_carveouts[1].size = tegra_carveout_size;
#endif

#if defined(CONFIG_ION_TEGRA)
	tegra_ion_data.heaps[0].base = tegra_carveout_start;
	tegra_ion_data.heaps[0].size = tegra_carveout_size;
#endif

	cardhu_panel_preinit();
	if (is_dsi_panel())
		goto skip_lvds;
#if defined(CONFIG_TEGRA_DC)
	if (WARN_ON(board_info.board_id == BOARD_E1291 &&
		((board_info.sku & SKU_TOUCHSCREEN_MECH_FIX) == 0))) {
		/* use 55Hz panel timings to reduce noise on sensitive touch */
		printk(KERN_INFO "Using cardhu_panel_modes_55hz\n");
		cardhu_disp1_out.parent_clk = "pll_p";
		cardhu_disp1_out.modes = cardhu_panel_modes_55hz;
		cardhu_disp1_out.n_modes = ARRAY_SIZE(cardhu_panel_modes_55hz);
	}

	if (display_board_info.board_id == BOARD_DISPLAY_PM313) {
		/* initialize the values */
#if defined(PM313_LVDS_PANEL_19X12)
		cardhu_disp1_out.modes = panel_19X12_modes;
		cardhu_disp1_out.n_modes = ARRAY_SIZE(panel_19X12_modes);
		cardhu_disp1_out.parent_clk = "pll_d_out0";
#if (PM313_LVDS_PANEL_BPP == 1)
		cardhu_disp1_out.depth = 18;
#else
		cardhu_disp1_out.depth = 24;
#endif
		cardhu_fb_data.xres = 1920;
		cardhu_fb_data.yres = 1200;

		cardhu_disp2_out.parent_clk = "pll_d2_out0";
		cardhu_hdmi_fb_data.xres = 1920;
		cardhu_hdmi_fb_data.yres = 1200;
#endif

		/* lvds configuration */
		err = gpio_request(pm313_R_FDE, "R_FDE");
		err |= gpio_direction_output(pm313_R_FDE, 1);

		err |= gpio_request(pm313_R_FB, "R_FB");
		err |= gpio_direction_output(pm313_R_FB, 1);

		err |= gpio_request(pm313_MODE0, "MODE0");
		err |= gpio_direction_output(pm313_MODE0, 1);

		err |= gpio_request(pm313_MODE1, "MODE1");
		err |= gpio_direction_output(pm313_MODE1, 0);

		err |= gpio_request(pm313_BPP, "BPP");
		err |= gpio_direction_output(pm313_BPP, PM313_LVDS_PANEL_BPP);

		err |= gpio_request(pm313_lvds_shutdown, "lvds_shutdown");
		/* free ride provided by bootloader */
		err |= gpio_direction_output(pm313_lvds_shutdown, 1);

		if (err)
			printk(KERN_ERR "ERROR(s) in LVDS configuration\n");
	}
/*        else if ((display_board_info.board_id == BOARD_DISPLAY_E1247 &&
				board_info.board_id == BOARD_PM269) ||
				(board_info.board_id == BOARD_E1257) ||
				(board_info.board_id == BOARD_PM305) ||
				(board_info.board_id == BOARD_PM311)) {
		gpio_request(e1247_pm269_lvds_shutdown, "lvds_shutdown");
		gpio_direction_output(e1247_pm269_lvds_shutdown, 1);
	} else {
		gpio_request(cardhu_lvds_shutdown, "lvds_shutdown");
		gpio_direction_output(cardhu_lvds_shutdown, 1);
	}
*/
	if (tegra3_get_project_id() == TEGRA3_PROJECT_P1801) {
		printk(KERN_INFO "P1801 display setting: HDMI as main display\n");
		cardhu_fb_data.xres = 1920;
		cardhu_fb_data.yres = 1080;

		cardhu_disp1_pdata.default_out = &cardhu_disp1_out_P1801;
		cardhu_disp1_device.resource	= cardhu_disp1_resources_P1801;
		cardhu_disp1_device.num_resources = ARRAY_SIZE(cardhu_disp1_resources_P1801);
	}

	if (tegra3_get_project_id() == 0x4) {
		printk(KERN_INFO "Checking TF700T settings\n");
		cardhu_disp1_out.modes = panel_19X12_modes;
		cardhu_disp1_out.n_modes = ARRAY_SIZE(panel_19X12_modes);
		cardhu_disp1_out.parent_clk = "pll_d_out0";
		cardhu_disp1_out.depth = 24;

		cardhu_fb_data.xres = 1920;
		cardhu_fb_data.yres = 1200;

		cardhu_disp2_out.parent_clk = "pll_d2_out0";
		cardhu_hdmi_fb_data.xres = 1920;
		cardhu_hdmi_fb_data.yres = 1200;

		gpio_request(TEGRA_GPIO_PU5, "LDO_EN");
		gpio_request(TEGRA_GPIO_PBB3, "TF700T_1.2V");
		gpio_request(TEGRA_GPIO_PC6, "TF700T_1.8V");
		gpio_request(TEGRA_GPIO_PX0, "TF700T_I2C_Switch");
		gpio_request(TEGRA_GPIO_PD2, "TF700T_OSC");
	}

#endif
	if (tegra3_get_project_id() == 0x4) {
		tegra_gpio_enable(cardhu_hdmi_enb);
		gpio_request(cardhu_hdmi_enb, "hdmi_5v_en");
		gpio_direction_output(cardhu_hdmi_enb, 0);
	} else {
		tegra_gpio_enable(cardhu_hdmi_enb);
		gpio_request(cardhu_hdmi_enb, "hdmi_5v_en");
		gpio_direction_output(cardhu_hdmi_enb, 1);
	}

skip_lvds:
	gpio_request(cardhu_hdmi_hpd, "hdmi_hpd");
	gpio_direction_input(cardhu_hdmi_hpd);

#if !(DC_CTRL_MODE & TEGRA_DC_OUT_ONE_SHOT_MODE)
	tegra_gpio_enable(e1506_lcd_te);
	gpio_request(e1506_lcd_te, "lcd_te");
	gpio_direction_input(e1506_lcd_te);
#endif

#ifdef CONFIG_HAS_EARLYSUSPEND
	cardhu_panel_early_suspender.suspend = cardhu_panel_early_suspend;
	cardhu_panel_early_suspender.resume = cardhu_panel_late_resume;
	cardhu_panel_early_suspender.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
	register_early_suspend(&cardhu_panel_early_suspender);
#endif

#ifdef CONFIG_TEGRA_GRHOST
	err = tegra3_register_host1x_devices();
	if (err)
		return err;
#endif

	err = platform_add_devices(cardhu_gfx_devices,
				ARRAY_SIZE(cardhu_gfx_devices));

#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
	res = nvhost_get_resource_byname(&cardhu_disp1_device,
					 IORESOURCE_MEM, "fbmem");
	res->start = tegra_fb_start;
	res->end = tegra_fb_start + tegra_fb_size - 1;
#endif

	/* Copy the bootloader fb to the fb. */
	tegra_move_framebuffer(tegra_fb_start, tegra_bootloader_fb_start,
				min(tegra_fb_size, tegra_bootloader_fb_size));

#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
	if (!err)
		err = nvhost_device_register(&cardhu_disp1_device);

	if (tegra3_get_project_id() != TEGRA3_PROJECT_P1801) {
		res = nvhost_get_resource_byname(&cardhu_disp2_device,
						 IORESOURCE_MEM, "fbmem");
		res->start = tegra_fb2_start;
		res->end = tegra_fb2_start + tegra_fb2_size - 1;

		/* Copy the bootloader fb to the fb2. */
		tegra_move_framebuffer(tegra_fb2_start, tegra_bootloader_fb_start,
					min(tegra_fb2_size, tegra_bootloader_fb_size));

		if (!err)
			err = nvhost_device_register(&cardhu_disp2_device);
	}

#endif

#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_NVAVP)
	if (!err)
		err = nvhost_device_register(&nvavp_device);
#endif
	return err;
}
Example #24
0
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	/* led_trigger_event(host->led, LED_FULL); */

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}

#ifdef CONFIG_MMC_PERF_PROFILING
		host->perf.start = ktime_get();
#endif
	}
	host->ops->request(host, mrq);
}
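
mmc_start_request() assumes the caller has already linked cmd, data and an optional stop command into one mmc_request; the function then back-links mrq into each member and clears their error fields before handing the whole request to host->ops->request(). A hedged caller-side sketch, loosely modeled on a multi-block read (variable names hypothetical):

	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
	cmd.arg = start_block;		/* hypothetical start address */
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	stop.opcode = MMC_STOP_TRANSMISSION;
	stop.flags = MMC_RSP_R1B | MMC_CMD_AC;

	data.blksz = 512;
	data.blocks = nr_blocks;	/* blksz * blocks must fit max_req_size */
	data.flags = MMC_DATA_READ;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;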
Example #25
0
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err;
	u32 cid[4];

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	err = mmc_sd_get_cid(host, ocr, cid);
	if (err)
		return err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0)
			return -ENOENT;

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &sd_type);
		if (IS_ERR(card))
			return PTR_ERR(card);

		card->type = MMC_TYPE_SD;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native buses: get card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_send_relative_addr(host, &card->rca);
		if (err)
			return err;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		err = mmc_sd_get_csd(host, card);
		if (err)
			return err;

		mmc_decode_cid(card);
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			return err;
	}

	err = mmc_sd_setup_card(host, card, oldcard != NULL);
	if (err)
		goto free_card;

	/*
	 * Attempt to change to high-speed (if supported)
	 */
	err = mmc_sd_switch_hs(card);
	if (err > 0)
		mmc_sd_go_highspeed(card);
	else if (err)
		goto free_card;

	/*
	 * Set bus speed.
	 */
	mmc_set_clock(host, mmc_sd_get_max_clock(card));

	/*
	 * Switch to wider bus (if supported).
	 */
	if ((host->caps & MMC_CAP_4_BIT_DATA) &&
		(card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
		err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
		if (err)
			goto free_card;

		mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
	}

	host->card = card;
	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);

	return err;
}
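
The oldcard path is what makes suspend/resume safe: if the card was swapped while powered down, the CID comparison above fails with -ENOENT. A hedged sketch of how a resume handler might reuse this function:

/* Sketch only: reinitialise the existing card on resume and let the CID
 * check above detect a card swap. */
static int mmc_sd_resume_sketch(struct mmc_host *host)
{
	int err;

	mmc_claim_host(host);
	err = mmc_sd_init_card(host, host->ocr, host->card);
	mmc_release_host(host);

	return err;
}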
Example #26
0
File: iwl-power.c Project: AMouri/linux
static void iwl_static_sleep_cmd(struct iwl_priv *priv,
				 struct iwl_powertable_cmd *cmd,
				 enum iwl_power_level lvl, int period)
{
	const struct iwl_power_vec_entry *table;
	int max_sleep[IWL_POWER_VEC_SIZE] = { 0 };
	int i;
	u8 skip;
	u32 slp_itrvl;

	if (priv->cfg->adv_pm) {
		table = apm_range_2;
		if (period <= IWL_DTIM_RANGE_1_MAX)
			table = apm_range_1;
		if (period <= IWL_DTIM_RANGE_0_MAX)
			table = apm_range_0;
	} else {
		table = range_2;
		if (period <= IWL_DTIM_RANGE_1_MAX)
			table = range_1;
		if (period <= IWL_DTIM_RANGE_0_MAX)
			table = range_0;
	}

	if (WARN_ON(lvl < 0 || lvl >= IWL_POWER_NUM))
		memset(cmd, 0, sizeof(*cmd));
	else
		*cmd = table[lvl].cmd;

	if (period == 0) {
		skip = 0;
		period = 1;
		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
			max_sleep[i] = 1;

	} else {
		skip = table[lvl].no_dtim;
		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
			max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]);
		max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1;
	}

	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	/* figure out the listen interval based on dtim period and skip */
	if (slp_itrvl == 0xFF)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32(period * (skip + 1));

	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	if (slp_itrvl > period)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32((slp_itrvl / period) * period);

	if (skip)
		cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
	else
		cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;

	if (hw_params(priv).shadow_reg_enable)
		cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
	else
		cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;

	if (iwl_advanced_bt_coexist(priv)) {
		if (!priv->cfg->bt_params->bt_sco_disable)
			cmd->flags |= IWL_POWER_BT_SCO_ENA;
		else
			cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
	}

	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL);

	/* enforce max sleep interval */
	for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) {
		if (le32_to_cpu(cmd->sleep_interval[i]) >
		    (max_sleep[i] * period))
			cmd->sleep_interval[i] =
				cpu_to_le32(max_sleep[i] * period);
		if (i != (IWL_POWER_VEC_SIZE - 1)) {
			if (le32_to_cpu(cmd->sleep_interval[i]) >
			    le32_to_cpu(cmd->sleep_interval[i+1]))
				cmd->sleep_interval[i] =
					cmd->sleep_interval[i+1];
		}
	}

	if (priv->power_data.bus_pm)
		cmd->flags |= IWL_POWER_PCI_PM_MSK;
	else
		cmd->flags &= ~IWL_POWER_PCI_PM_MSK;

	IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
			skip, period);
	IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
}
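
To make the interval arithmetic concrete, a worked example with hypothetical numbers:

/* Worked example (hypothetical values): DTIM period = 100 TU, skip = 2.
 *   slp_itrvl == 0xFF  ->  interval = period * (skip + 1) = 300 TU
 *   slp_itrvl == 250   ->  (250 / 100) * 100 = 200 TU (a DTIM multiple)
 * The result is then clamped to IWL_CONN_MAX_LISTEN_INTERVAL and finally
 * to max_sleep[i] * period per table entry, in descending order.
 */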
Example #27
0
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_back_parties initiator,
				    bool tx)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx;
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	spin_lock_bh(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx) {
		spin_unlock_bh(&sta->lock);
		return -ENOENT;
	}

	/* if we're already stopping ignore any new requests to stop */
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
		return -EALREADY;
	}

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		ieee80211_assign_tid_tx(sta, tid, NULL);
		spin_unlock_bh(&sta->lock);
		kfree_rcu(tid_tx, rcu_head);
		return 0;
	}

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	spin_unlock_bh(&sta->lock);

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	del_timer_sync(&tid_tx->addba_resp_timer);
	del_timer_sync(&tid_tx->session_timer);

	/*
	 * After this packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/*
	 * There might be a few packets being processed right now (on
	 * another CPU) that have already gotten past the aggregation
	 * check when it was still OPERATIONAL and consequently have
	 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
	 * call into the driver at the same time or even before the
	 * TX paths calls into it, which could confuse the driver.
	 *
	 * Wait for all currently running TX paths to finish before
	 * telling the driver. New packets will not go through since
	 * the aggregation session is no longer OPERATIONAL.
	 */
	synchronize_net();

	tid_tx->stop_initiator = initiator;
	tid_tx->tx_stop = tx;

	ret = drv_ampdu_action(local, sta->sdata,
			       IEEE80211_AMPDU_TX_STOP,
			       &sta->sta, tid, NULL, 0);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	return ret;
}
Example #28
0
/*
 * Read and write LPSS SSP private registers. Caller must first check that
 * is_lpss_ssp() returns true before these can be called.
 */
static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
{
	WARN_ON(!drv_data->lpss_base);
	return readl(drv_data->lpss_base + offset);
}
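
Only the read accessor appears in this excerpt; the write counterpart presumably mirrors it. A hedged sketch:

/* Sketch of the matching write accessor; the same is_lpss_ssp() caveat applies. */
static void __lpss_ssp_write_priv(struct driver_data *drv_data,
				  unsigned offset, u32 value)
{
	WARN_ON(!drv_data->lpss_base);
	writel(value, drv_data->lpss_base + offset);
}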
Example #29
0
int dss_init(struct platform_device *pdev)
{
	int r = 0, dss_irq;
	u32 rev;
	struct resource *dss_mem;
	bool skip_init = false;

	dss.pdata = pdev->dev.platform_data;
	dss.pdev = pdev;
	if (cpu_is_omap44xx())
		dss_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	else
		dss_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!dss_mem) {
		WARN_ON(1);
		r = -ENODEV;
		goto fail0;
	}

	dss.base = ioremap(dss_mem->start, resource_size(dss_mem));
	if (!dss.base) {
		DSSERR("can't ioremap DSS\n");
		r = -ENOMEM;
		goto fail0;
	}
	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M | DSS_CLK_96M);
	dss_mainclk_enable();

#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
	/* DISPC_CONTROL */
	if (omap_readl(0x48050440) & 1)	/* LCD enabled? */
		skip_init = true;
#endif

	if (!skip_init) {
		/* disable LCD and DIGIT output. This seems to fix the synclost
		 * problem that we get, if the bootloader starts the DSS and
		 * the kernel resets it */
		omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);

		/* We need to wait here a bit, otherwise we sometimes start to
		 * get synclost errors, and after that only power cycle will
		 * restore DSS functionality. I have no idea why this happens.
		 * And we have to wait _before_ resetting the DSS, but after
		 * enabling clocks.
		 */
		msleep(50);

		/* In OMAP44xx HWMOD would take care of resetting the module */
//		if (cpu_is_omap44xx())
//			_omap_dss_reset();
	}

	/* autoidle */
	REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0);

	/* swin.kim: modified to keep the LCD1 clock source */
#ifndef CONFIG_OMAP2_DSS_DSI
	/* Select DPLL */
	REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
#else
	/* Select DPLL */
	REG_FLD_MOD(DSS_CONTROL, 1, 1, 1);
	REG_FLD_MOD(DSS_CONTROL, 1, 0, 0);
#endif

#ifdef CONFIG_OMAP2_DSS_VENC
	REG_FLD_MOD(DSS_CONTROL, 1, 4, 4);	/* venc dac demen */
	REG_FLD_MOD(DSS_CONTROL, 1, 3, 3);	/* venc clock 4x enable */
	REG_FLD_MOD(DSS_CONTROL, 0, 2, 2);	/* venc clock mode = normal */
#endif

	if (!cpu_is_omap44xx())
		r = request_irq(INT_24XX_DSS_IRQ,
				cpu_is_omap24xx()
				? dss_irq_handler_omap2
				: dss_irq_handler_omap3,
				0, "OMAP DSS", NULL);
	else {
		dss_irq = platform_get_irq(pdev, 0);
		r = request_irq(dss_irq,
				dss_irq_handler_omap2,
				0, "OMAP DSS", NULL);
	}

	if (r < 0) {
		DSSERR("omap2 dss: request_irq failed\n");
		goto fail1;
	}

	if (cpu_is_omap34xx()) {
		dss.dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
		if (IS_ERR(dss.dpll4_m4_ck)) {
			DSSERR("Failed to get dpll4_m4_ck\n");
			r = PTR_ERR(dss.dpll4_m4_ck);
			goto fail2;
		}
	}

	dss.dsi1_clk_source = DSS_SRC_DSS1_ALWON_FCLK;
	dss.dsi2_clk_source = DSS_SRC_DSS1_ALWON_FCLK;
	dss.lcd1_clk_source = DSS_SRC_DSS1_ALWON_FCLK;
	dss.lcd2_clk_source = DSS_SRC_DSS1_ALWON_FCLK;
	dss.dispc_clk_source = DSS_SRC_DSS1_ALWON_FCLK;

	dss_save_context();

	rev = dss_read_reg(DSS_REVISION);
	printk(KERN_INFO "OMAP DSS rev %d.%d\n",
			FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M | DSS_CLK_96M);

	return 0;

fail2:
	if (!cpu_is_omap44xx())
		free_irq(INT_24XX_DSS_IRQ, NULL);
fail1:
	iounmap(dss.base);
fail0:
	return r;
}
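
The REG_FLD_MOD() calls above are read-modify-write helpers over single registers; the OMAP DSS code builds them from bitfield macros along these lines (a sketch, assuming the usual start >= end bit convention):

/* Sketch of the assumed bitfield helpers: mask bits start..end, then
 * splice a new value into a register image. */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end)	(((val) << (end)) & FLD_MASK(start, end))
#define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))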
Example #30
0
static void knode_kill(struct klist_node *knode)
{
	/* and no knode should die twice ever either, see we're very humane */
	WARN_ON(knode_dead(knode));
	*(unsigned long *)&knode->n_klist |= KNODE_DEAD;
}
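
KNODE_DEAD lives in the low bit of the n_klist back-pointer, so the flag costs no extra storage. A hedged sketch of the matching predicate and the mask needed before the pointer can be dereferenced:

/* Sketch: the low pointer bit doubles as the dead flag, so readers must
 * mask it off before using n_klist as a real pointer. */
#define KNODE_DEAD	1LU

static bool knode_dead(struct klist_node *knode)
{
	return (unsigned long)knode->n_klist & KNODE_DEAD;
}

static struct klist *knode_klist(struct klist_node *knode)
{
	return (struct klist *)((unsigned long)knode->n_klist & ~KNODE_DEAD);
}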