Example #1
static int cdc_mbim_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
{
	struct sk_buff *skb;
	struct cdc_mbim_state *info = (void *)&dev->data;
	struct cdc_ncm_ctx *ctx = info->ctx;
	int len;
	int nframes;
	int x;
	int offset;
	struct usb_cdc_ncm_ndp16 *ndp16;
	struct usb_cdc_ncm_dpe16 *dpe16;
	int ndpoffset;
	int loopcount = 50; /* arbitrary max preventing infinite loop */
	u8 *c;
	u16 tci;

	ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
	if (ndpoffset < 0)
		goto error;

next_ndp:
	nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
	if (nframes < 0)
		goto error;

	ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);

	switch (ndp16->dwSignature & cpu_to_le32(0x00ffffff)) {
	case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN):
		c = (u8 *)&ndp16->dwSignature;
		tci = c[3];
		break;
	case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN):
		c = (u8 *)&ndp16->dwSignature;
		tci = c[3] + 256;
		break;
	default:
		netif_dbg(dev, rx_err, dev->net,
			  "unsupported NDP signature <0x%08x>\n",
			  le32_to_cpu(ndp16->dwSignature));
		goto err_ndp;

	}

	dpe16 = ndp16->dpe16;
	for (x = 0; x < nframes; x++, dpe16++) {
		offset = le16_to_cpu(dpe16->wDatagramIndex);
		len = le16_to_cpu(dpe16->wDatagramLength);

		/*
		 * CDC NCM ch. 3.7
		 * All entries after first NULL entry are to be ignored
		 */
		if ((offset == 0) || (len == 0)) {
			if (!x)
				goto err_ndp; /* empty NTB */
			break;
		}

		/* sanity checking */
		if (((offset + len) > skb_in->len) || (len > ctx->rx_max)) {
			netif_dbg(dev, rx_err, dev->net,
				  "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
				  x, offset, len, skb_in);
			if (!x)
				goto err_ndp;
			break;
		} else {
			skb = cdc_mbim_process_dgram(dev, skb_in->data + offset, len, tci);
			if (!skb)
				goto error;
			usbnet_skb_return(dev, skb);
		}
	}
err_ndp:
	/* are there more NDPs to process? */
	ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
	if (ndpoffset && loopcount--)
		goto next_ndp;

	return 1;
error:
	return 0;
}
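
One detail worth pulling out of the switch above: both the stored dwSignature and the case labels stay in little-endian form, so the cpu_to_le32() conversions are folded into the constants at compile time and no per-NTB byte swap happens at run time. A minimal standalone sketch of the same trick (MBIM_SIGN_IPS and sig_is_ips are illustrative names, not taken from the driver):

#include <linux/types.h>
#include <asm/byteorder.h>

#define MBIM_SIGN_IPS	0x00535049	/* hypothetical 24-bit signature */

static bool sig_is_ips(__le32 raw_sig)
{
	/* mask off the session-id byte and compare in the LE domain */
	return (raw_sig & cpu_to_le32(0x00ffffff)) ==
	       cpu_to_le32(MBIM_SIGN_IPS);
}
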
Example #2
u32 sdio_read32(struct intf_hdl *pintfhdl, u32 addr)
{
	PADAPTER padapter;
	u8 bMacPwrCtrlOn;
	u8 deviceId;
	u16 offset;
	u32 ftaddr;
	u8 shift;
	u32 val;
	s32 err;

_func_enter_;

	padapter = pintfhdl->padapter;
	ftaddr = _cvrt2ftaddr(addr, &deviceId, &offset);

	rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
	if (((deviceId == WLAN_IOREG_DEVICE_ID) && (offset < 0x100))
		|| (_FALSE == bMacPwrCtrlOn)
#ifdef CONFIG_LPS_LCLK
		|| (_TRUE == adapter_to_pwrctl(padapter)->bFwCurrentInPSMode)
#endif
		)
	{
		err = sd_cmd52_read(pintfhdl, ftaddr, 4, (u8*)&val);
#ifdef SDIO_DEBUG_IO
		if (!err) {
#endif
			val = le32_to_cpu(val);
			return val;
#ifdef SDIO_DEBUG_IO
		}

		DBG_8192C(KERN_ERR "%s: Mac Power off, Read FAIL(%d)! addr=0x%x\n", __func__, err, addr);
		return SDIO_ERR_VAL32;
#endif
	}

	// 4-byte alignment
	shift = ftaddr & 0x3;
	if (shift == 0) {
		val = sd_read32(pintfhdl, ftaddr, NULL);
	} else {
		u8 *ptmpbuf;

		ptmpbuf = (u8*)rtw_malloc(8);
		if (NULL == ptmpbuf) {
			DBG_8192C(KERN_ERR "%s: Allocate memory FAIL!(size=8) addr=0x%x\n", __func__, addr);
			return SDIO_ERR_VAL32;
		}

		ftaddr &= ~(u16)0x3;
		sd_read(pintfhdl, ftaddr, 8, ptmpbuf);
		_rtw_memcpy(&val, ptmpbuf+shift, 4);
		val = le32_to_cpu(val);

		rtw_mfree(ptmpbuf, 8);
	}

_func_exit_;

	return val;
}
Example #3
static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
				    struct mwifiex_fw_image *fw)
{
	int ret = 0;
	u8 *firmware = fw->fw_buf, *recv_buff;
	u32 retries = USB8XXX_FW_MAX_RETRY, dlen;
	u32 fw_seqnum = 0, tlen = 0, dnld_cmd = 0;
	struct fw_data *fwdata;
	struct fw_sync_header sync_fw;
	u8 check_winner = 1;

	if (!firmware) {
		dev_err(adapter->dev,
			"No firmware image found! Terminating download\n");
		ret = -1;
		goto fw_exit;
	}

	/* Allocate memory for transmit */
	fwdata = kzalloc(FW_DNLD_TX_BUF_SIZE, GFP_KERNEL);
	if (!fwdata) {
		ret = -ENOMEM;
		goto fw_exit;
	}

	/* Allocate memory for receive */
	recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL);
	if (!recv_buff) {
		ret = -ENOMEM;
		kfree(fwdata);
		goto fw_exit;
	}

	do {
		/* Send pseudo data to check winner status first */
		if (check_winner) {
			memset(&fwdata->fw_hdr, 0, sizeof(struct fw_header));
			dlen = 0;
		} else {
			/* copy the header of the fw_data to get the length */
			memcpy(&fwdata->fw_hdr, &firmware[tlen],
			       sizeof(struct fw_header));

			dlen = le32_to_cpu(fwdata->fw_hdr.data_len);
			dnld_cmd = le32_to_cpu(fwdata->fw_hdr.dnld_cmd);
			tlen += sizeof(struct fw_header);

			memcpy(fwdata->data, &firmware[tlen], dlen);

			fwdata->seq_num = cpu_to_le32(fw_seqnum);
			tlen += dlen;
		}

		/* If the send/receive fails or CRC occurs then retry */
		while (retries--) {
			u8 *buf = (u8 *)fwdata;
			u32 len = FW_DATA_XMIT_SIZE;

			/* send the firmware block */
			ret = mwifiex_write_data_sync(adapter, buf, &len,
						MWIFIEX_USB_EP_CMD_EVENT,
						MWIFIEX_USB_TIMEOUT);
			if (ret) {
				dev_err(adapter->dev,
					"write_data_sync: failed: %d\n", ret);
				continue;
			}

			buf = recv_buff;
			len = FW_DNLD_RX_BUF_SIZE;

			/* Receive the firmware block response */
			ret = mwifiex_read_data_sync(adapter, buf, &len,
						MWIFIEX_USB_EP_CMD_EVENT,
						MWIFIEX_USB_TIMEOUT);
			if (ret) {
				dev_err(adapter->dev,
					"read_data_sync: failed: %d\n", ret);
				continue;
			}

			memcpy(&sync_fw, recv_buff,
			       sizeof(struct fw_sync_header));

			/* check 1st firmware block resp for highest bit set */
			if (check_winner) {
				if (le32_to_cpu(sync_fw.cmd) & 0x80000000) {
					dev_warn(adapter->dev,
						 "USB is not the winner %#x\n",
						 le32_to_cpu(sync_fw.cmd));

					/* returning success */
					ret = 0;
					goto cleanup;
				}

				dev_dbg(adapter->dev,
					"USB is the winner, start to download FW\n");

				check_winner = 0;
				break;
			}

			/* check the firmware block response for CRC errors */
			if (sync_fw.cmd) {
				dev_err(adapter->dev,
					"FW received block with CRC %#x\n",
					le32_to_cpu(sync_fw.cmd));
				ret = -1;
				continue;
			}

			retries = USB8XXX_FW_MAX_RETRY;
			break;
		}
		fw_seqnum++;
	} while ((dnld_cmd != FW_HAS_LAST_BLOCK) && retries);

cleanup:
	dev_dbg(adapter->dev, "%s: %d bytes downloaded\n", __func__, tlen);

	kfree(recv_buff);
	kfree(fwdata);

	if (retries)
		ret = 0;
fw_exit:
	return ret;
}
static inline u32
pdr_id(const struct pdr *pdr)
{
	return le32_to_cpu(pdr->id);
}
static inline u32
pdr_len(const struct pdr *pdr)
{
	return le32_to_cpu(pdr->len);
}
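
These two accessors keep all of the little-endian handling for PDR records in one place. A hedged usage sketch, assuming a layout in which each record starts with __le32 id and len fields and len gives the byte count of the payload (the struct shown and pdr_find are illustrative, not taken from the source):

struct pdr {
	__le32 id;
	__le32 len;
	u8 data[];
};

static const struct pdr *pdr_find(const struct pdr *pdr, const void *end,
				  u32 wanted_id)
{
	while ((const void *)pdr + sizeof(*pdr) <= end) {
		if (pdr_id(pdr) == wanted_id)
			return pdr;
		/* assumed layout: len is the byte length of data[] */
		pdr = (const void *)(pdr->data + pdr_len(pdr));
	}
	return NULL;	/* ran off the end without a match */
}
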
Example #6
static int ext2_check_descriptors (struct super_block * sb)
{
    int i;
    int desc_block = 0;
    struct ext2_sb_info *sbi = EXT2_SB(sb);
    unsigned long first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
    unsigned long last_block;
    struct ext2_group_desc * gdp = NULL;

    ext2_debug ("Checking group descriptors");

    for (i = 0; i < sbi->s_groups_count; i++)
    {
        if (i == sbi->s_groups_count - 1)
            last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
        else
            last_block = first_block +
                         (EXT2_BLOCKS_PER_GROUP(sb) - 1);

        if ((i % EXT2_DESC_PER_BLOCK(sb)) == 0)
            gdp = (struct ext2_group_desc *) sbi->s_group_desc[desc_block++]->b_data;
        if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
                le32_to_cpu(gdp->bg_block_bitmap) > last_block)
        {
            ext2_error (sb, "ext2_check_descriptors",
                        "Block bitmap for group %d"
                        " not in group (block %lu)!",
                        i, (unsigned long) le32_to_cpu(gdp->bg_block_bitmap));
            return 0;
        }
        if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
                le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
        {
            ext2_error (sb, "ext2_check_descriptors",
                        "Inode bitmap for group %d"
                        " not in group (block %lu)!",
                        i, (unsigned long) le32_to_cpu(gdp->bg_inode_bitmap));
            return 0;
        }
        if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
                le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >
                last_block)
        {
            ext2_error (sb, "ext2_check_descriptors",
                        "Inode table for group %d"
                        " not in group (block %lu)!",
                        i, (unsigned long) le32_to_cpu(gdp->bg_inode_table));
            return 0;
        }
        first_block += EXT2_BLOCKS_PER_GROUP(sb);
        gdp++;
    }
    return 1;
}
Example #7
void r8712_createbss_cmd_callback(struct _adapter *padapter,
				  struct cmd_obj *pcmd)
{
	unsigned long irqL;
	u8 timer_cancelled;
	struct sta_info *psta = NULL;
	struct wlan_network *pwlan = NULL;
	struct	mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct ndis_wlan_bssid_ex *pnetwork = (struct ndis_wlan_bssid_ex *)
					      pcmd->parmbuf;
	struct wlan_network *tgt_network = &(pmlmepriv->cur_network);

	if (pcmd->res != H2C_SUCCESS)
		_set_timer(&pmlmepriv->assoc_timer, 1);
	_cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
#ifdef __BIG_ENDIAN
	/* endian_convert */
	pnetwork->Length = le32_to_cpu(pnetwork->Length);
	pnetwork->Ssid.SsidLength = le32_to_cpu(pnetwork->Ssid.SsidLength);
	pnetwork->Privacy = le32_to_cpu(pnetwork->Privacy);
	pnetwork->Rssi = le32_to_cpu(pnetwork->Rssi);
	pnetwork->NetworkTypeInUse = le32_to_cpu(pnetwork->NetworkTypeInUse);
	pnetwork->Configuration.ATIMWindow = le32_to_cpu(pnetwork->
					Configuration.ATIMWindow);
	pnetwork->Configuration.DSConfig = le32_to_cpu(pnetwork->
					Configuration.DSConfig);
	pnetwork->Configuration.FHConfig.DwellTime = le32_to_cpu(pnetwork->
					Configuration.FHConfig.DwellTime);
	pnetwork->Configuration.FHConfig.HopPattern = le32_to_cpu(pnetwork->
					Configuration.FHConfig.HopPattern);
	pnetwork->Configuration.FHConfig.HopSet = le32_to_cpu(pnetwork->
					Configuration.FHConfig.HopSet);
	pnetwork->Configuration.FHConfig.Length = le32_to_cpu(pnetwork->
					Configuration.FHConfig.Length);
	pnetwork->Configuration.Length = le32_to_cpu(pnetwork->
					Configuration.Length);
	pnetwork->InfrastructureMode = le32_to_cpu(pnetwork->
					   InfrastructureMode);
	pnetwork->IELength = le32_to_cpu(pnetwork->IELength);
#endif
	spin_lock_irqsave(&pmlmepriv->lock, irqL);
	if (pmlmepriv->fw_state & WIFI_AP_STATE) {
		psta = r8712_get_stainfo(&padapter->stapriv,
					 pnetwork->MacAddress);
		if (!psta) {
			psta = r8712_alloc_stainfo(&padapter->stapriv,
						   pnetwork->MacAddress);
			if (psta == NULL)
				goto createbss_cmd_fail;
		}
		r8712_indicate_connect(padapter);
	} else {
		pwlan = _r8712_alloc_network(pmlmepriv);
		if (pwlan == NULL) {
			pwlan = r8712_get_oldest_wlan_network(
				&pmlmepriv->scanned_queue);
			if (pwlan == NULL)
				goto createbss_cmd_fail;
			pwlan->last_scanned = jiffies;
		} else
			list_insert_tail(&(pwlan->list),
					 &pmlmepriv->scanned_queue.queue);
		pnetwork->Length = r8712_get_ndis_wlan_bssid_ex_sz(pnetwork);
		memcpy(&(pwlan->network), pnetwork, pnetwork->Length);
		pwlan->fixed = true;
		memcpy(&tgt_network->network, pnetwork,
			(r8712_get_ndis_wlan_bssid_ex_sz(pnetwork)));
		if (pmlmepriv->fw_state & _FW_UNDER_LINKING)
			pmlmepriv->fw_state ^= _FW_UNDER_LINKING;
		/* we will set _FW_LINKED when there is one more sta to
		 * join us (stassoc_event_callback) */
	}
createbss_cmd_fail:
	spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
	r8712_free_cmd_obj(pcmd);
}
Example #8
static unsigned merge_threshold(struct btree_node *n)
{
	return le32_to_cpu(n->header.max_entries) / 3;
}
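
An illustrative caller for the helper above, assuming the node header also carries a little-endian nr_entries field; needs_merge is a made-up name, not part of the btree code shown:

static bool needs_merge(struct btree_node *n)
{
	/* consider merging once a node falls below a third of capacity */
	return le32_to_cpu(n->header.nr_entries) < merge_threshold(n);
}
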
Example #9
/*
 * Convert from filesystem to in-memory representation.
 */
static struct posix_acl *
ext4_acl_from_disk(const void *value, size_t size)
{
	const char *end = (char *)value + size;
	int n, count;
	struct posix_acl *acl;

	if (!value)
		return NULL;
	if (size < sizeof(ext4_acl_header))
		return ERR_PTR(-EINVAL);
	if (((ext4_acl_header *)value)->a_version !=
	    cpu_to_le32(EXT4_ACL_VERSION))
		return ERR_PTR(-EINVAL);
	value = (char *)value + sizeof(ext4_acl_header);
	count = ext4_acl_count(size);
	if (count < 0)
		return ERR_PTR(-EINVAL);
	if (count == 0)
		return NULL;
	acl = posix_acl_alloc(count, GFP_NOFS);
	if (!acl)
		return ERR_PTR(-ENOMEM);
	for (n = 0; n < count; n++) {
		ext4_acl_entry *entry =
			(ext4_acl_entry *)value;
		if ((char *)value + sizeof(ext4_acl_entry_short) > end)
			goto fail;
		acl->a_entries[n].e_tag  = le16_to_cpu(entry->e_tag);
		acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);

		switch (acl->a_entries[n].e_tag) {
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			value = (char *)value +
				sizeof(ext4_acl_entry_short);
			acl->a_entries[n].e_id = ACL_UNDEFINED_ID;
			break;

		case ACL_USER:
		case ACL_GROUP:
			value = (char *)value + sizeof(ext4_acl_entry);
			if ((char *)value > end)
				goto fail;
			acl->a_entries[n].e_id =
				le32_to_cpu(entry->e_id);
			break;

		default:
			goto fail;
		}
	}
	if (value != end)
		goto fail;
	return acl;

fail:
	posix_acl_release(acl);
	return ERR_PTR(-EINVAL);
}
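
For context, ext4_acl_count() derives the entry count purely from the xattr size: the first four tags (USER_OBJ, GROUP_OBJ, MASK, OTHER) use the short entry form, and anything beyond them must be full entries carrying an e_id. A sketch of that arithmetic, consistent with the parsing loop above (treat it as a reconstruction, not a verbatim copy of the ext4 helper):

static inline int ext4_acl_count(size_t size)
{
	ssize_t s;

	size -= sizeof(ext4_acl_header);
	s = size - 4 * sizeof(ext4_acl_entry_short);
	if (s < 0) {
		/* short entries only */
		if (size % sizeof(ext4_acl_entry_short))
			return -1;
		return size / sizeof(ext4_acl_entry_short);
	}
	/* four short entries plus some number of full ones */
	if (s % sizeof(ext4_acl_entry))
		return -1;
	return s / sizeof(ext4_acl_entry) + 4;
}
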
int zuma_test_dma (int cmd, int size)
{
	static const char *const test_legend[] = {
		"write", "verify",
		"copy", "compare",
		"write inc", "verify inc"
	};
	register int i, j;
	unsigned int p1 = ((unsigned int) test_buf1 + 0xff) & (~0xff);
	unsigned int p2 = ((unsigned int) test_buf2 + 0xff) & (~0xff);
	volatile unsigned int *ps = (unsigned int *) p1;
	volatile unsigned int *pd = (unsigned int *) p2;
	unsigned int funct, pat_lo = PAT_LO, pat_hi = PAT_HI;
	DMA_INT_STATUS stat;
	int ret = 0;

	if (!zuma_pbb_reg) {
		printf ("not initted\n");
		return -1;
	}

	if (cmd < 0 || cmd > 5) {
		printf ("inv cmd %d\n", cmd);
		return -1;
	}

	if (cmd == 2 || cmd == 3) {
		/* not implemented */
		return 0;
	}

	if (size <= 0 || size > 1024)
		size = 1024;

	size &= (~7);				/* throw away bottom 3 bits */

	p1 = ((unsigned int) test_buf1 + 0xff) & (~0xff);
	p2 = ((unsigned int) test_buf2 + 0xff) & (~0xff);

	memset ((void *) p1, 0, size);
	memset ((void *) p2, 0, size);

	for (i = 0; i < size / 4; i += 2) {
		ps[i] = pat_lo;
		ps[i + 1] = pat_hi;
		if (cmd == 4 || cmd == 5) {
			unsigned char *pl = (unsigned char *) &pat_lo;
			unsigned char *ph = (unsigned char *) &pat_hi;

			for (j = 0; j < 4; j++) {
				pl[j] += 8;
				ph[j] += 8;
			}
		}
	}

	funct = (1 << 31) | (cmd << 24) | (size);

	zuma_pbb_reg->int_mask.pci_bits.chan0 =
			EOF_RX_FLAG | EOF_TX_FLAG | EOB_TX_FLAG;

	zuma_pbb_reg->debug_57 = PAT_LO;	/* patl */
	zuma_pbb_reg->debug_58 = PAT_HI;	/* path */

	zuma_pbb_reg->debug_54 = cpu_to_le32 (p1);	/* src 0x01b0 */
	zuma_pbb_reg->debug_55 = cpu_to_le32 (p2);	/* dst 0x01b8 */
	zuma_pbb_reg->debug_56 = cpu_to_le32 (funct);	/* func, 0x01c0 */

	/* give DMA time to chew on things.. don't use DRAM or PCI */
	/* if you can avoid it. */
	do {
		for (i = 0; i < 1000 * 10; i++);
	} while (le32_to_cpu (zuma_pbb_reg->debug_56) & (1 << 31));

	stat.word = zuma_pbb_reg->status.word;
	zuma_pbb_reg->int_mask.word = 0;

	printf ("stat: %08x (%x)\n", stat.word, stat.pci_bits.chan0);

	printf ("func: %08x\n", le32_to_cpu (zuma_pbb_reg->debug_56));
	printf ("src @%08x: %08x %08x %08x %08x\n", p1, ps[0], ps[1], ps[2],
			ps[3]);
	printf ("dst @%08x: %08x %08x %08x %08x\n", p2, pd[0], pd[1], pd[2],
			pd[3]);
	printf ("func: %08x\n", le32_to_cpu (zuma_pbb_reg->debug_56));


	if (cmd == 0 || cmd == 4) {
		/* this is a write */
		if (!(stat.pci_bits.chan0 & EOF_RX_FLAG) ||	/* not done */
			(memcmp ((void *) ps, (void *) pd, size) != 0)) {	/* cmp error */
			for (i = 0; i < size / 4; i += 2) {
				if ((ps[i] != pd[i]) || (ps[i + 1] != pd[i + 1])) {
					printf ("s @%p:%08x %08x\n", &ps[i], ps[i], ps[i + 1]);
					printf ("d @%p:%08x %08x\n", &pd[i], pd[i], pd[i + 1]);
				}
			}
			ret = -1;
		}
	} else {
		/* this is a verify */
		if (!(stat.pci_bits.chan0 & EOF_TX_FLAG) ||	/* not done */
			(stat.pci_bits.chan0 & EOB_TX_FLAG)) {	/* cmp error */
			printf ("%08x: %08x %08x\n",
					le32_to_cpu (zuma_pbb_reg->debug_63),
					zuma_pbb_reg->debug_61, zuma_pbb_reg->debug_62);
			ret = -1;
		}
	}

	printf ("%s cmd %d, %d bytes: %s!\n", test_legend[cmd], cmd, size,
			(ret == 0) ? "PASSED" : "FAILED");
	return ret;
}
Example #11
int ext4_ext_migrate(struct inode *inode)
{
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	ext4_lblk_t blk_count = 0;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct list_blocks_struct lb;
	unsigned long max_entries;
	__u32 goal;

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
	    (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * don't migrate fast symlink
		 */
		return retval;

	handle = ext4_journal_start(inode,
					EXT4_DATA_TRANS_BLOCKS(inode->i_sb) +
					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
					2 * EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)
					+ 1);
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		return retval;
	}
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
				   S_IFREG, 0, goal);
	if (IS_ERR(tmp_inode)) {
		retval = -ENOMEM;
		ext4_journal_stop(handle);
		return retval;
	}
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * We don't want the inode to be reclaimed
	 * if we got interrupted in between. We have
	 * this tmp inode carrying reference to the
	 * data blocks of the original file. We set
	 * the i_nlink to zero at the last stage after
	 * switching the original file to extent format
	 */
	tmp_inode->i_nlink = 1;

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_orphan_add(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * start with one credit accounted for
	 * superblock modification.
	 *
	 * For the tmp_inode we have already committed the
	 * transaction that created the inode. Later, as and
	 * when we add extents, we extend the journal.
	 */
	/*
	 * Even though we take i_mutex we can still cause block allocation
	 * via mmap write to holes. If we have allocated new blocks we fail
	 * migrate.  New block allocation will clear EXT4_EXT_MIGRATE flag.
	 * The flag is updated with i_data_sem held to prevent racing with
	 * block allocation.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags | EXT4_EXT_MIGRATE;
	up_read((&EXT4_I(inode)->i_data_sem));

	handle = ext4_journal_start(inode, 1);

	ei = EXT4_I(inode);
	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32 bit block address 4 bytes */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++, blk_count++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]),
						blk_count, &lb);
			if (retval)
				goto err_out;
		}
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
					le32_to_cpu(i_data[EXT4_IND_BLOCK]),
					&blk_count, &lb);
		if (retval)
			goto err_out;
	} else
		blk_count +=  max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
					le32_to_cpu(i_data[EXT4_DIND_BLOCK]),
					&blk_count, &lb);
		if (retval)
			goto err_out;
	} else
		blk_count += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
					le32_to_cpu(i_data[EXT4_TIND_BLOCK]),
					&blk_count, &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * Failure case delete the extent information with the
		 * tmp_inode
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * if we fail to swap inode data free the extent
			 * details of the tmp inode
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	if (ext4_journal_extend(handle, 1) != 0)
		ext4_journal_restart(handle, 1);

	/*
	 * Mark the tmp_inode as of size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * set the  i_blocks count to zero
	 * so that the ext4_delete_inode does the
	 * right job
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);

	/*
	 * Set the i_nlink to zero so that
	 * generic_drop_inode really deletes the
	 * inode
	 */
	tmp_inode->i_nlink = 0;

	ext4_journal_stop(handle);

	iput(tmp_inode);

	return retval;
}
			DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc);
			DEBUGP(2, dev, "<- cm4040_read (failed)\n");
			if (rc == -ERESTARTSYS)
				return rc;
			return -EIO;
		}
		dev->r_buf[i] = xinb(iobase + REG_OFFSET_BULK_IN);
#ifdef CM4040_DEBUG
		pr_debug("%lu:%2x ", i, dev->r_buf[i]);
	}
	pr_debug("\n");
#else
	}
#endif

	bytes_to_read = 5 + le32_to_cpu(*(__le32 *)&dev->r_buf[1]);

	DEBUGP(6, dev, "BytesToRead=%zu\n", bytes_to_read);

	min_bytes_to_read = min(count, bytes_to_read + 5);
	min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE);

	DEBUGP(6, dev, "Min=%zu\n", min_bytes_to_read);

	for (i = 0; i < (min_bytes_to_read-5); i++) {
		rc = wait_for_bulk_in_ready(dev);
		if (rc <= 0) {
			DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc);
			DEBUGP(2, dev, "<- cm4040_read (failed)\n");
			if (rc == -ERESTARTSYS)
				return rc;
Example #13
/**
 * cik_sdma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;
    struct amdgpu_ib ib;
    unsigned i;
    unsigned index;
    int r;
    u32 tmp = 0;
    u64 gpu_addr;

    r = amdgpu_wb_get(adev, &index);
    if (r) {
        dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
        return r;
    }

    gpu_addr = adev->wb.gpu_addr + (index * 4);
    tmp = 0xCAFEDEAD;
    adev->wb.wb[index] = cpu_to_le32(tmp);

    r = amdgpu_ib_get(ring, NULL, 256, &ib);
    if (r) {
        amdgpu_wb_free(adev, index);
        DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
        return r;
    }

    ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
    ib.ptr[1] = lower_32_bits(gpu_addr);
    ib.ptr[2] = upper_32_bits(gpu_addr);
    ib.ptr[3] = 1;
    ib.ptr[4] = 0xDEADBEEF;
    ib.length_dw = 5;

    r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
    if (r) {
        amdgpu_ib_free(adev, &ib);
        amdgpu_wb_free(adev, index);
        DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
        return r;
    }
    r = amdgpu_fence_wait(ib.fence, false);
    if (r) {
        amdgpu_ib_free(adev, &ib);
        amdgpu_wb_free(adev, index);
        DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
        return r;
    }
    for (i = 0; i < adev->usec_timeout; i++) {
        tmp = le32_to_cpu(adev->wb.wb[index]);
        if (tmp == 0xDEADBEEF)
            break;
        DRM_UDELAY(1);
    }
    if (i < adev->usec_timeout) {
        DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
                 ib.fence->ring->idx, i);
    } else {
        DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
        r = -EINVAL;
    }
    amdgpu_ib_free(adev, &ib);
    amdgpu_wb_free(adev, index);
    return r;
}
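
The write-back handshake above is a common pattern: the CPU seeds a little-endian sentinel in a shared slot, the DMA engine overwrites it, and the CPU polls through le32_to_cpu() until the expected value appears or the timeout expires. A hedged helper capturing just the polling half (poll_wb_slot is a made-up name, not amdgpu API; it assumes udelay() from <linux/delay.h>):

static int poll_wb_slot(struct amdgpu_device *adev, unsigned index,
			u32 expect, unsigned timeout_us)
{
	unsigned i;

	for (i = 0; i < timeout_us; i++) {
		/* wb slots are little-endian; convert before comparing */
		if (le32_to_cpu(adev->wb.wb[index]) == expect)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
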
int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
	unsigned int transfer_length = scsi_bufflen(srb);
	unsigned int residue;
	int result;
	int fake_sense = 0;
	unsigned int cswlen;
	unsigned int cbwlen = US_BULK_CB_WRAP_LEN;

	/* Take care of BULK32 devices; set extra byte to 0 */
	if (unlikely(us->fflags & US_FL_BULK32)) {
		cbwlen = 32;
		us->iobuf[31] = 0;
	}

	/* set up the command wrapper */
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = cpu_to_le32(transfer_length);
	bcb->Flags = srb->sc_data_direction == DMA_FROM_DEVICE ? 1 << 7 : 0;
	bcb->Tag = ++us->tag;
	bcb->Lun = srb->device->lun;
	if (us->fflags & US_FL_SCM_MULT_TARG)
		bcb->Lun |= srb->device->id << 4;
	bcb->Length = srb->cmd_len;

	/* copy the command payload */
	memset(bcb->CDB, 0, sizeof(bcb->CDB));
	memcpy(bcb->CDB, srb->cmnd, bcb->Length);

	/* send it to out endpoint */
	US_DEBUGP("Bulk Command S 0x%x T 0x%x L %d F %d Trg %d LUN %d CL %d\n",
			le32_to_cpu(bcb->Signature), bcb->Tag,
			le32_to_cpu(bcb->DataTransferLength), bcb->Flags,
			(bcb->Lun >> 4), (bcb->Lun & 0x0F), 
			bcb->Length);
	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
				bcb, cbwlen, NULL);
	US_DEBUGP("Bulk command transfer result=%d\n", result);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/* DATA STAGE */
	/* send/receive data payload, if there is any */

	/* Some USB-IDE converter chips need a 100us delay between the
	 * command phase and the data phase.  Some devices need a little
	 * more than that, probably because of clock rate inaccuracies. */
	if (unlikely(us->fflags & US_FL_GO_SLOW))
		udelay(125);

	if (transfer_length) {
		unsigned int pipe = srb->sc_data_direction == DMA_FROM_DEVICE ? 
				us->recv_bulk_pipe : us->send_bulk_pipe;
		result = usb_stor_bulk_srb(us, pipe, srb);
		US_DEBUGP("Bulk data transfer result 0x%x\n", result);
		if (result == USB_STOR_XFER_ERROR)
			return USB_STOR_TRANSPORT_ERROR;

		/* If the device tried to send back more data than the
		 * amount requested, the spec requires us to transfer
		 * the CSW anyway.  Since there's no point retrying
		 * the command, we'll return fake sense data indicating
		 * Illegal Request, Invalid Field in CDB.
		 */
		if (result == USB_STOR_XFER_LONG)
			fake_sense = 1;
	}

	/* See flow chart on pg 15 of the Bulk Only Transport spec for
	 * an explanation of how this code works.
	 */

	/* get CSW for device status */
	US_DEBUGP("Attempting to get CSW...\n");
	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
				bcs, US_BULK_CS_WRAP_LEN, &cswlen);

	/* Some broken devices add unnecessary zero-length packets to the
	 * end of their data transfers.  Such packets show up as 0-length
	 * CSWs.  If we encounter such a thing, try to read the CSW again.
	 */
	if (result == USB_STOR_XFER_SHORT && cswlen == 0) {
		US_DEBUGP("Received 0-length CSW; retrying...\n");
		result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
				bcs, US_BULK_CS_WRAP_LEN, &cswlen);
	}

	/* did the attempt to read the CSW fail? */
	if (result == USB_STOR_XFER_STALLED) {

		/* get the status again */
		US_DEBUGP("Attempting to get CSW (2nd try)...\n");
		result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
				bcs, US_BULK_CS_WRAP_LEN, NULL);
	}

	/* if we still have a failure at this point, we're in trouble */
	US_DEBUGP("Bulk status result = %d\n", result);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/* check bulk status */
	residue = le32_to_cpu(bcs->Residue);
	US_DEBUGP("Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
			le32_to_cpu(bcs->Signature), bcs->Tag, 
			residue, bcs->Status);
	if (!(bcs->Tag == us->tag || (us->fflags & US_FL_BULK_IGNORE_TAG)) ||
		bcs->Status > US_BULK_STAT_PHASE) {
		US_DEBUGP("Bulk logical error\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* Some broken devices report odd signatures, so we do not check them
	 * for validity against the spec. We store the first one we see,
	 * and check subsequent transfers for validity against this signature.
	 */
	if (!us->bcs_signature) {
		us->bcs_signature = bcs->Signature;
		if (us->bcs_signature != cpu_to_le32(US_BULK_CS_SIGN))
			US_DEBUGP("Learnt BCS signature 0x%08X\n",
					le32_to_cpu(us->bcs_signature));
	} else if (bcs->Signature != us->bcs_signature) {
		US_DEBUGP("Signature mismatch: got %08X, expecting %08X\n",
			  le32_to_cpu(bcs->Signature),
			  le32_to_cpu(us->bcs_signature));
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* try to compute the actual residue, based on how much data
	 * was really transferred and what the device tells us */
	if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {

		/* Heuristically detect devices that generate bogus residues
		 * by seeing what happens with INQUIRY and READ CAPACITY
		 * commands.
		 */
		if (bcs->Status == US_BULK_STAT_OK &&
				scsi_get_resid(srb) == 0 &&
					((srb->cmnd[0] == INQUIRY &&
						transfer_length == 36) ||
					(srb->cmnd[0] == READ_CAPACITY &&
						transfer_length == 8))) {
			us->fflags |= US_FL_IGNORE_RESIDUE;

		} else {
			residue = min(residue, transfer_length);
			scsi_set_resid(srb, max(scsi_get_resid(srb),
			                                       (int) residue));
		}
	}

	/* based on the status code, we report good or bad */
	switch (bcs->Status) {
		case US_BULK_STAT_OK:
			/* device babbled -- return fake sense data */
			if (fake_sense) {
				memcpy(srb->sense_buffer, 
				       usb_stor_sense_invalidCDB, 
				       sizeof(usb_stor_sense_invalidCDB));
				return USB_STOR_TRANSPORT_NO_SENSE;
			}

			/* command good -- note that data could be short */
			return USB_STOR_TRANSPORT_GOOD;

		case US_BULK_STAT_FAIL:
			/* command failed */
			return USB_STOR_TRANSPORT_FAILED;

		case US_BULK_STAT_PHASE:
			/* phase error -- note that a transport reset will be
			 * invoked by the invoke_transport() function
			 */
			return USB_STOR_TRANSPORT_ERROR;
	}

	/* we should never get here, but if we do, we're in trouble */
	return USB_STOR_TRANSPORT_ERROR;
}
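
For reference, the CBW/CSW wrappers used above look roughly like this (a sketch following the Bulk-Only Transport spec, not necessarily the exact usb-storage header). Note that Tag is deliberately a native __u32: the host never interprets it and the device echoes the bytes back verbatim, so the bcs->Tag == us->tag check works without any byte swapping:

struct bulk_cb_wrap {
	__le32	Signature;		/* 'USBC' */
	__u32	Tag;			/* opaque, echoed by the device */
	__le32	DataTransferLength;	/* size of the data stage */
	__u8	Flags;			/* direction in bit 7 */
	__u8	Lun;
	__u8	Length;			/* valid CDB bytes */
	__u8	CDB[16];
};

struct bulk_cs_wrap {
	__le32	Signature;		/* 'USBS' */
	__u32	Tag;			/* must match the CBW tag */
	__le32	Residue;		/* bytes not transferred */
	__u8	Status;
};
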
Example #15
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
	struct buffer_head * bh;
	struct ext2_sb_info * sbi;
	struct ext2_super_block * es;
	struct inode *root;
	unsigned long block;
	unsigned long sb_block = get_sb_block(&data);
	unsigned long logic_sb_block;
	unsigned long offset = 0;
	unsigned long def_mount_opts;
	long ret = -EINVAL;
	int blocksize = BLOCK_SIZE;
	int db_count;
	int i, j;
	__le32 features;
	int err;

	err = -ENOMEM;
	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		goto failed_unlock;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock) {
		kfree(sbi);
		goto failed_unlock;
	}
	sb->s_fs_info = sbi;
	sbi->s_sb_block = sb_block;

	spin_lock_init(&sbi->s_lock);

	/*
	 * See what the current blocksize for the device is, and
	 * use that as the blocksize.  Otherwise (or if the blocksize
	 * is smaller than the default) use the default.
	 * This is important for devices that have a hardware
	 * sectorsize that is larger than the default.
	 */
	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
	if (!blocksize) {
		ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
		goto failed_sbi;
	}

	/*
	 * If the superblock doesn't start on a hardware sector boundary,
	 * calculate the offset.  
	 */
	if (blocksize != BLOCK_SIZE) {
		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
	} else {
		logic_sb_block = sb_block;
	}

	if (!(bh = sb_bread(sb, logic_sb_block))) {
		ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
		goto failed_sbi;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext2 macro-instructions depend on its value
	 */
	es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	if (def_mount_opts & EXT2_DEFM_DEBUG)
		set_opt(sbi->s_mount_opt, DEBUG);
	if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
		set_opt(sbi->s_mount_opt, GRPID);
	if (def_mount_opts & EXT2_DEFM_UID16)
		set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
	if (def_mount_opts & EXT2_DEFM_XATTR_USER)
		set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	if (def_mount_opts & EXT2_DEFM_ACL)
		set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
	
	if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
		set_opt(sbi->s_mount_opt, ERRORS_CONT);
	else
		set_opt(sbi->s_mount_opt, ERRORS_RO);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
	
	set_opt(sbi->s_mount_opt, RESERVATION);

	if (!parse_options((char *) data, sb))
		goto failed_mount;

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
		 MS_POSIXACL : 0);

	ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
				    EXT2_MOUNT_XIP if not */

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
	    (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
		ext2_msg(sb, KERN_WARNING,
			"warning: feature flags set on rev 0 fs, "
			"running e2fsck is recommended");
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
	if (features) {
		ext2_msg(sb, KERN_ERR,	"error: couldn't mount because of "
		       "unsupported optional features (%x)",
			le32_to_cpu(features));
		goto failed_mount;
	}
	if (!(sb->s_flags & MS_RDONLY) &&
	    (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
		ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
		       "unsupported optional features (%x)",
		       le32_to_cpu(features));
		goto failed_mount;
	}

	blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

	if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
		if (!silent)
			ext2_msg(sb, KERN_ERR,
				"error: unsupported blocksize for xip");
		goto failed_mount;
	}

	/* If the blocksize doesn't match, re-read the thing.. */
	if (sb->s_blocksize != blocksize) {
		brelse(bh);

		if (!sb_set_blocksize(sb, blocksize)) {
			ext2_msg(sb, KERN_ERR,
				"error: bad blocksize %d", blocksize);
			goto failed_sbi;
		}

		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
		bh = sb_bread(sb, logic_sb_block);
		if (!bh) {
			ext2_msg(sb, KERN_ERR, "error: couldn't read "
				"superblock on 2nd try");
			goto failed_sbi;
		}
		es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
			ext2_msg(sb, KERN_ERR, "error: magic mismatch");
			goto failed_mount;
		}
	}

	sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
	sb->s_max_links = EXT2_LINK_MAX;

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
		    !is_power_of_2(sbi->s_inode_size) ||
		    (sbi->s_inode_size > blocksize)) {
			ext2_msg(sb, KERN_ERR,
				"error: unsupported inode size: %d",
				sbi->s_inode_size);
			goto failed_mount;
		}
	}

	sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
				   le32_to_cpu(es->s_log_frag_size);
	if (sbi->s_frag_size == 0)
		goto cantfind_ext2;
	sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	if (EXT2_INODE_SIZE(sb) == 0)
		goto cantfind_ext2;
	sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
		goto cantfind_ext2;
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = sb->s_blocksize /
					sizeof (struct ext2_group_desc);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits =
		ilog2 (EXT2_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits =
		ilog2 (EXT2_DESC_PER_BLOCK(sb));

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	if (sb->s_blocksize != bh->b_size) {
		if (!silent)
			ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
		goto failed_mount;
	}

	if (sb->s_blocksize != sbi->s_frag_size) {
		ext2_msg(sb, KERN_ERR,
			"error: fragsize %lu != blocksize %lu"
			"(not supported yet)",
			sbi->s_frag_size, sb->s_blocksize);
		goto failed_mount;
	}

	if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #blocks per group too big: %lu",
			sbi->s_blocks_per_group);
		goto failed_mount;
	}
	if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #fragments per group too big: %lu",
			sbi->s_frags_per_group);
		goto failed_mount;
	}
	if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #inodes per group too big: %lu",
			sbi->s_inodes_per_group);
		goto failed_mount;
	}

	if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext2;
	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
				le32_to_cpu(es->s_first_data_block) - 1)
					/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
	db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
		   EXT2_DESC_PER_BLOCK(sb);
	sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext2_msg(sb, KERN_ERR, "error: not enough memory");
		goto failed_mount;
	}
	bgl_lock_init(sbi->s_blockgroup_lock);
	sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
	if (!sbi->s_debts) {
		ext2_msg(sb, KERN_ERR, "error: not enough memory");
		goto failed_mount_group_desc;
	}
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logic_sb_block, i);
		sbi->s_group_desc[i] = sb_bread(sb, block);
		if (!sbi->s_group_desc[i]) {
			for (j = 0; j < i; j++)
				brelse (sbi->s_group_desc[j]);
			ext2_msg(sb, KERN_ERR,
				"error: unable to read group descriptors");
			goto failed_mount_group_desc;
		}
	}
	if (!ext2_check_descriptors (sb)) {
		ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
		goto failed_mount2;
	}
	sbi->s_gdb_count = db_count;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	/* per-filesystem reservation list head & lock */
	spin_lock_init(&sbi->s_rsv_window_lock);
	sbi->s_rsv_window_root = RB_ROOT;
	/*
	 * Add a single, static dummy reservation to the start of the
	 * reservation window list --- it gives us a placeholder for
	 * append-at-start-of-list which makes the allocation logic
	 * _much_ simpler.
	 */
	sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_alloc_hit = 0;
	sbi->s_rsv_window_head.rsv_goal_size = 0;
	ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);

	err = percpu_counter_init(&sbi->s_freeblocks_counter,
				ext2_count_free_blocks(sb));
	if (!err) {
		err = percpu_counter_init(&sbi->s_freeinodes_counter,
				ext2_count_free_inodes(sb));
	}
	if (!err) {
		err = percpu_counter_init(&sbi->s_dirs_counter,
				ext2_count_dirs(sb));
	}
	if (err) {
		ext2_msg(sb, KERN_ERR, "error: insufficient memory");
		goto failed_mount3;
	}
	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext2_sops;
	sb->s_export_op = &ext2_export_ops;
	sb->s_xattr = ext2_xattr_handlers;

#ifdef CONFIG_QUOTA
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &dquot_quotactl_ops;
#endif

	root = ext2_iget(sb, EXT2_ROOT_INO);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto failed_mount3;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
		goto failed_mount3;
	}

	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext2_msg(sb, KERN_ERR, "error: get root inode failed");
		ret = -ENOMEM;
		goto failed_mount3;
	}
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
		ext2_msg(sb, KERN_WARNING,
			"warning: mounting ext3 filesystem as ext2");
	if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
		sb->s_flags |= MS_RDONLY;
	ext2_write_super(sb);
	return 0;

cantfind_ext2:
	if (!silent)
		ext2_msg(sb, KERN_ERR,
			"error: can't find an ext2 filesystem on dev %s.",
			sb->s_id);
	goto failed_mount;
failed_mount3:
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
	kfree(sbi->s_group_desc);
	kfree(sbi->s_debts);
failed_mount:
	brelse(bh);
failed_sbi:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
failed_unlock:
	return ret;
}
Example #16
irqreturn_t
islpci_interrupt(int irq, void *config)
{
	u32 reg;
	islpci_private *priv = config;
	struct net_device *ndev = priv->ndev;
	void __iomem *device = priv->device_base;
	int powerstate = ISL38XX_PSM_POWERSAVE_STATE;

	/* lock the interrupt handler */
	spin_lock(&priv->slock);

	/* received an interrupt request on a shared IRQ line
	 * first check whether the device is in sleep mode */
	reg = readl(device + ISL38XX_CTRL_STAT_REG);
	if (reg & ISL38XX_CTRL_STAT_SLEEPMODE)
		/* device is in sleep mode, IRQ was generated by someone else */
	{
#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
#endif
		spin_unlock(&priv->slock);
		return IRQ_NONE;
	}


	/* check whether there is any source of interrupt on the device */
	reg = readl(device + ISL38XX_INT_IDENT_REG);

	/* also check the contents of the Interrupt Enable Register, because this
	 * will filter out interrupt sources from other devices on the same irq ! */
	reg &= readl(device + ISL38XX_INT_EN_REG);
	reg &= ISL38XX_INT_SOURCES;

	if (reg != 0) {
		if (islpci_get_state(priv) != PRV_STATE_SLEEP)
			powerstate = ISL38XX_PSM_ACTIVE_STATE;

		/* reset the request bits in the Identification register */
		isl38xx_w32_flush(device, reg, ISL38XX_INT_ACK_REG);

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_FUNCTION_CALLS,
		      "IRQ: Identification register 0x%p 0x%x\n", device, reg);
#endif

		/* check for each bit in the register separately */
		if (reg & ISL38XX_INT_IDENT_UPDATE) {
#if VERBOSE > SHOW_ERROR_MESSAGES
			/* Queue has been updated */
			DEBUG(SHOW_TRACING, "IRQ: Update flag\n");

			DEBUG(SHOW_QUEUE_INDEXES,
			      "CB drv Qs: [%i][%i][%i][%i][%i][%i]\n",
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[0]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[1]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[2]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[3]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[4]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[5])
			    );

			DEBUG(SHOW_QUEUE_INDEXES,
			      "CB dev Qs: [%i][%i][%i][%i][%i][%i]\n",
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[0]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[1]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[2]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[3]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[4]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[5])
			    );
#endif

			/* cleanup the data low transmit queue */
			islpci_eth_cleanup_transmit(priv, priv->control_block);

			/* device is in active state, update the
			 * powerstate flag if necessary */
			powerstate = ISL38XX_PSM_ACTIVE_STATE;

			/* check all three queues in priority order
			 * call the PIMFOR receive function until the
			 * queue is empty */
			if (isl38xx_in_queue(priv->control_block,
						ISL38XX_CB_RX_MGMTQ) != 0) {
#if VERBOSE > SHOW_ERROR_MESSAGES
				DEBUG(SHOW_TRACING,
				      "Received frame in Management Queue\n");
#endif
				islpci_mgt_receive(ndev);

				islpci_mgt_cleanup_transmit(ndev);

				/* Refill slots in receive queue */
				islpci_mgmt_rx_fill(ndev);

				/* no need to trigger the device, next
                                   islpci_mgt_transaction does it */
			}

			while (isl38xx_in_queue(priv->control_block,
						ISL38XX_CB_RX_DATA_LQ) != 0) {
#if VERBOSE > SHOW_ERROR_MESSAGES
				DEBUG(SHOW_TRACING,
				      "Received frame in Data Low Queue\n");
#endif
				islpci_eth_receive(priv);
			}

			/* check whether the data transmit queues were full */
			if (priv->data_low_tx_full) {
				/* check whether the transmit is not full anymore */
				if (ISL38XX_CB_TX_QSIZE -
				    isl38xx_in_queue(priv->control_block,
						     ISL38XX_CB_TX_DATA_LQ) >=
				    ISL38XX_MIN_QTHRESHOLD) {
					/* nope, the driver is ready for more network frames */
					netif_wake_queue(priv->ndev);

					/* reset the full flag */
					priv->data_low_tx_full = 0;
				}
			}
		}

		if (reg & ISL38XX_INT_IDENT_INIT) {
			/* Device has been initialized */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING,
			      "IRQ: Init flag, device initialized\n");
#endif
			wake_up(&priv->reset_done);
		}

		if (reg & ISL38XX_INT_IDENT_SLEEP) {
			/* Device intends to move to powersave state */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "IRQ: Sleep flag\n");
#endif
			isl38xx_handle_sleep_request(priv->control_block,
						     &powerstate,
						     priv->device_base);
		}

		if (reg & ISL38XX_INT_IDENT_WAKEUP) {
			/* Device has been woken up to active state */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "IRQ: Wakeup flag\n");
#endif

			isl38xx_handle_wakeup(priv->control_block,
					      &powerstate, priv->device_base);
		}
	} else {
#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
#endif
		spin_unlock(&priv->slock);
		return IRQ_NONE;
	}

	/* sleep -> ready */
	if (islpci_get_state(priv) == PRV_STATE_SLEEP
	    && powerstate == ISL38XX_PSM_ACTIVE_STATE)
		islpci_set_state(priv, PRV_STATE_READY);

	/* !sleep -> sleep */
	if (islpci_get_state(priv) != PRV_STATE_SLEEP
	    && powerstate == ISL38XX_PSM_POWERSAVE_STATE)
		islpci_set_state(priv, PRV_STATE_SLEEP);

	/* unlock the interrupt handler */
	spin_unlock(&priv->slock);

	return IRQ_HANDLED;
}
Example #17
static int ext2_remount (struct super_block * sb, int * flags, char * data)
{
    struct ext2_sb_info * sbi = EXT2_SB(sb);
    struct ext2_super_block * es;
    unsigned long old_mount_opt = sbi->s_mount_opt;
    struct ext2_mount_options old_opts;
    unsigned long old_sb_flags;
    int err;

    /* Store the old options */
    old_sb_flags = sb->s_flags;
    old_opts.s_mount_opt = sbi->s_mount_opt;
    old_opts.s_resuid = sbi->s_resuid;
    old_opts.s_resgid = sbi->s_resgid;

    /*
     * Allow the "check" option to be passed as a remount option.
     */
    if (!parse_options (data, sbi)) {
        err = -EINVAL;
        goto restore_opts;
    }

    sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                  ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);

    es = sbi->s_es;
    if (((sbi->s_mount_opt & EXT2_MOUNT_XIP) !=
            (old_mount_opt & EXT2_MOUNT_XIP)) &&
            invalidate_inodes(sb))
        ext2_warning(sb, __FUNCTION__, "busy inodes while remounting "
                     "xip remain in cache (no functional problem)");
    if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
        return 0;
    if (*flags & MS_RDONLY) {
        if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
                !(sbi->s_mount_state & EXT2_VALID_FS))
            return 0;
        /*
         * OK, we are remounting a valid rw partition rdonly, so set
         * the rdonly flag and then mark the partition as valid again.
         */
        es->s_state = cpu_to_le16(sbi->s_mount_state);
        es->s_mtime = cpu_to_le32(get_seconds());
    } else {
        __le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
                                                ~EXT2_FEATURE_RO_COMPAT_SUPP);
        if (ret) {
            printk("EXT2-fs: %s: couldn't remount RDWR because of "
                   "unsupported optional features (%x).\n",
                   sb->s_id, le32_to_cpu(ret));
            err = -EROFS;
            goto restore_opts;
        }
        /*
         * Mounting a RDONLY partition read-write, so reread and
         * store the current valid flag.  (It may have been changed
         * by e2fsck since we originally mounted the partition.)
         */
        sbi->s_mount_state = le16_to_cpu(es->s_state);
        if (!ext2_setup_super (sb, es, 0))
            sb->s_flags &= ~MS_RDONLY;
    }
    ext2_sync_super(sb, es);
    return 0;
restore_opts:
    sbi->s_mount_opt = old_opts.s_mount_opt;
    sbi->s_resuid = old_opts.s_resuid;
    sbi->s_resgid = old_opts.s_resgid;
    sb->s_flags = old_sb_flags;
    return err;
}
Example #18
static int crb_acpi_add(struct acpi_device *device)
{
	struct tpm_chip *chip;
	struct acpi_tpm2 *buf;
	struct crb_priv *priv;
	struct device *dev = &device->dev;
	acpi_status status;
	u32 sm;
	u64 pa;
	int rc;

	status = acpi_get_table(ACPI_SIG_TPM2, 1,
				(struct acpi_table_header **) &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to get TPM2 ACPI table\n");
		return -ENODEV;
	}

	/* Should the FIFO driver handle this? */
	if (buf->start_method == TPM2_START_FIFO)
		return -ENODEV;

	chip = tpmm_chip_alloc(dev, &tpm_crb);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	chip->flags = TPM_CHIP_FLAG_TPM2;

	if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
		dev_err(dev, "TPM2 ACPI table has wrong size");
		return -EINVAL;
	}

	priv = (struct crb_priv *) devm_kzalloc(dev, sizeof(struct crb_priv),
						GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to devm_kzalloc for private data\n");
		return -ENOMEM;
	}

	sm = le32_to_cpu(buf->start_method);

	/* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
	 * report only ACPI start but in practice seems to require both
	 * ACPI start and CRB start.
	 */
	if (sm == TPM2_START_CRB || sm == TPM2_START_FIFO ||
	    !strcmp(acpi_device_hid(device), "MSFT0101"))
		priv->flags |= CRB_FL_CRB_START;

	if (sm == TPM2_START_ACPI || sm == TPM2_START_CRB_WITH_ACPI)
		priv->flags |= CRB_FL_ACPI_START;

	priv->cca = (struct crb_control_area __iomem *)
		devm_ioremap_nocache(dev, buf->control_area_pa, 0x1000);
	if (!priv->cca) {
		dev_err(dev, "ioremap of the control area failed\n");
		return -ENOMEM;
	}

	memcpy_fromio(&pa, &priv->cca->cmd_pa, 8);
	pa = le64_to_cpu(pa);
	priv->cmd = devm_ioremap_nocache(dev, pa,
					 ioread32(&priv->cca->cmd_size));
	if (!priv->cmd) {
		dev_err(dev, "ioremap of the command buffer failed\n");
		return -ENOMEM;
	}

	memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
	pa = le64_to_cpu(pa);
	priv->rsp = devm_ioremap_nocache(dev, pa,
					 ioread32(&priv->cca->rsp_size));
	if (!priv->rsp) {
		dev_err(dev, "ioremap of the response buffer failed\n");
		return -ENOMEM;
	}

	chip->vendor.priv = priv;

	/* Default timeouts and durations */
	chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
	chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
	chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
	chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
	chip->vendor.duration[TPM_SHORT] =
		msecs_to_jiffies(TPM2_DURATION_SHORT);
	chip->vendor.duration[TPM_MEDIUM] =
		msecs_to_jiffies(TPM2_DURATION_MEDIUM);
	chip->vendor.duration[TPM_LONG] =
		msecs_to_jiffies(TPM2_DURATION_LONG);

	chip->acpi_dev_handle = device->handle;

	rc = tpm2_do_selftest(chip);
	if (rc)
		return rc;

	return tpm_chip_register(chip);
}
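
The two memcpy_fromio()/le64_to_cpu() pairs above read a 64-bit little-endian physical address out of MMIO without relying on a single 64-bit load being supported. A hedged helper form of the same pattern (crb_read_pa is an illustrative name, not from the driver):

static u64 crb_read_pa(const void __iomem *field)
{
	u64 pa;

	/* byte-wise MMIO copy, then convert from LE to CPU order */
	memcpy_fromio(&pa, field, sizeof(pa));
	return le64_to_cpu(pa);
}
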
Example #19
0
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
    struct buffer_head * bh;
    struct ext2_sb_info * sbi;
    struct ext2_super_block * es;
    struct inode *root;
    unsigned long block;
    unsigned long sb_block = get_sb_block(&data);
    unsigned long logic_sb_block;
    unsigned long offset = 0;
    unsigned long def_mount_opts;
    int blocksize = BLOCK_SIZE;
    int db_count;
    int i, j;
    __le32 features;

    sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
    if (!sbi)
        return -ENOMEM;
    sb->s_fs_info = sbi;

    /*
     * See what the current blocksize for the device is, and
     * use that as the blocksize.  Otherwise (or if the blocksize
     * is smaller than the default) use the default.
     * This is important for devices that have a hardware
     * sectorsize that is larger than the default.
     */
    blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
    if (!blocksize) {
        printk ("EXT2-fs: unable to set blocksize\n");
        goto failed_sbi;
    }

    /*
     * If the superblock doesn't start on a hardware sector boundary,
     * calculate the offset.
     */
    if (blocksize != BLOCK_SIZE) {
        logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
        offset = (sb_block*BLOCK_SIZE) % blocksize;
    } else {
        logic_sb_block = sb_block;
    }

    if (!(bh = sb_bread(sb, logic_sb_block))) {
        printk ("EXT2-fs: unable to read superblock\n");
        goto failed_sbi;
    }
    /*
     * Note: s_es must be initialized as soon as possible because
     *       some ext2 macro-instructions depend on its value
     */
    es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
    sbi->s_es = es;
    sb->s_magic = le16_to_cpu(es->s_magic);

    if (sb->s_magic != EXT2_SUPER_MAGIC)
        goto cantfind_ext2;

    /* Set defaults before we parse the mount options */
    def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
    if (def_mount_opts & EXT2_DEFM_DEBUG)
        set_opt(sbi->s_mount_opt, DEBUG);
    if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
        set_opt(sbi->s_mount_opt, GRPID);
    if (def_mount_opts & EXT2_DEFM_UID16)
        set_opt(sbi->s_mount_opt, NO_UID32);
    if (def_mount_opts & EXT2_DEFM_XATTR_USER)
        set_opt(sbi->s_mount_opt, XATTR_USER);
    if (def_mount_opts & EXT2_DEFM_ACL)
        set_opt(sbi->s_mount_opt, POSIX_ACL);

    if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
        set_opt(sbi->s_mount_opt, ERRORS_PANIC);
    else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_RO)
        set_opt(sbi->s_mount_opt, ERRORS_RO);
    else
        set_opt(sbi->s_mount_opt, ERRORS_CONT);

    sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
    sbi->s_resgid = le16_to_cpu(es->s_def_resgid);

    if (!parse_options ((char *) data, sbi))
        goto failed_mount;

    sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                  ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
                   MS_POSIXACL : 0);

    ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
                               EXT2_MOUNT_XIP if not */

    if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
            (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
             EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
             EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
        printk("EXT2-fs warning: feature flags set on rev 0 fs, "
               "running e2fsck is recommended\n");
    /*
     * Check feature flags regardless of the revision level, since we
     * previously didn't change the revision level when setting the flags,
     * so there is a chance incompat flags are set on a rev 0 filesystem.
     */
    features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
    if (features) {
        printk("EXT2-fs: %s: couldn't mount because of "
               "unsupported optional features (%x).\n",
               sb->s_id, le32_to_cpu(features));
        goto failed_mount;
    }
    if (!(sb->s_flags & MS_RDONLY) &&
            (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))) {
        printk("EXT2-fs: %s: couldn't mount RDWR because of "
               "unsupported optional features (%x).\n",
               sb->s_id, le32_to_cpu(features));
        goto failed_mount;
    }

    blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

    if ((ext2_use_xip(sb)) && ((blocksize != PAGE_SIZE) ||
                               (sb->s_blocksize != blocksize))) {
        if (!silent)
            printk("XIP: Unsupported blocksize\n");
        goto failed_mount;
    }

    /* If the blocksize doesn't match, re-read the thing.. */
    if (sb->s_blocksize != blocksize) {
        brelse(bh);

        if (!sb_set_blocksize(sb, blocksize)) {
            printk(KERN_ERR "EXT2-fs: blocksize too small for device.\n");
            goto failed_sbi;
        }

        logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
        offset = (sb_block*BLOCK_SIZE) % blocksize;
        bh = sb_bread(sb, logic_sb_block);
        if(!bh) {
            printk("EXT2-fs: Couldn't read superblock on "
                   "2nd try.\n");
            goto failed_sbi;
        }
        es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
        sbi->s_es = es;
        if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
            printk ("EXT2-fs: Magic mismatch, very weird !\n");
            goto failed_mount;
        }
    }

    sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);

    if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
        sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
        sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
    } else {
        sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
        sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
        if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
                (sbi->s_inode_size & (sbi->s_inode_size - 1)) ||
                (sbi->s_inode_size > blocksize)) {
            printk ("EXT2-fs: unsupported inode size: %d\n",
                    sbi->s_inode_size);
            goto failed_mount;
        }
    }

    sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
                       le32_to_cpu(es->s_log_frag_size);
    if (sbi->s_frag_size == 0)
        goto cantfind_ext2;
    sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;

    sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
    sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
    sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

    if (EXT2_INODE_SIZE(sb) == 0)
        goto cantfind_ext2;
    sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
    if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
        goto cantfind_ext2;
    sbi->s_itb_per_group = sbi->s_inodes_per_group /
                           sbi->s_inodes_per_block;
    sbi->s_desc_per_block = sb->s_blocksize /
                            sizeof (struct ext2_group_desc);
    sbi->s_sbh = bh;
    sbi->s_mount_state = le16_to_cpu(es->s_state);
    sbi->s_addr_per_block_bits =
        ilog2 (EXT2_ADDR_PER_BLOCK(sb));
    sbi->s_desc_per_block_bits =
        ilog2 (EXT2_DESC_PER_BLOCK(sb));

    if (sb->s_magic != EXT2_SUPER_MAGIC)
        goto cantfind_ext2;

    if (sb->s_blocksize != bh->b_size) {
        if (!silent)
            printk ("VFS: Unsupported blocksize on dev "
                    "%s.\n", sb->s_id);
        goto failed_mount;
    }

    if (sb->s_blocksize != sbi->s_frag_size) {
        printk ("EXT2-fs: fragsize %lu != blocksize %lu (not supported yet)\n",
                sbi->s_frag_size, sb->s_blocksize);
        goto failed_mount;
    }

    if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
        printk ("EXT2-fs: #blocks per group too big: %lu\n",
                sbi->s_blocks_per_group);
        goto failed_mount;
    }
    if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
        printk ("EXT2-fs: #fragments per group too big: %lu\n",
                sbi->s_frags_per_group);
        goto failed_mount;
    }
    if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
        printk ("EXT2-fs: #inodes per group too big: %lu\n",
                sbi->s_inodes_per_group);
        goto failed_mount;
    }

    if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
        goto cantfind_ext2;
    sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
                            le32_to_cpu(es->s_first_data_block) - 1)
                           / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
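    /* i.e. ceil((s_blocks_count - s_first_data_block) / blocks-per-group) */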
    db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
               EXT2_DESC_PER_BLOCK(sb);
    sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
    if (sbi->s_group_desc == NULL) {
        printk ("EXT2-fs: not enough memory\n");
        goto failed_mount;
    }
    bgl_lock_init(&sbi->s_blockgroup_lock);
    sbi->s_debts = kmalloc(sbi->s_groups_count * sizeof(*sbi->s_debts),
                           GFP_KERNEL);
    if (!sbi->s_debts) {
        printk ("EXT2-fs: not enough memory\n");
        goto failed_mount_group_desc;
    }
    memset(sbi->s_debts, 0, sbi->s_groups_count * sizeof(*sbi->s_debts));
    for (i = 0; i < db_count; i++) {
        block = descriptor_loc(sb, logic_sb_block, i);
        sbi->s_group_desc[i] = sb_bread(sb, block);
        if (!sbi->s_group_desc[i]) {
            for (j = 0; j < i; j++)
                brelse (sbi->s_group_desc[j]);
            printk ("EXT2-fs: unable to read group descriptors\n");
            goto failed_mount_group_desc;
        }
    }
    if (!ext2_check_descriptors (sb)) {
        printk ("EXT2-fs: group descriptors corrupted!\n");
        goto failed_mount2;
    }
    sbi->s_gdb_count = db_count;
    get_random_bytes(&sbi->s_next_generation, sizeof(u32));
    spin_lock_init(&sbi->s_next_gen_lock);

    percpu_counter_init(&sbi->s_freeblocks_counter,
                        ext2_count_free_blocks(sb));
    percpu_counter_init(&sbi->s_freeinodes_counter,
                        ext2_count_free_inodes(sb));
    percpu_counter_init(&sbi->s_dirs_counter,
                        ext2_count_dirs(sb));
    /*
     * set up enough so that it can read an inode
     */
    sb->s_op = &ext2_sops;
    sb->s_export_op = &ext2_export_ops;
    sb->s_xattr = ext2_xattr_handlers;
    root = iget(sb, EXT2_ROOT_INO);
    sb->s_root = d_alloc_root(root);
    if (!sb->s_root) {
        iput(root);
        printk(KERN_ERR "EXT2-fs: get root inode failed\n");
        goto failed_mount3;
    }
    if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
        dput(sb->s_root);
        sb->s_root = NULL;
        printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n");
        goto failed_mount3;
    }
    if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
        ext2_warning(sb, __func__,
                     "mounting ext3 filesystem as ext2");
    ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
    return 0;

cantfind_ext2:
    if (!silent)
        printk("VFS: Can't find an ext2 filesystem on dev %s.\n",
               sb->s_id);
    goto failed_mount;
failed_mount3:
    percpu_counter_destroy(&sbi->s_freeblocks_counter);
    percpu_counter_destroy(&sbi->s_freeinodes_counter);
    percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
    for (i = 0; i < db_count; i++)
        brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
    kfree(sbi->s_group_desc);
    kfree(sbi->s_debts);
failed_mount:
    brelse(bh);
failed_sbi:
    sb->s_fs_info = NULL;
    kfree(sbi);
    return -EINVAL;
}
Example #20
0
u32 mcf_pci_inl(u32 addr)
{
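	/* mask the address into the memory-mapped PCI I/O window and
	 * convert the little-endian register value to CPU byte order */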
	return le32_to_cpu(__raw_readl(iospace + (addr & PCI_IO_MASK)));
}
Example #21
0
static inline u32
dblock_addr(const struct dblock *blk)
{
	return le32_to_cpu(blk->addr);
}
Example #22
0
/*
 * Initialise VFS inode by reading inode from inode table (compressed
 * metadata).  The format and amount of data read depends on type.
 */
int squashfs_read_inode(struct inode *inode, long long ino)
{
	struct super_block *sb = inode->i_sb;
	struct squashfs_sb_info *msblk = sb->s_fs_info;
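	/* the inode number packs the inode's location on disk: the start
	 * of its metadata block and the byte offset within that block */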
	u64 block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
	int err, type, offset = SQUASHFS_INODE_OFFSET(ino);
	union squashfs_inode squashfs_ino;
	struct squashfs_base_inode *sqshb_ino = &squashfs_ino.base;
	int xattr_id = SQUASHFS_INVALID_XATTR;

	TRACE("Entered squashfs_read_inode\n");

	/*
	 * Read inode base common to all inode types.
	 */
	err = squashfs_read_metadata(sb, sqshb_ino, &block,
				&offset, sizeof(*sqshb_ino));
	if (err < 0)
		goto failed_read;

	err = squashfs_new_inode(sb, inode, sqshb_ino);
	if (err)
		goto failed_read;

	block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
	offset = SQUASHFS_INODE_OFFSET(ino);
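	/* rewind block/offset to the start of this inode so each case
	 * below can re-read its full type-specific structure (which
	 * embeds the base fields just parsed) */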

	type = le16_to_cpu(sqshb_ino->inode_type);
	switch (type) {
	case SQUASHFS_REG_TYPE: {
		unsigned int frag_offset, frag;
		int frag_size;
		u64 frag_blk;
		struct squashfs_reg_inode *sqsh_ino = &squashfs_ino.reg;

		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
							sizeof(*sqsh_ino));
		if (err < 0)
			goto failed_read;

		frag = le32_to_cpu(sqsh_ino->fragment);
		if (frag != SQUASHFS_INVALID_FRAG) {
			frag_offset = le32_to_cpu(sqsh_ino->offset);
			frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
			if (frag_size < 0) {
				err = frag_size;
				goto failed_read;
			}
		} else {
			frag_blk = SQUASHFS_INVALID_BLK;
			frag_size = 0;
			frag_offset = 0;
		}

		inode->i_nlink = 1;
		inode->i_size = le32_to_cpu(sqsh_ino->file_size);
		inode->i_fop = &generic_ro_fops;
		inode->i_mode |= S_IFREG;
		inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
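		/* i_blocks counts 512-byte sectors, hence the shift by 9 */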
		squashfs_i(inode)->fragment_block = frag_blk;
		squashfs_i(inode)->fragment_size = frag_size;
		squashfs_i(inode)->fragment_offset = frag_offset;
		squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
		squashfs_i(inode)->block_list_start = block;
		squashfs_i(inode)->offset = offset;
		inode->i_data.a_ops = &squashfs_aops;

		TRACE("File inode %x:%x, start_block %llx, block_list_start "
			"%llx, offset %x\n", SQUASHFS_INODE_BLK(ino),
			offset, squashfs_i(inode)->start, block, offset);
		break;
	}
	case SQUASHFS_LREG_TYPE: {
		unsigned int frag_offset, frag;
		int frag_size;
		u64 frag_blk;
		struct squashfs_lreg_inode *sqsh_ino = &squashfs_ino.lreg;

		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
							sizeof(*sqsh_ino));
		if (err < 0)
			goto failed_read;

		frag = le32_to_cpu(sqsh_ino->fragment);
		if (frag != SQUASHFS_INVALID_FRAG) {
			frag_offset = le32_to_cpu(sqsh_ino->offset);
			frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
			if (frag_size < 0) {
				err = frag_size;
				goto failed_read;
			}
		} else {
			frag_blk = SQUASHFS_INVALID_BLK;
			frag_size = 0;
			frag_offset = 0;
		}

		xattr_id = le32_to_cpu(sqsh_ino->xattr);
		inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
		inode->i_size = le64_to_cpu(sqsh_ino->file_size);
		inode->i_op = &squashfs_inode_ops;
		inode->i_fop = &generic_ro_fops;
		inode->i_mode |= S_IFREG;
		inode->i_blocks = ((inode->i_size -
				le64_to_cpu(sqsh_ino->sparse) - 1) >> 9) + 1;

		squashfs_i(inode)->fragment_block = frag_blk;
		squashfs_i(inode)->fragment_size = frag_size;
		squashfs_i(inode)->fragment_offset = frag_offset;
		squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block);
		squashfs_i(inode)->block_list_start = block;
		squashfs_i(inode)->offset = offset;
		inode->i_data.a_ops = &squashfs_aops;

		TRACE("File inode %x:%x, start_block %llx, block_list_start "
			"%llx, offset %x\n", SQUASHFS_INODE_BLK(ino),
			offset, squashfs_i(inode)->start, block, offset);
		break;
	}
	case SQUASHFS_DIR_TYPE: {
		struct squashfs_dir_inode *sqsh_ino = &squashfs_ino.dir;

		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
				sizeof(*sqsh_ino));
		if (err < 0)
			goto failed_read;

		inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
		inode->i_size = le16_to_cpu(sqsh_ino->file_size);
		inode->i_op = &squashfs_dir_inode_ops;
		inode->i_fop = &squashfs_dir_ops;
		inode->i_mode |= S_IFDIR;
		squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
		squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset);
		squashfs_i(inode)->dir_idx_cnt = 0;
		squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode);

		TRACE("Directory inode %x:%x, start_block %llx, offset %x\n",
				SQUASHFS_INODE_BLK(ino), offset,
				squashfs_i(inode)->start,
				le16_to_cpu(sqsh_ino->offset));
		break;
	}
	case SQUASHFS_LDIR_TYPE: {
		struct squashfs_ldir_inode *sqsh_ino = &squashfs_ino.ldir;

		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
				sizeof(*sqsh_ino));
		if (err < 0)
			goto failed_read;

		xattr_id = le32_to_cpu(sqsh_ino->xattr);
		inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
		inode->i_size = le32_to_cpu(sqsh_ino->file_size);
		inode->i_op = &squashfs_dir_inode_ops;
		inode->i_fop = &squashfs_dir_ops;
		inode->i_mode |= S_IFDIR;
		squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
		squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset);
		squashfs_i(inode)->dir_idx_start = block;
		squashfs_i(inode)->dir_idx_offset = offset;
		squashfs_i(inode)->dir_idx_cnt = le16_to_cpu(sqsh_ino->i_count);
		squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode);

		TRACE("Long directory inode %x:%x, start_block %llx, offset "
				"%x\n", SQUASHFS_INODE_BLK(ino), offset,
				squashfs_i(inode)->start,
				le16_to_cpu(sqsh_ino->offset));
		break;
	}
	case SQUASHFS_SYMLINK_TYPE:
	case SQUASHFS_LSYMLINK_TYPE: {
		struct squashfs_symlink_inode *sqsh_ino = &squashfs_ino.symlink;

		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
				sizeof(*sqsh_ino));
		if (err < 0)
			goto failed_read;

		inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
		inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
		inode->i_op = &squashfs_symlink_inode_ops;
		inode->i_data.a_ops = &squashfs_symlink_aops;
		inode->i_mode |= S_IFLNK;
		squashfs_i(inode)->start = block;
		squashfs_i(inode)->offset = offset;

		if (type == SQUASHFS_LSYMLINK_TYPE) {
			__le32 xattr;

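			/* a NULL buffer makes squashfs_read_metadata skip
			 * the symlink target, advancing block/offset to the
			 * xattr id that follows it */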
			err = squashfs_read_metadata(sb, NULL, &block,
						&offset, inode->i_size);
			if (err < 0)
				goto failed_read;
			err = squashfs_read_metadata(sb, &xattr, &block,
						&offset, sizeof(xattr));
			if (err < 0)
				goto failed_read;
			xattr_id = le32_to_cpu(xattr);
		}

		TRACE("Symbolic link inode %x:%x, start_block %llx, offset "
				"%x\n", SQUASHFS_INODE_BLK(ino), offset,
				block, offset);
		break;
	}
	case SQUASHFS_BLKDEV_TYPE:
	case SQUASHFS_CHRDEV_TYPE: {
		struct squashfs_dev_inode *sqsh_ino = &squashfs_ino.dev;
		unsigned int rdev;

		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
				sizeof(*sqsh_ino));
		if (err < 0)
			goto failed_read;

		if (type == SQUASHFS_CHRDEV_TYPE)
			inode->i_mode |= S_IFCHR;
		else
			inode->i_mode |= S_IFBLK;
		inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
		rdev = le32_to_cpu(sqsh_ino->rdev);
		init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));

		TRACE("Device inode %x:%x, rdev %x\n",
				SQUASHFS_INODE_BLK(ino), offset, rdev);
		break;
	}
	case SQUASHFS_LBLKDEV_TYPE:
	case SQUASHFS_LCHRDEV_TYPE: {
		struct squashfs_ldev_inode *sqsh_ino = &squashfs_ino.ldev;
		unsigned int rdev;

		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
				sizeof(*sqsh_ino));
		if (err < 0)
			goto failed_read;

		if (type == SQUASHFS_LCHRDEV_TYPE)
			inode->i_mode |= S_IFCHR;
		else
			inode->i_mode |= S_IFBLK;
		xattr_id = le32_to_cpu(sqsh_ino->xattr);
		inode->i_op = &squashfs_inode_ops;
		inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
		rdev = le32_to_cpu(sqsh_ino->rdev);
		init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));

		TRACE("Device inode %x:%x, rdev %x\n",
				SQUASHFS_INODE_BLK(ino), offset, rdev);
		break;
	}
	case SQUASHFS_FIFO_TYPE:
	case SQUASHFS_SOCKET_TYPE: {
		struct squashfs_ipc_inode *sqsh_ino = &squashfs_ino.ipc;

		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
				sizeof(*sqsh_ino));
		if (err < 0)
			goto failed_read;

		if (type == SQUASHFS_FIFO_TYPE)
			inode->i_mode |= S_IFIFO;
		else
			inode->i_mode |= S_IFSOCK;
		inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
		init_special_inode(inode, inode->i_mode, 0);
		break;
	}
	case SQUASHFS_LFIFO_TYPE:
	case SQUASHFS_LSOCKET_TYPE: {
		struct squashfs_lipc_inode *sqsh_ino = &squashfs_ino.lipc;

		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
				sizeof(*sqsh_ino));
		if (err < 0)
			goto failed_read;

		if (type == SQUASHFS_LFIFO_TYPE)
			inode->i_mode |= S_IFIFO;
		else
			inode->i_mode |= S_IFSOCK;
		xattr_id = le32_to_cpu(sqsh_ino->xattr);
		inode->i_op = &squashfs_inode_ops;
		inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
		init_special_inode(inode, inode->i_mode, 0);
		break;
	}
	default:
		ERROR("Unknown inode type %d in squashfs_iget!\n", type);
		return -EINVAL;
	}

	if (xattr_id != SQUASHFS_INVALID_XATTR && msblk->xattr_id_table) {
		err = squashfs_xattr_lookup(sb, xattr_id,
					&squashfs_i(inode)->xattr_count,
					&squashfs_i(inode)->xattr_size,
					&squashfs_i(inode)->xattr);
		if (err < 0)
			goto failed_read;
		inode->i_blocks += ((squashfs_i(inode)->xattr_size - 1) >> 9)
				+ 1;
	} else
		squashfs_i(inode)->xattr_count = 0;

	return 0;

failed_read:
	ERROR("Unable to read inode 0x%llx\n", ino);
	return err;
}
Example #23
0
static inline u32
pdr_addr(const struct pdr *pdr)
{
	return le32_to_cpu(pdr->addr);
}
Example #24
0
static int ext2_remount (struct super_block * sb, int * flags, char * data)
{
	struct ext2_sb_info * sbi = EXT2_SB(sb);
	struct ext2_super_block * es;
	unsigned long old_mount_opt = sbi->s_mount_opt;
	struct ext2_mount_options old_opts;
	unsigned long old_sb_flags;
	int err;

	spin_lock(&sbi->s_lock);

	/* Store the old options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;

	/*
	 * Allow the "check" option to be passed as a remount option.
	 */
	if (!parse_options(data, sb)) {
		err = -EINVAL;
		goto restore_opts;
	}

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);

	ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
				    EXT2_MOUNT_XIP if not */

	if ((ext2_use_xip(sb)) && (sb->s_blocksize != PAGE_SIZE)) {
		ext2_msg(sb, KERN_WARNING,
			"warning: unsupported blocksize for xip");
		err = -EINVAL;
		goto restore_opts;
	}

	es = sbi->s_es;
	if ((sbi->s_mount_opt ^ old_mount_opt) & EXT2_MOUNT_XIP) {
		ext2_msg(sb, KERN_WARNING, "warning: refusing change of "
			 "xip flag with busy inodes while remounting");
		sbi->s_mount_opt &= ~EXT2_MOUNT_XIP;
		sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
	}
	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
		spin_unlock(&sbi->s_lock);
		return 0;
	}
	if (*flags & MS_RDONLY) {
		if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
		    !(sbi->s_mount_state & EXT2_VALID_FS)) {
			spin_unlock(&sbi->s_lock);
			return 0;
		}

		/*
		 * OK, we are remounting a valid rw partition rdonly, so set
		 * the rdonly flag and then mark the partition as valid again.
		 */
		es->s_state = cpu_to_le16(sbi->s_mount_state);
		es->s_mtime = cpu_to_le32(get_seconds());
		spin_unlock(&sbi->s_lock);

		err = dquot_suspend(sb, -1);
		if (err < 0) {
			spin_lock(&sbi->s_lock);
			goto restore_opts;
		}

		ext2_sync_super(sb, es, 1);
	} else {
		__le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
					       ~EXT2_FEATURE_RO_COMPAT_SUPP);
		if (ret) {
			ext2_msg(sb, KERN_WARNING,
				"warning: couldn't remount RDWR because of "
				"unsupported optional features (%x).",
				le32_to_cpu(ret));
			err = -EROFS;
			goto restore_opts;
		}
		/*
		 * Mounting a RDONLY partition read-write, so reread and
		 * store the current valid flag.  (It may have been changed
		 * by e2fsck since we originally mounted the partition.)
		 */
		sbi->s_mount_state = le16_to_cpu(es->s_state);
		if (!ext2_setup_super (sb, es, 0))
			sb->s_flags &= ~MS_RDONLY;
		spin_unlock(&sbi->s_lock);

		ext2_write_super(sb);

		dquot_resume(sb, -1);
	}

	return 0;
restore_opts:
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sb->s_flags = old_sb_flags;
	spin_unlock(&sbi->s_lock);
	return err;
}
Example #25
0
void sd_int_dpc(PADAPTER padapter)
{
	PHAL_DATA_TYPE phal;
	struct dvobj_priv *dvobj;
	struct intf_hdl *pintfhdl = &padapter->iopriv.intf;
	struct pwrctrl_priv *pwrctl;


	phal = GET_HAL_DATA(padapter);
	dvobj = adapter_to_dvobj(padapter);
	pwrctl = dvobj_to_pwrctl(dvobj);

	if (phal->sdio_hisr & SDIO_HISR_CPWM1)
	{
		struct reportpwrstate_parm report;

#ifdef CONFIG_LPS_RPWM_TIMER
		u8 bcancelled;
		_cancel_timer(&(pwrctl->pwr_rpwm_timer), &bcancelled);
#endif // CONFIG_LPS_RPWM_TIMER

		report.state = SdioLocalCmd52Read1Byte(padapter, SDIO_REG_HCPWM1_8723B);

#ifdef CONFIG_LPS_LCLK
		//cpwm_int_hdl(padapter, &report);
		_set_workitem(&(pwrctl->cpwm_event));
#endif
	}

	if (phal->sdio_hisr & SDIO_HISR_TXERR)
	{
		u8 *status;
		u32 addr;

		status = rtw_malloc(4);
		if (status)
		{
			addr = REG_TXDMA_STATUS;
			HalSdioGetCmdAddr8723BSdio(padapter, WLAN_IOREG_DEVICE_ID, addr, &addr);
			_sd_read(pintfhdl, addr, 4, status);
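			/* write the value back to clear the reported error
			 * (the status register is apparently write-to-clear) */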
			_sd_write(pintfhdl, addr, 4, status);
			DBG_8192C("%s: SDIO_HISR_TXERR (0x%08x)\n", __func__, le32_to_cpu(*(u32*)status));
			rtw_mfree(status, 4);
		} else {
			DBG_8192C("%s: SDIO_HISR_TXERR, but can't allocate memory to read status!\n", __func__);
		}
	}

	if (phal->sdio_hisr & SDIO_HISR_TXBCNOK)
	{
		DBG_8192C("%s: SDIO_HISR_TXBCNOK\n", __func__);
	}

	if (phal->sdio_hisr & SDIO_HISR_TXBCNERR)
	{
		DBG_8192C("%s: SDIO_HISR_TXBCNERR\n", __func__);
	}
#ifndef CONFIG_C2H_PACKET_EN
	if (phal->sdio_hisr & SDIO_HISR_C2HCMD)
	{
		struct c2h_evt_hdr_88xx *c2h_evt;

		DBG_8192C("%s: C2H Command\n", __func__);
		if ((c2h_evt = (struct c2h_evt_hdr_88xx*)rtw_zmalloc(16)) != NULL) {
			if (rtw_hal_c2h_evt_read(padapter, (u8 *)c2h_evt) == _SUCCESS) {
				if (c2h_id_filter_ccx_8723b((u8 *)c2h_evt)) {
					/* Handle CCX report here */
					rtw_hal_c2h_handler(padapter, (u8 *)c2h_evt);
					rtw_mfree((u8*)c2h_evt, 16);
				} else {
					rtw_c2h_wk_cmd(padapter, (u8 *)c2h_evt);
				}
			}
		} else {
			/* Error handling for malloc fail */
			if (rtw_cbuf_push(padapter->evtpriv.c2h_queue, (void*)NULL) != _SUCCESS)
				DBG_871X("%s rtw_cbuf_push fail\n", __func__);
			_set_workitem(&padapter->evtpriv.c2h_wk);
		}
	}
#endif

	if (phal->sdio_hisr & SDIO_HISR_RXFOVW)
	{
		DBG_8192C("%s: Rx Overflow\n", __func__);
	}
	if (phal->sdio_hisr & SDIO_HISR_RXERR)
	{
		DBG_8192C("%s: Rx Error\n", __func__);
	}

	if (phal->sdio_hisr & SDIO_HISR_RX_REQUEST)
	{
		struct recv_buf *precvbuf;
		int alloc_fail_time = 0;
		u32 hisr;

//		DBG_8192C("%s: RX Request, size=%d\n", __func__, phal->SdioRxFIFOSize);
		phal->sdio_hisr ^= SDIO_HISR_RX_REQUEST;
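		/* the RX_REQUEST bit is known to be set here, so the XOR
		 * above clears (acknowledges) it */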
		do {
			phal->SdioRxFIFOSize = SdioLocalCmd52Read2Byte(padapter, SDIO_REG_RX0_REQ_LEN);
			if (phal->SdioRxFIFOSize != 0)
			{
#ifdef CONFIG_MAC_LOOPBACK_DRIVER
				sd_recv_loopback(padapter, phal->SdioRxFIFOSize);
#else
				precvbuf = sd_recv_rxfifo(padapter, phal->SdioRxFIFOSize);
				if (precvbuf)
					sd_rxhandler(padapter, precvbuf);
				else
				{
					alloc_fail_time++;
					DBG_871X("precvbuf is Null for %d times because alloc memory failed\n", alloc_fail_time);
					if (alloc_fail_time >= 10)
						break;
				}
				phal->SdioRxFIFOSize = 0;
#endif
			}
			else
				break;

			hisr = 0;
			ReadInterrupt8723BSdio(padapter, &hisr);
			hisr &= SDIO_HISR_RX_REQUEST;
			if (!hisr)
				break;
		} while (1);

		if (alloc_fail_time == 10)
			DBG_871X("exit because alloc memory failed 10 times\n");

	}
}
Example #26
0
static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	struct ext2_super_block *es = sbi->s_es;
	u64 fsid;

	spin_lock(&sbi->s_lock);

	if (test_opt (sb, MINIX_DF))
		sbi->s_overhead_last = 0;
	else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
		unsigned long i, overhead = 0;
		smp_rmb();

		/*
		 * Compute the overhead (FS structures). This is constant
		 * for a given filesystem unless the number of block groups
		 * changes so we cache the previous value until it does.
		 */

		/*
		 * All of the blocks before first_data_block are
		 * overhead
		 */
		overhead = le32_to_cpu(es->s_first_data_block);

		/*
		 * Add the overhead attributed to the superblock and
		 * block group descriptors.  If the sparse superblocks
		 * feature is turned on, then not all groups have this.
		 */
		for (i = 0; i < sbi->s_groups_count; i++)
			overhead += ext2_bg_has_super(sb, i) +
				ext2_bg_num_gdb(sb, i);

		/*
		 * Every block group has an inode bitmap, a block
		 * bitmap, and an inode table.
		 */
		overhead += (sbi->s_groups_count *
			     (2 + sbi->s_itb_per_group));
		sbi->s_overhead_last = overhead;
		smp_wmb();
		sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
	}

	buf->f_type = EXT2_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last;
	buf->f_bfree = ext2_count_free_blocks(sb);
	es->s_free_blocks_count = cpu_to_le32(buf->f_bfree);
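	/* f_bavail is unsigned, so if reserved blocks exceed the free
	 * count the subtraction below wraps; clamp it to zero */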
	buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
	if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = ext2_count_free_inodes(sb);
	es->s_free_inodes_count = cpu_to_le32(buf->f_ffree);
	buf->f_namelen = EXT2_NAME_LEN;
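	/* fold the 128-bit volume UUID into a 64-bit fsid by XORing its
	 * two halves */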
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
	spin_unlock(&sbi->s_lock);
	return 0;
}
Example #27
0
/* This function handles received packet. Necessary action is taken based on
 * cmd/event/data.
 */
static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
			    struct sk_buff *skb, u8 ep)
{
	struct device *dev = adapter->dev;
	u32 recv_type;
	__le32 tmp;
	int ret;

	if (adapter->hs_activated)
		mwifiex_process_hs_config(adapter);

	if (skb->len < INTF_HEADER_LEN) {
		dev_err(dev, "%s: invalid skb->len\n", __func__);
		return -1;
	}

	switch (ep) {
	case MWIFIEX_USB_EP_CMD_EVENT:
		dev_dbg(dev, "%s: EP_CMD_EVENT\n", __func__);
		skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN);
		recv_type = le32_to_cpu(tmp);
		skb_pull(skb, INTF_HEADER_LEN);

		switch (recv_type) {
		case MWIFIEX_USB_TYPE_CMD:
			if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
				dev_err(dev, "CMD: skb->len too large\n");
				ret = -1;
				goto exit_restore_skb;
			} else if (!adapter->curr_cmd) {
				dev_dbg(dev, "CMD: no curr_cmd\n");
				if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
					mwifiex_process_sleep_confirm_resp(
							adapter, skb->data,
							skb->len);
					ret = 0;
					goto exit_restore_skb;
				}
				ret = -1;
				goto exit_restore_skb;
			}

			adapter->curr_cmd->resp_skb = skb;
			adapter->cmd_resp_received = true;
			break;
		case MWIFIEX_USB_TYPE_EVENT:
			if (skb->len < sizeof(u32)) {
				dev_err(dev, "EVENT: skb->len too small\n");
				ret = -1;
				goto exit_restore_skb;
			}
			skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
			adapter->event_cause = le32_to_cpu(tmp);
			dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);

			if (skb->len > MAX_EVENT_SIZE) {
				dev_err(dev, "EVENT: event body too large\n");
				ret = -1;
				goto exit_restore_skb;
			}

			/* copy only the event body that follows the
			 * already-parsed event header */
			memcpy(adapter->event_body, skb->data +
			       MWIFIEX_EVENT_HEADER_LEN,
			       skb->len - MWIFIEX_EVENT_HEADER_LEN);

			adapter->event_received = true;
			adapter->event_skb = skb;
			break;
		default:
			dev_err(dev, "unknown recv_type %#x\n", recv_type);
			return -1;
		}
		break;
	case MWIFIEX_USB_EP_DATA:
		dev_dbg(dev, "%s: EP_DATA\n", __func__);
		if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) {
			dev_err(dev, "DATA: skb->len too large\n");
			return -1;
		}
		skb_queue_tail(&adapter->usb_rx_data_q, skb);
		adapter->data_received = true;
		break;
	default:
		dev_err(dev, "%s: unknown endpoint %#x\n", __func__, ep);
		return -1;
	}

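	/* -EINPROGRESS signals the completion handler that the skb is
	 * still in use (queued or held as a response), so it must not be
	 * freed or recycled as a receive buffer */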
	return -EINPROGRESS;

exit_restore_skb:
	/* The buffer will be reused for further cmds/events */
	skb_push(skb, INTF_HEADER_LEN);

	return ret;
}
Example #28
0
static int ext2_show_options(struct seq_file *seq, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	struct ext2_super_block *es = sbi->s_es;
	unsigned long def_mount_opts;

	spin_lock(&sbi->s_lock);
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);

	if (sbi->s_sb_block != 1)
		seq_printf(seq, ",sb=%lu", sbi->s_sb_block);
	if (test_opt(sb, MINIX_DF))
		seq_puts(seq, ",minixdf");
	if (test_opt(sb, GRPID))
		seq_puts(seq, ",grpid");
	if (!test_opt(sb, GRPID) && (def_mount_opts & EXT2_DEFM_BSDGROUPS))
		seq_puts(seq, ",nogrpid");
	if (!uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT2_DEF_RESUID)) ||
	    le16_to_cpu(es->s_def_resuid) != EXT2_DEF_RESUID) {
		seq_printf(seq, ",resuid=%u",
				from_kuid_munged(&init_user_ns, sbi->s_resuid));
	}
	if (!gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT2_DEF_RESGID)) ||
	    le16_to_cpu(es->s_def_resgid) != EXT2_DEF_RESGID) {
		seq_printf(seq, ",resgid=%u",
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
	}
	if (test_opt(sb, ERRORS_RO)) {
		int def_errors = le16_to_cpu(es->s_errors);

		if (def_errors == EXT2_ERRORS_PANIC ||
		    def_errors == EXT2_ERRORS_CONTINUE) {
			seq_puts(seq, ",errors=remount-ro");
		}
	}
	if (test_opt(sb, ERRORS_CONT))
		seq_puts(seq, ",errors=continue");
	if (test_opt(sb, ERRORS_PANIC))
		seq_puts(seq, ",errors=panic");
	if (test_opt(sb, NO_UID32))
		seq_puts(seq, ",nouid32");
	if (test_opt(sb, DEBUG))
		seq_puts(seq, ",debug");
	if (test_opt(sb, OLDALLOC))
		seq_puts(seq, ",oldalloc");

#ifdef CONFIG_EXT2_FS_XATTR
	if (test_opt(sb, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	if (!test_opt(sb, XATTR_USER) &&
	    (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
		seq_puts(seq, ",nouser_xattr");
	}
#endif

#ifdef CONFIG_EXT2_FS_POSIX_ACL
	if (test_opt(sb, POSIX_ACL))
		seq_puts(seq, ",acl");
	if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT2_DEFM_ACL))
		seq_puts(seq, ",noacl");
#endif

	if (test_opt(sb, NOBH))
		seq_puts(seq, ",nobh");

#if defined(CONFIG_QUOTA)
	if (sbi->s_mount_opt & EXT2_MOUNT_USRQUOTA)
		seq_puts(seq, ",usrquota");

	if (sbi->s_mount_opt & EXT2_MOUNT_GRPQUOTA)
		seq_puts(seq, ",grpquota");
#endif

#if defined(CONFIG_EXT2_FS_XIP)
	if (sbi->s_mount_opt & EXT2_MOUNT_XIP)
		seq_puts(seq, ",xip");
#endif

	if (!test_opt(sb, RESERVATION))
		seq_puts(seq, ",noreservation");

	spin_unlock(&sbi->s_lock);
	return 0;
}
Example #29
0
/* Download either STA or AP firmware into the card. */
static int
orinoco_dl_firmware(struct orinoco_private *priv,
		    const struct fw_info *fw,
		    int ap)
{
	/* Plug Data Area (PDA) */
	__le16 *pda;

	hermes_t *hw = &priv->hw;
	const struct firmware *fw_entry;
	const struct orinoco_fw_header *hdr;
	const unsigned char *first_block;
	const void *end;
	const char *firmware;
	const char *fw_err;
	struct device *dev = priv->dev;
	int err = 0;

	pda = kzalloc(fw->pda_size, GFP_KERNEL);
	if (!pda)
		return -ENOMEM;

	if (ap)
		firmware = fw->ap_fw;
	else
		firmware = fw->sta_fw;

	dev_dbg(dev, "Attempting to download firmware %s\n", firmware);

	/* Read current plug data */
	err = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
	dev_dbg(dev, "Read PDA returned %d\n", err);
	if (err)
		goto free;

	if (!orinoco_cached_fw_get(priv, false)) {
		err = request_firmware(&fw_entry, firmware, priv->dev);

		if (err) {
			dev_err(dev, "Cannot find firmware %s\n", firmware);
			err = -ENOENT;
			goto free;
		}
	} else
		fw_entry = orinoco_cached_fw_get(priv, false);

	hdr = (const struct orinoco_fw_header *) fw_entry->data;

	fw_err = validate_fw(hdr, fw_entry->size);
	if (fw_err) {
		dev_warn(dev, "Invalid firmware image detected (%s). "
			 "Aborting download\n", fw_err);
		err = -EINVAL;
		goto abort;
	}

	/* Enable aux port to allow programming */
	err = hw->ops->program_init(hw, le32_to_cpu(hdr->entry_point));
	dev_dbg(dev, "Program init returned %d\n", err);
	if (err != 0)
		goto abort;

	/* Program data */
	first_block = (fw_entry->data +
		       le16_to_cpu(hdr->headersize) +
		       le32_to_cpu(hdr->block_offset));
	end = fw_entry->data + fw_entry->size;

	err = hermes_program(hw, first_block, end);
	dev_dbg(dev, "Program returned %d\n", err);
	if (err != 0)
		goto abort;

	/* Update production data */
	first_block = (fw_entry->data +
		       le16_to_cpu(hdr->headersize) +
		       le32_to_cpu(hdr->pdr_offset));

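	/* &pda[fw->pda_size / sizeof(*pda)] below is the exclusive end of
	 * the PDA buffer read back from the card earlier */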
	err = hermes_apply_pda_with_defaults(hw, first_block, end, pda,
					     &pda[fw->pda_size / sizeof(*pda)]);
	dev_dbg(dev, "Apply PDA returned %d\n", err);
	if (err)
		goto abort;

	/* Tell card we've finished */
	err = hw->ops->program_end(hw);
	dev_dbg(dev, "Program end returned %d\n", err);
	if (err != 0)
		goto abort;

	/* Check if we're running */
	dev_dbg(dev, "hermes_present returned %d\n", hermes_present(hw));

abort:
	/* If we requested the firmware, release it. */
	if (!orinoco_cached_fw_get(priv, false))
		release_firmware(fw_entry);

free:
	kfree(pda);
	return err;
}
Example #30
0
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err = -EINVAL;
	int i;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

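	/* read and sanity-check the primary superblock; on failure retry
	 * with the redundant copy (the last argument selects which) */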
	if (validate_superblock(sb, &raw_super, &raw_super_buf, 0)) {
		brelse(raw_super_buf);
		if (validate_superblock(sb, &raw_super, &raw_super_buf, 1))
			goto free_sb_buf;
	}
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
	/* parse mount options */
	if (parse_options(sb, sbi, (char *)data))
		goto free_sb_buf;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_fs_info = sbi;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->write_inode);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	for (i = 0; i < NR_LOCK_TYPE; i++)
		mutex_init(&sbi->fs_lock[i]);
	sbi->por_doing = 0;
	spin_lock_init(&sbi->stat_lock);
	init_rwsem(&sbi->bio_sem);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_sb_buf;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_orphan_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan nodes, free them */
	err = -EINVAL;
	if (recover_orphan_inodes(sbi))
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD))
		recover_fsync_data(sbi);

	/* After POR, we can run background GC thread */
	err = start_gc_thread(sbi);
	if (err)
		goto fail;

	err = f2fs_build_stats(sbi);
	if (err)
		goto fail;

	return 0;
fail:
	stop_gc_thread(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);
	return err;
}