Example #1
0
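/*
 * Create the child socket for a TCP Fast Open request: build it from the
 * listener via syn_recv_sock(), account it on the fastopen queue, seed the
 * send/receive windows from the SYN, queue the SYN data and arm the SYN-ACK
 * retransmit timer.
 */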
static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct dst_entry *dst,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	atomic_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_metrics(child);
	tcp_init_buffer_space(child);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into listener accept queue.
	 */
	return child;
}
Example #2
0
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext3_free_inode (handle_t *handle, struct inode * inode)
{
	struct super_block * sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	unsigned long block_group;
	unsigned long bit;
	struct ext3_group_desc * gdp;
	struct ext3_super_block * es;
	struct ext3_sb_info *sbi;
	int fatal = 0, err;

	if (atomic_read(&inode->i_count) > 1) {
		printk ("ext3_free_inode: inode has count=%d\n",
					atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk ("ext3_free_inode: inode has nlink=%d\n",
			inode->i_nlink);
		return;
	}
	if (!sb) {
		printk("ext3_free_inode: inode on nonexistent device\n");
		return;
	}
	sbi = EXT3_SB(sb);

	ino = inode->i_ino;
	ext3_debug ("freeing inode %lu\n", ino);

	is_directory = S_ISDIR(inode->i_mode);

	es = EXT3_SB(sb)->s_es;
	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext3_error (sb, "ext3_free_inode",
			    "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
	bitmap_bh = read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext3_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
					bit, bitmap_bh->b_data))
		ext3_error (sb, "ext3_free_inode",
			      "bit already cleared for inode %lu", ino);
	else {
		gdp = ext3_get_group_desc (sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext3_journal_get_write_access(handle, bh2);
		if (fatal) goto error_return;

		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			le16_add_cpu(&gdp->bg_free_inodes_count, 1);
			if (is_directory)
				le16_add_cpu(&gdp->bg_used_dirs_count, -1);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);

		}
		BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, bh2);
		if (!fatal) fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
	if (!fatal)
		fatal = err;

error_return:
	brelse(bitmap_bh);
	ext3_std_error(sb, fatal);
}
Example #3
0
/*
 * User open of the XPMEM driver. Called whenever /dev/xpmem is opened.
 * Create a struct xpmem_thread_group structure for the specified thread group.
 * And add the structure to the tg hash table.
 */
static int
xpmem_open(struct inode *inode, struct file *file)
{
	struct xpmem_thread_group *tg;
	int index;
	struct proc_dir_entry *unpin_entry;
	char tgid_string[XPMEM_TGID_STRING_LEN];

	/* if this has already been done, just return silently */
	tg = xpmem_tg_ref_by_tgid(current->tgid);
	if (!IS_ERR(tg)) {
		xpmem_tg_deref(tg);
		return 0;
	}

	/* create tg */
	tg = kzalloc(sizeof(struct xpmem_thread_group), GFP_KERNEL);
	if (tg == NULL) {
		return -ENOMEM;
	}

	tg->lock = SPIN_LOCK_UNLOCKED;
	tg->tgid = current->tgid;
	tg->uid = current->cred->uid;
	tg->gid = current->cred->gid;
	atomic_set(&tg->uniq_segid, 0);
	atomic_set(&tg->uniq_apid, 0);
	atomic_set(&tg->n_pinned, 0);
	tg->addr_limit = TASK_SIZE;
	tg->seg_list_lock = RW_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&tg->seg_list);
	INIT_LIST_HEAD(&tg->tg_hashlist);
	atomic_set(&tg->n_recall_PFNs, 0);
	mutex_init(&tg->recall_PFNs_mutex);
	init_waitqueue_head(&tg->block_recall_PFNs_wq);
	init_waitqueue_head(&tg->allow_recall_PFNs_wq);
	tg->mmu_initialized = 0;
	tg->mmu_unregister_called = 0;
	tg->mm = current->mm;

	/* Register MMU notifier callbacks */
	if (xpmem_mmu_notifier_init(tg) != 0) {
		kfree(tg);
		return -EFAULT;
	}
	
	/* create and initialize struct xpmem_access_permit hashtable */
	tg->ap_hashtable = kzalloc(sizeof(struct xpmem_hashlist) *
				     XPMEM_AP_HASHTABLE_SIZE, GFP_KERNEL);
	if (tg->ap_hashtable == NULL) {
		xpmem_mmu_notifier_unlink(tg);
		kfree(tg);
		return -ENOMEM;
	}
	for (index = 0; index < XPMEM_AP_HASHTABLE_SIZE; index++) {
		tg->ap_hashtable[index].lock = RW_LOCK_UNLOCKED;
		INIT_LIST_HEAD(&tg->ap_hashtable[index].list);
	}

	snprintf(tgid_string, XPMEM_TGID_STRING_LEN, "%d", current->tgid);
	spin_lock(&xpmem_unpin_procfs_lock);
	unpin_entry = create_proc_entry(tgid_string, 0644,
					xpmem_unpin_procfs_dir);
	spin_unlock(&xpmem_unpin_procfs_lock);
	if (unpin_entry != NULL) {
		unpin_entry->data = (void *)(unsigned long)current->tgid;
		unpin_entry->write_proc = xpmem_unpin_procfs_write;
		unpin_entry->read_proc = xpmem_unpin_procfs_read;
		//unpin_entry->owner = THIS_MODULE;
		unpin_entry->uid = current->cred->uid;
		unpin_entry->gid = current->cred->gid;
	}

	xpmem_tg_not_destroyable(tg);

	/* add tg to its hash list */
	index = xpmem_tg_hashtable_index(tg->tgid);
	write_lock(&xpmem_my_part->tg_hashtable[index].lock);
	list_add_tail(&tg->tg_hashlist,
		      &xpmem_my_part->tg_hashtable[index].list);
	write_unlock(&xpmem_my_part->tg_hashtable[index].lock);

	/*
	 * Increment 'usage' and 'mm->mm_users' for the current task's thread
	 * group leader. This ensures that both its task_struct and mm_struct
	 * will still be around when our thread group exits. (The Linux kernel
	 * normally tears down the mm_struct prior to calling a module's
	 * 'flush' function.) Since all XPMEM thread groups must go through
	 * this path, this extra reference to mm_users also allows us to
	 * directly inc/dec mm_users in xpmem_ensure_valid_PFNs() and avoid
	 * mmput() which has a scaling issue with the mmlist_lock.
	 */
	get_task_struct(current->group_leader);
	tg->group_leader = current->group_leader;
	BUG_ON(current->mm != current->group_leader->mm);
	atomic_inc(&current->group_leader->mm->mm_users);

	return 0;
}
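/*
 * DSI controller interrupt handler: read and acknowledge DSI_INTR_CTRL (and
 * the left controller's copy in broadcast mode), report errors, and complete
 * the waiters for video, command-DMA, MDP and BTA done events.
 */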
irqreturn_t mdss_dsi_isr(int irq, void *ptr)
{
	u32 isr;
	struct mdss_dsi_ctrl_pdata *ctrl =
			(struct mdss_dsi_ctrl_pdata *)ptr;

	if (!ctrl->ctrl_base)
		pr_err("%s:%d DSI base adr no Initialized",
				       __func__, __LINE__);

	isr = MIPI_INP(ctrl->ctrl_base + 0x0110);/* DSI_INTR_CTRL */
	MIPI_OUTP(ctrl->ctrl_base + 0x0110, isr);

	if (ctrl->shared_pdata.broadcast_enable)
		if ((ctrl->panel_data.panel_info.pdest == DISPLAY_2)
		    && (left_ctrl_pdata != NULL)) {
			u32 isr0;
			isr0 = MIPI_INP(left_ctrl_pdata->ctrl_base
						+ 0x0110);/* DSI_INTR_CTRL */
			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0110, isr0);
		}

	pr_debug("%s: ndx=%d isr=%x\n", __func__, ctrl->ndx, isr);

	if (isr & DSI_INTR_ERROR) {
#ifdef F_WA_WATCHDOG_DURING_BOOTUP
		if (ctrl->octa_blck_set)
#endif
		pr_err("%s: ndx=%d isr=%x\n", __func__, ctrl->ndx, isr);
		mdss_dsi_error(ctrl);
	}

	if (isr & DSI_INTR_VIDEO_DONE) {
		spin_lock(&ctrl->mdp_lock);
		mdss_dsi_disable_irq_nosync(ctrl, DSI_VIDEO_TERM);
		complete(&ctrl->video_comp);
		spin_unlock(&ctrl->mdp_lock);
	}

	if (isr & DSI_INTR_CMD_DMA_DONE) {
		spin_lock(&ctrl->mdp_lock);
		mdss_dsi_disable_irq_nosync(ctrl, DSI_CMD_TERM);
		complete(&ctrl->dma_comp);
		spin_unlock(&ctrl->mdp_lock);
	}

	if (isr & DSI_INTR_CMD_MDP_DONE) {
		spin_lock(&ctrl->mdp_lock);
		ctrl->mdp_busy = false;
		mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
		complete(&ctrl->mdp_comp);
		spin_unlock(&ctrl->mdp_lock);
	}

	if (isr & DSI_INTR_BTA_DONE) {
		spin_lock(&ctrl->mdp_lock);
		mdss_dsi_disable_irq_nosync(ctrl, DSI_BTA_TERM);
		complete(&ctrl->bta_comp);
		spin_unlock(&ctrl->mdp_lock);
	}

	return IRQ_HANDLED;
}
Example #5
0
static long
fd_link_ioctl (struct file *f, unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct task_struct *task_target = NULL;
	struct file *file;
	struct files_struct *files;
	struct fdtable *fdt;
	struct fd_copy fd_copy;

	switch (ioctl)
	{
		case FD_COPY:
			if (copy_from_user (&fd_copy, argp, sizeof (struct fd_copy)))
				return -EFAULT;

			/*
			 * Find the task struct for the target pid
			 */
			task_target =
				pid_task (find_vpid (fd_copy.target_pid), PIDTYPE_PID);
			if (task_target == NULL)
			{
				printk (KERN_DEBUG "Failed to get mem ctx for target pid\n");
				return -EFAULT;
			}

			files = get_files_struct (current);
			if (files == NULL)
			{
				printk (KERN_DEBUG "Failed to get files struct\n");
				return -EFAULT;
			}

			rcu_read_lock ();
			file = fcheck_files (files, fd_copy.source_fd);
			if (file)
			{
				if (file->f_mode & FMODE_PATH
						|| !atomic_long_inc_not_zero (&file->f_count))
					file = NULL;
			}
			rcu_read_unlock ();
			put_files_struct (files);

			if (file == NULL)
			{
				printk (KERN_DEBUG "Failed to get file from source pid\n");
				return 0;
			}

			/*
			 * Release the existing fd in the source process
			 */
			spin_lock (&files->file_lock);
			filp_close (file, files);
			fdt = files_fdtable (files);
			fdt->fd[fd_copy.source_fd] = NULL;
			spin_unlock (&files->file_lock);

			/*
			 * Find the file struct associated with the target fd.
			 */

			files = get_files_struct (task_target);
			if (files == NULL)
			{
				printk (KERN_DEBUG "Failed to get files struct\n");
				return -EFAULT;
			}

			rcu_read_lock ();
			file = fcheck_files (files, fd_copy.target_fd);
			if (file)
			{
				if (file->f_mode & FMODE_PATH
						|| !atomic_long_inc_not_zero (&file->f_count))
					file = NULL;
			}
			rcu_read_unlock ();
			put_files_struct (files);

			if (file == NULL)
			{
				printk (KERN_DEBUG "Failed to get file from target pid\n");
				return 0;
			}


			/*
			 * Install the file struct from the target process into
			 * the file descriptor of the source process.
			 */

			fd_install (fd_copy.source_fd, file);

			return 0;

		default:
			return -ENOIOCTLCMD;
	}
}
Example #6
0
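/*
 * Reclaim completed transmit buffers: walk the dirty TX ring, unmap the DMA
 * buffers, update error and collision statistics, store PTP timestamps when
 * configured, free the skbs and wake the queue if it had been stopped.
 */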
static void
fec_enet_tx(struct net_device *ndev)
{
	struct	fec_enet_private *fep;
	struct  fec_ptp_private *fpp;
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;

	fep = netdev_priv(ndev);
	fpp = fep->ptp_priv;
	spin_lock(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		if (bdp->cbd_bufaddr)
			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		if (!skb)
			break;
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
		}

		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

#if defined(CONFIG_ENHANCED_BD)
		if (fep->ptimer_present) {
			if (bdp->cbd_esc & BD_ENET_TX_TS)
				fec_ptp_store_txstamp(fpp, skb, bdp);
		}
#elif defined(CONFIG_IN_BAND)
		if (fep->ptimer_present) {
			if (status & BD_ENET_TX_PTP)
				fec_ptp_store_txstamp(fpp, skb, bdp);
		}
#endif

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(ndev))
				netif_wake_queue(ndev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock(&fep->hw_lock);
}
Example #7
0
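/*
 * Map a POSIX uid/gid to a cifs_sid: find or insert the id node in the
 * per-type rb-tree under cidlock, then either copy the cached mapping or
 * upcall via request_key(), serializing concurrent lookups with the
 * SID_ID_PENDING bit.
 */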
static int
id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
{
	int rc = 0;
	struct key *sidkey;
	const struct cred *saved_cred;
	struct cifs_sid *lsid;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	if (sidtype == SIDOWNER) {
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -EINVAL;

	spin_lock(cidlock);
	psidid = sid_rb_search(cidtree, cid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		spin_lock(cidlock);
		psidid = sid_rb_search(cidtree, cid);
		if (psidid) { /* node happened to get inserted meanwhile */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			sid_rb_insert(cidtree, cid, &psidid,
					sidtype == SIDOWNER ? "oi:" : "gi:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock
	 * and it is OK to do so in this case, shrinker will not erase
	 * this node until all references are put and we do not access
	 * any fields of the node after a reference is put.
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
		psidid->time = jiffies; /* update ts for accessing */
		goto id_sid_out;
	}

	if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
		rc = -EINVAL;
		goto id_sid_out;
	}

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		saved_cred = override_creds(root_cred);
		sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(sidkey)) {
			rc = -EINVAL;
			cFYI(1, "%s: Can't map and id to a SID", __func__);
		} else {
			lsid = (struct cifs_sid *)sidkey->payload.data;
			memcpy(&psidid->sid, lsid,
				sidkey->datalen < sizeof(struct cifs_sid) ?
				sidkey->datalen : sizeof(struct cifs_sid));
			memcpy(ssid, &psidid->sid,
				sidkey->datalen < sizeof(struct cifs_sid) ?
				sidkey->datalen : sizeof(struct cifs_sid));
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(sidkey);
			kfree(psidid->sidstr);
		}
		psidid->time = jiffies; /* update ts for accessing */
		revert_creds(saved_cred);
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount;
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
		else
			rc = -EINVAL;
	}
id_sid_out:
	--psidid->refcount;
	return rc;
}
Example #8
0
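/*
 * Return the gdbio state registered for the given GDB console name,
 * taking htable_lock around the hash-table lookup.
 */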
static struct gdbio_state *gdbio_htable_lookup(char *gdbcons){
	spin_lock(&htable_lock); 
	struct gdbio_state *gs = htable_lookup(gdbio_htable, gdbcons);
	spin_unlock(&htable_lock);
	return gs;
}
Example #9
0
/*
 * Find the corresponding entry in the search. Note that the SMB server returns
 * search entries for . and .. which complicates logic here if we choose to
 * parse for them and we do not assume that they are located in the findfirst
 * return buffer. We start counting in the buffer with entry 2 and increment for
 * every entry (do not increment for . or .. entry).
 */
static int
find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
		struct file *file, char **current_entry, int *num_to_ret)
{
	__u16 search_flags;
	int rc = 0;
	int pos_in_buf = 0;
	loff_t first_entry_in_buffer;
	loff_t index_to_find = pos;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	/* check if index in the buffer */

	if (!server->ops->query_dir_first || !server->ops->query_dir_next)
		return -ENOSYS;

	if ((cfile == NULL) || (current_entry == NULL) || (num_to_ret == NULL))
		return -ENOENT;

	*current_entry = NULL;
	first_entry_in_buffer = cfile->srch_inf.index_of_last_entry -
					cfile->srch_inf.entries_in_buffer;

	/*
	 * If the first entry in the buffer is zero then this is the first
	 * buffer in the search response data, which means it is likely that
	 * . and .. will be in this buffer, although some servers do not
	 * return . and .. for the root of a drive and for those we need
	 * to start two entries earlier.
	 */

	dump_cifs_file_struct(file, "In fce ");
	if (((index_to_find < cfile->srch_inf.index_of_last_entry) &&
	     is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) {
		/* close and restart search */
		cifs_dbg(FYI, "search backing up - close and restart search\n");
		spin_lock(&cifs_file_list_lock);
		if (server->ops->dir_needs_close(cfile)) {
			cfile->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			if (server->ops->close)
				server->ops->close(xid, tcon, &cfile->fid);
		} else
			spin_unlock(&cifs_file_list_lock);
		if (cfile->srch_inf.ntwrk_buf_start) {
			cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n");
			if (cfile->srch_inf.smallBuf)
				cifs_small_buf_release(cfile->srch_inf.
						ntwrk_buf_start);
			else
				cifs_buf_release(cfile->srch_inf.
						ntwrk_buf_start);
			cfile->srch_inf.ntwrk_buf_start = NULL;
		}
		rc = initiate_cifs_search(xid, file);
		if (rc) {
			cifs_dbg(FYI, "error %d reinitiating a search on rewind\n",
				 rc);
			return rc;
		}
		/* FindFirst/Next set last_entry to NULL on malformed reply */
		if (cfile->srch_inf.last_entry)
			cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
	}

	search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
	if (backup_cred(cifs_sb))
		search_flags |= CIFS_SEARCH_BACKUP_SEARCH;

	while ((index_to_find >= cfile->srch_inf.index_of_last_entry) &&
	       (rc == 0) && !cfile->srch_inf.endOfSearch) {
		cifs_dbg(FYI, "calling findnext2\n");
		rc = server->ops->query_dir_next(xid, tcon, &cfile->fid,
						 search_flags,
						 &cfile->srch_inf);
		/* FindFirst/Next set last_entry to NULL on malformed reply */
		if (cfile->srch_inf.last_entry)
			cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
		if (rc)
			return -ENOENT;
	}
	if (index_to_find < cfile->srch_inf.index_of_last_entry) {
		/* we found the buffer that contains the entry */
		/* scan and find it */
		int i;
		char *cur_ent;
		char *end_of_smb = cfile->srch_inf.ntwrk_buf_start +
			server->ops->calc_smb_size(
					cfile->srch_inf.ntwrk_buf_start);

		cur_ent = cfile->srch_inf.srch_entries_start;
		first_entry_in_buffer = cfile->srch_inf.index_of_last_entry
					- cfile->srch_inf.entries_in_buffer;
		pos_in_buf = index_to_find - first_entry_in_buffer;
		cifs_dbg(FYI, "found entry - pos_in_buf %d\n", pos_in_buf);

		for (i = 0; (i < (pos_in_buf)) && (cur_ent != NULL); i++) {
			/* go entry by entry figuring out which is first */
			cur_ent = nxt_dir_entry(cur_ent, end_of_smb,
						cfile->srch_inf.info_level);
		}
		if ((cur_ent == NULL) && (i < pos_in_buf)) {
			/* BB fixme - check if we should flag this error */
			cifs_dbg(VFS, "reached end of buf searching for pos in buf %d index to find %lld rc %d\n",
				 pos_in_buf, index_to_find, rc);
		}
		rc = 0;
		*current_entry = cur_ent;
	} else {
		cifs_dbg(FYI, "index not in buffer - could not findnext into it\n");
		return 0;
	}

	if (pos_in_buf >= cfile->srch_inf.entries_in_buffer) {
		cifs_dbg(FYI, "can not return entries pos_in_buf beyond last\n");
		*num_to_ret = 0;
	} else
		*num_to_ret = cfile->srch_inf.entries_in_buffer - pos_in_buf;

	return rc;
}
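/*
 * Allocate a new inode in the same filesystem as @dir: create the on-disk
 * entry in the ifile, inherit flags from the parent directory, assign a
 * generation number under ns_next_gen_lock and hash the in-core inode.
 * Returns the new inode or an ERR_PTR() on failure.
 */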
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occur. When supporting
				    nilfs_init_acl(), proper cancellation of
				    above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	clear_nlink(inode);
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}
Example #11
0
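/* Remove a gdbio state from the hash table, serialized by htable_lock. */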
static void gdbio_htable_del(struct gdbio_state *gs){
	spin_lock(&htable_lock); 
	htable_del(gdbio_htable, gs);
	spin_unlock(&htable_lock);
}
Example #12
0
/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	const struct netdev_queue *txq = q->dev_queue;
	struct sk_buff *skb = NULL;

	*packets = 1;
	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
		spinlock_t *lock = NULL;

		if (q->flags & TCQ_F_NOLOCK) {
			lock = qdisc_lock(q);
			spin_lock(lock);
		}

		skb = skb_peek(&q->gso_skb);

		/* skb may be null if another cpu pulls gso_skb off in between
		 * empty check and lock.
		 */
		if (!skb) {
			if (lock)
				spin_unlock(lock);
			goto validate;
		}

		/* skbs in gso_skb were already validated */
		*validate = false;
		if (xfrm_offload(skb))
			*validate = true;
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->gso_skb);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
		}
		if (lock)
			spin_unlock(lock);
		goto trace;
	}
validate:
	*validate = true;

	if ((q->flags & TCQ_F_ONETXQUEUE) &&
	    netif_xmit_frozen_or_stopped(txq))
		return skb;

	skb = qdisc_dequeue_skb_bad_txq(q);
	if (unlikely(skb))
		goto bulk;
	skb = q->dequeue(q);
	if (skb) {
bulk:
		if (qdisc_may_bulk(q))
			try_bulk_dequeue_skb(q, skb, txq, packets);
		else
			try_bulk_dequeue_skb_slow(q, skb, packets);
	}
trace:
	trace_qdisc_dequeue(q, txq, *packets, skb);
	return skb;
}
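/*
 * Set (or re-set) the AEAD session key.  On rekeying, wipe the existing
 * content descriptors and firmware request templates; on first use, pick a
 * QAT instance for the current node and allocate DMA-coherent encrypt and
 * decrypt content descriptors before building the session from the key.
 */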
static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
                          unsigned int keylen)
{
    struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
    struct device *dev;

    spin_lock(&ctx->lock);
    if (ctx->enc_cd) {
        /* rekeying */
        dev = &GET_DEV(ctx->inst->accel_dev);
        memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
        memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
        memzero_explicit(&ctx->enc_fw_req_tmpl,
                         sizeof(struct icp_qat_fw_la_bulk_req));
        memzero_explicit(&ctx->dec_fw_req_tmpl,
                         sizeof(struct icp_qat_fw_la_bulk_req));
    } else {
        /* new key */
        int node = get_current_node();
        struct qat_crypto_instance *inst =
            qat_crypto_get_instance_node(node);
        if (!inst) {
            spin_unlock(&ctx->lock);
            return -EINVAL;
        }

        dev = &GET_DEV(inst->accel_dev);
        ctx->inst = inst;
        ctx->enc_cd = dma_zalloc_coherent(dev,
                                          sizeof(struct qat_alg_cd),
                                          &ctx->enc_cd_paddr,
                                          GFP_ATOMIC);
        if (!ctx->enc_cd) {
            spin_unlock(&ctx->lock);
            return -ENOMEM;
        }
        ctx->dec_cd = dma_zalloc_coherent(dev,
                                          sizeof(struct qat_alg_cd),
                                          &ctx->dec_cd_paddr,
                                          GFP_ATOMIC);
        if (!ctx->dec_cd) {
            spin_unlock(&ctx->lock);
            goto out_free_enc;
        }
    }
    spin_unlock(&ctx->lock);
    if (qat_alg_init_sessions(ctx, key, keylen))
        goto out_free_all;

    return 0;

out_free_all:
    memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
    dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                      ctx->dec_cd, ctx->dec_cd_paddr);
    ctx->dec_cd = NULL;
out_free_enc:
    memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
    dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                      ctx->enc_cd, ctx->enc_cd_paddr);
    ctx->enc_cd = NULL;
    return -ENOMEM;
}
Example #14
0
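/*
 * Receive one VNIC bypass packet: resolve the vport from the VESW id,
 * drop it (with statistics) if the netdev is down or the RX queue is full,
 * otherwise copy it into an skb, queue it on the per-context RX queue and
 * schedule NAPI.
 */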
void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
{
	struct hfi1_devdata *dd = packet->rcd->dd;
	struct hfi1_vnic_vport_info *vinfo = NULL;
	struct hfi1_vnic_rx_queue *rxq;
	struct sk_buff *skb;
	int l4_type, vesw_id = -1;
	u8 q_idx;

	l4_type = HFI1_GET_L4_TYPE(packet->ebuf);
	if (likely(l4_type == OPA_VNIC_L4_ETHR)) {
		vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
		vinfo = idr_find(&dd->vnic.vesw_idr, vesw_id);

		/*
		 * In case of invalid vesw id, count the error on
		 * the first available vport.
		 */
		if (unlikely(!vinfo)) {
			struct hfi1_vnic_vport_info *vinfo_tmp;
			int id_tmp = 0;

			vinfo_tmp =  idr_get_next(&dd->vnic.vesw_idr, &id_tmp);
			if (vinfo_tmp) {
				spin_lock(&vport_cntr_lock);
				vinfo_tmp->stats[0].netstats.rx_nohandler++;
				spin_unlock(&vport_cntr_lock);
			}
		}
	}

	if (unlikely(!vinfo)) {
		dd_dev_warn(dd, "vnic rcv err: l4 %d vesw id %d ctx %d\n",
			    l4_type, vesw_id, packet->rcd->ctxt);
		return;
	}

	q_idx = packet->rcd->vnic_q_idx;
	rxq = &vinfo->rxq[q_idx];
	if (unlikely(!netif_oper_up(vinfo->netdev))) {
		vinfo->stats[q_idx].rx_drop_state++;
		skb_queue_purge(&rxq->skbq);
		return;
	}

	if (unlikely(skb_queue_len(&rxq->skbq) > HFI1_VNIC_RCV_Q_SIZE)) {
		vinfo->stats[q_idx].netstats.rx_fifo_errors++;
		return;
	}

	skb = netdev_alloc_skb(vinfo->netdev, packet->tlen);
	if (unlikely(!skb)) {
		vinfo->stats[q_idx].netstats.rx_fifo_errors++;
		return;
	}

	memcpy(skb->data, packet->ebuf, packet->tlen);
	skb_put(skb, packet->tlen);
	skb_queue_tail(&rxq->skbq, skb);

	if (napi_schedule_prep(&rxq->napi)) {
		v_dbg("napi %d scheduling\n", q_idx);
		__napi_schedule(&rxq->napi);
	}
}
Example #15
0
/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = the_dev;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep, link->in);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep, link->out);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		link->ioport = dev;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
Example #16
0
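/*
 * Common getxattr/listxattr path: filter the xattr type, serve POSIX ACLs
 * from the cached lli_posix_acl where possible, and otherwise fetch the
 * value from the client xattr cache or from the MDS via md_getxattr().
 */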
static
int ll_getxattr_common(struct inode *inode, const char *name,
                       void *buffer, size_t size, __u64 valid)
{
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct ptlrpc_request *req = NULL;
        struct mdt_body *body;
        int xattr_type, rc;
        void *xdata;
        struct obd_capa *oc;
        struct rmtacl_ctl_entry *rce = NULL;
	struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
	       PFID(ll_inode2fid(inode)), inode);

        /* listxattr has slightly different behavior from that of ext3:
         * without 'user_xattr', ext3 lists all xattr names but filters
         * out "^user..*"; we list them all for simplicity.
         */
        if (!name) {
                xattr_type = XATTR_OTHER_T;
                goto do_getxattr;
        }

        xattr_type = get_xattr_type(name);
        rc = xattr_type_filter(sbi, xattr_type);
        if (rc)
                RETURN(rc);

        /* b15587: ignore security.capability xattr for now */
        if ((xattr_type == XATTR_SECURITY_T &&
            strcmp(name, "security.capability") == 0))
                RETURN(-ENODATA);

        /* LU-549:  Disable security.selinux when selinux is disabled */
        if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
            strcmp(name, "security.selinux") == 0)
                RETURN(-EOPNOTSUPP);

#ifdef CONFIG_FS_POSIX_ACL
	if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
	    (xattr_type == XATTR_ACL_ACCESS_T ||
	    xattr_type == XATTR_ACL_DEFAULT_T)) {
		rce = rct_search(&sbi->ll_rct, current_pid());
		if (rce == NULL ||
		    (rce->rce_ops != RMT_LSETFACL &&
		    rce->rce_ops != RMT_LGETFACL &&
		    rce->rce_ops != RMT_RSETFACL &&
		    rce->rce_ops != RMT_RGETFACL))
			RETURN(-EOPNOTSUPP);
	}

        /* The posix acl is under protection of the LOOKUP lock. When we get
         * here we have just done path resolution to the target inode, so
         * there is a good chance that the cached ACL is up to date.
         */
        if (xattr_type == XATTR_ACL_ACCESS_T &&
            !(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {

                struct posix_acl *acl;

		spin_lock(&lli->lli_lock);
		acl = posix_acl_dup(lli->lli_posix_acl);
		spin_unlock(&lli->lli_lock);

                if (!acl)
                        RETURN(-ENODATA);

                rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
                posix_acl_release(acl);
                RETURN(rc);
        }
        if (xattr_type == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode))
                RETURN(-ENODATA);
#endif

do_getxattr:
	if (sbi->ll_xattr_cache_enabled && xattr_type != XATTR_ACL_ACCESS_T) {
		rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
		if (rc == -EAGAIN)
			goto getxattr_nocache;
		if (rc < 0)
			GOTO(out_xattr, rc);

		/* Add "system.posix_acl_access" to the list */
		if (lli->lli_posix_acl != NULL && valid & OBD_MD_FLXATTRLS) {
			if (size == 0) {
				rc += sizeof(XATTR_NAME_ACL_ACCESS);
			} else if (size - rc >= sizeof(XATTR_NAME_ACL_ACCESS)) {
				memcpy(buffer + rc, XATTR_NAME_ACL_ACCESS,
				       sizeof(XATTR_NAME_ACL_ACCESS));
				rc += sizeof(XATTR_NAME_ACL_ACCESS);
			} else {
				GOTO(out_xattr, rc = -ERANGE);
			}
		}
	} else {
getxattr_nocache:
		oc = ll_mdscapa_get(inode);
		rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
				valid | (rce ? rce_ops2valid(rce->rce_ops) : 0),
				name, NULL, 0, size, 0, &req);
		capa_put(oc);

		if (rc < 0)
			GOTO(out_xattr, rc);

		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
		LASSERT(body);

		/* only detect the xattr size */
		if (size == 0)
			GOTO(out, rc = body->eadatasize);

		if (size < body->eadatasize) {
			CERROR("server bug: replied size %u > %u\n",
				body->eadatasize, (int)size);
			GOTO(out, rc = -ERANGE);
		}

		if (body->eadatasize == 0)
			GOTO(out, rc = -ENODATA);

		/* do not need swab xattr data */
		xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
							body->eadatasize);
		if (!xdata)
			GOTO(out, rc = -EFAULT);

		memcpy(buffer, xdata, body->eadatasize);
		rc = body->eadatasize;
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (rce != NULL && rce->rce_ops == RMT_LSETFACL) {
		ext_acl_xattr_header *acl;

		acl = lustre_posix_acl_xattr_2ext(buffer, rc);
		if (IS_ERR(acl))
			GOTO(out, rc = PTR_ERR(acl));

		rc = ee_add(&sbi->ll_et, current_pid(), ll_inode2fid(inode),
			    xattr_type, acl);
		if (unlikely(rc < 0)) {
			lustre_ext_acl_xattr_free(acl);
			GOTO(out, rc);
		}
	}
#endif
	EXIT;

out_xattr:
	if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
		LCONSOLE_INFO("%s: disabling user_xattr feature because "
				"it is not supported on the server: rc = %d\n",
				ll_get_fsname(inode->i_sb, NULL, 0), rc);
		sbi->ll_flags &= ~LL_SBI_USER_XATTR;
	}
out:
        ptlrpc_req_finished(req);
        return rc;
}
Example #17
0
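/*
 * Encode an NFS file handle for @dentry, choosing a 32- or 64-bit inode
 * format (and, when connectable, the parent-carrying variant) based on the
 * mount options and the space the caller provided in *max_len.
 */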
STATIC int
xfs_fs_encode_fh(
	struct dentry		*dentry,
	__u32			*fh,
	int			*max_len,
	int			connectable)
{
	struct fid		*fid = (struct fid *)fh;
	struct xfs_fid64	*fid64 = (struct xfs_fid64 *)fh;
	struct inode		*inode = dentry->d_inode;
	int			fileid_type;
	int			len;

	/* Directories don't need their parent encoded, they have ".." */
	if (S_ISDIR(inode->i_mode) || !connectable)
		fileid_type = FILEID_INO32_GEN;
	else
		fileid_type = FILEID_INO32_GEN_PARENT;

	/*
	 * If the filesystem may contain 64bit inode numbers, we need
	 * to use larger file handles that can represent them.
	 *
	 * While we only allocate inodes that do not fit into 32 bits any
	 * large enough filesystem may contain them, thus the slightly
	 * confusing looking conditional below.
	 */
	if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS) ||
	    (XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_32BITINODES))
		fileid_type |= XFS_FILEID_TYPE_64FLAG;

	/*
	 * Only encode if there is enough space given.  In practice
	 * this means we can't export a filesystem with 64bit inodes
	 * over NFSv2 with the subtree_check export option; the other
	 * seven combinations work.  The real answer is "don't use v2".
	 */
	len = xfs_fileid_length(fileid_type);
	if (*max_len < len) {
		*max_len = len;
		return 255;
	}
	*max_len = len;

	switch (fileid_type) {
	case FILEID_INO32_GEN_PARENT:
		spin_lock(&dentry->d_lock);
		fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
		fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
		spin_unlock(&dentry->d_lock);
		/*FALLTHRU*/
	case FILEID_INO32_GEN:
		fid->i32.ino = inode->i_ino;
		fid->i32.gen = inode->i_generation;
		break;
	case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
		spin_lock(&dentry->d_lock);
		fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
		fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
		spin_unlock(&dentry->d_lock);
		/*FALLTHRU*/
	case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
		fid64->ino = inode->i_ino;
		fid64->gen = inode->i_generation;
		break;
	}

	return fileid_type;
}
Example #18
0
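/*
 * Retarget an SN2 device interrupt to the first CPU in @mask: ask the PROM
 * to free the old interrupt resources and allocate new ones on the target
 * node, then swap the new sn_irq_info into the RCU-protected list.
 */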
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
	int cpuid, cpuphys;

	cpuid = first_cpu(mask);
	cpuphys = cpu_physical_id(cpuid);

	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
				 sn_irq_lh[irq], list) {
		u64 bridge;
		int local_widget, status;
		nasid_t local_nasid;
		struct sn_irq_info *new_irq_info;
		struct sn_pcibus_provider *pci_provider;

		new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
		if (new_irq_info == NULL)
			break;
		memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));

		bridge = (u64) new_irq_info->irq_bridge;
		if (!bridge) {
			kfree(new_irq_info);
			break; /* irq is not a device interrupt */
		}

		local_nasid = NASID_GET(bridge);

		if (local_nasid & 1)
			local_widget = TIO_SWIN_WIDGETNUM(bridge);
		else
			local_widget = SWIN_WIDGETNUM(bridge);

		/* Free the old PROM new_irq_info structure */
		sn_intr_free(local_nasid, local_widget, new_irq_info);
		/* Update kernels new_irq_info with new target info */
		unregister_intr_pda(new_irq_info);

		/* allocate a new PROM new_irq_info struct */
		status = sn_intr_alloc(local_nasid, local_widget,
				       __pa(new_irq_info), irq,
				       cpuid_to_nasid(cpuid),
				       cpuid_to_slice(cpuid));

		/* SAL call failed */
		if (status) {
			kfree(new_irq_info);
			break;
		}

		new_irq_info->irq_cpuid = cpuid;
		register_intr_pda(new_irq_info);

		pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
		if (pci_provider && pci_provider->target_interrupt)
			(pci_provider->target_interrupt)(new_irq_info);

		spin_lock(&sn_irq_info_lock);
		list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
		spin_unlock(&sn_irq_info_lock);
		call_rcu(&sn_irq_info->rcu, sn_irq_info_free);

#ifdef CONFIG_SMP
		set_irq_affinity_info((irq & 0xff), cpuphys, 0);
#endif
	}
}
Example #19
0
static void
rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
	__releases(rcu_get_root(rsp)->lock)
{
	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
	struct rcu_node *rnp = rcu_get_root(rsp);
	struct rcu_node *rnp_cur;
	struct rcu_node *rnp_end;

	if (!cpu_needs_another_gp(rsp, rdp)) {
		spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}

	/* Advance to a new grace period and initialize state. */
	rsp->gpnum++;
	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
	record_gp_stall_check_time(rsp);
	dyntick_record_completed(rsp, rsp->completed - 1);
	note_new_gpnum(rsp, rdp);

	/*
	 * Because we are first, we know that all our callbacks will
	 * be covered by this upcoming grace period, even the ones
	 * that were registered arbitrarily recently.
	 */
	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
	rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

	/* Special-case the common single-level case. */
	if (NUM_RCU_NODES == 1) {
		rnp->qsmask = rnp->qsmaskinit;
		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
		spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}

	spin_unlock(&rnp->lock);  /* leave irqs disabled. */


	/* Exclude any concurrent CPU-hotplug operations. */
	spin_lock(&rsp->onofflock);  /* irqs already disabled. */

	/*
	 * Set the quiescent-state-needed bits in all the non-leaf RCU
	 * nodes for all currently online CPUs.  This operation relies
	 * on the layout of the hierarchy within the rsp->node[] array.
	 * Note that other CPUs will access only the leaves of the
	 * hierarchy, which still indicate that no grace period is in
	 * progress.  In addition, we have excluded CPU-hotplug operations.
	 *
	 * We therefore do not need to hold any locks.  Any required
	 * memory barriers will be supplied by the locks guarding the
	 * leaf rcu_nodes in the hierarchy.
	 */

	rnp_end = rsp->level[NUM_RCU_LVLS - 1];
	for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++)
		rnp_cur->qsmask = rnp_cur->qsmaskinit;

	/*
	 * Now set up the leaf nodes.  Here we must be careful.  First,
	 * we need to hold the lock in order to exclude other CPUs, which
	 * might be contending for the leaf nodes' locks.  Second, as
	 * soon as we initialize a given leaf node, its CPUs might run
	 * up the rest of the hierarchy.  We must therefore acquire locks
	 * for each node that we touch during this stage.  (But we still
	 * are excluding CPU-hotplug operations.)
	 *
	 * Note that the grace period cannot complete until we finish
	 * the initialization process, as there will be at least one
	 * qsmask bit set in the root node until that time, namely the
	 * one corresponding to this CPU.
	 */
	rnp_end = &rsp->node[NUM_RCU_NODES];
	rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
	for (; rnp_cur < rnp_end; rnp_cur++) {
		spin_lock(&rnp_cur->lock);	/* irqs already disabled. */
		rnp_cur->qsmask = rnp_cur->qsmaskinit;
		spin_unlock(&rnp_cur->lock);	/* irqs already disabled. */
	}

	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
	spin_unlock_irqrestore(&rsp->onofflock, flags);
}
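/*
 * Top-half EHCI interrupt handler: acknowledge the masked status bits,
 * count normal and error completions, finish async unlinks (IAA), start
 * resume signaling on remote-wakeup port changes, handle fatal host errors,
 * then run ehci_work() and poll the root hub if a port change was seen.
 */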
static irqreturn_t ehci_irq (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	u32			status, masked_status, pcd_status = 0, cmd;
	int			bh;

	spin_lock (&ehci->lock);

	status = ehci_readl(ehci, &ehci->regs->status);

	/* e.g. cardbus physical eject */
	if (status == ~(u32) 0) {
		ehci_dbg (ehci, "device removed\n");
		goto dead;
	}

	masked_status = status & INTR_MASK;
	if (!masked_status) {		/* irq sharing? */
		spin_unlock(&ehci->lock);
		return IRQ_NONE;
	}

	/* clear (just) interrupts */
	ehci_writel(ehci, masked_status, &ehci->regs->status);
	cmd = ehci_readl(ehci, &ehci->regs->command);
	bh = 0;

#ifdef	VERBOSE_DEBUG
	/* unrequested/ignored: Frame List Rollover */
	dbg_status (ehci, "irq", status);
#endif

	/* INT, ERR, and IAA interrupt rates can be throttled */

	/* normal [4.15.1.2] or error [4.15.1.1] completion */
	if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
		if (likely ((status & STS_ERR) == 0))
			COUNT (ehci->stats.normal);
		else
			COUNT (ehci->stats.error);
		bh = 1;
	}

	/* complete the unlinking of some qh [4.15.2.3] */
	if (status & STS_IAA) {
		/* guard against (alleged) silicon errata */
		if (cmd & CMD_IAAD) {
			ehci_writel(ehci, cmd & ~CMD_IAAD,
					&ehci->regs->command);
			ehci_dbg(ehci, "IAA with IAAD still set?\n");
		}
		if (ehci->reclaim) {
			COUNT(ehci->stats.reclaim);
			end_unlink_async(ehci);
		} else
			ehci_dbg(ehci, "IAA with nothing to reclaim?\n");
	}

	/* remote wakeup [4.3.1] */
	if (status & STS_PCD) {
		unsigned	i = HCS_N_PORTS (ehci->hcs_params);

		/* kick root hub later */
		pcd_status = status;

		/* resume root hub? */
		if (!(cmd & CMD_RUN))
			usb_hcd_resume_root_hub(hcd);

		while (i--) {
			int pstatus = ehci_readl(ehci,
						 &ehci->regs->port_status [i]);

			if (pstatus & PORT_OWNER)
				continue;
			if (!(test_bit(i, &ehci->suspended_ports) &&
					((pstatus & PORT_RESUME) ||
						!(pstatus & PORT_SUSPEND)) &&
					(pstatus & PORT_PE) &&
					ehci->reset_done[i] == 0))
				continue;

			/* start 20 msec resume signaling from this port,
			 * and make khubd collect PORT_STAT_C_SUSPEND to
			 * stop that signaling.
			 */
			ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
			mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
		}
	}

	/* PCI errors [4.15.2.4] */
	if (unlikely ((status & STS_FATAL) != 0)) {
		ehci_err(ehci, "fatal error\n");
		dbg_cmd(ehci, "fatal", cmd);
		dbg_status(ehci, "fatal", status);
		ehci_halt(ehci);
dead:
		ehci_reset(ehci);
		ehci_writel(ehci, 0, &ehci->regs->configured_flag);
		/* generic layer kills/unlinks all urbs, then
		 * uses ehci_stop to clean up the rest
		 */
		bh = 1;
	}

	if (bh)
		ehci_work (ehci);
	spin_unlock (&ehci->lock);
	if (pcd_status)
		usb_hcd_poll_rh_status(hcd);
	return IRQ_HANDLED;
}
Example #21
0
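/*
 * Map a cifs_sid to a POSIX uid/gid for @fattr: find or insert the SID node
 * in the rb-tree under cidlock, use the cached id when already mapped or
 * upcall via request_key(), falling back to the mount's default uid/gid.
 */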
static int
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
		struct cifs_fattr *fattr, uint sidtype)
{
	int rc;
	unsigned long cid;
	struct key *idkey;
	const struct cred *saved_cred;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	if (sidtype == SIDOWNER) {
		cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -ENOENT;

	spin_lock(cidlock);
	psidid = id_rb_search(cidtree, psid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		spin_lock(cidlock);
		psidid = id_rb_search(cidtree, psid);
		if (psidid) { /* node happened to get inserted meanwhile */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			id_rb_insert(cidtree, psid, &psidid,
					sidtype == SIDOWNER ? "os:" : "gs:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock
	 * and it is OK to do so in this case, shrinker will not erase
	 * this node until all references are put and we do not access
	 * any fields of the node after a reference is put.
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		cid = psidid->id;
		psidid->time = jiffies; /* update ts for accessing */
		goto sid_to_id_out;
	}

	if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
		goto sid_to_id_out;

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		saved_cred = override_creds(root_cred);
		idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(idkey))
			cFYI(1, "%s: Can't map SID to an id", __func__);
		else {
			cid = *(unsigned long *)idkey->payload.value;
			psidid->id = cid;
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(idkey);
			kfree(psidid->sidstr);
		}
		revert_creds(saved_cred);
		psidid->time = jiffies; /* update ts for accessing */
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount; /* decremented without spinlock */
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			cid = psidid->id;
	}

sid_to_id_out:
	--psidid->refcount; /* decremented without spinlock */
	if (sidtype == SIDOWNER)
		fattr->cf_uid = cid;
	else
		fattr->cf_gid = cid;

	return 0;
}
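/*
 * Xilinx IIC interrupt handler: compute pending = ISR & IER, service
 * arbitration-lost/TX-error, RX-full, bus-not-busy and TX-empty conditions,
 * then acknowledge all serviced bits with a single write to the IISR.
 */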
static irqreturn_t xiic_process(int irq, void *dev_id)
{
	struct xiic_i2c *i2c = dev_id;
	u32 pend, isr, ier;
	unsigned long flags;
	u32 clr = 0;

	/* Get the interrupt Status from the IPIF. There is no clearing of
	 * interrupts in the IPIF. Interrupts must be cleared at the source.
	 * To find which interrupts are pending; AND interrupts pending with
	 * interrupts masked.
	 */
	spin_lock(&i2c->lock);
	isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
	ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
	pend = isr & ier;

	dev_dbg(i2c->adap.dev.parent, "%s: IER: 0x%x, ISR: 0x%x, pend: 0x%x\n",
		__func__, ier, isr, pend);
	dev_dbg(i2c->adap.dev.parent, "%s: SR: 0x%x, msg: %p, nmsgs: %d\n",
		__func__, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
		i2c->tx_msg, i2c->nmsgs);


	/* Service requesting interrupt */
	if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
		((pend & XIIC_INTR_TX_ERROR_MASK) &&
		!(pend & XIIC_INTR_RX_FULL_MASK))) {
		/* bus arbitration lost, or...
		 * Transmit error _OR_ RX completed
		 * if this happens when RX_FULL is not set
		 * this is probably a TX error
		 */

		dev_dbg(i2c->adap.dev.parent, "%s error\n", __func__);

		/* dynamic mode seems to suffer from problems if we just flush
		 * the fifos and the next message is a TX with len 0 (only addr);
		 * reset the IP instead of just flushing the fifos
		 */
		xiic_reinit(i2c);

		if (i2c->rx_msg)
			xiic_wakeup(i2c, STATE_ERROR);
		if (i2c->tx_msg)
			xiic_wakeup(i2c, STATE_ERROR);

	}
	if (pend & XIIC_INTR_RX_FULL_MASK) {
		/* Receive register/FIFO is full */

		clr |= XIIC_INTR_RX_FULL_MASK;
		if (!i2c->rx_msg) {
			dev_dbg(i2c->adap.dev.parent,
				"%s unexpexted RX IRQ\n", __func__);
			xiic_clear_rx_fifo(i2c);
			goto out;
		}

		xiic_read_rx(i2c);
		if (xiic_rx_space(i2c) == 0) {
			/* this is the last part of the message */
			i2c->rx_msg = NULL;

			/* also clear TX error if there (RX complete) */
			clr |= (isr & XIIC_INTR_TX_ERROR_MASK);

			dev_dbg(i2c->adap.dev.parent,
				"%s end of message, nmsgs: %d\n",
				__func__, i2c->nmsgs);

			/* send next message if this wasn't the last,
			 * otherwise the transfer will be finalised when
			 * receiving the bus not busy interrupt
			 */
			if (i2c->nmsgs > 1) {
				i2c->nmsgs--;
				i2c->tx_msg++;
				dev_dbg(i2c->adap.dev.parent,
					"%s will start next...\n", __func__);

				__xiic_start_xfer(i2c);
			}
		}
	}
	if (pend & XIIC_INTR_BNB_MASK) {
		/* IIC bus has transitioned to not busy */
		clr |= XIIC_INTR_BNB_MASK;

		/* The bus is not busy, disable BusNotBusy interrupt */
		xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);

		if (!i2c->tx_msg)
			goto out;

		if ((i2c->nmsgs == 1) && !i2c->rx_msg &&
			xiic_tx_space(i2c) == 0)
			xiic_wakeup(i2c, STATE_DONE);
		else
			xiic_wakeup(i2c, STATE_ERROR);

	}
	if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
		/* Transmit register/FIFO is empty or ½ empty */

		clr |= (pend &
			(XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK));

		if (!i2c->tx_msg) {
			dev_dbg(i2c->adap.dev.parent,
				"%s unexpexted TX IRQ\n", __func__);
			goto out;
		}

		xiic_fill_tx_fifo(i2c);

		/* current message sent and there is space in the fifo */
		if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) {
			dev_dbg(i2c->adap.dev.parent,
				"%s end of message sent, nmsgs: %d\n",
				__func__, i2c->nmsgs);
			if (i2c->nmsgs > 1) {
				i2c->nmsgs--;
				i2c->tx_msg++;
				__xiic_start_xfer(i2c);
			} else {
				xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);

				dev_dbg(i2c->adap.dev.parent,
					"%s Got TX IRQ but no more to do...\n",
					__func__);
			}
		} else if (!xiic_tx_space(i2c) && (i2c->nmsgs == 1))
			/* current frame is sent and is last,
			 * make sure to disable tx half
			 */
			xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
	}
out:
	dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);

	xiic_setreg32(i2c, XIIC_IISR_OFFSET, clr);
	spin_unlock(&i2c->lock);
	return IRQ_HANDLED;
}
Example #23
0
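/*
 * Nozomi card interrupt handler: read IIR and mask it against the enabled
 * interrupts, then service reset, flow-control, modem, diagnostic and
 * application up-/downlink events before pushing any flipped TTY buffers.
 */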
static irqreturn_t interrupt_handler(int irq, void *dev_id)
{
	struct nozomi *dc = dev_id;
	unsigned int a;
	u16 read_iir;

	if (!dc)
		return IRQ_NONE;

	spin_lock(&dc->spin_mutex);
	read_iir = readw(dc->reg_iir);

	/* Card removed */
	if (read_iir == (u16)-1)
		goto none;
	/*
	 * Just handle interrupt enabled in IER
	 * (by masking with dc->last_ier)
	 */
	read_iir &= dc->last_ier;

	if (read_iir == 0)
		goto none;


	DBG4("%s irq:0x%04X, prev:0x%04X", interrupt2str(read_iir), read_iir,
		dc->last_ier);

	if (read_iir & RESET) {
		if (unlikely(!nozomi_read_config_table(dc))) {
			dc->last_ier = 0x0;
			writew(dc->last_ier, dc->reg_ier);
			dev_err(&dc->pdev->dev, "Could not read status from "
				"card, we should disable interface\n");
		} else {
			writew(RESET, dc->reg_fcr);
		}
		/* No more useful info if this was the reset interrupt. */
		goto exit_handler;
	}
	if (read_iir & CTRL_UL) {
		DBG1("CTRL_UL");
		dc->last_ier &= ~CTRL_UL;
		writew(dc->last_ier, dc->reg_ier);
		if (send_flow_control(dc)) {
			writew(CTRL_UL, dc->reg_fcr);
			dc->last_ier = dc->last_ier | CTRL_UL;
			writew(dc->last_ier, dc->reg_ier);
		}
	}
	if (read_iir & CTRL_DL) {
		receive_flow_control(dc);
		writew(CTRL_DL, dc->reg_fcr);
	}
	if (read_iir & MDM_DL) {
		if (!handle_data_dl(dc, PORT_MDM,
				&(dc->port[PORT_MDM].toggle_dl), read_iir,
				MDM_DL1, MDM_DL2)) {
			dev_err(&dc->pdev->dev, "MDM_DL out of sync!\n");
			goto exit_handler;
		}
	}
	if (read_iir & MDM_UL) {
		if (!handle_data_ul(dc, PORT_MDM, read_iir)) {
			dev_err(&dc->pdev->dev, "MDM_UL out of sync!\n");
			goto exit_handler;
		}
	}
	if (read_iir & DIAG_DL) {
		if (!handle_data_dl(dc, PORT_DIAG,
				&(dc->port[PORT_DIAG].toggle_dl), read_iir,
				DIAG_DL1, DIAG_DL2)) {
			dev_err(&dc->pdev->dev, "DIAG_DL out of sync!\n");
			goto exit_handler;
		}
	}
	if (read_iir & DIAG_UL) {
		dc->last_ier &= ~DIAG_UL;
		writew(dc->last_ier, dc->reg_ier);
		if (send_data(PORT_DIAG, dc)) {
			writew(DIAG_UL, dc->reg_fcr);
			dc->last_ier = dc->last_ier | DIAG_UL;
			writew(dc->last_ier, dc->reg_ier);
		}
	}
	if (read_iir & APP1_DL) {
		if (receive_data(PORT_APP1, dc))
			writew(APP1_DL, dc->reg_fcr);
	}
	if (read_iir & APP1_UL) {
		dc->last_ier &= ~APP1_UL;
		writew(dc->last_ier, dc->reg_ier);
		if (send_data(PORT_APP1, dc)) {
			writew(APP1_UL, dc->reg_fcr);
			dc->last_ier = dc->last_ier | APP1_UL;
			writew(dc->last_ier, dc->reg_ier);
		}
	}
	if (read_iir & APP2_DL) {
		if (receive_data(PORT_APP2, dc))
			writew(APP2_DL, dc->reg_fcr);
	}
	if (read_iir & APP2_UL) {
		dc->last_ier &= ~APP2_UL;
		writew(dc->last_ier, dc->reg_ier);
		if (send_data(PORT_APP2, dc)) {
			writew(APP2_UL, dc->reg_fcr);
			dc->last_ier = dc->last_ier | APP2_UL;
			writew(dc->last_ier, dc->reg_ier);
		}
	}

exit_handler:
	spin_unlock(&dc->spin_mutex);

	for (a = 0; a < NOZOMI_MAX_PORTS; a++)
		if (test_and_clear_bit(a, &dc->flip))
			tty_flip_buffer_push(&dc->port[a].port);

	return IRQ_HANDLED;
none:
	spin_unlock(&dc->spin_mutex);
	return IRQ_NONE;
}
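The comment near the top of interrupt_handler() explains that only interrupts enabled in IER are serviced, by masking the raw IIR value with dc->last_ier, and that an all-ones read means the card has gone away. A minimal standalone sketch of that masking follows; the bit names are invented for illustration and do not correspond to the nozomi register layout.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical interrupt sources; the real bit layout is device specific. */
#define IRQ_RX   0x0001
#define IRQ_TX   0x0002
#define IRQ_CTRL 0x0004

/* Return the bits that are both pending and currently enabled. */
static uint16_t pending_and_enabled(uint16_t iir, uint16_t ier)
{
	if (iir == (uint16_t)-1)	/* all-ones usually means "card gone" */
		return 0;
	return iir & ier;		/* ignore sources we never asked for */
}

int main(void)
{
	uint16_t ier = IRQ_RX | IRQ_CTRL;	/* TX deliberately masked */
	uint16_t iir = IRQ_RX | IRQ_TX;		/* TX pending but not enabled */
	uint16_t act = pending_and_enabled(iir, ier);

	if (act & IRQ_RX)
		printf("handle RX\n");
	if (act & IRQ_TX)
		printf("handle TX\n");	/* not reached: masked in IER */
	return 0;
}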
Exemple #24
0
static int __init
probe(int index)
{
	u32 *phys_reg_addr;
	struct xilinxfb_info *i;
	struct page *page, *end_page;

	switch (index) {
#if defined(CONFIG_XILINX_TFT_CNTLR_REF_0_INSTANCE)
	case 0:
		phys_reg_addr =
			(u32 *) CONFIG_XILINX_TFT_CNTLR_REF_0_DCR_BASEADDR;
		break;
#if defined(CONFIG_XILINX_TFT_CNTLR_REF_1_INSTANCE)
	case 1:
		phys_reg_addr =
			(u32 *) CONFIG_XILINX_TFT_CNTLR_REF_1_DCR_BASEADDR;
		break;
#if defined(CONFIG_XILINX_TFT_CNTLR_REF_2_INSTANCE)
	case 2:
		phys_reg_addr =
			(u32 *) CONFIG_XILINX_TFT_CNTLR_REF_2_DCR_BASEADDR;
		break;
#if defined(CONFIG_XILINX_TFT_CNTLR_REF_3_INSTANCE)
#error Edit this file to add more devices.
#endif				/* 3 */
#endif				/* 2 */
#endif				/* 1 */
#endif				/* 0 */
	default:
		return -ENODEV;
	}

	/* Convert DCR register address to OPB address */
	phys_reg_addr = (unsigned *)(((unsigned)phys_reg_addr * 4) + 0xd0000000);

	/* Allocate the info and zero it out. */
	i = (struct xilinxfb_info *) kmalloc(sizeof (struct xilinxfb_info),
					     GFP_KERNEL);
	if (!i) {
		printk(KERN_ERR "Could not allocate Xilinx "
		       "frame buffer #%d information.\n", index);
		return -ENOMEM;
	}
	memset(i, 0, sizeof (struct xilinxfb_info));

	/* Make it the head of info_list. */
	spin_lock(&info_lock);
	i->next = info_list;
	info_list = i;
	spin_unlock(&info_lock);

	/*
	 * At this point, things are ok for us to call remove_head_info() to
	 * clean up if we run into any problems; i is on info_list and
	 * all the pointers are zeroed because of the memset above.
	 */

	i->fb_virt_start = (unsigned long) consistent_alloc(GFP_KERNEL|GFP_DMA,
							    FB_SIZE,
							    &i->fb_phys);
	if (!i->fb_virt_start) {
		printk(KERN_ERR "Could not allocate frame buffer memory "
		       "for Xilinx device #%d.\n", index);
		remove_head_info();
		return -ENOMEM;
	}

	/*
	 * The 2.4 PPC version of consistent_alloc does not set the
	 * pages reserved.  The pages need to be reserved so that mmap
	 * will work.  This means that we need the following code.  When
	 * consistent_alloc gets fixed, this will no longer be needed.
	 * Note that in 2.4, consistent_alloc doesn't free up the extra
	 * pages either.  This is already fixed in 2.5.
	 */
	page = virt_to_page(__va(i->fb_phys));
	end_page = page + ((FB_SIZE+PAGE_SIZE-1)/PAGE_SIZE);
	while (page < end_page)
		mem_map_reserve(page++);

	/* Clear the frame buffer. */
	memset((void *) i->fb_virt_start, 0, FB_SIZE);

	/* Map the control registers in. */
	i->regs = (u32 *) ioremap((unsigned long) phys_reg_addr, NUM_REGS);

	/* Tell the hardware where the frame buffer is. */
	out_be32(i->regs + REG_FB_ADDR, i->fb_phys);

	/* Turn on the display. */
	out_be32(i->regs + REG_CTRL, REG_CTRL_DEFAULT);

	current_par.var.xres = XRES;
	current_par.var.xres_virtual = XRES_VIRTUAL;
	current_par.var.yres = YRES;
	current_par.var.yres_virtual = YRES_VIRTUAL;
	current_par.var.bits_per_pixel = BITS_PER_PIXEL;

	i->gen.parsize = sizeof (struct xilinxfb_par);
	i->gen.fbhw = &xilinx_switch;

	strcpy(i->gen.info.modename, "Xilinx LCD");
	i->gen.info.changevar = NULL;
	i->gen.info.node = -1;

	i->gen.info.fbops = &xilinxfb_ops;
	i->gen.info.disp = &i->disp;
	i->gen.info.switch_con = &fbgen_switch;
	i->gen.info.updatevar = &fbgen_update_var;
	i->gen.info.blank = &fbgen_blank;
	i->gen.info.flags = FBINFO_FLAG_DEFAULT;

	/* This should give a reasonable default video mode */
	fbgen_get_var(&i->disp.var, -1, &i->gen.info);
	fbgen_do_set_var(&i->disp.var, 1, &i->gen);
	fbgen_set_disp(-1, &i->gen);
	fbgen_install_cmap(0, &i->gen);
	if (register_framebuffer(&i->gen.info) < 0) {
		printk(KERN_ERR "Could not register frame buffer "
		       "for Xilinx device #%d.\n", index);
		remove_head_info();
		return -EINVAL;
	}
	printk(KERN_INFO "fb%d: %s frame buffer at 0x%08X mapped to 0x%08lX\n",
	       GET_FB_IDX(i->gen.info.node), i->gen.info.modename,
	       i->fb_phys, i->fb_virt_start);

	return 0;
}
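probe() above converts a DCR register address to an OPB bus address by multiplying by four and adding 0xd0000000. Below is a tiny worked example of just that arithmetic; the base constant is copied from the code above and should be treated as board specific.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* DCR registers are word-indexed; the OPB mapping used above is
 * byte-addressed, hence the multiply by four before adding the base. */
static uint32_t dcr_to_opb(uint32_t dcr_addr)
{
	return dcr_addr * 4 + 0xd0000000u;
}

int main(void)
{
	/* e.g. DCR address 0x0080 maps to OPB address 0xd0000200 */
	printf("0x%08" PRIx32 "\n", dcr_to_opb(0x0080));
	return 0;
}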
Exemple #25
0
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the group with the fewest
 * existing directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext3_new_inode(handle_t *handle, struct inode * dir,
			     const struct qstr *qstr, int mode)
{
	struct super_block *sb;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	int group;
	unsigned long ino = 0;
	struct inode * inode;
	struct ext3_group_desc * gdp = NULL;
	struct ext3_super_block * es;
	struct ext3_inode_info *ei;
	struct ext3_sb_info *sbi;
	int err = 0;
	struct inode *ret;
	int i;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT3_I(inode);

	sbi = EXT3_SB(sb);
	es = sbi->s_es;
	if (S_ISDIR(mode))
		group = find_group_orlov(sb, dir);
	else
		group = find_group_other(sb, dir);

	err = -ENOSPC;
	if (group == -1)
		goto out;

	for (i = 0; i < sbi->s_groups_count; i++) {
		err = -EIO;

		gdp = ext3_get_group_desc(sb, group, &bh2);
		if (!gdp)
			goto fail;

		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, group);
		if (!bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext3_find_next_zero_bit((unsigned long *)
				bitmap_bh->b_data, EXT3_INODES_PER_GROUP(sb), ino);
		if (ino < EXT3_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(bitmap_bh, "get_write_access");
			err = ext3_journal_get_write_access(handle, bitmap_bh);
			if (err)
				goto fail;

			if (!ext3_set_bit_atomic(sb_bgl_lock(sbi, group),
						ino, bitmap_bh->b_data)) {
				/* we won it */
				BUFFER_TRACE(bitmap_bh,
					"call ext3_journal_dirty_metadata");
				err = ext3_journal_dirty_metadata(handle,
								bitmap_bh);
				if (err)
					goto fail;
				goto got;
			}
			/* we lost it */
			journal_release_buffer(handle, bitmap_bh);

			if (++ino < EXT3_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in concurrent environment.  It is very
		 * rare.  We cannot repeat the find_group_xxx() call because
		 * that will simply return the same blockgroup, because the
		 * group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	ino += group * EXT3_INODES_PER_GROUP(sb) + 1;
	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext3_error (sb, "ext3_new_inode",
			    "reserved inode or inode > inodes count - "
			    "block_group = %d, inode=%lu", group, ino);
		err = -EIO;
		goto fail;
	}

	BUFFER_TRACE(bh2, "get_write_access");
	err = ext3_journal_get_write_access(handle, bh2);
	if (err) goto fail;
	spin_lock(sb_bgl_lock(sbi, group));
	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
	if (S_ISDIR(mode)) {
		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
	}
	spin_unlock(sb_bgl_lock(sbi, group));
	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, bh2);
	if (err) goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);


	if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	inode->i_ino = ino;
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	ei->i_flags =
		ext3_mask_flags(mode, EXT3_I(dir)->i_flags & EXT3_FL_INHERITED);
#ifdef EXT3_FRAGMENTS
	ei->i_faddr = 0;
	ei->i_frag_no = 0;
	ei->i_frag_size = 0;
#endif
	ei->i_file_acl = 0;
	ei->i_dir_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_alloc_info = NULL;
	ei->i_block_group = group;

	ext3_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		handle->h_sync = 1;
	if (insert_inode_locked(inode) < 0) {
		/*
		 * Likely a bitmap corruption causing inode to be allocated
		 * twice.
		 */
		err = -EIO;
		goto fail;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state_flags = 0;
	ext3_set_inode_state(inode, EXT3_STATE_NEW);

	/* See comment in ext3_iget for explanation */
	if (ino >= EXT3_FIRST_INO(sb) + 1 &&
	    EXT3_INODE_SIZE(sb) > EXT3_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize =
			sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE;
	} else {
		ei->i_extra_isize = 0;
	}

	ret = inode;
	dquot_initialize(inode);
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	err = ext3_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext3_init_security(handle, inode, dir, qstr);
	if (err)
		goto fail_free_drop;

	err = ext3_mark_inode_dirty(handle, inode);
	if (err) {
		ext3_std_error(sb, err);
		goto fail_free_drop;
	}

	ext3_debug("allocating inode %lu\n", inode->i_ino);
	goto really_out;
fail:
	ext3_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);

fail_drop:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	unlock_new_inode(inode);
	iput(inode);
	brelse(bitmap_bh);
	return ERR_PTR(err);
}
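The header comment and the repeat_in_this_group path above amount to one idea: start at a goal block group, scan its inode bitmap for a free slot, and on failure move to the next group with wraparound until every group has been tried. The sketch below shows only that control flow in plain userspace C; the atomic bit claim, the journaling calls and the race-retry of the real function are deliberately left out.

#include <stdbool.h>
#include <stdio.h>

#define NGROUPS        4
#define INODES_PER_GRP 8

/* 1 = in use, 0 = free; a stand-in for the per-group inode bitmaps. */
static bool bitmap[NGROUPS][INODES_PER_GRP];

/* Search forward from 'goal', wrapping around, for a free inode slot.
 * Returns a global inode number (1-based, as in ext3) or -1 if full. */
static long alloc_inode(int goal)
{
	int group = goal;

	for (int tried = 0; tried < NGROUPS; tried++) {
		for (int ino = 0; ino < INODES_PER_GRP; ino++) {
			if (!bitmap[group][ino]) {
				bitmap[group][ino] = true;	/* claim it */
				return (long)group * INODES_PER_GRP + ino + 1;
			}
		}
		/* Group full: try the next one, wrapping at the last group. */
		if (++group == NGROUPS)
			group = 0;
	}
	return -1;	/* no free inode anywhere */
}

int main(void)
{
	for (int i = 0; i < INODES_PER_GRP; i++)
		bitmap[1][i] = true;		/* pretend group 1 is full */
	/* goal group is full, so the search falls through to group 2 */
	printf("allocated inode %ld\n", alloc_inode(1));
	return 0;
}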
Exemple #26
0
static void opticon_read_bulk_callback(struct urb *urb)
{
	struct opticon_private *priv = urb->context;
	unsigned char *data = urb->transfer_buffer;
	struct usb_serial_port *port = priv->port;
	int status = urb->status;
	struct tty_struct *tty;
	int result;
	int data_length;
	unsigned long flags;

	dbg("%s - port %d", __func__, port->number);

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dbg("%s - urb shutting down with status: %d",
		    __func__, status);
		return;
	default:
		dbg("%s - nonzero urb status received: %d",
		    __func__, status);
		goto exit;
	}

	usb_serial_debug_data(debug, &port->dev, __func__, urb->actual_length,
			      data);

	if (urb->actual_length > 2) {
		data_length = urb->actual_length - 2;

		/*
		 * Data from the device comes with a 2 byte header:
		 *
		 * <0x00><0x00>data...
		 *	This is real data to be sent to the tty layer
		 * <0x00><0x01>level
		 *	This is a CTS level change, the third byte is the CTS
		 *	value (0 for low, 1 for high).
		 */
		if ((data[0] == 0x00) && (data[1] == 0x00)) {
			/* real data, send it to the tty layer */
			tty = tty_port_tty_get(&port->port);
			if (tty) {
				tty_insert_flip_string(tty, data + 2,
						       data_length);
				tty_flip_buffer_push(tty);
				tty_kref_put(tty);
			}
		} else {
			if ((data[0] == 0x00) && (data[1] == 0x01)) {
				spin_lock_irqsave(&priv->lock, flags);
				/* CTS status information package */
				if (data[2] == 0x00)
					priv->cts = false;
				else
					priv->cts = true;
				spin_unlock_irqrestore(&priv->lock, flags);
			} else {
				dev_dbg(&priv->udev->dev,
					"Unknown data packet received from the device:"
					" %2x %2x\n",
					data[0], data[1]);
			}
		}
	} else {
		dev_dbg(&priv->udev->dev,
			"Improper amount of data received from the device, "
			"%d bytes", urb->actual_length);
	}

exit:
	spin_lock(&priv->lock);

	/* Continue trying to always read if we should */
	if (!priv->throttled) {
		usb_fill_bulk_urb(priv->bulk_read_urb, priv->udev,
				  usb_rcvbulkpipe(priv->udev,
						  priv->bulk_address),
				  priv->bulk_in_buffer, priv->buffer_size,
				  opticon_read_bulk_callback, priv);
		result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC);
		if (result)
			dev_err(&port->dev,
			    "%s - failed resubmitting read urb, error %d\n",
							__func__, result);
	} else
		priv->actually_throttled = true;
	spin_unlock(&priv->lock);
}
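The block comment in the callback above documents the device's two-byte header: 0x00 0x00 introduces payload destined for the tty layer, 0x00 0x01 introduces a one-byte CTS level. Below is a small standalone parser for just that framing; the enum and function names are invented for the example and are not part of the opticon driver.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum pkt_kind { PKT_DATA, PKT_CTS, PKT_UNKNOWN, PKT_TOO_SHORT };

/* Classify one transfer's worth of bytes according to the 2-byte header. */
static enum pkt_kind parse_packet(const unsigned char *buf, size_t len,
				  const unsigned char **payload,
				  size_t *payload_len, bool *cts)
{
	if (len <= 2)
		return PKT_TOO_SHORT;

	if (buf[0] == 0x00 && buf[1] == 0x00) {
		*payload = buf + 2;		/* real data for the tty layer */
		*payload_len = len - 2;
		return PKT_DATA;
	}
	if (buf[0] == 0x00 && buf[1] == 0x01) {
		*cts = (buf[2] != 0x00);	/* CTS line level change */
		return PKT_CTS;
	}
	return PKT_UNKNOWN;
}

int main(void)
{
	unsigned char pkt[] = { 0x00, 0x01, 0x01 };
	const unsigned char *payload;
	size_t plen;
	bool cts = false;

	if (parse_packet(pkt, sizeof(pkt), &payload, &plen, &cts) == PKT_CTS)
		printf("CTS is now %s\n", cts ? "high" : "low");
	return 0;
}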
Exemple #27
0
/**
 * xuartps_isr - Interrupt handler
 * @irq: Irq number
 * @dev_id: Id of the port
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t xuartps_isr(int irq, void *dev_id)
{
	struct uart_port *port = (struct uart_port *)dev_id;
	unsigned long flags;
	unsigned int isrstatus, numbytes;
	unsigned int data;
	char status = TTY_NORMAL;

	spin_lock_irqsave(&port->lock, flags);

	/* Read the interrupt status register to determine which
	 * interrupt(s) is/are active.
	 */
	isrstatus = xuartps_readl(XUARTPS_ISR_OFFSET);

	/* drop byte with parity error if IGNPAR specified */
	if (isrstatus & port->ignore_status_mask & XUARTPS_IXR_PARITY)
		isrstatus &= ~(XUARTPS_IXR_RXTRIG | XUARTPS_IXR_TOUT);

	isrstatus &= port->read_status_mask;
	isrstatus &= ~port->ignore_status_mask;

	if ((isrstatus & XUARTPS_IXR_TOUT) ||
		(isrstatus & XUARTPS_IXR_RXTRIG)) {
		/* Receive timeout or RX FIFO trigger interrupt */
		while ((xuartps_readl(XUARTPS_SR_OFFSET) &
			XUARTPS_SR_RXEMPTY) != XUARTPS_SR_RXEMPTY) {
			data = xuartps_readl(XUARTPS_FIFO_OFFSET);
			port->icount.rx++;

			if (isrstatus & XUARTPS_IXR_PARITY) {
				port->icount.parity++;
				status = TTY_PARITY;
			} else if (isrstatus & XUARTPS_IXR_FRAMING) {
				port->icount.frame++;
				status = TTY_FRAME;
			} else if (isrstatus & XUARTPS_IXR_OVERRUN)
				port->icount.overrun++;

			uart_insert_char(port, isrstatus, XUARTPS_IXR_OVERRUN,
					data, status);
		}
		spin_unlock(&port->lock);
		tty_flip_buffer_push(&port->state->port);
		spin_lock(&port->lock);
	}

	/* Dispatch an appropriate handler */
	if ((isrstatus & XUARTPS_IXR_TXEMPTY) == XUARTPS_IXR_TXEMPTY) {
		if (uart_circ_empty(&port->state->xmit)) {
			xuartps_writel(XUARTPS_IXR_TXEMPTY,
						XUARTPS_IDR_OFFSET);
		} else {
			numbytes = port->fifosize;
			/* Break if no more data available in the UART buffer */
			while (numbytes--) {
				if (uart_circ_empty(&port->state->xmit))
					break;
				/* Get the data from the UART circular buffer
				 * and write it to the xuartps's TX_FIFO
				 * register.
				 */
				xuartps_writel(
					port->state->xmit.buf[port->state->xmit.
					tail], XUARTPS_FIFO_OFFSET);

				port->icount.tx++;

				/* Adjust the tail of the UART buffer and wrap
				 * the buffer if it reaches limit.
				 */
				port->state->xmit.tail =
					(port->state->xmit.tail + 1) &
						(UART_XMIT_SIZE - 1);
			}

			if (uart_circ_chars_pending(
					&port->state->xmit) < WAKEUP_CHARS)
				uart_write_wakeup(port);
		}
	}

	xuartps_writel(isrstatus, XUARTPS_ISR_OFFSET);

	/* be sure to release the lock and tty before leaving */
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}
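The transmit loop above advances the circular-buffer tail with (tail + 1) & (UART_XMIT_SIZE - 1), which relies on the buffer size being a power of two. A minimal userspace ring buffer using the same masking follows; the 16-byte size is arbitrary and the struct is an illustration, not the kernel's struct circ_buf.

#include <stdio.h>

#define BUF_SIZE 16			/* must be a power of two */
#define BUF_MASK (BUF_SIZE - 1)

struct ring {
	char buf[BUF_SIZE];
	unsigned int head;	/* producer writes here */
	unsigned int tail;	/* consumer reads here */
};

/* Bytes waiting to be sent; wraps correctly for power-of-two sizes. */
static unsigned int ring_pending(const struct ring *r)
{
	return (r->head - r->tail) & BUF_MASK;
}

/* Pop one byte, advancing the tail with the same mask as the ISR above. */
static char ring_pop(struct ring *r)
{
	char c = r->buf[r->tail];

	r->tail = (r->tail + 1) & BUF_MASK;
	return c;
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0 };

	r.buf[r.head] = 'x';
	r.head = (r.head + 1) & BUF_MASK;

	while (ring_pending(&r))
		printf("sent '%c'\n", ring_pop(&r));
	return 0;
}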
Exemple #28
0
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
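Before handing a dequeued frame to the network stack, rx_complete() above rejects anything shorter than an Ethernet header or longer than a maximum untagged frame. Here is that check in isolation, with the two standard constants written out rather than pulled from kernel headers.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ETH_HLEN      14	/* destination + source MAC + ethertype */
#define ETH_FRAME_LEN 1514	/* maximum untagged frame, without FCS */

/* Accept only frames at least one full header and at most one max frame. */
static bool frame_len_ok(size_t len)
{
	return len >= ETH_HLEN && len <= ETH_FRAME_LEN;
}

int main(void)
{
	printf("%d %d %d\n",
	       frame_len_ok(10),	/* 0: shorter than the header */
	       frame_len_ok(64),	/* 1: normal minimum-sized frame */
	       frame_len_ok(2000));	/* 0: longer than ETH_FRAME_LEN */
	return 0;
}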
Exemple #29
0
irqreturn_t mdss_dsi_isr(int irq, void *ptr)
{
	u32 isr;
	u32 isr0 = 0;
	struct mdss_dsi_ctrl_pdata *ctrl =
			(struct mdss_dsi_ctrl_pdata *)ptr;

	if (!ctrl) {
		pr_err("%s unable to access ctrl\n", __func__);
		return IRQ_HANDLED;
	}

	if (!ctrl->ctrl_base) {
		pr_err("%s:%d DSI base adr no Initialized",
				       __func__, __LINE__);
		return IRQ_HANDLED;
	}

	isr = MIPI_INP(ctrl->ctrl_base + 0x0110);/* DSI_INTR_CTRL */
	MIPI_OUTP(ctrl->ctrl_base + 0x0110, isr);

	if (ctrl->shared_pdata.broadcast_enable)
		if ((ctrl->panel_data.panel_info.pdest == DISPLAY_2)
		    && (left_ctrl_pdata != NULL)) {
			isr0 = MIPI_INP(left_ctrl_pdata->ctrl_base
						+ 0x0110);/* DSI_INTR_CTRL */
			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0110, isr0 & (~DSI_INTR_CMD_MDP_DONE));
		}

	pr_debug("%s: isr=%x, isr0=%x", __func__, isr, isr0);

	if (isr & DSI_INTR_ERROR) {
#if defined(CONFIG_FB_MSM_MDSS_DSI_DBG)
		xlog(__func__, ctrl->ndx, ctrl->mdp_busy, isr, 0, 0, 0x97);
#endif
		pr_err("%s: isr[%d]=%x %x\n", __func__, ctrl->ndx, isr,
			(int)DSI_INTR_ERROR);
		mdss_dsi_error(ctrl);
	}

	if (isr & DSI_INTR_VIDEO_DONE) {
		spin_lock(&ctrl->mdp_lock);
		mdss_dsi_disable_irq_nosync(ctrl, DSI_VIDEO_TERM);
		complete(&ctrl->video_comp);
		spin_unlock(&ctrl->mdp_lock);
	}

	if (isr & DSI_INTR_CMD_DMA_DONE) {
		spin_lock(&ctrl->mdp_lock);
#if defined(CONFIG_FB_MSM_MDSS_DSI_DBG)
		xlog(__func__, ctrl->ndx, ctrl->mdp_busy, isr, 0, 0, 0x98);
#endif
		mdss_dsi_disable_irq_nosync(ctrl, DSI_CMD_TERM);
		complete(&ctrl->dma_comp);
		spin_unlock(&ctrl->mdp_lock);
	}

	if (isr & DSI_INTR_CMD_MDP_DONE) {
		spin_lock(&ctrl->mdp_lock);
#if defined(CONFIG_FB_MSM_MDSS_DSI_DBG)
		xlog(__func__, ctrl->ndx, ctrl->mdp_busy, isr, 0, 0, 0x99);
#endif
		ctrl->mdp_busy = false;
		mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
		complete(&ctrl->mdp_comp);
		spin_unlock(&ctrl->mdp_lock);
	}

	if (isr & DSI_INTR_BTA_DONE) {
		spin_lock(&ctrl->mdp_lock);
		mdss_dsi_disable_irq_nosync(ctrl, DSI_BTA_TERM);
		complete(&ctrl->bta_comp);
		spin_unlock(&ctrl->mdp_lock);
	}

	return IRQ_HANDLED;
}
Exemple #30
0
int
affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	struct super_block	*sb = dir->i_sb;
	struct buffer_head	*bh;
	struct inode		*inode;
	char			*p;
	int			 i, maxlen, error;
	char			 c, lc;

	pr_debug("AFFS: symlink(%lu,\"%.*s\" -> \"%s\")\n",dir->i_ino,
		 (int)dentry->d_name.len,dentry->d_name.name,symname);

	maxlen = AFFS_SB(sb)->s_hashsize * sizeof(u32) - 1;
	inode  = affs_new_inode(dir);
	if (!inode)
		return -ENOSPC;

	inode->i_op = &affs_symlink_inode_operations;
	inode->i_data.a_ops = &affs_symlink_aops;
	inode->i_mode = S_IFLNK | 0777;
	mode_to_prot(inode);

	error = -EIO;
	bh = affs_bread(sb, inode->i_ino);
	if (!bh)
		goto err;
	i  = 0;
	p  = (char *)AFFS_HEAD(bh)->table;
	lc = '/';
	if (*symname == '/') {
		struct affs_sb_info *sbi = AFFS_SB(sb);
		while (*symname == '/')
			symname++;
		spin_lock(&sbi->symlink_lock);
		while (sbi->s_volume[i])	/* Cannot overflow */
			*p++ = sbi->s_volume[i++];
		spin_unlock(&sbi->symlink_lock);
	}
	while (i < maxlen && (c = *symname++)) {
		if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') {
			/* "../" after a slash: AmigaOS addresses the parent
			 * directory with a bare '/', so emit one and skip
			 * the dots. */
			*p++ = '/';
			i++;
			symname += 2;
			lc = '/';
		} else if (c == '.' && lc == '/' && *symname == '/') {
			/* "./" after a slash is a no-op component: drop it. */
			symname++;
			lc = '/';
		} else {
			*p++ = c;
			lc   = c;
			i++;
		}
		/* After a '/', skip any redundant slashes in the source. */
		if (lc == '/')
			while (*symname == '/')
				symname++;
	}
	*p = 0;
	mark_buffer_dirty_inode(bh, inode);
	affs_brelse(bh);
	mark_inode_dirty(inode);

	error = affs_add_entry(dir, inode, dentry, ST_SOFTLINK);
	if (error)
		goto err;

	return 0;

err:
	clear_nlink(inode);
	mark_inode_dirty(inode);
	iput(inode);
	return error;
}