Example #1
File: xt_SECMARK.c  Project: Lyude/linux
static int secmark_tg_check(const struct xt_tgchk_param *par)
{
	struct xt_secmark_target_info *info = par->targinfo;
	int err;

	if (strcmp(par->table, "mangle") != 0 &&
	    strcmp(par->table, "security") != 0) {
		pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
				    par->table);
		return -EINVAL;
	}

	if (mode && mode != info->mode) {
		pr_info_ratelimited("mode already set to %hu cannot mix with rules for mode %hu\n",
				    mode, info->mode);
		return -EINVAL;
	}

	switch (info->mode) {
	case SECMARK_MODE_SEL:
		break;
	default:
		pr_info_ratelimited("invalid mode: %hu\n", info->mode);
		return -EINVAL;
	}

	err = checkentry_lsm(info);
	if (err)
		return err;

	if (!mode)
		mode = info->mode;
	return 0;
}
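
Every example in this listing logs through pr_info_ratelimited(). For background, here is a minimal sketch of the mechanism behind it, modeled on the printk_ratelimited() macro in include/linux/printk.h; the name my_pr_info_ratelimited is used to avoid clashing with the real macro, and details vary slightly across kernel versions. Each call site carries its own static ratelimit state, so a burst of one message cannot affect rate limiting for the others.

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* Each expansion gets its own static state: by default up to
 * DEFAULT_RATELIMIT_BURST messages per DEFAULT_RATELIMIT_INTERVAL,
 * after which __ratelimit() returns false and the printk is skipped.
 */
#define my_pr_info_ratelimited(fmt, ...)				\
({									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
									\
	if (__ratelimit(&_rs))						\
		printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__);		\
})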
Example #2
File: xt_SECMARK.c  Project: Lyude/linux
static int checkentry_lsm(struct xt_secmark_target_info *info)
{
	int err;

	info->secctx[SECMARK_SECCTX_MAX - 1] = '\0';
	info->secid = 0;

	err = security_secctx_to_secid(info->secctx, strlen(info->secctx),
				       &info->secid);
	if (err) {
		if (err == -EINVAL)
			pr_info_ratelimited("invalid security context \'%s\'\n",
					    info->secctx);
		return err;
	}

	if (!info->secid) {
		pr_info_ratelimited("unable to map security context \'%s\'\n",
				    info->secctx);
		return -ENOENT;
	}

	err = security_secmark_relabel_packet(info->secid);
	if (err) {
		pr_info_ratelimited("unable to obtain relabeling permission\n");
		return err;
	}

	security_secmark_refcount_inc();
	return 0;
}
Example #3
static int l2tp_mt_check(const struct xt_mtchk_param *par)
{
	const struct xt_l2tp_info *info = par->matchinfo;

	/* Check for invalid flags */
	if (info->flags & ~(XT_L2TP_TID | XT_L2TP_SID | XT_L2TP_VERSION |
			    XT_L2TP_TYPE)) {
		pr_info_ratelimited("unknown flags: %x\n", info->flags);
		return -EINVAL;
	}

	/* At least one of tid, sid or type=control must be specified */
	if ((!(info->flags & XT_L2TP_TID)) &&
	    (!(info->flags & XT_L2TP_SID)) &&
	    ((!(info->flags & XT_L2TP_TYPE)) ||
	     (info->type != XT_L2TP_TYPE_CONTROL))) {
		pr_info_ratelimited("invalid flags combination: %x\n",
				    info->flags);
		return -EINVAL;
	}

	/* If version 2 is specified, check that incompatible params
	 * are not supplied
	 */
	if (info->flags & XT_L2TP_VERSION) {
		if ((info->version < 2) || (info->version > 3)) {
			pr_info_ratelimited("wrong L2TP version: %u\n",
					    info->version);
			return -EINVAL;
		}

		if (info->version == 2) {
			if ((info->flags & XT_L2TP_TID) &&
			    (info->tid > 0xffff)) {
				pr_info_ratelimited("v2 tid > 0xffff: %u\n",
						    info->tid);
				return -EINVAL;
			}
			if ((info->flags & XT_L2TP_SID) &&
			    (info->sid > 0xffff)) {
				pr_info_ratelimited("v2 sid > 0xffff: %u\n",
						    info->sid);
				return -EINVAL;
			}
		}
	}

	return 0;
}
Example #4
asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * be 16-byte aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->regs[0];

badframe:
	if (show_unhandled_signals)
		pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
				    current->comm, task_pid_nr(current), __func__,
				    regs->pc, regs->sp);
	force_sig(SIGSEGV, current);
	return 0;
}
Example #5
static int srh_mt6_check(const struct xt_mtchk_param *par)
{
	const struct ip6t_srh *srhinfo = par->matchinfo;

	if (srhinfo->mt_flags & ~IP6T_SRH_MASK) {
		pr_info_ratelimited("unknown srh match flags  %X\n",
				    srhinfo->mt_flags);
		return -EINVAL;
	}

	if (srhinfo->mt_invflags & ~IP6T_SRH_INV_MASK) {
		pr_info_ratelimited("unknown srh invflags %X\n",
				    srhinfo->mt_invflags);
		return -EINVAL;
	}

	return 0;
}
Example #6
void l4x_evict_tasks(struct task_struct *exclude)
{
	struct task_struct *p;
	int cnt = 0;

	rcu_read_lock();
	for_each_process(p) {
		l4_cap_idx_t t;
		struct mm_struct *mm;

		if (p == exclude)
			continue;

		task_lock(p);
		mm = p->mm;
		if (!mm) {
			task_unlock(p);
			continue;
		}

		t = ACCESS_ONCE(mm->context.task);

		if (l4_is_invalid_cap(t)) {
			task_unlock(p);
			continue;
		}

		if (down_read_trylock(&mm->mmap_sem)) {
			struct vm_area_struct *vma;
			for (vma = mm->mmap; vma; vma = vma->vm_next)
				if (vma->vm_flags & VM_LOCKED) {
					t = L4_INVALID_CAP;
					break;
				}
			up_read(&mm->mmap_sem);
			if (!vma)
				if (cmpxchg(&mm->context.task, t, L4_INVALID_CAP) != t)
					t = L4_INVALID_CAP;
		} else
			t = L4_INVALID_CAP;

		task_unlock(p);

		if (!l4_is_invalid_cap(t)) {
			l4lx_task_delete_task(t);
			l4lx_task_number_free(t);

			if (++cnt > 10)
				break;
		}
	}
	rcu_read_unlock();

	if (cnt == 0)
		pr_info_ratelimited("l4x-evict: Found no process to free.\n");
}
Example #7
static int state_mt_check(const struct xt_mtchk_param *par)
{
	int ret;

	ret = nf_ct_netns_get(par->net, par->family);
	if (ret < 0)
		pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
				    par->family);
	return ret;
}
Example #8
static void rsp_cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr,
			int congested)
{
	caam_congested = congested;

	if (congested)
		pr_warn_ratelimited("CAAM rsp path congested\n");
	else
		pr_info_ratelimited("CAAM rsp path congestion state exit\n");
}
Example #9
/*
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation.  If ahash fails, it falls back to using
 * shash.
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;
	struct file *f = file;
	bool new_file_instance = false, modified_flags = false;

	/*
	 * For consistency, fail files opened with the O_DIRECT flag on
	 * filesystems mounted with/without the DAX option.
	 */
	if (file->f_flags & O_DIRECT) {
		hash->length = hash_digest_size[ima_hash_algo];
		hash->algo = ima_hash_algo;
		return -EINVAL;
	}

	/* Open a new file instance in O_RDONLY if we cannot read */
	if (!(file->f_mode & FMODE_READ)) {
		int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
				O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
		flags |= O_RDONLY;
		f = dentry_open(&file->f_path, flags, file->f_cred);
		if (IS_ERR(f)) {
			/*
			 * Cannot open the file again; let's set FMODE_READ
			 * on the original file and continue.
			 */
			pr_info_ratelimited("Unable to reopen file for reading.\n");
			f = file;
			f->f_mode |= FMODE_READ;
			modified_flags = true;
		} else {
			new_file_instance = true;
		}
	}

	i_size = i_size_read(file_inode(f));

	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(f, hash);
		if (!rc)
			goto out;
	}

	rc = ima_calc_file_shash(f, hash);
out:
	if (new_file_instance)
		fput(f);
	else if (modified_flags)
		f->f_mode &= ~FMODE_READ;
	return rc;
}
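
The ima.ahash_minsize knob described in the comment above is a module parameter; for built-in code it is set on the kernel command line, e.g. ima.ahash_minsize=8192 (bytes). A hedged sketch of how such a parameter is typically declared; the exact variable name and permission bits in the IMA sources may differ:

#include <linux/module.h>

/* Illustrative declaration; not copied from the IMA sources. */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size (bytes) for using ahash");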
Example #10
/* first (valid) write to this device should be 4 bytes cal file size */
static ssize_t wcnss_wlan_write(struct file *fp, const char __user
			*user_buffer, size_t count, loff_t *position)
{
	int rc = 0;
	size_t size = 0;

	if (!penv || !penv->device_opened || penv->user_cal_available)
		return -EFAULT;

	if (penv->user_cal_rcvd == 0 && count >= 4
			&& !penv->user_cal_data) {
		rc = copy_from_user((void *)&size, user_buffer, 4);
		if (rc || !size || size > MAX_CALIBRATED_DATA_SIZE) {
			pr_err(DEVICE " invalid size to write %zu\n", size);
			return -EFAULT;
		}

		rc += count;
		count -= 4;
		penv->user_cal_exp_size =  size;
		penv->user_cal_data = kmalloc(size, GFP_KERNEL);
		if (penv->user_cal_data == NULL) {
			pr_err(DEVICE " no memory to write\n");
			return -ENOMEM;
		}
		if (0 == count)
			goto exit;

	} else if (penv->user_cal_rcvd == 0 && count < 4)
		return -EFAULT;

	if ((UINT32_MAX - count < penv->user_cal_rcvd) ||
	     MAX_CALIBRATED_DATA_SIZE < count + penv->user_cal_rcvd) {
		pr_err(DEVICE " invalid size to write %d\n", count +
				penv->user_cal_rcvd);
		rc = -ENOMEM;
		goto exit;
	}
	rc = copy_from_user((void *)penv->user_cal_data +
			penv->user_cal_rcvd, user_buffer, count);
	if (0 == rc) {
		penv->user_cal_rcvd += count;
		rc += count;
	}
	if (penv->user_cal_rcvd == penv->user_cal_exp_size) {
		penv->user_cal_available = true;
		pr_info_ratelimited("wcnss: user cal written");
	}

exit:
	return rc;
}
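
As the comment above notes, the first valid write must begin with a 4-byte calibration-data size; the payload can follow in later writes. A hypothetical user-space counterpart to this protocol (device path, framing, and error handling are illustrative, not taken from the driver):

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* Hypothetical writer: send the 4-byte size header first, then the
 * calibration data itself.
 */
static int send_cal(const char *dev, const void *data, uint32_t len)
{
	int fd = open(dev, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, &len, 4) != 4 ||			/* size header */
	    write(fd, data, len) != (ssize_t)len) {	/* payload */
		close(fd);
		return -1;
	}
	return close(fd);
}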
Example #11
File: xt_time.c  Project: avagin/linux
static int time_mt_check(const struct xt_mtchk_param *par)
{
	const struct xt_time_info *info = par->matchinfo;

	if (info->daytime_start > XT_TIME_MAX_DAYTIME ||
	    info->daytime_stop > XT_TIME_MAX_DAYTIME) {
		pr_info_ratelimited("invalid argument - start or stop time greater than 23:59:59\n");
		return -EDOM;
	}

	if (info->flags & ~XT_TIME_ALL_FLAGS) {
		pr_info_ratelimited("unknown flags 0x%x\n",
				    info->flags & ~XT_TIME_ALL_FLAGS);
		return -EINVAL;
	}

	if ((info->flags & XT_TIME_CONTIGUOUS) &&
	     info->daytime_start < info->daytime_stop)
		return -EINVAL;

	return 0;
}
Example #12
/* wcnss_reset_intr() is invoked when host drivers fails to
 * communicate with WCNSS over SMD; so logging these registers
 * helps to know WCNSS failure reason
 */
void wcnss_riva_log_debug_regs(void)
{
	void __iomem *ccu_reg;
	u32 reg = 0;

	ccu_reg = penv->riva_ccu_base + CCU_RIVA_INVALID_ADDR_OFFSET;
	reg = readl_relaxed(ccu_reg);
	pr_info_ratelimited("%s: CCU_CCPU_INVALID_ADDR %08x\n", __func__, reg);

	ccu_reg = penv->riva_ccu_base + CCU_RIVA_LAST_ADDR0_OFFSET;
	reg = readl_relaxed(ccu_reg);
	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR0 %08x\n", __func__, reg);

	ccu_reg = penv->riva_ccu_base + CCU_RIVA_LAST_ADDR1_OFFSET;
	reg = readl_relaxed(ccu_reg);
	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR1 %08x\n", __func__, reg);

	ccu_reg = penv->riva_ccu_base + CCU_RIVA_LAST_ADDR2_OFFSET;
	reg = readl_relaxed(ccu_reg);
	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg);

}
Example #13
static int ipvs_mt_check(const struct xt_mtchk_param *par)
{
	if (par->family != NFPROTO_IPV4
#ifdef CONFIG_IP_VS_IPV6
	    && par->family != NFPROTO_IPV6
#endif
		) {
		pr_info_ratelimited("protocol family %u not supported\n",
				    par->family);
		return -EINVAL;
	}

	return 0;
}
Example #14
File: bind.c  Project: AshishNamdev/linux
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	struct rds_transport *trans;
	int ret = 0;

	lock_sock(sk);

	if (addr_len != sizeof(struct sockaddr_in) ||
	    sin->sin_family != AF_INET ||
	    rs->rs_bound_addr ||
	    sin->sin_addr.s_addr == htonl(INADDR_ANY)) {
		ret = -EINVAL;
		goto out;
	}

	ret = rds_add_bound(rs, sin->sin_addr.s_addr, &sin->sin_port);
	if (ret)
		goto out;

	if (rs->rs_transport) { /* previously bound */
		trans = rs->rs_transport;
		if (trans->laddr_check(sock_net(sock->sk),
				       sin->sin_addr.s_addr) != 0) {
			ret = -ENOPROTOOPT;
			rds_remove_bound(rs);
		} else {
			ret = 0;
		}
		goto out;
	}
	trans = rds_trans_get_preferred(sock_net(sock->sk),
					sin->sin_addr.s_addr);
	if (!trans) {
		ret = -EADDRNOTAVAIL;
		rds_remove_bound(rs);
		pr_info_ratelimited("RDS: %s could not find a transport for %pI4, load rds_tcp or rds_rdma?\n",
				    __func__, &sin->sin_addr.s_addr);
		goto out;
	}

	rs->rs_transport = trans;
	ret = 0;

out:
	release_sock(sk);
	return ret;
}
Example #15
static void wcnss_nvbin_dnld_main(struct work_struct *worker)
{
	int retry = 0;

	if (!FW_CALDATA_CAPABLE())
		goto nv_download;

	if (!penv->fw_cal_available &&
	    has_calibrated_data != WCNSS_CONFIG_UNSPECIFIED &&
	    !penv->user_cal_available) {
		while (!penv->user_cal_available && retry++ < 5)
			msleep(500);
	}

	/* only cal data is sent during ssr (if available) */
	if (penv->fw_cal_available && penv->ssr_boot) {
		pr_info_ratelimited("wcnss: cal download during SSR, using fw cal");
		wcnss_caldata_dnld(penv->fw_cal_data, penv->fw_cal_rcvd, false);
		return;
	} else if (penv->user_cal_available && penv->ssr_boot) {
		pr_info_ratelimited("wcnss: cal download during SSR, using user cal");
		wcnss_caldata_dnld(penv->user_cal_data,
				   penv->user_cal_rcvd, false);
		return;
	} else if (penv->user_cal_available) {
		pr_info_ratelimited("wcnss: cal download during cold boot, using user cal");
		wcnss_caldata_dnld(penv->user_cal_data,
				   penv->user_cal_rcvd, true);
	}

nv_download:
	pr_info_ratelimited("wcnss: NV download");
	wcnss_nvbin_dnld();
}
Example #16
static irqreturn_t anx7816_cbl_det_isr(int irq, void *data)
{
	struct anx7816_data *anx7816 = data;

	if (gpio_get_value(anx7816->pdata->gpio_cbl_det) && irq_enable) {
		if (!anx7816->slimport_connected) {
			wake_lock(&anx7816->slimport_lock);
			anx7816->slimport_connected = true;
			pr_info_ratelimited("%s %s : detect cable insertion\n", LOG_TAG, __func__);
			queue_delayed_work(anx7816->workqueue, &anx7816->work, 0);
	/*		queue_delayed_work(anx7816->workqueue, &anx7816->dwc3_ref_clk_work, 0); */
		}
	} else if (!gpio_get_value(anx7816->pdata->gpio_cbl_det) && irq_enable) {
		if (anx7816->slimport_connected) {
			anx7816->slimport_connected = false;
			pr_info_ratelimited("%s %s : detect cable removal\n", LOG_TAG, __func__);
			cancel_delayed_work_sync(&anx7816->work);
			wake_unlock(&anx7816->slimport_lock);
			wake_lock_timeout(&anx7816->slimport_lock, 2*HZ);
	/*		queue_delayed_work(anx7816->workqueue, &anx7816->dwc3_ref_clk_work, 0); */
		}
	}
	return IRQ_HANDLED;
}
Example #17
static int tproxy_tg4_check(const struct xt_tgchk_param *par)
{
	const struct ipt_ip *i = par->entryinfo;
	int err;

	err = nf_defrag_ipv4_enable(par->net);
	if (err)
		return err;

	if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP)
	    && !(i->invflags & IPT_INV_PROTO))
		return 0;

	pr_info_ratelimited("Can be used only with -p tcp or -p udp\n");
	return -EINVAL;
}
Example #18
/*
 * Handle stack alignment exceptions.
 */
asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
					   unsigned int esr,
					   struct pt_regs *regs)
{
	struct siginfo info;
	struct task_struct *tsk = current;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
		pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
				    tsk->comm, task_pid_nr(tsk),
				    esr_get_class_string(esr), (void *)regs->pc,
				    (void *)regs->sp);

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code  = BUS_ADRALN;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
}
Example #19
File: tfc_io.c  Project: 19Dan01/linux
/*
 * Deliver read data back to initiator.
 * XXX TBD handle resource problems later.
 */
int ft_queue_data_in(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct scatterlist *sg = NULL;
	size_t remaining;
	u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
	u32 mem_off = 0;
	u32 fh_off = 0;
	u32 frame_off = 0;
	size_t frame_len = 0;
	size_t mem_len = 0;
	size_t tlen;
	size_t off_in_page;
	struct page *page = NULL;
	int use_sg;
	int error;
	void *page_addr;
	void *from;
	void *to = NULL;

	if (cmd->aborted)
		return 0;

	if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
		goto queue_status;

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	cmd->seq = lport->tt.seq_start_next(cmd->seq);

	remaining = se_cmd->data_length;

	/*
	 * Setup to use first mem list entry, unless no data.
	 */
	BUG_ON(remaining && !se_cmd->t_data_sg);
	if (remaining) {
		sg = se_cmd->t_data_sg;
		mem_len = sg->length;
		mem_off = sg->offset;
		page = sg_page(sg);
	}

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4);

	while (remaining) {
		struct fc_seq *seq = cmd->seq;

		if (!seq) {
			pr_debug("%s: Command aborted, xid 0x%x\n",
				 __func__, ep->xid);
			break;
		}
		if (!mem_len) {
			sg = sg_next(sg);
			mem_len = min((size_t)sg->length, remaining);
			mem_off = sg->offset;
			page = sg_page(sg);
		}
		if (!frame_len) {
			/*
			 * If the lport has the Large Send Offload (LSO)
			 * capability, allow 'frame_len' to be as big as
			 * 'lso_max' when the indicated transfer length is
			 * >= lport->lso_max.
			 */
			frame_len = (lport->seq_offload) ? lport->lso_max :
							  cmd->sess->max_frame;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp)
				return -ENOMEM;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
			frame_off += frame_len;
			/*
			 * Set up the frame's max payload, which the base
			 * driver uses to tell the HW the maximum frame
			 * size, so the HW can fragment appropriately based
			 * on the "gso_max_size" of the underlying netdev.
			 */
			fr_max_payload(fp) = cmd->sess->max_frame;
		}
		tlen = min(mem_len, frame_len);

		if (use_sg) {
			off_in_page = mem_off;
			BUG_ON(!page);
			get_page(page);
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, off_in_page, tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize +=
					PAGE_SIZE << compound_order(page);
		} else {
			BUG_ON(!page);
			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
			page_addr = from;
			from += mem_off & ~PAGE_MASK;
			tlen = min(tlen, (size_t)(PAGE_SIZE -
						(mem_off & ~PAGE_MASK)));
			memcpy(to, from, tlen);
			kunmap_atomic(page_addr);
			to += tlen;
		}

		mem_off += tlen;
		mem_len -= tlen;
		frame_len -= tlen;
		remaining -= tlen;

		if (frame_len &&
		    (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
			continue;
		if (!remaining)
			f_ctl |= FC_FC_END_SEQ;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_off);
		error = lport->tt.seq_send(lport, seq, fp);
		if (error) {
			pr_info_ratelimited("%s: Failed to send frame %p, "
						"xid <0x%x>, remaining %zu, "
						"lso_max <0x%x>\n",
						__func__, fp, ep->xid,
						remaining, lport->lso_max);
			/*
			 * Go ahead and set TASK_SET_FULL status, ignoring
			 * the rest of the DataIN, and immediately attempt
			 * to send the response via ft_queue_status() in
			 * order to notify the initiator that it should
			 * reduce its per-LUN queue_depth.
			 */
			se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
			break;
		}
	}
queue_status:
	return ft_queue_status(se_cmd);
}
Example #20
static bool
socket_mt4_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
	static struct xt_socket_mtinfo1 xt_info_v0 = {
		.flags = 0,
	};

	return socket_match(skb, par, &xt_info_v0);
}

static bool
socket_mt4_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
{
	return socket_match(skb, par, par->matchinfo);
}

#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static bool
socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
	struct sk_buff *pskb = (struct sk_buff *)skb;
	struct sock *sk = skb->sk;

	if (sk && !net_eq(xt_net(par), sock_net(sk)))
		sk = NULL;

	if (!sk)
		sk = nf_sk_lookup_slow_v6(xt_net(par), skb, xt_in(par));

	if (sk) {
		bool wildcard;
		bool transparent = true;

		/* Ignore sockets listening on INADDR_ANY
		 * unless XT_SOCKET_NOWILDCARD is set
		 */
		wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) &&
			    sk_fullsock(sk) &&
			    ipv6_addr_any(&sk->sk_v6_rcv_saddr));

		/* Ignore non-transparent sockets,
		 * if XT_SOCKET_TRANSPARENT is used
		 */
		if (info->flags & XT_SOCKET_TRANSPARENT)
			transparent = inet_sk_transparent(sk);

		if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
		    transparent && sk_fullsock(sk))
			pskb->mark = sk->sk_mark;

		if (sk != skb->sk)
			sock_gen_put(sk);

		if (wildcard || !transparent)
			sk = NULL;
	}

	return sk != NULL;
}
#endif

static int socket_mt_enable_defrag(struct net *net, int family)
{
	switch (family) {
	case NFPROTO_IPV4:
		return nf_defrag_ipv4_enable(net);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	case NFPROTO_IPV6:
		return nf_defrag_ipv6_enable(net);
#endif
	}
	WARN_ONCE(1, "Unknown family %d\n", family);
	return 0;
}

static int socket_mt_v1_check(const struct xt_mtchk_param *par)
{
	const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
	int err;

	err = socket_mt_enable_defrag(par->net, par->family);
	if (err)
		return err;

	if (info->flags & ~XT_SOCKET_FLAGS_V1) {
		pr_info_ratelimited("unknown flags 0x%x\n",
				    info->flags & ~XT_SOCKET_FLAGS_V1);
		return -EINVAL;
	}
	return 0;
}

static int socket_mt_v2_check(const struct xt_mtchk_param *par)
{
	const struct xt_socket_mtinfo2 *info = (struct xt_socket_mtinfo2 *) par->matchinfo;
	int err;

	err = socket_mt_enable_defrag(par->net, par->family);
	if (err)
		return err;

	if (info->flags & ~XT_SOCKET_FLAGS_V2) {
		pr_info_ratelimited("unknown flags 0x%x\n",
				    info->flags & ~XT_SOCKET_FLAGS_V2);
		return -EINVAL;
	}
	return 0;
}

static int socket_mt_v3_check(const struct xt_mtchk_param *par)
{
	const struct xt_socket_mtinfo3 *info =
				    (struct xt_socket_mtinfo3 *)par->matchinfo;
	int err;

	err = socket_mt_enable_defrag(par->net, par->family);
	if (err)
		return err;
	if (info->flags & ~XT_SOCKET_FLAGS_V3) {
		pr_info_ratelimited("unknown flags 0x%x\n",
				    info->flags & ~XT_SOCKET_FLAGS_V3);
		return -EINVAL;
	}
	return 0;
}

static struct xt_match socket_mt_reg[] __read_mostly = {
	{
		.name		= "socket",
		.revision	= 0,
		.family		= NFPROTO_IPV4,
		.match		= socket_mt4_v0,
		.hooks		= (1 << NF_INET_PRE_ROUTING) |
				  (1 << NF_INET_LOCAL_IN),
		.me		= THIS_MODULE,
	},
	{
		.name		= "socket",
		.revision	= 1,
		.family		= NFPROTO_IPV4,
		.match		= socket_mt4_v1_v2_v3,
		.checkentry	= socket_mt_v1_check,
		.matchsize	= sizeof(struct xt_socket_mtinfo1),
		.hooks		= (1 << NF_INET_PRE_ROUTING) |
				  (1 << NF_INET_LOCAL_IN),
		.me		= THIS_MODULE,
	},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	{
		.name		= "socket",
Example #21
File: nx-842.c  Project: avagin/linux
static int decompress(struct nx842_crypto_ctx *ctx,
		      struct nx842_crypto_param *p,
		      struct nx842_crypto_header_group *g,
		      struct nx842_constraints *c,
		      u16 ignore)
{
	unsigned int slen = be32_to_cpu(g->compressed_length);
	unsigned int required_len = be32_to_cpu(g->uncompressed_length);
	unsigned int dlen = p->oremain, tmplen;
	unsigned int adj_slen = slen;
	u8 *src = p->in, *dst = p->out;
	u16 padding = be16_to_cpu(g->padding);
	int ret, spadding = 0;
	ktime_t timeout;

	if (!slen || !required_len)
		return -EINVAL;

	if (p->iremain <= 0 || padding + slen > p->iremain)
		return -EOVERFLOW;

	if (p->oremain <= 0 || required_len - ignore > p->oremain)
		return -ENOSPC;

	src += padding;

	if (slen % c->multiple)
		adj_slen = round_up(slen, c->multiple);
	if (slen < c->minimum)
		adj_slen = c->minimum;
	if (slen > c->maximum)
		goto usesw;
	if (slen < adj_slen || (u64)src % c->alignment) {
		/* we can append padding bytes because the 842 format defines
		 * an "end" template (see lib/842/842_decompress.c) and will
		 * ignore any bytes following it.
		 */
		if (slen < adj_slen)
			memset(ctx->sbounce + slen, 0, adj_slen - slen);
		memcpy(ctx->sbounce, src, slen);
		src = ctx->sbounce;
		spadding = adj_slen - slen;
		slen = adj_slen;
		pr_debug("using decomp sbounce buffer, len %x\n", slen);
	}

	if (dlen % c->multiple)
		dlen = round_down(dlen, c->multiple);
	if (dlen < required_len || (u64)dst % c->alignment) {
		dst = ctx->dbounce;
		dlen = min(required_len, BOUNCE_BUFFER_SIZE);
		pr_debug("using decomp dbounce buffer, len %x\n", dlen);
	}
	if (dlen < c->minimum)
		goto usesw;
	if (dlen > c->maximum)
		dlen = c->maximum;

	tmplen = dlen;
	timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
	do {
		dlen = tmplen; /* reset dlen, if we're retrying */
		ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem);
	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
	if (ret) {
usesw:
		/* reset everything, sw doesn't have constraints */
		src = p->in + padding;
		slen = be32_to_cpu(g->compressed_length);
		spadding = 0;
		dst = p->out;
		dlen = p->oremain;
		if (dlen < required_len) { /* have ignore bytes */
			dst = ctx->dbounce;
			dlen = BOUNCE_BUFFER_SIZE;
		}
		pr_info_ratelimited("using software 842 decompression\n");
		ret = sw842_decompress(src, slen, dst, &dlen);
	}
	if (ret)
		return ret;

	slen -= spadding;

	dlen -= ignore;
	if (ignore)
		pr_debug("ignoring last %x bytes\n", ignore);

	if (dst == ctx->dbounce)
		memcpy(p->out, dst, dlen);

	pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
		 slen, padding, dlen, ignore);

	return update_param(p, slen + padding, dlen);
}
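
The constraint handling above follows a common pattern for fixed-function engines: round the source length up to the engine's multiple, bounce through an aligned buffer when the caller's buffer violates alignment, and fall back to a software path when the constraints cannot be met at all. A reduced sketch under those assumptions; the struct below only mirrors the fields used here and is not the real nx842_constraints definition.

#include <linux/kernel.h>	/* round_up() */
#include <linux/string.h>	/* memcpy(), memset() */
#include <linux/types.h>

/* Illustrative stand-in for the constraint fields used above. */
struct hw_constraints {
	int alignment, multiple, minimum, maximum;
};

/* Returns false when only the software fallback can handle the input. */
static bool fixup_src(const struct hw_constraints *c,
		      u8 **src, unsigned int *slen, u8 *bounce)
{
	unsigned int adj = *slen;

	if (adj % c->multiple)
		adj = round_up(adj, c->multiple);
	if (adj < c->minimum)
		adj = c->minimum;
	if (adj > c->maximum)
		return false;

	if (adj != *slen || (unsigned long)*src % c->alignment) {
		memcpy(bounce, *src, *slen);
		/* Zero padding is safe: the 842 "end" template stops the
		 * decompressor before the padding is reached.
		 */
		memset(bounce + *slen, 0, adj - *slen);
		*src = bounce;
		*slen = adj;
	}
	return true;
}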
Example #22
static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
				     const struct net_device *dev, u8 flags)
{
	struct rt6_info *rt;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	bool ret = false;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
		.flowi6_proto = iph->nexthdr,
		.daddr = iph->saddr,
	};
	int lookup_flags;

	if (rpfilter_addr_unicast(&iph->daddr)) {
		memcpy(&fl6.saddr, &iph->daddr, sizeof(struct in6_addr));
		lookup_flags = RT6_LOOKUP_F_HAS_SADDR;
	} else {
		lookup_flags = 0;
	}

	fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
	if ((flags & XT_RPFILTER_LOOSE) == 0) {
		fl6.flowi6_oif = dev->ifindex;
		lookup_flags |= RT6_LOOKUP_F_IFACE;
	}

	rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags);
	if (rt->dst.error)
		goto out;

	if (rt->rt6i_flags & (RTF_REJECT|RTF_ANYCAST))
		goto out;

	if (rt->rt6i_flags & RTF_LOCAL) {
		ret = flags & XT_RPFILTER_ACCEPT_LOCAL;
		goto out;
	}

	if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
		ret = true;
 out:
	ip6_rt_put(rt);
	return ret;
}

static bool
rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
{
	return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
}

static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_rpfilter_info *info = par->matchinfo;
	int saddrtype;
	struct ipv6hdr *iph;
	bool invert = info->flags & XT_RPFILTER_INVERT;

	if (rpfilter_is_loopback(skb, xt_in(par)))
		return true ^ invert;

	iph = ipv6_hdr(skb);
	saddrtype = ipv6_addr_type(&iph->saddr);
	if (unlikely(saddrtype == IPV6_ADDR_ANY))
		return true ^ invert; /* not routable: forward path will drop it */

	return rpfilter_lookup_reverse6(xt_net(par), skb, xt_in(par),
					info->flags) ^ invert;
}

static int rpfilter_check(const struct xt_mtchk_param *par)
{
	const struct xt_rpfilter_info *info = par->matchinfo;
	unsigned int options = ~XT_RPFILTER_OPTION_MASK;

	if (info->flags & options) {
		pr_info_ratelimited("unknown options\n");
		return -EINVAL;
	}

	if (strcmp(par->table, "mangle") != 0 &&
	    strcmp(par->table, "raw") != 0) {
		pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n",
				    par->table);
		return -EINVAL;
	}

	return 0;
}

static struct xt_match rpfilter_mt_reg __read_mostly = {
	.name		= "rpfilter",
	.family		= NFPROTO_IPV6,
	.checkentry	= rpfilter_check,
	.match		= rpfilter_mt,
	.matchsize	= sizeof(struct xt_rpfilter_info),
	.hooks		= (1 << NF_INET_PRE_ROUTING),
	.me		= THIS_MODULE
};

static int __init rpfilter_mt_init(void)
{
	return xt_register_match(&rpfilter_mt_reg);
}

static void __exit rpfilter_mt_exit(void)
{
	xt_unregister_match(&rpfilter_mt_reg);
}

module_init(rpfilter_mt_init);
module_exit(rpfilter_mt_exit);
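
This example registers a single match with xt_register_match(). A module that provides several match revisions, like the socket match whose socket_mt_reg[] array appears (truncated) in Example #20, registers the whole array with the plural helpers instead. A sketch consistent with the xtables API; the init/exit names are assumed rather than copied from xt_socket.c:

static int __init socket_mt_init(void)
{
	return xt_register_matches(socket_mt_reg,
				   ARRAY_SIZE(socket_mt_reg));
}

static void __exit socket_mt_exit(void)
{
	xt_unregister_matches(socket_mt_reg, ARRAY_SIZE(socket_mt_reg));
}

module_init(socket_mt_init);
module_exit(socket_mt_exit);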
Example #23
/* Log pronto debug registers before sending reset interrupt */
void wcnss_pronto_log_debug_regs(void)
{
	void __iomem *reg_addr, *tst_addr, *tst_ctrl_addr;
	u32 reg = 0;

	reg_addr = penv->pronto_a2xb_base + A2XB_CFG_OFFSET;
	reg = readl_relaxed(reg_addr);
	pr_info_ratelimited("%s: A2XB_CFG_OFFSET %08x\n", __func__, reg);

	reg_addr = penv->pronto_a2xb_base + A2XB_INT_SRC_OFFSET;
	reg = readl_relaxed(reg_addr);
	pr_info_ratelimited("%s: A2XB_INT_SRC_OFFSET %08x\n", __func__, reg);

	reg_addr = penv->pronto_a2xb_base + A2XB_ERR_INFO_OFFSET;
	reg = readl_relaxed(reg_addr);
	pr_info_ratelimited("%s: A2XB_ERR_INFO_OFFSET %08x\n", __func__, reg);

	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_INVALID_ADDR_OFFSET;
	reg = readl_relaxed(reg_addr);
	pr_info_ratelimited("%s: CCU_CCPU_INVALID_ADDR %08x\n", __func__, reg);

	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR0_OFFSET;
	reg = readl_relaxed(reg_addr);
	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR0 %08x\n", __func__, reg);

	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR1_OFFSET;
	reg = readl_relaxed(reg_addr);
	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR1 %08x\n", __func__, reg);

	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR2_OFFSET;
	reg = readl_relaxed(reg_addr);
	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg);

	tst_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_OFFSET;
	tst_ctrl_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_CTRL_OFFSET;

	/*  read data FIFO */
	reg = 0;
	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_RDFIFO;
	writel_relaxed(reg, tst_ctrl_addr);
	reg = readl_relaxed(tst_addr);
	pr_info_ratelimited("%s:  Read data FIFO testbus %08x\n",
					__func__, reg);

	/*  command FIFO */
	reg = 0;
	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_CMDFIFO;
	writel_relaxed(reg, tst_ctrl_addr);
	reg = readl_relaxed(tst_addr);
	pr_info_ratelimited("%s:  Command FIFO testbus %08x\n",
					__func__, reg);

	/*  write data FIFO */
	reg = 0;
	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_WRFIFO;
	writel_relaxed(reg, tst_ctrl_addr);
	reg = readl_relaxed(tst_addr);
	pr_info_ratelimited("%s:  Rrite data FIFO testbus %08x\n",
					__func__, reg);

	/*   AXIM SEL CFG0 */
	reg = 0;
	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_AXIM |
				WCNSS_TSTBUS_CTRL_AXIM_CFG0;
	writel_relaxed(reg, tst_ctrl_addr);
	reg = readl_relaxed(tst_addr);
	pr_info_ratelimited("%s:  AXIM SEL CFG0 testbus %08x\n",
					__func__, reg);

	/*   AXIM SEL CFG1 */
	reg = 0;
	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_AXIM |
				WCNSS_TSTBUS_CTRL_AXIM_CFG1;
	writel_relaxed(reg, tst_ctrl_addr);
	reg = readl_relaxed(tst_addr);
	pr_info_ratelimited("%s:  AXIM SEL CFG1 testbus %08x\n",
					__func__, reg);

	/*   CTRL SEL CFG0 */
	reg = 0;
	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_CTRL |
		WCNSS_TSTBUS_CTRL_CTRL_CFG0;
	writel_relaxed(reg, tst_ctrl_addr);
	reg = readl_relaxed(tst_addr);
	pr_info_ratelimited("%s:  CTRL SEL CFG0 testbus %08x\n",
					__func__, reg);

	/*   CTRL SEL CFG1 */
	reg = 0;
	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_CTRL |
		WCNSS_TSTBUS_CTRL_CTRL_CFG1;
	writel_relaxed(reg, tst_ctrl_addr);
	reg = readl_relaxed(tst_addr);
	pr_info_ratelimited("%s:  CTRL SEL CFG1 testbus %08x\n", __func__, reg);

}
Example #24
File: tfc_cmd.c  Project: 383530895/linux
/*
 * Send response.
 */
int ft_queue_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_resp_with_ext *fcp;
	struct fc_lport *lport;
	struct fc_exch *ep;
	size_t len;
	int rc;

	if (cmd->aborted)
		return 0;
	ft_dump_cmd(cmd, __func__);
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
	fp = fc_frame_alloc(lport, len);
	if (!fp) {
		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		return -ENOMEM;
	}

	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = se_cmd->scsi_status;

	len = se_cmd->scsi_sense_length;
	if (len) {
		fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
		fcp->ext.fr_sns_len = htonl(len);
		memcpy((fcp + 1), se_cmd->sense_buffer, len);
	}

	/*
	 * Test underflow and overflow with one mask.  Usually both are off.
	 * Bidirectional commands are not handled yet.
	 */
	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
			fcp->resp.fr_flags |= FCP_RESID_OVER;
		else
			fcp->resp.fr_flags |= FCP_RESID_UNDER;
		fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
	}

	/*
	 * Send response.
	 */
	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);

	rc = lport->tt.seq_send(lport, cmd->seq, fp);
	if (rc) {
		pr_info_ratelimited("%s: Failed to send response frame %p, "
				    "xid <0x%x>\n", __func__, fp, ep->xid);
		/*
		 * Generate a TASK_SET_FULL status to notify the initiator
		 * to reduce its queue_depth after the se_cmd response has
		 * been re-queued by target-core.
		 */
		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		return -ENOMEM;
	}
	lport->tt.exch_done(cmd->seq);
	return 0;
}
Example #25
static int
aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct buf *buf;
	ulong flags;

	blk_queue_bounce(q, &bio);

	if (bio == NULL) {
		printk(KERN_ERR "aoe: bio is NULL\n");
		BUG();
		return 0;
	}
	d = bio->bi_bdev->bd_disk->private_data;
	if (d == NULL) {
		printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	} else if (bio->bi_io_vec == NULL) {
		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	}
	buf = mempool_alloc(d->bufpool, GFP_NOIO);
	if (buf == NULL) {
		printk(KERN_INFO "aoe: buf allocation failure\n");
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	memset(buf, 0, sizeof(*buf));
	INIT_LIST_HEAD(&buf->bufs);
	buf->stime = jiffies;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	buf->bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv_resid = buf->bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
	buf->bv_off = buf->bv->bv_offset;

	spin_lock_irqsave(&d->lock, flags);

	if ((d->flags & DEVFL_UP) == 0) {
		pr_info_ratelimited("aoe: device %ld.%d is not up\n",
			d->aoemajor, d->aoeminor);
		spin_unlock_irqrestore(&d->lock, flags);
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -ENXIO);
		return 0;
	}

	list_add_tail(&buf->bufs, &d->bufq);

	aoecmd_work(d);
	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(&queue);

	return 0;
}
Example #26
/**
 * dgrp_dpa() -- send data to the device monitor queue
 * @nd: pointer to a node structure
 * @buf: buffer of data to copy to the monitoring buffer
 * @len: number of bytes to transfer to the buffer
 *
 * Called by the net device routines to send data to the device
 * monitor queue.  If the monitor buffer nears capacity, flow control
 * is requested; if it is completely full, the remaining data is dropped.
 */
static void dgrp_dpa(struct nd_struct *nd, u8 *buf, int nbuf)
{
	int n;
	int r;
	unsigned long lock_flags;

	/*
	 *  Grab DPA lock.
	 */
	spin_lock_irqsave(&nd->nd_dpa_lock, lock_flags);

	/*
	 *  Loop while data remains.
	 */
	while (nbuf > 0 && nd->nd_dpa_buf != NULL) {

		n = (nd->nd_dpa_out - nd->nd_dpa_in - 1) & DPA_MASK;

		/*
		 * Enforce flow control on the DPA device.
		 */
		if (n < (DPA_MAX - DPA_HIGH_WATER))
			nd->nd_dpa_flag |= DPA_WAIT_SPACE;

		/*
		 * This should never happen, as the flow control above
		 * should have stopped things before they got to this point.
		 */
		if (n == 0) {
			spin_unlock_irqrestore(&nd->nd_dpa_lock, lock_flags);
			return;
		}

		/*
		 * Copy as much data as will fit.
		 */

		if (n > nbuf)
			n = nbuf;

		r = DPA_MAX - nd->nd_dpa_in;

		if (r <= n) {
			memcpy(nd->nd_dpa_buf + nd->nd_dpa_in, buf, r);

			n -= r;

			nd->nd_dpa_in = 0;

			buf += r;
			nbuf -= r;
		}

		memcpy(nd->nd_dpa_buf + nd->nd_dpa_in, buf, n);

		nd->nd_dpa_in += n;

		buf += n;
		nbuf -= n;

		if (nd->nd_dpa_in >= DPA_MAX)
			pr_info_ratelimited("%s - nd->nd_dpa_in (%i) >= DPA_MAX\n",
					    __func__, nd->nd_dpa_in);

		/*
		 *  Wakeup any thread waiting for data
		 */
		if (nd->nd_dpa_flag & DPA_WAIT_DATA) {
			nd->nd_dpa_flag &= ~DPA_WAIT_DATA;
			wake_up_interruptible(&nd->nd_dpa_wqueue);
		}
	}

	/*
	 *  Release the DPA lock.
	 */
	spin_unlock_irqrestore(&nd->nd_dpa_lock, lock_flags);
}
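
The index arithmetic in dgrp_dpa() is the classic power-of-two ring-buffer idiom. A self-contained sketch, assuming (as DPA_MASK suggests) a power-of-two buffer size with the mask equal to size minus one:

/* Ring-buffer index arithmetic, extracted for clarity. */
#define RING_MAX	4096			/* must be a power of two */
#define RING_MASK	(RING_MAX - 1)

/* Bytes the writer may add without overtaking the reader; one slot is
 * kept empty so that in == out unambiguously means "buffer empty".
 */
static inline unsigned int ring_space(unsigned int in, unsigned int out)
{
	return (out - in - 1) & RING_MASK;
}

/* Bytes available to the reader. */
static inline unsigned int ring_count(unsigned int in, unsigned int out)
{
	return (in - out) & RING_MASK;
}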