static int exynos4_usb_phy1_init(struct platform_device *pdev)
{
	struct clk *otg_clk;
	struct clk *xusbxti_clk;
	u32 phyclk;
	u32 rstcon;
	int err;

	atomic_inc(&host_usage);

	otg_clk = clk_get(&pdev->dev, "otg");
	if (IS_ERR(otg_clk)) {
		dev_err(&pdev->dev, "Failed to get otg clock\n");
		return PTR_ERR(otg_clk);
	}

	err = clk_enable(otg_clk);
	if (err) {
		clk_put(otg_clk);
		return err;
	}

	if (exynos4_usb_host_phy_is_on()) {
		/* PHY already powered up; drop the clock reference we took */
		clk_disable(otg_clk);
		clk_put(otg_clk);
		return 0;
	}

	writel(readl(S5P_USBHOST_PHY_CONTROL) | S5P_USBHOST_PHY_ENABLE,
			S5P_USBHOST_PHY_CONTROL);

	/* set clock frequency for PLL */
	phyclk = readl(EXYNOS4_PHYCLK) & ~CLKSEL_MASK;

	xusbxti_clk = clk_get(&pdev->dev, "xusbxti");
	if (xusbxti_clk && !IS_ERR(xusbxti_clk)) {
		switch (clk_get_rate(xusbxti_clk)) {
		case 12 * MHZ:
			phyclk |= CLKSEL_12M;
			break;
		case 24 * MHZ:
			phyclk |= CLKSEL_24M;
			break;
		default:
		case 48 * MHZ:
			/* default reference clock */
			break;
		}
		clk_put(xusbxti_clk);
	}

	writel(phyclk, EXYNOS4_PHYCLK);

	/* floating prevention logic: disable */
	writel((readl(EXYNOS4_PHY1CON) | FPENABLEN), EXYNOS4_PHY1CON);

	/* set to normal HSIC 0 and 1 of PHY1 */
	writel((readl(EXYNOS4_PHYPWR) & ~PHY1_HSIC_NORMAL_MASK),
			EXYNOS4_PHYPWR);

	/* set to normal standard USB of PHY1 */
	writel((readl(EXYNOS4_PHYPWR) & ~PHY1_STD_NORMAL_MASK), EXYNOS4_PHYPWR);

	/* reset all ports of both PHY and Link */
	rstcon = readl(EXYNOS4_RSTCON) | HOST_LINK_PORT_SWRST_MASK |
		PHY1_SWRST_MASK;
	writel(rstcon, EXYNOS4_RSTCON);
	udelay(10);

	rstcon &= ~(HOST_LINK_PORT_SWRST_MASK | PHY1_SWRST_MASK);
	writel(rstcon, EXYNOS4_RSTCON);
	udelay(80);

	clk_disable(otg_clk);
	clk_put(otg_clk);

	return 0;
}
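
The atomic_inc(&host_usage) at the top of this function is only half of a usage count; the PHY teardown path has to decrement it and power the PHY down once the last user is gone. A minimal sketch of that exit counterpart, assuming the same host_usage counter and register names (the powerdown masks here are illustrative, not a verified copy of the driver):

static int exynos4_usb_phy1_exit(struct platform_device *pdev)
{
	struct clk *otg_clk;
	int err;

	/* keep the PHY powered while any other host user remains */
	if (atomic_dec_return(&host_usage) > 0)
		return 0;

	otg_clk = clk_get(&pdev->dev, "otg");
	if (IS_ERR(otg_clk))
		return PTR_ERR(otg_clk);

	err = clk_enable(otg_clk);
	if (err) {
		clk_put(otg_clk);
		return err;
	}

	/* power the analog blocks down and clear the PHY enable bit */
	writel(readl(EXYNOS4_PHYPWR) | PHY1_STD_ANALOG_POWERDOWN,
			EXYNOS4_PHYPWR);
	writel(readl(S5P_USBHOST_PHY_CONTROL) & ~S5P_USBHOST_PHY_ENABLE,
			S5P_USBHOST_PHY_CONTROL);

	clk_disable(otg_clk);
	clk_put(otg_clk);

	return 0;
}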
Example #2
/*
 * iowarrior_write
 */
static ssize_t iowarrior_write(struct file *file,
			       const char __user *user_buffer,
			       size_t count, loff_t *ppos)
{
	struct iowarrior *dev;
	int retval = 0;
	char *buf = NULL;	/* for IOW24 and IOW56 we need a buffer */
	struct urb *int_out_urb = NULL;

	dev = (struct iowarrior *)file->private_data;

	mutex_lock(&dev->mutex);
	/* verify that the device wasn't unplugged */
	if (!dev->present) {
		retval = -ENODEV;
		goto exit;
	}
	dbg("%s - minor %d, count = %zd", __func__, dev->minor, count);
	/* if count is 0 we're already done */
	if (count == 0) {
		retval = 0;
		goto exit;
	}
	/* We only accept full reports */
	if (count != dev->report_size) {
		retval = -EINVAL;
		goto exit;
	}
	switch (dev->product_id) {
	case USB_DEVICE_ID_CODEMERCS_IOW24:
	case USB_DEVICE_ID_CODEMERCS_IOWPV1:
	case USB_DEVICE_ID_CODEMERCS_IOWPV2:
	case USB_DEVICE_ID_CODEMERCS_IOW40:
		/* IOW24 and IOW40 use a synchronous call */
		buf = kmalloc(8, GFP_KERNEL);	/* 8 bytes are enough for both products */
		if (!buf) {
			retval = -ENOMEM;
			goto exit;
		}
		if (copy_from_user(buf, user_buffer, count)) {
			retval = -EFAULT;
			kfree(buf);
			goto exit;
		}
		retval = usb_set_report(dev->interface, 2, 0, buf, count);
		kfree(buf);
		goto exit;
	case USB_DEVICE_ID_CODEMERCS_IOW56:
		/* The IOW56 uses asynchronous IO and more urbs */
		if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) {
			/* Wait until we are below the limit for submitted urbs */
			if (file->f_flags & O_NONBLOCK) {
				retval = -EAGAIN;
				goto exit;
			} else {
				retval = wait_event_interruptible(dev->write_wait,
								  (!dev->present || (atomic_read(&dev->write_busy) < MAX_WRITES_IN_FLIGHT)));
				if (retval) {
					/* we were interrupted by a signal */
					retval = -ERESTART;
					goto exit;
				}
				if (!dev->present) {
					/* The device was unplugged */
					retval = -ENODEV;
					goto exit;
				}
				if (!dev->opened) {
					/* We were closed while waiting for an URB */
					retval = -ENODEV;
					goto exit;
				}
			}
		}
		atomic_inc(&dev->write_busy);
		int_out_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!int_out_urb) {
			retval = -ENOMEM;
			dbg("%s Unable to allocate urb ", __func__);
			goto error_no_urb;
		}
		buf = usb_buffer_alloc(dev->udev, dev->report_size,
				       GFP_KERNEL, &int_out_urb->transfer_dma);
		if (!buf) {
			retval = -ENOMEM;
			dbg("%s Unable to allocate buffer ", __func__);
			goto error_no_buffer;
		}
		usb_fill_int_urb(int_out_urb, dev->udev,
				 usb_sndintpipe(dev->udev,
						dev->int_out_endpoint->bEndpointAddress),
				 buf, dev->report_size,
				 iowarrior_write_callback, dev,
				 dev->int_out_endpoint->bInterval);
		int_out_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		if (copy_from_user(buf, user_buffer, count)) {
			retval = -EFAULT;
			goto error;
		}
		retval = usb_submit_urb(int_out_urb, GFP_KERNEL);
		if (retval) {
			dbg("%s submit error %d for urb nr.%d", __func__,
			    retval, atomic_read(&dev->write_busy));
			goto error;
		}
		/* submit was ok */
		retval = count;
		usb_free_urb(int_out_urb);
		goto exit;
	default:
		/* what do we have here ? An unsupported Product-ID ? */
		dev_err(&dev->interface->dev, "%s - not supported for product=0x%x\n",
			__func__, dev->product_id);
		retval = -EFAULT;
		goto exit;
	}
error:
	usb_buffer_free(dev->udev, dev->report_size, buf,
			int_out_urb->transfer_dma);
error_no_buffer:
	usb_free_urb(int_out_urb);
error_no_urb:
	atomic_dec(&dev->write_busy);
	wake_up_interruptible(&dev->write_wait);
exit:
	mutex_unlock(&dev->mutex);
	return retval;
}
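
Every URB submitted on the IOW56 path above leaves dev->write_busy incremented; it is the URB completion callback that decrements it and wakes writers sleeping on write_wait. A plausible sketch of that callback, inferred from the allocation and bookkeeping in iowarrior_write() rather than copied from the driver:

static void iowarrior_write_callback(struct urb *urb)
{
	struct iowarrior *dev = urb->context;
	int status = urb->status;

	/* -ENOENT, -ECONNRESET and -ESHUTDOWN just mean the urb was killed */
	if (status &&
	    !(status == -ENOENT || status == -ECONNRESET ||
	      status == -ESHUTDOWN))
		dbg("%s - nonzero write status received: %d",
		    __func__, status);

	/* free the DMA-coherent buffer allocated in iowarrior_write() */
	usb_buffer_free(urb->dev, urb->transfer_buffer_length,
			urb->transfer_buffer, urb->transfer_dma);

	/* balance the atomic_inc() done before submission */
	atomic_dec(&dev->write_busy);
	wake_up_interruptible(&dev->write_wait);
}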
Example #3
void p9stat_enter(ktime_t *t)
{
	*t = ktime_get();
	atomic_inc(&active_requests);
}
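
The helper above stamps a start time and raises active_requests; a matching exit helper would presumably drop the count and fold the elapsed time into a running total. A hedged sketch (the p9stat_exit name and the total_ns accumulator are assumptions, not part of the source above):

void p9stat_exit(ktime_t *t)
{
	/* fold the elapsed time into a global total, then drop the count */
	atomic64_add(ktime_to_ns(ktime_sub(ktime_get(), *t)), &total_ns);
	atomic_dec(&active_requests);
}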
Example #4
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
	struct mm_struct * mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		/*
		 * There are cases where the PTL is held to ensure no
		 * new threads start up in user mode using an mm, which
		 * allows optimizing out ipis; the tlb_gather_mmu code
		 * is an example.
		 */
		spin_unlock_wait(&oldmm->page_table_lock);
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	/* Copy the current MM stuff.. */
	memcpy(mm, oldmm, sizeof(*mm));
	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk,mm))
		goto fail_nocontext;

	retval = dup_mmap(mm, oldmm);
	if (retval)
		goto free_pt;

	mm->hiwater_rss = mm->rss;
	mm->hiwater_vm = mm->total_vm;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

free_pt:
	mmput(mm);
fail_nomem:
	return retval;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return retval;
}
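
The CLONE_VM branch takes its reference with atomic_inc(&oldmm->mm_users); that reference is later dropped through mmput(), which only tears the address space down when the final user disappears. Roughly, in the style of the same kernel generation (a simplified sketch; details vary across versions):

void mmput(struct mm_struct *mm)
{
	/* tear the address space down only on the last user reference */
	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		exit_mmap(mm);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);	/* drops the separate mm_count reference */
	}
}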
Example #5
/* copy the process */
static task_t *copy_process(unsigned long clone_flags,
				 unsigned long stack_start,
				 struct pt_regs *regs,
				 unsigned long stack_size,
				 int __user *parent_tidptr,
				 int __user *child_tidptr,
				 int pid)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
				p->user != &root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(p->thread_info->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	copy_flags(clone_flags, p);
	p->pid = pid;
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup;

	p->proc_dentry = NULL;

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);
	spin_lock_init(&p->proc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->it_real_value = 0;
	p->it_real_incr = 0;
	p->it_virt_value = cputime_zero;
	p->it_virt_incr = cputime_zero;
	p->it_prof_value = cputime_zero;
	p->it_prof_incr = cputime_zero;
	init_timer(&p->real_timer);
	p->real_timer.data = (unsigned long) p;

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->rchar = 0;		/* I/O counter: bytes read */
	p->wchar = 0;		/* I/O counter: bytes written */
	p->syscr = 0;		/* I/O counter: read syscalls */
	p->syscw = 0;		/* I/O counter: write syscalls */
	acct_clear_integrals(p);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->security = NULL;
	p->io_context = NULL;
	p->io_wait = NULL;
	p->audit_context = NULL;
#ifdef CONFIG_NUMA
 	p->mempolicy = mpol_copy(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
 		goto bad_fork_cleanup;
 	}
#endif

	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespace(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);

	/* Our parent execution domain becomes current domain.
	   These must match for thread signalling to apply. */

	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/* Perform scheduler related setup */
	sched_fork(p);

	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so cpus_allowed mask cannot
	 * have changed. The cpus_allowed mask of the parent may have
	 * changed after it was copied first time, and it may then move to
	 * another CPU - so we re-copy it here and set the child's CPU to
	 * the parent's CPU. This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	set_task_cpu(p, smp_processor_id());

	/*
	 * Check for pending SIGKILL! The new thread should not be allowed
	 * to slip out of an OOM kill. (or normal SIGKILL.)
	 */
	if (sigismember(&current->pending.signal, SIGKILL)) {
		write_unlock_irq(&tasklist_lock);
		retval = -EINTR;
		goto bad_fork_cleanup_namespace;
	}

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	if (clone_flags & CLONE_THREAD) {
		spin_lock(&current->sighand->siglock);
		/*
		 * Important: if an exit-all has been started then
		 * do not create this new thread - the whole thread
		 * group is supposed to exit anyway.
		 */
		if (current->signal->flags & SIGNAL_GROUP_EXIT) {
			spin_unlock(&current->sighand->siglock);
			write_unlock_irq(&tasklist_lock);
			retval = -EAGAIN;
			goto bad_fork_cleanup_namespace;
		}
		p->group_leader = current->group_leader;

		if (current->signal->group_stop_count > 0) {
			/*
			 * There is an all-stop in progress for the group.
			 * We ourselves will stop as soon as we check signals.
			 * Make the new thread part of that group stop too.
			 */
			current->signal->group_stop_count++;
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		spin_unlock(&current->sighand->siglock);
	}

	SET_LINKS(p);
	if (unlikely(p->ptrace & PT_PTRACED))
		__ptrace_link(p, current->parent);

	attach_pid(p, PIDTYPE_PID, p->pid);
	attach_pid(p, PIDTYPE_TGID, p->tgid);
	if (thread_group_leader(p)) {
		attach_pid(p, PIDTYPE_PGID, process_group(p));
		attach_pid(p, PIDTYPE_SID, p->signal->session);
		if (p->pid)
			__get_cpu_var(process_counts)++;
	}

	nr_threads++;
	total_forks++;
	write_unlock_irq(&tasklist_lock);
	retval = 0;

fork_out:
	if (retval)
		return ERR_PTR(retval);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	exit_signal(p);
bad_fork_cleanup_sighand:
	exit_sighand(p);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
#endif
bad_fork_cleanup:
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(p->thread_info->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
	goto fork_out;
}
Example #6
int
SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int long_op)
{
	int rc = 0;
	unsigned int receive_len;
	long timeout;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cERROR(1,("Null smb session"));
		return -EIO;
	}
	if(ses->server == NULL) {
		cERROR(1,("Null tcp session"));
		return -EIO;
	}

	/* Ensure that we do not send more than 50 overlapping requests 
	   to the same server. We may make this configurable later or
	   use ses->maxReq */
	if(long_op == -1) {
		/* oplock breaks must not be held up */
		atomic_inc(&ses->server->inFlight);
	} else {
		spin_lock(&GlobalMid_Lock); 
		while(1) {        
			if(atomic_read(&ses->server->inFlight) >= CIFS_MAX_REQ){
				spin_unlock(&GlobalMid_Lock);
				wait_event(ses->server->request_q,
					atomic_read(&ses->server->inFlight)
					 < CIFS_MAX_REQ);
				spin_lock(&GlobalMid_Lock);
			} else {
				if(ses->server->tcpStatus == CifsExiting) {
					spin_unlock(&GlobalMid_Lock);
					return -ENOENT;
				}

			/* can not count locking commands against total since
			   they are allowed to block on server */
					
				if(long_op < 3) {
				/* update # of requests on the wire to server */
					atomic_inc(&ses->server->inFlight);
				}
				spin_unlock(&GlobalMid_Lock);
				break;
			}
		}
	}
	/* make sure that we sign in the same order that we send on this socket 
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	down(&ses->server->tcpSem); 

	if (ses->server->tcpStatus == CifsExiting) {
		rc = -ENOENT;
		goto out_unlock;
	} else if (ses->server->tcpStatus == CifsNeedReconnect) {
		cFYI(1,("tcp session dead - return to caller to retry"));
		rc = -EAGAIN;
		goto out_unlock;
	} else if (ses->status != CifsGood) {
		/* check if SMB session is bad because we are setting it up */
		if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && 
			(in_buf->Command != SMB_COM_NEGOTIATE)) {
			rc = -EAGAIN;
			goto out_unlock;
		} /* else ok - we are setting up session */
	}
	midQ = AllocMidQEntry(in_buf, ses);
	if (midQ == NULL) {
		up(&ses->server->tcpSem);
		/* If not lock req, update # of requests on wire to server */
		if(long_op < 3) {
			atomic_dec(&ses->server->inFlight); 
			wake_up(&ses->server->request_q);
		}
		return -ENOMEM;
	}

	if (in_buf->smb_buf_length > CIFS_MAX_MSGSIZE + MAX_CIFS_HDR_SIZE - 4) {
		up(&ses->server->tcpSem);
		cERROR(1,
		       ("Illegal length, greater than maximum frame, %d ",
			in_buf->smb_buf_length));
		DeleteMidQEntry(midQ);
		/* If not lock req, update # of requests on wire to server */
		if(long_op < 3) {
			atomic_dec(&ses->server->inFlight); 
			wake_up(&ses->server->request_q);
		}
		return -EIO;
	}

	if (in_buf->smb_buf_length > 12)
		in_buf->Flags2 = cpu_to_le16(in_buf->Flags2);
	
	rc = cifs_sign_smb(in_buf, ses, &midQ->sequence_number);

	midQ->midState = MID_REQUEST_SUBMITTED;
	rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
		      (struct sockaddr *) &(ses->server->addr.sockAddr));
	if(rc < 0) {
		DeleteMidQEntry(midQ);
		up(&ses->server->tcpSem);
		/* If not lock req, update # of requests on wire to server */
		if(long_op < 3) {
			atomic_dec(&ses->server->inFlight); 
			wake_up(&ses->server->request_q);
		}
		return rc;
	} else
		up(&ses->server->tcpSem);
	if (long_op == -1)
		goto cifs_no_response_exit;
	else if (long_op == 2) /* writes past end of file can take looooong time */
		timeout = 300 * HZ;
	else if (long_op == 1)
		timeout = 45 * HZ; /* should be greater than the
			server's oplock break timeout (about 43 seconds) */
	else if (long_op > 2) {
		timeout = MAX_SCHEDULE_TIMEOUT;
	} else
		timeout = 15 * HZ;
	/* wait for 15 seconds or until woken up due to response arriving or 
	   due to last connection to this server being unmounted */
	if (signal_pending(current)) {
		/* if a signal is pending, do not hold up the user for the full
		   smb timeout, but still give the response a chance to complete */
		if(midQ->midState & MID_REQUEST_SUBMITTED) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			timeout = sleep_on_timeout(&ses->server->response_q,2 * HZ);
		}
	} else { /* using normal timeout */
		/* timeout = wait_event_interruptible_timeout(ses->server->response_q,
			(midQ->midState & MID_RESPONSE_RECEIVED) || 
			((ses->server->tcpStatus != CifsGood) &&
			 (ses->server->tcpStatus != CifsNew)),
			timeout); */ 
		/* Can not allow user interrupts- wreaks havoc with performance */
		if(midQ->midState & MID_REQUEST_SUBMITTED) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			timeout = sleep_on_timeout(&ses->server->response_q,timeout);
		}
	}
    
	spin_lock(&GlobalMid_Lock);
	if (midQ->resp_buf) {
		spin_unlock(&GlobalMid_Lock);
		receive_len = be32_to_cpu(midQ->resp_buf->smb_buf_length);
	} else {
		cERROR(1,("No response buffer"));
		if(midQ->midState == MID_REQUEST_SUBMITTED) {
			if(ses->server->tcpStatus == CifsExiting)
				rc = -EHOSTDOWN;
			else {
				ses->server->tcpStatus = CifsNeedReconnect;
				midQ->midState = MID_RETRY_NEEDED;
			}
		}

		if (rc != -EHOSTDOWN) {
			if(midQ->midState == MID_RETRY_NEEDED) {
				rc = -EAGAIN;
				cFYI(1,("marking request for retry"));
			} else {
				rc = -EIO;
			}
		}
		spin_unlock(&GlobalMid_Lock);
		DeleteMidQEntry(midQ);
		/* If not lock req, update # of requests on wire to server */
		if(long_op < 3) {
			atomic_dec(&ses->server->inFlight); 
			wake_up(&ses->server->request_q);
		}
		return rc;
	}
  
	if (receive_len > CIFS_MAX_MSGSIZE + MAX_CIFS_HDR_SIZE) {
		cERROR(1,
		       ("Frame too large received.  Length: %d  Xid: %d",
			receive_len, xid));
		rc = -EIO;
	} else {		/* rcvd frame is ok */

		if (midQ->resp_buf && out_buf
		    && (midQ->midState == MID_RESPONSE_RECEIVED)) {
			memcpy(out_buf, midQ->resp_buf,
			       receive_len +
			       4 /* include 4 byte RFC1001 header */ );

			dump_smb(out_buf, 92);
			/* convert the length into a more usable form */
			out_buf->smb_buf_length =
			    be32_to_cpu(out_buf->smb_buf_length);
			if((out_buf->smb_buf_length > 24) &&
			   (ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))) {
				rc = cifs_verify_signature(out_buf, ses->mac_signing_key,midQ->sequence_number); /* BB fix BB */
				if(rc)
					cFYI(1,("Unexpected signature received from server"));
			}

			if (out_buf->smb_buf_length > 12)
				out_buf->Flags2 = le16_to_cpu(out_buf->Flags2);
			if (out_buf->smb_buf_length > 28)
				out_buf->Pid = le16_to_cpu(out_buf->Pid);
			if (out_buf->smb_buf_length > 28)
				out_buf->PidHigh =
				    le16_to_cpu(out_buf->PidHigh);

			*pbytes_returned = out_buf->smb_buf_length;

			/* BB special case reconnect tid and reconnect uid here? */
			rc = map_smb_to_linux_error(out_buf);

			/* convert ByteCount if necessary */
			if (receive_len >=
			    sizeof (struct smb_hdr) -
			    4 /* do not count RFC1001 header */  +
			    (2 * out_buf->WordCount) + 2 /* bcc */ )
				BCC(out_buf) = le16_to_cpu(BCC(out_buf));
		} else {
			rc = -EIO;
			cFYI(1,("Bad MID state? "));
		}
	}
cifs_no_response_exit:
	DeleteMidQEntry(midQ);

	if(long_op < 3) {
		atomic_dec(&ses->server->inFlight); 
		wake_up(&ses->server->request_q);
	}

	return rc;

out_unlock:
	up(&ses->server->tcpSem);
	/* If not lock req, update # of requests on wire to server */
	if(long_op < 3) {
		atomic_dec(&ses->server->inFlight); 
		wake_up(&ses->server->request_q);
	}

	return rc;
}
Example #7
/*
 * journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void journal_commit_transaction(journal_t *journal)
{
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned int blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int write_op = WRITE_SYNC_PLUG;

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

#ifdef COMMIT_STATS
	spin_lock(&journal->j_list_lock);
	summarise_journal_usage(journal);
	spin_unlock(&journal->j_list_lock);
#endif

	/* Do we need to erase the effects of a prior journal_flush? */
	if (journal->j_flags & JFS_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	trace_jbd_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd_commit_locking(journal, commit_transaction);
	spin_lock(&commit_transaction->t_handle_lock);
	while (commit_transaction->t_updates) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (commit_transaction->t_updates) {
			spin_unlock(&commit_transaction->t_handle_lock);
			spin_unlock(&journal->j_state_lock);
			schedule();
			spin_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (commit_transaction->t_outstanding_credits <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A journal_get_undo_access()+journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug (3, "JBD: commit phase 1\n");

	/*
	 * Clear the revoked flag to reflect that there are no revoked buffers
	 * in the next transaction which is going to be started.
	 */
	journal_clear_buffer_revoked_flags(journal);

	/*
	 * Switch to a new revoke table.
	 */
	journal_switch_revoke_table(journal);

	trace_jbd_commit_flushing(journal, commit_transaction);
	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	spin_unlock(&journal->j_state_lock);

	jbd_debug (3, "JBD: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction,
					  write_op);

	/*
	 * Wait for all previously submitted IO to complete.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_locked_list) {
		struct buffer_head *bh;

		jh = commit_transaction->t_locked_list->b_tprev;
		bh = jh2bh(jh);
		get_bh(bh);
		if (buffer_locked(bh)) {
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			spin_lock(&journal->j_list_lock);
		}
		if (unlikely(!buffer_uptodate(bh))) {
			if (!trylock_page(bh->b_page)) {
				spin_unlock(&journal->j_list_lock);
				lock_page(bh->b_page);
				spin_lock(&journal->j_list_lock);
			}
			if (bh->b_page->mapping)
				set_bit(AS_EIO, &bh->b_page->mapping->flags);

			unlock_page(bh->b_page);
			SetPageError(bh->b_page);
			err = -EIO;
		}
		if (!inverted_lock(journal, bh)) {
			put_bh(bh);
			spin_lock(&journal->j_list_lock);
			continue;
		}
		if (buffer_jbd(bh) && bh2jh(bh) == jh &&
		    jh->b_transaction == commit_transaction &&
		    jh->b_jlist == BJ_Locked) {
			__journal_unfile_buffer(jh);
			jbd_unlock_bh_state(bh);
			journal_remove_journal_head(bh);
			put_bh(bh);
		} else {
			jbd_unlock_bh_state(bh);
		}
		release_data_buffer(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);

	if (err) {
		char b[BDEVNAME_SIZE];

		printk(KERN_WARNING
			"JBD: Detected IO errors while flushing file data "
			"on %s\n", bdevname(journal->j_fs_dev, b));
		if (journal->j_flags & JFS_ABORT_ON_SYNCDATA_ERR)
			journal_abort(journal, err);
		err = 0;
	}

	journal_write_revoke_records(journal, commit_transaction, write_op);

	/*
	 * If we found any dirty or locked buffers, then we should have
	 * looped back up to the write_out_data label.  If there weren't
	 * any then journal_clean_data_list should have wiped the list
	 * clean by now, so check that it is in fact empty.
	 */
	J_ASSERT (commit_transaction->t_sync_datalist == NULL);

	jbd_debug (3, "JBD: commit phase 3\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	spin_unlock(&journal->j_state_lock);

	trace_jbd_commit_logging(journal, commit_transaction);
	J_ASSERT(commit_transaction->t_nr_buffers <=
		 commit_transaction->t_outstanding_credits);

	descriptor = NULL;
	bufs = 0;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by journal_next_log_block() also.
		 */
		commit_transaction->t_outstanding_credits--;

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO*/

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = journal_write_metadata_buffer(commit_transaction,
						      jh, &new_jh, blocknr);
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
                   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JFS_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JFS_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += sizeof(journal_block_tag_t);
		space_left -= sizeof(journal_block_tag_t);

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < sizeof(journal_block_tag_t) + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(write_op, bh);
			}
			cond_resched();

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd_debug(3, "JBD: commit phase 4\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
                   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/*
		 * Wake up any transactions which were waiting for this
		 * IO to complete. The barrier must be here so that changes
		 * by journal_file_buffer() take effect before wake_up_bit()
		 * does the waitqueue check.
		 */
		smp_mb();
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD: commit phase 5\n");

	/* Here we wait for the revoke record and descriptor record buffers */
 wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		journal_unfile_buffer(journal, jh);
		journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		journal_abort(journal, err);

	jbd_debug(3, "JBD: commit phase 6\n");

	if (journal_write_commit_record(journal, commit_transaction))
		err = -EIO;

	if (err)
		journal_abort(journal, err);

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_sync_datalist == NULL);
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction ||
			jh->b_transaction == journal->j_running_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 */
		if (jh->b_committed_data) {
			jbd_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			__journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future now that the last use has
		 * been committed.  That's not only a performance gain,
		 * it also stops aliasing problems if the buffer is left
		 * behind for writeback and gets reallocated for another
		 * use in a different page. */
		if (buffer_freed(bh)) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
			__journal_refile_buffer(jh);
			jbd_unlock_bh_state(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/* The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list. */
			JBUFFER_TRACE(jh, "refile or unfile freed buffer");
			__journal_refile_buffer(jh);
			if (!jh->b_transaction) {
				jbd_unlock_bh_state(bh);
				 /* needs a brelse */
				journal_remove_journal_head(bh);
				release_buffer_page(bh);
			} else
				jbd_unlock_bh_state(bh);
		}
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 8\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT);

	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in commit time
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time*3 +
				journal->j_average_commit_time) / 4;
	else
		journal->j_average_commit_time = commit_time;

	spin_unlock(&journal->j_state_lock);

	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__journal_drop_transaction(journal, commit_transaction);
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	trace_jbd_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	wake_up(&journal->j_wait_done_commit);
}
Example #8
/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited()
 * function had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
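
The nesting counter only pays off together with the matching un-expedite call and the predicate that the grace-period machinery consults. A sketch of both, modeled on the upstream API (treat the bodies as illustrative):

/**
 * rcu_unexpedite_gp - Cancel a prior call to rcu_expedite_gp()
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}

/* Should normal grace periods be expedited right now? */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}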
Example #9
/*
 * The idea behind this state machine is taken from Linux's
 * lockless stop-machine code.
 */
void *state_machine(void *args) {
	struct rusage u, v;
	int tid = (int)((unsigned long)args);
	unsigned long assemble, stamp_counter, stamp_counter_mp;
	int master = 0, slave;

	__e_m(tid > (spawn - 1), "How'd this happen?");

	if (tid == 0)
		master = 1;

	slave = !master;

	/* We want to see how badly we ended up spinning,
	 * waiting for threads on other CPUs to catch up.
	 *
	 * We'll use these variables as very unscientific
	 * counters.
	 */
	assemble = stamp_counter = stamp_counter_mp = 0;

	if (slave) {
		while (atomic_read(&command) != SETUP);
	}

	if (master) {
		atomic_set(&assembled, 0);
		atomic_set(&command, SETUP);
	}

	/*
	 * We are screwed if this fails: there will be gaps
	 * in ids. And the _mp version will hang forever.
	 */
	thread_bind(tid);

	/* Remember the page-faults and such. */
	__w(
	getrusage(RUSAGE_THREAD, &u) != 0,
	   	"(errno=%d)", errno);

	/* Done with setup. */
	atomic_inc(&assembled);

	/* wait for master to tell us to assemble */
	if (slave) {
		while (atomic_read(&command) != BEFORE_STAMP)
			assemble++;
	}

	/* give assemble command */
	if (master) {
		/* We want to kick-off all the slaves at
		 * the same time. Wait for them to assemble. */
		while (atomic_read(&assembled) != spawn);

		atomic_set(&assembled, 0);
		atomic_set(&command, BEFORE_STAMP);
	}

	/* assemble work */
	atomic_inc(&assembled);

	/* Everybody is at BEFORE_STAMP, raring to STAMP */

	/* give stamp command */
	if (master) {
		/* wait for slaves */
		while (atomic_read(&assembled) != spawn)
			assemble++;
		atomic_set(&assembled, 0);
		atomic_set(&command, STAMP_COUNTER);
	}

	/* wait for stamp command */
	if (slave) {
		while (atomic_read(&command) != STAMP_COUNTER)
			stamp_counter++;
	}

	/* stamp work */
	rdtscll(counter[tid].ts);
	atomic_inc(&assembled);

	/* give stamp_mp command */
	if (master) {
		/* wait for slaves */
		while (atomic_read(&assembled) != spawn)
			stamp_counter++;

		atomic_set(&assembled, 0);
		atomic_set(&command, STAMP_COUNTER_MP);
	}

	/* wait for stamp command */
	if (slave) {
		while (atomic_read(&command) != STAMP_COUNTER_MP)
			stamp_counter_mp++;
	}

	/*
	 * Do the work.
	 */
	while (atomic_read(&assembled) != tid);
	rdtscll(counter_mp[tid].ts);
	atomic_inc(&assembled);

	if (master) {
		while (atomic_read(&assembled) != spawn);
	}

	__w(
		getrusage(RUSAGE_THREAD, &v) != 0,
	   	"(errno=%d)", errno);
	
	rundata[tid].u = u;
	rundata[tid].v = v;
	rundata[tid].assemble = assemble;

	rundata[tid].stamp_counter = stamp_counter;
	rundata[tid].stamp_counter_mp = stamp_counter_mp;

	return NULL;
}
Example #10
static int rtc_release(struct inode *inode, struct file *file)
{
	atomic_inc(&rtc_status);
	return 0;
}
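
The increment on release only makes sense against an open() that claimed the device by decrementing the same counter; this is a common single-open exclusion idiom. A hedged sketch of the counterpart, assuming rtc_status starts at ATOMIC_INIT(1) meaning "free":

static atomic_t rtc_status = ATOMIC_INIT(1);	/* 1 == device free */

static int rtc_open(struct inode *inode, struct file *file)
{
	/* claim the single slot; a second opener fails the test */
	if (!atomic_dec_and_test(&rtc_status)) {
		atomic_inc(&rtc_status);	/* undo our decrement */
		return -EBUSY;
	}
	return 0;
}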
Example #11
void llog_handle_get(struct llog_handle *loghandle)
{
	atomic_inc(&loghandle->lgh_refcount);
}
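
As with any reference-count get, a matching put frees the handle when the count reaches zero. A sketch following the obvious naming convention (llog_handle_put and llog_free_handle are assumptions here, not taken from the snippet above):

void llog_handle_put(struct llog_handle *loghandle)
{
	LASSERT(atomic_read(&loghandle->lgh_refcount) > 0);
	if (atomic_dec_and_test(&loghandle->lgh_refcount))
		llog_free_handle(loghandle);
}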
Example #12
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
{
    struct ehca_eq *eq = &shca->eq;
    struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
    u64 eqe_value, ret;
    int eqe_cnt, i;
    int eq_empty = 0;

    spin_lock(&eq->irq_spinlock);
    if (is_irq) {
        const int max_query_cnt = 100;
        int query_cnt = 0;
        int int_state = 1;
        do {
            int_state = hipz_h_query_int_state(
                            shca->ipz_hca_handle, eq->ist);
            query_cnt++;
            iosync();
        } while (int_state && query_cnt < max_query_cnt);
        if (unlikely((query_cnt == max_query_cnt)))
            ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
                     int_state, query_cnt);
    }

    /* read out all eqes */
    eqe_cnt = 0;
    do {
        u32 token;
        eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
        if (!eqe_cache[eqe_cnt].eqe)
            break;
        eqe_value = eqe_cache[eqe_cnt].eqe->entry;
        if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
            token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
            read_lock(&ehca_cq_idr_lock);
            eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
            if (eqe_cache[eqe_cnt].cq)
                atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
            read_unlock(&ehca_cq_idr_lock);
            if (!eqe_cache[eqe_cnt].cq) {
                ehca_err(&shca->ib_device,
                         "Invalid eqe for non-existing cq "
                         "token=%x", token);
                continue;
            }
        } else
            eqe_cache[eqe_cnt].cq = NULL;
        eqe_cnt++;
    } while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
    if (!eqe_cnt) {
        if (is_irq)
            ehca_dbg(&shca->ib_device,
                     "No eqe found for irq event");
        goto unlock_irq_spinlock;
    } else if (!is_irq) {
        ret = hipz_h_eoi(eq->ist);
        if (ret != H_SUCCESS)
            ehca_err(&shca->ib_device,
                     "bad return code EOI -rc = %lld\n", ret);
        ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
    }
    if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
        ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
    /* enable irq for new packets */
    for (i = 0; i < eqe_cnt; i++) {
        if (eq->eqe_cache[i].cq)
            reset_eq_pending(eq->eqe_cache[i].cq);
    }
    /* check eq */
    spin_lock(&eq->spinlock);
    eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
    spin_unlock(&eq->spinlock);
    /* call completion handler for cached eqes */
    for (i = 0; i < eqe_cnt; i++)
        if (eq->eqe_cache[i].cq) {
            if (ehca_scaling_code)
                queue_comp_task(eq->eqe_cache[i].cq);
            else {
                struct ehca_cq *cq = eq->eqe_cache[i].cq;
                comp_event_callback(cq);
                if (atomic_dec_and_test(&cq->nr_events))
                    wake_up(&cq->wait_completion);
            }
        } else {
            ehca_dbg(&shca->ib_device, "Got non completion event");
            parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
        }
    /* poll eq if not empty */
    if (eq_empty)
        goto unlock_irq_spinlock;
    do {
        struct ehca_eqe *eqe;
        eqe = ehca_poll_eq(shca, &shca->eq);
        if (!eqe)
            break;
        process_eqe(shca, eqe);
    } while (1);

unlock_irq_spinlock:
    spin_unlock(&eq->irq_spinlock);
}
Example #13
File: msg.c Project: 274914765/C
long do_msgsnd(int msqid, long mtype, void __user *mtext,
        size_t msgsz, int msgflg)
{
    struct msg_queue *msq;
    struct msg_msg *msg;
    int err;
    struct ipc_namespace *ns;

    ns = current->nsproxy->ipc_ns;

    if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
        return -EINVAL;
    if (mtype < 1)
        return -EINVAL;

    msg = load_msg(mtext, msgsz);
    if (IS_ERR(msg))
        return PTR_ERR(msg);

    msg->m_type = mtype;
    msg->m_ts = msgsz;

    msq = msg_lock_check(ns, msqid);
    if (IS_ERR(msq)) {
        err = PTR_ERR(msq);
        goto out_free;
    }

    for (;;) {
        struct msg_sender s;

        err = -EACCES;
        if (ipcperms(&msq->q_perm, S_IWUGO))
            goto out_unlock_free;

        err = security_msg_queue_msgsnd(msq, msg, msgflg);
        if (err)
            goto out_unlock_free;

        if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
                1 + msq->q_qnum <= msq->q_qbytes) {
            break;
        }

        /* queue full, wait: */
        if (msgflg & IPC_NOWAIT) {
            err = -EAGAIN;
            goto out_unlock_free;
        }
        ss_add(msq, &s);
        ipc_rcu_getref(msq);
        msg_unlock(msq);
        schedule();

        ipc_lock_by_ptr(&msq->q_perm);
        ipc_rcu_putref(msq);
        if (msq->q_perm.deleted) {
            err = -EIDRM;
            goto out_unlock_free;
        }
        ss_del(&s);

        if (signal_pending(current)) {
            err = -ERESTARTNOHAND;
            goto out_unlock_free;
        }
    }

    msq->q_lspid = task_tgid_vnr(current);
    msq->q_stime = get_seconds();

    if (!pipelined_send(msq, msg)) {
        /* no one is waiting for this message, enqueue it */
        list_add_tail(&msg->m_list, &msq->q_messages);
        msq->q_cbytes += msgsz;
        msq->q_qnum++;
        atomic_add(msgsz, &ns->msg_bytes);
        atomic_inc(&ns->msg_hdrs);
    }

    err = 0;
    msg = NULL;

out_unlock_free:
    msg_unlock(msq);
out_free:
    if (msg != NULL)
        free_msg(msg);
    return err;
}
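
The atomic_add()/atomic_inc() on ns->msg_bytes and ns->msg_hdrs are namespace-wide accounting, so whatever dequeues or destroys a message must subtract the same amounts. A simplified sketch of the balancing side as it would appear in the receive path (not a verbatim copy of do_msgrcv):

/* after unlinking msg from msq->q_messages, still under the queue lock */
msq->q_rtime = get_seconds();
msq->q_lrpid = task_tgid_vnr(current);
msq->q_cbytes -= msg->m_ts;
msq->q_qnum--;
atomic_sub(msg->m_ts, &ns->msg_bytes);    /* balances atomic_add() above */
atomic_dec(&ns->msg_hdrs);                /* balances atomic_inc() above */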
Example #14
/**
 * Release file.
 *
 * \param inode device inode
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * If the hardware lock is held then free it, and take it again for the kernel
 * context since it's necessary to reclaim buffers. Unlink the file private
 * data from its list and free it. Decreases the open count and if it reaches
 * zero calls drm_lastclose().
 */

#if 0 /* old drm_release equivalent from DragonFly */
void drm_cdevpriv_dtor(void *cd)
{
	struct drm_file *file_priv = cd;
	struct drm_device *dev = file_priv->dev;
	int retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	DRM_LOCK(dev);

	if (dev->driver->preclose != NULL)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)dev->dev, dev->open_count);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_release(dev, file_priv);

	if (dev->primary->master->lock.hw_lock
	    && _DRM_LOCK_IS_HELD(dev->primary->master->lock.hw_lock->lock)
	    && dev->primary->master->lock.file_priv == file_priv) {
		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
			  DRM_CURRENTPID,
			  _DRM_LOCKING_CONTEXT(dev->primary->master->lock.hw_lock->lock));
		if (dev->driver->reclaim_buffers_locked != NULL)
			dev->driver->reclaim_buffers_locked(dev, file_priv);

		drm_lock_free(&dev->primary->master->lock,
		    _DRM_LOCKING_CONTEXT(dev->primary->master->lock.hw_lock->lock));

				/* FIXME: may require heavy-handed reset of
                                   hardware at this point, possibly
                                   processed via a callback to the X
                                   server. */
	} else if (dev->driver->reclaim_buffers_locked != NULL &&
	    dev->primary->master->lock.hw_lock != NULL) {
		/* The lock is required to reclaim buffers */
		for (;;) {
			if (!dev->primary->master->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = EINTR;
				break;
			}
			if (drm_lock_take(&dev->primary->master->lock, DRM_KERNEL_CONTEXT)) {
				dev->primary->master->lock.file_priv = file_priv;
				dev->primary->master->lock.lock_time = jiffies;
				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
				break;	/* Got lock */
			}
			/* Contention */
			retcode = DRM_LOCK_SLEEP(dev, &dev->primary->master->lock.lock_queue,
			    PCATCH, "drmlk2", 0);
			if (retcode)
				break;
		}
		if (retcode == 0) {
			dev->driver->reclaim_buffers_locked(dev, file_priv);
			drm_lock_free(&dev->primary->master->lock, DRM_KERNEL_CONTEXT);
		}
	}

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked)
		drm_reclaim_buffers(dev, file_priv);

	funsetown(&dev->buf_sigio);

	if (dev->driver->postclose != NULL)
		dev->driver->postclose(dev, file_priv);
	list_del(&file_priv->lhead);


	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	device_unbusy(dev->dev);
	if (--dev->open_count == 0) {
		retcode = drm_lastclose(dev);
	}

	DRM_UNLOCK(dev);
}
#endif /* 0 */
Example #15
int adm_connect_afe_port(int mode, int session_id, int port_id)
{
	struct adm_cmd_connect_afe_port	cmd;
	int ret = 0;
	int index;

	pr_debug("%s: port %d session id:%d mode:%d\n", __func__,
				port_id, session_id, mode);

	port_id = afe_convert_virtual_to_portid(port_id);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}
	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	cmd.hdr.pkt_size = sizeof(cmd);
	cmd.hdr.src_svc = APR_SVC_ADM;
	cmd.hdr.src_domain = APR_DOMAIN_APPS;
	cmd.hdr.src_port = port_id;
	cmd.hdr.dest_svc = APR_SVC_ADM;
	cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
	cmd.hdr.dest_port = port_id;
	cmd.hdr.token = port_id;
	cmd.hdr.opcode = ADM_CMD_CONNECT_AFE_PORT;

	cmd.mode = mode;
	cmd.session_id = session_id;
	cmd.afe_port_id = port_id;

	atomic_set(&this_adm.copp_stat[index], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
	if (ret < 0) {
		pr_err("%s:ADM enable for port %d failed\n",
					__func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_adm.wait,
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s ADM connect AFE failed for port %d\n", __func__,
							port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:
	return ret;
}
Example #16
File: rtas.c Project: 710leo/LVS
int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data)
{
	atomic_inc(&data->working);
	return __rtas_suspend_last_cpu(data, 0);
}
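Presumably there is a sibling helper for the non-final CPUs that skips the increment, since only the last CPU accounts itself in data->working. A minimal sketch under that assumption (rtas_suspend_cpu and the shape of __rtas_suspend_cpu are assumed, not taken from this snippet):

int rtas_suspend_cpu(struct rtas_suspend_me_data *data)
{
	/* assumption: only the last CPU bumps data->working */
	return __rtas_suspend_cpu(data, 0);
}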
Example #17
int adm_multi_ch_copp_pseudo_open_v3(int port_id, int path,
				int rate, int channel_mode,
				int topology)
{
	struct adm_multi_channel_copp_open_v3	open;
	int ret = 0;
	int index;

	pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
				port_id, path, rate, channel_mode);

	port_id = afe_convert_virtual_to_portid(port_id);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	{
		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		open.hdr.pkt_size = sizeof(open);
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = port_id;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = port_id;
		open.hdr.token = port_id;
		open.hdr.opcode = ADM_CMD_MULTI_CHANNEL_COPP_OPEN_V3;
		memset(open.dev_channel_mapping, 0, 8);

		if (channel_mode == 1)	{
			open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
		} else if (channel_mode == 2) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
		} else if (channel_mode == 4) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_LS;
			open.dev_channel_mapping[3] = PCM_CHANNEL_RS;
		} else if (channel_mode == 6) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[2] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[3] = PCM_CHANNEL_LS;
			open.dev_channel_mapping[4] = PCM_CHANNEL_RS;
			open.dev_channel_mapping[5] = PCM_CHANNEL_LFE;
		} else {
			pr_err("%s invalid num_chan %d\n", __func__,
					channel_mode);
			return -EINVAL;
		}

		open.mode = path;
		open.endpoint_id1 = port_id;
		open.endpoint_id2 = 0xFFFF;
		open.bit_width = 16;

		if (path == ADM_PATH_PLAYBACK)
			open.topology_id = get_adm_rx_topology();
		else {
			open.topology_id = get_adm_tx_topology();
			if ((open.topology_id ==
				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
				(open.topology_id ==
				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;
		}

		if (open.topology_id == 0)
			open.topology_id = topology;

		open.channel_config = channel_mode & 0x00FF;
		open.rate  = rate;
		open.flags = 0;

		pr_debug("%s: channel_config=%d port_id=%d rate=%d" \
			"topology_id=0x%X\n", __func__, open.channel_config,\
			open.endpoint_id1, open.rate,\
			open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);
		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d failed\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d\n", __func__,
								port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:
	return ret;
}
Example #18
int sock_cntr_open(struct fid_domain *domain, struct fi_cntr_attr *attr,
		   struct fid_cntr **cntr, void *context)
{
	int ret;
	struct sock_domain *dom;
	struct sock_cntr *_cntr;
	struct fi_wait_attr wait_attr;
	struct sock_fid_list *list_entry;
	struct sock_wait *wait;

	dom = container_of(domain, struct sock_domain, dom_fid);
	if (attr && sock_cntr_verify_attr(attr))
		return -FI_ENOSYS;

	_cntr = calloc(1, sizeof(*_cntr));
	if (!_cntr)
		return -FI_ENOMEM;

	ret = pthread_cond_init(&_cntr->cond, NULL);
	if (ret)
		goto err;

	if (attr == NULL)
		memcpy(&_cntr->attr, &sock_cntr_add, sizeof(sock_cntr_attr));
	else
		memcpy(&_cntr->attr, attr, sizeof(sock_cntr_attr));

	switch (_cntr->attr.wait_obj) {

	case FI_WAIT_NONE:
	case FI_WAIT_UNSPEC:
	case FI_WAIT_MUTEX_COND:
		_cntr->signal = 0;
		break;

	case FI_WAIT_FD:
		wait_attr.flags = 0;
		wait_attr.wait_obj = FI_WAIT_FD;
		ret = sock_wait_open(&dom->fab->fab_fid, &wait_attr,
				     &_cntr->waitset);
		if (ret) {
			ret = FI_EINVAL;
			goto err;
		}
		_cntr->signal = 1;
		break;

	case FI_WAIT_SET:
		if (!attr) {
			ret = FI_EINVAL;
			goto err;
		}

		_cntr->waitset = attr->wait_set;
		_cntr->signal = 1;
		wait = container_of(attr->wait_set, struct sock_wait, wait_fid);
		list_entry = calloc(1, sizeof(*list_entry));
		if (!list_entry) {
			ret = FI_ENOMEM;
			goto err;
		}
		dlist_init(&list_entry->entry);
		list_entry->fid = &_cntr->cntr_fid.fid;
		dlist_insert_after(&list_entry->entry, &wait->fid_list);
		break;

	default:
		break;
	}

	pthread_mutex_init(&_cntr->mut, NULL);
	fastlock_init(&_cntr->list_lock);

	atomic_initialize(&_cntr->ref, 0);
	atomic_initialize(&_cntr->err_cnt, 0);

	atomic_initialize(&_cntr->value, 0);
	atomic_initialize(&_cntr->threshold, ~0);

	dlist_init(&_cntr->tx_list);
	dlist_init(&_cntr->rx_list);

	dlist_init(&_cntr->trigger_list);
	fastlock_init(&_cntr->trigger_lock);

	_cntr->cntr_fid.fid.fclass = FI_CLASS_CNTR;
	_cntr->cntr_fid.fid.context = context;
	_cntr->cntr_fid.fid.ops = &sock_cntr_fi_ops;
	_cntr->cntr_fid.ops = &sock_cntr_ops;

	atomic_inc(&dom->ref);
	_cntr->domain = dom;
	*cntr = &_cntr->cntr_fid;
	return 0;

err:
	free(_cntr);
	return -ret;
}
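For context, a hypothetical caller would reach this function through the generic fi_cntr_open() entry point. A sketch, assuming an already-opened struct fid_domain *domain, with minimal error handling (needs <stdio.h> and <rdma/fi_domain.h>):

struct fi_cntr_attr cntr_attr = {
	.events   = FI_CNTR_EVENTS_COMP,
	.wait_obj = FI_WAIT_NONE,	/* hits the _cntr->signal = 0 branch above */
};
struct fid_cntr *cntr;
int ret = fi_cntr_open(domain, &cntr_attr, &cntr, NULL);
if (ret)
	fprintf(stderr, "fi_cntr_open: %s\n", fi_strerror(-ret));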
Example #19
static int
sdp_post_recv(struct sdp_sock *ssk)
{
	struct sdp_buf *rx_req;
	int i, rc;
	u64 addr;
	struct ib_device *dev;
	struct ib_recv_wr rx_wr = { NULL };
	struct ib_sge ibsge[SDP_MAX_RECV_SGES];
	struct ib_sge *sge = ibsge;
	struct ib_recv_wr *bad_wr;
	struct mbuf *mb, *m;
	struct sdp_bsdh *h;
	int id = ring_head(ssk->rx_ring);

	/* Now, allocate and repost recv */
	sdp_prf(ssk->socket, mb, "Posting mb");
	mb = m_getm2(NULL, ssk->recv_bytes, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (mb == NULL) {
		/* Retry later so we don't stall out when memory is tight. */
		if (!rx_ring_posted(ssk))
			queue_work(rx_comp_wq, &ssk->rx_comp_work);
		return -1;
	}
	for (m = mb; m != NULL; m = m->m_next) {
		m->m_len = (m->m_flags & M_EXT) ? m->m_ext.ext_size :
		    ((m->m_flags & M_PKTHDR) ? MHLEN : MLEN);
		mb->m_pkthdr.len += m->m_len;
	}
	h = mtod(mb, struct sdp_bsdh *);
	rx_req = ssk->rx_ring.buffer + (id & (SDP_RX_SIZE - 1));
	rx_req->mb = mb;
	dev = ssk->ib_device;
	for (i = 0; mb != NULL; i++, mb = mb->m_next, sge++) {
		/* Map for device-to-host DMA; the direction must match the
		 * DMA_FROM_DEVICE unmap in sdp_cleanup_sdp_buf() below. */
		addr = ib_dma_map_single(dev, mb->m_data, mb->m_len,
		    DMA_FROM_DEVICE);
		/* TODO: proper error handling */
		BUG_ON(ib_dma_mapping_error(dev, addr));
		BUG_ON(i >= SDP_MAX_RECV_SGES);
		rx_req->mapping[i] = addr;
		sge->addr = addr;
		sge->length = mb->m_len;
		sge->lkey = ssk->sdp_dev->mr->lkey;
	}

	rx_wr.next = NULL;
	rx_wr.wr_id = id | SDP_OP_RECV;
	rx_wr.sg_list = ibsge;
	rx_wr.num_sge = i;
	rc = ib_post_recv(ssk->qp, &rx_wr, &bad_wr);
	if (unlikely(rc)) {
		sdp_warn(ssk->socket, "ib_post_recv failed. status %d\n", rc);

		sdp_cleanup_sdp_buf(ssk, rx_req, DMA_FROM_DEVICE);
		m_freem(mb);

		sdp_notify(ssk, ECONNRESET);

		return -1;
	}

	atomic_inc(&ssk->rx_ring.head);
	SDPSTATS_COUNTER_INC(post_recv);

	return 0;
}
Example #20
void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
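The matching teardown path presumably mirrors this, decrementing the same counter under the same CPU-hotplug protection; a sketch under that assumption:

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}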
Example #21
static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
{
	u8 ctlreg, reg;
	s16 x, y, z;
	u8 selftest;
	int ret;
	u8 ctrl_reg_data;
	unsigned char irq_cfg;

	mutex_lock(&lis3->mutex);

	irq_cfg = lis3->irq_cfg;
	if (lis3_dev.whoami == WAI_8B) {
		lis3->data_ready_count[IRQ_LINE0] = 0;
		lis3->data_ready_count[IRQ_LINE1] = 0;

		/* Change interrupt cfg to data ready for selftest */
		atomic_inc(&lis3_dev.wake_thread);
		lis3->irq_cfg = LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY;
		lis3->read(lis3, CTRL_REG3, &ctrl_reg_data);
		lis3->write(lis3, CTRL_REG3, (ctrl_reg_data &
				~(LIS3_IRQ1_MASK | LIS3_IRQ2_MASK)) |
				(LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY));
	}

	if (lis3_dev.whoami == WAI_3DC) {
		ctlreg = CTRL_REG4;
		selftest = CTRL4_ST0;
	} else {
		ctlreg = CTRL_REG1;
		if (lis3_dev.whoami == WAI_12B)
			selftest = CTRL1_ST;
		else
			selftest = CTRL1_STP;
	}

	lis3->read(lis3, ctlreg, &reg);
	lis3->write(lis3, ctlreg, (reg | selftest));
	msleep(lis3->pwron_delay / lis3lv02d_get_odr());

	/* Read directly to avoid axis remap */
	x = lis3->read_data(lis3, OUTX);
	y = lis3->read_data(lis3, OUTY);
	z = lis3->read_data(lis3, OUTZ);

	/* back to normal settings */
	lis3->write(lis3, ctlreg, reg);
	msleep(lis3->pwron_delay / lis3lv02d_get_odr());

	results[0] = x - lis3->read_data(lis3, OUTX);
	results[1] = y - lis3->read_data(lis3, OUTY);
	results[2] = z - lis3->read_data(lis3, OUTZ);

	ret = 0;

	if (lis3_dev.whoami == WAI_8B) {
		/* Restore original interrupt configuration */
		atomic_dec(&lis3_dev.wake_thread);
		lis3->write(lis3, CTRL_REG3, ctrl_reg_data);
		lis3->irq_cfg = irq_cfg;

		if ((irq_cfg & LIS3_IRQ1_MASK) &&
			lis3->data_ready_count[IRQ_LINE0] < 2) {
			ret = SELFTEST_IRQ;
			goto fail;
		}

		if ((irq_cfg & LIS3_IRQ2_MASK) &&
			lis3->data_ready_count[IRQ_LINE1] < 2) {
			ret = SELFTEST_IRQ;
			goto fail;
		}
	}

	if (lis3->pdata) {
		int i;
		for (i = 0; i < 3; i++) {
			/* Check against selftest acceptance limits */
			if ((results[i] < lis3->pdata->st_min_limits[i]) ||
			    (results[i] > lis3->pdata->st_max_limits[i])) {
				ret = SELFTEST_FAIL;
				goto fail;
			}
		}
	}

	/* test passed */
fail:
	mutex_unlock(&lis3->mutex);
	return ret;
}
Example #22
int fimg2d_add_command(struct fimg2d_control *info, struct fimg2d_context *ctx,
			struct fimg2d_blit *blit)
{
	int i, ret;
	struct fimg2d_image *buf[MAX_IMAGES] = image_table(blit);
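	/* image_table() is presumably a macro expanding to a braced
	 * initializer list; a plain function call could not initialize
	 * an array like this. */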
	struct fimg2d_bltcmd *cmd;
	struct fimg2d_image dst;

	if (blit->dst)
		if (copy_from_user(&dst, (void *)blit->dst, sizeof(dst)))
			return -EFAULT;

	if ((blit->dst) && (dst.addr.type == ADDR_USER))
		up_write(&page_alloc_slow_rwsem);
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if ((blit->dst) && (dst.addr.type == ADDR_USER))
		down_write(&page_alloc_slow_rwsem);

	if (!cmd)
		return -ENOMEM;

	for (i = 0; i < MAX_IMAGES; i++) {
		if (!buf[i])
			continue;

		if (copy_from_user(&cmd->image[i], buf[i],
					sizeof(struct fimg2d_image))) {
			ret = -EFAULT;
			goto err_user;
		}
	}

	cmd->ctx = ctx;
	cmd->op = blit->op;
	cmd->sync = blit->sync;
	cmd->seq_no = blit->seq_no;
	memcpy(&cmd->param, &blit->param, sizeof(cmd->param));

#ifdef CONFIG_VIDEO_FIMG2D_DEBUG
	fimg2d_dump_command(cmd);
#endif

	if (fimg2d_check_params(cmd)) {
		printk(KERN_ERR "[%s] invalid params\n", __func__);
		fimg2d_dump_command(cmd);
		ret = -EINVAL;
		goto err_user;
	}

	fimg2d_fixup_params(cmd);

	if (fimg2d_check_dma_sync(cmd)) {
		ret = -EFAULT;
		goto err_user;
	}

	/* add command node and increase ncmd */
	spin_lock(&info->bltlock);
	if (atomic_read(&info->suspended)) {
		fimg2d_debug("fimg2d suspended, do sw fallback\n");
		spin_unlock(&info->bltlock);
		ret = -EFAULT;
		goto err_user;
	}
	atomic_inc(&ctx->ncmd);
	fimg2d_enqueue(&cmd->node, &info->cmd_q);
	fimg2d_debug("ctx %p pgd %p ncmd(%d) seq_no(%u)\n",
			cmd->ctx, (unsigned long *)cmd->ctx->mm->pgd,
			atomic_read(&ctx->ncmd), cmd->seq_no);
	spin_unlock(&info->bltlock);

	return 0;

err_user:
	kfree(cmd);
	return ret;
}
Example #23
static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
	struct files_struct *oldf, *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i, error = 0, expand;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	/*
	 * Note: we may be using current for both targets (See exec.c)
	 * This works because we cache current->files (old) as oldf. Don't
	 * break this.
	 */
	tsk->files = NULL;
	error = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
	if (!newf) 
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd	    = 0;
	newf->max_fds	    = NR_OPEN_DEFAULT;
	newf->max_fdset	    = __FD_SETSIZE;
	newf->close_on_exec = &newf->close_on_exec_init;
	newf->open_fds	    = &newf->open_fds_init;
	newf->fd	    = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);

	open_files = count_open_files(oldf, oldf->max_fdset);
	expand = 0;

	/*
	 * Check whether we need to allocate a larger fd array or fd set.
	 * Note: we're not a clone task, so the open count won't change.
	 */
	if (open_files > newf->max_fdset) {
		newf->max_fdset = 0;
		expand = 1;
	}
	if (open_files > newf->max_fds) {
		newf->max_fds = 0;
		expand = 1;
	}

	/* if the old fdset gets grown now, we'll only copy up to "size" fds */
	if (expand) {
		spin_unlock(&oldf->file_lock);
		spin_lock(&newf->file_lock);
		error = expand_files(newf, open_files-1);
		spin_unlock(&newf->file_lock);
		if (error < 0)
			goto out_release;
		spin_lock(&oldf->file_lock);
	}

	old_fds = oldf->fd;
	new_fds = newf->fd;

	memcpy(newf->open_fds->fds_bits, oldf->open_fds->fds_bits, open_files/8);
	memcpy(newf->close_on_exec->fds_bits, oldf->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, newf->open_fds);
		}
		*new_fds++ = f;
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (newf->max_fds - open_files) * sizeof(struct file *);

	/* This is long-word aligned, thus could use an optimized version */
	memset(new_fds, 0, size); 

	if (newf->max_fdset > open_files) {
		int left = (newf->max_fdset-open_files)/8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&newf->open_fds->fds_bits[start], 0, left);
		memset(&newf->close_on_exec->fds_bits[start], 0, left);
	}

	tsk->files = newf;
	error = 0;
out:
	return error;

out_release:
	free_fdset (newf->close_on_exec, newf->max_fdset);
	free_fdset (newf->open_fds, newf->max_fdset);
	free_fd_array(newf->fd, newf->max_fds);
	kmem_cache_free(files_cachep, newf);
	goto out;
}
Example #24
static int fi_ibv_rdm_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags)
{
	struct fi_ibv_rdm_ep *ep;
	struct fi_ibv_rdm_cq *cq;
	struct fi_ibv_av *av;
	struct fi_ibv_rdm_cntr *cntr;
	int ret;

	ep = container_of(fid, struct fi_ibv_rdm_ep, ep_fid.fid);
	ret = ofi_ep_bind_valid(&fi_ibv_prov, bfid, flags);
	if (ret)
		return ret;

	switch (bfid->fclass) {
	case FI_CLASS_CQ:
		cq = container_of(bfid, struct fi_ibv_rdm_cq, cq_fid);
		if (ep->domain != cq->domain) {
			return -FI_EINVAL;
		}

		if (flags & FI_RECV) {
			if (ep->fi_rcq)
				return -EINVAL;
			ep->fi_rcq = cq;
			ep->rx_selective_completion = 
				(flags & FI_SELECTIVE_COMPLETION) ? 1 : 0;
		}

		if (flags & FI_SEND) {
			if (ep->fi_scq)
				return -EINVAL;
			ep->fi_scq = cq;
			ep->tx_selective_completion = 
				(flags & FI_SELECTIVE_COMPLETION) ? 1 : 0;
		}

		/* TODO: this is wrong. CQ to EP is 1:n */
		cq->ep = ep;
		break;
	case FI_CLASS_AV:
		av = container_of(bfid, struct fi_ibv_av, av_fid.fid);
		if (ep->domain != av->domain) {
			return -FI_EINVAL;
		}

		ep->av = av;

		/* TODO: this is wrong, AV to EP is 1:n */
		ep->av->ep = ep;
		break;
	case FI_CLASS_CNTR:
		cntr = container_of(bfid, struct fi_ibv_rdm_cntr, fid.fid);
		if (ep->domain != cntr->domain) {
			return -FI_EINVAL;
		}

		if ((flags & FI_REMOTE_READ) || (flags & FI_REMOTE_WRITE)) {
			return -FI_ENOSYS;
		}

		if (flags & FI_SEND) {
			ep->send_cntr = cntr;
			atomic_inc(&ep->send_cntr->ep_ref);
		}
		if (flags & FI_RECV) {
			ep->recv_cntr = cntr;
			atomic_inc(&ep->recv_cntr->ep_ref);
		}
		if (flags & FI_READ) {
			ep->read_cntr = cntr;
			atomic_inc(&ep->read_cntr->ep_ref);
		}
		if (flags & FI_WRITE) {
			ep->write_cntr = cntr;
			atomic_inc(&ep->write_cntr->ep_ref);
		}

		break;
	default:
		return -FI_EINVAL;
	}

	return 0;
}
Example #25
static inline struct tty_ldisc *get_ldisc(struct tty_ldisc *ld)
{
	if (ld)
		atomic_inc(&ld->users);
	return ld;
}
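The matching put side is not shown here; a minimal sketch of what it would presumably look like, where the kfree() is an assumption about how the ldisc was allocated (real teardown may be more involved):

static inline void put_ldisc(struct tty_ldisc *ld)
{
	if (ld && atomic_dec_and_test(&ld->users))
		kfree(ld);	/* assumption: heap-allocated ldisc */
}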
Example #26
static void unix_fill_in_inode(struct inode *tmp_inode,
	FILE_UNIX_INFO *pfindData, unsigned int *pobject_type, int isNewInode)
{
	loff_t local_size;
	struct timespec local_mtime;

	struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);

	__u32 type = le32_to_cpu(pfindData->Type);
	__u64 num_of_bytes = le64_to_cpu(pfindData->NumOfBytes);
	__u64 end_of_file = le64_to_cpu(pfindData->EndOfFile);
	cifsInfo->time = jiffies;
	atomic_inc(&cifsInfo->inUse);

	/* save mtime and size */
	local_mtime = tmp_inode->i_mtime;
	local_size  = tmp_inode->i_size;

	tmp_inode->i_atime =
	    cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime));
	tmp_inode->i_mtime =
	    cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastModificationTime));
	tmp_inode->i_ctime =
	    cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastStatusChange));

	tmp_inode->i_mode = le64_to_cpu(pfindData->Permissions);
	/* since we set the inode type below we need to mask off type
	   to avoid strange results if bits above were corrupt */
	tmp_inode->i_mode &= ~S_IFMT;
	if (type == UNIX_FILE) {
		*pobject_type = DT_REG;
		tmp_inode->i_mode |= S_IFREG;
	} else if (type == UNIX_SYMLINK) {
		*pobject_type = DT_LNK;
		tmp_inode->i_mode |= S_IFLNK;
	} else if (type == UNIX_DIR) {
		*pobject_type = DT_DIR;
		tmp_inode->i_mode |= S_IFDIR;
	} else if (type == UNIX_CHARDEV) {
		*pobject_type = DT_CHR;
		tmp_inode->i_mode |= S_IFCHR;
		tmp_inode->i_rdev = MKDEV(le64_to_cpu(pfindData->DevMajor),
				le64_to_cpu(pfindData->DevMinor) & MINORMASK);
	} else if (type == UNIX_BLOCKDEV) {
		*pobject_type = DT_BLK;
		tmp_inode->i_mode |= S_IFBLK;
		tmp_inode->i_rdev = MKDEV(le64_to_cpu(pfindData->DevMajor),
				le64_to_cpu(pfindData->DevMinor) & MINORMASK);
	} else if (type == UNIX_FIFO) {
		*pobject_type = DT_FIFO;
		tmp_inode->i_mode |= S_IFIFO;
	} else if (type == UNIX_SOCKET) {
		*pobject_type = DT_SOCK;
		tmp_inode->i_mode |= S_IFSOCK;
	} else {
		/* safest to just call it a file */
		*pobject_type = DT_REG;
		tmp_inode->i_mode |= S_IFREG;
		cFYI(1, ("unknown inode type %d", type));
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		tmp_inode->i_uid = cifs_sb->mnt_uid;
	else
		tmp_inode->i_uid = le64_to_cpu(pfindData->Uid);
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		tmp_inode->i_gid = cifs_sb->mnt_gid;
	else
		tmp_inode->i_gid = le64_to_cpu(pfindData->Gid);
	tmp_inode->i_nlink = le64_to_cpu(pfindData->Nlinks);

	spin_lock(&tmp_inode->i_lock);
	if (is_size_safe_to_change(cifsInfo, end_of_file)) {
		/* can not safely change the file size here if the
		client is writing to it due to potential races */
		i_size_write(tmp_inode, end_of_file);

	/* 512 bytes (2**9) is the fake blocksize that must be used */
	/* for this calculation, not the real blocksize */
		tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
	}
	spin_unlock(&tmp_inode->i_lock);

	if (S_ISREG(tmp_inode->i_mode)) {
		cFYI(1, ("File inode"));
		tmp_inode->i_op = &cifs_file_inode_ops;

		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
				tmp_inode->i_fop = &cifs_file_direct_nobrl_ops;
			else
				tmp_inode->i_fop = &cifs_file_direct_ops;
		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			tmp_inode->i_fop = &cifs_file_nobrl_ops;
		else
			tmp_inode->i_fop = &cifs_file_ops;

		if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
		   (cifs_sb->tcon->ses->server->maxBuf <
			PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
			tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
		else
			tmp_inode->i_data.a_ops = &cifs_addr_ops;

		if (isNewInode)
			return; /* No sense invalidating pages for new inode
				   since we have not started caching readahead
				   file data for it yet */

		if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
			(local_size == tmp_inode->i_size)) {
			cFYI(1, ("inode exists but unchanged"));
		} else {
			/* file may have changed on server */
			cFYI(1, ("invalidate inode, readdir detected change"));
			invalidate_remote_inode(tmp_inode);
		}
	} else if (S_ISDIR(tmp_inode->i_mode)) {
		cFYI(1, ("Directory inode"));
		tmp_inode->i_op = &cifs_dir_inode_ops;
		tmp_inode->i_fop = &cifs_dir_ops;
	} else if (S_ISLNK(tmp_inode->i_mode)) {
		cFYI(1, ("Symbolic Link inode"));
		tmp_inode->i_op = &cifs_symlink_inode_ops;
		/* i_fop does not need to be set for symlinks */
	} else {
		cFYI(1, ("Special inode"));
		init_special_inode(tmp_inode, tmp_inode->i_mode,
				   tmp_inode->i_rdev);
	}
}
Example #27
void xenvif_get(struct xenvif *vif)
{
	atomic_inc(&vif->refcnt);
}
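The put side is not shown; a sketch of the presumable counterpart, assuming struct xenvif carries a waiting_to_free wait queue for the teardown path:

void xenvif_put(struct xenvif *vif)
{
	if (atomic_dec_and_test(&vif->refcnt))
		wake_up(&vif->waiting_to_free);
}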
Example #28
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves, it doesn't deserve
 * a generic callback, I think.
 */
void ack_bad_irq(unsigned int irq)
{
	atomic_inc(&irq_err_count);
	printk("unexpected IRQ trap at vector %02x\n", irq);
}
Example #29
void SharedStoreStats::updateDirect(int32 dataTotalOld, int32 dataTotalNew) {
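  // Two separate atomic adds keep each step atomic on its own; the same net
  // effect could be had with one atomic_add of (dataTotalNew - dataTotalOld).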
  atomic_add(s_dataTotalSize, 0 - (int64)dataTotalOld);
  atomic_add(s_dataTotalSize, (int64)dataTotalNew);
  atomic_inc(s_updateCount);
}
Example #30
/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 */
void efx_loopback_rx_packet(struct efx_nic *efx,
                            const char *buf_ptr, int pkt_len)
{
    struct efx_loopback_state *state = efx->loopback_selftest;
    struct efx_loopback_payload *received;
    struct efx_loopback_payload *payload;

    BUG_ON(!buf_ptr);

    /* If we are just flushing, then drop the packet */
    if ((state == NULL) || state->flush)
        return;

    payload = &state->payload;

    received = (struct efx_loopback_payload *) buf_ptr;
    received->ip.saddr = payload->ip.saddr;
    if (state->offload_csum)
        received->ip.check = payload->ip.check;

    /* Check that header exists */
    if (pkt_len < sizeof(received->header)) {
        EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
                "test\n", pkt_len, LOOPBACK_MODE(efx));
        goto err;
    }

    /* Check that the ethernet header exists */
    if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
        EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n",
                LOOPBACK_MODE(efx));
        goto err;
    }

    /* Check packet length */
    if (pkt_len != sizeof(*payload)) {
        EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in "
                "%s loopback test\n", pkt_len, (int)sizeof(*payload),
                LOOPBACK_MODE(efx));
        goto err;
    }

    /* Check that IP header matches */
    if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
        EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n",
                LOOPBACK_MODE(efx));
        goto err;
    }

    /* Check that msg and padding matches */
    if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
        EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n",
                LOOPBACK_MODE(efx));
        goto err;
    }

    /* Check that iteration matches */
    if (received->iteration != payload->iteration) {
        EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in "
                "%s loopback test\n", ntohs(received->iteration),
                ntohs(payload->iteration), LOOPBACK_MODE(efx));
        goto err;
    }

    /* Increase correct RX count */
    EFX_TRACE(efx, "got loopback RX in %s loopback test\n",
              LOOPBACK_MODE(efx));

    atomic_inc(&state->rx_good);
    return;

err:
#ifdef EFX_ENABLE_DEBUG
    if (atomic_read(&state->rx_bad) == 0) {
        EFX_ERR(efx, "received packet:\n");
        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
                       buf_ptr, pkt_len, 0);
        EFX_ERR(efx, "expected packet:\n");
        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
                       &state->payload, sizeof(state->payload), 0);
    }
#endif
    atomic_inc(&state->rx_bad);
}