Example no. 1
0
int s3c_mem_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
#ifdef USE_DMA_ALLOC
	unsigned long virt_addr;
#else
	unsigned long *virt_addr;
#endif

	struct mm_struct *mm = current->mm;
	struct s3c_mem_alloc param;
	struct s3c_mem_dma_param dma_param;

	switch (cmd) {
		case S3C_MEM_ALLOC:
			mutex_lock(&mem_alloc_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_alloc_lock);
				return -EFAULT;
			}
			flag = MEM_ALLOC;
			param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
			DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
			if(param.vir_addr == -EINVAL) {
				printk("S3C_MEM_ALLOC FAILED\n");
				flag = 0;
				mutex_unlock(&mem_alloc_lock);
				return -EFAULT;
			}
			param.phy_addr = physical_address;
#ifdef USE_DMA_ALLOC
			param.kvir_addr = virtual_address;
#endif

			DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				flag = 0;
				mutex_unlock(&mem_alloc_lock);
				return -EFAULT;
			}
			flag = 0;
			mutex_unlock(&mem_alloc_lock);

			break;

		case S3C_MEM_CACHEABLE_ALLOC:
			mutex_lock(&mem_cacheable_alloc_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_cacheable_alloc_lock);
				return -EFAULT;
			}
			flag = MEM_ALLOC_CACHEABLE;
			param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
			DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
			if(param.vir_addr == -EINVAL) {
				printk("S3C_MEM_ALLOC FAILED\n");
				flag = 0;
				mutex_unlock(&mem_cacheable_alloc_lock);
				return -EFAULT;
			}
			param.phy_addr = physical_address;
			DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				flag = 0;
				mutex_unlock(&mem_cacheable_alloc_lock);
				return -EFAULT;
			}
			flag = 0;
			mutex_unlock(&mem_cacheable_alloc_lock);

			break;

		case S3C_MEM_SHARE_ALLOC:
			mutex_lock(&mem_share_alloc_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_share_alloc_lock);
				return -EFAULT;
			}
			flag = MEM_ALLOC_SHARE;
			physical_address = param.phy_addr;
			DEBUG("param.phy_addr = %08x, %d\n", physical_address, __LINE__);
			param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
			DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
			if(param.vir_addr == -EINVAL) {
				printk("S3C_MEM_SHARE_ALLOC FAILED\n");
				flag = 0;
				mutex_unlock(&mem_share_alloc_lock);
				return -EFAULT;
			}
			DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				flag = 0;
				mutex_unlock(&mem_share_alloc_lock);
				return -EFAULT;
			}
			flag = 0;
			mutex_unlock(&mem_share_alloc_lock);

			break;

		case S3C_MEM_CACHEABLE_SHARE_ALLOC:
			mutex_lock(&mem_cacheable_share_alloc_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_cacheable_share_alloc_lock);
				return -EFAULT;
			}
			flag = MEM_ALLOC_CACHEABLE_SHARE;
			physical_address = param.phy_addr;
			DEBUG("param.phy_addr = %08x, %d\n", physical_address, __LINE__);
			param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
			DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
			if(param.vir_addr == -EINVAL) {
				printk("S3C_MEM_SHARE_ALLOC FAILED\n");
				flag = 0;
				mutex_unlock(&mem_cacheable_share_alloc_lock);
				return -EFAULT;
			}
			DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				flag = 0;
				mutex_unlock(&mem_cacheable_share_alloc_lock);
				return -EFAULT;
			}
			flag = 0;
			mutex_unlock(&mem_cacheable_share_alloc_lock);

			break;

		case S3C_MEM_FREE:
			mutex_lock(&mem_free_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_free_lock);
				return -EFAULT;
			}

			DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if (do_munmap(mm, param.vir_addr, param.size) < 0) {
				printk("do_munmap() failed !!\n");
				mutex_unlock(&mem_free_lock);
				return -EINVAL;
			}

#ifdef USE_DMA_ALLOC
			virt_addr = param.kvir_addr;
			dma_free_writecombine(NULL, param.size, (unsigned int *) virt_addr, param.phy_addr);
#else
			virt_addr = (unsigned long *)phys_to_virt(param.phy_addr);
			kfree(virt_addr);
#endif
			param.size = 0;
			DEBUG("do_munmap() succeed !!\n");

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_free_lock);
				return -EFAULT;
			}

			mutex_unlock(&mem_free_lock);

			break;

		case S3C_MEM_SHARE_FREE:
			mutex_lock(&mem_share_free_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_share_free_lock);
				return -EFAULT;
			}

			DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if (do_munmap(mm, param.vir_addr, param.size) < 0) {
				printk("do_munmap() failed - MEM_SHARE_FREE!!\n");
				mutex_unlock(&mem_share_free_lock);
				return -EINVAL;
			}

			param.vir_addr = 0;
			DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n");

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_share_free_lock);
				return -EFAULT;
			}

			mutex_unlock(&mem_share_free_lock);

			break;


		case S3C_MEM_DMA_COPY:
			if(copy_from_user(&dma_param, (struct s3c_mem_dma_param *)arg, sizeof(struct s3c_mem_dma_param))) {
				return -EFAULT;
			}
			//printk("S3C_MEM_DMA_COPY called\n");

			if (s3c2410_dma_request(DMACH_3D_M2M, &s3c_m2m_dma_client, NULL)) {
				printk(KERN_WARNING "Unable to get DMA channel.\n");
				return -1;
			}

			s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_m2m_dma_finish);

			//dma_cache_maint(dma_param.src_addr,sizeof(unsigned long long), DMA_BIDIRECTIONAL);

 //  		    	printk("MEMCPY src=%p,dst=%p,size=%d\n", dma_param.src_addr,dma_param.dst_addr, dma_param.size);

			/* Source address */
			s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM, 1, dma_param.src_addr);
			s3c2410_dma_config(DMACH_3D_M2M, 8, 0);

			/* Destination address : Data buffer address */
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, dma_param.dst_addr, dma_param.size);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);

			wait_for_completion(&s3c_m2m_dma_complete);
#if 0
			/* Destination address : Data buffer address */
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000, 0x4000);
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x10000, 0x4000);
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x20000, 0x4000);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);

			wait_for_completion(&s3c_m2m_dma_complete);
			//wait_for_completion(&s3c_m2m_dma_complete);
			//wait_for_completion(&s3c_m2m_dma_complete);

			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x30000, 0x4000);
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x40000, 0x4000);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);
			wait_for_completion(&s3c_m2m_dma_complete);
			//wait_for_completion(&s3c_m2m_dma_complete);

			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x50000, 0x4000);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);
			wait_for_completion(&s3c_m2m_dma_complete);
#endif

			s3c2410_dma_free(DMACH_3D_M2M, &s3c_m2m_dma_client);

			if(copy_to_user((struct s3c_mem_dma_param *)arg, &dma_param, sizeof(struct s3c_mem_dma_param))) {
				return -EFAULT;
			}

			break;

		case S3C_MEM_DMA_SET:
			if(copy_from_user(&dma_param, (struct s3c_mem_dma_param *)arg, sizeof(struct s3c_mem_dma_param))) {
				return -EFAULT;
			}

			if (s3c2410_dma_request(DMACH_3D_M2M, &s3c_m2m_dma_client, NULL)) {
				printk(KERN_WARNING "Unable to get DMA channel.\n");
				return -1;
			}

			s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_m2m_dma_finish);

			//dma_cache_maint(dma_param.src_addr,sizeof(unsigned long long), DMA_BIDIRECTIONAL);

//   		    	printk("MEMSET src=%p,dst=%p,size=%d\n", dma_param.src_addr,dma_param.dst_addr, dma_param.size);

			/* Source address */
			s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM_SET, 1,dma_param.src_addr); 
			s3c2410_dma_config(DMACH_3D_M2M, 8, 0);
			
			/* Destination address : Data buffer address */
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, dma_param.dst_addr, dma_param.size);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);

			wait_for_completion(&s3c_m2m_dma_complete);

			s3c2410_dma_free(DMACH_3D_M2M, &s3c_m2m_dma_client);

			if(copy_to_user((struct s3c_mem_dma_param *)arg, &dma_param, sizeof(struct s3c_mem_dma_param))) {
				return -EFAULT;
			}
			break;

		default:
			DEBUG("s3c_mem_ioctl() : default !!\n");
			return -EINVAL;
	}

	return 0;
}
static int32_t vqec_reader_start (test_vqec_reader_params_t *p)
{
    struct completion wait_reader_spawned;
    struct in_addr src, dst;
    uint16_t port;
    int tunerid;
    uint32_t use_iobufs;
    uint32_t use_iobuf_size;
    int32_t use_timeout;
    struct socket *sk;
    start_params_t params;

    if (!p) {
        return (0);
    }

    if (p->use_module_params) {
        /* use the values from the module params */
        src.s_addr = in_aton(src_ipaddr);
        dst.s_addr = in_aton(dest_ipaddr);
        if (!src.s_addr || !dst.s_addr) {
            printk("test_reader: failed to parse src/dest addresses\n");
            return (0);
        }
        port = dest_port;
        tunerid = cp_tuner_id;
        use_iobufs = iobufs;
        use_iobuf_size = iobuf_size;
        use_timeout = timeout;
    } else {
        /* use the values from argument params */
        src.s_addr = p->srcaddr;
        dst.s_addr = p->dstaddr;
        port = p->dstport;
        tunerid = p->tid;
        use_iobufs = p->iobufs;
        use_iobuf_size = p->iobuf_size;
        use_timeout = p->timeout;
        if (!port) {
            /* use default port if none specified */
            port = dest_port;
        }
    }

    if (!src.s_addr || !dst.s_addr) {
        sk = NULL;
    } else {
        sk = udp_tx_sock_create(src, htons(port), dst, 0);
        if (!sk) {
            printk("test_reader: failed to create socket\n");
            return (0);
        }
    }

    if (s_reader_task_running) {
        return (-1);
    }
    s_reader_task_running = TRUE;

    params.sk = sk;
    params.cp_tuner_id = tunerid;
    params.completion = &wait_reader_spawned;
    params.iobufs = use_iobufs;
    params.iobuf_size = use_iobuf_size;
    params.timeout = use_timeout;
    init_completion(&wait_reader_spawned);
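    /* Spawn the reader thread; the wait below blocks on wait_reader_spawned
     * until the new thread signals that it has started (params lives on this
     * stack frame). */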
    s_reader_task = kernel_thread(vqec_reader_loop, 
                                  &params, 
                                  0);
    if (s_reader_task >= 0) {
        wait_for_completion(&wait_reader_spawned);
    }

    return (0);
}
Example no. 3
0
static void tsc210x_wait_data(struct tsc210x_dev *dev)
{
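	/* Block until another context signals data_avail, then report the
	 * newly acquired data. */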
	wait_for_completion(&dev->data_avail);

	tsc210x_data_report(dev);
}
Example no. 4
0
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);
		atomic_notifier_call_chain(&task_fork_notifier, 0, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);
		tracehook_report_clone(regs, clone_flags, nr, p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet.  Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		wake_up_new_task(p);

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
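			/* vfork(): sleep on the vfork completion until the child
			 * releases the mm; freezer_do_not_count() keeps the freezer
			 * from waiting on this task while it blocks here. */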
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
Example no. 5
0
static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
{
	struct pm8001_ioctl_payload	*payload;
	DECLARE_COMPLETION_ONSTACK(completion);
	u8		*ioctlbuffer = NULL;
	u32		length = 0;
	struct fw_control_info	*fwControl;
	u32		loopNumber, loopcount = 0;
	u32		sizeRead = 0;
	u32		partitionSize, partitionSizeTmp;
	u32		ret = 0;
	u32		partitionNumber = 0;
	struct pm8001_fw_image_header *image_hdr;

	length = 1024 * 16 + sizeof(*payload) - 1;
	ioctlbuffer = kzalloc(length, GFP_KERNEL);
	image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data;
	if (!ioctlbuffer)
		return -ENOMEM;
	if (pm8001_ha->fw_image->size < 28) {
		ret = FAIL_FILE_SIZE;
		goto out;
	}

	while (sizeRead < pm8001_ha->fw_image->size) {
		partitionSizeTmp =
			*(u32 *)((u8 *)&image_hdr->image_length + sizeRead);
		partitionSize = be32_to_cpu(partitionSizeTmp);
		loopcount = (partitionSize + HEADER_LEN)/IOCTL_BUF_SIZE;
		if (loopcount % IOCTL_BUF_SIZE)
			loopcount++;
		if (loopcount == 0)
			loopcount++;
		for (loopNumber = 0; loopNumber < loopcount; loopNumber++) {
			payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
			payload->length = 1024*16;
			payload->id = 0;
			fwControl =
			      (struct fw_control_info *)payload->func_specific;
			fwControl->len = IOCTL_BUF_SIZE;   /* IN */
			fwControl->size = partitionSize + HEADER_LEN;/* IN */
			fwControl->retcode = 0;/* OUT */
			fwControl->offset = loopNumber * IOCTL_BUF_SIZE;/*OUT */

			/* for the last chunk of data in case file size is not even with
			4k, load only the rest*/
			if (((loopcount-loopNumber) == 1) &&
				((partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE)) {
				fwControl->len =
					(partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
				memcpy((u8 *)fwControl->buffer,
					(u8 *)pm8001_ha->fw_image->data + sizeRead,
					(partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE);
				sizeRead +=
					(partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
			} else {
				memcpy((u8 *)fwControl->buffer,
					(u8 *)pm8001_ha->fw_image->data + sizeRead,
					IOCTL_BUF_SIZE);
				sizeRead += IOCTL_BUF_SIZE;
			}

			pm8001_ha->nvmd_completion = &completion;
			ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload);
			wait_for_completion(&completion);
			if (ret || (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS)) {
				ret = fwControl->retcode;
				kfree(ioctlbuffer);
				ioctlbuffer = NULL;
				break;
			}
		}
		if (ret)
			break;
		partitionNumber++;
	}
out:
out:
	kfree(ioctlbuffer);
	return ret;
}
static int usb_send(struct link_device *ld, struct io_device *iod,
			struct sk_buff *skb)
{
	struct sk_buff_head *txq;
	size_t tx_size;
	struct usb_link_device *usb_ld = to_usb_link_device(ld);
	struct link_pm_data *pm_data = usb_ld->link_pm_data;

	switch (iod->format) {
	case IPC_RAW:
		txq = &ld->sk_raw_tx_q;

		if (unlikely(ld->raw_tx_suspended)) {
			/* Unlike misc_write, vnet_xmit is in interrupt.
			 * Despite call netif_stop_queue on CMD_SUSPEND,
			 * packets can be reached here.
			 */
			if (in_irq()) {
				mif_err("raw tx is suspended, "
						"drop packet. size=%d",
						skb->len);
				return -EBUSY;
			}

			mif_err("wait RESUME CMD...\n");
			INIT_COMPLETION(ld->raw_tx_resumed_by_cp);
			wait_for_completion(&ld->raw_tx_resumed_by_cp);
			mif_err("resumed done.\n");
		}
		break;
	case IPC_BOOT:
	case IPC_FMT:
	case IPC_RFS:
	default:
		txq = &ld->sk_fmt_tx_q;
		break;
	}
	/* store the tx size before run the tx_delayed_work*/
	tx_size = skb->len;

	/* drop packet, when link is not online */
	if (ld->com_state == COM_BOOT && iod->format != IPC_BOOT) {
		mif_err("%s: drop packet, size=%d, com_state=%d\n",
				iod->name, skb->len, ld->com_state);
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* en queue skb data */
	skb_queue_tail(txq, skb);
	/* Hold wake_lock for getting schedule the tx_work */
#ifdef CONFIG_HAS_WAKELOCK
	wake_lock(&pm_data->tx_async_wake);
#else
	pm_stay_awake(pm_data->miscdev.this_device);
#endif

	if (!work_pending(&ld->tx_delayed_work.work))
		queue_delayed_work(ld->tx_wq, &ld->tx_delayed_work, 0);

	return tx_size;
}
Example no. 7
0
static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
	u32 ipif_ier;

	/* We get here with transmitter inhibited */

	xspi->tx_ptr = t->tx_buf;
	xspi->rx_ptr = t->rx_buf;
	xspi->remaining_bytes = t->len;
	INIT_COMPLETION(xspi->done);


	/* Enable the transmit empty interrupt, which we use to determine
	 * progress on the transmission.
	 */
	ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET);
	xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
		xspi->regs + XIPIF_V123B_IIER_OFFSET);

	for (;;) {
		u16 cr;
		u8 sr;

		xilinx_spi_fill_tx_fifo(xspi);

		/* Start the transfer by not inhibiting the transmitter any
		 * longer
		 */
		cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
							~XSPI_CR_TRANS_INHIBIT;
		xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);

		wait_for_completion(&xspi->done);

		/* A transmit has just completed. Process received data and
		 * check for more data to transmit. Always inhibit the
		 * transmitter while the Isr refills the transmit register/FIFO,
		 * or make sure it is stopped if we're done.
		 */
		cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
		xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
			       xspi->regs + XSPI_CR_OFFSET);

		/* Read out all the data from the Rx FIFO */
		sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
		while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
			xspi->rx_fn(xspi);
			sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
		}

		/* See if there is more data to send */
		if (xspi->remaining_bytes <= 0)
			break;
	}

	/* Disable the transmit empty interrupt */
	xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET);

	return t->len - xspi->remaining_bytes;
}
Example no. 8
0
/**
 * ext4_fname_encrypt() -
 *
 * This function encrypts the input filename, and returns the length of the
 * ciphertext. Errors are returned as negative numbers.  We trust the caller to
 * allocate sufficient memory to oname string.
 */
static int ext4_fname_encrypt(struct inode *inode,
			      const struct qstr *iname,
			      struct ext4_str *oname)
{
	u32 ciphertext_len;
	struct skcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;
	char iv[EXT4_CRYPTO_BLOCK_SIZE];
	struct scatterlist src_sg, dst_sg;
	int padding = 4 << (ci->ci_flags & EXT4_POLICY_FLAGS_PAD_MASK);
	char *workbuf, buf[32], *alloc_buf = NULL;
	unsigned lim = max_name_len(inode);

	if (iname->len <= 0 || iname->len > lim)
		return -EIO;

	ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ?
		EXT4_CRYPTO_BLOCK_SIZE : iname->len;
	ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding);
	ciphertext_len = (ciphertext_len > lim)
			? lim : ciphertext_len;

	if (ciphertext_len <= sizeof(buf)) {
		workbuf = buf;
	} else {
		alloc_buf = kmalloc(ciphertext_len, GFP_NOFS);
		if (!alloc_buf)
			return -ENOMEM;
		workbuf = alloc_buf;
	}

	/* Allocate request */
	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(
		    KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
		kfree(alloc_buf);
		return -ENOMEM;
	}
	skcipher_request_set_callback(req,
		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_dir_crypt_complete, &ecr);

	/* Copy the input */
	memcpy(workbuf, iname->name, iname->len);
	if (iname->len < ciphertext_len)
		memset(workbuf + iname->len, 0, ciphertext_len - iname->len);

	/* Initialize IV */
	memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);

	/* Create encryption request */
	sg_init_one(&src_sg, workbuf, ciphertext_len);
	sg_init_one(&dst_sg, oname->name, ciphertext_len);
	skcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
	res = crypto_skcipher_encrypt(req);
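	/* An asynchronous cipher may return -EINPROGRESS or -EBUSY; in that
	 * case wait for the completion callback and take the final result
	 * from ecr. */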
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	kfree(alloc_buf);
	skcipher_request_free(req);
	if (res < 0) {
		printk_ratelimited(
		    KERN_ERR "%s: Error (error code %d)\n", __func__, res);
	}
	oname->len = ciphertext_len;
	return res;
}
Example no. 9
0
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * We hope to recycle these flags after 2.6.26
	 */
	if (unlikely(clone_flags & CLONE_STOPPED)) {
		static int __read_mostly count = 100;

		if (count > 0 && printk_ratelimit()) {
			char comm[TASK_COMM_LEN];

			count--;
			printk(KERN_INFO "fork(): process `%s' used deprecated "
					"clone flags 0x%lx\n",
				get_task_comm(comm, current),
				clone_flags & CLONE_STOPPED);
		}
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);
		tracehook_report_clone(regs, clone_flags, nr, p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet.  Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		if (unlikely(clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
			__set_task_state(p, TASK_STOPPED);
		} else {
			wake_up_new_task(p, clone_flags);
		}

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
Example no. 10
0
static int join(struct mddev *mddev, int nodes)
{
	struct md_cluster_info *cinfo;
	int ret, ops_rv;
	char str[64];

	cinfo = kzalloc(sizeof(struct md_cluster_info), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	INIT_LIST_HEAD(&cinfo->suspend_list);
	spin_lock_init(&cinfo->suspend_lock);
	init_completion(&cinfo->completion);
	set_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
	init_waitqueue_head(&cinfo->wait);
	mutex_init(&cinfo->recv_mutex);

	mddev->cluster_info = cinfo;

	memset(str, 0, 64);
	sprintf(str, "%pU", mddev->uuid);
	ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
				DLM_LSFL_FS, LVB_SIZE,
				&md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
	if (ret)
		goto err;
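	/* Block until the DLM lockspace join has completed and our slot
	 * number has been recorded in cinfo. */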
	wait_for_completion(&cinfo->completion);
	if (nodes < cinfo->slot_number) {
		pr_err("md-cluster: Slot allotted(%d) is greater than available slots(%d).",
			cinfo->slot_number, nodes);
		ret = -ERANGE;
		goto err;
	}
	/* Initiate the communication resources */
	ret = -ENOMEM;
	cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
	if (!cinfo->recv_thread) {
		pr_err("md-cluster: cannot allocate memory for recv_thread!\n");
		goto err;
	}
	cinfo->message_lockres = lockres_init(mddev, "message", NULL, 1);
	if (!cinfo->message_lockres)
		goto err;
	cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0);
	if (!cinfo->token_lockres)
		goto err;
	cinfo->no_new_dev_lockres = lockres_init(mddev, "no-new-dev", NULL, 0);
	if (!cinfo->no_new_dev_lockres)
		goto err;

	ret = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
	if (ret) {
		ret = -EAGAIN;
		pr_err("md-cluster: can't join cluster to avoid lock issue\n");
		goto err;
	}
	cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
	if (!cinfo->ack_lockres) {
		ret = -ENOMEM;
		goto err;
	}
	/* get sync CR lock on ACK. */
	if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
		pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n",
				ret);
	dlm_unlock_sync(cinfo->token_lockres);
	/* get sync CR lock on no-new-dev. */
	if (dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR))
		pr_err("md-cluster: failed to get a sync CR lock on no-new-dev!(%d)\n", ret);


	pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
	snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
	cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
	if (!cinfo->bitmap_lockres) {
		ret = -ENOMEM;
		goto err;
	}
	if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
		pr_err("Failed to get bitmap lock\n");
		ret = -EINVAL;
		goto err;
	}

	cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
	if (!cinfo->resync_lockres) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	md_unregister_thread(&cinfo->recovery_thread);
	md_unregister_thread(&cinfo->recv_thread);
	lockres_free(cinfo->message_lockres);
	lockres_free(cinfo->token_lockres);
	lockres_free(cinfo->ack_lockres);
	lockres_free(cinfo->no_new_dev_lockres);
	lockres_free(cinfo->resync_lockres);
	lockres_free(cinfo->bitmap_lockres);
	if (cinfo->lockspace)
		dlm_release_lockspace(cinfo->lockspace, 2);
	mddev->cluster_info = NULL;
	kfree(cinfo);
	return ret;
}
Example no. 11
0
static int __sram spi_transfer(struct spi_device *spi,
        struct spi_transfer *transfer)
{
  struct spi_stellaris_data *priv_master = spi_master_get_devdata(spi->master);
  struct spi_stellaris_config *dev_priv = spi_get_ctldata(spi);

  dev_dbg(&spi->dev, "%s: tx_buf %p, rx_buf %p, len %u, cr0 0x%x, cpsdvsr 0x%x\n", __func__,
     transfer->tx_buf, transfer->rx_buf, transfer->len,
     dev_priv->cr0, dev_priv->cpsdvsr);

  /* Set up to perform the transfer */

  priv_master->txbuffer     = (uint8_t*)transfer->tx_buf; /* Source buffer */
  priv_master->rxbuffer     = (uint8_t*)transfer->rx_buf; /* Destination buffer */
  priv_master->ntxwords     = transfer->len;              /* Number of words left to send */
  priv_master->nrxwords     = 0;                          /* Number of words received */
  priv_master->nwords       = transfer->len;              /* Total number of exchanges */

#ifdef CONFIG_STELLARIS_DMA
	priv_master->dma_tx_flags = DMA_XFER_MEMORY_TO_DEVICE;
	priv_master->dma_rx_flags = DMA_XFER_DEVICE_TO_MEMORY;

	if (dev_priv->bits_per_word > 8)
	{
		priv_master->dma_tx_flags |= DMA_XFER_UNIT_WORD;
		priv_master->dma_rx_flags |= DMA_XFER_UNIT_WORD;
	}
  else
	{
		priv_master->dma_tx_flags |= DMA_XFER_UNIT_BYTE;
		priv_master->dma_rx_flags |= DMA_XFER_UNIT_BYTE;
	}
#else
  if (!priv_master->txbuffer)
    priv_master->txword = ssi_txnull;
  else
  {
    if (dev_priv->bits_per_word > 8)
      priv_master->txword = ssi_txuint16;
    else
      priv_master->txword = ssi_txuint8;
  }

  if (!priv_master->rxbuffer)
    priv_master->rxword = ssi_rxnull;
  else
  {
    if (dev_priv->bits_per_word > 8)
      priv_master->rxword = ssi_rxuint16;
    else
      priv_master->rxword = ssi_rxuint8;
  }
#endif

  /* Set CR1 */
  ssi_putreg(priv_master, STLR_SSI_CR1_OFFSET, 0);

  /* Set CPDVSR */
  ssi_putreg(priv_master, STLR_SSI_CPSR_OFFSET, dev_priv->cpsdvsr);

  /* Set CR0 */
  ssi_putreg(priv_master, STLR_SSI_CR0_OFFSET, dev_priv->cr0);

#ifndef POLLING_MODE
  init_completion(&priv_master->xfer_done);
#endif

  ssi_enable(priv_master);

#ifndef POLLING_MODE
  spi_transfer_step(priv_master);
#else
  while( spi_transfer_step(priv_master) ) {};
#endif

#ifndef POLLING_MODE
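  /* Interrupt-driven mode: sleep until xfer_done is signalled once the
     transfer has finished. */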
  wait_for_completion(&priv_master->xfer_done);
#endif

  ssi_disable(priv_master);

  return transfer->len;
}
static int pm8xxx_tz_get_temp_pm8058_adc(struct thermal_zone_device *thermal,
			      unsigned long *temp)
{
	struct pm8xxx_tm_chip *chip = thermal->devdata;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct adc_chan_result adc_result = {
		.physical = 0lu,
	};
	int rc;

	if (!chip || !temp)
		return -EINVAL;

	*temp = chip->temp;

	rc = adc_channel_request_conv(chip->adc_handle, &wait);
	if (rc < 0) {
		pr_err("%s: adc_channel_request_conv() failed, rc = %d\n",
			__func__, rc);
		return rc;
	}

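	/* Sleep until the ADC driver signals that the conversion requested
	 * above has finished. */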
	wait_for_completion(&wait);

	rc = adc_channel_read_result(chip->adc_handle, &adc_result);
	if (rc < 0) {
		pr_err("%s: adc_channel_read_result() failed, rc = %d\n",
			__func__, rc);
		return rc;
	}

	*temp = adc_result.physical;
	chip->temp = adc_result.physical;

	return 0;
}

static int pm8xxx_tz_get_temp_pm8xxx_adc(struct thermal_zone_device *thermal,
				      unsigned long *temp)
{
	struct pm8xxx_tm_chip *chip = thermal->devdata;
	struct pm8xxx_adc_chan_result result = {
		.physical = 0lu,
	};
	int rc;

	if (!chip || !temp)
		return -EINVAL;

	*temp = chip->temp;

	rc = pm8xxx_adc_read(chip->cdata.adc_channel, &result);
	if (rc < 0) {
		pr_err("%s: adc_channel_read_result() failed, rc = %d\n",
			chip->cdata.tm_name, rc);
		return rc;
	}

	*temp = result.physical;
	chip->temp = result.physical;

	return 0;
}

static int pm8xxx_tz_get_mode(struct thermal_zone_device *thermal,
			      enum thermal_device_mode *mode)
{
	struct pm8xxx_tm_chip *chip = thermal->devdata;

	if (!chip || !mode)
		return -EINVAL;

	*mode = chip->mode;

	return 0;
}

static int pm8xxx_tz_set_mode(struct thermal_zone_device *thermal,
			      enum thermal_device_mode mode)
{
	struct pm8xxx_tm_chip *chip = thermal->devdata;

	if (!chip)
		return -EINVAL;

	/* Mask software override requests if they are not allowed. */
	if (!chip->cdata.allow_software_override)
		mode = THERMAL_DEVICE_DISABLED;

	if (mode != chip->mode) {
		if (mode == THERMAL_DEVICE_ENABLED)
			pm8xxx_tm_shutdown_override(chip,
						    SOFTWARE_OVERRIDE_ENABLED);
		else
			pm8xxx_tm_shutdown_override(chip,
						    SOFTWARE_OVERRIDE_DISABLED);
	}
	chip->mode = mode;

	return 0;
}

static int pm8xxx_tz_get_trip_type(struct thermal_zone_device *thermal,
				   int trip, enum thermal_trip_type *type)
{
	if (trip < 0 || !type)
		return -EINVAL;

	switch (trip) {
	case TRIP_STAGE3:
		*type = THERMAL_TRIP_CRITICAL;
		break;
	case TRIP_STAGE2:
		*type = THERMAL_TRIP_HOT;
		break;
	case TRIP_STAGE1:
		*type = THERMAL_TRIP_HOT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int pm8xxx_tz_get_trip_temp(struct thermal_zone_device *thermal,
				   int trip, unsigned long *temp)
{
	struct pm8xxx_tm_chip *chip = thermal->devdata;
	int thresh_temp;

	if (!chip || trip < 0 || !temp)
		return -EINVAL;

	thresh_temp = chip->thresh * TEMP_THRESH_STEP +
			TEMP_THRESH_MIN;

	switch (trip) {
	case TRIP_STAGE3:
		thresh_temp += 2 * TEMP_STAGE_STEP;
		break;
	case TRIP_STAGE2:
		thresh_temp += TEMP_STAGE_STEP;
		break;
	case TRIP_STAGE1:
		break;
	default:
		return -EINVAL;
	}

	*temp = thresh_temp;

	return 0;
}

static int pm8xxx_tz_get_crit_temp(struct thermal_zone_device *thermal,
				   unsigned long *temp)
{
	struct pm8xxx_tm_chip *chip = thermal->devdata;

	if (!chip || !temp)
		return -EINVAL;

	*temp = chip->thresh * TEMP_THRESH_STEP + TEMP_THRESH_MIN +
		2 * TEMP_STAGE_STEP;

	return 0;
}

static struct thermal_zone_device_ops pm8xxx_thermal_zone_ops_no_adc = {
	.get_temp = pm8xxx_tz_get_temp_no_adc,
	.get_mode = pm8xxx_tz_get_mode,
	.set_mode = pm8xxx_tz_set_mode,
	.get_trip_type = pm8xxx_tz_get_trip_type,
	.get_trip_temp = pm8xxx_tz_get_trip_temp,
	.get_crit_temp = pm8xxx_tz_get_crit_temp,
};

static struct thermal_zone_device_ops pm8xxx_thermal_zone_ops_pm8xxx_adc = {
	.get_temp = pm8xxx_tz_get_temp_pm8xxx_adc,
	.get_mode = pm8xxx_tz_get_mode,
	.set_mode = pm8xxx_tz_set_mode,
	.get_trip_type = pm8xxx_tz_get_trip_type,
	.get_trip_temp = pm8xxx_tz_get_trip_temp,
	.get_crit_temp = pm8xxx_tz_get_crit_temp,
};

static struct thermal_zone_device_ops pm8xxx_thermal_zone_ops_pm8058_adc = {
	.get_temp = pm8xxx_tz_get_temp_pm8058_adc,
	.get_mode = pm8xxx_tz_get_mode,
	.set_mode = pm8xxx_tz_set_mode,
	.get_trip_type = pm8xxx_tz_get_trip_type,
	.get_trip_temp = pm8xxx_tz_get_trip_temp,
	.get_crit_temp = pm8xxx_tz_get_crit_temp,
};

static void pm8xxx_tm_work(struct work_struct *work)
{
	struct delayed_work *dwork
		= container_of(work, struct delayed_work, work);
	struct pm8xxx_tm_chip *chip
		= container_of(dwork, struct pm8xxx_tm_chip, irq_work);
	unsigned long temp = 0;
	int rc, stage, thresh;
	u8 reg;

	rc = pm8xxx_tm_read_ctrl(chip, &reg);
	if (rc < 0)
		goto bail;

	/* Clear status bits. */
	if (reg & (TEMP_ALARM_CTRL_ST2_SD | TEMP_ALARM_CTRL_ST3_SD)) {
		reg &= ~(TEMP_ALARM_CTRL_ST2_SD | TEMP_ALARM_CTRL_ST3_SD
			 | TEMP_ALARM_CTRL_STATUS_MASK);

		pm8xxx_tm_write_ctrl(chip, reg);
	}

	stage = (reg & TEMP_ALARM_CTRL_STATUS_MASK)
		>> TEMP_ALARM_CTRL_STATUS_SHIFT;
	thresh = (reg & TEMP_ALARM_CTRL_THRESH_MASK)
		>> TEMP_ALARM_CTRL_THRESH_SHIFT;

	thermal_zone_device_update(chip->tz_dev);

	if (stage != chip->prev_stage) {
		chip->prev_stage = stage;

		switch (chip->cdata.adc_type) {
		case PM8XXX_TM_ADC_NONE:
			rc = pm8xxx_tz_get_temp_no_adc(chip->tz_dev, &temp);
			break;
		case PM8XXX_TM_ADC_PM8058_ADC:
			rc = pm8xxx_tz_get_temp_pm8058_adc(chip->tz_dev, &temp);
			break;
		case PM8XXX_TM_ADC_PM8XXX_ADC:
			rc = pm8xxx_tz_get_temp_pm8xxx_adc(chip->tz_dev, &temp);
			break;
		}
		if (rc < 0)
			goto bail;

		pr_crit("%s: PMIC Temp Alarm - stage=%u, threshold=%u, temp=%lu mC\n",
			chip->cdata.tm_name, stage, thresh, temp);

		/* Notify user space */
		sysfs_notify(&chip->tz_dev->device.kobj, NULL, "type");
	}

bail:
	return;
}

static irqreturn_t pm8xxx_tm_isr(int irq, void *data)
{
	struct pm8xxx_tm_chip *chip = data;

	schedule_delayed_work(&chip->irq_work,
		msecs_to_jiffies(STATUS_REGISTER_DELAY_MS) + 1);

	return IRQ_HANDLED;
}

static int pm8xxx_tm_init_reg(struct pm8xxx_tm_chip *chip)
{
	int rc;
	u8 reg;

	rc = pm8xxx_tm_read_ctrl(chip, &reg);
	if (rc < 0)
		return rc;

	chip->stage = (reg & TEMP_ALARM_CTRL_STATUS_MASK)
			>> TEMP_ALARM_CTRL_STATUS_SHIFT;
	chip->temp = 0;

	/* Use temperature threshold set 0: (105, 125, 145) */
	chip->thresh = 0;
	reg = (chip->thresh << TEMP_ALARM_CTRL_THRESH_SHIFT)
		& TEMP_ALARM_CTRL_THRESH_MASK;
	rc = pm8xxx_tm_write_ctrl(chip, reg);
	if (rc < 0)
		return rc;

	/*
	 * Set the PMIC temperature alarm module to be always off. This
	 * is necessary due to erroneous behavior of temperature alarm
	 * module during system shutdown.
	 */
	rc = pm8xxx_tm_write_pwm(chip, TEMP_ALARM_PWM_EN_NEVER);

	return rc;
}

static int pm8xxx_init_adc(struct pm8xxx_tm_chip *chip, bool enable)
{
	int rc = 0;

	if (chip->cdata.adc_type == PM8XXX_TM_ADC_PM8058_ADC) {
		if (enable) {
			rc = adc_channel_open(chip->cdata.adc_channel,
						&(chip->adc_handle));
			if (rc < 0)
				pr_err("adc_channel_open() failed.\n");
		} else {
			adc_channel_close(chip->adc_handle);
		}
	}

	return rc;
}
/* For SSC SPI as MASTER, TX/RX is handled as follows:

   1. Fill the TX_FIFO with up to (SSC_TXFIFO_SIZE - 1) words, and enable
      TX_FIFO_EMPTY interrupts.
   2. When the last word of TX_FIFO is copied to the shift register,
      a TX_FIFO_EMPTY interrupt is issued, and the last word will *start* being
      shifted out/in.
   3. On receiving a TX_FIFO_EMPTY interrupt, copy all *available* received
      words from the RX_FIFO. Note, depending on the time taken to shift out/in
      the 'last' word compared to the IRQ latency, the 'last' word may not be
      available yet in the RX_FIFO.
   4. If there are more bytes to TX, refill the TX_FIFO.  Since the 'last' word
      from the previous iteration may still be (or about to be) in the RX_FIFO,
      only add up to (SSC_TXFIFO_SIZE - 1) words.  If all bytes have been
      transmitted, disable TX and set completion.
   5. If we are interested in the received data, check to see if the 'last' word
      has been received.  If not, then wait the period of shifting 1 word, then
      read the 'last' word from the RX_FIFO.

*/
static void spi_stmssc_fill_tx_fifo(struct spi_stm_ssc *st_ssc)
{
	union {
		u8 bytes[4];
		u32 dword;
	} tmp = {.dword = 0,};
	int i;

	for (i = 0;
	     i < SSC_TXFIFO_SIZE - 1 && st_ssc->tx_bytes_pending > 0; i++) {
		if (st_ssc->bits_per_word > 8) {
			if (st_ssc->tx_ptr) {
				tmp.bytes[1] = *st_ssc->tx_ptr++;
				tmp.bytes[0] = *st_ssc->tx_ptr++;
			} else {
				tmp.bytes[1] = 0;
				tmp.bytes[0] = 0;
			}

			st_ssc->tx_bytes_pending -= 2;

		} else {
			if (st_ssc->tx_ptr)
				tmp.bytes[0] = *st_ssc->tx_ptr++;
			else
				tmp.bytes[0] = 0;

			st_ssc->tx_bytes_pending--;
		}
		ssc_store32(st_ssc, SSC_TBUF, tmp.dword);
	}
}

static int spi_stmssc_rx_mopup(struct spi_stm_ssc *st_ssc)
{
	unsigned long word_period_ns;
	u32 rx_fifo_status;
	union {
		u8 bytes[4];
		u32 dword;
	} tmp = {.dword = 0,};

	dgb_print("\n");

	word_period_ns = 1000000000 / st_ssc->baud;
	word_period_ns *= st_ssc->bits_per_word;

	/* delay for period equivalent to shifting 1 complete word
	   out of and into shift register */
	ndelay(word_period_ns);

	/* Check 'last' word is actually there! */
	rx_fifo_status = ssc_load32(st_ssc, SSC_RX_FSTAT);
	if (rx_fifo_status == 1) {
		tmp.dword = ssc_load32(st_ssc, SSC_RBUF);

		if (st_ssc->bits_per_word > 8) {
			if (st_ssc->rx_ptr) {
				*st_ssc->rx_ptr++ = tmp.bytes[1];
				*st_ssc->rx_ptr++ = tmp.bytes[0];
			}
			st_ssc->rx_bytes_pending -= 2;
		} else {
			if (st_ssc->rx_ptr)
				*st_ssc->rx_ptr++ = tmp.bytes[0];
			st_ssc->rx_bytes_pending--;
		}
	} else {
		dgb_print("should only be one word in RX_FIFO"
			  "(rx_fifo_status = %d)\n", rx_fifo_status);
	}

	return 0;
}


static int spi_stmssc_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct spi_stm_ssc *st_ssc;

	dgb_print("\n");

	st_ssc = spi_master_get_devdata(spi->master);

	st_ssc->tx_ptr = t->tx_buf;
	st_ssc->rx_ptr = t->rx_buf;
	st_ssc->tx_bytes_pending = t->len;
	st_ssc->rx_bytes_pending = t->len;
	INIT_COMPLETION(st_ssc->done);

	/* fill TX_FIFO */
	spi_stmssc_fill_tx_fifo(st_ssc);

	/* enable TX_FIFO_EMPTY interrupts */
	ssc_store32(st_ssc, SSC_IEN, SSC_IEN_TIEN);

	/* wait for all bytes to be transmitted*/
	wait_for_completion(&st_ssc->done);

	/* check 'last' byte has been received */
	/* NOTE: need to read rxbuf, even if ignoring the result! */
	if (st_ssc->rx_bytes_pending)
		spi_stmssc_rx_mopup(st_ssc);

	/* disable ints */
	ssc_store32(st_ssc, SSC_IEN, 0x0);

	return t->len - st_ssc->tx_bytes_pending;
}



static irqreturn_t spi_stmssc_irq(int irq, void *dev_id)
{
	struct spi_stm_ssc *st_ssc = (struct spi_stm_ssc *)dev_id;
	unsigned int rx_fifo_status;
	u32 ssc_status;

	union {
		u8 bytes[4];
		u32 dword;
	} tmp = {.dword = 0,};

	ssc_status = ssc_load32(st_ssc, SSC_STA);

	/* FIFO_TX_EMPTY */
	if (ssc_status & SSC_STA_TIR) {
		/* Find number of words available in RX_FIFO: 8 if RX_FIFO_FULL,
		   else SSC_RX_FSTAT (0-7)
		*/
		rx_fifo_status = (ssc_status & SSC_STA_RIR) ? 8 :
			ssc_load32(st_ssc, SSC_RX_FSTAT);

		/* Read all available words from RX_FIFO */
		while (rx_fifo_status) {
			tmp.dword = ssc_load32(st_ssc, SSC_RBUF);

			if (st_ssc->bits_per_word > 8) {
				if (st_ssc->rx_ptr) {
					*st_ssc->rx_ptr++ = tmp.bytes[1];
					*st_ssc->rx_ptr++ = tmp.bytes[0];
				}
				st_ssc->rx_bytes_pending -= 2;
			} else {
				if (st_ssc->rx_ptr)
					*st_ssc->rx_ptr++ = tmp.bytes[0];
				st_ssc->rx_bytes_pending--;
			}

			rx_fifo_status = ssc_load32(st_ssc, SSC_RX_FSTAT);
		}

		/* See if there is more data to send */
		if (st_ssc->tx_bytes_pending > 0)
			spi_stmssc_fill_tx_fifo(st_ssc);
		else {
			/* No more data to send */
			ssc_store32(st_ssc, SSC_IEN, 0x0);
			complete(&st_ssc->done);
		}
	}

	return IRQ_HANDLED;
}


static int __init spi_stm_probe(struct platform_device *pdev)
{
	struct ssc_pio_t *pio_info =
			(struct ssc_pio_t *)pdev->dev.platform_data;
	struct spi_master *master;
	struct resource *res;
	struct spi_stm_ssc *st_ssc;

	u32 reg;

	master = spi_alloc_master(&pdev->dev, sizeof(struct spi_stm_ssc));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	st_ssc = spi_master_get_devdata(master);
	st_ssc->bitbang.master     = spi_master_get(master);
	st_ssc->bitbang.setup_transfer = spi_stmssc_setup_transfer;
	st_ssc->bitbang.txrx_bufs  = spi_stmssc_txrx_bufs;
	st_ssc->bitbang.master->setup = spi_stmssc_setup;

	if (pio_info->chipselect)
		st_ssc->bitbang.chipselect = (void (*)
					      (struct spi_device *, int))
			pio_info->chipselect;
	else
		st_ssc->bitbang.chipselect = spi_stpio_chipselect;

	master->num_chipselect = SPI_NO_CHIPSELECT + 1;
	master->bus_num = pdev->id;
	init_completion(&st_ssc->done);

	/* Get resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     res->end - res->start, "spi")) {
		printk(KERN_ERR NAME " Request mem 0x%x region failed\n",
		       res->start);
		return -ENOMEM;
	}

	st_ssc->base =
		(unsigned long) devm_ioremap_nocache(&pdev->dev, res->start,
						     res->end - res->start);
	if (!st_ssc->base) {
		printk(KERN_ERR NAME " Request iomem 0x%x region failed\n",
		       res->start);
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		printk(KERN_ERR NAME " Request irq %d failed\n", res->start);
		return -ENODEV;
	}

	if (devm_request_irq(&pdev->dev, res->start, spi_stmssc_irq,
		IRQF_DISABLED, "stspi", st_ssc) < 0) {
		printk(KERN_ERR NAME " Request irq failed\n");
		return -ENODEV;
	}

	/* Check for hard wired SSC which doesn't use PIO pins */
	if (pio_info->pio[0].pio_port == SSC_NO_PIO)
		goto ssc_hard_wired;

	/* Get PIO pins */
	pio_info->clk = stpio_request_set_pin(pio_info->pio[0].pio_port,
					  pio_info->pio[0].pio_pin,
					      "SPI Clock", STPIO_BIDIR, 0);
	if (!pio_info->clk) {
		printk(KERN_ERR NAME
		       " Failed to allocate clk pin (PIO%d[%d])\n",
		       pio_info->pio[0].pio_port, pio_info->pio[0].pio_pin);
		return -ENODEV;
	}
	pio_info->sdout = stpio_request_set_pin(pio_info->pio[1].pio_port,
					    pio_info->pio[1].pio_pin,
						"SPI Data out", STPIO_BIDIR, 0);
	if (!pio_info->sdout) {
		printk(KERN_ERR NAME
		       " Failed to allocate sdo pin (PIO%d[%d])\n",
		       pio_info->pio[1].pio_port, pio_info->pio[1].pio_pin);
		return -ENODEV;
	}
	pio_info->sdin = stpio_request_pin(pio_info->pio[2].pio_port,
					   pio_info->pio[2].pio_pin,
					   "SPI Data in", STPIO_IN);
	if (!pio_info->sdin) {
		printk(KERN_ERR NAME
		       " Failed to allocate sdi pin (PIO%d[%d])\n",
		       pio_info->pio[2].pio_port, pio_info->pio[2].pio_pin);
		return -ENODEV;
	}

ssc_hard_wired:

	/* Disable I2C and Reset SSC */
	ssc_store32(st_ssc, SSC_I2C, 0x0);
	reg = ssc_load16(st_ssc, SSC_CTL);
	reg |= SSC_CTL_SR;
	ssc_store32(st_ssc, SSC_CTL, reg);

	udelay(1);
	reg = ssc_load32(st_ssc, SSC_CTL);
	reg &= ~SSC_CTL_SR;
	ssc_store32(st_ssc, SSC_CTL, reg);

	/* Set SSC into slave mode before reconfiguring PIO pins */
	reg = ssc_load32(st_ssc, SSC_CTL);
	reg &= ~SSC_CTL_MS;
	ssc_store32(st_ssc, SSC_CTL, reg);

	if (pio_info->pio[0].pio_port == SSC_NO_PIO)
		goto ssc_hard_wired2;

#ifdef CONFIG_CPU_SUBTYPE_STX7141
	stpio_configure_pin(pio_info->clk, STPIO_OUT);
	stpio_configure_pin(pio_info->sdout, STPIO_OUT);
	stpio_configure_pin(pio_info->sdin, STPIO_IN);
#else
	stpio_configure_pin(pio_info->clk, STPIO_ALT_OUT);
	stpio_configure_pin(pio_info->sdout, STPIO_ALT_OUT);
	stpio_configure_pin(pio_info->sdin, STPIO_IN);
#endif

ssc_hard_wired2:

	st_ssc->fcomms = clk_get_rate(clk_get(NULL, "comms_clk"));

	/* Start bitbang worker */
	if (spi_bitbang_start(&st_ssc->bitbang)) {
		printk(KERN_ERR NAME
		       " The SPI Core refuses the spi_stm_ssc adapter\n");
		return -1;
	}

	printk(KERN_INFO NAME ": Registered SPI Bus %d: "
	       "CLK[%d,%d] SDOUT[%d, %d] SDIN[%d, %d]\n", master->bus_num,
	       pio_info->pio[0].pio_port, pio_info->pio[0].pio_pin,
	       pio_info->pio[1].pio_port, pio_info->pio[1].pio_pin,
	       pio_info->pio[2].pio_port, pio_info->pio[2].pio_pin);

	return 0;
}

static int  spi_stm_remove(struct platform_device *pdev)
{
	struct spi_stm_ssc *st_ssc;
	struct spi_master *master;
	struct ssc_pio_t *pio_info =
		(struct ssc_pio_t *)pdev->dev.platform_data;

	master = platform_get_drvdata(pdev);
	st_ssc = spi_master_get_devdata(master);

	spi_bitbang_stop(&st_ssc->bitbang);

	if (pio_info->sdin) {
		stpio_free_pin(pio_info->sdin);
		stpio_free_pin(pio_info->clk);
		stpio_free_pin(pio_info->sdout);
	}

	return 0;
}

static struct platform_driver spi_hw_driver = {
	.driver.name = "spi_st_ssc",
	.driver.owner = THIS_MODULE,
	.probe = spi_stm_probe,
	.remove = spi_stm_remove,
};


static int __init spi_stm_ssc_init(void)
{
	printk(KERN_INFO NAME ": SSC SPI Driver\n");
	return platform_driver_register(&spi_hw_driver);
}

static void __exit spi_stm_ssc_exit(void)
{
	dgb_print("\n");
	platform_driver_unregister(&spi_hw_driver);
}

module_init(spi_stm_ssc_init);
module_exit(spi_stm_ssc_exit);

MODULE_AUTHOR("STMicroelectronics <www.st.com>");
MODULE_DESCRIPTION("STM SSC SPI driver");
MODULE_LICENSE("GPL");
Example no. 14
0
static ssize_t adg739_write(struct file *filp, const char __user *buf, size_t count, loff_t *fpos)
{
	char buf_term[NUM_MULTIPLEXER];	// buffer the user-space message is copied into and pre-formatted in
	int status = 0;
	int i = 0;
	struct spi_transfer t = {		// the transfer being assembled
			.tx_buf = adg739_status->buffer,
			.len = NUM_MULTIPLEXER * 2,
	};
	struct spi_message	m;	// the message
	DECLARE_COMPLETION_ONSTACK(done);	// declare and initialize the completion (used as a condition variable)
	// validate the buffer passed in from user space
	if (count > NUM_MULTIPLEXER)
		return (-EMSGSIZE);
	if (copy_from_user(buf_term, buf, count))
		return (-EFAULT);
	for (i=0; i<count; i++)
	{
		switch(buf_term[i])
		{
		case 's':
			buf_term[i] = 0x11;
			break;
		case 'v':
			buf_term[i] = 0x82;
			break;
		case 'g':
			buf_term[i] = 0x88;
			break;
		default:
			return (-EINVAL);
		}
	}
	// hand the message over to the controller driver
	mutex_lock(&device_lockk);
	for (i=0; i<count; i++) {
		adg739_status->buffer[i]= buf_term[i];
		adg739_status->buffer[i+4]= buf_term[i];
	}

	spi_message_init(&m);	// initialize the message
	spi_message_add_tail(&t, &m);	// queue the transfer on the message
	m.complete = adg739_complete;
	m.context = &done;
	if (adg739_status->spi == NULL)
		status = -ESHUTDOWN;
	else
	{
		status = spi_async(adg739_status->spi, &m);	// submit the message
		printk(KERN_INFO "Status function spi_async = %d\n", status);	
	}
	if (status == 0) {
		wait_for_completion(&done);	// wait for the SPI controller to finish processing the message
		status = m.status;
		printk(KERN_INFO "Status message = %d\n", status);
		if (status == 0)
			status = m.actual_length/2;
	}
	mutex_unlock(&device_lockk);
	return (status);
}

// SPI_DRIVER STRUCTURE FUNCTIONS

static int	__devinit adg739_probe(struct spi_device *spi)
{
	int status;
	struct device *dev;
	// register the device
	dev = device_create(devclass, &spi->dev, dev_adg739, NULL, MULTIPLEXER_NAME);	// create the device
	status = IS_ERR(dev) ? PTR_ERR(dev) : 0;
	if(status != 0)
	{
		printk(KERN_ERR "The device_create function failed\n");
		return (status);
	}
	// initialize the driver state structure members
	mutex_lock(&device_lockk);
	adg739_status->users = 0;
	adg739_status->spi = spi;
	spi->bits_per_word = 16;
	spi->max_speed_hz = 700000;
	spin_lock_init(&adg739_status->spi_lock);
	memset(adg739_status->buffer, 0, sizeof(adg739_status->buffer));
	spi_set_drvdata(spi, adg739_status);	// points spi->dev->driver_data at adg739_status
	mutex_unlock(&device_lockk);
	return (0);
}
Example no. 15
0
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	struct pid *pid = alloc_pid();
	long nr;

	if (!pid)
		return -EAGAIN;
	nr = pid->nr;
	if (unlikely(current->ptrace)) {
		trace = fork_traceflag (clone_flags);
		if (trace)
			clone_flags |= CLONE_PTRACE;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		if (!(clone_flags & CLONE_STOPPED))
			wake_up_new_task(p, clone_flags);
		else
			p->state = TASK_STOPPED;

		if (unlikely (trace)) {
			current->ptrace_message = nr;
			ptrace_notify ((trace << 8) | SIGTRAP);
		}

		if (clone_flags & CLONE_VFORK) {
			wait_for_completion(&vfork);
			if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
				current->ptrace_message = nr;
				ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
			}
		}
	} else {
		free_pid(pid);
		nr = PTR_ERR(p);
	}
	return nr;
}
Example no. 16
0
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * Determine whether and which event to report to ptracer.  When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if ((clone_flags & CSIGNAL) != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);

	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);
		atomic_notifier_call_chain(&task_fork_notifier, 0, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't finished SIGSTOP raising yet.  Now we clear it
		 * and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		wake_up_new_task(p);

		/* forking complete and child started to run, tell ptracer */
		if (unlikely(trace))
			ptrace_event(trace, nr);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
Example no. 17
0
/**
 * i2c_pnx_xfer - generic transfer entry point
 * @adap:		pointer to I2C adapter structure
 * @msgs:		array of messages
 * @num:		number of messages
 *
 * Initiates the transfer
 */
static int
i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct i2c_msg *pmsg;
	int rc = 0, completed = 0, i;
	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
	u32 stat = ioread32(I2C_REG_STS(alg_data));

	dev_dbg(&alg_data->adapter.dev,
		"%s(): entering: %d messages, stat = %04x.\n",
		__func__, num, ioread32(I2C_REG_STS(alg_data)));

	bus_reset_if_active(alg_data);

	/* Process transactions in a loop. */
	for (i = 0; rc >= 0 && i < num; i++) {
		u8 addr;

		pmsg = &msgs[i];
		addr = pmsg->addr;

		if (pmsg->flags & I2C_M_TEN) {
			dev_err(&alg_data->adapter.dev,
				"%s: 10 bits addr not supported!\n",
				alg_data->adapter.name);
			rc = -EINVAL;
			break;
		}

		alg_data->mif.buf = pmsg->buf;
		alg_data->mif.len = pmsg->len;
		alg_data->mif.mode = (pmsg->flags & I2C_M_RD) ?
			I2C_SMBUS_READ : I2C_SMBUS_WRITE;
		alg_data->mif.ret = 0;
		alg_data->last = (i == num - 1);

		dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n",
			__func__, alg_data->mif.mode, alg_data->mif.len);

		i2c_pnx_arm_timer(alg_data);

		/* initialize the completion var */
		init_completion(&alg_data->mif.complete);

		/* Enable master interrupt */
		iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_afie |
				mcntrl_naie | mcntrl_drmie,
			  I2C_REG_CTL(alg_data));

		/* Put start-code and slave-address on the bus. */
		rc = i2c_pnx_start(addr, alg_data);
		if (rc < 0)
			break;

		/* Wait for completion */
		wait_for_completion(&alg_data->mif.complete);

		if (!(rc = alg_data->mif.ret))
			completed++;
		dev_dbg(&alg_data->adapter.dev,
			"%s(): Complete, return code = %d.\n",
			__func__, rc);

		/* Clear TDI and AFI bits in case they are set. */
		if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_tdi) {
			dev_dbg(&alg_data->adapter.dev,
				"%s: TDI still set... clearing now.\n",
				alg_data->adapter.name);
			iowrite32(stat, I2C_REG_STS(alg_data));
		}
		if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_afi) {
			dev_dbg(&alg_data->adapter.dev,
				"%s: AFI still set... clearing now.\n",
				alg_data->adapter.name);
			iowrite32(stat, I2C_REG_STS(alg_data));
		}
	}

	bus_reset_if_active(alg_data);

	/* Cleanup to be sure... */
	alg_data->mif.buf = NULL;
	alg_data->mif.len = 0;

	dev_dbg(&alg_data->adapter.dev, "%s(): exiting, stat = %x\n",
		__func__, ioread32(I2C_REG_STS(alg_data)));

	if (completed != num)
		return ((rc < 0) ? rc : -EREMOTEIO);

	return num;
}
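For orientation, a master_xfer routine such as i2c_pnx_xfer() is exposed to the I2C core through a struct i2c_algorithm. The sketch below shows the usual wiring; the functionality callback and the identifiers ending in _sketch are assumptions, not quoted from the driver.

#include <linux/i2c.h>

/* Sketch: hook the transfer routine into the I2C core. */
static u32 i2c_pnx_func_sketch(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm pnx_algorithm_sketch = {
	.master_xfer	= i2c_pnx_xfer,
	.functionality	= i2c_pnx_func_sketch,
};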
Esempio n. 18
0
/**ltl
Purpose: submit the scatterlist urb requests and wait for them to complete.
Parameters:
Note: this interface is used by the usb_storage driver <usb_stor_bulk_transfer_sglist>
*/
void usb_sg_wait (struct usb_sg_request *io)
{
	int		i, entries = io->entries;

	/* queue the urbs.  */
	spin_lock_irq (&io->lock);
	for (i = 0; i < entries && !io->status; i++) {
		int	retval;

		io->urbs [i]->dev = io->dev;
		retval = usb_submit_urb (io->urbs [i], SLAB_ATOMIC);

		/* after we submit, let completions or cancelations fire;
		 * we handshake using io->status.
		 */
		spin_unlock_irq (&io->lock);
		switch (retval) {
			/* maybe retrying will recover */
		case -ENXIO:	// hc didn't queue this one
		case -EAGAIN:
		case -ENOMEM:
			io->urbs[i]->dev = NULL;
			retval = 0;
			i--;
			yield ();
			break;

			/* no error? continue immediately.
			 *
			 * NOTE: to work better with UHCI (4K I/O buffer may
			 * need 3K of TDs) it may be good to limit how many
			 * URBs are queued at once; N milliseconds?
			 */
		case 0:
			cpu_relax ();
			break;

			/* fail any uncompleted urbs */
		default:
			io->urbs [i]->dev = NULL;
			io->urbs [i]->status = retval;
			dev_dbg (&io->dev->dev, "%s, submit --> %d\n",
				__FUNCTION__, retval);
			usb_sg_cancel (io);
		}
		spin_lock_irq (&io->lock);
		if (retval && (io->status == 0 || io->status == -ECONNRESET))
			io->status = retval;
	}
	io->count -= entries - i;
	if (io->count == 0)
		complete (&io->complete);
	spin_unlock_irq (&io->lock);

	/* OK, yes, this could be packaged as non-blocking.
	 * So could the submit loop above ... but it's easier to
	 * solve neither problem than to solve both!
	 */
	wait_for_completion (&io->complete);

	sg_clean (io);
}
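A hedged sketch of the caller side the comment above refers to (usb_stor_bulk_transfer_sglist-style usage): build the scatter-gather request with usb_sg_init(), then block in usb_sg_wait(). The function name and error handling here are illustrative.

#include <linux/usb.h>
#include <linux/scatterlist.h>

/* Sketch: submit a scatterlist over a USB pipe and wait for the outcome. */
static int sg_bulk_xfer_sketch(struct usb_device *udev, unsigned int pipe,
			       struct scatterlist *sg, int nents, size_t length)
{
	struct usb_sg_request io;
	int ret;

	ret = usb_sg_init(&io, udev, pipe, 0, sg, nents, length, GFP_NOIO);
	if (ret)
		return ret;

	usb_sg_wait(&io);	/* blocks until every queued URB has completed */
	return io.status;	/* 0 on success, or the first error recorded */
}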
Esempio n. 19
0
static long mtx1_wdt_ioctl(struct file *file, unsigned int cmd,
							unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = (int __user *)argp;
	unsigned int value;
	static const struct watchdog_info ident = {
		.options = WDIOF_CARDRESET,
		.identity = "MTX-1 WDT",
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		if (copy_to_user(argp, &ident, sizeof(ident)))
			return -EFAULT;
		break;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		put_user(0, p);
		break;
	case WDIOC_SETOPTIONS:
		if (get_user(value, p))
			return -EFAULT;
		if (value & WDIOS_ENABLECARD)
			mtx1_wdt_start();
		else if (value & WDIOS_DISABLECARD)
			mtx1_wdt_stop();
		else
			return -EINVAL;
		return 0;
	case WDIOC_KEEPALIVE:
		mtx1_wdt_reset();
		break;
	default:
		return -ENOTTY;
	}
	return 0;
}


static ssize_t mtx1_wdt_write(struct file *file, const char *buf,
						size_t count, loff_t *ppos)
{
	if (!count)
		return -EIO;
	mtx1_wdt_reset();
	return count;
}

static const struct file_operations mtx1_wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.unlocked_ioctl	= mtx1_wdt_ioctl,
	.open		= mtx1_wdt_open,
	.write		= mtx1_wdt_write,
	.release	= mtx1_wdt_release,
};


static struct miscdevice mtx1_wdt_misc = {
	.minor	= WATCHDOG_MINOR,
	.name	= "watchdog",
	.fops	= &mtx1_wdt_fops,
};


static int __devinit mtx1_wdt_probe(struct platform_device *pdev)
{
	int ret;

	mtx1_wdt_device.gpio = pdev->resource[0].start;
	ret = gpio_request_one(mtx1_wdt_device.gpio,
				GPIOF_OUT_INIT_HIGH, "mtx1-wdt");
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request gpio");
		return ret;
	}

	spin_lock_init(&mtx1_wdt_device.lock);
	init_completion(&mtx1_wdt_device.stop);
	mtx1_wdt_device.queue = 0;
	clear_bit(0, &mtx1_wdt_device.inuse);
	setup_timer(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0L);
	mtx1_wdt_device.default_ticks = ticks;

	ret = misc_register(&mtx1_wdt_misc);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		return ret;
	}
	mtx1_wdt_start();
	dev_info(&pdev->dev, "MTX-1 Watchdog driver\n");
	return 0;
}

static int __devexit mtx1_wdt_remove(struct platform_device *pdev)
{
	/* FIXME: do we need to lock this test ? */
	if (mtx1_wdt_device.queue) {
		mtx1_wdt_device.queue = 0;
		wait_for_completion(&mtx1_wdt_device.stop);
	}

	gpio_free(mtx1_wdt_device.gpio);
	misc_deregister(&mtx1_wdt_misc);
	return 0;
}

static struct platform_driver mtx1_wdt_driver = {
	.probe = mtx1_wdt_probe,
	.remove = __devexit_p(mtx1_wdt_remove),
	.driver.name = "mtx1-wdt",
	.driver.owner = THIS_MODULE,
};

static int __init mtx1_wdt_init(void)
{
	return platform_driver_register(&mtx1_wdt_driver);
}

static void __exit mtx1_wdt_exit(void)
{
	platform_driver_unregister(&mtx1_wdt_driver);
}

module_init(mtx1_wdt_init);
module_exit(mtx1_wdt_exit);

MODULE_AUTHOR("Michael Stickel, Florian Fainelli");
MODULE_DESCRIPTION("Driver for the MTX-1 watchdog");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:mtx1-wdt");
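The ioctl and write handlers above implement the standard Linux watchdog character-device interface, so a generic client can drive the timer from userspace. The fragment below is only an illustration of that interface, not part of the driver.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

/* Userspace sketch: open the watchdog node and issue one keepalive ping. */
int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_KEEPALIVE, 0);	/* handled above via mtx1_wdt_reset() */
	close(fd);
	return 0;
}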
Esempio n. 20
0
static int ipmi_heartbeat(void)
{
	struct kernel_ipmi_msg            msg;
	int                               rv;
	struct ipmi_system_interface_addr addr;

	if (ipmi_ignore_heartbeat)
		return 0;

	if (ipmi_start_timer_on_heartbeat) {
		ipmi_start_timer_on_heartbeat = 0;
		ipmi_watchdog_state = action_val;
		return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
	} else if (pretimeout_since_last_heartbeat) {
		
		return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
	}

	mutex_lock(&heartbeat_lock);

	atomic_set(&heartbeat_tofree, 2);

	
	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) {
		mutex_unlock(&heartbeat_lock);
		return 0;
	}

	addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	addr.channel = IPMI_BMC_CHANNEL;
	addr.lun = 0;

	msg.netfn = 0x06;
	msg.cmd = IPMI_WDOG_RESET_TIMER;
	msg.data = NULL;
	msg.data_len = 0;
	rv = ipmi_request_supply_msgs(watchdog_user,
				      (struct ipmi_addr *) &addr,
				      0,
				      &msg,
				      NULL,
				      &heartbeat_smi_msg,
				      &heartbeat_recv_msg,
				      1);
	if (rv) {
		mutex_unlock(&heartbeat_lock);
		printk(KERN_WARNING PFX "heartbeat failure: %d\n",
		       rv);
		return rv;
	}

	
	wait_for_completion(&heartbeat_wait);

	if (heartbeat_recv_msg.msg.data[0] != 0) {
		
		rv = -EINVAL;
	}

	mutex_unlock(&heartbeat_lock);

	return rv;
}
Esempio n. 21
0
de265_error de265_decode_NAL(de265_decoder_context* de265ctx, rbsp_buffer* data)
{
  decoder_context* ctx = (decoder_context*)de265ctx;

  /*
    if (ctx->num_skipped_bytes>0) {
    printf("skipped bytes:\n  ");
    for (int i=0;i<ctx->num_skipped_bytes;i++)
    printf("%d ",ctx->skipped_bytes[i]);
    printf("\n");
    }
  */

  de265_error err = DE265_OK;

  bitreader reader;
  bitreader_init(&reader, data);

  nal_header nal_hdr;
  nal_read_header(&reader, &nal_hdr);
  process_nal_hdr(ctx, &nal_hdr);

  logdebug(LogHighlevel,"NAL: 0x%x 0x%x -  %d %d\n",
           data->data[0], data->data[1],
           nal_hdr.nal_unit_type,
           nal_hdr.nuh_temporal_id);


  if (nal_hdr.nal_unit_type<32) {
    logdebug(LogHeaders,"---> read slice segment header\n");

    //printf("-------- slice header --------\n");

    int sliceIndex = get_next_slice_index(ctx);
    if (sliceIndex<0) {
      return DE265_ERROR_MAX_NUMBER_OF_SLICES_EXCEEDED;
    }

    slice_segment_header* hdr = &ctx->slice[sliceIndex];
    hdr->slice_index = sliceIndex;
    bool continueDecoding;
    err = read_slice_segment_header(&reader,hdr,ctx, &continueDecoding);
    if (!continueDecoding) {
      return err;
    }
    else {
      if (ctx->param_slice_headers_fd>=0) {
        dump_slice_segment_header(hdr, ctx, ctx->param_slice_headers_fd);
      }

      if (process_slice_segment_header(ctx, hdr, &err) == false)
        {
          ctx->img->integrity = INTEGRITY_NOT_DECODED;
          return err;
        }

      skip_bits(&reader,1); // TODO: why?
      prepare_for_CABAC(&reader);


      // modify entry_point_offsets

      int headerLength = reader.data - data->data;
      for (int i=0;i<ctx->num_skipped_bytes;i++)
        {
          ctx->skipped_bytes[i] -= headerLength;
        }

      for (int i=0;i<hdr->num_entry_point_offsets;i++) {
        for (int k=ctx->num_skipped_bytes-1;k>=0;k--)
          if (ctx->skipped_bytes[k] <= hdr->entry_point_offset[i]) {
            hdr->entry_point_offset[i] -= k+1;
            break;
          }
      }


      int nRows = hdr->num_entry_point_offsets +1;

      bool use_WPP = (ctx->num_worker_threads > 0 &&
                      ctx->current_pps->entropy_coding_sync_enabled_flag);

      if (ctx->num_worker_threads > 0 &&
          ctx->current_pps->entropy_coding_sync_enabled_flag == false) {
        add_warning(ctx, DE265_WARNING_NO_WPP_CANNOT_USE_MULTITHREADING, true);
      }

      if (!use_WPP) {
        init_thread_context(&hdr->thread_context[0]);

        init_CABAC_decoder(&hdr->thread_context[0].cabac_decoder,
                           reader.data,
                           reader.bytes_remaining);

        hdr->thread_context[0].shdr = hdr;
        hdr->thread_context[0].decctx = ctx;


        // fixed context 0
        if ((err=read_slice_segment_data(ctx, &hdr->thread_context[0])) != DE265_OK)
          { return err; }
      }
      else {
        if (nRows > MAX_THREAD_CONTEXTS) {
          return DE265_ERROR_MAX_THREAD_CONTEXTS_EXCEEDED;
        }

        for (int i=0;i<nRows;i++) {
          int dataStartIndex;
          if (i==0) { dataStartIndex=0; }
          else      { dataStartIndex=hdr->entry_point_offset[i-1]; }

          int dataEnd;
          if (i==nRows-1) dataEnd = reader.bytes_remaining;
          else            dataEnd = hdr->entry_point_offset[i];

          init_thread_context(&hdr->thread_context[i]);

          init_CABAC_decoder(&hdr->thread_context[i].cabac_decoder,
                             &reader.data[dataStartIndex],
                             dataEnd-dataStartIndex);

          hdr->thread_context[i].shdr = hdr;
          hdr->thread_context[i].decctx = ctx;
        }

        // TODO: hard-coded thread context

        assert(ctx->img->tasks_pending == 0);

        //printf("-------- decode --------\n");

        add_CTB_decode_task_syntax(&hdr->thread_context[0], 0,0  ,0,0, NULL);

        /*
          for (int x=0;x<ctx->current_sps->PicWidthInCtbsY;x++)
          for (int y=0;y<ctx->current_sps->PicHeightInCtbsY;y++)
          {
          add_CTB_decode_task_syntax(&hdr->thread_context[y], x,y);
          }
        */

        wait_for_completion(ctx->img);
        //flush_thread_pool(&ctx->thread_pool);

        //printf("slice decoding finished\n");
      }
    }
  }
  else switch (nal_hdr.nal_unit_type) {
    case NAL_UNIT_VPS_NUT:
      {
        logdebug(LogHeaders,"---> read VPS\n");

        video_parameter_set vps;
        read_vps(&reader,&vps);
        if (ctx->param_vps_headers_fd>=0) {
          dump_vps(&vps, ctx->param_vps_headers_fd);
        }

        process_vps(ctx, &vps);
      }
      break;

    case NAL_UNIT_SPS_NUT:
      {
        logdebug(LogHeaders,"----> read SPS\n");

        seq_parameter_set sps;

        if ((err=read_sps(ctx, &reader,&sps, &ctx->ref_pic_sets)) != DE265_OK) {
          break;
        }

        if (ctx->param_sps_headers_fd>=0) {
          dump_sps(&sps, ctx->ref_pic_sets, ctx->param_sps_headers_fd);
        }

        process_sps(ctx, &sps);
      }
      break;

    case NAL_UNIT_PPS_NUT:
      {
        logdebug(LogHeaders,"----> read PPS\n");

        pic_parameter_set pps;

        init_pps(&pps);
        bool success = read_pps(&reader,&pps,ctx);

        if (ctx->param_pps_headers_fd>=0) {
          dump_pps(&pps, ctx->param_pps_headers_fd);
        }

        if (success) {
          process_pps(ctx,&pps);
        }
      }
      break;

    case NAL_UNIT_PREFIX_SEI_NUT:
    case NAL_UNIT_SUFFIX_SEI_NUT:
      logdebug(LogHeaders,"----> read SEI\n");

      sei_message sei;

      push_current_picture_to_output_queue(ctx);

      read_sei(&reader,&sei, nal_hdr.nal_unit_type==NAL_UNIT_SUFFIX_SEI_NUT, ctx);
      dump_sei(&sei, ctx);

      err = process_sei(&sei, ctx);
      break;
    }

  return err;
}
Esempio n. 22
0
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; the priority doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Signal that we now have the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen, something is horribly
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
Esempio n. 23
0
File: rtas.c Project: 168519/linux
int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
{
	long state;
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	struct rtas_suspend_me_data data;
	DECLARE_COMPLETION_ONSTACK(done);
	cpumask_var_t offline_mask;
	int cpuret;

	if (!rtas_service_present("ibm,suspend-me"))
		return -ENOSYS;

	/* Make sure the state is valid */
	rc = plpar_hcall(H_VASI_STATE, retbuf, handle);

	state = retbuf[0];

	if (rc) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
		return rc;
	} else if (state == H_VASI_ENABLED) {
		*vasi_return = RTAS_NOT_SUSPENDABLE;
		return 0;
	} else if (state != H_VASI_SUSPENDING) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
		       state);
		*vasi_return = -1;
		return 0;
	}

	if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
		return -ENOMEM;

	atomic_set(&data.working, 0);
	atomic_set(&data.done, 0);
	atomic_set(&data.error, 0);
	data.token = rtas_token("ibm,suspend-me");
	data.complete = &done;

	/* All present CPUs must be online */
	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
	cpuret = rtas_online_cpus_mask(offline_mask);
	if (cpuret) {
		pr_err("%s: Could not bring present CPUs online.\n", __func__);
		atomic_set(&data.error, cpuret);
		goto out;
	}

	stop_topology_update();

	/* Call function on all CPUs.  One of us will make the
	 * rtas call
	 */
	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
		atomic_set(&data.error, -EINVAL);

	wait_for_completion(&done);

	if (atomic_read(&data.error) != 0)
		printk(KERN_ERR "Error doing global join\n");

	start_topology_update();

	/* Take down CPUs not online prior to suspend */
	cpuret = rtas_offline_cpus_mask(offline_mask);
	if (cpuret)
		pr_warn("%s: Could not restore CPUs to offline state.\n",
				__func__);

out:
	free_cpumask_var(offline_mask);
	return atomic_read(&data.error);
}
Esempio n. 24
0
void tux3_iowait_wait(struct iowait *iowait)
{
	/* All I/O was submitted; drop the initial count of 1, then wait for I/O */
	iowait_inflight_dec(iowait);
	wait_for_completion(&iowait->done);
}
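tux3_iowait_wait() relies on an inflight counter that starts at 1, is incremented per submitted I/O and decremented on each completion, with the final decrement firing the completion. A generic sketch of that pattern (the structure and names below are illustrative, not the tux3 definitions):

#include <linux/atomic.h>
#include <linux/completion.h>

/* Generic inflight-counter sketch: the last decrement signals the waiter. */
struct iowait_sketch {
	atomic_t		inflight;
	struct completion	done;
};

static void iowait_sketch_init(struct iowait_sketch *w)
{
	atomic_set(&w->inflight, 1);	/* initial reference held by the submitter */
	init_completion(&w->done);
}

static void iowait_sketch_inflight_dec(struct iowait_sketch *w)
{
	if (atomic_dec_and_test(&w->inflight))
		complete(&w->done);
}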
static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
{
	struct Scsi_Host *scsi_host = scpnt->device->host;
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) scsi_host->hostdata[0];
	struct zfcp_fsf_req *old_req, *abrt_req;
	unsigned long flags;
	unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
	int retval = SUCCESS, ret;
	int retry = 3;
	char *dbf_tag;

	
	write_lock_irqsave(&adapter->abort_lock, flags);

	old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
	if (!old_req) {
		write_unlock_irqrestore(&adapter->abort_lock, flags);
		zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL);
		return FAILED; 
	}
	old_req->data = NULL;

	
	write_unlock_irqrestore(&adapter->abort_lock, flags);

	while (retry--) {
		abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
		if (abrt_req)
			break;

		zfcp_erp_wait(adapter);
		ret = fc_block_scsi_eh(scpnt);
		if (ret) {
			zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL);
			return ret;
		}
		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING)) {
			zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL);
			return SUCCESS;
		}
	}
	if (!abrt_req) {
		zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL);
		return FAILED;
	}

	wait_for_completion(&abrt_req->completion);

	if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
		dbf_tag = "abrt_ok";
	else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
		dbf_tag = "abrt_nn";
	else {
		dbf_tag = "abrt_fa";
		retval = FAILED;
	}
	zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req);
	zfcp_fsf_req_free(abrt_req);
	return retval;
}
Esempio n. 26
0
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_cop = &f2fs_cryptops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->sum_blk->journal.info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan nodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
		if (err) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	}
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only,
	 * then start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %ld",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one another chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
Esempio n. 27
0
static void create_kthread(struct kthread_create_info *create)
{
	int pid;

	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
	} else {
		struct sched_param param = { .sched_priority = 0 };
		wait_for_completion(&create->started);
		read_lock(&tasklist_lock);
		create->result = find_task_by_pid_ns(pid, &init_pid_ns);
		read_unlock(&tasklist_lock);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler(create->result, SCHED_NORMAL, &param);
		set_user_nice(create->result, KTHREAD_NICE_LEVEL);
		set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR);
	}
	complete(&create->done);
}

/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.started);
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		va_list args;
		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);
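A minimal usage sketch for the API documented above (thread body and names are illustrative): the thread is created stopped, so the caller starts it explicitly with wake_up_process().

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

/* Illustrative thread body: run until kthread_stop() asks us to exit. */
static int example_threadfn(void *data)
{
	while (!kthread_should_stop())
		msleep(100);
	return 0;
}

static struct task_struct *example_start(void)
{
	struct task_struct *t;

	t = kthread_create(example_threadfn, NULL, "example-%d", 0);
	if (!IS_ERR(t))
		wake_up_process(t);	/* kthread_create() leaves it stopped */
	return t;
}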

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @k: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}
	set_task_cpu(k, cpu);
	k->cpus_allowed = cpumask_of_cpu(cpu);
	k->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  Your threadfn() must not call do_exit()
 * itself if you use this function!  This can also be called after
 * kthread_create() instead of calling wake_up_process(): the thread
 * will exit without calling threadfn().
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	int ret;

	mutex_lock(&kthread_stop_lock);

	/* It could exit after stop_info.k set, but before wake_up_process. */
	get_task_struct(k);

	trace_sched_kthread_stop(k);

	/* Must init completion *before* thread sees kthread_stop_info.k */
	init_completion(&kthread_stop_info.done);
	smp_wmb();

	/* Now set kthread_should_stop() to true, and wake it up. */
	kthread_stop_info.k = k;
	wake_up_process(k);
	put_task_struct(k);

	/* Once it dies, reset stop ptr, gather result and we're done. */
	wait_for_completion(&kthread_stop_info.done);
	kthread_stop_info.k = NULL;
	ret = kthread_stop_info.err;
	mutex_unlock(&kthread_stop_lock);

	trace_sched_kthread_stop_ret(ret);

	return ret;
}
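And the matching shutdown side, again only a sketch: kthread_stop() makes kthread_should_stop() return true in the illustrative thread above, wakes it, and hands back its exit code.

#include <linux/kthread.h>
#include <linux/kernel.h>

/* Companion sketch: stop the illustrative thread started earlier. */
static void example_stop(struct task_struct *t)
{
	int ret = kthread_stop(t);	/* waits for example_threadfn() to return */

	pr_info("example thread exited with %d\n", ret);
}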
Esempio n. 28
0
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	kobject_del(&sbi->s_kobj);

	stop_gc_thread(sbi);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But, the previous checkpoint was not done by umount, it needs to do
	 * clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally the superblock is clean, so we need to release this.
	 * In addition, EIO will skip the checkpoint, so we need this as well.
	 */
	release_ino_entry(sbi);
	release_discard_addrs(sbi);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	if (get_pages(sbi, F2FS_WRITEBACK))
		f2fs_flush_merged_bios(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);
	kfree(sbi);
}
Esempio n. 29
0
static int __init kernel_init(void * unused)
{
	/*
	 * Wait until kthreadd is all set-up.
	 */
	wait_for_completion(&kthreadd_done);
	lock_kernel();
	/*
	 * init can run on any cpu.
	 */
	set_cpus_allowed(current, CPU_MASK_ALL);
	/*
	 * Tell the world that we're going to be the grim
	 * reaper of innocent orphaned children.
	 *
	 * We don't want people to have to make incorrect
	 * assumptions about where in the task array this
	 * can be found.
	 */
	init_pid_ns.child_reaper = current;

	__set_special_pids(1, 1);
	cad_pid = task_pid(current);

	smp_prepare_cpus(max_cpus);

	do_pre_smp_initcalls();

	smp_init();
	sched_init_smp();

	cpuset_init_smp();

	do_basic_setup();

	/* Open the /dev/console on the rootfs, this should never fail */
	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
		printk(KERN_WARNING "Warning: unable to open an initial console.\n");

	(void) sys_dup(0);
	(void) sys_dup(0);
	/*
	 * check if there is an early userspace init.  If yes, let it do all
	 * the work
	 */

	if (!ramdisk_execute_command)
		ramdisk_execute_command = "/init";

	if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
		ramdisk_execute_command = NULL;
		prepare_namespace();
	}

	/*
	 * Ok, we have completed the initial bootup, and
	 * we're essentially up and running. Get rid of the
	 * initmem segments and start the user-mode stuff..
	 */
	init_post();
	return 0;
}
Esempio n. 30
0
static int wakeup_event_thread(void *param)
{
	struct wakeup_ctrl *ctrl = (struct wakeup_ctrl *)param;
	struct sched_param sch_param = {.sched_priority = 1};

	sched_setscheduler(current, SCHED_RR, &sch_param);
	while (1) {
		wait_for_completion(&ctrl->event);
		if (kthread_should_stop())
			break;
		wakeup_event_handler(ctrl);
		enable_irq(ctrl->wakeup_irq);
		if ((ctrl->usb_irq > 0) && (ctrl->wakeup_irq != ctrl->usb_irq))
			enable_irq(ctrl->usb_irq);
	}
	return 0;
}
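The completion that wakeup_event_thread() blocks on is signalled from the interrupt handler registered below as usb_wakeup_handler; that handler is not part of this excerpt, so the following is only a hedged guess at its typical shape.

#include <linux/interrupt.h>
#include <linux/completion.h>

/* Hedged sketch: mask the line and hand the event over to the wakeup thread. */
static irqreturn_t usb_wakeup_handler_sketch(int irq, void *_ctrl)
{
	struct wakeup_ctrl *ctrl = _ctrl;

	disable_irq_nosync(irq);	/* re-enabled later by wakeup_event_thread() */
	complete(&ctrl->event);
	return IRQ_HANDLED;
}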

static int wakeup_dev_probe(struct platform_device *pdev)
{
	struct fsl_usb2_wakeup_platform_data *pdata;
	struct wakeup_ctrl *ctrl = NULL;
	int status;
	unsigned long interrupt_flag;

	printk(KERN_INFO "IMX usb wakeup probe\n");

	if (!pdev || !pdev->dev.platform_data)
		return -ENODEV;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;
	pdata = pdev->dev.platform_data;
	ctrl->pdata = pdata;
	init_waitqueue_head(&pdata->wq);
	pdata->usb_wakeup_is_pending = false;

	init_completion(&ctrl->event);
	ctrl->wakeup_irq = platform_get_irq(pdev, 0);
	ctrl->usb_irq = platform_get_irq(pdev, 1);
	if (ctrl->wakeup_irq != ctrl->usb_irq)
		interrupt_flag = IRQF_DISABLED;
	else
		interrupt_flag = IRQF_SHARED;
	status = request_irq(ctrl->wakeup_irq, usb_wakeup_handler, interrupt_flag, "usb_wakeup", (void *)ctrl);
	if (status)
		goto error1;

	ctrl->thread = kthread_run(wakeup_event_thread, (void *)ctrl, "usb_wakeup thread");
	status = IS_ERR(ctrl->thread) ? -1 : 0;
	if (status)
		goto error2;
	g_ctrl = ctrl;

	printk(KERN_DEBUG "the wakeup pdata is 0x%p\n", pdata);
	return 0;
error2:
	free_irq(ctrl->wakeup_irq, (void *)ctrl);
error1:
	kfree(ctrl);
	return status;
}

static int  wakeup_dev_exit(struct platform_device *pdev)
{
	if (g_ctrl->thread) {
		complete(&g_ctrl->event);
		kthread_stop(g_ctrl->thread);
	}
	free_irq(g_ctrl->wakeup_irq, (void *)g_ctrl);
	kfree(g_ctrl);
	return 0;
}
static struct platform_driver wakeup_d = {
	.probe   = wakeup_dev_probe,
	.remove  = wakeup_dev_exit,
	.driver = {
		.name = "usb_wakeup",
	},
};

static int __init wakeup_dev_init(void)
{
	return platform_driver_register(&wakeup_d);
}
static void __exit wakeup_dev_uninit(void)
{
	platform_driver_unregister(&wakeup_d);
}