Example #1
void mdp_config_vsync(struct msm_fb_data_type *mfd)
{
	/* vsync on primary lcd only for now */
	if ((mfd->dest != DISPLAY_LCD) || (mfd->panel_info.pdest != DISPLAY_1)
	    || (!vsync_mode)) {
		goto err_handle;
	}

	vsync_clk_status = 0;
	if (mfd->panel_info.lcd.vsync_enable) {
		mfd->total_porch_lines = mfd->panel_info.lcd.v_back_porch +
		    mfd->panel_info.lcd.v_front_porch +
		    mfd->panel_info.lcd.v_pulse_width;
		mfd->total_lcd_lines =
		    mfd->panel_info.yres + mfd->total_porch_lines;
		mfd->lcd_ref_usec_time =
		    100000000 / mfd->panel_info.lcd.refx100;
		mfd->vsync_handler_pending = FALSE;

		mfd->last_vsync_timetick.tv64 = 0;

#ifdef MDP_HW_VSYNC
		if (mdp_vsync_clk == NULL)
			mdp_vsync_clk = clk_get(NULL, "mdp_vsync_clk");

		if (IS_ERR(mdp_vsync_clk)) {
			printk(KERN_ERR "error: can't get mdp_vsync_clk!\n");
			mfd->use_mdp_vsync = 0;
		} else
			mfd->use_mdp_vsync = 1;

		if (mfd->use_mdp_vsync) {
			uint32 vsync_cnt_cfg_dem;
			uint32 mdp_vsync_clk_speed_hz;

			mdp_vsync_clk_speed_hz = clk_get_rate(mdp_vsync_clk);

			if (mdp_vsync_clk_speed_hz == 0) {
				mfd->use_mdp_vsync = 0;
			} else {
				/*
				 * Do this calculation in 2 steps for
				 * rounding uint32 properly.
				 */
				vsync_cnt_cfg_dem =
				    (mfd->panel_info.lcd.refx100 *
				     mfd->total_lcd_lines) / 100;
				vsync_cnt_cfg =
				    (mdp_vsync_clk_speed_hz) /
				    vsync_cnt_cfg_dem;
				mdp_vsync_cfg_regs(mfd, TRUE);
			}
		}
#else
		mfd->use_mdp_vsync = 0;
		hrtimer_init(&mfd->dma_hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		mfd->dma_hrtimer.function = mdp_dma2_vsync_hrtimer_handler;
		mfd->vsync_width_boundary = vmalloc(mfd->panel_info.xres * 4);
#endif

#ifdef CONFIG_FB_MSM_MDDI
		mfd->channel_irq = 0;
		if (mfd->panel_info.lcd.hw_vsync_mode) {
			u32 vsync_gpio = mfd->vsync_gpio;
			u32 ret;

			if (vsync_gpio == -1) {
				MSM_FB_INFO("vsync_gpio not defined!\n");
				goto err_handle;
			}

			ret = gpio_tlmm_config(GPIO_CFG
					(vsync_gpio,
					(mfd->use_mdp_vsync) ? 1 : 0,
					GPIO_CFG_INPUT,
					GPIO_CFG_PULL_DOWN,
					GPIO_CFG_2MA),
					GPIO_CFG_ENABLE);
			if (ret)
				goto err_handle;

			/*
			 * If use_mdp_vsync is set, no interrupt is needed:
			 * mdp_vsync is fed directly to the MDP to reset the
			 * write pointer counter, so no irq handler is
			 * required to reset it.
			 */
			if (!mfd->use_mdp_vsync) {
				mfd->channel_irq = MSM_GPIO_TO_INT(vsync_gpio);
				if (request_irq
				    (mfd->channel_irq,
				     &mdp_hw_vsync_handler_proxy,
				     IRQF_TRIGGER_FALLING, "VSYNC_GPIO",
				     (void *)mfd)) {
					MSM_FB_INFO
					("irq=%d failed! vsync_gpio=%d\n",
						mfd->channel_irq,
						vsync_gpio);
					goto err_handle;
				}
			}
		}
#endif
		mdp_hw_vsync_clk_enable(mfd);
		mdp_set_vsync((unsigned long)mfd);
	}

	return;

err_handle:
	if (mfd->vsync_width_boundary)
		vfree(mfd->vsync_width_boundary);
	mfd->panel_info.lcd.vsync_enable = FALSE;
	printk(KERN_ERR "%s: failed!\n", __func__);
}
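
The "2 steps" comment in the vsync counter setup above presumably also keeps the arithmetic within 32 bits: dividing refx100 * total_lcd_lines by 100 first avoids multiplying the clock rate by 100, which would overflow a u32 for vsync clocks above roughly 43 MHz. A stand-alone sketch with made-up panel numbers (not values taken from this driver):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical values: 60.00 Hz panel, 806 total lines, 19.2 MHz vsync clock */
	uint32_t refx100 = 6000;
	uint32_t total_lcd_lines = 806;
	uint32_t clk_hz = 19200000;

	uint32_t dem = (refx100 * total_lcd_lines) / 100;	/* lines per second: 48360 */
	uint32_t cfg = clk_hz / dem;				/* clock ticks per line: 397 */

	printf("vsync_cnt_cfg = %u\n", cfg);
	return 0;
}
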
int rt5677_ioctl_common(struct snd_hwdep *hw, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct snd_soc_codec *codec = hw->private_data;
	struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
	struct rt_codec_cmd __user *_rt_codec = (struct rt_codec_cmd *)arg;
	struct rt_codec_cmd rt_codec;
	int ret = -EFAULT;
	u32 addr = RT5677_MIC_BUF_ADDR;
	size_t size;
	u32 mic_write_offset;
	size_t bytes_to_user = 0;
	size_t first_chunk_start, first_chunk_len;
	size_t second_chunk_start, second_chunk_len;

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		struct compat_rt_codec_cmd compat_rt_codec;

		if (copy_from_user(&compat_rt_codec, _rt_codec,
				   sizeof(compat_rt_codec)))
			return -EFAULT;
		rt_codec.number = compat_rt_codec.number;
		rt_codec.buf = compat_ptr(compat_rt_codec.buf);
	} else
#endif
	{
		if (copy_from_user(&rt_codec, _rt_codec, sizeof(rt_codec)))
			return -EFAULT;
	}

	dev_dbg(codec->dev, "%s: rt_codec.number=%zu, cmd=%u\n",
		__func__, rt_codec.number, cmd);

	size = sizeof(int) * rt_codec.number;
	switch (cmd) {
	case RT_READ_CODEC_DSP_IOCTL:
	case RT_READ_CODEC_DSP_IOCTL_COMPAT:
		/* Grab the first 4 bytes that holds the write pointer on the
		   dsp, and check to make sure that it points somewhere inside
		   the buffer. */
		ret = rt5677_spi_read(addr, rt5677->mic_buf, 4);
		if (ret)
			return ret;
		mic_write_offset = le32_to_cpup((u32 *)rt5677->mic_buf);

		if (mic_write_offset < sizeof(u32) ||
		    mic_write_offset >= RT5677_MIC_BUF_SIZE) {
			dev_err(codec->dev,
				"Invalid offset in the mic buffer %d\n",
				mic_write_offset);
			return -EFAULT;
		}

		/* If the mic_read_offset is zero, this means it's the first
		   time that we've asked for streaming data. We should start
		   reading from the previous 2 seconds of audio from wherever
		   the mic_write_offset is currently (note that this needs to
		   wraparound the buffer). */
		if (rt5677->mic_read_offset == 0) {
			if (mic_write_offset <
			    RT5677_MIC_BUF_FIRST_READ_SIZE + sizeof(u32)) {
				rt5677->mic_read_offset = (RT5677_MIC_BUF_SIZE -
					(RT5677_MIC_BUF_FIRST_READ_SIZE -
						(mic_write_offset - sizeof(u32))));
			} else {
				rt5677->mic_read_offset = (mic_write_offset -
					RT5677_MIC_BUF_FIRST_READ_SIZE);
			}
		}

		/* If the audio wrapped around, then we need to do the copy in
		   two passes, otherwise, we can do it on one. We should also
		   make sure that we don't read more bytes than we have in the
		   user buffer, or we'll just waste time. */
		if (mic_write_offset < rt5677->mic_read_offset) {
			/* Copy the audio from the last read offset until the
			   end of the buffer, then do the second chunk that
			   starts after the u32. */
			first_chunk_start = rt5677->mic_read_offset;
			first_chunk_len =
				RT5677_MIC_BUF_SIZE - rt5677->mic_read_offset;
			if (first_chunk_len > size) {
				first_chunk_len = size;
				second_chunk_start = 0;
				second_chunk_len = 0;
			} else {
				second_chunk_start = sizeof(u32);
				second_chunk_len =
					mic_write_offset - sizeof(u32);
				if (first_chunk_len + second_chunk_len > size) {
					second_chunk_len =
						size - first_chunk_len;
				}
			}
		} else {
			first_chunk_start = rt5677->mic_read_offset;
			first_chunk_len =
				mic_write_offset - rt5677->mic_read_offset;
			if (first_chunk_len > size)
				first_chunk_len = size;
			second_chunk_start = 0;
			second_chunk_len = 0;
		}

		ret = rt5677_spi_read(addr + first_chunk_start, rt5677->mic_buf,
				      first_chunk_len);
		if (ret)
			return ret;
		bytes_to_user += first_chunk_len;

		if (second_chunk_len) {
			ret = rt5677_spi_read(addr + second_chunk_start,
					      rt5677->mic_buf + first_chunk_len,
					      second_chunk_len);
			if (!ret)
				bytes_to_user += second_chunk_len;
		}

		bytes_to_user -= copy_to_user(rt_codec.buf, rt5677->mic_buf,
					      bytes_to_user);

		rt5677->mic_read_offset += bytes_to_user;
		if (rt5677->mic_read_offset >= RT5677_MIC_BUF_SIZE) {
			rt5677->mic_read_offset -=
				RT5677_MIC_BUF_SIZE - sizeof(u32);
		}
		return bytes_to_user >> 1;

	case RT_WRITE_CODEC_DSP_IOCTL:
	case RT_WRITE_CODEC_DSP_IOCTL_COMPAT:
		if (!rt5677->model_buf || rt5677->model_len < size) {
			vfree(rt5677->model_buf);
			rt5677->model_len = 0;
			rt5677->model_buf = vmalloc(size);
			if (!rt5677->model_buf)
				return -ENOMEM;
		}
		if (copy_from_user(rt5677->model_buf, rt_codec.buf, size))
			return -EFAULT;
		rt5677->model_len = size;
		return 0;

	default:
		return -ENOTSUPP;
	}
}
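
The two-chunk copy above is the standard way to drain a circular buffer whose first four bytes hold the DSP's write pointer. A minimal user-space sketch of the same split logic; the buffer size and offsets are illustrative, not the driver's:

#include <stdio.h>
#include <stddef.h>

#define BUF_SIZE 0x100	/* hypothetical; the driver uses RT5677_MIC_BUF_SIZE */
#define HDR      4	/* first 4 bytes hold the write pointer */

/* Compute the one or two chunks needed to read up to 'want' bytes lying
 * between read_off and write_off, skipping the header at offset 0. */
static void split(size_t read_off, size_t write_off, size_t want,
		  size_t *len1, size_t *start2, size_t *len2)
{
	if (write_off < read_off) {		/* data wrapped around */
		*len1 = BUF_SIZE - read_off;
		if (*len1 > want) {
			*len1 = want;
			*start2 = *len2 = 0;
		} else {
			*start2 = HDR;
			*len2 = write_off - HDR;
			if (*len1 + *len2 > want)
				*len2 = want - *len1;
		}
	} else {				/* contiguous */
		*len1 = write_off - read_off;
		if (*len1 > want)
			*len1 = want;
		*start2 = *len2 = 0;
	}
}

int main(void)
{
	size_t l1, s2, l2;

	split(0xF0, 0x20, 64, &l1, &s2, &l2);
	printf("chunk1=%zu bytes, chunk2 at %zu, %zu bytes\n", l1, s2, l2);
	return 0;
}
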
Example #3
/**
 * ipath_diagpkt_write - write an IB packet
 * @fp: the diag data device file pointer
 * @data: ipath_diag_pkt structure saying where to get the packet
 * @count: size of data to write
 * @off: unused by this code
 */
static ssize_t ipath_diagpkt_write(struct file *fp,
				   const char __user *data,
				   size_t count, loff_t *off)
{
	u32 __iomem *piobuf;
	u32 plen, pbufn, maxlen_reserve;
	struct ipath_diag_pkt odp;
	struct ipath_diag_xpkt dp;
	u32 *tmpbuf = NULL;
	struct ipath_devdata *dd;
	ssize_t ret = 0;
	u64 val;
	u32 l_state, lt_state; /* LinkState, LinkTrainingState */


	if (count == sizeof(dp)) {
		if (copy_from_user(&dp, data, sizeof(dp))) {
			ret = -EFAULT;
			goto bail;
		}
	} else if (count == sizeof(odp)) {
		if (copy_from_user(&odp, data, sizeof(odp))) {
			ret = -EFAULT;
			goto bail;
		}
	} else {
		ret = -EINVAL;
		goto bail;
	}

	/* send count must be an exact number of dwords */
	if (dp.len & 3) {
		ret = -EINVAL;
		goto bail;
	}

	plen = dp.len >> 2;

	dd = ipath_lookup(dp.unit);
	if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
	    !dd->ipath_kregbase) {
		ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
			   dp.unit);
		ret = -ENODEV;
		goto bail;
	}

	if (ipath_diag_inuse && !diag_set_link &&
	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
		diag_set_link = 1;
		ipath_cdbg(VERBOSE, "Trying to set to set link active for "
			   "diag pkt\n");
		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
	}

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
		ret = -ENODEV;
		goto bail;
	}
	/*
	 * Want to skip check for l_state if using custom PBC,
	 * because we might be trying to force an SM packet out.
	 * first-cut, skip _all_ state checking in that case.
	 */
	val = ipath_ib_state(dd, dd->ipath_lastibcstat);
	lt_state = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
	l_state = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
	if (!dp.pbc_wd && (lt_state != INFINIPATH_IBCS_LT_STATE_LINKUP ||
	    (val != dd->ib_init && val != dd->ib_arm &&
	    val != dd->ib_active))) {
		ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
			   dd->ipath_unit, (unsigned long long) val);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * need total length before first word written, plus 2 Dwords. One Dword
	 * is for padding so we get the full user data when not aligned on
	 * a word boundary. The other Dword is to make sure we have room for the
	 * ICRC which gets tacked on later.
	 */
	maxlen_reserve = 2 * sizeof(u32);
	if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
		ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
			  dp.len, dd->ipath_ibmaxlen);
		ret = -EINVAL;
		goto bail;
	}

	plen = sizeof(u32) + dp.len;

	tmpbuf = vmalloc(plen);
	if (!tmpbuf) {
		dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
			 "failing\n");
		ret = -ENOMEM;
		goto bail;
	}

	if (copy_from_user(tmpbuf,
			   (const void __user *) (unsigned long) dp.data,
			   dp.len)) {
		ret = -EFAULT;
		goto bail;
	}

	plen >>= 2;		/* in dwords */

	piobuf = ipath_getpiobuf(dd, plen, &pbufn);
	if (!piobuf) {
		ipath_cdbg(VERBOSE, "No PIO buffers avail unit for %u\n",
			   dd->ipath_unit);
		ret = -EBUSY;
		goto bail;
	}
	/* disarm it just to be extra sure */
	ipath_disarm_piobufs(dd, pbufn, 1);

	if (ipath_debug & __IPATH_PKTDBG)
		ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
			   dd->ipath_unit, plen - 1, pbufn);

	if (dp.pbc_wd == 0)
		dp.pbc_wd = plen;
	writeq(dp.pbc_wd, piobuf);
	/*
	 * Copy all but the trigger word, then flush, so it's written
	 * to the chip before the trigger word; then write the trigger
	 * word and flush again, so the packet is sent.
	 */
	if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
		ipath_flush_wc();
		__iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
		ipath_flush_wc();
		__raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
	} else
		__iowrite32_copy(piobuf + 2, tmpbuf, plen);

	ipath_flush_wc();

	ret = sizeof(dp);

bail:
	vfree(tmpbuf);
	return ret;
}
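
To make the length bookkeeping above concrete with a hypothetical 64-byte payload: plen starts as dp.len >> 2 = 16 dwords, is then recomputed as sizeof(u32) + dp.len = 68 bytes to size the vmalloc'd staging buffer, and is finally shifted back down to 17 dwords (one PBC word plus 16 payload dwords), which is also the value stored in dp.pbc_wd when the caller leaves it at zero.
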
Example #4
static int agp_backend_initialize(struct agp_bridge_data *bridge)
{
	int size_value, rc, got_gatt=0, got_keylist=0;

	bridge->max_memory_agp = agp_find_max();
	bridge->version = &agp_current_version;

	if (bridge->driver->needs_scratch_page) {
		void *addr = bridge->driver->agp_alloc_page(bridge);

		if (!addr) {
			printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
			return -ENOMEM;
		}

		bridge->scratch_page_real = virt_to_gart(addr);
		bridge->scratch_page =
		    bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0);
	}

	size_value = bridge->driver->fetch_size();
	if (size_value == 0) {
		printk(KERN_ERR PFX "unable to determine aperture size.\n");
		rc = -EINVAL;
		goto err_out;
	}
	if (bridge->driver->create_gatt_table(bridge)) {
		printk(KERN_ERR PFX
		    "unable to get memory for graphics translation table.\n");
		rc = -ENOMEM;
		goto err_out;
	}
	got_gatt = 1;

	bridge->key_list = vmalloc(PAGE_SIZE * 4);
	if (bridge->key_list == NULL) {
		printk(KERN_ERR PFX "error allocating memory for key lists.\n");
		rc = -ENOMEM;
		goto err_out;
	}
	got_keylist = 1;

	/* FIXME vmalloc'd memory not guaranteed contiguous */
	memset(bridge->key_list, 0, PAGE_SIZE * 4);

	if (bridge->driver->configure()) {
		printk(KERN_ERR PFX "error configuring host chipset.\n");
		rc = -EINVAL;
		goto err_out;
	}

	return 0;

err_out:
	if (bridge->driver->needs_scratch_page) {
		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
						 AGP_PAGE_DESTROY_UNMAP);
		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
						 AGP_PAGE_DESTROY_FREE);
	}
	if (got_gatt)
		bridge->driver->free_gatt_table(bridge);
	if (got_keylist) {
		vfree(bridge->key_list);
		bridge->key_list = NULL;
	}
	return rc;
}
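
One note on the key_list allocation above: on kernels that provide vzalloc(), the vmalloc() + memset() pair can be collapsed into a single call that returns zeroed (virtually contiguous) memory. A sketch of that variant, not a patch taken from the driver:

	bridge->key_list = vzalloc(PAGE_SIZE * 4);
	if (bridge->key_list == NULL) {
		printk(KERN_ERR PFX "error allocating memory for key lists.\n");
		rc = -ENOMEM;
		goto err_out;
	}
	got_keylist = 1;
	/* no explicit memset needed: vzalloc returns zeroed pages */
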
Example #5
static int isp_open (struct inode *node, struct file *file)
{
	int ret = 0;
	struct isp_k_private *isp_private = s_isp_private;//platform_get_drvdata(rot_get_platform_device())
	struct isp_k_file *fd = NULL;

	if (!file) {
		ret = -EINVAL;
		printk("isp_open: file is null error.\n");
		return ret;
	}

	if (!isp_private) {
		ret = -EFAULT;
		printk("isp_open: isp_private is null, error.\n");
		return ret;
	}

	down(&isp_private->device_lock);

	fd = vzalloc(sizeof(*fd));
	if (!fd) {
		ret = -ENOMEM;
		up(&isp_private->device_lock);
		printk("isp_open: no memory for fd, error.\n");
		return ret;
	}

	fd->isp_private = isp_private;

	spin_lock_init(&fd->drv_private.isr_lock);
	sema_init(&fd->drv_private.isr_done_lock, 0);
	ret = isp_queue_init(&(fd->drv_private.queue));
	if (unlikely(0 != ret)) {
		ret = -EFAULT;
		vfree(fd);
		up(&isp_private->device_lock);
		printk("isp_open: isp_queue_init error.\n");
		return ret;
	}

	ret = isp_module_eb(fd);
	if (unlikely(0 != ret)) {
		ret = -EIO;
		printk("isp_open: isp_module_eb error.\n");
		goto open_exit;
	}

	ret = isp_module_rst(fd);
	if (unlikely(0 != ret)) {
		ret = -EIO;
		isp_module_dis(fd);
		printk("isp_open: isp_module_rst error.\n");
		goto open_exit;
	}

	ret = isp_register_irq(fd);
	if (unlikely(0 != ret)) {
		ret = -EIO;
		isp_module_dis(fd);
		printk("isp_open: isp_register_irq error.\n");
		goto open_exit;
	}

	file->private_data = fd;

	printk("isp_open: success.\n");

	return ret;

open_exit:
	vfree(fd);
	fd = NULL;

	file->private_data = NULL;

	up(&isp_private->device_lock);

	return ret;
}
Example #6
static int bq275x0_update_firmware(struct i2c_client *client, const char *pFilePath) 
{
    char *buf;
    struct file *filp;
    struct inode *inode = NULL;
    mm_segment_t oldfs;
    int length;
    int ret = 0;

    /* open file */
    oldfs = get_fs();
    set_fs(KERNEL_DS);
    filp = filp_open(pFilePath, O_RDONLY, S_IRUSR);
    if (IS_ERR(filp)) 
    {
        printk(KERN_ERR "[%s,%d] filp_open failed\n",__FUNCTION__,__LINE__);
        set_fs(oldfs);
        return -1;
    }

    if (!filp->f_op) 
    {
        printk(KERN_ERR "[%s,%d] File Operation Method Error\n",__FUNCTION__,__LINE__);        
        filp_close(filp, NULL);
        set_fs(oldfs);
        return -1;
    }

    inode = filp->f_path.dentry->d_inode;
    if (!inode) 
    {
        printk(KERN_ERR "[%s,%d] Get inode from filp failed\n",__FUNCTION__,__LINE__);          
        filp_close(filp, NULL);
        set_fs(oldfs);
        return -1;
    }

    /* file's size */
    length = i_size_read(inode->i_mapping->host);
    printk("bq275x0 firmware image size is %d \n",length);
    if (!( length > 0 && length < BSP_FIRMWARE_FILE_SIZE))
    {
        printk(KERN_ERR "[%s,%d] Get file size error\n",__FUNCTION__,__LINE__);
        filp_close(filp, NULL);
        set_fs(oldfs);
        return -1;
    }

    /* allocation buff size */
    buf = vmalloc(length+(length%2));       /* keep buf size even */
    if (!buf) 
    {
        printk(KERN_ERR "[%s,%d] Alloctation memory failed\n",__FUNCTION__,__LINE__);
        filp_close(filp, NULL);
        set_fs(oldfs);
        return -1;
    }

    /* read data */
    if (filp->f_op->read(filp, buf, length, &filp->f_pos) != length) 
    {
        printk(KERN_ERR "[%s,%d] File read error\n",__FUNCTION__,__LINE__);
        filp_close(filp, NULL);
        set_fs(oldfs);
        vfree(buf);
        return -1;
    }

    ret = bq275x0_firmware_download(client, buf, length);

    filp_close(filp, NULL);
    set_fs(oldfs);
    vfree(buf);
    
    return ret;
    
}
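
Reading the image with filp_open()/set_fs() from kernel space works, but a common alternative is to let the firmware loader fetch the blob and copy it into a vmalloc'd buffer. A hedged sketch of that approach; the firmware name is a placeholder and bq275x0_firmware_download() is the routine already used above:

#include <linux/firmware.h>
#include <linux/i2c.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static int bq275x0_update_firmware_req(struct i2c_client *client, const char *name)
{
	const struct firmware *fw;
	char *buf;
	int ret;

	/* "name" would be something like "bq275x0.dfi" (placeholder) */
	ret = request_firmware(&fw, name, &client->dev);
	if (ret)
		return ret;

	buf = vmalloc(fw->size);
	if (!buf) {
		release_firmware(fw);
		return -ENOMEM;
	}
	memcpy(buf, fw->data, fw->size);

	ret = bq275x0_firmware_download(client, buf, fw->size);

	vfree(buf);
	release_firmware(fw);
	return ret;
}
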
Example #7
/**
 * free_sq_skb_arr - free sq array for saved skb
 * @sq: HW Send Queue
 **/
static void free_sq_skb_arr(struct hinic_sq *sq)
{
	vfree(sq->saved_skb);
}
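
Since vfree() accepts NULL, one-line teardown helpers like this need no check. The matching allocation side might look like the sketch below; the queue-depth parameter and the use of vzalloc() are assumptions, only the saved_skb field comes from the code above:

static int alloc_sq_skb_arr(struct hinic_sq *sq, unsigned int q_depth)
{
	/* one saved skb pointer per send-queue entry (q_depth is assumed) */
	sq->saved_skb = vzalloc(q_depth * sizeof(*sq->saved_skb));
	if (!sq->saved_skb)
		return -ENOMEM;

	return 0;
}
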
Example #8
static int
dom0_ioctl(struct file *file,
	unsigned int ioctl_num,
	unsigned long ioctl_param)
{
	int idx, ret;
	char name[DOM0_NAME_MAX] = {0};
	struct memory_info meminfo;
	struct dom0_mm_data *mm_data = file->private_data;

	XEN_PRINT("IOCTL num=0x%0x param=0x%0lx \n", ioctl_num, ioctl_param);

	/**
	 * Switch according to the ioctl called
	 */
	switch (_IOC_NR(ioctl_num)) {
	case _IOC_NR(RTE_DOM0_IOCTL_PREPARE_MEMSEG):
		ret = copy_from_user(&meminfo, (void *)ioctl_param,
			sizeof(struct memory_info));
		if (ret)
			return  -EFAULT;

		if (mm_data != NULL) {
			XEN_ERR("Cannot create memory segment for the same"
				" file descriptor\n");
			return -EINVAL;
		}

		/* Allocate private data */
		mm_data = vmalloc(sizeof(struct dom0_mm_data));
		if (!mm_data) {
			XEN_ERR("Unable to allocate device private data\n");
			return -ENOMEM;
		}
		memset(mm_data, 0, sizeof(struct dom0_mm_data));

		mutex_lock(&dom0_dev.data_lock);
		/* check if we can allocate memory*/
		if (dom0_check_memory(&meminfo) < 0) {
			mutex_unlock(&dom0_dev.data_lock);
			vfree(mm_data);
			return -EINVAL;
		}

		/* allocate memory and create memory segments */
		if (dom0_prepare_memsegs(&meminfo, mm_data) < 0) {
			XEN_ERR("create memory segment fail.\n");
			mutex_unlock(&dom0_dev.data_lock);
			vfree(mm_data);
			return -EIO;
		}

		file->private_data = mm_data;
		mutex_unlock(&dom0_dev.data_lock);
		break;

	/* support multiple process in term of memory mapping*/
	case _IOC_NR(RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG):
		ret = copy_from_user(name, (void *)ioctl_param,
				sizeof(char) * DOM0_NAME_MAX);
		if (ret)
			return -EFAULT;

		mutex_lock(&dom0_dev.data_lock);
		idx = dom0_find_memdata(name);
		if (idx < 0) {
			mutex_unlock(&dom0_dev.data_lock);
			return -EINVAL;
		}

		mm_data = dom0_dev.mm_data[idx];
		mm_data->refcnt++;
		file->private_data = mm_data;
		mutex_unlock(&dom0_dev.data_lock);
		break;

	case _IOC_NR(RTE_DOM0_IOCTL_GET_NUM_MEMSEG):
		ret = copy_to_user((void *)ioctl_param, &mm_data->num_memseg,
				sizeof(int));
		if (ret)
			return -EFAULT;
		break;

	case _IOC_NR(RTE_DOM0_IOCTL_GET_MEMSEG_INFO):
		ret = copy_to_user((void *)ioctl_param,
				&mm_data->seg_info[0],
				sizeof(struct memseg_info) *
				mm_data->num_memseg);
		if (ret)
			return -EFAULT;
		break;
	default:
		XEN_PRINT("IOCTL default \n");
		break;
	}

	return 0;
}
Example #9
int mfc_init_mem_mgr(struct mfc_dev *dev)
{
	int i;
#if !defined(CONFIG_VIDEO_MFC_VCM_UMP)
	dma_addr_t base[MAX_ALLOCATION];
#else
	/* FIXME: for support user-side allocation. it's temporary solution */
	struct vcm_res	*hole;
#endif
#ifndef SYSMMU_MFC_ON
	size_t size;
#endif
#ifdef CONFIG_S5P_MEM_CMA
	struct cma_info cma_infos[2];
#ifdef CONFIG_EXYNOS4_CONTENT_PATH_PROTECTION
	size_t bound_size;
	size_t available_size;
	size_t hole_size;
#else
	int cma_index = 0;
#endif
#else
	unsigned int align_margin;
#endif

	dev->mem_ports = MFC_MAX_MEM_PORT_NUM;
	memset(dev->mem_infos, 0, sizeof(dev->mem_infos));

#ifdef SYSMMU_MFC_ON
#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
	dev->vcm_info.sysmmu_vcm = vcm_create_unified(
		SZ_256M * dev->mem_ports,
			VCM_DEV_MFC,
			&mfc_vcm_driver);

	memcpy(&vcm_info, &dev->vcm_info, sizeof(struct mfc_vcm));

	dev->mem_infos[0].vcm_s = vcm_reserve(dev->vcm_info.sysmmu_vcm,
		MFC_MEMSIZE_PORT_A, 0);

	if (IS_ERR(dev->mem_infos[0].vcm_s))
		return PTR_ERR(dev->mem_infos[0].vcm_s);

	dev->mem_infos[0].base = ALIGN(dev->mem_infos[0].vcm_s->start,
		ALIGN_128KB);
	align_margin = dev->mem_infos[0].base - dev->mem_infos[0].vcm_s->start;
	/* FIXME: for offset operation. it's temporary solution */
	/*
	dev->mem_infos[0].size = MFC_MEMSIZE_PORT_A - align_margin;
	*/
	dev->mem_infos[0].size = SZ_256M - align_margin;
	dev->mem_infos[0].addr = NULL;

	/* FIXME: for support user-side allocation. it's temporary solution */
	if (MFC_MEMSIZE_PORT_A < SZ_256M)
		hole = vcm_reserve(dev->vcm_info.sysmmu_vcm,
			SZ_256M - MFC_MEMSIZE_PORT_A, 0);

	if (dev->mem_ports == 2) {
		dev->mem_infos[1].vcm_s = vcm_reserve(dev->vcm_info.sysmmu_vcm,
			MFC_MEMSIZE_PORT_B, 0);

		if (IS_ERR(dev->mem_infos[1].vcm_s)) {
			vcm_unreserve(dev->mem_infos[0].vcm_s);
			return PTR_ERR(dev->mem_infos[1].vcm_s);
		}

		dev->mem_infos[1].base = ALIGN(dev->mem_infos[1].vcm_s->start,
			ALIGN_128KB);
		align_margin = dev->mem_infos[1].base - dev->mem_infos[1].vcm_s->start;
		dev->mem_infos[1].size = MFC_MEMSIZE_PORT_B - align_margin;
		dev->mem_infos[1].addr = NULL;
	}

	/* FIXME: for support user-side allocation. it's temporary solution */
	vcm_unreserve(hole);

	dev->fw.vcm_s = mfc_vcm_bind(dev->mem_infos[0].base, MFC_FW_SYSTEM_SIZE);
	if (IS_ERR(dev->fw.vcm_s))
		return PTR_ERR(dev->fw.vcm_s);

	dev->fw.vcm_k = mfc_vcm_map(dev->fw.vcm_s->res.phys);
	if (IS_ERR(dev->fw.vcm_k)) {
		mfc_vcm_unbind(dev->fw.vcm_s, 0);
		return PTR_ERR(dev->fw.vcm_k);
	}

	/* FIXME: it's very tricky! MUST BE FIX */
	dev->mem_infos[0].addr = (unsigned char *)dev->fw.vcm_k->start;
#elif defined(CONFIG_S5P_VMEM)
	base[0] = MFC_FREEBASE;

	dev->mem_infos[0].base = ALIGN(base[0], ALIGN_128KB);
	align_margin = dev->mem_infos[0].base - base[0];
	dev->mem_infos[0].size = MFC_MEMSIZE_PORT_A - align_margin;
	dev->mem_infos[0].addr = (unsigned char *)dev->mem_infos[0].base;

	if (dev->mem_ports == 2) {
		base[1] = dev->mem_infos[0].base + dev->mem_infos[0].size;
		dev->mem_infos[1].base = ALIGN(base[1], ALIGN_128KB);
		align_margin = dev->mem_infos[1].base - base[1];
		dev->mem_infos[1].size = MFC_MEMSIZE_PORT_B - align_margin;
		dev->mem_infos[1].addr = (unsigned char *)dev->mem_infos[1].base;
	}

	dev->fw.vmem_cookie = s5p_vmem_vmemmap(MFC_FW_SYSTEM_SIZE,
		dev->mem_infos[0].base,
		dev->mem_infos[0].base + MFC_FW_SYSTEM_SIZE);

	if (!dev->fw.vmem_cookie)
		return -ENOMEM;
#else	/* not CONFIG_VIDEO_MFC_VCM_UMP && not CONFIG_S5P_VMEM */
	/* kernel virtual memory allocator */

	dev->mem_infos[0].vmalloc_addr = vmalloc(MFC_MEMSIZE_PORT_A);
	if (dev->mem_infos[0].vmalloc_addr == NULL)
		return -ENOMEM;

	base[0] = (unsigned long)dev->mem_infos[0].vmalloc_addr;
	dev->mem_infos[0].base = ALIGN(base[0], ALIGN_128KB);
	align_margin = dev->mem_infos[0].base - base[0];
	dev->mem_infos[0].size = MFC_MEMSIZE_PORT_A - align_margin;
	dev->mem_infos[0].addr = (unsigned char *)dev->mem_infos[0].base;

	if (dev->mem_ports == 2) {
		dev->mem_infos[1].vmalloc_addr = vmalloc(MFC_MEMSIZE_PORT_B);
		if (dev->mem_infos[1].vmalloc_addr == NULL) {
			vfree(dev->mem_infos[0].vmalloc_addr);
			return -ENOMEM;
		}

		base[1] = (unsigned long)dev->mem_infos[1].vmalloc_addr;
		dev->mem_infos[1].base = ALIGN(base[1], ALIGN_128KB);
		align_margin = dev->mem_infos[1].base - base[1];
		dev->mem_infos[1].size = MFC_MEMSIZE_PORT_B - align_margin;
		dev->mem_infos[1].addr = (unsigned char *)dev->mem_infos[1].base;
	}
#endif	/* end of CONFIG_VIDEO_MFC_VCM_UMP */
#else	/* not SYSMMU_MFC_ON */
	/* early allocator */
#if defined(CONFIG_S5P_MEM_CMA)
#ifdef CONFIG_EXYNOS4_CONTENT_PATH_PROTECTION
	if (cma_info(&cma_infos[0], dev->device, "A")) {
		mfc_info("failed to get CMA info of 'mfc-secure'\n");
		return -ENOMEM;
	}

	if (cma_info(&cma_infos[1], dev->device, "B")) {
		mfc_info("failed to get CMA info of 'mfc-normal'\n");
		return -ENOMEM;
	}

	if (cma_infos[0].lower_bound > cma_infos[1].lower_bound) {
		mfc_info("'mfc-secure' region must be lower than 'mfc-normal' region\n");
		return -ENOMEM;
	}

	/*
	 * available = secure + normal
	 * bound = secure + hole + normal
	 * hole = bound - available
	 */
	available_size = cma_infos[0].free_size + cma_infos[1].free_size;
	bound_size = cma_infos[1].upper_bound - cma_infos[0].lower_bound;
	hole_size = bound_size - available_size;
	mfc_dbg("avail: 0x%08x, bound: 0x%08x offset: 0x%08x, hole: 0x%08x\n",
		available_size, bound_size, MAX_MEM_OFFSET, hole_size);

	/* re-assign actually available size */
	if (bound_size > MAX_MEM_OFFSET) {
		if (cma_infos[0].free_size > MAX_MEM_OFFSET)
			/* it will be return error */
			available_size = MAX_MEM_OFFSET;
		else if ((cma_infos[0].free_size + hole_size) >= MAX_MEM_OFFSET)
			/* it will be return error */
			available_size = cma_infos[0].free_size;
		else
			available_size -= (bound_size - MAX_MEM_OFFSET);
	}
	mfc_dbg("avail: 0x%08x\n", available_size);

	size = cma_infos[0].free_size;
	if (size > available_size) {
		mfc_info("'mfc-secure' region is too large (%d:%d)",
			size >> 10,
			MAX_MEM_OFFSET >> 10);
		return -ENOMEM;
	}
Example #10
// mkzruntimedefs writes zruntime_defs_$GOOS_$GOARCH.h,
// which contains Go struct definitions equivalent to the C ones.
// Mostly we just write the output of 6c -q to the file.
// However, we run it on multiple files, so we have to delete
// the duplicated definitions, and we don't care about the funcs
// and consts, so we delete those too.
// 
void
mkzruntimedefs(char *dir, char *file)
{
	int i, skip;
	char *p;
	Buf in, b, b1, out;
	Vec argv, lines, fields, seen;
	
	binit(&in);
	binit(&b);
	binit(&b1);
	binit(&out);
	vinit(&argv);
	vinit(&lines);
	vinit(&fields);
	vinit(&seen);
	
	bwritestr(&out, "// auto generated by go tool dist\n"
		"\n"
		"package runtime\n"
		"import \"unsafe\"\n"
		"var _ unsafe.Pointer\n"
		"\n"
	);

	
	// Run 6c -D GOOS_goos -D GOARCH_goarch -I workdir -q -n -o workdir/runtimedefs
	// on each of the runtimedefs C files.
	vadd(&argv, bpathf(&b, "%s/%sc", tooldir, gochar));
	vadd(&argv, "-D");
	vadd(&argv, bprintf(&b, "GOOS_%s", goos));
	vadd(&argv, "-D");
	vadd(&argv, bprintf(&b, "GOARCH_%s", goarch));
	vadd(&argv, "-I");
	vadd(&argv, bprintf(&b, "%s", workdir));
	vadd(&argv, "-q");
	vadd(&argv, "-n");
	vadd(&argv, "-o");
	vadd(&argv, bpathf(&b, "%s/runtimedefs", workdir));
	vadd(&argv, "");
	p = argv.p[argv.len-1];
	for(i=0; i<nelem(runtimedefs); i++) {
		argv.p[argv.len-1] = runtimedefs[i];
		runv(nil, dir, CheckExit, &argv);
		readfile(&b, bpathf(&b1, "%s/runtimedefs", workdir));
		bwriteb(&in, &b);
	}
	argv.p[argv.len-1] = p;
		
	// Process the aggregate output.
	skip = 0;
	splitlines(&lines, bstr(&in));
	for(i=0; i<lines.len; i++) {
		p = lines.p[i];
		// Drop comment, func, and const lines.
		if(hasprefix(p, "//") || hasprefix(p, "const") || hasprefix(p, "func"))
			continue;
		
		// Note beginning of type or var decl, which can be multiline.
		// Remove duplicates.  The linear check of seen here makes the
		// whole processing quadratic in aggregate, but there are only
		// about 100 declarations, so this is okay (and simple).
		if(hasprefix(p, "type ") || hasprefix(p, "var ")) {
			splitfields(&fields, p);
			if(fields.len < 2)
				continue;
			if(find(fields.p[1], seen.p, seen.len) >= 0) {
				if(streq(fields.p[fields.len-1], "{"))
					skip = 1;  // skip until }
				continue;
			}
			vadd(&seen, fields.p[1]);
		}
		if(skip) {
			if(hasprefix(p, "}"))
				skip = 0;
			continue;
		}
		
		bwritestr(&out, p);
	}
	
	writefile(&out, file, 0);

	bfree(&in);
	bfree(&b);
	bfree(&b1);
	bfree(&out);
	vfree(&argv);
	vfree(&lines);
	vfree(&fields);
	vfree(&seen);
}
Example #11
static int
dom0_memory_free(uint32_t rsv_size)
{
	uint64_t vstart, vaddr;
	uint32_t i, num_block, size;

	if (!xen_pv_domain())
		return -1;

	/* each memory block is 2M */
	num_block = rsv_size / SIZE_PER_BLOCK;
	if (num_block == 0)
		return -EINVAL;

	/* free all memory blocks of size 4M and destroy the contiguous regions */
	for (i = 0; i < dom0_dev.num_bigblock * 2; i += 2) {
		vstart = rsv_mm_info[i].vir_addr;
		if (vstart) {
		#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
			if (rsv_mm_info[i].exchange_flag)
				xen_destroy_contiguous_region(vstart,
						DOM0_CONTIG_NUM_ORDER);
			if (rsv_mm_info[i + 1].exchange_flag)
				xen_destroy_contiguous_region(vstart +
						DOM0_MEMBLOCK_SIZE,
						DOM0_CONTIG_NUM_ORDER);
		#else
			if (rsv_mm_info[i].exchange_flag)
				xen_destroy_contiguous_region(rsv_mm_info[i].pfn
					* PAGE_SIZE,
					DOM0_CONTIG_NUM_ORDER);
			if (rsv_mm_info[i + 1].exchange_flag)
				xen_destroy_contiguous_region(rsv_mm_info[i].pfn
					* PAGE_SIZE + DOM0_MEMBLOCK_SIZE,
					DOM0_CONTIG_NUM_ORDER);
		#endif

			size = DOM0_MEMBLOCK_SIZE * 2;
			vaddr = vstart;
			while (size > 0) {
				ClearPageReserved(virt_to_page(vaddr));
				vaddr += PAGE_SIZE;
				size -= PAGE_SIZE;
			}
			free_pages(vstart, MAX_NUM_ORDER);
		}
	}

	/* free all memory blocks of size 2M and destroy the contiguous regions */
	for (; i < num_block; i++) {
		vstart = rsv_mm_info[i].vir_addr;
		if (vstart) {
			if (rsv_mm_info[i].exchange_flag)
				xen_destroy_contiguous_region(vstart,
					DOM0_CONTIG_NUM_ORDER);

			size = DOM0_MEMBLOCK_SIZE;
			vaddr = vstart;
			while (size > 0) {
				ClearPageReserved(virt_to_page(vaddr));
				vaddr += PAGE_SIZE;
				size -= PAGE_SIZE;
			}
			free_pages(vstart, DOM0_CONTIG_NUM_ORDER);
		}
	}

	memset(rsv_mm_info, 0, sizeof(struct memblock_info) * num_block);
	vfree(rsv_mm_info);
	rsv_mm_info = NULL;

	return 0;
}
Example #12
// mkzasm writes zasm_$GOOS_$GOARCH.h,
// which contains struct offsets for use by
// assembly files.  It also writes a copy to the work space
// under the name zasm_GOOS_GOARCH.h (no expansion).
// 
void
mkzasm(char *dir, char *file)
{
	int i, n;
	char *aggr, *p;
	Buf in, b, out, exp;
	Vec argv, lines, fields;

	binit(&in);
	binit(&b);
	binit(&out);
	binit(&exp);
	vinit(&argv);
	vinit(&lines);
	vinit(&fields);
	
	bwritestr(&out, "// auto generated by go tool dist\n\n");
	for(i=0; i<nelem(zasmhdr); i++) {
		if(hasprefix(goarch, zasmhdr[i].goarch) && hasprefix(goos, zasmhdr[i].goos)) {
			bwritestr(&out, zasmhdr[i].hdr);
			goto ok;
		}
	}
	fatal("unknown $GOOS/$GOARCH in mkzasm");
ok:

	// Run 6c -D GOOS_goos -D GOARCH_goarch -I workdir -a -n -o workdir/proc.acid proc.c
	// to get acid [sic] output. Run once without the -a -o workdir/proc.acid in order to
	// report compilation failures (the -o redirects all messages, unfortunately).
	vreset(&argv);
	vadd(&argv, bpathf(&b, "%s/%sc", tooldir, gochar));
	vadd(&argv, "-D");
	vadd(&argv, bprintf(&b, "GOOS_%s", goos));
	vadd(&argv, "-D");
	vadd(&argv, bprintf(&b, "GOARCH_%s", goarch));
	vadd(&argv, "-I");
	vadd(&argv, bprintf(&b, "%s", workdir));
	vadd(&argv, "-n");
	vadd(&argv, "-a");
	vadd(&argv, "-o");
	vadd(&argv, bpathf(&b, "%s/proc.acid", workdir));
	vadd(&argv, "proc.c");
	runv(nil, dir, CheckExit, &argv);
	readfile(&in, bpathf(&b, "%s/proc.acid", workdir));
	
	// Convert input like
	//	aggr G
	//	{
	//		Gobuf 24 sched;
	//		'Y' 48 stack0;
	//	}
	//	StackMin = 128;
	// into output like
	//	#define g_sched 24
	//	#define g_stack0 48
	//	#define const_StackMin 128
	aggr = nil;
	splitlines(&lines, bstr(&in));
	for(i=0; i<lines.len; i++) {
		splitfields(&fields, lines.p[i]);
		if(fields.len == 2 && streq(fields.p[0], "aggr")) {
			if(streq(fields.p[1], "G"))
				aggr = "g";
			else if(streq(fields.p[1], "M"))
				aggr = "m";
			else if(streq(fields.p[1], "P"))
				aggr = "p";
			else if(streq(fields.p[1], "Gobuf"))
				aggr = "gobuf";
			else if(streq(fields.p[1], "LibCall"))
				aggr = "libcall";
			else if(streq(fields.p[1], "WinCallbackContext"))
				aggr = "cbctxt";
			else if(streq(fields.p[1], "SEH"))
				aggr = "seh";
		}
		if(hasprefix(lines.p[i], "}"))
			aggr = nil;
		if(aggr && hasprefix(lines.p[i], "\t") && fields.len >= 2) {
			n = fields.len;
			p = fields.p[n-1];
			if(p[xstrlen(p)-1] == ';')
				p[xstrlen(p)-1] = '\0';
			bwritestr(&out, bprintf(&b, "#define %s_%s %s\n", aggr, fields.p[n-1], fields.p[n-2]));
		}
		if(fields.len == 3 && streq(fields.p[1], "=")) { // generated from enumerated constants
			p = fields.p[2];
			if(p[xstrlen(p)-1] == ';')
				p[xstrlen(p)-1] = '\0';
			bwritestr(&out, bprintf(&b, "#define const_%s %s\n", fields.p[0], p));
		}
	}

	// Some #defines that are used for .c files.
	if(streq(goos, "windows")) {
		bwritestr(&out, bprintf(&b, "#define cb_max %d\n", MAXWINCB));
	}
	
	xgetenv(&exp, "GOEXPERIMENT");
	bwritestr(&out, bprintf(&b, "#define GOEXPERIMENT \"%s\"\n", bstr(&exp)));
	
	// Write both to file and to workdir/zasm_GOOS_GOARCH.h.
	writefile(&out, file, 0);
	writefile(&out, bprintf(&b, "%s/zasm_GOOS_GOARCH.h", workdir), 0);

	bfree(&in);
	bfree(&b);
	bfree(&out);
	bfree(&exp);
	vfree(&argv);
	vfree(&lines);
	vfree(&fields);
}
static int mtk_wcn_stp_uart_init(void)
{
	static struct tty_ldisc_ops stp_uart_ldisc;
	INT32 err;
	INT32 fifo_init_done = 0;

	UART_INFO_FUNC("mtk_wcn_stp_uart_init(): MTK STP UART driver\n");

#if  (LDISC_RX == LDISC_RX_TASKLET)
	err = stp_uart_fifo_init();
	if (err != 0) {
		goto init_err;
	}
	fifo_init_done = 1;
	/*init rx tasklet */
	tasklet_init(&g_stp_uart_rx_fifo_tasklet, stp_uart_rx_handling, (unsigned long)0);

#elif (LDISC_RX == LDISC_RX_WORK)
	err = stp_uart_fifo_init();
	if (err != 0) {
		UART_ERR_FUNC("stp_uart_fifo_init(WORK) error(%d)\n", err);
		err = -EFAULT;
		goto init_err;
	}
	fifo_init_done = 1;

	g_stp_uart_rx_work = vmalloc(sizeof(struct work_struct));
	if (!g_stp_uart_rx_work) {
		UART_ERR_FUNC("vmalloc work_struct(%d) fail\n", sizeof(struct work_struct));
		err = -ENOMEM;
		goto init_err;
	}

	g_stp_uart_rx_wq = create_singlethread_workqueue("mtk_urxd");
	if (!g_stp_uart_rx_wq) {
		UART_ERR_FUNC("create_singlethread_workqueue fail\n");
		err = -ENOMEM;
		goto init_err;
	}

	/* init rx work */
	INIT_WORK(g_stp_uart_rx_work, stp_uart_rx_worker);

#endif

	/* Register the tty discipline */
	memset(&stp_uart_ldisc, 0, sizeof(stp_uart_ldisc));
	stp_uart_ldisc.magic = TTY_LDISC_MAGIC;
	stp_uart_ldisc.name = "n_mtkstp";
	stp_uart_ldisc.open = stp_uart_tty_open;
	stp_uart_ldisc.close = stp_uart_tty_close;
	stp_uart_ldisc.read = stp_uart_tty_read;
	stp_uart_ldisc.write = stp_uart_tty_write;
	stp_uart_ldisc.ioctl = stp_uart_tty_ioctl;
	stp_uart_ldisc.poll = stp_uart_tty_poll;
	stp_uart_ldisc.receive_buf = stp_uart_tty_receive;
	stp_uart_ldisc.write_wakeup = stp_uart_tty_wakeup;
	stp_uart_ldisc.owner = THIS_MODULE;

	if ((err = tty_register_ldisc(N_MTKSTP, &stp_uart_ldisc))) {
		UART_ERR_FUNC("MTK STP line discipline registration failed. (%d)\n", err);
		goto init_err;
	}

	/*
	   mtk_wcn_stp_register_if_tx( mtk_wcn_uart_tx);
	 */

	return 0;

 init_err:

#if (LDISC_RX == LDISC_RX_TASKLET)
	/* nothing */
	if (fifo_init_done) {
		stp_uart_fifo_deinit();
	}
#elif (LDISC_RX == LDISC_RX_WORK)
	if (g_stp_uart_rx_wq) {
		destroy_workqueue(g_stp_uart_rx_wq);
		g_stp_uart_rx_wq = NULL;
	}
	if (g_stp_uart_rx_work) {
		vfree(g_stp_uart_rx_work);
	}
	if (fifo_init_done) {
		stp_uart_fifo_deinit();
	}
#endif
	UART_ERR_FUNC("init fail, return(%d)\n", err);

	return err;

}
static INT32 stp_uart_fifo_init(VOID)
{
	INT32 err = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	g_stp_uart_rx_buf = vzalloc(LDISC_RX_BUF_SIZE);
	if (!g_stp_uart_rx_buf) {
		UART_ERR_FUNC("kfifo_alloc failed (kernel version >= 2.6.37)\n");
		err = -4;
		goto fifo_init_end;
	}
#else
	g_stp_uart_rx_buf = vmalloc(LDISC_RX_BUF_SIZE);
	if (!g_stp_uart_rx_buf) {
		UART_ERR_FUNC("kfifo_alloc failed (kernel version < 2.6.37)\n");
		err = -4;
		goto fifo_init_end;
	}
	memset(g_stp_uart_rx_buf, 0, LDISC_RX_BUF_SIZE);
#endif

	UART_INFO_FUNC("g_stp_uart_rx_buf alloc ok(0x%p, %d)\n",
		       g_stp_uart_rx_buf, LDISC_RX_BUF_SIZE);

	/*add rx fifo */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
	spin_lock_init(&g_stp_uart_rx_fifo_spinlock);
	g_stp_uart_rx_fifo =
	    kfifo_alloc(LDISC_RX_FIFO_SIZE, GFP_KERNEL, &g_stp_uart_rx_fifo_spinlock);
	if (NULL == g_stp_uart_rx_fifo) {
		UART_ERR_FUNC("kfifo_alloc failed (kernel version < 2.6.33)\n");
		err = -1;
		goto fifo_init_end;
	}
#else
	/* allocate struct kfifo first */
	g_stp_uart_rx_fifo = kzalloc(sizeof(struct kfifo), GFP_KERNEL);
	if (NULL == g_stp_uart_rx_fifo) {
		err = -2;
		UART_ERR_FUNC("kzalloc struct kfifo failed (kernel version > 2.6.33)\n");
		goto fifo_init_end;
	}

	/* allocate kfifo data buffer then */
	err = kfifo_alloc(g_stp_uart_rx_fifo, LDISC_RX_FIFO_SIZE, GFP_KERNEL);
	if (0 != err) {
		UART_ERR_FUNC("kfifo_alloc failed, err(%d)(kernel version > 2.6.33)\n", err);
		kfree(g_stp_uart_rx_fifo);
		g_stp_uart_rx_fifo = NULL;
		err = -3;
		goto fifo_init_end;
	}
#endif
	UART_INFO_FUNC("g_stp_uart_rx_fifo alloc ok\n");

 fifo_init_end:

	if (0 == err) {
		/* kfifo init ok */
		kfifo_reset(g_stp_uart_rx_fifo);
		UART_DBG_FUNC("g_stp_uart_rx_fifo init success\n");
	} else {
		UART_ERR_FUNC("stp_uart_fifo_init() fail(%d)\n", err);
		if (g_stp_uart_rx_buf) {
			UART_ERR_FUNC("free g_stp_uart_rx_buf\n");
			vfree(g_stp_uart_rx_buf);
			g_stp_uart_rx_buf = NULL;
		}
	}

	return err;
}
Example #15
static int z2_open(struct block_device *bdev, fmode_t mode)
{
    int device;
    int max_z2_map = ( Z2RAM_SIZE / Z2RAM_CHUNKSIZE ) *
	sizeof( z2ram_map[0] );
    int max_chip_map = ( amiga_chip_size / Z2RAM_CHUNKSIZE ) *
	sizeof( z2ram_map[0] );
    int rc = -ENOMEM;

    device = MINOR(bdev->bd_dev);

    if ( current_device != -1 && current_device != device )
    {
	rc = -EBUSY;
	goto err_out;
    }

    if ( current_device == -1 )
    {
	z2_count   = 0;
	chip_count = 0;
	list_count = 0;
	z2ram_size = 0;

	/* Use a specific list entry. */
	if (device >= Z2MINOR_MEMLIST1 && device <= Z2MINOR_MEMLIST4) {
		int index = device - Z2MINOR_MEMLIST1 + 1;
		unsigned long size, paddr, vaddr;

		if (index >= m68k_realnum_memory) {
			printk( KERN_ERR DEVICE_NAME
				": no such entry in z2ram_map\n" );
		        goto err_out;
		}

		paddr = m68k_memory[index].addr;
		size = m68k_memory[index].size & ~(Z2RAM_CHUNKSIZE-1);

#ifdef __powerpc__
		/* FIXME: ioremap doesn't build correct memory tables. */
		{
			vfree(vmalloc (size));
		}

		vaddr = (unsigned long) __ioremap (paddr, size, 
						   _PAGE_WRITETHRU);

#else
		vaddr = (unsigned long)z_remap_nocache_nonser(paddr, size);
#endif
		z2ram_map = 
			kmalloc((size/Z2RAM_CHUNKSIZE)*sizeof(z2ram_map[0]),
				GFP_KERNEL);
		if ( z2ram_map == NULL )
		{
		    printk( KERN_ERR DEVICE_NAME
			": cannot get mem for z2ram_map\n" );
		    goto err_out;
		}

		while (size) {
			z2ram_map[ z2ram_size++ ] = vaddr;
			size -= Z2RAM_CHUNKSIZE;
			vaddr += Z2RAM_CHUNKSIZE;
			list_count++;
		}

		if ( z2ram_size != 0 )
		    printk( KERN_INFO DEVICE_NAME
			": using %iK List Entry %d Memory\n",
			list_count * Z2RAM_CHUNK1024, index );
	} else

	switch ( device )
	{
	    case Z2MINOR_COMBINED:

		z2ram_map = kmalloc( max_z2_map + max_chip_map, GFP_KERNEL );
		if ( z2ram_map == NULL )
		{
		    printk( KERN_ERR DEVICE_NAME
			": cannot get mem for z2ram_map\n" );
		    goto err_out;
		}

		get_z2ram();
		get_chipram();

		if ( z2ram_size != 0 )
		    printk( KERN_INFO DEVICE_NAME 
			": using %iK Zorro II RAM and %iK Chip RAM (Total %dK)\n",
			z2_count * Z2RAM_CHUNK1024,
			chip_count * Z2RAM_CHUNK1024,
			( z2_count + chip_count ) * Z2RAM_CHUNK1024 );

	    break;

    	    case Z2MINOR_Z2ONLY:
		z2ram_map = kmalloc( max_z2_map, GFP_KERNEL );
		if ( z2ram_map == NULL )
		{
		    printk( KERN_ERR DEVICE_NAME
			": cannot get mem for z2ram_map\n" );
		    goto err_out;
		}

		get_z2ram();

		if ( z2ram_size != 0 )
		    printk( KERN_INFO DEVICE_NAME 
			": using %iK of Zorro II RAM\n",
			z2_count * Z2RAM_CHUNK1024 );

	    break;

	    case Z2MINOR_CHIPONLY:
		z2ram_map = kmalloc( max_chip_map, GFP_KERNEL );
		if ( z2ram_map == NULL )
		{
		    printk( KERN_ERR DEVICE_NAME
			": cannot get mem for z2ram_map\n" );
		    goto err_out;
		}

		get_chipram();

		if ( z2ram_size != 0 )
		    printk( KERN_INFO DEVICE_NAME 
			": using %iK Chip RAM\n",
			chip_count * Z2RAM_CHUNK1024 );
		    
	    break;

	    default:
		rc = -ENODEV;
		goto err_out;
	
	    break;
	}

	if ( z2ram_size == 0 )
	{
	    printk( KERN_NOTICE DEVICE_NAME
		": no unused ZII/Chip RAM found\n" );
	    goto err_out_kfree;
	}

	current_device = device;
	z2ram_size <<= Z2RAM_CHUNKSHIFT;
	set_capacity(z2ram_gendisk, z2ram_size >> 9);
    }
/**
 *	seq_read -	->read() method for sequential files.
 *	@file: the file to read from
 *	@buf: the buffer to read to
 *	@size: the maximum number of bytes to read
 *	@ppos: the current position in the file
 *
 *	Ready-made ->f_op->read()
 */
ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	size_t copied = 0;
	loff_t pos;
	size_t n;
	void *p;
	int err = 0;

	mutex_lock(&m->lock);

	/*
	 * seq_file->op->..m_start/m_stop/m_next may do special actions
	 * or optimisations based on the file->f_version, so we want to
	 * pass the file->f_version to those methods.
	 *
	 * seq_file->version is just copy of f_version, and seq_file
	 * methods can treat it simply as file version.
	 * It is copied in first and copied out after all operations.
	 * It is convenient to have it as  part of structure to avoid the
	 * need of passing another argument to all the seq_file methods.
	 */
	m->version = file->f_version;

	/* Don't assume *ppos is where we left it */
	if (unlikely(*ppos != m->read_pos)) {
		while ((err = traverse(m, *ppos)) == -EAGAIN)
			;
		if (err) {
			/* With prejudice... */
			m->read_pos = 0;
			m->version = 0;
			m->index = 0;
			m->count = 0;
			goto Done;
		} else {
			m->read_pos = *ppos;
		}
	}

	/* grab buffer if we didn't have one */
	if (!m->buf) {
		m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
		if (!m->buf)
			goto Enomem;
	}
	/* if not empty - flush it first */
	if (m->count) {
		n = min(m->count, size);
		err = copy_to_user(buf, m->buf + m->from, n);
		if (err)
			goto Efault;
		m->count -= n;
		m->from += n;
		size -= n;
		buf += n;
		copied += n;
		if (!m->count)
			m->index++;
		if (!size)
			goto Done;
	}
	/* we need at least one record in buffer */
	pos = m->index;
	p = m->op->start(m, &pos);
	while (1) {
		err = PTR_ERR(p);
		if (!p || IS_ERR(p))
			break;
		err = m->op->show(m, p);
		if (err < 0)
			break;
		if (unlikely(err))
			m->count = 0;
		if (unlikely(!m->count)) {
			p = m->op->next(m, p, &pos);
			m->index = pos;
			continue;
		}
		if (m->count < m->size)
			goto Fill;
		m->op->stop(m, p);
		is_vmalloc_addr(m->buf) ? vfree(m->buf) : kfree(m->buf);
		m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | __GFP_NOWARN);
		if (!m->buf)
			m->buf = vmalloc(m->size);
		if (!m->buf)
			goto Enomem;
		m->count = 0;
		m->version = 0;
		pos = m->index;
		p = m->op->start(m, &pos);
	}
	m->op->stop(m, p);
	m->count = 0;
	goto Done;
Fill:
	/* they want more? let's try to get some more */
	while (m->count < size) {
		size_t offs = m->count;
		loff_t next = pos;
		p = m->op->next(m, p, &next);
		if (!p || IS_ERR(p)) {
			err = PTR_ERR(p);
			break;
		}
		err = m->op->show(m, p);
		if (seq_overflow(m) || err) {
			m->count = offs;
			if (likely(err <= 0))
				break;
		}
		pos = next;
	}
	m->op->stop(m, p);
	n = min(m->count, size);
	err = copy_to_user(buf, m->buf, n);
	if (err)
		goto Efault;
	copied += n;
	m->count -= n;
	if (m->count)
		m->from = n;
	else
		pos++;
	m->index = pos;
Done:
	if (!copied)
		copied = err;
	else {
		*ppos += copied;
		m->read_pos += copied;
	}
	file->f_version = m->version;
	mutex_unlock(&m->lock);
	return copied;
Enomem:
	err = -ENOMEM;
	goto Done;
Efault:
	err = -EFAULT;
	goto Done;
}
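
The grow path above, kmalloc() with a vmalloc() fallback and is_vmalloc_addr() choosing between vfree() and kfree() on release, is exactly the pattern that later kernels wrap in kvmalloc()/kvfree(). A sketch of the same buffer-doubling step using those helpers (not what this version of seq_read ships with):

		m->op->stop(m, p);
		kvfree(m->buf);
		m->size <<= 1;
		m->buf = kvmalloc(m->size, GFP_KERNEL);
		if (!m->buf)
			goto Enomem;
		m->count = 0;
		m->version = 0;
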
static void buffer_cleanup(struct vb2_buffer *vb)
{
	struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);

	vfree(buf->data);
}
Example #18
static void lzo_exit(struct crypto_tfm *tfm)
{
    struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);

    vfree(ctx->lzo_comp_mem);
}
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}
int pxafb_minilcd_ioctl(struct fb_info *info, unsigned int cmd,
			unsigned long arg)
{
	struct pxafb_info *fbi = (struct pxafb_info *)info;
	struct pxafb_minilcd_info *mi;

	if (fbi == NULL)
		return -ENODEV;

	mi = &fbi->minilcd_info;

	switch (cmd) {
	case PXAFB_MINILCD_ENABLE:
		mi->enable = (uint32_t)(arg);
		break;

	case PXAFB_MINILCD_BACKLIGHT:
		mi->backlight = (uint32_t)(arg);
		break;

	case PXAFB_MINILCD_WAKEUP:
		if (arg == 0) {
			mi->framecount &=
				~(MLFRMCNT_WKUP | MLFRMCNT_FRCOUNT_MASK);
		} else {
			mi->framecount |= MLFRMCNT_FRCOUNT(arg);
			mi->framecount |= MLFRMCNT_WKUP;
		}
		break;

	case PXAFB_MINILCD_FWAKEUP:
		if (arg == 0) {
			mi->framecount &= ~(MLFRMCNT_FWKUP);
		} else {
			mi->framecount |= MLFRMCNT_FWKUP;
		}
		break;

	case PXAFB_MINILCD_FRAMEDATA:
	{
		unsigned int size;
	       
		size = fbi->fb.var.xres * fbi->fb.var.yres;
		size = (size * fbi->fb.var.bits_per_pixel) >> 3;

		if (arg == 0) {
			if (mi->framedata)
				vfree(mi->framedata);

			mi->framedata = NULL;
		}else{
			if (mi->framedata == NULL) {
				mi->framedata = vmalloc(size);
				if (mi->framedata == NULL){
					return -ENOMEM;
				}
			}
			if (copy_from_user(mi->framedata, (void *)arg, size))
				return -EFAULT;
		}
		break;
	}
	default:
		return -EINVAL;
	}

	return 0;
}
Example #21
/**
 * free_rq_skb_arr - free rq array for saved skb
 * @rq: HW Receive Queue
 **/
static void free_rq_skb_arr(struct hinic_rq *rq)
{
	vfree(rq->saved_skb);
}
Example #22
static int ath9k_debugfs_release_buf(struct inode *inode, struct file *file)
{
	vfree(file->private_data);
	return 0;
}
Example #23
static int igbvf_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *temp_ring;
	int err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_count = max_t(u32, ring->rx_pending, IGBVF_MIN_RXD);
	new_rx_count = min_t(u32, new_rx_count, IGBVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = max_t(u32, ring->tx_pending, IGBVF_MIN_TXD);
	new_tx_count = min_t(u32, new_tx_count, IGBVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring->count) &&
	    (new_rx_count == adapter->rx_ring->count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		adapter->tx_ring->count = new_tx_count;
		adapter->rx_ring->count = new_rx_count;
		goto clear_reset;
	}

	temp_ring = vmalloc(sizeof(struct igbvf_ring));
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	igbvf_down(adapter);

	/* We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the Tx and Rx ring structs.
	 */
	if (new_tx_count != adapter->tx_ring->count) {
		memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));

		temp_ring->count = new_tx_count;
		err = igbvf_setup_tx_resources(adapter, temp_ring);
		if (err)
			goto err_setup;

		igbvf_free_tx_resources(adapter->tx_ring);

		memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring));
	}

	if (new_rx_count != adapter->rx_ring->count) {
		memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring));

		temp_ring->count = new_rx_count;
		err = igbvf_setup_rx_resources(adapter, temp_ring);
		if (err)
			goto err_setup;

		igbvf_free_rx_resources(adapter->rx_ring);

		memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring));
	}
err_setup:
	igbvf_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IGBVF_RESETTING, &adapter->state);
	return err;
}
Example #24
int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
{
	int err = 0;
	struct reiserfs_super_block *sb;
	struct reiserfs_bitmap_info *bitmap;
	struct reiserfs_bitmap_info *info;
	struct reiserfs_bitmap_info *old_bitmap = SB_AP_BITMAP(s);
	struct buffer_head *bh;
	struct reiserfs_transaction_handle th;
	unsigned int bmap_nr_new, bmap_nr;
	unsigned int block_r_new, block_r;

	struct reiserfs_list_bitmap *jb;
	struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];

	unsigned long int block_count, free_blocks;
	int i;
	int copy_size;

	sb = SB_DISK_SUPER_BLOCK(s);

	if (SB_BLOCK_COUNT(s) >= block_count_new) {
		printk("can\'t shrink filesystem on-line\n");
		return -EINVAL;
	}

	/* check the device size */
	bh = sb_bread(s, block_count_new - 1);
	if (!bh) {
		printk("reiserfs_resize: can\'t read last block\n");
		return -EINVAL;
	}
	bforget(bh);

	/* old disk layout detection; those partitions can be mounted, but
	 * cannot be resized */
	if (SB_BUFFER_WITH_SB(s)->b_blocknr * SB_BUFFER_WITH_SB(s)->b_size
	    != REISERFS_DISK_OFFSET_IN_BYTES) {
		printk
		    ("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
		return -ENOTSUPP;
	}

	/* count used bits in last bitmap block */
	block_r = SB_BLOCK_COUNT(s) - (SB_BMAP_NR(s) - 1) * s->s_blocksize * 8;

	/* count bitmap blocks in new fs */
	bmap_nr_new = block_count_new / (s->s_blocksize * 8);
	block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
	if (block_r_new)
		bmap_nr_new++;
	else
		block_r_new = s->s_blocksize * 8;

	/* save old values */
	block_count = SB_BLOCK_COUNT(s);
	bmap_nr = SB_BMAP_NR(s);

	/* resizing of reiserfs bitmaps (journal and real), if needed */
	if (bmap_nr_new > bmap_nr) {
		/* reallocate journal bitmaps */
		if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
			printk
			    ("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
			unlock_super(s);
			return -ENOMEM;
		}
		/* the new journal bitmaps are zero filled, now we copy in the bitmap
		 ** node pointers from the old journal bitmap structs, and then
		 ** transfer the new data structures into the journal struct.
		 **
		 ** using the copy_size var below allows this code to work for
		 ** both shrinking and expanding the FS.
		 */
		copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr;
		copy_size =
		    copy_size * sizeof(struct reiserfs_list_bitmap_node *);
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			struct reiserfs_bitmap_node **node_tmp;
			jb = SB_JOURNAL(s)->j_list_bitmap + i;
			memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);

			/* just in case vfree schedules on us, copy the new
			 ** pointer into the journal struct before freeing the 
			 ** old one
			 */
			node_tmp = jb->bitmaps;
			jb->bitmaps = jbitmap[i].bitmaps;
			vfree(node_tmp);
		}

		/* allocate additional bitmap blocks, reallocate array of bitmap
		 * block pointers */
		bitmap =
		    vmalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
		if (!bitmap) {
			/* Journal bitmaps are still supersized, but the memory isn't
			 * leaked, so I guess it's ok */
			printk("reiserfs_resize: unable to allocate memory.\n");
			return -ENOMEM;
		}
		memset(bitmap, 0,
		       sizeof(struct reiserfs_bitmap_info) * SB_BMAP_NR(s));
		for (i = 0; i < bmap_nr; i++)
			bitmap[i] = old_bitmap[i];

		/* This doesn't go through the journal, but it doesn't have to.
		 * The changes are still atomic: We're synced up when the journal
		 * transaction begins, and the new bitmaps don't matter if the
		 * transaction fails. */
		for (i = bmap_nr; i < bmap_nr_new; i++) {
			/* don't use read_bitmap_block since it will cache
			 * the uninitialized bitmap */
			bh = sb_bread(s, i * s->s_blocksize * 8);
			memset(bh->b_data, 0, sb_blocksize(sb));
			reiserfs_test_and_set_le_bit(0, bh->b_data);
			reiserfs_cache_bitmap_metadata(s, bh, bitmap + i);

			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			sync_dirty_buffer(bh);
			// update bitmap_info stuff
			bitmap[i].first_zero_hint = 1;
			bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
			brelse(bh);
		}
		/* free old bitmap blocks array */
		SB_AP_BITMAP(s) = bitmap;
		vfree(old_bitmap);
	}

	/* begin transaction, if there was an error, it's fine. Yes, we have
	 * incorrect bitmaps now, but none of it is ever going to touch the
	 * disk anyway. */
	err = journal_begin(&th, s, 10);
	if (err)
		return err;

	/* Extend old last bitmap block - new blocks have been made available */
	info = SB_AP_BITMAP(s) + bmap_nr - 1;
	bh = reiserfs_read_bitmap_block(s, bmap_nr - 1);
	if (!bh) {
		int jerr = journal_end(&th, s, 10);
		if (jerr)
			return jerr;
		return -EIO;
	}

	reiserfs_prepare_for_journal(s, bh, 1);
	for (i = block_r; i < s->s_blocksize * 8; i++)
		reiserfs_test_and_clear_le_bit(i, bh->b_data);
	info->free_count += s->s_blocksize * 8 - block_r;
	if (!info->first_zero_hint)
		info->first_zero_hint = block_r;

	journal_mark_dirty(&th, s, bh);
	brelse(bh);

	/* Correct new last bitmap block - It may not be full */
	info = SB_AP_BITMAP(s) + bmap_nr_new - 1;
	bh = reiserfs_read_bitmap_block(s, bmap_nr_new - 1);
	if (!bh) {
		int jerr = journal_end(&th, s, 10);
		if (jerr)
			return jerr;
		return -EIO;
	}

	reiserfs_prepare_for_journal(s, bh, 1);
	for (i = block_r_new; i < s->s_blocksize * 8; i++)
		reiserfs_test_and_set_le_bit(i, bh->b_data);
	journal_mark_dirty(&th, s, bh);
	brelse(bh);

	info->free_count -= s->s_blocksize * 8 - block_r_new;
	/* Extreme case where last bitmap is the only valid block in itself. */
	if (!info->free_count)
		info->first_zero_hint = 0;
	/* update super */
	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	free_blocks = SB_FREE_BLOCKS(s);
	PUT_SB_FREE_BLOCKS(s,
			   free_blocks + (block_count_new - block_count -
					  (bmap_nr_new - bmap_nr)));
	PUT_SB_BLOCK_COUNT(s, block_count_new);
	PUT_SB_BMAP_NR(s, bmap_nr_new);
	s->s_dirt = 1;

	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));

	SB_JOURNAL(s)->j_must_wait = 1;
	return journal_end(&th, s, 10);
}
Example #25
static long isp_ioctl( struct file *file, unsigned int cmd, unsigned long param)
{
	long ret = 0;
	struct isp_irq irq_param;
	struct isp_node node;
	struct isp_reg_param reg_param = {0, 0};
	struct isp_reg_bits *reg_bits_ptr = NULL;
	struct isp_k_private *isp_private = NULL;
	struct isp_drv_private *drv_private = NULL;
	struct isp_k_file *fd = NULL;

	if (!file) {
		ret = -EINVAL;
		printk("isp_ioctl: file is null error.\n");
		return ret;
	}

	fd = file->private_data;
	if (!fd) {
		ret = - EFAULT;
		printk("isp_ioctl: private_data is null error.\n");
		return ret;
	}

	isp_private = fd->isp_private;
	if (!isp_private) {
		ret = -EFAULT;
		printk("isp_ioctl: isp_private is null error.\n");
		return ret;
	}

	drv_private = &fd->drv_private;

	switch (cmd) {
	case ISP_IO_IRQ:
	{
		ret = down_interruptible(&fd->drv_private.isr_done_lock);
		if (ret) {
			memset(&irq_param, 0, sizeof(irq_param));
			irq_param.ret_val = ret;
			ret = copy_to_user ((void *)param, (void *)&irq_param, sizeof(irq_param));
			if ( 0 != ret) {
				printk("isp_ioctl: irq: copy_to_user error ret = %d\n", (uint32_t)ret);
			}
			ret = -ERESTARTSYS;
			return ret;
		}

		ret = isp_queue_read(&drv_private->queue, &node);
		if (0 != ret) {
			ret = -EFAULT;
			printk("isp_ioctl: isp_queue_read error, ret = 0x%x\n", (uint32_t)ret);
			return ret;
		}

		memset(&irq_param, 0, sizeof(irq_param));
		irq_param.irq_val0 = node.irq_val0;
		irq_param.irq_val1 = node.irq_val1;
		irq_param.irq_val2 = node.irq_val2;
		irq_param.irq_val3 = node.irq_val3;
		irq_param.reserved = node.reserved;
		ret = copy_to_user ((void *)param, (void *)&irq_param, sizeof(irq_param));
		if (0 != ret) {
			ret = -EFAULT;
			printk("isp_k: ioctl irq: copy_to_user error, ret = 0x%x", (uint32_t)ret);
		}
		break;
	}

	case ISP_IO_READ:
	{
		uint32_t buf_size = 0;

		down(&isp_private->ioctl_lock);

		ret = copy_from_user((void *)&reg_param, (void *)param, sizeof(struct isp_reg_param));
		if ( 0 != ret) {
			ret = -EFAULT;
			printk("isp_ioctl: read copy_from_user 0 error, ret = 0x%x\n", (uint32_t)ret);
			goto io_read_exit;
		}

		buf_size = reg_param.counts * sizeof(struct isp_reg_bits);
		if (buf_size > isp_private->reg_buf_len) {
			ret = -EFAULT;
			printk("isp_ioctl: read buf len error.\n");
			goto io_read_exit;
		}

		reg_bits_ptr = (struct isp_reg_bits *)isp_private->reg_buf_addr;
		ret = copy_from_user((void *)reg_bits_ptr, (void *)reg_param.reg_param, buf_size);
		if (0 != ret) {
			ret = -EFAULT;
			printk("isp_ioctl: read copy_from_user 1 error, ret = 0x%x\n", (uint32_t)ret);
			goto io_read_exit;
		}

		isp_read_reg(reg_bits_ptr, reg_param.counts);

		ret = copy_to_user((void *)reg_param.reg_param, (void *)reg_bits_ptr, buf_size);
		if (0 != ret) {
			ret = -EFAULT;
			printk("isp_ioctl: read copy_to_user error, ret = 0x%x\n", (uint32_t)ret);
			goto io_read_exit;
		}

		io_read_exit:
		if (reg_bits_ptr) {
			memset((void *)isp_private->reg_buf_addr, 0x00, buf_size);
			reg_bits_ptr = NULL;
		}

		up(&isp_private->ioctl_lock);

		break;
	}

	case ISP_IO_WRITE:
	{
		uint32_t buf_size = 0;

		down(&isp_private->ioctl_lock);

		ret = copy_from_user((void *)&reg_param, (void *)param, sizeof(struct isp_reg_param));
		if (0 != ret) {
			printk("isp_ioctl: write copy_from_user 0 error, ret = 0x%x\n", (uint32_t)ret);
			ret = -EFAULT;
			goto io_write_exit;
		}

		buf_size = reg_param.counts * sizeof(struct isp_reg_bits);
		if (buf_size > isp_private->reg_buf_len) {
			ret = -EFAULT;
			printk("isp_ioctl: write buf len error.\n");
			goto io_write_exit;
		}

		reg_bits_ptr = (struct isp_reg_bits *)isp_private->reg_buf_addr;
		ret = copy_from_user((void *)reg_bits_ptr, (void *)reg_param.reg_param, buf_size);
		if (0 != ret) {
			ret = -EFAULT;
			printk("isp_ioctl: write copy_from_user 1 error, ret = 0x%x\n", (uint32_t)ret);
			goto io_write_exit;
		}

		isp_write_reg(reg_bits_ptr, reg_param.counts);

		io_write_exit:
		if (reg_bits_ptr) {
			memset((void *)isp_private->reg_buf_addr, 0x00, buf_size);
			reg_bits_ptr = NULL;
		}

		up(&isp_private->ioctl_lock);

		break;
	}

	case ISP_IO_RST:
	{
		down(&isp_private->ioctl_lock);

		ret = isp_module_rst(fd);
		if (ret) {
			ret = -EFAULT;
			printk("isp_ioctl: restet error.\n");
		}

		up(&isp_private->ioctl_lock);

		break;
	}

	case ISP_IO_STOP:
	{
		unsigned long flag = 0;
		struct isp_node node;

		down(&isp_private->ioctl_lock);

		isp_en_irq(ISP_INT_CLEAR_MODE);

		spin_lock_irqsave(&drv_private->isr_lock, flag);
		memset(&node, 0x00, sizeof(node));
		node.reserved = ISP_INT_EVT_STOP;
		isp_queue_write((struct isp_queue *)&drv_private->queue, (struct isp_node *)&node);
		spin_unlock_irqrestore(&drv_private->isr_lock, flag);

		up(&fd->drv_private.isr_done_lock);

		up(&isp_private->ioctl_lock);

		break;
	}

	case ISP_IO_INT:
	{
		struct isp_interrupt int_param;

		down(&isp_private->ioctl_lock);

		ret = copy_from_user((void *)&int_param, (void *)param, sizeof(int_param));
		if (ret) {
			ret = -EFAULT;
			printk("isp_ioctl: int copy_from_user error, ret = %d\n", (uint32_t)ret);
		}

		if (0 == ret)
			isp_en_irq(int_param.int_mode);

		up(&isp_private->ioctl_lock);

		break;
	}

	case ISP_IO_CFG_PARAM:
		down(&isp_private->ioctl_lock);
		ret = isp_cfg_param((void *)param, isp_private);
		up(&isp_private->ioctl_lock);
		break;

	case ISP_IO_CAPABILITY:
		ret = isp_capability((void *)param);
		break;

	case ISP_REG_READ:
	{
		int num;
		int ISP_REG_NUM = 20467;

		struct isp_reg_bits *ptr = (struct isp_reg_bits *)vmalloc(ISP_REG_NUM * sizeof(struct isp_reg_bits));

		if (NULL == ptr) {
			printk("isp_ioctl:REG_READ: kmalloc error\n");
			return -ENOMEM;
		}
		memset(ptr, 0, ISP_REG_NUM * sizeof(struct isp_reg_bits));

		num = isp_wr_addr(ptr);

		isp_read_reg(ptr, num);

		ret = copy_to_user((void *)param, (void *)ptr, ISP_REG_NUM * sizeof(struct isp_reg_bits));
		if (0 != ret) {
			printk("isp_ioctl: REG_READ: copy_to_user error ret = %d\n", (uint32_t)ret);
			vfree(ptr);
			/* copy_to_user() returns bytes not copied, not an errno */
			return -EFAULT;
		}
		vfree(ptr);
		break;
	}

	default:
		printk("isp_ioctl: cmd is unsupported, cmd = %x\n", (int32_t)cmd);
		return -EFAULT;
	}

	return ret;
}
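A minimal kernel-style sketch of the pattern used in the ISP_REG_READ branch above: allocate a temporary table with vzalloc(), fill it, copy it out with copy_to_user(), and free it on every path. struct reg_bits, fill_regs() and REG_COUNT are illustrative stand-ins, not part of the real driver.

#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

#define REG_COUNT 64			/* assumed table size for this sketch */

struct reg_bits {			/* stand-in for struct isp_reg_bits */
	u32 reg_addr;
	u32 reg_value;
};

/* Hypothetical helper: fills the table and returns the number of entries. */
static int fill_regs(struct reg_bits *tbl, int max)
{
	int i;

	for (i = 0; i < max; i++) {
		tbl[i].reg_addr = i * 4;	/* made-up register offsets */
		tbl[i].reg_value = 0;
	}
	return max;
}

static long read_regs_to_user(void __user *uptr)
{
	struct reg_bits *tbl;
	long ret = 0;
	int num;

	tbl = vzalloc(REG_COUNT * sizeof(*tbl));	/* zeroed: no stale kernel data */
	if (!tbl)
		return -ENOMEM;

	num = fill_regs(tbl, REG_COUNT);

	/* copy_to_user() returns the number of bytes NOT copied, not an errno */
	if (copy_to_user(uptr, tbl, num * sizeof(*tbl)))
		ret = -EFAULT;

	vfree(tbl);			/* freed on success and on the error path */
	return ret;
}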
Example #26
0
static ssize_t
ncp_file_read(struct file *file, char *buf, size_t count, loff_t *ppos)
{
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = dentry->d_inode;
	size_t already_read = 0;
	off_t pos;
	size_t bufsize;
	int error;
	void* freepage;
	size_t freelen;

	DPRINTK("ncp_file_read: enter %s/%s\n",
		dentry->d_parent->d_name.name, dentry->d_name.name);

	if (!ncp_conn_valid(NCP_SERVER(inode)))
		return -EIO;
	if (!S_ISREG(inode->i_mode)) {
		DPRINTK("ncp_file_read: read from non-file, mode %07o\n",
			inode->i_mode);
		return -EINVAL;
	}

	pos = *ppos;

	if ((ssize_t) count < 0) {
		return -EINVAL;
	}
	if (!count)
		return 0;
	if (pos > inode->i_sb->s_maxbytes)
		return 0;
	if (pos + count > inode->i_sb->s_maxbytes) {
		count = inode->i_sb->s_maxbytes - pos;
	}

	error = ncp_make_open(inode, O_RDONLY);
	if (error) {
		DPRINTK(KERN_ERR "ncp_file_read: open failed, error=%d\n", error);
		return error;
	}

	bufsize = NCP_SERVER(inode)->buffer_size;

	error = -EIO;
	freelen = ncp_read_bounce_size(bufsize);
	freepage = vmalloc(freelen);
	if (!freepage)
		goto outrel;
	error = 0;
	/* First read in as much as possible for each bufsize. */
	while (already_read < count) {
		int read_this_time;
		size_t to_read = min_t(unsigned int,
				     bufsize - (pos % bufsize),
				     count - already_read);

		error = ncp_read_bounce(NCP_SERVER(inode),
			 	NCP_FINFO(inode)->file_handle,
				pos, to_read, buf, &read_this_time, 
				freepage, freelen);
		if (error) {
			error = -EIO;	/* NW errno -> Linux errno */
			break;
		}
		pos += read_this_time;
		buf += read_this_time;
		already_read += read_this_time;

		if (read_this_time != to_read) {
			break;
		}
	}
	vfree(freepage);

	*ppos = pos;

	if (!IS_RDONLY(inode)) {
		inode->i_atime = CURRENT_TIME;
	}
	
	DPRINTK("ncp_file_read: exit %s/%s\n",
		dentry->d_parent->d_name.name, dentry->d_name.name);
outrel:
	ncp_inode_close(inode);		
	return already_read ? already_read : error;
}
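The read loop above advances in chunks of bufsize - (pos % bufsize) so that no single NCP request crosses a server-buffer boundary or overruns what the caller asked for. A small self-contained C example of that chunk calculation; the buffer size, count and starting offset are assumptions for illustration:

#include <stdio.h>
#include <stddef.h>

static size_t min_sz(size_t a, size_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	size_t bufsize = 1024;		/* assumed server buffer size */
	size_t count = 3000;		/* bytes requested by the caller */
	size_t pos = 900;		/* assumed starting file offset */
	size_t already_read = 0;

	while (already_read < count) {
		/* Same expression as in ncp_file_read(): stop at the next
		 * bufsize boundary and never read past the request. */
		size_t to_read = min_sz(bufsize - (pos % bufsize),
					count - already_read);

		printf("request %zu bytes at offset %zu\n", to_read, pos);
		pos += to_read;
		already_read += to_read;
	}
	return 0;
}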
Example #27
0
static void deflate_comp_exit(struct deflate_ctx *ctx)
{
	zlib_deflateEnd(&ctx->comp_stream);
	vfree(ctx->comp_stream.workspace);
}
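deflate_comp_exit() only shows the teardown half: it ends the zlib stream and vfree()s the workspace that was vmalloc'd at setup time. A hedged sketch of what the matching init typically looks like in-kernel; the window bits, memory level and compression level below are assumptions, not necessarily the values crypto/deflate.c uses:

#include <linux/vmalloc.h>
#include <linux/zlib.h>
#include <linux/errno.h>

static int deflate_comp_init_sketch(struct z_stream_s *stream)
{
	/* Assumed parameters for illustration only. */
	int winbits = -12;	/* negative => raw deflate, no zlib header */
	int memlevel = 8;

	/* The workspace must be large enough for these exact parameters. */
	stream->workspace = vzalloc(zlib_deflate_workspacesize(winbits, memlevel));
	if (!stream->workspace)
		return -ENOMEM;

	if (zlib_deflateInit2(stream, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
			      winbits, memlevel, Z_DEFAULT_STRATEGY) != Z_OK) {
		vfree(stream->workspace);	/* undo the allocation on failure */
		return -EINVAL;
	}
	return 0;
}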
Example #28
0
static ssize_t
ncp_file_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
{
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = dentry->d_inode;
	size_t already_written = 0;
	off_t pos;
	size_t bufsize;
	int errno;
	void* bouncebuffer;

	DPRINTK("ncp_file_write: enter %s/%s\n",
		dentry->d_parent->d_name.name, dentry->d_name.name);
	if (!ncp_conn_valid(NCP_SERVER(inode)))
		return -EIO;
	if (!S_ISREG(inode->i_mode)) {
		DPRINTK("ncp_file_write: write to non-file, mode %07o\n",
			inode->i_mode);
		return -EINVAL;
	}
	if ((ssize_t) count < 0)
		return -EINVAL;
	pos = *ppos;
	if (file->f_flags & O_APPEND) {
		pos = inode->i_size;
	}

	if (pos + count > MAX_NON_LFS && !(file->f_flags&O_LARGEFILE)) {
		if (pos >= MAX_NON_LFS) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		if (count > MAX_NON_LFS - (u32)pos) {
			count = MAX_NON_LFS - (u32)pos;
		}
	}
	if (pos >= inode->i_sb->s_maxbytes) {
		if (count || pos > inode->i_sb->s_maxbytes) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
	}
	if (pos + count > inode->i_sb->s_maxbytes) {
		count = inode->i_sb->s_maxbytes - pos;
	}
	
	if (!count)
		return 0;
	errno = ncp_make_open(inode, O_WRONLY);
	if (errno) {
		DPRINTK(KERN_ERR "ncp_file_write: open failed, error=%d\n", errno);
		return errno;
	}
	bufsize = NCP_SERVER(inode)->buffer_size;

	already_written = 0;

	bouncebuffer = vmalloc(bufsize);
	if (!bouncebuffer) {
		errno = -EIO;	/* -ENOMEM */
		goto outrel;
	}
	while (already_written < count) {
		int written_this_time;
		size_t to_write = min_t(unsigned int,
				      bufsize - (pos % bufsize),
				      count - already_written);

		if (copy_from_user(bouncebuffer, buf, to_write)) {
			errno = -EFAULT;
			break;
		}
		if (ncp_write_kernel(NCP_SERVER(inode), 
		    NCP_FINFO(inode)->file_handle,
		    pos, to_write, bouncebuffer, &written_this_time) != 0) {
			errno = -EIO;
			break;
		}
		pos += written_this_time;
		buf += written_this_time;
		already_written += written_this_time;

		if (written_this_time != to_write) {
			break;
		}
	}
	vfree(bouncebuffer);
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	
	*ppos = pos;

	if (pos > inode->i_size) {
		inode->i_size = pos;
	}
	DPRINTK("ncp_file_write: exit %s/%s\n",
		dentry->d_parent->d_name.name, dentry->d_name.name);
outrel:
	ncp_inode_close(inode);		
	return already_written ? already_written : errno;
}
Example #29
0
int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *pci_id)
{
	int err, idx, nm;
	unsigned int memlen;
	struct hpi_message hm;
	struct hpi_response hr;
	struct hpi_adapter adapter;
	struct hpi_pci pci;

	memset(&adapter, 0, sizeof(adapter));

	printk(KERN_DEBUG "probe PCI device (%04x:%04x,%04x:%04x,%04x)\n",
		pci_dev->vendor, pci_dev->device, pci_dev->subsystem_vendor,
		pci_dev->subsystem_device, pci_dev->devfn);

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_CREATE_ADAPTER);
	hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
		HPI_ERROR_PROCESSING_MESSAGE);

	hm.adapter_index = -1;	/* an invalid index */

	/* fill in HPI_PCI information from kernel provided information */
	adapter.pci = pci_dev;

	nm = HPI_MAX_ADAPTER_MEM_SPACES;

	for (idx = 0; idx < nm; idx++) {
		HPI_DEBUG_LOG(INFO, "resource %d %s %08llx-%08llx %04llx\n",
			idx, pci_dev->resource[idx].name,
			(unsigned long long)pci_resource_start(pci_dev, idx),
			(unsigned long long)pci_resource_end(pci_dev, idx),
			(unsigned long long)pci_resource_flags(pci_dev, idx));

		if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
			memlen = pci_resource_len(pci_dev, idx);
			adapter.ap_remapped_mem_base[idx] =
				ioremap(pci_resource_start(pci_dev, idx),
				memlen);
			if (!adapter.ap_remapped_mem_base[idx]) {
				HPI_DEBUG_LOG(ERROR,
					"ioremap failed, aborting\n");
				/* unmap previously mapped pci mem space */
				goto err;
			}
		}

		pci.ap_mem_base[idx] = adapter.ap_remapped_mem_base[idx];
	}

	/* could replace Pci with direct pointer to pci_dev for linux
	   Instead wrap accessor functions for IDs etc.
	   Would it work for windows?
	 */
	pci.bus_number = pci_dev->bus->number;
	pci.vendor_id = (u16)pci_dev->vendor;
	pci.device_id = (u16)pci_dev->device;
	pci.subsys_vendor_id = (u16)(pci_dev->subsystem_vendor & 0xffff);
	pci.subsys_device_id = (u16)(pci_dev->subsystem_device & 0xffff);
	pci.device_number = pci_dev->devfn;
	pci.interrupt = pci_dev->irq;
	pci.p_os_data = pci_dev;

	hm.u.s.resource.bus_type = HPI_BUS_PCI;
	hm.u.s.resource.r.pci = &pci;

	/* call CreateAdapterObject on the relevant hpi module */
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error)
		goto err;

	if (prealloc_stream_buf) {
		adapter.p_buffer = vmalloc(prealloc_stream_buf);
		if (!adapter.p_buffer) {
			HPI_DEBUG_LOG(ERROR,
				"HPI could not allocate "
				"kernel buffer size %d\n",
				prealloc_stream_buf);
			goto err;
		}
	}

	adapter.index = hr.u.s.adapter_index;
	adapter.type = hr.u.s.aw_adapter_list[adapter.index];
	hm.adapter_index = adapter.index;

	err = hpi_adapter_open(NULL, adapter.index);
	if (err)
		goto err;

	adapter.snd_card_asihpi = NULL;
	/* WARNING can't init mutex in 'adapter'
	 * and then copy it to adapters[] ?!?!
	 */
	adapters[hr.u.s.adapter_index] = adapter;
	mutex_init(&adapters[adapter.index].mutex);
	pci_set_drvdata(pci_dev, &adapters[adapter.index]);

	printk(KERN_INFO "probe found adapter ASI%04X HPI index #%d.\n",
		adapter.type, adapter.index);

	return 0;

err:
	for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
		if (adapter.ap_remapped_mem_base[idx]) {
			iounmap(adapter.ap_remapped_mem_base[idx]);
			adapter.ap_remapped_mem_base[idx] = NULL;
		}
	}

	if (adapter.p_buffer) {
		adapter.buffer_size = 0;
		vfree(adapter.p_buffer);
	}

	HPI_DEBUG_LOG(ERROR, "adapter_probe failed\n");
	return -ENODEV;
}
Example #30
0
void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
{
	void *p;
	char drv_version[64];
	struct usb_card_rec *cardp;
	struct sdio_mmc_card *sdio_card;
	struct mwifiex_private *priv;
	int i, idx;
	struct netdev_queue *txq;
	struct mwifiex_debug_info *debug_info;

	if (adapter->drv_info_dump) {
		vfree(adapter->drv_info_dump);
		adapter->drv_info_dump = NULL;
		adapter->drv_info_size = 0;
	}

	mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n");

	adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);

	if (!adapter->drv_info_dump)
		return;

	p = (char *)(adapter->drv_info_dump);
	p += sprintf(p, "driver_name = " "\"mwifiex\"\n");

	mwifiex_drv_get_driver_version(adapter, drv_version,
				       sizeof(drv_version) - 1);
	p += sprintf(p, "driver_version = %s\n", drv_version);

	if (adapter->iface_type == MWIFIEX_USB) {
		cardp = (struct usb_card_rec *)adapter->card;
		p += sprintf(p, "tx_cmd_urb_pending = %d\n",
			     atomic_read(&cardp->tx_cmd_urb_pending));
		p += sprintf(p, "tx_data_urb_pending_port_0 = %d\n",
			     atomic_read(&cardp->port[0].tx_data_urb_pending));
		p += sprintf(p, "tx_data_urb_pending_port_1 = %d\n",
			     atomic_read(&cardp->port[1].tx_data_urb_pending));
		p += sprintf(p, "rx_cmd_urb_pending = %d\n",
			     atomic_read(&cardp->rx_cmd_urb_pending));
		p += sprintf(p, "rx_data_urb_pending = %d\n",
			     atomic_read(&cardp->rx_data_urb_pending));
	}

	p += sprintf(p, "tx_pending = %d\n",
		     atomic_read(&adapter->tx_pending));
	p += sprintf(p, "rx_pending = %d\n",
		     atomic_read(&adapter->rx_pending));

	if (adapter->iface_type == MWIFIEX_SDIO) {
		sdio_card = (struct sdio_mmc_card *)adapter->card;
		p += sprintf(p, "\nmp_rd_bitmap=0x%x curr_rd_port=0x%x\n",
			     sdio_card->mp_rd_bitmap, sdio_card->curr_rd_port);
		p += sprintf(p, "mp_wr_bitmap=0x%x curr_wr_port=0x%x\n",
			     sdio_card->mp_wr_bitmap, sdio_card->curr_wr_port);
	}

	for (i = 0; i < adapter->priv_num; i++) {
		if (!adapter->priv[i] || !adapter->priv[i]->netdev)
			continue;
		priv = adapter->priv[i];
		p += sprintf(p, "\n[interface  : \"%s\"]\n",
			     priv->netdev->name);
		p += sprintf(p, "wmm_tx_pending[0] = %d\n",
			     atomic_read(&priv->wmm_tx_pending[0]));
		p += sprintf(p, "wmm_tx_pending[1] = %d\n",
			     atomic_read(&priv->wmm_tx_pending[1]));
		p += sprintf(p, "wmm_tx_pending[2] = %d\n",
			     atomic_read(&priv->wmm_tx_pending[2]));
		p += sprintf(p, "wmm_tx_pending[3] = %d\n",
			     atomic_read(&priv->wmm_tx_pending[3]));
		p += sprintf(p, "media_state=\"%s\"\n", !priv->media_connected ?
			     "Disconnected" : "Connected");
		p += sprintf(p, "carrier %s\n", (netif_carrier_ok(priv->netdev)
			     ? "on" : "off"));
		for (idx = 0; idx < priv->netdev->num_tx_queues; idx++) {
			txq = netdev_get_tx_queue(priv->netdev, idx);
			p += sprintf(p, "tx queue %d:%s  ", idx,
				     netif_tx_queue_stopped(txq) ?
				     "stopped" : "started");
		}
		p += sprintf(p, "\n%s: num_tx_timeout = %d\n",
			     priv->netdev->name, priv->num_tx_timeout);
	}

	if (adapter->iface_type == MWIFIEX_SDIO ||
	    adapter->iface_type == MWIFIEX_PCIE) {
		p += sprintf(p, "\n=== %s register dump===\n",
			     adapter->iface_type == MWIFIEX_SDIO ?
							"SDIO" : "PCIE");
		if (adapter->if_ops.reg_dump)
			p += adapter->if_ops.reg_dump(adapter, p);
	}
	p += sprintf(p, "\n=== more debug information\n");
	debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
	if (debug_info) {
		for (i = 0; i < adapter->priv_num; i++) {
			if (!adapter->priv[i] || !adapter->priv[i]->netdev)
				continue;
			priv = adapter->priv[i];
			mwifiex_get_debug_info(priv, debug_info);
			p += mwifiex_debug_info_to_buffer(priv, p, debug_info);
			break;
		}
		kfree(debug_info);
	}

	adapter->drv_info_size = p - adapter->drv_info_dump;
	mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n");
}
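mwifiex_drv_info_dump() writes into the vzalloc'd buffer with plain sprintf(), relying on MWIFIEX_DRV_INFO_SIZE_MAX being large enough for everything printed. A defensive variant of the same pattern tracks the space actually used and prints with scnprintf(), which can never write past the end of the buffer. A minimal sketch; the buffer size and the fields printed are assumptions, not the driver's:

#include <linux/vmalloc.h>
#include <linux/kernel.h>	/* scnprintf() */

#define DUMP_SIZE_MAX 4096	/* assumed cap for this sketch */

static char *build_dump(size_t *out_len)
{
	char *buf, *p;

	buf = vzalloc(DUMP_SIZE_MAX);
	if (!buf)
		return NULL;

	p = buf;
	/* scnprintf() returns the number of characters actually written and
	 * never exceeds the size argument, so p cannot run past the end. */
	p += scnprintf(p, DUMP_SIZE_MAX - (p - buf), "driver_name = \"example\"\n");
	p += scnprintf(p, DUMP_SIZE_MAX - (p - buf), "tx_pending = %d\n", 3);

	*out_len = p - buf;	/* caller vfree()s the buffer when done */
	return buf;
}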