Example #1
static
struct device *ipmr_new_tunnel(struct vifctl *v)
{
	struct device  *dev = NULL;

	rtnl_lock();
	dev = dev_get("tunl0");

	if (dev) {
		int err;
		struct ifreq ifr;
		mm_segment_t	oldfs;
		struct ip_tunnel_parm p;
		struct in_device  *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (void*)&p;

		oldfs = get_fs(); set_fs(KERNEL_DS);
		err = dev->do_ioctl(dev, &ifr, SIOCADDTUNNEL);
		set_fs(oldfs);

		if (err == 0 && (dev = dev_get(p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = dev->ip_ptr;
			if (in_dev == NULL && (in_dev = inetdev_init(dev)) == NULL)
				goto failure;
			in_dev->cnf.rp_filter = 0;

			if (dev_open(dev))
				goto failure;
		}
	}
	rtnl_unlock();
	return dev;

failure:
	unregister_netdevice(dev);
	rtnl_unlock();
	return NULL;
}
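
The function above builds the SIOCADDTUNNEL request entirely inside the kernel (hence the set_fs(KERNEL_DS) dance around the base device's do_ioctl). The sketch below is a minimal, hypothetical user-space equivalent of the same request: the "tunl0" target, the "dvmrp%d" naming and the ip_tunnel_parm fields are taken from the example, while the socket/ioctl plumbing and error handling are assumptions rather than part of the quoted source.

/* Hedged sketch: create a dvmrp tunnel from user space (assumed plumbing). */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/if.h>
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>

static int add_dvmrp_tunnel(int vifi, __u32 local, __u32 remote)
{
	struct ip_tunnel_parm p;
	struct ifreq ifr;
	int fd, err;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&p, 0, sizeof(p));
	p.iph.daddr    = remote;		/* same fields the kernel example fills */
	p.iph.saddr    = local;
	p.iph.version  = 4;
	p.iph.ihl      = 5;
	p.iph.protocol = IPPROTO_IPIP;
	sprintf(p.name, "dvmrp%d", vifi);

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, "tunl0");		/* request goes through the base tunnel device */
	ifr.ifr_data = (void *)&p;

	err = ioctl(fd, SIOCADDTUNNEL, &ifr);	/* assumption: classic tunnel ioctl path */
	close(fd);
	return err;
}
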
Example #2
static int eql_emancipate(struct device *dev, slaving_request_t *srqp)
{
	struct device *master_dev;
	struct device *slave_dev;
	slaving_request_t srq;
	int err;

	err = verify_area(VERIFY_READ, (void *)srqp, sizeof (slaving_request_t));
	if (err) 
		return err;

	memcpy_fromfs (&srq, srqp, sizeof (slaving_request_t));
#ifdef EQL_DEBUG
	if (eql_debug >= 20)
		printk ("%s: emancipate `%s`\n", dev->name, srq.slave_name);
#endif
	master_dev = dev;		/* for "clarity" */
	slave_dev  = dev_get (srq.slave_name);

	if ( eql_is_slave (slave_dev) )	/* really is a slave */
	{
		equalizer_t *eql = (equalizer_t *) master_dev->priv;
		slave_dev->flags = slave_dev->flags & ~IFF_SLAVE;
		eql_remove_slave_dev (eql->queue, slave_dev);
		return 0;
	}
	return -EINVAL;
}
Example #3
static int eql_s_slave_cfg(struct device *dev, slave_config_t *scp)
{
	slave_t *slave;
	equalizer_t *eql;
	struct device *slave_dev;
	slave_config_t sc;
	int err;

	err = verify_area(VERIFY_READ, (void *)scp, sizeof (slave_config_t));
	if (err) 
		return err;

	memcpy_fromfs (&sc, scp, sizeof (slave_config_t));

#ifdef EQL_DEBUG
	if (eql_debug >= 20)
		printk ("%s: set config for slave `%s'\n", dev->name, sc.slave_name);
#endif

	eql = (equalizer_t *) dev->priv;
	slave_dev = dev_get (sc.slave_name);

	if ( eql_is_slave (slave_dev) )
	{
		slave = eql_find_slave_dev (eql->queue, slave_dev);
		if (slave != 0)
		{
			slave->priority = sc.priority;
			slave->priority_bps = sc.priority;
			slave->priority_Bps = sc.priority / 8;
			return 0;
		}
	}
	return -EINVAL;
}
Example #4
static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct exynos_drm_subdrv *subdrv;
	struct drm_hdmi_context *ctx;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		DRM_LOG_KMS("failed to alloc common hdmi context.\n");
		return -ENOMEM;
	}

	subdrv = &ctx->subdrv;

	subdrv->dev = dev;
	subdrv->manager = &hdmi_manager;
	subdrv->probe = hdmi_subdrv_probe;
	subdrv->open = hdmi_subdrv_open;
	subdrv->close = hdmi_subdrv_close;

#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
	/* To lock bus frequency in OPP mode */
	ctx->bus_dev = dev_get("exynos-busfreq");
#endif

	platform_set_drvdata(pdev, subdrv);

	exynos_drm_subdrv_register(subdrv);

	return 0;
}
Example #5
static int exynos_frequency_unlock(struct device *dev)
{
	int ret = 0;
	struct device *busdev = dev_get("exynos-busfreq");

	if (atomic_read(&umts_link_pm_data.freqlock) == 1) {
		/* cpu frequency unlock */
		exynos_cpufreq_lock_free(DVFS_LOCK_ID_USB_IF);

		/* bus frequency unlock */
		ret = dev_unlock(busdev, dev);
		if (ret < 0) {
			mif_err("ERR: dev_unlock error: %d\n", ret);
			goto exit;
		}

		/* unlock minimum number of cpu cores */
		cpufreq_pegasusq_min_cpu_unlock();

		atomic_set(&umts_link_pm_data.freqlock, 0);
		mif_debug("success\n");
	}
exit:
	return ret;
}
Example #6
/*
 *	Sanity check: remove all devices that have ceased to exist and
 *	return '1' if the given LAPB device was affected.
 */
static int lapbeth_check_devices(struct net_device *dev)
{
	struct lapbethdev *lapbeth, *lapbeth_prev;
	int result = 0;
	unsigned long flags;
	
	save_flags(flags);
	cli();

	lapbeth_prev = NULL;

	for (lapbeth = lapbeth_devices; lapbeth != NULL; lapbeth = lapbeth->next) {
		if (!dev_get(lapbeth->ethname)) {
			if (lapbeth_prev)
				lapbeth_prev->next = lapbeth->next;
			else
				lapbeth_devices = lapbeth->next;
				
			if (&lapbeth->axdev == dev)
				result = 1;

			unregister_netdev(&lapbeth->axdev);
			dev_put(lapbeth->ethdev);
			kfree(lapbeth);
		}

		lapbeth_prev = lapbeth;
	}
	
	restore_flags(flags);
	
	return result;
}
Example #7
static int shaper_ioctl(struct device *dev,  struct ifreq *ifr, int cmd)
{
	struct shaperconf *ss= (struct shaperconf *)&ifr->ifr_data;
	struct shaper *sh=dev->priv;
	switch(ss->ss_cmd)
	{
		case SHAPER_SET_DEV:
		{
			struct device *them=dev_get(ss->ss_name);
			if(them==NULL)
				return -ENODEV;
			if(sh->dev)
				return -EBUSY;
			return shaper_attach(dev,dev->priv, them);
		}
		case SHAPER_GET_DEV:
			if(sh->dev==NULL)
				return -ENODEV;
			strcpy(ss->ss_name, sh->dev->name);
			return 0;
		case SHAPER_SET_SPEED:
			shaper_setspeed(sh,ss->ss_speed);
			return 0;
		case SHAPER_GET_SPEED:
			ss->ss_speed=sh->bitspersec;
			return 0;
		default:
			return -EINVAL;
	}
}
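
shaper_ioctl ignores the ioctl command itself and dispatches on ss_cmd, so it only needs to be reached through the device's do_ioctl hook (Example #23 below shows dev_ifsioc forwarding SIOCDEVPRIVATE-range commands there). The sketch below is a hypothetical user-space caller attaching a real interface to a shaper device; the shaperconf fields come from the code above, while the header name and the use of SIOCDEVPRIVATE are assumptions.

/* Hedged sketch: attach eth0 to shaper0 via the driver's private ioctl.
 * fd is any AF_INET datagram socket. */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_shaper.h>		/* struct shaperconf, SHAPER_SET_DEV (assumed header) */

static int shaper_set_dev(int fd, const char *shaper, const char *target)
{
	struct ifreq ifr;
	struct shaperconf *ss = (struct shaperconf *)&ifr.ifr_data;	/* same overlay the driver uses */

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, shaper);		/* e.g. "shaper0" */
	ss->ss_cmd = SHAPER_SET_DEV;
	strcpy(ss->ss_name, target);		/* e.g. "eth0" */

	return ioctl(fd, SIOCDEVPRIVATE, &ifr);
}
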
Example #8
struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int create)
{
	u32 remote = parms->iph.daddr;
	u32 local = parms->iph.saddr;
	struct ip_tunnel *t, **tp, *nt;
	struct device *dev;
	unsigned h = 0;
	int prio = 0;

	if (remote) {
		prio |= 2;
		h ^= HASH(remote);
	}
	if (local) {
		prio |= 1;
		h ^= HASH(local);
	}
	for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
			return t;
	}
	if (!create)
		return NULL;

	MOD_INC_USE_COUNT;
	dev = kmalloc(sizeof(*dev) + sizeof(*t), GFP_KERNEL);
	if (dev == NULL) {
		MOD_DEC_USE_COUNT;
		return NULL;
	}
	memset(dev, 0, sizeof(*dev) + sizeof(*t));
	dev->priv = (void*)(dev+1);
	nt = (struct ip_tunnel*)dev->priv;
	nt->dev = dev;
	dev->name = nt->parms.name;
	dev->init = ipip_tunnel_init;
	memcpy(&nt->parms, parms, sizeof(*parms));
	if (dev->name[0] == 0) {
		int i;
		for (i=1; i<100; i++) {
			sprintf(dev->name, "tunl%d", i);
			if (dev_get(dev->name) == NULL)
				break;
		}
		if (i==100)
			goto failed;
		memcpy(parms->name, dev->name, IFNAMSIZ);
	}
	if (register_netdevice(dev) < 0)
		goto failed;

	ipip_tunnel_link(nt);
	/* Do not decrement MOD_USE_COUNT here. */
	return nt;

failed:
	kfree(dev);
	MOD_DEC_USE_COUNT;
	return NULL;
}
Example #9
static int exynos_frequency_lock(struct device *dev)
{
	unsigned int level, cpufreq = 600; /* 200 ~ 1400 */
	unsigned int busfreq = 400200; /* 100100 ~ 400200 */
	int ret = 0, lock_id;
	atomic_t *freqlock;
	struct device *busdev = dev_get("exynos-busfreq");

	if (!strcmp(dev->bus->name, "usb")) {
		lock_id = DVFS_LOCK_ID_USB_IF;
		cpufreq = 600;
		freqlock = &umts_link_pm_data.freqlock;
	} else if (!strcmp(dev->bus->name, "platform")) { // for dpram lock
		lock_id = DVFS_LOCK_ID_DPRAM_IF;
		cpufreq = 800;
		freqlock = &umts_link_pm_data.freq_dpramlock;
	} else {
		mif_err("ERR: Unkown unlock ID (%s)\n", dev->bus->name);
		goto exit;
	}
	
	if (atomic_read(freqlock) == 0) {
		/* cpu frequency lock */
		ret = exynos_cpufreq_get_level(cpufreq * 1000, &level);
		if (ret < 0) {
			mif_err("ERR: exynos_cpufreq_get_level fail: %d\n",
					ret);
			goto exit;
		}

		ret = exynos_cpufreq_lock(lock_id, level);
		if (ret < 0) {
			mif_err("ERR: exynos_cpufreq_lock fail: %d\n", ret);
			goto exit;
		}

		/* bus frequency lock */
		if (!busdev) {
			mif_err("ERR: busdev does not exist\n");
			ret = -ENODEV;
			goto exit;
		}

		ret = dev_lock(busdev, dev, busfreq);
		if (ret < 0) {
			mif_err("ERR: dev_lock error: %d\n", ret);
			goto exit;
		}

		/* lock minimum number of cpu cores */
		cpufreq_pegasusq_min_cpu_lock(2);

		atomic_set(freqlock, 1);
		mif_info("level=%d, cpufreq=%d MHz, busfreq=%06d\n",
				level, cpufreq, busfreq);
	}
exit:
	return ret;
}
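
A caller would typically hold these frequency floors only for the duration of a latency-sensitive transfer, pairing the lock above with the matching exynos_frequency_unlock shown elsewhere in this listing. The wrapper below is purely illustrative; do_transfer is a made-up placeholder for whatever work needs the locked frequencies.

/* Hedged sketch of a hypothetical caller bracketing a transfer. */
static int modem_do_fast_transfer(struct device *dev)
{
	int ret;

	ret = exynos_frequency_lock(dev);	/* raise CPU/bus/core-count floors */
	if (ret < 0)
		return ret;

	ret = do_transfer(dev);			/* placeholder for the real work */

	exynos_frequency_unlock(dev);		/* drop the floors again */
	return ret;
}
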
Example #10
static int eql_enslave(struct device *dev, slaving_request_t *srqp)
{
	struct device *master_dev;
	struct device *slave_dev;
	slaving_request_t srq;
	int err;

	err = verify_area(VERIFY_READ, (void *)srqp, sizeof (slaving_request_t));
	if (err)  
	  {
#ifdef EQL_DEBUG
	if (eql_debug >= 20)
		printk ("EQL enslave: error detected by verify_area\n");
#endif  
		return err;
	  }
	memcpy_fromfs (&srq, srqp, sizeof (slaving_request_t));

#ifdef EQL_DEBUG
	if (eql_debug >= 20)
		printk ("%s: enslave '%s' %ld bps\n", dev->name, 
			srq.slave_name, srq.priority);
#endif  
	master_dev = dev;		/* for "clarity" */
	slave_dev  = dev_get (srq.slave_name);

	if (master_dev != 0 && slave_dev != 0)
	{
		if (! eql_is_master (slave_dev)  &&   /* slave is not a master */
			! eql_is_slave (slave_dev)      ) /* slave is not already a slave */
		{
			slave_t *s = eql_new_slave ();
			equalizer_t *eql = (equalizer_t *) master_dev->priv;
			s->dev = slave_dev;
			s->priority = srq.priority;
			s->priority_bps = srq.priority;
			s->priority_Bps = srq.priority / 8;
			slave_dev->flags |= IFF_SLAVE;
			eql_insert_slave (eql->queue, s);
			return 0;
		}
#ifdef EQL_DEBUG
	if (eql_debug >= 20)
		printk ("EQL enslave: slave is master or slave is already slave\n");
#endif  

		return -EINVAL;
	}
#ifdef EQL_DEBUG
	if (eql_debug >= 20)
		printk ("EQL enslave: master or slave are NULL");
#endif  
	return -EINVAL;
}
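
eql_enslave runs as part of the EQL master device's private ioctl handling, with srqp pointing into user space (hence verify_area and memcpy_fromfs). A hypothetical user-space caller would look roughly like the sketch below; the slaving_request_t fields match the code above, while the EQL_ENSLAVE ioctl name and the <linux/if_eql.h> header are assumptions about the era's EQL interface.

/* Hedged sketch: enslave eth0 under eql0 from user space.
 * fd is any AF_INET datagram socket. */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_eql.h>		/* slaving_request_t, EQL_ENSLAVE (assumed header) */

static int eql_add_slave(int fd, const char *master, const char *slave, long bps)
{
	struct ifreq ifr;
	slaving_request_t srq;

	memset(&srq, 0, sizeof(srq));
	strcpy(srq.slave_name, slave);	/* e.g. "eth0" */
	srq.priority = bps;		/* line speed in bits per second */

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, master);	/* e.g. "eql0" */
	ifr.ifr_data = (void *)&srq;

	return ioctl(fd, EQL_ENSLAVE, &ifr);
}
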
Example #11
/*
 *	Check that the device given is a valid AX.25 interface that is "up".
 */
struct device *rose_ax25_dev_get(char *devname)
{
	struct device *dev;

	if ((dev = dev_get(devname)) == NULL)
		return NULL;

	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
		return dev;

	return NULL;
}
Example #12
File: dev.c Project: 0xffea/gnumach
extern __inline__ void dev_load(const char *name)
{
	if(!dev_get(name) && suser()) {
#ifdef CONFIG_NET_ALIAS
		const char *sptr;
 
		for (sptr=name ; *sptr ; sptr++) if(*sptr==':') break;
		if (!(*sptr && *(sptr+1)))
#endif
		request_module(name);
	}
}
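
dev_load is the helper that the old ioctl paths call just before dev_get, so a driver module can be demand-loaded the first time its interface name is referenced. The fragment below is a hypothetical illustration of that calling pattern; only dev_load and dev_get come from this listing, and the surrounding function is invented.

/* Hedged sketch of the usual call sequence: demand-load, then look up. */
static int example_find_interface(const char *name, struct device **devp)
{
	struct device *dev;

	dev_load(name);			/* may call request_module(name) */
	dev = dev_get(name);		/* retry the lookup after the load attempt */
	if (dev == NULL)
		return -ENODEV;

	*devp = dev;
	return 0;
}
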
Example #13
/*
 *	Set destination address.
 *	Special case for SIT interfaces where we create a new "virtual"
 *	device.
 */
int addrconf_set_dstaddr(void *arg)
{
    struct in6_ifreq ireq;
    struct device *dev;
    int err = -EINVAL;

    rtnl_lock();

    err = -EFAULT;
    if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
        goto err_exit;

    dev = dev_get_by_index(ireq.ifr6_ifindex);

    err = -ENODEV;
    if (dev == NULL)
        goto err_exit;

    if (dev->type == ARPHRD_SIT) {
        struct ifreq ifr;
        mm_segment_t	oldfs;
        struct ip_tunnel_parm p;

        err = -EADDRNOTAVAIL;
        if (!(ipv6_addr_type(&ireq.ifr6_addr) & IPV6_ADDR_COMPATv4))
            goto err_exit;

        memset(&p, 0, sizeof(p));
        p.iph.daddr = ireq.ifr6_addr.s6_addr32[3];
        p.iph.saddr = 0;
        p.iph.version = 4;
        p.iph.ihl = 5;
        p.iph.protocol = IPPROTO_IPV6;
        p.iph.ttl = 64;
        ifr.ifr_ifru.ifru_data = (void*)&p;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = dev->do_ioctl(dev, &ifr, SIOCADDTUNNEL);
        set_fs(oldfs);

        if (err == 0) {
            err = -ENOBUFS;
            if ((dev = dev_get(p.name)) == NULL)
                goto err_exit;
            err = dev_open(dev);
        }
    }

err_exit:
    rtnl_unlock();
    return err;
}
Example #14
int init_module(void)
{
	dev_ethertap.base_addr=unit+NETLINK_TAPBASE;
	sprintf(devicename,"tap%d",unit);
	if (dev_get(devicename))
	{
		printk(KERN_INFO "%s already loaded.\n", devicename);
		return -EBUSY;
	}
	if (register_netdev(&dev_ethertap) != 0)
		return -EIO;
	return 0;
}
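
The matching unload hook would simply unregister the same statically allocated device; a minimal sketch, assuming the dev_ethertap object used in init_module above, could look like this (the real driver may also free extra state).

/* Hedged sketch of the corresponding module unload path. */
void cleanup_module(void)
{
	unregister_netdev(&dev_ethertap);	/* drop the tap%d interface again */
}
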
Example #15
static int append_to_chain(struct ip_fw *volatile* chainptr, struct ip_fw *frwl,int len)
{
	struct ip_fw *ftmp;
	struct ip_fw *chtmp=NULL;
	struct ip_fw *volatile chtmp_prev=NULL;
	unsigned long flags;

	save_flags(flags);

	ftmp = kmalloc( sizeof(struct ip_fw), GFP_ATOMIC );
	if ( ftmp == NULL ) 
	{
#ifdef DEBUG_IP_FIREWALL
		printk("ip_fw_ctl:  malloc said no\n");
#endif
		return( ENOMEM );
	}

	memcpy(ftmp, frwl, len);
	/*
	 *	Allow the more recent "minimise cost" flag to be
	 *	set. [Rob van Nieuwkerk]
	 */
	ftmp->fw_tosand |= 0x01;
	ftmp->fw_tosxor &= 0xFE;
	ftmp->fw_pcnt=0L;
	ftmp->fw_bcnt=0L;

	ftmp->fw_next = NULL;

	cli();

	if ((ftmp->fw_vianame)[0]) {
		if (!(ftmp->fw_viadev = dev_get(ftmp->fw_vianame)))
			ftmp->fw_viadev = (struct device *) -1;
	} else
		ftmp->fw_viadev = NULL;

	chtmp_prev=NULL;
	for (chtmp=*chainptr;chtmp!=NULL;chtmp=chtmp->fw_next) 
		chtmp_prev=chtmp;
	
	if (chtmp_prev)
		chtmp_prev->fw_next=ftmp;
	else
        	*chainptr=ftmp;
	restore_flags(flags);
	return(0);
}
Example #16
int init_module(void)
{
	/* Find a name for this unit */
	int ct= 1;
	
	while(dev_get(dev_dummy.name)!=NULL && ct<100)
	{
		sprintf(dev_dummy.name,"dummy%d",ct);
		ct++;
	}
	if(ct==100)
		return -ENFILE;
	
	if (register_netdev(&dev_dummy) != 0)
		return -EIO;
	return 0;
}
Example #17
static int exynos_frequency_lock(struct device *dev)
{
	unsigned int level, cpufreq = 600; /* 200 ~ 1400 */
	unsigned int busfreq = 400200; /* 100100 ~ 400200 */
	int ret = 0;
	struct device *busdev = dev_get("exynos-busfreq");

	if (atomic_read(&umts_link_pm_data.freqlock) == 0) {
		/* cpu frequency lock */
		ret = exynos_cpufreq_get_level(cpufreq * 1000, &level);
		if (ret < 0) {
			mif_err("ERR: exynos_cpufreq_get_level fail: %d\n",
					ret);
			goto exit;
		}

		ret = exynos_cpufreq_lock(DVFS_LOCK_ID_USB_IF, level);
		if (ret < 0) {
			mif_err("ERR: exynos_cpufreq_lock fail: %d\n", ret);
			goto exit;
		}

		/* bus frequency lock */
		if (!busdev) {
			mif_err("ERR: busdev does not exist\n");
			ret = -ENODEV;
			goto exit;
		}

		ret = dev_lock(busdev, dev, busfreq);
		if (ret < 0) {
			mif_err("ERR: dev_lock error: %d\n", ret);
			goto exit;
		}

		/* lock minimum number of cpu cores */
		cpufreq_pegasusq_min_cpu_lock(2);

		atomic_set(&umts_link_pm_data.freqlock, 1);
		mif_debug("level=%d, cpufreq=%d MHz, busfreq=%06d\n",
				level, cpufreq, busfreq);
	}
exit:
	return ret;
}
Example #18
int dlci_del(struct dlci_add *dlci)
{
   struct dlci_local *dlp;
   struct frad_local *flp;
   struct device     *master, *slave;
   int               i, err;

   /* validate slave device */
   master = dev_get(dlci->devname);
   if (!master)
      return(-ENODEV);

   if (master->start)
      return(-EBUSY);

   dlp = master->priv;
   slave = dlp->slave;
   flp = slave->priv;

   err = (*flp->deassoc)(slave, master);
   if (err)
      return(err);

   unregister_netdev(master);

   for(i=0;i<CONFIG_DLCI_COUNT;i++)
      if (master == open_dev[i])
         break;

   if (i<CONFIG_DLCI_COUNT)
      open_dev[i] = NULL;

   kfree(master->priv);
   kfree(master->name);
   kfree(master);

   MOD_DEC_USE_COUNT;

   return(0);
}
Example #19
/*
 *	Sanity check: remove all devices that have ceased to exist and
 *	return '1' if the given BPQ device was affected.
 */
static int bpq_check_devices(struct net_device *dev)
{
	struct bpqdev *bpq, *bpq_prev, *bpq_next;
	int result = 0;
	unsigned long flags;

	save_flags(flags);
	cli();

	bpq_prev = NULL;

	for (bpq = bpq_devices; bpq != NULL; bpq = bpq_next) {
		bpq_next = bpq->next;
		if (!dev_get(bpq->ethname)) {
			if (bpq_prev)
				bpq_prev->next = bpq->next;
			else
				bpq_devices = bpq->next;

			if (&bpq->axdev == dev)
				result = 1;

			/* We should be locked, call 
			 * unregister_netdevice directly 
			 */

			unregister_netdevice(&bpq->axdev);
			kfree(bpq);
		}
		else
			bpq_prev = bpq;
	}

	restore_flags(flags);

	return result;
}
Example #20
static int exynos_frequency_unlock(struct device *dev)
{
	int ret = 0, lock_id;
	atomic_t *freqlock;
	struct device *busdev = dev_get("exynos-busfreq");

	if (!strcmp(dev->bus->name, "usb")) {
		lock_id = DVFS_LOCK_ID_USB_IF;
		freqlock = &umts_link_pm_data.freqlock;
	} else if (!strcmp(dev->bus->name, "platform")) { // for dpram lock
		lock_id = DVFS_LOCK_ID_DPRAM_IF;
		freqlock = &umts_link_pm_data.freq_dpramlock;
	} else {
		mif_err("ERR: Unkown unlock ID (%s)\n", dev->bus->name);
		goto exit;
	}

	if (atomic_read(freqlock) == 1) {
		/* cpu frequency unlock */
		exynos_cpufreq_lock_free(lock_id);

		/* bus frequency unlock */
		ret = dev_unlock(busdev, dev);
		if (ret < 0) {
			mif_err("ERR: dev_unlock error: %d\n", ret);
			goto exit;
		}

		/* unlock minimum number of cpu cores */
		cpufreq_pegasusq_min_cpu_unlock();

		atomic_set(freqlock, 0);
		mif_info("success\n");
	}
exit:
	return ret;
}
Example #21
int
init_module(void)
{
	int i;
	for (i = 0; i < 100; i++) {
		sprintf(dev_sb1000.name, "cm%d", i);
		if (dev_get(dev_sb1000.name) == 0) break;
	}
	if (i == 100) {
		printk(KERN_ERR "sb1000: can't register any device cm<n>\n");
		return -ENFILE;
	}
	dev_sb1000.init = sb1000_probe;
	dev_sb1000.base_addr = io[0];
	/* rmem_end holds the second I/O address - fv */
	dev_sb1000.rmem_end = io[1];
	dev_sb1000.irq = irq;
	if (register_netdev(&dev_sb1000) != 0) {
		printk(KERN_ERR "sb1000: failed to register device (io: %03x,%03x   "
			"irq: %d)\n", io[0], io[1], irq);
		return -EIO;
	}
	return 0;
}
Example #22
static int fimc_is_probe(struct platform_device *pdev)
{
	struct exynos4_platform_fimc_is *pdata;
	struct resource *mem_res;
	struct resource *regs_res;
	struct fimc_is_dev *dev;
#if defined(CONFIG_VIDEO_EXYNOS_FIMC_IS_BAYER)
	struct v4l2_device *v4l2_dev;
	struct vb2_queue *isp_q;
#endif
	int ret = -ENODEV;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev, "Not enough memory for FIMC-IS device.\n");
		return -ENOMEM;
	}
	mutex_init(&dev->lock);
	spin_lock_init(&dev->slock);
	init_waitqueue_head(&dev->irq_queue1);

	dev->pdev = pdev;
	if (!dev->pdev) {
		dev_err(&pdev->dev, "No platform data specified\n");
		goto p_err_info;
	}

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "Platform data not set\n");
		goto p_err_info;
	}
	dev->pdata = pdata;
	/*
	 * I/O remap
	*/
	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		dev_err(&pdev->dev, "Failed to get io memory region\n");
		ret = -ENOENT;
		goto p_err_info;
	}

	regs_res = request_mem_region(mem_res->start,
		resource_size(mem_res), pdev->name);
	if (!regs_res) {
		dev_err(&pdev->dev, "Failed to request io memory region\n");
		ret = -ENOENT;
		goto p_err_info;
	}
	dev->regs_res = regs_res;

	dev->regs = ioremap(mem_res->start, resource_size(mem_res));
	if (!dev->regs) {
		dev_err(&pdev->dev, "Failed to remap io region\n");
		ret = -ENXIO;
		goto p_err_req_region;
	}

	/*
	 * initialize IRQ , FIMC-IS IRQ : ISP[0] -> SPI[90] , ISP[1] -> SPI[95]
	*/
	dev->irq1 = platform_get_irq(pdev, 0);
	if (dev->irq1 < 0) {
		ret = dev->irq1;
		dev_err(&pdev->dev, "Failed to get irq\n");
		goto p_err_get_irq;
	}

	ret = request_irq(dev->irq1, fimc_is_irq_handler1,
		IRQF_DISABLED, dev_name(&pdev->dev), dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate irq (%d)\n", ret);
		goto p_err_req_irq;
	}

#if defined(CONFIG_VIDEO_EXYNOS_FIMC_IS_BAYER)
	/* Init v4l2 device (ISP) */
#if defined(CONFIG_VIDEOBUF2_CMA_PHYS)
	dev->vb2 = &fimc_is_vb2_cma;
#elif defined(CONFIG_VIDEOBUF2_ION)
	dev->vb2 = &fimc_is_vb2_ion;
#endif

	/* Init and register V4L2 device */
	v4l2_dev = &dev->video[FIMC_IS_VIDEO_NUM_BAYER].v4l2_dev;
	if (!v4l2_dev->name[0])
		snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
			 "%s.isp", dev_name(&dev->pdev->dev));
	ret = v4l2_device_register(NULL, v4l2_dev);

	snprintf(dev->video[FIMC_IS_VIDEO_NUM_BAYER].vd.name,
			sizeof(dev->video[FIMC_IS_VIDEO_NUM_BAYER].vd.name),
			"%s", "exynos4-fimc-is-bayer");
	dev->video[FIMC_IS_VIDEO_NUM_BAYER].vd.fops		=
						&fimc_is_isp_video_fops;
	dev->video[FIMC_IS_VIDEO_NUM_BAYER].vd.ioctl_ops	=
						&fimc_is_isp_video_ioctl_ops;
	dev->video[FIMC_IS_VIDEO_NUM_BAYER].vd.minor		= -1;
	dev->video[FIMC_IS_VIDEO_NUM_BAYER].vd.release		=
						video_device_release;
	dev->video[FIMC_IS_VIDEO_NUM_BAYER].vd.lock		=
						&dev->lock;
	video_set_drvdata(&dev->video[FIMC_IS_VIDEO_NUM_BAYER].vd, dev);
	dev->video[FIMC_IS_VIDEO_NUM_BAYER].dev = dev;

	isp_q = &dev->video[FIMC_IS_VIDEO_NUM_BAYER].vbq;
	memset(isp_q, 0, sizeof(*isp_q));
	isp_q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	isp_q->io_modes = VB2_MMAP | VB2_USERPTR;
	isp_q->drv_priv = &dev->video[FIMC_IS_VIDEO_NUM_BAYER];
	isp_q->ops = &fimc_is_isp_qops;
	isp_q->mem_ops = dev->vb2->ops;

	vb2_queue_init(isp_q);

	ret = video_register_device(&dev->video[FIMC_IS_VIDEO_NUM_BAYER].vd,
							VFL_TYPE_GRABBER, 30);
	if (ret) {
		v4l2_err(v4l2_dev, "Failed to register video device\n");
		goto err_vd_reg;
	}

	printk(KERN_INFO "FIMC-IS Video node :: ISP %d minor : %d\n",
		dev->video[FIMC_IS_VIDEO_NUM_BAYER].vd.num,
		dev->video[FIMC_IS_VIDEO_NUM_BAYER].vd.minor);
#endif
	/*
	 * initialize memory manager
	*/
	ret = fimc_is_init_mem_mgr(dev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to fimc_is_init_mem_mgr (%d)\n", ret);
		goto p_err_init_mem;
	}
	dbg("Parameter region = 0x%08x\n", (unsigned int)dev->is_p_region);

	/*
	 * Get related clock for FIMC-IS
	*/
	if (dev->pdata->clk_get) {
		dev->pdata->clk_get(pdev);
	} else {
		err("#### failed to Get Clock####\n");
		goto p_err_init_mem;
	}
	/* Init v4l2 sub device */
	v4l2_subdev_init(&dev->sd, &fimc_is_subdev_ops);
	dev->sd.owner = THIS_MODULE;
	strcpy(dev->sd.name, MODULE_NAME);
	v4l2_set_subdevdata(&dev->sd, pdev);

	platform_set_drvdata(pdev, &dev->sd);

	pm_runtime_enable(&pdev->dev);

#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
	/* To lock bus frequency in OPP mode */
	dev->bus_dev = dev_get("exynos-busfreq");
#endif
	dev->power = 0;
	dev->state = 0;
	dev->sensor_num = FIMC_IS_SENSOR_NUM;
	dev->sensor.id = 0;
	dev->p_region_index1 = 0;
	dev->p_region_index2 = 0;
	dev->sensor.offset_x = 16;
	dev->sensor.offset_y = 12;
	dev->sensor.framerate_update = false;
	atomic_set(&dev->p_region_num, 0);
	set_bit(IS_ST_IDLE, &dev->state);
	set_bit(IS_PWR_ST_POWEROFF, &dev->power);
	dev->af.af_state = FIMC_IS_AF_IDLE;
	dev->af.mode = IS_FOCUS_MODE_IDLE;
	dev->low_power_mode = false;
	dev->fw.state = 0;
	dev->setfile.state = 0;
#if defined(M0)
	s5k6a3_dev = device_create(camera_class, NULL, 0, NULL, "front");
	if (IS_ERR(s5k6a3_dev)) {
		printk(KERN_ERR "failed to create device!\n");
	} else {
		if (device_create_file(s5k6a3_dev, &dev_attr_front_camtype)
				< 0) {
			printk(KERN_ERR "failed to create device file, %s\n",
				dev_attr_front_camtype.attr.name);
		}
		if (device_create_file(s5k6a3_dev, &dev_attr_front_camfw) < 0) {
			printk(KERN_ERR "failed to create device file, %s\n",
				dev_attr_front_camfw.attr.name);
		}
	}
#endif
	printk(KERN_INFO "FIMC-IS probe completed\n");
	return 0;

p_err_init_mem:
	free_irq(dev->irq1, dev);
#if defined(CONFIG_VIDEO_EXYNOS_FIMC_IS_BAYER)
err_vd_reg:
	video_device_release(&dev->video[FIMC_IS_VIDEO_NUM_BAYER].vd);
#endif
p_err_req_irq:
p_err_get_irq:
	iounmap(dev->regs);
p_err_req_region:
	release_mem_region(regs_res->start, resource_size(regs_res));
p_err_info:
	dev_err(&dev->pdev->dev, "failed to install\n");
	kfree(dev);
	return ret;
}
Example #23
static int dev_ifsioc(void *arg, unsigned int getset)
{
	struct ifreq ifr;
	struct device *dev;
	int ret;

	/*
	 *	Fetch the caller's info block into kernel space
	 */

	int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
	if(err)
		return err;
	
	memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));

	/*
	 *	See which interface the caller is talking about. 
	 */
	 
	if ((dev = dev_get(ifr.ifr_name)) == NULL) 
		return(-ENODEV);

	switch(getset) 
	{
		case SIOCGIFFLAGS:	/* Get interface flags */
			ifr.ifr_flags = dev->flags;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;
		case SIOCSIFFLAGS:	/* Set interface flags */
			{
				int old_flags = dev->flags;
#ifdef CONFIG_SLAVE_BALANCING				
				if(dev->flags&IFF_SLAVE)
					return -EBUSY;
#endif					
				dev->flags = ifr.ifr_flags & (
					IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
					IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
					IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
					| IFF_MULTICAST);
#ifdef CONFIG_SLAVE_BALANCING				
				if(!(dev->flags&IFF_MASTER) && dev->slave)
				{
					dev->slave->flags&=~IFF_SLAVE;
					dev->slave=NULL;
				}
#endif
				/*
				 *	Load in the correct multicast list now the flags have changed.
				 */				

				dev_mc_upload(dev);
#if 0
				if( dev->set_multicast_list!=NULL)
				{
				
					/*
					 *	Has promiscuous mode been turned off
					 */	
				
					if ( (old_flags & IFF_PROMISC) && ((dev->flags & IFF_PROMISC) == 0))
			 			dev->set_multicast_list(dev,0,NULL);
			 		
			 		/*
			 		 *	Has it been turned on
			 		 */
	
					if ( (dev->flags & IFF_PROMISC) && ((old_flags & IFF_PROMISC) == 0))
			  			dev->set_multicast_list(dev,-1,NULL);
			  	}
#endif			  		
			  	/*
			  	 *	Have we downed the interface
			  	 */
		
				if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0)) 
				{
					ret = dev_close(dev);
				}
				else
				{
					/*
					 *	Have we upped the interface 
					 */
					 
			      		ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
						? dev_open(dev) : 0;
					/* 
					 *	Check the flags.
					 */
					if(ret<0)
						dev->flags&=~IFF_UP;	/* Didn't open so down the if */
			  	}
	        	}
			break;
		
		case SIOCGIFADDR:	/* Get interface address (and family) */
			(*(struct sockaddr_in *)
				  &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
			(*(struct sockaddr_in *)
				  &ifr.ifr_addr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				  &ifr.ifr_addr).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;
	
		case SIOCSIFADDR:	/* Set interface address (and family) */
			dev->pa_addr = (*(struct sockaddr_in *)
				 &ifr.ifr_addr).sin_addr.s_addr;
			dev->family = ifr.ifr_addr.sa_family;
			
#ifdef CONFIG_INET	
			/* This is naughty. When net-032e comes out It wants moving into the net032
			   code not the kernel. Till then it can sit here (SIGH) */		
			dev->pa_mask = ip_get_mask(dev->pa_addr);
#endif			
			dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
			ret = 0;
			break;
			
		case SIOCGIFBRDADDR:	/* Get the broadcast address */
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFBRDADDR:	/* Set the broadcast address */
			dev->pa_brdaddr = (*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr;
			ret = 0;
			break;
			
		case SIOCGIFDSTADDR:	/* Get the destination address (for point-to-point links) */
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
				memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;
	
		case SIOCSIFDSTADDR:	/* Set the destination address (for point-to-point links) */
			dev->pa_dstaddr = (*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr;
			ret = 0;
			break;
			
		case SIOCGIFNETMASK:	/* Get the netmask for the interface */
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
			{
				unsigned long mask = (*(struct sockaddr_in *)
					&ifr.ifr_netmask).sin_addr.s_addr;
				ret = -EINVAL;
				/*
				 *	The mask we set must be legal.
				 */
				if (bad_mask(mask,0))
					break;
				dev->pa_mask = mask;
				ret = 0;
			}
			break;
			
		case SIOCGIFMETRIC:	/* Get the metric on the interface (currently unused) */
			
			ifr.ifr_metric = dev->metric;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;
			
		case SIOCSIFMETRIC:	/* Set the metric on the interface (currently unused) */
			dev->metric = ifr.ifr_metric;
			ret = 0;
			break;
	
		case SIOCGIFMTU:	/* Get the MTU of a device */
			ifr.ifr_mtu = dev->mtu;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;
	
		case SIOCSIFMTU:	/* Set the MTU of a device */
		
			/*
			 *	MTU must be positive and under the page size problem
			 */
			 
			if(ifr.ifr_mtu<1 || ifr.ifr_mtu>3800)
				return -EINVAL;
			dev->mtu = ifr.ifr_mtu;
			ret = 0;
			break;
	
		case SIOCGIFMEM:	/* Get the per device memory space. We can add this but currently
					   do not support it */
			printk("NET: ioctl(SIOCGIFMEM, %p)\n", arg);
			ret = -EINVAL;
			break;
		
		case SIOCSIFMEM:	/* Set the per device memory buffer space. Not applicable in our case */
			printk("NET: ioctl(SIOCSIFMEM, %p)\n", arg);
			ret = -EINVAL;
			break;

		case OLD_SIOCGIFHWADDR:	/* Get the hardware address. This will change and SIFHWADDR will be added */
			memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;

		case SIOCGIFHWADDR:
			memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
			ifr.ifr_hwaddr.sa_family=dev->type;			
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;
			
		case SIOCSIFHWADDR:
			if(dev->set_mac_address==NULL)
				return -EOPNOTSUPP;
			if(ifr.ifr_hwaddr.sa_family!=dev->type)
				return -EINVAL;
			ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
			break;
			
		case SIOCGIFMAP:
			ifr.ifr_map.mem_start=dev->mem_start;
			ifr.ifr_map.mem_end=dev->mem_end;
			ifr.ifr_map.base_addr=dev->base_addr;
			ifr.ifr_map.irq=dev->irq;
			ifr.ifr_map.dma=dev->dma;
			ifr.ifr_map.port=dev->if_port;
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;
			
		case SIOCSIFMAP:
			if(dev->set_config==NULL)
				return -EOPNOTSUPP;
			return dev->set_config(dev,&ifr.ifr_map);
			
		case SIOCGIFSLAVE:
#ifdef CONFIG_SLAVE_BALANCING		
			if(dev->slave==NULL)
				return -ENOENT;
			strncpy(ifr.ifr_name,dev->name,sizeof(ifr.ifr_name));
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
#else
			return -ENOENT;
#endif			
			break;
#ifdef CONFIG_SLAVE_BALANCING			
		case SIOCSIFSLAVE:
		{
		
		/*
		 *	Fun game. Get the device up and the flags right without
		 *	letting some scummy user confuse us.
		 */
			unsigned long flags;
			struct device *slave=dev_get(ifr.ifr_slave);
			save_flags(flags);
			if(slave==NULL)
			{
				return -ENODEV;
			}
			cli();
			if((slave->flags&(IFF_UP|IFF_RUNNING))!=(IFF_UP|IFF_RUNNING))
			{
				restore_flags(flags);
				return -EINVAL;
			}
			if(dev->flags&IFF_SLAVE)
			{
				restore_flags(flags);
				return -EBUSY;
			}
			if(dev->slave!=NULL)
			{
				restore_flags(flags);
				return -EBUSY;
			}
			if(slave->flags&IFF_SLAVE)
			{
				restore_flags(flags);
				return -EBUSY;
			}
			dev->slave=slave;
			slave->flags|=IFF_SLAVE;
			dev->flags|=IFF_MASTER;
			restore_flags(flags);
			ret=0;
		}
		break;
#endif			

		case SIOCADDMULTI:
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
			return 0;

		case SIOCDELMULTI:
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
			return 0;
		/*
		 *	Unknown or private ioctl
		 */

		default:
			if((getset >= SIOCDEVPRIVATE) &&
			   (getset <= (SIOCDEVPRIVATE + 15))) {
				if(dev->do_ioctl==NULL)
					return -EOPNOTSUPP;
				ret=dev->do_ioctl(dev, &ifr, getset);
				memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
				break;
			}
			
			ret = -EINVAL;
	}
	return(ret);
}
Example #24
static int exynos_tmu_init(struct tmu_info *info)
{
	struct tmu_data *data = info->dev->platform_data;
	unsigned int te_temp, con;
	unsigned int temp_throttle, temp_warning, temp_trip;
	unsigned int hw_temp_trip;
	unsigned int rising_thr = 0, cooling_thr = 0;

	/* must reload for using efuse value at EXYNOS4212 */
	__raw_writel(TRIMINFO_RELOAD, info->tmu_base + TRIMINFO_CON);

	/* get the compensation parameter */
	te_temp = __raw_readl(info->tmu_base + TRIMINFO);
	info->te1 = te_temp & TRIM_INFO_MASK;
	info->te2 = ((te_temp >> 8) & TRIM_INFO_MASK);

	if ((EFUSE_MIN_VALUE > info->te1) || (info->te1 > EFUSE_MAX_VALUE)
		||  (info->te2 != 0))
		info->te1 = data->efuse_value;

	/*Get rising Threshold and Set interrupt level*/
	temp_throttle = data->ts.start_throttle
			+ info->te1 - TMU_DC_VALUE;
	temp_warning = data->ts.start_warning
			+ info->te1 - TMU_DC_VALUE;
	temp_trip =  data->ts.start_tripping
			+ info->te1 - TMU_DC_VALUE;
	hw_temp_trip = data->ts.start_hw_tripping
			+ info->te1 - TMU_DC_VALUE;

	rising_thr = (temp_throttle | (temp_warning<<8) | \
		(temp_trip<<16) | (hw_temp_trip<<24));

	__raw_writel(rising_thr, info->tmu_base + THD_TEMP_RISE);

#if defined(CONFIG_TC_VOLTAGE)
	/* Get set temperature for tc_voltage and set falling interrupt
	 * trigger level
	*/
	cooling_thr = data->ts.start_tc
			+ info->te1 - TMU_DC_VALUE;
#endif
	__raw_writel(cooling_thr, info->tmu_base + THD_TEMP_FALL);

	/* Set TMU status */
	info->tmu_state = TMU_STATUS_INIT;

	/* Set frequency level */
	exynos_cpufreq_get_level(data->cpulimit.throttle_freq,
				&info->throttle_freq);
	exynos_cpufreq_get_level(data->cpulimit.warning_freq,
				&info->warning_freq);
	/* Map auto_refresh_rate of normal & tq0 mode */
	info->auto_refresh_tq0 =
		get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_TQ0);
	info->auto_refresh_normal =
		get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_NORMAL);

	/* To poll current temp, set sampling rate */
	info->sampling_rate  = usecs_to_jiffies(200 * 1000);

#if defined(CONFIG_TC_VOLTAGE) /* Temperature compensated voltage */
	if (exynos_find_cpufreq_level_by_volt(data->temp_compensate.arm_volt,
		&info->cpulevel_tc) < 0) {
		pr_err("cpufreq_get_level error\n");
		return  -EINVAL;
	}
#ifdef CONFIG_BUSFREQ_OPP
	/* To lock bus frequency in OPP mode */
	info->bus_dev = dev_get("exynos-busfreq");
	if (!info->bus_dev) {
		pr_err("Failed to get_dev\n");
		return -EINVAL;
	}
	if (exynos4x12_find_busfreq_by_volt(data->temp_compensate.bus_volt,
		&info->busfreq_tc)) {
		pr_err("get_busfreq_value error\n");
	}
#endif
	if (mali_voltage_lock_init()) {
		pr_err("Failed to initialize mail voltage lock.\n");
		return -EINVAL;
	}

	pr_info("%s: cpufreq_level[%d], busfreq_value[%d]\n",
		 __func__, info->cpulevel_tc, info->busfreq_tc);
#endif
	/* Need to do initial register setting after getting parameter info */
	/* [28:23] vref [11:8] slope - Tuning parameter */
	__raw_writel(data->slope, info->tmu_base + TMU_CON);

	__raw_writel((CLEAR_RISE_INT | CLEAR_FALL_INT),	\
				info->tmu_base + INTCLEAR);

	/* TMU core enable and HW tripping enable */
	con = __raw_readl(info->tmu_base + TMU_CON);
	con &= ~(HW_TRIP_MODE);
	con |= (HW_TRIPPING_EN | MUX_ADDR_VALUE<<20 | CORE_EN);
	__raw_writel(con, info->tmu_base + TMU_CON);

	/* Because the temperature sensing time is approximately 940 us,
	 * the first valid sample can be read 1 ms after the TMU is enabled.
	 */
	mdelay(1);

	te_temp = __raw_readl(S5P_PMU_PS_HOLD_CONTROL);
	te_temp |= S5P_PS_HOLD_EN;
	__raw_writel(te_temp, S5P_PMU_PS_HOLD_CONTROL);

	/*LEV0 LEV1 LEV2 interrupt enable */
	__raw_writel(INTEN_RISE0 | INTEN_RISE1 | INTEN_RISE2,	\
		     info->tmu_base + INTEN);

#if defined(CONFIG_TC_VOLTAGE)
	te_temp = __raw_readl(info->tmu_base + INTEN);
	te_temp |= INTEN_FALL0;
	__raw_writel(te_temp, info->tmu_base + INTEN);

	/* S/W workaround for fast service when the interrupt has not occurred,
	 * e.g. the current temp is lower than the TC interrupt temperature
	 * or the current temp keeps increasing.
	 */
	if (get_cur_temp(info) <= data->ts.start_tc) {
		disable_irq_nosync(info->irq);
		if (exynos_tc_volt(info, 1) < 0)
			pr_err("%s\n", __func__);

		info->tmu_state = TMU_STATUS_TC;
		already_limit = 1;
		queue_delayed_work_on(0, tmu_monitor_wq,
				&info->polling, usecs_to_jiffies(1000));
	}
#endif
	return 0;
}
Example #25
static int fimg2d_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct fimg2d_platdata *pdata;
	int ret;

	pdata = to_fimg2d_plat(&pdev->dev);
	if (!pdata) {
		printk(KERN_ERR "FIMG2D failed to get platform data\n");
		ret = -ENOMEM;
		goto err_plat;
	}

	/* global structure */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		printk(KERN_ERR "FIMG2D failed to allocate memory for controller\n");
		ret = -ENOMEM;
		goto err_plat;
	}

	/* setup global info */
	ret = fimg2d_setup_controller(info);
	if (ret) {
		printk(KERN_ERR "FIMG2D failed to setup controller\n");
		goto err_setup;
	}
	info->dev = &pdev->dev;

	/* memory region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		printk(KERN_ERR "FIMG2D failed to get resource\n");
		ret = -ENOENT;
		goto err_res;
	}

	info->mem = request_mem_region(res->start, resource_size(res),
					pdev->name);
	if (!info->mem) {
		printk(KERN_ERR "FIMG2D failed to request memory region\n");
		ret = -ENOMEM;
		goto err_region;
	}

	/* ioremap */
	info->regs = ioremap(res->start, resource_size(res));
	if (!info->regs) {
		printk(KERN_ERR "FIMG2D failed to ioremap for SFR\n");
		ret = -ENOENT;
		goto err_map;
	}
	fimg2d_debug("device name: %s base address: 0x%lx\n",
			pdev->name, (unsigned long)res->start);

	/* irq */
	info->irq = platform_get_irq(pdev, 0);
	if (!info->irq) {
		printk(KERN_ERR "FIMG2D failed to get irq resource\n");
		ret = -ENOENT;
		goto err_map;
	}
	fimg2d_debug("irq: %d\n", info->irq);

	ret = request_irq(info->irq, fimg2d_irq, IRQF_DISABLED, pdev->name, info);
	if (ret) {
		printk(KERN_ERR "FIMG2D failed to request irq\n");
		ret = -ENOENT;
		goto err_irq;
	}

	ret = fimg2d_clk_setup(info);
	if (ret) {
		printk(KERN_ERR "FIMG2D failed to setup clk\n");
		ret = -ENOENT;
		goto err_clk;
	}

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_enable(info->dev);
	fimg2d_debug("enable runtime pm\n");
#endif

#ifdef CONFIG_BUSFREQ_OPP
#if defined(CONFIG_CPU_EXYNOS4412)
	/* To lock bus frequency in OPP mode */
	info->bus_dev = dev_get("exynos-busfreq");
#endif
#endif

	s5p_sysmmu_set_fault_handler(info->dev, fimg2d_sysmmu_fault_handler);
	fimg2d_debug("register sysmmu page fault handler\n");

	/* misc register */
	ret = misc_register(&fimg2d_dev);
	if (ret) {
		printk(KERN_ERR "FIMG2D failed to register misc driver\n");
		goto err_reg;
	}

	printk(KERN_INFO "Samsung Graphics 2D driver, (c) 2011 Samsung Electronics\n");
	return 0;

err_reg:
	fimg2d_clk_release(info);

err_clk:
	free_irq(info->irq, NULL);

err_irq:
	iounmap(info->regs);

err_map:
	kfree(info->mem);

err_region:
	release_resource(info->mem);

err_res:
	destroy_workqueue(info->work_q);

err_setup:
	kfree(info);

err_plat:
	return ret;
}
Example #26
static irqreturn_t exynos4x12_tmu_irq_handler(int irq, void *id)
{
	struct s5p_tmu_info *info = id;
	unsigned int status;

	disable_irq_nosync(irq);

	status = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT) & 0x1FFFF;
	pr_info("EXYNOS4x12_tmu interrupt: INTSTAT = 0x%08x\n", status);

	/* To handle multiple pending interrupts,
	 * interrupts caused by high temperature are serviced first.
	 */
#if defined(CONFIG_TC_VOLTAGE)
	if (status & INTSTAT_FALL0) {
		info->tmu_state = TMU_STATUS_TC;

		__raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
		exynos_interrupt_enable(info, 0);
	} else if (status & INTSTAT_RISE2) {
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR_RISE2, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
#else
	if (status & INTSTAT_RISE2) {
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR_RISE2, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
#endif
	} else if (status & INTSTAT_RISE1) {
		info->tmu_state = TMU_STATUS_WARNING;
		__raw_writel(INTCLEAR_RISE1, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else if (status & INTSTAT_RISE0) {
		info->tmu_state = TMU_STATUS_THROTTLED;
		__raw_writel(INTCLEAR_RISE0, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else {
		pr_err("%s: interrupt error\n", __func__);
		__raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
		queue_delayed_work_on(0, tmu_monitor_wq,
			&info->polling, info->sampling_rate / 2);
		return -ENODEV;
	}

	/* read current temperature & save */
	info->last_temperature =  get_curr_temp(info);

	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
		info->sampling_rate);

	return IRQ_HANDLED;
}

static irqreturn_t exynos4210_tmu_irq_handler(int irq, void *id)
{
	struct s5p_tmu_info *info = id;
	unsigned int status;

	disable_irq_nosync(irq);

	status = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT);
	pr_info("EXYNOS4212_tmu interrupt: INTSTAT = 0x%08x\n", status);

	/* To handle multiple pending interrupts,
	 * interrupts caused by high temperature are serviced first.
	 */
	if (status & TMU_INTSTAT2) {
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR2, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else if (status & TMU_INTSTAT1) {
		info->tmu_state = TMU_STATUS_WARNING;
		__raw_writel(INTCLEAR1, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else if (status & TMU_INTSTAT0) {
		info->tmu_state = TMU_STATUS_THROTTLED;
		__raw_writel(INTCLEAR0, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else {
		pr_err("%s: interrupt error\n", __func__);
		__raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
		queue_delayed_work_on(0, tmu_monitor_wq,
			&info->polling, info->sampling_rate / 2);
		return -ENODEV;
	}

	/* read current temperature & save */
	info->last_temperature =  get_curr_temp(info);

	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
		info->sampling_rate);

	return IRQ_HANDLED;
}

#ifdef CONFIG_TMU_SYSFS
static ssize_t s5p_tmu_show_curr_temp(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct s5p_tmu_info *info = dev_get_drvdata(dev);
	unsigned int curr_temp;

	curr_temp = get_curr_temp(info);
	curr_temp *= 10;
	pr_info("curr temp = %d\n", curr_temp);

	return sprintf(buf, "%d\n", curr_temp);
}
static DEVICE_ATTR(curr_temp, S_IRUGO, s5p_tmu_show_curr_temp, NULL);
#endif

static int __devinit s5p_tmu_probe(struct platform_device *pdev)
{
	struct s5p_tmu_info *info;
	struct s5p_platform_tmu *pdata;
	struct resource *res;
	unsigned int mask = (enable_mask & ENABLE_DBGMASK);
	int ret = 0;

	pr_debug("%s: probe=%p\n", __func__, pdev);

	info = kzalloc(sizeof(struct s5p_tmu_info), GFP_KERNEL);
	if (!info) {
		dev_err(&pdev->dev, "failed to alloc memory!\n");
		ret = -ENOMEM;
		goto err_nomem;
	}
	platform_set_drvdata(pdev, info);

	info->dev = &pdev->dev;
	info->tmu_state = TMU_STATUS_INIT;

	/* set cpufreq limit level at 1st_throttle & 2nd throttle */
	pdata = info->dev->platform_data;
	if (pdata->cpufreq.limit_1st_throttle)
		exynos_cpufreq_get_level(pdata->cpufreq.limit_1st_throttle,
				&info->cpufreq_level_1st_throttle);

	if (pdata->cpufreq.limit_2nd_throttle)
		exynos_cpufreq_get_level(pdata->cpufreq.limit_2nd_throttle,
				&info->cpufreq_level_2nd_throttle);

	pr_info("@@@ %s: cpufreq_limit: 1st_throttle: %u, 2nd_throttle = %u\n",
		__func__, info->cpufreq_level_1st_throttle,
		 info->cpufreq_level_2nd_throttle);

#if defined(CONFIG_TC_VOLTAGE) /* Temperature compensated voltage */
	if (exynos_find_cpufreq_level_by_volt(pdata->temp_compensate.arm_volt,
		&info->cpulevel_tc) < 0) {
		dev_err(&pdev->dev, "cpufreq_get_level error\n");
		ret = -EINVAL;
		goto err_nores;
	}
#ifdef CONFIG_BUSFREQ_OPP
	/* To lock bus frequency in OPP mode */
	info->bus_dev = dev_get("exynos-busfreq");
	if (!info->bus_dev) {
		dev_err(&pdev->dev, "Failed to get_dev\n");
		ret = -EINVAL;
		goto err_nores;
	}
	if (exynos4x12_find_busfreq_by_volt(pdata->temp_compensate.bus_volt,
		&info->busfreq_tc)) {
		dev_err(&pdev->dev, "get_busfreq_value error\n");
		ret = -EINVAL;
		goto err_nores;
	}
#endif
	pr_info("%s: cpufreq_level[%u], busfreq_value[%u]\n",
		 __func__, info->cpulevel_tc, info->busfreq_tc);
#endif
	/* Map auto_refresh_rate of normal & tq0 mode */
	info->auto_refresh_tq0 =
		get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_TQ0);
	info->auto_refresh_normal =
		get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_NORMAL);

	/* To poll current temp, set sampling rate to ONE second sampling */
	info->sampling_rate  = usecs_to_jiffies(1000 * 1000);
	/* 10 sec monitoring */
	info->monitor_period = usecs_to_jiffies(10000 * 1000);

	/* support test mode */
	if (mask & ENABLE_TEST_MODE)
		set_temperature_params(info);
	else
		print_temperature_params(info);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get memory region resource\n");
		ret = -ENODEV;
		goto err_nores;
	}

	info->ioarea = request_mem_region(res->start,
			res->end-res->start + 1, pdev->name);
	if (!(info->ioarea)) {
		dev_err(&pdev->dev, "failed to reserve memory region\n");
		ret = -EBUSY;
		goto err_nores;
	}

	info->tmu_base = ioremap(res->start, (res->end - res->start) + 1);
	if (!(info->tmu_base)) {
		dev_err(&pdev->dev, "failed ioremap()\n");
		ret = -ENOMEM;
		goto err_nomap;
	}
	tmu_monitor_wq = create_freezable_workqueue(dev_name(&pdev->dev));
	if (!tmu_monitor_wq) {
		pr_info("Creation of tmu_monitor_wq failed\n");
		ret = -ENOMEM;
		goto err_wq;
	}

	/* To support periodic temperature monitoring */
	if (mask & ENABLE_TEMP_MON) {
		INIT_DELAYED_WORK_DEFERRABLE(&info->monitor,
					exynos4_poll_cur_temp);
		queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor,
			info->monitor_period);
	}
	INIT_DELAYED_WORK_DEFERRABLE(&info->polling, exynos4_handler_tmu_state);

	info->irq = platform_get_irq(pdev, 0);
	if (info->irq < 0) {
		dev_err(&pdev->dev, "no irq for thermal %d\n", info->irq);
		ret = -EINVAL;
		goto err_irq;
	}

	if (soc_is_exynos4210())
		ret = request_irq(info->irq, exynos4210_tmu_irq_handler,
				IRQF_DISABLED,  "s5p-tmu interrupt", info);
	else
		ret = request_irq(info->irq, exynos4x12_tmu_irq_handler,
				IRQF_DISABLED,  "s5p-tmu interrupt", info);

	if (ret) {
		dev_err(&pdev->dev, "request_irq is failed. %d\n", ret);
		goto err_irq;
	}

	ret = device_create_file(&pdev->dev, &dev_attr_temperature);
	if (ret != 0) {
		pr_err("Failed to create temperatue file: %d\n", ret);
		goto err_sysfs_file1;
	}

	ret = device_create_file(&pdev->dev, &dev_attr_tmu_state);
	if (ret != 0) {
		pr_err("Failed to create tmu_state file: %d\n", ret);
		goto err_sysfs_file2;
	}
	ret = device_create_file(&pdev->dev, &dev_attr_lot_id);
	if (ret != 0) {
		pr_err("Failed to create lot id file: %d\n", ret);
		goto err_sysfs_file3;
	}

	ret = tmu_initialize(pdev);
	if (ret)
		goto err_init;

#ifdef CONFIG_TMU_SYSFS
	ret = device_create_file(&pdev->dev, &dev_attr_curr_temp);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to create sysfs group\n");
		goto err_init;
	}
#endif

#ifdef CONFIG_TMU_DEBUG
	ret = device_create_file(&pdev->dev, &dev_attr_print_state);
	if (ret) {
		dev_err(&pdev->dev, "Failed to create tmu sysfs group\n\n");
		return ret;
	}
#endif

#if defined(CONFIG_TC_VOLTAGE)
	/* S/W workaround for fast service when the interrupt has not occurred,
	 * e.g. the current temp is lower than the TC interrupt temperature
	 * or the current temp keeps increasing.
	 */
	if (get_curr_temp(info) <= pdata->ts.start_tc) {
		if (exynos_tc_volt(info, 1) < 0)
			pr_err("TMU: lock error!\n");
	}
#if defined(CONFIG_VIDEO_MALI400MP)
	if (mali_voltage_lock_init())
		pr_err("Failed to initialize mail voltage lock.\n");
#endif
#endif

	/* initialize tmu_state */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
		info->sampling_rate);

	return ret;

err_init:
	device_remove_file(&pdev->dev, &dev_attr_lot_id);

err_sysfs_file3:
	device_remove_file(&pdev->dev, &dev_attr_tmu_state);

err_sysfs_file2:
	device_remove_file(&pdev->dev, &dev_attr_temperature);

err_sysfs_file1:
	if (info->irq >= 0)
		free_irq(info->irq, info);

err_irq:
	destroy_workqueue(tmu_monitor_wq);

err_wq:
	iounmap(info->tmu_base);

err_nomap:
	release_resource(info->ioarea);
	kfree(info->ioarea);

err_nores:
	kfree(info);
	info = NULL;

err_nomem:
	dev_err(&pdev->dev, "initialization failed.\n");

	return ret;
}

static int __devinit s5p_tmu_remove(struct platform_device *pdev)
{
	struct s5p_tmu_info *info = platform_get_drvdata(pdev);

	cancel_delayed_work(&info->polling);
	destroy_workqueue(tmu_monitor_wq);

	device_remove_file(&pdev->dev, &dev_attr_temperature);
	device_remove_file(&pdev->dev, &dev_attr_tmu_state);

	if (info->irq >= 0)
		free_irq(info->irq, info);

	iounmap(info->tmu_base);

	release_resource(info->ioarea);
	kfree(info->ioarea);

	kfree(info);
	info = NULL;

	pr_info("%s is removed\n", dev_name(&pdev->dev));
	return 0;
}

#ifdef CONFIG_PM
static int s5p_tmu_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct s5p_tmu_info *info = platform_get_drvdata(pdev);

	if (!info)
		return -EAGAIN;

	/* save register value */
	info->reg_save[0] = __raw_readl(info->tmu_base + EXYNOS4_TMU_CONTROL);
	info->reg_save[1] = __raw_readl(info->tmu_base + EXYNOS4_TMU_SAMPLING_INTERNAL);
	info->reg_save[2] = __raw_readl(info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE0);
	info->reg_save[3] = __raw_readl(info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE1);
	info->reg_save[4] = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTEN);

	if (soc_is_exynos4210()) {
		info->reg_save[5] =
			__raw_readl(info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP);
		info->reg_save[6] =
			 __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0);
		info->reg_save[7] =
			 __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1);
		info->reg_save[8] =
			 __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2);
		info->reg_save[9] =
			 __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3);
	} else {
		info->reg_save[5] =
			__raw_readl(info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE);
#if defined(CONFIG_TC_VOLTAGE)
		info->reg_save[6] = __raw_readl(info->tmu_base
					+ EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL);
#endif
	}
	disable_irq(info->irq);

	return 0;
}

static int s5p_tmu_resume(struct platform_device *pdev)
{
	struct s5p_tmu_info *info = platform_get_drvdata(pdev);
	struct s5p_platform_tmu *data;

	if (!info || !(info->dev))
		return -EAGAIN;

	data = info->dev->platform_data;

	/* restore tmu register value */
	__raw_writel(info->reg_save[0], info->tmu_base + EXYNOS4_TMU_CONTROL);
	__raw_writel(info->reg_save[1],
			info->tmu_base + EXYNOS4_TMU_SAMPLING_INTERNAL);
	__raw_writel(info->reg_save[2],
			info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE0);
	__raw_writel(info->reg_save[3],
			info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE1);

	if (soc_is_exynos4210()) {
		__raw_writel(info->reg_save[5],
			info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP);
		__raw_writel(info->reg_save[6],
			info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0);
		__raw_writel(info->reg_save[7],
			info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1);
		__raw_writel(info->reg_save[8],
			info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2);
		__raw_writel(info->reg_save[9],
			info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3);
	} else {
		__raw_writel(info->reg_save[5],
			info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE);
#if defined(CONFIG_TC_VOLTAGE)
		__raw_writel(info->reg_save[6],
			info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL);
#endif
	}
	__raw_writel(info->reg_save[4],
			info->tmu_base + EXYNOS4_TMU_INTEN);

#if defined(CONFIG_TC_VOLTAGE)
	/* S/W workaround for fast service when the interrupt has not occurred,
	 * e.g. the current temp is lower than the TC interrupt temperature
	 * or the current temp keeps increasing.
	 */
	mdelay(1);
	if (get_curr_temp(info) <= data->ts.start_tc) {
		if (exynos_tc_volt(info, 1) < 0)
			pr_err("TMU: lock error!\n");
	}
#endif
	/* Find out tmu_state after wakeup */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, 0);

	return 0;
}
#else
#define s5p_tmu_suspend	NULL
#define s5p_tmu_resume	NULL
#endif

static struct platform_driver s5p_tmu_driver = {
	.probe		= s5p_tmu_probe,
	.remove		= s5p_tmu_remove,
	.suspend	= s5p_tmu_suspend,
	.resume		= s5p_tmu_resume,
	.driver		= {
		.name   = "s5p-tmu",
		.owner  = THIS_MODULE,
	},
};

static int __init s5p_tmu_driver_init(void)
{
	return platform_driver_register(&s5p_tmu_driver);
}

static void __exit s5p_tmu_driver_exit(void)
{
	platform_driver_unregister(&s5p_tmu_driver);
}
Example #27
static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void *arg)
{
	struct ifreq ifr;
	struct ec_device *edev;
	struct device *dev;
	unsigned long flags;
	struct sockaddr_ec *sec;

	/*
	 *	Fetch the caller's info block into kernel space
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	if ((dev = dev_get(ifr.ifr_name)) == NULL) 
		return -ENODEV;

	sec = (struct sockaddr_ec *)&ifr.ifr_addr;

	switch (cmd)
	{
	case SIOCSIFADDR:
		spin_lock_irqsave(&edevlist_lock, flags);
		edev = __edev_get(dev);
		if (edev == NULL)
		{
			/* Magic up a new one. */
			edev = kmalloc(sizeof(struct ec_device), GFP_KERNEL);
			if (edev == NULL) {
				printk("af_ec: memory squeeze.\n");
				spin_unlock_irqrestore(&edevlist_lock, flags);
				return -ENOMEM;
			}
			memset(edev, 0, sizeof(struct ec_device));
			edev->dev = dev;
			edev->next = edevlist;
			edevlist = edev;
		}
		edev->station = sec->addr.station;
		edev->net = sec->addr.net;
		spin_unlock_irqrestore(&edevlist_lock, flags);
		return 0;

	case SIOCGIFADDR:
		spin_lock_irqsave(&edevlist_lock, flags);
		edev = __edev_get(dev);
		if (edev == NULL)
		{
			spin_unlock_irqrestore(&edevlist_lock, flags);
			return -ENODEV;
		}
		memset(sec, 0, sizeof(struct sockaddr_ec));
		sec->addr.station = edev->station;
		sec->addr.net = edev->net;
		sec->sec_family = AF_ECONET;
		spin_unlock_irqrestore(&edevlist_lock, flags);
		if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
			return -EFAULT;
		return 0;
	}

	return -EINVAL;
}
Example #28
static int packet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	char name[15];
	struct device *dev;
	
	/*
	 *	Check legality
	 */
	 
	if(addr_len!=sizeof(struct sockaddr))
		return -EINVAL;
	strncpy(name,uaddr->sa_data,14);
	name[14]=0;
	
	/*
	 *	Lock the device chain while we sanity check
	 *	the bind request.
	 */
	 
	dev_lock_list();
	dev=dev_get(name);
	if(dev==NULL)
	{
		dev_unlock_list();
		return -ENODEV;
	}
	
	if(!(dev->flags&IFF_UP))
	{
		dev_unlock_list();
		return -ENETDOWN;
	}
	
	/*
	 *	Perform the request.
	 */
	 
	memcpy(sk->protinfo.af_packet.device_name,name,15);
	
	/*
	 *	Rewrite an existing hook if present.
	 */
	 
	if(sk->protinfo.af_packet.prot_hook)
	{
		dev_remove_pack(sk->protinfo.af_packet.prot_hook);
		sk->protinfo.af_packet.prot_hook->dev=dev;
		sk->protinfo.af_packet.bound_dev=dev;
		dev_add_pack(sk->protinfo.af_packet.prot_hook);
	}
	else
	{
		int err=packet_attach(sk, dev);
		if(err)
		{
			dev_unlock_list();
			return err;
		}
	}
	/*
	 *	Now the notifier is set up right this lot is safe.
	 */
	dev_unlock_list();
	return 0;
}
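
From user space, the bind above is reached with an old-style SOCK_PACKET socket whose sockaddr carries only the interface name in sa_data. The sketch below is illustrative; "eth0" and ETH_P_ALL are example values, and the historical AF_INET/SOCK_PACKET combination is an assumption about how this path was driven.

/* Hedged sketch: open and bind a SOCK_PACKET socket to one interface. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/if_ether.h>

static int open_packet_socket(const char *ifname)
{
	struct sockaddr sa;
	int fd = socket(AF_INET, SOCK_PACKET, htons(ETH_P_ALL));

	if (fd < 0)
		return -1;

	memset(&sa, 0, sizeof(sa));
	strncpy(sa.sa_data, ifname, sizeof(sa.sa_data) - 1);	/* device name, as packet_bind expects */

	if (bind(fd, &sa, sizeof(sa)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
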
Example #29
static int packet_sendmsg(struct sock *sk, struct msghdr *msg, int len,
	      int noblock, int flags)
{
	struct sk_buff *skb;
	struct device *dev;
	struct sockaddr_pkt *saddr=(struct sockaddr_pkt *)msg->msg_name;
	unsigned short proto=0;

	/*
	 *	Check the flags. 
	 */

	if (flags) 
		return(-EINVAL);

	/*
	 *	Get and verify the address. 
	 */
	 
	if (saddr) 
	{
		if (msg->msg_namelen < sizeof(struct sockaddr)) 
			return(-EINVAL);
		if (msg->msg_namelen==sizeof(struct sockaddr_pkt))
			proto=saddr->spkt_protocol;
	} 
	else
		return(-ENOTCONN);	/* SOCK_PACKET must be sent giving an address */
	
	/*
	 *	Find the device first to size check it 
	 */

	saddr->spkt_device[13] = 0;
	dev = dev_get(saddr->spkt_device);
	if (dev == NULL) 
	{
		return(-ENODEV);
  	}
	
	/*
	 *	You may not queue a frame bigger than the mtu. This is the lowest level
	 *	raw protocol and you must do your own fragmentation at this level.
	 */
	 
	if(len>dev->mtu+dev->hard_header_len)
  		return -EMSGSIZE;

	skb = sock_wmalloc(sk, len, 0, GFP_KERNEL);

	/*
	 *	If the write buffer is full, then tough. At this level the user gets to
	 *	deal with the problem - do your own algorithmic backoffs. That's far
	 *	more flexible.
	 */
	 
	if (skb == NULL) 
	{
		return(-ENOBUFS);
	}
	
	/*
	 *	Fill it in 
	 */
	 
	skb->sk = sk;
	skb->free = 1;
	memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
	skb->arp = 1;		/* No ARP needs doing on this (complete) frame */
	skb->protocol = proto;

	/*
	 *	Now send it
	 */

	if (dev->flags & IFF_UP) 
		dev_queue_xmit(skb, dev, sk->priority);
	else
		kfree_skb(skb, FREE_WRITE);
	return(len);
}
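
Sending through this path also names the outgoing device in the destination sockaddr, which packet_sendmsg resolves with dev_get. A hypothetical user-space call is sketched below; the frame contents and "eth0" are illustrative, and because msg_namelen equals sizeof(struct sockaddr) rather than sizeof(struct sockaddr_pkt), spkt_protocol stays 0 exactly as in the code above.

/* Hedged sketch: transmit one raw frame on a SOCK_PACKET socket. */
#include <string.h>
#include <sys/socket.h>

static int send_raw_frame(int fd, const void *frame, int len)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	strcpy(sa.sa_data, "eth0");	/* looked up via dev_get() in the kernel */

	return sendto(fd, frame, len, 0, &sa, sizeof(sa));
}
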
Example #30
int ip_fw_ctl(int stage, void *m, int len)
{
	int cmd, fwtype;

	cmd = stage & IP_FW_COMMAND;
	fwtype = (stage & IP_FW_TYPE) >> IP_FW_SHIFT;

	if ( cmd == IP_FW_FLUSH )
	{
		free_fw_chain(chains[fwtype]);
		return(0);
	}  

	if ( cmd == IP_FW_ZERO )
	{
		zero_fw_chain(*chains[fwtype]);
		return(0);
	}  

	if ( cmd == IP_FW_POLICY )
	{
		int *tmp_policy_ptr;
		tmp_policy_ptr=(int *)m;
		*policies[fwtype] = *tmp_policy_ptr;
		return 0;
	}

	if ( cmd == IP_FW_CHECK )
	{
		struct device *viadev;
		struct ip_fwpkt *ipfwp;
		struct iphdr *ip;

		if ( len != sizeof(struct ip_fwpkt) )
		{
#ifdef DEBUG_IP_FIREWALL
			printk("ip_fw_ctl: length=%d, expected %d\n",
				len, sizeof(struct ip_fwpkt));
#endif
			return( EINVAL );
		}

	 	ipfwp = (struct ip_fwpkt *)m;
	 	ip = &(ipfwp->fwp_iph);

		if ( !(viadev = dev_get(ipfwp->fwp_vianame)) ) {
#ifdef DEBUG_IP_FIREWALL
			printk("ip_fw_ctl: invalid device \"%s\"\n", ipfwp->fwp_vianame);
#endif
			return(EINVAL);
		} else if ( viadev->pa_addr != ipfwp->fwp_via.s_addr ) {
#ifdef DEBUG_IP_FIREWALL
			printk("ip_fw_ctl: device \"%s\" has another IP address\n",
				ipfwp->fwp_vianame);
#endif
			return(EINVAL);
		} else if ( ip->ihl != sizeof(struct iphdr) / sizeof(int)) {
#ifdef DEBUG_IP_FIREWALL
			printk("ip_fw_ctl: ip->ihl=%d, want %d\n",ip->ihl,
					sizeof(struct iphdr)/sizeof(int));
#endif
			return(EINVAL);
		}

		switch (ip_fw_chk(ip, viadev, NULL, *chains[fwtype],
				*policies[fwtype], IP_FW_MODE_CHK))
		{
			case FW_ACCEPT:
				return(0);
	    		case FW_REDIRECT:
				return(ECONNABORTED);
	    		case FW_MASQUERADE:
				return(ECONNRESET);
	    		case FW_REJECT:
				return(ECONNREFUSED);
			default: /* FW_BLOCK */
				return(ETIMEDOUT);
		}
	}

	if ( cmd == IP_FW_MASQ_TIMEOUTS )
	{
#ifdef CONFIG_IP_MASQUERADE
		struct ip_fw_masq *masq;

		if ( len != sizeof(struct ip_fw_masq) )
		{
#ifdef DEBUG_IP_FIREWALL
			printk("ip_fw_ctl (masq): length %d, expected %d\n",
				len, sizeof(struct ip_fw_masq));

#endif
			return( EINVAL );
		}

		masq = (struct ip_fw_masq *) m;

		if (masq->tcp_timeout)
		{
			ip_masq_expire->tcp_timeout = masq->tcp_timeout;
		}

		if (masq->tcp_fin_timeout)
		{
			ip_masq_expire->tcp_fin_timeout = masq->tcp_fin_timeout;
		}

		if (masq->udp_timeout)
		{
			ip_masq_expire->udp_timeout = masq->udp_timeout;
		}

		return 0;
#else
		return( EINVAL );
#endif
	}

/*
 *	Here we are really working hard: adding new elements
 *	to the blocking/forwarding chains or deleting them.
 */

	if ( cmd == IP_FW_INSERT || cmd == IP_FW_APPEND || cmd == IP_FW_DELETE )
	{
		struct ip_fw *frwl;
		int fwtype;

		frwl=check_ipfw_struct(m,len);
		if (frwl==NULL)
			return (EINVAL);
		fwtype = (stage & IP_FW_TYPE) >> IP_FW_SHIFT;
		
		switch (cmd) 
		{
			case IP_FW_INSERT:
				return(insert_in_chain(chains[fwtype],frwl,len));
			case IP_FW_APPEND:
				return(append_to_chain(chains[fwtype],frwl,len));
			case IP_FW_DELETE:
				return(del_from_chain(chains[fwtype],frwl));
			default:
			/*
	 		 *	Should be panic but... (Why are BSD people panic obsessed ??)
			 */
#ifdef DEBUG_IP_FIREWALL
				printk("ip_fw_ctl:  unknown request %d\n",stage);
#endif
				return(EINVAL);
		}
	} 

#ifdef DEBUG_IP_FIREWALL
	printk("ip_fw_ctl:  unknown request %d\n",stage);
#endif
	return(EINVAL);
}