Esempio n. 1
0
/*
 * Drop one reference to multicast group `addr' on `in_dev'.
 * When the last user goes away the entry is unlinked, the IGMP
 * leave processing is run, and the storage is released.
 *
 * Returns 0 if the group was found, -ESRCH otherwise.
 */
int ip_mc_dec_group(struct in_device *in_dev, u32 addr)
{
	struct ip_mc_list **link = &in_dev->mc_list;
	struct ip_mc_list *im;

	while ((im = *link) != NULL) {
		if (im->multiaddr == addr) {
			if (--im->users == 0) {
				/* Unlink first, then wait for bottom halves
				 * so none can still be looking at it. */
				*link = im->next;
				synchronize_bh();

				igmp_group_dropped(im);
				if (in_dev->dev->flags & IFF_UP)
					ip_rt_multicast_event(in_dev);
				kfree_s(im, sizeof(*im));
			}
			return 0;
		}
		link = &im->next;
	}
	return -ESRCH;	/* group was never joined on this device */
}
Esempio n. 2
0
/*
 * Remove one reference to hardware multicast address addr/alen from
 * dev's multicast list.
 *
 * glbl: non-zero when dropping a "global" reference; dmi_gusers is
 *       cleared, and if it was already zero the break below falls
 *       through to the err = -ENOENT assignment.
 *
 * Returns 0 on success, -ENOENT when no matching entry exists.
 * The whole walk runs inside start_bh_atomic()/end_bh_atomic() so
 * bottom halves cannot see the list in a half-modified state.
 */
int dev_mc_delete(struct device *dev, void *addr, int alen, int glbl)
{
	int err = 0;
	struct dev_mc_list *dmi, **dmip;

	start_bh_atomic();
	for (dmip=&dev->mc_list; (dmi=*dmip)!=NULL; dmip=&dmi->next) {
		/*
		 *	Find the entry we want to delete. The device could
		 *	have variable length entries so check these too.
		 */
		if (memcmp(dmi->dmi_addr,addr,dmi->dmi_addrlen)==0 && alen==dmi->dmi_addrlen) {
			if (glbl) {
				int old_glbl = dmi->dmi_gusers;
				dmi->dmi_gusers = 0;
				if (old_glbl == 0)
					break;
			}
			/* Other users still hold the entry: just drop one ref. */
			if(--dmi->dmi_users)
				goto done;

			/*
			 *	Last user. So delete the entry.
			 */
			*dmip = dmi->next;
			dev->mc_count--;
			kfree_s(dmi,sizeof(*dmi));
			/*
			 *	We have altered the list, so the card
			 *	loaded filter is now wrong. Fix it
			 */
			end_bh_atomic();
			dev_mc_upload(dev);
			return 0;
		}
	}
	err = -ENOENT;
done:
	end_bh_atomic();
	return err;
}
Esempio n. 3
0
/*
 * Remove the 802.2 client registered for SAP `type' from p8022_list
 * and free it.  At most one matching entry is removed.
 * Runs with interrupts disabled since the list is also walked from
 * interrupt context.
 */
void unregister_8022_client(unsigned char type)
{
	struct datalink_proto **pp = &p8022_list;
	struct datalink_proto *proto;
	unsigned long flags;

	save_flags(flags);
	cli();

	for (; (proto = *pp) != NULL; pp = &proto->next) {
		if (proto->type[0] == type) {
			*pp = proto->next;	/* unlink */
			kfree_s(proto, sizeof(struct datalink_proto));
			break;
		}
	}

	restore_flags(flags);
}
Esempio n. 4
0
/*
 * Drain the backlog of deferred ICMP redirect requests.
 *
 * Does nothing while the routing table is locked; the work then stays
 * queued and RT_BH_REDIRECT remains set so it will be retried.
 * NOTE(review): the sti()/cli() pairing in the loop implies the caller
 * enters with interrupts disabled -- confirm against the bottom-half
 * dispatcher.
 */
static void rt_kick_backlog()
{
	if (!ip_rt_lock)
	{
		struct rt_req * rtr;

		ip_rt_fast_lock();

		while ((rtr = rt_req_dequeue(&rt_backlog)) != NULL)
		{
			/* Re-enable interrupts while each request is
			 * processed; dequeueing runs with them off. */
			sti();
			rt_redirect_1(rtr->dst, rtr->gw, rtr->dev);
			kfree_s(rtr, sizeof(struct rt_req));
			cli();
		}

		/* Backlog fully drained: clear the pending-work bit. */
		ip_rt_bh_mask &= ~RT_BH_REDIRECT;

		ip_rt_fast_unlock();
	}
}
Esempio n. 5
0
/*
 * Remove the SNAP client whose 5-byte OUI+PID descriptor matches
 * `desc' from snap_list and free it.  At most one entry is removed.
 * Interrupts are disabled for the duration of the list walk.
 */
void unregister_snap_client(unsigned char *desc)
{
	struct datalink_proto **pp = &snap_list;
	struct datalink_proto *proto;
	unsigned long flags;

	save_flags(flags);
	cli();

	for (; (proto = *pp) != NULL; pp = &proto->next) {
		if (memcmp(proto->type, desc, 5) == 0) {
			*pp = proto->next;	/* unlink from the client list */
			kfree_s(proto, sizeof(struct datalink_proto));
			break;
		}
	}

	restore_flags(flags);
}
Esempio n. 6
0
/*
 * Periodic timer callback: sweep every ARP hash bucket and discard
 * entries that have not been used within ARP_TIMEOUT, unless they are
 * marked permanent (ATF_PERM).  Finally re-arms the sweep timer.
 */
static void arp_check_expire(unsigned long dummy)
{
	unsigned long now = jiffies;
	unsigned long flags;
	int bucket;

	/* The tables are also modified from interrupt context. */
	save_flags(flags);
	cli();

	for (bucket = 0; bucket < FULL_ARP_TABLE_SIZE; bucket++)
	{
		struct arp_table **link = &arp_tables[bucket];
		struct arp_table *entry;

		while ((entry = *link) != NULL)
		{
			int stale = (now - entry->last_used) > ARP_TIMEOUT
				&& !(entry->flags & ATF_PERM);

			if (!stale) {
				link = &entry->next;	/* keep, move on */
				continue;
			}
			*link = entry->next;		/* unlink stale entry */
			del_timer(&entry->timer);	/* Paranoia */
			kfree_s(entry, sizeof(struct arp_table));
		}
	}
	restore_flags(flags);

	/*
	 *	Set the timer again.
	 */

	del_timer(&arp_timer);
	arp_timer.expires = ARP_CHECK_INTERVAL;
	add_timer(&arp_timer);
}
Esempio n. 7
0
/*
 * Initialize a module: copy its code into the space reserved by
 * create_module(), install its (optional) user-supplied symbol table,
 * resolve its references to other loaded modules, then run its init
 * routine.  Returns 0 on success or a negative errno.
 *
 * BUG FIX: the reference-resolution loop used to link each valid ref
 * into the target module's ref list as it went and then returned
 * -EINVAL on the first invalid one -- leaking `newtab' (which had
 * already been installed as mp->symtab) and leaving other modules'
 * ref lists pointing into it.  References are now validated in a
 * first pass, and only linked in (and mp->symtab only updated) once
 * all of them are known to be good.
 */
asmlinkage int
sys_init_module(char *module_name, char *code, unsigned codesize,
		struct mod_routines *routines,
		struct symbol_table *symtab)
{
	struct module *mp;
	struct symbol_table *newtab;
	char name[MOD_MAX_NAME];
	int error;
	struct mod_routines rt;

	if (!suser())
		return -EPERM;

#ifdef __i386__
	/* A little bit of protection... we "know" where the user stack is... */

	if (symtab && ((unsigned long)symtab > 0xb0000000)) {
		printk(KERN_WARNING "warning: you are using an old insmod, no symbols will be inserted!\n");
		symtab = NULL;
	}
#endif
	if ((error = get_mod_name(module_name, name)) != 0)
		return error;
	pr_debug("initializing module `%s', %d (0x%x) bytes\n",
		name, codesize, codesize);
	memcpy_fromfs(&rt, routines, sizeof rt);
	if ((mp = find_module(name)) == NULL)
		return -ENOENT;
	if (codesize & MOD_AUTOCLEAN) {
		/*
		 * set autoclean marker from codesize...
		 * set usage count to "zero"
		 */
		codesize &= ~MOD_AUTOCLEAN;
		GET_USE_COUNT(mp) = MOD_AUTOCLEAN;
	}
	/* The reserved area must be big enough for code + use-count word. */
	if ((codesize + sizeof (long) + PAGE_SIZE - 1) / PAGE_SIZE > mp->size)
		return -EINVAL;
	memcpy_fromfs((char *)mp->addr + sizeof (long), code, codesize);
	memset((char *)mp->addr + sizeof (long) + codesize, 0,
		mp->size * PAGE_SIZE - (codesize + sizeof (long)));
	pr_debug("module init entry = 0x%08lx, cleanup entry = 0x%08lx\n",
		(unsigned long) rt.init, (unsigned long) rt.cleanup);
	mp->cleanup = rt.cleanup;

	/* update kernel symbol table */
	if (symtab) { /* symtab == NULL means no new entries to handle */
		struct internal_symbol *sym;
		struct module_ref *ref;
		int size;
		int i;
		int legal_start;

		if ((error = verify_area(VERIFY_READ, &symtab->size, sizeof(symtab->size))))
			return error;
		size = get_user(&symtab->size);

		if ((newtab = (struct symbol_table*) kmalloc(size, GFP_KERNEL)) == NULL) {
			return -ENOMEM;
		}

		if ((error = verify_area(VERIFY_READ, symtab, size))) {
			kfree_s(newtab, size);
			return error;
		}
		memcpy_fromfs((char *)(newtab), symtab, size);

		/* sanity check: symbols and refs must fit inside the table */
		legal_start = sizeof(struct symbol_table) +
			newtab->n_symbols * sizeof(struct internal_symbol) +
			newtab->n_refs * sizeof(struct module_ref);

		if ((newtab->n_symbols < 0) || (newtab->n_refs < 0) || (legal_start > size)) {
			printk(KERN_WARNING "Rejecting illegal symbol table (n_symbols=%d,n_refs=%d)\n",
			       newtab->n_symbols, newtab->n_refs);
			kfree_s(newtab, size);
			return -EINVAL;
		}

		/* relocate name pointers, index referred from start of table */
		for (sym = &(newtab->symbol[0]), i = 0; i < newtab->n_symbols; ++sym, ++i) {
			if ((unsigned long)sym->name < legal_start || size <= (unsigned long)sym->name) {
				printk(KERN_WARNING "Rejecting illegal symbol table\n");
				kfree_s(newtab, size);
				return -EINVAL;
			}
			/* else */
			sym->name += (long)newtab;
		}

		/* Update module references.
		 * On entry, from "insmod", ref->module points to
		 * the referenced module!
		 * Now it will point to the current module instead!
		 * The ref structure becomes the first link in the linked
		 * list of references to the referenced module.
		 * Also, "sym" from above, points to the first ref entry!!!
		 *
		 * First pass: verify every reference names a loaded module
		 * BEFORE we touch any ref list, so a bad entry cannot leave
		 * the system half-linked.
		 */
		for (ref = (struct module_ref *)sym, i = 0;
			i < newtab->n_refs; ++ref, ++i) {

			/* Check for valid reference */
			struct module *link = module_list;
			while (link && (ref->module != link))
				link = link->next;

			if (link == (struct module *)0) {
				printk(KERN_WARNING "Non-module reference! Rejected!\n");
				kfree_s(newtab, size);
				return -EINVAL;
			}
		}

		/* Second pass: all references are valid -- link them in. */
		for (ref = (struct module_ref *)sym, i = 0;
			i < newtab->n_refs; ++ref, ++i) {
			ref->next = ref->module->ref;
			ref->module->ref = ref;
			ref->module = mp;
		}

		mp->symtab = newtab;
	}

	GET_USE_COUNT(mp) += 1;
	if ((*rt.init)() != 0) {
		GET_USE_COUNT(mp) = 0;
		return -EBUSY;
	}
	GET_USE_COUNT(mp) -= 1;
	mp->state = MOD_RUNNING;

	return 0;
}
Esempio n. 8
0
/*
 * Read a received packet of `len' bytes out of the 3c505 adapter's data
 * port into a freshly allocated sk_buff and hand it to the network
 * stack.  If no buffer can be allocated the packet is read and
 * discarded word by word so the adapter does not stall.
 */
static void receive_packet(struct device * dev,
                           elp_device * adapter,
                           int len)

{
    register int i;
    unsigned short * ptr;
    short d;
    int timeout;
    int rlen;
    struct sk_buff *skb;

    /*
     * allocate a buffer to put the packet into.
     * (for kernels prior to 1.1.4 only)
     */
#if (ELP_KERNEL_TYPE < 2)
    int sksize = sizeof(struct sk_buff) + len + 4;
#endif

    CHECK_NULL(dev);
    CHECK_NULL(adapter);

    /* NOTE(review): a bad length is only reported, never rejected --
     * execution continues below with rlen computed from it. */
    if (len <= 0 || ((len & ~1) != len))
        if (elp_debug >= 3)
            printk("*** bad packet len %d at %s(%d)\n",len,filename,__LINE__);

    /* round the length up to an even number of bytes (word transfers) */
    rlen = (len+1) & ~1;

#if (ELP_KERNEL_TYPE < 2)
    skb = alloc_skb(sksize, GFP_ATOMIC);
#else
    skb = alloc_skb(rlen, GFP_ATOMIC);
#endif

    /*
     * make sure the data register is going the right way
     */
    OUTB(INB(adapter->io_addr+PORT_CONTROL)|CONTROL_DIR, adapter->io_addr+PORT_CONTROL);

    /*
     * if buffer could not be allocated, swallow it
     */
    if (skb == NULL) {
        for (i = 0; i < (rlen/2); i++) {
            /* busy-wait for the adapter to present the next word */
            timeout = jiffies + TIMEOUT;
            while ((INB(adapter->io_addr+PORT_STATUS)&STATUS_HRDY) == 0 &&
                    jiffies < timeout)
                ;
            if (jiffies >= timeout)
                TIMEOUT_MSG();

            d = inw(adapter->io_addr+PORT_DATA);	/* value discarded */
        }
        adapter->stats.rx_dropped++;

    } else {
        skb->lock     = 0;
        skb->len = rlen;
        skb->dev = dev;

        /*
         * for kernels before 1.1.4, the driver allocated the buffer
         */
#if (ELP_KERNEL_TYPE < 2)
        skb->mem_len = sksize;
        skb->mem_addr = skb;
#endif

        /*
         * now read the data from the adapter
         */
        ptr = (unsigned short *)SKB_DATA;
        for (i = 0; i < (rlen/2); i++) {
            timeout = jiffies + TIMEOUT;
            while ((INB(adapter->io_addr+PORT_STATUS)&STATUS_HRDY) == 0 &&
                    jiffies < timeout)
                ;
            if (jiffies >= timeout)
            {
                /* adapter stalled mid-packet: free the half-filled
                 * buffer and give up on this packet */
                printk("*** timeout at %s(%d) reading word %d of %d ***\n",
                       filename,__LINE__, i, rlen/2);
#if (ELP_KERNEL_TYPE < 2)
                kfree_s(skb, sksize);
#else
                kfree_s(skb, rlen);
#endif
                return;
            }

            *ptr = inw(adapter->io_addr+PORT_DATA);
            ptr++;
        }

        /*
         * the magic routine "dev_rint" passes the packet up the
         * protocol chain. If it returns 0, we can assume the packet was
         * swallowed up. If not, then we are responsible for freeing memory
         */

        IS_SKB(skb);

        /*
         * for kernels before 1.1.4, the driver allocated the buffer, so it had
         * to free it
         */
#if (ELP_KERNEL_TYPE < 2)
        if (dev_rint((unsigned char *)skb, rlen, IN_SKBUFF, dev) != 0) {
            printk("%s: receive buffers full.\n", dev->name);
            kfree_s(skb, sksize);
        }
#else
        netif_rx(skb);
#endif
    }

    /* restore the data register direction for normal operation */
    OUTB(INB(adapter->io_addr+PORT_CONTROL)&(~CONTROL_DIR), adapter->io_addr+PORT_CONTROL);
}
Esempio n. 9
0
static void * usb_hpna_probe( struct usb_device *dev, unsigned int ifnum )
{
//	struct net_device 		*net_dev;
	struct device 		*net_dev;
	usb_hpna_t			*hpna = &usb_dev_hpna;
#ifdef PEGASUS_PRINT_PRODUCT_NAME
	int dev_index;
#endif

	spinlock_t xxx = { };

#ifdef PEGASUS_PRINT_PRODUCT_NAME /* XXX */
	if ( (dev_index = match_product(dev->descriptor.idVendor,
				dev->descriptor.idProduct)) == -1 ) {
		return NULL;
	}

	printk("USB Ethernet(Pegasus) %s found\n",
					product_list[dev_index].name);
#else
	if ( dev->descriptor.idVendor != ADMTEK_VENDOR_ID ||
	     dev->descriptor.idProduct != ADMTEK_HPNA_PEGASUS ) {
		return	NULL;
	}

	printk("USB HPNA Pegasus found\n");
#endif

	if ( usb_set_configuration(dev, dev->config[0].bConfigurationValue)) {
		err("usb_set_configuration() failed");
		return NULL;
	}

	hpna->usb_dev = dev;

	hpna->rx_pipe = usb_rcvbulkpipe(hpna->usb_dev, 1);
	hpna->tx_pipe = usb_sndbulkpipe(hpna->usb_dev, 2);
	hpna->intr_pipe = usb_rcvintpipe(hpna->usb_dev, 0);

	if ( reset_mac(dev) ) {
		err("can't reset MAC");
	}

	hpna->present = 1;

	if(!(hpna->rx_buff=kmalloc(MAX_MTU, GFP_KERNEL))) {
		err("not enough mem for out buff");
		return	NULL;
	}
	if(!(hpna->tx_buff=kmalloc(MAX_MTU, GFP_KERNEL))) {
		kfree_s(hpna->rx_buff, MAX_MTU);
		err("not enough mem for out buff");
		return	NULL;
	}

	net_dev = init_etherdev( 0, 0 );
	hpna->net_dev = net_dev;
	net_dev->priv = hpna;
	net_dev->open = hpna_open;
	net_dev->stop = hpna_close;
//	net_dev->watchdog_timeo = TX_TIMEOUT;
//	net_dev->tx_timeout = tx_timeout;
	net_dev->do_ioctl = hpna_ioctl; 
	net_dev->hard_start_xmit = hpna_start_xmit;
	net_dev->set_multicast_list = set_rx_mode;
	net_dev->get_stats = hpna_netdev_stats; 
	net_dev->mtu = HPNA_MTU;
#if 1
{
/*
 * to support dhcp client daemon(dhcpcd), it needs to get HW address
 * in probe routine.
 */
	struct usb_device *usb_dev = hpna->usb_dev;
	__u8	node_id[6];
	
	if ( get_node_id(usb_dev, node_id) ) {
		printk("USB Pegasus can't get HW address in probe routine.\n");
		printk("But Pegasus will re-try in open routine.\n");
		goto next;
	}
	hpna_set_registers(usb_dev, 0x10, 6, node_id);
	memcpy(net_dev->dev_addr, node_id, 6);
}
next:
#endif
	hpna->hpna_lock = xxx;	//SPIN_LOCK_UNLOCKED;
	
	FILL_BULK_URB( &hpna->rx_urb, hpna->usb_dev, hpna->rx_pipe, 
			hpna->rx_buff, MAX_MTU, hpna_read_irq, net_dev );
	FILL_BULK_URB( &hpna->tx_urb, hpna->usb_dev, hpna->tx_pipe, 
			hpna->tx_buff, MAX_MTU, hpna_write_irq, net_dev );
	FILL_INT_URB( &hpna->intr_urb, hpna->usb_dev, hpna->intr_pipe,
			hpna->intr_buff, 8, hpna_irq, net_dev, 250 );
	
/*	list_add( &hpna->list, &hpna_list );*/
	
	return	net_dev; 
}
Esempio n. 10
0
/*
 * Set a socket option on the IP level.
 *
 * sk/level/optname/optval/optlen follow the setsockopt(2) convention;
 * optval points into user space (read via verify_area/get_user/
 * memcpy_fromfs).  Returns 0 on success or a negative errno.
 *
 * BUG FIX: the IP_OPTIONS path freed the previous options block with
 * `sizeof(struct optlen)' -- a nonexistent type -- instead of
 * `sizeof(struct options)', mismatching the size the block was
 * allocated with.
 */
int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
{
	int val,err;
	unsigned char ucval;
#if defined(CONFIG_IP_FIREWALL) || defined(CONFIG_IP_ACCT)
	struct ip_fw tmp_fw;
#endif
	if (optval == NULL)
	{
		val=0;
		ucval=0;
	}
	else
	{
		/* Fetch the argument both as int and as a single byte;
		 * which one is used depends on the option. */
		err=verify_area(VERIFY_READ, optval, sizeof(int));
		if(err)
			return err;
		val = get_user((int *) optval);
		ucval=get_user((unsigned char *) optval);
	}

	if(level!=SOL_IP)
		return -EOPNOTSUPP;
#ifdef CONFIG_IP_MROUTE
	if(optname>=MRT_BASE && optname <=MRT_BASE+10)
	{
		return ip_mroute_setsockopt(sk,optname,optval,optlen);
	}
#endif

	switch(optname)
	{
		case IP_OPTIONS:
	          {
			  struct options * opt = NULL;
			  struct options * old_opt;
			  if (optlen > 40 || optlen < 0)
			  	return -EINVAL;
			  err = verify_area(VERIFY_READ, optval, optlen);
			  if (err)
			  	return err;
			  /* Header plus option bytes rounded up to 4 bytes. */
			  opt = kmalloc(sizeof(struct options)+((optlen+3)&~3), GFP_KERNEL);
			  if (!opt)
			  	return -ENOMEM;
			  memset(opt, 0, sizeof(struct options));
			  if (optlen)
			  	memcpy_fromfs(opt->__data, optval, optlen);
			  while (optlen & 3)
			  	opt->__data[optlen++] = IPOPT_END;
			  opt->optlen = optlen;
			  opt->is_data = 1;
			  opt->is_setbyuser = 1;
			  if (optlen && ip_options_compile(opt, NULL)) 
			  {
				  kfree_s(opt, sizeof(struct options) + optlen);
				  return -EINVAL;
			  }
			  /*
			   * ANK: I'm afraid that receive handler may change
			   * options from under us.
			   */
			  cli();
			  old_opt = sk->opt;
			  sk->opt = opt;
			  sti();
			  /* BUG FIX: was sizeof(struct optlen) -- wrong type;
			   * must match the sizeof(struct options)-based
			   * allocation above. */
			  if (old_opt)
			  	kfree_s(old_opt, sizeof(struct options) + old_opt->optlen);
			  return 0;
		  }
		case IP_TOS:		/* This sets both TOS and Precedence */
			if (val<0 || val>63)	/* Reject setting of unused bits */
				return -EINVAL;
			if ((val&7) > 4 && !suser())	/* Only root can set Prec>4 */
				return -EPERM;
			sk->ip_tos=val;
			switch (val & 0x38) {
				case IPTOS_LOWDELAY:
					sk->priority=SOPRI_INTERACTIVE;
					break;
				case IPTOS_THROUGHPUT:
					sk->priority=SOPRI_BACKGROUND;
					break;
				default:
					sk->priority=SOPRI_NORMAL;
					break;
			}
			return 0;
		case IP_TTL:
			if(val<1||val>255)
				return -EINVAL;
			sk->ip_ttl=val;
			return 0;
		case IP_HDRINCL:
			if(sk->type!=SOCK_RAW)
				return -ENOPROTOOPT;
			sk->ip_hdrincl=val?1:0;
			return 0;
#ifdef CONFIG_IP_MULTICAST
		case IP_MULTICAST_TTL: 
		{
			sk->ip_mc_ttl=(int)ucval;
	                return 0;
		}
		case IP_MULTICAST_LOOP: 
		{
			if(ucval!=0 && ucval!=1)
				 return -EINVAL;
			sk->ip_mc_loop=(int)ucval;
			return 0;
		}
		case IP_MULTICAST_IF: 
		{
			struct in_addr addr;
			struct device *dev=NULL;
			
			/*
			 *	Check the arguments are allowable
			 */

			err=verify_area(VERIFY_READ, optval, sizeof(addr));
			if(err)
				return err;
				
			memcpy_fromfs(&addr,optval,sizeof(addr));
			
			
			/*
			 *	What address has been requested
			 */
			
			if(addr.s_addr==INADDR_ANY)	/* Default */
			{
				sk->ip_mc_name[0]=0;
				return 0;
			}
			
			/*
			 *	Find the device
			 */
			 
			dev=ip_mc_find_devfor(addr.s_addr);
						
			/*
			 *	Did we find one
			 */
			 
			if(dev) 
			{
				strcpy(sk->ip_mc_name,dev->name);
				return 0;
			}
			return -EADDRNOTAVAIL;
		}
		
		case IP_ADD_MEMBERSHIP: 
		{
		
/*
 *	FIXME: Add/Del membership should have a semaphore protecting them from re-entry
 */
			struct ip_mreq mreq;
			__u32 route_src;
			struct rtable *rt;
			struct device *dev=NULL;
			
			/*
			 *	Check the arguments.
			 */

			err=verify_area(VERIFY_READ, optval, sizeof(mreq));
			if(err)
				return err;

			memcpy_fromfs(&mreq,optval,sizeof(mreq));

			/* 
			 *	Get device for use later
			 */

			if(mreq.imr_interface.s_addr==INADDR_ANY) 
			{
				/*
				 *	Not set so scan.
				 */
				if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,0))!=NULL)
				{
					dev=rt->rt_dev;
					route_src = rt->rt_src;
					atomic_dec(&rt->rt_use);
					ip_rt_put(rt);
				}
			}
			else
			{
				/*
				 *	Find a suitable device.
				 */
				
				dev=ip_mc_find_devfor(mreq.imr_interface.s_addr);
			}
			
			/*
			 *	No device, no cookies.
			 */
			 
			if(!dev)
				return -ENODEV;
				
			/*
			 *	Join group.
			 */
			 
			return ip_mc_join_group(sk,dev,mreq.imr_multiaddr.s_addr);
		}
		
		case IP_DROP_MEMBERSHIP: 
		{
			struct ip_mreq mreq;
			struct rtable *rt;
			__u32 route_src;
			struct device *dev=NULL;

			/*
			 *	Check the arguments
			 */
			 
			err=verify_area(VERIFY_READ, optval, sizeof(mreq));
			if(err)
				return err;

			memcpy_fromfs(&mreq,optval,sizeof(mreq));

			/*
			 *	Get device for use later 
			 */
 
			if(mreq.imr_interface.s_addr==INADDR_ANY) 
			{
				if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,0))!=NULL)
			        {
					dev=rt->rt_dev;
					atomic_dec(&rt->rt_use);
					route_src = rt->rt_src;
					ip_rt_put(rt);
				}
			}
			else 
			{
			
				dev=ip_mc_find_devfor(mreq.imr_interface.s_addr);
			}
			
			/*
			 *	Did we find a suitable device.
			 */
			 
			if(!dev)
				return -ENODEV;
				
			/*
			 *	Leave group
			 */
			 
			return ip_mc_leave_group(sk,dev,mreq.imr_multiaddr.s_addr);
		}
#endif			
#ifdef CONFIG_IP_FIREWALL
		case IP_FW_INSERT_IN:
		case IP_FW_INSERT_OUT:
		case IP_FW_INSERT_FWD:
		case IP_FW_APPEND_IN:
		case IP_FW_APPEND_OUT:
		case IP_FW_APPEND_FWD:
		case IP_FW_DELETE_IN:
		case IP_FW_DELETE_OUT:
		case IP_FW_DELETE_FWD:
		case IP_FW_CHECK_IN:
		case IP_FW_CHECK_OUT:
		case IP_FW_CHECK_FWD:
		case IP_FW_FLUSH_IN:
		case IP_FW_FLUSH_OUT:
		case IP_FW_FLUSH_FWD:
		case IP_FW_ZERO_IN:
		case IP_FW_ZERO_OUT:
		case IP_FW_ZERO_FWD:
		case IP_FW_POLICY_IN:
		case IP_FW_POLICY_OUT:
		case IP_FW_POLICY_FWD:
		case IP_FW_MASQ_TIMEOUTS:
			if(!suser())
				return -EPERM;
			if(optlen>sizeof(tmp_fw) || optlen<1)
				return -EINVAL;
			err=verify_area(VERIFY_READ,optval,optlen);
			if(err)
				return err;
			memcpy_fromfs(&tmp_fw,optval,optlen);
			err=ip_fw_ctl(optname, &tmp_fw,optlen);
			return -err;	/* -0 is 0 after all */
			
#endif
#ifdef CONFIG_IP_ACCT
		case IP_ACCT_INSERT:
		case IP_ACCT_APPEND:
		case IP_ACCT_DELETE:
		case IP_ACCT_FLUSH:
		case IP_ACCT_ZERO:
			if(!suser())
				return -EPERM;
			if(optlen>sizeof(tmp_fw) || optlen<1)
				return -EINVAL;
			err=verify_area(VERIFY_READ,optval,optlen);
			if(err)
				return err;
			memcpy_fromfs(&tmp_fw, optval,optlen);
			err=ip_acct_ctl(optname, &tmp_fw,optlen);
			return -err;	/* -0 is 0 after all */
#endif
		/* IP_OPTIONS and friends go here eventually */
		default:
			return(-ENOPROTOOPT);
	}
}
Esempio n. 11
0
/*
 * Tear down a sock that is being released: cancel its timers, flush
 * every queue it owns, and free the structure itself once no memory
 * remains charged to it.  If data is still accounted (rmem/wmem), the
 * sock is marked for destruction and a destroy timer is armed instead.
 */
void destroy_sock(struct sock *sk)
{
	struct sk_buff *skb;

  	sk->inuse = 1;			/* just to be safe. Lock the sock structure. */

	/* Check the dead flag first: only dead==1 means the sock is awaiting release. */
  	/* In case it's sleeping somewhere. */
  	if (!sk->dead) 
  		sk->write_space(sk);	// wake processes waiting asynchronously if SO_NOSPACE was raised

  	remove_sock(sk);

  	/* Remove the timers this connection uses. */
  	/* Now we can no longer get new packets. */
  	delete_timer(sk);				// remove the general-purpose timer
  	/* Nor send them */
	del_timer(&sk->retransmit_timer);	// remove the retransmit timer

	/* Free the partially-filled write buffer hung off sk->partial. */
	while ((skb = tcp_dequeue_partial(sk)) != NULL) {
		IS_SKB(skb);
		kfree_skb(skb, FREE_WRITE);
	}

	/* Cleanup up the write buffer. */
  	while((skb = skb_dequeue(&sk->write_queue)) != NULL) {
		IS_SKB(skb);
		kfree_skb(skb, FREE_WRITE);
  	}
  	
  	/*
  	 *	Don't discard received data until the user side kills its
  	 *	half of the socket.
  	 */

	if (sk->dead) 
	{
  		while((skb=skb_dequeue(&sk->receive_queue))!=NULL) 
  		{
		/*
		 * This will take care of closing sockets that were
		 * listening and didn't accept everything.
		 */
			if (skb->sk != NULL && skb->sk != sk) 
			{
			/* If this sock is a listening socket we must close the
			 * half-created connections queued on it -- their socks
			 * may be in ESTABLISHED or SYN_RECV state. */
				IS_SKB(skb);
				skb->sk->dead = 1;
				skb->sk->prot->close(skb->sk, 0);
			}
			IS_SKB(skb);
			kfree_skb(skb, FREE_READ);
		}
	}	

	/* Now we need to clean up the send head. (the retransmit queue) */
	cli();
	for(skb = sk->send_head; skb != NULL; )
	{
		struct sk_buff *skb2;

		/*
		 * We need to remove skb from the transmit queue,
		 * or maybe the arp queue.
		 */
		if (skb->next  && skb->prev) {
/*			printk("destroy_sock: unlinked skb\n");*/
			IS_SKB(skb);
			skb_unlink(skb);
		}
		skb->dev = NULL;
		skb2 = skb->link3;	// link3 chains TCP's retransmit queue
		kfree_skb(skb, FREE_WRITE);
		skb = skb2;
	}
	sk->send_head = NULL;
	sti();

  	/* And now the backlog. */
  	while((skb=skb_dequeue(&sk->back_log))!=NULL) 
  	{
		/* this should never happen. */
/*		printk("cleaning back_log\n");*/
		kfree_skb(skb, FREE_READ);
	}

	/* Now if it has a half accepted/ closed socket. */
	if (sk->pair) 
	{
		sk->pair->dead = 1;
		sk->pair->prot->close(sk->pair, 0);
		sk->pair = NULL;
  	}

	/*
	 * Now if everything is gone we can free the socket
	 * structure, otherwise we need to keep it around until
	 * everything is gone.
	 */

	  if (sk->dead && sk->rmem_alloc == 0 && sk->wmem_alloc == 0) 
	  {
		kfree_s((void *)sk,sizeof(*sk));
	  } 
	  else 
	  {
		/* this should never happen. */
		/* actually it can if an ack has just been sent. */
		sk->destroy = 1;
		sk->ack_backlog = 0;
		sk->inuse = 0;
		reset_timer(sk, TIME_DESTROY, SOCK_DESTROY_TIME);
  	}
}
Esempio n. 12
0
/*
 * Release the memory backing a sock structure.  The caller must
 * guarantee that no other references to @sk remain.
 */
void sk_free(struct sock *sk)
{
	kfree_s((void *) sk, sizeof(*sk));
}
Esempio n. 13
0
/*
 * This is so ripe with races that you should *really* not touch this
 * unless you know exactly what you are doing. All the changes have to be
 * made atomically, or there may be incorrect pointers all over the place.
 */
/*
 * Allocate and install the tty_struct, termios and locked-termios
 * structures for device `dev' -- and, when `dev' is a pty, for its
 * partner `o_dev' as well.
 *
 * The allocations can sleep, so after each one the code jumps back to
 * `repeat' and re-checks every condition from scratch; whatever was
 * allocated but turned out to be unnecessary (someone else installed a
 * structure meanwhile) is freed at `end_init'.  Pointer installation at
 * the bottom is therefore the only "commit" step.
 *
 * Returns 0 on success, -EAGAIN if a pty master is already open,
 * -ENOMEM on allocation failure.
 */
static int init_dev(int dev)
{
	struct tty_struct *tty, *o_tty;	/* o_* refer to the other pty end */
	struct termios *tp, *o_tp, *ltp, *o_ltp;
	int retval;
	int o_dev;

	o_dev = PTY_OTHER(dev);
	tty = o_tty = NULL;
	tp = o_tp = NULL;
	ltp = o_ltp = NULL;
repeat:
	retval = -EAGAIN;
	if (IS_A_PTY_MASTER(dev) && tty_table[dev] && tty_table[dev]->count)
		goto end_init;
	retval = -ENOMEM;
	if (!tty_table[dev] && !tty) {
		/* may sleep: re-validate everything afterwards */
		if (!(tty = (struct tty_struct*) get_free_page(GFP_KERNEL)))
			goto end_init;
		initialize_tty_struct(dev, tty);
		goto repeat;
	}
	if (!tty_termios[dev] && !tp) {
		tp = (struct termios *) kmalloc(sizeof(struct termios),
						GFP_KERNEL);
		if (!tp)
			goto end_init;
		initialize_termios(dev, tp);
		goto repeat;
	}
	if (!termios_locked[dev] && !ltp) {
		ltp = (struct termios *) kmalloc(sizeof(struct termios),
						 GFP_KERNEL);
		if (!ltp)
			goto end_init;
		memset(ltp, 0, sizeof(struct termios));
		goto repeat;
	}
	if (IS_A_PTY(dev)) {
		/* same dance for the other end of the pty pair */
		if (!tty_table[o_dev] && !o_tty) {
			o_tty = (struct tty_struct *)
				get_free_page(GFP_KERNEL);
			if (!o_tty)
				goto end_init;
			initialize_tty_struct(o_dev, o_tty);
			goto repeat;
		}
		if (!tty_termios[o_dev] && !o_tp) {
			o_tp = (struct termios *)
				kmalloc(sizeof(struct termios), GFP_KERNEL);
			if (!o_tp)
				goto end_init;
			initialize_termios(o_dev, o_tp);
			goto repeat;
		}
		if (!termios_locked[o_dev] && !o_ltp) {
			o_ltp = (struct termios *)
				kmalloc(sizeof(struct termios), GFP_KERNEL);
			if (!o_ltp)
				goto end_init;
			memset(o_ltp, 0, sizeof(struct termios));
			goto repeat;
		}
		
	}
	/* Now we have allocated all the structures: update all the pointers.. */
	/* NULLing a local below marks it "installed" so end_init won't free it. */
	if (!tty_termios[dev]) {
		tty_termios[dev] = tp;
		tp = NULL;
	}
	if (!tty_table[dev]) {
		tty->termios = tty_termios[dev];
		tty_table[dev] = tty;
		tty = NULL;
	}
	if (!termios_locked[dev]) {
		termios_locked[dev] = ltp;
		ltp = NULL;
	}
	if (IS_A_PTY(dev)) {
		if (!tty_termios[o_dev]) {
			tty_termios[o_dev] = o_tp;
			o_tp = NULL;
		}
		if (!termios_locked[o_dev]) {
			termios_locked[o_dev] = o_ltp;
			o_ltp = NULL;
		}
		if (!tty_table[o_dev]) {
			o_tty->termios = tty_termios[o_dev];
			tty_table[o_dev] = o_tty;
			o_tty = NULL;
		}
		tty_table[dev]->link = tty_table[o_dev];
		tty_table[o_dev]->link = tty_table[dev];
	}
	tty_table[dev]->count++;
	if (IS_A_PTY_MASTER(dev))
		tty_table[o_dev]->count++;
	retval = 0;
end_init:
	/* free anything we allocated but did not install */
	if (tty)
		free_page((unsigned long) tty);
	if (o_tty)
		free_page((unsigned long) o_tty);
	if (tp)
		kfree_s(tp, sizeof(struct termios));
	if (o_tp)
		kfree_s(o_tp, sizeof(struct termios));
	if (ltp)
		kfree_s(ltp, sizeof(struct termios));
	if (o_ltp)
		kfree_s(o_ltp, sizeof(struct termios));
	return retval;
}
Esempio n. 14
0
/*
 * Install symbol table `intab' on behalf of the module whose body
 * starts at `from' (or on behalf of built-in code when `from' does not
 * match any loaded module).
 *
 * Three cases:
 *  - built-in caller: a pseudo-module is created to carry the table;
 *  - module with no previous table, or a table with no references:
 *    the table pointer is simply swapped (old kmalloc-ed table freed);
 *  - module whose old table carries references from other modules:
 *    a new table big enough for the new symbols plus the old refs is
 *    allocated, the refs are copied across, and every other module's
 *    ref pointer into the old table is relinked to the new one.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int
register_symtab_from(struct symbol_table *intab, long *from)
{
	struct module *mp;
	struct module *link;
	struct symbol_table *oldtab;
	struct symbol_table *newtab;
	struct module_ref *newref;
	int size;

	if (intab && (intab->n_symbols == 0)) {
		struct internal_symbol *sym;
		/* How many symbols, really? */

		/* count up to the NULL-name terminator */
		for (sym = intab->symbol; sym->name; ++sym)
			intab->n_symbols +=1;
	}

	for (mp = module_list; mp != &kernel_module; mp = mp->next) {
		/*
		 * "from" points to "mod_use_count_" (== start of module)
		 * or is == 0 if called from a non-module
		 */
		if ((unsigned long)(mp->addr) == (unsigned long)from)
			break;
	}

	if (mp == &kernel_module) {
		/* Aha! Called from an "internal" module */
		if (!intab)
			return 0; /* or -ESILLY_PROGRAMMER :-) */

		/* create a pseudo module! */
		if (!(mp = (struct module*) kmalloc(MODSIZ, GFP_KERNEL))) {
			/* panic time! */
			printk(KERN_ERR "Out of memory for new symbol table!\n");
			return -ENOMEM;
		}
		/* else  OK */
		memset(mp, 0, MODSIZ);
		mp->state = MOD_RUNNING; /* Since it is resident... */
		mp->name = ""; /* This is still the "kernel" symbol table! */
		mp->symtab = intab;

		/* link it in _after_ the resident symbol table */
		mp->next = kernel_module.next;
		kernel_module.next = mp;

		return 0;
	}

	/* else ******** Called from a loadable module **********/

	/*
	 * This call should _only_ be done in the context of the
	 * call to  init_module  i.e. when loading the module!!
	 * Or else...
	 */

	/* Any table there before? */
	if ((oldtab = mp->symtab) == (struct symbol_table*)0) {
		/* No, just insert it! */
		mp->symtab = intab;
		return 0;
	}

	/* else  ****** we have to replace the module symbol table ******/

	if (oldtab->n_refs == 0) { /* no problems! */
		mp->symtab = intab;
		/* if the old table was kmalloc-ed, drop it */
		if (oldtab->size > 0)
			kfree_s(oldtab, oldtab->size);

		return 0;
	}

	/* else */
	/***** The module references other modules... insmod said so! *****/
	/* We have to allocate a new symbol table, or we lose them! */
	if (intab == (struct symbol_table*)0)
		intab = &nulltab; /* easier code with zeroes in place */

	/* the input symbol table space does not include the string table */
	/* (it does for symbol tables that insmod creates) */

	if (!(newtab = (struct symbol_table*)kmalloc(
		size = SYMSIZ + intab->n_symbols * INTSIZ +
			oldtab->n_refs * REFSIZ,
		GFP_KERNEL))) {
		/* panic time! */
		printk(KERN_ERR "Out of memory for new symbol table!\n");
		return -ENOMEM;
	}

	/* copy up to, and including, the new symbols */
	memcpy(newtab, intab, SYMSIZ + intab->n_symbols * INTSIZ);

	newtab->size = size;
	newtab->n_refs = oldtab->n_refs;

	/* copy references */
	memcpy( ((char *)newtab) + SYMSIZ + intab->n_symbols * INTSIZ,
		((char *)oldtab) + SYMSIZ + oldtab->n_symbols * INTSIZ,
		oldtab->n_refs * REFSIZ);

	/* relink references from the old table to the new one */

	/* pointer to the first reference entry in newtab! Really! */
	newref = (struct module_ref*) &(newtab->symbol[newtab->n_symbols]);

	/* check for reference links from previous modules */
	for (	link = module_list;
		link && (link != &kernel_module);
		link = link->next) {

		/* refs were copied in order, so hand them out in order too */
		if (link->ref && (link->ref->module == mp))
			link->ref = newref++;
	}

	mp->symtab = newtab;

	/* all references (if any) have been handled */

	/* if the old table was kmalloc-ed, drop it */
	if (oldtab->size > 0)
		kfree_s(oldtab, oldtab->size);

	return 0;
}
Esempio n. 15
0
/*
 * Add one route to the forwarding information base.
 *
 * dst/mask/gw/dev describe the route; mss/window/irtt are cached TCP
 * parameters; metric orders multiple routes to the same destination.
 *
 * Outline: allocate a fib_node and its shared fib_info, find (or
 * create) the zone for this mask length, optionally convert the zone's
 * flat list to a hash table once it exceeds RTZ_HASHING_LIMIT, insert
 * the node in metric order, then delete any pre-existing route with
 * the same destination and gateway.  List surgery is done under
 * cli()/sti() because readers run from interrupt context.
 * Failures are silent (the function returns void).
 */
static __inline__ void fib_add_1(short flags, __u32 dst, __u32 mask,
	__u32 gw, struct device *dev, unsigned short mss,
	unsigned long window, unsigned short irtt, short metric)
{
	struct fib_node *f, *f1;
	struct fib_node **fp;
	struct fib_node **dup_fp = NULL;
	struct fib_zone * fz;
	struct fib_info * fi;
	int logmask;

	/*
	 *	Allocate an entry and fill it in.
	 */
	 
	f = (struct fib_node *) kmalloc(sizeof(struct fib_node), GFP_KERNEL);
	if (f == NULL)
		return;

	memset(f, 0, sizeof(struct fib_node));
	f->fib_dst = dst;
	f->fib_metric = metric;
	f->fib_tos    = 0;

	if  ((fi = fib_create_info(gw, dev, flags, mss, window, irtt)) == NULL)
	{
		kfree_s(f, sizeof(struct fib_node));
		return;
	}
	f->fib_info = fi;

	logmask = rt_logmask(mask);
	fz = fib_zones[logmask];


	if (!fz)
	{
		/* First route with this mask length: create its zone and
		 * splice it into the zone list ordered by mask length. */
		int i;
		fz = kmalloc(sizeof(struct fib_zone), GFP_KERNEL);
		if (!fz)
		{
			fib_free_node(f);
			return;
		}
		memset(fz, 0, sizeof(struct fib_zone));
		fz->fz_logmask = logmask;
		fz->fz_mask = mask;
		for (i=logmask-1; i>=0; i--)
			if (fib_zones[i])
				break;
		cli();
		if (i<0)
		{
			fz->fz_next = fib_zone_list;
			fib_zone_list = fz;
		}
		else
		{
			fz->fz_next = fib_zones[i]->fz_next;
			fib_zones[i]->fz_next = fz;
		}
		fib_zones[logmask] = fz;
		sti();
	}

	/*
	 * If zone overgrows RTZ_HASHING_LIMIT, create hash table.
	 */

	if (fz->fz_nent >= RTZ_HASHING_LIMIT && !fz->fz_hash_table && logmask<32)
	{
		struct fib_node ** ht;
#if RT_CACHE_DEBUG >= 2
		printk("fib_add_1: hashing for zone %d started\n", logmask);
#endif
		/* NOTE(review): allocates sizeof(struct rtable*) elements but
		 * memsets sizeof(struct fib_node*) -- harmless since both are
		 * pointer-sized, but the types should match. */
		ht = kmalloc(RTZ_HASH_DIVISOR*sizeof(struct rtable*), GFP_KERNEL);

		if (ht)
		{
			memset(ht, 0, RTZ_HASH_DIVISOR*sizeof(struct fib_node*));
			cli();
			/* Rehash every node from the flat list into buckets,
			 * preserving relative order within each bucket. */
			f1 = fz->fz_list;
			while (f1)
			{
				struct fib_node * next, **end;
				unsigned hash = fz_hash_code(f1->fib_dst, logmask);
				next = f1->fib_next;
				f1->fib_next = NULL;
				end = &ht[hash];
				while(*end != NULL)
					end = &(*end)->fib_next;
				*end = f1;
				f1 = next;
			}
			fz->fz_list = NULL;
			fz->fz_hash_table = ht; 
			sti();
		}
	}

	if (fz->fz_hash_table)
		fp = &fz->fz_hash_table[fz_hash_code(dst, logmask)];
	else
		fp = &fz->fz_list;

	/*
	 * Scan list to find the first route with the same destination
	 */
	while ((f1 = *fp) != NULL)
	{
		if (f1->fib_dst == dst)
			break;
		fp = &f1->fib_next;
	}

	/*
	 * Find route with the same destination and less (or equal) metric.
	 */
	while ((f1 = *fp) != NULL && f1->fib_dst == dst)
	{
		if (f1->fib_metric >= metric)
			break;
		/*
		 *	Record route with the same destination and gateway,
		 *	but less metric. We'll delete it 
		 *	after instantiation of new route.
		 */
		if (f1->fib_info->fib_gateway == gw &&
		    (gw || f1->fib_info->fib_dev == dev))
			dup_fp = fp;
		fp = &f1->fib_next;
	}

	/*
	 * Is it already present?
	 */

	if (f1 && f1->fib_metric == metric && f1->fib_info == fi)
	{
		fib_free_node(f);
		return;
	}
	
	/*
	 * Insert new entry to the list.
	 */

	cli();
	f->fib_next = f1;
	*fp = f;
	if (!fib_loopback && (fi->fib_dev->flags & IFF_LOOPBACK))
		fib_loopback = f;
	sti();
	fz->fz_nent++;
	ip_netlink_msg(RTMSG_NEWROUTE, dst, gw, mask, flags, metric, fi->fib_dev->name);

	/*
	 *	Delete route with the same destination and gateway.
	 *	Note that we should have at most one such route.
	 */
	if (dup_fp)
		fp = dup_fp;
	else
		fp = &f->fib_next;

	while ((f1 = *fp) != NULL && f1->fib_dst == dst)
	{
		if (f1->fib_info->fib_gateway == gw &&
		    (gw || f1->fib_info->fib_dev == dev))
		{
			cli();
			*fp = f1->fib_next;
			if (fib_loopback == f1)
				fib_loopback = NULL;
			sti();
			ip_netlink_msg(RTMSG_DELROUTE, dst, gw, mask, flags, metric, f1->fib_info->fib_dev->name);
			fib_free_node(f1);
			fz->fz_nent--;
			break;
		}
		fp = &f1->fib_next;
	}
	/* Any cached routes may now be stale: flush the route cache. */
	rt_cache_flush();
	return;
}
Esempio n. 16
0
/*
 * Read the ext2 superblock from the device backing 'sb' and initialize
 * the in-core super_block (group descriptors, mount options, root inode).
 *
 * Returns 'sb' on success, NULL on failure.  On every failure path the
 * superblock lock must be released, sb->s_dev cleared, and any buffer
 * heads read so far released.  (The original code violated this on the
 * second-superblock-read failure path; fixed below.)
 */
struct super_block * ext2_read_super (struct super_block * sb, void * data,
                      int silent)
{
    struct buffer_head * bh;
    struct ext2_super_block * es;
    unsigned long sb_block = 1;
    unsigned short resuid = EXT2_DEF_RESUID;
    unsigned short resgid = EXT2_DEF_RESGID;
    unsigned long logic_sb_block = 1;
    int dev = sb->s_dev;
    int db_count;
    int i, j;
#ifdef EXT2FS_PRE_02B_COMPAT
    int fs_converted = 0;
#endif

#ifndef OS2
    set_opt (sb->u.ext2_sb.s_mount_opt, CHECK_NORMAL);
#else
    set_opt (sb->u.ext2_sb.s_mount_opt, CHECK_STRICT);
#endif
    if (!parse_options ((char *) data, &sb_block, &resuid, &resgid,
        &sb->u.ext2_sb.s_mount_opt)) {
        sb->s_dev = 0;
        return NULL;
    }

    lock_super (sb);
    set_blocksize (dev, BLOCK_SIZE);
    if (!(bh = bread (dev, sb_block, BLOCK_SIZE))) {
        sb->s_dev = 0;
        unlock_super (sb);
        printk ("EXT2-fs: unable to read superblock\n");
        return NULL;
    }
    /*
     * Note: s_es must be initialized as soon as possible because
     * some ext2 macro-instructions depend on its value
     */
    es = (struct ext2_super_block *) bh->b_data;
    sb->u.ext2_sb.s_es = es;
    sb->s_magic = es->s_magic;
    if (sb->s_magic != EXT2_SUPER_MAGIC
#ifdef EXT2FS_PRE_02B_COMPAT
       && sb->s_magic != EXT2_PRE_02B_MAGIC
#endif
       ) {
        if (!silent)
            printk ("VFS: Can't find an ext2 filesystem on dev %d/%d.\n",
                MAJOR(dev), MINOR(dev));
        failed_mount:
        sb->s_dev = 0;
        unlock_super (sb);
        if (bh)
                brelse (bh);
        return NULL;
    }
#ifdef EXT2_DYNAMIC_REV
    if (es->s_rev_level > EXT2_GOOD_OLD_REV) {
        /* Refuse to mount (or mount RDWR) if the fs uses features we
         * do not understand -- writing could corrupt it. */
        if (es->s_feature_incompat & ~EXT2_FEATURE_INCOMPAT_SUPP) {
#ifndef OS2
            printk("EXT2-fs: %s: couldn't mount because of "
                   "unsupported optional features.\n",
                   kdevname(dev));
#else
            printk("EXT2-fs (drive %c:) couldn't mount because of "
                   "unsupported optional features.\n",
                   sb->s_drive + 'A');
#endif
            goto failed_mount;
        }
        if (!(sb->s_flags & MS_RDONLY) &&
            (es->s_feature_ro_compat & ~EXT2_FEATURE_RO_COMPAT_SUPP)) {
#ifndef OS2
            printk("EXT2-fs: %s: couldn't mount RDWR because of "
                   "unsupported optional features.\n",
                   kdevname(dev));
#else
            printk("EXT2-fs (drive %c:) couldn't mount RDWR because of "
                   "unsupported optional features.\n",
                   sb->s_drive + 'A');
#endif
            goto failed_mount;
        }
    }
#endif
    sb->s_blocksize = EXT2_MIN_BLOCK_SIZE << es->s_log_block_size;
    sb->s_blocksize_bits = EXT2_BLOCK_SIZE_BITS(sb);
    if (sb->s_blocksize != BLOCK_SIZE &&
        (sb->s_blocksize == 1024 || sb->s_blocksize == 2048 ||
         sb->s_blocksize == 4096)) {
        unsigned long offset;

        /* The fs blocksize differs from the one used for the first
         * read: re-read the superblock with the real blocksize. */
        brelse (bh);
        set_blocksize (dev, sb->s_blocksize);
        logic_sb_block = (sb_block*BLOCK_SIZE) / sb->s_blocksize;
        offset = (sb_block*BLOCK_SIZE) % sb->s_blocksize;
        bh = bread (dev, logic_sb_block, sb->s_blocksize);
        if(!bh) {
            /*
             * BUG FIX: the original returned here with the superblock
             * still locked and sb->s_dev still set, leaking the lock.
             */
            sb->s_dev = 0;
            unlock_super (sb);
            printk ("EXT2-fs: unable to read superblock on 2nd try\n");
            return NULL;
        }
        es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
        sb->u.ext2_sb.s_es = es;
        if (es->s_magic != EXT2_SUPER_MAGIC) {
            sb->s_dev = 0;
            unlock_super (sb);
            brelse (bh);
            printk ("EXT2-fs: Magic mismatch, very weird !\n");
            return NULL;
        }
    }
#ifdef EXT2_DYNAMIC_REV
    if (es->s_rev_level == EXT2_GOOD_OLD_REV) {
        sb->u.ext2_sb.s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
        sb->u.ext2_sb.s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
    } else {
        sb->u.ext2_sb.s_inode_size = es->s_inode_size;
        sb->u.ext2_sb.s_first_ino = es->s_first_ino;
        if (sb->u.ext2_sb.s_inode_size != EXT2_GOOD_OLD_INODE_SIZE) {
            printk ("EXT2-fs: unsupported inode size: %d\n",
                sb->u.ext2_sb.s_inode_size);
            goto failed_mount;
        }
    }
#endif
    sb->u.ext2_sb.s_frag_size = EXT2_MIN_FRAG_SIZE <<
                   es->s_log_frag_size;
    if (sb->u.ext2_sb.s_frag_size)
        sb->u.ext2_sb.s_frags_per_block = sb->s_blocksize /
                          sb->u.ext2_sb.s_frag_size;
    else
        sb->s_magic = 0;    /* force the magic check below to fail */
    sb->u.ext2_sb.s_blocks_per_group = es->s_blocks_per_group;
    sb->u.ext2_sb.s_frags_per_group = es->s_frags_per_group;
    sb->u.ext2_sb.s_inodes_per_group = es->s_inodes_per_group;
    sb->u.ext2_sb.s_inodes_per_block = sb->s_blocksize /
                       sizeof (struct ext2_inode);
    sb->u.ext2_sb.s_itb_per_group = sb->u.ext2_sb.s_inodes_per_group /
                        sb->u.ext2_sb.s_inodes_per_block;
    sb->u.ext2_sb.s_desc_per_block = sb->s_blocksize /
                     sizeof (struct ext2_group_desc);
    sb->u.ext2_sb.s_sbh = bh;
    sb->u.ext2_sb.s_es = es;
    if (resuid != EXT2_DEF_RESUID)
        sb->u.ext2_sb.s_resuid = resuid;
    else
        sb->u.ext2_sb.s_resuid = es->s_def_resuid;
    if (resgid != EXT2_DEF_RESGID)
        sb->u.ext2_sb.s_resgid = resgid;
    else
        sb->u.ext2_sb.s_resgid = es->s_def_resgid;
    sb->u.ext2_sb.s_mount_state = es->s_state;
    sb->u.ext2_sb.s_rename_lock = 0;
#ifndef OS2
    sb->u.ext2_sb.s_rename_wait = NULL;
#else
    sb->u.ext2_sb.s_rename_wait = 0;
#endif
#ifdef EXT2FS_PRE_02B_COMPAT
    if (sb->s_magic == EXT2_PRE_02B_MAGIC) {
        if (es->s_blocks_count > 262144) {
            /*
             * fs > 256 MB can't be converted
             */
            sb->s_dev = 0;
            unlock_super (sb);
            brelse (bh);
            printk ("EXT2-fs: trying to mount a pre-0.2b file"
                "system which cannot be converted\n");
            return NULL;
        }
        printk ("EXT2-fs: mounting a pre 0.2b file system, "
            "will try to convert the structure\n");
        if (!(sb->s_flags & MS_RDONLY)) {
            sb->s_dev = 0;
            unlock_super (sb);
            brelse (bh);
            printk ("EXT2-fs: cannot convert a read-only fs\n");
            return NULL;
        }
        if (!convert_pre_02b_fs (sb, bh)) {
            sb->s_dev = 0;
            unlock_super (sb);
            brelse (bh);
            printk ("EXT2-fs: conversion failed !!!\n");
            return NULL;
        }
        printk ("EXT2-fs: conversion succeeded !!!\n");
        fs_converted = 1;
    }
#endif
    if (sb->s_magic != EXT2_SUPER_MAGIC) {
        sb->s_dev = 0;
        unlock_super (sb);
        brelse (bh);
        if (!silent)
            printk ("VFS: Can't find an ext2 filesystem on dev %d/%d.\n",
                MAJOR(dev), MINOR(dev));
        return NULL;
    }
    if (sb->s_blocksize != bh->b_size) {
        sb->s_dev = 0;
        unlock_super (sb);
        brelse (bh);
        if (!silent)
            printk ("VFS: Unsupported blocksize on dev 0x%04x.\n",
                dev);
        return NULL;
    }

    if (sb->s_blocksize != sb->u.ext2_sb.s_frag_size) {
        sb->s_dev = 0;
        unlock_super (sb);
        brelse (bh);
        printk ("EXT2-fs: fragsize %lu != blocksize %lu (not supported yet)\n",
            sb->u.ext2_sb.s_frag_size, sb->s_blocksize);
        return NULL;
    }

    sb->u.ext2_sb.s_groups_count = (es->s_blocks_count -
                        es->s_first_data_block +
                       EXT2_BLOCKS_PER_GROUP(sb) - 1) /
                       EXT2_BLOCKS_PER_GROUP(sb);
    db_count = (sb->u.ext2_sb.s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
           EXT2_DESC_PER_BLOCK(sb);
#ifndef OS2
    sb->u.ext2_sb.s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
    if (sb->u.ext2_sb.s_group_desc == NULL) {
#else
    if (DevHlp32_VMAlloc(db_count * sizeof (struct buffer_head *), VMDHA_NOPHYSADDR, VMDHA_SWAP, (void **)(&(sb->u.ext2_sb.s_group_desc))) != NO_ERROR) {
#endif
        sb->s_dev = 0;
        unlock_super (sb);
        brelse (bh);
        printk ("EXT2-fs: not enough memory\n");
        return NULL;
    }
    /* Group descriptors live in the blocks immediately after the
     * superblock; read and pin one buffer head per descriptor block. */
    for (i = 0; i < db_count; i++) {
        sb->u.ext2_sb.s_group_desc[i] = bread (dev, logic_sb_block + i + 1,
                               sb->s_blocksize);
        if (!sb->u.ext2_sb.s_group_desc[i]) {
            sb->s_dev = 0;
            unlock_super (sb);
            for (j = 0; j < i; j++)
                brelse (sb->u.ext2_sb.s_group_desc[j]);
            kfree_s (sb->u.ext2_sb.s_group_desc,
                 db_count * sizeof (struct buffer_head *));
            brelse (bh);
            printk ("EXT2-fs: unable to read group descriptors\n");
            return NULL;
        }
    }
    if (!ext2_check_descriptors (sb)) {
        sb->s_dev = 0;
        unlock_super (sb);
        for (j = 0; j < db_count; j++)
            brelse (sb->u.ext2_sb.s_group_desc[j]);
        kfree_s (sb->u.ext2_sb.s_group_desc,
             db_count * sizeof (struct buffer_head *));
        brelse (bh);
        printk ("EXT2-fs: group descriptors corrupted !\n");
        return NULL;
    }
    for (i = 0; i < EXT2_MAX_GROUP_LOADED; i++) {
        sb->u.ext2_sb.s_inode_bitmap_number[i] = 0;
        sb->u.ext2_sb.s_inode_bitmap[i] = NULL;
        sb->u.ext2_sb.s_block_bitmap_number[i] = 0;
        sb->u.ext2_sb.s_block_bitmap[i] = NULL;
    }
    sb->u.ext2_sb.s_loaded_inode_bitmaps = 0;
    sb->u.ext2_sb.s_loaded_block_bitmaps = 0;
    sb->u.ext2_sb.s_db_per_group = db_count;
    unlock_super (sb);
    /*
     * set up enough so that it can read an inode
     */
    sb->s_dev = dev;
    sb->s_op = &ext2_sops;
    if (!(sb->s_mounted = iget (sb, EXT2_ROOT_INO))) {
        sb->s_dev = 0;
        for (i = 0; i < db_count; i++)
            if (sb->u.ext2_sb.s_group_desc[i])
                brelse (sb->u.ext2_sb.s_group_desc[i]);
        kfree_s (sb->u.ext2_sb.s_group_desc,
             db_count * sizeof (struct buffer_head *));
        brelse (bh);
        printk ("EXT2-fs: get root inode failed\n");
        return NULL;
    }
#ifdef EXT2FS_PRE_02B_COMPAT
    if (fs_converted) {
        for (i = 0; i < db_count; i++)
            mark_buffer_dirty(sb->u.ext2_sb.s_group_desc[i], 1);
        sb->s_dirt = 1;
    }
#endif
    ext2_setup_super (sb, es);
    return sb;
}

/*
 * Stamp the on-disk superblock image with the current time and queue it
 * for write-back.  The VFS-level dirty flag is cleared because the
 * pending buffer write now carries the update to disk.
 */
static void ext2_commit_super (struct super_block * sb,
                   struct ext2_super_block * es)
{
    struct buffer_head * sbh = sb->u.ext2_sb.s_sbh;

    es->s_wtime = CURRENT_TIME;
    mark_buffer_dirty(sbh, 1);
    sb->s_dirt = 0;
}
Esempio n. 17
0
/*
 * Debug ioctl helper: shuttle a raw I2C buffer between user space and the
 * VFC device.  Requires CAP_SYS_ADMIN.
 *
 * Returns 0 on success, or -EPERM / -EFAULT / -ENOMEM / -EINVAL.
 * Invariants restored by this version: the kernel bounce buffer is freed
 * on every path, and the device lock is never held across a return.
 */
static int vfc_debug(struct vfc_dev *dev, int cmd, unsigned long arg) 
{
	struct vfc_debug_inout inout;
	unsigned char *buffer;

	if(!capable(CAP_SYS_ADMIN)) return -EPERM;

	switch(cmd) {
	case VFC_I2C_SEND:
		if(copy_from_user(&inout, (void *)arg, sizeof(inout)))
			return -EFAULT;

		buffer = kmalloc(inout.len, GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;

		if(copy_from_user(buffer, inout.buffer, inout.len)) {
			kfree_s(buffer, inout.len);
			return -EFAULT;
		}

		vfc_lock_device(dev);
		/*
		 * BUG FIX: send the kernel copy ('buffer'), not the raw
		 * user-space pointer ('inout.buffer') the original passed.
		 */
		inout.ret =
			vfc_i2c_sendbuf(dev, inout.addr & 0xff,
					buffer, inout.len);
		vfc_unlock_device(dev);

		/* BUG FIX: the original leaked 'buffer' on success and
		 * returned with the device locked on copy_to_user failure. */
		kfree_s(buffer, inout.len);

		if (copy_to_user((void *)arg, &inout, sizeof(inout)))
			return -EFAULT;
		break;
	case VFC_I2C_RECV:
		if (copy_from_user(&inout, (void *)arg, sizeof(inout)))
			return -EFAULT;

		buffer = kmalloc(inout.len, GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;
		memset(buffer, 0, inout.len);

		vfc_lock_device(dev);
		inout.ret =
			vfc_i2c_recvbuf(dev, inout.addr & 0xff,
					buffer, inout.len);
		vfc_unlock_device(dev);

		if (copy_to_user(inout.buffer, buffer, inout.len)) {
			kfree_s(buffer, inout.len);
			return -EFAULT;
		}
		if (copy_to_user((void *)arg, &inout, sizeof(inout))) {
			kfree_s(buffer, inout.len);
			return -EFAULT;
		}
		kfree_s(buffer, inout.len);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
Esempio n. 18
0
/*
 * Allocate and hash a new masquerade table entry for the connection
 * (saddr:sport -> daddr:dport, protocol 'proto') leaving via 'dev'.
 *
 * A free masquerade port is picked by cycling masq_port through
 * [PORT_MASQ_BEGIN, PORT_MASQ_END); if 'matchport' is non-zero that exact
 * port is tried first.  Returns the new entry, or NULL when no memory or
 * no free port is available (failures are rate-limited to 5 printk's).
 * cli()/sti() sections guard masq_port, the free-port counter and the
 * hash table against interrupt-context lookups.
 */
struct ip_masq * ip_masq_new_enh(struct device *dev, int proto, __u32 saddr, __u16 sport, __u32 daddr, __u16 dport, unsigned mflags, __u16 matchport)
{
        struct ip_masq *ms, *mst;
        int ports_tried, *free_ports_p;
	unsigned long flags;
        static int n_fails = 0;         /* consecutive failures, caps log spam */

        free_ports_p = &ip_masq_free_ports[masq_proto_num(proto)];

        if (*free_ports_p == 0) {
                if (++n_fails < 5)
                        printk("ip_masq_new(proto=%s): no free ports.\n",
                               masq_proto_name(proto));
                return NULL;
        }
        ms = (struct ip_masq *) kmalloc(sizeof(struct ip_masq), GFP_ATOMIC);
        if (ms == NULL) {
                if (++n_fails < 5)
                        printk("ip_masq_new(proto=%s): no memory available.\n",
                               masq_proto_name(proto));
                return NULL;
        }
        memset(ms, 0, sizeof(*ms));
	init_timer(&ms->timer);
	ms->timer.data     = (unsigned long)ms;
	ms->timer.function = masq_expire;
        ms->protocol	   = proto;
        ms->saddr    	   = saddr;
        ms->sport	   = sport;
        ms->daddr	   = daddr;
        ms->dport	   = dport;
        ms->flags	   = mflags;
        ms->app_data	   = NULL;
	ms->control	   = NULL;

        /* Plain UDP is connectionless: ignore the destination address on
         * reply matching unless the caller pinned a specific port. */
        if (proto == IPPROTO_UDP && !matchport)
                ms->flags |= IP_MASQ_F_NO_DADDR;
        
        /* get masq address from rif */
        ms->maddr	   = dev->pa_addr;
        /*
         *	Setup new entry as not replied yet.
         *	This flag will allow masq. addr (ms->maddr)
         *	to follow forwarding interface address.
         */
        ms->flags         |= IP_MASQ_F_NO_REPLY;

        for (ports_tried = 0; 
	     (*free_ports_p && (ports_tried <= (PORT_MASQ_END - PORT_MASQ_BEGIN)));
	     ports_tried++){
                save_flags(flags);
                cli();
                
		/*
                 *	Try the next available port number
                 */
                if (!matchport || ports_tried)
			ms->mport = htons(masq_port++);
		else
			ms->mport = matchport;
			
		if (masq_port==PORT_MASQ_END) masq_port = PORT_MASQ_BEGIN;
                
                restore_flags(flags);
                
                /*
                 *	lookup to find out if this port is used.
                 */
                
                mst = ip_masq_getbym(proto, ms->maddr, ms->mport);
                if (mst == NULL || matchport) {
                        save_flags(flags);
                        cli();
                
                        /* Re-check under cli(): the count may have hit
                         * zero while interrupts were enabled above. */
                        if (*free_ports_p == 0) {
                                restore_flags(flags);
                                break;
                        }
                        (*free_ports_p)--;
                        ip_masq_hash(ms);
                        
                        restore_flags(flags);
                        
                        if (proto != IPPROTO_ICMP)
                              ip_masq_bind_app(ms);
                        n_fails = 0;
                        return ms;
                }
        }
        
        if (++n_fails < 5)
                printk("ip_masq_new(proto=%s): could not get free masq entry (free=%d).\n",
                       masq_proto_name(ms->protocol), *free_ports_p);
        kfree_s(ms, sizeof(*ms));
        return NULL;
}
Esempio n. 19
0
/*
 * Debug wrapper around kfree_s() that decrements the outstanding
 * allocation counter used for leak checking before freeing the block.
 */
void leak_check_free_s(void * obj, int size){
  check_malloc--;
  /* kfree_s() returns void: 'return kfree_s(...);' is invalid ISO C
   * (C99 6.8.6.4), so call it as a plain statement. */
  kfree_s(obj, size);
}
Esempio n. 20
0
/*
 * Even releasing the tty structures is a tricky business.. We have
 * to be very careful that the structures are all released at the
 * same time, as interrupts might otherwise get the wrong pointers.
 */
/*
 * Release one open instance of tty 'dev'.  Drops the reference counts
 * (both sides of a pty pair), and when the last reference goes away
 * detaches controlling-tty links, shuts down the line discipline and
 * frees the tty and termios structures.  The ordering of the frees is
 * deliberate: interrupt handlers may look at these tables.
 */
static void release_dev(int dev, struct file * filp)
{
	struct tty_struct *tty, *o_tty;	/* this side / other pty side */
	struct termios *tp, *o_tp;
	struct task_struct **p;

	tty = tty_table[dev];
	tp = tty_termios[dev];
	o_tty = NULL;
	o_tp = NULL;
	if (!tty) {
		printk("release_dev: tty_table[%d] was NULL\n", dev);
		return;
	}
	if (!tp) {
		printk("release_dev: tty_termios[%d] was NULL\n", dev);
		return;
	}
#ifdef TTY_DEBUG_HANGUP
	printk("release_dev of tty%d (tty count=%d)...", dev, tty->count);
#endif
	/* For a pty, sanity-check that the pair is intact before touching it. */
	if (IS_A_PTY(dev)) {
		o_tty = tty_table[PTY_OTHER(dev)];
		o_tp = tty_termios[PTY_OTHER(dev)];
		if (!o_tty) {
			printk("release_dev: pty pair(%d) was NULL\n", dev);
			return;
		}
		if (!o_tp) {
			printk("release_dev: pty pair(%d) termios was NULL\n", dev);
			return;
		}
		if (tty->link != o_tty || o_tty->link != tty) {
			printk("release_dev: bad pty pointers\n");
			return;
		}
	}
	tty->write_data_cnt = 0; /* Clear out pending trash */
	if (tty->close)
		tty->close(tty, filp);
	/* Closing a pty master also drops one reference on its slave. */
	if (IS_A_PTY_MASTER(dev)) {
		if (--tty->link->count < 0) {
			printk("release_dev: bad tty slave count (dev = %d): %d\n",
			       dev, tty->count);
			tty->link->count = 0;
		}
	}
	if (--tty->count < 0) {
		printk("release_dev: bad tty_table[%d]->count: %d\n",
		       dev, tty->count);
		tty->count = 0;
	}
	/* Still open elsewhere: nothing more to tear down. */
	if (tty->count)
		return;
	
#ifdef TTY_DEBUG_HANGUP
	printk("freeing tty structure...");
#endif

	/*
	 * Make sure there aren't any processes that still think this
	 * tty is their controlling tty.
	 */
	for (p = &LAST_TASK ; p > &FIRST_TASK ; --p) {
		if ((*p) && (*p)->tty == tty->line)
		(*p)->tty = -1;
	}

	/*
	 * Shutdown the current line discipline, and reset it to
	 * N_TTY.
	 */
	if (ldiscs[tty->disc].close != NULL)
		ldiscs[tty->disc].close(tty);
	tty->disc = N_TTY;
	tty->termios->c_line = N_TTY;
	
	/* A pty pair is only freed once BOTH sides are fully closed;
	 * if the other side is still open, leave everything in place. */
	if (o_tty) {
		if (o_tty->count)
			return;
		else {
			tty_table[PTY_OTHER(dev)] = NULL;
			tty_termios[PTY_OTHER(dev)] = NULL;
		}
	}
	tty_table[dev] = NULL;
	if (IS_A_PTY(dev)) {
		tty_termios[dev] = NULL;
		kfree_s(tp, sizeof(struct termios));
	}
	if (tty == redirect || o_tty == redirect)
		redirect = NULL;
	free_page((unsigned long) tty);
	if (o_tty)
		free_page((unsigned long) o_tty);
	if (o_tp)
		kfree_s(o_tp, sizeof(struct termios));
}
Esempio n. 21
0
/*
 * Read 'count' bytes from an NFS regular file into the user buffer 'buf'.
 *
 * Fast path: serve the request entirely from the small global read cache
 * (cache[]) when a fresh-enough entry covers the byte range.  Slow path:
 * issue rsize-sized nfs_proc_read() calls, and try to leave the final
 * chunk behind in the cache for the next sequential reader.  cli()/sti()
 * protect the cache slots against concurrent use.
 * Returns the number of bytes read, or a negative error code.
 */
static int nfs_file_read(struct inode *inode, struct file *file, char *buf,
			 int count)
{
	int result, hunk, i, n, fs;
	struct nfs_fattr fattr;
	char *data;
	off_t pos;

	if (!inode) {
		printk("nfs_file_read: inode = NULL\n");
		return -EINVAL;
	}
	if (!S_ISREG(inode->i_mode)) {
		printk("nfs_file_read: read from non-file, mode %07o\n",
			inode->i_mode);
		return -EINVAL;
	}
	pos = file->f_pos;
	/* Clamp the request to end-of-file. */
	if (pos + count > inode->i_size)
		count = inode->i_size - pos;
	if (count <= 0)
		return 0;
	++num_requests;
	cli();
	/* Look for a cache slot that fully covers [pos, pos+count)
	 * and has not expired. */
	for (i = 0; i < READ_CACHE_SIZE; i++)
		if ((cache[i].inode_num == inode->i_ino)
			&& (cache[i].file_pos <= pos)
			&& (cache[i].file_pos + cache[i].len >= pos + count)
			&& (abs(jiffies - cache[i].time) <= EXPIRE_CACHE))
			break;
	if (i < READ_CACHE_SIZE) {
		/* Cache hit: pin the slot (in_use) while copying to user
		 * space with interrupts enabled. */
		++cache[i].in_use;
		sti();
		++num_cache_hits;
		memcpy_tofs(buf, cache[i].buf + pos - cache[i].file_pos, count);
		--cache[i].in_use;
		file->f_pos += count;
		return count;
	}
	sti();
	n = NFS_SERVER(inode)->rsize;
	/* Read all but the last (possibly partial) rsize chunk straight
	 * into the user buffer. */
	for (i = 0; i < count - n; i += n) {
		result = nfs_proc_read(NFS_SERVER(inode), NFS_FH(inode), 
			pos, n, buf, &fattr, 1);
		if (result < 0)
			return result;
		pos += result;
		buf += result;
		if (result < n) {
			/* Short read: hand back what we got so far. */
			file->f_pos = pos;
			nfs_refresh_inode(inode, &fattr);
			return i + result;
		}
	}
	/* Final chunk: read into a kernel buffer so it can be cached;
	 * fall back to the user buffer (fs=1) if kmalloc fails. */
	fs = 0;
	if (!(data = (char *)kmalloc(n, GFP_KERNEL))) {
		data = buf;
		fs = 1;
	}
	result = nfs_proc_read(NFS_SERVER(inode), NFS_FH(inode),
		pos, n, data, &fattr, fs);
	if (result < 0) {
		if (!fs)
			kfree_s(data, n);
		return result;
	}
	hunk = count - i;
	if (result < hunk)
		hunk = result;
	if (fs) {
		/* Data already went straight to user space; done. */
		file->f_pos = pos + hunk;
		nfs_refresh_inode(inode, &fattr);
		return i + hunk;
	}
	memcpy_tofs(buf, data, hunk);
	file->f_pos = pos + hunk;
	nfs_refresh_inode(inode, &fattr);
	cli();
	/* Donate the kernel buffer to the cache slot at 'tail' unless that
	 * slot is currently pinned by another reader. */
	if (cache[tail].in_use == 0) {
		if (cache[tail].buf)
			kfree_s(cache[tail].buf, cache[tail].buf_size);
		cache[tail].buf = data;
		cache[tail].buf_size = n;
		cache[tail].inode_num = inode->i_ino;
		cache[tail].file_pos = pos;
		cache[tail].len = result;
		cache[tail].time = jiffies;
		if (++tail >= READ_CACHE_SIZE)
			tail = 0;
	} else
		kfree_s(data, n);
	sti();
	return i + hunk;
}
Esempio n. 22
0
/* We have a good packet(s), get it/them out of the buffers. */
/*
 * Skeleton-driver receive loop: drain up to 10 frames from the card.
 * For each frame it reads a status word and length word from the I/O
 * port, accounts errors, copies good frames into a freshly allocated
 * sk_buff and hands them to the network stack.
 * NOTE(review): this is template code -- it shows BOTH a shared-memory
 * memcpy and a programmed-I/O insw ("/* or */" below); a real driver
 * keeps exactly one of the two.
 */
static void
net_rx(struct device *dev)
{
	struct net_local *lp = (struct net_local *)dev->priv;
	int ioaddr = dev->base_addr;
	int boguscount = 10;	/* cap per call so we cannot spin forever */

	do {
		int status = inw(ioaddr);
		int pkt_len = inw(ioaddr);
	  
		if (pkt_len == 0)		/* Read all the frames? */
			break;			/* Done for now */

		if (status & 0x40) {	/* There was an error. */
			lp->stats.rx_errors++;
			if (status & 0x20) lp->stats.rx_frame_errors++;
			if (status & 0x10) lp->stats.rx_over_errors++;
			if (status & 0x08) lp->stats.rx_crc_errors++;
			if (status & 0x04) lp->stats.rx_fifo_errors++;
		} else {
			/* Malloc up new buffer. */
			int sksize = sizeof(struct sk_buff) + pkt_len;
			struct sk_buff *skb;

			skb = alloc_skb(sksize, GFP_ATOMIC);
			if (skb == NULL) {
				printk("%s: Memory squeeze, dropping packet.\n", dev->name);
				lp->stats.rx_dropped++;
				break;
			}
			skb->mem_len = sksize;
			skb->mem_addr = skb;
			skb->len = pkt_len;
			skb->dev = dev;

			/* 'skb->data' points to the start of sk_buff data area. */
			memcpy(skb->data, (void*)dev->rmem_start,
				   pkt_len);
			/* or */
			insw(ioaddr, skb->data, (pkt_len + 1) >> 1);

#ifdef HAVE_NETIF_RX
			netif_rx(skb);
#else
			skb->lock = 0;
			if (dev_rint((unsigned char*)skb, pkt_len, IN_SKBUFF, dev) != 0) {
				kfree_s(skb, sksize);
				lp->stats.rx_dropped++;
				break;
			}
#endif
			lp->stats.rx_packets++;
		}
	} while (--boguscount);

	/* If any worth-while packets have been received, dev_rint()
	   has done a mark_bh(INET_BH) for us and will work on them
	   when we get to the bottom-half routine. */
	return;
}
Esempio n. 23
0
/*
 * Read the ext2 superblock from 'dev' and initialize the in-core
 * super_block: mount options, geometry, group descriptors, root inode.
 *
 * Returns 'sb' on success or NULL on failure.  Error paths funnel
 * through the 'failed_mount' label, which unlocks the superblock,
 * releases the buffer head and drops the module reference.
 */
struct super_block * ext2_read_super (struct super_block * sb, void * data,
				      int silent)
{
	struct buffer_head * bh;
	struct ext2_super_block * es;
	unsigned long sb_block = 1;
	unsigned short resuid = EXT2_DEF_RESUID;
	unsigned short resgid = EXT2_DEF_RESGID;
	unsigned long logic_sb_block = 1;
	kdev_t dev = sb->s_dev;
	int db_count;	/* number of group-descriptor blocks */
	int i, j;

	set_opt (sb->u.ext2_sb.s_mount_opt, CHECK_NORMAL);
	if (!parse_options ((char *) data, &sb_block, &resuid, &resgid,
	    &sb->u.ext2_sb.s_mount_opt)) {
		sb->s_dev = 0;
		return NULL;
	}

	MOD_INC_USE_COUNT;
	lock_super (sb);
	set_blocksize (dev, BLOCK_SIZE);
	if (!(bh = bread (dev, sb_block, BLOCK_SIZE))) {
		sb->s_dev = 0;
		unlock_super (sb);
		printk ("EXT2-fs: unable to read superblock\n");
		MOD_DEC_USE_COUNT;
		return NULL;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 * some ext2 macro-instructions depend on its value
	 */
	es = (struct ext2_super_block *) bh->b_data;
	sb->u.ext2_sb.s_es = es;
	sb->s_magic = es->s_magic;
	if (sb->s_magic != EXT2_SUPER_MAGIC) {
		if (!silent)
			printk ("VFS: Can't find an ext2 filesystem on dev "
				"%s.\n", kdevname(dev));
	failed_mount:
		/* Common error exit: undo the lock, buffer and module ref. */
		sb->s_dev = 0;
		unlock_super (sb);
		if (bh)
			brelse(bh);
		MOD_DEC_USE_COUNT;
		return NULL;
	}
	/* Refuse to mount (or mount RDWR) with unknown feature bits set. */
	if (es->s_rev_level > EXT2_GOOD_OLD_REV) {
		if (es->s_feature_incompat & ~EXT2_FEATURE_INCOMPAT_SUPP) {
			printk("EXT2-fs: %s: couldn't mount because of "
			       "unsupported optional features.\n", 
			       kdevname(dev));
			goto failed_mount;
		}
		if (!(sb->s_flags & MS_RDONLY) &&
		    (es->s_feature_ro_compat & ~EXT2_FEATURE_RO_COMPAT_SUPP)) {
			printk("EXT2-fs: %s: couldn't mount RDWR because of "
			       "unsupported optional features.\n", 
			       kdevname(dev));
			goto failed_mount;
		}
	}
	sb->s_blocksize_bits = sb->u.ext2_sb.s_es->s_log_block_size + 10;
	sb->s_blocksize = 1 << sb->s_blocksize_bits;
	if (sb->s_blocksize != BLOCK_SIZE && 
	    (sb->s_blocksize == 1024 || sb->s_blocksize == 2048 ||  
	     sb->s_blocksize == 4096)) {
		unsigned long offset;

		/* The fs blocksize differs from the one used for the first
		 * read: re-read the superblock at the real blocksize. */
		brelse (bh);
		set_blocksize (dev, sb->s_blocksize);
		logic_sb_block = (sb_block*BLOCK_SIZE) / sb->s_blocksize;
		offset = (sb_block*BLOCK_SIZE) % sb->s_blocksize;
		bh = bread (dev, logic_sb_block, sb->s_blocksize);
		if(!bh) {
			printk("EXT2-fs: Couldn't read superblock on "
			       "2nd try.\n");
			goto failed_mount;
		}
		es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
		sb->u.ext2_sb.s_es = es;
		if (es->s_magic != EXT2_SUPER_MAGIC) {
			printk ("EXT2-fs: Magic mismatch, very weird !\n");
			goto failed_mount;
		}
	}
	if (es->s_rev_level == EXT2_GOOD_OLD_REV) {
		sb->u.ext2_sb.s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
		sb->u.ext2_sb.s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
	} else {
		sb->u.ext2_sb.s_inode_size = es->s_inode_size;
		sb->u.ext2_sb.s_first_ino = es->s_first_ino;
		if (sb->u.ext2_sb.s_inode_size != EXT2_GOOD_OLD_INODE_SIZE) {
			printk ("EXT2-fs: unsupported inode size: %d\n",
				sb->u.ext2_sb.s_inode_size);
			goto failed_mount;
		}
	}
	sb->u.ext2_sb.s_frag_size = EXT2_MIN_FRAG_SIZE <<
				   es->s_log_frag_size;
	if (sb->u.ext2_sb.s_frag_size)
		sb->u.ext2_sb.s_frags_per_block = sb->s_blocksize /
						  sb->u.ext2_sb.s_frag_size;
	else
		sb->s_magic = 0;	/* forces the magic re-check below to fail */
	sb->u.ext2_sb.s_blocks_per_group = es->s_blocks_per_group;
	sb->u.ext2_sb.s_frags_per_group = es->s_frags_per_group;
	sb->u.ext2_sb.s_inodes_per_group = es->s_inodes_per_group;
	sb->u.ext2_sb.s_inodes_per_block = sb->s_blocksize /
					   EXT2_INODE_SIZE(sb);
	sb->u.ext2_sb.s_itb_per_group = sb->u.ext2_sb.s_inodes_per_group /
				        sb->u.ext2_sb.s_inodes_per_block;
	sb->u.ext2_sb.s_desc_per_block = sb->s_blocksize /
					 sizeof (struct ext2_group_desc);
	sb->u.ext2_sb.s_sbh = bh;
	if (resuid != EXT2_DEF_RESUID)
		sb->u.ext2_sb.s_resuid = resuid;
	else
		sb->u.ext2_sb.s_resuid = es->s_def_resuid;
	if (resgid != EXT2_DEF_RESGID)
		sb->u.ext2_sb.s_resgid = resgid;
	else
		sb->u.ext2_sb.s_resgid = es->s_def_resgid;
	sb->u.ext2_sb.s_mount_state = es->s_state;
	sb->u.ext2_sb.s_rename_lock = 0;
	sb->u.ext2_sb.s_rename_wait = NULL;
	sb->u.ext2_sb.s_addr_per_block_bits =
		log2 (EXT2_ADDR_PER_BLOCK(sb));
	sb->u.ext2_sb.s_desc_per_block_bits =
		log2 (EXT2_DESC_PER_BLOCK(sb));
	if (sb->s_magic != EXT2_SUPER_MAGIC) {
		if (!silent)
			printk ("VFS: Can't find an ext2 filesystem on dev "
				"%s.\n",
				kdevname(dev));
		goto failed_mount;
	}
	if (sb->s_blocksize != bh->b_size) {
		if (!silent)
			printk ("VFS: Unsupported blocksize on dev "
				"%s.\n", kdevname(dev));
		goto failed_mount;
	}

	if (sb->s_blocksize != sb->u.ext2_sb.s_frag_size) {
		printk ("EXT2-fs: fragsize %lu != blocksize %lu (not supported yet)\n",
			sb->u.ext2_sb.s_frag_size, sb->s_blocksize);
		goto failed_mount;
	}

	/* Per-group counts must fit in one blocksize-sized bitmap. */
	if (sb->u.ext2_sb.s_blocks_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #blocks per group too big: %lu\n",
			sb->u.ext2_sb.s_blocks_per_group);
		goto failed_mount;
	}
	if (sb->u.ext2_sb.s_frags_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #fragments per group too big: %lu\n",
			sb->u.ext2_sb.s_frags_per_group);
		goto failed_mount;
	}
	if (sb->u.ext2_sb.s_inodes_per_group > sb->s_blocksize * 8) {
		printk ("EXT2-fs: #inodes per group too big: %lu\n",
			sb->u.ext2_sb.s_inodes_per_group);
		goto failed_mount;
	}

	sb->u.ext2_sb.s_groups_count = (es->s_blocks_count -
				        es->s_first_data_block +
				       EXT2_BLOCKS_PER_GROUP(sb) - 1) /
				       EXT2_BLOCKS_PER_GROUP(sb);
	db_count = (sb->u.ext2_sb.s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
		   EXT2_DESC_PER_BLOCK(sb);
	sb->u.ext2_sb.s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
	if (sb->u.ext2_sb.s_group_desc == NULL) {
		printk ("EXT2-fs: not enough memory\n");
		goto failed_mount;
	}
	/* Group descriptors follow the superblock; read and pin one
	 * buffer head per descriptor block. */
	for (i = 0; i < db_count; i++) {
		sb->u.ext2_sb.s_group_desc[i] = bread (dev, logic_sb_block + i + 1,
						       sb->s_blocksize);
		if (!sb->u.ext2_sb.s_group_desc[i]) {
			for (j = 0; j < i; j++)
				brelse (sb->u.ext2_sb.s_group_desc[j]);
			kfree_s (sb->u.ext2_sb.s_group_desc,
				 db_count * sizeof (struct buffer_head *));
			printk ("EXT2-fs: unable to read group descriptors\n");
			goto failed_mount;
		}
	}
	if (!ext2_check_descriptors (sb)) {
		for (j = 0; j < db_count; j++)
			brelse (sb->u.ext2_sb.s_group_desc[j]);
		kfree_s (sb->u.ext2_sb.s_group_desc,
			 db_count * sizeof (struct buffer_head *));
		printk ("EXT2-fs: group descriptors corrupted !\n");
		goto failed_mount;
	}
	/* Empty the in-core bitmap caches. */
	for (i = 0; i < EXT2_MAX_GROUP_LOADED; i++) {
		sb->u.ext2_sb.s_inode_bitmap_number[i] = 0;
		sb->u.ext2_sb.s_inode_bitmap[i] = NULL;
		sb->u.ext2_sb.s_block_bitmap_number[i] = 0;
		sb->u.ext2_sb.s_block_bitmap[i] = NULL;
	}
	sb->u.ext2_sb.s_loaded_inode_bitmaps = 0;
	sb->u.ext2_sb.s_loaded_block_bitmaps = 0;
	sb->u.ext2_sb.s_db_per_group = db_count;
	unlock_super (sb);
	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_dev = dev;
	sb->s_op = &ext2_sops;
	if (!(sb->s_mounted = iget (sb, EXT2_ROOT_INO))) {
		sb->s_dev = 0;
		for (i = 0; i < db_count; i++)
			if (sb->u.ext2_sb.s_group_desc[i])
				brelse (sb->u.ext2_sb.s_group_desc[i]);
		kfree_s (sb->u.ext2_sb.s_group_desc,
			 db_count * sizeof (struct buffer_head *));
		brelse (bh);
		printk ("EXT2-fs: get root inode failed\n");
		MOD_DEC_USE_COUNT;
		return NULL;
	}
	ext2_setup_super (sb, es);
	return sb;
}
Esempio n. 24
0
/*
 * Create an INET-family socket: allocate a struct sock, select the
 * transport protocol operations from the socket type, and initialise
 * every field of the sock to its default (closed, idle) state.
 *
 * Returns 0 on success, or a negative errno:
 *   -ENOBUFS          sock allocation failed
 *   -EPROTONOSUPPORT  protocol argument conflicts with the socket type
 *   -EPERM            SOCK_RAW / SOCK_PACKET without superuser rights
 *   -ESOCKTNOSUPPORT  unknown socket type
 */
static int inet_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct proto *prot;
	int err;

	sk = (struct sock *) kmalloc(sizeof(*sk), GFP_KERNEL);
	if (sk == NULL) 
		return(-ENOBUFS);
	
	sk->num = 0;
	sk->reuse = 0;
	
	/* Map the (type, protocol) pair to a proto table and validate it. */
	switch(sock->type) 
	{
		case SOCK_STREAM:
		case SOCK_SEQPACKET:
			if (protocol && protocol != IPPROTO_TCP) 
			{
				kfree_s((void *)sk, sizeof(*sk));
				return(-EPROTONOSUPPORT);
			}
			protocol = IPPROTO_TCP;
			/* TCP_NO_CHECK is the global default for the TCP protocol */
			sk->no_check = TCP_NO_CHECK;
			prot = &tcp_prot;
			break;

		case SOCK_DGRAM:
			if (protocol && protocol != IPPROTO_UDP) 
			{
				kfree_s((void *)sk, sizeof(*sk));
				return(-EPROTONOSUPPORT);
			}
			protocol = IPPROTO_UDP;
			sk->no_check = UDP_NO_CHECK;
			prot=&udp_prot;
			break;
      
		case SOCK_RAW:
			if (!suser()) 
			{
				kfree_s((void *)sk, sizeof(*sk));
				return(-EPERM);
			}
			if (!protocol) 
			{
				kfree_s((void *)sk, sizeof(*sk));
				return(-EPROTONOSUPPORT);
			}
			prot = &raw_prot;
			sk->reuse = 1;
			sk->no_check = 0;	/*
						 * Doesn't matter no checksum is
						 * performed anyway.
						 */
			sk->num = protocol;
			break;

		case SOCK_PACKET:
			if (!suser()) 
			{
				kfree_s((void *)sk, sizeof(*sk));
				return(-EPERM);
			}
			if (!protocol) 
			{
				kfree_s((void *)sk, sizeof(*sk));
				return(-EPROTONOSUPPORT);
			}
			prot = &packet_prot;
			sk->reuse = 1;
			sk->no_check = 0;	/* Doesn't matter no checksum is
						 * performed anyway.
						 */
			sk->num = protocol;
			break;

		default:
			kfree_s((void *)sk, sizeof(*sk));
			return(-ESOCKTNOSUPPORT);
	}
	
	/* From here on: field-by-field default initialisation of the sock. */
	sk->socket = sock;
#ifdef CONFIG_TCP_NAGLE_OFF
	sk->nonagle = 1;
#else    
	sk->nonagle = 0;
#endif  
	sk->type = sock->type;
	sk->stamp.tv_sec=0;
	sk->protocol = protocol;		// transport-layer protocol number
	sk->wmem_alloc = 0;
	sk->rmem_alloc = 0;
	sk->sndbuf = SK_WMEM_MAX;	// maximum write-buffer size for this socket
	sk->rcvbuf = SK_RMEM_MAX;	// maximum read-buffer size for this socket
	sk->pair = NULL;
	sk->opt = NULL;
	sk->write_seq = 0;
	sk->acked_seq = 0;
	sk->copied_seq = 0;
	sk->fin_seq = 0;
	sk->urg_seq = 0;
	sk->urg_data = 0;
	sk->proc = 0;
	sk->rtt = 0;				/*TCP_WRITE_TIME << 3;*/
	sk->rto = TCP_TIMEOUT_INIT;		/*TCP_WRITE_TIME*/
	sk->mdev = 0;
	sk->backoff = 0;
	sk->packets_out = 0;
	sk->cong_window = 1;		 /* start with only sending one packet at a time.
							  * congestion window of 1, i.e. TCP starts
							  * in slow-start state	*/
	sk->cong_count = 0;
	sk->ssthresh = 0;
	sk->max_window = 0;
	sk->urginline = 0;
	sk->intr = 0;
	sk->linger = 0;
	sk->destroy = 0;
	sk->priority = 1;
	sk->shutdown = 0;
	sk->keepopen = 0;
	sk->zapped = 0;
	sk->done = 0;
	sk->ack_backlog = 0;
	sk->window = 0;
	sk->bytes_rcv = 0;
	sk->state = TCP_CLOSE;	// a freshly created socket starts in the closed state
	sk->dead = 0;
	sk->ack_timed = 0;
	sk->partial = NULL;
	sk->user_mss = 0;
	sk->debug = 0;

	/* Maximum number of bytes whose acknowledgement may be deferred. */
	/* this is how many unacked bytes we will accept for this socket.  */
	sk->max_unacked = 2048; /* needs to be at most 2 full packets. */

	/* Upper bound on datagrams sent but not yet acknowledged. */
	/* how many packets we should send before forcing an ack. 
	   if this is set to zero it is the same as sk->delay_acks = 0 */
	sk->max_ack_backlog = 0;
	sk->inuse = 0;
	sk->delay_acks = 0;
	skb_queue_head_init(&sk->write_queue);
	skb_queue_head_init(&sk->receive_queue);
	sk->mtu = 576;		// conservative MTU of 576, unlikely to fragment on most links
	sk->prot = prot;
	sk->sleep = sock->wait;
	sk->daddr = 0;
	sk->saddr = 0 /* ip_my_addr() */;
	sk->err = 0;
	sk->next = NULL;
	sk->pair = NULL;	/* NOTE(review): already set to NULL above; redundant */
	sk->send_tail = NULL;
	sk->send_head = NULL;
	sk->timeout = 0;
	sk->broadcast = 0;
	sk->localroute = 0;
	init_timer(&sk->timer);
	init_timer(&sk->retransmit_timer);
	sk->timer.data = (unsigned long)sk;
	sk->timer.function = &net_timer;
	skb_queue_head_init(&sk->back_log);
	sk->blog = 0;
	sock->data =(void *) sk;
	/* Template TCP header used when building outgoing segments. */
	sk->dummy_th.doff = sizeof(sk->dummy_th)/4;
	sk->dummy_th.res1=0;
	sk->dummy_th.res2=0;
	sk->dummy_th.urg_ptr = 0;
	sk->dummy_th.fin = 0;
	sk->dummy_th.syn = 0;
	sk->dummy_th.rst = 0;
	sk->dummy_th.psh = 0;
	sk->dummy_th.ack = 0;
	sk->dummy_th.urg = 0;
	sk->dummy_th.dest = 0;
	sk->ip_tos=0;
	sk->ip_ttl=64;
#ifdef CONFIG_IP_MULTICAST
	sk->ip_mc_loop=1;
	sk->ip_mc_ttl=1;
	*sk->ip_mc_name=0;
	sk->ip_mc_list=NULL;
#endif
  	
	/* Default wakeup callbacks for state change, data arrival, etc. */
	sk->state_change = def_callback1;
	sk->data_ready = def_callback2;
	sk->write_space = def_callback3;
	sk->error_report = def_callback1;

	if (sk->num) 
	{
	/*
	 * It assumes that any protocol which allows
	 * the user to assign a number at socket
	 * creation time automatically
	 * shares.
	 */
		put_sock(sk->num, sk);
		sk->dummy_th.source = ntohs(sk->num);
	}

	/* Give the chosen protocol a chance to finish initialisation. */
	if (sk->prot->init) 
	{
		err = sk->prot->init(sk);
		if (err != 0) 
		{
			destroy_sock(sk);
			return(err);
		}
	}
	return(0);
}
Esempio n. 25
0
static void
i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct device *dev = (struct device *)(irq2dev_map[irq]);
    struct i596_private *lp;
    short ioaddr;
    int boguscnt = 200;
    unsigned short status, ack_cmd = 0;

    if (dev == NULL) {
	printk ("i596_interrupt(): irq %d for unknown device.\n", irq);
	return;
    }

    if (i596_debug > 3) printk ("%s: i596_interrupt(): irq %d\n",dev->name, irq);

    if (dev->interrupt)
	printk("%s: Re-entering the interrupt handler.\n", dev->name);

    dev->interrupt = 1;

    ioaddr = dev->base_addr;

    lp = (struct i596_private *)dev->priv;

    while (lp->scb.status, lp->scb.command)
	if (--boguscnt == 0)
	    {
		printk("%s: i596 interrupt, timeout status %4.4x command %4.4x.\n", dev->name, lp->scb.status, lp->scb.command);
		break;
	    }
    status = lp->scb.status;

    if (i596_debug > 4)
	printk("%s: i596 interrupt, status %4.4x.\n", dev->name, status);

    ack_cmd = status & 0xf000;

    if ((status & 0x8000) || (status & 0x2000))
    {
	struct i596_cmd *ptr;

	if ((i596_debug > 4) && (status & 0x8000))
	    printk("%s: i596 interrupt completed command.\n", dev->name);
	if ((i596_debug > 4) && (status & 0x2000))
	    printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700);

	while ((lp->cmd_head != (struct i596_cmd *) I596_NULL) && (lp->cmd_head->status & STAT_C))
	{
	    ptr = lp->cmd_head;

	    lp->cmd_head = lp->cmd_head->next;
	    lp->cmd_backlog--;

	    switch ((ptr->command) & 0x7)
	    {
		case CmdTx:
		{
		    struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
		    struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1;

		    dev_kfree_skb(skb, FREE_WRITE);

		    if ((ptr->status) & STAT_OK)
		    {
	    		if (i596_debug >2) print_eth(skb->data);
		    }
		    else
		    {
			lp->stats.tx_errors++;
			if ((ptr->status) & 0x0020) lp->stats.collisions++;
			if (!((ptr->status) & 0x0040)) lp->stats.tx_heartbeat_errors++;
			if ((ptr->status) & 0x0400) lp->stats.tx_carrier_errors++;
			if ((ptr->status) & 0x0800) lp->stats.collisions++;
			if ((ptr->status) & 0x1000) lp->stats.tx_aborted_errors++;
		    }


		    ptr->next = (struct i596_cmd * ) I596_NULL;
		    kfree_s((unsigned char *)tx_cmd, (sizeof (struct tx_cmd) + sizeof (struct i596_tbd)));
		    break;
		}
		case CmdMulticastList:
		{
		    unsigned short count = *((unsigned short *) (ptr + 1));

		    ptr->next = (struct i596_cmd * ) I596_NULL;
		    kfree_s((unsigned char *)ptr, (sizeof (struct i596_cmd) + count + 2));
		    break;
		}
		case CmdTDR:
		{
		    unsigned long status = *((unsigned long *) (ptr + 1));

		    if (status & 0x8000)
		    {
			if (i596_debug > 3)
	    		    printk("%s: link ok.\n", dev->name);
		    }
		    else
		    {
			if (status & 0x4000)
	    		    printk("%s: Transceiver problem.\n", dev->name);
			if (status & 0x2000)
	    		    printk("%s: Termination problem.\n", dev->name);
			if (status & 0x1000)
	    		    printk("%s: Short circuit.\n", dev->name);

	    		printk("%s: Time %ld.\n", dev->name, status & 0x07ff);
		    }
		}
		default:
		    ptr->next = (struct i596_cmd * ) I596_NULL;

		lp->last_cmd = jiffies;
 	    }
	}

	ptr = lp->cmd_head;
	while ((ptr != (struct i596_cmd *) I596_NULL) && (ptr != lp->cmd_tail))
	{
	    ptr->command &= 0x1fff;
	    ptr = ptr->next;
	}

	if ((lp->cmd_head != (struct i596_cmd *) I596_NULL) && (dev->start)) ack_cmd |= CUC_START;
	lp->scb.cmd = lp->cmd_head;
    }

    if ((status & 0x1000) || (status & 0x4000))
    {
	if ((i596_debug > 4) && (status & 0x4000))
	    printk("%s: i596 interrupt received a frame.\n", dev->name);
	if ((i596_debug > 4) && (status & 0x1000))
	    printk("%s: i596 interrupt receive unit inactive %x.\n", dev->name, status & 0x0070);

	i596_rx(dev);

	if (dev->start) ack_cmd |= RX_START;
    }

    /* acknowledge the interrupt */

/*
    if ((lp->scb.cmd != (struct i596_cmd *) I596_NULL) && (dev->start)) ack_cmd | = CUC_START;
*/
    boguscnt = 100;
    while (lp->scb.status, lp->scb.command)
	if (--boguscnt == 0)
	    {
		printk("%s: i596 interrupt, timeout status %4.4x command %4.4x.\n", dev->name, lp->scb.status, lp->scb.command);
		break;
	    }
    lp->scb.command = ack_cmd;

    (void) inb (ioaddr+0x10);
    outb (4, ioaddr+0xf);
    outw (0, ioaddr+4);

    if (i596_debug > 4)
	printk("%s: exiting interrupt.\n", dev->name);

    dev->interrupt = 0;
    return;
}
Esempio n. 26
0
/*
 * ioctl handler for the parallel-printer (lp) driver.
 *
 * Adjusts per-minor tunables (timeouts, abort flags, IRQ), and copies
 * status/statistics back to user space.  Returns 0 on success or a
 * negative errno (-ENODEV for a bad/absent minor, -EPERM when an
 * operation needs superuser rights, -EINVAL for an unknown command).
 *
 * Fix: the debug printk formatted the unsigned long `arg` with %x;
 * it now uses %lx (matching format specifier and argument type).
 */
static int lp_ioctl(struct inode *inode, struct file *file,
		    unsigned int cmd, unsigned long arg)
{
	unsigned int minor = MINOR(inode->i_rdev);
	int retval = 0;

#ifdef LP_DEBUG
	printk(KERN_DEBUG "lp%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
#endif
	if (minor >= LP_NO)
		return -ENODEV;
	if ((LP_F(minor) & LP_EXIST) == 0)
		return -ENODEV;
	switch ( cmd ) {
		case LPTIME:
			LP_TIME(minor) = arg * HZ/100;
			break;
		case LPCHAR:
			LP_CHAR(minor) = arg;
			break;
		case LPABORT:
			if (arg)
				LP_F(minor) |= LP_ABORT;
			else
				LP_F(minor) &= ~LP_ABORT;
			break;
		case LPABORTOPEN:
			if (arg)
				LP_F(minor) |= LP_ABORTOPEN;
			else
				LP_F(minor) &= ~LP_ABORTOPEN;
			break;
		case LPCAREFUL:
			if (arg)
				LP_F(minor) |= LP_CAREFUL;
			else
				LP_F(minor) &= ~LP_CAREFUL;
			break;
		case LPWAIT:
			LP_WAIT(minor) = arg;
			break;
		case LPSETIRQ: {
			/* Switch the printer between polled (irq 0) and
			 * interrupt-driven operation. */
			int oldirq;
			int newirq = arg;
			struct lp_struct *lp = &lp_table[minor];

			if (!suser())
				return -EPERM;

			oldirq = LP_IRQ(minor);

			/* Allocate buffer now if we are going to need it */
			if (!oldirq && newirq) {
				lp->lp_buffer = (char *) kmalloc(LP_BUFFER_SIZE, GFP_KERNEL);
				if (!lp->lp_buffer)
					return -ENOMEM;
			}

			if (oldirq) {
				free_irq(oldirq, NULL);
			}
			if (newirq) {
				/* Install new irq */
				if ((retval = request_irq(newirq, lp_interrupt, SA_INTERRUPT, "printer", NULL))) {
					if (oldirq) {
						/* restore old irq; best effort —
						 * a failure here is not reported */
						request_irq(oldirq, lp_interrupt, SA_INTERRUPT, "printer", NULL);
					} else {
						/* We don't need the buffer */
						kfree_s(lp->lp_buffer, LP_BUFFER_SIZE);
						lp->lp_buffer = NULL;
					}
					return retval;
				}
			}
			if (oldirq && !newirq) {
				/* We don't need the buffer */
				kfree_s(lp->lp_buffer, LP_BUFFER_SIZE);
				lp->lp_buffer = NULL;
			}
			LP_IRQ(minor) = newirq;
			lp_reset(minor);
			break;
		}
		case LPGETIRQ:
			retval = verify_area(VERIFY_WRITE, (void *) arg,
			    sizeof(int));
		    	if (retval)
		    		return retval;
			memcpy_tofs((int *) arg, &LP_IRQ(minor), sizeof(int));
			break;
		case LPGETSTATUS:
			retval = verify_area(VERIFY_WRITE, (void *) arg,
			    sizeof(int));
		    	if (retval)
		    		return retval;
			else {
				int status = LP_S(minor);
				memcpy_tofs((int *) arg, &status, sizeof(int));
			}
			break;
		case LPRESET:
			lp_reset(minor);
			break;
		case LPGETSTATS:
			retval = verify_area(VERIFY_WRITE, (void *) arg,
			    sizeof(struct lp_stats));
		    	if (retval)
		    		return retval;
			else {
				memcpy_tofs((int *) arg, &LP_STAT(minor), sizeof(struct lp_stats));
				/* Only root may reset the statistics. */
				if (suser())
					memset(&LP_STAT(minor), 0, sizeof(struct lp_stats));
			}
			break;
 		case LPGETFLAGS:
 			retval = verify_area(VERIFY_WRITE, (void *) arg,
 			    sizeof(int));
 		    	if (retval)
 		    		return retval;
 			else {
 				int status = LP_F(minor);
				memcpy_tofs((int *) arg, &status, sizeof(int));
			}
			break;
		default:
			retval = -EINVAL;
	}
	return retval;
}
Esempio n. 27
0
/*
 *	Add a new route to a node, and in the process add the node and the
 *	neighbour if it is new.
 */
static int rose_add_node(struct rose_route_struct *rose_route, struct device *dev)
{
	struct rose_node  *rose_node, *rose_tmpn, *rose_tmpp;
	struct rose_neigh *rose_neigh;
	struct rose_route_struct dummy_route;
	unsigned long flags;
	int i;

	/* Look for an existing node entry matching address + mask. */
	for (rose_node = rose_node_list; rose_node != NULL; rose_node = rose_node->next)
		if ((rose_node->mask == rose_route->mask) && (rosecmpm(&rose_route->address, &rose_node->address, rose_route->mask) == 0))
			break;

	/* Look for an existing neighbour with this callsign on this device. */
	for (rose_neigh = rose_neigh_list; rose_neigh != NULL; rose_neigh = rose_neigh->next)
		if (ax25cmp(&rose_route->neighbour, &rose_neigh->callsign) == 0 && rose_neigh->dev == dev)
			break;

	if (rose_neigh == NULL) {
		/* Unknown neighbour: allocate and initialise a new one. */
		if ((rose_neigh = (struct rose_neigh *)kmalloc(sizeof(*rose_neigh), GFP_ATOMIC)) == NULL)
			return -ENOMEM;

		rose_neigh->callsign  = rose_route->neighbour;
		rose_neigh->digipeat  = NULL;
		rose_neigh->ax25      = NULL;
		rose_neigh->dev       = dev;
		rose_neigh->count     = 0;
		rose_neigh->use       = 0;
		rose_neigh->dce_mode  = 0;
		rose_neigh->number    = rose_neigh_no++;
		rose_neigh->restarted = 0;
		skb_queue_head_init(&rose_neigh->queue);
		rose_neigh->t0timer   = 0;
		rose_neigh->ftimer    = 0;
		init_timer(&rose_neigh->timer);

		if (rose_route->ndigis != 0) {
			/* NOTE(review): GFP_KERNEL here vs GFP_ATOMIC for the
			 * neighbour above — confirm this path may sleep. */
			if ((rose_neigh->digipeat = kmalloc(sizeof(ax25_digi), GFP_KERNEL)) == NULL) {
				kfree_s(rose_neigh, sizeof(*rose_neigh));
				return -ENOMEM;
			}

			rose_neigh->digipeat->ndigi      = rose_route->ndigis;
			rose_neigh->digipeat->lastrepeat = -1;

			for (i = 0; i < rose_route->ndigis; i++) {
				rose_neigh->digipeat->calls[i]    = rose_route->digipeaters[i];
				rose_neigh->digipeat->repeated[i] = 0;
			}
		}

		/* Link into the neighbour list with interrupts off. */
		save_flags(flags); cli();
		rose_neigh->next = rose_neigh_list;
		rose_neigh_list  = rose_neigh;
		restore_flags(flags);
	}

	/*
	 * This is a new node to be inserted into the list. Find where it needs
	 * to be inserted into the list, and insert it. We want to be sure
	 * to order the list in descending order of mask size to ensure that
	 * later when we are searching this list the first match will be the
	 * best match.
	 */
	if (rose_node == NULL) {
		rose_tmpn = rose_node_list;
		rose_tmpp = NULL;

		/* Walk until the first node with mask <= the new mask;
		 * the new node is inserted just before it. */
		while (rose_tmpn != NULL) {
			if (rose_tmpn->mask > rose_route->mask) {
				rose_tmpp = rose_tmpn;
				rose_tmpn = rose_tmpn->next;
			} else {
				break;
			}
		}

		/* create new node */
		if ((rose_node = (struct rose_node *)kmalloc(sizeof(*rose_node), GFP_ATOMIC)) == NULL)
			return -ENOMEM;

		rose_node->address = rose_route->address;
		rose_node->mask    = rose_route->mask;
		rose_node->count   = 1;
		rose_node->neighbour[0] = rose_neigh;

		save_flags(flags); cli();

		if (rose_tmpn == NULL) {
			if (rose_tmpp == NULL) {	/* Empty list */
				rose_node_list  = rose_node;
				rose_node->next = NULL;
			} else {
				rose_tmpp->next = rose_node;
				rose_node->next = NULL;
			}
		} else {
			if (rose_tmpp == NULL) {	/* 1st node */
				rose_node->next = rose_node_list;
				rose_node_list  = rose_node;
			} else {
				rose_tmpp->next = rose_node;
				rose_node->next = rose_tmpn;
			}
		}

		restore_flags(flags);

		rose_neigh->count++;

	} else if (rose_node->count < ROSE_MAX_ALTERNATE) {
		/* We have space, slot it in */
		rose_node->neighbour[rose_node->count] = rose_neigh;
		rose_node->count++;
		rose_neigh->count++;
	}
	/* NOTE(review): if the node is already full, a freshly created
	 * neighbour stays on the list with count 0 — confirm intended. */

	if (!rose_is_null(&rose_route->address))
	{
		/* Delete this neighbour from the dummy node 0000000000 */
		dummy_route = *rose_route;
		dummy_route.mask = 10;
		memset(&dummy_route.address, 0, sizeof(rose_address));
		rose_del_node(&dummy_route, dev);
	}
	
	return 0;
}
Esempio n. 28
0
/*
 * Remove from *chainptr the first firewall rule that matches `frwl` on
 * every field (addresses, masks, via, flags, port list, interface name).
 *
 * Runs with interrupts disabled while walking/modifying the chain.
 * Returns 0 when a rule was deleted, EINVAL (positive, per this
 * module's convention) when the chain is empty or no rule matched.
 */
static int del_from_chain(struct ip_fw *volatile*chainptr, struct ip_fw *frwl)
{
	struct ip_fw	*cur, *prev;
	unsigned short	nports, fports, idx;
	char		match, found;
	unsigned long	flags;

	save_flags(flags);
	cli();

	cur = *chainptr;

	if (cur == NULL)
	{
#ifdef DEBUG_IP_FIREWALL
		printk("ip_fw_ctl:  chain is empty\n");
#endif
		restore_flags(flags);
		return( EINVAL );
	}

	prev  = NULL;
	found = 0;

	while (!found && cur != NULL)
	{
		match = 1;

		/* All scalar fields must agree exactly. */
		if (cur->fw_src.s_addr  != frwl->fw_src.s_addr  ||
		    cur->fw_dst.s_addr  != frwl->fw_dst.s_addr  ||
		    cur->fw_smsk.s_addr != frwl->fw_smsk.s_addr ||
		    cur->fw_dmsk.s_addr != frwl->fw_dmsk.s_addr ||
		    cur->fw_via.s_addr  != frwl->fw_via.s_addr  ||
		    cur->fw_flg         != frwl->fw_flg)
			match = 0;

		/* The combined source+dest port lists must have the same
		 * length and identical entries. */
		nports = cur->fw_nsp + cur->fw_ndp;
		fports = frwl->fw_nsp + frwl->fw_ndp;
		if (nports != fports)
			match = 0;
		else if (nports != 0)
		{
			for (idx = 0; idx < nports && idx < IP_FW_MAX_PORTS; idx++)
				if (cur->fw_pts[idx] != frwl->fw_pts[idx])
					match = 0;
		}

		/* Interface names must agree too. */
		if (strncmp(cur->fw_vianame, frwl->fw_vianame, IFNAMSIZ) != 0)
			match = 0;

		if (match)
		{
			found = 1;
			if (prev)
			{
				/* Unlink from the middle of the chain. */
				prev->fw_next = cur->fw_next;
				kfree_s(cur, sizeof(*cur));
				cur = prev->fw_next;
			}
			else
			{
				/* Matched rule is the chain head. */
				*chainptr = cur->fw_next;
				kfree_s(cur, sizeof(*cur));
				cur = *chainptr;
			}
		}
		else
		{
			prev = cur;
			cur  = cur->fw_next;
		}
	}

	restore_flags(flags);

	return found ? 0 : EINVAL;
}
Esempio n. 29
0
/*
 * Free a fragment buffer of `size` bytes and subtract its size from the
 * global fragment-memory accounting counter (ip_frag_mem).
 */
extern __inline__ void frag_kfree_s(void *buf, int size)
{
	atomic_sub(size, &ip_frag_mem);
	kfree_s(buf, size);
}