Example no. 1
static void list_test(void)
{
	int i;
	struct data *node, *next;
	LIST_HEAD(lhead);

	for (i = 0; i < MAX_LIST; i++) {
		struct data *pdata;

		/* Check the allocation before touching the new node. */
		pdata = kmalloc(sizeof(struct data), GFP_KERNEL);
		if (!pdata)
			goto clean;
		pdata->i = i;
		list_add(&pdata->list, &lhead);
	}

	list_for_each_entry(node, &lhead, list)
		printk("list entry:%d\n", node->i);
clean:
	/* The _safe variant allows unlinking and freeing the current node. */
	list_for_each_entry_safe(node, next, &lhead, list) {
		list_del(&node->list);
		kfree(node);
	}
}
Example no. 2
/* Test if realserver is marked UP for a specific checker */
int
svr_checker_up(checker_id_t cid, real_server_t *rs)
{
	element e;
	list l = rs->failed_checkers;
	checker_id_t *id;

	/*
	 * We assume there are not too many checkers per
	 * real server, so we consider this lookup to be
	 * O(1).
	 */
	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		id = ELEMENT_DATA(e);
		if (*id == cid)
			return 0;
	}

	return 1;
}
static void __linkwatch_run_queue(int urgent_only)
{
	struct net_device *dev;
	LIST_HEAD(wrk);

	/*
	 * Limit the number of linkwatch events to one
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket.  This limit does not apply to up events
	 * while the device qdisc is down.
	 */
	if (!urgent_only)
		linkwatch_nextevent = jiffies + HZ;
	/* Limit wrap-around effect on delay. */
	else if (time_after(linkwatch_nextevent, jiffies + HZ))
		linkwatch_nextevent = jiffies;

	clear_bit(LW_URGENT, &linkwatch_flags);

	spin_lock_irq(&lweventlist_lock);
	list_splice_init(&lweventlist, &wrk);

	while (!list_empty(&wrk)) {

		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
		list_del_init(&dev->link_watch_list);

		if (urgent_only && !linkwatch_urgent_event(dev)) {
			list_add_tail(&dev->link_watch_list, &lweventlist);
			continue;
		}
		spin_unlock_irq(&lweventlist_lock);
		linkwatch_do_dev(dev);
		spin_lock_irq(&lweventlist_lock);
	}

	if (!list_empty(&lweventlist))
		linkwatch_schedule_work(0);
	spin_unlock_irq(&lweventlist_lock);
}
Example no. 4
/**
 * Fetch the result returned by the remote side.
 */
int distdb_fetch_result_remote(struct DISTDB_SQL_RESULT * reslt,
		char ** table[], int ifpeek)
{
	int i;
	struct sql_result_plain_text * ptext;
	pthread_mutex_lock(&reslt->lock);
	if (LIST_ISEMPTY(reslt->sql_result))
	{
		if (reslt->ref && !ifpeek) // more results will keep coming in
		{
			pthread_cond_wait(&reslt->waitcond, &reslt->lock);
		}
		else if (reslt->ref && ifpeek)
		{
			pthread_mutex_unlock(&reslt->lock);
			return 1;
		}
		else if( reslt->ref ==0)
		{
			pthread_mutex_unlock(&reslt->lock);
			return -1;
		}
	}
	if (reslt->ref == 0) {
		pthread_mutex_unlock(&reslt->lock);
		return -1;
	}

	if (reslt->last)
		free(reslt->last);

	ptext = LIST_HEAD(reslt->sql_result.head,sql_result_plain_text,resultlist);
	LIST_DELETE_AT(&ptext->resultlist);
	reslt->last = ptext;

	for (i = 0; i < reslt->colums; ++i)
	{
		reslt->last_table[i] = ptext->plaindata + ptext->strings[i].offset;
	}

	pthread_mutex_unlock(&reslt->lock);
	return 0;
}
Example no. 5
void
vrrp_print_stats(void)
{
	FILE *file;

	file = fopen("/tmp/keepalived.stats", "w");
	if (!file)
		return;

	list l = vrrp_data->vrrp;
	element e;
	vrrp_t *vrrp;

	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		vrrp = ELEMENT_DATA(e);
		fprintf(file, "VRRP Instance: %s\n", vrrp->iname);
		fprintf(file, "  Advertisements:\n");
		fprintf(file, "    Received: %d\n", vrrp->stats->advert_rcvd);
		fprintf(file, "    Sent: %d\n", vrrp->stats->advert_sent);
		fprintf(file, "  Became master: %d\n", vrrp->stats->become_master);
		fprintf(file, "  Released master: %d\n",
			vrrp->stats->release_master);
		fprintf(file, "  Packet Errors:\n");
		fprintf(file, "    Length: %d\n", vrrp->stats->packet_len_err);
		fprintf(file, "    TTL: %d\n", vrrp->stats->ip_ttl_err);
		fprintf(file, "    Invalid Type: %d\n",
			vrrp->stats->invalid_type_rcvd);
		fprintf(file, "    Advertisement Interval: %d\n",
			vrrp->stats->advert_interval_err);
		fprintf(file, "    Address List: %d\n",
			vrrp->stats->addr_list_err);
		fprintf(file, "  Authentication Errors:\n");
		fprintf(file, "    Invalid Type: %d\n",
			vrrp->stats->invalid_authtype);
		fprintf(file, "    Type Mismatch: %d\n",
			vrrp->stats->authtype_mismatch);
		fprintf(file, "    Failure: %d\n",
			vrrp->stats->auth_failure);
		fprintf(file, "  Priority Zero:\n");
		fprintf(file, "    Received: %d\n", vrrp->stats->pri_zero_rcvd);
		fprintf(file, "    Sent: %d\n", vrrp->stats->pri_zero_sent);
	}
	fclose(file);
}
static int __init print_processes_backwards(void)
{
    LIST_HEAD(data_stack);
    int result = 0; // OK

    stack_entry_t *task_entry = NULL;
    struct task_struct *task = NULL;
    char *task_name = NULL;

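    /* for_each_process() walks the global task list; real code normally
     * holds rcu_read_lock() (or tasklist_lock) around this walk. */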
    for_each_process (task) {
        task_name = kmalloc(TASK_COMM_LEN, GFP_KERNEL);

        if (task_name) {
            task_entry = create_stack_entry(task_name);
            if (task_entry) {
                task_name = get_task_comm(task_name, task);
                stack_push(&data_stack, task_entry);
            } else {
                kfree(task_name);
                result = -ENOMEM;
                break;
            }
        } else {
            result = -ENOMEM;
            break;
        }
    }

    while (!stack_empty(&data_stack)) {
        task_entry = stack_pop(&data_stack);
        task_name = STACK_ENTRY_DATA(task_entry, char*);
        delete_stack_entry(task_entry);

        if (result != -ENOMEM) {
            printk(KERN_ALERT "task name = %s\n", task_name);
        }
        kfree(task_name);
    }

    return result;
}
Example no. 7
static void
nfs_flushd(struct rpc_task *task)
{
	struct nfs_server	*server;
	struct nfs_reqlist	*cache;
	LIST_HEAD(head);

	dprintk("NFS: %4d flushd starting\n", task->tk_pid);
	server = (struct nfs_server *) task->tk_calldata;
	cache = server->rw_requests;

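	/* Rescan until none of the LRU lists yields timed-out requests;
	 * the request-list lock is dropped while each batch is processed. */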
	for(;;) {
		spin_lock(&nfs_wreq_lock);
		if (nfs_scan_lru_dirty_timeout(server, &head)) {
			spin_unlock(&nfs_wreq_lock);
			nfs_flush_list(&head, server->wpages, FLUSH_AGING);
			continue;
		}
		if (nfs_scan_lru_read_timeout(server, &head)) {
			spin_unlock(&nfs_wreq_lock);
			nfs_pagein_list(&head, server->rpages);
			continue;
		}
#ifdef CONFIG_NFS_V3
		if (nfs_scan_lru_commit_timeout(server, &head)) {
			spin_unlock(&nfs_wreq_lock);
			nfs_commit_list(&head, FLUSH_AGING);
			continue;
		}
#endif
		spin_unlock(&nfs_wreq_lock);
		break;
	}

	dprintk("NFS: %4d flushd back to sleep\n", task->tk_pid);
	if (task->tk_action) {
		task->tk_timeout = NFS_FLUSHD_TIMEOUT;
		cache->runat = jiffies + task->tk_timeout;
		rpc_sleep_on(&flushd_queue, task, NULL, NULL);
	}
}
Example no. 8
/*
 * Initialization. Try all known PCI access methods. Note that we support
 * using both PCI BIOS and direct access: in such cases, we use I/O ports
 * to access config space, but we still keep BIOS order of cards to be
 * compatible with 2.0.X. This should go away some day.
 */
static int __init pcibios_init(void)
{
	resource_size_t io_offset, mem_offset;
	LIST_HEAD(resources);
	struct pci_bus *bus;

	ioport_resource.start	= 0xA0000000;
	ioport_resource.end	= 0xDFFFFFFF;
	iomem_resource.start	= 0xA0000000;
	iomem_resource.end	= 0xDFFFFFFF;

	if (insert_resource(&iomem_resource, &pci_iomem_resource) < 0)
		panic("Unable to insert PCI IOMEM resource\n");
	if (insert_resource(&ioport_resource, &pci_ioport_resource) < 0)
		panic("Unable to insert PCI IOPORT resource\n");

	if (!pci_probe)
		return 0;

	if (pci_check_direct() < 0) {
		printk(KERN_WARNING "PCI: No PCI bus detected\n");
		return 0;
	}

	printk(KERN_INFO "PCI: Probing PCI hardware [mempage %08x]\n",
	       MEM_PAGING_REG);

	io_offset = pci_ioport_resource.start -
	    (pci_ioport_resource.start & 0x00ffffff);
	mem_offset = pci_iomem_resource.start -
	    ((pci_iomem_resource.start & 0x03ffffff) | MEM_PAGING_REG);

	pci_add_resource_offset(&resources, &pci_ioport_resource, io_offset);
	pci_add_resource_offset(&resources, &pci_iomem_resource, mem_offset);
	bus = pci_scan_root_bus(NULL, 0, &pci_direct_ampci, NULL, &resources);
	if (!bus)
		return 0;
	pcibios_resource_survey();
	pci_bus_add_devices(bus);
	return 0;
}
static void __linkwatch_run_queue(int urgent_only)
{
	struct net_device *dev;
	LIST_HEAD(wrk);

	/*
	 * Limit the number of linkwatch events to one
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket.  This limit does not apply to up events
	 * while the device qdisc is down.
	 */
	if (!urgent_only)
		linkwatch_nextevent = jiffies + HZ;
	/* Limit wrap-around effect on delay. */
	else if (time_after(linkwatch_nextevent, jiffies + HZ))
		linkwatch_nextevent = jiffies;

	clear_bit(LW_URGENT, &linkwatch_flags);

	spin_lock_irq(&lweventlist_lock);
	list_splice_init(&lweventlist, &wrk);

	while (!list_empty(&wrk)) {

		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
		list_del_init(&dev->link_watch_list);

		if (urgent_only && !linkwatch_urgent_event(dev)) {
			list_add_tail(&dev->link_watch_list, &lweventlist);
			continue;
		}
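		/* The per-device work is done with the event-list lock dropped,
		 * then the lock is re-taken to continue draining the list. */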
		spin_unlock_irq(&lweventlist_lock);
		linkwatch_do_dev(dev);
		spin_lock_irq(&lweventlist_lock);
	}

	if (!list_empty(&lweventlist))
		linkwatch_schedule_work(0);
	spin_unlock_irq(&lweventlist_lock);
}
Example no. 10
static int expand_single_string(struct token *tok, struct token *next,
				struct string **string_ret)
{
	int ret;
	LIST_HEAD(string_list);
	ret = expand_params_and_word_split(tok, next, &string_list);
	if (ret)
		goto out_free_string_list;
	ret = glue_strings(&string_list);
	if (ret)
		goto out_free_string_list;
	if (!mysh_filename_expansion_disabled) {
		ret = do_filename_expansion(&string_list);
		if (ret)
			goto out_free_string_list;
	}
	if (list_empty(&string_list))
		*string_ret = NULL;
	else if (list_is_singular(&string_list))
		*string_ret = list_entry(string_list.next, struct string, list);
	else {
Example no. 11
int main()
{
    LIST_HEAD(list);
    append_node(&list);
    append_node(&list);

    struct node *pos;
    list_for_each_entry(pos, &list, embedded_head)
        ___sl_plot(NULL);

    ___sl_plot(NULL);

    // insane -- better to use list_for_each_entry_safe()
    while (&list != list.prev) {
        list.next = list.prev->prev;
        free(list_entry(list.prev, struct node, embedded_head));
        list.prev = list.next;
    }

    return 0;
}
Example no. 12
static int
vrrp_timer_vrid_timeout(const int fd)
{
	vrrp_rt *vrrp;
	element e;
	list l = &vrrp_data->vrrp_index_fd[fd%1024 + 1];
	TIMEVAL timer;
	int vrid = 0;

	/* Multiple instances on the same interface */
	TIMER_RESET(timer);
	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		vrrp = ELEMENT_DATA(e);
		if (timer_cmp(vrrp->sands, timer) < 0 ||
		    TIMER_ISNULL(timer)) {
			timer = timer_dup(vrrp->sands);
			vrid = vrrp->vrid;
		}
	}
	return vrid;
}
Example no. 13
static void take_over_work(struct ehca_comp_pool *pool, int cpu)
{
    struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
    LIST_HEAD(list);
    struct ehca_cq *cq;
    unsigned long flags_cct;

    spin_lock_irqsave(&cct->task_lock, flags_cct);

    list_splice_init(&cct->cq_list, &list);

    while (!list_empty(&list)) {
        /* Entries were spliced onto the local list above, so take them
         * from there rather than from the (now empty) cct->cq_list. */
        cq = list_entry(list.next, struct ehca_cq, entry);

        list_del(&cq->entry);
        __queue_comp_task(cq, this_cpu_ptr(pool->cpu_comp_tasks));
    }

    spin_unlock_irqrestore(&cct->task_lock, flags_cct);

}
Example no. 14
File: battle.c Project: phoboz/yz
static void draw_battle_target(
    BATTLE *battle,
    BATTLE_ACTOR *ba
)
{
    int x, y;
    STEP_POINT *p;

    for (p = (STEP_POINT *) LIST_HEAD(&ba->targetList);
            p != NULL;
            p = (STEP_POINT *) LIST_NEXT(&p->listNode)) {

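        /* Convert the grid position to screen coordinates, offset by the
         * current world scroll position. */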
        x = p->i * BMARKER_WIDTH - battle->world->x;
        y = p->j * BMARKER_HEIGHT - battle->world->y;

        draw_sprite(x, y,
                    TARGET_MARKER, battle->bmarker,
                    0, 0, window_width, window_height);

    }
}
Example no. 15
static void
rpc_timeout_upcall_queue(struct work_struct *work)
{
	LIST_HEAD(free_list);
	struct rpc_pipe *pipe =
		container_of(work, struct rpc_pipe, queue_timeout.work);
	void (*destroy_msg)(struct rpc_pipe_msg *);
	struct dentry *dentry;

	spin_lock(&pipe->lock);
	destroy_msg = pipe->ops->destroy_msg;
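	/* With no readers left, move all queued messages onto free_list so
	 * they can be purged below with -ETIMEDOUT. */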
	if (pipe->nreaders == 0) {
		list_splice_init(&pipe->pipe, &free_list);
		pipe->pipelen = 0;
	}
	dentry = dget(pipe->dentry);
	spin_unlock(&pipe->lock);
	rpc_purge_list(dentry ? &RPC_I(dentry->d_inode)->waitq : NULL,
			&free_list, destroy_msg, -ETIMEDOUT);
	dput(dentry);
}
Example no. 16
void kmem_cache_reap(struct kmem_cache *cache)
{
	LIST_HEAD(list);

	const bool enabled = spin_lock_irqsave(&cache->lock);

	list_splice(&cache->free_list, &list);
	spin_unlock_irqrestore(&cache->lock, enabled);

	for (struct list_head *ptr = list.next; ptr != &list;) {
		struct kmem_slab *slab =
			LIST_ENTRY(ptr, struct kmem_slab, link);
		struct page *pages = slab->pages;

		ptr = ptr->next;
		if (cache->ops->destroy)
			cache->ops->destroy(cache, slab);

		free_pages(pages, cache->order);
	}
}
Example no. 17
/*
 * General routine to allocate a hash table.
 */
void *
hashinit(int elements, int type, u_long *hashmask)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad cnt");
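	/* Round hashsize down to the largest power of two <= elements. */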
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	MALLOC(hashtbl, struct generic *, 
		(u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK|M_ZERO);
	if (hashtbl != NULL) {
		for (i = 0; i < hashsize; i++)
			LIST_INIT(&hashtbl[i]);
		*hashmask = hashsize - 1;
	}
	return (hashtbl);
}
Example no. 18
int main()
{
    // NOTE: two nodes should be enough to trigger DLS abstraction by default
    //       and consequently the spurious memory leak in the end
    LIST_HEAD(list);
    append_node(&list);
    append_node(&list);

    // plot heap in each iteration
    struct node *pos;
    list_for_each_entry(pos, &list, embedded_head)
        ___sl_plot(NULL);

    // plot heap after list_for_each_entry() -- an off-value should be there
    ___sl_plot(NULL);

    // use list_entry()
    free(list_entry(list.next, struct node, embedded_head));
    free(list_entry(list.prev, struct node, embedded_head));
    return 0;
}
Example no. 19
/*
 * General routine to allocate a prime number sized hash table.
 */
void *
phashinit(int elements, struct malloc_type *type, u_long *nentries)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	KASSERT(elements > 0, ("%s: bad elements", __func__));
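	/* Pick the largest entry in primes[] not exceeding elements,
	 * clamped to the first and last entries of the table. */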
	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*nentries = hashsize;
	return (hashtbl);
}
Example no. 20
/* Add/Delete a list of IP routes */
void
netlink_rtlist(list rt_list, int cmd)
{
	ip_route_t *iproute;
	element e;

	/* No routes to add */
	if (LIST_ISEMPTY(rt_list))
		return;

	for (e = LIST_HEAD(rt_list); e; ELEMENT_NEXT(e)) {
		iproute = ELEMENT_DATA(e);
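		/* Add only routes not yet installed; delete only routes that are. */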
		if ((cmd == IPROUTE_ADD && !iproute->set) ||
		    (cmd == IPROUTE_DEL && iproute->set)) {
			if (netlink_route(iproute, cmd) > 0)
				iproute->set = (cmd == IPROUTE_ADD);
			else
				iproute->set = false;
		}
	}
}
Example no. 21
/* All interfaces are UP in the same group */
int
vrrp_sync_group_up(vrrp_sgroup * vgroup)
{
	vrrp_rt *vrrp;
	element e;
	list l = vgroup->index_list;
	int is_up = 0;

	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		vrrp = ELEMENT_DATA(e);
		if (VRRP_ISUP(vrrp))
			is_up++;
	}

	if (is_up == LIST_SIZE(vgroup->index_list)) {
		log_message(LOG_INFO, "Kernel is reporting: Group(%s) UP"
			       , GROUP_NAME(vgroup));
		return 1;
	}
	return 0;
}
Example no. 22
void
put_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
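	/* atomic_dec_and_lock() takes i_lock only when the refcount drops to zero. */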
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		LIST_HEAD(free_me);

		put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&free_me);
	}
}
Example no. 23
static int
vrrp_timer_vrid_timeout(const int fd)
{
	vrrp_t *vrrp;
	element e;
	list l = &vrrp_data->vrrp_index_fd[fd%1024 + 1];
	timeval_t timer;
	int vrid = 0;

	/* Multiple instances on the same interface */
	timer_reset(timer);
	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		vrrp = ELEMENT_DATA(e);
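		/* Remember the vrid of the instance whose timer expires first. */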
		if (timer_cmp(vrrp->sands, timer) < 0 ||
		    timer_isnull(timer)) {
			timer = timer_dup(vrrp->sands);
			vrid = vrrp->vrid;
		}
	}
	return vrid;
}
Example no. 24
/* Check if a vsg entry is in new data */
static int
vsge_exist(virtual_server_group_entry *vsg_entry, list l)
{
	element e;
	virtual_server_group_entry *vsge;

	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		vsge = ELEMENT_DATA(e);
		if (VSGE_ISEQ(vsg_entry, vsge)) {
			/*
			 * If the vsge exists, this entry
			 * is alive, since only rs entries
			 * change alive state.
			 */
			SET_ALIVE(vsge);
			return 1;
		}
	}

	return 0;
}
Example no. 25
static void cli_complete(char *line)
{
	const HIST_ENTRY *hist;
	const char *c;
	LIST_HEAD(msgs);

	if (line == NULL) {
		printf("\n");
		cli_exit();
		exit(0);
	}

	line = cli_append_multiline(line);
	if (line == NULL)
		return;

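	/* Ignore input that consists only of whitespace. */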
	for (c = line; *c != '\0'; c++)
		if (!isspace(*c))
			break;
	if (*c == '\0')
		return;

	if (!strcmp(line, "quit")) {
		cli_exit();
		exit(0);
	}

	/* avoid duplicate history entries */
	hist = history_get(history_length);
	if (hist == NULL || strcmp(hist->line, line))
		add_history(line);

	parser_init(state, &msgs);
	scanner_push_buffer(scanner, &indesc_cli, line);
	nft_run(scanner, state, &msgs);
	erec_print_list(stdout, &msgs);
	xfree(line);
	cache_release();
	iface_cache_release();
}
static NV_STATUS uvm_unmap_external_allocation(uvm_va_space_t *va_space, NvU64 base, const NvProcessorUuid *gpu_uuid)
{
    uvm_va_range_t *va_range;
    uvm_gpu_t *gpu = NULL;
    NV_STATUS status = NV_OK;
    LIST_HEAD(deferred_free_list);

    // TODO: Bug 1799173: Consider a va_range lock for external ranges so we can
    //       do the unmap in read mode.
    uvm_va_space_down_write(va_space);

    va_range = uvm_va_range_find(va_space, base);
    if (!va_range || va_range->type != UVM_VA_RANGE_TYPE_EXTERNAL || va_range->node.start != base) {
        status = NV_ERR_INVALID_ADDRESS;
        goto out;
    }

    gpu = uvm_va_space_get_gpu_by_uuid(va_space, gpu_uuid);
    if (!gpu || !uvm_va_range_ext_gpu_map(va_range, gpu)) {
        status = NV_ERR_INVALID_DEVICE;
        goto out;
    }

    // Retain the GPU which maps the allocation because it's the parent of
    // dup_handle. The owning GPU (if any) is retained internally by the
    // deferred free layer.
    uvm_gpu_retain(gpu);

    uvm_ext_gpu_map_destroy(va_range, gpu, &deferred_free_list);

out:
    uvm_va_space_up_write(va_space);

    if (status == NV_OK) {
        uvm_deferred_free_object_list(&deferred_free_list);
        uvm_gpu_release(gpu);
    }

    return status;
}
Example no. 27
static int my_init(void)
{
   int result, i;
   dev_t dev = 0;
   LIST_HEAD(head);

   printk("DEVICE GETTING INITIALIZED:\n");

   if (dev_major) {
      dev = MKDEV(dev_major, dev_minor);
      result = register_chrdev_region(dev, NUM_DEVICES, MYDEV_NAME);
   } else {
      result = alloc_chrdev_region(&dev, dev_minor, NUM_DEVICES,
               MYDEV_NAME);
      dev_major = MAJOR(dev);
   }
   if (result < 0) {
      printk(KERN_WARNING "DEVICE CAN'T GET A MAJOR NUMBER %d\n", dev_major);
      return result;
   }
   printk("DEVICE MODULE REGISTERED AND ITS MAJOR NUMBER IS:%d\n", dev_major);
   dev_devices = kmalloc(NUM_DEVICES*sizeof(struct asp_mycdrv), GFP_KERNEL);
   if (!dev_devices) {
      result = -ENOMEM;
      printk("FAILURE:MALLOC FUNC\n");
      unregister_chrdev_region(dev, NUM_DEVICES);
      return result;
   }
   memset(dev_devices, 0, NUM_DEVICES * sizeof(struct asp_mycdrv));
   
   foo_class = class_create(THIS_MODULE, "my_class");
   sema_init(&sem_d, 1);
   for (i = 0; i <  NUM_DEVICES; i++) {
      dev_devices[i].ramdisk = kmalloc(ramdisk_size, GFP_KERNEL);
      dev_devices[i].devNo=i;
      sema_init(&dev_devices[i].sem, 1);
      device_setup_cdev(&dev_devices[i], i);
      device_create(foo_class, NULL, MKDEV(MAJOR(dev), MINOR(dev) + i) , NULL, "mycdrv%d", i);
      list_add(&dev_devices[i].list, &head);
      printk("set up the %dth device.\n",i);
   }
   return 0;
 }
Example no. 28
/* add or remove _alive_ real servers from a virtual server */
static void
perform_quorum_state(virtual_server_t *vs, int add)
{
	element e;
	real_server_t *rs;

	if (LIST_ISEMPTY(vs->rs))
		return;

	log_message(LOG_INFO, "%s the pool for VS %s"
			    , add?"Adding alive servers to":"Removing alive servers from"
			    , FMT_VS(vs));
	for (e = LIST_HEAD(vs->rs); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);
		if (!ISALIVE(rs)) /* We only handle alive servers */
			continue;
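		/* rs->alive is cleared around the ADD and restored afterwards;
		 * the ipvs_cmd() wrapper apparently keys off this flag when
		 * building the request. */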
		if (add)
			rs->alive = 0;
		ipvs_cmd(add?LVS_CMD_ADD_DEST:LVS_CMD_DEL_DEST, vs, rs);
		rs->alive = 1;
	}
}
Example no. 29
void CondSignal (struct Cond *cond)
{
	struct Process *proc;
	
	if (current_process == NULL)
		return;
	
	
	DisableInterrupts();
	
	proc = LIST_HEAD (&cond->blocked_list);
	
	if (proc != NULL)
	{
		LIST_REM_HEAD (&cond->blocked_list, blocked_entry);
		proc->state = PROC_STATE_READY;
		SchedReady (proc);
		Reschedule();
	}

	EnableInterrupts();
}
Example no. 30
static void
rpc_timeout_upcall_queue(struct work_struct *work)
{
	LIST_HEAD(free_list);
	struct rpc_inode *rpci =
		container_of(work, struct rpc_inode, queue_timeout.work);
	struct inode *inode = &rpci->vfs_inode;
	void (*destroy_msg)(struct rpc_pipe_msg *);

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL) {
		spin_unlock(&inode->i_lock);
		return;
	}
	destroy_msg = rpci->ops->destroy_msg;
	if (rpci->nreaders == 0) {
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
	}
	spin_unlock(&inode->i_lock);
	rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}