Example #1
float
MinMaxCore (graph_t* gp1, graph_t* gp2, int depth){
	float min_max;
	pathArray_t* pA1 = XMALLOC(pathArray_t, 1);
	pathArray_t* pA2 = XMALLOC(pathArray_t, 1);

	initPathArray(pA1, 32);
	initPathArray(pA2, 32);

	/* MOLECULE 1 */
	hnode_t* hiter = gp1->head;
	while (hiter) {
		DFS_Search(gp1, hiter, depth, pA1, WITH_MULT);
		hiter = list_next_entry(hnode_t, hiter);
	}

	/* MOLECULE 2 */
	hiter = gp2->head;
	while (hiter) {
		DFS_Search(gp2, hiter, depth, pA2, WITH_MULT);
		hiter = list_next_entry(hnode_t, hiter);
	}

	/* RESULTS */
	displayPathArray(pA1);
	printf("\n");
	displayPathArray(pA2);

	min_max = commonPathsWithMult(pA1, pA2);

	freePathArray(pA1);
	freePathArray(pA2);

	return min_max;
}
Example #2
struct ntfs_mp *idx_blocks2mpl(const struct nhr_idx *idx)
{
	const struct nhr_idx_node *idxn;
	struct ntfs_mp *mp_buf, *mp;
	unsigned mp_buf_sz = 2;	/* Optimized for compact indexes */

	idxn = list_first_entry(&idx->nodes, typeof(*idxn), list);
	while (&idxn->list != &idx->nodes && idxn->vcn < 0)
		idxn = list_next_entry(idxn, list);

	if (&idxn->list == &idx->nodes)	/* No blocks */
		return NULL;
	if (idxn->vcn != 0)		/* Wrong start block */
		return NULL;

	mp_buf = malloc(mp_buf_sz * sizeof(*mp));
	mp = mp_buf;

	mp->vcn = 0;
	mp->lcn = idxn->lcn;
	mp->clen = 1;

	for (idxn = list_next_entry(idxn, list);
	     &idxn->list != &idx->nodes;
	     idxn = list_next_entry(idxn, list)) {
		if (mp->vcn + mp->clen != idxn->vcn) {
			free(mp_buf);
			return NULL;
		}

		if (mp->lcn + mp->clen == idxn->lcn) {
			mp->clen++;
		} else {
			mp++;
			mp->vcn = idxn->vcn;
			mp->lcn = idxn->lcn;
			mp->clen = 1;

			if ((mp - mp_buf) + 1 == mp_buf_sz) {
				mp_buf_sz += 4;
				mp_buf = realloc(mp_buf, mp_buf_sz * sizeof(*mp));
				mp = mp_buf + mp_buf_sz - 4 - 1;
			}
		}
	}

	/* Set end marker */
	mp++;
	mp->vcn = 0;
	mp->lcn = 0;
	mp->clen = 0;

	return mp_buf;
}
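
idx_blocks2mpl() signals the end of the returned run list with a zero-length mapping pair rather than an explicit count. A minimal consumer sketch, assuming the vcn/lcn/clen fields are plain integers; print_mpl() is a hypothetical helper, not part of the original tree:

#include <stdio.h>

/* Hypothetical: walk the mapping pairs until the clen == 0 end marker
 * written by idx_blocks2mpl(). */
static void print_mpl(const struct ntfs_mp *mpl)
{
	const struct ntfs_mp *mp;

	for (mp = mpl; mp->clen != 0; ++mp)
		printf("VCN %llu -> LCN %llu, %llu cluster(s)\n",
		       (unsigned long long)mp->vcn,
		       (unsigned long long)mp->lcn,
		       (unsigned long long)mp->clen);
}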
Example #3
/*
 * Get the next readable area.
 * Returns -1 if there is no data to read, 0 if data is available.
 */
int spy_rw_buffer_next_readable(spy_rw_buffer_t *buffer, char **buf, size_t *size)
{
    spy_mem_block_t *mem_block;

    // no data to read
    if (buffer->read_pos == buffer->write_pos)
        return -1;

    assert(buffer->read_block);

    // jump to next mem_block
    if (buffer->read_base + buffer->read_block->size
            == buffer->read_pos) {

        mem_block = list_next_entry(buffer->read_block, list);
        // assert ( (void*)mem_block != (void*)&buffer->mem_blocks );

        buffer->read_base  += buffer->read_block->size;
        buffer->read_block = mem_block;
    }

    *buf = buffer->read_block->buf + buffer->read_pos - buffer->read_base;
    *size = MIN(buffer->read_base + buffer->read_block->size -
                buffer->read_pos, buffer->write_pos - buffer->read_pos);

    return 0;
}
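
A possible calling pattern for the function above (a sketch only: it assumes the caller advances read_pos itself, and handle_data() is a hypothetical placeholder):

#include <stddef.h>

extern void handle_data(const char *data, size_t len);  /* placeholder */

/* Drain every contiguous readable area currently in the buffer. */
static void drain_buffer(spy_rw_buffer_t *buffer)
{
    char   *area;
    size_t  len;

    while (spy_rw_buffer_next_readable(buffer, &area, &len) == 0) {
        handle_data(area, len);   /* process the contiguous chunk */
        buffer->read_pos += len;  /* consume what was just handled */
    }
}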
Example #4
/* returns the number of bytes read */
size_t spy_rw_buffer_read_n(spy_rw_buffer_t *buffer, char *buf, size_t size)
{
    spy_mem_block_t *mem_block;

    size_t data_len = buffer->write_pos - buffer->read_pos;
    size_t left     = size;
    size_t readable = 0, nread = 0;

    while (left > 0 && data_len > 0) {
        // jump to next mem block
        if (buffer->read_base + buffer->read_block->size == buffer->read_pos) {
            mem_block = list_next_entry(buffer->read_block, list);

            buffer->read_base  += buffer->read_block->size;
            buffer->read_block  = mem_block;
        }

        // current block readable size
        readable = MIN(buffer->write_pos, buffer->read_base + buffer->read_block->size)
                   - buffer->read_pos;

        nread = MIN(readable, left);
        memcpy((void*)(buf + size - left),
               (void*)(buffer->read_block->buf + buffer->read_pos - buffer->read_base),
               nread);

        buffer->read_pos += nread;
        data_len         -= nread;
        left             -= nread;
    }

    return size - left;
}
Example #5
void spy_rw_buffer_reset_read(spy_rw_buffer_t *buffer, size_t pos)
{
    spy_mem_block_t *mem_block;

    assert(pos <= buffer->write_pos);

    buffer->read_block = NULL;
    buffer->read_base  = 0;
    buffer->read_pos   = 0;

    if (list_empty(&buffer->mem_blocks)) {
        assert(pos == 0);
        return;
    }

    mem_block = list_first_entry(&buffer->mem_blocks, spy_mem_block_t, list);

    while (buffer->read_base + mem_block->size < pos) {
        buffer->read_base += mem_block->size;

        mem_block = list_next_entry(mem_block, list);
    }

    buffer->read_block = mem_block;
    buffer->read_pos   = pos;
}
Example #6
static struct request *
noop_latter_request(struct request_queue *q, struct request *rq)
{
	struct noop_data *nd = q->elevator->elevator_data;

	if (rq->queuelist.next == &nd->queue)
		return NULL;
	return list_next_entry(rq, queuelist);
}
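
Most of the kernel snippets in this list rely on list_next_entry() from include/linux/list.h. Roughly (paraphrased, not copied from any of the sources above), it resolves the next embedded list_head back to its containing structure via list_entry():

/* Paraphrase of the kernel helper used throughout these examples:
 * step from one entry to the next by following the embedded list_head. */
#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

Because the macro has no notion of where the list head is, callers such as Example #6 open-code the end-of-list test (rq->queuelist.next == &nd->queue) before dereferencing the result.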
Example #7
static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
					    struct list_head *stack,
					    struct list_head *top,
					    struct list_head *secondary_spawns)
{
	struct crypto_spawn *spawn, *n;

	spawn = list_first_entry_or_null(stack, struct crypto_spawn, list);
	if (!spawn)
		return NULL;

	n = list_next_entry(spawn, list);

	if (spawn->alg && &n->list != stack && !n->alg)
		n->alg = (n->list.next == stack) ? alg :
			 &list_next_entry(n, list)->inst->alg;

	list_move(&spawn->list, secondary_spawns);

	return &n->list == stack ? top : &n->inst->alg.cra_users;
}
Example #8
float
TanimotoCore (graph_t* gp1, graph_t* gp2, int depth){
	int paths, paths_pA1, paths_pA2;
	pathArray_t* pA1 = XMALLOC(pathArray_t, 1);
	pathArray_t* pA2 = XMALLOC(pathArray_t, 1);

	initPathArray(pA1, 32);
	initPathArray(pA2, 32);

	/* MOLECULE 1 */
	hnode_t* hiter = gp1->head;
	while (hiter) {
		DFS_Search(gp1, hiter, depth, pA1, NO_MULT);
		hiter = list_next_entry(hnode_t, hiter);
	}

	/* MOLECULE 2 */
	hiter = gp2->head;
	while (hiter) {
		DFS_Search(gp2, hiter, depth, pA2, NO_MULT);
		hiter = list_next_entry(hnode_t, hiter);
	}

	/* RESULTS */
	displayPathArray(pA1);
	displayPathArray(pA2);

	paths = commonPaths(pA1, pA2);
	paths_pA1 = pA1->dim - pA1->free;
	paths_pA2 = pA2->dim - pA2->free;
	printf("\nNumber of paths in pA1: %d", paths_pA1);
	printf("\nNumber of paths in pA2: %d", paths_pA2);
	printf("\nNumber of common paths: %d\n", paths);

	freePathArray(pA1);
	freePathArray(pA2);

	return (float) paths / (paths_pA1 + paths_pA2 - paths);
}
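
The return expression in Example #8 is the Tanimoto (Jaccard) coefficient over the two path sets, with paths playing the role of |A ∩ B| and paths_pA1, paths_pA2 the set sizes:

T(A, B) = \frac{|A \cap B|}{|A| + |B| - |A \cap B|}

Identical path sets therefore yield 1.0 and disjoint sets 0.0; note that a zero denominator (both path sets empty) is not guarded against in the code above.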
Example #9
/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	if (ccp_rr == ccp) {
		/* ccp_unit_lock is read/write; any read access
		 * will be suspended while we make changes to the
		 * list and RR pointer.
		 */
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
	}

	list_del(&ccp->entry);
	if (list_empty(&ccp_units))
		ccp_rr = NULL;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}
Example #10
/*
 * coresight_disable_path_from : Disable components in the given path beyond
 * @nd in the list. If @nd is NULL, all the components, except the SOURCE are
 * disabled.
 */
static void coresight_disable_path_from(struct list_head *path,
					struct coresight_node *nd)
{
	u32 type;
	struct coresight_device *csdev, *parent, *child;

	if (!nd)
		nd = list_first_entry(path, struct coresight_node, link);

	list_for_each_entry_continue(nd, path, link) {
		csdev = nd->csdev;
		type = csdev->type;

		/*
		 * ETF devices are tricky... They can be a link or a sink,
		 * depending on how they are configured.  If an ETF has been
		 * "activated" it will be configured as a sink, otherwise
		 * go ahead with the link configuration.
		 */
		if (type == CORESIGHT_DEV_TYPE_LINKSINK)
			type = (csdev == coresight_get_sink(path)) ?
						CORESIGHT_DEV_TYPE_SINK :
						CORESIGHT_DEV_TYPE_LINK;

		switch (type) {
		case CORESIGHT_DEV_TYPE_SINK:
			coresight_disable_sink(csdev);
			break;
		case CORESIGHT_DEV_TYPE_SOURCE:
			/*
			 * We skip the first node in the path assuming that it
			 * is the source. So we don't expect a source device in
			 * the middle of a path.
			 */
			WARN_ON(1);
			break;
		case CORESIGHT_DEV_TYPE_LINK:
			parent = list_prev_entry(nd, link)->csdev;
			child = list_next_entry(nd, link)->csdev;
			coresight_disable_link(csdev, parent, child);
			break;
		default:
			break;
		}
	}
}
Example #11
void
ksocknal_next_tx_carrier(struct ksock_conn *conn)
{
	struct ksock_tx *tx = conn->ksnc_tx_carrier;

	/* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
	LASSERT(!list_empty(&conn->ksnc_tx_queue));
	LASSERT(tx);

	/* Next TX that can carry ZC-ACK or LNet message */
	if (tx->tx_list.next == &conn->ksnc_tx_queue) {
		/* no more packets queued */
		conn->ksnc_tx_carrier = NULL;
	} else {
		conn->ksnc_tx_carrier = list_next_entry(tx, tx_list);
		LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type);
	}
}
Example #12
/*
 * Get the next writeable area.
 * Returns -1 if there is no space left to write, 0 if space is available.
 */
int spy_rw_buffer_next_writeable(spy_rw_buffer_t *buffer, char **buf, size_t *size)
{
    spy_mem_block_t *mem_block;

    // no space left to write
    if (buffer->write_pos == buffer->cap)
        return -1;

    // jump to next mem_block
    if (buffer->write_base + buffer->write_block->size
            == buffer->write_pos) {

        mem_block = list_next_entry(buffer->write_block, list);

        buffer->write_base      += buffer->write_block->size;
        buffer->write_block     = mem_block;
    }

    *buf = buffer->write_block->buf + buffer->write_pos - buffer->write_base;
    *size = buffer->write_base + buffer->write_block->size - buffer->write_pos;

    return 0;
}
Example #13
void coresight_disable_path(struct list_head *path)
{
	u32 type;
	struct coresight_node *nd;
	struct coresight_device *csdev, *parent, *child;

	list_for_each_entry(nd, path, link) {
		csdev = nd->csdev;
		type = csdev->type;

		/*
		 * ETF devices are tricky... They can be a link or a sink,
		 * depending on how they are configured.  If an ETF has been
		 * "activated" it will be configured as a sink, otherwise
		 * go ahead with the link configuration.
		 */
		if (type == CORESIGHT_DEV_TYPE_LINKSINK)
			type = (csdev == coresight_get_sink(path)) ?
						CORESIGHT_DEV_TYPE_SINK :
						CORESIGHT_DEV_TYPE_LINK;

		switch (type) {
		case CORESIGHT_DEV_TYPE_SINK:
			coresight_disable_sink(csdev);
			break;
		case CORESIGHT_DEV_TYPE_SOURCE:
			/* sources are disabled from either sysFS or Perf */
			break;
		case CORESIGHT_DEV_TYPE_LINK:
			parent = list_prev_entry(nd, link)->csdev;
			child = list_next_entry(nd, link)->csdev;
			coresight_disable_link(csdev, parent, child);
			break;
		default:
			break;
		}
	}
}
Example #14
static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->enabled)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition().  In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	klp_try_complete_transition();
	patch->enabled = false;

	return 0;
}
Example #15
	list_del(&obj->list);
	free(obj);
}

struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}

const char *
bpf_object__get_name(struct bpf_object *obj)
{
	if (!obj)
		return ERR_PTR(-EINVAL);
	return obj->path;
}
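
A minimal usage sketch (hypothetical, based only on the two helpers above) that walks every object currently on bpf_objects_list:

#include <stdio.h>

/* Passing NULL yields the first object; NULL marks the end of the list. */
static void dump_loaded_objects(void)
{
	struct bpf_object *obj = NULL;

	while ((obj = bpf_object__next(obj)) != NULL)
		printf("loaded object: %s\n", bpf_object__get_name(obj));
}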
Example #16
bool
routing_get_bootstrap_contacts(
                              ROUTING_ZONE* rz,
                              // [LOCK]
                              uint32_t max_required,
                              LIST** kn_lst_out,
                              bool top_level_call
                              )
{
  bool result = false;
  LIST* kn_lst = NULL;
  LIST* entry = NULL;
  uint32_t ent_cnt = 0;
  void* data = NULL;
  uint32_t copy_cnt = 0;

  do {

    // [LOCK] lock active zones.

    if (!rz || !kn_lst_out) break;

    if (!routing_get_top_depth_entries(rz, LOG_BASE_EXPONENT, &kn_lst, false)){

      LOG_ERROR("Failed to get top entries.");

      break;

    }
    
    list_entries_count(kn_lst, &ent_cnt);

    entry = kn_lst;

    if (ent_cnt){

      copy_cnt = ent_cnt > max_required? max_required : ent_cnt;

      while (copy_cnt--) {

        if (!entry) break;

        list_get_entry_data(entry, &data);

        list_add_entry(kn_lst_out, data);

        list_next_entry(entry, &entry);

      }

    }

    result = true;

  } while (false);

  if (kn_lst) list_destroy(kn_lst, true);

  // [LOCK] unlock active zones.

  return result;
}
Example #17
	/*
	 * When multiple devices are present in the system, select a
	 * device in round-robin fashion for crypto operations; a single
	 * session must nevertheless keep using the same device to
	 * maintain request-response ordering.
	 */
	mutex_lock(&dev_mutex);
	if (!list_empty(&uld_ctx_list)) {
		u_ctx = ctx_rr;
		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
			ctx_rr = list_first_entry(&uld_ctx_list,
						  struct uld_ctx,
						  entry);
		else
			ctx_rr = list_next_entry(ctx_rr, entry);
	}
	mutex_unlock(&dev_mutex);
	return u_ctx;
}

static int chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENXIO;

	spin_lock_init(&dev->lock_chcr_dev);
	u_ctx->dev = dev;