Example #1
/*
 * []----
 * | session_alloc -- create a new session attached to the lead connection
 * []----
 */
Boolean_t
session_alloc(iscsi_conn_t *c, uint8_t *isid)
{
	iscsi_sess_t	*s,
			*n;

	if (c->c_sess != NULL)
		return (True);

	s = (iscsi_sess_t *)calloc(sizeof (iscsi_sess_t), 1);
	if (s == NULL)
		return (False);

	(void) pthread_mutex_lock(&sess_mutex);
	s->s_num	= sess_num++;
	s->s_state	= SS_STARTED;

	if (sess_head == NULL)
		sess_head = s;
	else {
		for (n = sess_head; n->s_next; n = n->s_next)
			;
		n->s_next = s;
	}
	(void) pthread_mutex_unlock(&sess_mutex);

	bcopy(isid, s->s_isid, 6);

	(void) pthread_mutex_init(&s->s_mutex, NULL);
	c->c_sess	= s;
	s->s_conn_head	= c;
	s->s_sessq	= queue_alloc();
	s->s_t10q	= queue_alloc();
	c->c_sessq	= s->s_sessq;
	s->s_mgmtq	= c->c_mgmtq;
	s->s_type	= SessionNormal;
	s->s_tsid	= s->s_num;

	sess_set_auth(s);

	(void) pthread_create(&s->s_thr_id_t10, NULL, sess_from_t10, s);
	(void) pthread_create(&s->s_thr_id_conn, NULL, sess_process, s);

	util_title(s->s_mgmtq, Q_SESS_LOGIN, s->s_num, "Start Session");

	return (True);
}
Example #2
/* Ask for an event handle corresponding to the given client request (in
 * ASYNC mode). The event handle must be released again using the
 * framework_release_handle function.
 * The return value is NAF_QUERY_NEW if this is the first time the client
 * request is seen (i.e. a new event handle is created). In this case
 * the buffer pointed to by handle will be assigned the new event handle.
 * Note that this handle will never be 0 (to distinguish the case when
 * calling this function in the SYNC model).
 * The return value is NAF_QUERY_QUEUED if the client request is already
 * known (and is pending to be processed). In this case the buffer pointed
 * to by handle is left untouched.
 * The return value is NAF_QUERY_OUT_OF_RESOURCES if no more memory is
 * available for a new client request. The buffer pointed to by handle is
 * left unchanged. */
naf_query framework_event_query(const char* clientId, uint16_t reqId, naf_handle* handle)
{
    client_req_query query;
    queue_entry* entry;

    NABTO_LOG_TRACE(("APPREQ framework_event_query: client=" PRI_client_id2, CLIENT_ID_ARGS2(clientId, reqId)));

    /* First look for the request in the queue of pending requests. */
    query.clientId = clientId;
    query.reqId = reqId;
    query.found = NULL;
    queue_enum(is_client_request_cb, &query);
    if (query.found) {
        //The client request was found in the queue
        LOG_APPREQ_STATE("framework_event_query", "QUEUED", query.found);
        LOG_APPREQ_QUEUE();
        return NAF_QUERY_QUEUED;
    }

    /* Now the request was not found.
     * Reserve the next free entry in the queue. */
    entry = queue_alloc();
    if (!entry) {
        //The queue is full
        LOG_APPREQ_ERROR("framework_event_query", "OUT_OF_RESOURCES");
        LOG_APPREQ_QUEUE();
        return NAF_QUERY_OUT_OF_RESOURCES;
    }

    UNABTO_ASSERT(entry->state == APPREQ_FREE);
    if (entry->state != APPREQ_FREE) {
        // Hmm, that's strange. The new entry should have been free.
        if (clientId == entry->data.applicationRequest.clientId && reqId == entry->data.header.seq) {
            // - and it seems to be ourselves!
            LOG_APPREQ_STATE("framework_event_query", "QUEUED?", entry);
            LOG_APPREQ_QUEUE();
            return NAF_QUERY_QUEUED;
        }
        // The new entry belongs to someone else. The queue must be full !?
        LOG_APPREQ_ERROR("framework_event_query", "OUT_OF_RESOURCES?");
        LOG_APPREQ_QUEUE();
        return NAF_QUERY_OUT_OF_RESOURCES;
    }

    // Now we have a new request

    // Be sure we "own" the entry - advance to next free entry in the queue
    entry->state = APPREQ_WAITING;
    queue_push();

    // *handle cannot be initialized yet, as the received packet has not
    // yet been decrypted.
    // See framework_event() for initialization of handle.
    *handle = &entry->data;

    LOG_APPREQ_STATE("framework_event_query", "NEW", entry);
    LOG_APPREQ_QUEUE();
    return NAF_QUERY_NEW;
}
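
A minimal caller-side sketch of how the three documented return values might be handled. The names on_client_packet() and handle_request() are hypothetical and not part of the framework above; only the naf_query constants and the handle semantics come from the comment block.

/* Sketch only: dispatch on the result of framework_event_query(). */
static void on_client_packet(const char* clientId, uint16_t reqId)
{
    naf_handle handle = 0;

    switch (framework_event_query(clientId, reqId, &handle)) {
    case NAF_QUERY_NEW:
        /* First time this request is seen: handle is now valid (never 0). */
        handle_request(handle);            /* hypothetical dispatch */
        break;
    case NAF_QUERY_QUEUED:
        /* Duplicate of a pending request: drop it, the queued entry answers. */
        break;
    case NAF_QUERY_OUT_OF_RESOURCES:
        /* Queue is full: the client is expected to retry later. */
        break;
    }
}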
Example #3
static void
target_stat(char **msg, char *targ_name, mgmt_type_t type)
{
	iscsi_conn_t	*c;
	msg_t		*m;
	target_queue_t	*q = queue_alloc();
	mgmt_request_t	mgmt_rqst;
	int		msg_sent,
			i;
	extern pthread_mutex_t	port_mutex;

	mgmt_rqst.m_q		= q;
	mgmt_rqst.m_u.m_resp	= msg;
	mgmt_rqst.m_time	= time(NULL);
	mgmt_rqst.m_request	= type;
	(void) pthread_mutex_init(&mgmt_rqst.m_resp_mutex, NULL);

	(void) pthread_mutex_lock(&port_mutex);
	mgmt_rqst.m_targ_name	= targ_name;
	msg_sent		= 0;
	for (c = conn_head; c; c = c->c_next) {
		if (c->c_state == S5_LOGGED_IN) {
			/*
			 * Only send requests for statistics to
			 * connections that are logged in
			 * (S5_LOGGED_IN). Connections in earlier
			 * states are skipped, even though they may
			 * have statistics, such as connection time,
			 * which we'd like to have.
			 */
			queue_message_set(c->c_dataq, 0, msg_mgmt_rqst,
			    &mgmt_rqst);
			msg_sent++;
		}
	}
	(void) pthread_mutex_unlock(&port_mutex);

	/*
	 * Comment: main.c:list_targets:1
	 * We wait for the responses without the port_mutex
	 * being held. There is a small window between when the
	 * connection last listens for a message and when the
	 * queue is freed. During that time the connection will
	 * attempt to grab the port_mutex lock so that it
	 * can unlink itself and call queue_free(). If we sent
	 * the message with the lock held and then wait for a response
	 * it's possible that the connection will deadlock waiting
	 * to get the port_mutex.
	 */
	for (i = 0; i < msg_sent; i++) {
		m = queue_message_get(q);
		queue_message_free(m);
	}
	queue_free(q, NULL);
}
Example #4
void cofact_queue_algo(cofact_algo_t *algo)
{
	cofact_queue_t *queue_item;

	/* Nothing batched for this algorithm yet: nothing to queue. */
	queue_item = algo->queue;
	if(!queue_item->n_batch)
		return;

	/* Push the filled batch onto the head of the global work list. */
	queue_item->algo = algo;
	queue_item->next = cofact_queue;
	cofact_queue = queue_item;

	/* Hand the algorithm a fresh, empty queue for its next batch. */
	algo->queue = queue_alloc();
	return;
}
Example #5
static struct device_t * buzzer_pwm_probe(struct driver_t * drv, struct dtnode_t * n)
{
	struct buzzer_pwm_pdata_t * pdat;
	struct pwm_t * pwm;
	struct buzzer_t * buzzer;
	struct device_t * dev;

	if(!(pwm = search_pwm(dt_read_string(n, "pwm-name", NULL))))
		return NULL;

	pdat = malloc(sizeof(struct buzzer_pwm_pdata_t));
	if(!pdat)
		return NULL;

	buzzer = malloc(sizeof(struct buzzer_t));
	if(!buzzer)
	{
		free(pdat);
		return NULL;
	}

	timer_init(&pdat->timer, buzzer_pwm_timer_function, buzzer);
	pdat->queue = queue_alloc();
	pdat->pwm = pwm;
	pdat->polarity = dt_read_bool(n, "pwm-polarity", 0);
	pdat->frequency = -1;

	buzzer->name = alloc_device_name(dt_read_name(n), dt_read_id(n));
	buzzer->set = buzzer_pwm_set;
	buzzer->get = buzzer_pwm_get;
	buzzer->beep = buzzer_pwm_beep;
	buzzer->priv = pdat;

	buzzer_pwm_set(buzzer, 0);

	if(!register_buzzer(&dev, buzzer))
	{
		timer_cancel(&pdat->timer);
		queue_free(pdat->queue, iter_queue_node);

		free_device_name(buzzer->name);
		free(buzzer->priv);
		free(buzzer);
		return NULL;
	}
	dev->driver = drv;

	return dev;
}
Example #6
static bool_t buzzer_pwm_register_buzzer(struct resource_t * res)
{
	struct buzzer_pwm_data_t * rdat = (struct buzzer_pwm_data_t *)res->data;
	struct buzzer_pwm_pdata_t * pdat;
	struct buzzer_t * buzzer;
	struct pwm_t * pwm;
	char name[64];

	pwm = search_pwm(rdat->pwm);
	if(!pwm)
		return FALSE;

	pdat = malloc(sizeof(struct buzzer_pwm_pdata_t));
	if(!pdat)
		return FALSE;

	buzzer = malloc(sizeof(struct buzzer_t));
	if(!buzzer)
	{
		free(pdat);
		return FALSE;
	}

	snprintf(name, sizeof(name), "%s.%d", res->name, res->id);

	timer_init(&pdat->timer, buzzer_pwm_timer_function, buzzer);
	pdat->beep = queue_alloc();
	pdat->frequency = 0;
	pdat->polarity = rdat->polarity;
	pdat->pwm = pwm;

	buzzer->name = strdup(name);
	buzzer->init = buzzer_pwm_init;
	buzzer->exit = buzzer_pwm_exit;
	buzzer->set = buzzer_pwm_set;
	buzzer->get = buzzer_pwm_get;
	buzzer->beep = buzzer_pwm_beep;
	buzzer->suspend = buzzer_pwm_suspend;
	buzzer->resume = buzzer_pwm_resume;
	buzzer->priv = pdat;

	if(register_buzzer(buzzer))
		return TRUE;

	free(buzzer->priv);
	free(buzzer->name);
	free(buzzer);
	return FALSE;
}
Example #7
/* =============================================================================
 * decoder_alloc
 * =============================================================================
 */
decoder_t*
decoder_alloc ()
{
    decoder_t* decoderPtr;

    decoderPtr = (decoder_t*)SEQ_MALLOC(sizeof(decoder_t));
    if (decoderPtr) {
        decoderPtr->fragmentedMapPtr = MAP_ALLOC(NULL, NULL);
        assert(decoderPtr->fragmentedMapPtr);
        decoderPtr->decodedQueuePtr = queue_alloc(1024);
        assert(decoderPtr->decodedQueuePtr);
    }

    return decoderPtr;
}
Example #8
File: util.c Project: imp/slist
static void
send_named_msg(iscsi_conn_t *c, msg_type_t t, char *name)
{
	target_queue_t	*q = queue_alloc();
	msg_t		*m;
	name_request_t	n;

	n.nr_q		= q;
	n.nr_name	= name;

	/*
	 * Post the request on the connection's session queue, then block
	 * until the session thread replies on our private queue q.
	 */
	queue_message_set(c->c_sessq, 0, t, &n);
	m = queue_message_get(q);
	queue_message_free(m);
	queue_free(q, NULL);
}
Example #9
/* =============================================================================
 * stream_alloc
 * =============================================================================
 */
stream_t*
stream_alloc (long percentAttack)
{
    stream_t* streamPtr;

    streamPtr = (stream_t*)malloc(sizeof(stream_t));
    if (streamPtr) {
        streamPtr->percentAttack = percentAttack;
        streamPtr->randomPtr = random_alloc();
        streamPtr->allocVectorPtr = vector_alloc(1);
        streamPtr->packetQueuePtr = queue_alloc(-1);
        streamPtr->attackMapPtr = MAP_ALLOC(NULL, NULL);
    }

    return streamPtr;
}
Example #10
int
main ()
{
    queue_t* queuePtr;
    random_t* randomPtr;
    long data[] = {3, 1, 4, 1, 5};
    long numData = sizeof(data) / sizeof(data[0]);
    long i;

    randomPtr = random_alloc();
    assert(randomPtr);
    random_seed(randomPtr, 0);

    puts("Starting tests...");

    queuePtr = queue_alloc(-1);

    assert(queue_isEmpty(queuePtr));
    for (i = 0; i < numData; i++) {
        insertData(queuePtr, &data[i]);
    }
    assert(!queue_isEmpty(queuePtr));

    for (i = 0; i < numData; i++) {
        long* dataPtr = (long*)queue_pop(queuePtr);
        printf("Removing %li: ", *dataPtr);
        printQueue(queuePtr);
    }
    assert(!queue_pop(queuePtr));
    assert(queue_isEmpty(queuePtr));

    puts("All tests passed.");

    for (i = 0; i < numData; i++) {
        insertData(queuePtr, &data[i]);
    }
    for (i = 0; i < numData; i++) {
        printf("Shuffle %li: ", i);
        queue_shuffle(queuePtr, randomPtr);
        printQueue(queuePtr);
    }
    assert(!queue_isEmpty(queuePtr));

    queue_free(queuePtr);

    return 0;
}
Example #11
static data_xpath_tag_t * data_xpath_search_tag(data_xpath_selector_t * selector,hcchar *tagName,hlist_t filters,InvokeTickDeclare){
	data_xpath_tag_t * tag = data_xpath_find_tag(selector,tagName,filters,InvokeTickArg);
	if(tag == NULL){
		data_xpath_search_tag_param_t param = {queue_alloc(),NULL,tagName,filters};
		data_xpath_selector_t * s;
		map_each(selector->child_tags, data_xpath_search_tag_map_each, &param, NULL);
		
		while(param.result==NULL && (s = queue_out(param.queue))){
			map_each(s->child_tags, data_xpath_search_tag_map_each, &param, NULL);
		}
		
		queue_dealloc(param.queue);
		return param.result;
	}
	else{
		return tag;
	}
}
Example #12
static bool_t buzzer_gpio_register_buzzer(struct resource_t * res)
{
	struct buzzer_gpio_data_t * rdat = (struct buzzer_gpio_data_t *)res->data;
	struct buzzer_gpio_pdata_t * pdat;
	struct buzzer_t * buzzer;
	char name[64];

	pdat = malloc(sizeof(struct buzzer_gpio_pdata_t));
	if(!pdat)
		return FALSE;

	buzzer = malloc(sizeof(struct buzzer_t));
	if(!buzzer)
	{
		free(pdat);
		return FALSE;
	}

	snprintf(name, sizeof(name), "%s.%d", res->name, res->id);

	timer_init(&pdat->timer, buzzer_gpio_timer_function, buzzer);
	pdat->beep = queue_alloc();
	pdat->frequency = 0;
	pdat->gpio = rdat->gpio;
	pdat->active_low = rdat->active_low;

	buzzer->name = strdup(name);
	buzzer->init = buzzer_gpio_init;
	buzzer->exit = buzzer_gpio_exit;
	buzzer->set = buzzer_gpio_set;
	buzzer->get = buzzer_gpio_get;
	buzzer->beep = buzzer_gpio_beep;
	buzzer->suspend = buzzer_gpio_suspend;
	buzzer->resume = buzzer_gpio_resume;
	buzzer->priv = pdat;

	if(register_buzzer(buzzer))
		return TRUE;

	free(buzzer->priv);
	free(buzzer->name);
	free(buzzer);
	return FALSE;
}
Example #13
static bool_t sandbox_buzzer_register_buzzer(struct resource_t * res)
{
	struct sandbox_buzzer_data_t * rdat = (struct sandbox_buzzer_data_t *)res->data;
	struct sandbox_buzzer_private_data_t * dat;
	struct buzzer_t * buzzer;
	char name[64];

	dat = malloc(sizeof(struct sandbox_buzzer_private_data_t));
	if(!dat)
		return FALSE;

	buzzer = malloc(sizeof(struct buzzer_t));
	if(!buzzer)
	{
		free(dat);
		return FALSE;
	}

	snprintf(name, sizeof(name), "%s.%d", res->name, res->id);

	timer_init(&dat->timer, sandbox_buzzer_timer_function, buzzer);
	dat->beep = queue_alloc();
	dat->frequency = 0;
	dat->path = strdup(rdat->path);

	buzzer->name = strdup(name);
	buzzer->init = sandbox_buzzer_init;
	buzzer->exit = sandbox_buzzer_exit;
	buzzer->set = sandbox_buzzer_set;
	buzzer->get = sandbox_buzzer_get;
	buzzer->beep = sandbox_buzzer_beep;
	buzzer->suspend = sandbox_buzzer_suspend;
	buzzer->resume = sandbox_buzzer_resume;
	buzzer->priv = dat;

	if(register_buzzer(buzzer))
		return TRUE;

	free(buzzer->priv);
	free(buzzer->name);
	free(buzzer);
	return FALSE;
}
Example #14
/* =============================================================================
 * decoder_alloc
 * =============================================================================
 */
decoder_t*
decoder_alloc (long numFlow)
{
    decoder_t* decoderPtr;

    decoderPtr = (decoder_t*)SEQ_MALLOC(sizeof(decoder_t));
    if (decoderPtr) {
      printf("hastable alloc size %lx\n", numFlow);
#ifdef MAP_USE_RBTREE
      decoderPtr->fragmentedMapPtr = MAP_ALLOC(NULL, NULL);
#else
      decoderPtr->fragmentedMapPtr = hashtable_alloc(numFlow, NULL, NULL, 2, 2);
#endif
      assert(decoderPtr->fragmentedMapPtr);
        decoderPtr->decodedQueuePtr = queue_alloc(1024);
        assert(decoderPtr->decodedQueuePtr);
    }

    return decoderPtr;
}
Example #15
/* =============================================================================
 * maze_alloc
 * =============================================================================
 */
maze_t*
maze_alloc ()
{
    maze_t* mazePtr;

    mazePtr = (maze_t*)malloc(sizeof(maze_t));
    if (mazePtr) {
        mazePtr->gridPtr = NULL;
        mazePtr->workQueuePtr = queue_alloc(1024);
        mazePtr->wallVectorPtr = vector_alloc(1);
        mazePtr->srcVectorPtr = vector_alloc(1);
        mazePtr->dstVectorPtr = vector_alloc(1);
        assert(mazePtr->workQueuePtr &&
               mazePtr->wallVectorPtr &&
               mazePtr->srcVectorPtr &&
               mazePtr->dstVectorPtr);
    }

    return mazePtr;
}
Example #16
int main(int argc, char **argv) {

  int i, sum;

  pthread_t thread[NUM_THREADS];
  Queue *queue = queue_alloc(NUM_THREADS);


  for (i = 0; i < NUM_THREADS; ++i) {
    pthread_create( &thread[i], NULL, doSum, queue);
  }

  int expected = 0;
  for(i = 0; i < N; ++i) {
    Task *task = (Task*)malloc(sizeof(Task));
    task->value = i;


    queue_put(queue, task);
    expected += i;
  }


  for (i = 0; i < NUM_THREADS; ++i) {
    queue_put(queue, NULL);
  }


  intptr_t value;
  sum = 0;
  for (i = 0; i < NUM_THREADS; ++i) {
    pthread_join(thread[i], (void**)&value);
    sum += value;
  }


  printf("total sum: %d, expected sum: %d\n", (int)sum, expected);
  queue_free(queue);

  return 0;
}
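
doSum() is not shown in this example. Below is a sketch of what a matching worker might look like; the blocking queue_get() call and the exact Task layout are assumptions based on the code above, not the project's actual API.

/* Sketch only: drain tasks until the NULL shutdown sentinel arrives. */
static void *doSum(void *arg) {
  Queue *queue = (Queue*)arg;
  intptr_t sum = 0;
  Task *task;

  /* queue_get() is assumed to block until queue_put() provides an item. */
  while ((task = (Task*)queue_get(queue)) != NULL) {
    sum += task->value;
    free(task);
  }

  /* The per-thread sum is collected via pthread_join() in main(). */
  return (void*)sum;
}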
Example #17
/**
 * Create a message queue.
 *
 *     Create a message queue.
 *     This service may panic if err parameter is NULL and:
 *     -# no queue is available, or
 *     -# when called from an ISR.
 *
 *     Authorized execution levels:  task, fiber.
 *
 *     As for semaphores and mutexes, queues are picked from a pool of
 *     statically-allocated objects.
 *
 * @param max_size: maximum number of messages in the queue.
 *     (Rationale: queues only contain pointer to messages)
 *
 * @param err (out): execution status:
 *          -# E_OS_OK : queue was created
 *          -# E_OS_ERR: all queues from the pool are already being used
 *          -# E_OS_ERR_NOT_ALLOWED: service cannot be executed from ISR context.
 *
 * @return Handle to the created queue.
 *     NULL if all allocated queues are already being used.
 */
T_QUEUE queue_create(uint32_t max_size, OS_ERR_TYPE* err)
{
    queue_impl_t * q = NULL;
    T_EXEC_LEVEL execLvl = _getExecLevel();
    OS_ERR_TYPE _err;


    if(max_size==0 || max_size>QUEUE_ELEMENT_POOL_SIZE)
    {
        error_management (err, E_OS_ERR);
        return NULL;
    }

    /* check execution level */
    if ((E_EXEC_LVL_FIBER == execLvl) || (E_EXEC_LVL_TASK == execLvl))
    {
        /* Block concurrent accesses to the pool of queue_list */
        lock_pool();
        q = queue_alloc();
        unlock_pool();

        if (q != NULL)
        {
           list_init(&q->_list);
           q->current_size = 0;
           q->max_size = max_size;
           q->sema = semaphore_create(0, &_err);
           error_management (err, _err);
        }
        else
        {
            error_management (err, E_OS_ERR);
        }
    }
    else
    {
        error_management (err, E_OS_ERR_NOT_ALLOWED);
    }

    return (T_QUEUE)q;
}
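
A minimal usage sketch for the service documented above, using only the types and error codes visible in this function; report_error() is a hypothetical placeholder for the caller's error handling.

/* Sketch only: create a queue for up to 16 message pointers and check the status. */
static T_QUEUE make_msg_queue(void)
{
    OS_ERR_TYPE err;
    T_QUEUE q = queue_create(16, &err);

    if (q == NULL || err != E_OS_OK)
    {
        /* all queues from the static pool are in use, or wrong execution level */
        report_error(err);   /* hypothetical */
        return NULL;
    }
    return q;
}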
Example #18
/* =============================================================================
 * net_generateRandomEdges
 * =============================================================================
 */
void
net_generateRandomEdges (net_t* netPtr,
                         long maxNumParent,
                         long percentParent,
                         random_t* randomPtr)
{
    vector_t* nodeVectorPtr = netPtr->nodeVectorPtr;

    long numNode = vector_getSize(nodeVectorPtr);
    bitmap_t* visitedBitmapPtr = bitmap_alloc(numNode);
    assert(visitedBitmapPtr);
    queue_t* workQueuePtr = queue_alloc(-1);

    long n;

    for (n = 0; n < numNode; n++) {
        long p;
        for (p = 0; p < maxNumParent; p++) {
            long value = random_generate(randomPtr) % 100;
            if (value < percentParent) {
                long parent = random_generate(randomPtr) % numNode;
                if ((parent != n) &&
                    !net_hasEdge(netPtr, parent, n) &&
                    !net_isPath(netPtr, n, parent, visitedBitmapPtr, workQueuePtr))
                {
#ifdef TEST_NET
                    printf("node=%li parent=%li\n", n, parent);
#endif
                    insertEdge(netPtr, parent, n);
                }
            }
        }
    }

    assert(!net_isCycle(netPtr));

    bitmap_free(visitedBitmapPtr);
    queue_free(workQueuePtr);
}
Example #19
int
main ()
{
    long numNode = 100;

    puts("Starting tests...");

    bool_t status;

    net_t* netPtr = net_alloc(numNode);
    assert(netPtr);
    bitmap_t* visitedBitmapPtr = bitmap_alloc(numNode);
    assert(visitedBitmapPtr);
    queue_t* workQueuePtr = queue_alloc(-1);
    assert(workQueuePtr);

    assert(!net_isCycle(netPtr));

    long aId = 31;
    long bId = 14;
    long cId = 5;
    long dId = 92;

    net_applyOperation(netPtr, OPERATION_INSERT, aId, bId);
    assert(net_isPath(netPtr, aId, bId, visitedBitmapPtr, workQueuePtr));
    assert(!net_isPath(netPtr, bId, aId, visitedBitmapPtr, workQueuePtr));
    assert(!net_isPath(netPtr, aId, cId, visitedBitmapPtr, workQueuePtr));
    assert(!net_isPath(netPtr, aId, dId, visitedBitmapPtr, workQueuePtr));
    assert(!net_isCycle(netPtr));

    net_applyOperation(netPtr, OPERATION_INSERT, bId, cId);
    net_applyOperation(netPtr, OPERATION_INSERT, aId, cId);
    net_applyOperation(netPtr, OPERATION_INSERT, dId, aId);
    assert(!net_isCycle(netPtr));
    net_applyOperation(netPtr, OPERATION_INSERT, cId, dId);
    assert(net_isCycle(netPtr));
    net_applyOperation(netPtr, OPERATION_REVERSE, cId, dId);
    assert(!net_isCycle(netPtr));
    net_applyOperation(netPtr, OPERATION_REVERSE, dId, cId);
    assert(net_isCycle(netPtr));
    assert(net_isPath(netPtr, aId, dId, visitedBitmapPtr, workQueuePtr));
    net_applyOperation(netPtr, OPERATION_REMOVE, cId, dId);
    assert(!net_isPath(netPtr, aId, dId, visitedBitmapPtr, workQueuePtr));

    bitmap_t* ancestorBitmapPtr = bitmap_alloc(numNode);
    assert(ancestorBitmapPtr);
    status = net_findAncestors(netPtr, cId, ancestorBitmapPtr, workQueuePtr);
    assert(status);
    assert(bitmap_isSet(ancestorBitmapPtr, aId));
    assert(bitmap_isSet(ancestorBitmapPtr, bId));
    assert(bitmap_isSet(ancestorBitmapPtr, dId));
    assert(bitmap_getNumSet(ancestorBitmapPtr) == 3);

    bitmap_t* descendantBitmapPtr = bitmap_alloc(numNode);
    assert(descendantBitmapPtr);
    status = net_findDescendants(netPtr, aId, descendantBitmapPtr, workQueuePtr);
    assert(status);
    assert(bitmap_isSet(descendantBitmapPtr, bId));
    assert(bitmap_isSet(descendantBitmapPtr, cId));
    assert(bitmap_getNumSet(descendantBitmapPtr) == 2);

    bitmap_free(visitedBitmapPtr);
    queue_free(workQueuePtr);
    bitmap_free(ancestorBitmapPtr);
    bitmap_free(descendantBitmapPtr);
    net_free(netPtr);

    random_t* randomPtr = random_alloc();
    assert(randomPtr);
    netPtr = net_alloc(numNode);
    assert(netPtr);
    net_generateRandomEdges(netPtr, 10, 10, randomPtr);
    net_free(netPtr);

    puts("All tests passed.");

    return 0;
}
Example #20
/* Smooth a periodic array with a moving average: equal weights and
 * length = 5% of the period. */
int apply_smoother(
    rrd_t *rrd,
    unsigned long rra_idx,
    unsigned long rra_start,
    rrd_file_t *rrd_file)
{
    unsigned long i, j, k;
    unsigned long totalbytes;
    rrd_value_t *rrd_values;
    unsigned long row_length = rrd->stat_head->ds_cnt;
    unsigned long row_count = rrd->rra_def[rra_idx].row_cnt;
    unsigned long offset;
    FIFOqueue **buffers;
    rrd_value_t *working_average;
    rrd_value_t *rrd_values_cpy;
    rrd_value_t *baseline;

    if (atoi(rrd->stat_head->version) >= 4) {
        offset = floor(rrd->rra_def[rra_idx].
                       par[RRA_seasonal_smoothing_window].
                       u_val / 2 * row_count);
    } else {
        offset = floor(0.05 / 2 * row_count);
    }

    if (offset == 0)
        return 0;       /* no smoothing */

    /* allocate memory */
    totalbytes = sizeof(rrd_value_t) * row_length * row_count;
    rrd_values = (rrd_value_t *) malloc(totalbytes);
    if (rrd_values == NULL) {
        rrd_set_error("apply smoother: memory allocation failure");
        return -1;
    }

    /* rra_start is at the beginning of this rra */
    if (rrd_seek(rrd_file, rra_start, SEEK_SET)) {
        rrd_set_error("seek to rra %d failed", rra_start);
        free(rrd_values);
        return -1;
    }

    /* could read all data in a single block, but we need to
     * check for NA values */
    for (i = 0; i < row_count; ++i) {
        for (j = 0; j < row_length; ++j) {
            if (rrd_read
                (rrd_file, &(rrd_values[i * row_length + j]),
                 sizeof(rrd_value_t) * 1)
                != (ssize_t) (sizeof(rrd_value_t) * 1)) {
                rrd_set_error("reading value failed: %s",
                              rrd_strerror(errno));
            }
            if (isnan(rrd_values[i * row_length + j])) {
                /* can't apply smoothing, still uninitialized values */
#ifdef DEBUG
                fprintf(stderr,
                        "apply_smoother: NA detected in seasonal array: %ld %ld\n",
                        i, j);
#endif
                free(rrd_values);
                return 0;
            }
        }
    }

    /* allocate queues, one for each data source */
    buffers = (FIFOqueue **) malloc(sizeof(FIFOqueue *) * row_length);
    for (i = 0; i < row_length; ++i) {
        queue_alloc(&(buffers[i]), 2 * offset + 1);
    }
    /* need working average initialized to 0 */
    working_average = (rrd_value_t *) calloc(row_length, sizeof(rrd_value_t));
    baseline = (rrd_value_t *) calloc(row_length, sizeof(rrd_value_t));

    /* compute sums of the first 2*offset terms */
    for (i = 0; i < 2 * offset; ++i) {
        k = MyMod(i - offset, row_count);
        for (j = 0; j < row_length; ++j) {
            queue_push(buffers[j], rrd_values[k * row_length + j]);
            working_average[j] += rrd_values[k * row_length + j];
        }
    }

    /* as we are working through the value, we have to make sure to not double
       apply the smoothing after wrapping around. so best is to copy the rrd_values first */

    rrd_values_cpy = (rrd_value_t *) calloc(row_length*row_count, sizeof(rrd_value_t));
    memcpy(rrd_values_cpy,rrd_values,sizeof(rrd_value_t)*row_length*row_count);

    /* compute moving averages */
    for (i = offset; i < row_count + offset; ++i) {
        for (j = 0; j < row_length; ++j) {
            k = MyMod(i, row_count);
            /* add a term to the sum */
            working_average[j] += rrd_values_cpy[k * row_length + j];
            queue_push(buffers[j], rrd_values_cpy[k * row_length + j]);

            /* reset k to be the center of the window */
            k = MyMod(i - offset, row_count);
            /* overwrite rrd_values entry, the old value is already
             * saved in buffers */
            rrd_values[k * row_length + j] =
                working_average[j] / (2 * offset + 1);
            baseline[j] += rrd_values[k * row_length + j];

            /* remove a term from the sum */
            working_average[j] -= queue_pop(buffers[j]);
        }
    }

    for (i = 0; i < row_length; ++i) {
        queue_dealloc(buffers[i]);
        baseline[i] /= row_count;
    }
    free(rrd_values_cpy);
    free(buffers);
    free(working_average);

    if (cf_conv(rrd->rra_def[rra_idx].cf_nam) == CF_SEASONAL) {
        rrd_value_t (*init_seasonality) (rrd_value_t seasonal_coef,
                                         rrd_value_t intercept);

        switch (cf_conv(rrd->rra_def[hw_dep_idx(rrd, rra_idx)].cf_nam)) {
        case CF_HWPREDICT:
            init_seasonality = hw_additive_init_seasonality;
            break;
        case CF_MHWPREDICT:
            init_seasonality = hw_multiplicative_init_seasonality;
            break;
        default:
            rrd_set_error("apply smoother: SEASONAL rra doesn't have "
                          "valid dependency: %s",
                          rrd->rra_def[hw_dep_idx(rrd, rra_idx)].cf_nam);
            return -1;
        }

        for (j = 0; j < row_length; ++j) {
            for (i = 0; i < row_count; ++i) {
                rrd_values[i * row_length + j] =
                    init_seasonality(rrd_values[i * row_length + j],
                                     baseline[j]);
            }
            /* update the baseline coefficient,
             * first, compute the cdp_index. */
            offset = hw_dep_idx(rrd, rra_idx) * row_length + j;
            (rrd->cdp_prep[offset]).scratch[CDP_hw_intercept].u_val +=
                baseline[j];
        }
/* if we are not running on mmap, let's write stuff to disk now */
#ifndef HAVE_MMAP
        /* flush cdp to disk */
        if (rrd_seek(rrd_file, sizeof(stat_head_t) +
                     rrd->stat_head->ds_cnt * sizeof(ds_def_t) +
                     rrd->stat_head->rra_cnt * sizeof(rra_def_t) +
                     sizeof(live_head_t) +
                     rrd->stat_head->ds_cnt * sizeof(pdp_prep_t), SEEK_SET)) {
            rrd_set_error("apply_smoother: seek to cdp_prep failed");
            free(rrd_values);
            return -1;
        }
        if (rrd_write(rrd_file, rrd->cdp_prep,
                      sizeof(cdp_prep_t) *
                      (rrd->stat_head->rra_cnt) * rrd->stat_head->ds_cnt)
            != (ssize_t) (sizeof(cdp_prep_t) * (rrd->stat_head->rra_cnt) *
                          (rrd->stat_head->ds_cnt))) {
            rrd_set_error("apply_smoother: cdp_prep write failed");
            free(rrd_values);
            return -1;
        }
#endif

    }

    /* endif CF_SEASONAL */
    /* flush updated values to disk */
    if (rrd_seek(rrd_file, rra_start, SEEK_SET)) {
        rrd_set_error("apply_smoother: seek to pos %d failed", rra_start);
        free(rrd_values);
        return -1;
    }
    /* write as a single block */
    if (rrd_write
        (rrd_file, rrd_values, sizeof(rrd_value_t) * row_length * row_count)
        != (ssize_t) (sizeof(rrd_value_t) * row_length * row_count)) {
        rrd_set_error("apply_smoother: write failed to %lu", rra_start);
        free(rrd_values);
        return -1;
    }

    free(rrd_values);
    free(baseline);
    return 0;
}
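
Stripped of the RRD file handling, the smoothing itself is just a centered moving average over a periodic array. A minimal standalone sketch of that arithmetic (plain arrays in place of the FIFOqueue type; offset plays the same role as above):

/* Sketch only: centered moving average of width 2*offset+1 over a periodic array. */
static void smooth_periodic(const double *in, double *out,
                            unsigned long n, unsigned long offset)
{
    unsigned long i, k, width = 2 * offset + 1;
    double sum = 0.0;

    /* prime the running sum with the first 2*offset wrapped-around terms */
    for (i = 0; i < 2 * offset; ++i)
        sum += in[(i + n - offset) % n];

    for (i = offset; i < n + offset; ++i) {
        sum += in[i % n];                      /* add the newest term */
        k = (i + n - offset) % n;              /* center of the window */
        out[k] = sum / width;
        sum -= in[(i + n - 2 * offset) % n];   /* drop the oldest term */
    }
}

Writing into a separate out array is what sidesteps the wrap-around double-smoothing that rrd_values_cpy guards against in the function above.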
Example #21
/* =============================================================================
 * data_generate
 * -- Binary variables of random PDFs
 * -- If seed is <0, do not reseed
 * -- Returns random network
 * =============================================================================
 */
net_t*
data_generate (data_t* dataPtr, long seed, long maxNumParent, long percentParent)
{
    random_t* randomPtr = dataPtr->randomPtr;
    if (seed >= 0) {
        random_seed(randomPtr, seed);
    }

    /*
     * Generate random Bayesian network
     */

    long numVar = dataPtr->numVar;
    net_t* netPtr = net_alloc(numVar);
    assert(netPtr);
    net_generateRandomEdges(netPtr, maxNumParent, percentParent, randomPtr);

    /*
     * Create a threshold for each of the possible permutation of variable
     * value instances
     */

    long** thresholdsTable = (long**)SEQ_MALLOC(numVar * sizeof(long*));
    assert(thresholdsTable);
    long v;
    for (v = 0; v < numVar; v++) {
        list_t* parentIdListPtr = net_getParentIdListPtr(netPtr, v);
        long numThreshold = 1 << list_getSize(parentIdListPtr);
        long* thresholds = (long*)SEQ_MALLOC(numThreshold * sizeof(long));
        assert(thresholds);
        long t;
        for (t = 0; t < numThreshold; t++) {
            long threshold = random_generate(randomPtr) % (DATA_PRECISION + 1);
            thresholds[t] = threshold;
        }
        thresholdsTable[v] = thresholds;
    }

    /*
     * Create variable dependency ordering for record generation
     */

    long* order = (long*)SEQ_MALLOC(numVar * sizeof(long));
    assert(order);
    long numOrder = 0;

    queue_t* workQueuePtr = queue_alloc(-1);
    assert(workQueuePtr);

    vector_t* dependencyVectorPtr = vector_alloc(1);
    assert(dependencyVectorPtr);

    bitmap_t* orderedBitmapPtr = bitmap_alloc(numVar);
    assert(orderedBitmapPtr);
    bitmap_clearAll(orderedBitmapPtr);

    bitmap_t* doneBitmapPtr = bitmap_alloc(numVar);
    assert(doneBitmapPtr);
    bitmap_clearAll(doneBitmapPtr);
    v = -1;
    while ((v = bitmap_findClear(doneBitmapPtr, (v + 1))) >= 0) {
        list_t* childIdListPtr = net_getChildIdListPtr(netPtr, v);
        long numChild = list_getSize(childIdListPtr);
        if (numChild == 0) {

            bool status;

            /*
             * Use breadth-first search to find net connected to this leaf
             */

            queue_clear(workQueuePtr);
            status = queue_push(workQueuePtr, (void*)v);
            assert(status);
            while (!queue_isEmpty(workQueuePtr)) {
                long id = (long)queue_pop(workQueuePtr);
                status = bitmap_set(doneBitmapPtr, id);
                assert(status);
                status = vector_pushBack(dependencyVectorPtr, (void*)id);
                assert(status);
                list_t* parentIdListPtr = net_getParentIdListPtr(netPtr, id);
                list_iter_t it;
                list_iter_reset(&it, parentIdListPtr);
                while (list_iter_hasNext(&it, parentIdListPtr)) {
                    long parentId = (long)list_iter_next(&it, parentIdListPtr);
                    status = queue_push(workQueuePtr, (void*)parentId);
                    assert(status);
                }
            }

            /*
             * Create ordering
             */

            long i;
            long n = vector_getSize(dependencyVectorPtr);
            for (i = 0; i < n; i++) {
                long id = (long)vector_popBack(dependencyVectorPtr);
                if (!bitmap_isSet(orderedBitmapPtr, id)) {
                    bitmap_set(orderedBitmapPtr, id);
                    order[numOrder++] = id;
                }
            }

        }
    }
    assert(numOrder == numVar);

    /*
     * Create records
     */

    char* record = dataPtr->records;
    long r;
    long numRecord = dataPtr->numRecord;
    for (r = 0; r < numRecord; r++) {
        long o;
        for (o = 0; o < numOrder; o++) {
            long v = order[o];
            list_t* parentIdListPtr = net_getParentIdListPtr(netPtr, v);
            long index = 0;
            list_iter_t it;
            list_iter_reset(&it, parentIdListPtr);
            while (list_iter_hasNext(&it, parentIdListPtr)) {
                long parentId = (long)list_iter_next(&it, parentIdListPtr);
                long value = record[parentId];
                assert(value != DATA_INIT);
                index = (index << 1) + value;
            }
            long rnd = random_generate(randomPtr) % DATA_PRECISION;
            long threshold = thresholdsTable[v][index];
            record[v] = ((rnd < threshold) ? 1 : 0);
        }
        record += numVar;
        assert(record <= (dataPtr->records + numRecord * numVar));
    }

    /*
     * Clean up
     */

    bitmap_free(doneBitmapPtr);
    bitmap_free(orderedBitmapPtr);
    vector_free(dependencyVectorPtr);
    queue_free(workQueuePtr);
    SEQ_FREE(order);
    for (v = 0; v < numVar; v++) {
        SEQ_FREE(thresholdsTable[v]);
    }
    SEQ_FREE(thresholdsTable);

    return netPtr;
}
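
The threshold lookup in the record loop packs the parents' binary values into an index with index = (index << 1) + value. A tiny worked example of that packing (illustration only, independent of the STM framework):

/* Sketch only: three parents with values 1, 0, 1 (in list order) select slot 0b101 = 5. */
#include <assert.h>

int main(void)
{
    long parentValues[] = {1, 0, 1};
    long index = 0;
    int i;

    for (i = 0; i < 3; i++) {
        index = (index << 1) + parentValues[i];
    }
    assert(index == 5);
    return 0;
}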
Example #22
/* Smooth a periodic array with a moving average: equal weights and
 * length = 5% of the period. */
int apply_smoother( rrd_t *rrd, unsigned long rra_idx, unsigned long rra_start,
    rrd_file_t *rrd_file) {
    unsigned long i, j, k;
    unsigned long totalbytes;
    rrd_value_t *rrd_values;
    unsigned long row_length = rrd->stat_head->ds_cnt;
    unsigned long row_count = rrd->rra_def[rra_idx].row_cnt;
    unsigned long offset;
    FIFOqueue **buffers;
    rrd_value_t *working_average;
    rrd_value_t *baseline;
	int ret = 0;

    if (atoi(rrd->stat_head->version) >= 4) {
        offset = floor(rrd->rra_def[rra_idx].
                       par[RRA_seasonal_smoothing_window].
                       u_val / 2 * row_count);
    } else {
        offset = floor(0.05 / 2 * row_count);
    }

    if (offset == 0)
        return 0;       /* no smoothing */

    /* allocate memory */
    totalbytes = sizeof(rrd_value_t) * row_length * row_count;
    rrd_values = (rrd_value_t *) malloc(totalbytes);
    if (rrd_values == NULL) {
        return -RRD_ERR_MALLOC5;
    }

    /* rra_start is at the beginning of this rra */
    if (rrd_seek(rrd_file, rra_start, SEEK_SET)) {
        free(rrd_values);
        return -RRD_ERR_SEEK2;
    }

    /* could read all data in a single block, but we need to
     * check for NA values */
    for (i = 0; i < row_count; ++i) {
        for (j = 0; j < row_length; ++j) {
            if (rrd_read
                (rrd_file, &(rrd_values[i * row_length + j]),
                 sizeof(rrd_value_t) * 1)
                != (ssize_t) (sizeof(rrd_value_t) * 1)) {
				ret = -RRD_ERR_READ2;
            }
            if (isnan(rrd_values[i * row_length + j])) {
                /* can't apply smoothing, still uninitialized values */
#ifdef DEBUG
                fprintf(stderr,
                        "apply_smoother: NA detected in seasonal array: %ld %ld\n",
                        i, j);
#endif
                free(rrd_values);
                return ret;
            }
        }
    }

    /* allocate queues, one for each data source */
    buffers = (FIFOqueue **) malloc(sizeof(FIFOqueue *) * row_length);
    for (i = 0; i < row_length; ++i) {
        queue_alloc(&(buffers[i]), 2 * offset + 1);
    }
    /* need working average initialized to 0 */
    working_average = (rrd_value_t *) calloc(row_length, sizeof(rrd_value_t));
    baseline = (rrd_value_t *) calloc(row_length, sizeof(rrd_value_t));

    /* compute sums of the first 2*offset terms */
    for (i = 0; i < 2 * offset; ++i) {
        k = MyMod(i - offset, row_count);
        for (j = 0; j < row_length; ++j) {
            queue_push(buffers[j], rrd_values[k * row_length + j]);
            working_average[j] += rrd_values[k * row_length + j];
        }
    }

    /* compute moving averages */
    for (i = offset; i < row_count + offset; ++i) {
        for (j = 0; j < row_length; ++j) {
            k = MyMod(i, row_count);
            /* add a term to the sum */
            working_average[j] += rrd_values[k * row_length + j];
            queue_push(buffers[j], rrd_values[k * row_length + j]);

            /* reset k to be the center of the window */
            k = MyMod(i - offset, row_count);
            /* overwrite rrd_values entry, the old value is already
             * saved in buffers */
            rrd_values[k * row_length + j] =
                working_average[j] / (2 * offset + 1);
            baseline[j] += rrd_values[k * row_length + j];

            /* remove a term from the sum */
            working_average[j] -= queue_pop(buffers[j]);
        }
    }

    for (i = 0; i < row_length; ++i) {
        queue_dealloc(buffers[i]);
        baseline[i] /= row_count;
    }
    free(buffers);
    free(working_average);

    if (cf_conv(rrd->rra_def[rra_idx].cf_nam) == CF_SEASONAL) {
        rrd_value_t (*init_seasonality) (rrd_value_t seasonal_coef,
                                         rrd_value_t intercept);

        switch (cf_conv(rrd->rra_def[hw_dep_idx(rrd, rra_idx)].cf_nam)) {
        case CF_HWPREDICT:
            init_seasonality = hw_additive_init_seasonality;
            break;
        case CF_MHWPREDICT:
            init_seasonality = hw_multiplicative_init_seasonality;
            break;
        default:
            return -RRD_ERR_DEP1;
        }

        for (j = 0; j < row_length; ++j) {
            for (i = 0; i < row_count; ++i) {
                rrd_values[i * row_length + j] =
                    init_seasonality(rrd_values[i * row_length + j],
                                     baseline[j]);
            }
            /* update the baseline coefficient,
             * first, compute the cdp_index. */
            offset = hw_dep_idx(rrd, rra_idx) * row_length + j;
            (rrd->cdp_prep[offset]).scratch[CDP_hw_intercept].u_val +=
                baseline[j];
        }
        /* flush cdp to disk */
        if (rrd_seek(rrd_file, sizeof(stat_head_t) +
                     rrd->stat_head->ds_cnt * sizeof(ds_def_t) +
                     rrd->stat_head->rra_cnt * sizeof(rra_def_t) +
                     sizeof(live_head_t) +
                     rrd->stat_head->ds_cnt * sizeof(pdp_prep_t), SEEK_SET)) {
            free(rrd_values);
			return -RRD_ERR_SEEK3;
        }
        if (rrd_write(rrd_file, rrd->cdp_prep,
                      sizeof(cdp_prep_t) *
                      (rrd->stat_head->rra_cnt) * rrd->stat_head->ds_cnt)
            != (ssize_t) (sizeof(cdp_prep_t) * (rrd->stat_head->rra_cnt) *
                          (rrd->stat_head->ds_cnt))) {
            free(rrd_values);
            return -RRD_ERR_WRITE1;
        }
    }

    /* endif CF_SEASONAL */
    /* flush updated values to disk */
    if (rrd_seek(rrd_file, rra_start, SEEK_SET)) {
        free(rrd_values);
        return -RRD_ERR_SEEK4;
    }
    /* write as a single block */
    if (rrd_write
        (rrd_file, rrd_values, sizeof(rrd_value_t) * row_length * row_count)
        != (ssize_t) (sizeof(rrd_value_t) * row_length * row_count)) {
        free(rrd_values);
        return -RRD_ERR_WRITE2;
    }

    free(rrd_values);
    free(baseline);
    return 0;
}
Example #23
void cofact_init(gls_config_t cfg)
{
	unsigned int i, j, lpb = 0, n;

	cofact_queue = NULL;
	for(i = 0; i < sizeof(cfg->lpb) / sizeof(cfg->lpb[0]); i++)
	{
		cand_lpb[i] = cfg->lpb[i];
		if(lpb < cfg->lpb[i])
			lpb = cfg->lpb[i];
	}
	n = nb_curves(cfg->lpb[APOLY_IDX]);
	n_cofact_algos = n + 3;

	cofact_algos = (cofact_algo_t **) malloc(COFACT_SIZES * sizeof(cofact_algo_t *));
	for(i = 0; i < COFACT_SIZES; i++)
	{
		cofact_algos[i] = (cofact_algo_t *) malloc(n_cofact_algos * sizeof(cofact_algo_t));
		memset(cofact_algos[i], 0, n_cofact_algos * sizeof(cofact_algo_t));
		for(j = 0; j < n_cofact_algos; j++)
		{
			cofact_algos[i][j].queue = queue_alloc();
		}
	}

#if USE_OPENCL
	int PP1_STAGE2_XJ_LEN = 0;
	int ECM_COMMONZ_T_LEN = 0;
	int ECM_STAGE2_PID_LEN = 0;
	int ECM_STAGE2_PJ_LEN = 0;

	/* pm1 */
	cofact_algos[0][0].process = pm1_ul32_process_ocl;
	cofact_algos[0][0].plan = malloc(sizeof(pm1_plan_t));
	pm1_plan_init(cofact_algos[0][0].plan, 315, 2205);
	cofact_algos[0][0].algo_idx = 0;
    PP1_STAGE2_XJ_LEN = ((pm1_plan_t *)cofact_algos[0][0].plan)->stage2.n_S1;

    cofact_algos[1][0].process = pm1_ul64_process_ocl;
    cofact_algos[1][0].plan = cofact_algos[0][0].plan;
    cofact_algos[1][0].algo_idx = 0;

	cofact_algos[2][0].process = pm1_ul96_process_ocl;
	cofact_algos[2][0].plan = cofact_algos[0][0].plan;
	cofact_algos[2][0].algo_idx = 0;

	cofact_algos[3][0].process = pm1_ul128_process_ocl;
	cofact_algos[3][0].plan = cofact_algos[0][0].plan;
	cofact_algos[3][0].algo_idx = 0;

	cofact_algos[4][0].process = pm1_ul160_process_ocl;
	cofact_algos[4][0].plan = cofact_algos[0][0].plan;
	cofact_algos[4][0].algo_idx = 0;

	cofact_algos[5][0].process = pm1_ul192_process_ocl;
	cofact_algos[5][0].plan = cofact_algos[0][0].plan;
	cofact_algos[5][0].algo_idx = 0;

	cofact_algos[6][0].process = pm1_ul224_process_ocl;
	cofact_algos[6][0].plan = cofact_algos[0][0].plan;
	cofact_algos[6][0].algo_idx = 0;

	cofact_algos[7][0].process = pm1_ul256_process_ocl;
	cofact_algos[7][0].plan = cofact_algos[0][0].plan;
	cofact_algos[7][0].algo_idx = 0;

	cofact_algos[8][0].process = pm1_mpz_process;
	cofact_algos[8][0].plan = cofact_algos[0][0].plan;
	cofact_algos[8][0].algo_idx = 0;

	/* pp1 */
	cofact_algos[0][1].process = pp1_ul32_process_ocl;
	cofact_algos[0][1].plan = malloc(sizeof(pp1_plan_t));
	pp1_plan_init(cofact_algos[0][1].plan, 525, 3255);
	cofact_algos[0][1].algo_idx = 1;
    PP1_STAGE2_XJ_LEN = MAX(PP1_STAGE2_XJ_LEN, ((pp1_plan_t *)cofact_algos[0][1].plan)->stage2.n_S1);

	cofact_algos[1][1].process = pp1_ul64_process_ocl;
	cofact_algos[1][1].plan = cofact_algos[0][1].plan;
	cofact_algos[1][1].algo_idx = 1;

	cofact_algos[2][1].process = pp1_ul96_process_ocl;
	cofact_algos[2][1].plan = cofact_algos[0][1].plan;
	cofact_algos[2][1].algo_idx = 1;

	cofact_algos[3][1].process = pp1_ul128_process_ocl;
	cofact_algos[3][1].plan = cofact_algos[0][1].plan;
	cofact_algos[3][1].algo_idx = 1;

	cofact_algos[4][1].process = pp1_ul160_process_ocl;
	cofact_algos[4][1].plan = cofact_algos[0][1].plan;
	cofact_algos[4][1].algo_idx = 1;

	cofact_algos[5][1].process = pp1_ul192_process_ocl;
	cofact_algos[5][1].plan = cofact_algos[0][1].plan;
	cofact_algos[5][1].algo_idx = 1;

	cofact_algos[6][1].process = pp1_ul224_process_ocl;
	cofact_algos[6][1].plan = cofact_algos[0][1].plan;
	cofact_algos[6][1].algo_idx = 1;

	cofact_algos[7][1].process = pp1_ul256_process_ocl;
	cofact_algos[7][1].plan = cofact_algos[0][1].plan;
	cofact_algos[7][1].algo_idx = 1;

	cofact_algos[8][1].process = pp1_mpz_process;
	cofact_algos[8][1].plan = cofact_algos[0][1].plan;
	cofact_algos[8][1].algo_idx = 1;

	/* ecm */
	cofact_algos[0][2].process = ecm_ul32_process_ocl;
	cofact_algos[0][2].plan = malloc(sizeof(ecm_plan_t));
	ecm_plan_init(cofact_algos[0][2].plan, 105, 3255, MONTY12, 2);
	cofact_algos[0][2].algo_idx = 2;
    {
        ecm_plan_t *_ecm_plan = (ecm_plan_t *)cofact_algos[0][2].plan;
        ECM_COMMONZ_T_LEN = (_ecm_plan->stage2.n_S1) + (_ecm_plan->stage2.i1 - _ecm_plan->stage2.i0 - ((_ecm_plan->stage2.i0 == 0) ? 1 : 0));
        ECM_STAGE2_PID_LEN = _ecm_plan->stage2.i1 - _ecm_plan->stage2.i0;
        ECM_STAGE2_PJ_LEN = _ecm_plan->stage2.n_S1;
    }

	cofact_algos[1][2].process = ecm_ul64_process_ocl;
	cofact_algos[1][2].plan = cofact_algos[0][2].plan;
	cofact_algos[1][2].algo_idx = 2;

	cofact_algos[2][2].process = ecm_ul96_process_ocl;
	cofact_algos[2][2].plan = cofact_algos[0][2].plan;
	cofact_algos[2][2].algo_idx = 2;

	cofact_algos[3][2].process = ecm_ul128_process_ocl;
	cofact_algos[3][2].plan = cofact_algos[0][2].plan;
	cofact_algos[3][2].algo_idx = 2;

	cofact_algos[4][2].process = ecm_ul160_process_ocl;
	cofact_algos[4][2].plan = cofact_algos[0][2].plan;
	cofact_algos[4][2].algo_idx = 2;

	cofact_algos[5][2].process = ecm_ul192_process_ocl;
	cofact_algos[5][2].plan = cofact_algos[0][2].plan;
	cofact_algos[5][2].algo_idx = 2;

	cofact_algos[6][2].process = ecm_ul224_process_ocl;
	cofact_algos[6][2].plan = cofact_algos[0][2].plan;
	cofact_algos[6][2].algo_idx = 2;

	cofact_algos[7][2].process = ecm_ul256_process_ocl;
	cofact_algos[7][2].plan = cofact_algos[0][2].plan;
	cofact_algos[7][2].algo_idx = 2;

	cofact_algos[8][2].process = ecm_mpz_process;
	cofact_algos[8][2].plan = cofact_algos[0][2].plan;
	cofact_algos[8][2].algo_idx = 2;

	if(n > 0)
	{
		cofact_algos[0][3].process = ecm_ul32_process_ocl;
		cofact_algos[0][3].plan = malloc(sizeof(ecm_plan_t));
		ecm_plan_init(cofact_algos[0][3].plan, 315, 5355, BRENT12, 11);
		cofact_algos[0][3].algo_idx = 3;
        {
            ecm_plan_t *_ecm_plan = (ecm_plan_t *)cofact_algos[0][3].plan;
            int _ECM_COMMONZ_T_LEN = (_ecm_plan->stage2.n_S1) + (_ecm_plan->stage2.i1 - _ecm_plan->stage2.i0 - ((_ecm_plan->stage2.i0 == 0) ? 1 : 0));
            int _ECM_STAGE2_PID_LEN = _ecm_plan->stage2.i1 - _ecm_plan->stage2.i0;
            int _ECM_STAGE2_PJ_LEN = _ecm_plan->stage2.n_S1;
            ECM_COMMONZ_T_LEN = MAX(ECM_COMMONZ_T_LEN, _ECM_COMMONZ_T_LEN);
            ECM_STAGE2_PID_LEN = MAX(ECM_STAGE2_PID_LEN, _ECM_STAGE2_PID_LEN);
            ECM_STAGE2_PJ_LEN = MAX(ECM_STAGE2_PJ_LEN, _ECM_STAGE2_PJ_LEN);
        }

	    cofact_algos[1][3].process = ecm_ul64_process_ocl;
	    cofact_algos[1][3].plan = cofact_algos[0][3].plan;
	    cofact_algos[1][3].algo_idx = 3;

	    cofact_algos[2][3].process = ecm_ul96_process_ocl;
	    cofact_algos[2][3].plan = cofact_algos[0][3].plan;
	    cofact_algos[2][3].algo_idx = 3;

	    cofact_algos[3][3].process = ecm_ul128_process_ocl;
	    cofact_algos[3][3].plan = cofact_algos[0][3].plan;
	    cofact_algos[3][3].algo_idx = 3;

	    cofact_algos[4][3].process = ecm_ul160_process_ocl;
	    cofact_algos[4][3].plan = cofact_algos[0][3].plan;
	    cofact_algos[4][3].algo_idx = 3;

	    cofact_algos[5][3].process = ecm_ul192_process_ocl;
	    cofact_algos[5][3].plan = cofact_algos[0][3].plan;
	    cofact_algos[5][3].algo_idx = 3;

	    cofact_algos[6][3].process = ecm_ul224_process_ocl;
	    cofact_algos[6][3].plan = cofact_algos[0][3].plan;
	    cofact_algos[6][3].algo_idx = 3;

	    cofact_algos[7][3].process = ecm_ul256_process_ocl;
	    cofact_algos[7][3].plan = cofact_algos[0][3].plan;
	    cofact_algos[7][3].algo_idx = 3;

	    cofact_algos[8][3].process = ecm_mpz_process;
	    cofact_algos[8][3].plan = cofact_algos[0][3].plan;
	    cofact_algos[8][3].algo_idx = 3;
	}
#else /* USE_OPENCL */
	/* pm1 */
	cofact_algos[0][0].process = pm1_ul64_process;
	cofact_algos[0][0].plan = malloc(sizeof(pm1_plan_t));
	pm1_plan_init(cofact_algos[0][0].plan, 315, 2205);
	cofact_algos[0][0].algo_idx = 0;

	cofact_algos[1][0].process = pm1_ul128_process;
	cofact_algos[1][0].plan = cofact_algos[0][0].plan;
	cofact_algos[1][0].algo_idx = 0;

	cofact_algos[2][0].process = pm1_mpz_process;
	cofact_algos[2][0].plan = cofact_algos[0][0].plan;
	cofact_algos[2][0].algo_idx = 0;

	/* pp1 */
	cofact_algos[0][1].process = pp1_ul64_process;
	cofact_algos[0][1].plan = malloc(sizeof(pp1_plan_t));
	pp1_plan_init(cofact_algos[0][1].plan, 525, 3255);
	cofact_algos[0][1].algo_idx = 1;

	cofact_algos[1][1].process = pp1_ul128_process;
	cofact_algos[1][1].plan = cofact_algos[0][1].plan;
	cofact_algos[1][1].algo_idx = 1;

	cofact_algos[2][1].process = pp1_mpz_process;
	cofact_algos[2][1].plan = cofact_algos[0][1].plan;
	cofact_algos[2][1].algo_idx = 1;

	/* ecm */
	cofact_algos[0][2].process = ecm_ul64_process;
	cofact_algos[0][2].plan = malloc(sizeof(ecm_plan_t));
	ecm_plan_init(cofact_algos[0][2].plan, 105, 3255, MONTY12, 2);
	cofact_algos[0][2].algo_idx = 2;

	cofact_algos[1][2].process = ecm_ul128_process;
	cofact_algos[1][2].plan = cofact_algos[0][2].plan;
	cofact_algos[1][2].algo_idx = 2;

	cofact_algos[2][2].process = ecm_mpz_process;
	cofact_algos[2][2].plan = cofact_algos[0][2].plan;
	cofact_algos[2][2].algo_idx = 2;

	if(n > 0)
	{
		cofact_algos[0][3].process = ecm_ul64_process;
		cofact_algos[0][3].plan = malloc(sizeof(ecm_plan_t));
		ecm_plan_init(cofact_algos[0][3].plan, 315, 5355, BRENT12, 11);
		cofact_algos[0][3].algo_idx = 3;

		cofact_algos[1][3].process = ecm_ul128_process;
		cofact_algos[1][3].plan = cofact_algos[0][3].plan;
		cofact_algos[1][3].algo_idx = 3;

		cofact_algos[2][3].process = ecm_mpz_process;
		cofact_algos[2][3].plan = cofact_algos[0][3].plan;
		cofact_algos[2][3].algo_idx = 3;
	}
#endif /* USE_OPENCL */

	/* heuristic strategy where B1 is increased by sqrt(B1) at each curve */
	double B1 = 105.0;
	for (i = 4; i < n + 3; i++)
	{
		double B2;
		unsigned int k;

		B1 += sqrt (B1);
		B2 = 17.0 * B1;
		/* we round B2 to (2k+1)*105, thus k is the integer nearest to B2/210-0.5 */
		k = B2 / 210.0;

#if USE_OPENCL
		cofact_algos[0][i].process = ecm_ul32_process_ocl;
		cofact_algos[0][i].plan = malloc(sizeof(ecm_plan_t));
		ecm_plan_init(cofact_algos[0][i].plan, (unsigned int) B1, (2 * k + 1) * 105, MONTY12, i - 1);
		cofact_algos[0][i].algo_idx = i;
        {
            ecm_plan_t *_ecm_plan = (ecm_plan_t *)cofact_algos[0][i].plan;
            int _ECM_COMMONZ_T_LEN = (_ecm_plan->stage2.n_S1) + (_ecm_plan->stage2.i1 - _ecm_plan->stage2.i0 - ((_ecm_plan->stage2.i0 == 0) ? 1 : 0));
            int _ECM_STAGE2_PID_LEN = _ecm_plan->stage2.i1 - _ecm_plan->stage2.i0;
            int _ECM_STAGE2_PJ_LEN = _ecm_plan->stage2.n_S1;
            ECM_COMMONZ_T_LEN = MAX(ECM_COMMONZ_T_LEN, _ECM_COMMONZ_T_LEN);
            ECM_STAGE2_PID_LEN = MAX(ECM_STAGE2_PID_LEN, _ECM_STAGE2_PID_LEN);
            ECM_STAGE2_PJ_LEN = MAX(ECM_STAGE2_PJ_LEN, _ECM_STAGE2_PJ_LEN);
        }

		cofact_algos[1][i].process = ecm_ul64_process_ocl;
		cofact_algos[1][i].plan = cofact_algos[0][i].plan;
		cofact_algos[1][i].algo_idx = i;

		cofact_algos[2][i].process = ecm_ul96_process_ocl;
		cofact_algos[2][i].plan = cofact_algos[0][i].plan;
		cofact_algos[2][i].algo_idx = i;

		cofact_algos[3][i].process = ecm_ul128_process_ocl;
		cofact_algos[3][i].plan = cofact_algos[0][i].plan;
		cofact_algos[3][i].algo_idx = i;

		cofact_algos[4][i].process = ecm_ul160_process_ocl;
		cofact_algos[4][i].plan = cofact_algos[0][i].plan;
		cofact_algos[4][i].algo_idx = i;

		cofact_algos[5][i].process = ecm_ul192_process_ocl;
		cofact_algos[5][i].plan = cofact_algos[0][i].plan;
		cofact_algos[5][i].algo_idx = i;

		cofact_algos[6][i].process = ecm_ul224_process_ocl;
		cofact_algos[6][i].plan = cofact_algos[0][i].plan;
		cofact_algos[6][i].algo_idx = i;

		cofact_algos[7][i].process = ecm_ul256_process_ocl;
		cofact_algos[7][i].plan = cofact_algos[0][i].plan;
		cofact_algos[7][i].algo_idx = i;

		cofact_algos[8][i].process = ecm_mpz_process;
		cofact_algos[8][i].plan = cofact_algos[0][i].plan;
		cofact_algos[8][i].algo_idx = i;
#else /* USE_OPENCL */
		cofact_algos[0][i].process = ecm_ul64_process;
		cofact_algos[0][i].plan = malloc(sizeof(ecm_plan_t));
		ecm_plan_init(cofact_algos[0][i].plan, (unsigned int) B1, (2 * k + 1) * 105, MONTY12, i - 1);
		cofact_algos[0][i].algo_idx = i;

		cofact_algos[1][i].process = ecm_ul128_process;
		cofact_algos[1][i].plan = cofact_algos[0][i].plan;
		cofact_algos[1][i].algo_idx = i;

		cofact_algos[2][i].process = ecm_mpz_process;
		cofact_algos[2][i].plan = cofact_algos[0][i].plan;
		cofact_algos[2][i].algo_idx = i;
#endif /* USE_OPENCL */
	}
	assert(i == n_cofact_algos);

#if USE_OPENCL
    /* Construct build arguments */
	const char *config_mp_source = "las/ocl/las.cl";            /* File name of kernel source */

    char *build_opts = NULL;
    {
		int build_opts_len =
				snprintf(NULL, 0, "%s -D PP1_STAGE2_XJ_LEN=%d -D ECM_COMMONZ_T_LEN=%d -D ECM_STAGE2_PID_LEN=%d -D ECM_STAGE2_PJ_LEN=%d",
						 ocl_state.buildopts,
						 PP1_STAGE2_XJ_LEN,
						 ECM_COMMONZ_T_LEN,
						 ECM_STAGE2_PID_LEN,
						 ECM_STAGE2_PJ_LEN);

		build_opts_len++; /* snprintf does not include null byte in ret value */
		build_opts = (char *)malloc(build_opts_len);

		snprintf(build_opts, build_opts_len, "%s -D PP1_STAGE2_XJ_LEN=%d -D ECM_COMMONZ_T_LEN=%d -D ECM_STAGE2_PID_LEN=%d -D ECM_STAGE2_PJ_LEN=%d",
				 ocl_state.buildopts,
				 PP1_STAGE2_XJ_LEN,
				 ECM_COMMONZ_T_LEN,
				 ECM_STAGE2_PID_LEN,
				 ECM_STAGE2_PJ_LEN);
		printf("build_opts=%s\n", build_opts);
    }
    
    /* Now do the build */
    ocl_build(&ocl_state, config_mp_source, build_opts);
    
    free(build_opts);
#endif /* USE_OPENCL */

	return;
}
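
For reference, the B1/B2 heuristic at the end of cofact_init() can be tabulated on its own. A small standalone sketch that prints the first few parameter pairs exactly as the loop above computes them (illustration only):

/* Sketch only: B1 grows by sqrt(B1) per extra curve, B2 is rounded to (2k+1)*105. */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double B1 = 105.0;
	unsigned int i;

	for (i = 4; i < 8; i++)
	{
		double B2;
		unsigned int k;

		B1 += sqrt(B1);
		B2 = 17.0 * B1;
		k = B2 / 210.0;	/* integer nearest to B2/210 - 0.5 */
		printf("extra curve %u: B1=%u B2=%u\n", i - 3, (unsigned int) B1, (2 * k + 1) * 105);
	}
	return 0;
}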
Example #24
static int8_t localize(ir_graph_t *ir_graph, node_id_t ref_node)
{
    uint8_t localized = 0; /* bitmask */
    uint8_t i, k;
    node_id_t node_i, node_j;
    ir_edge_t *edge;
    location_t *loc_i, *loc_j;
    float angle_rad;

    memset(&locations, 0, sizeof(locations));
    memset(&loc_queue, 0, sizeof(loc_queue));
    memset(&loc_queue_data, 0, sizeof(loc_queue_data));

    LOG("localize: ref node ");
    LOGP("%u\r\n", ref_node);

    locations[ref_node].valid = true;
    locations[ref_node].pt.x = 0;
    locations[ref_node].pt.y = 0;
    localized |= 1 << ref_node;

    i = queue_alloc(&loc_queue);
    loc_queue_data[i] = ref_node;
    queue_enqueue(&loc_queue);

    while (!queue_empty(&loc_queue)) {
        i = queue_peek(&loc_queue);
        node_i = loc_queue_data[i];
        queue_dequeue(&loc_queue);
        loc_i = &locations[node_i];

        LOG("localizing neighbors of node ");
        LOGP("%u (%d,%d)\r\n", node_i, loc_i->pt.x, loc_i->pt.y);

        for (node_j = 0; node_j < MAX_NODES; ++node_j) {
            edge = &((*ir_graph)[node_i][node_j]);
            if (edge->valid && !(localized & (1 << node_j))) {
                loc_j = &locations[node_j];
                angle_rad = (float)edge->angle / 180 * M_PI;
                loc_j->valid = true;
                loc_j->pt.x = loc_i->pt.x + edge->dist * cosf(angle_rad);
                loc_j->pt.y = loc_i->pt.y + edge->dist * sinf(angle_rad);

                LOG("localized node: ");
                LOGP("%u [%u,%u] ", node_j, edge->angle, edge->dist);
                LOG(" -> ");
                LOGP("(%d,%d)\r\n", loc_j->pt.x, loc_j->pt.y);

                localized |= 1 << node_j;

                k = queue_alloc(&loc_queue);
                loc_queue_data[k] = node_j;
                queue_enqueue(&loc_queue);
            }
        }
    }

    print_loc_graph(ir_graph, locations);

    return NRK_OK;
}
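
Unlike the other examples on this page, queue_alloc() here returns a slot index into a statically allocated queue rather than a pointer, with the payload kept in the parallel loc_queue_data[] array. A minimal sketch of an interface with those semantics (an assumption for illustration, overflow checks omitted; the project's real implementation may differ):

/* Sketch only: index-based ring buffer behind queue_alloc()/queue_enqueue()/
 * queue_peek()/queue_dequeue() as used above. */
#include <stdbool.h>
#include <stdint.h>

#define IDX_QUEUE_CAP 8

typedef struct {
    uint8_t head;   /* slot index of the oldest committed element */
    uint8_t tail;   /* slot index handed out by the next queue_alloc() */
    uint8_t count;  /* number of committed elements */
} idx_queue_t;

static uint8_t queue_alloc(idx_queue_t *q)   { return q->tail; }   /* reserve next slot */
static void    queue_enqueue(idx_queue_t *q) { q->tail = (q->tail + 1) % IDX_QUEUE_CAP; q->count++; }
static uint8_t queue_peek(idx_queue_t *q)    { return q->head; }   /* oldest committed slot */
static void    queue_dequeue(idx_queue_t *q) { q->head = (q->head + 1) % IDX_QUEUE_CAP; q->count--; }
static bool    queue_empty(idx_queue_t *q)   { return q->count == 0; }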