Example #1
void sync_cont_init(void** h)
{
	sync_hmr_container_t* cont = (sync_hmr_container_t *)calloc(1,sizeof(sync_hmr_container_t));
	SEM_INIT(cont->buffs_clean.semaphore, 0,MAX_CONT_ELEMENTS);
	SEM_COPY(cont->buffs_clean.sem, cont->buffs_clean.semaphore);
	SEM_INIT(cont->buffs_filled.semaphore, 0,MAX_CONT_ELEMENTS);
	SEM_COPY(cont->buffs_filled.sem, cont->buffs_filled.semaphore);

	*h = cont; 
}
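This SEM_INIT takes (semaphore, pshared, value), the same argument order as POSIX sem_init(). A minimal sketch of such a wrapper, assuming the project maps it straight onto unnamed POSIX semaphores (the real macro, and the companion SEM_COPY, may be defined differently per platform):

#include <semaphore.h>

/* Hypothetical expansion: forward directly to sem_init().
 * The project's actual definition may add error handling or use a
 * different semaphore type on non-POSIX platforms. */
#define SEM_INIT(sem, pshared, value)  sem_init(&(sem), (pshared), (value))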
Example #2
/* =============================================================================
 * controller_alloc
 * =============================================================================
 */
void controller_alloc(long numThreads)
{
    posix_memalign((void **)&global_metadata, sizeof(metadata_t), sizeof(metadata_t) * numThreads);
    //semaphores = P_MALLOC(numThreads * sizeof(SEM_T));
    assert(global_metadata != NULL);
    for (long i = 0 ; i < numThreads ; i++)
    {
        SEM_INIT(global_metadata[i].semaphore, 0);
        global_metadata[i].operations = 0;
    }
    global_windowStart = 0;
    global_windowSize = 1;
    global_numThreads = numThreads;

    pthread_attr_t controller_attr;
    struct sched_param param;
    param.__sched_priority = 99;
    pthread_attr_init(&controller_attr);
    pthread_attr_setschedpolicy(&controller_attr, SCHED_RR);
    pthread_attr_setschedparam(&controller_attr,&param);

    /* Initialize mutex and condition variable objects */
    //pthread_mutex_init(&count_mutex, NULL);
    //pthread_cond_init(&count_threshold_cv, NULL);
    pthread_create(&controllerThread, &controller_attr, &controller, NULL);

}
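One detail worth noting: posix_memalign() requires its alignment argument to be a power of two multiple of sizeof(void *), so passing sizeof(metadata_t) only works when the structure size happens to satisfy that. A defensive variant that picks an explicit alignment and checks the return code, sketched with a stand-in metadata_t:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the project's metadata_t; the real struct also holds a semaphore. */
typedef struct { long operations; } metadata_t;

/* Sketch: use an explicit cache-line alignment (64 is an assumption, not the
 * project's choice) and check posix_memalign()'s return code instead of the
 * resulting pointer. */
static metadata_t *alloc_metadata(long numThreads)
{
    metadata_t *meta = NULL;
    int rc = posix_memalign((void **)&meta, 64,
                            sizeof(metadata_t) * (size_t)numThreads);
    if (rc != 0) {
        fprintf(stderr, "posix_memalign failed: %d\n", rc);
        return NULL;
    }
    return meta;
}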
Example #3
fast_mtq fast_mtq_init(int threads, fast_mtq_cb cb, void *arg)
{
	fast_mtq f_mtq;
	int i;

	log_debug("Fast mtq init, threads:%d", threads);

	f_mtq =
	    (fast_mtq) malloc(sizeof(_fast_mtq) +
			      (threads * sizeof(THREAD_VAR)));
	memset(f_mtq, 0, sizeof(_fast_mtq));
	f_mtq->threads = threads;
	f_mtq->cb = cb;
	f_mtq->arg = arg;
	f_mtq->shutdown = 0;
	f_mtq->first = NULL;
	f_mtq->last = NULL;
	f_mtq->queue = 0;

	SEM_INIT(f_mtq->sem);
	COND_INIT(f_mtq->cond);

	for (i = 0; i < f_mtq->threads; i++) {
		THREAD_CREATE(f_mtq->threads_table[i], fast_mtq_thread,
			      f_mtq);
	}

	return f_mtq;
}
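The single malloc() reserves the _fast_mtq struct plus a trailing array of THREAD_VAR handles in one block (note that the memset only clears the struct, and the allocation is not checked). In C99 this layout is usually written with a flexible array member; a sketch under the assumption that threads_table is that trailing array, with pthread_t standing in for THREAD_VAR:

#include <pthread.h>
#include <stdlib.h>

/* Illustrative struct; the real _fast_mtq has more fields. */
typedef struct fast_mtq_s {
    int       threads;
    pthread_t threads_table[];   /* C99 flexible array member */
} fast_mtq_s;

static fast_mtq_s *fast_mtq_alloc(int threads)
{
    fast_mtq_s *q = calloc(1, sizeof(*q) + (size_t)threads * sizeof(pthread_t));
    if (q == NULL)
        return NULL;             /* the original omits this check */
    q->threads = threads;
    return q;
}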
Example #4
WORKQUEUE* workqueue_create()
{
    WORKQUEUE* queue = lib_malloc(sizeof(WORKQUEUE));
    if (queue == NULL) {
        return NULL;
    }

    SEM_INIT(&queue->lock, 1);
    SEM_INIT(&queue->cond, 0);
    queue->state = STATE_INIT;
    queue->thread_count = 0;
    queue->thread_available = 0;
    queue->work_items = bp_queue_new(QUEUE_INITIAL_SIZE);

    return queue;
}
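Here SEM_INIT(&queue->lock, 1) creates a binary semaphore used as a mutex, while SEM_INIT(&queue->cond, 0) starts at zero so consumers block until work is posted. A minimal sketch of that two-semaphore pattern with plain POSIX semaphores (all names are illustrative, not from this project):

#include <semaphore.h>

static sem_t lock;   /* initialized to 1: acts like a mutex        */
static sem_t items;  /* initialized to 0: counts queued work items */

static void pattern_init(void)
{
    sem_init(&lock, 0, 1);
    sem_init(&items, 0, 0);
}

static void producer_add(void)
{
    sem_wait(&lock);         /* enter critical section    */
    /* ... append an item to the shared queue ... */
    sem_post(&lock);         /* leave critical section    */
    sem_post(&items);        /* wake one waiting consumer */
}

static void consumer_take(void)
{
    sem_wait(&items);        /* block until something is queued */
    sem_wait(&lock);
    /* ... remove an item from the shared queue ... */
    sem_post(&lock);
}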
Example #5
int tapeio_init()
{
int i;
THREAD tid;
static char *fid = "tapeio_init";

    SEM_INIT(&sp, 0, 1);

    MUTEX_LOCK(&Status->lock);
        strncpy(Status->output.device, Params->odev, ISP_DEVNAMELEN - 1);
        Status->output.device[ISP_DEVNAMELEN-1] = 0;
        Status->output.type  = ISP_OUTPUT_TAPE;
        Status->output.state = ISP_OUTPUT_UNKNOWN;
    MUTEX_UNLOCK(&Status->lock);

/* Start tape watch thread */

    if (!THREAD_CREATE(&tid, TapeWatchThread, NULL)) {
        util_log(1, "%s: failed to start TapeWatchThread", fid);
        ispd_die(MY_MOD_ID + 3);
    }

/* Start tape writer thread */

    if (!THREAD_CREATE(&tid, TapeWriteThread, NULL)) {
        util_log(1, "%s: failed to start TapeWriteThread", fid);
        ispd_die(MY_MOD_ID + 4);
    }
 
    return 0;
}
Example #6
void ispd_die(int status)
{
BOOL first = FALSE;
static int count  = 0;
static MUTEX mp = MUTEX_INITIALIZER;
static char *fid  = "ispd_die";

    MUTEX_LOCK(&mp);
        if (++count == 1) {
            first = TRUE;
            SEM_INIT(&sp, 0, 1);
        } else if (status == 0) {
            util_log(1, "multiple termination signals... force exit");
            die(0);
        }
    MUTEX_UNLOCK(&mp);

    if (first) {
        if (dl != NULL) isidlCloseDiskLoop(dl);
        set_shutdown(1);
        if (OutputMediaType() == ISP_OUTPUT_TAPE) {
            flush_buffer(ISPD_FLUSH_FORCE);
            SEM_WAIT(&sp);
        }
        /* shutdown_inject(); */
        die(status);
    } else {
        pause();
    }
}
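The static count guarded by mp ensures only the first caller performs the shutdown sequence; a second explicit signal forces an immediate exit, and every other caller just pause()s. The run-once part of that pattern (and only that part) can also be written with pthread_once, sketched here as an alternative rather than what ispd actually does:

#include <pthread.h>
#include <unistd.h>

static pthread_once_t shutdown_once = PTHREAD_ONCE_INIT;

static void do_shutdown(void)
{
    /* close resources, flush buffers, etc. */
}

void die_once(void)
{
    pthread_once(&shutdown_once, do_shutdown); /* exactly one thread runs it */
    pause();                                   /* everyone then parks        */
}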
Example #7
File: dal.c  Project: thewacokid/marfs
int   mc_config(struct DAL*     dal,
                xDALConfigOpt** opts,
                size_t          opt_count) {
   ENTRY();

   MC_Config* config         = malloc(sizeof(MC_Config));
   config->degraded_log_fd   = -1;
   config->degraded_log_path = NULL;

   int i;
   for(i = 0; i < opt_count; i++) {
      if(!strcmp(opts[i]->key, "n")) { // ? Should be strncmp?
         config->n = strtol(opts[i]->val.value.str, NULL, 10);
         LOG(LOG_INFO, "parsing mc option \"n\" = %d\n", config->n);
      }
      else if(!strcmp(opts[i]->key, "e")) {
         config->e = strtol(opts[i]->val.value.str, NULL, 10);
         LOG(LOG_INFO, "parsing mc option \"e\" = %d\n", config->e);
      }
      else if(!strcmp(opts[i]->key, "num_pods")) {
         config->num_pods = strtol(opts[i]->val.value.str, NULL, 10);
         LOG(LOG_INFO, "parsing mc option \"num_pods\" = %d\n",
             config->num_pods);
      }
      else if(!strcmp(opts[i]->key, "num_cap")) {
         config->num_cap = strtol(opts[i]->val.value.str, NULL, 10);
         LOG(LOG_INFO, "parsing mc option \"num_cap\" = %d\n",
             config->num_cap);
      }
      else if(!strcmp(opts[i]->key, "scatter_width")) {
         config->scatter_width = strtol(opts[i]->val.value.str, NULL, 10);
         LOG(LOG_INFO, "parsing mc option \"scatter_width\" = %d\n",
             config->scatter_width);
      }
      else if(!strcmp(opts[i]->key, "degraded_log_dir")) {
         config->degraded_log_path = strdup(opts[i]->val.value.str);
      }
      else {
         LOG(LOG_ERR, "Unrecognized MC DAL config option: %s\n",
             opts[i]->key);
         free(config);
         return -1;
      }
   }

   if(config->degraded_log_path == NULL) {
      LOG(LOG_ERR, "no degraded_log_dir specified in DAL.\n");
      return -1;
   }
   else {
      // initialize the lock to prevent concurrent writes to the log.
      SEM_INIT(&config->lock, 0, 1);
   }

   dal->global_state = config;
   EXIT();
   return 0;
}
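The comment notes that the semaphore (initialized to 1) serializes writes to the degraded log. A sketch of how a writer could use it, assuming SEM_WAIT/SEM_POST wrap sem_wait/sem_post (the actual marfs logging helper is not shown here):

#include <semaphore.h>
#include <unistd.h>

/* Illustrative only: serialize appends to a shared log fd with a binary
 * semaphore used as a mutex. */
static void degraded_log_write(sem_t *lock, int fd, const char *line, size_t len)
{
    sem_wait(lock);               /* one writer at a time       */
    (void)write(fd, line, len);   /* append the degraded record */
    sem_post(lock);
}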
Example #8
File: purge.c  Project: Fran89/seiscomp3
/*======================================================================
----------------------------------------------------------------------*/
VOID InitPurge(PURGE *purge)
   {
   ASSERT(purge != NULL);

   purge->active = FALSE;
   purge->stop = FALSE;
   MUTEX_INIT(&purge->mutex);
   SEM_INIT(&purge->semaphore, 0, 1);
   purge->thread_id = 0;

   return ;
   }  /* end InitPurge() */
Example #9
File: LoopBytes.c  Project: chenws/codes
int LoopBytesMgrInit(int max_bytes, int threshold_add, int threshold_get)
{

    struct StLoopBytesMgr *pMgr =  &gLoopBytesMgr;
    if (pMgr->flag_init != 0) {
        return 0;
    }
    memset(pMgr, 0, sizeof(struct StLoopBytesMgr));
    pMgr->flag_init = 1;

    LOCK_INIT(pMgr->lock);
    SEM_INIT(pMgr->sem_add, 0);
    SEM_INIT(pMgr->sem_get, 0);
    pMgr->pLoopObjMgr = CreateLoopObjMgr(sizeof(char), max_bytes, LOOP_MODE_BLOCKED);
    if (!pMgr->pLoopObjMgr) {
        printf("error: %s->CreateLoopObjMgr failed!\r\n", __FUNCTION__);
        return -1;
    }
    pMgr->bytes_max = pMgr->pLoopObjMgr->total_cnts;
    pMgr->threshold_add = threshold_add;
    pMgr->threshold_get = threshold_get;
    return 0;
}
Example #10
// External API. For a task queue.
// Actually uses multiple internal queues.
// The main queue is X producers to 1 consumer. Scheduler threads to a worker.
// Recycle queues are between all worker threads and a scheduler.
// All based on initq/qpush/qpop. 
// Fixed size and does no allocations after first calls.
queue *queue_create()
{
	queue *ret;

	ret = (queue *) calloc(1,sizeof(struct queue_t));
	if(ret == NULL) 
		return NULL;

	if (SEM_INIT(ret->sem))
		return NULL;
	initq(&ret->q);

	return ret;
}
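The comment above describes the design: many scheduler threads feed one worker, the sem counts queued items, and everything is fixed-size. A stripped-down producer/consumer sketch of that shape with plain POSIX primitives (the ring below is illustrative, not the project's initq/qpush/qpop):

#include <pthread.h>
#include <semaphore.h>

/* Illustrative fixed-size ring, not the project's queue type. */
#define RING_CAP 64

struct ring {
    void           *slot[RING_CAP];
    unsigned        head, tail;
    pthread_mutex_t mtx;    /* protects tail against multiple producers       */
    sem_t           items;  /* counts filled slots; the consumer blocks on it */
};

static void ring_init(struct ring *r)
{
    r->head = r->tail = 0;
    pthread_mutex_init(&r->mtx, NULL);
    sem_init(&r->items, 0, 0);
}

/* Producer side (many threads): push a task and signal the consumer.
 * Overflow handling is omitted for brevity. */
static void ring_push(struct ring *r, void *task)
{
    pthread_mutex_lock(&r->mtx);
    r->slot[r->tail++ % RING_CAP] = task;
    pthread_mutex_unlock(&r->mtx);
    sem_post(&r->items);
}

/* Consumer side (single thread): block until a task is available. */
static void *ring_pop(struct ring *r)
{
    void *task;

    sem_wait(&r->items);
    pthread_mutex_lock(&r->mtx);
    task = r->slot[r->head++ % RING_CAP];
    pthread_mutex_unlock(&r->mtx);
    return task;
}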
Example #11
File: mtq.c  Project: Doap/transports
void mtq_init() {
    mtq mtq = NULL; /* queue */
    mth t = NULL;
    int n,k; 
    pool newp;


	mtq__master = malloc(sizeof(_mtqmaster)); /* happens once, global */
	mtq__master->random = 0;

	/* start MTQ threads */
	for(n=0;n<MTQ_THREADS;n++)  {
	  newp = pool_new();
	  t = pmalloco(newp, sizeof(_mth));
	  t->p = newp;
	  t->mtq = pmalloco(newp,sizeof(_mtq));
	  t->mtq->first = t->mtq->last = NULL;
	  t->mtq->free_first = t->mtq->free_last = NULL;	    
	  t->mtq->users_count = 0;
	  t->mtq->dl = 0;	    
	  t->mtq->length = 0;

	  mtq = t->mtq;
	
	  /* build queue cache */
	  for (k=0;k<MTQ_QUEUE_LONG;k++)  {
		/* mtq->free_last is the first to take from the queue */
		mtq->queue[k].memory = 0;
		mtq->queue[k].prev   = NULL;
		
		/* if queue is empty */
		if (mtq->free_last == NULL)
		  mtq->free_last = &(mtq->queue[k]);
		else
		  mtq->free_first->prev = &(mtq->queue[k]);
		
		mtq->free_first = &(mtq->queue[k]);
		mtq->length++;
	  }
	  
	  SEM_INIT(t->mtq->sem);
	  COND_INIT(t->mtq->cond);
		
	  pthread_create(&(t->thread), NULL, mtq_main, (void *)t);
	  mtq__master->all[n] = t; /* assign it as available */
	}
}
Example #12
static BOOL InitPipe(TTYIO *tp)
{
static char *fid = "ttyio:InitPipe";

    SEM_INIT(&tp->pipe.semaphore, 0, 1);
    tp->pipe.buf = (UINT8 *) malloc(tp->attr.at_pipe);
    if (tp->pipe.buf == NULL) {
        logioMsg(tp->lp, LOG_INFO, "%s: malloc: %s", fid, strerror(errno));
        return FALSE;
    }

    tp->pipe.priority = THREAD_PRIORITY_HIGHEST;

    if (CreatePipe(&tp->pipe.out, &tp->pipe.in, NULL, tp->attr.at_pipe * 2)) {
        tp->pipe.active = TRUE;
        return TRUE;
    } else {
        return FALSE;
    }
}
Example #13
bool openavbAcmpSMControllerStart()
{
	AVB_TRACE_ENTRY(AVB_TRACE_ACMP);

	openavbAcmpSMControllerVars.inflight = openavbListNewList();
	if (!openavbAcmpSMControllerVars.inflight) {
		AVB_LOG_ERROR("Unable to create inflight list. ACMP protocol not started.");
		AVB_TRACE_EXIT(AVB_TRACE_ACMP);
		return FALSE;
	}

	MUTEX_ATTR_HANDLE(mta);
	MUTEX_ATTR_INIT(mta);
	MUTEX_ATTR_SET_TYPE(mta, MUTEX_ATTR_TYPE_DEFAULT);
	MUTEX_ATTR_SET_NAME(mta, "openavbAcmpSMControllerMutex");
	MUTEX_CREATE_ERR();
	MUTEX_CREATE(openavbAcmpSMControllerMutex, mta);
	MUTEX_LOG_ERR("Could not create/initialize 'openavbAcmpSMControllerMutex' mutex");

	SEM_ERR_T(err);
	SEM_INIT(openavbAcmpSMControllerSemaphore, 1, err);
	SEM_LOG_ERR(err);

	// Start the State Machine
	bool errResult;
	bRunning = TRUE;
	THREAD_CREATE(openavbAcmpSmControllerThread, openavbAcmpSmControllerThread, NULL, openavbAcmpSMControllerThreadFn, NULL);
	THREAD_CHECK_ERROR(openavbAcmpSmControllerThread, "Thread / task creation failed", errResult);
	if (errResult) {
		bRunning = FALSE;
		AVB_TRACE_EXIT(AVB_TRACE_ACMP);
		return FALSE;
	}

	AVB_TRACE_EXIT(AVB_TRACE_ACMP);
	return TRUE;
}
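This variant threads an explicit error value through SEM_ERR_T / SEM_INIT / SEM_LOG_ERR. A rough sketch of how such macros could sit on top of POSIX semaphores; these expansions are hypothetical, and the actual OpenAvnu OSAL definitions are platform-specific and may differ:

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>

/* Hypothetical expansions, for illustration only. */
#define SEM_ERR_T(err)             int err
#define SEM_INIT(sem, count, err)  ((err) = (sem_init(&(sem), 0, (count)) == 0 ? 0 : errno))
#define SEM_LOG_ERR(err) \
    do { if (err) fprintf(stderr, "semaphore init failed: %d\n", (err)); } while (0)

static sem_t demo_sem;

static int demo_init(void)
{
    SEM_ERR_T(err);
    SEM_INIT(demo_sem, 1, err);   /* mirrors the call pattern above */
    SEM_LOG_ERR(err);
    return err;
}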
Example #14
File: xdb.c  Project: smokku/wpjabber
xdbcache xdb_cache(instance id)
{
	xdbcache xc;

	if (id == NULL) {
		log_alert("xdb",
			  "Programming Error: xdb_cache() called with NULL\n");
		return NULL;
	}

	xc = pmalloco(id->p, sizeof(_xdbcache));
	xc->i = id;		/* flags it as the top of the ring too */
	xc->first = NULL;	/* init ring */
	SEM_INIT(xc->sem);

	/* register the handler in the instance to filter out xdb results */
	register_phandler(id, o_PRECOND, xdb_results, (void *) xc);

	/* heartbeat to keep a watchful eye on xdb_cache */
	register_beat(10, xdb_thump, (void *) xc);

	register_shutdown_first(xdb_shutdown, (void *) xc);
	return xc;
}
Example #15
/** Start up transport. Read configuration, register callbacks. */
void icqtrans(instance i, xmlnode x)
{
    iti ti;
    pool p = i->p;
    xmlnode config;
    xmlnode cur;
	int check;

    log_debug(ZONE,"ICQ Transport, initializing for section '%s'",i->id);

    /* create new transport instance */
    ti = pmalloco(p,sizeof(_iti));
    ti->i = i;
    ti->xc = xdb_cache(i);

    config = xdb_get(ti->xc,jid_new(xmlnode_pool(x),"config@-internal"),"jabber:config:icqtrans");
    if (config == NULL)
    {
        log_error(i->id,"Configuration not found!");
        return;
    }

    ti->registration_instructions = pstrdup(p,xmlnode_get_tag_data(config,"instructions"));
    if (ti->registration_instructions == NULL)
    { 
        log_debug(i->id,"Registration instructions not found");
    }

    ti->search_instructions = pstrdup(p,xmlnode_get_tag_data(config,"search"));
    if (ti->search_instructions == NULL)
    {
        log_debug(i->id,"Search instructions not found");
    }

    ti->charset = pstrdup(p,xmlnode_get_tag_data(config,"charset"));
    if (ti->charset == NULL)
    {
	  log_debug(i->id,"Charset not specified, set default to %s ",DEFAULT_CHARSET);
	  ti->charset = pstrdup(p,DEFAULT_CHARSET);
    }

    _ucs2utf = iconv_open("UTF-8","UCS-2BE");

    _win2utf = iconv_open("UTF-8",ti->charset);
    if (_win2utf==(iconv_t)-1) {
      ti->charset = pstrdup(p,DEFAULT_CHARSET);
      _win2utf = iconv_open("UTF-8",ti->charset);
      if (_win2utf==(iconv_t)-1) {
        log_error(i->id,"Charset error!");
        return;
      }
    }

    _utf2win = iconv_open(ti->charset,"UTF-8");
    if (_utf2win ==(iconv_t)-1) {
      ti->charset = pstrdup(p,DEFAULT_CHARSET);
      _utf2win = iconv_open(ti->charset,"UTF-8");
      if (_utf2win ==(iconv_t)-1) {
        log_error(i->id,"Charset error!");
        return;
      }
    }

	log_notice("config","charset %s",ti->charset);
	
    ti->msg_chat = xmlnode_get_tag(config,"chat") ? 1 : 0;
	if (ti->msg_chat) {
	  log_notice("config","chat messages enabled");
	}
    ti->web_aware = xmlnode_get_tag(config,"web") ? 1 : 0;
	if (ti->web_aware) {
	  log_notice("config","web presence enabled");
	}
    ti->own_roster = xmlnode_get_tag(config,"own_roster") ? 1 : 0;
	if (ti->own_roster) {
	  log_notice("config","JIT will use own roster");
	}
    ti->no_jabber_roster = xmlnode_get_tag(config,"no_jabber_roster") ? 1 : 0;
	if (ti->no_jabber_roster) {
	  log_notice("config","JIT willn't get users from jabber roster");
	}
	ti->no_x_data = xmlnode_get_tag(config,"no_xdata") ? 1 : 0;
	if (ti->no_x_data) {
	  log_notice("config","JIT will not use xdata");
	}

    cur = xmlnode_get_tag(config,"sms");

    if (cur) {
      ti->sms_id = pstrdup(p,xmlnode_get_tag_data(cur,"host"));
      if (ti->sms_id) {
        ti->sms_show = jit_show2status(xmlnode_get_tag_data(cur,"show"));

        if (ti->sms_show==ICQ_STATUS_NOT_IN_LIST) {
          ti->sms_show = ICQ_STATUS_ONLINE;
        }
        
        log_notice("config","sms host %s show: %d",ti->sms_id,ti->sms_show);

        ti->sms_status = pstrdup(p,xmlnode_get_tag_data(cur,"status"));

        if (ti->sms_status) {
          log_debug(ZONE,"sms st %s ",ti->sms_status);
        }

        ti->sms_name = pstrdup(p,xmlnode_get_tag_data(cur,"name"));

	if (ti->sms_name) {
	  log_debug(ZONE,"sms name %s",ti->sms_name);
	}
      }
    }

    ti->count_file = pstrdup(p,xmlnode_get_tag_data(config,"user_count_file"));
    if (ti->count_file == NULL)  {
      ti->count_file = "icqcount";
    }

	log_notice("config","Using %s as count log file",ti->count_file);

    for (cur = xmlnode_get_firstchild(xmlnode_get_tag(config,"server"));
     cur != NULL;
     cur = xmlnode_get_nextsibling(cur)) {

      char * port;
      char * host;

      if (xmlnode_get_type(cur) != NTYPE_TAG) continue;

      if ((port = xmlnode_get_attrib(cur,"port")) == NULL) continue;
   
      if ((host = xmlnode_get_data(cur)) == NULL) continue;

      ti->auth_hosts[ti->auth_hosts_count] = pstrdup(p,host);
      ti->auth_ports[ti->auth_hosts_count] = j_atoi(port,5190);
      log_debug(ZONE,"Host %s port %d at pos %d",
				ti->auth_hosts[ti->auth_hosts_count],
				ti->auth_ports[ti->auth_hosts_count],
				ti->auth_hosts_count);
      ti->auth_hosts_count++;

      if (ti->auth_hosts_count >= MAX_AUTH_HOSTS) break;
    }

    if (ti->auth_hosts_count == 0) {
      log_alert("err","No hosts to auth icq client !. Using default");
      ti->auth_hosts[ti->auth_hosts_count] = pstrdup(p,"205.188.179.233");
      ti->auth_ports[ti->auth_hosts_count] = 5190;
      ti->auth_hosts_count++;
    }

	/* add queue for unknown packets */
	ti->q = mtq_new(i->p);

    ti->sessions = wpxhash_new(j_atoi(xmlnode_get_tag_data(config,"prime"),509));
    ti->sessions_alt = wpxhash_new(j_atoi(xmlnode_get_tag_data(config,"prime"),509));
    SEM_INIT(ti->sessions_sem);
    ti->vcard = xmlnode_new_tag_pool(p,"vCard");

    xmlnode_put_attrib(ti->vcard,"xmlns",NS_VCARD);
    xmlnode_insert_node(ti->vcard,xmlnode_get_firstchild(xmlnode_get_tag(config,"vCard")));

    /* default 5 hours */
    ti->session_timeout = j_atoi(xmlnode_get_tag_data(config,"session_timeout"),18000);
	log_notice("config","session_timeout in sec : %d",ti->session_timeout);

    ti->reconnect = j_atoi(xmlnode_get_tag_data(config,"reconnects"),0);
    log_notice("config","Number of reconnects for session %d",ti->reconnect);

	check = j_atoi(xmlnode_get_tag_data(config,"session_check"),10);
    log_notice("config","JIT will check session every %d sec",check);
      
	//    ti->admin = xmlnode_dup(xmlnode_get_tag(config,"admin"));
    ti->start = time(NULL);

    /* Register callbacks */
    register_phandler(i,o_DELIVER,it_receive,(void *) ti);
    register_shutdown(it_shutdown,(void *) ti);

    /* Start up heartbeat thread */
    register_beat(check,it_sessions_check,(void *) ti);

    xmlnode_free(config);
}
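Several of the settings above use the j_atoi(value, default) idiom: parse the string if present, otherwise fall back to a default. A standalone equivalent for readers without the jabberd utility library (the helper name here is illustrative):

#include <stdlib.h>

/* Parse a decimal string, falling back to def when s is NULL or not a number. */
static int atoi_or_default(const char *s, int def)
{
    char *end;
    long  v;

    if (s == NULL)
        return def;
    v = strtol(s, &end, 10);
    if (end == s)            /* no digits consumed */
        return def;
    return (int)v;
}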
Example #16
int stream_open(ObjectStream* os,
                IsPut         put,
                curl_off_t    content_length,
                uint8_t       preserve_os_written) {
   LOG(LOG_INFO, "%s\n", ((put) ? "PUT" : "GET"));

   if (os->flags & OSF_OPEN) {
      LOG(LOG_ERR, "%s is already open\n", os->url);
      errno = EINVAL;
      return -1;                // already open
   }
   if (os->flags) {
      if (os->flags & OSF_CLOSED) {
         LOG(LOG_INFO, "stream being re-opened with %s\n", os->url);
         stream_reset(os, preserve_os_written); // previously-used
      }
      else {
         LOG(LOG_ERR, "%s has flags asserted, but is not CLOSED\n", os->url);
         errno = EINVAL;
         return -1;
      }
   }

   os->flags |= OSF_OPEN;
   if (put)
      os->flags |= OSF_WRITING;
   else
      os->flags |= OSF_READING;

   if (! preserve_os_written)
      os->written = 0;          // total read/written through OS

   // caller's open-flags, in case we need to close/repoen
   // (e.g. for Multi, or marfs_ftruncate())
   //
   //   os->open_flags = open_flags;

   // shorthand
   IOBuf* b = &os->iob;

   // readfunc/writefunc just get the IOBuf from libaws4c, but they need
   // the ObjectStream.  So IOBuf now has a pointer to allow this.
   b->user_data = os;

   // install copy of global default-context as per-connection context 
   if (! b->context) {
      LOG(LOG_INFO, "No context.  Cloning from defaults.\n");
      aws_iobuf_context(b, aws_context_clone());
   }

   AWSContext* ctx = b->context;

   os->content_len = content_length;
   if (content_length) {
      s3_set_content_length_r(content_length, ctx);
      // os->flags |= OSF_LENGTH;
   }
   else
      s3_chunked_transfer_encoding_r(1, ctx);

   aws_iobuf_reset(b);          // doesn't affect <user_data> or <context>
   if (put) {
      SEM_INIT(&os->iob_empty, 0, 0);
      SEM_INIT(&os->iob_full,  0, 0);
      aws_iobuf_readfunc(b, &streaming_readfunc);
   }
   else {
      SEM_INIT(&os->iob_empty, 0, 0);
      SEM_INIT(&os->iob_full,  0, 0);
      aws_iobuf_headerfunc(b, &streaming_writeheaderfunc);
      aws_iobuf_writefunc(b, &streaming_writefunc);
   }

   // thread runs the GET/PUT, with the iobuf in <os>
   LOG(LOG_INFO, "starting thread\n");
   if (pthread_create(&os->op, NULL, &s3_op, os)) {
      LOG(LOG_ERR, "pthread_create failed: '%s'\n", strerror(errno));
      return -1;
   }
   return 0;
}
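The two semaphores created with SEM_INIT(..., 0, 0) form a rendezvous between the caller filling the IOBuf and the curl read/write callbacks running in the s3_op thread: one side posts "full" and waits on "empty", the other does the reverse. A stripped-down sketch of that handshake with plain POSIX semaphores (the buffer and function names are illustrative, not marfs types):

#include <semaphore.h>
#include <string.h>

/* Illustrative hand-off buffer between a producer thread and a streaming
 * callback; not the actual marfs IOBuf type. */
static sem_t  buf_full;    /* posted when data is ready for the callback */
static sem_t  buf_empty;   /* posted when the callback has drained it    */
static char   buf[4096];
static size_t buf_len;

static void handoff_init(void)
{
    sem_init(&buf_full,  0, 0);   /* both start at 0, as in stream_open() */
    sem_init(&buf_empty, 0, 0);
}

/* Producer side: publish one chunk, then wait until it is consumed. */
static void producer_put(const char *data, size_t len)
{
    buf_len = (len < sizeof(buf)) ? len : sizeof(buf);
    memcpy(buf, data, buf_len);
    sem_post(&buf_full);
    sem_wait(&buf_empty);
}

/* Callback side: wait for data, copy it out, release the producer. */
static size_t consumer_read(char *dst, size_t max)
{
    size_t n;

    sem_wait(&buf_full);
    n = (buf_len < max) ? buf_len : max;
    memcpy(dst, buf, n);
    sem_post(&buf_empty);
    return n;
}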