Example #1
void *my_thread_process (void * arg)
{
    int i;
    CLIENT *clnt = NULL;
    struct datas vars;
    static double result = 0;
    struct timeval total_timeout;
    struct netconfig *nconf = NULL;
    struct netbuf svcaddr;
    char addrbuf[ADDRBUFSIZE];

    total_timeout.tv_sec = 1;
    total_timeout.tv_usec = 1;

    nconf = getnetconfigent("udp");

    if (nconf == NULL)
    {
        // Test failed
        printf("5\n");
        pthread_exit((void *)5);
    }

    svcaddr.len = 0;
    svcaddr.maxlen = ADDRBUFSIZE;
    svcaddr.buf = addrbuf;

    if (svcaddr.buf == NULL)
    {
        printf("5\n");
        pthread_exit((void *)5);
    }

    if (!rpcb_getaddr(progNum, VERSNUM, nconf,
                      &svcaddr, hostname))
    {
        fprintf(stderr, "rpcb_getaddr failed!!\n");
        printf("5\n");
        pthread_exit((void *)5);
    }

    if (run_mode == 1)
    {
        fprintf(stderr, "Thread %d\n", atoi(arg));
    }

    vars.a = getRand();
    vars.b = getRand();
    vars.c = getRand();

    resTbl[atoi(arg)].locRes = vars.a + (vars.b * vars.c);

    rpcb_rmtcall(nconf, hostname, progNum, VERSNUM, CALCTHREADPROC,
                 (xdrproc_t)xdr_datas, (char *)&vars,
                 (xdrproc_t)xdr_double, (char *)&resTbl[atoi(arg)].svcRes,
                 total_timeout, &svcaddr);

    thread_array_result[atoi(arg)] = (resTbl[atoi(arg)].svcRes == resTbl[atoi(arg)].locRes) ? 0 : 1;

    if (run_mode == 1)
    {
        fprintf(stderr, "Thread #%d calc : %lf, received : %lf\n",
                atoi(arg), resTbl[atoi(arg)].locRes,
                resTbl[atoi(arg)].svcRes);
    }

    pthread_exit(0);
}
Example #2
#ifdef WIN32
void workThreadTransmit( void *pObject )
#else
void *workThreadTransmit( void *pObject )
#endif
{
#ifdef WIN32
	DWORD errorCode = 0;
#else
	int rv = 0;
#endif

	CApoxObj * pobj = ( CApoxObj *)pObject;
	if ( NULL == pobj ) {
#ifdef WIN32	
		ExitThread( errorCode ); // Fail
#else
		pthread_exit( &rv );
#endif
	}
	
	while ( pobj->m_bRun ) {
		
		// Nothing to do if we should end...
		if ( !pobj->m_bRun ) continue;

		// Is there something to transmit
		while ( ( NULL != pobj->m_transmitList.pHead ) && 
				( NULL != pobj->m_transmitList.pHead->pObject ) ) {

			canalMsg msg;
			LOCK_MUTEX( pobj->m_transmitMutex );
			memcpy( &msg, pobj->m_transmitList.pHead->pObject, sizeof( canalMsg ) );
			dll_removeNode( &pobj->m_transmitList, pobj->m_transmitList.pHead );
			UNLOCK_MUTEX( pobj->m_transmitMutex );

			// Outgoing CAN message
			// --------------------------
			// [0] ([1][RTR][EXT][unused 0..4])
			// [1] ID MSB
			// [2] ID
			// [3] ID
			// [4] ID LSB
			// [5] FUTURE USE (CANopen or DeviceNet) // ex. Wait for response? Etc..
			// [6] FUTURE USE (CANopen or DeviceNet)
			// [7] RESERVED FOR TX FLAGS 
			// [8] DATA LEN (0-8)
			// [9-16] 
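			//
			// Illustrative example (not from the original source): for an
			// extended-frame message with id = 0x01234567, RTR clear and four
			// data bytes, the header assembled below becomes
			//   [0]=0xA0 (0x80|EXT) [1]=0x01 [2]=0x23 [3]=0x45 [4]=0x67
			//   [5]=0x00 [6]=0x00 [7]=0x00 [8]=0x04
			// followed by the four data bytes, for a total size of 13.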

			uint8_t sendData[ 20 ];
			short size = 0;

			sendData[ size++ ] = 
				0x80 | 
				( ( msg.flags & CANAL_IDFLAG_RTR ) ? 0x40:0x00) | 
				( ( msg.flags & CANAL_IDFLAG_EXTENDED ) ? 0x20 : 0x00 );
			sendData[ size++ ] = ( uint8_t )( msg.id >> 24 ) & 0x1f;
			sendData[ size++ ] = ( uint8_t )( msg.id >> 16 ) & 0xff;
			sendData[ size++ ] = ( uint8_t )( msg.id >> 8 )  & 0xff;
			sendData[ size++ ] = ( uint8_t )( msg.id ) & 0xff;

			sendData[size++] = 0x00; // future use
			sendData[size++] = 0x00; // future use

			sendData[size++] = 0; // txFlags;
			sendData[size++] = msg.sizeData;

			memcpy( sendData + size, msg.data, msg.sizeData );
			size += msg.sizeData;
			 
			LOCK_MUTEX( pobj->m_apoxMutex );

			if ( USB_OK == pobj->sendUSBMsg( sendData, size ) ) {
	
				// Message sent successfully
				// Update statistics
				pobj->m_stat.cntTransmitData += msg.sizeData;
				pobj->m_stat.cntTransmitFrames += 1;

			}
			else {
					
				// Failed - put message back in queue front
				PCANALMSG pMsg	= new canalMsg;
				if ( NULL != pMsg ) {
						
					// Copy in data
					memcpy ( pMsg, &msg, sizeof( canalMsg ) );

					dllnode *pNode = new dllnode; 
					if ( NULL != pNode ) {
																
						pNode->pObject = pMsg;
						LOCK_MUTEX( pobj->m_transmitMutex );
						dll_addNodeHead( &pobj->m_transmitList, pNode );
						UNLOCK_MUTEX( pobj->m_transmitMutex );

					}
					else {

						delete pMsg;

					}

				}

			}

			UNLOCK_MUTEX( pobj->m_apoxMutex );
										
		} // while data


		// No data to write

		SLEEP( 1 );

		//}	 
	
	} // while 	 


#ifdef WIN32
	ExitThread( errorCode );
#else
	pthread_exit( &rv );
#endif

}
Example #3
void *pvrThread(void *pvrfd)
{
	char tempBuffer[BLOCKSIZE];
	int rd = 0;
	int tsBufferSize = 0;
	int errors = 0;
	eDebug("[MOVIEPLAYER] pvrThread starting: pvrfd = %d", *(int *)pvrfd);
	pthread_cleanup_push(pvrThreadCleanup, (void *)pvrfd);
	nice(-1);
	while (true)
	{
		pthread_testcancel();
		pthread_mutex_lock(&mutex);
		rd = tsBuffer.read(tempBuffer, BLOCKSIZE);
		tsBufferSize = tsBuffer.size();
		pthread_mutex_unlock(&mutex);
		if (rd > 0)
		{
			errors = 0;
			while (1)
			{
				int result;
				result = write(*(int *)pvrfd, tempBuffer, rd);
				if (result < 0)
				{
					int error = errno;
					if (error == EINTR)
					{
						continue;
					}
					else if (error == EAGAIN)
					{
						usleep(100000);
						continue;
					}
					else
					{
						/* all other errors are fatal */
						eDebug("[MOVIEPLAYER] fatal pvr write error occurred %d", error);
					}
				}
				break;
			}
			// eDebug("[MOVIEPLAYER] %d >>> writing %d bytes to pvr...", tsBufferSize, rd); - docasne
		}
		else
		{
			if (tsBufferSize == 0)
			{
				if (++errors > 100)
				{
					eDebug("[MOVIEPLAYER] pvrThread: exit after %d attempts reading from empty buffer", errors);
					break;
				}
				/* wait a bit for new data to arrive */
				usleep(100000);
			}
			else
			{
				/* fatal error, should never happen, stop at once */
				eDebug("[MOVIEPLAYER] pvrThread: fatal error: failed to read from nonempty buffer");
				break;
			}
		}
	}
	pthread_cleanup_pop(1);
	pthread_exit(NULL);
}
Example #4
void *mgr_rx_thread(void * threadId)
{
   
    DTNMP_DEBUG_ENTRY("mgr_rx_thread","(0x%x)", (unsigned long) threadId);
    
    DTNMP_DEBUG_INFO("mgr_rx_thread","Receiver thread running...", NULL);
    
    uint32_t num_msgs = 0;
    uint8_t *buf = NULL;
    uint8_t *cursor = NULL;
    uint32_t bytes = 0;
    uint32_t i = 0;
    pdu_header_t *hdr = NULL;
    pdu_acl_t *acl = NULL;
    uint32_t size = 0;
    pdu_metadata_t meta;
    uvast val;
    eid_t *sender_eid = NULL;
    agent_t *agent = NULL;
    time_t group_timestamp;
    uint32_t incoming_idx = 0;
    uint32_t hdr_len = 0;

    /* 
     * g_running controls the overall execution of threads in the
     * NM Agent.
     */
    while(g_running) {
        
        /* Step 1: Receive a message from the Bundle Protocol Agent. */

        buf = iif_receive(&ion_ptr, &size, &meta, NM_RECEIVE_TIMEOUT_MILLIS);
        sender_eid = &(meta.originatorEid);
        
        if(buf != NULL)
        {
            DTNMP_DEBUG_INFO("mgr_rx_thread","Received buf (%x) of size %d",
            		(unsigned long) buf, size);


            /* Grab # messages in, and timestamp for, this group. */
            cursor = buf;

            bytes = utils_grab_sdnv(cursor, size, &val);
            num_msgs = val;
            cursor += bytes;
            size -= bytes;

            bytes = utils_grab_sdnv(cursor, size, &val);
            group_timestamp = val;
            cursor += bytes;
            size -= bytes;

            DTNMP_DEBUG_INFO("mgr_rx_thread","# Msgs %d, TS %llu", num_msgs, group_timestamp);

#ifdef HAVE_MYSQL
            /* Copy the message group to the database tables */
            incoming_idx = db_incoming_initialize(group_timestamp);
#endif

            /* For each message in the group. */
            for(i = 0; i < num_msgs; i++)
            {
            	hdr = pdu_deserialize_hdr(cursor, size, &bytes);
            	cursor += bytes;
            	size -= bytes;
            	hdr_len = bytes;

            	DTNMP_DEBUG_INFO("mgr_rx_thread","Header id %d with len %d", hdr->id, hdr_len);
            	switch (hdr->id)
            	{
                	case MSG_TYPE_RPT_DATA_RPT:
                	{
                		DTNMP_DEBUG_ALWAYS("mgr_rx_thread",
                				         "Processing a data report.\n\n", NULL);

                		msg_rx_data_rpt(sender_eid, cursor, size, &bytes);

                		cursor += bytes;
                		size -= bytes;
                	}
                	break;
                
                	case MSG_TYPE_ADMIN_REG_AGENT:
                	{
                		DTNMP_DEBUG_ALWAYS("mgr_rx_thread",
                						   "Processing Agent Registration.\n\n",
                						   NULL);

                		adm_reg_agent_t *reg = NULL;
                		reg = msg_deserialize_reg_agent(cursor, size, &bytes);
                		cursor += bytes;
                		size -= bytes;

                		mgr_agent_add(reg->agent_id);

#ifdef HAVE_MYSQL
                		/* Add agent to agent database. */
                		db_add_agent(reg->agent_id);
#endif

                		msg_release_reg_agent(reg);

                	}
                	break;

                	default:
                	{
                		DTNMP_DEBUG_WARN("mgr_rx_thread","Unknown message type: %d",
                				hdr->type);
                		bytes = 0;
                	}
                	break;
            	}

#ifdef HAVE_MYSQL
            	if(bytes > 0)
            	{
            		db_incoming_process_message(incoming_idx, cursor - (hdr_len + bytes), hdr_len + bytes);
            	}
#endif

            }
#ifdef HAVE_MYSQL
            db_incoming_finalize(incoming_idx);
#endif

        }
    }
   
    DTNMP_DEBUG_EXIT("mgr_rx_thread","->.", NULL);
    pthread_exit(NULL);
}
Example #5
static void TestBug57421_main() {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, TestBug57421_child, reinterpret_cast<void*>(pthread_self())));
  pthread_exit(NULL);
}
Example #6
File: psl.c Project: ibm-capi/pslse
// PSL thread loop
static void *_psl_loop(void *ptr)
{
	struct psl *psl = (struct psl *)ptr;
	struct cmd_event *event, *temp;
	int events, i, stopped, reset;
	uint8_t ack = PSLSE_DETACH;

	stopped = 1;
	pthread_mutex_lock(psl->lock);
	while (psl->state != PSLSE_DONE) {
		// idle_cycles continues to generate clock cycles for some
		// time after the AFU has gone idle.  Eventually clocks will
		// not be presented to an idle AFU to keep simulation
		// waveforms from getting huge with no activity cycles.
		if (psl->state != PSLSE_IDLE) {
		  // if we have clients or we are in the reset state, refresh idle_cycles 
		  // so that the afu clock will not be allowed to stop to save afu event simulator cycles
		  if ((psl->attached_clients > 0) || (psl->state == PSLSE_RESET)) {
			psl->idle_cycles = PSL_IDLE_CYCLES;
			if (stopped)
				info_msg("Clocking %s", psl->name);
			fflush(stdout);
			stopped = 0;
		  }
		}
		if (psl->idle_cycles) {
			// Clock AFU
//printf("before psl_signal_afu_model in psl_loop \n");
			psl_signal_afu_model(psl->afu_event);
			// Check for events from AFU
			events = psl_get_afu_events(psl->afu_event);
//printf("after psl_get_afu_events, events is 0x%3x \n", events);
			// Error on socket
			if (events < 0) {
				warn_msg("Lost connection with AFU");
				break;
			}
			// Handle events from AFU
			if (events > 0)
				_handle_afu(psl);

			// Drive events to AFU
			send_job(psl->job);
			send_pe(psl->job);
			send_mmio(psl->mmio);

			if (psl->mmio->list == NULL)
				psl->idle_cycles--;
		} else {
			if (!stopped)
				info_msg("Stopping clocks to %s", psl->name);
			stopped = 1;
			lock_delay(psl->lock);
		}

		// Skip client section if AFU descriptor hasn't been read yet
		if (psl->client == NULL) {
			lock_delay(psl->lock);
			continue;
		}
		// Check for event from application
		reset = 0;
		for (i = 0; i < psl->max_clients; i++) {
			if (psl->client[i] == NULL)
				continue;
			if ((psl->client[i]->type == 'd') && 
			    (psl->client[i]->state == CLIENT_NONE) &&
			    (psl->client[i]->idle_cycles == 0)) {
			        // this was the old way of detaching a dedicated process app/afu pair
			        // we get the detach message, drop the client, and wait for idle cycle to get to 0
				put_bytes(psl->client[i]->fd, 1, &ack,
					  psl->dbg_fp, psl->dbg_id,
					  psl->client[i]->context);
				_free(psl, psl->client[i]);
				psl->client[i] = NULL;  // aha - this is how we only called _free once the old way
				                        // why do we not free client[i]?
				                        // because this was a short cut pointer
				                        // the *real* client point is in client_list in pslse
				reset = 1;
				// for m/s devices we need to do this differently and not send a reset...
				// _handle_client - creates the llcmd's to term and remove
				// send_pe - sends the llcmd pe's to afu one at a time
				// _handle_afu calls _handle_aux2
				// _handle_aux2 finishes the llcmd pe's when jcack is asserted by afu
				//   when the remove llcmd is processed, we should put_bytes, _free and set client[i] to NULL
				continue;
			}
			if (psl->state == PSLSE_RESET)
				continue;
			_handle_client(psl, psl->client[i]);
			if (psl->client[i]->idle_cycles) {
				psl->client[i]->idle_cycles--;
			}
			if (client_cmd(psl->cmd, psl->client[i])) {
				psl->client[i]->idle_cycles = PSL_IDLE_CYCLES;
			}
		}

		// Send reset to AFU
		if (reset == 1) {
			psl->cmd->buffer_read = NULL;
			event = psl->cmd->list;
			while (event != NULL) {
				if (reset) {
					warn_msg
					    ("Client dropped context before AFU completed");
					reset = 0;
				}
				info_msg("Dumping command tag=0x%02x",
					 event->tag);
#ifdef PSL9
				info_msg("Dumping itag=0x%02x utag=0x%02x type=0x%02x state=0x%02x",
					event->itag, event->utag, event->type, event->state);
#endif
				if (event->data) {
					free(event->data);
				}
				if (event->parity) {
					free(event->parity);
				}
				temp = event;
				event = event->_next;
				free(temp);
			}
			psl->cmd->list = NULL;
			info_msg("Sending reset to AFU");
			add_job(psl->job, PSL_JOB_RESET, 0L);
		}

		lock_delay(psl->lock);
	}

	// Disconnect clients
	for (i = 0; i < psl->max_clients; i++) {
		if ((psl->client != NULL) && (psl->client[i] != NULL)) {
			// FIXME: Send warning to clients first?
			info_msg("Disconnecting %s context %d", psl->name,
				 psl->client[i]->context);
			close_socket(&(psl->client[i]->fd));
		}
	}

	// DEBUG
	debug_afu_drop(psl->dbg_fp, psl->dbg_id);

	// Disconnect from simulator, free memory and shut down thread
	info_msg("Disconnecting %s @ %s:%d", psl->name, psl->host, psl->port);
	if (psl->client)
		free(psl->client);
	if (psl->_prev)
		psl->_prev->_next = psl->_next;
	if (psl->_next)
		psl->_next->_prev = psl->_prev;
	if (psl->cmd) {
		free(psl->cmd);
	}
	if (psl->job) {
		free(psl->job);
	}
	if (psl->mmio) {
		free(psl->mmio);
	}
	if (psl->host)
		free(psl->host);
	if (psl->afu_event) {
		psl_close_afu_event(psl->afu_event);
		free(psl->afu_event);
	}
	if (psl->name)
		free(psl->name);
	if (*(psl->head) == psl)
		*(psl->head) = psl->_next;
	pthread_mutex_unlock(psl->lock);
	free(psl);
	pthread_exit(NULL);
}
Example #7
// ****************************************************************
// TimerThreadMain()
//
void* FastResearchInterface::TimerThreadMain(void *ObjectPointer)
{
	int								OurChannelID	=	0
								,	ReceptionID		=	0;

	timer_t 						TimerID;

	struct sigevent 				Event;

	struct itimerspec 				Timer;

	struct _pulse  					PulseMsg;

	FastResearchInterface			*ThisObjectPtr		=	(FastResearchInterface*)ObjectPointer;

	OurChannelID				=	ChannelCreate(0); //create communication channel

	// Initialize event data structure
	// attach the timer to the channel OurChannelID
	Event.sigev_notify			=	SIGEV_PULSE;
	Event.sigev_coid			=	ConnectAttach(0, 0, OurChannelID, _NTO_SIDE_CHANNEL, 0);
	Event.sigev_priority		=	getprio(0);
	Event.sigev_code			=	TIMER_PULSE;

	timer_create(CLOCK_REALTIME, &Event, &TimerID);

	// Configure the timer
	Timer.it_value.tv_sec		=	0L;
	Timer.it_value.tv_nsec		=	(long int)(1000000000.0 * ThisObjectPtr->CycleTime);	// wait one cycle time interval before start
	Timer.it_interval.tv_sec	=	0L;
	Timer.it_interval.tv_nsec	=	(long int)(1000000000.0 * ThisObjectPtr->CycleTime);

	pthread_mutex_lock(&(ThisObjectPtr->MutexForThreadCreation));
	ThisObjectPtr->ThreadCreated	=	true;
	pthread_mutex_unlock(&(ThisObjectPtr->MutexForThreadCreation));

	pthread_cond_signal(&(ThisObjectPtr->CondVarForThreadCreation));

	// Start the timer
	timer_settime(TimerID, 0, &Timer, NULL);

	pthread_mutex_lock(&(ThisObjectPtr->MutexForCondVarForTimer));

	while(!(ThisObjectPtr->TerminateTimerThread))
	{
		pthread_mutex_unlock(&(ThisObjectPtr->MutexForCondVarForTimer));

		ReceptionID	= MsgReceive(OurChannelID, &PulseMsg, sizeof(PulseMsg), NULL);

		pthread_mutex_lock(&(ThisObjectPtr->MutexForCondVarForTimer));
		if (ReceptionID == 0)
		{
			ThisObjectPtr->TimerFlag = true;
			pthread_cond_signal(&(ThisObjectPtr->CondVarForTimer));
		}
	}
	pthread_mutex_unlock(&(ThisObjectPtr->MutexForCondVarForTimer));

	if (timer_delete(TimerID) != 0)
    {
		ThisObjectPtr->OutputConsole->printf("FastResearchInterface::TimerThreadMain(): ERROR, cannot delete timer...\n");
    }

	pthread_exit(NULL);
}
Example #8
//The main method for thread. 
void *mainTrack(void *train)
{

	struct train currentTrain = *(struct train *)train;
	
	//printf("%s", trainBuffer);
	usleep(currentTrain.loadTime * 1E5F);
	printf("00:00:0%0.1f Train %2d is ready to go %4s\n", interval(), currentTrain.number, currentTrain.direction);

	//add the train to queue. 
	//in the waiting queue, the train will wait for signal to cross the track. 
	pthread_mutex_lock(&queue);
	addToQueue(currentTrain);

	//Following if statement for debugging. 
	if(0){
		
		struct queueNode *conductor;
		conductor = queueRoot;

		while(conductor != NULL){
			printf("I am #%d \n", currentTrain.number);
			printf("%d: %d, %d\n", conductor->aTrain.number, conductor->aTrain.loadTime, conductor->aTrain.crossTime);
			conductor = conductor->next;
		}
	}
	
	pthread_mutex_unlock(&queue);
	//let the thread sleep a little bit,
	//because it has to wait for other threads that finish loading at the same time.
	usleep(0.3 * 1E5F);

	if(onTrack == -1 && queueNumber == 1){
		pthread_mutex_lock(&track);
		onTrack = currentTrain.number;
		pthread_mutex_unlock(&track);
		printf("00:00:0%0.1f Train %2d is ON the main track going %4s\n", interval(), currentTrain.number, currentTrain.direction);
		usleep(currentTrain.crossTime * 1E5F);
		printf("00:00:0%0.1f Train %2d is OFF the main track going %4s\n", interval(), currentTrain.number, currentTrain.direction);
	}else
	{
		if(onTrack == -1){
			onTrack = queueRoot->aTrain.number;
		}
		while(onTrack != currentTrain.number)
		{ // wait for a signal which allows the current train to cross
			//printf("I am waiting, my  number is %d\n",currentTrain.number);
		}

		pthread_mutex_lock(&track);
		onTrack = currentTrain.number;
		pthread_mutex_unlock(&track);
		printf("00:00:0%0.1f Train %2d is ON the main track going %4s\n", interval(), currentTrain.number, currentTrain.direction);
		usleep(currentTrain.crossTime * 1E5F);
		printf("00:00:0%0.1f Train %2d is OFF the main track going %4s\n", interval(), currentTrain.number, currentTrain.direction);
	}
	
	pthread_mutex_lock(&track);
	pthread_mutex_lock(&queue);
	//delete the root node in the queue.
	if(queueRoot->next == NULL){
		onTrack = -1;
	}else{
	//delete the currentTrain node
		struct queueNode *deleteNode, *preDeleteNode;
		if(queueRoot->aTrain.number == currentTrain.number){
			deleteNode = queueRoot;
			queueRoot = queueRoot->next;
			free(deleteNode);
			onTrack = queueRoot->aTrain.number;
		}else{
			deleteNode = queueRoot;
			while(deleteNode != NULL){
				if(deleteNode->aTrain.number == currentTrain.number){
					preDeleteNode->next = deleteNode->next;
					free(deleteNode);
					onTrack = queueRoot->aTrain.number;
					break;
				}else{
					preDeleteNode = deleteNode;
					deleteNode = deleteNode->next;
				}
			}
		}
		//printf("ROOT track number: %d\n", queueRoot->aTrain.number);
		//printf("On track number: %d\n", onTrack);
	}
	
	pthread_mutex_unlock(&queue);
	pthread_mutex_unlock(&track);
	
	
	pthread_exit(NULL);
}
int 
main(int argc, char *argv[])
{
	int ret;
	int i;

	if(argc < 6)
	{
		printf("Usage: fs_slammer <hddisk> <dir> <no. of files> <no-of-pages> <time in secs> <rw>\n");
		return -1;
	}
	
	time_t tt;
	time(&tt);
	srandom(tt);

	char fs_path[256];
	char *hd_name;
	struct stat stat_buf;
	char **argp =++argv;

	hd_name = (char *)malloc(strlen(*argp) + 1);
	strcpy(hd_name, *argp++);
	if((ret = stat(hd_name, &stat_buf))<0)
	{
		perror("stat:");
		return ret;
	}
	
	strcpy(fs_path, *argp++);
	if((ret = stat(fs_path, &stat_buf))<0)
	{
		perror("stat:");
		return ret;
	}
	
	if((ret=chdir(fs_path))<0)
	{
		printf("%s:\n", fs_path);
		perror("chdir:");
		return ret;
	}
	
	int no_threads = 0;

	sscanf(*argp++, "%d", &no_threads);

	sscanf(*argp++, "%d", &max_pages);

	int slam_time;
	sscanf(*argp++, "%d", &slam_time);

	char rw[4];
	sscanf(*argp++, "%s", rw);

	pthread_t *read_threads;
	pthread_t *slam_threads;

	if(strchr(rw, 'w'))
	{
		slam_threads = (pthread_t*)malloc(sizeof(pthread_t)*no_threads);

		pthread_attr_t attr;
		pthread_attr_init(&attr);
		pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);


		for(i = 0; i < no_threads; i++)
		{
			if((ret = pthread_create(slam_threads+i, &attr, slam_junk, NULL)))
			{
				perror("pthread_create:");
				return ret;
			}
		}
	}
	
	if(strchr(rw, 'r'))
	{
		read_threads = (pthread_t*)malloc(sizeof(pthread_t)*no_threads);

		pthread_attr_t attr;
		pthread_attr_init(&attr);
		pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);


		for(i = 0; i < no_threads; i++)
		{
			if((ret = pthread_create(read_threads+i, &attr, raw_reader, hd_name)))
			{
				perror("pthread_create:");
				return ret;
			}
		}
	}
	

	signal(SIGINT, termination_hdlr);
	signal(SIGTERM, termination_hdlr);
	signal(SIGQUIT, termination_hdlr);
	if(slam_time <0)
		pause();
	else
		sleep(slam_time);

	for(i = 0; i < no_threads; i++)
	{
		if(strchr(rw, 'w'))
			pthread_cancel(slam_threads[i]);
		if(strchr(rw, 'r'))
			pthread_cancel(read_threads[i]);
	}

	printf("Total bytes written = %lu\n", total_bytes_written);
	printf("Total bytes raw read = %lu\n", total_bytes_read);
	pthread_exit(NULL);

}
/*******************************************************************************
 * Relay Out Thread
 * Catches TCP messages from a remote network and examines the header of the
 * message to determine if the local network has already seen it. If so, the
 * message is ignored. If not, the message is broadcast over UDP multicast to
 * the local network.
 * @param arg           a pointer to a RelayOutThreadParam object
 */
void* UdpRelay::relayOutThread(void* arg) {
   // Extract parameters
   RelayOutParam* param = (RelayOutParam*)arg;
   UdpRelay* parent = param->self;
   char* peerAddr = param->addr;
   int tcpSd = param->sd;
   // Clean up dynamic memory that is not needed anymore
   delete param;
   param = NULL;
   // Loop to catch remote TCP messages
   while (true) {
      char* localMessage = new char[BUFFER_MAX_SIZE];
      // Receive a message
      int recvValue = recv(tcpSd, localMessage, BUFFER_MAX_SIZE, 0);
      if (recvValue < 0) {
         cerr << "recv() failed: relayOutThread" << endl;
         pthread_exit((void*)EXIT_FAILURE);
      }
      if (recvValue == 0) {
         // The peer has closed the connection
         break;
      }
      // Lock mutex on msgBuffer
      if (pthread_mutex_lock(parent->msgMutex) != 0) {
         cerr << "pthread_mutex_lock() failed: relayOutThread" << endl;
         pthread_exit((void*)EXIT_FAILURE);
      }
      parent->msgBuffer = localMessage;
      localMessage = NULL;
      // Determine if message header contains groupIP
      bool headerContainsGroupIp = parent->checkMsgForGrpIp();
      if (headerContainsGroupIp) {
         // Local network has already seen this message; release the mutex before skipping it
         if (pthread_mutex_unlock(parent->msgMutex) != 0) {
            cerr << "pthread_mutex_unlock() failed: relayOutThread" << endl;
            pthread_exit((void*)EXIT_FAILURE);
         }
         continue;
      } else {
         // Get the UDP socket descriptor for multicast
         int clientSd = parent->localUdpMulticast->getClientSocket();
         if (clientSd == -1) {
            cerr << "UdpMulticast.getClientSocket() failed: relayOutThread" << endl;
            pthread_exit((void*)EXIT_FAILURE);
         }
         parent->printMsgBuffer();
         // Multicast the message locally over UDP
         if (!parent->localUdpMulticast->multicast(parent->msgBuffer)) {
            cerr << "UdpMulticast.multicast() failed: relayOutThread" << endl;
            pthread_exit((void*)EXIT_FAILURE);
         }
         // Wait for relayInThread to process message
         if (pthread_cond_wait(parent->cond, parent->msgMutex) != 0) {
            cerr << "pthread_cond_wait() failed: relayOutThread" << endl;
            pthread_exit((void*)EXIT_FAILURE);
         }
         printf("> broadcast %d bytes to %s:%d\n", (int)strlen(parent->msgBuffer), parent->groupIp, parent->groupPort);
      }
      // Unlock mutex on msgBuffer
      if (pthread_mutex_unlock(parent->msgMutex) != 0) {
         cerr << "pthread_mutex_unlock() failed: relayOutThread" << endl;
         pthread_exit((void*)EXIT_FAILURE);
      }
   }
   // remove this thread from the maps
   if (parent->acceptedConnections.count(string(peerAddr)) != 0) {
      parent->acceptedConnections.erase(string(peerAddr));
   }
   if (parent->addedConnections.count(string(peerAddr)) != 0) {
      parent->addedConnections.erase(string(peerAddr));
   }
   if (parent->relayOutThreads.count(string(peerAddr)) != 0) {
      pthread_t* relayOutT = parent->relayOutThreads[string(peerAddr)];
      parent->relayOutThreads.erase(string(peerAddr));
      // Clean dynamic memory
      delete relayOutT;
      relayOutT = NULL;
   }
   // exit this thread
   pthread_exit(EXIT_SUCCESS);
}
/*******************************************************************************
 * Accepts Thread
 * Uses the class's Socket object to receive TCP connections to remote network
 * groups. When a connection is established, a relayOutThread is started. When
 * a connection already exists and another one is requested from the same
 * network group, the old connection is closed and the associated relayOutThread
 * is cancelled and a new connection is created with a new relayOutThread.
 * @param arg           a pointer to the this thread's parent, the UdpRelay object
 */
void* UdpRelay::acceptsThread(void* arg) {
   UdpRelay* parent = (UdpRelay*)arg;
   while (true) {
      // Accept a connection request from a remote relay
      int serverTcpSd = parent->localSocket->getServerSocket();
      // Retrieve hostname and ipaddress (NOTE: this code is duplicated in commandAdd)
      struct sockaddr_storage sockAddrStorage;
      memset(&sockAddrStorage, 0, sizeof(sockAddrStorage));
      struct sockaddr_in sockAddrIn;
      memset(&sockAddrIn, 0, sizeof(sockAddrIn));
      char ipAddrStr[INET_ADDRSTRLEN];
      socklen_t len = sizeof(sockAddrStorage);
      getpeername(serverTcpSd, (struct sockaddr*)&sockAddrStorage, &len);
      struct sockaddr_in* sockAddrInPtr = (struct sockaddr_in*)&sockAddrStorage;
      unsigned short int port = ntohs(sockAddrInPtr->sin_port);
      inet_ntop(AF_INET, &sockAddrInPtr->sin_addr, ipAddrStr, INET_ADDRSTRLEN);
      char* hostStr = parent->addrToHost(ipAddrStr);
      // Check if a connection already exists with the remote relay
      if (parent->acceptedConnections.count(string(hostStr)) != 0) {
         int serverTcpSdOld = parent->acceptedConnections[string(hostStr)];
         pthread_t* relayOutTOld = parent->relayOutThreads[string(hostStr)];
         // Cancel relayOutT thread
         int relayOutCancelValue = pthread_cancel(*relayOutTOld);
         if (relayOutCancelValue != 0) {
            cerr << "pthread_cancel() failed: acceptsThread" << endl;
            exit(EXIT_FAILURE);
         }
         // Close the socket descriptor
         close(serverTcpSdOld);
         // Clean dynamic memory
         delete relayOutTOld;
         relayOutTOld = NULL;
         // Remove connection from maps
         parent->acceptedConnections.erase(string(hostStr));
         parent->relayOutThreads.erase(string(hostStr));
      }
      if (parent->addedConnections.count(string(hostStr)) != 0) {
         int serverTcpSdOld = parent->addedConnections[string(hostStr)];
         pthread_t* relayOutTOld = parent->relayOutThreads[string(hostStr)];
         // Cancel relayOutT thread
         int relayOutCancelValue = pthread_cancel(*relayOutTOld);
         if (relayOutCancelValue != 0) {
            cerr << "pthread_cancel() failed: acceptsThread" << endl;
            exit(EXIT_FAILURE);
         }
         // Close the socket descriptor
         close(serverTcpSdOld);
         // Clean dynamic memory
         delete relayOutTOld;
         relayOutTOld = NULL;
         // Remove connection from maps
         parent->addedConnections.erase(string(hostStr));
         parent->relayOutThreads.erase(string(hostStr));
      }
      // Create relay out thread
      pthread_t* relayOutT = new pthread_t;
      RelayOutParam* param = new RelayOutParam(parent, hostStr, serverTcpSd);
      int relayOutCreateValue = pthread_create(relayOutT, NULL, relayOutThread, (void*)param);
      if (relayOutCreateValue != 0) {
         cerr << "pthread_create() failed: acceptsThread" << endl;
         exit(EXIT_FAILURE);
      }
      // Add values to maps to keep track of this connection
      parent->acceptedConnections[string(hostStr)] = serverTcpSd;
      parent->relayOutThreads[string(hostStr)] = relayOutT;
      // Print confirmation message
      printf("> accepted %s (%s) on port %d, sd = %d\n", hostStr, ipAddrStr, port, serverTcpSd);
   }
   // exit this thread
   pthread_exit(EXIT_SUCCESS);
}
void ithread_exit(void *val_ptr)
{
    pthread_exit(val_ptr);
}
Example #13
void *thr_fn2(void* arg)
{
  printf("thread 2: ID is %d\n", pthread_self);
  pthread_exit((void*)0);
}
Example #14
void* viagem(void *arg){
	int tipoViag = (int) arg;
	time_t mytime;
	time_t mytime2;
	int nrComboio;
	FILE *ficheiro;
	
	ficheiro= fopen("output.txt", "a");
	
	
	

	if(tipoViag==1){
		pthread_mutex_lock(&muxNrComboio);
		strt->numeroComboio++;
		nrComboio=strt->numeroComboio;
		pthread_mutex_unlock(&muxNrComboio);
		pthread_mutex_lock(&muxB_C);
		mytime=horaAtual();
		strt->c++;
		fprintf(ficheiro,"\n\nComboio nº %d.   Origem: Cidade C.   Destino: Cidade A.   Linha: cidadeC-cidadeB\n", nrComboio); 
		sleep(3);
		pthread_mutex_unlock(&muxB_C);
		pthread_mutex_lock(&muxA_B);
		fprintf(ficheiro,"\nComboio nº %d.   Origem: Cidade C.   Destino: Cidade A.   Linha: cidadeB-cidadeA\n", nrComboio); 
		strt->b++;
		sleep(10);
		strt->a++;
		pthread_mutex_unlock(&muxA_B);
		fprintf(ficheiro,"\nComboio nº %d chegou à cidade A.\nData de saída: ", nrComboio);
		fprintf(ficheiro, ctime(&mytime));
		fprintf(ficheiro,"Data de chegada: ");
		mytime2=horaAtual(); 
		fprintf(ficheiro,ctime(&mytime2));
	}
	
	if(tipoViag==2){
		pthread_mutex_lock(&muxNrComboio);
		strt->numeroComboio++;
		nrComboio=strt->numeroComboio;
		pthread_mutex_unlock(&muxNrComboio); 
		pthread_mutex_lock(&muxA_B);
		mytime=horaAtual();
		strt->a++;
		fprintf(ficheiro,"\n\nComboio nº %d.   Origem: Cidade A.   Destino: Cidade D.   Linha: cidadeA-cidadeB\n", nrComboio); 
		sleep(10);
		pthread_mutex_unlock(&muxA_B);
		pthread_mutex_lock(&muxB_D);
		strt->b++;
		fprintf(ficheiro,"\nComboio nº %d.   Origem: Cidade C.   Destino: Cidade A.   Linha: cidadeB-cidadeA\n", nrComboio); 
		sleep(3);
		strt->d++;
		pthread_mutex_unlock(&muxB_D);
		fprintf(ficheiro,"\nComboio nº %d chegou à cidade D.\nData de saída: ", nrComboio);
		fprintf(ficheiro,ctime(&mytime));
		fprintf(ficheiro,"Data de chegada: ");
		mytime2=horaAtual(); 
		fprintf(ficheiro,ctime(&mytime2));
	}
	fclose(ficheiro);
	
	pthread_exit(NULL);
}
Example #15
void *thread1235(void *threadid)
{
     printf("Das ist Thread %d.\n", (int)threadid);
     pthread_exit(NULL);
}
void*
slam_junk(void *data)
{
	int fd, fd_rand;
	int pages, bytes_wrote, ret;
	char fname[100];
	char buff[4096];

	gen_random_filename(fname, sizeof(fname));
	pthread_cleanup_push(slam_cleanup, fname);
	while(1)
	{
		// O_SYNC tried
		fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXO);
		if(fd < 0)
		{
			printf("open:%s:\n",fname);
			perror("open:");
			pthread_exit(NULL);
		}
		fd_rand = open("/dev/zero", O_RDONLY);
		if(fd_rand < 0)
		{
			perror("open:/dev/zero:");
			close(fd);
			pthread_exit(NULL);
		}
		//pages = random()%max_pages + 1;
		pages = max_pages;
		bytes_wrote = 0;
		while(pages--)
		{
			if((ret = read(fd_rand, buff, sizeof(buff)))<0)
			{	
				perror("read:");
				close(fd_rand);
				close(fd);
				pthread_exit(NULL);
			}
			if((ret = write(fd, buff, ret))<0)
			{
				perror("write:");
				close(fd_rand);
				close(fd);
				pthread_exit(NULL);
				
			}
			bytes_wrote+=ret;
			total_bytes_written += ret;
		}
		
		//lets see..whether it hangs
		fsync(fd);
		close(fd);
		close(fd_rand);

		fd = open(fname, O_RDONLY);
		while(read(fd, buff, sizeof(buff)));
		close(fd);

		//printf("%d bytes written\n", bytes_wrote);
		//pthread_mutex_lock(&total_bytes_mutex);
		//pthread_mutex_unlock(&total_bytes_mutex);		

			
		pthread_testcancel();
	}
	pthread_cleanup_pop(0);
	pthread_exit(NULL);
}
// Worker thread
static void* networkThread(void *data)
{    
    CCHttpRequest *request = NULL;
    
    while (true) 
    {
        if (need_quit)
        {
            break;
        }
        
        // step 1: send http request if the requestQueue isn't empty
        request = NULL;
        
        pthread_mutex_lock(&s_requestQueueMutex); //Get request task from queue
        if (0 != s_requestQueue->count())
        {
            request = dynamic_cast<CCHttpRequest*>(s_requestQueue->objectAtIndex(0));
            s_requestQueue->removeObjectAtIndex(0);  
            // request's refcount = 1 here
        }
        pthread_mutex_unlock(&s_requestQueueMutex);
        
        if (NULL == request)
        {
            // Wait for http request tasks from main thread
            pthread_mutex_lock(&s_SleepMutex);
            pthread_cond_wait(&s_SleepCondition, &s_SleepMutex);
            pthread_mutex_unlock(&s_SleepMutex);
            continue;
        }
        
        // step 2: libcurl sync access
        
        // Create a HttpResponse object, the default setting is http access failed
        CCHttpResponse *response = new CCHttpResponse(request);
        
        // request's refcount = 2 here, it's retained by HttpRespose constructor
        request->release();
        // ok, refcount = 1 now, only HttpResponse hold it.
        
        int32_t responseCode = -1;
        int retValue = 0;

        // Process the request -> get response packet
        switch (request->getRequestType())
        {
            case CCHttpRequest::kHttpGet: // HTTP GET
                retValue = processGetTask(request,
                                          writeData, 
                                          response->getResponseData(), 
                                          &responseCode,
                                          writeHeaderData,
                                          response->getResponseHeader());
                break;
            
            case CCHttpRequest::kHttpPost: // HTTP POST
                retValue = processPostTask(request,
                                           writeData, 
                                           response->getResponseData(), 
                                           &responseCode,
                                           writeHeaderData,
                                           response->getResponseHeader());
                break;

            case CCHttpRequest::kHttpPut:
                retValue = processPutTask(request,
                                          writeData,
                                          response->getResponseData(),
                                          &responseCode,
                                          writeHeaderData,
                                          response->getResponseHeader());
                break;

            case CCHttpRequest::kHttpDelete:
                retValue = processDeleteTask(request,
                                             writeData,
                                             response->getResponseData(),
                                             &responseCode,
                                             writeHeaderData,
                                             response->getResponseHeader());
                break;
            
            default:
                CCAssert(false, "CCHttpClient: unknown request type, only GET, POST, PUT and DELETE are supported");
                break;
        }
                
        // write data to HttpResponse
        response->setResponseCode(responseCode);
        
        if (retValue != 0) 
        {
            response->setSucceed(false);
            response->setErrorBuffer(s_errorBuffer);
        }
        else
        {
            response->setSucceed(true);
        }

        
        // add response packet into queue
        pthread_mutex_lock(&s_responseQueueMutex);
        s_responseQueue->addObject(response);
        pthread_mutex_unlock(&s_responseQueueMutex);
        
        // resume dispatcher selector
        CCDirector::sharedDirector()->getScheduler()->resumeTarget(CCHttpClient::getInstance());
    }
    
    // cleanup: if worker thread received quit signal, clean up un-completed request queue
    pthread_mutex_lock(&s_requestQueueMutex);
    s_asyncRequestCount -= s_requestQueue->count();
    s_requestQueue->removeAllObjects();
    pthread_mutex_unlock(&s_requestQueueMutex);
    
    if (s_requestQueue != NULL) {
        
        pthread_mutex_destroy(&s_requestQueueMutex);
        pthread_mutex_destroy(&s_responseQueueMutex);
        
        pthread_mutex_destroy(&s_SleepMutex);
        pthread_cond_destroy(&s_SleepCondition);

        s_requestQueue->release();
        s_requestQueue = NULL;
        s_responseQueue->release();
        s_responseQueue = NULL;
    }

    pthread_exit(NULL);
    
    return 0;
}
Example #18
//controlling function for the Publisher procs communication with server 
void *
pubThr_fn(void *param) 
{

  //pthread_mutex_lock(&mutexlock);
  //printf("here\n");

  Record *threadRecord;
  threadRecord = (Record *) param;
  Publisher pubT;
  pubT.pubConnect = "Pub Connect";
  pubT.pubTopic = "Topic 1";
  pubT.pubEnd = "End";
  pubT.pubterm = "terminate";

  //struct Topic* topicEntry;
  //trcpy(topicEntry->data, pubT.pubTopic);
  //topicEntry->pubId = threadRecord->pid;
  int x;
  char  *data = "accept";
  char  *data2 = "reject";
  char  *terminate = "terminate";
  char  *success = "successful";
  char  *retry = "retry";
  char  *temp;
  if ((x = read(threadRecord->pipe[0], pubT.buf, 1024)) >= 0){
    //printf("pub read %d bytes from the Publisher pipe: \"%s\"\n", x, pubT.buf); 
    pubT.buf[x] = 0;
    if (strcmp(pubT.buf, pubT.pubConnect) == 0){
      write(threadRecord->serverPipe[1], data, strlen(data));
      if ((x = read(threadRecord->pipe[0], pubT.buf2, 1024)) >= 0){
        //printf("pub read %d bytes from the Publisher pipe: \"%s\"\n", x, pubT.buf2); 
        pubT.buf2[x] = 0;
        if (strcmp(pubT.buf2, pubT.pubTopic) == 0){
          write(threadRecord->serverPipe[1], data, strlen(data));
          if ((x = read(threadRecord->pipe[0], pubT.buf3, 1024)) >= 0){
            //printf("pub read %d bytes from the Publisher pipe: \"%s\"\n", x, pubT.buf3);
            pubT.buf3[x] = 0;
            if(strcmp(pubT.buf3, pubT.pubEnd) == 0){
              //threadRecord.term = "terminated";
              write(threadRecord->serverPipe[1], data, strlen(data));
              //printf("pub successful connection to server!\n");
/*
              if((x = read(threadRecord->pipe[0], pubT.buf, 1024)) >= 0){
                pubT.buf[x] = 0;
                temp = pubT.buf;
                write(threadRecord->serverPipe[1], success, strlen(success));
              }
              else
                write(threadRecord->serverPipe[1], retry, strlen(retry));
              //enqueue(topicEntry);
*/
              //continue;
            } 
            else{
              write(threadRecord->serverPipe[1], data2, strlen(data2));
            }
          }
        }
        else if (strcmp(pubT.buf2, pubT.pubterm) == 0){
          //threadRecord.term = "terminated";
          ;
        }
        else{
          write(threadRecord->serverPipe[1], data2, strlen(data2));
        }
      }
    }
    else if (strcmp(pubT.buf, pubT.pubterm) == 0){
      //threadRecord.term = "terminated";
      ;
    }
    else{
      write(threadRecord->serverPipe[1], data2, strlen(data2));
    }
  }
  //pthread_mutex_unlock (&mutexlock);

//sleep(1);
  pthread_exit(NULL);
}
Example #19
File: main.c Project: olivo/BP
void *t1(void *arg)
{
  pthread_exit(0);
  g=1;
}
Example #20
//controlling function for the Subscriber procs communication with server 
void *
subThr_fn(void *param) 
{
  //pthread_mutex_lock (&mutexlock);
  Record *threadRecord;
  threadRecord = (Record *) param;
  Subscriber subT;
  subT.subConnect = "Sub Connect";
  subT.subTopic = "Topic 1";
  subT.subEnd = "End";
  subT.subterm = "terminate";
  //subfileDescriptor = threadRecord->pipe;
  //ServerFD = threadRecord->serverPipe;
  int x;
  char  *data = "accept";
  char  *data2 = "reject";
  char  *terminate = "terminate";
  //struct Topic* topic;

  if ((x = read(threadRecord->pipe[0], subT.buf, 1024)) >= 0){
    //printf("sub read %d bytes from the Subscriber pipe: \"%s\"\n", x, subT.buf); 
    subT.buf[x] = 0;
    if (strcmp(subT.buf, subT.subConnect) == 0){
      write(threadRecord->serverPipe[1], data, strlen(data));
      if ((x = read(threadRecord->pipe[0], subT.buf2, 1024)) >= 0){
        //printf("sub read %d bytes from the Subscriber pipe: \"%s\"\n", x, subT.buf2); 
        subT.buf2[x] = 0;
        if (strcmp(subT.buf2, subT.subTopic) == 0){
          write(threadRecord->serverPipe[1], data, strlen(data));
          if ((x = read(threadRecord->pipe[0], subT.buf3, 1024)) >= 0){
            //printf("sub read %d bytes from the Subscriber pipe: \"%s\"\n", x, subT.buf3);
            subT.buf3[x] = 0;
            if(strcmp(subT.buf3, subT.subEnd) == 0){
              //threadRecord.term = "terminated";
              write(threadRecord->serverPipe[1], data, strlen(data));
              //printf("sub successful connection to server!\n");
              //topic = dequeue();
              //continue;
            } 
            else{
              write(threadRecord->serverPipe[1], data2, strlen(data2));
            }
          }
        }
        else if (strcmp(subT.buf2, subT.subterm) == 0){
          //threadRecord.term = "terminated";
          ;
        }
        else{
          write(threadRecord->serverPipe[1], data2, strlen(data2));
        }
      }
    }
    else if (strcmp(subT.buf, subT.subterm) == 0){
      //threadRecord.term = "terminated";
      ;
    }
    else{
      write(threadRecord->serverPipe[1], data2, strlen(data2));
    }
}
//sleep(1);
  //pthread_mutex_unlock (&mutexlock);

  pthread_exit(NULL);

}
Example #21
/*
 * init_power_save - Initialize the power save module. Started as a
 *	pthread. Terminates automatically at slurmctld shutdown time.
 *	Input and output are unused.
 */
static void *_init_power_save(void *arg)
{
        /* Locks: Read nodes */
        slurmctld_lock_t node_read_lock = {
                NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
        /* Locks: Write nodes */
        slurmctld_lock_t node_write_lock = {
                NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
	time_t now, boot_time = 0, last_power_scan = 0;

	if (_init_power_config())
		goto fini;

	suspend_node_bitmap = bit_alloc(node_record_count);

	while (slurmctld_config.shutdown_time == 0) {
		sleep(1);

		if (_reap_procs() < 2) {
			debug("power_save programs getting backlogged");
			continue;
		}

		if ((last_config != slurmctld_conf.last_update) &&
		    (_init_power_config())) {
			info("power_save mode has been disabled due to "
			     "configuration changes");
			goto fini;
		}

		now = time(NULL);
		if (boot_time == 0)
			boot_time = now;

		/* Only run every 60 seconds or after a node state change,
		 *  whichever happens first */
		if ((last_node_update >= last_power_scan) ||
		    (now >= (last_power_scan + 60))) {
			lock_slurmctld(node_write_lock);
			_do_power_work(now);
			unlock_slurmctld(node_write_lock);
			last_power_scan = now;
		}

		if (slurmd_timeout &&
		    (now > (boot_time + (slurmd_timeout / 2)))) {
			lock_slurmctld(node_read_lock);
			_re_wake();
			unlock_slurmctld(node_read_lock);
			/* prevent additional executions */
			boot_time += (365 * 24 * 60 * 60);
			slurmd_timeout = 0;
		}
	}

fini:	_clear_power_config();
	FREE_NULL_BITMAP(suspend_node_bitmap);
	_shutdown_power();
	slurm_mutex_lock(&power_mutex);
	power_save_enabled = false;
	slurm_mutex_unlock(&power_mutex);
	pthread_exit(NULL);
	return NULL;
}
Example #22
int
main(int argc, char *argv[])
{
	pid_t 	pid;
	int		status;
  int   ServerFD[2];
  int   ServerFD2[2];
  char  *data = "accept";
  char  *data2 = "reject";
  char  *terminate = "terminate";
  char  pubbuf[1025];
  char  pubbufend[1025];
  char  pubbufterm[1025];
  char  subbuf[1025];
  char  subbufend[1025];
  char  subbufterm[1025];

  pipe(ServerFD);
  pipe(ServerFD2);
	pid = fork();

	if(pid == 0){//this is the child of the main process, the DIServer
    int i;
		//n is # of publisher
		int n = atoi(argv[1]);
		//m is number of subscribers
		int m = atoi(argv[2]);
    //t are topics	
    int t = atoi(argv[3]);
    pthread_t pubthreads[n];
    pthread_t subthreads[m];
    int   rc;
    int   rc2;
    void  *pubthreadstatus;
    void  *subthreadstatus;

    pthread_attr_t pubattr;
    pthread_attr_t subattr;

    pthread_attr_init(&pubattr);
    pthread_attr_setdetachstate(&pubattr, PTHREAD_CREATE_JOINABLE);

    pthread_attr_init(&subattr);
    pthread_attr_setdetachstate(&subattr, PTHREAD_CREATE_JOINABLE);

    pthread_mutex_init(&mutexlock,NULL);


    Record record[n+m];
		/*need to use n and m to create child procs of the 
		DIServer, publishers and subscribers*/
		//loop to create publisher procs and store pid in array;
		pid_t pubpid;
		pid_t subpid;

		//map space for the pub array
		pubpids = mmap(0, MAX_PIDS*sizeof(pid_t), PROT_READ|PROT_WRITE,
              MAP_SHARED | MAP_ANONYMOUS, -1, 0);
		 if (!pubpids) {
    		perror("mmap failed");
    		exit(1);
 		 }
  		memset((void *)pubpids, 0, MAX_PIDS*sizeof(pid_t)); 		 
 		 //map space for sub array
		subpids = mmap(0, MAX_PIDS*sizeof(pid_t), PROT_READ|PROT_WRITE,
              MAP_SHARED | MAP_ANONYMOUS, -1, 0);
		 if (!subpids) {
    		perror("mmap failed");
    		exit(1);
 		 }
  		memset((void *)subpids, 0, MAX_PIDS*sizeof(pid_t));

  		//loop to create forked pubs
  		for(i=0;i<n;i++)
  		{
        //create the record struct
        //create the publisher struct
        int z;
        Publisher pub;
        pub.pubConnect = "Pub Connect";
        pub.pubTopic = "Topic 1";//topic of interest
        pub.pubEnd = "End";
        pub.pubterm = "terminate";
        pipe(pub.fileDescriptor);
  			pubpid = fork();
        int articles;
        char article[12];
  			if(pubpid == 0)
  			{
          //doPublisher(n);
          write(pub.fileDescriptor[1], pub.pubConnect, strlen(pub.pubConnect));
          if ((z = read(ServerFD[0], pubbuf, 1024)) >= 0) {
              pubbuf[z] = 0; /* terminate the string */ 
              //printf("pub read %d bytes from the DIServer pipe: \"%s\"\n", z, pubbuf);
              if (strcmp(pubbuf, data) == 0)
              {
                write(pub.fileDescriptor[1], pub.pubTopic, strlen(pub.pubTopic));
                if ((z = read(ServerFD[0], pubbufend, 1024)) >= 0) {
                  pubbufend[z] = 0;
                  //printf("pub read %d bytes from the DIServer pipe: \"%s\"\n", z, pubbufend);
                  if (strcmp(pubbufend, data) == 0){
                    write(pub.fileDescriptor[1], pub.pubEnd, strlen(pub.pubEnd));
                    if((z = read(ServerFD[0], pubbufend, 1024)) >= 0) {
                     // printf("pub read %d bytes from the DIServer pipe: \"%s\"\n", z, pubbufend);
                      /*if (strcmp(pubbufend, data)==0){
                        for (articles = 0; articles<10;articles++){
                          if(articles%2 != 0){
                            sprintf(article, "Topic 1 Article %d", articles);
                            write(pub.fileDescriptor[1], article, strlen(article));
                          }
                        }
                      }*/
                    }

                  }
                  else{
                    write(pub.fileDescriptor[1], pub.pubterm, strlen(pub.pubterm));
                    continue;
                 }
                }
              }
              else{
                write(pub.fileDescriptor[1], pub.pubterm, strlen(pub.pubterm));
                continue;
              }

          } 
          else 
              perror("read");
          //if reads accept then write to publisher pipe 
  				exit(0);
  			}
  			else if (pubpid < 0){
  				perror("fork failed");
  			}
        
  			else{//back to the DIServer

          record[i].pipe = pub.fileDescriptor;
          record[i].type = "Publisher";
          record[i].pid = i+1;
          record[i].selectTopics = pub.pubTopic;
          record[i].serverPipe = ServerFD;
          rc=pthread_create(&pubthreads[i],&pubattr,pubThr_fn,(void *) &record[i]);
          if(rc){
            printf("ERROR; return code from pthread_create() is %d\n", rc);
            exit(-1);
          }
          pthread_attr_destroy(&pubattr);
          rc = pthread_join(pubthreads[i], &pubthreadstatus);
          // want to create the thread here, then handle all of the reading and writing of pipes in the thread handler
  		  }
      }

  		//loop to create forked subs;
      for(i=0;i<m;i++)
      {
        //create the subscriber struct
        int z;
        Subscriber sub;
        sub.subConnect = "Sub Connect";
        sub.subTopic = "Topic 1"; //topic of interest
        sub.subEnd = "End";
        sub.subterm = "terminate";
        pipe(sub.fileDescriptor);
        subpid = fork();
        if(subpid == 0)
        {
          //doPublisher(n);
          write(sub.fileDescriptor[1], sub.subConnect, strlen(sub.subConnect));
          if ((z = read(ServerFD2[0], subbuf, 1024)) >= 0) {
              subbuf[z] = 0; /* terminate the string */ 
              //printf("sub read %d bytes from the DIServer pipe: \"%s\"\n", z, subbuf);
              if (strcmp(subbuf, data) == 0)
              {
                write(sub.fileDescriptor[1], sub.subTopic, strlen(sub.subTopic));
                if ((z = read(ServerFD2[0], subbufend, 1024)) >= 0) {
                  subbufend[z] = 0;
                 // printf("sub read %d bytes from the DIServer pipe: \"%s\"\n", z, subbufend);
                  if (strcmp(subbufend, data) == 0){
                    write(sub.fileDescriptor[1], sub.subEnd, strlen(sub.subEnd));
                    if((z = read(ServerFD2[0], subbufend, 1024)) >= 0) {
                      //printf("sub read %d bytes from the DIServer pipe: \"%s\"\n", z, subbufend);
                    }
                  }
                  else{
                    write(sub.fileDescriptor[1], sub.subterm, strlen(sub.subterm));
                    continue;
                 }
                }
              }
              else{
                write(sub.fileDescriptor[1], sub.subterm, strlen(sub.subterm));
                continue;
              }

          } 
          else 
              perror("read");
          //if reads accept then write to publisher pipe 
          exit(0);
        }
        else if (subpid < 0){
          perror("fork failed");
        }
        else{//back to the DIServer
          record[i+n].pipe = sub.fileDescriptor;
          record[i+n].type = "Subscriber";
          record[i+n].pid = i+1;
          record[i+n].selectTopics = sub.subTopic;
          record[i+n].serverPipe = ServerFD2;

          rc2 = pthread_create(&subthreads[i],&subattr,subThr_fn,(void *) &record[i+n]);
          if(rc2){
            printf("ERROR; return code from pthread_create() is %d\n", rc2);
            exit(-1);
          }
          pthread_attr_destroy(&subattr);
          rc2 = pthread_join(subthreads[i], &subthreadstatus);


        }
      }
      /*
      pthread_attr_destroy(&subattr);
      for(i=0;i<n;i++){
        printf("here\n");
        rc = pthread_join(pubthreads[i], &pubthreadstatus);
        if (rc) {
          printf("ERROR; return code from pthread_join() is %d\n", rc);
          exit(-1);
        }
      }
      pthread_attr_destroy(&subattr);
      for(i=0;i<m;i++){
        printf("here\n");
        rc2 = pthread_join(subthreads[i], &subthreadstatus);
        if (rc2) {
          printf("ERROR; return code from pthread_join() is %d\n", rc2);
          exit(-1);
        }
      }
      */
      /*this works because the way code is written, if it 
      makes it here all connections have been made*/
      //printf("all pubs and subs have connected to server\n");
      for(i = 0; i<n+m; i++){
        printf("Type:%s, ID: %d, Topic: %s\n", record[i].type, record[i].pid, record[i].selectTopics);
      }
      //begin termination

      write(ServerFD[1], terminate, strlen(terminate));
      close(ServerFD[1]);
      for(i = 0; i<n+m; i++){
        printf("%s-%d: terminated\n", record[i].type,record[i].pid);
      }
      //printf("%s\n", );
      pthread_exit(NULL);

	
}
	else
		wait(&status);
		//main process just needs to wait for everything else to be done.


	return(0);
}
void *user_authentication_main(void *arg)
{
    	BIO     *bio_acc    = NULL;
	BIO     *bio_client = NULL;
    	SSL     *ssl_client = NULL;
    	SSL_CTX *ctx        = NULL;

	char    username[USER_NAME_LENGTH + 1];
	char    key_exchange_passwd[PASSWD_LENGTH + 1];
	boolean is_admin_flag;

    	ctx = setup_server_ctx(UA_CERTFILE_PATH, UA_CERTFILE_PASSWD, PHR_ROOT_CA_ONLY_CERT_CERTFILE_PATH);
    	bio_acc = BIO_new_accept(UA_USER_AUTHENTICATION_PORT);
    	if(!bio_acc)
        	int_error("Creating server socket failed");
  
    	if(BIO_do_accept(bio_acc) <= 0)
        	int_error("Binding server socket failed");
  
    	for(;;)
    	{
        	if(BIO_do_accept(bio_acc) <= 0)
            		int_error("Accepting connection failed");
 
        	bio_client = BIO_pop(bio_acc);

		// Verify the user
		if(verify_authentication_request(bio_client, username, &is_admin_flag, key_exchange_passwd))
		{
			int  err;
			char *hosts[1];

			// SSL certificate response
			if(!ssl_cert_response(bio_client, username, is_admin_flag, key_exchange_passwd))
				goto ERROR_AT_BIO_LAYER;

        		if(!(ssl_client = SSL_new(ctx)))
            			int_error("Creating SSL context failed");

        		SSL_set_bio(ssl_client, bio_client, bio_client);
			if(SSL_accept(ssl_client) <= 0)
			{
        			fprintf(stderr, "Accepting SSL connection failed\n");
				goto ERROR_AT_SSL_LAYER;
			}

			hosts[0] = is_admin_flag ? ADMIN_CN : USER_CN; 
    			if((err = post_connection_check(ssl_client, hosts, 1, true, GLOBAL_authority_name)) != X509_V_OK)
    			{
        			fprintf(stderr, "Checking peer certificate failed\n\"%s\"\n", X509_verify_cert_error_string(err));
        			goto ERROR_AT_SSL_LAYER;
    			}

			// Verify the certificate owner
			if(!verify_cert_owner(ssl_client, username, is_admin_flag))
				goto ERROR_AT_SSL_LAYER;

			// Record transaction login log
			record_transaction_complete_login_log(ssl_client, username, is_admin_flag);

			// Basic information response
			if(!basic_info_response(ssl_client, username, is_admin_flag))
				goto ERROR_AT_SSL_LAYER;

			if(!is_admin_flag)
			{
				// CP-ABE private key response
	    			if(!cpabe_private_key_response(ssl_client, username))
					goto ERROR_AT_SSL_LAYER;
			}

ERROR_AT_SSL_LAYER:

			SSL_cleanup(ssl_client);
			ssl_client = NULL;
    			ERR_remove_state(0);
			continue;

ERROR_AT_BIO_LAYER:

			BIO_free(bio_client);
			bio_client = NULL;
			ERR_remove_state(0);
		}
		else
		{
			fprintf(stderr, "Incorrect the verification information\n");

			// Record transaction login log
			record_transaction_incomplete_login_log(bio_client);

			BIO_free(bio_client);
			bio_client = NULL;
		}
    	}
    
    	SSL_CTX_free(ctx);
	ctx = NULL;

    	BIO_free(bio_acc);
	bio_acc = NULL;

	pthread_exit(NULL);
    	return NULL;
}
Example #24
/*
 * Receive UDP data for 1 observation, continually writing it to 
 * datablocks
 */
void * leda_udpdb_receive_obs (void * arg)
{
  udpdb_t * ctx = (udpdb_t *) arg;

  // multilogging facility
  multilog_t * log = ctx->log;

  // decoded sequence number
  uint64_t seq_no = 0;
  uint64_t ch_id = 0;
  uint64_t tmp;
  unsigned char * b = (unsigned char *) ctx->sock->buf;

  // antenna id, derived from the decoded channel id
  uint16_t ant_id = 0;

  // bytes returned by a recvfrom call (negative on error)
  ssize_t got = 0;

  // saved errno from a failed recvfrom call
  int errsv;

  // offset of current packet in bytes from start of block
  int64_t byte_offset = 0;

  // offset of current packet in bytes from start of obs
  uint64_t seq_byte = 0;

  // for "saving" out of order packets near edges of blocks
  unsigned int temp_idx = 0;
  unsigned int temp_max = 200;
  char temp_buffers[temp_max][UDP_DATA];
  uint64_t temp_seq_byte[temp_max];

#ifdef _DEBUG
  uint64_t prev_seq[8];
  uint64_t ant_dropped[8];
  uint64_t ant_recv[8];

  prev_seq[0] = ant_dropped[0] = ant_recv[0] = 0;
  prev_seq[1] = ant_dropped[1] = ant_recv[1] = 0;
  prev_seq[2] = ant_dropped[2] = ant_recv[2] = 0;
  prev_seq[3] = ant_dropped[3] = ant_recv[3] = 0;
  prev_seq[4] = ant_dropped[4] = ant_recv[4] = 0;
  prev_seq[5] = ant_dropped[5] = ant_recv[5] = 0;
  prev_seq[6] = ant_dropped[6] = ant_recv[6] = 0;
  prev_seq[7] = ant_dropped[7] = ant_recv[7] = 0;
#endif

  unsigned i = 0;
  int thread_result = 0;

  if (ctx->verbose)
    multilog(log, LOG_INFO, "leda_udpdb_receive_obs()\n");

  // set the CPU that this thread shall run on
  if (ctx->recv_core >= 0)
  {
    multilog(log, LOG_INFO, "receive_obs: binding to core %d\n", ctx->recv_core);
    if (dada_bind_thread_to_core(ctx->recv_core) < 0)
      multilog(ctx->log, LOG_WARNING, "receive_obs: failed to bind to core %d\n", ctx->recv_core);
    multilog(log, LOG_INFO, "receive_obs: bound\n");
  }

  // set recording state once we enter this main loop
  recording = 1;

  // open first hdu and first buffer
  ctx->block_start_byte = 0;
  ctx->block_end_byte = ctx->block_start_byte + ( ctx->packets_per_buffer - 1) * UDP_DATA;

  // TODO move the opening of the first block here ?
  //
  uint64_t timeouts = 0;
  uint64_t timeout_max = 1000000;

  // Continue to receive packets
  while (!quit_threads && !stop_pending) 
  {
    ctx->sock->have_packet = 0; 

    // incredibly tight loop to try and get a packet
    while (!ctx->sock->have_packet && !quit_threads && !stop_pending)
    {
      // receive 1 packet into the socket buffer
      got = recvfrom ( ctx->sock->fd, ctx->sock->buf, UDP_PAYLOAD, 0, NULL, NULL );

      if (got == UDP_PAYLOAD) 
      {
        ctx->sock->have_packet = 1;
      } 
      else if (got == -1) 
      {
        errsv = errno;
        if (errsv == EAGAIN) 
        {
          ctx->n_sleeps++;
          if (ctx->capture_started)
            timeouts++;
          if (timeouts > timeout_max)
          {
            multilog(log, LOG_INFO, "timeouts[%"PRIu64"] > timeout_max[%"PRIu64"]\n",timeouts, timeout_max);
            stop_byte = ctx->last_byte;
            stop_pending = 1;
          }
        } 
        else 
        {
          multilog (log, LOG_ERR, "receive_obs: recvfrom failed %s\n", strerror(errsv));
          thread_result = -1;
          pthread_exit((void *) &thread_result);
        }
      } 
      else // we received a packet of the WRONG size, ignore it
      {
        multilog (log, LOG_ERR, "receive_obs: received %d bytes, expected %d\n", got, UDP_PAYLOAD);
      }
    }
    timeouts = 0;

    // we have a valid packet within the timeout
    if (ctx->sock->have_packet) 
    {

      //tmp = UINT64_C (0);
      seq_no = UINT64_C (0);
      for (i = 0; i < 8; i++ )
      {
        tmp = b[8 - i - 1];
        seq_no |= (tmp << ((i & 7) << 3));
      }

      ch_id = UINT64_C (0);
      for (i = 0; i < 8; i++ )
      {
        tmp = UINT64_C (0);
        tmp = b[16 - i - 1];
        ch_id |= (tmp << ((i & 7) << 3));
      }

      //if (ctx->num_inputs == 1)
      //  ant_id = 0;
      //else
        ant_id = (uint16_t) ch_id;

      // decode sequence number
      //leda_decode_header(ctx->sock->buf, &seq_no, &ant_id);
#ifdef _DEBUG
      if ((prev_seq[ant_id] > 0) && (seq_no != prev_seq[ant_id] + 1))
        ant_dropped[ant_id]++;
      prev_seq[ant_id] = seq_no;
#endif
      


      // if first packet
      if (!ctx->capture_started)
      {
        ctx->block_start_byte = ctx->num_inputs * seq_no * UDP_DATA;
        ctx->block_end_byte   = (ctx->block_start_byte + ctx->hdu_bufsz) - UDP_DATA;
        ctx->capture_started = 1;

        if (ctx->verbose)
          multilog (ctx->log, LOG_INFO, "receive_obs: START [%"PRIu64
                    " - %"PRIu64"]\n", ctx->block_start_byte, ctx->block_end_byte);
      }

      if (ctx->capture_started)
      {
        seq_byte = (ctx->num_inputs * seq_no * UDP_DATA) + (ant_id * UDP_DATA);
        if (ctx->verbose > 2)
           multilog(ctx->log, LOG_INFO, "seq_byte=%"PRIu64", num_inputs=%d, seq_no=%"PRIu64", ant_id =%d, UDP_DATA=%d\n",seq_byte,ctx->num_inputs,seq_no,ant_id, UDP_DATA);


        ctx->last_seq = seq_no;
        ctx->last_byte = seq_byte;

        // if packet arrived too late, ignore
        if (seq_byte < ctx->block_start_byte)
        {
          multilog (ctx->log, LOG_INFO, "receive_obs: seq_byte < block_start_byte\n");
          ctx->packets->dropped++;
          ctx->bytes->dropped += UDP_DATA;
        }
        else
        {
          // packet belongs in this block
          if (seq_byte <= ctx->block_end_byte)
          {
            byte_offset = seq_byte - ctx->block_start_byte;
            memcpy (ctx->block + byte_offset, ctx->sock->buf + UDP_HEADER, UDP_DATA);
            ctx->packets->received++;
            ctx->bytes->received += UDP_DATA;
            ctx->block_count++;
#ifdef _DEBUG  
            ant_recv[ant_id]++;
#endif
          }
          // packet belongs in subsequent block
          else
          {
            if (ctx->verbose)
	       multilog (log, LOG_INFO, "receive_obs: received packet for subsequent buffer: temp_idx=%d, ant_id=%d, seq_no=%"PRIu64"\n",temp_idx,ant_id,seq_no);
            //multilog (log, LOG_INFO, "receive_obs: received packet for subsequent buffer: temp_idx=%d\n",temp_idx);
            if (temp_idx < temp_max)
            {
              // save packet to temp buffer
              memcpy (temp_buffers[temp_idx], ctx->sock->buf + UDP_HEADER, UDP_DATA);
              temp_seq_byte[temp_idx] = seq_byte;
              temp_idx++;
            }
            else
            {
              ctx->packets->dropped++;
              ctx->bytes->dropped += UDP_DATA;
            }
          }
        }
      }

      // now check for a full buffer or full temp queue
      if ((ctx->block_count >= ctx->packets_per_buffer) || (temp_idx >= temp_max))
      {
        if (ctx->verbose)
          multilog (log, LOG_INFO, "BLOCK COMPLETE seq_no=%"PRIu64", "
                    "ant_id=%"PRIu16", block_count=%"PRIu64", "
                    "temp_idx=%d\n", seq_no, ant_id,  ctx->block_count, 
                    temp_idx);

        uint64_t dropped = ctx->packets_per_buffer - ctx->block_count;
        if (dropped)
        {
          ctx->packets->dropped += dropped;
          ctx->bytes->dropped += (dropped * UDP_DATA);
        }
#ifdef _DEBUG
  if ((ant_dropped[0] > 0) || (ant_dropped[1] > 0) || (ant_dropped[2] > 0) || (ant_dropped[3] > 0))
  {
  multilog(log, LOG_INFO, "dropped: ant0=%"PRIu64", ant1=%"PRIu64", ant2=%"PRIu64", ant3=%"PRIu64", ant4=%"PRIu64", ant5=%"PRIu64", ant6=%"PRIu64", ant7=%"PRIu64"\n", ant_dropped[0], ant_dropped[1], ant_dropped[2], ant_dropped[3], ant_dropped[4], ant_dropped[5], ant_dropped[6], ant_dropped[7]);
  multilog(log, LOG_INFO, "recv:    ant0=%"PRIu64", ant1=%"PRIu64", ant2=%"PRIu64", ant3=%"PRIu64", ant4=%"PRIu64", ant5=%"PRIu64", ant6=%"PRIu64", ant7=%"PRIu64"\n", ant_recv[0], ant_recv[1], ant_recv[2], ant_recv[3], ant_recv[4], ant_recv[5], ant_recv[6], ant_recv[7]);
  }
#endif
        // get a new buffer and write any temp packets saved 
        if (leda_udpdb_new_buffer (ctx) < 0)
        {
          multilog(ctx->log, LOG_ERR, "receive_obs: leda_udpdb_new_buffer failed\n");
          thread_result = -1;
          pthread_exit((void *) &thread_result);
        }

        if (ctx->verbose > 1)
          multilog(log, LOG_INFO, "block bytes: %"PRIu64" - %"PRIu64"\n", ctx->block_start_byte, ctx->block_end_byte);
  
        // include any futuristic packets we saved
        for (i=0; i < temp_idx; i++)
        {
          seq_byte = temp_seq_byte[i];
          byte_offset = seq_byte - ctx->block_start_byte;
          if (byte_offset < ctx->hdu_bufsz)
          {
            memcpy (ctx->block + byte_offset, temp_buffers[i], UDP_DATA);
            ctx->block_count++;
            ctx->packets->received++;
            ctx->bytes->received += UDP_DATA;
          }
          else
          {
            ctx->packets->dropped++;
            ctx->bytes->dropped += UDP_DATA;
          }
        }
        temp_idx = 0;
      }
    }

    // packet has been inserted or saved by this point
    ctx->sock->have_packet = 0;

    // check for the stopping condition
    if (stop_byte)
    {
      if (seq_byte >= stop_byte)
      {
        if (ctx->verbose)
        {
          multilog(ctx->log, LOG_INFO, "receive_obs: STOP seq_byte[%"PRIu64"]"
                   " >= stop_byte[%"PRIu64"], stopping\n", seq_byte, stop_byte);
          multilog(ctx->log, LOG_INFO, "receive_obs: STOP buffer[%"PRIu64" - "
                   "%"PRIu64"]\n", ctx->block_start_byte, ctx->block_end_byte);
        }
        stop_pending = 1;

        // try to determine how much data has been written
        uint64_t bytes_just_written = 0;
        if (seq_byte <= ctx->block_start_byte)
          bytes_just_written = 1;
        else if (seq_byte < ctx->block_end_byte)
          bytes_just_written = seq_byte - ctx->block_start_byte;
        else
          bytes_just_written = ctx->hdu_bufsz;

        if (ctx->verbose)
        {
          multilog(ctx->log, LOG_INFO, "receive_obs: STOP bytes_just_written=%"PRIu64"\n",
                   bytes_just_written);
        }

        // close buffer signalling EOD
        if (leda_udpdb_close_buffer (ctx, bytes_just_written, 1) < 0)
        {
          multilog(ctx->log, LOG_ERR, "receive_obs: leda_udpdb_close_hdu failed\n");
          thread_result = -1;
          pthread_exit((void *) &thread_result);
        }
        stop_byte = 0;
        break;
      }
    }

    if (stop_pending) 
    {
      multilog(ctx->log, LOG_ERR, "receive_obs: stop_pending after break - SHOULD NOT HAPPEN!\n");
    }
  }

  // TODO move the closing of datablock here

  stop_pending = 0;

  if (quit_threads && ctx->verbose) 
    multilog (ctx->log, LOG_INFO, "main_function: quit_threads detected\n");
 
  if (ctx->verbose) 
    multilog(log, LOG_INFO, "receiving thread exiting\n");

  /* return 0 */
  pthread_exit((void *) &thread_result);
}
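The two shift loops near the top of the receive loop unpack the first 16 bytes of each packet as big-endian 64-bit integers (sequence number, then channel id). A self-contained sketch of just that decode step, assuming the same byte layout as those loops; the sample header bytes are purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Decode one big-endian 64-bit value starting at p, as the shift loops above do. */
static uint64_t decode_be64(const unsigned char *p)
{
    uint64_t v = 0;
    for (unsigned i = 0; i < 8; i++)
        v = (v << 8) | p[i];
    return v;
}

int main(void)
{
    /* assumed layout: bytes 0-7 = sequence number, bytes 8-15 = channel id */
    unsigned char hdr[16] = {0,0,0,0,0,0,0x12,0x34,   /* seq_no = 0x1234 */
                             0,0,0,0,0,0,0,0x02};     /* ch_id  = 2      */

    uint64_t seq_no = decode_be64(hdr);
    uint16_t ant_id = (uint16_t) decode_be64(hdr + 8);

    printf("seq_no=%llu ant_id=%u\n",
           (unsigned long long) seq_no, (unsigned) ant_id);
    return 0;
}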
Example #25
0
/* ****************************************************************************
*
* workerFunc - notification worker thread (started from QueueWorkers::start())
*/
static void *workerFunc(void* pSyncQ)
{
  SyncQOverflow<SenderThreadParams*> *queue = (SyncQOverflow<SenderThreadParams*> *) pSyncQ;
  CURL *curl;

  // Initialize curl context
  curl = curl_easy_init();

  if (curl == NULL)
  {
    LM_E(("Runtime Error (curl_easy_init)"));
    pthread_exit(NULL);
  }

  for (;;)
  {
    SenderThreadParams* params = queue->pop();
    struct timespec     now;
    struct timespec     howlong;
    size_t              estimatedQSize;

    QueueStatistics::incOut();
    clock_gettime(CLOCK_REALTIME, &now);
    clock_difftime(&now, &params->timeStamp, &howlong);
    estimatedQSize = queue->size();
    QueueStatistics::addTimeInQWithSize(&howlong, estimatedQSize);


    strncpy(transactionId, params->transactionId, sizeof(transactionId));

    LM_T(LmtNotifier, ("worker sending to: host='%s', port=%d, verb=%s, tenant='%s', service-path: '%s', xauthToken: '%s', path='%s', content-type: %s",
                       params->ip.c_str(),
                       params->port,
                       params->verb.c_str(),
                       params->tenant.c_str(),
                       params->servicePath.c_str(),
                       params->xauthToken.c_str(),
                       params->resource.c_str(),
                       params->content_type.c_str()));

    if (simulatedNotification)
    {
      LM_T(LmtNotifier, ("simulatedNotification is 'true', skipping outgoing request"));
      __sync_fetch_and_add(&noOfSimulatedNotifications, 1);
    }
    else // we'll send the notification
    {
      std::string  out;
      int          r;

      r =  httpRequestSendWithCurl(curl,
                                   params->ip,
                                   params->port,
                                   params->protocol,
                                   params->verb,
                                   params->tenant,
                                   params->servicePath,
                                   params->xauthToken,
                                   params->resource,
                                   params->content_type,
                                   params->content,
                                   true,
                                   NOTIFICATION_WAIT_MODE,
                                   &out);

      //
      // FIXME: ok and error counter should be incremented in the other notification modes (generalizing the concept, i.e.
      // not as member of QueueStatistics:: which seems to be tied to just the threadpool notification mode)
      //
      if (r == 0)
      {
        statisticsUpdate(NotifyContextSent, params->format);
        QueueStatistics::incSentOK();
      }
      else
      {
        QueueStatistics::incSentError();
      }

    }

    // Free params memory
    delete params;

    // Reset curl for next iteration
    curl_easy_reset(curl);
  }
}
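httpRequestSendWithCurl, SyncQOverflow and QueueStatistics are specific to this code base; the reusable part of the pattern is that each worker thread owns a single CURL easy handle for its whole lifetime and calls curl_easy_reset between requests instead of re-creating the handle. A rough sketch of that handle-reuse loop with plain libcurl follows; the URL list and timeout are illustrative placeholders.

#include <curl/curl.h>
#include <pthread.h>
#include <stdio.h>

/* Sketch: each worker owns one easy handle and resets it between requests. */
void *curl_worker(void *arg)
{
    const char **urls = (const char **) arg;   /* NULL-terminated list (assumed) */
    CURL *curl = curl_easy_init();

    if (curl == NULL)
        pthread_exit(NULL);

    for (int i = 0; urls[i] != NULL; i++)
    {
        curl_easy_setopt(curl, CURLOPT_URL, urls[i]);
        curl_easy_setopt(curl, CURLOPT_TIMEOUT, 5L);

        CURLcode rc = curl_easy_perform(curl);
        if (rc != CURLE_OK)
            fprintf(stderr, "request to %s failed: %s\n", urls[i], curl_easy_strerror(rc));

        curl_easy_reset(curl);                 /* clear options before the next request */
    }

    curl_easy_cleanup(curl);
    pthread_exit(NULL);
}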
Example #26
0
void *productor(void *params) {
	prod_param *parg = (prod_param *) params;
	int err;
	
	//Three cases: this thread runs a blur filter, runs the filter just before a blur, or the normal case. If we are before a blur, the copy of the image must be started once mat_blur holds,
	//for that image, the number of packets it was split into. For that we would either need a global array npackim[NIMAGE] giving, for each image, how many packets it was split into, or we
	//split every image into NPACK/NIMAGE packets (each image is split using the lcm of the number of threads), so the per-image packet count follows from the total packet count.
	//These three cases are handled inside the while loop.
	while(1) {

		err = pthread_mutex_lock(&(to_pass_mutex[parg->num_filter]));
		if (err!=0)
			error(err,"pthread_mutex_lock pour to_pass_mutex dans producteur");
		if(to_pass[parg->num_filter]>=NPACK){
			err = pthread_mutex_unlock(&(to_pass_mutex[parg->num_filter]));
			if (err!=0)
				error(err,"pthread_mutex_unlock pour to_pass_mutex dans producteur");
			pthread_exit(NULL);
		}
		to_pass[parg->num_filter]++;
		err = pthread_mutex_unlock(&(to_pass_mutex[parg->num_filter]));
		if (err!=0)
			error(err,"pthread_mutex_unlock pour to_pass_mutex dans producteur");

		printf(".");
		fflush(stdout);
		//blur filter case
		if((parg->isblur)!=0) {
			
			//step 1: check that one of the mat_blur entries on row isblur-1 is ready
			//printf("I am in the blur case\n");
			//err = pthread_mutex_lock(&(check_for_c_mutex[(parg->isblur)-1])); //two threads must not search for a c at the same time, otherwise they would copy the same image

			err = sem_wait(&(can_copi[(parg->isblur)-1])); 
			if (err!=0)
				error(err,"sem_wait sur can_copi dans productor");

			err = pthread_mutex_lock(&(can_i_take_c_mutex[(parg->isblur)-1]));//this lock is needed because the semaphore only blocks threads when no image is ready for copying. The semaphore can still be posted twice when two different images become ready for copying at almost the same time, so can_i_take_c_mutex must be held to keep two threads from entering check_lmat_blur at once, which would be a problem since one thread could read can_i_take_c while the other writes to it.
			if (err!=0)
				error(err,"pthread_mutex_lock pour can_i_take_c_mutex dans producteur du cas blur");

			int c = check_lmat_blur((parg->isblur)-1);
			if(c==-1) {
				error(-1,"check_lmat_blur a renvoyé -1");
			}
			
			if (can_i_take_c[(parg->isblur)-1][c] < NPACK/NIMAGE) {
				err = sem_post(&(can_copi[(parg->isblur)-1])); 
				if (err!=0)
					error(err,"sem_post sur can_copi dans productor");
			}
			
			err = pthread_mutex_unlock(&(can_i_take_c_mutex[(parg->isblur)-1]));
			if (err!=0)
				error(err,"pthread_mutex_unlock pour can_i_take_c_mutex dans producteur du cas blur");

			//	error(err,"pthread_mutex_unlock pour check_for_c_mutex dans producteur du cas blur");

			//step 2: we have an image whose previous filter has been applied to all of its parts; copy it

			err = pthread_mutex_lock(&(copi_is_done_mutex[(parg->isblur)-1][c]));
			if (err!=0)
				error(err,"pthread_mutex_lock pour copi_is_done_mutex dans producteur du cas blur pour image_copi");

			if(copi_is_done[(parg->isblur)-1][c]==0) {

				err = image_copi(c,parg->num_filter); //pass the image number and the mat_buf row where the parts of this image live; the read-only fields of the elem_buf pointers on that row must be updated (note: one copy structure per image is enough, since a second copy made for a second blur can overwrite the old one)
				if (err!=0) {
					error(err,"Erreur de image_copi");
				}
				copi_is_done[(parg->isblur)-1][c]++;				
			}
			err = pthread_mutex_unlock(&(copi_is_done_mutex[(parg->isblur)-1][c]));
			if (err!=0)
				error(err,"pthread_mutex_unlock pour copi_is_done_mutex dans producteur du cas blur pour image_copi");

			//step 3: the copies are done; now ask the consumer to consume an elem_buf, but one belonging to image c
			elem_buf *paquet = consommator(parg->num_filter,c);//passing c as the 2nd argument restricts the consumer to elem_bufs belonging to image c
			apply_filter(paquet,parg->num_filter);
			
			err = sem_wait(&(empty[(parg->num_filter)+1])); //possible improvement: this sem_wait is not really needed, since we know the buffers can never fill up anyway
			if (err!=0)
				error(err,"sem_wait sur empty dans productor");
			
			err = pthread_mutex_lock(&(buf_mutex[(parg->num_filter)+1]));
			if (err!=0)
				error(err,"pthread_mutex_lock pour buf_mutex dans producteur");
			
			insert_elem(&paquet,(parg->num_filter)+1);
			
			err = pthread_mutex_lock(&(choosen_c_mutex[(parg->isblur)-1][c]));
			if (err!=0)
				error(err,"pthread_mutex_lock pour choosen_c_mutex dans producteur, check_lmat_blur");

			choosen_c[(parg->isblur)-1][c]++;

			if((choosen_c[(parg->isblur)-1][c]==NPACK/NIMAGE)) {
				err = image_copi_destroy(c,parg->num_filter+1);

				if (err!=0)
					error(err,"Erreur de image_copi_destroy");
			}

			err = pthread_mutex_unlock(&(choosen_c_mutex[(parg->isblur)-1][c]));
			if (err!=0)
				error(err,"pthread_mutex_unlock pour choosen_c_mutex dans producteur");
			
			err = pthread_mutex_unlock(&(buf_mutex[(parg->num_filter)+1]));
			if (err!=0)
				error(err,"pthread_mutex_unlock pour buf_mutex dans producteur");
			
			err = sem_post(&(full[(parg->num_filter)+1])); //possible improvement: this sem_post is not really needed, since we know the buffers can never fill up anyway
			if (err!=0)
				error(err,"sem_post sur full dans productor");
			
			//we are just before a blur filter
			if((parg->beforeblur)!=0) {//parg->beforeblur holds 1 for the first blur, 2 for the second, ...

				err = pthread_mutex_lock(&(blur_mutex[(parg->beforeblur)-1][(paquet)->num_img]));
				if (err!=0)
					error(err,"pthread_mutex_lock pour blur_mutex dans producteur");
				
				(mat_blur[(parg->beforeblur)-1][(paquet)->num_img])++;

				if ((mat_blur[(parg->beforeblur)-1][(paquet)->num_img])==NPACK/NIMAGE) {
					err = sem_post(&(can_copi[(parg->beforeblur)-1])); 
					if (err!=0)
						error(err,"sem_post sur can_copi dans productor, before_blur");
				}
				
				err = pthread_mutex_unlock(&(blur_mutex[(parg->beforeblur)-1][(paquet)->num_img]));
				if (err!=0)
					error(err,"pthread_mutex_unlock pour blur_mutex dans producteur");
			}
			
		} else {
		//normal case

			elem_buf *paquet = consommator(parg->num_filter,-1);//passing -1 as the 2nd argument means any elem_buf is acceptable, whatever its image
			apply_filter(paquet,parg->num_filter);
			err = sem_wait(&(empty[(parg->num_filter)+1])); //possible improvement: this sem_wait is not really needed, since we know the buffers can never fill up anyway
			if (err!=0)
				error(err,"sem_wait sur empty dans productor");
			err = pthread_mutex_lock(&(buf_mutex[(parg->num_filter)+1]));
			if (err!=0)
				error(err,"pthread_mutex_lock pour buf_mutex dans producteur");
			insert_elem(&paquet,(parg->num_filter)+1);
			err = pthread_mutex_unlock(&(buf_mutex[(parg->num_filter)+1]));
			if (err!=0)
				error(err,"pthread_mutex_unlock pour buf_mutex dans producteur");
			
			err = sem_post(&(full[(parg->num_filter)+1])); //possible improvement: this sem_post is not really needed, since we know the buffers can never fill up anyway
			if (err!=0)
				error(err,"sem_post sur full dans productor");
			
			//we are just before a blur filter
			if((parg->beforeblur)!=0) {//parg->beforeblur holds 1 for the first blur, 2 for the second, ...

				err = pthread_mutex_lock(&(blur_mutex[(parg->beforeblur)-1][(paquet)->num_img]));
				if (err!=0)
					error(err,"pthread_mutex_lock pour blur_mutex dans producteur");
				
				(mat_blur[(parg->beforeblur)-1][(paquet)->num_img])++;

				if ((mat_blur[(parg->beforeblur)-1][(paquet)->num_img])==NPACK/NIMAGE) {
					err = sem_post(&(can_copi[(parg->beforeblur)-1])); 
					if (err!=0)
						error(err,"sem_post sur can_copi dans productor, before_blur");
				}

				err = pthread_mutex_unlock(&(blur_mutex[(parg->beforeblur)-1][(paquet)->num_img]));
				if (err!=0)
					error(err,"pthread_mutex_unlock pour blur_mutex dans producteur");
			}
		}
	}
}
Example #27
0
/**
 * Writer loop using message-queue IPC
 */
int swFactoryProcess_writer_loop_queue(swThreadParam *param)
{
    swEventData *resp;
    swServer *serv = SwooleG.serv;

    int pti = param->pti;
    swQueue_data sdata;
    //must add 1: msg_type must not be 0
    sdata.mtype = pti + 1;

    swSignal_none();
    while (SwooleG.running > 0)
    {
        swTrace("[Writer]wt_queue[%ld]->out wait", sdata.mtype);
        if (serv->write_queue.out(&serv->write_queue, &sdata, sizeof(sdata.mdata)) < 0)
        {
            if (errno == EINTR)
            {
                continue;
            }
            swSysError("[writer#%d]wt_queue->out() failed.", pti);
        }
        else
        {
            int ret;
            resp = (swEventData *) sdata.mdata;

            //close connection
            //TODO: thread safe, should close in reactor thread.
            if (resp->info.type == SW_EVENT_CLOSE)
            {
                close_fd:
                swServer_connection_close(SwooleG.serv, resp->info.fd);
                continue;
            }
            //sendfile
            else if (resp->info.type == SW_EVENT_SENDFILE)
            {
                ret = swSocket_sendfile_sync(resp->info.fd, resp->data, SW_WRITER_TIMEOUT);
            }
            //send data
            else
            {
                ret = swConnection_send_blocking(resp->info.fd, resp->data, resp->info.len, 1000 * SW_WRITER_TIMEOUT);
            }

            if (ret < 0)
            {
                switch (swConnection_error(errno))
                {
                case SW_ERROR:
                    swSysError("send to client[%d] failed.", resp->info.fd);
                    break;
                case SW_CLOSE:
                    goto close_fd;
                default:
                    break;
                }
            }
        }
    }
    pthread_exit((void *) param);
    return SW_OK;
}
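swServer, swQueue_data and the SW_EVENT_* types are Swoole internals; the underlying idiom -- each writer thread blocks on its own message type (pti + 1, because a msgtyp of 0 has the special "any message" meaning) and retries on EINTR -- can be shown with plain System V message queues. A minimal sketch under those assumptions; the queue id is assumed to come from msgget() elsewhere and the payload size is arbitrary.

#include <sys/msg.h>
#include <sys/ipc.h>
#include <pthread.h>
#include <errno.h>
#include <stdio.h>

struct writer_msg
{
    long mtype;          /* writer index + 1; a msgtyp of 0 means "any message" */
    char payload[256];
};

struct writer_arg
{
    int  msqid;          /* queue id obtained from msgget() */
    long writer_index;   /* 0-based index of this writer thread */
};

/* Sketch of a writer loop: block on this writer's message type, retry on EINTR. */
void *writer_loop(void *p)
{
    struct writer_arg *arg = (struct writer_arg *) p;
    struct writer_msg  msg;

    for (;;)
    {
        ssize_t n = msgrcv(arg->msqid, &msg, sizeof(msg.payload),
                           arg->writer_index + 1, 0);
        if (n < 0)
        {
            if (errno == EINTR)
                continue;                 /* interrupted by a signal, just retry */
            perror("msgrcv");
            break;
        }

        /* ... hand msg.payload (n bytes) to the client connection here ... */
        printf("writer %ld got %zd bytes\n", arg->writer_index, n);
    }

    pthread_exit(NULL);
    return NULL;
}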
Example #28
0
/*
 * Function that launches the threads.
 * pre : receives an array with the number of threads per filter and the maximum number of threads;
 *       the constants must already be initialised;
 *       applyfilter is called simply by passing filter numbers;
 *       the first row of the mat_buf buffer must already be filled.
 * post: launches the threads and processes the images.
 * Note: the function must join the last wave of threads.
 */
void* image_treatement(void* arrayThreads) {
	
	int* nthreads = (int*)arrayThreads;
	int nthreadmax = nthreads[0];
	int i;
	for (i=1; i<NFILTERS; i++) {
		if (nthreads[i]>nthreadmax)
			nthreadmax = nthreads[i];
	}
	
	int j;
	prod_param parg[NFILTERS][nthreadmax];
	pthread_t threads[NFILTERS][nthreadmax];
	int err;
	
	int val_init_can_copi = 0;
	
	//initialise the can_copi semaphore and the blur_mutex, choosen_c_mutex, can_i_take_c_mutex, copi_is_done_mutex and check_for_c_mutex mutexes:
	for(i=0;i<NBLUR;i++) {

		err=pthread_mutex_init(&(can_i_take_c_mutex[i]),NULL);
		if (err!=0)
			error(err,"pthread_mutex_init pour can_i_take_c_mutex");

		if ((i==0)&&(f_isblur[0]!=0)) {
			val_init_can_copi = NIMAGE;
		}
		else {
			val_init_can_copi = 0;
		}

		err = sem_init(&(can_copi[i]),0,val_init_can_copi);
		if (err!=0)
			error(err,"sem_init pour val_init_can_copi");

		for(j=0;j<NIMAGE;j++) {
			choosen_c[i][j]=0;
			copi_is_done[i][j]=0;
			can_i_take_c[i][j]=0;
			
			err=pthread_mutex_init(&(choosen_c_mutex[i][j]),NULL);
			if (err!=0)
				error(err,"pthread_mutex_init pour choosen_c_mutex");
			
			err=pthread_mutex_init(&(blur_mutex[i][j]),NULL);
			if (err!=0)
				error(err,"pthread_mutex_init pour blur_mutex");

			err=pthread_mutex_init(&(copi_is_done_mutex[i][j]),NULL);
			if (err!=0)
				error(err,"pthread_mutex_init pour copi_is_done_mutex");
		}
	}
	
	//initialise the full and empty semaphores and the buf_mutex mutexes
	int val_init_full; //initial value of the semaphores in full
	int val_init_empty; //initial value of the semaphores in empty
	
	//initialise the full and empty semaphores and the isdone, to_pass_mutex and buf_mutex mutexes
	for(i=0;i<NFILTERS+1;i++) {
		if(i==0) {
			val_init_full = NPACK;
			val_init_empty = 0;
		} else {
			val_init_full = 0;
			val_init_empty = NPACK;
		}
		
		err = sem_init(&(full[i]),0,val_init_full);
		if (err!=0)
			error(err,"sem_init pour full");
		
		err = sem_init(&(empty[i]),0,val_init_empty);
		if (err!=0)
			error(err,"sem_init pour empty");
		
		err=pthread_mutex_init(&(buf_mutex[i]),NULL);
		if (err!=0)
			error(err,"pthread_mutex_init pour buf_mutex");
		
		err=pthread_mutex_init(&(to_pass_mutex[i]),NULL);
		if (err!=0)
			error(err,"pthread_mutex_init pour to_pass_mutex");
	}
	
	//start the filter threads
	for(i=0;i<NFILTERS;i++) {
		for(j=0;j<nthreads[i];j++) {
			(parg[i][j]).num_filter = i;
			(parg[i][j]).isblur = f_isblur[i];
			(parg[i][j]).beforeblur = f_beforeblur[i];
			err = pthread_create(&(threads[i][j]),NULL,&(productor),(void *)(&(parg[i][j])));
			if (err!=0)
				error(err,"pthread_create");
		}
	}
	
	//Wait for the threads: THEY MUST ALL BE JOINED, IN ORDER!
	for(j=0;j<NFILTERS;j++) {
		for(i=0;i<nthreads[j];i++) {
			err = pthread_join(threads[j][i],NULL);
			if (err!=0)
				error(err,"pthread_join");
		}
	}

	//destroy the can_copi semaphore and the blur_mutex, choosen_c_mutex, can_i_take_c_mutex, copi_is_done_mutex and check_for_c_mutex mutexes
	for(i=0;i<NBLUR;i++) {

		err=pthread_mutex_destroy(&(can_i_take_c_mutex[i]));
		if (err!=0)
			error(err,"pthread_mutex_destroy pour can_i_take_c_mutex");	

		err = sem_destroy(&(can_copi[i]));
		if (err!=0)
			error(err,"sem_destroy pour can_copi");	

		for(j=0;j<NIMAGE;j++) {
			
			err=pthread_mutex_destroy(&(choosen_c_mutex[i][j]));
			if (err!=0)
				error(err,"pthread_mutex_destroy pour choosen_c_mutex");
			
			err=pthread_mutex_destroy(&(blur_mutex[i][j]));
			if (err!=0)
				error(err,"pthread_mutex_destroy pour blur_mutex");

			err=pthread_mutex_destroy(&(copi_is_done_mutex[i][j]));
			if (err!=0)
				error(err,"pthread_mutex_destroy pour copi_is_done_mutex");
		}
	}

	//destroy the full and empty semaphores, the buf_mutex and to_pass_mutex mutexes and the isdone mutexes
	for(i=0;i<NFILTERS+1;i++) {
		err = sem_destroy(&(full[i]));
		if (err!=0)
			error(err,"sem_destroy pour full");
		
		err = sem_destroy(&(empty[i]));
		if (err!=0)
			error(err,"sem_destroy pour empty");
		
		err=pthread_mutex_destroy(&(buf_mutex[i]));
		if (err!=0)
			error(err,"pthread_mutex_destroy pour buf_mutex");

		err=pthread_mutex_destroy(&(to_pass_mutex[i]));
		if (err!=0)
			error(err,"pthread_mutex_destroy pour to_pass_mutex");
	}
	pthread_exit(NULL);
}
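The full/empty semaphore pairs plus one buf_mutex per stage that this function initialises form the classic bounded-buffer handshake: a producer waits on empty, locks the stage, inserts, then posts full, and the consumer mirrors it. A stripped-down, single-stage sketch of that handshake under those assumptions; the int ring buffer is purely illustrative.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define CAP 8

static int             ring[CAP];
static int             head, tail;
static sem_t           empty_slots, full_slots;
static pthread_mutex_t ring_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Producer side of the handshake used per stage in image_treatement. */
static void put(int v)
{
    sem_wait(&empty_slots);            /* wait for room */
    pthread_mutex_lock(&ring_mutex);
    ring[tail] = v;
    tail = (tail + 1) % CAP;
    pthread_mutex_unlock(&ring_mutex);
    sem_post(&full_slots);             /* announce one more item */
}

/* Consumer side: mirror image of put(). */
static int get(void)
{
    sem_wait(&full_slots);
    pthread_mutex_lock(&ring_mutex);
    int v = ring[head];
    head = (head + 1) % CAP;
    pthread_mutex_unlock(&ring_mutex);
    sem_post(&empty_slots);
    return v;
}

void *demo_producer(void *unused)
{
    (void) unused;
    for (int i = 0; i < 100; i++)
        put(i);
    pthread_exit(NULL);
}

int main(void)
{
    sem_init(&empty_slots, 0, CAP);    /* all slots start empty */
    sem_init(&full_slots, 0, 0);

    pthread_t t;
    pthread_create(&t, NULL, demo_producer, NULL);
    for (int i = 0; i < 100; i++)
        printf("%d\n", get());
    pthread_join(t, NULL);

    sem_destroy(&empty_slots);
    sem_destroy(&full_slots);
    return 0;
}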
Example #29
0
void *receiverThread(void *fd)
{
	char tempBuffer[BLOCKSIZE];
	int len = 0;
	int tsBufferSize = 0;
	
	eDebug("[MOVIEPLAYER] receiverThread starting: fd = %d", *(int *)fd);
	pthread_cleanup_push(receiverThreadCleanup, (void *)fd);
	nice(-1);
	// fill buffer
	while (true)
	{
		pthread_testcancel();
		pthread_mutex_lock(&mutex);
		tsBufferSize = tsBuffer.size();
		pthread_mutex_unlock(&mutex);
		if (tsBufferSize < INITIALBUFFER)
		{
			while (1)
			{
				len = recv(*(int *)fd, tempBuffer, BLOCKSIZE, 0);
				if (len < 0)
				{
					int error = errno;
					if (error == EINTR)
					{
						continue;
					}
					else if (error == EAGAIN)
					{
						usleep(100000);
						continue;
					}
					/* all other errors are fatal */
					eDebug("[MOVIEPLAYER] fatal recv error %d", error);
				}
				else if (len == 0)
				{
					eDebug("[MOVIEPLAYER] socket closed");
				}
				break;
			}
//			eDebug("[MOVIEPLAYER] %d <<< writing %d bytes to buffer...", tsBufferSize, len);
			if (len > 0)
			{
				pthread_mutex_lock(&mutex);
				tsBuffer.write(tempBuffer, len);
				pthread_mutex_unlock(&mutex);
			}
			else
			{
				/* socket closed, or fatal error */
				break;
			}
		}
		else
		{
			/* buffer full, wait a bit */
			usleep(200000);
		}
	}
	pthread_exit(NULL);
	pthread_cleanup_pop(1); // never reached, but must lexically balance pthread_cleanup_push; the cleanup handler still runs via pthread_exit
}
Example #30
0
/**
 * Writer loop using message-queue IPC
 */
int swFactoryProcess_writer_loop_queue(swThreadParam *param)
{
	swFactory *factory = param->object;
	swFactoryProcess *object = factory->object;
	swEventData *resp;

	int pti = param->pti;
	swQueue_data sdata;
	//must add 1: msg_type must not be 0
	sdata.mtype = pti + 1;

	swSignal_none();
	while (SwooleG.running > 0)
	{
		swTrace("[Writer]wt_queue[%ld]->out wait", sdata.mtype);
		if (object->wt_queue.out(&object->wt_queue, &sdata, sizeof(sdata.mdata)) < 0)
		{
			if (errno == EINTR)
			{
				continue;
			}
			swWarn("[writer]wt_queue->out fail.Error: %s [%d]", strerror(errno), errno);
		}
		else
		{
			int ret;
			resp = (swEventData *) sdata.mdata;

			//Close
			//TODO: thread safe, should close in reactor thread.
			if (resp->info.len == 0)
			{
				close_fd:
				swServer_connection_close(SwooleG.serv, resp->info.fd, 0);
				continue;
			}
			else
			{
				if (resp->info.type == SW_EVENT_SENDFILE)
				{
					ret = swConnection_sendfile_blocking(resp->info.fd, resp->data, 1000 * SW_WRITER_TIMEOUT);
				}
				else
				{
					ret = swConnection_send_blocking(resp->info.fd, resp->data, resp->info.len, 1000 * SW_WRITER_TIMEOUT);
				}

				if (ret < 0)
				{
					switch (swConnection_error(errno))
					{
					case SW_ERROR:
						swWarn("send to fd[%d] failed. Error: %s[%d]", resp->info.fd, strerror(errno), errno);
						break;
					case SW_CLOSE:
						goto close_fd;
					default:
						break;
					}
				}
			}
		}
	}
	pthread_exit((void *) param);
	return SW_OK;
}