예제 #1
0
파일: print_funcs.c 프로젝트: cole14/sysres
//100%  free: [--------------------|]
//<100% free: [-------------------| ]
//<95%  free: [------------------|  ]
//<90%  free: [-----------------|   ]
//<85%  free: [----------------|    ]
//<80%  free: [---------------|     ]
//<75%  free: [--------------|      ]
//<70%  free: [-------------|       ]
//<65%  free: [------------|        ]
//<60%  free: [-----------|         ]
//<55%  free: [----------|          ]
//<50%  free: [---------|           ]
//<45%  free: [--------|            ]
//<40%  free: [-------|             ]
//<35%  free: [------|              ]
//<30%  free: [-----|               ]
//<25%  free: [----|                ]
//<20%  free: [---|                 ]
//<15%  free: [--|                  ]
//<10%  free: [-|                   ]
//<5%   free: [|                    ]
// Renders a 21-segment ASCII bar for the given utilization percentage,
// prefixed with the current time and suffixed with the numeric value.
// Segments up to percent_used print '-', the first segment past it
// prints '|', and every remaining segment prints ' ' (see table above).
void print_free_visual(double percent_used){
    double threshold;
    char mark = '-';

    fprintf(stdout, "%s: [", get_cur_time());
    for(threshold = 5.0; threshold < 110.0; threshold += 5.0){
        if(threshold > percent_used){
            // First crossing flips '-' -> '|'; afterwards it stays ' '.
            mark = (mark == '-') ? '|' : ' ';
        }
        fputc(mark, stdout);
    }
    fprintf(stdout, "] (%lf%% Utilized)\n", percent_used);
}
예제 #2
0
/*
 * Benchmark driver: fills a vector of the size given on the command line
 * with random values, times twoNorm() over it, verifies the result, and
 * prints the elapsed time and throughput.
 *
 * Usage: prog <size>
 * Returns EXIT_SUCCESS on success, EXIT_FAILURE on bad arguments,
 * allocation failure, or an inaccurate result.
 */
int main(int argc, char* argv[])
{
    size_t size;
    double btime, etime;
    double *vec_x;
    double result;
    srand(time(NULL));
    if(argc > 1)
    {
        /* strtoul instead of atoi: atoi returns int (can go negative /
         * overflow silently) while size is a size_t. */
        size = (size_t)strtoul(argv[1], NULL, 10);
#ifndef DEBUG
        printf("%zu\t", size);  /* %zu is the correct specifier for size_t */
#endif
    }
    else
    {
        printf("Not enough parameters!\n");
        return EXIT_FAILURE;
    }
    
    /* No cast on malloc in C; sizeof *vec_x stays correct if the type changes. */
    vec_x = malloc(size * sizeof *vec_x);
    if(vec_x == NULL)
    {
        printf("The memory allocation failed! Exit Now!\n");
        return EXIT_FAILURE;
    }
    else
    {
        size_t i;
        for(i = 0; i < size; ++i)
        {
            /* NOTE(review): yields integers in [0, MAX-MIN]; if values in
             * [MIN, MAX] were intended, a "+ MIN" offset is missing —
             * left unchanged to preserve existing behavior. */
            vec_x[i] = rand()%(MAX-MIN+1);
#ifdef DEBUG
            printf("The random data generated is %.4f\n", vec_x[i]);
#endif
        }
    }

    //Calculate the execution time of the 2 norm of a vector
    btime = get_cur_time();
    result = twoNorm(vec_x, size);
    etime = get_cur_time();

    //verify the result
    if(verify(result, vec_x, size))
    {
#ifdef DEBUG
        printf("The result is accurate!\n");
#endif
    }
    else
    {
#ifdef DEBUG
        printf("The result is not acccurate! Exit Now!\n");
#endif
        return EXIT_FAILURE;
    }

    //Free the allocated memory
    free(vec_x);
    vec_x = NULL;

#ifdef DEBUG
    printf("Elapsed time is %lf seconds\n", etime - btime);
    /* Fixed: divide by 1e9 so the value actually matches the "GOps/sec"
     * label, consistent with the non-DEBUG branch below. */
    printf("The FLOPS is %lf GOps/sec\n", 2.0 * size / (etime - btime) / 1e+9);
#else
    printf("%.16f\t", etime - btime);
    printf("%.16f\n", 2.0 * size / (etime - btime) / 1e+9);
#endif

    return EXIT_SUCCESS;
}
예제 #3
0
/*
 * Handles a received SIP 4XX-class response on a GB (GB/T 28181) connection.
 *
 * 401: copies the server's WWW-Authenticate challenge into the connection
 *      state and resends the REGISTER (or, if unregistering, UNREGISTER)
 *      request with credentials.
 * 403: tears down the connection and resets state so registration restarts.
 *
 * Returns 0 after handling (or ignoring) the response, -1 on NULL arguments.
 */
int GB_handle_RCV_STATUS_4XX(GB_CONNECT_STATE *gb_cons, osip_event_t * osip_event)
{
	if(gb_cons == NULL || osip_event == NULL || osip_event->sip == NULL)
	{
		return -1;
	}
	
	switch(osip_event->sip->status_code)
	{
		case 401: // Unauthorized
		{
			if(gb_cons->cur_state == GB_STATE_REGISTER)  
			{
				osip_www_authenticate_t * wwwa;

				// A stored challenge means a previous authenticated
				// attempt was already rejected once — log and retry anyway.
				if(gb_cons->wwwa != NULL)
				{
					TRACE(SCI_TRACE_NORMAL,MOD_GB,"%s  line=%d  Unauthorized, Please Check Carefully!\n",__FUNCTION__,__LINE__);
				}
				
				// Keep a private clone of the first challenge for the retry.
				wwwa = osip_list_get (&osip_event->sip->www_authenticates, 0);
				if (wwwa != NULL)
				{
					osip_www_authenticate_free(gb_cons->wwwa);
					osip_www_authenticate_clone(wwwa, &gb_cons->wwwa);
					GB_sipd_register_auth(gb_cons, 0); // REGISTER request with authentication
					gb_cons->last_sendtime = get_cur_time()/1000;
				}
			}
			else if(gb_cons->cur_state == GB_STATE_RUNNING && gb_cons->bUnRegister == 1)  
			{
				osip_www_authenticate_t * wwwa;

				wwwa = osip_list_get (&osip_event->sip->www_authenticates, 0);
				if (wwwa != NULL)
				{
					osip_www_authenticate_free(gb_cons->wwwa);
					osip_www_authenticate_clone(wwwa, &gb_cons->wwwa);
					GB_sipd_register_auth(gb_cons, 1); // UNREGISTER request with authentication
					gb_cons->last_sendtime = get_cur_time()/1000;
				}
			}
		}
		break;
		case 403: // Forbidden
		{
			// Re-authenticate from scratch: drop the connection, reset
			// state and config so the main loop registers again.
			if(gb_cons->cur_state == GB_STATE_RUNNING && gb_cons->bUnRegister == 0)
			{
				close(gb_cons->connfd);
				GB_ResetConState(gb_cons);

				GB_Refresh_GBCfg();
				GB_Set_gGBConnStatus(0);

				TRACE(SCI_TRACE_NORMAL,MOD_GB,"%s  line=%d  Get 403 err, Register again\n",__FUNCTION__,__LINE__);
			}
			
		}
		break;

		default:
			TRACE(SCI_TRACE_NORMAL,MOD_GB,"Can't handle sip->status_code=%d  message\n",osip_event->sip->status_code);
			break;
	}
	
	return 0;
}
예제 #4
0
/*
 * Handles a received SIP 2XX (success) response on a GB connection.
 *
 * If the response's Call-ID matches a locally recorded request, that
 * request is acknowledged (keep-alive resets the timeout counter) and
 * the record is removed. Otherwise the response is treated as the reply
 * to a REGISTER (promote to RUNNING) or UNREGISTER (tear down).
 *
 * Returns 0 after handling, -1 on NULL arguments.
 */
int GB_handle_RCV_STATUS_2XX(GB_CONNECT_STATE *gb_cons, osip_event_t * osip_event)
{
	GB_Record_Node *record = NULL;
	int index = -1;

	if(gb_cons == NULL || osip_event == NULL || osip_event->sip == NULL || osip_event->sip->call_id == NULL)
	{
		return -1;
	}
	
	record = GB_Find_Record_Node_by_Call_ID(gb_cons,osip_event->sip->call_id ,&index);
	if(record != NULL)
	{
		switch(record->cmd)
		{
			case gb_CommandType_KeepAlive:
			{
				// Peer answered the keep-alive: it is alive again.
				gb_cons->keepalive_timeout_cnt = 0;
			}
			break;
			
			default:
			{
				TRACE(SCI_TRACE_NORMAL,MOD_GB,"Get 200 OK, But do nothing\n");
			}
			break;
		}

		GB_Remove_Record_Node(gb_cons, index);
	}
	else
	{
		if(gb_cons->cur_state == GB_STATE_REGISTER)  
		{
			gb_cons->cur_state = GB_STATE_RUNNING;	// registration succeeded

			// Challenge no longer needed once registered.
			if (gb_cons->wwwa)
			{
				osip_www_authenticate_free(gb_cons->wwwa);
				gb_cons->wwwa = NULL;	
			}

			gb_cons->last_registertime = gb_cons->last_keepalivetime = get_cur_time()/1000;
			GB_Set_gGBConnStatus(1);

			TRACE(SCI_TRACE_NORMAL,MOD_GB,"%s  line=%d REGISTER Success!\n",__FUNCTION__,__LINE__);

//			GB_Change_Mode(0);
		}
		else if(gb_cons->cur_state == GB_STATE_RUNNING && gb_cons->bUnRegister == 1)  
		{
			if (gb_cons->wwwa)
			{
				osip_www_authenticate_free(gb_cons->wwwa);
				gb_cons->wwwa = NULL;	
			}

			// Unregister acknowledged: close, reset and go offline.
			close(gb_cons->connfd);
			GB_ResetConState(gb_cons);

			GB_Refresh_GBCfg();
			GB_Set_gGBConnStatus(0);

			TRACE(SCI_TRACE_NORMAL,MOD_GB,"%s  line=%d UNREGISTER Success!\n",__FUNCTION__,__LINE__);
		}
	}

	return 0;
}
예제 #5
0
/*
 * GB (GB/T 28181) SIP daemon thread: drives every connection's
 * connect/register/keep-alive/unregister state machine and multiplexes
 * the connection sockets plus two local message sockets with poll().
 *
 * The loop per iteration: (1) rebuild the poll table, (2) advance each
 * connection's state machine and timers, (3) poll with a 1s timeout,
 * (4) dispatch local messages and readable connection data.
 *
 * Runs detached; returns NULL only on allocation failure.
 */
static void* GB_Server(void *pParam)
{
	pthread_detach(pthread_self());
	PRM_GB_SIPD_CFG gb_cfg;
	struct pollfd poll_table[MAX_GB_MSG_NUM+MAX_GB_CONNECTION_NUM+1];
	struct pollfd *poll_entry = NULL;
	int i, ret;
	int rlen = 0;
	GB_CONNECT_STATE *gb_cons = NULL;
	char localip[16] = {0};	
	char sipserver_ip[16] = {0};
	char localmsg_buf[GB_MAX_PLAYLOAD_BUF];

	gb_cons = SN_MALLOC(MAX_GB_CONNECTION_NUM*sizeof(GB_CONNECT_STATE));
	if(gb_cons == NULL)
	{
		printf("SN_MALLOC gb_cons Err!\n");
		return NULL;
	}
	SN_MEMSET(gb_cons, 0, MAX_GB_CONNECTION_NUM*sizeof(GB_CONNECT_STATE));	
	SN_MEMSET(&gb_cfg,0,sizeof(gb_cfg));

	for(i=0; i<MAX_GB_CONNECTION_NUM; i++)
	{
		GB_ResetConState(&gb_cons[i]);
	}

	Log_pid(__FUNCTION__);
	
	while(1)
	{
		// Phase 1: rebuild the poll table from scratch each iteration.
		SN_MEMSET(poll_table, 0, sizeof(poll_table));
		poll_entry = poll_table;

		if(localmsg_readSock > 0)
		{
			poll_entry->fd = localmsg_readSock;
			poll_entry->events = POLLIN;
			poll_entry++;
		}
		if(gbmsg_readSock > 0)
		{
			poll_entry->fd = gbmsg_readSock;
			poll_entry->events = POLLIN;
			poll_entry++;
		}

		// Hold off reconnecting for 30s after an IP change.
		if(gb_ipchange == 1)
		{
			if(IsTimeOfArrival(gb_ipchange_time,30))
			{
				gb_ipchange = 0;
			}
		}

		// Phase 2: advance each connection's state machine.
		for(i=0; i<MAX_GB_CONNECTION_NUM; i++)
		{
			gb_cons[i].poll_act = NULL;

			if(gb_cons[i].cur_state > GB_STATE_CONNECTING)
			{
				// Connected (registering or running): watch the socket.
				poll_entry->fd = gb_cons[i].connfd;
				poll_entry->events = POLLIN;
				gb_cons[i].poll_act = poll_entry;
				poll_entry++;
			}
			else if(gb_cons[i].cur_state == GB_STATE_IDEL)
			{
				if(gb_ipchange == 1)
				{
					TRACE(SCI_TRACE_NORMAL,MOD_GB, "IP Changed! Please Wait a Moment!");
					continue;
				}

				GB_Get_GBCfg(&gb_cfg);

				if(gb_cfg.enable == 1)  // GB mode enabled
				{
					// Both IDs are mandatory; without them reset and retry later.
					if(SN_STRLEN((char *)gb_cfg.deviceID) <= 0 || SN_STRLEN((char *)gb_cfg.sipserver_ID) <= 0)
					{
						TRACE(SCI_TRACE_NORMAL,MOD_GB,"%s  line=%d deviceID=%s   sipserver_ID=%s\n",__FUNCTION__,__LINE__,gb_cfg.deviceID,gb_cfg.sipserver_ID);

						if(gb_cons[i].connfd > 0)
							close(gb_cons[i].connfd);
						GB_ResetConState(&gb_cons[i]);
						GB_Refresh_GBCfg();
						continue;
					}
					
					// Enter GB mode: create the transport socket.

					
					if(gb_cfg.transfer_protocol == GB_TRANSFER_UDP) // UDP
					{
						gb_cons[i].connfd = GB_CreateSocket(SOCK_DGRAM, gb_cfg.local_port);
					}
					else // TCP
					{
						gb_cons[i].connfd = GB_CreateSocket(SOCK_STREAM, gb_cfg.local_port);
					}

					if(gb_cons[i].connfd <= 0)
					{
						TRACE(SCI_TRACE_NORMAL,MOD_GB,"%s  line=%d GB_CreateSocket Err\n",__FUNCTION__,__LINE__);
						continue;
					}

					SN_MEMSET(sipserver_ip,0,sizeof(sipserver_ip));
					SN_SPRINTF(sipserver_ip,sizeof(sipserver_ip),"%d.%d.%d.%d",
						gb_cfg.sipserver_ip[0],gb_cfg.sipserver_ip[1],gb_cfg.sipserver_ip[2],gb_cfg.sipserver_ip[3]);

					gb_cons[i].transfer_protocol = gb_cfg.transfer_protocol;
					SN_MEMSET(&gb_cons[i].remoteAddr,0,sizeof(gb_cons[i].remoteAddr));
					gb_cons[i].remoteAddr.sin_family = AF_INET;
					gb_cons[i].remoteAddr.sin_addr.s_addr =inet_addr(sipserver_ip);
					gb_cons[i].remoteAddr.sin_port = htons(gb_cfg.sipserver_port);
					gb_cons[i].beginconect_time = system_uptime();
					gb_cons[i].cur_state = GB_STATE_CONNECTING;	
					ret = connect(gb_cons[i].connfd, (struct sockaddr *) &(gb_cons[i].remoteAddr), sizeof(gb_cons[i].remoteAddr));
					if ( ret == 0 || errno == EISCONN)
					{
						// Connected immediately: start registering now.
						gb_cons[i].cur_state = GB_STATE_REGISTER;	
						poll_entry->fd = gb_cons[i].connfd;
						poll_entry->events = POLLIN;
						gb_cons[i].poll_act = poll_entry;
						poll_entry++;				

						GB_GetLocalIPaddrFromSock(gb_cons[i].connfd,localip,sizeof(localip));
						GB_Set_LocalIP(localip);
						
						GB_sipd_register(&gb_cons[i], 0); // REGISTER request without authentication
						gb_cons[i].last_sendtime = get_cur_time()/1000;
					}
				}
				else
				{
					GB_Set_gGBConnStatus(0);
					usleep(100);
				}
			}
			else if(gb_cons[i].cur_state == GB_STATE_CONNECTING)
			{
				// Non-blocking connect in progress: retry/complete it.
				ret = connect(gb_cons[i].connfd, (struct sockaddr *) &(gb_cons[i].remoteAddr), sizeof(gb_cons[i].remoteAddr));
				if ( ret == 0 || errno == EISCONN)
				{
					gb_cons[i].cur_state = GB_STATE_REGISTER;	
					poll_entry->fd = gb_cons[i].connfd;
					poll_entry->events = POLLIN;
					gb_cons[i].poll_act = poll_entry;
					poll_entry++;			

					GB_GetLocalIPaddrFromSock(gb_cons[i].connfd,localip,sizeof(localip));
					GB_Set_LocalIP(localip);
					
					GB_sipd_register(&gb_cons[i], 0); // REGISTER request without authentication
					gb_cons[i].last_sendtime = get_cur_time()/1000;
				}
				else
				{
					// Give up after 10 seconds of trying to connect.
					if (system_uptime() - gb_cons[i].beginconect_time > 10)
					{
						TRACE(SCI_TRACE_NORMAL,MOD_GB,"%s  line=%d connect timeout\n",__FUNCTION__,__LINE__);
						close(gb_cons[i].connfd);
						GB_ResetConState(&gb_cons[i]);						
					}
				}
			}

			if(gb_cons[i].cur_state == GB_STATE_REGISTER)  // registration not confirmed yet
			{
				if(IsTimeOfArrival(gb_cons[i].last_sendtime, 60)) // re-register after a 60s interval
				{
					TRACE(SCI_TRACE_NORMAL,MOD_GB,"%s  line=%d REGISTER Fail !  try again!\n",__FUNCTION__,__LINE__);
					close(gb_cons[i].connfd);
					GB_ResetConState(&gb_cons[i]);
					GB_Refresh_GBCfg();
				}
			}
			else if(gb_cons[i].cur_state == GB_STATE_RUNNING && gb_cons[i].bUnRegister == 1) // unregister pending
			{
				if(IsTimeOfArrival(gb_cons[i].last_sendtime, 5)) // after 5s treat the unregister as successful
				{
					TRACE(SCI_TRACE_NORMAL,MOD_GB,"%s  line=%d UNREGISTER Success!\n",__FUNCTION__,__LINE__);
					close(gb_cons[i].connfd);
					GB_ResetConState(&gb_cons[i]);
					GB_Set_gGBConnStatus(0);
					GB_Refresh_GBCfg();
				}
			}
			else if(gb_cons[i].cur_state == GB_STATE_RUNNING && gb_cons[i].bUnRegister != 1)
			{
				if(IsTimeOfArrival(gb_cons[i].last_registertime, gb_cfg.register_period-60)) // refresh registration 60s early
				{
					gb_cons[i].cur_state = GB_STATE_REGISTER;	
					GB_sipd_register(&gb_cons[i], 0); // REGISTER request without authentication
					gb_cons[i].last_sendtime = get_cur_time()/1000;
				}

				if(gb_cons[i].keepalive_timeout_cnt > gb_cfg.keepalive_timeout_cnt)  // peer considered offline
				{
					close(gb_cons[i].connfd);
					GB_ResetConState(&gb_cons[i]);
					GB_Set_gGBConnStatus(2);
				}
				else if(IsTimeOfArrival(gb_cons[i].last_keepalivetime, gb_cfg.keepalive_interval-1)) // keep-alive; -1 compensates for poll's 1s timeout
				{
					gb_cons[i].keepalive_timeout_cnt++;
					
					GB_Send_KeepAlive(&gb_cons[i]);
					
					gb_cons[i].last_keepalivetime = get_cur_time()/1000;
				}
			}
		}

		// Phase 3: wait up to 1s for activity on any registered fd.
		ret = poll(poll_table, poll_entry-poll_table, 1000);

		if(ret<0) 
		{
			perror("GB_Server:poll error");
			poll(NULL, 0, 1000);
			continue;
		}
		poll_entry = poll_table;

		// Phase 4: dispatch readiness in the same order the table was built.
		if(localmsg_readSock > 0)
		{
			if(poll_entry->revents & POLLIN)
			{
				// Handle a local control message.
				SN_MEMSET(localmsg_buf, 0, sizeof(localmsg_buf));
				rlen = recvfrom(localmsg_readSock, localmsg_buf,  sizeof(localmsg_buf), 0, NULL, 0);
				if (rlen > 0 && rlen <= sizeof(localmsg_buf))
				{
					SN_MSG * pMsg;
					SYS_MSG *sys_msg = (SYS_MSG *)localmsg_buf;
					pMsg = sys_msg->pmsg;
					GB_MsgHandle(pMsg, &(gb_cons[0]));
				}
			}

			poll_entry++;
		}

		if(gbmsg_readSock > 0)
		{
			if(poll_entry->revents & POLLIN)
			{
				// Handle a message from the media-stream module
				// (currently drained and ignored).
				SN_MEMSET(localmsg_buf, 0, sizeof(localmsg_buf));
				rlen = recvfrom(gbmsg_readSock, localmsg_buf,  sizeof(localmsg_buf), 0, NULL, 0);
				if (rlen > 0 && rlen <= sizeof(localmsg_buf))
				{
					
				}
			}
			

			poll_entry++;
		}

		for(i=0; i<MAX_GB_CONNECTION_NUM; i++)
		{
			if (gb_cons[i].poll_act != NULL)
			{
				if(gb_cons[i].poll_act->revents & POLLIN)
				{
					// Append to the connection's reassembly buffer.
					rlen = recv(gb_cons[i].connfd, gb_cons[i].buffer_ptr,  gb_cons[i].buffer_end - gb_cons[i].buffer_ptr, 0);
					if(rlen > 0)
					{
						gb_cons[i].buffer_ptr += rlen;
						gb_cons[i].datasize += rlen;

						ret = is_recv_whole_messages(&gb_cons[i]);
						
						if(ret > 0) // a complete message has been received
						{
							// Process the received data.
							GB_handle_messages(&gb_cons[i]);

							GB_reset_recv_buffer(&gb_cons[i], ret);
						}
					}
				}
				else if (gb_cons[i].poll_act->revents & POLLERR || gb_cons[i].poll_act->revents & POLLHUP || gb_cons[i].poll_act->revents & POLLNVAL)
				{															
					// Socket error while running and the peer already missed
					// too many keep-alives: drop and mark offline.
					if(gb_cons[i].cur_state == GB_STATE_RUNNING 
							&& gb_cons[i].keepalive_timeout_cnt > gb_cfg.keepalive_timeout_cnt)
					{
						close(gb_cons[i].connfd);
						GB_ResetConState(&gb_cons[i]);
						GB_Set_gGBConnStatus(2);
						GB_Refresh_GBCfg();
					}
				}
			}
		}
		
	}

	return NULL;
}
예제 #6
0
/*
 * Dispatches a local control message to the GB connection state machine.
 *
 * Parameter updates trigger a graceful unregister (or an immediate reset
 * when offline); reboot/power-off/IP-change requests unregister and leave
 * GB mode; status requests are answered with the current connection state.
 *
 * Always frees the message before returning.
 */
static void GB_MsgHandle(SN_MSG *msg, GB_CONNECT_STATE *gb_cons)
{
	if (msg == NULL)
		return;

	switch (msg->msgId)
	{
		case MSG_ID_FWK_UPDATE_PARAM_IND:
		{
			stParamUpdateNotify *stNotify = (stParamUpdateNotify *)msg->para;
			switch(stNotify->prm_id)
			{
				case PRM_ID_GB_SIPD_CFG:
				{
					if(GB_Get_gGBConnStatus() == 0)
					{
						// Not connected: just reset and reload the config.
						if(gb_cons->connfd > 0)
						{
							close(gb_cons->connfd);
							GB_ResetConState(gb_cons);
						}
						GB_Refresh_GBCfg();
					}
					else if((gb_cons->cur_state == GB_STATE_RUNNING && gb_cons->bUnRegister == 1)// already unregistering
						|| (GB_Get_gGBConnStatus() == 2) // offline
						)  
					{
						if(gb_cons->connfd > 0)
						{
							close(gb_cons->connfd);
							GB_ResetConState(gb_cons);
						}
						GB_Set_gGBConnStatus(0);
						GB_Refresh_GBCfg();
					}
					else
					{
						// Online: unregister gracefully before applying config.
						gb_cons->bUnRegister = 1;
						GB_sipd_register(gb_cons, 1); // UNREGISTER request without authentication
						gb_cons->last_sendtime = get_cur_time()/1000;
						GB_Set_gGBConnStatus(0);
					}		
				}
				break;
				case PRM_ID_GB_SIPD_DEVMODE_CFG:
				{
					
				}
				break;
				
				default:
				break;
			}
		}
		break;
		
		case MSG_ID_FWK_REBOOT_REQ:
		case MSG_ID_FWK_POWER_OFF_REQ:
		case MSG_IF_FWK_IPCHANGE_IND:
		{
			PRM_GB_SIPD_CFG gb_cfg;
			
			TRACE(SCI_TRACE_NORMAL,MOD_GB,"%s  line=%d    msg->msgId=%d\n",__FUNCTION__,__LINE__,msg->msgId);

			SN_MEMSET(&gb_cfg,0,sizeof(gb_cfg));

			GB_Get_GBCfg(&gb_cfg);

			// Nothing to do if GB mode is disabled.
			if(gb_cfg.enable != 1)
			{
				break;
			}
			
			if(GB_Get_gGBConnStatus() == 1)
			{
				gb_cons->bUnRegister = 1;
				GB_sipd_register(gb_cons, 1); // UNREGISTER request without authentication
				gb_cons->last_sendtime = get_cur_time()/1000;
			}
			else
			{
				if(gb_cons->connfd > 0)
				{
					close(gb_cons->connfd);
					GB_ResetConState(gb_cons);
				}

				GB_Set_gGBConnStatus(0);
			}

				// Block reconnects for a while (see GB_Server's 30s hold-off).
				gb_ipchange = 1;
				gb_ipchange_time = get_cur_time()/1000;
				// Leave GB mode.
			
		}
		break;

		case MSG_ID_GB_GET_STATUS_REQ:
		{
			GB_GET_STATUS_RSP rsp;

			SN_MEMSET(&rsp,0,sizeof(rsp));

			rsp.result = 0;
			rsp.status = GB_Get_gGBConnStatus();

			SendMessageEx(msg->user, MOD_GB, msg->source, msg->xid, msg->thread, msg->msgId + 1, &rsp, sizeof(rsp));
		}
		break;
		
		default:
		{
			
		}
		break;
	}
	FreeMessage(&msg);
}
예제 #7
0
파일: sgemm.c 프로젝트: 529038378/BLASX
/*
 * CBLAS-compatible SGEMM front-end that validates arguments, then
 * dispatches the multiply to one of three back-ends:
 *   1: host CBLAS, 2: cublasXt, 3: BLASX multi-GPU path.
 */
void cblas_sgemm(const enum CBLAS_ORDER Order, 
                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_TRANSPOSE TransB, 
                 const int M, const int N, const int K, 
                 const float alpha, const float *A, const int lda, 
                 const float *B, const int ldb,
                 const float beta, float *C, const int ldc )
{
    cublasOperation_t transa, transb;
    cublasStatus_t status;
    /*---error handler---*/
    /* Effective dimensions of A and B as stored, per the transpose flags. */
    int nrowa, ncola, nrowb, ncolb;
    if (TransA == CblasNoTrans) {
        nrowa = M;
        ncola = K;
    } else {
        nrowa = K;
        ncola = M;
    }
    if (TransB == CblasNoTrans) {
        nrowb = K;
        ncolb = N;
    } else {
        nrowb = N;
        ncolb = K;
    }
    int nrowc = M;
    int ncolc = N;
    /* Standard BLAS xerbla-style argument checking; info is the 1-based
       index of the first invalid argument. */
    int info = 0;
    if (CBLasTransToCuBlasTrans(TransA,&transa) <  0) info = 1;
    else if (CBLasTransToCuBlasTrans(TransB,&transb) < 0) info = 2;
    else if (M < 0) info = 3;
    else if (N < 0) info = 4;
    else if (K < 0) info = 5;
    else if (lda < MAX(1, nrowa)) info = 8;
    else if (ldb < MAX(1, nrowb)) info = 10;
    else if (ldc < MAX(1, M)) info = 13;
    if (info != 0) {
        xerbla_(ERROR_NAME, &info);
        return;
    }
    /*-------------------*/
    
    /*----dispatcher-----*/
    int type = 0; //1:cpu 2:cublasxt 3:blasx
    if (M <= 0 || N <= 0 || K <= 0) type = 1;
    /* NOTE(review): because of this if/else, type ends up either 1 or 3;
       the cublasXt path (type 2) is unreachable from here — confirm
       whether that is intentional. */
    if (type == 0 && (M > 1000 || N > 1000 || K > 1000)) type = 3;
    else                                                 type = 1;
    //Blasx_Debug_Output("type after dispatcher:%d\n",type);
    /*-------------------*/
    

    switch (type) {
        case 1:
        CPU_BLAS:
            /* Host path; also the fallback target if cublasXt fails. */
            Blasx_Debug_Output("calling cblas_sgemm:");
            if (cpublas_handle == NULL) blasx_init(CPU);
            if (cblas_sgemm_p == NULL) blasx_init_cblas_func(&cblas_sgemm_p, "cblas_sgemm");
            (*cblas_sgemm_p)(Order,TransA,TransB,M,N,K,alpha,A,lda,B,ldb,beta,C,ldc);
            break;
        case 2:
            if (cublasXt_handle == NULL) blasx_init(CUBLASXT);
            Blasx_Debug_Output("calling cublasSgemmXt:");
            status = cublasXtSgemm(cublasXt_handle,
                                   transa, transb,
                                   M, N, K,
                                   (float*)&alpha, (float*)A, lda,
                                   (float*)B, ldb,
                                   (float*)&beta, (float*)C, ldc);
            if( status != CUBLAS_STATUS_SUCCESS ) goto CPU_BLAS;
            break;
        case 3:
            Blasx_Debug_Output("calling BLASX:\n");
            /* Pin host memory so GPU transfers can use DMA. */
            cudaHostRegister(A,sizeof(float)*nrowa*ncola,cudaHostRegisterPortable);
            cudaHostRegister(B,sizeof(float)*nrowb*ncolb,cudaHostRegisterPortable);
            cudaHostRegister(C,sizeof(float)*nrowc*ncolc,cudaHostRegisterPortable);
#ifdef BENCHMARK
            /* NOTE(review): uses FLOPS_DGEMM in the SGEMM path — presumably
               the flop-count formula is identical; confirm. */
            double Gflops = FLOPS_DGEMM(M, N, K)/(1000000000);
            double gpu_start, gpu_end;
            gpu_start = get_cur_time();
#endif
            if (is_blasx_enable == 0) blasx_init(BLASX);
            assert( is_blasx_enable == 1 );
            assert( SYS_GPUS > 0 );
            assert( event_SGEMM[0] != NULL );
            assert( C_dev_SGEMM[0] != NULL );
            assert( handles_SGEMM[0] != NULL );
            assert( streams_SGEMM[0] != NULL );
            /* NOTE(review): fixed bound of 10 assumes SYS_GPUS <= 10. */
            LRU_t* LRUs[10];
            int GPU_id = 0;
            for (GPU_id = 0; GPU_id < SYS_GPUS; GPU_id++)    LRUs[GPU_id] = LRU_init( GPU_id );
            blasx_sgemm(SYS_GPUS, handles_SGEMM, LRUs,
                        TransA, TransB,
                        M, N, K, alpha,
                        A, lda,
                        B, ldb,
                        beta,
                        C, ldc);
            for (GPU_id = 0; GPU_id < SYS_GPUS; GPU_id++)    LRU_free( LRUs[GPU_id], GPU_id );
#ifdef BENCHMARK
            gpu_end = get_cur_time();
            printf("BLASX (M:%5d,N:%5d,K:%5d) Speed:%9.1f type:%2d\n", M, N, K, (double)Gflops/(gpu_end - gpu_start), type);
#endif
            cudaHostUnregister(A);
            cudaHostUnregister(B);
            cudaHostUnregister(C);
            break;
        default:
            break;
    }

    //Blasx_Debug_Output("eventually use type:%d to compute\n",type);
}
예제 #8
0
/*
 * Network I/O configuration benchmark thread.
 *
 * Spawns a local echo server thread, connects to it over TCP, and
 * repeatedly measures round-trip send/recv time for a message buffer.
 * If the measured times deviate too much, the message size is doubled
 * (MULTIPLIER_FOR_SUB_ITER) and the test retried, up to the benchmark's
 * threshold. The final result is added to the result table.
 *
 * Returns NULL always; status is reported via targs->ret.
 */
void *net_io_config(void *args) {
	/* Get the args passed from parent thread */
	thread_args_t *targs = (thread_args_t *) args;
	cmd_line_args *cmd_args = targs->cmd_args;
	benchmark_test *bmtest = targs->bmtest;
	result_table_t *res_table = targs->res_table;
	test_results_t *result = NULL;
	int retry = 0;
	int test_status;

	/* Other Local variables of this function */
	int loop = 0, done = 0, count = 0;
	struct timespec start_time, end_time;
	long long time_diff = 0;
	pthread_t server_tid = 0;
	struct sockaddr_in server_det;
	int cl_sock_fd = 0;
	server_arguments_t *serv_args = NULL;
	pthread_attr_t attr;

	/* Use this to set the timeout value for the socket */
	struct timeval t;

	char *msg = NULL;
	int msg_size = DEF_NETIO_SZ;
	int new_iters = bmtest->_iterations;
	double clock_accuracy;
	char retry_status = UNACCEPTABLE_DEVIATIONS;
	char print_flag = FALSE;

	/*
	 * Make sure both client and server threads are running
	 * the right sched policy and priority
	 */

	/* Keep server thread alive with this stay_alive argument variable */
	serv_args = (server_arguments_t *) malloc(sizeof(server_arguments_t));
	if (serv_args == (server_arguments_t *) NULL) {
		RTMB_printf(stderr, "net_io_config: malloc() failed\n");
		abort();
	}
	memset(serv_args, 0, sizeof(server_arguments_t));
	serv_args->stay_alive = 1;
	serv_args-> iterations = new_iters;
	pthread_attr_init(&attr);

	clock_accuracy = get_min_exec_time(res_table);
	/* Both threads run SCHED_FIFO at high priority for stable timings. */
	if (set_pthreadattr_sched_param(&attr, SCHED_FIFO, HIGH_PRIO_VAL) < 0) {
		targs->ret = ERROR;
		return (void *) NULL;
	}

	/* Start the server thread and let the server get into accept() code */
	if (pthread_create(&server_tid, &attr, net_server_impl, serv_args)
	        != SUCCESS) {
		RTMB_printf(stderr,
		        "net_io_config: Error creating server thread\n");
		perror("net_io_config");
		server_tid = 0;
		cleanup(serv_args, server_tid, msg, cl_sock_fd);
		targs->ret = ERROR;
		return (void *) NULL;
	}

	/* Alloc and init the msg buffer */
	msg = (char *) msg_alloc_init(msg, msg_size);
	if (msg == NULL) {
		RTMB_printf(stderr, "net_io_config: malloc() failed\n");
		abort();
	}

	/* Set the params for the server side stuff */
	server_det.sin_family = AF_INET;

	if (get_local_address(&server_det.sin_addr) < 0) {
		RTMB_printf(stderr,
		        "net_io_config: unable to get local IP \n");
		server_tid = 0;
		cleanup(serv_args, server_tid, msg, cl_sock_fd);
		targs->ret = ERROR;
		return (void *) NULL;
	}

	server_det.sin_port = htons(SERVER_PORT);
	memset(server_det.sin_zero, '\0', sizeof(server_det.sin_zero));

	/* Create the socket fd */
	cl_sock_fd = socket(AF_INET, SOCK_STREAM, 0);
	if (cl_sock_fd == -1) {
		RTMB_printf(stderr,
		        "net_io_config: Error creating client socket\n");
		perror("net_io_config");
		cleanup(serv_args, server_tid, msg, cl_sock_fd);
		targs->ret = ERROR;
		return (void *) NULL;
	}

	/*
	 * We do not want the server thread to indefinitely wait.
	 * Set timeout to some safe value (10 secs).
	 */
	t.tv_sec = 10;
	t.tv_usec = 0;
	if (setsockopt(cl_sock_fd, SOL_SOCKET, SO_RCVTIMEO, (void *) &t,
	        sizeof(struct timeval)) != 0) {
		RTMB_printf(stderr,
		        "net_io_config: Error while setting socket option\n");
		cleanup(serv_args, server_tid, msg, cl_sock_fd);
		targs->ret = ERROR;
		return (void *) NULL;
	}

	/* give some time for the server thread to start */
	sleep(2);

	/* Connect to the server */
	/* NOTE(review): if all 10 retries fail, the code falls through and
	 * sends on an unconnected socket — confirm whether the later send()
	 * error path is relied on to catch that. */
	while ((connect(cl_sock_fd, (struct sockaddr *) &server_det,
	        sizeof(server_det)) == -1) && (retry <= 10)) {
		do_nano_sleep(400LL * MS);
		retry++;
		RTMB_verbose_printf(stderr, cmd_args, 1,
		        "net_io_config: Retrying connect to the "
			        "server socket\n");
	}

	/* Connection done. Now, try exchanging some msgs with server */
	result = create_test_result(1, new_iters);
	if (result == NULL) {
		RTMB_printf(stderr,
		        "ERROR: Cannot allocate memory for test_results_t");
		RTMB_printf(stderr, " in net_io_config()\n");
		abort();
	}
	strcpy((char *) result->desc, "Network I/O configuration test");

	RTMB_verbose_printf(stdout, cmd_args, 1, "\nTest Report for %s:\n",
	        (char*) &result->desc);
	RTMB_verbose_printf(stdout, cmd_args, 1,
	        "========================================================="
		        "==\n");
	RTMB_verbose_printf(stdout, cmd_args, 1,
	        "\nnet_io_config : Total number of iterations = %d\n\n",
	        new_iters);

	while (!done) {
		int bytes_remaining;
		char* buffer;

		for (loop = 0; loop < new_iters; loop++) {
			int ret = SUCCESS, retry = 0;
			bytes_remaining = msg_size;
			buffer = msg;

			/* Record start time */
			if (get_cur_time(&start_time) == ERROR) {
				cleanup(serv_args, server_tid, msg, cl_sock_fd);
				free_chain(result, 0);
				targs->ret = ERROR;
				return (void *) NULL;
			}

			/* Send the msg */
			if (send(cl_sock_fd, msg, msg_size, 0) != msg_size) {
				cleanup(serv_args, server_tid, msg, cl_sock_fd);
				free_chain(result, 0);
				targs->ret = ERROR;
				return (void *) NULL;
			}

			/* Wait till you recv response */
			/* Loop until the full echo is back, retrying on EINTR
			 * and on partial reads. */
			do {
				ret = recv(cl_sock_fd, buffer, bytes_remaining,
				        0);
				retry = 0;
				if (ret == ERROR) {
					if (errno== EINTR) {
						retry = 1;
					}
				} else if (ret > 0 && ret < bytes_remaining) {
					buffer += ret;
					bytes_remaining -= ret;
					retry = 1;

#ifdef TRACE_MSG
					RTMB_printf( stdout, "server: "
						"received = %d   "
						"expected = %d "
						"get_next = %d \n",
						ret,
						bytes_remaining + ret,
						bytes_remaining);

				} else if ( ret> 0 && ret == bytes_remaining
					&& ret != msg_size) {
					RTMB_printf( stdout, "server: "
						"received = %d   "
						"expected = %d "
						"msg_size  = %d \n",
						ret,
						bytes_remaining,
						msg_size);
#endif

				}
			} while (retry != 0);

			/*  Now, record end time */
			if (get_cur_time(&end_time) == ERROR) {
				cleanup(serv_args, server_tid, msg, cl_sock_fd);
				free_chain(result, 0);
				targs->ret = ERROR;
				return (void *) NULL;
			}

			/* If there was error other than EINTR, return fail */
			if ((retry == 0) && (ret == ERROR)) {
				cleanup(serv_args, server_tid, msg, cl_sock_fd);
				free_chain(result, 0);
				targs->ret = ERROR;
				return (void *) NULL;
			}

			/* Get the time difference of start and end times */
			time_diff = get_time_diff(start_time, end_time);
			RTMB_verbose_printf(stdout, cmd_args, 2,
			        "net_io_config: Difference between end"
				        " and start times "
				        "= %.3f us \n", MICROSEC(time_diff));

			fflush(stdout);

			add_entry(result, time_diff, 0);
		}

		/* Only evaluate pass criteria when the measured times are
		 * large enough relative to clock accuracy to be meaningful. */
		if (IS_EXEC_TIME_GRT_THAN_CLK(result, clock_accuracy)) {
			print_flag = TRUE;

			/* Check against the computed median for this test */
			test_status = check_pass_criteria(result, cmd_args,
			        bmtest, 0);

			if (test_status == SUCCESS) {
				retry_status = ACCEPTABLE_DEVIATIONS;
				done = 1;
				break;
			} else {
				if (++count == bmtest->_threshold) {
					RTMB_printf(stderr,
					        "net_io_config: exceeded "
						        "maximum attempts \n");
					break;
				}
			}
		}

		if (print_flag) {
			RTMB_verbose_printf(stdout, cmd_args, 1,
			        "\nnet_io_config: Retrying test ");
			RTMB_verbose_printf(stdout, cmd_args, 1,
			        " with bigger work quantum to get"
				        " lesser variance...\n");
		}
		/*
		 * measured times are not consistent so retry with
		 * larger buffer.
		 */
		free_chain(result, 0);
		if (msg) {
			free(msg);
		}
		msg_size = msg_size * MULTIPLIER_FOR_SUB_ITER;
		msg = (char *) msg_alloc_init(msg, (msg_size));

		if (msg == NULL) {
			abort();
		}
	}

	/*net  IO rate is determined*/
	result->opern_amount = msg_size;

	add_result_to_result_table2(res_table, result, NETIO_CONFIG, bmtest);

	fflush(stdout);
	/* Clean up and leave */
	cleanup(serv_args, server_tid, msg, cl_sock_fd);
	fflush(stdout);
	targs->ret = SUCCESS;
	return (void *) NULL;
}
예제 #9
0
/**
 * @ingroup COND
 * @brief   Wait on the condition.
 *
 * The ULT calling \c ABT_cond_timedwait() waits on the condition variable
 * until it is signaled or the absolute time specified by \c abstime passes.
 * If system time equals or exceeds \c abstime before \c cond is signaled,
 * the error code \c ABT_ERR_COND_TIMEDOUT is returned.
 *
 * The user should call this routine while the mutex specified as \c mutex is
 * locked. The mutex will be automatically released while waiting. After signal
 * is received and the waiting ULT is awakened, the mutex will be
 * automatically locked for use by the ULT. The user is then responsible for
 * unlocking mutex when the ULT is finished with it.
 *
 * @param[in] cond     handle to the condition variable
 * @param[in] mutex    handle to the mutex
 * @param[in] abstime  absolute time for timeout
 * @return Error code
 * @retval ABT_SUCCESS            on success
 * @retval ABT_ERR_COND_TIMEDOUT  timeout
 */
int ABT_cond_timedwait(ABT_cond cond, ABT_mutex mutex,
                       const struct timespec *abstime)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_cond *p_cond = ABTI_cond_get_ptr(cond);
    ABTI_CHECK_NULL_COND_PTR(p_cond);
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    double tar_time = convert_timespec_to_sec(abstime);

    /* External-waiter unit: the signaler flips ext_signal through the
     * pool pointer below, which is repurposed as a signal channel for
     * ABT_UNIT_TYPE_EXT waiters. */
    ABTI_unit *p_unit;
    volatile int ext_signal = 0;

    /* NOTE(review): ABTU_calloc result is not NULL-checked before the
     * dereference below — confirm allocation-failure policy. */
    p_unit = (ABTI_unit *)ABTU_calloc(1, sizeof(ABTI_unit));
    p_unit->pool = (ABT_pool)&ext_signal;
    p_unit->type = ABT_UNIT_TYPE_EXT;

    ABTI_mutex_spinlock(&p_cond->mutex);

    /* All waiters on a condition must use the same mutex. */
    if (p_cond->p_waiter_mutex == NULL) {
        p_cond->p_waiter_mutex = p_mutex;
    } else {
        ABT_bool result = ABTI_mutex_equal(p_cond->p_waiter_mutex, p_mutex);
        if (result == ABT_FALSE) {
            ABTI_mutex_unlock(&p_cond->mutex);
            abt_errno = ABT_ERR_INV_MUTEX;
            goto fn_fail;
        }
    }

    /* Append this unit to the circular doubly-linked waiter list. */
    if (p_cond->num_waiters == 0) {
        p_unit->p_prev = p_unit;
        p_unit->p_next = p_unit;
        p_cond->p_head = p_unit;
        p_cond->p_tail = p_unit;
    } else {
        p_cond->p_tail->p_next = p_unit;
        p_cond->p_head->p_prev = p_unit;
        p_unit->p_prev = p_cond->p_tail;
        p_unit->p_next = p_cond->p_head;
        p_cond->p_tail = p_unit;
    }

    p_cond->num_waiters++;

    ABTI_mutex_unlock(&p_cond->mutex);

    /* Unlock the mutex that the calling ULT is holding */
    ABTI_mutex_unlock(p_mutex);

    /* Yield-loop until signaled or the deadline passes; on timeout the
     * unit is removed from the waiter list before leaving. */
    while (!ext_signal) {
        double cur_time = get_cur_time();
        if (cur_time >= tar_time) {
            remove_unit(p_cond, p_unit);
            abt_errno = ABT_ERR_COND_TIMEDOUT;
            break;
        }
        ABT_thread_yield();
    }
    ABTU_free(p_unit);

    /* Lock the mutex again */
    ABTI_mutex_spinlock(p_mutex);

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
예제 #10
0
/*
 * Measure disk read throughput: read `sub_iters` blocks per iteration,
 * retrying with a larger work quantum until the measured times exceed
 * the clock accuracy and pass the deviation criteria.
 *
 * Returns ACCEPTABLE_DEVIATIONS on success, UNACCEPTABLE_DEVIATIONS if
 * the retry threshold is exhausted.
 */
char disk_io_read(test_results_t* result, int iters, cmd_line_args * cmd_args,
        benchmark_test* bmtest, result_table_t* res_table) {
	FILE *read_fp = NULL;
	long long time_diff;
	int block_size = 0;
	int done, loop, j;
	double clock_accuracy;
	char retry_status = UNACCEPTABLE_DEVIATIONS;
	int sub_iters = DEFAULT_SUBITERS;

	int count = 0;
	char *_read_buf = NULL;
	char print_flag = FALSE;
	struct timespec start_time, end_time;

	block_size = get_block_size();

	assert(block_size> 0);

	/* Minimum measurable execution time of the clock. */
	clock_accuracy = get_min_exec_time(res_table);

	RTMB_verbose_printf(stdout, cmd_args, 1,
	        "\nTest Report for disk I/O read configuration:\n");

	RTMB_verbose_printf(stdout, cmd_args, 1,
	        "=================================================\n");
	RTMB_verbose_printf(stdout, cmd_args, 1,
	        "\ndisk_io_config: Total number of iterations = %d\n\n", iters);

	done = 0;
	while (!done) {
		int n;
		_read_buf = calloc(1, block_size);

		if (_read_buf == NULL) {
			RTMB_printf(stderr,
			        "calloc() failed in disk_io_read_config()\n");
			abort();
		}

		/*Make sure there is data in the file before attempting a read*/
		setup_file_for_read(block_size * sub_iters, 1);

		for (loop = 0; loop < iters; loop++) {
			open_file_for_read(&read_fp);

			if (get_cur_time(&start_time) == ERROR) {
				abort();
			}

			for (j = 0; j < sub_iters; j++) {
				if ((n = fread(_read_buf, sizeof(char),
				        block_size, read_fp)) != block_size) {
					/* BUGFIX: this is the read path; the
					 * original reported "fwrite:". */
					perror("fread:");
					abort();
				}
			}

			if (get_cur_time(&end_time) == ERROR) {
				abort();
			}

			/* Get the time difference of start and end times */
			time_diff = get_time_diff(start_time, end_time);
			RTMB_verbose_printf(stdout, cmd_args, 2,
			        "disk_io_read_config: Difference between end"
				        " and start times = %.3f us\n",
			        MICROSEC(time_diff));

			add_entry(result, time_diff, 0);
			fclose(read_fp);
		}

		if (IS_EXEC_TIME_GRT_THAN_CLK(result, clock_accuracy)) {
			print_flag = TRUE;
			if (check_pass_criteria(result, cmd_args, bmtest, 0)
			        == SUCCESS) {
				/*
				 * test passed,
				 * disk IO rate is determined
				 */
				retry_status = ACCEPTABLE_DEVIATIONS;
				done = 1;
			} else if (++count == bmtest->_threshold) {
				/*If we have completed, return error*/
				RTMB_printf(stderr,
				        "disk_io_read_config: exceeded"
					        " maximum attempts \n");
				done = 1;
			}
		}

		if (!done) {
			if (print_flag == TRUE) {
				RTMB_verbose_printf(stdout, cmd_args, 1,
				        "\ndisk_io_read_config: Retrying test");
				RTMB_verbose_printf(stdout, cmd_args, 1,
				        " with bigger work quantum to get"
					        " lesser variance...\n");
			}

			/*
			 * measured times are not accurate enough,
			 * hence retry.
			 */
			free_chain(result, 0);
			sub_iters *= MULTIPLIER_FOR_SUB_ITER;
		}

		/* BUGFIX: the original `break` statements skipped this free,
		 * leaking _read_buf on both exit paths. */
		free(_read_buf);
		_read_buf = NULL;
	}

	result->opern_amount = block_size * sub_iters;

	return retry_status;
}
예제 #11
0
/* Fill time_str with the current time (via get_cur_time) and print it,
 * followed by a newline, to stdout. */
void disp_time(char *time_str)
{
    get_cur_time(time_str);
    puts(time_str);
}
예제 #12
0
파일: print_funcs.c 프로젝트: cole14/sysres
/* Print a timestamped one-line utilization report to stdout. */
void print_free_default(double percent_used){
    const char *timestamp = get_cur_time();
    printf("%s: %lf%% Utilized\n", timestamp, percent_used);
}
예제 #13
0
/*
 * Simple chat server: accepts a client, forks a handler which forks
 * again -- the parent of the inner fork receives client messages and
 * publishes them to shared memory; the child polls shared memory,
 * parses "cmd@body" requests and dispatches them (currently "reg").
 */
int main(int argc, char *argv[])
{
    int clientfd;
    char *buf;

    /* Bind the listening port. */
    int sockfd = bindPort(MYPORT);

    /* Create the shared memory segment (message channel). */
    key_t shmid, onlineShmid;
    char *r_addr, *w_addr;
    shmid = shm_create();
    w_addr = shmat(shmid, 0, 0);
    r_addr = shmat(shmid, 0, 0);

    onlineShmid = initOnlineShmat();

    /* Scratch buffers.  BUGFIX: `temp` mirrors up to 1024 bytes of the
     * shared segment and is memset to 1024 below -- the original
     * malloc(255) overflowed the heap.  calloc also guarantees the
     * first strcmp(temp, r_addr) reads initialized memory. */
    char *temp, *time_str;
    temp = (char *)calloc(1, 1024);
    time_str = (char *)malloc(20);


    while(1)
    {
        /* Listen on the bound port. */
        if(listen(sockfd,BACKLOG) == -1)
        {
            perror("listen error");
            exit(1);
        }
        printf("listening......\n");


        /* Accept a client connection.  BUGFIX: sin_size must be a
         * socklen_t initialized to the address size, not int 0. */
        struct sockaddr_in their_addr;
        socklen_t sin_size = sizeof(their_addr);
        if((clientfd = accept(sockfd,(struct sockaddr*)&their_addr,&sin_size)) == -1)
        {
            perror("accept error");
            exit(1);
        }
        printf("discriptor:%d\n",clientfd);
        /* BUGFIX: inet_ntoa() returns a string; printing with %d was UB. */
        printf("accept from:%s\n",inet_ntoa(their_addr.sin_addr));

        /* Send the greeting message. */
        send(clientfd, WELCOME, strlen(WELCOME), 0);

        /* 512 bytes: recv() stores at most 255 and the timestamp is
         * strcat'ed on afterwards, so 255 was too small. */
        buf = (char *)malloc(512);

        /* Fork the per-connection handler. */
        pid_t ppid = fork();
        if(ppid == 0)
        {
            printf("fork ppid=0\n");

            /* Fork again: parent receives, child sends. */
            pid_t pid = fork();
            while(1)
            {
                /* Parent of the inner fork: receive from the client. */
                if(pid > 0)
                {
                    memset(buf,0,512);
                    printf("begin recv\n");

                    int recvbytes = 0;
                    if((recvbytes = recv(clientfd,buf,255,0)) <= 0)
                    {
                        perror("recv1 error");
                        close(clientfd);
                        raise(SIGKILL);
                        exit(1);
                    }


                    /* Publish the message to shared memory.  Cap the
                     * copy at 1023 so the segment stays terminated. */
                    memset(w_addr, '\0', 1024);
                    strncpy(w_addr, buf, 1023);
                    printf("w_addr->%s\n",w_addr);


                    /* Append the timestamp for logging. */
                    get_cur_time(time_str);
                    strcat(buf,time_str);
                    printf("buf:%s\n",buf);
                }

                /* Child of the inner fork: poll shared memory and
                 * process newly published requests. */
                else if(pid == 0)
                {
                    sleep(1);
                    printf("r_addr:|%s| temp:|%s|\n",r_addr, temp);

                    /* A new message arrived: snapshot it and clear shm. */
                    if(strcmp(temp,r_addr) != 0)
                    {
                        printf("swap buffer!\n");
                        get_cur_time(time_str);
                        strcpy(temp,r_addr);
                        memset(r_addr, '\0', 255);
                        printf("temp:%s\n",temp);

                        /* Requests look like "cmd@body". */
                        const char *delims = "@";
                        char *cmd = strtok( temp, delims);
                        if( NULL == cmd )
                        {
                            printf("temp:%s parse error,no cmd!\n", temp);
                            memset(temp, '\0', 1024);
                            continue;
                        }


                        char* rbody = strtok( NULL, delims);
                        if( NULL == rbody )
                        {
                            printf("temp:%s parse error,no rbody!\n", temp);
                            memset(temp, '\0', 1024);
                            continue;
                        }
                        printf("cmd:%s rbody:%s\n", cmd, rbody);


                        /* "reg": register the user as online. */
                        if( 0 == strcmp(cmd, "reg") )
                        {
                            regOnlineShmat(onlineShmid, rbody);
                        }

                        memset(temp, '\0', 1024);
                    }
                }
                else
                {
                    perror("fork error");
                }
            }
        }
    }
    /* Unreachable cleanup (kept for symmetry with the open calls). */
    printf("------------------------------\n");
    free(buf);
    close(sockfd);
    close(clientfd);
    return 0;
}
예제 #14
0
/**
 * Receive one raw NV21 camera preview frame, throttle by time, rotate/crop
 * it according to the camera facing, convert it to the destination pixel
 * format, and enqueue it (bounded queue) for the encoder thread.
 *
 * @param data          raw NV21 frame bytes (presumably NV21 -- the helpers
 *                      below are *_nv21; confirm against the caller)
 * @param len           byte length of data
 * @param width/height  source frame dimensions
 * @param ratio         aspect ratio; only validated here, not otherwise used
 * @param camera_facing CAMERA_FACING_FRONT or CAMERA_FACING_BACK
 */
void Recorder::on_receive_video_frame(uint8_t* data, int len, int width, int height, float ratio, int camera_facing)
{
	// Reject null/empty frames and unknown camera facings.
	if (NULL == data || 0 >= len || 0 >= width || 0 >= height || 0 >= ratio ||
		(CAMERA_FACING_FRONT != camera_facing && CAMERA_FACING_BACK != camera_facing))
		return;

	// Current time, used for frame-rate throttling.
	long cur_time = get_cur_time();

	// Drop the frame if it arrived too soon after the previous one.
	if (-1 != pre_video_frame_time && (cur_time - pre_video_frame_time) < VIDEO_FRAME_INTER)
		return;

	/**
	 * The raw camera preview needs crop/rotate/scale processing; bail out
	 * if the conversion state (frames, buffers, sws context) is not set up.
	 */
	if(0 >= src_width || 0 >= src_height || NULL == src_frame_buffer || NULL == src_frame ||
	   0 >= dst_width || 0 >= dst_height || NULL == dst_frame_buffer || NULL == dst_frame ||
	   NULL == sws_ctx)
		return;

	//-------------------------------------------------------------
	//LOGD("camera width:%d", width);
	//LOGD("camera height:%d", height);
	//LOGD("camera facing:%d", camera_facing);
	//---------------------------------------------------------

	/**
	 * Crop/rotate the frame into src_frame_buffer.  The front camera is
	 * rotated anti-clockwise 90 degrees, the back camera clockwise 90.
	 */
	if(CAMERA_FACING_FRONT == camera_facing)
	{
		if (0 > rotate_acw90_cut_nv21(src_frame_buffer, src_width, src_height, data, width, height))
		{
			return;
		}
	}
	else if(CAMERA_FACING_BACK == camera_facing)
	{
		//LOGD("if(CAMERA_FACING_BACK == camera_facing)");

		/**
		 * Rotate the frame clockwise 90 degrees and crop it into
		 * src_frame_buffer.
		 */
		if(0 > rotate_cw90_cut_nv21(src_frame_buffer, src_width, src_height, data, width, height))
		{
			return;
		}
	}

	/**
	 * Convert the cropped frame's pixel format without scaling
	 * (scaling here would take too long).
	 */
	sws_scale(sws_ctx,
			src_frame->data, src_frame->linesize,
			0, src_frame->height,
			dst_frame->data, dst_frame->linesize);

	// dst_frame now holds the converted (yuv420p) frame data; queue it.

	// Build a frame object from the destination buffer.
	VideoFrame* frame = new VideoFrame(dst_frame_buffer, dst_width, dst_height);

	/************************************************************************/
	/* If the queue is at capacity, drop the oldest frame first.            */
	/************************************************************************/
	pthread_mutex_lock(&video_queue_mutex);	// lock the video queue
	if (video_que.size() >= VIDEO_FRAME_NUM)
	{
		VideoFrame* front_frame = video_que.front();
		video_que.pop();

		delete front_frame;
	}

	video_que.push(frame);	// enqueue the new frame
	pthread_mutex_unlock(&video_queue_mutex); // release the lock for other threads

	// Remember when this frame was accepted, for the throttle above.
	pre_video_frame_time = cur_time;

	// debug
	//LOGD("video size:%d", video_que.size());
}
예제 #15
0
/*
 * Per-GPU worker for tiled DGEMM: binds to one device, repeatedly
 * dequeues tile tasks into a double-buffered "reservation station",
 * launches blasx_gpu_dgemm_kernel on per-GPU streams, and finally
 * harvests the C tiles back to host memory.
 *
 * arg_data is a struct gpu_thread_data* carrying the matrix
 * configuration, the shared task queue, and the per-GPU LRU caches.
 */
void blasx_gpu_dgemm(void *arg_data)
{
    int i;
    //----------GPU Argument Prepare------------//
    struct gpu_thread_data *arg = (struct gpu_thread_data *) arg_data;
    const int GPU_id = arg->GPU_id;
    cuda_err = cudaSetDevice(GPU_id);
    assert(cuda_err == cudaSuccess);
    //matrix configuration
    reader_tracker addr_track[1300]; //CRITICAL
    int x                       = arg->mat_conf->x;
    int y                       = arg->mat_conf->y;
    int z                       = arg->mat_conf->z;
    double *A                    = (double*) arg->mat_conf->A;
    double *B                    = (double*) arg->mat_conf->B;
    double *C                    = (double*) arg->mat_conf->C;
    int lda                     = arg->mat_conf->lda;
    int ldb                     = arg->mat_conf->ldb;
    int ldc                     = arg->mat_conf->ldc;
    double beta                  = arg->mat_conf->beta;
    double alpha                 = arg->mat_conf->alpha;
    int nrowa                   = arg->mat_conf->nrowa;
    int nrowb                   = arg->mat_conf->nrowb;
    int nrowc                   = arg->mat_conf->nrowc;
    int ncola                   = arg->mat_conf->ncola;
    int ncolb                   = arg->mat_conf->ncolb;
    int ncolc                   = arg->mat_conf->ncolc;
    enum CBLAS_TRANSPOSE TransA = arg->mat_conf->TransA;
    enum CBLAS_TRANSPOSE TransB = arg->mat_conf->TransB;
    int block_dim               = arg->mat_conf->block_dim;
    //GPU configuration
    const int GPUs              = arg->GPUs;
    LRU_t   **LRUs              = arg->LRUs;
    cublasHandle_t  handle      = handles_DGEMM[GPU_id];
    queue_t *tasks_queue        = arg->tasks_queue;
    //------------------------------------------//
    // Hook this GPU's slice of the preallocated device buffers for C
    // (two per stream: double-buffered by `switcher`).
    double *C_dev[STREAMNUM*2];
    for (i = 0; i < STREAMNUM*2; i++) {
        C_dev[i] = C_dev_DGEMM[i+STREAMNUM*GPU_id*2];
    }
    cudaStream_t stream[STREAMNUM];
    cudaEvent_t task_event[STREAMNUM];
    for (i = 0 ; i < STREAMNUM; i++) {
        //hook event
        task_event[i] = event_DGEMM[i+GPU_id*STREAMNUM];
        //hook stream
        stream[i]     = streams_DGEMM[i+GPU_id*STREAMNUM];
    }
    
#ifdef affinity
    //thread setup: pin this worker to its GPU's CPU affinity set
    assert( blasx_set_affinity(GPU_id) == 0);
#endif
#ifdef thread_barrier
    // All GPU workers start their task loops together.
    pthread_barrier_t* barr     = arg->barr;
    int rc = pthread_barrier_wait(barr);
    assert(!(rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD));
#endif
#ifdef thread_profile
    printf("thread%d start@%f\n", GPU_id, get_cur_time());
#endif
    //------------------------------------------//


    //----------------GPU-START-----------------//
    int tasks_rs[STREAMNUM*2]; // mimic reseravation station
    int tasks_rs_size[2] = { 0, 0 };   // always tracking the first unused
    int switcher = 0;          // flips between the two halves of tasks_rs
    int task_batch_counter = 0;
    int mem_cpy_counter = 0;

    while (tasks_queue->TAIL >= 0) {
        /*------RS------*/
        // Fill the current half of the reservation station with up to
        // STREAMNUM task ids from the shared queue.
        int rs_counter          = 0;
        tasks_rs_size[switcher] = 0;
        for (rs_counter = 0; rs_counter < STREAMNUM; rs_counter++) {
            int task_id = dequeue(tasks_queue);
#ifdef task_tracker
            printf("==>GPU%d %d\n", GPU_id, task_id);
#endif
            if (task_id >= 0) {
                tasks_rs[tasks_rs_size[switcher]+STREAMNUM*switcher] = task_id;
                tasks_rs_size[switcher]++;
            }
        }
        
        /*--event_sync---*/
        // Busy-wait until the previous batch's event has completed.
        while (cudaEventQuery(task_event[0]) != cudaSuccess);
        
        /*--reduce_reader--*/
        // Release the reader references taken by the previous batch's
        // memcpys, and mark transfers owned by this GPU as done.
        int addr_counter = 0;
        for (addr_counter = 0; addr_counter < mem_cpy_counter; addr_counter++) {
            void *key          = addr_track[addr_counter].addr;
            int target_GPU_id  = addr_track[addr_counter].GPU_id;
            int is_trans_done  = addr_track[addr_counter].is_trans_done;
            rbt_node *n        = rbt_find(key, &(LRUs[target_GPU_id]->hash_map));
            assert(n != NULL);
            if (is_trans_done == 0 && (target_GPU_id == GPU_id)) {
                assert(target_GPU_id == GPU_id);
                n->associated_LRU_elem->is_trans_done = 1;
            }
            atomic_reader_minus(n);
        }

        /*--kernel_exe---*/
        // Launch the kernels for this batch: z+1 inner-product steps per
        // task, one stream per reservation-station slot.
        mem_cpy_counter = 0;
        int j = 0;
        for(j = 0; j <= z; j++){
            for (rs_counter = 0; rs_counter < tasks_rs_size[switcher]; rs_counter++) {
                int current_stream   = rs_counter;
                int current_task   = tasks_rs[rs_counter+STREAMNUM*switcher];
                int prior_task     = tasks_rs[rs_counter+(1-switcher)*STREAMNUM];
                cudaStream_t *curt_stream = &stream[current_stream];
                blasx_gpu_dgemm_kernel(j,
                                       nrowa, ncola,
                                       nrowb, ncolb,
                                       nrowc, ncolc,
                                       current_task, prior_task,
                                       TransA, TransB,
                                       A, B, C,
                                       lda, ldb, ldc,
                                       x, y, z,
                                       C_dev,
                                       curt_stream, &handle,
                                       current_stream,
                                       alpha, beta, block_dim,
                                       switcher, &task_batch_counter,
                                       LRUs, GPUs,
                                       &mem_cpy_counter,
                                       addr_track,
                                       GPU_id);
                if ( j == z && rs_counter == tasks_rs_size[switcher]-1) {
                    /*--event_record--*/
                    // Record once after the last kernel of the batch so
                    // the next iteration can wait on it.
                    cudaError_t err = cudaEventRecord(task_event[0], stream[0]);
                    if(err != cudaSuccess) printf("event record fail\n");
                }
            }
        }
        switcher = 1 - switcher;
        task_batch_counter++;
    }
    //------------------------------------------//

    //---------------RESULT-HARVEST-------------//
    // Copy the last in-flight C tiles back to the host matrix.
    collect_final_result_dgemm(tasks_rs, tasks_rs_size, switcher, stream, C_dev, block_dim, STREAMNUM, x, y, z, nrowc, ncolc, ldc, C);
    //------------------------------------------//
#ifdef thread_profile
    printf("thread%d end@%f\n", GPU_id, get_cur_time());
#endif
}
예제 #16
0
/*
 * Chat relay server: accepts a client, forks a handler which forks
 * again -- the parent of the inner fork receives client messages into
 * shared memory; the child polls shared memory and echoes new messages
 * (with a timestamp appended) back to the client.
 */
int main(int argc, char *argv[])
{
    int sockfd, clientfd, recvbytes; /* listening socket, client socket */
    socklen_t sin_size;              /* BUGFIX: accept() needs socklen_t,
                                      * and it must be initialized */
    pid_t pid, ppid;                 /* inner/outer fork results */
    char *buf, *r_addr, *w_addr, *temp, *time_str;
    struct sockaddr_in their_addr;
    key_t shmid;
    shmid = shm_create();       /* create the shared memory segment */
    /* BUGFIX: `temp` snapshots up to 1024 bytes of shared memory, so the
     * original malloc(255) overflowed on strcpy(temp, r_addr); calloc
     * also makes the first strcmp(temp, r_addr) read defined memory. */
    temp = (char *) calloc(1, 1024);
    time_str = (char *) malloc(20);
    sockfd = bindPort(MYPORT);  /* bind the listening port */
    while (1)
    {
        if (listen(sockfd, BACKLOG) == -1)      /* listen on the port */
        {
            perror("listen");
            exit(1);
        }
        printf("listening......\n");
        sin_size = sizeof(their_addr);
        if ((clientfd = accept(sockfd, (struct sockaddr *) &their_addr, &sin_size)) == -1)
        {
            perror("accept");
            exit(1);
        }
        /* BUGFIX: inet_ntoa() returns a string; %d was UB. */
        printf("accept from:%s\n", inet_ntoa(their_addr.sin_addr));
        send(clientfd, WELCOME, strlen(WELCOME), 0);    /* greeting */
        /* 512 bytes: recv() stores at most 255 and a timestamp is
         * strcat'ed on afterwards, so 255 was too small. */
        buf = (char *) malloc(512);
        ppid = fork();          /* per-connection handler */
        if (ppid == 0)
        {
            pid = fork();       /* split into receiver and sender */
            /* BUGFIX: attach the segment once per process; the original
             * called shmat() on every loop iteration, leaking
             * attachments. Both names map the same segment. */
            w_addr = shmat(shmid, 0, 0);
            r_addr = w_addr;
            while (1)
            {
                if (pid > 0)
                {
                    /* Receiver: client message -> shared memory. */
                    memset(buf, 0, 512);
                    if ((recvbytes = recv(clientfd, buf, 255, 0)) <= 0)
                    {
                        perror("recv1");
                        close(clientfd);
                        raise(SIGKILL);
                        exit(1);
                    }
                    memset(w_addr, '\0', 1024);
                    strncpy(w_addr, buf, 1023);
                    get_cur_time(time_str);
                    strcat(buf, time_str);
                    printf(" %s\n", buf);
                }
                else if (pid == 0)
                {
                    /* Sender: poll shared memory, forward new content. */
                    sleep(1);
                    if (strcmp(temp, r_addr) != 0)
                    {
                        strcpy(temp, r_addr);
                        get_cur_time(time_str);
                        strcat(r_addr, time_str);
                        if (send(clientfd, r_addr, strlen(r_addr), 0) ==
                            -1)
                        {
                            perror("send");
                        }
                        memset(r_addr, '\0', 1024);
                        strcpy(r_addr, temp);
                    }
                }
                else
                    perror("fork");
            }
        }
    }
    /* Unreachable cleanup (kept for symmetry with the open calls). */
    printf("------------------------------\n");
    free(buf);
    close(sockfd);
    close(clientfd);
    return 0;
}
예제 #17
0
파일: zgemm.c 프로젝트: alongwithyou/BLASX
/*
 * Double-complex GEMM front end: C := alpha*op(A)*op(B) + beta*C.
 * Validates arguments, then dispatches either to the host CBLAS
 * implementation (small/degenerate problems, or cublasXt failure) or to
 * cublasXtZgemm (any dimension > 1000).
 */
void cblas_zgemm(const enum CBLAS_ORDER Order, 
                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_TRANSPOSE TransB, 
                 const int M, const int N, const int K, 
                 const blasxDoubleComplex *alpha, const blasxDoubleComplex *A, const int lda, 
                 const blasxDoubleComplex *B, const int ldb,
                 const blasxDoubleComplex *beta, blasxDoubleComplex *C, const int ldc )
{
    cublasOperation_t transa, transb;
    cublasStatus_t status;
    /*---error handler---*/
    int nrowa, ncola, nrowb, ncolb;
    if (TransA == CblasNoTrans) {
        nrowa = M;
        ncola = K;
    } else {
        nrowa = K;
        ncola = M;
    }
    if (TransB == CblasNoTrans) {
        nrowb = K;
        ncolb = N;
    } else {
        nrowb = N;
        ncolb = K;
    }
    int info = 0;
    if (CBLasTransToCuBlasTrans(TransA,&transa) <  0) info = 1;
    else if (CBLasTransToCuBlasTrans(TransB,&transb) < 0) info = 2;
    else if (M < 0) info = 3;
    else if (N < 0) info = 4;
    else if (K < 0) info = 5;
    if (info != 0) {
        /* NOTE(review): reference CBLAS returns after xerbla_; this code
         * falls through -- confirm xerbla_ terminates before relying on
         * the values below. */
        xerbla_(ERROR_NAME, &info);
    }
    /*-------------------*/
    
    /*----dispatcher-----*/
    int type = 0; //1:cpu 2:cublasxt 3:blasx
    if (M <= 0 || N <= 0 || K <= 0)                      type = 1;
    if (type == 0 && (M > 1000 || N > 1000 || K > 1000)) type = 2; //WAITING IMPLEMENT
    else                                                 type = 1;
    /*-------------------*/

#ifdef BENCHMARK
    double Gflops = FLOPS_ZGEMM(M, N, K)/(1000000000);
    double gpu_start, gpu_end;
    gpu_start = get_cur_time();
#endif
    switch (type) {
        case 1:
        CPU_BLAS:
            Blasx_Debug_Output("calling cblas_zgemm:");
            if (cpublas_handle == NULL) blasx_init(CPU);
            if (cblas_zgemm_p == NULL) blasx_init_cblas_func(&cblas_zgemm_p, "cblas_zgemm");
            (*cblas_zgemm_p)(Order,TransA,TransB,M,N,K,alpha,A,lda,B,ldb,beta,C,ldc);
            break;
        case 2:
            if (cublasXt_handle == NULL) blasx_init(CUBLASXT);
            Blasx_Debug_Output("calling cublasZgemmXt:");
            /* BUGFIX: alpha and beta are already pointers to the scalars;
             * the original passed &alpha/&beta, i.e. pointers to the
             * *pointer* variables, feeding garbage scalars to cuBLAS. */
            status = cublasXtZgemm(cublasXt_handle,
                                   transa, transb,
                                   M, N, K,
                                   (const cuDoubleComplex*)alpha, (const cuDoubleComplex*)A, lda,
                                   (const cuDoubleComplex*)B, ldb,
                                   (const cuDoubleComplex*)beta, (cuDoubleComplex*)C, ldc);
            /* Fall back to the CPU path if the GPU call failed. */
            if( status != CUBLAS_STATUS_SUCCESS ) goto CPU_BLAS;
            break;
        /* case 3 (native BLASX multi-GPU path) is not yet enabled. */
        default:
            break;
    }
#ifdef BENCHMARK
    gpu_end = get_cur_time();
    printf("BLASX (M:%5d,N:%5d,K:%5d) Speed:%9.1f type:%2d\n", M, N, K, (double)Gflops/(gpu_end - gpu_start), type);
#endif

}
예제 #18
0
/*
 * Format one log record ("[time] LEVEL tid file:line message\n") and
 * append it to the shared write buffer under the global mutex.  If the
 * buffer is full, or the day rolled over, the flusher thread is woken
 * and the write is retried.
 */
static 
void log_write_impl(const char *file, int line, const char *log_level_str, const char *format, va_list ap) {
	char buf_text[1024];
	char buf_time[32];
	memset(buf_text, 0, sizeof(buf_text));
	memset(buf_time, 0, sizeof(buf_time));
	int count_text;
	int count_time;

	/**
	* Cache the tid per thread: no need to make the gettid() system
	* call every time.
	*/
	static __thread int t_tid = -1;

	if(t_tid == -1) {
		t_tid = gettid();
	}

	/* BUGFIX: bounded snprintf/vsnprintf -- the original sprintf/
	 * vsprintf could overflow buf_text on a long message.  snprintf
	 * returns the would-be length, so clamp it to what fits. */
	count_text = snprintf(buf_text, sizeof(buf_text), " %-6s %d %s:%d ", log_level_str, t_tid, file, line);
	if(count_text > (int)sizeof(buf_text) - 1) {
		count_text = (int)sizeof(buf_text) - 1;
	}
	count_text += vsnprintf(buf_text + count_text, sizeof(buf_text) - count_text, format, ap);
	if(count_text > (int)sizeof(buf_text) - 2) {
		count_text = (int)sizeof(buf_text) - 2; /* room for '\n' + '\0' */
	}
	/* Guarantee the record ends with exactly one newline. */
	if(buf_text[count_text-1] != '\n') {
		buf_text[count_text] = '\n';
		buf_text[++count_text] = '\0';
	} else {
		buf_text[count_text] = '\0';
	}

	time_info_t ti;
	
	while(1) {
		pthread_mutex_lock(&mutex);

		/* The timestamp is (re)formatted on every attempt: if we had
		 * to release the lock and retry, time may have moved on. */
		ti = get_cur_time();

		count_time = snprintf(buf_time, sizeof(buf_time), "[ %02d:%02d:%02d.%06ld ]", ti.hour, ti.min, ti.sec, ti.usec);

		/**
		* A new day: ask the flusher to open a new log file, then retry.
		* BUGFIX: the original fell through after unlocking, touching
		* w_buf without the lock and unlocking the mutex a second time.
		*/
		if(ti.day_num > cur_log_day_num) {
			g_new_fd = 1;
			pthread_cond_signal(&cond);
			pthread_mutex_unlock(&mutex);
			continue;
		}
		/**
		* buf is full: wake the flusher, drop the lock, and retry.
		*/
		if(w_buf->pos + count_time + count_text >= MAX_BUF_SIZE) {
			pthread_cond_signal(&cond);
			pthread_mutex_unlock(&mutex);
		} else {
			/* memcpy: the counts are exact, no terminator needed. */
			memcpy(w_buf->buf+w_buf->pos, buf_time, count_time);
			w_buf->pos += count_time;
			memcpy(w_buf->buf+w_buf->pos, buf_text, count_text);
			w_buf->pos += count_text;
			
			pthread_mutex_unlock(&mutex);
			break;
		}
	}
}