void webserver_loop() { dmd_log(LOG_INFO, "in function %s, starting webserver main loop.\n", __func__); int serverfd = newSocket(); webserver_serverAddr = newAddress(); bindAddress(serverfd, webserver_serverAddr); listenAddress(serverfd); struct epoll_event events[MAX_EPOLL_EVENT]; int epollfd = newEpollSocket(); dmd_log(LOG_DEBUG, "in function %s, begin to work\n", __func__); addSockfd(epollfd, serverfd); while (1) { int ret = epoll_wait(epollfd, events, MAX_EPOLL_EVENT, -1); dmd_log(LOG_DEBUG, "in function %s, after epoll wait\n", __func__); if (ret < 0) { dmd_log(LOG_ERR, "in function %s, epoll failure\n", __func__); } else { handleEvent(epollfd, serverfd, events, ret); } } // while closeSocket(serverfd); releaseAddress(webserver_serverAddr); }
int webserver_fork() { pid_t pid; pid = fork(); if (pid < 0) { dmd_log(LOG_ERR, "webserver process: fork error(%s)", strerror(errno)); return -1; } else if (pid > 0) { // parent, just return; global.webserver_pid = pid; dmd_log(LOG_INFO, "Starting webserver process, pid = %d\n", pid); if (global.client.working_mode == WEBSERVER_ONLY) { dmd_log(LOG_INFO, "working_mode = webserver, main process exit\n"); exit(EXIT_SUCCESS); } return 0; } webserver_loop(); // Obviously, after main loop, webserver process exit directly. exit(EXIT_SUCCESS); }
static int dump_server() { // server settings; dmd_log(LOG_INFO, "server repository dir:%s\n", global.server.server_repo); dmd_log(LOG_INFO, "client_scale:%d\n", global.server.client_scale); dmd_log(LOG_INFO, "server_ip:%s\n", global.server.server_ip); dmd_log(LOG_INFO, "server port base:%d\n", global.server.server_port_base); dmd_log(LOG_INFO, "server last_duration:%d\n", global.server.last_duration); return 0; }
static void release_client() { dmd_log(LOG_INFO, "at function %s, free malloced memory\n", __func__); // free reusable buffers; free(global.client.referenceYUYV422); global.client.referenceYUYV422 = NULL; free(global.client.rgbbuffer); global.client.rgbbuffer = NULL; free(global.client.pyuyv422buffer); global.client.pyuyv422buffer = NULL; free(global.client.vyuyv422buffer); global.client.vyuyv422buffer = NULL; free(global.client.yuv420pbuffer); global.client.yuv420pbuffer = NULL; free(global.client.bufferingYUYV422); global.client.bufferingYUYV422 = NULL; // wait worker thread; if (global.client.working_mode == CAPTURE_ALL) { pthread_join(global.client.thread_attr.picture_thread_id, NULL); pthread_join(global.client.thread_attr.video_thread_id, NULL); } else if (global.client.working_mode == CAPTURE_PICTURE) { pthread_join(global.client.thread_attr.picture_thread_id, NULL); } else if (global.client.working_mode == CAPTURE_VIDEO) { pthread_join(global.client.thread_attr.video_thread_id, NULL); } else { dmd_log(LOG_ERR, "in function %s, impossible reach here!\n", __func__); assert(0); } pthread_attr_destroy(&global.client.thread_attr.global_attr); pthread_rwlock_destroy(&global.client.thread_attr.bufferYUYV_rwlock); pthread_mutex_destroy(&global.client.thread_attr.picture_mutex); pthread_cond_destroy(&global.client.thread_attr.picture_cond); pthread_mutex_destroy(&global.client.thread_attr.video_mutex); pthread_cond_destroy(&global.client.thread_attr.video_cond); }
// Ensure the per-client repository directory "<repo>/client-NN" exists.
// Returns the result of test_and_mkdir(): 0 on success, -1 on error.
int server_init_client_repodir(int client_number) {
    char repodir[PATH_MAX];

    snprintf(repodir, sizeof(repodir), "%s/client-%02d",
             global.server.server_repo, client_number);
    dmd_log(LOG_DEBUG, "in function %s, client repodir is : %s\n",
            __func__, repodir);

    return test_and_mkdir(repodir);
}
int test_and_mkdir(const char *path) { // first, find parent path; const char *ptr = path + strlen(path); while (*ptr != '/' && ptr > path) { ptr--; } char *parent = strndupa(path, ptr - path); // no need to free manually assert(parent != NULL); dmd_log(LOG_DEBUG, "in function %s, parent path is %s\n", __func__, parent); dmd_log(LOG_DEBUG, "in function %s, path is %s\n", __func__, path); if (access(parent, F_OK) == 0) { // parent exist, just mkdir if (access(path, F_OK) == 0) { // path exist, just return dmd_log(LOG_DEBUG, "in function %s, dir %s existed already\n", __func__, path); return 0; } else { // else just mkdir path; if (mkdir(path, 0755) == -1) { dmd_log(LOG_ERR, "in function %s, mkdir %s error:%s\n", __func__, path, strerror(errno)); return -1; } else { dmd_log(LOG_DEBUG, "in function %s, mkdir %s succeed\n", __func__, path); } } } else { // parent doesn't exist, recursively call test_and_mkdir(); // first mkdir parent; int ret = test_and_mkdir(parent); if (ret == -1) { return ret; } // then mkdir path; if (mkdir(path, 0755) == -1) { dmd_log(LOG_ERR, "in function %s, mkdir %s error:%s\n", __func__, path, strerror(errno)); return -1; } else { dmd_log(LOG_DEBUG, "in function %s, mkdir2 %s succeed\n", __func__, path); } } return 0; }
// Dump the client RTP session settings at LOG_INFO level.
static void dump_client_rtp() {
    dmd_log(LOG_INFO, "local ip:%s\n", global.client.clientrtp.local_ip);
    dmd_log(LOG_INFO, "local port:%d\n", global.client.clientrtp.local_port);
    dmd_log(LOG_INFO, "local_sequence_number:%d\n",
            global.client.clientrtp.local_sequence_number);
    dmd_log(LOG_INFO, "server ip:%s\n", global.client.clientrtp.server_ip);
    dmd_log(LOG_INFO, "server port base %d\n",
            global.client.clientrtp.server_port_base);
    dmd_log(LOG_INFO, "server rtp port:%d\n",
            global.client.clientrtp.server_rtp_port);
    dmd_log(LOG_INFO, "server rtcp port:%d\n",
            global.client.clientrtp.server_rtcp_port);
}
int dump_global_config() { // only dump, no error detect. dmd_log(LOG_INFO, "in function %s:\n", __func__); int ret = dump_common(); assert(ret == 0); if (global.cluster_mode == CLUSTER_CLIENT || global.cluster_mode == CLUSTER_SINGLETON) { ret = dump_client(); assert(ret == 0); } else if (global.cluster_mode == CLUSTER_SERVER) { ret = dump_server(); assert(ret == 0); } return 0; }
// called at atexit() to free malloced memory in variable global; void release_default_global() { // clean threads; if (global.cluster_mode == CLUSTER_CLIENT || global.cluster_mode == CLUSTER_SINGLETON) { release_client(); } else { // TODO(weizhenwei): master thread clean utils; release_server(); } pthread_mutex_destroy(&total_thread_mutex); dmd_log(LOG_ERR, "in function %s, before dump stats!\n", __func__); // dump and release global statistics; pthread_mutex_lock(&global_stats->mutex); dump_statistics(global_stats); pthread_mutex_unlock(&global_stats->mutex); release_statistics(global_stats); // close database connection; close_db(opendmd_db); }
/*{
** Name: dmv_unufmap - UNDO of an Fmap Update operation.
**
** Description:
**
** Inputs:
**      dmve				Pointer to dmve control block.
**      tabio				Pointer to table io control block
**      fmap				Table's FMAP page.
**	log_rec				Fmap log record.
**	plv				Pointer to page level accessor
**
** Outputs:
**	error				Pointer to Error return area
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**      23-Feb-2009 (hanal04) Bug 121652
**          Created.
*/
static DB_STATUS
dmv_unufmap(
DMVE_CB             *dmve,
DMP_TABLE_IO        *tabio,
DMP_PINFO           *fmappinfo,
DM0L_FMAP           *log_rec,
DMPP_ACC_PLV        *loc_plv)
{
    LG_LSN		*log_lsn = &log_rec->fmap_header.lsn;
    LG_LSN		lsn;
    DB_STATUS		status;
    i4			dm0l_flags;
    i4			*err_code = &dmve->dmve_error.err_code;
    i4			page_type = log_rec->fmap_pg_type;
    /* fseg: number of free-space bits covered by one FMAP page. */
    i4			fseg = DM1P_FSEG_MACRO(page_type, log_rec->fmap_page_size);
    i4			first_bit = 0;
    DM1P_FMAP		*fmap = (DM1P_FMAP*)fmappinfo->page;

    CLRDBERR(&dmve->dmve_error);

    /*
    ** If recovery was found to be unneeded to both the old and new pages
    ** then we can just return.
    */
    if (fmap == NULL)
	return (E_DB_OK);

    /*
    ** Recompute the FMAP firstbit hint only if the logged first-used page
    ** actually falls inside the range covered by this FMAP page.
    */
    if(log_rec->fmap_first_used / fseg == log_rec->fmap_map_index)
	first_bit = (log_rec->fmap_first_used % fseg) + 1;

    /* Consistency check: the page's FMAP sequence must match the log record. */
    if (DM1P_VPT_GET_FMAP_SEQUENCE_MACRO(page_type, fmap) !=
						log_rec->fmap_map_index)
    {
	uleFormat(NULL, E_DM9677_DMVE_FMAP_FMAP_STATE, (CL_ERR_DESC *)NULL,
	    ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, err_code, 6,
	    sizeof(DB_DB_NAME), tabio->tbio_dbname->db_db_name,
	    sizeof(DB_TAB_NAME), tabio->tbio_relid->db_tab_name,
	    sizeof(DB_OWN_NAME), tabio->tbio_relowner->db_own_name,
	    0, DM1P_VPT_GET_FMAP_PAGE_PAGE_MACRO(page_type, fmap),
	    0, DM1P_VPT_GET_FMAP_SEQUENCE_MACRO(page_type, fmap),
	    0, log_rec->fmap_map_index);
	dmd_log(1, (PTR) log_rec, 4096);
	uleFormat(NULL, E_DM9642_UNDO_FMAP, (CL_ERR_DESC *)NULL, ULE_LOG, NULL,
	    (char *)NULL, (i4)0, (i4 *)NULL, err_code, 0);
    }

    /*
    ** Check direction of recovery operation:
    **
    **     If this is a normal Undo, then we log the CLR for the operation
    **     and write the LSN of this CLR onto the newly updated page (unless
    **     dmve_logging is turned off - in which case the rollback is not
    **     logged and the page lsn is unchanged).
    **
    **     If the record being processed is itself a CLR, then we are REDOing
    **     an update made during rollback processing.  Updates are not relogged
    **     in redo processing and the LSN is moved forward to the LSN value of
    **     of the original update.
    */
    if ((log_rec->fmap_header.flags & DM0L_CLR) == 0)
    {
	if (dmve->dmve_logging)
	{
	    dm0l_flags = DM0L_CLR;
	    if (log_rec->fmap_header.flags & DM0L_JOURNAL)
		dm0l_flags |= DM0L_JOURNAL;

	    status = dm0l_ufmap(dmve->dmve_log_id, dm0l_flags,
		&log_rec->fmap_tblid, tabio->tbio_relid, tabio->tbio_relowner,
		log_rec->fmap_pg_type, log_rec->fmap_page_size,
		log_rec->fmap_loc_cnt,
		log_rec->fmap_fhdr_pageno, log_rec->fmap_fmap_pageno,
		log_rec->fmap_map_index,
		log_rec->fmap_hw_mark,
		log_rec->fmap_fhdr_cnf_loc_id, log_rec->fmap_fmap_cnf_loc_id,
		log_rec->fmap_first_used, log_rec->fmap_last_used,
		log_lsn, &lsn, &dmve->dmve_error);
	    if (status != E_DB_OK)
	    {
		/*
		 * Bug56702: return logfull indication.
		 */
		dmve->dmve_logfull = dmve->dmve_error.err_code;
		uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG,
		    NULL, (char *)NULL, (i4)0, (i4 *)NULL, err_code, 0);
		SETDBERR(&dmve->dmve_error, 0, E_DM9642_UNDO_FMAP);
		return(E_DB_ERROR);
	    }
	}
    }
    else
    {
	/*
	** If we are processing recovery of an FMAP CLR (redo-ing the undo
	** of an extend) then we don't log a CLR but instead save the LSN
	** of the log record we are processing with which to update the
	** page lsn's.
	*/
	lsn = *log_lsn;
    }

    /*
    ** Undo the Update FMAP operation.
    */

    /*
    ** FHDR updates will be performed in the associated DM0L_FMAP and/or
    ** DM0L_EXTEND processing.
    **
    ** Mark the appropriate ranges of pages as free.
    */
    dmveMutex(dmve, fmappinfo);
    DM1P_VPT_SET_FMAP_FIRSTBIT_MACRO(page_type, fmap, first_bit);
    dm1p_fmfree(fmap, log_rec->fmap_first_used, log_rec->fmap_last_used,
		page_type, log_rec->fmap_page_size);
    DM1P_VPT_SET_FMAP_PAGE_STAT_MACRO(page_type,fmap,DMPP_MODIFY);
    if (dmve->dmve_logging)
	DM1P_VPT_SET_FMAP_PG_LOGADDR_MACRO(page_type, fmap, lsn);
    dmveUnMutex(dmve, fmappinfo);

    /*
    ** Release log file space allocated for logfile forces that may be
    ** required by the buffer manager when unfixing the pages just recovered.
    */
    if (((log_rec->fmap_header.flags & DM0L_CLR) == 0) &&
	((log_rec->fmap_header.flags & DM0L_FASTCOMMIT) == 0) &&
	(dmve->dmve_logging))
    {
	dmve_unreserve_space(dmve, 1);
    }

    return(E_DB_OK);
}
static int dump_common() { // basic settings; if (global.cluster_mode == CLUSTER_CLIENT) { dmd_log(LOG_INFO, "cluster_mode: client\n"); } else if (global.cluster_mode == CLUSTER_SERVER) { dmd_log(LOG_INFO, "cluster_mode: server\n"); } else if (global.cluster_mode == CLUSTER_SINGLETON) { dmd_log(LOG_INFO, "cluster_mode: singleton\n"); } else { dmd_log(LOG_ERR, "Unsupported cluster mode\n"); return -1; } if (global.daemon_mode == DAEMON_ON) { dmd_log(LOG_INFO, "daemon_mode: on\n"); } else if (global.daemon_mode == DAEMON_OFF) { dmd_log(LOG_INFO, "daemon_mode: off\n"); } else { dmd_log(LOG_ERR, "Unsupported daemon mode\n"); return -1; } dmd_log(LOG_INFO, "pid file:%s\n", global.pid_file); dmd_log(LOG_INFO, "cfg file:%s\n", global.cfg_file); dmd_log(LOG_INFO, "x264_fps:%d\n", global.x264_fps); dmd_log(LOG_INFO, "database file directory:%s\n", global.database_repo); dmd_log(LOG_INFO, "webserver ip:%s\n", global.webserver_ip); dmd_log(LOG_INFO, "webserver port:%d\n", global.webserver_port); dmd_log(LOG_INFO, "webserver port:%d\n", global.webserver_root); return 0; }
static int dump_client() { // client settings; if (global.client.working_mode == CAPTURE_VIDEO) { dmd_log(LOG_INFO, "working_mode: capture video\n"); } else if (global.client.working_mode == CAPTURE_PICTURE) { dmd_log(LOG_INFO, "working_mode: capture picture\n"); } else if (global.client.working_mode == WEBSERVER_ONLY) { // for debug dmd_log(LOG_INFO, "working_mode: webserver only\n"); } else if (global.client.working_mode == CAPTURE_ALL) { dmd_log(LOG_INFO, "working_mode: capture video and picture\n"); } else { dmd_log(LOG_ERR, "Unsupported client working mode\n"); return -1; } dmd_log(LOG_INFO, "video device:%s\n", global.client.video_device); dmd_log(LOG_INFO, "image width:%d\n", global.client.image_width); dmd_log(LOG_INFO, "image height:%d\n", global.client.image_height); dmd_log(LOG_INFO, "req count:%d\n", global.client.req_count); dmd_log(LOG_INFO, "diff pixels:%d\n", global.client.diff_pixels); dmd_log(LOG_INFO, "diff deviation:%d\n", global.client.diff_deviation); dmd_log(LOG_INFO, "video duration:%d\n", global.client.video_duration); if (global.client.picture_format == PICTURE_BMP) { dmd_log(LOG_INFO, "picture_format: bmp\n"); } else if (global.client.picture_format == PICTURE_PNG) { dmd_log(LOG_INFO, "picture_format: png\n"); } else if (global.client.picture_format == PICTURE_JPEG) { dmd_log(LOG_INFO, "picture_format: jpeg\n"); } else { dmd_log(LOG_ERR, "Unsupported client picture format\n"); return -1; } if (global.client.video_format == VIDEO_H264) { dmd_log(LOG_INFO, "video_format: h264\n"); } else { dmd_log(LOG_ERR, "Unsupported client video format\n"); return -1; } dmd_log(LOG_INFO, "client repo dir:%s\n", global.client.client_repo); dump_client_rtp(); return 0; }
// encapulate IDR/SLICE nalu;
// Wraps one H.264 NALU into an FLV video tag (11-byte tag header,
// 5-byte AVC video header, 4-byte big-endian NALU length, NALU payload,
// 4-byte PreviousTagSize) and appends it to `filename`.
// `ts` is the FLV timestamp in milliseconds; `type` selects the FLV
// frame-type nibble (NAL_SLICE_IDR -> key frame, NAL_SLICE -> inter frame).
// Returns 0 on success (failures abort via assert).
int encapulate_nalu(uint8_t *nalu, int nalu_len, const char *filename, uint32_t ts, int type) {
    int offset = 0;
    // 9 = 5 bytes flv video header + 4 nalu length;
    uint32_t body_len = nalu_len + 9;  // 9 = 5 + 4;
    uint32_t total_tag_len = body_len + FLV_TAG_HEADER_SIZE + FLV_PRE_TAG_SIZE;
    uint8_t *buffer = (uint8_t *)malloc(sizeof(uint8_t) * total_tag_len);
    assert(buffer != NULL);

    // fill flv tag header, 11 bytes;
    buffer[offset++] = 0x09;                        // tagtype: video;
    buffer[offset++] = (uint8_t)(body_len >> 16);   // data len;
    buffer[offset++] = (uint8_t)(body_len >> 8);    // data len;
    buffer[offset++] = (uint8_t)(body_len);         // data len;
    buffer[offset++] = (uint8_t)(ts >> 16);         // timestamp;
    buffer[offset++] = (uint8_t)(ts >> 8);          // timestamp;
    buffer[offset++] = (uint8_t)(ts);               // timestamp;
    buffer[offset++] = (uint8_t)(ts >> 24);         // timestamp (extended byte);
    buffer[offset++] = 0x00;                        // stream id 0;
    buffer[offset++] = 0x00;                        // stream id 0;
    buffer[offset++] = 0x00;                        // stream id 0;

    // fill flv video header, 5 bytes;
    if (type == NAL_SLICE_IDR) {
        buffer[offset++] = 0x17;  // key frame (frametype 1), AVC;
    } else if (type == NAL_SLICE) {
        buffer[offset++] = 0x27;  // inter frame (frametype 2), AVC;
    } else {
        dmd_log(LOG_INFO, "impossible to reach here!\n");
        assert(0);
    }
    buffer[offset++] = 0x01;  // AVC NALU unit;
    buffer[offset++] = 0x00;  // composition time;
    buffer[offset++] = 0x00;  // composition time;
    buffer[offset++] = 0x00;  // composition time;

    // fill flv video body;
    buffer[offset++] = (uint8_t)(nalu_len >> 24);  // nalu length;
    buffer[offset++] = (uint8_t)(nalu_len >> 16);  // nalu length;
    buffer[offset++] = (uint8_t)(nalu_len >> 8);   // nalu length;
    buffer[offset++] = (uint8_t)(nalu_len);        // nalu length;
    memcpy(buffer + offset, nalu, nalu_len);
    offset += nalu_len;

    // fill previous tag size;
    uint32_t prev_tag_size = body_len + FLV_TAG_HEADER_SIZE;
    buffer[offset++] = (uint8_t)(prev_tag_size >> 24);  // prev tag size;
    buffer[offset++] = (uint8_t)(prev_tag_size >> 16);  // prev tag size;
    buffer[offset++] = (uint8_t)(prev_tag_size >> 8);   // prev tag size;
    buffer[offset++] = (uint8_t)(prev_tag_size);        // prev tag size;
    assert(offset == total_tag_len);

    // write to flv file (append mode);
    FILE *fp = fopen(filename, "ab");
    assert(fp != NULL);
    int writelen = fwrite(buffer, sizeof(uint8_t), total_tag_len, fp);
    assert(writelen == total_tag_len);
    fflush(fp);
    fclose(fp);

    free(buffer);

    return 0;
}
/*{
** Name: dmv_rebtree_del - Redo the Delete of a btree key
**
** Description:
**      This function adds a key to a btree index for the recovery of a
**	delete record operation.
**
** Inputs:
**      dmve				Pointer to dmve control block.
**      tabio				Pointer to table io control block
**	page				Pointer to the page to which to insert
**
** Outputs:
**	error				Pointer to Error return area
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**      14-dec-1992 (rogerk)
**	    Written for 6.5 recovery.
**	18-jan-1992 (rogerk)
**	    Add check in redo routine for case when null page pointer is
**	    passed because redo was found to be not needed.
**	26-apr-1993 (bryanp)
**	    6.5 Cluster support:
**		Replace all uses of DM_LOG_ADDR with LG_LA or LG_LSN.
**      06-may-1996 (thaju02 & nanpr01)
**          New page format support: change page header references to use
**	    macros.
**      22-nov-96 (stial01,dilma04)
**          Row Locking Project:
**          When calling dm1cxdel(), pass reclaim_space param
**      04-feb-97 (stial01)
**          Tuple headers are on LEAF and overflow (CHAIN) pages
**          Tuple headers are not on INDEX pages.
**      27-feb-97 (stial01)
**          dmv_rebtree_del() Space is reclaimed when redoing DELETE,
**          unless last entry on leaf page with overflow chain.
**          Init flag param for dm1cxdel()
**      07-apr-97 (stial01)
**          dmv_rebtree_del() NonUnique primary btree (V2) dups span leaf pages,
**          not overflow chain. Remove code skipping reclaim of overflow key
**      21-may-1997 (stial01)
**          Added flags arg to dm0p_unmutex call(s).
**	13-Jun-2006 (jenjo02)
**	    Clustered TIDs need not match, as long as the keys match.
*/
static DB_STATUS
dmv_rebtree_del(
DMVE_CB             *dmve,
DMP_TABLE_IO	    *tabio,
DMP_PINFO	    *pinfo,
DM_TID		    *bid)
{
    DM0L_BTDEL		*log_rec = (DM0L_BTDEL *)dmve->dmve_log_rec;
    DB_STATUS		status = E_DB_OK;
    DM_LINE_IDX		childkey;
    DM_LINE_IDX		childtid;
    DM_TID		deltid;
    i4			delpartno;
    i4			page_type = log_rec->btd_pg_type;
    char		*key;
    char		*key_ptr;
    i4			key_len;
    bool		index_update;
    bool		Clustered;
    i4			dmcx_flag;
    i4			ix_compressed;
    DMP_DCB		*dcb = dmve->dmve_dcb_ptr;
    i4			*err_code = &dmve->dmve_error.err_code;
    LG_LRI		lri;
    DMPP_PAGE		*page = pinfo->page;

    CLRDBERR(&dmve->dmve_error);

    /*
    ** If there is nothing to recover, just return.
    */
    if (page == NULL)
	return (E_DB_OK);

    /* The logged key value is carried in the record's variable buffer. */
    key = &log_rec->btd_vbuf[0];
    index_update = ((DM1B_VPT_GET_PAGE_STAT_MACRO(page_type, page) &
	DMPP_INDEX) != 0);
    Clustered = ((DM1B_VPT_GET_PAGE_STAT_MACRO(page_type, page) &
	DMPP_CLUSTERED) != 0);
    ix_compressed = DM1CX_UNCOMPRESSED;
    if (log_rec->btd_cmp_type != TCB_C_NONE)
	ix_compressed = DM1CX_COMPRESSED;

    /*
    ** Deletes to non-leaf index pages actually effect more than one entry
    ** on the page.  The logged bid describes the entry from which the
    ** TID pointer is deleted.  The key entry is deleted from the previous
    ** position (if there is one).
    */
    if (index_update)
    {
	childtid = log_rec->btd_bid_child;
	childkey = log_rec->btd_bid_child;
    }
    else
    {
	childtid = bid->tid_tid.tid_line;
	childkey = bid->tid_tid.tid_line;
    }
    if (index_update && (childkey != 0))
	childkey--;

    /*
    ** Consistency Checks:
    **
    ** Verify that there is an entry at the indicated BID and that it
    ** matches the logged key, tid, partition entry.
    */
    dm1cxrecptr(page_type, log_rec->btd_page_size, page, childkey, &key_ptr);
    dm1cxtget(page_type, log_rec->btd_page_size, page, childtid,
	&deltid, &delpartno);

    /*
    ** We can only validate the key size on compressed tables; otherwise
    ** we must assume that the logged value was the correct table key length.
    */
    key_len = log_rec->btd_key_size;
    if (ix_compressed != DM1CX_UNCOMPRESSED)
    {
	dm1cx_klen(page_type, log_rec->btd_page_size, page, childkey,
	    &key_len);
    }

    /*
    ** Compare the key,tid pair we are about to delete with the one we logged
    ** to make sure they are identical.  If the keys don't match but the tids
    ** do, then we make an assumption here that the mismatch is most likely due
    ** to this check being wrong (we have garbage at the end of the tuple
    ** buffer or we allowed some sort of non-logged update to the row) and
    ** we continue with the operation after logging the unexpected condition.
    */
    if ((log_rec->btd_key_size != key_len) ||
	(MEcmp((PTR)key, (PTR)key_ptr, key_len) != 0) ||
	(!Clustered &&
	    (log_rec->btd_tid.tid_i4 != deltid.tid_i4 ||
	     log_rec->btd_partno != delpartno)) )
    {
	uleFormat(NULL, E_DM966A_DMVE_KEY_MISMATCH, (CL_ERR_DESC *)NULL,
	    ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, err_code, 8,
	    sizeof(DB_DB_NAME), tabio->tbio_dbname->db_db_name,
	    sizeof(DB_TAB_NAME), tabio->tbio_relid->db_tab_name,
	    sizeof(DB_OWN_NAME), tabio->tbio_relowner->db_own_name,
	    0, bid->tid_tid.tid_page, 0, bid->tid_tid.tid_line,
	    5, (index_update ? "INDEX" : "LEAF "),
	    0, log_rec->btd_bid.tid_tid.tid_page,
	    0, log_rec->btd_bid.tid_tid.tid_line);
	uleFormat(NULL, E_DM966B_DMVE_KEY_MISMATCH, (CL_ERR_DESC *)NULL,
	    ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, err_code, 7,
	    0, key_len, 0, log_rec->btd_key_size,
	    0, deltid.tid_tid.tid_page, 0, deltid.tid_tid.tid_line,
	    0, log_rec->btd_tid.tid_tid.tid_page,
	    0, log_rec->btd_tid.tid_tid.tid_line,
	    0, dmve->dmve_action);
	dmd_log(1, (PTR) log_rec, 4096);
	uleFormat(NULL, E_DM9653_REDO_BTREE_DEL, (CL_ERR_DESC *)NULL, ULE_LOG,
	    NULL, (char *)NULL, (i4)0, (i4 *)NULL, err_code, 0);
    }

    /*
    ** Mutex the page while updating it.
    */
    dmveMutex(dmve, pinfo);

    /*
    ** Redo the delete operation.
    */
    /* Single-iteration loop used only for break-on-error flow control. */
    for (;;)
    {
	/*
	** If redoing a delete to a non leaf page, save the tid value from the
	** entry we are about to delete from (the key's position) and write it
	** over the entry at the next position (effectively deleting the TID).
	*/
	if (index_update && (childkey != childtid))
	{
	    dm1cxtget(page_type, log_rec->btd_page_size, page, childkey,
		&deltid, &delpartno);
	    status = dm1cxtput(page_type, log_rec->btd_page_size, page,
		childtid, &deltid, delpartno);
	    if (status != E_DB_OK)
	    {
		dm1cxlog_error(E_DM93EB_BAD_INDEX_TPUT, (DMP_RCB *)NULL,
		    page, page_type, log_rec->btd_page_size, childtid);
		break;
	    }
	}

	/*
	** REDO recovery can usually reclaim space
	** (except REDO physical page lock)
	*/
	if (!index_update && page_type != TCB_PG_V1 &&
	    ((dcb->dcb_status & DCB_S_EXCLUSIVE) == 0) &&
	    (log_rec->btd_header.flags & DM0L_PHYS_LOCK))
	    dmcx_flag = 0;
	else
	    dmcx_flag = DM1CX_RECLAIM;

	status = dm1cxdel(page_type, log_rec->btd_page_size, page,
			DM1C_DIRECT, ix_compressed,
			&dmve->dmve_tran_id, LOG_ID_ID(dmve->dmve_log_id),
			(i4)0, dmcx_flag, childkey);
	if (status != E_DB_OK)
	{
	    dm1cxlog_error(E_DM93E2_BAD_INDEX_DEL, (DMP_RCB*)NULL, page,
			page_type, log_rec->btd_page_size, childkey);
	    break;
	}

	break;
    }

    /*
    ** Write the LSN, etc, of the Put log record to the updated page
    */
    DM0L_MAKE_LRI_FROM_LOG_RECORD(&lri, log_rec);
    DM1B_VPT_SET_PAGE_LRI_MACRO(page_type, page, &lri);
    DM1B_VPT_SET_PAGE_STAT_MACRO(page_type, page, DMPP_MODIFY);

    dmveUnMutex(dmve, pinfo);

    if (status != E_DB_OK)
    {
	SETDBERR(&dmve->dmve_error, 0, E_DM9653_REDO_BTREE_DEL);
	return(E_DB_ERROR);
    }

    return(E_DB_OK);
}
struct path_t *server_get_filepath(int path_type, int client_number) { char filepath[PATH_MAX]; // #define PATH_MAX 4096 at linux/limits.h char storepath[PATH_MAX]; char *suffix = NULL; if (path_type == JPEG_FILE) { suffix = "jpg"; } else if (path_type == H264_FILE) { suffix = "h264"; } else if (path_type == FLV_FILE) { suffix = "flv"; } else { dmd_log(LOG_ERR, "in function %s, error to reach here!\n", __func__); return NULL; } // global.server.server_repo's correctness is checked at // check_path() at src/config.c snprintf(storepath, PATH_MAX, "%s/client-%02d/%s", global.server.server_repo, client_number, suffix); assert(test_and_mkdir(storepath) == 0); time_t now; struct tm tmp = {0}; struct tm *tmptr = &tmp; now = time(&now); assert(now != -1); tmptr = localtime_r(&now, tmptr); assert(tmptr != NULL); if (strcmp(suffix, "jpg") == 0) { snprintf(filepath, PATH_MAX, "%s/%04d%02d%02d%02d%02d%02d-%02d.%s", storepath, tmptr->tm_year + 1900, tmptr->tm_mon + 1, tmptr->tm_mday, tmptr->tm_hour, tmptr->tm_min, tmptr->tm_sec, global.client.counter_in_second, suffix); } else { snprintf(filepath, PATH_MAX, "%s/%04d%02d%02d%02d%02d%02d.%s", storepath, tmptr->tm_year + 1900, tmptr->tm_mon + 1, tmptr->tm_mday, tmptr->tm_hour, tmptr->tm_min, tmptr->tm_sec, suffix); } assert(strlen(filepath) < PATH_MAX); struct path_t *path = (struct path_t *)malloc(sizeof(struct path_t)); assert(path != NULL); int len = strlen(filepath); path->path = (char *)malloc(sizeof(char) * (len + 1)); assert(path->path != NULL); strcpy(path->path, filepath); path->len = len; dmd_log(LOG_DEBUG, "in function %s, get filename: %s\n", __func__, filepath); return path; }