/* Find the first file or directory in a directory listing. Supports absolute and relative. If the path is invalid, returns a negative DFS_errno. If a file or directory is found, returns the flags of the entry and copies the name into buf. */ int dfs_dir_findfirst(const char * const path, char *buf) { directory_entry_t *dirent; int ret = recurse_path(path, WALK_OPEN, &dirent, TYPE_DIR); /* Ensure that if this fails, they can't call findnext */ next_entry = 0; if(ret != DFS_ESUCCESS) { /* File not found, or other error */ return ret; } /* We now have the pointer to the first entry */ directory_entry_t t_node; grab_sector(dirent, &t_node); if(buf) { strcpy(buf, t_node.path); } /* Set up directory to point to next entry */ next_entry = get_next_entry(&t_node); return get_flags(&t_node); }
/* Translate an IB rkey + remote address range into a SCIF offset.
 *
 * Walks the device's memory-region list under mrlock; when a region's
 * rkey matches and [raddr, raddr+len) lies entirely inside the region,
 * the corresponding SCIF offset is computed.  Returns 0 when no region
 * covers the request. */
static off_t m_pi_mr_trans(mcm_scif_dev_t *smd, uint64_t raddr, uint32_t rkey, int len)
{
	struct mcm_mr *mr;
	uint64_t lo, hi, scif_off = 0;
	uint32_t delta;

	mlog(8,"LOCATE: 0x%Lx to 0x%Lx, rkey 0x%x len %d\n", raddr, raddr+len, rkey, len);

	mpxy_lock(&smd->mrlock);
	for (mr = get_head_entry(&smd->mrlist); mr; mr = get_next_entry(&mr->entry, &smd->mrlist)) {
		mlog(8, "mr %p: ib_addr 0x%Lx ib_rkey 0x%x len %d\n",
		     mr, mr->mre.ib_addr, mr->mre.ib_rkey, mr->mre.mr_len);

		if (mr->mre.ib_rkey != rkey)
			continue;

		lo = mr->mre.ib_addr;
		hi = mr->mre.ib_addr + mr->mre.mr_len;
		mlog(8, "rkey match: start %Lx end %Lx\n", lo, hi);

		/* The whole requested range must fall inside this region */
		if (raddr < lo || (raddr + len) > hi)
			continue;

		mlog(8, " FOUND: mr %p: ib_addr 0x%Lx ib_rkey 0x%x len %d sci_addr %Lx sci_off %x\n",
		     mr, mr->mre.ib_addr, mr->mre.ib_rkey,
		     mr->mre.mr_len, mr->mre.sci_addr, mr->mre.sci_off);

		delta = raddr - lo;
		scif_off = mr->mre.sci_addr + mr->mre.sci_off + delta;
		break;
	}
	mpxy_unlock(&smd->mrlock);

	mlog(8,"LOCATE: return scif_off == 0x%Lx \n", scif_off);
	return scif_off;
}
/* Print the symbol table as an HTML table of contents.
 *
 * Each entry becomes a <li> link; nesting of <ul> lists follows the
 * entry levels.  Any lists still open when the table is exhausted are
 * closed at the end.
 *
 * FIX: the old code emitted a single <ul> (or </ul>) per level change,
 * so a jump of more than one level produced mis-nested HTML.  One tag
 * is now emitted per level crossed.  (Unused variable 'i' removed.) */
void print_table(symtable t){
    int curr_lev = 0, new_lev;
    int openul = 0;             /* number of <ul> currently open */
    struct entry* entry;

    printf("<H1>Indice</H1>\n");

    while(has_entry(t)){
        entry = get_next_entry(t);
        new_lev = entry->level;

        /* Open or close one list per level crossed */
        while(new_lev > curr_lev){
            printf("<ul>\n");
            curr_lev++;
            openul++;
        }
        while(new_lev < curr_lev){
            printf("</ul>\n");
            curr_lev--;
            openul--;
        }

        printf("<li><a href=#%s>%s</a>\n", entry->anchor, entry->name);
    }

    /* Close whatever is still open */
    for( ; openul > 0; openul--){
        printf("</ul>\n");
    }
}
int64_t ObLogBuffer::get_start_id() const { int err = OB_SUCCESS; int64_t next_pos = 0; int64_t log_id = 0; int64_t start_id = 0; if (OB_SUCCESS != (err = get_next_entry(start_pos_, next_pos, log_id)) && OB_DATA_NOT_SERVE != err) { TBSYS_LOG(ERROR, "get_next_entry(pos=%ld)=>%d", start_pos_, err); } else if (OB_SUCCESS == err) { start_id = log_id; } return start_id; }
int ObLogBuffer::seek(const int64_t log_id, const int64_t advised_pos, int64_t& real_pos) const { int err = OB_SUCCESS; int64_t cur_id = 0; int64_t next_pos = 0; if (OB_SUCCESS != (err = check_state())) { TBSYS_LOG(ERROR, "check_state()=>%d", err); } else if (log_id >= end_id_) { err = OB_DATA_NOT_SERVE; } else { real_pos = max(advised_pos, start_pos_); } while(OB_SUCCESS == err) { if (OB_SUCCESS != (err = get_next_entry(real_pos, next_pos, cur_id))) { if (OB_DATA_NOT_SERVE != err) { TBSYS_LOG(ERROR, "get_next_entry(pos=%ld)=>%d", real_pos, err); } } else if (log_id < cur_id) { err = OB_DATA_NOT_SERVE; } else if (log_id == cur_id) { break; } else { real_pos = next_pos; } } return err; }
char * filter_and_format_log(const LogType log_type, int * len) { AndroidLogEntry alog_entry; LoggerEntry *p_logger_entry; char *print_buffer; char *print_buffer_pos; int i; print_buffer=allocate_print_buffer(log_type); if (print_buffer == NULL) { printk(KERN_ERR "[LastAlog] filter_and_format_log allocate print buffer failed\n"); *len =0; return NULL; } print_buffer_pos = print_buffer; p_logger_entry = NULL; while ((p_logger_entry = get_next_entry( log_type, p_logger_entry)) != NULL) { #ifdef OUTPUT_TEXT_DEBUG printk(KERN_INFO "<p_logger_entry>:0x%X ", (unsigned int)p_logger_entry); printk(" [header: "); for (i=0 ;i<20; i++) printk("%02X ",*((char *)p_logger_entry+i)); printk("]"); printk(" [data: "); for (i=0 ;i<p_logger_entry->len; i++) printk("%02X ",*((char *)p_logger_entry+20+i)); printk("]\n"); #endif alog_processLogBuffer(p_logger_entry, &alog_entry); print_buffer_pos += alog_filterAndPrintLogLine(&g_format, (const AndroidLogEntry *)&alog_entry, print_buffer_pos); } *len = (unsigned int)print_buffer_pos - (unsigned int)print_buffer; return print_buffer; }
/* Continue a directory listing started with dfs_dir_findfirst().
 *
 * Returns FLAGS_EOF when the listing is exhausted (or findfirst failed);
 * otherwise returns the entry's flags and, when buf is non-NULL, copies
 * the entry name into it. */
int dfs_dir_findnext(char *buf)
{
    directory_entry_t entry;

    /* Nothing queued up: findfirst failed or we ran off the end */
    if(!next_entry)
    {
        return FLAGS_EOF;
    }

    /* The pointer was computed on the previous call; just fetch it */
    grab_sector(next_entry, &entry);

    if(buf)
    {
        strcpy(buf, entry.path);
    }

    /* Queue up the entry after this one */
    next_entry = get_next_entry(&entry);

    return get_flags(&entry);
}
/* Search a linked list of directory nodes for the entry named 'name'.
 * Returns the matching node pointer, or 0 when the name is absent. */
static directory_entry_t *find_dirent(char *name, directory_entry_t *cur_node)
{
    directory_entry_t sector;

    for( ; cur_node; cur_node = get_next_entry(&sector))
    {
        /* Pull this node's sector off the 'disk' */
        grab_sector(cur_node, &sector);

        /* Exact filename match? */
        if(strcmp(sector.path, name) == 0)
        {
            return cur_node;
        }
    }

    /* Exhausted the list without a match */
    return 0;
}
/* Initialise the three-level data-path components (prim/seco/tert) of a
 * dpth by scanning the storage tree for the highest existing entries.
 * Returns 0 on success, -1 on any failure.
 *
 * NOTE(review): dpth_mk_prim()/dpth_mk_seco() results are passed straight
 * into prepend_s() and the originals are never freed here — presumably
 * they return static/borrowed buffers; verify against their definitions. */
int dpth_init(struct dpth *dpth)
{
	int max;
	int ret=0;
	char *tmp=NULL;

	/* Highest existing primary component under the base path. */
	if(get_highest_entry(dpth->base_path, &max, NULL))
		goto error;
	if(max<0) max=0;           /* empty tree: start at 0 */
	dpth->prim=max;

	/* Highest secondary component inside the chosen primary dir. */
	tmp=dpth_mk_prim(dpth);
	if(!(tmp=prepend_s(dpth->base_path, tmp)))
		goto error;
	if(get_highest_entry(tmp, &max, NULL))
		goto error;
	if(max<0) max=0;
	dpth->seco=max;
	free(tmp);

	/* Next free tertiary slot inside the chosen secondary dir. */
	tmp=dpth_mk_seco(dpth);
	if(!(tmp=prepend_s(dpth->base_path, tmp)))
		goto error;
	if(get_next_entry(tmp, &max, dpth))
		goto error;
	if(max<0) max=0;
	dpth->tert=max;

	dpth->sig=0;
	dpth->need_data_lock=1;    /* data lock must be taken before use */
	goto end;
error:
	ret=-1;
end:
	if(tmp) free(tmp);
	return ret;
}
/*
 * alloc_new_dirent - add a directory entry to a directory's cluster chain.
 *
 * Scans every cluster of the directory for a reusable (deleted) slot and
 * writes @dirent there.  When the whole chain is full, a fresh cluster is
 * taken from the FAT, linked onto the end of the chain, and the entry is
 * written at its start.
 *
 * volume             : loopback-image / file volume
 * dir_cluster_number : first cluster of the directory
 * dirent             : entry to write
 * file_name          : entry's file name (currently unused here)
 * Returns TRUE on success.
 *
 * FIX: "&current_entry_number" had been mangled to "¤t_entry_number"
 * (HTML-entity corruption), which does not compile; restored.  Korean
 * comments translated; 'wirte_position' renamed to 'write_position'.
 */
BOOL alloc_new_dirent(struct mfs_volume* volume, u128 dir_cluster_number, struct mfs_dirent* dirent, ps16_t file_name)
{
	u8_t cluster[CLUSTER_SIZE] = {0, };
	const u32_t entry_per_data_cluster = CLUSTER_SIZE / sizeof(struct mfs_dirent);
	BOOL has_long_file_name_next_entry = FALSE;
	u128 read_position = 0;
	u128 write_position = 0;
	u128 current_cluster_number = dir_cluster_number;
	u128 before_cluster_number = 0;
	u32_t current_entry_number = 0;
	struct mfs_dirent* current_dirent = NULL;
	u128 end_cluster = get_end_cluster(volume);

	/* Walk every cluster belonging to this directory. */
	while(current_cluster_number != end_cluster)
	{
		read_position = read_cluster(volume, current_cluster_number);
#ifdef __KERNEL__
		seek_volume(volume, read_position);
#else
		seek_volume(volume, read_position, SEEK_SET);
#endif
		read_volume(volume, cluster, sizeof(u8_t), CLUSTER_SIZE);

		current_dirent = get_first_entry(cluster, &current_entry_number, has_long_file_name_next_entry);
		while(current_entry_number != entry_per_data_cluster)
		{
			/* A deleted file/dir slot can be reused for the new entry. */
			if( is_deleted_file(current_dirent->attribute) || is_deleted_dir(current_dirent->attribute) )
			{
				write_position = read_position + (current_entry_number * sizeof(struct mfs_dirent));
#ifdef __KERNEL__
				seek_volume(volume, write_position);
#else
				seek_volume(volume, write_position, SEEK_SET);
#endif
				write_volume(volume, dirent, sizeof(struct mfs_dirent), 1);
				printf("alloc_new_dirent %d %d\n", current_cluster_number, write_position);
				return TRUE;
			}
			/* Advance to the next entry in this cluster. */
			current_dirent = get_next_entry(cluster, &current_entry_number, &has_long_file_name_next_entry);
		}
		before_cluster_number = current_cluster_number;
		current_cluster_number = read_fat_index(volume, current_cluster_number);
	}

	/* No free slot in the directory: link in a fresh cluster.
	 * NOTE(review): if the chain were empty on entry (first cluster ==
	 * end cluster), before_cluster_number is still 0 here — confirm
	 * callers never pass an empty chain. */
	current_cluster_number = find_empty_fat_index(volume);
	write_in_fat_index(volume, before_cluster_number, current_cluster_number);
	write_in_fat_index(volume, current_cluster_number, end_cluster);

	write_position = read_cluster(volume, current_cluster_number);
#ifdef __KERNEL__
	seek_volume(volume, write_position);
#else
	seek_volume(volume, write_position, SEEK_SET);
#endif
	write_volume(volume, dirent, sizeof(struct mfs_dirent), 1);
	printf("alloc_new_dirent %d %d\n", current_cluster_number, write_position);
	return TRUE;
}
void __mfs_readdir(const ps16_t route, struct file *file, struct dir_context *ctx) { u8_t cluster[CLUSTER_SIZE] = {0, }; const u32_t entry_per_data_cluster = CLUSTER_SIZE / sizeof(struct mfs_dirent); s16_t composited_file_name[128] = {0, }; BOOL has_long_file_name_entry = FALSE; u128 current_cluster_number = 0; u32_t current_entry_number = 0; u128 read_position = 0; struct mfs_dirent* current_dirent = NULL; static s16_t path[1024]={0}; struct dentry *de = file->f_dentry; struct mfs_volume* volume = de->d_sb->s_fs_info; u128 end_cluster = get_end_cluster(volume); strncpy(path, route, 1024); current_cluster_number = get_cluster_number(volume, path); if(current_cluster_number == 0) { return ; } printk("__mfs_readdir\n"); // 볼륨이 무효하다. if(volume == NULL) return ; read_position = read_cluster(volume, current_cluster_number); #ifdef __KERNEL__ seek_volume(volume, read_position); #else seek_volume(volume, read_position, SEEK_SET); #endif read_volume(volume, cluster, sizeof(u8_t), CLUSTER_SIZE); printk("current cluster number before : %d\n", current_cluster_number); while(current_cluster_number != end_cluster) { // 클러스터의 첫 엔트리를 얻는다. current_dirent = get_first_entry(cluster, ¤t_entry_number, has_long_file_name_entry); printk("current dentry : %x\n", current_dirent); // 클러스터의 모든 엔트리를 검사한다. while(current_entry_number != entry_per_data_cluster) { printk("current cluster number after : %d\n", current_cluster_number); // if(current_dirent->size != 0) { // printk("current_dentry size is NOT 0\n"); // 얻은 엔트리가 LongFileName인지 여부 검사 if(is_long_file_name(current_dirent->attribute) == TRUE) { // LongFileName일 경우 LongFileName을 조합한다. 
composite_long_file_name(volume, current_cluster_number, current_entry_number, composited_file_name); } else { // 일반 FileName일 경우 복사 strcpy(composited_file_name, current_dirent->name); } if(is_normal_dir(current_dirent->attribute) == TRUE) { static char buf[1024]=""; int len; buf[0]='\0'; strcat(buf,path); len=strlen(path); if(len > 0){ if(path[len-1] != '/') strcat(buf,"/"); } strcat(buf, composited_file_name); if(!dir_emit(ctx, composited_file_name, strlen(composited_file_name), 2, DT_DIR)) { printk("WARNING %s %d", __FILE__, __LINE__); return ; } ctx->pos++; strcat(buf, "*<DIR>\n"); printk(buf); } else if(is_normal_file(current_dirent->attribute) == TRUE) { static char buf[1024]=""; buf[0]='\0'; if(!dir_emit(ctx, composited_file_name, strlen(composited_file_name), 2, DT_REG)) { printk("WARNING %s %d", __FILE__, __LINE__); return ; } ctx->pos++; strcat(buf, composited_file_name); strcat(buf, "*<FILE>\n"); printk(buf); } // } // 다음 엔트리를 얻는다. current_dirent = get_next_entry(cluster, ¤t_entry_number, &has_long_file_name_entry); } current_cluster_number = read_fat_index(volume, current_cluster_number); // 지정된 번호의 클러스터를 읽는다. read_position = read_cluster(volume, current_cluster_number); #ifdef __KERNEL__ seek_volume(volume, read_position); #else seek_volume(volume, read_position, SEEK_SET); #endif read_volume(volume, cluster, sizeof(u8_t), CLUSTER_SIZE); } return ; }
int __mfs_lookup(struct mfs_volume* volume, const ps16_t route, const ps16_t file_name) { u8_t cluster[CLUSTER_SIZE] = {0, }; const u32_t entry_per_data_cluster = CLUSTER_SIZE / sizeof(struct mfs_dirent); s16_t composited_file_name[128] = {0, }; BOOL has_long_file_name_next_entry = FALSE; u128 current_cluster_number = 0; u32_t current_entry_number = 0; u128 read_position = 0; struct mfs_dirent* current_dentry = NULL; static s16_t path[1024]={0}; u128 end_cluster = get_end_cluster(volume); strncpy(path, route, 1024); current_cluster_number = get_cluster_number(volume, route); // if(current_cluster_number == 0) // { // return 0; // } // 볼륨이 무효하다. if(volume == NULL) return 0; read_position = read_cluster(volume, current_cluster_number); #ifdef __KERNEL__ seek_volume(volume, read_position); #else seek_volume(volume, read_position, SEEK_SET); #endif read_volume(volume, cluster, sizeof(u8_t), CLUSTER_SIZE); while(current_cluster_number != end_cluster) { // 클러스터의 첫 엔트리를 얻는다. current_dentry = get_first_entry(cluster, ¤t_entry_number, has_long_file_name_next_entry); // 클러스터의 모든 엔트리를 검사한다. while(current_entry_number != entry_per_data_cluster) { // 얻은 엔트리가 LongFileName인지 여부 검사 if(is_long_file_name(current_dentry->attribute) == TRUE) { // LongFileName일 경우 LongFileName을 조합한다. composite_long_file_name(volume, current_cluster_number, current_entry_number, composited_file_name); } else { // 일반 FileName일 경우 복사 strcpy(composited_file_name, current_dentry->name); } if(strcmp(composited_file_name, file_name) == 0){ if(is_normal_dir(current_dentry->attribute) == TRUE){ return DIR_DENTRY; } else if(is_normal_file(current_dentry->attribute) == TRUE){ return FILE_DENTRY; } } // 다음 엔트리를 얻는다. current_dentry = get_next_entry(cluster, ¤t_entry_number, &has_long_file_name_next_entry); } current_cluster_number = read_fat_index(volume, current_cluster_number); // 지정된 번호의 클러스터를 읽는다. 
read_position = read_cluster(volume, current_cluster_number); #ifdef __KERNEL__ seek_volume(volume, read_position); #else seek_volume(volume, read_position, SEEK_SET); #endif read_volume(volume, cluster, sizeof(u8_t), CLUSTER_SIZE); } return 0; }
void __mfs_readdir(struct mfs_volume* volume, const ps16_t route, struct printdir *printdir) { u8_t cluster[CLUSTER_SIZE] = {0, }; const u32_t entry_per_data_cluster = CLUSTER_SIZE / sizeof(struct mfs_dirent); s16_t composited_file_name[128] = {0, }; BOOL has_long_file_name_next_entry = FALSE; u128 current_cluster_number = 0; u32_t current_entry_number = 0; u128 read_position = 0; struct mfs_dirent* current_dirent = NULL; static s16_t path[1024]={0}; struct file* filp = printdir->filp; void* dirent = printdir->dirent; filldir_t filldir = printdir->filldir; u128 end_cluster = get_end_cluster(volume); strncpy(path, route, 1024); current_cluster_number = get_cluster_number(volume, path); if(current_cluster_number == 0) { return ; } // 볼륨이 무효하다. if(volume == NULL) return ; read_position = read_cluster(volume, current_cluster_number); seek_volume(volume, read_position); read_volume(volume, cluster, sizeof(u8_t), CLUSTER_SIZE); while(current_cluster_number != end_cluster) { // 클러스터의 첫 엔트리를 얻는다. current_dirent = get_first_entry(cluster, ¤t_entry_number, has_long_file_name_next_entry); // 클러스터의 모든 엔트리를 검사한다. while(current_entry_number != entry_per_data_cluster) { // if(current_dirent->size != 0) // { // 얻은 엔트리가 LongFileName인지 여부 검사 if(is_long_file_name(current_dirent->attribute) == TRUE) { // LongFileName일 경우 LongFileName을 조합한다. 
composite_long_file_name(volume, current_cluster_number, current_entry_number, composited_file_name); } else { // 일반 FileName일 경우 복사 strcpy(composited_file_name, current_dirent->name); } if(is_normal_dir(current_dirent->attribute) == TRUE) { static char buf[1024] = ""; int len; buf[0] = '\0'; strcat(buf, path); len = strlen(path); if(len > 0){ if(path[len-1] != '/') strcat(buf, "/"); } strcat(buf, composited_file_name); if(filldir(dirent, composited_file_name, strlen(composited_file_name), filp->f_pos++, 2, DT_DIR)){ printk("WARNING %s %d", __FILE__,__LINE__); return ; } strcat(buf, "*<DIR>\n"); printk(buf); } else if(is_normal_file(current_dirent->attribute) == TRUE) { static char buf[1024]=""; buf[0]='\0'; printk("garig %p %p %p %s\n", filp, dirent, filldir, composited_file_name); if(filldir(dirent, composited_file_name, strlen(composited_file_name), filp->f_pos++, 2, DT_REG)){ printk("WARNING %s %d", __FILE__, __LINE__); return ; } strcat(buf, composited_file_name); strcat(buf, "*<FILE>\n"); printk(buf); } // } // 다음 엔트리를 얻는다. current_dirent = get_next_entry(cluster, ¤t_entry_number, &has_long_file_name_next_entry); } current_cluster_number = read_fat_index(volume, current_cluster_number); // 지정된 번호의 클러스터를 읽는다. read_position = read_cluster(volume, current_cluster_number); seek_volume(volume, read_position); read_volume(volume, cluster, sizeof(u8_t), CLUSTER_SIZE); } return ; }
/*************************************************************************
 * TESTS ON SINGLE DATA STRUCTURES LIKE INODES
 *
 * - Central routine for working on the directory tree.  A backtracking
 *   mechanism with no recursive procedure-overhead is used to avoid
 *   memory allocation and stack overflow problems.
 * - Called as "step 2" of the checking process, after the unique data
 *   structures have been handled and all tables/buffers allocated.
 * - The root inode was already validated in check_unique().
 *
 * Parameter : root_de   = directory entry of the local root to examine
 *             root_bnr  = number of the directory block holding it
 *             root_offs = offset within that block
 * Return    : TRUE under all normal checking conditions,
 *             FALSE if a fatal error occurs
 *************************************************************************/
word check_inodes ( struct dir_elem *root_de, daddr_t root_bnr, word root_offs )
{
    word   spare;              /* # of valid entries counted in the dir  */
    daddr_t bnr;               /* temporary block number                 */
    word   offset;             /* temporary offset                       */
    word   depth;              /* distance from the root directory       */
    struct buf *bpar;          /* current parent directory block         */
    struct buf *blpt;          /* symbolic-link reference-path block     */
    tree_e head;               /* root element of the in-memory tree     */
    word   first_time;         /* TRUE while scanning a dir's 1st entry  */
    tree_e *parent;            /* current parent tree element            */
    tree_e *tpt;               /* current working tree element           */
    struct dir_elem *de_pt;    /* entry currently being examined         */

    /*---------------------- Init operations ----------------------------*/
    head.bnr = root_bnr;                    /* Init the head element      */
    head.offset = root_offs;
    head.parent_used = TRUE;
    /* Copy the root entry */
    memcpy ( (void *) &head.de, (void *) root_de, sizeof (struct dir_elem) );
    head.enxt = (tree_e *) NULL;            /* Zero the head pointers     */
    head.eprv = (tree_e *) NULL;
    head.lnxt = (tree_e *) NULL;
    head.lprv = (tree_e *) NULL;

    /* Clear the comparison struct */
    memset ( &dir_cmp, 0, sizeof (struct dir_elem) );

    strcpy ( path, "/" );                   /* Set the filesystem root    */
    strcat ( path, head.de.de_name );       /* name in the path buffer    */

    spare = 0;                              /* No entry counted yet       */
    first_time = TRUE;
    parent = &head;                         /* The head element is the    */
    tpt = &head;                            /* only one at the beginning  */
    remove_de_hash ();                      /* Drop stale hash entries    */
    changes_de = 0;                         /* Nothing changed so far     */
    depth = 0;                              /* We start at root level     */

    /*--------------- Traverse the whole directory tree -----------------*/
    for ( ;; )
    {
        /* Get the next entry of the current parent directory */
        de_pt = get_next_entry ( &parent->de.de_inode, first_time, &bnr, &offset );
        first_time = FALSE;                 /* subsequent calls continue  */
                                            /* within the same directory  */

        if ( de_pt != (struct dir_elem *) NULL )
        {
            strncpy (actual_path, path, 512);
#if 0
            IOdebug ("%sChecking %s / [%s]", S_INFO, path, de_pt->de_name);
#endif
            /* Validate the extracted entry */
            if ( validate_entry ( de_pt, path, UNKNOWN, TRUE ) )
            {
                /* Track the name for duplicate detection */
                append_de_hash ( de_pt->de_name );
                spare++;                    /* one more valid entry       */

                /*-------------- Storage of symbolic-link information ----*/
                if ( de_pt->de_inode.i_mode == Type_Link )
                {
                    /* Load the block holding the link's reference path   */
                    blpt = bread ( 0, de_pt->de_inode.i_db[0], 1, SAVEA );
                    /* Record the link for the later link-check pass      */
                    append_link_hash ( bnr, offset, parent->bnr, parent->offset,
                                       blpt->b_un.b_link->name);
                    found_links++;
                    /* Done with the reference block                      */
                    brelse ( blpt->b_tbp, TAIL );
                }

                /*----- Append new elements to the directory tree --------*/
                if ( de_pt->de_inode.i_mode == Type_Directory )
                {
                    /* Count valid directories per cylinder group         */
                    add_dir ( bnr );
                    found_dirs++;
                    if ( tpt == parent )    /* Add a new directory level  */
                        tpt = append_level ( tpt );
                    else                    /* ... or a sibling entry     */
                        tpt = append_entry ( tpt );
                    /* Save the entry's location and content              */
                    tpt->bnr = bnr;
                    tpt->offset = offset;
                    memcpy ( &tpt->de, de_pt, sizeof (struct dir_elem) );
                    tpt->parent_used = FALSE;   /* not yet used as parent */
#if DEBUG
                    IOdebug (" check_inodes : New element created for %s (bnr= %d, off= %d)",
                             tpt->de.de_name, tpt->bnr, tpt->offset );
#endif
                }

                if ( de_pt->de_inode.i_mode == Type_File )
                    found_files++;
            }
            else                            /* Validation failed:         */
            {
                /* Note if /lost+found itself is unusable                 */
                if ( depth == 0 && ! strcmp ( de_pt->de_name, "lost+found" ) )
                {
                    IOdebug ("%sUnusable /lost+found-dir !", S_WARNING);
                    lost_found = FALSE;
                }
                IOdebug ("%sThe entry is invalid and will be deleted!", S_SERIOUS);
                if ( ! no_corrections )
                {
                    /* Wipe the corrupted entry in place                  */
                    memset ( (void *) de_pt, 0, sizeof (struct dir_elem) );
                    fst.deleted_inodes++;
                    changes_de++;
                }
            }
        }
        /*------ After checking a complete directory with all entries: ---*/
        else                                /* directory fully scanned    */
        {
#if DEBUG
            IOdebug (" check_inodes : Have examined all entries. Change the directory");
            IOdebug (" Current parent = %s 'used' = %d", parent->de.de_name, parent->parent_used);
            IOdebug (" Current entry = %s 'used' = %d", tpt->de.de_name, tpt->parent_used);
#endif
            /*------- Descend into the last unvisited sub-directory ------*/
            if ( tpt->enxt == (tree_e *) NULL && tpt->lnxt == (tree_e *) NULL
                 && tpt != parent && ! tpt->parent_used )
            {
#if DEBUG
                IOdebug (" check_inodes : Take the last sub-dir as parent directory");
#endif
#if DEBUG
                IOdebug (" check_inodes : Spare value found = %d , counted = %d",
                         parent->de.de_inode.i_spare, spare );
#endif
                /* Correct the stored entry count if it disagrees         */
                if ( spare != parent->de.de_inode.i_spare )
                {
#if DEBUG
                    IOdebug (" check_inodes : Correct number of entries (spare value)");
#endif
                    bpar = bread ( 0, parent->bnr, 1, SAVEA );
                    if ( parent->bnr == 1 )
                    {                       /* Special handling for root  */
                        bpar->b_un.b_sum->root_dir.de_inode.i_spare = spare;
                        bpar->b_un.b_sum->root_dir.de_inode.i_size = spare * sizeof(struct dir_elem);
                    }
                    else
                    {
                        bpar->b_un.b_dir[parent->offset].de_inode.i_spare = spare;
                        bpar->b_un.b_dir[parent->offset].de_inode.i_size = spare * sizeof(struct dir_elem);
                    }
                    test_bwrite ( bpar );   /* ... and write it back      */
                }
                /* Prepare the tree element for a pass as "parent"        */
                parent = tpt;
                parent->parent_used = TRUE;
                first_time = TRUE;
                spare = 0;
                strcat ( path , "/" );      /* Extend the path with the   */
                strcat ( path , parent->de.de_name );  /* new sub-dir     */
                depth++;                    /* one level below the root   */
                remove_de_hash ();          /* fresh name-hash per dir    */
                continue;                   /* scan the new directory     */
            }

            /*-------------------- Backtracking conditions ---------------*/
            if ( tpt->enxt == (tree_e *) NULL && tpt->lnxt == (tree_e *) NULL
                 && tpt == parent && tpt->parent_used )
            {
#if DEBUG
                IOdebug (" check_inodes : Use backtracking to get a new parent-dir");
#endif
#if DEBUG
                IOdebug (" check_inodes : Spare value found = %d , counted = %d",
                         parent->de.de_inode.i_spare, spare );
#endif
                /* Correct the stored entry count if it disagrees         */
                if ( spare != parent->de.de_inode.i_spare )
                {
#if DEBUG
                    IOdebug (" check_inodes : Correct number of entries (spare value)");
#endif
                    bpar = bread ( 0, parent->bnr, 1, SAVEA );
                    if ( parent->bnr == 1 )
                    {                       /* Special handling for root  */
                        bpar->b_un.b_sum->root_dir.de_inode.i_spare = spare;
                        bpar->b_un.b_sum->root_dir.de_inode.i_size = spare * sizeof(struct dir_elem);
                    }
                    else
                    {
                        bpar->b_un.b_dir[parent->offset].de_inode.i_spare = spare;
                        bpar->b_un.b_dir[parent->offset].de_inode.i_size = spare * sizeof(struct dir_elem);
                    }
                    test_bwrite ( bpar );   /* ... and write it back      */
                }
                /* Walk back up until a directory not yet used as parent  */
                do
                {
                    /* THE MAIN EXIT-POINT: back at the tree head         */
                    if ( tpt->eprv == (tree_e *) NULL && tpt->lprv == (tree_e *) NULL )
                        return TRUE;
                    /* The "move-back" operation                          */
                    if ( tpt->lprv != (tree_e *) NULL )
                    {
                        /* Remove the lowest level                        */
                        parent = remove_level ( tpt );
                        depth--;
                    }
                    else
                        /* Remove the last entry of the directory         */
                        parent = remove_entry ( tpt );
                    tpt = parent;           /* adjust the work pointer    */
                    /* Cut the last component off the pathname            */
                    *( strrchr ( path , '/' ) ) = '\0';
#if DEBUG
                    IOdebug (" check_inodes : Backtracking to path = %s (depth: %d)", path, depth );
#endif
                }
                while ( tpt->parent_used ); /* until an unused dir found  */

                /* Append the new sub-dir to the path                     */
                if ( tpt->de.de_name )
                {
                    strcat ( path, "/" );
                    strcat ( path, tpt->de.de_name );
                }
                /* Prepare it as the new parent directory                 */
                tpt->parent_used = TRUE;
                first_time = TRUE;
                spare = 0;
                remove_de_hash ();          /* clear the name hash        */
                continue;                   /* examine the "new" dir      */
            }
        }
    } /* end of <for (;;)> */
}
/*
 * get_cluster_number - resolve @path (e.g. "/a/b/c") to the cluster
 * number of its final component, starting at the root directory
 * (cluster 2).  Returns 0 when the route does not exist.
 *
 * NOTE(review): strtok() both MUTATES @path and is not reentrant —
 * callers must pass a scratch copy and must not call this concurrently.
 * NOTE(review): the u128 cluster number is returned as u32_t — confirm
 * cluster numbers fit.
 *
 * FIX: "&current_entry_number" had been mangled to "¤t_entry_number"
 * (HTML-entity corruption), which does not compile; restored.  Korean
 * comments translated.
 */
u32_t get_cluster_number(struct mfs_volume* volume, ps16_t path)
{
	u8_t current_cluster[CLUSTER_SIZE] = {0, };
	ps16_t seperated_path = strtok(path, "/");
	const u32_t entry_per_data_cluster = CLUSTER_SIZE / sizeof(struct mfs_dirent);
	struct mfs_dirent* current_dir_entry = NULL;
	s16_t composited_file_name[128] = {0, };
	u128 current_cluster_number = 2;    /* root directory cluster */
	u32_t current_entry_number = 0;
	BOOL is_dir_changed = FALSE;
	BOOL has_long_file_name_next_entry = FALSE;
	u128 end_cluster = get_end_cluster(volume);
	u128 read_position = read_cluster(volume, current_cluster_number);

#ifdef __KERNEL__
	seek_volume(volume, read_position);
#else
	seek_volume(volume, read_position, SEEK_SET);
#endif
	read_volume(volume, current_cluster, sizeof(u8_t), CLUSTER_SIZE);

	/* One iteration per path component still to resolve. */
	while(seperated_path != NULL)
	{
		is_dir_changed = FALSE;

		/* First entry of the current cluster. */
		current_dir_entry = get_first_entry(current_cluster, &current_entry_number, has_long_file_name_next_entry);

		/* Examine every entry in the cluster. */
		while(current_entry_number != entry_per_data_cluster)
		{
			/* Only directory entries can match a path component. */
			if(is_normal_dir(current_dir_entry->attribute) == TRUE)
			{
				/* Long file names are assembled from multiple entries. */
				if(is_long_file_name(current_dir_entry->attribute) == TRUE)
				{
					composite_long_file_name(volume, current_cluster_number, current_entry_number, composited_file_name);
				}
				else
				{
					strcpy(composited_file_name, current_dir_entry->name);
				}

				if(!strcmp(seperated_path, composited_file_name))
				{
					/* Matched: descend and take the next component. */
					seperated_path = strtok(NULL, "/");
					current_cluster_number = current_dir_entry->head_cluster_number;
					is_dir_changed = TRUE;
					break;
				}
			}
			/* Advance to the next entry. */
			current_dir_entry = get_next_entry(current_cluster, &current_entry_number, &has_long_file_name_next_entry);
		}

		/* No match in this cluster: follow the directory's FAT chain. */
		if (is_dir_changed == FALSE)
		{
			current_cluster_number = read_fat_index(volume, current_cluster_number);
			/* Chain exhausted: the route is wrong. */
			if(current_cluster_number == end_cluster)
			{
				printf("wrong route\n");
				return 0;
			}
		}

		/* Load the next cluster to scan. */
		read_position = read_cluster(volume, current_cluster_number);
#ifdef __KERNEL__
		seek_volume(volume, read_position);
#else
		seek_volume(volume, read_position, SEEK_SET);
#endif
		read_volume(volume, current_cluster, sizeof(u8_t), CLUSTER_SIZE);
	}
	return current_cluster_number;
}
/*
 * get_dentry - find the directory entry of file @file_name inside a
 * directory's cluster chain.
 *
 * Walks every cluster of the directory, assembling long file names
 * where needed, and copies the matching entry into @searched_dir_entry.
 *
 * volume             : loopback-image / file volume
 * dir_cluster_number : first cluster of the directory
 * file_name          : name to search for
 * searched_dir_entry : out - receives the matching entry
 * Returns TRUE when found, FALSE otherwise.
 *
 * FIX: "&current_entry_number" had been mangled to "¤t_entry_number"
 * (HTML-entity corruption), which does not compile; restored.  Korean
 * comments translated.
 */
BOOL get_dentry(struct mfs_volume* volume, u128 dir_cluster_number, ps16_t file_name, struct mfs_dirent* searched_dir_entry)
{
	u8_t cluster[CLUSTER_SIZE] = {0, };
	const u32_t entry_per_data_cluster = CLUSTER_SIZE / sizeof(struct mfs_dirent);
	s16_t composited_file_name[128] = {0, };
	BOOL has_long_file_name_next_entry = FALSE;
	u128 read_position = 0;
	u128 current_cluster_number = dir_cluster_number;
	u32_t current_entry_number = 0;
	struct mfs_dirent* current_dir_entry = NULL;
	u128 end_cluster = get_end_cluster(volume);

	printf("get_dentry: %s\n", file_name);

	/* Walk every cluster belonging to this directory. */
	while(current_cluster_number != end_cluster)
	{
		printf("current_cluster_number: %d\n", current_cluster_number);
		read_position = read_cluster(volume, current_cluster_number);
#ifdef __KERNEL__
		seek_volume(volume, read_position);
#else
		seek_volume(volume, read_position, SEEK_SET);
#endif
		read_volume(volume, cluster, sizeof(u8_t), CLUSTER_SIZE);

		current_dir_entry = get_first_entry(cluster, &current_entry_number, has_long_file_name_next_entry);
		while(current_entry_number != entry_per_data_cluster)
		{
			printf("current entry number after : %d\n", current_entry_number);
			/* Only regular-file entries can match. */
			if(is_normal_file(current_dir_entry->attribute) == TRUE)
			{
				/* Long file names are assembled from multiple entries. */
				if(is_long_file_name(current_dir_entry->attribute) == TRUE)
				{
					composite_long_file_name(volume, current_cluster_number, current_entry_number, composited_file_name);
				}
				else
				{
					strcpy(composited_file_name, current_dir_entry->name);
				}

				if(!strcmp(file_name, composited_file_name))
				{
					/* Found: hand a copy back to the caller. */
					memcpy(searched_dir_entry, current_dir_entry, sizeof(struct mfs_dirent));
					return TRUE;
				}
			}
			/* Advance to the next entry. */
			current_dir_entry = get_next_entry(cluster, &current_entry_number, &has_long_file_name_next_entry);
		}
		current_cluster_number = read_fat_index(volume, current_cluster_number);
		printf("read fat index : %d\n", current_cluster_number);
	}

	printf("file not exist\n");
	return FALSE;
}
/*
 * process_logs - replay an access-log file through the cache simulator.
 *
 * Reads records from @log_file one at a time via get_next_entry()
 * (returns 0 on success), keeps one uri_entry_t per distinct cacheable
 * URI in a GHashTable, accumulates the global delivery/duplication
 * counters, updates per-object hotness, and finally dumps the summary
 * statistics to log_fp.
 */
static void process_logs(char *log_file)
{
	uint64_t content_length, start_offset, end_offset;
	int resp, cacheable, provider;
	time_t acs_time;
	char uri[20480];
	GHashTable *uri_hash_table = NULL;
	uri_entry_t *objp;
	int i;

	/* NOTE(review): g_thread_init() is deprecated (no-op since glib 2.32). */
	g_thread_init(NULL);
	uri_hash_table = g_hash_table_new(g_str_hash, g_str_equal);
	for(i=0;i<MAX_HOT_VAL;i++)
		TAILQ_INIT(&hotness_bucket[i]);

	/* Input loop: one log record per iteration. */
	while(!get_next_entry(log_file, uri, &content_length, &resp, &start_offset, &end_offset, &provider, &cacheable, &acs_time)) {
		/* Runtime statistics every millionth line.  NOTE(review): line_no
		 * is not advanced here — presumably get_next_entry() does; verify. */
		if(!(line_no % 1000000))
			fprintf(log_fp, "Cache_hit_ratio = %ld\n", cache_hit_ratio);
		if(!uri[0]) continue;       /* skip records with no URI */
		if(!acs_time) continue;     /* skip records with no timestamp */
		total_num_requests++;
		objp = g_hash_table_lookup(uri_hash_table, uri);
		if(!objp) {
			if(cacheable) {
				/* NOTE(review): allocation is sized by MAX_URI_LEN rather
				 * than sizeof(uri_entry_t), and neither calloc nor malloc
				 * is NULL-checked — confirm intent / add checks. */
				objp = calloc(1, MAX_URI_LEN);
				objp->uri_name = malloc(strlen(uri) + 1);
				strcpy(objp->uri_name, uri);
				/* Zeroes the first 8 bytes of the stack uri via a type-pun.
				 * NOTE(review): intent unclear and this is a strict-aliasing
				 * violation — confirm whether it is needed at all. */
				*(uint64_t *)&uri[0] = 0;
				g_hash_table_insert(uri_hash_table, objp->uri_name, objp);
				objp->state = ENTRY_NEW;
			}
		} else {
			total_dup_requests++;
		}
		total_bytes_delivered += content_length;
		if(cacheable) {
			if(objp->state & ENTRY_NEW) {
				/* First sighting of a cacheable object: count it into RAM. */
				total_num_cacheable_entries++;
				total_bytes_in_ram += content_length;
				objp->state = ENTRY_NOT_INGESTED | ENTRY_IN_RAM;
			} else {
				if(!(objp->state & ENTRY_EVICTED)) {
					/* Duplicate hit: credit the bytes already held. */
					if(resp == 200) {
						total_dup_bytes_delivered += content_length;
					} else if(resp == 206) {
						/* Range request: credit only the overlap between the
						 * requested range and the object's cached range. */
						if(start_offset && (start_offset > objp->start_offset) && (start_offset < objp->end_offset) && (end_offset > objp->end_offset))
							total_dup_bytes_delivered += (objp->end_offset - start_offset);
						if(start_offset && (start_offset > objp->start_offset) && (start_offset < objp->end_offset) && (end_offset < objp->end_offset))
							total_dup_bytes_delivered += (end_offset - start_offset);
						if(start_offset && (start_offset < objp->start_offset) && (end_offset > objp->start_offset) && (end_offset < objp->end_offset))
							total_dup_bytes_delivered += (end_offset - objp->start_offset);
						if(start_offset && (start_offset < objp->start_offset) && (end_offset > objp->start_offset) && (end_offset > objp->end_offset))
							total_dup_bytes_delivered += (objp->end_offset - objp->start_offset);
					}
					cache_hit_ratio = (total_dup_bytes_delivered * 100) / (total_bytes_delivered);
				} else {
					/* Object was evicted: treat this as a fresh arrival. */
					objp->state = ENTRY_NOT_INGESTED | ENTRY_IN_RAM;
					objp->hotness = 0;
				}
			}
			/* Update access time and cached byte range. */
			objp->last_access_time = acs_time;
			nkn_cur_ts = acs_time;
			if(resp == 206) {
				/* Widen the cached range to cover this partial response. */
				if(start_offset && start_offset < objp->start_offset) objp->start_offset = start_offset;
				if(end_offset && end_offset > objp->end_offset) objp->end_offset = end_offset;
			}
			if(resp == 200) {
				/* Full response: the whole object is now cached. */
				objp->start_offset = 0;
				objp->end_offset = content_length;
			}
			/* Analytics plugin */
			hotness_update(objp);
			if(objp->state & ENTRY_NOT_INGESTED) {
				/* Ingest plugin */
				//total_bytes_in_cache += ingest(objp);
			}
		}
		/* Ram Eviction plugin */
		/* Disk Evict plugin */
	}

	/* Final summary dump. */
	fprintf(log_fp, "\nTotal_bytes_delivered = %ld\n", total_bytes_delivered);
	fprintf(log_fp, "Total_bytes_in_ram = %ld\n", total_bytes_in_ram);
	fprintf(log_fp, "Total_num_requests = %ld\n", total_num_requests);
	fprintf(log_fp, "Total_num_dup_requests = %ld\n", total_dup_requests);
	fprintf(log_fp, "Total_num_cacheable_entries = %ld\n", total_num_cacheable_entries);
	fprintf(log_fp, "Total_dup_bytes_delivered = %ld\n", total_dup_bytes_delivered);
	fprintf(log_fp, "cache_hit_ratio = %ld\n", cache_hit_ratio);
	fprintf(log_fp, "Hotness Table \n");
	fprintf(log_fp, "-----------------------------\n");
	fprintf(log_fp, "Hotness\tNum. objects\n");
	for(i=0;i<MAX_HOT_VAL;i++) {
		if(num_hot_objects[i])
			fprintf(log_fp, "%d\t%ld\n", i, num_hot_objects[i]);
	}
	fprintf(log_fp, "-----------------------------\n");
	/* Post Process */
}