/* * Every DELAY, check the average load of online CPUs. If the average load * is above up_threshold bring online one more CPU if up_timer has expired. * If the average load is below up_threshold offline one more CPU if the * down_timer has expired. */ static __ref void load_timer(struct work_struct *work) { unsigned int cpu; unsigned int avg_load = 0; if (down_timer < down_timer_cnt) down_timer++; if (up_timer < up_timer_cnt) up_timer++; for_each_online_cpu(cpu) avg_load += cpufreq_quick_get_util(cpu); avg_load /= num_online_cpus(); #if DEBUG pr_debug("%s: avg_load: %u, num_online_cpus: %u\n", __func__, avg_load, num_online_cpus()); pr_debug("%s: up_timer: %u, down_timer: %u\n", __func__, up_timer, down_timer); #endif if (avg_load >= up_threshold && up_timer >= up_timer_cnt) up_one(); else if (down_timer >= down_timer_cnt) down_one(); queue_delayed_work_on(0, dyn_workq, &dyn_work, delay); }
/*
 * Drop one reference from an open FAT file object.
 *
 * obj - fatfile_t* previously returned by file_open (must hold a ref).
 * Returns 0 unconditionally.
 */
static s64 file_close(void* obj)
{
    assert(IS_PTR(obj));

    fatfile_t* f = (fatfile_t*)obj;

    down(&f->sem);
    assert(f->ref > 0);
    f->ref -= 1;
    up_one(&f->sem);

    return 0;
}
/*
 * Append a message to a thread's message queue and wake one waiter.
 *
 * t   - target thread
 * msg - message node to append; ownership passes to the queue
 *
 * The list is singly linked via msg->next; msg_head/msg_tail track the
 * ends.  The queue semaphore is raised once per message so a blocked
 * receiver wakes for each enqueue.
 */
void add_thread_msg(thread_t * t, thread_msg_t* msg)
{
    spin_lock(&t->thread_msg_lock);

    if (t->msg_head == NULL) // empty list
    {
        t->msg_head = t->msg_tail = msg;
    }
    else
    {
        t->msg_tail->next = msg;
        t->msg_tail = msg;   // add to list tail
    }

    // BUGFIX: the previous code did `t->msg_tail = NULL;` here, which
    // destroyed the tail pointer itself — the next append on a
    // non-empty list would dereference NULL.  The intent (per the old
    // "tail next is NULL" comment) is to terminate the list at the new
    // tail node:
    msg->next = NULL;

    up_one(&t->thread_msg_sem);
    spin_unlock(&t->thread_msg_lock);
}
/*
 * Recursively resolve `path` relative to directory `file` and open the
 * final component.
 *
 * obj  - fatfile_t* of the directory to resolve against
 * path - remaining path; components separated by '/'
 * flag - O_TRUNC truncates an existing regular file; O_CREAT creates a
 *        missing final component
 * mode - forwarded on recursion (unused at this level)
 *
 * Returns the opened fatfile_t* on success, (void*)-1 when a non-final
 * component is not a directory, (void*)-2 when the final component is
 * missing and O_CREAT was not given.
 *
 * Fixes over the previous version:
 *  - `filename` is now always NUL-terminated (strncpy gives no such
 *    guarantee) and its length is bounds-checked against the buffer;
 *  - a final component without a trailing '/' no longer makes
 *    strchr() return NULL and corrupt the pointer arithmetic;
 *  - character comparisons use '\0' instead of the pointer constant
 *    NULL;
 *  - the kmalloc result is asserted before use, matching the file's
 *    error-handling style.
 */
static void* file_open(void* obj, char* path, s64 flag, s64 mode)
{
    assert(IS_PTR(obj));
    fatfile_t* file = (fatfile_t*)obj;

    down(&file->sem);

    if (*path == '\0') // last name in path: this object is the target
    {
        file->ref++;
        if ((!file->attr.directory) && (flag & O_TRUNC))
            file_trunc(file);
        up_one(&file->sem);
        return file;
    }

    if (!file->attr.directory) // every intermediate name must be a directory
    {
        up_one(&file->sem);
        return (void*)-1;
    }

    // Isolate the next path component.  strchr() returns NULL when the
    // final component carries no trailing '/'; treat that as "rest of
    // the string" by pointing `end` at the terminator instead.
    char filename[32];
    char* end = strchr(path, '/');
    if (end == NULL)
        end = strchr(path, '\0');
    s64 namelen = end - path;
    assert(namelen < (s64)sizeof(filename)); // FAT names always fit
    memcpy(filename, path, namelen);
    filename[namelen] = '\0';
    UpperStr(filename);

    if (*end == '/') // skip '/'
        end++;

    // Look the component up in the cached child list first.
    fatfile_t* child = findChild(file, filename);
    if (child == NULL)
    {
        child = (fatfile_t*)kmalloc(sizeof(fatfile_t));
        assert(child);
        memset(child, 0, sizeof(fatfile_t));
        child->ops = &file_ops;
        sem_init(&child->sem, 1, "file sem");
        child->fatfs = file->fatfs;
        child->name = strdup(filename);

        struct FAT_ENTRY entry;
        int index = findEntryByName(file, filename, &entry);
        if (index >= 0) // present on disk: populate from the dir entry
        {
            child->index = index;
            child->cluster = (entry.start_clusterHI << 16) | entry.start_clusterLO;
            child->size = entry.file_size;
            child->attr = entry.attribute;
            getEntryCreateDate(child, &entry);
            getEntryWriteDate(child, &entry);
        }
        else
        {
            // Not on disk: create it only when this is the final
            // component and O_CREAT was requested.
            if ((flag & O_CREAT) && (*end == '\0'))
            {
                rtcdate rtc;
                cmostime(&rtc);
                child->cdatetime = rtc;
                child->wdatetime = rtc;
                child->dirty = TRUE; // flush a new directory entry later
            }
            else
            {
                kmfree(child->name);
                kmfree(child);
                up_one(&file->sem);
                return (void*)-2;
            }
        }
        insertChild(file, child);
    }

    up_one(&file->sem);
    return file_open(child, end, flag, mode); // recurse into the child
}
/*
 * fat_rw - shared read/write engine for FAT files.
 *
 * file   - file to operate on (locked via file->sem for the duration)
 * buffer - source (FAT_WRITE) or destination (read) of the data
 * size   - byte count, must be > 0
 * pos    - starting byte offset, must be >= 0 and <= current file size
 * mode   - FAT_WRITE to write; anything else reads
 *
 * Returns the number of bytes transferred, or -1 on error (offset past
 * EOF, broken cluster chain, or no free clusters while growing).
 *
 * Writes past EOF extend the cluster chain first; reads past EOF are
 * clamped to the current size.  On a successful write the in-memory
 * size and write timestamp are updated and the entry is marked dirty
 * (flushed elsewhere).
 */
static int fat_rw(fatfile_t* file, u8 * buffer, s64 size, s64 pos, int mode)
{
    assert(file);
    assert(buffer);
    assert(size > 0);
    assert(pos >= 0);

    down(&file->sem);

    int bytes_to_rw = 0, bytes_rw = 0, cluster_offset = 0;
    int cluster, i;
    u64 file_size = file->size;

    // Reject offsets strictly past EOF (pos == file_size is a legal
    // append position).
    if(pos > file_size)
    {
        up_one(&file->sem);
        return -1;
    }

    cluster = file->cluster;
    // get the correct cluster to begin reading/writing from
    i = pos / file->fatfs->cluster_size;
    // we traverse the cluster chain i times
    while (i--)
    {
        // get the next cluster in the file
        cluster = fat_getFATCluster(file->fatfs, cluster);
        // fail if we have gone beyond the files cluster chain
        if (cluster == FAT_FREECLUSTER || cluster == -1)
        {
            up_one(&file->sem);
            return -1;
        }
    }

    //iobuf_t* dir_iobuf = lock_iobuf(file->fatfs, file->dir_cluster);
    //struct FAT_ENTRY* entry = &((struct FAT_ENTRY*)dir_iobuf->cluster_buf)[file->dir_index];

    // reduce size if we are trying to read past the end of the file
    if (pos + size > file_size)
    {
        // but if we are writing we will need to expand the file size
        if (mode == FAT_WRITE)
        {
            int new_clusters = ((pos + size - file_size) / file->fatfs->cluster_size) + 1;
            int prev_cluster = cluster, next_cluster;
            // alloc more clusters
            while (new_clusters--)
            {
                // get a free cluster
                next_cluster = fat_getFreeCluster(file->fatfs);
                if (next_cluster < 0)
                {
                    up_one(&file->sem);
                    return -1;
                }
                // An empty file has no first cluster yet (cluster 0):
                // claim this one as the chain head and mark the
                // directory entry dirty.
                if(prev_cluster == 0)
                {
                    file->cluster = cluster = next_cluster;
                    file->dirty = TRUE; // update entry
                }
                else
                    fat_setFATCluster(file->fatfs, prev_cluster, next_cluster, FALSE);// add it on to the cluster chain
                // update our previous cluster number
                prev_cluster = next_cluster;
            }
            // Terminate the newly grown chain.
            fat_setFATCluster(file->fatfs, prev_cluster, FAT_ENDOFCLUSTER, FALSE);
        }
        else
            size = file_size - pos;
    }

    // Transfer loop: one cluster-sized (or smaller) chunk per pass.
    while (TRUE)
    {
        cluster_offset = pos % file->fatfs->cluster_size;
        bytes_to_rw = file->fatfs->cluster_size - cluster_offset;
        if(bytes_to_rw > size)
            bytes_to_rw = size;

        iobuf_t* iobuf = lock_iobuf(file->fatfs, cluster);
        assert(iobuf);

        if (mode == FAT_WRITE)
        {
            memcpy((iobuf->cluster_buf + cluster_offset), buffer, bytes_to_rw);
            unlock_iobuf(file->fatfs, iobuf, TRUE, FALSE);
        }
        else
        {
            memcpy(buffer, (iobuf->cluster_buf + cluster_offset), bytes_to_rw);
            unlock_iobuf(file->fatfs, iobuf, FALSE, FALSE);
        }

        buffer += bytes_to_rw;
        pos += bytes_to_rw;
        bytes_rw += bytes_to_rw;
        size -= bytes_to_rw;
        if (size <= 0)
            break;

        // Chain must keep going: it was pre-extended above for writes,
        // and reads were clamped to the file size.
        cluster = fat_getFATCluster(file->fatfs, cluster);
        assert(cluster > 1 && cluster != FAT_FREECLUSTER);
    }

    if (mode == FAT_WRITE)
    {
        if(bytes_rw > 0)
        {
            cmostime(&file->wdatetime);
            file->dirty = TRUE; // update entry
            // `pos` has advanced past every written byte and `size` is
            // now <= 0, so pos + size is the final end-of-write offset.
            if(file->size < pos + size)
                file->size = pos + size;
        }
    }

    up_one(&file->sem);
    return bytes_rw;
}
/*
 * lock_iobuf - find (or load) the cluster-sized cache buffer for
 * `cluster`, bump its access and lock counts, and return it.
 *
 * The cache is a singly linked list in rough MRU order: hits are moved
 * to the head; on a miss a new buffer is allocated until iobuf_max is
 * reached, after which the tail buffer is evicted (written back first
 * if dirty) and reused.  The caller must pair this with unlock_iobuf.
 *
 * Returns the locked buffer, or 0 after panic() on a disk I/O error.
 */
iobuf_t* lock_iobuf(fatfs_t* fatfs, int cluster)
{
    ENTER();
    down(&fatfs->sem_iobuflist);

    iobuf_t* ptr = fatfs->iobuflist;
    iobuf_t* ptr_prev = ptr;
    iobuf_t* ptr_prev_prev = ptr;
    while(ptr) // search the cache for this cluster
    {
        if(ptr->cluster_idx == cluster)
            break;
        ptr_prev_prev = ptr_prev;
        ptr_prev = ptr;
        ptr = ptr->next;
    }

    if(ptr)
    {
        // Cache hit: move the buffer to the list head (MRU ordering).
        if(ptr_prev != ptr) // not already the first buffer
        {
            ptr_prev->next = ptr->next;
            ptr->next = fatfs->iobuflist; // put at the head of the list
            fatfs->iobuflist = ptr;
        }
        LOG("cluster %d iobuf found, acccnt:%d locked:%d\n", cluster, ptr->acccnt, ptr->locked);
    }
    else // no cached buffer found
    {
        if(fatfs->iobuf_cnt < fatfs->iobuf_max) // allocate a new buffer
        {
            // buffer header
            ptr = kmalloc(sizeof(iobuf_t));
            assert(ptr);
            ptr->cluster_buf = kmalloc(fatfs->cluster_size);
            assert(ptr->cluster_buf);
            fatfs->iobuf_cnt ++;
            LOG("cluster %d iobuf alloc\n", cluster);
        }
        else // cache full: evict the tail buffer and reuse it
        {
            // NOTE(review): if the list holds exactly one buffer,
            // ptr_prev == ptr_prev_prev == head here and the re-link
            // below makes the node point at itself — this assumes
            // iobuf_max >= 2.  Eviction also ignores ptr->locked;
            // presumably callers never hold a lock across a second
            // lock_iobuf — verify.
            ptr_prev_prev->next = 0; // detach the tail node
            ptr = ptr_prev;
            if(ptr->dirty) // write the old contents back before reuse
            {
                if (fat_rwCluster(fatfs, ptr->cluster_idx, ptr->cluster_buf, FAT_WRITE) == -1)
                {
                    // NOTE(review): the message reports the requested
                    // `cluster`, but the failed write was of
                    // ptr->cluster_idx; sem_iobuflist is also still
                    // held here (moot if panic() never returns).
                    panic("write cluster %d error\n", cluster);
                    return 0;
                }
                LOG("cluster %d iobuf dirty, write to disk\n", cluster);
            }
            LOG("cluster %d iobuf reuse\n", cluster);
        }
        // Reset the (new or recycled) buffer for its new occupant.
        ptr->acccnt= 0;
        ptr->locked= 0;
        ptr->dirty = 0;
        ptr->cluster_idx = cluster;
        ptr->next = fatfs->iobuflist; // put at the head of the list
        fatfs->iobuflist = ptr;
        if (fat_rwCluster(fatfs, cluster, ptr->cluster_buf, FAT_READ) == -1)
        {
            panic("read cluster %d error\n", cluster);
            return 0;
        }
    }

    ptr->acccnt ++;
    ptr->locked ++;

    up_one(&fatfs->sem_iobuflist);
    LEAVE();
    return ptr;
}