/*---------------------------------------------------------------------------*/ int cfs_read(int fd, void *buf, unsigned size) { struct file_desc *fdp; struct file *file; #if COFFEE_MICRO_LOGS struct file_header hdr; struct log_param lp; unsigned bytes_left; int r; #endif if(!(FD_VALID(fd) && FD_READABLE(fd))) { return -1; } fdp = &coffee_fd_set[fd]; file = fdp->file; if(fdp->offset + size > file->end) { size = file->end - fdp->offset; } /* If the file is allocated, read directly in the file. */ if(!FILE_MODIFIED(file)) { COFFEE_READ(buf, size, absolute_offset(file->page, fdp->offset)); fdp->offset += size; return size; } #if COFFEE_MICRO_LOGS read_header(&hdr, file->page); /* * Fill the buffer by copying from the log in first hand, or the * ordinary file if the page has no log record. */ for(bytes_left = size; bytes_left > 0; bytes_left -= r) { r = -1; lp.offset = fdp->offset; lp.buf = buf; lp.size = bytes_left; r = read_log_page(&hdr, file->record_count, &lp); /* Read from the original file if we cannot find the data in the log. */ if(r < 0) { COFFEE_READ(buf, lp.size, absolute_offset(file->page, fdp->offset)); r = lp.size; } fdp->offset += r; buf = (char *)buf + r; } #endif /* COFFEE_MICRO_LOGS */ return size; }
/*---------------------------------------------------------------------------*/ int cfs_read(int fd, void *buf, unsigned size) { struct file_desc *fdp; struct file *file; #if COFFEE_MICRO_LOGS struct file_header hdr; struct log_param lp; unsigned bytes_left; int r; #endif if(!(FD_VALID(fd) && FD_READABLE(fd))) { return -1; } fdp = &coffee_fd_set[fd]; file = fdp->file; if(fdp->offset + size > file->end) { size = file->end - fdp->offset; } /* If the file is not modified, read directly from the file extent. */ if(!FILE_MODIFIED(file)) { COFFEE_READ(buf, size, absolute_offset(file->page, fdp->offset)); fdp->offset += size; return size; } #if COFFEE_MICRO_LOGS read_header(&hdr, file->page); /* * Copy the contents of the most recent log record. If there is * no log record for the file area to read from, we simply read * from the original file extent. */ for(bytes_left = size; bytes_left > 0; bytes_left -= r) { lp.offset = fdp->offset; lp.buf = buf; lp.size = bytes_left; r = read_log_page(&hdr, file->record_count, &lp); /* Read from the original file if we cannot find the data in the log. */ if(r < 0) { COFFEE_READ(buf, lp.size, absolute_offset(file->page, fdp->offset)); r = lp.size; } fdp->offset += r; buf = (char *)buf + r; } #endif /* COFFEE_MICRO_LOGS */ return size; }
static int read_log_page(struct file_header *hdr, int16_t record_count, struct log_param *lp) { uint16_t region; int16_t match_index; uint16_t log_record_size; uint16_t log_records; cfs_offset_t base; uint16_t search_records; adjust_log_config(hdr, &log_record_size, &log_records); region = modify_log_buffer(log_record_size, &lp->offset, &lp->size); search_records = record_count < 0 ? log_records : record_count; match_index = get_record_index(hdr->log_page, search_records, region); if(match_index < 0) { return -1; } base = absolute_offset(hdr->log_page, log_records * sizeof(region)); base += (cfs_offset_t)match_index * log_record_size; base += lp->offset; COFFEE_READ(lp->buf, lp->size, base); return lp->size; }
/*---------------------------------------------------------------------------*/
/*
 * Determine the logical end of the file whose header starts at page
 * 'start' by locating the last non-zero byte in its allocated pages.
 *
 * NOTE(review): the countdown loop assumes coffee_page_t is signed;
 * if it is an unsigned typedef, 'page >= 0' never becomes false --
 * confirm the typedef.
 */
static cfs_offset_t
file_end(coffee_page_t start)
{
  struct file_header hdr;
  unsigned char page_buf[COFFEE_PAGE_SIZE];
  coffee_page_t page;
  int byte;

  read_header(&hdr, start);

  /*
   * Move from the end of the range towards the beginning and look for
   * a byte that has been modified.
   *
   * An important implication of this is that if the last written bytes
   * are zeroes, then these are skipped from the calculation.
   */
  for(page = hdr.max_pages - 1; page >= 0; page--) {
    COFFEE_READ(page_buf, sizeof(page_buf), (start + page) * COFFEE_PAGE_SIZE);
    for(byte = COFFEE_PAGE_SIZE - 1; byte >= 0; byte--) {
      if(page_buf[byte] != 0) {
        if(page == 0 && byte < sizeof(hdr)) {
          /* Only the header itself is non-zero: the file is empty. */
          return 0;
        }
        return 1 + byte + (page * COFFEE_PAGE_SIZE) - sizeof(hdr);
      }
    }
  }

  /* All bytes are writable. */
  return 0;
}
/*---------------------------------------------------------------------------*/
/* Load the file header stored at the beginning of 'page' into *hdr. */
static void
read_header(struct file_header *hdr, coffee_page_t page)
{
  COFFEE_READ(hdr, sizeof(*hdr), page * COFFEE_PAGE_SIZE);
#if DEBUG
  /* An active header should also be valid; anything else is suspicious. */
  if(HDR_ACTIVE(*hdr) && !HDR_VALID(*hdr)) {
    PRINTF("Invalid header at page %u!\n", (unsigned)page);
  }
#endif
}
/*---------------------------------------------------------------------------*/ int cfs_read(int fd, void *buf, unsigned size) { struct file_header hdr; struct file_desc *fdp; struct file *file; unsigned bytes_left; int r; #if COFFEE_MICRO_LOGS struct log_param lp; #endif if(!(FD_VALID(fd) && FD_READABLE(fd))) { return -1; } fdp = &coffee_fd_set[fd]; file = fdp->file; if(fdp->offset + size > file->end) { size = file->end - fdp->offset; } bytes_left = size; if(FILE_MODIFIED(file)) { read_header(&hdr, file->page); } /* * Fill the buffer by copying from the log in first hand, or the * ordinary file if the page has no log record. */ while(bytes_left) { watchdog_periodic(); r = -1; #if COFFEE_MICRO_LOGS if(FILE_MODIFIED(file)) { lp.offset = fdp->offset; lp.buf = buf; lp.size = bytes_left; r = read_log_page(&hdr, file->record_count, &lp); } #endif /* COFFEE_MICRO_LOGS */ /* Read from the original file if we cannot find the data in the log. */ if(r < 0) { r = bytes_left; COFFEE_READ(buf, r, absolute_offset(file->page, fdp->offset)); } bytes_left -= r; fdp->offset += r; buf += r; } return size; }
/*---------------------------------------------------------------------------*/
/*
 * Compute the end-of-file offset for the file starting at 'first_page'.
 * The header's eof_hint bitmap narrows the search to one part of the
 * file; within that part, the last non-zero byte marks the end.
 *
 * NOTE(review): the scan starts at 'first_page + range_end', one page
 * beyond the clamped range -- looks like an off-by-one; verify against
 * the eof_hint semantics before changing it.
 */
static int32_t
find_offset_in_file(int first_page)
{
  struct file_header hdr;
  unsigned char page_buf[COFFEE_PAGE_SIZE];
  int page;
  int i;
  int search_limit;
  uint32_t range_start, range_end, part_size;

  READ_HEADER(&hdr, first_page);

  /* The highest bit set in eof_hint selects which part to search. */
  search_limit = 0;
  for(i = 0; i < sizeof(hdr.eof_hint) * CHAR_BIT; i++) {
    if(hdr.eof_hint >> i) {
      search_limit = i + 1;
    }
  }

  part_size = hdr.max_pages / sizeof(hdr.eof_hint) / CHAR_BIT;
  if(part_size == 0) {
    part_size = 1;
  }
  range_start = part_size * search_limit;
  range_end = range_start + part_size;
  if(range_end > hdr.max_pages) {
    range_end = hdr.max_pages;
  }

  /*
   * Move from the end of the range towards the beginning and look for
   * a byte that has been modified.
   *
   * An important implication of this is that if the last written bytes
   * are zeroes, then these are skipped from the calculation.
   */
  for(page = first_page + range_end; page >= first_page + range_start; page--) {
    watchdog_periodic();
    COFFEE_READ(page_buf, sizeof(page_buf), page * COFFEE_PAGE_SIZE);
    for(i = COFFEE_PAGE_SIZE - 1; i >= 0; i--) {
      if(page_buf[i] == 0) {
        continue;
      }
      if(page == first_page) {
        /* Bytes inside the header do not count as file contents. */
        return i < sizeof(hdr) ? 0 : 1 + i - sizeof(hdr);
      }
      return 1 + i + (page - first_page) * COFFEE_PAGE_SIZE - sizeof(hdr);
    }
  }

  /* All bytes are writable. */
  return 0;
}
/*---------------------------------------------------------------------------*/
/*
 * Search the log's region index table backwards for the most recent
 * record that belongs to 'region'. Returns the record's index in the
 * table, or -1 if the region has never been logged.
 */
static int
get_record_index(coffee_page_t log_page, uint16_t search_records,
                 uint16_t region)
{
  cfs_offset_t read_pos;
  uint16_t scanned;
  uint16_t chunk;
  int16_t found, i;

  /* Start just past the last index entry and work towards the front. */
  read_pos = absolute_offset(log_page, sizeof(uint16_t) * search_records);
  chunk = search_records > COFFEE_LOG_TABLE_LIMIT ?
          COFFEE_LOG_TABLE_LIMIT : search_records;
  scanned = 0;
  found = -1;

  {
    uint16_t entries[chunk];

    while(scanned < search_records && found < 0) {
      if(chunk + scanned > search_records) {
        chunk = search_records - scanned;
      }
      read_pos -= chunk * sizeof(entries[0]);
      COFFEE_READ(&entries, sizeof(entries[0]) * chunk, read_pos);

      /* Entries store region + 1 so that zero means "unused". */
      for(i = chunk - 1; i >= 0; i--) {
        if(entries[i] - 1 == region) {
          found = search_records - scanned - (chunk - i);
          break;
        }
      }
      scanned += chunk;
    }
  }

  return found;
}
/*---------------------------------------------------------------------------*/
/*
 * Find the index of the next free log record for 'file', reading the
 * region index table at 'log_page' in batches. A zero entry marks an
 * unused record slot. Returns the cached record count when available,
 * the index of the first free slot otherwise, or 'log_records' when
 * the log is full.
 *
 * Fixes: the inner-loop 'break' previously did not stop the outer
 * batch loop, so later batches overwrote the found index (skipping
 * free slots); and when no free slot existed across multiple batches,
 * the last batch size was returned instead of 'log_records'.
 */
static int
find_next_record(struct file *file, coffee_page_t log_page,
                 int log_records)
{
  int log_record, preferred_batch_size;

  if(file->record_count >= 0) {
    return file->record_count;
  }

  preferred_batch_size = log_records > COFFEE_LOG_TABLE_LIMIT ?
                         COFFEE_LOG_TABLE_LIMIT : log_records;
  {
    /* The next log record is unknown at this point; search for it. */
    uint16_t indices[preferred_batch_size];
    uint16_t processed;
    uint16_t batch_size;
    int i;

    log_record = log_records;
    for(processed = 0; processed < log_records; processed += batch_size) {
      batch_size = log_records - processed >= preferred_batch_size ?
                   preferred_batch_size : log_records - processed;

      COFFEE_READ(&indices, batch_size * sizeof(indices[0]),
                  absolute_offset(log_page, processed * sizeof(indices[0])));

      for(i = 0; i < batch_size; i++) {
        if(indices[i] == 0) {
          /* First unused slot found; stop searching. */
          return i + processed;
        }
      }
    }
  }

  /* No free slot: the log is full. */
  return log_record;
}
static int write_log_page(struct file *file, struct log_param *lp) { struct file_header hdr; uint16_t region; coffee_page_t log_page; int16_t log_record; uint16_t log_record_size; uint16_t log_records; cfs_offset_t offset; struct log_param lp_out; read_header(&hdr, file->page); adjust_log_config(&hdr, &log_record_size, &log_records); region = modify_log_buffer(log_record_size, &lp->offset, &lp->size); log_page = 0; if(HDR_MODIFIED(hdr)) { /* A log structure has already been created. */ log_page = hdr.log_page; log_record = find_next_record(file, log_page, log_records); if(log_record >= log_records) { /* The log is full; merge the log. */ PRINTF(("Coffee: Merging the file %s with its log\n", hdr.name)); return merge_log(file->page, 0); } } else { /* Create a log structure. */ log_page = create_log(file, &hdr); if(log_page == INVALID_PAGE) { return -1; } PRINTF(("Coffee: Created a log structure for file %s at page %u\n", hdr.name, (unsigned)log_page)); hdr.log_page = log_page; log_record = 0; } { char copy_buf[log_record_size]; lp_out.offset = offset = region * log_record_size; lp_out.buf = copy_buf; lp_out.size = log_record_size; if((lp->offset > 0 || lp->size != log_record_size) && read_log_page(&hdr, log_record, &lp_out) < 0) { COFFEE_READ(copy_buf, sizeof(copy_buf), absolute_offset(file->page, offset)); } memcpy(©_buf[lp->offset], lp->buf, lp->size); /* * Write the region number in the region index table. * The region number is incremented to avoid values of zero. */ offset = absolute_offset(log_page, 0); ++region; COFFEE_WRITE(®ion, sizeof(region), offset + log_record * sizeof(region)); offset += log_records * sizeof(region); COFFEE_WRITE(copy_buf, sizeof(copy_buf), offset + log_record * log_record_size); file->record_count = log_record + 1; } return lp->size; }
/*---------------------------------------------------------------------------*/ static int read_log_page(struct file_header *hdr, int16_t last_entry, struct log_param *lp) { uint16_t page; int16_t match_index; int16_t i; uint16_t log_entry_size; uint16_t log_entries; unsigned long base; uint16_t entry_count; uint16_t search_entries; log_entries = hdr->log_entries == 0 ? COFFEE_LOG_SIZE / COFFEE_PAGE_SIZE : hdr->log_entries; log_entry_size = hdr->log_entry_size == 0 ? COFFEE_PAGE_SIZE : hdr->log_entry_size; page = lp->offset / log_entry_size; lp->offset %= log_entry_size; if(lp->size > log_entry_size - lp->offset) { lp->size = log_entry_size - lp->offset; } search_entries = last_entry < 0 ? log_entries : last_entry + 1; entry_count = search_entries > COFFEE_LOG_TABLE_LIMIT ? COFFEE_LOG_TABLE_LIMIT : search_entries; { uint16_t indices[entry_count]; uint16_t processed; uint16_t current_batch_size; base = ABS_OFFSET(hdr->log_page, sizeof(indices[0]) * search_entries); processed = 0; match_index = -1; while(processed < search_entries && match_index < 0) { if(entry_count + processed > search_entries) { current_batch_size = search_entries - processed; } else { current_batch_size = entry_count; } base -= current_batch_size * sizeof(indices[0]); COFFEE_READ(&indices, sizeof(indices[0]) * current_batch_size, base); for(i = current_batch_size - 1; i >= 0; i--) { if(indices[i] - 1 == page) { match_index = search_entries - processed - (current_batch_size - i); break; } } processed += current_batch_size; } if(match_index == -1) { return -1; } base = hdr->log_page * COFFEE_PAGE_SIZE; base += sizeof(struct file_header) + log_entries * sizeof(page); base += (unsigned long)match_index * log_entry_size; base += lp->offset; COFFEE_READ(lp->buf, lp->size, base); } return lp->size; }
/*---------------------------------------------------------------------------*/ static void get_sector_status(uint16_t sector, uint16_t *active, uint16_t *free, uint16_t *obsolete) { uint32_t offset, sector_start; uint32_t end; struct file_header hdr; static int16_t skip_pages; static int last_pages_are_active; int i; *active = *free = *obsolete = 0; if(sector == 0) { skip_pages = 0; last_pages_are_active = 0; } else if(skip_pages > COFFEE_PAGES_PER_SECTOR) { skip_pages -= COFFEE_PAGES_PER_SECTOR; if(last_pages_are_active) { *active = COFFEE_PAGES_PER_SECTOR; } else { *obsolete = COFFEE_PAGES_PER_SECTOR; } return; } sector_start = sector * COFFEE_SECTOR_SIZE; if(last_pages_are_active) { *active = skip_pages; } else { *obsolete = skip_pages; /* Split an obsolete file starting in the previous sector and mark the following pages as isolated. */ offset = sector_start; for(i = 0; i < skip_pages; i++) { COFFEE_READ(&hdr, sizeof(hdr), offset); hdr.flags |= COFFEE_FLAG_ISOLATED; COFFEE_WRITE(&hdr, sizeof(hdr), offset); offset += COFFEE_PAGE_SIZE; } PRINTF("Coffee: Isolated %u pages starting in sector %d\n", (unsigned)skip_pages, (int)sector); } offset = sector_start + skip_pages * COFFEE_PAGE_SIZE; end = (sector + 1) * COFFEE_SECTOR_SIZE; while(offset < end) { COFFEE_READ(&hdr, sizeof(hdr), offset); if(COFFEE_PAGE_ACTIVE(hdr)) { last_pages_are_active = 1; offset += hdr.max_pages * COFFEE_PAGE_SIZE; *active += hdr.max_pages; } else if(COFFEE_PAGE_ISOLATED(hdr)) { last_pages_are_active = 0; offset += COFFEE_PAGE_SIZE; *obsolete++; } else if(COFFEE_PAGE_OBSOLETE(hdr)) { last_pages_are_active = 0; offset += hdr.max_pages * COFFEE_PAGE_SIZE; *obsolete += hdr.max_pages; } else if(COFFEE_PAGE_FREE(hdr)) { *free = (end - offset) / COFFEE_PAGE_SIZE; break; } } skip_pages = *active + *obsolete - COFFEE_PAGES_PER_SECTOR; if(skip_pages > 0) { if(last_pages_are_active) { *active = COFFEE_PAGES_PER_SECTOR - *obsolete; } else { *obsolete = COFFEE_PAGES_PER_SECTOR - *active; } } }