/**
 * Load the initrd from the passed-in address.
 *
 * Validates the ramdisk header, locates the symbol table and file-entry
 * list that follow it, publishes the image address in the global system
 * parameters and identity-maps the whole image.
 *
 * @param header  pointer to the ramdisk header placed in memory by the
 *                bootloader; may be 0 if no initrd was supplied
 * @return INITRD_OK on success, otherwise one of the INITRD_* error codes
 */
uint64_t InitRd::Load(RdHeader *header)
{
    m_header = header;

    // No ramdisk was handed to us at all.
    if (m_header == 0) {
        return INITRD_FILE_NOT_FOUND;
    }

    // Reject anything that does not carry our magic value.
    if (m_header->magic != INITRD_MAGIC) {
        return INITRD_INVALID_FILE;
    }

    // Only one on-disk layout version is understood.
    if (m_header->version != INITRD_VERSION) {
        return INITRD_WRONG_VERSION;
    }

    // Layout: header, then the symbol table, then the file-entry list.
    // (Fixed: consistently read symbol_size through m_header instead of
    // mixing the parameter and the member.)
    m_symbol_table = (char*)((size_t)m_header + sizeof(RdHeader));
    m_entry_list = (RdFileEntry *)((size_t)m_symbol_table + m_header->symbol_size);

    // Set our system parameter for the initrd.
    g_system_params.init_rd = PTR_TO_U64(m_header);

    // Identity-map the whole image so it remains accessible through the
    // virtual memory manager.
    VirtualMemory::MapRange(
        g_system_params.init_rd,
        g_system_params.init_rd,
        m_header->size,
        VIRTUAL_FLAG_PRESENT | VIRTUAL_FLAG_READWRITE | VIRTUAL_FLAG_GLOBAL);

    return INITRD_OK;
}
/**
 * Overwrite an existing blob with a new record value.
 *
 * @param env         the environment the blob lives in
 * @param db          the database owning the blob
 * @param old_blobid  id of the blob being overwritten
 * @param record      the new data (may be a HAM_PARTIAL write)
 * @param flags       HAM_PARTIAL and friends
 * @param new_blobid  out: the id of the (possibly relocated) blob
 * @return HAM_SUCCESS or an error status from the lower layers
 *
 * If the new data fits into the old allocation the blob is overwritten in
 * place (with any surplus returned to the freelist); otherwise the
 * overwrite degenerates into a delete + insert and the blob gets a new id.
 */
ham_status_t
blob_overwrite(ham_env_t *env, ham_db_t *db, ham_offset_t old_blobid,
        ham_record_t *record, ham_u32_t flags, ham_offset_t *new_blobid)
{
    ham_status_t st;
    ham_size_t alloc_size;
    blob_t old_hdr;
    blob_t new_hdr;
    ham_page_t *page;

    /*
     * PARTIAL WRITE
     *
     * if offset+partial_size equals the full record size, then we won't
     * have any gaps. In this case we just write the full record and ignore
     * the partial parameters.
     */
    if (flags&HAM_PARTIAL) {
        if (record->partial_offset==0
                && record->partial_offset+record->partial_size==record->size)
            flags&=~HAM_PARTIAL;
    }

    /*
     * inmemory-databases: free the old blob,
     * allocate a new blob (but if both sizes are equal, just overwrite
     * the data)
     */
    if (env_get_rt_flags(env)&HAM_IN_MEMORY_DB) {
        /* in-memory blob ids are really pointers to the blob header */
        blob_t *nhdr, *phdr=(blob_t *)U64_TO_PTR(old_blobid);

        if (blob_get_size(phdr)==record->size) {
            /* same size: patch the payload in place; memmove in case the
             * caller's buffer overlaps the blob itself */
            ham_u8_t *p=(ham_u8_t *)phdr;
            if (flags&HAM_PARTIAL) {
                memmove(p+sizeof(blob_t)+record->partial_offset,
                        record->data, record->partial_size);
            }
            else {
                memmove(p+sizeof(blob_t), record->data, record->size);
            }
            *new_blobid=(ham_offset_t)PTR_TO_U64(phdr);
        }
        else {
            /* different size: allocate a fresh blob, carry over the old
             * flags, then release the old buffer */
            st=blob_allocate(env, db, record, flags, new_blobid);
            if (st)
                return (st);
            nhdr=(blob_t *)U64_TO_PTR(*new_blobid);
            blob_set_flags(nhdr, blob_get_flags(phdr));

            allocator_free(env_get_allocator(env), phdr);
        }

        return (HAM_SUCCESS);
    }

    ham_assert(old_blobid%DB_CHUNKSIZE==0, (0));

    /*
     * blobs are CHUNKSIZE-allocated
     */
    alloc_size=sizeof(blob_t)+record->size;
    alloc_size += DB_CHUNKSIZE - 1;
    alloc_size -= alloc_size % DB_CHUNKSIZE;

    /*
     * first, read the blob header; if the new blob fits into the
     * old blob, we overwrite the old blob (and add the remaining
     * space to the freelist, if there is any)
     */
    st=__read_chunk(env, 0, &page, old_blobid, (ham_u8_t *)&old_hdr,
            sizeof(old_hdr));
    if (st)
        return (st);

    ham_assert(blob_get_alloc_size(&old_hdr)%DB_CHUNKSIZE==0, (0));

    /*
     * sanity check: the stored self-id must match the id we looked up
     */
    ham_verify(blob_get_self(&old_hdr)==old_blobid,
            ("invalid blobid %llu != %llu", blob_get_self(&old_hdr),
            old_blobid));
    if (blob_get_self(&old_hdr)!=old_blobid)
        return (HAM_BLOB_NOT_FOUND);

    /*
     * now compare the sizes; does the new data fit in the old allocated
     * space?
     */
    if (alloc_size<=blob_get_alloc_size(&old_hdr)) {
        ham_u8_t *chunk_data[2];
        ham_size_t chunk_size[2];

        /*
         * setup the new blob header; keep the old id and flags. Only
         * shrink the allocation if the surplus is big enough to be worth
         * putting on the freelist.
         */
        blob_set_self(&new_hdr, blob_get_self(&old_hdr));
        blob_set_size(&new_hdr, record->size);
        blob_set_flags(&new_hdr, blob_get_flags(&old_hdr));
        if (blob_get_alloc_size(&old_hdr)-alloc_size>SMALLEST_CHUNK_SIZE)
            blob_set_alloc_size(&new_hdr, alloc_size);
        else
            blob_set_alloc_size(&new_hdr, blob_get_alloc_size(&old_hdr));

        /*
         * PARTIAL WRITE
         *
         * if we have a gap at the beginning, then we have to write the
         * blob header and the blob data in two steps; otherwise we can
         * write both immediately
         */
        if ((flags&HAM_PARTIAL) && (record->partial_offset)) {
            /* step 1: the header alone */
            chunk_data[0]=(ham_u8_t *)&new_hdr;
            chunk_size[0]=sizeof(new_hdr);
            st=__write_chunks(env, page, blob_get_self(&new_hdr),
                    HAM_FALSE, HAM_FALSE, chunk_data, chunk_size, 1);
            if (st)
                return (st);

            /* step 2: the payload at header + partial_offset */
            chunk_data[0]=record->data;
            chunk_size[0]=record->partial_size;
            st=__write_chunks(env, page,
                    blob_get_self(&new_hdr)+sizeof(new_hdr)
                            +record->partial_offset,
                    HAM_FALSE, HAM_FALSE, chunk_data, chunk_size, 1);
            if (st)
                return (st);
        }
        else {
            /* header and payload are contiguous: one two-chunk write */
            chunk_data[0]=(ham_u8_t *)&new_hdr;
            chunk_size[0]=sizeof(new_hdr);
            chunk_data[1]=record->data;
            chunk_size[1]=(flags&HAM_PARTIAL)
                    ? record->partial_size : record->size;
            st=__write_chunks(env, page, blob_get_self(&new_hdr),
                    HAM_FALSE, HAM_FALSE, chunk_data, chunk_size, 2);
            if (st)
                return (st);
        }

        /*
         * move remaining data to the freelist
         */
        if (blob_get_alloc_size(&old_hdr)!=blob_get_alloc_size(&new_hdr)) {
            (void)freel_mark_free(env, db,
                    blob_get_self(&new_hdr)+blob_get_alloc_size(&new_hdr),
                    (ham_size_t)(blob_get_alloc_size(&old_hdr)-
                            blob_get_alloc_size(&new_hdr)), HAM_FALSE);
        }

        /*
         * the old rid is the new rid
         */
        *new_blobid=blob_get_self(&new_hdr);

        return (HAM_SUCCESS);
    }
    else {
        /*
         * when the new data is larger, allocate a fresh space for it
         * and discard the old; 'overwrite' has become (delete + insert) now.
         */
        st=blob_allocate(env, db, record, flags, new_blobid);
        if (st)
            return (st);

        (void)freel_mark_free(env, db, old_blobid,
                (ham_size_t)blob_get_alloc_size(&old_hdr), HAM_FALSE);
    }

    return (HAM_SUCCESS);
}
/** * Allocate space in storage for and write the content references by 'data' * (and length 'size') to storage. * * Conditions will apply whether the data is written through cache or direct * to device. * * The content is, of course, prefixed by a BLOB header. * * Partial writes are handled in this function. */ ham_status_t blob_allocate(ham_env_t *env, ham_db_t *db, ham_record_t *record, ham_u32_t flags, ham_offset_t *blobid) { ham_status_t st; ham_page_t *page=0; ham_offset_t addr; blob_t hdr; ham_u8_t *chunk_data[2]; ham_size_t alloc_size; ham_size_t chunk_size[2]; ham_device_t *device=env_get_device(env); ham_bool_t freshly_created = HAM_FALSE; *blobid=0; /* * PARTIAL WRITE * * if offset+partial_size equals the full record size, then we won't * have any gaps. In this case we just write the full record and ignore * the partial parameters. */ if (flags&HAM_PARTIAL) { if (record->partial_offset==0 && record->partial_offset+record->partial_size==record->size) flags&=~HAM_PARTIAL; } /* * in-memory-database: the blobid is actually a pointer to the memory * buffer, in which the blob (with the blob-header) is stored */ if (env_get_rt_flags(env)&HAM_IN_MEMORY_DB) { blob_t *hdr; ham_u8_t *p=(ham_u8_t *)allocator_alloc(env_get_allocator(env), record->size+sizeof(blob_t)); if (!p) { return HAM_OUT_OF_MEMORY; } /* initialize the header */ hdr=(blob_t *)p; memset(hdr, 0, sizeof(*hdr)); blob_set_self(hdr, (ham_offset_t)PTR_TO_U64(p)); blob_set_alloc_size(hdr, record->size+sizeof(blob_t)); blob_set_size(hdr, record->size); /* do we have gaps? 
if yes, fill them with zeroes */ if (flags&HAM_PARTIAL) { ham_u8_t *s=p+sizeof(blob_t); if (record->partial_offset) memset(s, 0, record->partial_offset); memcpy(s+record->partial_offset, record->data, record->partial_size); if (record->partial_offset+record->partial_size<record->size) memset(s+record->partial_offset+record->partial_size, 0, record->size-(record->partial_offset+record->partial_size)); } else { memcpy(p+sizeof(blob_t), record->data, record->size); } *blobid=(ham_offset_t)PTR_TO_U64(p); return (0); } memset(&hdr, 0, sizeof(hdr)); /* * blobs are CHUNKSIZE-allocated */ alloc_size=sizeof(blob_t)+record->size; alloc_size += DB_CHUNKSIZE - 1; alloc_size -= alloc_size % DB_CHUNKSIZE; /* * check if we have space in the freelist */ st = freel_alloc_area(&addr, env, db, alloc_size); if (!addr) { if (st) return st; /* * if the blob is small AND if logging is disabled: load the page * through the cache */ if (__blob_from_cache(env, alloc_size)) { st = db_alloc_page(&page, db, PAGE_TYPE_BLOB, PAGE_IGNORE_FREELIST); ham_assert(st ? page == NULL : 1, (0)); ham_assert(!st ? 
page != NULL : 1, (0)); if (st) return st; /* blob pages don't have a page header */ page_set_npers_flags(page, page_get_npers_flags(page)|PAGE_NPERS_NO_HEADER); addr=page_get_self(page); /* move the remaining space to the freelist */ (void)freel_mark_free(env, db, addr+alloc_size, env_get_pagesize(env)-alloc_size, HAM_FALSE); blob_set_alloc_size(&hdr, alloc_size); } else { /* * otherwise use direct IO to allocate the space */ ham_size_t aligned=alloc_size; aligned += env_get_pagesize(env) - 1; aligned -= aligned % env_get_pagesize(env); st=device->alloc(device, aligned, &addr); if (st) return (st); /* if aligned!=size, and the remaining chunk is large enough: * move it to the freelist */ { ham_size_t diff=aligned-alloc_size; if (diff > SMALLEST_CHUNK_SIZE) { (void)freel_mark_free(env, db, addr+alloc_size, diff, HAM_FALSE); blob_set_alloc_size(&hdr, aligned-diff); } else { blob_set_alloc_size(&hdr, aligned); } } freshly_created = HAM_TRUE; } ham_assert(HAM_SUCCESS == freel_check_area_is_allocated(env, db, addr, alloc_size), (0)); } else { ham_assert(!st, (0)); blob_set_alloc_size(&hdr, alloc_size); } blob_set_size(&hdr, record->size); blob_set_self(&hdr, addr); /* * PARTIAL WRITE * * are there gaps at the beginning? If yes, then we'll fill with zeros */ if ((flags&HAM_PARTIAL) && (record->partial_offset)) { ham_u8_t *ptr; ham_size_t gapsize=record->partial_offset; ptr=allocator_calloc(env_get_allocator(env), gapsize > env_get_pagesize(env) ? 
env_get_pagesize(env) : gapsize); if (!ptr) return (HAM_OUT_OF_MEMORY); /* * first: write the header */ chunk_data[0]=(ham_u8_t *)&hdr; chunk_size[0]=sizeof(hdr); st=__write_chunks(env, page, addr, HAM_TRUE, freshly_created, chunk_data, chunk_size, 1); if (st) return (st); addr+=sizeof(hdr); /* now fill the gap; if the gap is bigger than a pagesize we'll * split the gap into smaller chunks */ while (gapsize>=env_get_pagesize(env)) { chunk_data[0]=ptr; chunk_size[0]=env_get_pagesize(env); st=__write_chunks(env, page, addr, HAM_TRUE, freshly_created, chunk_data, chunk_size, 1); if (st) break; gapsize-=env_get_pagesize(env); addr+=env_get_pagesize(env); } /* fill the remaining gap */ if (gapsize) { chunk_data[0]=ptr; chunk_size[0]=gapsize; st=__write_chunks(env, page, addr, HAM_TRUE, freshly_created, chunk_data, chunk_size, 1); if (st) return (st); addr+=gapsize; } allocator_free(env_get_allocator(env), ptr); /* now write the "real" data */ chunk_data[0]=(ham_u8_t *)record->data; chunk_size[0]=record->partial_size; st=__write_chunks(env, page, addr, HAM_TRUE, freshly_created, chunk_data, chunk_size, 1); if (st) return (st); addr+=record->partial_size; } else { /* * not writing partially: write header and data, then we're done */ chunk_data[0]=(ham_u8_t *)&hdr; chunk_size[0]=sizeof(hdr); chunk_data[1]=(ham_u8_t *)record->data; chunk_size[1]=(flags&HAM_PARTIAL) ? record->partial_size : record->size; st=__write_chunks(env, page, addr, HAM_TRUE, freshly_created, chunk_data, chunk_size, 2); if (st) return (st); addr+=sizeof(hdr)+ ((flags&HAM_PARTIAL) ? record->partial_size : record->size); } /* * store the blobid; it will be returned to the caller */ *blobid=blob_get_self(&hdr); /* * PARTIAL WRITES: * * if we have gaps at the end of the blob: just append more chunks to * fill these gaps. Since they can be pretty large we split them into * smaller chunks if necessary. 
*/ if (flags&HAM_PARTIAL) { if (record->partial_offset+record->partial_size < record->size) { ham_u8_t *ptr; ham_size_t gapsize=record->size - (record->partial_offset+record->partial_size); /* now fill the gap; if the gap is bigger than a pagesize we'll * split the gap into smaller chunks * * we split this loop in two - the outer loop will allocate the * memory buffer, thus saving some allocations */ while (gapsize>env_get_pagesize(env)) { ham_u8_t *ptr=allocator_calloc(env_get_allocator(env), env_get_pagesize(env)); if (!ptr) return (HAM_OUT_OF_MEMORY); while (gapsize>env_get_pagesize(env)) { chunk_data[0]=ptr; chunk_size[0]=env_get_pagesize(env); st=__write_chunks(env, page, addr, HAM_TRUE, freshly_created, chunk_data, chunk_size, 1); if (st) break; gapsize-=env_get_pagesize(env); addr+=env_get_pagesize(env); } allocator_free(env_get_allocator(env), ptr); if (st) return (st); } /* now write the remainder, which is less than a pagesize */ ham_assert(gapsize<env_get_pagesize(env), ("")); chunk_size[0]=gapsize; ptr=chunk_data[0]=allocator_calloc(env_get_allocator(env), gapsize); if (!ptr) return (HAM_OUT_OF_MEMORY); st=__write_chunks(env, page, addr, HAM_TRUE, freshly_created, chunk_data, chunk_size, 1); allocator_free(env_get_allocator(env), ptr); if (st) return (st); } } return (0); }
/**
 * Pipelines a batch of read/write requests to a FireWire device through the
 * Linux firewire-cdev character-device interface.
 *
 * Up to REQUEST_PIPELINE_SZ requests are kept in flight; the loop then
 * poll()s for responses, matches each response to its request via the
 * 'closure' field, and (for reads) copies the payload back into the
 * caller's buffer.
 *
 * @param dev   the device to talk to
 * @param t     REQUEST_TYPE_READ or REQUEST_TYPE_WRITE
 * @param req   array of requests
 * @param nreq  number of requests in the array
 * @return FORENSIC1394_RESULT_SUCCESS, or the first error encountered
 *
 * Fixes vs the previous revision: poll() failures are reported as I/O
 * errors (not timeouts), revents is tested with '&' instead of '==' so
 * stray status bits don't masquerade as a timeout, and the request index
 * is size_t to match nreq.
 */
forensic1394_result platform_send_requests(forensic1394_dev *dev,
                                           request_type t,
                                           const forensic1394_req *req,
                                           size_t nreq)
{
    size_t i = 0;       /* next request to submit (size_t: matches nreq) */
    int in_pipeline = 0;

    struct pollfd fdp = { .fd = dev->pdev->fd, .events = POLLIN };

    // Keep going until all requests have been sent and all responses received
    while (i < nreq || in_pipeline > 0)
    {
        // Ensure the request pipeline is full
        while (in_pipeline < REQUEST_PIPELINE_SZ && i < nreq)
        {
            struct fw_cdev_send_request request;

            // Fill out the common request structure
            request.tcode = request_tcode(&req[i], t);
            request.length = req[i].len;
            request.offset = req[i].addr;
            request.data = (t == REQUEST_TYPE_WRITE) ? PTR_TO_U64(req[i].buf)
                                                     : 0;
            // The closure lets us match the async response to request i
            request.closure = i;
            request.generation = dev->generation;

            // Make the request
            if (ioctl(dev->pdev->fd, FW_CDEV_IOC_SEND_REQUEST, &request) == -1)
            {
                // EIO errors are usually because of bad request sizes
                return (errno == EIO) ? FORENSIC1394_RESULT_IO_SIZE
                                      : FORENSIC1394_RESULT_IO_ERROR;
            }

            i++;
            in_pipeline++;
        }

        // Wait for a response
        const int pret = poll(&fdp, 1, FORENSIC1394_TIMEOUT_MS);

        // BUGFIX: a poll() failure used to be reported as a timeout
        if (pret == -1)
        {
            return FORENSIC1394_RESULT_IO_ERROR;
        }
        // poll() timed out with nothing to read
        else if (pret == 0)
        {
            return FORENSIC1394_RESULT_IO_TIMEOUT;
        }

        // If we got a response (BUGFIX: test the bit; revents may also
        // carry POLLERR/POLLHUP alongside POLLIN)
        if (fdp.revents & POLLIN)
        {
            char buffer[16 * 1024];
            ssize_t response_len;
            union fw_cdev_event *event = (void *) buffer;

            // Read an event from the device; blocking if need be
            response_len = read(dev->pdev->fd, buffer, sizeof(buffer));

            // Problem reading the response back from the device
            if (response_len == -1)
            {
                return FORENSIC1394_RESULT_IO_ERROR;
            }

            switch (event->common.type)
            {
            // We have a response to our request (input or output)
            case FW_CDEV_EVENT_RESPONSE:
            {
                // Check the response code
                switch (event->response.rcode)
                {
                // Request was okay; continue processing
                case RCODE_COMPLETE:
                    break;
                case RCODE_BUSY:
                    return FORENSIC1394_RESULT_BUSY;
                // Different generations are a consequence of bus resets
                case RCODE_GENERATION:
                    return FORENSIC1394_RESULT_BUS_RESET;
                default:
                    return FORENSIC1394_RESULT_IO_ERROR;
                }

                // If we are expecting some data
                if (t == REQUEST_TYPE_READ)
                {
                    // Check the lengths match (they should!)
                    if (event->response.length == req[event->common.closure].len)
                    {
                        memcpy(req[event->common.closure].buf,
                               event->response.data,
                               event->response.length);
                    }
                    else
                    {
                        return FORENSIC1394_RESULT_IO_ERROR;
                    }
                }

                in_pipeline--;
                break;
            }
            // Ignore everything else
            default:
                break;
            }
        }
        // Descriptor is readable-error (POLLERR/POLLHUP/POLLNVAL)
        else
        {
            return FORENSIC1394_RESULT_IO_ERROR;
        }
    }

    return FORENSIC1394_RESULT_SUCCESS;
}
/**
 * Fetch database parameters from the remote server.
 *
 * Collects the requested parameter names from 'param', sends a single
 * db_get_parameters request and copies the values from the reply back
 * into the caller's array.
 *
 * @param db     the (remote) database handle
 * @param param  zero-terminated array of ham_parameter_t; values are
 *               filled in place
 * @return HAM_SUCCESS or an error status from the transport/server
 *
 * Fixes vs the previous revision: the filename copy is now guaranteed to
 * be NUL-terminated, and an empty parameter list no longer risks being
 * misreported as out-of-memory when the allocator returns NULL for a
 * zero-byte request.
 */
static ham_status_t
_remote_fun_get_parameters(ham_db_t *db, ham_parameter_t *param)
{
    /* NOTE(review): 'filename' is static so the pointer stored in
     * p->value remains valid after we return; this also makes the
     * function non-reentrant - presumably acceptable for this API, but
     * worth confirming */
    static char filename[1024];
    ham_status_t st;
    ham_env_t *env=db_get_env(db);
    proto_wrapper_t *request, *reply;
    ham_size_t i, num_names=0;
    ham_u32_t *names;
    ham_parameter_t *p;

    /* count number of parameters */
    p=param;
    if (p) {
        for (; p->name; p++) {
            num_names++;
        }
    }

    /* allocate a memory and copy the parameter names */
    names=(ham_u32_t *)allocator_alloc(env_get_allocator(env),
            num_names*sizeof(ham_u32_t));
    /* BUGFIX: only treat NULL as out-of-memory when we actually asked
     * for memory; an allocator may return NULL for a zero-byte request */
    if (!names && num_names)
        return (HAM_OUT_OF_MEMORY);
    p=param;
    if (p) {
        for (i=0; p->name; p++) {
            names[i]=p->name;
            i++;
        }
    }

    request=proto_init_db_get_parameters_request(db_get_remote_handle(db),
            names, num_names);

    st=_perform_request(env, env_get_curl(env), request, &reply);
    proto_delete(request);
    allocator_free(env_get_allocator(env), names);

    if (st) {
        if (reply)
            proto_delete(reply);
        return (st);
    }

    ham_assert(reply!=0, (""));
    ham_assert(proto_has_db_get_parameters_reply(reply), (""));

    st=proto_db_get_parameters_reply_get_status(reply);
    if (st) {
        proto_delete(reply);
        return (st);
    }

    /* copy each requested value out of the reply */
    p=param;
    while (p && p->name) {
        switch (p->name) {
        case HAM_PARAM_CACHESIZE:
            ham_assert(proto_db_get_parameters_reply_has_cachesize(reply), (""));
            p->value=proto_db_get_parameters_reply_get_cachesize(reply);
            break;
        case HAM_PARAM_PAGESIZE:
            ham_assert(proto_db_get_parameters_reply_has_pagesize(reply), (""));
            p->value=proto_db_get_parameters_reply_get_pagesize(reply);
            break;
        case HAM_PARAM_MAX_ENV_DATABASES:
            ham_assert(proto_db_get_parameters_reply_has_max_env_databases(reply), (""));
            p->value=proto_db_get_parameters_reply_get_max_env_databases(reply);
            break;
        case HAM_PARAM_GET_FLAGS:
            ham_assert(proto_db_get_parameters_reply_has_flags(reply), (""));
            p->value=proto_db_get_parameters_reply_get_flags(reply);
            break;
        case HAM_PARAM_GET_FILEMODE:
            ham_assert(proto_db_get_parameters_reply_has_filemode(reply), (""));
            p->value=proto_db_get_parameters_reply_get_filemode(reply);
            break;
        case HAM_PARAM_GET_FILENAME:
            ham_assert(proto_db_get_parameters_reply_has_filename(reply), (""));
            /* BUGFIX: strncpy does not NUL-terminate when the source is
             * as long as the buffer; truncate and terminate explicitly */
            strncpy(filename,
                    proto_db_get_parameters_reply_get_filename(reply),
                    sizeof(filename)-1);
            filename[sizeof(filename)-1]='\0';
            p->value=PTR_TO_U64(&filename[0]);
            break;
        case HAM_PARAM_KEYSIZE:
            ham_assert(proto_db_get_parameters_reply_has_keysize(reply), (""));
            p->value=proto_db_get_parameters_reply_get_keysize(reply);
            break;
        case HAM_PARAM_GET_DATABASE_NAME:
            ham_assert(proto_db_get_parameters_reply_has_dbname(reply), (""));
            p->value=proto_db_get_parameters_reply_get_dbname(reply);
            break;
        case HAM_PARAM_GET_KEYS_PER_PAGE:
            ham_assert(proto_db_get_parameters_reply_has_keys_per_page(reply), (""));
            p->value=proto_db_get_parameters_reply_get_keys_per_page(reply);
            break;
        case HAM_PARAM_GET_DATA_ACCESS_MODE:
            ham_assert(proto_db_get_parameters_reply_has_dam(reply), (""));
            p->value=proto_db_get_parameters_reply_get_dam(reply);
            break;
        default:
            ham_trace(("unknown parameter %d", (int)p->name));
            break;
        }
        p++;
    }

    proto_delete(reply);

    return (st);
}