/**
 * Copies data from this buffer to the "dst" address. The buffer is
 * shrunk if possible. If the "dst" address is NULL, then the message
 * is dequeued but is not copied.
 */
void
circular_buffer::dequeue(void *dst) {
    // A message is exactly unit_sz bytes; at least one full message
    // must be present before dequeue is legal.
    assert(unit_sz > 0);
    assert(_unread >= unit_sz);
    assert(_unread <= _buffer_sz);
    assert(_buffer);

    KLOG(kernel, mem,
         "circular_buffer dequeue "
         "unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
         _unread, _next, _buffer_sz, unit_sz);

    // Messages never straddle the end of the storage (slots are kept
    // unit_sz-aligned), so one memcpy suffices.
    assert(_next + unit_sz <= _buffer_sz);
    if (dst != NULL) {
        memcpy(dst, &_buffer[_next], unit_sz);
    }
    KLOG(kernel, mem, "shifted data from index %d", _next);

    // Advance the read cursor past the consumed message, wrapping at
    // the end of the storage.
    _unread -= unit_sz;
    _next += unit_sz;
    if (_next == _buffer_sz) {
        _next = 0;
    }

    // Shrink if possible: only when above the initial size and at most
    // a quarter full, to avoid thrashing between grow and shrink.
    if (_buffer_sz > initial_size() && _unread <= _buffer_sz / 4) {
        shrink();
    }
}
/**
 * Copies the data at the "src" address into this buffer. The buffer is
 * grown if it isn't large enough.
 */
void
circular_buffer::enqueue(void *src) {
    assert(src);
    assert(_unread <= _buffer_sz);
    assert(_buffer);

    // Grow if necessary.
    if (_unread == _buffer_sz) {
        grow();
    }

    KLOG(kernel, mem,
         "circular_buffer enqueue "
         "unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
         _unread, _next, _buffer_sz, unit_sz);

    assert(_unread < _buffer_sz);
    assert(_unread + unit_sz <= _buffer_sz);

    // Copy data
    // The write slot sits _unread bytes past the read cursor. Because
    // slots are unit_sz-aligned, the slot either fits entirely before
    // the end of the storage or wraps entirely to the front — it never
    // straddles the boundary (checked by the assert below).
    size_t dst_idx = _next + _unread;
    assert(dst_idx >= _buffer_sz || dst_idx + unit_sz <= _buffer_sz);
    if (dst_idx >= _buffer_sz) {
        dst_idx -= _buffer_sz;
        // After wrapping, the slot must land strictly before the read
        // cursor or it would overwrite unread data.
        assert(_next >= unit_sz);
        assert(dst_idx <= _next - unit_sz);
    }
    assert(dst_idx + unit_sz <= _buffer_sz);
    memcpy(&_buffer[dst_idx], src, unit_sz);
    _unread += unit_sz;
    KLOG(kernel, mem, "circular_buffer pushed data at index: %d", dst_idx);
}
static struct nkfs_btree_node *nkfs_btree_node_alloc(int zero_pages) { struct nkfs_btree_node *node; int i; node = kmem_cache_alloc(nkfs_btree_node_cachep, GFP_NOIO); if (!node) { KLOG(KL_ERR, "no memory"); return NULL; } memset(node, 0, sizeof(*node)); node->header = alloc_page(GFP_KERNEL); if (!node->header) goto fail; for (i = 0; i < ARRAY_SIZE(node->keys); i++) { node->keys[i] = alloc_page(GFP_KERNEL); if (!node->keys[i]) goto fail; } for (i = 0; i < ARRAY_SIZE(node->childs); i++) { node->childs[i] = alloc_page(GFP_KERNEL); if (!node->childs[i]) goto fail; } for (i = 0; i < ARRAY_SIZE(node->values); i++) { node->values[i] = alloc_page(GFP_KERNEL); if (!node->values[i]) goto fail; } if (zero_pages) nkfs_btree_node_zero_pages(node); node->t = NKFS_BTREE_T; node->sig1 = NKFS_BTREE_SIG1; node->sig2 = NKFS_BTREE_SIG2; atomic_set(&node->ref, 1); KLOG(KL_DBG1, "node %p", node); return node; fail: __nkfs_btree_node_free(node); return NULL; }
/*
 * Deliver one message to this port. The message is enqueued under the
 * port lock; if the owning task is currently blocked on exactly this
 * port, the message is immediately handed off into the task's
 * rendezvous pointer and the task is woken. Otherwise the port
 * selector is notified in case the task is blocked on a group of
 * ports.
 */
void rust_port::send(void *sptr) {
    bool did_rendezvous = false;
    {
        scoped_lock with(lock);

        buffer.enqueue(sptr);

        assert(!buffer.is_empty() &&
               "rust_chan::transmit with nothing to send.");

        {
            // lifecycle_lock is taken while still holding the port
            // lock: blocked_on(), the dequeue into rendezvous_ptr and
            // wakeup_inner() must happen atomically with respect to
            // the task blocking/unblocking.
            scoped_lock with(task->lifecycle_lock);
            if (task->blocked_on(this)) {
                KLOG(kernel, comm, "dequeued in rendezvous_ptr");
                buffer.dequeue(task->rendezvous_ptr);
                task->rendezvous_ptr = 0;
                task->wakeup_inner(this);
                did_rendezvous = true;
            }
        }
    }
    if (!did_rendezvous) {
        // If the task wasn't waiting specifically on this port,
        // it may be waiting on a group of ports
        rust_port_selector *port_selector = task->get_port_selector();
        // The port selector will check if the task is blocked, not us.
        port_selector->msg_sent_on(this);
    }
}
/*
 * Bring up the process subsystem: the process tables are initialized
 * first, then the initial processes themselves.
 */
void InitProc(void)
{
    KLOG ("InitProc() ***");

    InitProcessTables();
    InitProcesses();
}
static void __nkfs_btree_node_free(struct nkfs_btree_node *node) { int i; KLOG(KL_DBG1, "node %p leaf %d nr_keys %d", node, node->leaf, node->nr_keys); for (i = 0; i < ARRAY_SIZE(node->keys); i++) { if (node->keys[i]) put_page(node->keys[i]); } for (i = 0; i < ARRAY_SIZE(node->childs); i++) { if (node->childs[i]) put_page(node->childs[i]); } for (i = 0; i < ARRAY_SIZE(node->values); i++) { if (node->values[i]) put_page(node->values[i]); } if (node->header) put_page(node->header); kmem_cache_free(nkfs_btree_node_cachep, node); }
/*
 * Destructor: releases the backing storage. The buffer must be empty
 * (_unread == 0) — destroying it with pending messages would silently
 * drop them, hence the assert.
 */
circular_buffer::~circular_buffer() {
    KLOG(kernel, mem, "~circular_buffer 0x%" PRIxPTR, this);
    assert(_buffer);
    assert(_unread == 0 && "didn't expect bytes in the circular buffer");
    kernel->free(_buffer);
}
void rust_scheduler::create_task_threads() { KLOG(kernel, kern, "Using %d scheduler threads.", num_threads); for(size_t i = 0; i < num_threads; ++i) { threads.push(create_task_thread(i)); } }
/*
 * Tear down one task thread. The thread's rust_srv pointer is saved
 * BEFORE deleting the thread: the srv is read through the thread
 * object, which must not be touched after "delete thread".
 */
void
rust_scheduler::destroy_task_thread(rust_task_thread *thread) {
    KLOG(kernel, kern, "deleting task thread: " PTR ", name: %s, index: %d",
         thread, thread->name, thread->list_index);
    rust_srv *srv = thread->srv;
    delete thread;
    delete srv;
}
/*
 * Last-reference teardown of an in-memory node: unlink it from the
 * tree's node index, then free its pages and slab entry.
 */
static void __nkfs_btree_node_release(struct nkfs_btree_node *node)
{
	KLOG(KL_DBG1, "node %p leaf %d nr_keys %d",
	     node, node->leaf, node->nr_keys);

	nkfs_btree_nodes_remove(node->tree, node);
	__nkfs_btree_node_free(node);
}
/**
 * Creates (but does not start) a single scheduler launcher through the
 * supplied factory and logs the result.
 */
rust_sched_launcher *
rust_scheduler::create_task_thread(rust_sched_launcher_factory *launchfac,
                                   int id, bool killed) {
    rust_sched_launcher *launcher = launchfac->create(this, id, killed);
    KLOG(kernel, kern, "created task thread: " PTR ", id: %d",
         launcher, id);
    return launcher;
}
/*
 * Module exit: stop the workqueue before releasing nk8 and the RNG so
 * no queued work runs against torn-down subsystems; klog is released
 * last so the KLOG call above can still be emitted.
 */
static void __exit crt_exit(void)
{
	KLOG(KL_INF, "exiting");
	destroy_workqueue(crt_wq);
	nk8_release();
	crt_random_release();
	klog_release();
	printk("nkfs_crt: exited\n");
}
/*
 * Delete a node's on-disk presence: unlink it from the tree's node
 * index and return its disk block to the allocator. The in-memory
 * pages are NOT freed here — callers pair this with
 * __nkfs_btree_node_free (see nkfs_btree_node_create's error path).
 */
static void nkfs_btree_node_delete(struct nkfs_btree_node *node)
{
	KLOG(KL_DBG1, "node %p leaf %d nr_keys %d block %llu",
	     node, node->leaf, node->nr_keys, node->block);

	nkfs_btree_nodes_remove(node->tree, node);
	nkfs_balloc_block_free(node->tree->sb, node->block);
	node->block = 0;
}
/*
 * Allocate a work item, bind it to "func" and place it on the crt
 * workqueue. Returns 0 on success, -ENOMEM if the item could not be
 * allocated or queued (the item is freed on queue failure).
 */
int crt_queue_work(work_func_t func)
{
	struct work_struct *work;

	work = kzalloc(sizeof(struct work_struct), GFP_ATOMIC);
	if (!work) {
		KLOG(KL_ERR, "cant alloc work");
		return -ENOMEM;
	}

	INIT_WORK(work, func);
	if (queue_work(crt_wq, work))
		return 0;

	kfree(work);
	KLOG(KL_ERR, "cant queue work");
	return -ENOMEM;
}
/*
 * Creates a single rust_task_thread with the given id. Each thread
 * receives its own clone of the scheduler's rust_srv (ownership passes
 * to the thread; destroy_task_thread deletes it after the thread).
 * The thread object is allocated through the kernel's placement new.
 */
rust_task_thread *
rust_scheduler::create_task_thread(int id) {
    rust_srv *srv = this->srv->clone();
    rust_task_thread *thread =
        new (kernel, "rust_task_thread") rust_task_thread(this, srv, id);
    KLOG(kernel, kern, "created task thread: " PTR ", id: %d, index: %d",
         thread, id, thread->list_index);
    return thread;
}
/**
 * Creates one launcher per scheduler thread via "launchfac" and stores
 * each in the scheduler's thread list.
 */
void
rust_scheduler::create_task_threads(rust_sched_launcher_factory *launchfac,
                                    bool killed) {
    KLOG(kernel, kern, "Using %d scheduler threads.", num_threads);
    size_t id = 0;
    while (id < num_threads) {
        threads.push(create_task_thread(launchfac, id, killed));
        ++id;
    }
}
/*
 * Create a btree handle for superblock "sb". If root_block is non-zero
 * the existing root node is read from disk; otherwise a fresh root
 * (marked leaf) is created and written out. Returns the tree with
 * refcount 1, or NULL on failure (nkfs_btree_deref on the fail path
 * releases whatever was set up).
 */
struct nkfs_btree *nkfs_btree_create(struct nkfs_sb *sb, u64 root_block)
{
	struct nkfs_btree *tree;
	int err;

	tree = kmem_cache_alloc(nkfs_btree_cachep, GFP_NOIO);
	if (!tree) {
		KLOG(KL_ERR, "no memory");
		return NULL;
	}
	memset(tree, 0, sizeof(*tree));
	atomic_set(&tree->ref, 1);
	init_rwsem(&tree->rw_lock);
	rwlock_init(&tree->nodes_lock);
	tree->nodes = RB_ROOT;
	tree->sb = sb;
	tree->sig1 = NKFS_BTREE_SIG1;

	/* Open an existing tree or create a brand-new root node. */
	if (root_block)
		tree->root = nkfs_btree_node_read(tree, root_block);
	else
		tree->root = nkfs_btree_node_create(tree);

	if (!tree->root)
		goto fail;

	/* A brand-new root starts as a leaf and must hit disk now. */
	if (!root_block) {
		tree->root->leaf = 1;
		err = nkfs_btree_node_write(tree->root);
		if (err)
			goto fail;
	}

	KLOG(KL_DBG1, "tree %p created root %p ref=%d",
	     tree, tree->root, atomic_read(&tree->root->ref));
	return tree;

fail:
	nkfs_btree_deref(tree);
	return NULL;
}
/*
 * Serialize a node to its disk block. The header page is refreshed
 * from the node fields and the checksum is computed BEFORE
 * nkfs_btree_node_to_ondisk() copies the pages into the cluster, so
 * the on-disk image carries a sum over its final contents. Returns 0
 * on success or a negative errno from the sync.
 */
int nkfs_btree_node_write(struct nkfs_btree_node *node)
{
	struct dio_cluster *clu;
	struct nkfs_btree_header_page *header;
	int err;

	NKFS_BUG_ON(node->sig1 != NKFS_BTREE_SIG1 ||
		    node->sig2 != NKFS_BTREE_SIG2);
	NKFS_BUG_ON(!node->tree);
	NKFS_BUG_ON(sizeof(struct nkfs_btree_node_disk) >
		    node->tree->sb->bsize);
	/* Block 0 is never a valid node location. */
	NKFS_BUG_ON(node->block == 0 ||
		    node->block >= node->tree->sb->nr_blocks);

	KLOG(KL_DBG1, "node %p block %llu", node, node->block);

	clu = dio_clu_get(node->tree->sb->ddev, node->block);
	if (!clu) {
		KLOG(KL_ERR, "cant get clu for block %llu", node->block);
		return -EIO;
	}

	/* Mirror the in-memory node fields into the header page. */
	header = page_address(node->header);
	header->sig1 = node->sig1;
	header->sig2 = node->sig2;
	header->leaf = node->leaf;
	header->nr_keys = node->nr_keys;

	nkfs_btree_node_calc_sum(node, nkfs_btree_node_map_sum(node), 1);

	KLOG(KL_DBG3, "node block %llu nr_keys %d",
	     node->block, node->nr_keys);

	nkfs_btree_node_to_ondisk(node, clu);

	KLOG_NKFS_BTREE_KEY(KL_DBG3, dio_clu_map(clu, PAGE_SIZE));
	KLOG_NKFS_BTREE_KEY(KL_DBG3, page_address(node->keys[0]));

	dio_clu_set_dirty(clu);
	err = dio_clu_sync(clu);
	if (err) {
		KLOG(KL_ERR, "sync err %d", err);
	}

	dio_clu_put(clu);
	return err;
}
/*
 * Module init: bring up klog, the RNG, the crt workqueue and nk8, in
 * that order. On any failure, the goto ladder unwinds exactly the
 * subsystems already initialized, in reverse order.
 */
static int __init crt_init(void)
{
	int err = -EINVAL;

	printk("nkfs_crt: initing\n");

	err = klog_init();
	if (err)
		goto out;

	err = crt_random_init();
	if (err) {
		goto rel_klog;
	}

	/* Single-threaded, unbound queue usable under memory pressure. */
	crt_wq = alloc_workqueue("crt_wq", WQ_MEM_RECLAIM|WQ_UNBOUND, 1);
	if (!crt_wq) {
		KLOG(KL_ERR, "cant create wq");
		err = -ENOMEM;
		goto rel_rnd;
	}

	KLOG(KL_INF, "nk8 initing");
	err = nk8_init();
	if (err) {
		KLOG(KL_ERR, "nk8 init err %d", err);
		goto del_wq;
	}

	KLOG(KL_INF, "inited");
	return 0;

del_wq:
	destroy_workqueue(crt_wq);
rel_rnd:
	crt_random_release();
rel_klog:
	klog_release();
out:
	return err;
}
/*
 * Compute the node checksum into "sum". The digest covers, in this
 * fixed order: the header page up to (but excluding) its "sum" field,
 * then every keys, childs and values page in full. This order is the
 * on-disk contract — changing it breaks verification of existing
 * nodes. "write" is only used for the debug log line.
 */
static void nkfs_btree_node_calc_sum(struct nkfs_btree_node *node,
	struct csum *sum, int write)
{
	struct csum_ctx ctx;
	struct nkfs_btree_header_page *header;
	int i;

	header = page_address(node->header);

	KLOG(KL_DBG3, "node %llu leaf %d nr_keys %d sig1 %x sig2 %x",
	     node->block, header->leaf, header->nr_keys,
	     header->sig1, header->sig2);

	csum_reset(&ctx);

	/* Header: everything before the embedded sum field. */
	KLOG_BUF_SUM(KL_DBG3, page_address(node->header),
		     offsetof(struct nkfs_btree_header_page, sum));
	csum_update(&ctx, page_address(node->header),
		    offsetof(struct nkfs_btree_header_page, sum));

	for (i = 0; i < ARRAY_SIZE(node->keys); i++) {
		csum_update(&ctx, page_address(node->keys[i]), PAGE_SIZE);
		KLOG_BUF_SUM(KL_DBG3, page_address(node->keys[i]), PAGE_SIZE);
	}
	for (i = 0; i < ARRAY_SIZE(node->childs); i++) {
		csum_update(&ctx, page_address(node->childs[i]), PAGE_SIZE);
		KLOG_BUF_SUM(KL_DBG3, page_address(node->childs[i]),
			     PAGE_SIZE);
	}
	for (i = 0; i < ARRAY_SIZE(node->values); i++) {
		csum_update(&ctx, page_address(node->values[i]), PAGE_SIZE);
		KLOG_BUF_SUM(KL_DBG3, page_address(node->values[i]),
			     PAGE_SIZE);
	}

	csum_digest(&ctx, sum);

	KLOG(KL_DBG3, "node block %llu sum %llx write %d nr_keys %d leaf %d",
	     node->block, csum_u64(sum), write, node->nr_keys, node->leaf);
}
// Appelé au chargement du module static int __init rocketIO_init_module(void) { int res, i; struct net_device *dev; DLOG("Start load module"); hw_addr0 = kmalloc(7, GFP_KERNEL); hw_addr1 = kmalloc(7, GFP_KERNEL); memcpy(hw_addr0, "\0ROCK0\0", 7); memcpy(hw_addr1, "\0ROCK1\0", 7); for (i = 0; i < loopback + 1; i++) { dev = alloc_netdev( sizeof(struct rio_priv), "rio%d", rocketIO_init); if (i) rio_dev1 = dev; else rio_dev0 = dev; if ((res = register_netdev(dev))) KLOG("error %i registering device rio%d", res, i); if (!dev) { KLOG("Device not found"); rocketIO_cleanup(); return -ENODEV; } DLOG("Device RocketIO found (rio%d)", i); } if (loopback == 0) rio_dev0->base_addr = RIO_BASE_ADDR_0; DLOG("Module loaded"); return 0; }
void circular_buffer::grow() { size_t new_buffer_sz = _buffer_sz * 2; KLOG(kernel, mem, "circular_buffer is growing to %d bytes", new_buffer_sz); void *new_buffer = kernel->malloc(new_buffer_sz, "new circular_buffer (grow)"); transfer(new_buffer); kernel->free(_buffer); _buffer = (uint8_t *)new_buffer; _next = 0; _buffer_sz = new_buffer_sz; }
/*
 * Create a new node for "tree": allocate it with zeroed pages, reserve
 * a disk block, write the node out, then publish it in the tree's node
 * index. If the insert returns a different node, another node for the
 * same slot already existed (presumably keyed by block — verify
 * against nkfs_btree_nodes_insert); ours is rolled back (disk block
 * freed, memory released) and the existing node is used instead.
 * Returns NULL on allocation or write failure.
 */
static struct nkfs_btree_node *nkfs_btree_node_create(struct nkfs_btree *tree)
{
	struct nkfs_btree_node *node, *inserted;
	int err;

	node = nkfs_btree_node_alloc(1);
	if (!node)
		return NULL;

	err = nkfs_balloc_block_alloc(tree->sb, &node->block);
	if (err) {
		KLOG(KL_ERR, "cant alloc block, err=%d", err);
		__nkfs_btree_node_free(node);
		return NULL;
	}

	node->tree = tree;
	err = nkfs_btree_node_write(node);
	if (err) {
		KLOG(KL_ERR, "cant write node at %llu, err=%d",
		     node->block, err);
		/* Release the disk block first, then the memory. */
		nkfs_btree_node_delete(node);
		__nkfs_btree_node_free(node);
		return NULL;
	}

	inserted = nkfs_btree_nodes_insert(tree, node);
	if (inserted != node) {
		/* Lost the race: discard ours, adopt the existing node. */
		nkfs_btree_node_delete(node);
		__nkfs_btree_node_free(node);
		node = inserted;
		KLOG(KL_DBG1, "node %p found block %llu", node, node->block);
	} else {
		/*
		 * Drop the extra reference the insert apparently took —
		 * NOTE(review): confirm nkfs_btree_nodes_insert refs the
		 * node it returns.
		 */
		NKFS_BTREE_NODE_DEREF(inserted);
		KLOG(KL_DBG1, "node %p created block %llu",
		     node, node->block);
	}

	return node;
}
void circular_buffer::shrink() { size_t new_buffer_sz = _buffer_sz / 2; assert(initial_size() <= new_buffer_sz); KLOG(kernel, mem, "circular_buffer is shrinking to %d bytes", new_buffer_sz); void *new_buffer = kernel->malloc(new_buffer_sz, "new circular_buffer (shrink)"); transfer(new_buffer); kernel->free(_buffer); _buffer = (uint8_t *)new_buffer; _next = 0; _buffer_sz = new_buffer_sz; }
void kul::https::Server::loop() throw(kul::tcp::Exception){ KUL_DBG_FUNC_ENTER int32_t newsockfd = accept(sockfd, (struct sockaddr *) &cli_addr, &clilen); if(newsockfd < 0) KEXCEPTION("HTTPS Server error on accept"); ssl = SSL_new(ctx); SSL_set_fd(ssl, newsockfd); //Here is the SSL Accept portion. Now all reads and writes must use SSL int16_t ssl_err = SSL_accept(ssl); if(ssl_err <= 0){ short se = 0; SSL_get_error(ssl, se); KERR << "HTTPS Server SSL ERROR on SSL_ACCEPT error: " << se; close(newsockfd); return; } KLOG(DBG) << "SSL_get_cipher: " << SSL_get_cipher(ssl); cc = SSL_get_peer_certificate (ssl); if(cc != NULL) { KLOG(DBG) << "Client certificate:"; KLOG(DBG) << "\t subject: " << X509_NAME_oneline (X509_get_subject_name (cc), 0, 0); KLOG(DBG) << "\t issuer: %s\n" << X509_NAME_oneline (X509_get_issuer_name (cc), 0, 0); X509_free(cc); }else KLOG(ERR) << "Client does not have certificate."; KOUT(DBG) << "New connection , socket fd is " << newsockfd << ", is : " << inet_ntoa(cli_addr.sin_addr) << ", port : "<< ntohs(cli_addr.sin_port); onConnect(inet_ntoa(cli_addr.sin_addr), ntohs(cli_addr.sin_port)); int16_t e; char buffer[_KUL_HTTPS_READ_BUFFER_]; std::stringstream cnt; do{ bzero(buffer,_KUL_HTTPS_READ_BUFFER_); e = SSL_read(ssl, buffer, _KUL_HTTPS_READ_BUFFER_ - 1); if(e) cnt << buffer; }while(e == (_KUL_HTTPS_READ_BUFFER_ - 1)); if (e < 0){ short se = 0; SSL_get_error(ssl, se); if(se) KLOG(ERR) << "SSL_get_error: " << se; e = -1; }else try{ std::string res; std::shared_ptr<kul::http::ARequest> req = handleRequest(cnt.str(), res); const kul::http::AResponse& rs(respond(*req.get())); std::string ret(rs.toString()); e = SSL_write(ssl, ret.c_str(), ret.length()); }catch(const kul::http::Exception& e1){ KERR << e1.what(); e = -1; } close(newsockfd); KOUT(DBG) << "Disconnect , socket fd is " << newsockfd << ", is : " << inet_ntoa(cli_addr.sin_addr) << ", port : "<< ntohs(cli_addr.sin_port); onDisconnect(inet_ntoa(cli_addr.sin_addr), ntohs(cli_addr.sin_port)); }
/*
 * Final release of a tree: stop activity, drop the root reference and
 * free the tree object. By this point every node must be gone
 * (NKFS_BUG_ON(nodes_active)).
 *
 * NOTE(review): the rb_entry() in the KLOG below is computed even when
 * nodes.rb_node is NULL; the resulting pointer is only logged, never
 * dereferenced, but it will print a bogus non-NULL value for an empty
 * tree.
 */
static void nkfs_btree_release(struct nkfs_btree *tree)
{
	nkfs_btree_stop(tree);
	if (tree->root)
		NKFS_BTREE_NODE_DEREF(tree->root);

	KLOG(KL_DBG1, "tree %p nodes_active %d root %p",
	     tree, tree->nodes_active,
	     rb_entry(tree->nodes.rb_node, struct nkfs_btree_node,
		      nodes_link));
	NKFS_BUG_ON(tree->nodes_active);

	kmem_cache_free(nkfs_btree_cachep, tree);
	KLOG(KL_DBG1, "tree %p deleted", tree);
}
/*
 * Constructor: allocates the initial backing storage. Note the member
 * initializer order matters — _buffer_sz must be set (to
 * initial_size()) before _buffer is allocated with it.
 */
circular_buffer::circular_buffer(rust_kernel *kernel, size_t unit_sz) :
    kernel(kernel),
    unit_sz(unit_sz),
    _buffer_sz(initial_size()),
    _next(0),
    _unread(0),
    _buffer((uint8_t *)kernel->malloc(_buffer_sz, "circular_buffer")) {

    assert(unit_sz && "Unit size must be larger than zero.");

    KLOG(kernel, mem,
         "new circular_buffer(buffer_sz=%d, unread=%d)"
         "-> circular_buffer=0x%" PRIxPTR,
         _buffer_sz, _unread, this);

    assert(_buffer && "Failed to allocate buffer.");
}
/*
 * Top-level IRP dispatch: give the keyboard filter first refusal via
 * KbdDispatchGeneral; otherwise route device-control IRPs to the
 * dedicated handler and complete everything else with success.
 */
NTSTATUS DriverIrpHandler(IN PDEVICE_OBJECT DeviceObject, IN PIRP Irp)
{
    NTSTATUS Status;
    BOOLEAN bHandled = FALSE;
    PIO_STACK_LOCATION irpStack = IoGetCurrentIrpStackLocation(Irp);

    KLOG(LInfo, "DevObj %p Major %x Minor %x",
         DeviceObject, irpStack->MajorFunction, irpStack->MinorFunction);

    Status = KbdDispatchGeneral(&MonitorGetInstance()->Kbd,
                                DeviceObject, Irp, &bHandled);
    if (bHandled)
        return Status;

    if (irpStack->MajorFunction != IRP_MJ_DEVICE_CONTROL)
        return CompleteIrp(Irp, STATUS_SUCCESS, 0);

    return DriverDeviceControlHandler(DeviceObject, Irp);
}
/*
 * Compiles and runs "scripts" inside the engine's persistent context.
 * Returns the script's completion value, or an empty handle if
 * compilation or execution raised an exception (the exception text is
 * fetched to clear the TryCatch but otherwise unused).
 */
Local<Value> ScriptEngine::ExecuteScript(const char * scripts)
{
    Isolate::Scope isolate_scope(getIsoloate());
    HandleScope handle_scope(getIsoloate());
    Local<Context> context = Local<Context>::New(getIsoloate(), context_);
    Context::Scope context_scope(context);

    Local<Value> result;
    TryCatch try_catch(getIsoloate());

    Local<Script> compiled_script;
    bool compiled =
        Script::Compile(context,
                        String::NewFromUtf8(getIsoloate(), scripts))
            .ToLocal(&compiled_script);
    if (!compiled) {
        String::Utf8Value error(try_catch.Exception());
        return result;  // empty handle: compilation failed
    }

    // Run the script!
    if (!compiled_script->Run(context).ToLocal(&result)) {
        String::Utf8Value error(try_catch.Exception());
        return result;  // empty handle: runtime exception
    }

    String::Utf8Value ret(result);
    KLOG(Info, "V8Script", "---- ExecuteScript %s", *ret);
    return result;
}
/*
 * Windows HTTP Server API receive loop: blocks on
 * HttpReceiveHttpRequest and dispatches GET/POST (HEAD is stubbed).
 * On ERROR_MORE_DATA the buffer is reallocated to the required size
 * reported in bytesRead and the same request is re-received by id.
 *
 * Fixes over the previous revision:
 *  - after reallocating pRequestBuffer, the local "req" pointer still
 *    referenced the FREED buffer, so the retried receive and the
 *    subsequent dispatch used freed memory (use-after-free);
 *  - the reallocation result is now NULL-checked like the first one;
 *  - requestId is reset after a successful receive so the next
 *    iteration asks for a new request instead of retrying a stale id.
 */
void kul::http::Server::start() throw(kul::http::Exception){
    ULONG RequestBufferLength = sizeof(HTTP_REQUEST) + 2048;
    PCHAR pRequestBuffer = (PCHAR) wAlloc(RequestBufferLength);
    if(pRequestBuffer == NULL)
        KEXCEPT(Exception, "Buffer allocation failed: " + std::to_string(ERROR_NOT_ENOUGH_MEMORY));
    PHTTP_REQUEST req = (PHTTP_REQUEST) pRequestBuffer;
    HTTP_REQUEST_ID requestId;
    HTTP_SET_NULL_ID(&requestId);
    DWORD bytesRead;
    ULONG r = 0;
    while(1){
        RtlZeroMemory(req, RequestBufferLength);
        r = HttpReceiveHttpRequest(this->q, requestId, 0, req,
                                   RequestBufferLength, &bytesRead, NULL);
        if(r == NO_ERROR){
            HTTP_SET_NULL_ID(&requestId);
            if(req->Verb == HttpVerbGET){
                get(req);
            }else if(req->Verb == HttpVerbPOST){
                post(req);
            }else if(req->Verb == HttpVerbHEAD){
                // head(req);
            }else KLOG(INF) << "Unrecognised http method type";
        }else if(r == ERROR_MORE_DATA){
            // Grow the buffer to the size the API asked for and retry
            // the SAME request by id.
            requestId = req->RequestId;
            RequestBufferLength = bytesRead;
            wFreeM(pRequestBuffer);
            pRequestBuffer = (PCHAR) wAlloc(RequestBufferLength);
            if(pRequestBuffer == NULL)
                KEXCEPT(Exception, "Buffer allocation failed: " + std::to_string(ERROR_NOT_ENOUGH_MEMORY));
            req = (PHTTP_REQUEST) pRequestBuffer; // was missing: req pointed at freed memory
        }else if(r == ERROR_CONNECTION_INVALID && !HTTP_IS_NULL_ID(&requestId)){
            // The retried request's connection died; go back to
            // receiving fresh requests.
            HTTP_SET_NULL_ID(&requestId);
        }else KEXCEPT(Exception, "HttpReceiveHttpRequest failed: " + std::to_string(r));
    }
}