void resize_executable_memory(memory_block_data *self,
                              intptr_t new_size,
                              char **inout_begin,
                              char **inout_end)
{
    executable_memory_block *emb = static_cast<executable_memory_block *>(self);
    void *current_chunk = emb->m_allocated_chunks.back();
    void *old_begin = static_cast<void *>(*inout_begin);
    void *old_end = static_cast<void *>(*inout_end);
    // Only the most recent allocation (the one ending at the pivot) can be resized
    assert(old_end == emb->m_pivot);
    void *new_begin = old_begin;
    void *new_end = ptr_offset(old_begin, new_size);
    if (new_end >= ptr_offset(current_chunk, emb->m_chunk_size)) {
        // The resized allocation no longer fits in the current chunk,
        // so move it to the start of a fresh chunk
        emb->add_chunk();
        new_begin = emb->m_allocated_chunks.back();
        new_end = ptr_offset(new_begin, new_size);
        size_t old_size = static_cast<uint8_t *>(old_end) - static_cast<uint8_t *>(old_begin);
        memcpy(new_begin, old_begin, old_size);
        *inout_begin = static_cast<char *>(new_begin);
    }
    emb->m_pivot = new_end;
    *inout_end = static_cast<char *>(new_end);
}
static int request_header_value(http_parser *parser, const char *value, const size_t len)
{
    octo_http_request *request = ptr_offset(parser, octo_http_request, parser);
    request->parser_state = PARSER_VALUE;
    return 0;
}

static int request_header_field(http_parser *parser, const char *field, const size_t len)
{
    octo_http_request *request = ptr_offset(parser, octo_http_request, parser);
    request->parser_state = PARSER_FIELD;
    return 0;
}

static int request_fragment(http_parser *parser, const char *fragment, const size_t len)
{
    octo_http_request *request = ptr_offset(parser, octo_http_request, parser);
    request->parser_state = PARSER_FRAGMENT;
    return 0;
}

static int request_url(http_parser *parser, const char *url, const size_t len)
{
    octo_http_request *request = ptr_offset(parser, octo_http_request, parser);
    request->parser_state = PARSER_URL;
    return 0;
}

static int request_query(http_parser *parser, const char *query, const size_t len)
{
    octo_http_request *request = ptr_offset(parser, octo_http_request, parser);
    request->parser_state = PARSER_QUERY;
    return 0;
}

static int request_headers_complete(http_parser *parser)
{
    octo_http_request *request = ptr_offset(parser, octo_http_request, parser);
    request->parser_state = PARSER_HEADERS_COMPLETE;
    return 0;
}
void GlobObj::dumpSql(Sql *db, ostream &of)
{
	// First define all functions
	for (const_fmap_iterator_type i = fbegin(); i != fend(); i++) {
		Call *fun = i->second;
		Tokid t = fun->get_site();
		of << "INSERT INTO FUNCTIONS VALUES(" <<
		    ptr_offset(fun) << ", '" << fun->name << "', " <<
		    db->boolval(fun->is_macro()) << ',' <<
		    db->boolval(fun->is_defined()) << ',' <<
		    db->boolval(fun->is_declared()) << ',' <<
		    db->boolval(fun->is_file_scoped()) << ',' <<
		    t.get_fileid().get_id() << ',' <<
		    (unsigned)(t.get_streampos()) << ',' <<
		    fun->get_num_caller();
		of << ");\n";
		if (fun->is_defined()) {
			of << "INSERT INTO FUNCTIONMETRICS VALUES(" << ptr_offset(fun);
			for (int j = 0; j < FunMetrics::metric_max; j++)
				if (!Metrics::is_internal<FunMetrics>(j))
					of << ',' << fun->metrics().get_metric(j);
			of << ',' << fun->get_begin().get_tokid().get_fileid().get_id() <<
			    ',' << (unsigned)(fun->get_begin().get_tokid().get_streampos()) <<
			    ',' << fun->get_end().get_tokid().get_fileid().get_id() <<
			    ',' << (unsigned)(fun->get_end().get_tokid().get_streampos());
			of << ");\n";
		}
		int start = 0, ord = 0;
		for (dequeTpart::const_iterator j = fun->get_token().get_parts_begin();
		     j != fun->get_token().get_parts_end(); j++) {
			Tokid t2 = j->get_tokid();
			int len = j->get_len() - start;
			int pos = 0;
			while (pos < len) {
				Eclass *ec = t2.get_ec();
				of << "INSERT INTO FUNCTIONID VALUES(" <<
				    ptr_offset(fun) << ',' <<
				    ord << ',' <<
				    ptr_offset(ec) << ");\n";
				pos += ec->get_len();
				t2 += ec->get_len();
				ord++;
			}
			start += j->get_len();
		}
	}
	// Then their calls to satisfy integrity constraints
	for (const_fmap_iterator_type i = fbegin(); i != fend(); i++) {
		Call *fun = i->second;
		for (Call::const_fiterator_type dest = fun->call_begin();
		     dest != fun->call_end(); dest++)
			of << "INSERT INTO FCALLS VALUES(" <<
			    ptr_offset(fun) << ',' <<
			    ptr_offset(*dest) << ");\n";
	}
}
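The dump above keys every FUNCTIONS, FUNCTIONID, and FCALLS row on ptr_offset(obj). A minimal sketch of what such a single-argument helper could look like, assuming it only needs to turn an object's address into a stable integer id for the generated SQL (the project's real helper may compute something more elaborate):

#include <stdint.h>

/*
 * Hypothetical single-argument ptr_offset(): derive an integer key from an
 * object's address so it can serve as a primary key in the SQL dump.
 * Assumed definition for illustration only.
 */
static inline uintptr_t ptr_offset(const void *ptr)
{
    return (uintptr_t)ptr;
}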
void allocate_executable_memory(memory_block_data *self,   // in
                                intptr_t size_bytes,        // in
                                intptr_t alignment,         // in
                                char **out_begin,           // out
                                char **out_end)             // out
{
    executable_memory_block *emb = static_cast<executable_memory_block *>(self);
    // some preconditions
    assert(self->m_type == executable_memory_block_type);
    assert((size_t)size_bytes <= emb->m_chunk_size);
#ifdef ENABLE_LOGGING
    std::cout << "allocating " << size_bytes
              << " of executable memory with alignment " << alignment << std::endl;
#endif // ENABLE_LOGGING
    if ((size_t)size_bytes > emb->m_chunk_size) {
        std::stringstream ss;
        ss << "Memory allocation request of " << size_bytes
           << " is too large for this executable_memory_block"
              " with chunk size " << emb->m_chunk_size;
        throw std::runtime_error(ss.str());
    }
    if (emb->m_allocated_chunks.empty()) {
        emb->add_chunk();
    }
    void *current_chunk = emb->m_allocated_chunks.back();
    // Align the allocation start up from the current pivot
    void *begin = reinterpret_cast<void *>(
        align_up(reinterpret_cast<size_t>(emb->m_pivot), alignment));
    void *end = ptr_offset(begin, size_bytes);
    if (ptr_offset(current_chunk, emb->m_chunk_size) < ptr_offset(emb->m_pivot, size_bytes)) {
        // Not enough room left in the current chunk; start a new one
        emb->add_chunk();
        begin = emb->m_allocated_chunks.back();
        end = ptr_offset(begin, size_bytes);
    }
    emb->m_pivot = end;
#ifndef NDEBUG
    assert(ptr_in_range(begin, emb->m_allocated_chunks.back(),
                        ptr_offset(emb->m_allocated_chunks.back(), emb->m_chunk_size)));
    assert(ptr_in_range(end, emb->m_allocated_chunks.back(),
                        ptr_offset(emb->m_allocated_chunks.back(), emb->m_chunk_size)));
#endif
    assert(((int8_t *)end - (int8_t *)begin) == size_bytes);
    assert(emb->m_pivot == end);
    *out_begin = static_cast<char *>(begin);
    *out_end = static_cast<char *>(end);
}
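Both executable-memory functions above rely on small pointer utilities that are not shown here. A minimal sketch of ptr_offset, align_up, and ptr_in_range, under the assumption that they are plain byte-offset, power-of-two alignment, and inclusive range-check helpers:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Assumed helpers, named after the calls in the snippets above. */

/* Advance a pointer by a signed number of bytes. */
static inline void *ptr_offset(void *ptr, ptrdiff_t offset)
{
    return (char *)ptr + offset;
}

/* Round value up to the next multiple of alignment (assumed a power of two). */
static inline size_t align_up(size_t value, size_t alignment)
{
    assert((alignment & (alignment - 1)) == 0);
    return (value + alignment - 1) & ~(alignment - 1);
}

/* True when ptr lies in [begin, end]; the upper bound is inclusive because
 * an allocation may end exactly at the end of its chunk. */
static inline int ptr_in_range(const void *ptr, const void *begin, const void *end)
{
    return (uintptr_t)ptr >= (uintptr_t)begin && (uintptr_t)ptr <= (uintptr_t)end;
}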
/**
 * http_parser callbacks
 */
static int request_message_begin(http_parser *parser)
{
    octo_http_request *request = ptr_offset(parser, octo_http_request, parser);
    request->parser_state = PARSER_MESSAGE_BEGIN;
    return 0;
}

static int request_message_complete(http_parser *parser)
{
    octo_http_request *request = ptr_offset(parser, octo_http_request, parser);
    request->parser_state = PARSER_MESSAGE_COMPLETE;
    return 0;
}
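Each callback above recovers the octo_http_request that embeds the http_parser handed to it. A sketch of a container_of-style ptr_offset(ptr, type, member) macro that would support that usage, assuming this is what the project's helper does:

#include <stddef.h>

/* Assumed container_of-style helper: given a pointer to a member, recover a
 * pointer to the structure that embeds it. Illustrative definition only. */
#define ptr_offset(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

With a layout such as a hypothetical struct octo_http_request { ...; http_parser parser; ... }, registering &request->parser with the parser lets every callback map the raw http_parser pointer back to its owning request without any global state.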
 */
	kernel_ctx_vma = to_intel_context(dev_priv->kernel_context,
					  dev_priv->engine[RCS])->state;
	blob->ads.golden_context_lrca =
		intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;

	/*
	 * The GuC expects us to exclude the portion of the context image that
	 * it skips from the size it is to read. It starts reading from after
	 * the execlist context (so skipping the first page [PPHWSP] and 80
	 * dwords). Weird guc is weird.
	 */
	for_each_engine(engine, dev_priv, id)
		blob->ads.eng_state_size[engine->guc_id] =
			engine->context_size - skipped_size;

	base = intel_guc_ggtt_offset(guc, vma);
	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
	blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);

	kunmap(page);

	return 0;
}

void intel_guc_ads_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->ads_vma, 0);
}
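In this fragment ptr_offset(blob, member) is added to a GGTT base address, so it presumably evaluates to the byte offset of a member inside the ADS blob structure. A sketch of such an offsetof-style helper, assuming that reading (the macro actually used by i915 may be defined differently):

#include <stddef.h>

/* Assumed offsetof-style helper: byte offset of a member within the object
 * that ptr points to, used to turn blob-relative positions into GGTT addresses. */
#define ptr_offset(ptr, member) \
	((size_t)((const char *)&(ptr)->member - (const char *)(ptr)))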