// Creates the klass object for a primitive-type array (e.g. [I, [B).
// 'type' selects the element BasicType; 'scale' must equal the element
// size implied by the computed layout helper (asserted below); 'name_str'
// is the optional signature name interned into the SymbolTable.
klassOop typeArrayKlass::create_klass(BasicType type, int scale, const char* name_str, TRAPS) {
  typeArrayKlass o;

  // Intern the array's name first; CHECK_NULL propagates any pending exception.
  Symbol* sym = NULL;
  if (name_str != NULL) {
    sym = SymbolTable::new_symbol(name_str, CHECK_NULL);
  }

  KlassHandle klassklass (THREAD, Universe::typeArrayKlassKlassObj());

  arrayKlassHandle k = base_create_array_klass(o.vtbl_value(), header_size(), klassklass, CHECK_NULL);
  typeArrayKlass* ak = typeArrayKlass::cast(k());
  ak->set_name(sym);
  ak->set_layout_helper(array_layout_helper(type));
  assert(scale == (1 << ak->log2_element_size()), "scale must check out");
  assert(ak->oop_is_javaArray(), "sanity");
  assert(ak->oop_is_typeArray(), "sanity");
  ak->set_max_length(arrayOopDesc::max_array_length(type));
  assert(k()->size() > header_size(), "bad size");

  // Call complete_create_array_klass after all instance variables have been initialized.
  KlassHandle super (THREAD, k->super());
  complete_create_array_klass(k, super, CHECK_NULL);

  return k();
}
// Creates the (single) typeArrayKlassKlass: the klass of all typeArrayKlass
// objects. Also allocates its java.lang.Class mirror.
klassOop typeArrayKlassKlass::create_klass(TRAPS) {
  typeArrayKlassKlass o;
  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
  // Use CHECK_NULL (not CHECK_0) since this function returns a pointer,
  // matching the other *KlassKlass::create_klass implementations.
  KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL);
  assert(k()->size() == align_object_size(header_size()), "wrong size for object");
  java_lang_Class::create_mirror(k, CHECK_NULL); // Allocate mirror
  return k();
}
/* Parses and (eventually) decrypts one TLS/DTLS record from 'input'.
 * On success *contentType receives the record's content-type byte.
 * NOTE(review): this function is truncated in this chunk -- the
 * decryption/output tail is not visible here. */
int tls_record_decrypt(tls_record_t ctx,
                       const tls_buffer input,
                       tls_buffer *output,
                       uint8_t *contentType)
{
    int err;
    tls_buffer cipherFragment;
    uint8_t *charPtr;
    uint64_t seqNum;  /* only set/used on the DTLS path */
    uint8_t ct;

    charPtr=input.data;

    /* The fixed record header must be fully present before parsing. */
    check(input.length>=header_size(ctx));
    if(input.length<header_size(ctx))
        return errSSLRecordParam;

    /* Byte 0: content type. */
    ct = *charPtr++;
#if 0
    // We dont actually check the record protocol version
    tls_protocol_version pv;
    pv = SSLDecodeInt(charPtr, 2);
#endif
    charPtr+=2;
    if(ctx->isDTLS)
    {
        /* DTLS records carry an explicit 64-bit epoch+sequence number. */
        seqNum = SSLDecodeUInt64(charPtr, 8);
        charPtr+=8;
    }
    /* 16-bit fragment length, then the fragment itself. */
    cipherFragment.length = SSLDecodeInt(charPtr, 2);
    charPtr+=2;
    cipherFragment.data = charPtr;
#if 0
    // This is too strict for the record layer.
    if (ct < tls_record_type_V3_Smallest ||
        ct > tls_record_type_V3_Largest)
        return errSSLRecordProtocol;

    if ((ctx->negProtocolVersion != tls_protocol_version_Undertermined) &&
        (pv != ctx->negProtocolVersion))
    {
        sslErrorLog("invalid record protocol version, expected = %04x, received = %04x", ctx->negProtocolVersion, pv);
        return errSSLRecordProtocol;            // Invalid record version ?
    }
#endif

    /* The declared fragment must fit entirely inside the input buffer. */
    check(input.length>=header_size(ctx)+cipherFragment.length);
    if(input.length<header_size(ctx)+cipherFragment.length)
    {
        return errSSLRecordParam; // input buffer not enough data
    }

    if(ctx->isDTLS)
    {
        /* if the epoch of the record is different of current read cipher, just drop it */
        if((seqNum>>48)!=(ctx->readCipher.sequenceNum>>48))
        {
            return errSSLRecordUnexpectedRecord;
        } else {
            ctx->readCipher.sequenceNum=seqNum;
        }
    }
// Creates the (single) arrayKlassKlass: the klass of all arrayKlass
// objects. Also allocates its java.lang.Class mirror.
klassOop arrayKlassKlass::create_klass(TRAPS) {
  arrayKlassKlass prototype;
  KlassHandle meta_klass(THREAD, Universe::klassKlassObj());
  KlassHandle result = base_create_klass(meta_klass, header_size(), prototype.vtbl_value(), CHECK_NULL);
  // Make sure size calculation is right
  assert(result()->size() == align_object_size(header_size()), "wrong size for object");
  java_lang_Class::create_mirror(result, CHECK_NULL); // Allocate mirror, make links
  return result();
}
// Creates the (single) methodDataKlass: the klass of all methodDataOop
// objects. No mirror is created here.
klassOop methodDataKlass::create_klass(TRAPS) {
  methodDataKlass o;
  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
  // Use CHECK_NULL (not CHECK_0) since this function returns a pointer,
  // matching the other *Klass::create_klass implementations.
  KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL);
  // Make sure size calculation is right
  assert(k()->size() == align_object_size(header_size()), "wrong size for object");
  return k();
}
void PDU::serialize(uint8_t *buffer, uint32_t total_sz, const PDU *parent) { uint32_t sz = header_size() + trailer_size(); /* Must not happen... */ #ifdef TINS_DEBUG assert(total_sz >= sz); #endif prepare_for_serialize(parent); if(_inner_pdu) _inner_pdu->serialize(buffer + header_size(), total_sz - sz, this); write_serialization(buffer, total_sz, parent); }
// Writes the EAPOL header into 'buffer', then delegates the rest of the
// frame to the subclass via write_body().
void EAPOL::write_serialization(uint8_t *buffer, uint32_t total_sz, const PDU *) {
    const uint32_t hdr_len = sizeof(_header);
    #ifdef TINS_DEBUG
    assert(total_sz >= header_size());
    #endif
    std::memcpy(buffer, &_header, hdr_len);
    write_body(buffer + hdr_len, total_sz - hdr_len);
}
// Creates the constantPoolCacheKlass: the array klass of constant pool
// cache objects.
klassOop constantPoolCacheKlass::create_klass(TRAPS) {
  constantPoolCacheKlass o;
  KlassHandle klassklass(THREAD, Universe::arrayKlassKlassObj());
  // Use CHECK_NULL (not CHECK_0) since this function returns a pointer,
  // matching the other *Klass::create_klass implementations.
  arrayKlassHandle k = base_create_array_klass(o.vtbl_value(), header_size(), klassklass, CHECK_NULL);
  // Finish initialization after all instance variables are set up.
  KlassHandle super (THREAD, k->super());
  complete_create_array_klass(k, super, CHECK_NULL);
  return k();
}
uint32_t PDU::size() const { uint32_t sz = header_size() + trailer_size(); const PDU *ptr(_inner_pdu); while(ptr) { sz += ptr->header_size() + ptr->trailer_size(); ptr = ptr->inner_pdu(); } return sz; }
// Variant of constantPoolCacheKlass creation that also registers the new
// klass under the reserved klass id constantPoolCacheKlass_kid in the
// KlassTable.
klassOop constantPoolCacheKlass::create_klass(TRAPS) {
  constantPoolCacheKlass o;
  KlassHandle klassklass(THREAD, Universe::arrayKlassKlassObj());
  arrayKlassHandle k=base_create_array_klass(o.vtbl_value(),header_size(),klassklass,constantPoolCacheKlass_kid,CHECK_NULL);
  // Finish initialization after all instance variables are set up.
  KlassHandle super (THREAD, k->super());
  complete_create_array_klass(k, super, CHECK_NULL);
  // Bind the freshly created klass to its reserved, well-known klass id.
  KlassTable::bindReservedKlassId(k(), constantPoolCacheKlass_kid);
  return k();
}
void IPSecAH::write_serialization(uint8_t* buffer, uint32_t total_sz) { if (inner_pdu()) { next_header(Internals::pdu_flag_to_ip_type(inner_pdu()->pdu_type())); } length(header_size() / sizeof(uint32_t) - 2); OutputMemoryStream output(buffer, total_sz); output.write(header_); output.write(icv_.begin(), icv_.end()); }
/* Initializes a protocol-header iteration cursor over the packet 'cp'.
 * NOTE(review): the 'layer' parameter is unused here -- confirm intent. */
void header_init(struct header_chunk* header, const struct cap_header* cp, int layer){
	header->cp = cp;
	header->protocol = NULL;
	header->last_net = (struct network){"", "", 0};
	header->truncated = 0;
	header->ptr = NULL;
}

/* Advances the cursor to the next protocol header.
 * The first call positions it at the ethernet header at the start of the
 * payload; later calls delegate to next_payload(). Returns non-zero while
 * there are more headers to visit. */
int header_walk(struct header_chunk* header){
	if ( !header->ptr ){
		header->protocol = protocol_get(PROTOCOL_ETHERNET);
		header->ptr = header->cp->payload;
		/* mark truncation if the capture does not cover the full header */
		if ( limited_caplen(header->cp, header->ptr, header_size(header)) ){
			header->truncated = 1;
		}
		return 1;
	}
	return next_payload(header);
}

/* Dumps the current header via the protocol's dump callback, or prints a
 * placeholder when the protocol has no dump support or the header was
 * truncated during capture. */
void header_dump(FILE* fp, const struct header_chunk* header, const char* prefix){
	if ( !header->protocol->dump ){
		fprintf(fp, "%s(not implemented)\n", prefix);
		return;
	}
	if ( header->truncated && !header->protocol->partial_print ){
		fprintf(fp, "%s[Packet size limited during capture]\n", prefix);
		return;
	}
	ptr_sanity(header->cp, header->ptr, NULL);
	header->protocol->dump(fp, header, header->ptr, prefix, 0);
}

/* One-line formatting of the current header via the protocol's format
 * callback, falling back to just the protocol name. */
void header_format(FILE* fp, const struct header_chunk* header, int flags){
	if ( header->truncated && !header->protocol->partial_print ){
		fprintf(fp, ": %s [Packet size limited during capture]", header->protocol->name);
		return;
	}
	if ( !header->protocol->format ){
		fprintf(fp, ": %s", header->protocol->name);
		return;
	}
	ptr_sanity(header->cp, header->ptr, NULL);
	header->protocol->format(fp, header, header->ptr, flags);
}

/* Size of the current header: dynamic (callback) when the protocol defines
 * size_dyn, otherwise the protocol's fixed size. */
size_t header_size(const struct header_chunk* header){
	return header->protocol->size_dyn ? header->protocol->size_dyn(header, header->ptr) : header->protocol->size;
}
// Bootstraps the klassKlass itself: the klass whose instances are klasses.
// It is its own klass, so the klass field is pointed back at the new oop.
klassOop klassKlass::create_klass(TRAPS) {
  KlassHandle h_this_klass;
  klassKlass o;
  // for bootstrapping, handles may not be available yet.
  klassOop k = base_create_klass_oop(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL);
  k->set_klass(k); // point to thyself
  // Do not try to allocate mirror, java.lang.Class not loaded at this point.
  // See Universe::fixup_mirrors()
  return k;
}
// Reads this mixin object's state from the bootstrap image: header first,
// then each named oop field in declaration order, then the remaining body.
// The read order must match the order the fields were written.
void mixinOopDesc::bootstrap_object(bootstrap* st) {
  memOopDesc::bootstrap_header(st);
  st->read_oop((oop*)&addr()->_methods);
  st->read_oop((oop*)&addr()->_inst_vars);
  st->read_oop((oop*)&addr()->_class_vars);
  st->read_oop((oop*)&addr()->_primary_invocation);
  st->read_oop((oop*)&addr()->_class_mixin);
  st->read_oop((oop*)&addr()->_installed);
  // Body starts right after the fixed header.
  memOopDesc::bootstrap_body(st, header_size());
}
// Computes the aligned allocation size: fixed header plus the statics area,
// plus the embedded vtable bitmap when that feature is compiled in.
static size_t allocation_size(jint statics_size, jint vtable_length) {
  size_t total = header_size() + statics_size;
#if USE_EMBEDDED_VTABLE_BITMAP
  total += bitmap_size(vtable_length);
#else
  (void)vtable_length;
#endif
  return align_allocation_size(total);
}
/* Allocates and configures all ef_vi resources for interface 'ifindex':
 * protection domain, virtual interface, PIO region (x86_64 only), a UDP
 * filter, and the DMA-registered packet buffers. Aborts (via TRY/TEST) on
 * any failure. */
static void do_init(int ifindex)
{
  enum ef_pd_flags pd_flags = 0;
  ef_filter_spec filter_spec;
  struct pkt_buf* pb;
  enum ef_vi_flags vi_flags = 0;
  int i;

  /* Translate command-line options into allocation flags. */
  if( cfg_use_vf )
    pd_flags |= EF_PD_VF;
  if( cfg_phys_mode )
    pd_flags |= EF_PD_PHYS_MODE;
  if( cfg_disable_tx_push )
    vi_flags |= EF_VI_TX_PUSH_DISABLE;

  /* Allocate virtual interface. */
  TRY(ef_driver_open(&driver_handle));
  TRY(ef_pd_alloc(&pd, driver_handle, ifindex, pd_flags));
  TRY(ef_vi_alloc_from_pd(&vi, driver_handle, &pd, driver_handle,
                          -1, -1, -1, NULL, -1, vi_flags));

#ifdef __x86_64__
  TRY(ef_pio_alloc(&pio, driver_handle, &pd, -1, driver_handle));
  TRY(ef_pio_link_vi(&pio, driver_handle, &vi, driver_handle));
#else
  /* PIO is only available on x86_64 systems */
  TEST(0);
#endif

  /* Steer matching UDP traffic to this VI. */
  ef_filter_spec_init(&filter_spec, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_ip4_local(&filter_spec, IPPROTO_UDP,
                                   sa_local.sin_addr.s_addr,
                                   sa_local.sin_port));
  TRY(ef_vi_filter_add(&vi, driver_handle, &filter_spec, NULL));

  {
    /* N_RX_BUFS receive buffers plus one extra used below for TX. */
    int bytes = (N_RX_BUFS + 1) * RX_BUF_SIZE;
    void* p;
    /* page-aligned so the region can be DMA-registered */
    TEST(posix_memalign(&p, 4096, bytes) == 0);
    TRY(ef_memreg_alloc(&memreg, driver_handle, &pd, driver_handle,
                        p, bytes));
    for( i = 0; i <= N_RX_BUFS; ++i ) {
      pkt_bufs[i] = (void*) ((char*) p + i * RX_BUF_SIZE);
      pkt_bufs[i]->dma_buf_addr = ef_memreg_dma_addr(&memreg,
                                                     i * RX_BUF_SIZE);
    }
  }

  for( i = 0; i <= N_RX_BUFS; ++i ) {
    pb = pkt_bufs[i];
    pb->id = i;
    /* DMA address of the payload area, not the struct header. */
    pb->dma_buf_addr += MEMBER_OFFSET(struct pkt_buf, dma_buf);
  }

  /* The extra buffer (index N_RX_BUFS) holds the prebuilt TX frame. */
  init_udp_pkt(pkt_bufs[N_RX_BUFS]->dma_buf, cfg_payload_len);
  tx_frame_len = cfg_payload_len + header_size();
}
void FreeChunk::mangleFreed(size_t size) { assert(baadbabeHeapWord != deadbeefHeapWord, "Need distinct patterns"); // mangle all but the header of a just-freed block of storage // just prior to passing it to the storage dictionary assert(size >= MinChunkSize, "smallest size of object"); assert(size == _size, "just checking"); HeapWord* addr = (HeapWord*)this; size_t hdr = header_size(); Memory::set_words(addr + hdr, size - hdr, deadbeefHeapWord); }
// Computes the size (in words) of a ConstMethod with the given bytecode
// size and variable-length inline tables. Layout: header, bytecodes, then
// each optional table (with a u2 element count where present), then one
// pointer slot per annotation array.
int ConstMethod::size(int code_size, InlineTableSizes* sizes) {
  int extra_bytes = code_size;
  if (sizes->compressed_linenumber_size() > 0) {
    extra_bytes += sizes->compressed_linenumber_size();
  }
  if (sizes->checked_exceptions_length() > 0) {
    extra_bytes += sizeof(u2);  // element count
    extra_bytes += sizes->checked_exceptions_length() * sizeof(CheckedExceptionElement);
  }
  if (sizes->localvariable_table_length() > 0) {
    extra_bytes += sizeof(u2);  // element count
    extra_bytes += sizes->localvariable_table_length() * sizeof(LocalVariableTableElement);
  }
  if (sizes->exception_table_length() > 0) {
    extra_bytes += sizeof(u2);  // element count
    extra_bytes += sizes->exception_table_length() * sizeof(ExceptionTableElement);
  }
  if (sizes->generic_signature_index() != 0) {
    extra_bytes += sizeof(u2);
  }
  // This has to be a less-than-or-equal check, because we might be
  // storing information from a zero-length MethodParameters
  // attribute.  We have to store these, because in some cases, they
  // cause the reflection API to throw a MalformedParametersException.
  if (sizes->method_parameters_length() >= 0) {
    extra_bytes += sizeof(u2);  // length word, even for a zero-length table
    extra_bytes += sizes->method_parameters_length() * sizeof(MethodParametersElement);
  }
  // Align sizes up to a word.
  extra_bytes = align_size_up(extra_bytes, BytesPerWord);
  // One pointer per annotation array
  if (sizes->method_annotations_length() > 0) {
    extra_bytes += sizeof(AnnotationArray*);
  }
  if (sizes->parameter_annotations_length() > 0) {
    extra_bytes += sizeof(AnnotationArray*);
  }
  if (sizes->type_annotations_length() > 0) {
    extra_bytes += sizeof(AnnotationArray*);
  }
  if (sizes->default_annotations_length() > 0) {
    extra_bytes += sizeof(AnnotationArray*);
  }
  // Second align is a no-op for the table bytes (already aligned above) but
  // covers the pointer slots just added.
  int extra_words = align_size_up(extra_bytes, BytesPerWord) / BytesPerWord;
  assert(extra_words == extra_bytes/BytesPerWord, "should already be aligned");
  return align_metadata_size(header_size() + extra_words);
}
void FreeChunk::mangleAllocated(size_t size) { // mangle all but the header of a just-allocated block // of storage assert(size >= MinChunkSize, "smallest size of object"); // we can't assert that _size == size because this may be an // allocation out of a linear allocation block assert(sizeof(FreeChunk) % HeapWordSize == 0, "shouldn't write beyond chunk"); HeapWord* addr = (HeapWord*)this; size_t hdr = header_size(); Memory::set_words(addr + hdr, size - hdr, baadbabeHeapWord); }
void EthernetII::write_serialization(uint8_t *buffer, uint32_t total_sz, const PDU *parent) { #ifdef TINS_DEBUG assert(total_sz >= header_size() + trailer_size()); #endif /* Inner type defaults to IP */ if (inner_pdu()) { Constants::Ethernet::e flag = Internals::pdu_flag_to_ether_type( inner_pdu()->pdu_type() ); payload_type(static_cast<uint16_t>(flag)); } memcpy(buffer, &_eth, sizeof(ethhdr)); uint32_t trailer = trailer_size(); if (trailer) { uint32_t trailer_offset = header_size(); if (inner_pdu()) trailer_offset += inner_pdu()->size(); memset(buffer + trailer_offset, 0, trailer); } }
void RSNEAPOL::write_body(uint8_t *buffer, uint32_t total_sz) { #ifdef TINS_DEBUG assert(total_sz >= header_size() - sizeof(eapolhdr)); #endif if(_key.size()) { if(!_header.key_t) { _header.key_length = Endian::host_to_be<uint16_t>(32); wpa_length(_key.size()); } else if(_key.size()) { wpa_length(_key.size()); } } std::memcpy(buffer, &_header, sizeof(_header)); buffer += sizeof(_header); std::copy(_key.begin(), _key.end(), buffer); }
void DHCP::write_serialization(uint8_t *buffer, uint32_t total_sz, const PDU *parent) { #ifdef TINS_DEBUG assert(total_sz >= header_size()); #endif if(_size) { vend_type &result(BootP::vend()); result.resize(_size); uint8_t *ptr = &result[0] + sizeof(uint32_t); // Magic cookie *((uint32_t*)&result[0]) = Endian::host_to_be<uint32_t>(0x63825363); for(options_type::const_iterator it = _options.begin(); it != _options.end(); ++it) { *(ptr++) = it->option(); *(ptr++) = it->length_field(); std::copy(it->data_ptr(), it->data_ptr() + it->data_size(), ptr); ptr += it->data_size(); } } BootP::write_serialization(buffer, total_sz, parent); }
int constMethodOopDesc::object_size(int code_size, int compressed_line_number_size, int local_variable_table_length, int checked_exceptions_length) { int extra_bytes = code_size; if (compressed_line_number_size > 0) { extra_bytes += compressed_line_number_size; } if (checked_exceptions_length > 0) { extra_bytes += sizeof(u2); extra_bytes += checked_exceptions_length * sizeof(CheckedExceptionElement); } if (local_variable_table_length > 0) { extra_bytes += sizeof(u2); extra_bytes += local_variable_table_length * sizeof(LocalVariableTableElement); } int extra_words = align_size_up(extra_bytes, BytesPerWord) / BytesPerWord; return align_object_size(header_size() + extra_words); }
/* Advances the header cursor to the next protocol payload.
 * Returns non-zero while there are more (known) headers to process;
 * returns 0 at end-of-chain, on truncation, or for unknown protocols. */
static int next_payload(struct header_chunk* header){
	/* stop processing if protocol doesn't define next_payload */
	if ( !header->protocol->next_payload ){
		return 0;
	}

	/* stop if previous header was truncated */
	if ( header->truncated ){
		return 0;
	}

	/* ask the current protocol where its payload begins and what it is */
	const char* next = header->ptr;
	const struct caputils_protocol* current = header->protocol;
	enum caputils_protocol_type type = current->next_payload(header, header->ptr, &next);
	header->ptr = next;
	header->protocol = protocol_get(type);

	if ( !header->protocol ){
		fprintf(stderr, "invalid protocol type %d, make sure protocol is registerd\n", type);
		abort();
	}

	/* validate payload pointer */
	if ( (header->ptr == NULL && type == PROTOCOL_DONE) || limited_caplen(header->cp, header->ptr, 0) ){
		header->ptr = NULL;
		header->truncated = 1;
		return 0;
	}

	/* make sure pointer is actually inside the captured packet (and not pointing at random data because of a corrupted packet) */
	ptr_sanity(header->cp, header->ptr, current);

	/* ensure there is enough data left */
	if ( limited_caplen(header->cp, header->ptr, header_size(header)) ){
		header->truncated = 1;
	}

	return type != PROTOCOL_UNKNOWN && type != PROTOCOL_DONE;
}
// Visits the oops in this task mirror's embedded statics area. The oop map
// describing the statics lives in the containing JavaClass, after its
// non-static entries.
void TaskMirrorDesc::variable_oops_do(void do_oop(OopDesc **)) {
  if (_object_size == header_size()) {
    // no statics so don't continue. It may be a size_type_array class which
    // isn't really an instance. Hence trying to obtain the embedded_oop_map
    // will result in a crash
    return;
  }
  // The statics are embedded at the end of this TaskMirrorDesc but the oopmap
  // for them is in the JavaClass object
  if (_containing_class != NULL) {
    jubyte *map = _containing_class->embedded_oop_map();
    // skip over non-static oopmap entries to get to the statics
    while (*map++ != OopMapSentinel) {}
    // a second sentinel immediately after means the statics map is empty
    if (*map != OopMapSentinel) {
      map_oops_do(map, do_oop);
    }
  }
}
// Writes the record-set header into 'buf', right-aligned against the
// maximum header size: version/check-type byte, ULEB128-encoded total size
// and record count, a header CRC, and (optionally) the payload checksum.
// Returns the offset at which the header actually starts within 'buf'.
ssize_t
RecordSetOutBase::write_header (byte_t* const buf, ssize_t const size)
{
    int const csize(check_size(check_type_));

    assert (header_size_max() + csize <= size);

    // The header is laid out at the END of the reserved max-size area, so
    // hdr_offset is the unused slack at the front.
    ssize_t const hdr_offset(header_size_max() - header_size());

    assert (hdr_offset >= 0);

    // total size shrinks by the slack we are not going to transmit
    size_ -= hdr_offset;

    int off(hdr_offset);

    buf[off] = (static_cast<byte_t>(version_) << 4) | /* upper 4 bytes: ver */
               (static_cast<byte_t>(check_type_) & 0x0f);

    off += 1;
    off += uleb128_encode(size_,  buf + off, size - off);
    off += uleb128_encode(count_, buf + off, size - off);

    /* write header CRC */
    uint32_t const crc(gu_fast_hash32(buf + hdr_offset, off - hdr_offset));
    *(reinterpret_cast<uint32_t*>(buf + off)) = htog(crc);

    off += VER1_CRC_SIZE;

    /* append payload checksum */
    if (check_type_ != CHECK_NONE)
    {
        assert (csize <= size - off);
        check_.append (buf + hdr_offset, off - hdr_offset); /* append header */
        check_.gather (buf + off, csize);
    }

    return hdr_offset;
}
// Visits all oops in this ClassInfo: the instance-class fields (arrays have
// none visited here), then the vtable entries, then the method slots of the
// itable, skipping each itable entry's klass-index and method-index words.
void ClassInfoDesc::variable_oops_do(void do_oop(OopDesc**)) {
  //
  // Reminder:: remember to change ROMWriter::stream_class_info()
  // whenever you change this function!
  //
  if (_access_flags.is_array_class()) {
    //do_oop((OopDesc**)&array._name);
  } else {
    do_oop((OopDesc**)&instance._methods);
    do_oop((OopDesc**)&instance._local_interfaces);
    do_oop((OopDesc**)&instance._fields);
    do_oop((OopDesc**)&instance._constants);
#if ENABLE_REFLECTION
    do_oop((OopDesc**)&instance._inner_classes);
#endif
  }

  // vtable: starts immediately after the fixed header
  OopDesc** vtable = (OopDesc**) ((address) this + header_size());
  int i;
  for (i = 0; i < _vtable_length; i++) {
    do_oop(vtable++); // visit virtual method
  }

  // itable: follows the vtable; the object ends at _object_size
  OopDesc** itable = vtable;
  OopDesc** itable_end = (OopDesc**) ((address) this + _object_size);
  // first, skip the per-interface index pairs at the front of the itable
  for (i = 0; i < _itable_length; i++) {
    itable++; // skip interface klass_index
    itable++; // skip integer method index
  }
  // the rest of the itable is method oops
  while (itable < itable_end) {
    do_oop(itable++); // visit interface method
  }
}
// Writes the 802.11 frame: fixed header, subtype-specific extension header,
// fixed parameters, and finally the tagged options (type/length/value).
void Dot11::write_serialization(uint8_t *buffer, uint32_t total_sz, const PDU *parent) {
    #ifdef TINS_DEBUG
    assert(total_sz >= header_size());
    #endif
    memcpy(buffer, &_header, sizeof(_header));
    buffer += sizeof(_header);
    total_sz -= sizeof(_header);
    // subtype-specific extension header (management/control/data specific)
    uint32_t written = write_ext_header(buffer, total_sz);
    buffer += written;
    total_sz -= written;
    // fixed parameters get whatever space remains before the options
    uint32_t child_len = write_fixed_parameters(buffer, total_sz - _options_size);
    buffer += child_len;
    #ifdef TINS_DEBUG
    assert(total_sz >= child_len + _options_size);
    #endif
    // tagged options: 1-byte id, 1-byte length, then the payload
    for(std::list<option>::const_iterator it = _options.begin(); it != _options.end(); ++it) {
        *(buffer++) = it->option();
        *(buffer++) = it->length_field();
        std::copy(it->data_ptr(), it->data_ptr() + it->data_size(), buffer);
        buffer += it->data_size();
    }
}
// Object size in words is just the fixed header: this type has no
// variable-length body.
int object_size() const { return header_size(); }
// Byte offset of the referent slot: the first (index 0) kjobject slot
// immediately after the fixed header. The `0 *` spells out the slot index.
static int referent_index_offset() { return header_size() + 0 * sizeof(kjobject); }