/* return the size of the memory mapping and file range of a given section */
static inline void get_section_sizes( const IMAGE_SECTION_HEADER *sec, size_t *map_size,
                                      off_t *file_start, size_t *file_size )
{
    static const unsigned int sector_align = 0x1ff;

    if (!sec->Misc.VirtualSize) *map_size = ROUND_SIZE( sec->SizeOfRawData );
    else *map_size = ROUND_SIZE( sec->Misc.VirtualSize );

    *file_start = sec->PointerToRawData & ~sector_align;
    *file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
    if (*file_size > *map_size) *file_size = *map_size;
}
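For context, the mapping snippets in this file round virtual sizes up to page granularity while file offsets are rounded to 512-byte sectors. A minimal sketch of the page rounding they rely on, assuming page_mask is page_size - 1 (the macro below is an illustration, not copied from the server source):

/* Illustrative only: page-granularity rounding as assumed by the mapping
   snippets in this file.  page_mask is taken to be page_size - 1, with
   page_size a power of two. */
static const unsigned int page_mask = 0xfff;                /* e.g. 4 KiB pages */
#define ROUND_SIZE(size)  (((size) + page_mask) & ~page_mask)
/* Example: ROUND_SIZE(0x1201) == 0x2000 with 4 KiB pages. */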
bool QGString::enlarge( uint newlen )
{
  if (newlen==0)
  {
    if (m_data) { free(m_data); m_data=0; }
    m_memSize=0;
    m_len=0;
    return TRUE;
  }
  uint newMemSize = ROUND_SIZE(newlen+1);
  if (newMemSize==m_memSize) return TRUE;
  m_memSize = newMemSize;
  if (m_data==0)
  {
    m_data = (char *)malloc(m_memSize);
  }
  else
  {
    m_data = (char *)realloc(m_data,m_memSize);
  }
  if (m_data==0)
  {
    return FALSE;
  }
  m_data[newlen-1]='\0';
  if (m_len>newlen) m_len=newlen;
  return TRUE;
}
bool QGString::resize( uint newlen )
{
  m_len = 0;
  if (newlen==0)
  {
    if (m_data) { free(m_data); m_data=0; }
    m_memSize=0;
    DBG_STR(("%p: 1.QGString::resize() %d:%s\n",this,m_len,m_data?m_data:"<none>"));
    return TRUE;
  }
  m_memSize = ROUND_SIZE(newlen+1);
  assert(m_memSize>=newlen+1);
  if (m_data==0)
  {
    m_data = (char *)malloc(m_memSize);
  }
  else
  {
    m_data = (char *)realloc(m_data,m_memSize);
  }
  if (m_data==0)
  {
    DBG_STR(("%p: 2.QGString::resize() %d:%s\n",this,m_len,m_data?m_data:"<none>"));
    return FALSE;
  }
  m_data[newlen-1]='\0';
  m_len = qstrlen(m_data);
  DBG_STR(("%p: 3.QGString::resize() %d:%s\n",this,m_len,m_data?m_data:"<none>"));
  return TRUE;
}
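All of the QGString methods in this file size their buffer through ROUND_SIZE, so repeated appends and resizes do not call the allocator for every byte. A minimal sketch of such a helper under the assumption of a fixed power-of-two allocation block (the constant and the exact policy are illustrative, not taken from the original qgstring.h):

/* Hypothetical illustration: round a requested capacity up to a multiple
   of a fixed allocation block so successive appends reuse the same buffer
   until the block is exhausted.  BLOCK must be a power of two. */
#define BLOCK 64u
#define ROUND_SIZE(len)  (((len) + BLOCK - 1) & ~(BLOCK - 1))
/* Example: ROUND_SIZE(1) == 64, ROUND_SIZE(65) == 128. */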
/* retrieve the mapping parameters for an executable (PE) image */
static int get_image_params( struct mapping *mapping )
{
    IMAGE_DOS_HEADER dos;
    IMAGE_NT_HEADERS nt;
    IMAGE_SECTION_HEADER *sec = NULL;
    struct fd *fd;
    off_t pos;
    int unix_fd, size, toread;

    /* load the headers */
    if (!(fd = mapping_get_fd( &mapping->obj ))) return 0;
    if ((unix_fd = get_unix_fd( fd )) == -1) goto error;
    if (pread( unix_fd, &dos, sizeof(dos), 0 ) != sizeof(dos)) goto error;
    if (dos.e_magic != IMAGE_DOS_SIGNATURE) goto error;
    pos = dos.e_lfanew;

    if (pread( unix_fd, &nt.Signature, sizeof(nt.Signature), pos ) != sizeof(nt.Signature))
        goto error;
    pos += sizeof(nt.Signature);
    if (nt.Signature != IMAGE_NT_SIGNATURE) goto error;
    if (pread( unix_fd, &nt.FileHeader, sizeof(nt.FileHeader), pos ) != sizeof(nt.FileHeader))
        goto error;
    pos += sizeof(nt.FileHeader);
    /* zero out Optional header in the case it's not present or partial */
    memset(&nt.OptionalHeader, 0, sizeof(nt.OptionalHeader));
    toread = min( sizeof(nt.OptionalHeader), nt.FileHeader.SizeOfOptionalHeader );
    if (pread( unix_fd, &nt.OptionalHeader, toread, pos ) != toread) goto error;
    pos += nt.FileHeader.SizeOfOptionalHeader;

    /* load the section headers */
    size = sizeof(*sec) * nt.FileHeader.NumberOfSections;
    if (!(sec = malloc( size ))) goto error;
    if (pread( unix_fd, sec, size, pos ) != size) goto error;

    if (!build_shared_mapping( mapping, unix_fd, sec, nt.FileHeader.NumberOfSections )) goto error;

    if (mapping->shared_file) list_add_head( &shared_list, &mapping->shared_entry );

    mapping->size        = ROUND_SIZE( nt.OptionalHeader.SizeOfImage );
    mapping->base        = (void *)nt.OptionalHeader.ImageBase;
    mapping->header_size = pos + size;
    mapping->protect     = VPROT_IMAGE;

    /* sanity check */
    if (mapping->header_size > mapping->size) goto error;

    free( sec );
    release_object( fd );
    return 1;

 error:
    if (sec) free( sec );
    release_object( fd );
    set_error( STATUS_INVALID_FILE_FOR_SECTION );
    return 0;
}
/*
 * @implemented
 */
void* malloc(size_t _size)
{
    size_t nSize = ROUND_SIZE(_size);

    if (nSize < _size)
        return NULL;

    return HeapAlloc(GetProcessHeap(), 0, nSize);
}
/*
 * @implemented
 */
void* __cdecl malloc(size_t _size)
{
    size_t nSize = ROUND_SIZE(_size);

    if (nSize < _size)
        return NULL;

    return RtlAllocateHeap(RtlGetProcessHeap(), 0, nSize);
}
/*
 * @implemented
 */
void* calloc(size_t _nmemb, size_t _size)
{
    size_t nSize = _nmemb * _size;
    size_t cSize = ROUND_SIZE(nSize);

    /* reject multiplication overflow (guard the division against _size == 0)
       and any overflow introduced by the rounding itself */
    if ((_size != 0 && _nmemb > ((size_t)-1 / _size)) || (cSize < nSize))
        return NULL;

    return HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, cSize);
}
/*
 * @implemented
 */
void* _expand(void* _ptr, size_t _size)
{
    size_t nSize = ROUND_SIZE(_size);

    if (nSize < _size)
        return NULL;

    return HeapReAlloc(GetProcessHeap(), HEAP_REALLOC_IN_PLACE_ONLY, _ptr, nSize);
}
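The `nSize < _size` tests in the wrappers above depend on ROUND_SIZE being a round-up: if the rounding wraps past SIZE_MAX, the result comes out smaller than the request. A small sketch of that check under an assumed 16-byte heap granularity (the real granularity is not shown in these snippets):

#include <stdint.h>

/* Illustrative only: the wrap-around check used by the CRT wrappers above,
   written out for a hypothetical 16-byte heap granularity. */
#define GRANULARITY 16u
static size_t round_size(size_t n)              /* stand-in for ROUND_SIZE */
{
    return (n + (GRANULARITY - 1)) & ~(size_t)(GRANULARITY - 1);
}

static int request_is_too_big(size_t n)
{
    return round_size(n) < n;                   /* true only when rounding wrapped */
}
/* e.g. with a 32-bit size_t, round_size(0xFFFFFFF9u) == 0, so the request is rejected. */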
QGString &QGString::operator+=( char c )
{
  uint len = m_len;
  uint memSize = ROUND_SIZE(len+2);
  assert(memSize>=len+2);
  char *newData = memSize!=m_memSize ? (char *)realloc( m_data, memSize ) : m_data;
  m_memSize = memSize;
  if (newData)
  {
    m_data = newData;
    m_data[len] = c;
    m_data[len+1] = '\0';
  }
  m_len++;
  DBG_STR(("%p: QGString::operator+=(char s) %d:%s\n",this,m_len,m_data?m_data:"<none>"));
  return *this;
}
QGString::QGString(uint size)
{
  if (size==0)
  {
    m_data=0;
    m_len=0;
    m_memSize=0; // keep the capacity consistent with the empty state
  }
  else
  {
    m_memSize = ROUND_SIZE(size+1);
    m_data = (char*)malloc(m_memSize);
    memset(m_data,' ',size);
    m_data[size]='\0';
    m_len=size;
  }
  DBG_STR(("%p: QGString::QGString(uint size=%d) %d:%s\n",
           this,size,m_len,m_data?m_data:"<none>"));
}
QGString &QGString::operator+=( const char *str )
{
  if (!str) return *this;
  uint len1 = length();
  uint len2 = qstrlen(str);
  uint memSize = ROUND_SIZE(len1 + len2 + 1);
  assert(memSize>=len1+len2+1);
  char *newData = memSize!=m_memSize ? (char *)realloc( m_data, memSize ) : m_data;
  m_memSize = memSize;
  if (newData)
  {
    m_data = newData;
    memcpy( m_data + len1, str, len2 + 1 );
  }
  m_len+=len2;
  DBG_STR(("%p: QGString::operator+=(const char *) %d:%s\n",this,m_len,m_data?m_data:"<none>"));
  return *this;
}
QGString::QGString( const char *str )
{
  if (str==0)
  {
    m_data=0;
    m_len=0;
    m_memSize=0;
  }
  else
  {
    m_len = qstrlen(str);
    m_memSize = ROUND_SIZE(m_len+1);
    assert(m_memSize>=m_len+1);
    m_data = (char *)malloc(m_memSize);
    qstrcpy(m_data,str);
  }
  DBG_STR(("%p: QGString::QGString(const char *) %d:%s\n",this,m_len,m_data?m_data:"<none>"));
}
QGString &QGString::operator=( const char *str )
{
  if (m_data) free(m_data);
  if (str==0) // null string
  {
    m_data = 0;
    m_len = 0;
    m_memSize = 0;
  }
  else
  {
    m_len = qstrlen(str);
    m_memSize = ROUND_SIZE(m_len+1);
    assert(m_memSize>=m_len+1);
    m_data = (char*)malloc(m_memSize);
    qstrcpy(m_data,str);
  }
  DBG_STR(("%p: QGString::operator=(const char *) %d:%s\n",this,m_len,m_data?m_data:"<none>"));
  return *this;
}
/*
 * @implemented
 */
void* realloc(void* _ptr, size_t _size)
{
    size_t nSize;

    if (_ptr == NULL)
        return malloc(_size);

    if (_size == 0)
    {
        free(_ptr);
        return NULL;
    }

    nSize = ROUND_SIZE(_size);
    /* check for integer overflow */
    if (nSize < _size)
        return NULL;

    return HeapReAlloc(GetProcessHeap(), 0, _ptr, nSize);
}
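A short usage sketch of the wrapper semantics above (a NULL pointer behaves like malloc, a zero size frees the block); the buffer and sizes are illustrative only:

/* Illustrative only: exercising the CRT wrappers defined above. */
static void realloc_demo(void)
{
    char *buf = realloc(NULL, 32);        /* same as malloc(32) */
    if (buf != NULL)
    {
        char *bigger = realloc(buf, 64);  /* heap block grows to ROUND_SIZE(64) bytes */
        if (bigger != NULL)
            buf = bigger;
        buf = realloc(buf, 0);            /* frees the block and yields NULL */
    }
    (void)buf;
}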
void HandleFinalize(GC_PTR ptr, GC_PTR client_data)
{
  int prefix = ROUND_SIZE(sizeof(RT0__Struct));
  RT0__Object obj = (RT0__Object)(ptr + prefix);

  DYN_TBCALL(RT0,ObjectDesc,Finalize,(RT0__Object)obj,(obj));
}
/* return the size of the file mapping of a given section */
static inline unsigned int get_section_filemap_size( const IMAGE_SECTION_HEADER *sec )
{
    if (!sec->Misc.VirtualSize) return sec->SizeOfRawData;
    else return min( sec->SizeOfRawData, ROUND_SIZE( sec->Misc.VirtualSize ) );
}
OOC_PTR RT0__NewObject(RT0__Struct td, ...)
{
  void *var, *ptr;
  OOC_INT8 form = td->form;
  int flags = td->flags;

  if (form == RT0__strQualType) {
    /* get to base type of qualified type */
    form = td->typeArgs[0]->form;
  }

  if (form == RT0__strRecord) {         /* record */
    int allocate;
    int prefix;
    int size = td->size;

    if (size == 0) size++;
    prefix = ROUND_SIZE(sizeof(RT0__Struct));
    allocate = prefix + size;

    if (flags & (1<<RT0__flagAtomic)) {
      ptr = GC_MALLOC_ATOMIC(allocate);
    } else {
      ptr = GC_MALLOC(allocate);
    }
    if (ptr == NULL) {
      _out_of_memory(allocate);
    } else if (RT0__poisonHeap >= 0) {
      memset(ptr, RT0__poisonHeap, allocate);
    }
    var = (char*)ptr+prefix;
    OOC_TYPE_TAG(var) = td;
    if (flags & (1<<RT0__flagVTable)) {
      ((void **) var)[0] = td->tbProcs;
    }
#ifdef USE_BOEHM_GC
    if (flags & (1<<RT0__flagFinalize)) {
      GC_register_finalizer(ptr, HandleFinalize, 0,
                            (GC_finalization_proc *) 0, (GC_PTR *) 0);
    }
#endif

  } else if (form == RT0__strArray) {   /* fixed size array */
    int size = td->size;

    if (size == 0) size++;
    if (flags & (1<<RT0__flagAtomic)) {
      var = GC_MALLOC_ATOMIC(size);
    } else {
      var = GC_MALLOC(size);
    }
    if (var == NULL) {
      _out_of_memory(size);
    } else if (RT0__poisonHeap >= 0) {
      memset(var, RT0__poisonHeap, size);
    }

  } else {                              /* dynamic array */
    int allocate;
    va_list ap;
    int i;
    size_t size, prefix;
    void* ptr;
    OOC_LEN *dim;

    /* calculate size of the block in bytes */
    size = td->size;
    va_start(ap, td);
    for (i=0; i != td->len; i++) {
      OOC_LEN len = va_arg(ap, OOC_LEN);
      if (len < 0) {
        _negative_length(len);
      }
      size *= len;
    }
    va_end(ap);
    if (size == 0) size++;

    /* calculate length of type tag prefix; round to maximum required
       alignment of any basic type */
    prefix = ROUND_SIZE(td->len*sizeof(OOC_LEN));
    allocate = prefix + size;

    if (flags & (1<<RT0__flagAtomic)) {
      ptr = GC_MALLOC_ATOMIC(allocate);
    } else {
      ptr = GC_MALLOC(allocate);
    }
    if (ptr == NULL) {
      _out_of_memory(allocate);
    } else if (RT0__poisonHeap >= 0) {
      memset(ptr, RT0__poisonHeap, allocate);
    }
    var = (char*)ptr+prefix;

    /* set length of dimensions */
    dim = (OOC_LEN*)var;
    va_start(ap, td);
    for (i=0; i != td->len; i++) {
      *(--dim) = va_arg(ap, OOC_LEN);
    }
    va_end(ap);
  }
  return (OOC_PTR)var;
}
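The allocator above hands back a pointer just past a prefix: for records the prefix holds the type descriptor, for dynamic arrays it holds the dimension lengths. A hedged sketch of how such a tag could be read back, assuming OOC_TYPE_TAG simply indexes one descriptor pointer before the object (the runtime's real macro may differ):

/* Hypothetical illustration, not the runtime's actual definition: for a
   record, RT0__NewObject stores the type descriptor in the last
   pointer-sized slot of the ROUND_SIZE(sizeof(RT0__Struct)) prefix, so
   the tag of an object pointer could be read back as: */
#define OOC_TYPE_TAG(obj)  (((RT0__Struct *)(obj))[-1])

/* HandleFinalize (earlier in this file) does the inverse: starting from
   the raw GC block it skips the same prefix to rebuild the object
   pointer before dispatching Finalize. */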
/* retrieve the mapping parameters for an executable (PE) image */
static unsigned int get_image_params( struct mapping *mapping, file_pos_t file_size, int unix_fd )
{
    IMAGE_DOS_HEADER dos;
    IMAGE_COR20_HEADER clr;
    IMAGE_SECTION_HEADER sec[96];
    struct
    {
        DWORD Signature;
        IMAGE_FILE_HEADER FileHeader;
        union
        {
            IMAGE_OPTIONAL_HEADER32 hdr32;
            IMAGE_OPTIONAL_HEADER64 hdr64;
        } opt;
    } nt;
    off_t pos;
    int size;
    size_t clr_va, clr_size;
    unsigned int i, cpu_mask = get_supported_cpu_mask();

    /* load the headers */
    if (!file_size) return STATUS_INVALID_FILE_FOR_SECTION;
    if (pread( unix_fd, &dos, sizeof(dos), 0 ) != sizeof(dos)) return STATUS_INVALID_IMAGE_NOT_MZ;
    if (dos.e_magic != IMAGE_DOS_SIGNATURE) return STATUS_INVALID_IMAGE_NOT_MZ;
    pos = dos.e_lfanew;

    size = pread( unix_fd, &nt, sizeof(nt), pos );
    if (size < sizeof(nt.Signature) + sizeof(nt.FileHeader)) return STATUS_INVALID_IMAGE_FORMAT;
    /* zero out Optional header in the case it's not present or partial */
    size = min( size, sizeof(nt.Signature) + sizeof(nt.FileHeader) + nt.FileHeader.SizeOfOptionalHeader );
    if (size < sizeof(nt)) memset( (char *)&nt + size, 0, sizeof(nt) - size );
    if (nt.Signature != IMAGE_NT_SIGNATURE)
    {
        if (*(WORD *)&nt.Signature == IMAGE_OS2_SIGNATURE) return STATUS_INVALID_IMAGE_NE_FORMAT;
        return STATUS_INVALID_IMAGE_PROTECT;
    }

    switch (nt.opt.hdr32.Magic)
    {
    case IMAGE_NT_OPTIONAL_HDR32_MAGIC:
        switch (nt.FileHeader.Machine)
        {
        case IMAGE_FILE_MACHINE_I386:
            if (cpu_mask & (CPU_FLAG(CPU_x86) | CPU_FLAG(CPU_x86_64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_ARM:
        case IMAGE_FILE_MACHINE_THUMB:
        case IMAGE_FILE_MACHINE_ARMNT:
            if (cpu_mask & (CPU_FLAG(CPU_ARM) | CPU_FLAG(CPU_ARM64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_POWERPC:
            if (cpu_mask & CPU_FLAG(CPU_POWERPC)) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        default:
            return STATUS_INVALID_IMAGE_FORMAT;
        }
        clr_va = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
        clr_size = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;

        mapping->image.base           = nt.opt.hdr32.ImageBase;
        mapping->image.entry_point    = nt.opt.hdr32.ImageBase + nt.opt.hdr32.AddressOfEntryPoint;
        mapping->image.map_size       = ROUND_SIZE( nt.opt.hdr32.SizeOfImage );
        mapping->image.stack_size     = nt.opt.hdr32.SizeOfStackReserve;
        mapping->image.stack_commit   = nt.opt.hdr32.SizeOfStackCommit;
        mapping->image.subsystem      = nt.opt.hdr32.Subsystem;
        mapping->image.subsystem_low  = nt.opt.hdr32.MinorSubsystemVersion;
        mapping->image.subsystem_high = nt.opt.hdr32.MajorSubsystemVersion;
        mapping->image.dll_charact    = nt.opt.hdr32.DllCharacteristics;
        mapping->image.contains_code  = (nt.opt.hdr32.SizeOfCode ||
                                         nt.opt.hdr32.AddressOfEntryPoint ||
                                         nt.opt.hdr32.SectionAlignment & page_mask);
        mapping->image.header_size    = nt.opt.hdr32.SizeOfHeaders;
        mapping->image.checksum       = nt.opt.hdr32.CheckSum;
        mapping->image.image_flags    = 0;
        if (nt.opt.hdr32.SectionAlignment & page_mask)
            mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
        if ((nt.opt.hdr32.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
            mapping->image.contains_code && !(clr_va && clr_size))
            mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
        break;

    case IMAGE_NT_OPTIONAL_HDR64_MAGIC:
        if (!(cpu_mask & CPU_64BIT_MASK)) return STATUS_INVALID_IMAGE_WIN_64;
        switch (nt.FileHeader.Machine)
        {
        case IMAGE_FILE_MACHINE_AMD64:
            if (cpu_mask & (CPU_FLAG(CPU_x86) | CPU_FLAG(CPU_x86_64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_ARM64:
            if (cpu_mask & (CPU_FLAG(CPU_ARM) | CPU_FLAG(CPU_ARM64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        default:
            return STATUS_INVALID_IMAGE_FORMAT;
        }
        clr_va = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
        clr_size = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;

        mapping->image.base           = nt.opt.hdr64.ImageBase;
        mapping->image.entry_point    = nt.opt.hdr64.ImageBase + nt.opt.hdr64.AddressOfEntryPoint;
        mapping->image.map_size       = ROUND_SIZE( nt.opt.hdr64.SizeOfImage );
        mapping->image.stack_size     = nt.opt.hdr64.SizeOfStackReserve;
        mapping->image.stack_commit   = nt.opt.hdr64.SizeOfStackCommit;
        mapping->image.subsystem      = nt.opt.hdr64.Subsystem;
        mapping->image.subsystem_low  = nt.opt.hdr64.MinorSubsystemVersion;
        mapping->image.subsystem_high = nt.opt.hdr64.MajorSubsystemVersion;
        mapping->image.dll_charact    = nt.opt.hdr64.DllCharacteristics;
        mapping->image.contains_code  = (nt.opt.hdr64.SizeOfCode ||
                                         nt.opt.hdr64.AddressOfEntryPoint ||
                                         nt.opt.hdr64.SectionAlignment & page_mask);
        mapping->image.header_size    = nt.opt.hdr64.SizeOfHeaders;
        mapping->image.checksum       = nt.opt.hdr64.CheckSum;
        mapping->image.image_flags    = 0;
        if (nt.opt.hdr64.SectionAlignment & page_mask)
            mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
        if ((nt.opt.hdr64.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
            mapping->image.contains_code && !(clr_va && clr_size))
            mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
        break;

    default:
        return STATUS_INVALID_IMAGE_FORMAT;
    }

    mapping->image.image_charact = nt.FileHeader.Characteristics;
    mapping->image.machine       = nt.FileHeader.Machine;
    mapping->image.zerobits      = 0; /* FIXME */
    mapping->image.gp            = 0; /* FIXME */
    mapping->image.file_size     = file_size;
    mapping->image.loader_flags  = clr_va && clr_size;

    /* load the section headers */
    pos += sizeof(nt.Signature) + sizeof(nt.FileHeader) + nt.FileHeader.SizeOfOptionalHeader;
    if (nt.FileHeader.NumberOfSections > sizeof(sec)/sizeof(sec[0])) return STATUS_INVALID_IMAGE_FORMAT;
    size = sizeof(*sec) * nt.FileHeader.NumberOfSections;
    if (!mapping->size) mapping->size = mapping->image.map_size;
    else if (mapping->size > mapping->image.map_size) return STATUS_SECTION_TOO_BIG;
    if (pos + size > mapping->image.map_size) return STATUS_INVALID_FILE_FOR_SECTION;
    if (pos + size > mapping->image.header_size) mapping->image.header_size = pos + size;
    if (pread( unix_fd, sec, size, pos ) != size) return STATUS_INVALID_FILE_FOR_SECTION;

    for (i = 0; i < nt.FileHeader.NumberOfSections && !mapping->image.contains_code; i++)
        if (sec[i].Characteristics & IMAGE_SCN_MEM_EXECUTE) mapping->image.contains_code = 1;

    if (load_clr_header( &clr, clr_va, clr_size, unix_fd, sec, nt.FileHeader.NumberOfSections ) &&
        (clr.Flags & COMIMAGE_FLAGS_ILONLY))
    {
        mapping->image.image_flags |= IMAGE_FLAGS_ComPlusILOnly;
        if (nt.opt.hdr32.Magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC &&
            !(clr.Flags & COMIMAGE_FLAGS_32BITREQUIRED))
            mapping->image.image_flags |= IMAGE_FLAGS_ComPlusNativeReady;
    }

    if (!build_shared_mapping( mapping, unix_fd, sec, nt.FileHeader.NumberOfSections ))
        return STATUS_INVALID_FILE_FOR_SECTION;

    return STATUS_SUCCESS;
}
ffi_status ffi_prep_cif_machdep(ffi_cif *cif)
{
  size_t struct_size = 0;
  int n_gpr = 0;
  int n_fpr = 0;
  int n_ov = 0;

  ffi_type **ptr;
  int i;

  /* Determine return value handling.  */
  switch (cif->rtype->type)
    {
    /* Void is easy.  */
    case FFI_TYPE_VOID:
      cif->flags = FFI390_RET_VOID;
      break;

    /* Structures are returned via a hidden pointer.  */
    case FFI_TYPE_STRUCT:
      cif->flags = FFI390_RET_STRUCT;
      n_gpr++;  /* We need one GPR to pass the pointer.  */
      break;

    /* Floating point values are returned in fpr 0.  */
    case FFI_TYPE_FLOAT:
      cif->flags = FFI390_RET_FLOAT;
      break;

    case FFI_TYPE_DOUBLE:
      cif->flags = FFI390_RET_DOUBLE;
      break;

#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
    case FFI_TYPE_LONGDOUBLE:
      cif->flags = FFI390_RET_STRUCT;
      n_gpr++;
      break;
#endif

    /* Integer values are returned in gpr 2 (and gpr 3
       for 64-bit values on 31-bit machines).  */
    case FFI_TYPE_UINT64:
    case FFI_TYPE_SINT64:
      cif->flags = FFI390_RET_INT64;
      break;

    case FFI_TYPE_POINTER:
    case FFI_TYPE_INT:
    case FFI_TYPE_UINT32:
    case FFI_TYPE_SINT32:
    case FFI_TYPE_UINT16:
    case FFI_TYPE_SINT16:
    case FFI_TYPE_UINT8:
    case FFI_TYPE_SINT8:
      /* These are to be extended to word size.  */
#ifdef __s390x__
      cif->flags = FFI390_RET_INT64;
#else
      cif->flags = FFI390_RET_INT32;
#endif
      break;

    default:
      FFI_ASSERT (0);
      break;
    }

  /* Now for the arguments.  */
  for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++)
    {
      int type = (*ptr)->type;

#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
      /* 16-byte long double is passed like a struct.  */
      if (type == FFI_TYPE_LONGDOUBLE)
        type = FFI_TYPE_STRUCT;
#endif

      /* Check how a structure type is passed.  */
      if (type == FFI_TYPE_STRUCT)
        {
          type = ffi_check_struct_type (*ptr);

          /* If we pass the struct via pointer, we must reserve space
             to copy its data for proper call-by-value semantics.  */
          if (type == FFI_TYPE_POINTER)
            struct_size += ROUND_SIZE ((*ptr)->size);
        }

      /* Now handle all primitive int/float data types.  */
      switch (type)
        {
        /* The first MAX_FPRARGS floating point arguments
           go in FPRs, the rest overflow to the stack.  */
        case FFI_TYPE_DOUBLE:
          if (n_fpr < MAX_FPRARGS)
            n_fpr++;
          else
            n_ov += sizeof (double) / sizeof (long);
          break;

        case FFI_TYPE_FLOAT:
          if (n_fpr < MAX_FPRARGS)
            n_fpr++;
          else
            n_ov++;
          break;

        /* On 31-bit machines, 64-bit integers are passed in GPR pairs,
           if one is still available, or else on the stack.  If only one
           register is free, skip the register (it won't be used for any
           subsequent argument either).  */
#ifndef __s390x__
        case FFI_TYPE_UINT64:
        case FFI_TYPE_SINT64:
          if (n_gpr == MAX_GPRARGS-1)
            n_gpr = MAX_GPRARGS;
          if (n_gpr < MAX_GPRARGS)
            n_gpr += 2;
          else
            n_ov += 2;
          break;
#endif

        /* Everything else is passed in GPRs (until MAX_GPRARGS
           have been used) or overflows to the stack.  */
        default:
          if (n_gpr < MAX_GPRARGS)
            n_gpr++;
          else
            n_ov++;
          break;
        }
    }

  /* Total stack space as required for overflow arguments
     and temporary structure copies.  */
  cif->bytes = ROUND_SIZE (n_ov * sizeof (long)) + struct_size;

  return FFI_OK;
}
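cif->bytes and the temporary struct copies above are kept to the s390 ABI's 8-byte stack-slot granularity. A minimal sketch of the rounding helper this relies on (assumed, not copied from the original ffi.c):

/* Assumed definition: round a byte count up to the 8-byte stack-slot
   alignment that cif->bytes and the struct copy area are sized with in
   ffi_prep_cif_machdep/ffi_prep_args. */
#define ROUND_SIZE(size)  (((size) + 7) & ~7)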
static void ffi_prep_args (unsigned char *stack, extended_cif *ecif)
{
  /* The stack space will be filled with those areas:

       FPR argument register save area   (highest addresses)
       GPR argument register save area
       temporary struct copies
       overflow argument area            (lowest addresses)

     We set up the following pointers:

       p_fpr: bottom of the FPR area (growing upwards)
       p_gpr: bottom of the GPR area (growing upwards)
       p_ov: bottom of the overflow area (growing upwards)
       p_struct: top of the struct copy area (growing downwards)

     All areas are kept aligned to twice the word size.  */

  int gpr_off = ecif->cif->bytes;
  int fpr_off = gpr_off + ROUND_SIZE (MAX_GPRARGS * sizeof (long));

  unsigned long long *p_fpr = (unsigned long long *)(stack + fpr_off);
  unsigned long *p_gpr = (unsigned long *)(stack + gpr_off);
  unsigned char *p_struct = (unsigned char *)p_gpr;
  unsigned long *p_ov = (unsigned long *)stack;

  int n_fpr = 0;
  int n_gpr = 0;
  int n_ov = 0;

  ffi_type **ptr;
  void **p_argv = ecif->avalue;
  int i;

  /* If we are returning a structure, set the first parameter register
     to the address where the structure will be returned.  */
  if (ecif->cif->flags == FFI390_RET_STRUCT)
    p_gpr[n_gpr++] = (unsigned long) ecif->rvalue;

  /* Now for the arguments.  */
  for (ptr = ecif->cif->arg_types, i = ecif->cif->nargs;
       i > 0;
       i--, ptr++, p_argv++)
    {
      void *arg = *p_argv;
      int type = (*ptr)->type;

#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
      /* 16-byte long double is passed like a struct.  */
      if (type == FFI_TYPE_LONGDOUBLE)
        type = FFI_TYPE_STRUCT;
#endif

      /* Check how a structure type is passed.  */
      if (type == FFI_TYPE_STRUCT)
        {
          type = ffi_check_struct_type (*ptr);

          /* If we pass the struct via pointer, copy the data.  */
          if (type == FFI_TYPE_POINTER)
            {
              p_struct -= ROUND_SIZE ((*ptr)->size);
              memcpy (p_struct, (char *)arg, (*ptr)->size);
              arg = &p_struct;
            }
        }

      /* Now handle all primitive int/pointer/float data types.  */
      switch (type)
        {
        case FFI_TYPE_DOUBLE:
          if (n_fpr < MAX_FPRARGS)
            p_fpr[n_fpr++] = *(unsigned long long *) arg;
          else
#ifdef __s390x__
            p_ov[n_ov++] = *(unsigned long *) arg;
#else
            p_ov[n_ov++] = ((unsigned long *) arg)[0],
            p_ov[n_ov++] = ((unsigned long *) arg)[1];
#endif
          break;

        case FFI_TYPE_FLOAT:
          if (n_fpr < MAX_FPRARGS)
            p_fpr[n_fpr++] = (long long) *(unsigned int *) arg << 32;
          else
            p_ov[n_ov++] = *(unsigned int *) arg;
          break;

        case FFI_TYPE_POINTER:
          if (n_gpr < MAX_GPRARGS)
            p_gpr[n_gpr++] = (unsigned long)*(unsigned char **) arg;
          else
            p_ov[n_ov++] = (unsigned long)*(unsigned char **) arg;
          break;

        case FFI_TYPE_UINT64:
        case FFI_TYPE_SINT64:
#ifdef __s390x__
          if (n_gpr < MAX_GPRARGS)
            p_gpr[n_gpr++] = *(unsigned long *) arg;
          else
            p_ov[n_ov++] = *(unsigned long *) arg;
#else
          if (n_gpr == MAX_GPRARGS-1)
            n_gpr = MAX_GPRARGS;
          if (n_gpr < MAX_GPRARGS)
            p_gpr[n_gpr++] = ((unsigned long *) arg)[0],
            p_gpr[n_gpr++] = ((unsigned long *) arg)[1];
          else
            p_ov[n_ov++] = ((unsigned long *) arg)[0],
            p_ov[n_ov++] = ((unsigned long *) arg)[1];
#endif
          break;

        case FFI_TYPE_UINT32:
          if (n_gpr < MAX_GPRARGS)
            p_gpr[n_gpr++] = *(unsigned int *) arg;
          else
            p_ov[n_ov++] = *(unsigned int *) arg;
          break;

        case FFI_TYPE_INT:
        case FFI_TYPE_SINT32:
          if (n_gpr < MAX_GPRARGS)
            p_gpr[n_gpr++] = *(signed int *) arg;
          else
            p_ov[n_ov++] = *(signed int *) arg;
          break;

        case FFI_TYPE_UINT16:
          if (n_gpr < MAX_GPRARGS)
            p_gpr[n_gpr++] = *(unsigned short *) arg;
          else
            p_ov[n_ov++] = *(unsigned short *) arg;
          break;

        case FFI_TYPE_SINT16:
          if (n_gpr < MAX_GPRARGS)
            p_gpr[n_gpr++] = *(signed short *) arg;
          else
            p_ov[n_ov++] = *(signed short *) arg;
          break;

        case FFI_TYPE_UINT8:
          if (n_gpr < MAX_GPRARGS)
            p_gpr[n_gpr++] = *(unsigned char *) arg;
          else
            p_ov[n_ov++] = *(unsigned char *) arg;
          break;

        case FFI_TYPE_SINT8:
          if (n_gpr < MAX_GPRARGS)
            p_gpr[n_gpr++] = *(signed char *) arg;
          else
            p_ov[n_ov++] = *(signed char *) arg;
          break;

        default:
          FFI_ASSERT (0);
          break;
        }
    }
}
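For orientation, ffi_prep_args is not called directly by users; ffi_call drives it after ffi_prep_cif has run the machdep pass above. A small, self-contained caller using the public libffi API (the wrapped function add() is purely illustrative):

#include <ffi.h>

/* Illustrative caller (not part of the original file): prepare a cif for
   int add(int, int) and let ffi_call marshal the arguments. */
static int add(int a, int b) { return a + b; }

int call_add(void)
{
    ffi_cif cif;
    ffi_type *args[2] = { &ffi_type_sint, &ffi_type_sint };
    int a = 2, b = 3;
    void *values[2] = { &a, &b };
    ffi_arg result;

    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, args) != FFI_OK)
        return -1;
    ffi_call(&cif, FFI_FN(add), &result, values);
    return (int)result;   /* 5 */
}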
/*
    This function is given a pointer to the header structure that is
    used to allocate 1 struct of the type given by alloc_type.  It first
    checks if a struct is available in its free list.  If not, it checks
    if 1 is available in its blob, which is a chunk of memory that is
    reserved for its use.  If not, it malloc's a chunk.  The initial part
    of it is used to store the end address of the chunk, and also to keep
    track of the number of free structs in that chunk.  This information
    is used for freeing the chunk when all the structs in it are free.

    Assume all input arguments have been validated.

    This function can be used only to allocate 1 struct of the given type.

    It returns a pointer to the struct that the user can use.  It returns
    NULL only when it is out of free structs, and cannot malloc any more.
    The struct returned is zero-ed.

    A pointer to the chunk that the struct belongs to is stored in the
    bytes preceding the returned address.  Since this pointer is never
    overwritten, when a struct is allocated from the free_list this
    pointer does not have to be written.  In the 2 other cases, where the
    struct is allocated from a new chunk, or the blob, a pointer to the
    chunk is written.
*/
static Dwarf_Ptr
_dwarf_find_memory(Dwarf_Alloc_Hdr alloc_hdr)
{
    /* Pointer to the struct allocated. */
    Dwarf_Small *ret_mem = 0;

    /* Pointer to info about chunks allocated. */
    Dwarf_Alloc_Area alloc_area;

    /* Size of chunk malloc'ed when no free structs left. */
    Dwarf_Signed mem_block_size;

    /* Pointer to block malloc'ed. */
    Dwarf_Small *mem_block;

    /* Check the alloc_area from which the last allocation was made
       (most recent new block).  If that is not successful, then search
       the list of alloc_area's from alloc_header. */
    alloc_area = alloc_hdr->ah_last_alloc_area;
    if (alloc_area == NULL || alloc_area->aa_free_structs_in_chunk == 0)
        for (alloc_area = alloc_hdr->ah_alloc_area_head;
             alloc_area != NULL; alloc_area = alloc_area->aa_next) {
            if (alloc_area->aa_free_structs_in_chunk > 0) {
                break;          /* found a free entry! */
            }
        }

    if (alloc_area != NULL) {
        alloc_area->aa_free_structs_in_chunk--;

        if (alloc_area->aa_free_list != NULL) {
            ret_mem = alloc_area->aa_free_list;

            /* Update the free list.  The initial part of the struct is
               used to hold a pointer to the next struct on the free
               list.  In this way, the free list chain is maintained at
               0 memory cost. */
            alloc_area->aa_free_list =
                ((Dwarf_Free_List) ret_mem)->fl_next;
        } else if (alloc_area->aa_blob_start < alloc_area->aa_blob_end) {
            ret_mem = alloc_area->aa_blob_start;

            /* Store pointer to chunk this struct belongs to in the
               first few bytes.  Return pointer to bytes after this
               pointer storage. */
            *(Dwarf_Alloc_Area *) ret_mem = alloc_area;
            ret_mem += _DW_RESERVE;

            alloc_area->aa_blob_start += alloc_hdr->ah_bytes_one_struct;
        } else {
            /* else fall thru, though it should be impossible to fall
               thru.  Getting here represents a disastrous programming
               error. */
#ifdef DEBUG
            fprintf(stderr, "libdwarf Internal error start %x end %x\n",
                    (int) alloc_area->aa_blob_start,
                    (int) alloc_area->aa_blob_end);
#endif
        }
    }

    /* New memory has to be malloc'ed since there are no free structs. */
    if (ret_mem == 0) {
        Dwarf_Word rounded_area_hdr_size;

        alloc_hdr->ah_chunks_allocated++;

        {   /* this nonsense avoids a warning */
            /* CONSTCOND would be better */
            unsigned long v = sizeof(struct Dwarf_Alloc_Area_s);

            rounded_area_hdr_size = ROUND_SIZE(v);
        }

        /* Allocate memory to contain the required number of structs
           and the Dwarf_Alloc_Area_s to control it. */
        mem_block_size = alloc_hdr->ah_bytes_malloc_per_chunk +
            rounded_area_hdr_size;

        mem_block = malloc(mem_block_size);
        if (mem_block == NULL) {
            return (NULL);
        }

        /* Attach the Dwarf_Alloc_Area_s struct to the list of chunks
           malloc'ed for this struct type.  Also initialize the fields
           of the Dwarf_Alloc_Area_s. */
        alloc_area = (Dwarf_Alloc_Area) mem_block;
        alloc_area->aa_prev = 0;
        if (alloc_hdr->ah_alloc_area_head != NULL) {
            alloc_hdr->ah_alloc_area_head->aa_prev = alloc_area;
        }
        alloc_area->aa_free_list = 0;
        alloc_area->aa_next = alloc_hdr->ah_alloc_area_head;
        alloc_hdr->ah_alloc_area_head = alloc_area;

        alloc_area->aa_alloc_hdr = alloc_hdr;
        alloc_area->aa_free_structs_in_chunk =
            (Dwarf_Sword) alloc_hdr->ah_structs_per_chunk - 1;
        if (alloc_area->aa_free_structs_in_chunk < 1) {
            /* If we get here, there is a disastrous programming error
               somewhere. */
#ifdef DEBUG
            fprintf(stderr,
                    "libdwarf Internal error: free structs in chunk %d\n",
                    (int) alloc_area->aa_free_structs_in_chunk);
#endif
            return NULL;
        }

        /* The struct returned begins immediately after the
           Dwarf_Alloc_Area_s struct. */
        ret_mem = mem_block + rounded_area_hdr_size;
        alloc_area->aa_blob_start = ret_mem + alloc_hdr->ah_bytes_one_struct;
        alloc_area->aa_blob_end = mem_block + mem_block_size;

        /* Store pointer to chunk this struct belongs to in the first
           few bytes.  Return pointer to bytes after this pointer
           storage. */
        *(Dwarf_Alloc_Area *) ret_mem = alloc_area;
        ret_mem += _DW_RESERVE;
    }

    alloc_hdr->ah_last_alloc_area = alloc_area;
    alloc_hdr->ah_struct_user_holds++;
    memset(ret_mem, 0, alloc_hdr->ah_bytes_one_struct - _DW_RESERVE);
    return (ret_mem);
}
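The release path has to recover the chunk from the back-pointer that _dwarf_find_memory stores in the _DW_RESERVE bytes before each struct. A hedged sketch of that lookup, reusing the allocator's types but with an illustrative helper name (not the original libdwarf code):

/* Illustrative only: read back the Dwarf_Alloc_Area pointer that
   _dwarf_find_memory wrote just before the struct it returned. */
static Dwarf_Alloc_Area
chunk_of_struct(Dwarf_Ptr space)
{
    Dwarf_Small *mem = (Dwarf_Small *) space - _DW_RESERVE;  /* step back over the reserve bytes */
    return *(Dwarf_Alloc_Area *) mem;                        /* back-pointer stored at allocation time */
}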