void Application::initialize()
{
    const auto tmpMemorySize = sizeof(Task) * ApplicationCpp::g_maxThreadCount +
                               sizeof(u32) * ApplicationCpp::g_maxBurstCount * 2 +
                               sizeof(Task*) * ApplicationCpp::g_maxThreadCount +
                               sizeof(OutputData) * ApplicationCpp::g_maxThreadCount;
    const auto memorySize = 64 KBYTE;
    static_assert(memorySize >= tmpMemorySize, "");

    auto threadCount = std::min(std::thread::hardware_concurrency(), ApplicationCpp::g_maxThreadCount);
    auto taskCount = threadCount + 1;

    auto memory = (u8*)_aligned_malloc(memorySize, 64);
    VX_ASSERT(memory);

    m_allocator = vx::StackAllocator(memory, memorySize);
    m_threadCount = threadCount;

    m_threads = (std::thread*)m_allocator.allocate(sizeof(std::thread) * threadCount, __alignof(std::thread));
    VX_ASSERT(m_threads);
    for (u32 i = 0; i < threadCount; ++i)
    {
        new (&m_threads[i]) std::thread{};
    }

    m_tasks = (Task*)m_allocator.allocate(sizeof(Task) * taskCount, __alignof(Task));
    VX_ASSERT(m_tasks);
    for (u32 i = 0; i < taskCount; ++i)
    {
        new (&m_tasks[i]) Task(0);
    }
}
int test9(struct f *P) {
  int R;
  R = __alignof(P->x);  // expected-error {{invalid application of '__alignof' to bit-field}}
  R = __alignof(P->y);  // ok.
  R = sizeof(P->x);     // expected-error {{invalid application of 'sizeof' to bit-field}}
  return R;
}
const unsigned long CUDARunner::RunStep()
{
    //unsigned int best=0;
    //unsigned int bestg=~0;
    int offset=0;

    if(m_in==0 || m_out==0 || m_devin==0 || m_devout==0)
    {
        AllocateResources(m_numb,m_numt);
    }

    m_out[0].m_bestnonce=0;
    cuMemcpyHtoD(m_devout,m_out,/*m_numb*m_numt*/sizeof(cuda_out));
    cuMemcpyHtoD(m_devin,m_in,sizeof(cuda_in));

    int loops=GetStepIterations();
    int bits=GetStepBitShift()-1;

    void *ptr=(void *)(size_t)m_devin;
    ALIGN_UP(offset, __alignof(ptr));
    cuParamSetv(m_function,offset,&ptr,sizeof(ptr));
    offset+=sizeof(ptr);

    ptr=(void *)(size_t)m_devout;
    ALIGN_UP(offset, __alignof(ptr));
    cuParamSetv(m_function,offset,&ptr,sizeof(ptr));
    offset+=sizeof(ptr);

    ALIGN_UP(offset, __alignof(loops));
    cuParamSeti(m_function,offset,loops);
    offset+=sizeof(loops);

    ALIGN_UP(offset, __alignof(bits));
    cuParamSeti(m_function,offset,bits);
    offset+=sizeof(bits);

    cuParamSetSize(m_function,offset);
    cuFuncSetBlockShape(m_function,m_numt,1,1);
    cuLaunchGrid(m_function,m_numb,1);

    cuMemcpyDtoH(m_out,m_devout,/*m_numb*m_numt*/sizeof(cuda_out));

    // very unlikely that we will find more than 1 hash with H=0
    // so we'll just return the first one and not even worry about G
    for(int i=0; i<1/*m_numb*m_numt*/; i++)
    {
        if(m_out[i].m_bestnonce!=0)// && m_out[i].m_bestg<bestg)
        {
            return CryptoPP::ByteReverse(m_out[i].m_bestnonce);
            //best=m_out[i].m_bestnonce;
            //bestg=m_out[i].m_bestg;
        }
    }

    return 0;
}
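/*
 * A minimal sketch of the ALIGN_UP helper used above. Its definition is not
 * shown in this snippet, so this form is an assumption, though it matches the
 * classic CUDA driver-API examples, and the identical round-up expression
 * appears written out inline in the GPUInterface::LaunchKernel snippet below:
 * it bumps `offset` up to the next multiple of `alignment`, which must be a
 * power of two.
 */
#define ALIGN_UP(offset, alignment) \
    (offset) = ((offset) + (alignment) - 1) & ~((alignment) - 1)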
ExLockSync::ExLockSync()
{
#ifdef DEBUG
    m_lockOwner = 0;
#endif
    CASSERT( sizeof( m_data ) == sizeof( CRITICAL_SECTION ) );
    CASSERT( __alignof( Data ) == __alignof( CRITICAL_SECTION ) );

    InitializeCriticalSection( pcs( &m_data ) );
}
JL_DLLEXPORT void jl_native_alignment(uint_t *int8align, uint_t *int16align, uint_t *int32align,
                                      uint_t *int64align, uint_t *float32align, uint_t *float64align)
{
    *int8align = __alignof(uint8_t);
    *int16align = __alignof(uint16_t);
    *int32align = __alignof(uint32_t);
    *int64align = __alignof(uint64_t);
    *float32align = __alignof(float);
    *float64align = __alignof(double);
}
void GPUInterface::LaunchKernel(GPUFunction deviceFunction,
                                Dim3Int block,
                                Dim3Int grid,
                                int parameterCountV,
                                int totalParameterCount,
                                ...) { // parameters
#ifdef BEAGLE_DEBUG_FLOW
    fprintf(stderr,"\t\t\tEntering GPUInterface::LaunchKernel\n");
#endif

    SAFE_CUDA(cuCtxPushCurrent(cudaContext));

    SAFE_CUDA(cuFuncSetBlockShape(deviceFunction, block.x, block.y, block.z));

    int offset = 0;
    va_list parameters;
    va_start(parameters, totalParameterCount);
    for(int i = 0; i < parameterCountV; i++) {
        void* param = (void*)(size_t)va_arg(parameters, GPUPtr);

        // adjust offset to alignment requirements
        offset = (offset + __alignof(param) - 1) & ~(__alignof(param) - 1);

        SAFE_CUDA(cuParamSetv(deviceFunction, offset, &param, sizeof(param)));

        offset += sizeof(void*);
    }
    for(int i = parameterCountV; i < totalParameterCount; i++) {
        unsigned int param = va_arg(parameters, unsigned int);

        // adjust offset to alignment requirements
        offset = (offset + __alignof(param) - 1) & ~(__alignof(param) - 1);

        SAFE_CUDA(cuParamSeti(deviceFunction, offset, param));

        offset += sizeof(param);
    }
    va_end(parameters);

    SAFE_CUDA(cuParamSetSize(deviceFunction, offset));

    SAFE_CUDA(cuLaunchGrid(deviceFunction, grid.x, grid.y));

    SAFE_CUDA(cuCtxPopCurrent(&cudaContext));

#ifdef BEAGLE_DEBUG_FLOW
    fprintf(stderr,"\t\t\tLeaving GPUInterface::LaunchKernel\n");
#endif
}
int main(void) { printf("X1 size: %d\n", (int)sizeof(X1)); printf("X1 align: %d\n", (int)__alignof(X1)); printf("X2 size: %d\n", (int)sizeof(X2)); printf("X2 align: %d\n", (int)__alignof(X2)); printf("X4 size: %d\n", (int)sizeof(X4)); printf("X4 align: %d\n", (int)__alignof(X4)); printf("X8 size: %d\n", (int)sizeof(X8)); printf("X8 align: %d\n", (int)__alignof(X8)); return 0; }
void Arena::setDestructor(void* ptr, void (*destructor)(void*)) {
  ObjectHeader* header = reinterpret_cast<ObjectHeader*>(ptr) - 1;
  KJ_DASSERT(reinterpret_cast<uintptr_t>(header) % __alignof(ObjectHeader) == 0);
  header->destructor = destructor;
  header->next = objectList;
  objectList = header;
}
CAMLprim value spoc_cuda_custom_load_param_vec(value off, value ex, value A, value v) {
    CAMLparam4(off, ex, A, v);
    CAMLlocal1(customArray);
    cu_vector* cuv;
    CUdeviceptr d_A;
    char *extra;
    int offset;
    int seek;
    int type_size;
    int tag;
    void* ptr;

    seek = Int_val(Field(v, 10));
    customArray = Field(Field(v, 1), 0);
    type_size = Int_val(Field(Field(customArray, 1), 1));

    extra = (char*)ex;
    offset = Int_val(Field(off, 0));
    cuv = (cu_vector*)Field(A, 1);
    d_A = cuv->cu_vector;

    /* advance the device pointer by `seek` elements of `type_size` bytes
       (parenthesized so the addition happens on the integer, not on void*) */
    ptr = (void*)((size_t)d_A + seek * type_size);
    ADD_TO_PARAM_BUFFER(ptr, __alignof(d_A));

    Store_field(off, 0, Val_int(offset));
    CAMLreturn(Val_unit);
}
// Init
//------------------------------------------------------------------------------
/*static*/ void MemTracker::Init()
{
    static_assert( sizeof( MemTracker::s_Mutex ) == sizeof( Mutex ), "Unexpected sizeof(Mutex)" );

    ASSERT( g_MemTrackerDisabledOnThisThread );

    // first caller does init
    static uint32_t threadSafeGuard( 0 );
    if ( AtomicIncU32( &threadSafeGuard ) != 1 )
    {
        // subsequent callers wait for init
        while ( !s_Initialized ) {}
        return;
    }

    // construct primary mutex in-place
    INPLACE_NEW ( &GetMutex() ) Mutex;

    // init hash table
    s_AllocationHashTable = new Allocation*[ ALLOCATION_HASH_SIZE ];
    memset( s_AllocationHashTable, 0, ALLOCATION_HASH_SIZE * sizeof( Allocation * ) );

    // init pool for allocation structures
    s_Allocations = new MemPoolBlock( sizeof( Allocation ), __alignof( Allocation ) );

    MemoryBarrier();
    s_Initialized = true;
}
T f1(T t1, U u1, int i1) {
  T t2 = i1;
  t2 = i1 + u1;
  ++u1;
  u1++;
  int i2 = u1;

  i1 = t1[u1];
  i1 *= t1;

  i1(u1, t1); // error
  u1(i1, t1);

  U u2 = (T)i1;
  static_cast<void>(static_cast<U>(reinterpret_cast<T>(
      dynamic_cast<U>(const_cast<T>(i1)))));

  new U(i1, t1);
  new int(t1, u1);
  new (t1, u1) int;
  delete t1;

  dummy d1 = sizeof(t1); // expected-error {{no viable conversion}}
  dummy d2 = offsetof(T, foo); // expected-error {{no viable conversion}}
  dummy d3 = __alignof(u1); // expected-error {{no viable conversion}}
  i1 = typeid(t1); // expected-error {{assigning to 'int' from incompatible type 'const std::type_info'}}

  return u1;
}
int main(void)
{
    CABINETSTATE cs;
    LPBYTE pb;
    int i, j;

    printf("CABINETSTATE size: %d\n", (int)sizeof(CABINETSTATE));
    printf("CABINETSTATE align: %d\n", (int)__alignof(CABINETSTATE));

    ZeroMemory(&cs, sizeof(cs));
    cs.nVersion = 0xFFFF;
    cs.fUnusedFlags = 0x7F;
    cs.fMenuEnumFilter = 0xFFFFFFFF;

    pb = (LPBYTE)&cs;
    for (i = 0; i < (int)sizeof(cs); ++i)
    {
        for (j = 0; j < 8; j++)
        {
            if (*pb & (1 << j))
                putchar('1');
            else
                putchar('0');
        }
        pb++;
    }
    putchar('\n');

    return 0;
}
/// <summary> /// Trigger the Arbitrary Overwrite Vulnerability /// </summary> /// <param name="UserWriteWhatWhere">The pointer to WRITE_WHAT_WHERE structure</param> /// <returns>NTSTATUS</returns> NTSTATUS TriggerArbitraryOverwrite(IN PWRITE_WHAT_WHERE UserWriteWhatWhere) { PULONG_PTR What = NULL; PULONG_PTR Where = NULL; NTSTATUS Status = STATUS_SUCCESS; PAGED_CODE(); __try { // Verify if the buffer resides in user mode ProbeForRead((PVOID)UserWriteWhatWhere, sizeof(WRITE_WHAT_WHERE), (ULONG)__alignof(WRITE_WHAT_WHERE)); What = UserWriteWhatWhere->What; Where = UserWriteWhatWhere->Where; DbgPrint("[+] UserWriteWhatWhere: 0x%p\n", UserWriteWhatWhere); DbgPrint("[+] WRITE_WHAT_WHERE Size: 0x%X\n", sizeof(WRITE_WHAT_WHERE)); DbgPrint("[+] UserWriteWhatWhere->What: 0x%p\n", What); DbgPrint("[+] UserWriteWhatWhere->Where: 0x%p\n", Where); #ifdef SECURE // Secure Note: This is secure because the developer is properly validating if address // pointed by 'Where' and 'What' value resides in User mode by calling ProbeForRead() // routine before performing the write operation ProbeForRead((PVOID)Where, sizeof(PULONG_PTR), (ULONG)__alignof(PULONG_PTR)); ProbeForRead((PVOID)What, sizeof(PULONG_PTR), (ULONG)__alignof(PULONG_PTR)); *(Where) = *(What); #else DbgPrint("[+] Triggering Arbitrary Overwrite\n"); // Vulnerability Note: This is a vanilla Arbitrary Memory Overwrite vulnerability // because the developer is writing the value pointed by 'What' to memory location // pointed by 'Where' without properly validating if the values pointed by 'Where' // and 'What' resides in User mode *(Where) = *(What); #endif } __except (EXCEPTION_EXECUTE_HANDLER) { Status = GetExceptionCode(); DbgPrint("[-] Exception Code: 0x%X\n", Status); } return Status; }
int main(void) { printf("X1 size: %d\n", (int)sizeof(X1)); printf("X1 align: %d\n", (int)__alignof(X1)); printf("X1 b: %d\n", (int)FIELD_OFFSET(X1, b)); printf("X2 size: %d\n", (int)sizeof(X2)); printf("X2 align: %d\n", (int)__alignof(X2)); printf("X2 b: %d\n", (int)FIELD_OFFSET(X2, b)); printf("X4 size: %d\n", (int)sizeof(X4)); printf("X4 align: %d\n", (int)__alignof(X4)); printf("X4 b: %d\n", (int)FIELD_OFFSET(X4, b)); printf("X8 size: %d\n", (int)sizeof(X8)); printf("X8 align: %d\n", (int)__alignof(X8)); printf("X8 b: %d\n", (int)FIELD_OFFSET(X8, b)); return 0; }
CAMLprim value spoc_cuda_load_param_int(value off, value ex, value val) {
    CAMLparam3(off, ex, val);
    int offset, i;
    char *extra;

    extra = (char*)ex;
    offset = Int_val(Field(off, 0));
    i = Int_val(val);
    ADD_TO_PARAM_BUFFER(i, __alignof(i));

    Store_field(off, 0, Val_int(offset));
    CAMLreturn(Val_unit);
}
/// eye function
void GPUeye(const GPUtype &OUT) {

  // number of elements
  int nout = gm->gputype.getNumel(OUT);

  gpuTYPE_t tout = gm->gputype.getType(OUT);

  CUdeviceptr d_OUT = (CUdeviceptr)(UINTPTR gm->gputype.getGPUptr(OUT));

  // The GPU kernel depends on the type of input/output
  CUfunction drvfun;

  /* Load kernel depending on type */
  /* EYEdrvfuns[N_EYEF]  float kernel
   * EYEdrvfuns[N_EYEC]  Complex kernel
   * EYEdrvfuns[N_EYED]  double kernel
   * EYEdrvfuns[N_EYEDC] DoubleComplex kernel */
  if (tout == gpuFLOAT) {
    drvfun = EYEdrvfuns[N_EYEF];
  } else if (tout == gpuCFLOAT) {
    drvfun = EYEdrvfuns[N_EYEC];
  } else if (tout == gpuDOUBLE) {
    drvfun = EYEdrvfuns[N_EYED];
  } else if (tout == gpuCDOUBLE) {
    drvfun = EYEdrvfuns[N_EYEDC];
  }

  const int * outsize = gm->gputype.getSize(OUT);
  int step = outsize[0] + 1;
  int maxindex = (outsize[0] - 1) * step;

  hostdrv_pars_t gpuprhs[3];
  int gpunrhs = 3;
  gpuprhs[0] = hostdrv_pars(&d_OUT, sizeof(d_OUT), __alignof(d_OUT));
  gpuprhs[1] = hostdrv_pars(&maxindex, sizeof(maxindex), __alignof(maxindex));
  gpuprhs[2] = hostdrv_pars(&step, sizeof(step), __alignof(step));

  hostGPUDRV(drvfun, nout, gpunrhs, gpuprhs);
}
CAMLprim value spoc_cuda_load_param_float64(value off, value ex, value val) {
    CAMLparam3(off, ex, val);
    int offset;
    char *extra;
    double f;

    extra = (char*)ex;
    offset = Int_val(Field(off, 0));
    f = Double_val(val);
    ADD_TO_PARAM_BUFFER(f, __alignof(f));

    Store_field(off, 0, Val_int(offset));
    CAMLreturn(Val_unit);
}
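/*
 * A minimal sketch of the ADD_TO_PARAM_BUFFER macro shared by the three SPOC
 * stubs above. Its definition is not shown in these snippets, so this exact
 * form is an assumption; what the call sites pin down is that it aligns
 * `offset`, copies the value into the `extra` parameter buffer, and advances
 * `offset` (which is then stored back via Store_field).
 */
#define ADD_TO_PARAM_BUFFER(value, alignment)                         \
    do {                                                              \
        offset = (offset + (alignment) - 1) & ~((alignment) - 1);     \
        memcpy(extra + offset, &(value), sizeof(value));              \
        offset += sizeof(value);                                      \
    } while (0)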
int main(int argc, char *argv[])
{
    auto t = __alignof(Task);

    QApplication a(argc, argv);
    Hitrater app;
    app.show();

    return a.exec();
}
void
GOMP_critical_name_start (void **pptr)
{
  gomp_mutex_t *plock;

  /* If a mutex fits within the space for a pointer, and is zero initialized,
     then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *) pptr;
  /* Otherwise we have to be prepared to malloc storage.  */
  else
    {
      plock = *pptr;

      if (plock == NULL)
	{
#ifdef HAVE_SYNC_BUILTINS
	  gomp_mutex_t *nlock = gomp_malloc (sizeof (gomp_mutex_t));
	  gomp_mutex_init (nlock);

	  plock = __sync_val_compare_and_swap (pptr, NULL, nlock);
	  if (plock != NULL)
	    {
	      gomp_mutex_destroy (nlock);
	      gomp_free (nlock);
	    }
	  else
	    plock = nlock;
#else
	  gomp_mutex_lock (&create_lock_lock);
	  plock = *pptr;

	  if (plock == NULL)
	    {
	      plock = gomp_malloc (sizeof (gomp_mutex_t));
	      gomp_mutex_init (plock);
	      __sync_synchronize ();
	      *pptr = plock;
	    }
	  gomp_mutex_unlock (&create_lock_lock);
#endif
	}
    }

  gomp_mutex_lock (plock);

  /* OMP v3.1, 2.8.6 p81,l16 - "At entry to critical regions" */
  gomp_flush0 ();
}
int main(void)
{
	char *lol[2];
	unsigned long stuff = 2134633;
	struct bogus { char big[9999]; } b;
	typedef union lolmonster {
		char x;
		int z;
		double y;
		short z2;
	} lolmonster_t;

	typeof(123) x = 5;
	typeof(*lol) p = "Hello";
	typeof(b) bla;
	typeof(&b) blap;

	printf("%s: %d\n", p, x);

	blap = &bla;
	blap->big[9] = 123;
	printf("%d\n", bla.big[9]);

#if 0 /* XXX */
	printf("alignment of long long is %d\n", __alignof__(long long));
#endif
	printf("alignment of struct bogus: %d\n", __alignof(b));

	if (__builtin_expect(123 != 456, 0)) {
		puts(":)");
	}

	if (__builtin_expect(!555, 1)) {
		puts(":(");
	} else {
		puts(":)");
	}

	{
		int x = __builtin_expect(555555, 0);
		printf("%d\n", x);
	}

#if 0
	printf("alignment of lolmonster: %d\n", __alignof__(lolmonster_t));
#endif

	printf("%d\n", (__typeof__(unsigned char))stuff);

	return 0;
}
void* Arena::allocateBytes(size_t amount, uint alignment, bool hasDisposer) {
  if (hasDisposer) {
    alignment = kj::max(alignment, __alignof(ObjectHeader));
    amount += alignTo(sizeof(ObjectHeader), alignment);
  }

  void* result = allocateBytesInternal(amount, alignment);

  if (hasDisposer) {
    // Reserve space for the ObjectHeader, but don't add it to the object list yet.
    result = alignTo(reinterpret_cast<byte*>(result) + sizeof(ObjectHeader), alignment);
  }

  KJ_DASSERT(reinterpret_cast<uintptr_t>(result) % alignment == 0);
  return result;
}
void
GOMP_critical_name_end (void **pptr)
{
  gomp_mutex_t *plock;

  /* If a mutex fits within the space for a pointer, and is zero initialized,
     then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *) pptr;
  else
    plock = *pptr;

  gomp_mutex_unlock (plock);
}
// CONSTRUCTOR
//------------------------------------------------------------------------------
Mutex::Mutex()
{
    #if defined( __WINDOWS__ )
        static_assert( sizeof( m_CriticalSection ) == sizeof( CRITICAL_SECTION ), "Unexpected sizeof(CRITICAL_SECTION)" );
        static_assert( __alignof( decltype( m_CriticalSection ) ) == __alignof( CRITICAL_SECTION ), "Unexpected __alignof(CRITICAL_SECTION)" );
        VERIFY( InitializeCriticalSectionAndSpinCount( (CRITICAL_SECTION *)&m_CriticalSection, 100 ) );
    #elif defined( __LINUX__ ) || defined( __APPLE__ )
        pthread_mutexattr_t attributes;
        VERIFY( pthread_mutexattr_init( &attributes ) == 0 );
        pthread_mutexattr_settype( &attributes, PTHREAD_MUTEX_RECURSIVE );
        VERIFY( pthread_mutex_init( &m_Mutex, &attributes ) == 0 );
    #else
        #error Unknown platform
    #endif
}
/// <summary> /// Create and store the Fake object /// </summary> /// <param name="pFakeObject">The pointer to user FAKE_OBJECT structure</param> /// <returns>NTSTATUS</returns> NTSTATUS CreateFakeObject(IN PFAKE_OBJECT pFakeObject) { NTSTATUS status = STATUS_SUCCESS; PFAKE_OBJECT pKernelFakeObject = NULL; PAGED_CODE(); __try { DbgPrint("[+] Creating Fake Object\n"); // Allocate Pool Memory pKernelFakeObject = (PFAKE_OBJECT)ExAllocatePoolWithTag(NonPagedPool, sizeof(FAKE_OBJECT), (ULONG)POOL_TAG); if (!pKernelFakeObject) { // Unable to allocate Pool Memory with Tag DbgPrint("[-] Unable To Allocate Pool Memory\n"); status = STATUS_NO_MEMORY; return status; } else { DbgPrint("[+] Pool Address: 0x%p\n", pKernelFakeObject); DbgPrint("[+] Pool Type: %s\n", STRINGIFY(NonPagedPool)); DbgPrint("[+] Pool Size: 0x%X\n", sizeof(FAKE_OBJECT)); DbgPrint("[+] Pool Tag: %s\n", STRINGIFY(POOL_TAG)); } // Verify if the buffer resides in User Mode ProbeForRead((PVOID)pFakeObject, sizeof(FAKE_OBJECT), (ULONG)__alignof(FAKE_OBJECT)); // Copy the Fake structure to Pool memory RtlCopyMemory((PVOID)pKernelFakeObject, (PVOID)pFakeObject, sizeof(FAKE_OBJECT)); // Null terminate the char buffer pKernelFakeObject->buffer[sizeof(pKernelFakeObject->buffer) - 1] = '\0'; DbgPrint("[+] Fake Object: 0x%p\n", pKernelFakeObject); } __except (EXCEPTION_EXECUTE_HANDLER) { status = GetExceptionCode(); DbgPrint("[-] Exception Code: 0x%X\n", status); } return status; }
void ff::SmallDict::Reserve(size_t newAllocated, bool allowEmptySpace)
{
	size_t oldAllocated = Allocated();

	if (newAllocated > oldAllocated)
	{
		if (allowEmptySpace)
		{
			newAllocated = std::max<size_t>(NearestPowerOfTwo(newAllocated), 4);
		}

		size_t byteSize = sizeof(Data) + newAllocated * sizeof(Entry) - sizeof(Entry);
		_data = (Data *)_aligned_realloc(_data, byteSize, __alignof(Data));
		_data->allocated = newAllocated;
		_data->size = oldAllocated ? _data->size : 0;
		_data->atomizer = oldAllocated ? _data->atomizer : &ProcessGlobals::Get()->GetStringCache();
	}
}
template <class T>
inline unsigned int GetAlignmentOf(T *dummy=NULL)	// VC60 workaround
{
#ifdef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
	if (sizeof(T) < 16)
		return 1;
#endif

#if (_MSC_VER >= 1300)
	return __alignof(T);
#elif defined(__GNUC__)
	return __alignof__(T);
#elif CRYPTOPP_BOOL_SLOW_WORD64
	return UnsignedMin(4U, sizeof(T));
#else
	return sizeof(T);
#endif
}
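/*
 * A minimal usage sketch for the helper above (the calling code and the
 * Sample struct are hypothetical, not part of the library): the defaulted
 * `dummy` pointer exists so that old compilers such as VC6 can deduce T from
 * an argument, which means the helper can be called either with an explicit
 * template argument or by passing a pointer of the relevant type.
 */
#include <cstdio>

struct Sample { char c; double d; };

int sample_main()
{
	Sample s;
	printf("alignment: %u\n", GetAlignmentOf<Sample>());  // explicit template argument
	printf("alignment: %u\n", GetAlignmentOf(&s));        // T deduced from the pointer
	return 0;
}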
void* Arena::allocateBytesInternal(size_t amount, uint alignment) {
  if (currentChunk != nullptr) {
    ChunkHeader* chunk = currentChunk;
    byte* alignedPos = alignTo(chunk->pos, alignment);

    // Careful about overflow here.
    if (amount + (alignedPos - chunk->pos) <= chunk->end - chunk->pos) {
      // There's enough space in this chunk.
      chunk->pos = alignedPos + amount;
      return alignedPos;
    }
  }

  // Not enough space in the current chunk. Allocate a new one.

  // We need to allocate at least enough space for the ChunkHeader and the requested allocation.

  // If the alignment is less than that of the chunk header, we'll need to increase it.
  alignment = kj::max(alignment, __alignof(ChunkHeader));

  // If the ChunkHeader size does not match the alignment, we'll need to pad it up.
  amount += alignTo(sizeof(ChunkHeader), alignment);

  // Make sure we're going to allocate enough space.
  while (nextChunkSize < amount) {
    nextChunkSize *= 2;
  }

  // Allocate.
  byte* bytes = reinterpret_cast<byte*>(operator new(nextChunkSize));

  // Set up the ChunkHeader at the beginning of the allocation.
  ChunkHeader* newChunk = reinterpret_cast<ChunkHeader*>(bytes);
  newChunk->next = chunkList;
  newChunk->pos = bytes + amount;
  newChunk->end = bytes + nextChunkSize;
  currentChunk = newChunk;
  chunkList = newChunk;
  nextChunkSize *= 2;

  // Move past the ChunkHeader to find the position of the allocated object.
  return alignTo(bytes + sizeof(ChunkHeader), alignment);
}
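/*
 * A minimal sketch of the alignTo helpers the two Arena snippets above rely
 * on. Their definitions are not shown here, so the exact signatures are an
 * assumption (as are the kj-style `uint` and `byte` aliases); both round a
 * value up to the next multiple of a power-of-two alignment, one for integer
 * byte counts and one for pointers.
 */
inline uintptr_t alignTo(uintptr_t value, uint alignment) {
  // alignment must be a power of two.
  return (value + alignment - 1) & ~uintptr_t(alignment - 1);
}

inline byte* alignTo(byte* ptr, uint alignment) {
  return reinterpret_cast<byte*>(
      alignTo(reinterpret_cast<uintptr_t>(ptr), alignment));
}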
void
GOMP_critical_name_end (void **pptr)
{
  gomp_mutex_t *plock;

  /* OMP v3.1, 2.8.6 p81,l16 - "At exit from critical regions"
     Flush and then unlock to ensure all writes land before lock is released */
  gomp_flush0 ();

  /* If a mutex fits within the space for a pointer, and is zero initialized,
     then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *) pptr;
  else
    plock = *pptr;

  gomp_mutex_unlock (plock);
}
STATIC
PAGEABLE
USHORT
messagetable_SizeofSerializedEntry(
	_In_	PCMESSAGE_TABLE_ENTRY	ptEntry
)
{
	NTSTATUS	eStatus	= STATUS_UNSUCCESSFUL;
	USHORT		cbTotal	= 0;

	PAGED_CODE();

	ASSERT(NULL != ptEntry);

	// Begin with the size of the header.
	cbTotal = UFIELD_OFFSET(MESSAGE_RESOURCE_ENTRY, acText);

	// Add the size of the string itself.
	if (ptEntry->bUnicode)
	{
		eStatus = RtlUShortAdd(cbTotal,
							   ptEntry->tData.tUnicode.MaximumLength,
							   &cbTotal);
		ASSERT(NT_SUCCESS(eStatus));
	}
	else
	{
		eStatus = RtlUShortAdd(cbTotal,
							   ptEntry->tData.tAnsi.MaximumLength,
							   &cbTotal);
		ASSERT(NT_SUCCESS(eStatus));
	}

	// Now add any required padding.
	// Note: adding (cbTotal % alignment) rounds up to a multiple of the
	// alignment only because __alignof(MESSAGE_RESOURCE_ENTRY) is 2; for a
	// larger alignment this would need (align - cbTotal % align) % align.
	eStatus = RtlUShortAdd(cbTotal,
						   cbTotal % __alignof(MESSAGE_RESOURCE_ENTRY),
						   &cbTotal);
	ASSERT(NT_SUCCESS(eStatus));

	return cbTotal;
}
/// <summary> /// Trigger the Stack Overflow Vulnerability /// </summary> /// <param name="pUserModeBuffer">The pointer to user mode buffer</param> /// <param name="userModeBufferSize">Size of the user mode buffer</param> /// <returns>NTSTATUS</returns> NTSTATUS TriggerStackOverflow(IN PVOID pUserModeBuffer, IN SIZE_T userModeBufferSize) { NTSTATUS status = STATUS_SUCCESS; ULONG kernelBuffer[BUFFER_SIZE] = {0}; PAGED_CODE(); __try { DbgPrint("[+] kernelBuffer: 0x%p\n", &kernelBuffer); DbgPrint("[+] kernelBuffer Size: 0x%X\n", sizeof(kernelBuffer)); // Verify if the buffer resides in User Mode ProbeForRead(pUserModeBuffer, sizeof(kernelBuffer), (ULONG)__alignof(kernelBuffer)); DbgPrint("[+] pUserModeBuffer: 0x%p\n", pUserModeBuffer); DbgPrint("[+] userModeBufferSize: 0x%X\n", userModeBufferSize); #ifdef SECURE // Secure Note: This is secure because the developer is passing a size // equal to size of the allocated Pool memory to RtlCopyMemory()/memcpy() // so, there will be no overflow RtlCopyMemory((PVOID)kernelBuffer, pUserModeBuffer, sizeof(kernelBuffer)); #else DbgPrint("[+] Triggering Stack Overflow\n"); // Vulnerability Note: This is a vanilla Stack Based Overflow vulnerability // because the developer is passing the user supplied value directly to // RtlCopyMemory()/memcpy() without validating if the size is greater or // equal to the size allocated for it in on the stack RtlCopyMemory((PVOID)kernelBuffer, pUserModeBuffer, userModeBufferSize); #endif } __except (EXCEPTION_EXECUTE_HANDLER) { status = GetExceptionCode(); DbgPrint("[-] Exception Code: 0x%X\n", status); } return status; }