/*
 * Allocate tagged pool memory, retrying on transient failure.
 *
 * Retries every DC_MEM_RETRY_TIME ms for up to DC_MEM_RETRY_TIMEOUT ms total.
 * At IRQL >= DISPATCH_LEVEL dc_delay cannot sleep, so the routine gives up
 * after the first failed attempt instead of spinning.
 *
 * Returns the allocation, or NULL if every attempt failed.
 */
void *mm_alloc_success(POOL_TYPE pool, SIZE_T bytes, u32 tag)
{
    void *mem = NULL; /* fix: was uninitialized; indeterminate return if the loop body never ran */
    int   timeout;

    for (timeout = DC_MEM_RETRY_TIMEOUT; timeout > 0; timeout -= DC_MEM_RETRY_TIME) {
        if ( (mem = ExAllocatePoolWithTag(pool, bytes, tag)) != NULL ) break;
        /* can not delay at raised IRQL, give up immediately */
        if (KeGetCurrentIrql() >= DISPATCH_LEVEL) break;
        dc_delay(DC_MEM_RETRY_TIME);
    }
    return mem;
}
/*
 * Allocate an IRP with the given stack size, retrying on transient failure.
 *
 * Retries every DC_MEM_RETRY_TIME ms for up to DC_MEM_RETRY_TIMEOUT ms total.
 * At IRQL >= DISPATCH_LEVEL dc_delay cannot sleep, so the routine gives up
 * after the first failed attempt.
 *
 * Returns the IRP, or NULL if every attempt failed.
 */
PIRP mm_allocate_irp_success(CCHAR StackSize)
{
    PIRP irp = NULL; /* fix: was uninitialized; indeterminate return if the loop body never ran */
    int  timeout;

    for (timeout = DC_MEM_RETRY_TIMEOUT; timeout > 0; timeout -= DC_MEM_RETRY_TIME) {
        if ( (irp = IoAllocateIrp(StackSize, FALSE)) != NULL ) break;
        /* can not delay at raised IRQL, give up immediately */
        if (KeGetCurrentIrql() >= DISPATCH_LEVEL) break;
        dc_delay(DC_MEM_RETRY_TIME);
    }
    return irp;
}
/*
 * Allocate an MDL describing [data, data+size), retrying on transient failure.
 *
 * Retries every DC_MEM_RETRY_TIME ms for up to DC_MEM_RETRY_TIMEOUT ms total.
 * At IRQL >= DISPATCH_LEVEL dc_delay cannot sleep, so the routine gives up
 * after the first failed attempt.
 *
 * Returns the MDL, or NULL if every attempt failed.
 */
PMDL mm_allocate_mdl_success(void *data, u32 size)
{
    PMDL mdl = NULL; /* fix: was uninitialized; indeterminate return if the loop body never ran */
    int  timeout;

    for (timeout = DC_MEM_RETRY_TIMEOUT; timeout > 0; timeout -= DC_MEM_RETRY_TIME) {
        if ( (mdl = IoAllocateMdl(data, size, FALSE, FALSE, NULL)) != NULL ) break;
        /* can not delay at raised IRQL, give up immediately */
        if (KeGetCurrentIrql() >= DISPATCH_LEVEL) break;
        dc_delay(DC_MEM_RETRY_TIME);
    }
    return mdl;
}
/*
 * Map an MDL into system address space, retrying on transient failure.
 *
 * Uses MmGetSystemAddressForMdlSafe (HighPagePriority) which can fail under
 * PTE pressure; retries every DC_MEM_RETRY_TIME ms for up to
 * DC_MEM_RETRY_TIMEOUT ms. At IRQL >= DISPATCH_LEVEL dc_delay cannot sleep,
 * so the routine gives up after the first failed attempt.
 *
 * Returns the system VA, or NULL if every attempt failed.
 */
void *mm_map_mdl_success(PMDL mdl)
{
    void *mem = NULL; /* fix: was uninitialized; indeterminate return if the loop body never ran */
    int   timeout;

    for (timeout = DC_MEM_RETRY_TIMEOUT; timeout > 0; timeout -= DC_MEM_RETRY_TIME) {
        if ( (mem = MmGetSystemAddressForMdlSafe(mdl, HighPagePriority)) != NULL ) break;
        /* can not delay at raised IRQL, give up immediately */
        if (KeGetCurrentIrql() >= DISPATCH_LEVEL) break;
        dc_delay(DC_MEM_RETRY_TIME);
    }
    return mem;
}
/*
 * Format a message and display it on the boot/HAL console via
 * InbvDisplayString. After printing, pauses 500 ms (when IRQL permits
 * sleeping) so the text stays readable on screen.
 *
 * format: printf-style format string followed by its arguments.
 */
void hal_print(char *format, ...)
{
    char    dbg_msg[MAX_PATH];
    va_list args;

    va_start(args, format);
    _vsnprintf(dbg_msg, sizeof(dbg_msg), format, args);
    va_end(args);

    /* fix: _vsnprintf does not NUL-terminate the buffer when the output is
       truncated; force termination so InbvDisplayString cannot overread */
    dbg_msg[sizeof(dbg_msg) - 1] = '\0';

    InbvDisplayString(dbg_msg);

    if (KeGetCurrentIrql() < DISPATCH_LEVEL) {
        dc_delay(500);
    }
}
/*
 * Fill buf[0..len) with cryptographically strong random bytes extracted
 * from the entropy pool.
 *
 * Extraction scheme: each output block is SHA-512(pool slice || counter seed)
 * post-whitened with AES-256-ECB under a key derived from the key pool.
 * The pool is mixed before, during (on wrap-around), and after extraction,
 * and additional reseeds are taken around every block.
 *
 * buf may be a user-supplied address: the copy-out is guarded by SEH and a
 * faulting copy aborts extraction.
 *
 * Returns nonzero on success, 0 if the copy to buf faulted.
 */
int rnd_get_bytes(u8 *buf, int len)
{
    sha512_ctx sha_ctx;
    u8         hval[SHA512_DIGEST_SIZE];
    int        c_len, idx, i;
    ext_seed   seed;
    int        fail;

    if (reseed_cnt < 256) {
        DbgMsg("RNG not have sufficient entropy (%d reseeds), collect it now\n", reseed_cnt);
    }
    /* if the RNG does not yet have sufficient entropy (fewer than 256
       reseeds), block here and collect it now */
    while (reseed_cnt < 256) {
        dc_delay(1); /* wait 1 millisecond */
        rnd_reseed_now();
    }

    /* serialize access to the pool state */
    wait_object_infinity(&rnd_mutex);

    /* derive AES key from key pool */
    aes256_asm_set_key(key_pool, rnd_key);

    /* mix pool state before get data from it */
    rnd_pool_mix();

    /* idx - position for extraction pool data */
    idx  = 0;
    fail = 0;
    do {
        /* extract at most one SHA-512 digest worth of output per iteration */
        c_len      = min(len, SHA512_DIGEST_SIZE);
        seed.seed1 = getrnd_cnt++;  /* per-call counter: makes each digest input unique */
        seed.seed2 = len;

        /* collect additional entropy before extract data block */
        rnd_reseed_now();

        sha512_init(&sha_ctx);
        sha512_hash(&sha_ctx, rnd_pool + idx, SHA512_DIGEST_SIZE);
        sha512_hash(&sha_ctx, pv(&seed), sizeof(seed));
        sha512_done(&sha_ctx, hval);

        /* encrypt hash value with AES in ECB mode */
        for (i = 0; i < SHA512_DIGEST_SIZE; i += AES_BLOCK_SIZE) {
            aes256_asm_encrypt(hval + i, hval + i, rnd_key);
        }

        /* copy data to output; buf may fault, so guard with SEH */
        __try {
            memcpy(buf, hval, c_len);
        }
        __except(EXCEPTION_EXECUTE_HANDLER) {
            fail = 1;
        }

        /* increment extraction pointer */
        if ( (idx += SHA512_DIGEST_SIZE) == RNG_POOL_SIZE ) {
            /* if all data from pool extracted then mix pool
               for use new entropy added with reseeds */
            rnd_pool_mix();
            idx = 0;
        }

        /* collect additional entropy after extract data block */
        rnd_reseed_now();

        /* update buffer pointer and remaining length */
        buf += c_len;
        len -= c_len;
    } while ( (len != 0) && (fail == 0) );

    /* mix pool after get data to prevent "cold boot" attacks on generated keys */
    rnd_pool_mix();

    /* prevent leaks: wipe key schedule, hash state and seed from the stack */
    zeroauto(rnd_key, sizeof(aes256_key));
    zeroauto(&sha_ctx, sizeof(sha_ctx));
    zeroauto(hval, sizeof(hval));
    zeroauto(&seed, sizeof(seed));

    KeReleaseMutex(&rnd_mutex, FALSE);

    return fail == 0;
}
/*
   this routine processes unmounting of the device
   unmount options:
    UM_NOFSCTL - unmount without reporting to FS
    UM_FORCE   - force unmounting

   Returns ST_OK on success, ST_NO_MOUNT if the volume is not mounted,
   ST_LOCK_ERR if the volume could not be opened/locked and MF_FORCE
   was not specified.
*/
int dc_process_unmount(dev_hook *hook, int opt)
{
    IO_STATUS_BLOCK iosb;
    NTSTATUS        status;
    HANDLE          h_dev  = NULL;
    int             locked = 0;
    int             resl;

    DbgMsg("dc_process_unmount, dev=%ws\n", hook->dev_name);

    /* fast-path check before taking the lock */
    if ((hook->flags & F_ENABLED) == 0) {
        return ST_NO_MOUNT;
    }

    wait_object_infinity(&hook->busy_lock);

    /* re-check under the lock: another thread may have unmounted already */
    if ((hook->flags & F_ENABLED) == 0) {
        resl = ST_NO_MOUNT; goto cleanup;
    }
    do
    {
        if (hook->flags & F_FORMATTING) {
            dc_format_done(hook->dev_name);
        }
        /* lock and dismount the file system, unless this is the system
           volume or the caller asked to skip FS notification */
        if ( !(hook->flags & F_SYSTEM) && !(opt & MF_NOFSCTL) )
        {
            h_dev = io_open_device(hook->dev_name);

            /* open failure is fatal only without MF_FORCE */
            if ( (h_dev == NULL) && !(opt & MF_FORCE) ) {
                resl = ST_LOCK_ERR; break;
            }
            if (h_dev != NULL)
            {
                status = ZwFsControlFile(
                    h_dev, NULL, NULL, NULL, &iosb, FSCTL_LOCK_VOLUME, NULL, 0, NULL, 0);

                /* lock failure is fatal only without MF_FORCE */
                if ( (NT_SUCCESS(status) == FALSE) && !(opt & MF_FORCE) ) {
                    resl = ST_LOCK_ERR; break;
                }
                locked = (NT_SUCCESS(status) != FALSE);

                ZwFsControlFile(
                    h_dev, NULL, NULL, NULL, &iosb, FSCTL_DISMOUNT_VOLUME, NULL, 0, NULL, 0);
            }
        }
        if ((opt & MF_NOSYNC) == 0)
        {
            // temporary disable IRP processing
            hook->flags |= F_DISABLE;

            // wait for pending IRPs completion
            // (IoCount > 1 means IRPs besides our own reference are in flight)
            if ((opt & MF_NOWAIT_IO) == 0) {
                while (hook->remove_lock.Common.IoCount > 1) dc_delay(20);
            }
            if (hook->flags & F_SYNC) {
                // send signal to syncronous mode thread
                dc_send_sync_packet(hook->dev_name, S_OP_FINALIZE, 0);
            }
        }
        /* reset per-mount state */
        hook->flags    &= ~F_CLEAR_ON_UNMOUNT;
        hook->use_size  = hook->dsk_size;
        hook->tmp_size  = 0;
        hook->mnt_flags = 0;
        resl            = ST_OK;

        // increment mount changes counter
        lock_inc(&hook->chg_mount);

        // free encryption key
        if (hook->dsk_key != NULL) {
            mm_secure_free(hook->dsk_key); hook->dsk_key = NULL;
        }
        if ( !(opt & MF_NOSYNC) ) {
            /* enable IRP processing */
            hook->flags &= ~F_DISABLE;
        }
    } while (0);

    /* undo the FS lock and close the handle regardless of outcome */
    if (h_dev != NULL) {
        if (locked != 0) {
            ZwFsControlFile(
                h_dev, NULL, NULL, NULL, &iosb, FSCTL_UNLOCK_VOLUME, NULL, 0, NULL, 0);
        }
        ZwClose(h_dev);
    }
cleanup:
    KeReleaseMutex(&hook->busy_lock, FALSE);
    return resl;
}