/*
 * update pools
 *
 * Fortuna accumulator: hash the incoming data and feed the digest into
 * either pool 0 (until the first reseed has happened) or a randomly
 * chosen pool.  All intermediate secret material is scrubbed before
 * returning.
 */
static void
add_entropy(FState * st, const unsigned char *data, unsigned len)
{
    unsigned    pos;
    unsigned char hash[BLOCK];
    MD_CTX      md;

    /* hash given data */
    md_init(&md);
    md_update(&md, data, len);
    md_result(&md, hash);

    /*
     * Make sure the pool 0 is initialized, then update randomly.
     */
    if (st->reseed_count == 0)
        pos = 0;
    else
        pos = get_rand_pool(st);
    md_update(&st->pool[pos], hash, BLOCK);

    if (pos == 0)
        st->pool0_bytes += len;

    /* scrub the intermediate digest and hash context from the stack */
    memset_s(hash, sizeof(hash), 0, sizeof(hash));
    /*
     * BUGFIX: the second argument (destination size) must be sizeof(md),
     * not sizeof(hash); with the wrong size only part of the context was
     * guaranteed to be wiped (or the call could fail a runtime check).
     */
    memset_s(&md, sizeof(md), 0, sizeof(md));
}
// Destructor: scrub every piece of session key material held by this
// object before its storage is released.  The wipes are independent of
// each other, so the order is not significant.
SigmaCryptoLayer::~SigmaCryptoLayer(void)
{
    memset_s(m_MK, sizeof(m_MK), 0, sizeof(m_MK));
    memset_s(m_SK, sizeof(m_SK), 0, sizeof(m_SK));
    memset_s(m_SMK, SIGMA_SMK_LENGTH, 0, SIGMA_SMK_LENGTH);
    memset_s(m_local_private_key_b_little_endian,
             SIGMA_SESSION_PRIVKEY_LENGTH, 0, SIGMA_SESSION_PRIVKEY_LENGTH);
}
int hash_s(char * const salt, char * const pass, char * const hash) { register unsigned i; unsigned char * buffer; size_t bufferLen = 0; /* * Sanity checks */ assert(salt != NULL); assert(pass != NULL); assert(hash != NULL); /* * Alloc mem and compose the buffer */ bufferLen = strlen(salt) + strlen(pass) + 16 /* Len of MD5 */; buffer = (unsigned char *) malloc( sizeof(unsigned char) * bufferLen); if (buffer == NULL) { return -1; } memset_s(buffer, 0, bufferLen); /* * https://en.wikipedia.org/wiki/Key_stretching * * key = "" * for 1 to HASH_ITERATIONS do * key = hash(key + password + salt) */ char * actual = (char *) buffer; assert(len(buffer) == 0); for (i = 0; i < HASH_ITERATIONS; ++i) { MD5_CTX ctx; strcpy(actual, pass); actual += strlen(pass); strcpy(actual, salt); actual += strlen(salt); *actual = '\0'; MD5_Init(&ctx); MD5_Update(&ctx, buffer, actual - (char *) buffer); MD5_Final(buffer, &ctx); actual = (char *) buffer + 16; } hexify(buffer, 16, hash); /* * Free and burn mem */ memset_s(buffer, 0, bufferLen); free(buffer); buffer = NULL; return 0; }
/*
 * Derive the Platform Provisioning ID (PPID).
 *
 * The PPID is AES-128-CMAC over 16 zero bytes, keyed with the
 * Provisioning Key (requested with both CPUSVN and ISVSVN set to 0),
 * so it is a deterministic function of the key alone.  The key is
 * scrubbed from the stack on every exit path.
 */
pve_status_t get_ppid(ppid_t* ppid)
{
    sgx_key_128bit_t key_tmp;
    sgx_status_t sgx_status = SGX_SUCCESS;
    memset(&key_tmp, 0, sizeof(key_tmp));
    //get Provisioning Key with both CPUSVN and ISVSVN set to 0
    pve_status_t status = get_provision_key(&key_tmp, NULL);
    if(status != PVEC_SUCCESS){
        /* defense in depth: wipe the key buffer even on failure */
        (void)memset_s(&key_tmp,sizeof(key_tmp), 0, sizeof(key_tmp));
        return status;
    }
    /* fixed all-zero 16-byte message for the CMAC */
    uint8_t content[16];
    memset(&content, 0, sizeof(content));
    //generate the mac as PPID
    se_static_assert(sizeof(sgx_cmac_128bit_key_t) == sizeof(sgx_key_128bit_t)); /*size of sgx_cmac_128bit_key_t and sgx_key_128bit_t should be same*/
    se_static_assert(sizeof(sgx_cmac_128bit_tag_t) == sizeof(ppid_t)); /*size of sgx_cmac_128bit_tag_t and ppit_t should be same*/
    if((sgx_status=sgx_rijndael128_cmac_msg(reinterpret_cast<const sgx_cmac_128bit_key_t *>(&key_tmp),
            content, sizeof(content),
            reinterpret_cast<sgx_cmac_128bit_tag_t *>(ppid)))!=SGX_SUCCESS){
        status = sgx_error_to_pve_error(sgx_status);
    }else{
        status = PVEC_SUCCESS;
    }
    (void)memset_s(&key_tmp,sizeof(key_tmp), 0, sizeof(key_tmp));//clear provisioning key in stack
    return status;
}
/*
 * Clear the depth plane and/or the color plane of a ZBuffer.
 *
 * NOTE(review): memset_s here is TinyGL's 16-bit word fill
 * (dest, value, word_count), and memset_l the 32-bit variant — these are
 * NOT the C11 Annex K secure memset_s.
 */
void ZB_clear(ZBuffer * zb, int clear_z, int z, int clear_color, int r, int g, int b)
{
#if TGL_FEATURE_RENDER_BITS != 24
    int color;
#endif
    int y;
    PIXEL *pp;

    if (clear_z) {
        /* depth buffer: fill every entry with z (16-bit fill) */
        memset_s(zb->zbuf, z, zb->xsize * zb->ysize);
    }
    if (clear_color) {
        pp = zb->pbuf;
        for (y = 0; y < zb->ysize; y++) {
#if TGL_FEATURE_RENDER_BITS == 15 || TGL_FEATURE_RENDER_BITS == 16
            color = RGB_TO_PIXEL(r, g, b);
            memset_s(pp, color, zb->xsize);             /* 16-bit pixels */
#elif TGL_FEATURE_RENDER_BITS == 32
            color = RGB_TO_PIXEL(r, g, b);
            memset_l(pp, color, zb->xsize);             /* 32-bit pixels */
#elif TGL_FEATURE_RENDER_BITS == 24
            memset_RGB24(pp,r>>8,g>>8,b>>8,zb->xsize);  /* packed 24-bit */
#else
#error TODO
#endif
            /* advance one scanline; linesize is in bytes */
            pp = (PIXEL *) ((char *) pp + zb->linesize);
        }
    }
}
/*
 * Derive a 128-bit key from an ECDH shared secret via two AES-CMAC steps
 * (counter-mode KDF):
 *   KDK     = CMAC(key = 0^16, shared_key)
 *   derived = CMAC(key = KDK, 0x01 || label || 0x00 || 0x0080)
 *
 * Returns SGX_SUCCESS or an SGX error code; the intermediate KDK is
 * scrubbed on every exit path after it has been produced.
 */
sgx_status_t derive_key(
    const sgx_ec256_dh_shared_t* shared_key,
    const char* label,
    uint32_t label_length,
    sgx_ec_key_128bit_t* derived_key)
{
    sgx_status_t se_ret = SGX_SUCCESS;
    uint8_t cmac_key[MAC_KEY_SIZE];
    sgx_ec_key_128bit_t key_derive_key;

    if (!shared_key || !derived_key || !label)
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }

    /*check integer overflow */
    if (label_length > EC_DERIVATION_BUFFER_SIZE(label_length))
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }

    /* step 1: KDK = CMAC over the shared secret with an all-zero key */
    memset(cmac_key, 0, MAC_KEY_SIZE);
    se_ret = sgx_rijndael128_cmac_msg((sgx_cmac_128bit_key_t *)cmac_key,
                                      (uint8_t*)shared_key,
                                      sizeof(sgx_ec256_dh_shared_t),
                                      (sgx_cmac_128bit_tag_t *)&key_derive_key);
    if (SGX_SUCCESS != se_ret)
    {
        memset_s(&key_derive_key, sizeof(key_derive_key), 0, sizeof(key_derive_key));
        INTERNAL_SGX_ERROR_CODE_CONVERTOR(se_ret);
        return se_ret;
    }

    /* derivation_buffer = counter(0x01) || label || 0x00 || output_key_len(0x0080) */
    uint32_t derivation_buffer_length = EC_DERIVATION_BUFFER_SIZE(label_length);
    uint8_t *p_derivation_buffer = (uint8_t *)malloc(derivation_buffer_length);
    if (p_derivation_buffer == NULL)
    {
        /*
         * BUGFIX: the intermediate key-derivation key must also be
         * scrubbed on this error path, otherwise it lingers on the stack.
         */
        memset_s(&key_derive_key, sizeof(key_derive_key), 0, sizeof(key_derive_key));
        return SGX_ERROR_OUT_OF_MEMORY;
    }
    memset(p_derivation_buffer, 0, derivation_buffer_length);

    /*counter = 0x01 */
    p_derivation_buffer[0] = 0x01;
    /*label*/
    memcpy(&p_derivation_buffer[1], label, label_length);
    /*output_key_len=0x0080*/
    uint16_t *key_len = (uint16_t *)&p_derivation_buffer[derivation_buffer_length - 2];
    *key_len = 0x0080;

    /* step 2: derived key = CMAC keyed with the KDK over the buffer */
    se_ret = sgx_rijndael128_cmac_msg((sgx_cmac_128bit_key_t *)&key_derive_key,
                                      p_derivation_buffer,
                                      derivation_buffer_length,
                                      (sgx_cmac_128bit_tag_t *)derived_key);
    memset_s(&key_derive_key, sizeof(key_derive_key), 0, sizeof(key_derive_key));
    free(p_derivation_buffer);
    if(SGX_SUCCESS != se_ret)
    {
        INTERNAL_SGX_ERROR_CODE_CONVERTOR(se_ret);
    }
    return se_ret;
}
/*
 * Sudo conversation function.
 *
 * Processes a PAM-style array of messages: prompts are answered via
 * tgetpass() (echo/mask flags derived from the message type), info and
 * error messages are printed to stdout/stderr, debug messages are
 * forwarded to the debug log.  On any failure, every reply gathered so
 * far is zeroised and freed before returning -1.
 */
int
sudo_conversation(int num_msgs, const struct sudo_conv_message msgs[],
    struct sudo_conv_reply replies[])
{
    struct sudo_conv_reply *repl;
    const struct sudo_conv_message *msg;
    char *pass;
    int n, flags = tgetpass_flags;

    for (n = 0; n < num_msgs; n++) {
        msg = &msgs[n];
        repl = &replies[n];
        switch (msg->msg_type & 0xff) {
            case SUDO_CONV_PROMPT_ECHO_ON:
            case SUDO_CONV_PROMPT_MASK:
                if (msg->msg_type == SUDO_CONV_PROMPT_ECHO_ON)
                    SET(flags, TGP_ECHO);
                else
                    SET(flags, TGP_MASK);
                /* FALLTHROUGH */
            case SUDO_CONV_PROMPT_ECHO_OFF:
                if (ISSET(msg->msg_type, SUDO_CONV_PROMPT_ECHO_OK))
                    SET(flags, TGP_NOECHO_TRY);
                /* Read the password unless interrupted. */
                pass = tgetpass(msg->msg, msg->timeout, flags);
                if (pass == NULL)
                    goto err;
                repl->reply = estrdup(pass);
                /* scrub the password buffer once it has been copied */
                memset_s(pass, SUDO_CONV_REPL_MAX, 0, strlen(pass));
                break;
            case SUDO_CONV_INFO_MSG:
                if (msg->msg)
                    (void) fputs(msg->msg, stdout);
                break;
            case SUDO_CONV_ERROR_MSG:
                if (msg->msg)
                    (void) fputs(msg->msg, stderr);
                break;
            case SUDO_CONV_DEBUG_MSG:
                if (msg->msg)
                    sudo_debug_write(msg->msg, strlen(msg->msg), 0);
                break;
            default:
                goto err;
        }
    }

    return 0;

err:
    /* Zero and free allocated memory and return an error. */
    /* walk backwards from the failing entry, scrubbing each reply */
    do {
        repl = &replies[n];
        if (repl->reply != NULL) {
            memset_s(repl->reply, SUDO_CONV_REPL_MAX, 0, strlen(repl->reply));
            free(repl->reply);
            repl->reply = NULL;
        }
    } while (n--);

    return -1;
}
/*
 * Initialize the dual-modem UART channel.
 *
 * Reads the dual-modem feature NV; when enabled, reads the UART5 config
 * NV, sets up the RTS mask, optional UART logging, wakeup handling and
 * dump hooks, and (with LPM3_GPIO) registers the ICC message callback.
 * Returns OK when the feature is disabled or fully initialized, ERROR on
 * an initialization failure.
 */
int bsp_dual_modem_init(void)
{
    int ret = ERROR;
    DRV_DUAL_MODEM_STR dual_modem_nv;
    DRV_DM_UART5_STR uart_cfg_nv;

    memset_s((void*)&g_dual_modem_ctrl.uart_port, sizeof(g_dual_modem_ctrl.uart_port), 0, sizeof(g_dual_modem_ctrl.uart_port)); /*lint !e545*/ /* initialize UART attributes */
    memset_s((void*)&dual_modem_nv, sizeof(DRV_DUAL_MODEM_STR), 0, sizeof(DRV_DUAL_MODEM_STR));
    memset_s((void*)&uart_cfg_nv, sizeof(DRV_DM_UART5_STR), 0, sizeof(DRV_DM_UART5_STR));

    ret = bsp_nvm_read(NV_ID_DRV_DUAL_MODEM ,(u8 *)&dual_modem_nv ,sizeof(DRV_DUAL_MODEM_STR));
    if (ret != OK)
    {
        /* NV item missing/unreadable: treat the feature as disabled */
        dm_print_err("read dual modem nv fail: %d\n", NV_ID_DRV_DUAL_MODEM);
        dual_modem_nv.enUartEnableCfg = DUAl_MODEM_DISABLE;
    }

    if(DUAl_MODEM_ENABLE == dual_modem_nv.enUartEnableCfg)
    {
        ret = bsp_nvm_read(NV_ID_DRV_DM_UART5_CFG ,(u8 *)&uart_cfg_nv ,sizeof(DRV_DM_UART5_STR));
        if (ret != OK)
        {
            /* config NV missing: fall back to ex1_param = 0 */
            dm_print_err("read dual modem nv fail: %d\n", NV_ID_DRV_DM_UART5_CFG);
            uart_cfg_nv.ex1_param = 0;
        }
        g_dual_modem_ctrl.uart_port.rts_mask = (uart_cfg_nv.ex1_param << 14);

        if(DUAl_MODEM_ENABLE == dual_modem_nv.enUartlogEnableCfg)
        {
            /* NV also enables UART logging: raise module log level */
            g_dual_modem_ctrl.log_flag = 1;
            bsp_mod_level_set(BSP_MODU_DUAL_MODEM ,BSP_LOG_LEVEL_DEBUG);
        }

        ret = dual_modem_wakeup_init(dual_modem_nv);
        if(ret !=OK)
        {
            dm_print_err("dual modem wakeup init failed!\n");
            return ERROR;
        }

        if(OK != dual_modem_dump_init())
        {
            dm_print_err("dual_modem_dump_init fail!\n");
            return ERROR;
        }

#ifdef LPM3_GPIO
        /* register the ICC read/write callback */
        if(OK != bsp_icc_event_register((ICC_CHN_MCORE_CCORE << 16)|MCORE_CCORE_FUNC_UART, recv_lpm3_msg_icc_cb , NULL, NULL, NULL))
        {
            dm_print_err("register icc callback fail\n");
            return ERROR;
        }
#endif
        g_dual_modem_ctrl.nv_flag = DUAl_MODEM_ENABLE;
        dm_print_err("dual modem init\n");
    }
    return OK;
}
/*
 * Overwrites sensitive data then frees internal memory.
 *
 * Safe to call with a NULL pointer or on a string whose buffer has
 * already been released; after the call the structure is in a clean,
 * reusable empty state.
 */
void ct_string_deinit(ct_string *str)
{
    if (str == NULL) {
        return;                 /* BUGFIX: tolerate NULL input */
    }
    if (str->string != NULL) {
        memset_s(str->string, 0, str->allocated_length);
        free(str->string);
        /* BUGFIX: drop the dangling pointer so a repeated deinit (or a
         * later access) cannot use or double-free the freed buffer */
        str->string = NULL;
    }
    memset_s(&(str->allocated_length), 0, sizeof(str->allocated_length));
    memset_s(&(str->actual_length), 0, sizeof(str->actual_length));
}
int svi_encfile(const char* outfn, const char* tagfn, const char* headerfn, const char* infn, const uint8_t* gkn) { int err = 0; mmfile inmf; err = mmfile_open(&inmf, infn, O_RDONLY); if (err != 0) { E(done, "mmfile_open %s", infn); } uint64_t outlen = svi_buflen(inmf.length); uint64_t numblocks = outlen / 65536; uint64_t taglen = numblocks * 64; mmfile outmf; err = mmfile_create(&outmf, outfn, outlen); if (err != 0) { E(cleanup_inmf, "mmfile_create %s", infn); } mmfile tagsmf; err = mmfile_create(&tagsmf, tagfn, taglen); if (err != 0) { E(cleanup_outmf, "mmfile_create %s", tagfn); } mmfile headermf; err = mmfile_create(&headermf, headerfn, 4096); if (err != 0) { E(cleanup_tagsmf, "mmfile_create %s", headerfn); } memset_s(headermf.mem, headermf.length, 0, headermf.length); err = svi_encrypt(outmf.mem, tagsmf.mem, headermf.mem, inmf.mem); if (err != 0) { // Something really bad happened. We can't assess what went // wrong, so we sanitize all of the output files. memset_s(outmf.mem, outmf.length, 0, outmf.length); memset_s(tagsmf.mem, tagsmf.length, 0, tagsmf.length); memset_s(headermf.mem, headermf.length, 0, headermf.length); E(cleanup_headermf, "svi_inplace %u", err); } uint8_t headertag[64] = {0}; blake2b_state s; blake2b_init_key(&s, 64, gkn, 64); blake2b_update(&s, header, HEADERLEN); blake2b_final(&s, headertag, 64); // Clean up the blake2b state. memset_s(&s, sizeof(blake2b_state), 0, sizeof(blake2b_state)); cleanup_headermf: mmfile_close(&headermf); cleanup_tagsmf: mmfile_close(&tagsmf); cleanup_outmf: mmfile_close(&outmf); cleanup_inmf: mmfile_close(&inmf); done: return err; }
/*
 * Fill `rand` with `length_in_bytes` bytes of hardware randomness,
 * drawn 32 bits at a time via __do_get_rand32.  The destination may be
 * inside or outside the enclave, but must not straddle the boundary.
 */
sgx_status_t sgx_read_rand(unsigned char *rand, size_t length_in_bytes)
{
    /* parameter checks */
    if (!rand || !length_in_bytes)
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }
    if (!sgx_is_within_enclave(rand, length_in_bytes) &&
        !sgx_is_outside_enclave(rand, length_in_bytes))
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }

    uint32_t chunk = 0;
    unsigned char *out = rand;
    size_t remaining = length_in_bytes;

    /* copy up to 4 random bytes per rdrand call until satisfied */
    while (remaining > 0)
    {
        sgx_status_t status = __do_get_rand32(&chunk);
        if (status != SGX_SUCCESS)
        {
            return status;
        }
        size_t take = (remaining < sizeof(chunk)) ? remaining : sizeof(chunk);
        memcpy(out, &chunk, take);
        out += take;
        remaining -= take;
    }

    /* scrub the last random word from the stack */
    memset_s(&chunk, sizeof(chunk), 0, sizeof(chunk));
    return SGX_SUCCESS;
}
/*
 * Finalise an AES-GCM-128 encryption and return the authentication tag.
 *
 * `aes_gcm_state` is an EVP_CIPHER_CTX created by the matching init call;
 * on failure the output MAC buffer is zeroised so callers never see a
 * partial/stale tag.
 */
sgx_status_t sgx_aes_gcm128_enc_get_mac(uint8_t *mac, sgx_aes_state_handle_t aes_gcm_state)
{
    if ((mac == NULL) || (aes_gcm_state == NULL))
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }

    sgx_status_t ret = SGX_ERROR_UNEXPECTED;
    int tmp = 0;
    EVP_CIPHER_CTX *pState = (EVP_CIPHER_CTX*)aes_gcm_state;

    do
    {
        // Finalise the encryption
        //
        if (1 != EVP_EncryptFinal_ex(pState, NULL, &tmp))
        {
            break;
        }

        // Get tag (MAC)
        //
        if (!EVP_CIPHER_CTX_ctrl(pState, EVP_CTRL_AEAD_GET_TAG, SGX_AESGCM_MAC_SIZE, mac))
        {
            break;
        }

        ret = SGX_SUCCESS;
        /*
         * BUGFIX: the terminator must be while (0).  With while (1) the
         * success path never exited the loop and re-finalised the cipher
         * context forever.
         */
    } while (0);

    //In case of error, clear output MAC buffer.
    //
    if (ret != SGX_SUCCESS)
    {
        memset_s(mac, SGX_AESGCM_MAC_SIZE, 0, SGX_AESGCM_MAC_SIZE);
    }

    return ret;
}
/*
 * Securely dispose of an IPP big-number context: zeroise the underlying
 * buffer (when its size can be determined) and then free it.  A NULL
 * context is ignored; an invalid size still releases the memory.
 */
static void sample_ipp_secure_free_BN(IppsBigNumState *pBN, int size_in_bytes)
{
    if(pBN == NULL || size_in_bytes <= 0 || size_in_bytes/sizeof(Ipp32u) <= 0)
    {
        /* size unusable: free without scrubbing (nothing to measure) */
        if(pBN)
        {
            free(pBN);
        }
        return;
    }
    int bn_size = 0;

    // Get the size of the IppsBigNumState context in bytes
    // CONSISTENCY FIX: check the IPP status like sgx_ipp_secure_free_BN
    // does, instead of relying on bn_size alone.
    IppStatus error_code = ippsBigNumGetSize(size_in_bytes/(int)sizeof(Ipp32u), &bn_size);
    if (error_code != ippStsNoErr || bn_size <= 0)
    {
        free(pBN);
        return;
    }

    // Clear the buffer before free.
    memset_s(pBN, bn_size, 0, bn_size);
    free(pBN);
    return;
}
/***************************************************************************** Function : Ipv6PrintInfo Description: print Ipv4/6 info Input : None Output : None Return : *****************************************************************************/ void Ipv6PrintInfo(void * handle) { unsigned int i = 0; char vif_path[NETINFO_PATH_LEN] = {0}; /*xenstore print info*/ for (i = 0; i < XENSTORE_COUNT; i++) { memset_s(vif_path,NETINFO_PATH_LEN,0,NETINFO_PATH_LEN); /*assemble xenstore path*/ if(i == 0) { (void)snprintf_s(vif_path, NETINFO_PATH_LEN, NETINFO_PATH_LEN, "%s", IPV6_VIF_DATA_PATH); } else { (void)snprintf_s(vif_path, NETINFO_PATH_LEN, NETINFO_PATH_LEN, "%s_%u", IPV6_VIF_DATA_PATH, i); } if(xb_write_first_flag == 0) { (void)write_to_xenstore(handle, vif_path, ArrRetNet[i]); } else { (void)write_weak_to_xenstore(handle, vif_path, ArrRetNet[i]); } } }
/*
 * The time between reseed must be at least RESEED_INTERVAL
 * microseconds.
 *
 * Returns 1 when enough time has elapsed since the previous reseed, and
 * in that case also records the current time as the new
 * last_reseed_time; returns 0 otherwise.
 */
static int
enough_time_passed(FState * st)
{
    struct timeval now;
    struct timeval *last = &st->last_reseed_time;
    int         ok = 0;

    gettimeofday(&now, NULL);

    /* check how much time has passed */
    if (now.tv_sec > last->tv_sec + 1)
        ok = 1;
    else if (now.tv_sec == last->tv_sec + 1)
    {
        /* crossed a second boundary: normalize before comparing */
        if (1000000 + now.tv_usec - last->tv_usec >= RESEED_INTERVAL)
            ok = 1;
    }
    else if (now.tv_usec - last->tv_usec >= RESEED_INTERVAL)
        ok = 1;

    /* reseed will happen, update last_reseed_time */
    if (ok)
        memcpy(last, &now, sizeof(now));

    memset_s(&now, sizeof(now), 0, sizeof(now));
    return ok;
}
/*
 * Compute the PR value: HMAC-SHA256 over (oldSK || byteToAdd) keyed with
 * m_MK, written to *hmac.  Returns AE_SUCCESS or a PSE-PR error code.
 */
ae_error_t SigmaCryptoLayer::ComputePR(SIGMA_SECRET_KEY* oldSK, uint8_t byteToAdd, SIGMA_HMAC* hmac)
{
    /* NOTE(review): the memcpy below assumes
       SIGMA_SK_LENGTH <= sizeof(SIGMA_SIGN_KEY) — confirm against the
       SIGMA type definitions. */
    uint8_t Sk_Wth_Added_Byte[sizeof(SIGMA_SIGN_KEY)+1];
    ae_error_t ae_status = PSE_PR_PR_CALC_ERROR;
    memset(hmac, 0, sizeof(*hmac));
    do
    {
        /* message = old secret key with one extra byte appended */
        memcpy(Sk_Wth_Added_Byte, oldSK, SIGMA_SK_LENGTH);
        Sk_Wth_Added_Byte[SIGMA_SK_LENGTH] = byteToAdd;
        //Compute hmac
        sgx_status_t status = sgx_hmac_sha256_msg(Sk_Wth_Added_Byte, SIGMA_SK_LENGTH+1,
            m_MK, SIGMA_MK_LENGTH, (uint8_t *)hmac, SIGMA_HMAC_LENGTH);
        // defense-in-depth, clear secret data
        memset_s(Sk_Wth_Added_Byte, sizeof(Sk_Wth_Added_Byte), 0, sizeof(Sk_Wth_Added_Byte));
        if (SGX_SUCCESS != status)
        {
            ae_status = sgx_error_to_pse_pr_error(status);
            break;
        }
        ae_status = AE_SUCCESS;
    } while (0);
    return ae_status;
}
/*
 * Initialize the secondary ("ipc_s") mailbox block for the modem:
 * locate its device-tree node, map its registers, reset its interrupt
 * state, and hook up its interrupt handler.  Silently returns when the
 * node is absent (feature not present on this platform).
 */
static void bsp_ipc_s_init(void)
{
    s32 ret = 0;
    struct device_node *node = NULL;
    const char *compatible_name = "hisilicon,ipc_balong_mdm_s";
    char *ret_of_iomap = NULL;
    u32 irq_no_ipc_int = 0;

    node = of_find_compatible_node(NULL, NULL, compatible_name);
    if (!node)
        return;

    ret_of_iomap = of_iomap(node, 0);
    if (!ret_of_iomap)
    {
        bsp_trace(BSP_LOG_LEVEL_ERROR,BSP_MODU_IPC,"ipc_s of_iomap fail\n");
        return;
    }

    /* NOTE(review): this zeroes the SECOND half of ipc_int_table
       (entries INTSRC_NUM .. 2*INTSRC_NUM-1) — presumably the slots
       reserved for this secondary block; confirm against the ipc_ctrl
       table layout. */
    (void)memset_s((void*)(ipc_ctrl.ipc_int_table+INTSRC_NUM), sizeof(struct ipc_entry) * INTSRC_NUM, 0x0, sizeof(struct ipc_entry) * INTSRC_NUM);
    ipc_ctrl.ipc_base[IPCM_S] = (u32)ret_of_iomap;

    /* reset the CPU interrupt mask register and clear pending interrupts */
    writel(0x0,ipc_ctrl.ipc_base[IPCM_S] + BSP_IPC_CPU_INT_MASK(ipc_ctrl.core_num));
    writel(0xffffffff,ipc_ctrl.ipc_base[IPCM_S] + BSP_IPC_CPU_INT_CLR(ipc_ctrl.core_num));

    irq_no_ipc_int = irq_of_parse_and_map(node, 0);
    ret = request_irq(irq_no_ipc_int,(irq_handler_t)ipc_int_handler, 0, "ipc_irq",(void*)IPCM_S);
    if (ret)
    {
        bsp_trace(BSP_LOG_LEVEL_ERROR,BSP_MODU_IPC,"ipc_s int handler error,init failed\n");
        return;
    }
    bsp_trace(BSP_LOG_LEVEL_ERROR,BSP_MODU_IPC,"ccore ipc_s init success\n");
    return;
}
/*
 * Securely release an IPP big-number context: determine the size of the
 * underlying buffer, zeroise it, then free it.  A NULL pointer is a
 * no-op; an invalid size still frees the memory (without scrubbing,
 * since the extent cannot be measured).
 */
void sgx_ipp_secure_free_BN(IppsBigNumState *pBN, int size_in_bytes)
{
    if (pBN == NULL)
    {
        return;
    }

    /* size must be a positive multiple of the IPP word size */
    if (size_in_bytes <= 0 || (size_in_bytes % sizeof(Ipp32u)) != 0)
    {
        free(pBN);
        return;
    }

    /* Query the context size; not expected to fail for a validated size,
     * but handled defensively. */
    int ctx_size = 0;
    IppStatus rc = ippsBigNumGetSize(size_in_bytes / (int)sizeof(Ipp32u), &ctx_size);
    if (rc != ippStsNoErr)
    {
        free(pBN);
        return;
    }

    /* scrub, then release */
    memset_s(pBN, ctx_size, 0, ctx_size);
    free(pBN);
}
/*
 * MD2 compression function: fold one 16-byte input block into the hash
 * state and update the running 16-byte checksum.
 */
static void
calc(struct md2 *m, const void *v)
{
    unsigned char x[48], L;
    const unsigned char *p = v;
    int i, j, t;

    /* update the checksum: each byte mixes the input with the previous
       checksum byte through the substitution table */
    L = m->checksum[15];
    for (i = 0; i < 16; i++)
        L = m->checksum[i] ^= subst[p[i] ^ L];

    /* build the 48-byte working buffer: state || block || state^block */
    for (i = 0; i < 16; i++) {
        x[i] = m->state[i];
        x[i + 16] = p[i];
        x[i + 32] = x[i] ^ p[i];
    }

    /* 18 mixing rounds over the working buffer */
    t = 0;
    for (i = 0; i < 18; i++) {
        for (j = 0; j < 48; j++)
            t = x[j] ^= subst[t];
        t = (t + i) & 0xff;
    }

    memcpy(m->state, x, 16);
    /* scrub intermediate hash material */
    memset_s(x, sizeof(x), 0, sizeof(x));
}
/* Clear the provided context structure.
   Wipes the entire ChaCha20-Poly1305 (IETF) context so that no key or
   nonce material remains in memory after use. */
void psChacha20Poly1305IetfClear(psChacha20Poly1305Ietf_t *ctx)
{
    memset_s(ctx, sizeof(*ctx), 0x0, sizeof(*ctx));
}
/*
 * Scrub and release all memory owned by the set, then reset it to a
 * valid empty state.  Elements and indirect data may contain key
 * material, so both are zeroised before being freed.
 *
 * NOTE(review): memset_s here takes three arguments — apparently a
 * project-local (ptr, value, size) variant, not C11 memset_s; confirm
 * against its declaration.
 */
void AuthorizationSet::FreeData() {
    /* wipe before delete[] so secrets never reach the allocator */
    if (elems_ != NULL)
        memset_s(elems_, 0, elems_size_ * sizeof(keymaster_key_param_t));
    if (indirect_data_ != NULL)
        memset_s(indirect_data_, 0, indirect_data_size_);

    delete[] elems_;
    delete[] indirect_data_;

    /* reset members so the object can be safely reused */
    elems_ = NULL;
    indirect_data_ = NULL;
    elems_size_ = 0;
    elems_capacity_ = 0;
    indirect_data_size_ = 0;
    indirect_data_capacity_ = 0;
    error_ = OK;
}
/*
 * generate new key from all the pools
 *
 * Fortuna reseed: pool k contributes to reseed number n only when 2^k
 * divides n, so higher-numbered pools are drained exponentially less
 * often and accumulate more entropy between uses.
 */
static void
reseed(FState * st)
{
    unsigned    k;
    unsigned    n;
    MD_CTX      key_md;
    unsigned char buf[BLOCK];

    /* set pool as empty */
    st->pool0_bytes = 0;

    /*
     * Both #0 and #1 reseed would use only pool 0. Just skip #0 then.
     */
    n = ++st->reseed_count;

    /*
     * The goal: use k-th pool only 1/(2^k) of the time.
     */
    md_init(&key_md);
    for (k = 0; k < NUM_POOLS; k++)
    {
        md_result(&st->pool[k], buf);
        md_update(&key_md, buf, BLOCK);
        /* stop at the first pool whose turn it is not (or on counter
           wraparound to 0) */
        if (n & 1 || !n)
            break;
        n >>= 1;
    }

    /* add old key into mix too */
    md_update(&key_md, st->key, BLOCK);

    /* add pid to make output diverse after fork() */
    md_update(&key_md, (const unsigned char *)&st->pid, sizeof(st->pid));

    /* now we have new key */
    md_result(&key_md, st->key);

    /* use new key */
    ciph_init(&st->ciph, st->key, BLOCK);

    /* scrub key material left on the stack */
    memset_s(&key_md, sizeof(key_md), 0, sizeof(key_md));
    memset_s(buf, sizeof(buf), 0, sizeof(buf));
}
/*
 * Zero `n` bytes at `s` for scrubbing secrets, using a primitive the
 * compiler is not allowed to optimize away: C11 Annex K memset_s when
 * available, otherwise BSD/glibc explicit_bzero, otherwise the
 * project-provided safe_memset fallback.
 */
void safe_memclear(void *s, size_t n)
{
#if defined(HAVE_MEMSET_S)
    memset_s(s, n, 0, n);
#elif defined(HAVE_EXPLICIT_BZERO)
    explicit_bzero(s, n);
#else
    safe_memset(s, 0, n);
#endif
}
/*
 * Write the current digest of `ctx` to `dst` without disturbing the
 * ongoing hash: finalisation is performed on a throwaway copy of the
 * context, which is then scrubbed.
 */
static void
md_result(MD_CTX * ctx, unsigned char *dst)
{
    SHA256_CTX  scratch;

    memcpy(&scratch, ctx, sizeof(*ctx));
    SHA256_Final(dst, &scratch);
    /* the copy holds hash state derived from secrets — wipe it */
    memset_s(&scratch, sizeof(scratch), 0, sizeof(scratch));
}
/*
 * Wipe an AES-GCM context, including any externally held block-cipher
 * key state, so no key material survives the operation.
 */
void psAesClearGCM(psAesGcm_t *ctx)
{
    /* Only need to clear block if it's implemented externally,
       Matrix block is part of AesGcm_t and will be cleared below */
#ifndef USE_MATRIX_AES_BLOCK
    psAesClearBlockKey(&ctx->key);
#endif

    /* zeroise the whole context structure */
    memset_s(ctx, sizeof(psAesGcm_t), 0x0, sizeof(psAesGcm_t));
}
/*
 * Generate the client's ephemeral EC256 key pair (a, g_a) for the
 * remote-attestation session `context` and return the public part in
 * *g_a.  Requires the session to be in the ra_inited state; on success
 * advances it to ra_get_gaed.  The local copy of the private key is
 * scrubbed before returning.
 */
extern "C" sgx_status_t sgx_ra_get_ga(
    sgx_ra_context_t context,
    sgx_ec256_public_t *g_a)
{
    sgx_status_t se_ret;
    if(vector_size(&g_ra_db) <= context||!g_a)
        return SGX_ERROR_INVALID_PARAMETER;
    ra_db_item_t* item = NULL;
    if(0 != vector_get(&g_ra_db, context, reinterpret_cast<void**>(&item)) || item == NULL )
        return SGX_ERROR_INVALID_PARAMETER;

    sgx_ecc_state_handle_t ecc_state = NULL;
    sgx_ec256_public_t pub_key;
    sgx_ec256_private_t priv_key;
    memset(&pub_key, 0, sizeof(pub_key));
    memset(&priv_key, 0, sizeof(priv_key));

    /* the session slot is shared; serialize access to its state */
    sgx_spin_lock(&item->item_lock);
    do
    {
        //sgx_ra_init must have been called
        if (item->state != ra_inited)
        {
            se_ret = SGX_ERROR_INVALID_STATE;
            break;
        }
        // ecc_state should be closed when exit.
        se_ret = sgx_ecc256_open_context(&ecc_state);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }
        se_ret = sgx_ecc256_create_key_pair(&priv_key, &pub_key, ecc_state);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }
        /* stash the key pair in the session slot and hand back g_a */
        memcpy(&item->a, &priv_key, sizeof(item->a));
        memcpy(&item->g_a, &pub_key, sizeof(item->g_a));
        memcpy(g_a, &pub_key, sizeof(sgx_ec256_public_t));
        item->state = ra_get_gaed;
        //clear local private key to defense in depth
        memset_s(&priv_key,sizeof(priv_key),0,sizeof(sgx_ec256_private_t));
    }while(0);
    sgx_spin_unlock(&item->item_lock);
    if(ecc_state!=NULL)
        sgx_ecc256_close_context(ecc_state);
    return se_ret;
}
// TKE interface for isv enclaves sgx_status_t SGXAPI sgx_ra_close( sgx_ra_context_t context) { if(vector_size(&g_ra_db) <= context) return SGX_ERROR_INVALID_PARAMETER; ra_db_item_t* item = NULL; if(0 != vector_get(&g_ra_db, context, reinterpret_cast<void**>(&item)) || item == NULL ) return SGX_ERROR_INVALID_PARAMETER; sgx_spin_lock(&g_ra_db_lock); //safe clear private key and RA key before free memory to defense in depth memset_s(&item->a,sizeof(item->a),0,sizeof(sgx_ec256_private_t)); memset_s(&item->vk_key,sizeof(item->vk_key),0,sizeof(sgx_ec_key_128bit_t)); memset_s(&item->mk_key,sizeof(item->mk_key),0,sizeof(sgx_ec_key_128bit_t)); memset_s(&item->sk_key,sizeof(item->sk_key),0,sizeof(sgx_ec_key_128bit_t)); memset_s(&item->smk_key,sizeof(item->smk_key),0,sizeof(sgx_ec_key_128bit_t)); SAFE_FREE(item); vector_set(&g_ra_db, context, NULL); sgx_spin_unlock(&g_ra_db_lock); return SGX_SUCCESS; }
/*****************************************************************************
 * Function    : bsp_gpio_init
 *
 * Description : GPIO initialization entry point.  Maps every GPIO bank's
 *               registers from the device tree and enables each bank's
 *               clock.  Idempotent: returns immediately once initialized.
 *
 * Input       : none
 *
 * Return      : GPIO_OK / last clk_enable() result on success, ERROR on
 *               failure
 *
 * History     : 2012-11-27
 *****************************************************************************/
s32 bsp_gpio_init(void)
{
    u32 i = 0;
    int ret = 0;
    char gpio_clk_name[40] = "";
    char node_name[NAME_LENTH] = "";
    char *base_addr = NULL;
    struct clk *gpio_clk = NULL;
    struct device_node *dev_node = NULL;

    /* already initialized: nothing to do */
    if (GPIO_DEF_RUNNING == g_u32GpioRunning)
    {
        return GPIO_OK;
    }

    spin_lock_init(&g_gpio_spinlock);

    for(i = 0; i < GPIO_MAX_BANK_NUM; i++)
    {
        (void)snprintf_s(node_name,NAME_LENTH,NAME_LENTH,"hisilicon,gpio%d",i);
        dev_node = of_find_compatible_node(NULL,NULL,node_name);
        if(!dev_node)
        {
            gpio_print_error("get gpio%d node failed!\n",i);
            return ERROR;
        }

        /* memory-map the bank to obtain its register base address */
        base_addr = (char *)of_iomap(dev_node, 0);
        if (NULL == base_addr)
        {
            gpio_print_error("gpio%d iomap fail\n",i);
            return ERROR;
        }
        s_u32GpioBaseAddr[i] = (u32)base_addr;

        /* look up and enable this bank's clock */
        (void)memset_s(gpio_clk_name, 40, 0 , 40);
        (void)snprintf_s(gpio_clk_name, 40, 40, "gpio%d_clk", i); /*lint !e119*/
        gpio_clk = (struct clk *)clk_get(NULL, gpio_clk_name);
        if(IS_ERR(gpio_clk))
        {
            gpio_print_error("gpio%d clk cannot get, 0x%x.\n", i, gpio_clk);
            return ERROR;
        }
        ret = clk_enable(gpio_clk);
    }

    gpio_print_info("gpio init ok.\n");
    g_u32GpioRunning = GPIO_DEF_RUNNING;
    return ret;
}
/*
 * Clear both depth buffers and/or the color buffer of a ZBuffer.
 *
 * NOTE(review): memset_s / memset_l here are TinyGL's 16-bit / 32-bit
 * word fills (dest, value, word_count) — NOT the C11 secure memset_s.
 */
void ZB_clear(ZBuffer *zb, int clear_z, int z, int clear_color, int r, int g, int b)
{
    int color;
    int y;
    PIXEL *pp;

    if (clear_z)
    {
        /* primary depth buffer (16-bit fill) */
        memset_s(zb->zbuf, z, zb->xsize * zb->ysize);
    }
    if (clear_z)
    {
        /* secondary depth buffer (32-bit fill) */
        memset_l(zb->zbuf2, z, zb->xsize * zb->ysize);
    }
    if (clear_color)
    {
        pp = zb->pbuf;
        for (y = 0; y < zb->ysize; y++)
        {
            color = RGB_TO_PIXEL(r, g, b);
            memset_s(pp, color, zb->xsize);
            /* advance one scanline; linesize is in bytes */
            pp = (PIXEL *)((char *)pp + zb->linesize);
        }
    }
}
/*
 * Discard all Fortuna generator state: mark the generator uninitialized
 * and wipe the global state (pools, key, counters) while holding the
 * generator lock.
 */
static void
fortuna_cleanup(void)
{
    HEIMDAL_MUTEX_lock(&fortuna_mutex);

    init_done = 0;
    have_entropy = 0;
    /* scrub all accumulated entropy and key material */
    memset_s(&main_state, sizeof(main_state), 0, sizeof(main_state));

    HEIMDAL_MUTEX_unlock(&fortuna_mutex);
}