/*
 * Build and issue a P2P escan ("p2p_scan" iovar) request.
 *
 * wl:               driver handle passed through to wlu_iovar_setbuf().
 * sync_id:          escan sync id echoed back in escan result events.
 * isActive:         nonzero -> active scan, zero -> passive scan.
 * numProbes:        probes per channel (active scan).
 * activeDwellTime:  per-channel dwell for active scans (ms).
 * passiveDwellTime: per-channel dwell for passive scans (ms).
 * num_channels:     number of entries in 'channels'.
 * channels:         channel list copied into the request.
 *
 * Returns the iovar status, or -1 on allocation failure.
 */
int
wl_p2p_scan(void *wl, uint16 sync_id, int isActive, int numProbes,
            int activeDwellTime, int passiveDwellTime,
            int num_channels, uint16 *channels)
{
	wl_p2p_scan_t *params = NULL;
	int params_size = 0;
	int malloc_size = 0;
	int nssid = 0;		/* no SSID filters are supplied */
	int err = 0;
	wl_escan_params_t *eparams;

	/* Worst-case allocation: p2p header + escan header + fixed scan
	 * params + max channel list + max SSID list. */
	malloc_size = sizeof(wl_p2p_scan_t);
	malloc_size += OFFSETOF(wl_escan_params_t, params) +
	    WL_SCAN_PARAMS_FIXED_SIZE + WL_NUMCHANNELS * sizeof(uint16);
	malloc_size += WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t);
	params = (wl_p2p_scan_t *)malloc(malloc_size);
	if (params == NULL) {
		fprintf(stderr, "Error allocating %d bytes for scan params\n",
		        malloc_size);
		return -1;
	}
	memset(params, 0, malloc_size);

	/* The escan parameter block immediately follows the p2p header. */
	eparams = (wl_escan_params_t *)(params + 1);
	params->type = 'E';	/* 'E' selects escan-style p2p scan */
	eparams->version = htod32(ESCAN_REQ_VERSION);
	eparams->action = htod16(WL_SCAN_ACTION_START);
	eparams->sync_id = sync_id;

	/* BUGFIX: source contained the mangled token "ðer_bcast"
	 * (mis-encoded "&ether_bcast"); scan for any BSSID. */
	memcpy(&eparams->params.bssid, &ether_bcast, ETHER_ADDR_LEN);
	eparams->params.bss_type = DOT11_BSSTYPE_ANY;
	eparams->params.scan_type = isActive ? 0 : WL_SCANFLAGS_PASSIVE;
	eparams->params.nprobes = htod32(numProbes);
	eparams->params.active_time = htod32(activeDwellTime);
	eparams->params.passive_time = htod32(passiveDwellTime);
	eparams->params.home_time = htod32(-1);	/* -1: use driver default */
	eparams->params.channel_num = 0;
	memcpy(eparams->params.channel_list, channels,
	       num_channels * sizeof(uint16));
	/* Pack SSID count and channel count into one field. */
	eparams->params.channel_num =
	    htod32((nssid << WL_SCAN_PARAMS_NSSID_SHIFT) |
	           (num_channels & WL_SCAN_PARAMS_COUNT_MASK));

	/* Actual request size: only what was filled in, not malloc_size.
	 * NOTE(review): uses sizeof(wl_escan_params_t) where the allocation
	 * above used OFFSETOF(wl_escan_params_t, params) — slightly larger
	 * but within the allocated buffer; preserved as-is. */
	params_size = sizeof(wl_p2p_scan_t) + sizeof(wl_escan_params_t) +
	    WL_SCAN_PARAMS_FIXED_SIZE + (num_channels * sizeof(uint16)) +
	    (nssid * sizeof(wlc_ssid_t));

	err = wlu_iovar_setbuf(wl, "p2p_scan", params, params_size,
	                       buf, WLC_IOCTL_MAXLEN);
	free(params);
	return err;
}
/* centralized clkreq control policy */
/*
 * Adjust PCIe CLKREQ behavior for the given power-state transition.
 * Only applies to PCIe gen1 cores; gen2 and others return untouched.
 * buscorerev 6 parts instead toggle bit 0x40 of chipcontrol (serdes
 * PLL powerdown) through the indirect chipcontrol_addr/_data pair.
 */
static void pcie_clkreq_upd(pcicore_info_t *pi, uint state)
{
	si_t *sih = pi->sih;

	ASSERT(PCIE(sih));
	if (!PCIE_GEN1(sih))
		return;

	switch (state) {
	case SI_DOATTACH:
		/* At attach: disable clkreq when ASPM is in use. */
		if (PCIEGEN1_ASPM(sih))
			pcie_clkreq((void *)pi, 1, 0);
		break;
	case SI_PCIDOWN:
		if (sih->buscorerev == 6) { /* turn on serdes PLL down */
			/* Select chipcontrol reg 0, then clear bit 0x40. */
			si_corereg(sih, SI_CC_IDX,
			    OFFSETOF(chipcregs_t, chipcontrol_addr), ~0, 0);
			si_corereg(sih, SI_CC_IDX,
			    OFFSETOF(chipcregs_t, chipcontrol_data), ~0x40, 0);
		} else if (pi->pcie_pr42767) {
			/* PR42767 workaround was applied: enable clkreq. */
			pcie_clkreq((void *)pi, 1, 1);
		}
		break;
	case SI_PCIUP:
		if (sih->buscorerev == 6) { /* turn off serdes PLL down */
			/* Select chipcontrol reg 0, then set bit 0x40. */
			si_corereg(sih, SI_CC_IDX,
			    OFFSETOF(chipcregs_t, chipcontrol_addr), ~0, 0);
			si_corereg(sih, SI_CC_IDX,
			    OFFSETOF(chipcregs_t, chipcontrol_data), ~0x40, 0x40);
		} else if (PCIEGEN1_ASPM(sih)) { /* disable clkreq */
			pcie_clkreq((void *)pi, 1, 0);
		}
		break;
	default:
		ASSERT(0);
		break;
	}
}
/*
 * Read-modify-write the ChipCommon gpiotimerval register.
 * Bits selected by 'mask' are replaced with the matching bits of
 * 'gpiotimerval'; the resulting register value is returned.
 * Returns (uint32)-1 on chipcommon cores older than rev 16, which
 * lack this register.
 */
uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval)
{
	si_info_t *sii = SI_INFO(sih);

	/* gpiotimerval first appeared in chipcommon core rev 16. */
	if (sih->ccrev < 16)
		return -1;

	return si_corereg(sih, SI_CC_IDX,
	                  OFFSETOF(chipcregs_t, gpiotimerval),
	                  mask, gpiotimerval);
}
/*
 * mdb walker initialization for fmd buffers: snapshot the
 * fmd_buf_hash_t at walk_addr and hand its bucket array to the
 * generic hash walker, chaining through fmd_buf_t.buf_next.
 */
static int
buf_walk_init(mdb_walk_state_t *wsp)
{
	fmd_buf_hash_t hash;

	if (mdb_vread(&hash, sizeof (hash), wsp->walk_addr) != sizeof (hash)) {
		mdb_warn("failed to read fmd_buf_hash_t at %p", wsp->walk_addr);
		return (WALK_ERR);
	}

	return (hash_walk_init(wsp, (uintptr_t)hash.bh_hash, hash.bh_hashlen,
	    "fmd_buf", sizeof (fmd_buf_t), OFFSETOF(fmd_buf_t, buf_next)));
}
/*
 * Register the Pike "Search" program and its support state.
 * Reserves object storage for struct pike_mem_searcher, maps its
 * 's' member to the string variable "__s", publishes the finished
 * program as the constant "Search", and creates the weak mapping
 * used as the memsearch cache.
 */
void init_pike_searching(void)
{
  start_new_program();
  /* Remember the storage offset so members can be addressed below. */
  pike_search_struct_offset=ADD_STORAGE(struct pike_mem_searcher);
  MAP_VARIABLE("__s", tStr, 0,
               pike_search_struct_offset + OFFSETOF(pike_mem_searcher,s),
               PIKE_T_STRING);
  pike_search_program=end_program();
  add_program_constant("Search",pike_search_program,ID_STATIC);
  memsearch_cache=allocate_mapping(10);
  /* Weak flag lets the GC reclaim cached searchers under pressure. */
  memsearch_cache->data->flags |= MAPPING_FLAG_WEAK;
}
/*
 * Enable or disable the ChipCommon GPIO interrupt by toggling the
 * CI_GPIO bit of the intmask register. Returns the updated register
 * value, or (uint32)-1 on chipcommon cores older than rev 11.
 */
uint32 si_gpio_int_enable(si_t *sih, bool enable)
{
	si_info_t *sii = SI_INFO(sih);
	uint reg_offset;

	/* GPIO interrupt masking requires chipcommon rev >= 11. */
	if (sih->ccrev < 11)
		return -1;

	reg_offset = OFFSETOF(chipcregs_t, intmask);
	return si_corereg(sih, SI_CC_IDX, reg_offset, CI_GPIO,
	                  enable ? CI_GPIO : 0);
}
/* gpio output enable addr */ static uint32* get_addr_gpioouten(void) { sb_info_t *si = SB_INFO(sbh); uint offset = 0; switch (si->gpioid) { case SB_CC: offset = OFFSETOF(chipcregs_t, gpioouten); break; case SB_PCI: offset = OFFSETOF(sbpciregs_t, gpioouten); break; case SB_EXTIF: offset = OFFSETOF(extifregs_t, gpio[0].outen); break; default: return NULL; } return (uint32 *) ((uchar *)get_gpio_base_addr() + offset); }
/**********************************************************************
 *         DOSVM_HardwareInterruptRM
 *
 * Emulate call to interrupt handler in real mode.
 *
 * Either calls directly builtin handler or pushes interrupt frame to
 * stack and changes instruction pointer to interrupt handler.
 */
void DOSVM_HardwareInterruptRM( CONTEXT86 *context, BYTE intnum )
{
     FARPROC16 handler = DOSVM_GetRMHandler( intnum );

     /* check if the call goes to an unhooked interrupt */
     if (SELECTOROF(handler) == 0xf000)
     {
         /* if so, call it directly */
         /* Stub offset / DOSVM_STUB_RM recovers the builtin vector number. */
         TRACE( "builtin interrupt %02x has been invoked "
                "(through vector %02x)\n",
                OFFSETOF(handler)/DOSVM_STUB_RM, intnum );
         DOSVM_CallBuiltinHandler( context, OFFSETOF(handler)/DOSVM_STUB_RM );
     }
     else
     {
         /* the interrupt is hooked, simulate interrupt in DOS space */
         WORD  flag  = LOWORD( context->EFlags );

         TRACE( "invoking hooked interrupt %02x at %04x:%04x\n",
                intnum, SELECTOROF(handler), OFFSETOF(handler) );

         /* Copy virtual interrupt flag to pushed interrupt flag. */
         if (context->EFlags & VIF_MASK)
             flag |= IF_MASK;
         else
             flag &= ~IF_MASK;

         /* Build the FLAGS/CS/IP frame a real INT would push. */
         PUSH_WORD16( context, flag );
         PUSH_WORD16( context, context->SegCs );
         PUSH_WORD16( context, LOWORD( context->Eip ));

         /* Transfer control to the hooked handler. */
         context->SegCs = SELECTOROF( handler );
         context->Eip   = OFFSETOF( handler );

         /* Clear virtual interrupt flag and trap flag. */
         context->EFlags &= ~(VIF_MASK | TF_MASK);
     }
}
/***********************************************************************
 *           SwitchStackTo   (KERNEL.108)
 *
 * Switch the current 16-bit stack to seg:ptr, copying the active
 * STACK16FRAME (plus this call's three WORD arguments) onto the new
 * stack so the eventual return unwinds correctly.  The old SS:SP is
 * parked in the instance data for SwitchStackBack16 to restore.
 */
void WINAPI SwitchStackTo16( WORD seg, WORD ptr, WORD top )
{
    STACK16FRAME *oldFrame, *newFrame;
    INSTANCEDATA *pData;
    UINT16 copySize;

    if (!(pData = GlobalLock16( seg ))) return;
    TRACE("old=%04x:%04x new=%04x:%04x\n",
          SELECTOROF( NtCurrentTeb()->WOW32Reserved ),
          OFFSETOF( NtCurrentTeb()->WOW32Reserved ), seg, ptr );

    /* Save the old stack */

    oldFrame = CURRENT_STACK16;
    /* pop frame + args and push bp */
    pData->old_ss_sp   = (SEGPTR)NtCurrentTeb()->WOW32Reserved + sizeof(STACK16FRAME)
                           + 2 * sizeof(WORD);
    *(WORD *)MapSL(pData->old_ss_sp) = oldFrame->bp;
    pData->stacktop    = top;
    pData->stackmin    = ptr;
    pData->stackbottom = ptr;

    /* Switch to the new stack */

    /* Note: we need to take the 3 arguments into account; otherwise,
     * the stack will underflow upon return from this function.
     */
    copySize = oldFrame->bp - OFFSETOF(pData->old_ss_sp);
    copySize += 3 * sizeof(WORD) + sizeof(STACK16FRAME);
    NtCurrentTeb()->WOW32Reserved = (void *)MAKESEGPTR( seg, ptr - copySize );
    newFrame = CURRENT_STACK16;

    /* Copy the stack frame and the local variables to the new stack */

    memmove( newFrame, oldFrame, copySize );
    newFrame->bp = ptr;
    *(WORD *)MapSL( MAKESEGPTR( seg, ptr ) ) = 0;  /* clear previous bp */
}
/*
 * mdb walker initialization for SERD engines: snapshot the
 * fmd_serd_hash_t at walk_addr and hand its bucket array to the
 * generic hash walker, chaining through fmd_serd_eng_t.sg_next.
 */
static int
serd_walk_init(mdb_walk_state_t *wsp)
{
	fmd_serd_hash_t hash;

	if (mdb_vread(&hash, sizeof (hash), wsp->walk_addr) != sizeof (hash)) {
		mdb_warn("failed to read fmd_serd_hash at %p", wsp->walk_addr);
		return (WALK_ERR);
	}

	return (hash_walk_init(wsp, (uintptr_t)hash.sh_hash, hash.sh_hashlen,
	    "fmd_serd_eng", sizeof (fmd_serd_eng_t),
	    OFFSETOF(fmd_serd_eng_t, sg_next)));
}
/*
 * Create the VAO/VBO pair for one sub-mesh, upload its vertex data,
 * and describe the position (location 0) and normal (location 1)
 * attributes. Leaves no VAO bound on exit.
 */
void sb7fbxmodel::sub_mesh_set_up(sub_mesh& sm)
{
    glGenVertexArrays(1, &sm.vao);
    glBindVertexArray(sm.vao);

    glGenBuffers(1, &sm.vbo);
    /* BUGFIX: previously bound sm.vao here, so the freshly generated VBO
     * was never the target of glBufferData; bind the VBO itself. */
    glBindBuffer(GL_ARRAY_BUFFER, sm.vbo);
    glBufferData(GL_ARRAY_BUFFER, sizeof(vetex_attr) * sm.count, sm.va,
                 GL_STATIC_DRAW);

    /* Position at struct start, normal at its member offset. */
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(vetex_attr), NULL);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(vetex_attr),
                          (void*)OFFSETOF(vetex_attr, normal));
    glEnableVertexAttribArray(0);
    //glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 5 * sizeof(GL_FLOAT), (GLvoid*)(3 * sizeof(GL_FLOAT)));
    //glEnableVertexAttribArray(1);
    glBindVertexArray(0);
}
/***********************************************************************
 *           SwitchStackBack   (KERNEL.109)
 *
 * Undo a previous SwitchStackTo16: restore SS:SP from the saved
 * old_ss_sp in the instance data, pop the saved bp, and rebuild a
 * STACK16FRAME on the old stack so the 16-bit return path works.
 */
void WINAPI SwitchStackBack16( CONTEXT *context )
{
    STACK16FRAME *oldFrame, *newFrame;
    INSTANCEDATA *pData;

    if (!(pData = GlobalLock16(SELECTOROF(NtCurrentTeb()->WOW32Reserved))))
        return;
    if (!pData->old_ss_sp)
    {
        WARN("No previous SwitchStackTo\n" );
        return;
    }
    TRACE("restoring stack %04x:%04x\n",
          SELECTOROF(pData->old_ss_sp), OFFSETOF(pData->old_ss_sp) );

    oldFrame = CURRENT_STACK16;

    /* Pop bp from the previous stack */

    context->Ebp = (context->Ebp & ~0xffff) | *(WORD *)MapSL(pData->old_ss_sp);
    pData->old_ss_sp += sizeof(WORD);

    /* Switch back to the old stack */

    NtCurrentTeb()->WOW32Reserved = (void *)(pData->old_ss_sp - sizeof(STACK16FRAME));
    context->SegSs = SELECTOROF(pData->old_ss_sp);
    context->Esp   = OFFSETOF(pData->old_ss_sp) - sizeof(DWORD); /*ret addr*/
    pData->old_ss_sp = 0;   /* mark the saved stack as consumed */

    /* Build a stack frame for the return */

    newFrame = CURRENT_STACK16;
    newFrame->frame32     = oldFrame->frame32;
    newFrame->module_cs   = oldFrame->module_cs;
    newFrame->callfrom_ip = oldFrame->callfrom_ip;
    newFrame->entry_ip    = oldFrame->entry_ip;
}
/*
 * Locate a DOS character device by name.
 * Walks the driver chain starting at the NUL device in the List Of
 * Lists, comparing the 8-character space-padded device name, and
 * returns the device header's SEGPTR (or the terminating value when
 * the chain is exhausted).
 */
DWORD DOSDEV_FindCharDevice(char*name)
{
  SEGPTR cur_ptr = MAKESEGPTR(HIWORD(DOS_LOLSeg),
                              FIELD_OFFSET(DOS_LISTOFLISTS,NUL_dev));
  DOS_DEVICE_HEADER *cur = PTR_REAL_TO_LIN(SELECTOROF(cur_ptr),OFFSETOF(cur_ptr));
  char dname[8];
  int cnt;

  /* get first 8 characters */
  /* if less than 8 characters, pad with spaces */
  for (cnt=0; name[cnt] && cnt<8; cnt++) dname[cnt]=name[cnt];
  while(cnt<8) dname[cnt++] = ' ';

  /* search for char devices with the right name */
  while (cur &&
         ((!(cur->attr & ATTR_CHAR)) || memcmp(cur->name,dname,8)))
  {
    cur_ptr = cur->next_dev;
    if (cur_ptr == NONEXT) cur=NULL;   /* end of chain */
    else cur = PTR_REAL_TO_LIN(SELECTOROF(cur_ptr),OFFSETOF(cur_ptr));
  }
  /* NOTE(review): when the device is not found, the returned cur_ptr is
   * the NONEXT terminator rather than 0 — callers appear to rely on
   * checking against NONEXT. */
  return cur_ptr;
}
/*
 * mdb walker initialization for transport class hashes: snapshot the
 * fmd_xprt_class_hash_t at walk_addr and hand its bucket array to the
 * generic hash walker, chaining through fmd_xprt_class_t.xc_next.
 */
static int
xpc_walk_init(mdb_walk_state_t *wsp)
{
	fmd_xprt_class_hash_t hash;

	if (mdb_vread(&hash, sizeof (hash), wsp->walk_addr) != sizeof (hash)) {
		mdb_warn("failed to read fmd_xprt_class_hash at %p",
		    wsp->walk_addr);
		return (WALK_ERR);
	}

	return (hash_walk_init(wsp, (uintptr_t)hash.xch_hash, hash.xch_hashlen,
	    "fmd_xprt_class", sizeof (fmd_xprt_class_t),
	    OFFSETOF(fmd_xprt_class_t, xc_next)));
}
/* Update Pike_fp->pc */
/*
 * Emit SPARC machine code that stores the current program counter
 * into the active Pike frame's pc field.  %o7 is used as scratch:
 * on sparc64 it is read directly from the ASR pc register; otherwise
 * a "call .+8" captures the pc via the call's side effect on %o7.
 */
void sparc_update_pc(void)
{
  /* Emit: load the Pike frame pointer into its dedicated register. */
  LOAD_PIKE_FP();
#ifdef PIKE_BYTECODE_SPARC64
  /* The ASR registers are implementation specific in Sparc V7 and V8. */
  /* rd %pc, %o7 */
  SPARC_RD(SPARC_REG_O7, SPARC_RD_REG_PC);
#else /* !0 */
  /* call .+8 */
  SPARC_CALL(8);
  /* The new %o7 is available in the delay slot. */
#endif /* 0 */
  /* stw %o7, [ %pike_fp + pc ] */
  PIKE_STPTR(SPARC_REG_O7, SPARC_REG_PIKE_FP, OFFSETOF(pike_frame, pc), 1);
}
/* Needs to happen when coming out of 'standby'/'hibernate' */
/*
 * buscorerev 7 workaround: keep the serdes PLL from powering down by
 * setting CHIPCTRL_4321_PLL_DOWN in chipcontrol, then clear the SROM
 * shadow backdoor word so the setting sticks.
 */
static void
pcie_war_noplldown(pcicore_info_t *pi)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint16 *reg16;

	ASSERT(pi->sih->buscorerev == 7);

	/* turn off serdes PLL down */
	si_corereg(pi->sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol),
	           CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);

	/*  clear srom shadow backdoor */
	reg16 = &pcieregs->sprom[SRSH_BD_OFFSET];
	W_REG(pi->osh, reg16, 0);
}
/*
 * Enable hardware out-of-band (OOB) interrupt routing for supported
 * chips. Currently only the BCM43362: sets bit 1 of the chipcommon
 * gpiocontrol register, then programs SDIO function-1 config
 * registers 0x10005-0x10007 to route the OOB signal.
 */
void dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip)
{
	uint32 gpiocontrol, addr;

	if (CHIPID(chip) == BCM43362_CHIP_ID) {
		printf("%s: Enable HW OOB for 43362\n", __FUNCTION__);
		/* Read-modify-write gpiocontrol: set bit 1. */
		addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, gpiocontrol);
		gpiocontrol = bcmsdh_reg_read(sdh, addr, 4);
		gpiocontrol |= 0x2;
		bcmsdh_reg_write(sdh, addr, 4, gpiocontrol);
		/* SDIO func-1 OOB routing registers; values per chip spec —
		 * NOTE(review): magic register meanings not documented here. */
		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10005, 0xf, NULL);
		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10006, 0x0, NULL);
		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10007, 0x2, NULL);
	}
}
/* Rename document name in document list */
/*
 * Overwrite the stored name of a document record, and — for documents
 * not resident in RAM — also rewrite its trailing filename/volume-label
 * data (resizing the record to fit).  The record is re-sorted into
 * name order afterwards.  Throws errNoDocumentName on an empty name
 * and propagates DmGetLastErr() if the record cannot be opened.
 */
void UpdateDocumentName
    (
    UInt16      index,    /* record index */
    const Char* name,     /* new document name */
    const Char* filename  /* new filename */
    )
    /* THROWS */
{
    MemHandle     handle;
    DocumentData* handlePtr;

    THROW_IF( name == NULL || *name == '\0', errNoDocumentName );

    handle = OpenRecord( plkrDocList, index );
    THROW_IF( handle == NULL, DmGetLastErr() );

    handlePtr = MemHandleLock( handle );
    /* Write the new name (with its NUL) into the fixed-layout field. */
    DmWrite( handlePtr, OFFSETOF( DocumentData, name ), name,
             StrLen( name ) + 1 );

    if ( handlePtr->location != RAM ) {
        DocumentData*   dataPtr;
        UInt16          infoSize;
        UInt16          dataSize;
        Char            volumeLabel[ LABEL_LEN ];
        UInt16          fileLength;
        UInt16          volumeLabelLength;

        /* Trailing data holds "<filename>\0<volumeLabel>\0"; save the
         * volume label before the record is resized. */
        fileLength        = StrLen( handlePtr->data ) + 1;
        volumeLabelLength = StrLen( handlePtr->data + fileLength ) + 1;
        StrNCopy( volumeLabel, handlePtr->data + fileLength,
                  volumeLabelLength );

        MemHandleUnlock( handle );

        infoSize = sizeof *dataPtr;
        dataSize = StrLen( filename ) + StrLen( volumeLabel ) + 2;
        handle   = ResizeRecord( plkrDocList, index, infoSize + dataSize );
        dataPtr  = MemHandleLock( handle );

        /* Rebuild the trailing data with the new filename. */
        DmWrite( dataPtr, infoSize, filename, StrLen( filename ) + 1 );
        DmWrite( dataPtr, infoSize + StrLen( filename ) + 1, volumeLabel,
                 StrLen( volumeLabel ) + 1 );
    }
    /* Unlocks either the original lock (RAM case) or the post-resize
     * lock taken above (non-RAM case). */
    MemHandleUnlock( handle );
    CloseRecord( handle, true );

    DmInsertionSort( plkrDocList, CompareDocumentNames, 0 );
}
/*
 * Atomically add 'val' to the shared-memory counter at 'addr' via a
 * compare-and-swap retry loop, returning the new counter value.
 * Spins up to maxspins per pass, then yields (rel_quant) or sleeps
 * (wcs_sleep) between passes; raises ERR_DBCCERR if the swap never
 * succeeds within LOCK_TRIES passes.  The 'latch' argument is unused
 * in the visible body.
 */
int4 add_inter(int val, sm_int_ptr_t addr, sm_global_latch_ptr_t latch)
{
	int4			cntrval, newcntrval, spins, maxspins, retries;
	boolean_t		cswpsuccess;
	sm_int_ptr_t volatile	cntrval_p;

	++fast_lock_count;
	maxspins = num_additional_processors ?
		MAX_LOCK_SPINS(LOCK_SPINS, num_additional_processors) : 1;
	cntrval_p = addr;	/* Need volatile context especially on Itanium */
	for (retries = LOCK_TRIES - 1; 0 < retries; retries--)
	{	/* - 1 so do rel_quant 3 times first */
		/* seems like a legitinate spin which could take advantage of transactional memory */
		for (spins = maxspins; 0 < spins; spins--)
		{
			cntrval = *cntrval_p;
			newcntrval = cntrval + val;
			/* This is (currently as of 08/2007) the only non-locking usage of compswap in GT.M.
			   We are not passing compswap an actual sm_global_latch_ptr_t addr like its function
			   would normally dictate. However, since the address of the field we want to deal
			   with is the first int in the global_latch_t, we just pass our int address properly
			   cast to the type that compswap is expecting. The assert below verifies that this
			   assumption has not changed (SE 08/2007) */
			assert(0 == OFFSETOF(global_latch_t, u.parts.latch_pid));
			IA64_ONLY(cswpsuccess = compswap_unlock(RECAST(sm_global_latch_ptr_t)cntrval_p,
								cntrval, newcntrval));
			NON_IA64_ONLY(cswpsuccess = compswap((sm_global_latch_ptr_t)cntrval_p,
							     cntrval, newcntrval));
			if (cswpsuccess)
			{
				--fast_lock_count;
				assert(0 <= fast_lock_count);
				return newcntrval;
			}
		}
		if (retries & 0x3)
			/* On all but every 4th pass, do a simple rel_quant */
			rel_quant();	/* Release processor to holder of lock (hopefully) */
		else
		{	/* On every 4th pass, we bide for awhile */
			wcs_sleep(LOCK_SLEEP);
			assert(0 == (LOCK_TRIES % 4)); /* assures there are 3 rel_quants prior to first wcs_sleep() */
		}
	}
	/* Exhausted all retries: report a cache-control error. */
	--fast_lock_count;
	assert(FALSE);
	rts_error_csa(CSA_ARG(NULL) VARLSTCNT(9) ERR_DBCCERR, 2, LEN_AND_LIT("*unknown*"), ERR_ERRCALL, 3, CALLFROM);
	return 0; /* To keep the compiler quiet */
}
/*************************************************************************
 *              RunDLL_CallEntry16
 *
 * Only exported from shell32 on Windows, probably imported
 * from shell through the 16/32 thunks.
 *
 * Marshals the 32-bit arguments into 16-bit form (segmented command
 * line, 16-bit HWND/HINSTANCE) and invokes the 16-bit entry point via
 * the WOW callback mechanism.
 */
void WINAPI RunDLL_CallEntry16( DWORD proc, HWND hwnd, HINSTANCE inst,
                                LPCSTR cmdline, INT cmdshow )
{
    SEGPTR seg_cmdline;
    WORD args16[5];

    TRACE( "proc %x hwnd %p inst %p cmdline %s cmdshow %d\n",
           proc, hwnd, inst, debugstr_a(cmdline), cmdshow );

    /* Map the command line into 16-bit addressable memory. */
    seg_cmdline = MapLS( cmdline );

    /* Arguments are pushed in reverse (PASCAL) order. */
    args16[0] = cmdshow;
    args16[1] = OFFSETOF(seg_cmdline);
    args16[2] = SELECTOROF(seg_cmdline);
    args16[3] = MapHModuleLS(inst);
    args16[4] = HWND_16(hwnd);

    WOWCallback16Ex( proc, WCB16_PASCAL, sizeof(args16), args16, NULL );
    UnMapLS( seg_cmdline );
}
/*
 * Execute a DOS device-driver request.
 * Copies the request block into DOS memory, calls the driver's
 * strategy and then interrupt routines in virtual-86 mode, copies
 * the (driver-updated) request back, and maps any driver status
 * error into a Win32 last-error code.
 */
static void DOSDEV_DoReq(void*req, DWORD dev)
{
  REQUEST_HEADER *hdr = (REQUEST_HEADER *)req;
  DOS_DEVICE_HEADER *dhdr;
  CONTEXT86 ctx;
  char *phdr;

  dhdr = PTR_REAL_TO_LIN(SELECTOROF(dev),OFFSETOF(dev));
  phdr = ((char*)DOSMEM_LOL()) + DOS_DATASEG_OFF(req);

  /* copy request to request scratch area */
  memcpy(phdr, req, hdr->size);

  /* prepare to call device driver */
  memset(&ctx, 0, sizeof(ctx));
  ctx.EFlags |= V86_FLAG;

  /* ES:BX points to request for strategy routine */
  ctx.SegEs = HIWORD(DOS_LOLSeg);
  ctx.Ebx   = DOS_DATASEG_OFF(req);

  /* call strategy routine */
  ctx.SegCs = SELECTOROF(dev);
  ctx.Eip   = dhdr->strategy;
  DPMI_CallRMProc(&ctx, 0, 0, 0);

  /* call interrupt routine */
  ctx.SegCs = SELECTOROF(dev);
  ctx.Eip   = dhdr->interrupt;
  DPMI_CallRMProc(&ctx, 0, 0, 0);

  /* completed, copy request back */
  memcpy(req, phdr, hdr->size);

  if (hdr->status & STAT_ERROR) {
    switch (hdr->status & STAT_MASK) {
    case 0x0F: /* invalid disk change */
      /* this error seems to fit the bill */
      SetLastError(ERROR_NOT_SAME_DEVICE);
      break;
    default:
      /* Driver error codes map to Win32 errors at a 0x13 offset. */
      SetLastError((hdr->status & STAT_MASK) + 0x13);
      break;
    }
  }
}
/*
 * scheduler_task_remove
 * @tcb: Task control block that is needed to be removed.
 * This function removes a finished task from the global task list. Once
 * removed user can call scheduler_task_add to run a finished task.
 */
void scheduler_task_remove(TASK *tcb)
{
    /* Lock the scheduler. */
    scheduler_lock();

    /* Task should be in finished state. */
    /* NOTE(review): in this codebase ASSERT appears to fire when its
     * condition is TRUE (cf. OS_ASSERT(... != SUCCESS) in console_open),
     * so this triggers when the task is NOT finished — confirm against
     * the ASSERT definition. */
    ASSERT(tcb->state != TASK_FINISHED);

#ifdef TASK_STATS
    /* Remove this task from global task list. */
    sll_remove(&sch_task_list, tcb, OFFSETOF(TASK, next_global));
#endif /* TASK_STATS */

    /* Enable scheduling. */
    scheduler_unlock();

} /* scheduler_task_remove */
/* Restore data for current record from history */
/*
 * Copies the HistoryData saved for the current history position back
 * into the record's MetaRecord, starting at the verticalOffset field.
 * The final Int16 of HistoryData is deliberately excluded from the
 * write (see the size expression).
 */
static void RestoreData( void )
{
    MetaRecord* meta;
    MemHandle   handle;
    UInt16      recordId;

    recordId = history.records[ history.currentRecord ].recordId;
    handle   = GetMetaHandle( recordId, false );
    meta     = MemHandleLock( handle );

    /* DmWrite is required for storage-heap records. */
    DmWrite( meta, OFFSETOF( MetaRecord, verticalOffset ),
        &history.records[ history.currentRecord ],
        sizeof( HistoryData ) - sizeof( Int16 ) );

    MemHandleUnlock( handle );
    CloseRecord( handle, true );
}
/*
 * Locates, creates, and deletes a record of a duplicate reference.
 *
 * @fragno: physical fragment number used as the AVL key.
 * @ino:    inode claiming the fragment.
 * @lfn:    logical fragment number within the inode.
 * @flags:  DB_CREATE to create missing entries, DB_INCR / DB_DECR to
 *          add or remove a claimant.
 *
 * For DB_INCR, returns true if the dup was added to the tree.
 * For DB_DECR, returns true if the dup was in the tree.
 */
int
find_dup_ref(daddr32_t fragno, fsck_ino_t ino, daddr32_t lfn, int flags)
{
	fragment_t key;
	fragment_t *dup;
	avl_index_t where;
	int added = 0;
	int removed = 0;

	/* Lazily initialize the tree on first DB_CREATE use. */
	if (avl_first(&dup_frags) == NULL) {
		if (flags & DB_CREATE)
			avl_create(&dup_frags, fragment_cmp,
			    sizeof (fragment_t),
			    OFFSETOF(fragment_t, fr_avl));
		else
			return (0);
	}

	key.fr_pfn = fragno;
	dup = avl_find(&dup_frags, (void *)&key, &where);
	/* BUGFIX: was bitwise '&' between the two conditions, which only
	 * works when DB_CREATE happens to have bit 0 set. */
	if ((dup == NULL) && (flags & DB_CREATE)) {
		dup = alloc_dup(fragno);
		avl_insert(&dup_frags, (void *)dup, where);
	}

	if (dup != NULL) {
		if (flags & DB_INCR) {
			if (debug)
				(void) printf(
				    "adding claim by ino %d as lfn %d\n",
				    ino, lfn);
			added = increment_claimant(dup, ino, lfn);
		} else if (flags & DB_DECR) {
			/*
			 * Note that dup may be invalidated by this call.
			 */
			removed = decrement_claimant(dup, ino, lfn);
			if (debug)
				(void) printf(
		    "check for claimant ino %d lfn %d returned %d\n",
				    ino, lfn, removed);
		}
	}

	return (added || removed || (dup != NULL));
}
/***********************************************************************
 *           NE_InitDLL
 *
 * Call the DLL initialization code
 */
static BOOL NE_InitDLL( NE_MODULE *pModule )
{
    SEGTABLEENTRY *pSegTable;
    WORD hInst, ds, heap;
    CONTEXT86 context;

    pSegTable = NE_SEG_TABLE( pModule );

    if (!(pModule->flags & NE_FFLAGS_LIBMODULE) ||
        (pModule->flags & NE_FFLAGS_WIN32)) return TRUE;  /*not a library*/

    /* Call USER signal handler for Win3.1 compatibility. */
    TASK_CallTaskSignalProc( USIG16_DLL_LOAD, pModule->self );

    if (!pModule->cs) return TRUE;  /* no initialization code */


    /* Registers at initialization must be:
     * cx     heap size
     * di     library instance
     * ds     data segment if any
     * es:si  command line (always 0)
     */

    memset( &context, 0, sizeof(context) );

    NE_GetDLLInitParams( pModule, &hInst, &ds, &heap );

    context.Ecx = heap;
    context.Edi = hInst;
    context.SegDs = ds;
    context.SegEs = ds;  /* who knows ... */

    /* Entry point from the segment table; ip from the module header. */
    context.SegCs = SEL(pSegTable[pModule->cs-1].hSeg);
    context.Eip   = pModule->ip;
    /* Point bp at the bp slot of the current 16-bit stack frame. */
    context.Ebp   = OFFSETOF(NtCurrentTeb()->WOW32Reserved) + (WORD)&((STACK16FRAME*)0)->bp;

    pModule->cs = 0;  /* Don't initialize it twice */
    TRACE_(dll)("Calling LibMain, cs:ip=%04lx:%04lx ds=%04lx di=%04x cx=%04x\n",
                context.SegCs, context.Eip, context.SegDs,
                LOWORD(context.Edi), LOWORD(context.Ecx) );
    wine_call_to_16_regs_short( &context, 0 );
    return TRUE;
}
/*
 * Allocate the batch's VAO and dynamic VBO and describe the full
 * interleaved VertexPointer layout: position, packed color, UV,
 * texture id, and four vec4 transform rows, all at VERTEX_POINTER_SIZE
 * stride. Leaves nothing bound on exit.
 */
void BatchRenderer::init()
{
	_vertexCount = 0;

	glGenVertexArrays(1, &_vao);
	glBindVertexArray(_vao);

	glGenBuffers(1, &_vbo);
	glBindBuffer(GL_ARRAY_BUFFER, _vbo);
	/* Dynamic draw: the buffer is re-filled each batch. */
	glBufferData(GL_ARRAY_BUFFER, BUFFER_SIZE, NULL, GL_DYNAMIC_DRAW);

	glEnableVertexAttribArray(VERTEX_INDEX);
	glEnableVertexAttribArray(COLOR_INDEX);
	glEnableVertexAttribArray(UV_INDEX);
	glEnableVertexAttribArray(TEX_INDEX);
	glEnableVertexAttribArray(TRANS_X);
	glEnableVertexAttribArray(TRANS_Y);
	glEnableVertexAttribArray(TRANS_Z);
	glEnableVertexAttribArray(TRANS_W);

	/* Color is 4 normalized unsigned bytes packed into the struct. */
	glVertexAttribPointer(VERTEX_INDEX, 3, GL_FLOAT, GL_FALSE, VERTEX_POINTER_SIZE, 0);
	glVertexAttribPointer(COLOR_INDEX, 4, GL_UNSIGNED_BYTE, GL_TRUE, VERTEX_POINTER_SIZE,
		(const GLvoid*)OFFSETOF(VertexPointer, VertexPointer::color));
	glVertexAttribPointer(UV_INDEX, 2, GL_FLOAT, GL_FALSE, VERTEX_POINTER_SIZE,
		(const GLvoid*)OFFSETOF(VertexPointer, VertexPointer::uv));
	glVertexAttribPointer(TEX_INDEX, 1, GL_FLOAT, GL_FALSE, VERTEX_POINTER_SIZE,
		(const GLvoid*)OFFSETOF(VertexPointer, VertexPointer::texture_id));
	/* Per-vertex transform matrix, one vec4 row per attribute slot. */
	glVertexAttribPointer(TRANS_X, 4, GL_FLOAT, GL_FALSE, VERTEX_POINTER_SIZE,
		(const GLvoid*)(OFFSETOF(VertexPointer, VertexPointer::transformX)));
	glVertexAttribPointer(TRANS_Y, 4, GL_FLOAT, GL_FALSE, VERTEX_POINTER_SIZE,
		(const GLvoid*)(OFFSETOF(VertexPointer, VertexPointer::transformY)));
	glVertexAttribPointer(TRANS_Z, 4, GL_FLOAT, GL_FALSE, VERTEX_POINTER_SIZE,
		(const GLvoid*)(OFFSETOF(VertexPointer, VertexPointer::transformZ)));
	glVertexAttribPointer(TRANS_W, 4, GL_FLOAT, GL_FALSE, VERTEX_POINTER_SIZE,
		(const GLvoid*)(OFFSETOF(VertexPointer, VertexPointer::transformW)));

	glBindBuffer(GL_ARRAY_BUFFER, 0);
	glBindVertexArray(0);
}
/*
 * sleep_task_re_enqueue
 * @tcb: Task's control block that is needed to be put back in the sleeping
 *  tasks list as there is a higher priority task that is needed to run before
 *  this task.
 * @from: From where this function was called.
 * This scheduler's yield function, for now this only called by scheduler API's
 * when a task is needed to put back in the scheduler list as there is an other
 * higher priority task.
 */
static void sleep_task_re_enqueue(TASK *tcb, uint8_t from)
{
    /* Only the YIELD_CANNOT_RUN case requires re-queuing; every other
     * origin is ignored. */
    if (from == YIELD_CANNOT_RUN)
    {
        /* Return the task to the sleep scheduler's ready list. */
        sll_push(&sleep_scheduler.ready_tasks, tcb, OFFSETOF(TASK, next_sleep));

        /* Task is being suspended. */
        tcb->status = TASK_SUSPENDED;
    }

} /* sleep_task_re_enqueue */
/*
 * console_open
 * @name: Console name.
 * @flags: Open flags.
 * This function will open a console node.
 */
static void *console_open(char *name, uint32_t flags)
{
    NODE_PARAM param;
    void *fd = NULL;

#ifdef CONFIG_SEMAPHORE
    /* Obtain the global data lock. */
    OS_ASSERT(semaphore_obtain(&console_data.lock, MAX_WAIT) != SUCCESS);
#endif

    /* Initialize a search parameter. */
    param.name = name;
    param.priv = (void *)fd;   /* starts NULL; search fills it on a match */

    /* First find a file system to which this call can be forwarded. */
    /* (helper name "fs_sreach_node" is an external symbol — its
     * spelling cannot be corrected here) */
    sll_search(&console_data.list, NULL, &fs_sreach_node, &param,
               OFFSETOF(CONSOLE, fs.next));

    /* If a node was found. */
    if (param.priv)
    {
        /* Use this FD, we will update it if required. */
        fd = param.priv;
    }

#ifdef CONFIG_SEMAPHORE
    /* Release the global data lock. */
    semaphore_release(&console_data.lock);
#endif

    if (fd != NULL)
    {
        /* Check if we need to call the underlying function to get a new file
         * descriptor. */
        if (((CONSOLE *)fd)->fs.open != NULL)
        {
            /* Call the underlying API to get the file descriptor. */
            fd = ((CONSOLE *)fd)->fs.open(name, flags);
        }
    }

    /* Return the file descriptor. */
    return (fd);

} /* console_open */
/*
 * net_buffer_write
 * @fd: File descriptor.
 * @data: Buffer pointer needed to be written on this descriptor.
 * @nbytes: Number of bytes, should always be size of a pointer.
 * @return: Number of bytes written.
 * This function will write a networking buffer and queue it for further
 * processing.
 */
static int32_t net_buffer_write(void *fd, uint8_t *data, int32_t nbytes)
{
    NET_BUFFER_FS *nb_fs = (NET_BUFFER_FS *)fd;
    FS_BUFFER *buffer = (FS_BUFFER *)data;

    /* Unused parameter. */
    UNUSED_PARAM(nbytes);

    /* Caller already has the lock for net buffer data. */

    /* Queue the buffer for later processing. */
    sll_append(&nb_fs->buffer_list, buffer, OFFSETOF(FS_BUFFER, next));

    /* Tell the file system that there is some data available on this file
     * descriptor. */
    fd_data_available(fd);

    /* Return the number of bytes. */
    return (sizeof(FS_BUFFER *));

} /* net_buffer_write */
/* mask&set gpio interrupt mask bits */
/*
 * Read-modify-write the ChipCommon gpiointmask register. On SI_BUS
 * platforms, where GPIOs may be shared, the effective mask is first
 * restricted by the reservation table: high-priority callers may only
 * touch reserved GPIOs, others only unreserved ones.
 */
uint32
si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
{
	si_info_t *sii;
	uint regoff;

	sii = SI_INFO(sih);
	regoff = 0;

	/* gpios could be shared on router platforms */
	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
		/* priority != 0: limit to reserved gpios;
		 * priority == 0: limit to unreserved gpios. */
		mask = priority ? (si_gpioreservation & mask) :
			((si_gpioreservation | mask) & ~(si_gpioreservation));
		val &= mask;
	}

	regoff = OFFSETOF(chipcregs_t, gpiointmask);
	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
}