// Construct the chapter-edit dialog.
//
// ppwndParent - owner window for the dialog; must be a valid pointer.
// ppkDoc      - movie document whose chapter data the dialog edits; must be valid.
// piChapter   - index of the chapter to edit, or -1 when no existing chapter
//               is selected (OnInitDialog then seeds the fields with defaults).
CChapterDlg::CChapterDlg(CWnd *ppwndParent, CMovieDoc *ppkDoc, int piChapter)
    : CDialogEx(IDD_CHAPTER, ppwndParent)
    , m_pkDoc(ppkDoc)
    , m_iChapter(piChapter)
{
    ASSERT_VALID_PTR(ppwndParent);
    ASSERT_VALID_PTR(ppkDoc);
}
/*
 * Varargs front end to method invocation on a BOOPSI object: the trailing
 * arguments form the method message, starting with MethodID.
 * Returns 0 for a NULL object; otherwise returns whatever the class
 * dispatcher returns (0 can also be a valid method result).
 */
IPTR DoMethod (Object * obj, IPTR MethodID, ...)
{
    ASSERT_VALID_PTR(obj);

    /* A NULL object is tolerated: the call degenerates to a no-op. */
    if (!obj) return 0L;

    ASSERT_VALID_PTR(OCLASS(obj));

    /* AROS_SLOWSTACKMETHODS_PRE gathers the varargs into a message packet
       (and declares `retval`); the class hook dispatches it; POST cleans up
       and returns `retval`.  The three macros form one inseparable unit. */
    AROS_SLOWSTACKMETHODS_PRE(MethodID)
    retval = CALLHOOKPKT((struct Hook *) OCLASS(obj),
                         obj,
                         AROS_SLOWSTACKMETHODS_ARG(MethodID));
    AROS_SLOWSTACKMETHODS_POST
} /* DoMethod() */
// First-time dialog setup: install the hh:mm:ss.mmm input mask on the time
// control, then seed the time and name fields either from the chapter being
// edited or with blank defaults when no chapter is selected (m_iChapter == -1).
BOOL CChapterDlg::OnInitDialog()
{
    if ( !CDialogEx::OnInitDialog() )
        return FALSE;

    // Constrain the time field to digits arranged as "__:__:__.___".
    m_wndTime.EnableMask(_T("DD DD DD DDD"), _T("__:__:__.___"), _T('0'), _T("1234567890"));
    m_wndTime.EnableGetMaskedCharsOnly(FALSE);

    if ( m_iChapter != -1 )
    {
        // Editing an existing chapter: show its stored time and name.
        CChapter *lpkEntry = m_pkDoc->m_kChapters[m_iChapter];
        ASSERT_VALID_PTR(lpkEntry);
        m_wndTime.SetWindowText(lpkEntry->m_kTime.ToString());
        m_wndName.SetWindowText(lpkEntry->m_sName);
    }
    else
    {
        // No current chapter: start from zero time and an empty name.
        m_wndTime.SetWindowText(_T("00:00:00.000"));
        m_wndName.SetWindowText(_T(""));
    }

    return TRUE;
}
IPTR DoMethodA (

/*  SYNOPSIS */
	Object * obj,
	Msg      message)

/*  FUNCTION
	Invokes a method on a BOOPSI object. The dispatcher of the class the
	object is inherited from is called. For more information about methods
	a class supports, see the class documentation.

    INPUTS
	obj     - The object on which the method is to be performed.
	message - The message. The first field is the same for all methods and
		  specifies which method is to be invoked (see
		  <intuition/classusr.h>).

    RESULT
	Class and method dependent. See the class documentation. A value of 0
	can be a valid return code but can also mean that a method is not
	supported.

    NOTES

    EXAMPLE

    BUGS

    SEE ALSO
	intuition.library/NewObjectA(), intuition.library/SetAttrsA(),
	intuition.library/GetAttr(), intuition.library/DisposeObject(),
	CoerceMethodA(), DoSuperMethodA(), <intuition/classusr.h>

******************************************************************************/
{
    ASSERT_VALID_PTR(obj);

    /* A NULL object is tolerated and simply yields 0. */
    if (obj == NULL)
	return 0L;

    ASSERT_VALID_PTR(OCLASS(obj));
    ASSERT_VALID_PTR(message);

    /* Hand the message straight to the dispatcher hook of the object's
       class. */
    return CALLHOOKPKT((struct Hook *) OCLASS(obj), obj, message);
} /* DoMethodA */
/*
 * Unlink a CVMLVMContext from the global context list, release the global
 * roots it owns, and release its storage.  Counterpart of
 * CVMLVMcontextAlloc(), which malloc()s the context.
 */
static void
CVMLVMcontextFree(CVMExecEnv *ee, CVMLVMContext *context)
{
    CVMwithAssertsOnly(CVMLVMGlobals* lvm = &CVMglobals.lvm);

    /* There should be at least one context: */
    ASSERT_VALID_PTR(lvm->contexts);
    ASSERT_VALID_PTR(context);
    ASSERT_VALID_PTR(context->prevPtr);
    ASSERT_VALID_PTR(context->lvmICell);
    ASSERT_VALID_VAL(context->id + 1 /* id can be 0 */);

    EXTRA_DEBUG_EXEC(CVMconsolePrintf("*** LVM[%d]: CVMLVMcontextFree: "
                                      "context: 0x%x\n", context->id, context));

    /* Unlink it from the list.  prevPtr points at whatever field referenced
     * this context (the list head or the previous node's next), so storing
     * context->next through it removes the node in O(1). */
    LVM_LOCK(ee);
    {
        *context->prevPtr = context->next;
        if (context->next != NULL) {
            context->next->prevPtr = context->prevPtr;
        }
    }
    LVM_UNLOCK(ee);

    /* Release the global roots owned by this context. */
    CVMID_freeGlobalRoot(ee, context->lvmICell);
    if (context->systemClassLoader != NULL) {
        CVMID_freeGlobalRoot(ee, context->systemClassLoader);
    }

    /* Invalidates entries (debug builds only) so a stale use of this
     * context trips an assertion rather than silently reading freed data. */
    CVMwithAssertsOnly({
        context->prevPtr = (CVMLVMContext**)INVALID_PTR;
        context->next = (CVMLVMContext*)INVALID_PTR;
        context->lvmICell = (CVMObjectICell*)INVALID_PTR;
        context->halting = CVM_FALSE;
        context->systemClassLoader = (CVMObjectICell*)INVALID_PTR;
        context->id = INVALID_VAL;
        memset(context->statics, (int)((char)INVALID_VAL),
               lvm->numStatics * sizeof(CVMJavaVal32));
    });

    /* FIX: the original block ended without closing the function and never
     * released the buffer malloc()ed by CVMLVMcontextAlloc(), leaking one
     * context (header + statics) per free. */
    free(context);
}
/*
 * Allocate a CVMLVMContext structure, initialize it, and
 * link it in the list of CVMLVMContexts.
 *
 * Returns NULL on out-of-memory.  The caller owns the returned context;
 * it is released with CVMLVMcontextFree().
 */
static CVMLVMContext *
CVMLVMcontextAlloc(CVMExecEnv* ee, jobject lvmObj)
{
    CVMLVMContext *context;
    CVMLVMGlobals *lvm = &CVMglobals.lvm;
    CVMUint32 numStatics = lvm->numStatics;
    const CVMJavaVal32* staticDataMaster = lvm->staticDataMaster;

    ASSERT_VALID_VAL(numStatics);
    ASSERT_VALID_PTR(staticDataMaster);

    /* Allocate the buffer.  Note that CVMLVMContext already includes
     * one slot for a static field (so subtract 1 from numStatics). */
    context = (CVMLVMContext *)
        malloc(sizeof(CVMLVMContext) + (numStatics-1) * sizeof(CVMJavaVal32));
    if (context == NULL) {
        return NULL; /* Out of memory */
    }

    if (lvmObj == NULL) {
        /* Should be invoked during the VM startup.  The main LVM is not
         * initialized.  Cannot allocate global root yet. */
        CVMassert(lvm->mainLVM == NULL);
    } else {
        /* Pin the LVM object with a global root so GC keeps it alive for
         * the lifetime of this context. */
        context->lvmICell = CVMID_getGlobalRoot(ee);
        if (context->lvmICell == NULL) {
            free(context);
            return NULL; /* Out of memory */
        }
        CVMID_icellAssign(ee, context->lvmICell, lvmObj);
    }
    context->halting = CVM_FALSE;

    /* systemClassLoader is initialized lazily in
     * CVMclassGetSystemClassLoader() */
    context->systemClassLoader = NULL;

    /* Initialize the static fields from the pristine master copy. */
    memcpy(context->statics, staticDataMaster,
           numStatics * sizeof(CVMJavaVal32));

    /* Link it into the linked list.  prevPtr always points at the field
     * (head pointer or previous node's next) that references this node,
     * which is what makes O(1) unlink possible in CVMLVMcontextFree(). */
    LVM_LOCK(ee);
    context->id = lvm->numberOfContexts++;
    {
        CVMLVMContext **list= &lvm->contexts;
        context->prevPtr = list;
        context->next = *list;
        if (*list != NULL) {
            /* At least one context exists; the main context must
             * have been initialized. */
            CVMassert(lvm->mainLVM != NULL);
            (*list)->prevPtr = &context->next;
        }
        *list = context;
    }
    LVM_UNLOCK(ee);

    EXTRA_DEBUG_EXEC(CVMconsolePrintf("*** LVM[%d]: CVMLVMcontextAlloc: "
                                      "context: 0x%x\n", context->id, context));
    return context;
}
/**
 * Create a new process, starting at the provided entry point.
 *
 * \param name        Process name (unused unless logging/monitoring is
 *                    compiled in, hence UNUSED_ARG).
 * \param entry       Function the new process begins executing.
 * \param data        Opaque user value stored in the process structure.
 * \param stack_size  Stack size in bytes; with CONFIG_KERN_HEAP a value of 0
 *                    selects KERN_MINSTACKSIZE.
 * \param stack_base  Caller-provided stack, or NULL to allocate one from the
 *                    kernel heap (only legal with CONFIG_KERN_HEAP).
 *
 * \note The function
 * \code
 * proc_new(entry, data, stacksize, stack)
 * \endcode
 * is a more convenient way to create a process, as you don't have to specify
 * the name.
 *
 * \return Process structure of new created process
 *         if successful, NULL otherwise.
 */
struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)(void), iptr_t data, size_t stack_size, cpu_stack_t *stack_base)
{
	Process *proc;
	LOG_INFO("name=%s", name);
#if CONFIG_KERN_HEAP
	bool free_stack = false;

	/*
	 * Free up resources of a zombie process.
	 *
	 * We're implementing a kind of lazy garbage collector here for
	 * efficiency reasons: we can avoid to introduce overhead into another
	 * kernel task dedicated to free up resources (e.g., idle) and we're
	 * not introducing any overhead into the scheduler after a context
	 * switch (that would be *very* bad, because the scheduler runs with
	 * IRQ disabled).
	 *
	 * In this way we are able to release the memory of the zombie tasks
	 * without disabling IRQs and without introducing any significant
	 * overhead in any other kernel task.
	 */
	proc_freeZombies();

	/* Did the caller provide a stack for us? */
	if (!stack_base)
	{
		/* Did the caller specify the desired stack size? */
		if (!stack_size)
			stack_size = KERN_MINSTACKSIZE;

		/* Allocate stack dynamically */
		PROC_ATOMIC(stack_base =
			(cpu_stack_t *)heap_allocmem(&proc_heap, stack_size));
		if (stack_base == NULL)
			return NULL;

		free_stack = true;
	}
#else // CONFIG_KERN_HEAP
	/* Stack must have been provided by the user */
	ASSERT_VALID_PTR(stack_base);
	ASSERT(stack_size);
#endif // CONFIG_KERN_HEAP

#if CONFIG_KERN_MONITOR
	/*
	 * Fill-in the stack with a special marker to help debugging.
	 * On 64bit platforms, CONFIG_KERN_STACKFILLCODE is larger
	 * than an int, so the (int) cast is required to silence the
	 * warning for truncating its size.
	 */
	memset(stack_base, (int)CONFIG_KERN_STACKFILLCODE, stack_size);
#endif

	/* Initialize the process control block.  The Process struct is carved
	 * out of the stack region itself: at its bottom when the stack grows
	 * upward, at its top otherwise. */
	if (CPU_STACK_GROWS_UPWARD)
	{
		proc = (Process *)stack_base;
		proc->stack = stack_base + PROC_SIZE_WORDS;
		// On some architecture stack should be aligned, so we do it.
		proc->stack = (cpu_stack_t *)((uintptr_t)proc->stack +
			(sizeof(cpu_aligned_stack_t) -
				((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t))));
		if (CPU_SP_ON_EMPTY_SLOT)
			proc->stack++;
	}
	else
	{
		proc = (Process *)(stack_base + stack_size / sizeof(cpu_stack_t) - PROC_SIZE_WORDS);
		// On some architecture stack should be aligned, so we do it.
		proc->stack = (cpu_stack_t *)((uintptr_t)proc -
			((uintptr_t)proc % sizeof(cpu_aligned_stack_t)));
		if (CPU_SP_ON_EMPTY_SLOT)
			proc->stack--;
	}
	/* Ensure stack is aligned */
	ASSERT((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t) == 0);

	/* The usable stack shrinks by the space reserved for the PCB. */
	stack_size -= PROC_SIZE_WORDS * sizeof(cpu_stack_t);
	proc_initStruct(proc);
	proc->user_data = data;

/* NOTE(review): `|` (bitwise) rather than `||` — works because both config
 * macros expand to 0/1, but confirm that is the project-wide convention. */
#if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR
	proc->stack_base = stack_base;
	proc->stack_size = stack_size;
	#if CONFIG_KERN_HEAP
	/* Remember to give the stack back to the heap when the process dies. */
	if (free_stack)
		proc->flags |= PF_FREESTACK;
	#endif
#endif
	proc->user_entry = entry;
	CPU_CREATE_NEW_STACK(proc->stack);

#if CONFIG_KERN_MONITOR
	monitor_add(proc, name);
#endif

	/* Add to ready list */
	ATOMIC(SCHED_ENQUEUE(proc));

	return proc;
}