/*
 * Prepare a freshly opened xenbus device file object: hook the per-file
 * cleanup/close handlers and read/write queue callbacks, initialise the
 * per-file xenbus state, and create the manual-dispatch queue that holds
 * pending xenbus I/O.  Returns the status of the queue creation.
 */
NTSTATUS
XenBus_DeviceFileInit(WDFDEVICE device, PWDF_IO_QUEUE_CONFIG queue_config, WDFFILEOBJECT file_object) {
  PXENPCI_DEVICE_INTERFACE_DATA xpdid = GetXpdid(file_object);
  WDF_IO_QUEUE_CONFIG manual_queue_config;
  NTSTATUS status;

  FUNCTION_ENTER();

  /* per-file-object teardown hooks */
  xpdid->EvtFileCleanup = XenBus_EvtFileCleanup;
  xpdid->EvtFileClose = XenBus_EvtFileClose;

  /* data-path callbacks (EvtIoDeviceControl is deliberately not hooked up) */
  queue_config->EvtIoRead = XenBus_EvtIoRead;
  queue_config->EvtIoWrite = XenBus_EvtIoWrite;

  /* per-file xenbus bookkeeping */
  InitializeListHead(&xpdid->xenbus.read_list_head);
  InitializeListHead(&xpdid->xenbus.watch_list_head);
  xpdid->xenbus.len = 0;

  /* manual queue: requests are pulled off explicitly as data arrives */
  WDF_IO_QUEUE_CONFIG_INIT(&manual_queue_config, WdfIoQueueDispatchManual);
  status = WdfIoQueueCreate(device, &manual_queue_config, WDF_NO_OBJECT_ATTRIBUTES, &xpdid->xenbus.io_queue);
  if (!NT_SUCCESS(status)) {
    KdPrint(("Error creating queue 0x%x\n", status));
  }

  FUNCTION_EXIT();
  return status;
}
/*
 * libmng read-data callback: hands libmng up to bytes_requested bytes of the
 * MNG data buffered in the GtkMngView ("mng food"), advancing the view's
 * bytes_eaten cursor.  Returns MNG_TRUE when data was supplied, MNG_FALSE
 * (with *bytes_read == 0) when the buffer is exhausted or absent.
 *
 * NOTE(review): the original body began with "*bytes_read = 0; return
 * MNG_FALSE;", which made everything below unreachable - it looks like a
 * leftover debugging stub and has been removed.  Confirm that disabling
 * the read path was not intentional.
 */
static mng_bool
mng_read_data_callback (mng_handle mng_h,
                        mng_ptr buffer,
                        mng_uint32 bytes_requested,
                        mng_uint32 * bytes_read)
{
  guint available_mng_food;
  GtkMngView * mng_view;

  FUNCTION_ENTRY();

  mng_view = GTK_MNG_VIEW (mng_get_userdata (mng_h));
  available_mng_food = mng_view->bytes_to_eat - mng_view->bytes_eaten;

  if (available_mng_food > 0 && mng_view->mng_food != NULL)
    {
      /* serve the smaller of what we have and what libmng asked for */
      * bytes_read = (mng_uint32) MIN ((mng_uint32) available_mng_food, bytes_requested);
      memcpy (buffer, mng_view->mng_food + mng_view->bytes_eaten, * bytes_read);
      mng_view->bytes_eaten += * bytes_read;

      FUNCTION_EXIT();
      return MNG_TRUE;
    }
  else
    {
      /* nothing left to feed libmng */
      * bytes_read = 0;

      FUNCTION_EXIT();
      return MNG_FALSE;
    }
}
/*
 * Callback invoked by the xenpci bus driver for device-level events.
 * Handles backend state changes (records the new state and signals
 * backend_event), suspend (disconnect), and resume (reconnect).
 *
 * Fix: the "same state" branch called FUNCTION_EXIT() but then fell
 * through and re-processed the unchanged state (logging a bogus
 * transition and re-signalling backend_event); it now returns.
 */
VOID
XenUsb_DeviceCallback(PVOID context, ULONG callback_type, PVOID value) {
  PXENUSB_DEVICE_DATA xudd = (PXENUSB_DEVICE_DATA)context;
  ULONG state;

  FUNCTION_ENTER();
  switch (callback_type) {
  case XN_DEVICE_CALLBACK_BACKEND_STATE:
    state = (ULONG)(ULONG_PTR)value;
    if (state == xudd->backend_state) {
      /* unchanged backend state - nothing to do */
      FUNCTION_MSG("same state %d\n", state);
      FUNCTION_EXIT();
      return;
    }
    FUNCTION_MSG("XenBusState = %d -> %d\n", xudd->backend_state, state);
    xudd->backend_state = state;
    KeSetEvent(&xudd->backend_event, 0, FALSE);
    break;
  case XN_DEVICE_CALLBACK_SUSPEND:
    FUNCTION_MSG("XN_DEVICE_CALLBACK_SUSPEND");
    XenUsb_Disconnect(xudd, TRUE);
    break;
  case XN_DEVICE_CALLBACK_RESUME:
    FUNCTION_MSG("XN_DEVICE_CALLBACK_RESUME");
    xudd->device_state = DEVICE_STATE_INITIALISING;
    XenUsb_Connect(xudd, TRUE);
    // some sort of notify to kick things off?
    break;
  }
  FUNCTION_EXIT();
}
/*
 * (Re)initialise the libmng decoder attached to this GtkMngView: create a
 * fresh MNG handle, register all stream/display callbacks, select the
 * RGB8+A8 canvas style and seed the background colour from the widget's
 * GTK style.  Returns TRUE on success, FALSE if the handle could not be
 * created or any callback failed to register (the handle is cleaned up
 * again in that case).
 */
static gboolean
gtk_mng_view_init_libmng (GtkMngView * mng_view)
{
  FUNCTION_ENTRY();

  GtkWidget * widget;

  g_return_val_if_fail (IS_GTK_MNG_VIEW (mng_view), FALSE);

  /* throw away any previous decoder state before starting over */
  if (mng_view->MNG_handle)
    mng_cleanup (&mng_view->MNG_handle);

  /* memory management is delegated to the glib-based callbacks */
  mng_view->MNG_handle = mng_initialize (mng_view,
                                         mng_malloc_callback,
                                         mng_free_callback,
                                         MNG_NULL);

  if (mng_view->MNG_handle == MNG_NULL)
    {
      FUNCTION_EXIT();
      return FALSE;
    }

  /* keep chunks in memory so the stream can be re-examined later */
  mng_set_storechunks(mng_view->MNG_handle, MNG_TRUE);
  //mng_set_dfltimggamma(mng_view->MNG_handle, 5);
  //mng_set_displaygamma(mng_view->MNG_handle, 4);

  /* register every callback libmng needs; bail out if any registration fails */
  if (mng_setcb_openstream (mng_view->MNG_handle, mng_open_stream_callback) != MNG_NOERROR ||
      mng_setcb_closestream (mng_view->MNG_handle, mng_close_stream_callback) != MNG_NOERROR ||
      mng_setcb_readdata (mng_view->MNG_handle, mng_read_data_callback) != MNG_NOERROR ||
      mng_setcb_processheader (mng_view->MNG_handle, mng_process_header_callback) != MNG_NOERROR ||
      mng_setcb_processmend (mng_view->MNG_handle, mng_process_mend_callback) != MNG_NOERROR ||
      mng_setcb_processterm (mng_view->MNG_handle, mng_process_term_callback) != MNG_NOERROR ||
      mng_setcb_settimer (mng_view->MNG_handle, mng_set_timer_callback) != MNG_NOERROR ||
      mng_setcb_gettickcount (mng_view->MNG_handle, mng_get_tickcount_callback) != MNG_NOERROR ||
      mng_setcb_getcanvasline (mng_view->MNG_handle, mng_get_canvas_line_callback) != MNG_NOERROR ||
      mng_setcb_getalphaline (mng_view->MNG_handle, mng_get_alpha_line_callback) != MNG_NOERROR ||
      mng_setcb_refresh (mng_view->MNG_handle, mng_refresh_callback) != MNG_NOERROR)
    {
      mng_cleanup (&mng_view->MNG_handle);
      FUNCTION_EXIT();
      return FALSE;
    }

  //mng_set_suspensionmode(mng_view->MNG_handle, MNG_TRUE);
  mng_set_canvasstyle (mng_view->MNG_handle, MNG_CANVAS_RGB8_A8);

  /* the widget must be realized before its style colours can be read */
  widget = GTK_WIDGET (mng_view);
  if (!GTK_WIDGET_REALIZED (widget))
    gtk_widget_realize (widget);

  /* use the widget's normal-state background as the MNG background colour */
  mng_set_bgcolor (mng_view->MNG_handle,
                   widget->style->bg[GTK_STATE_NORMAL].red,
                   widget->style->bg[GTK_STATE_NORMAL].green,
                   widget->style->bg[GTK_STATE_NORMAL].blue);

  FUNCTION_EXIT();
  return TRUE;
}
/*
 * libmng close-stream callback.  The viewer keeps no per-stream resources,
 * so there is nothing to release; always report success.
 */
static mng_bool
mng_close_stream_callback (mng_handle mng_h)
{
  FUNCTION_ENTRY();
  FUNCTION_EXIT();

  return MNG_TRUE;
}
/*
 * GTK expose handler: repaint the part of the MNG canvas that intersects
 * the damaged area, then let libmng resume any paused display cycle.
 * Always returns FALSE so the event keeps propagating.
 */
static gboolean
gtk_mng_view_expose (GtkWidget * widget, GdkEventExpose * event)
{
  GtkMngView * view;
  GdkRectangle canvas;
  GdkRectangle damaged;

  FUNCTION_ENTRY();

  g_return_val_if_fail (IS_GTK_MNG_VIEW (widget), FALSE);
  g_return_val_if_fail (event != NULL, FALSE);

  if (GTK_WIDGET_REALIZED (widget))
    {
      view = GTK_MNG_VIEW (widget);

      /* clip the exposed area against the MNG canvas extent */
      canvas.x = 0;
      canvas.y = 0;
      canvas.width = view->width;
      canvas.height = view->height;

      if (gdk_rectangle_intersect (&canvas, &event->area, &damaged))
        gtk_mng_view_paint (view, &damaged);

      mng_display_resume (view->MNG_handle);
    }

  FUNCTION_EXIT();
  return FALSE;
}
/*
 * libmng allocation callback: returns a zero-initialised block of
 * how_many bytes, allocated through glib.
 */
static mng_ptr
mng_malloc_callback (mng_size_t how_many)
{
  mng_ptr chunk;

  FUNCTION_ENTRY();
  chunk = (mng_ptr) g_new0 (gchar, how_many);
  FUNCTION_EXIT();

  return chunk;
}
/*
 * NDIS SetInformation handler: look the OID up in the xennet_oids table
 * (terminated by a zero oid entry), validate the buffer length, and
 * dispatch to the entry's set_routine.
 *
 * Fix: the three early-return paths (unknown OID, short buffer, no set
 * routine) previously returned without FUNCTION_EXIT(), leaving the
 * entry/exit trace unbalanced.
 */
NDIS_STATUS
XenNet_SetInformation(
  NDIS_HANDLE adapter_context,
  NDIS_OID oid,
  PVOID information_buffer,
  ULONG information_buffer_length,
  PULONG bytes_read,
  PULONG bytes_needed) {
  NTSTATUS status;
  int i;

  FUNCTION_ENTER();

  /* linear scan; table is terminated by an entry with oid == 0 */
  for (i = 0; xennet_oids[i].oid && xennet_oids[i].oid != oid; i++);

  if (!xennet_oids[i].oid) {
    FUNCTION_MSG("Unsupported OID %08x\n", oid);
    FUNCTION_EXIT();
    return NDIS_STATUS_NOT_SUPPORTED;
  }
  if (information_buffer_length < xennet_oids[i].min_length) {
    /* caller must retry with at least min_length bytes */
    FUNCTION_MSG("%s Set InformationBufferLength %d < min_length %d\n", xennet_oids[i].oid_name, information_buffer_length, xennet_oids[i].min_length);
    *bytes_needed = xennet_oids[i].min_length;
    FUNCTION_EXIT();
    return NDIS_STATUS_BUFFER_TOO_SHORT;
  }
  if (!xennet_oids[i].set_routine) {
    /* OID is known but read-only for this driver */
    FUNCTION_MSG("%s Set not supported\n", xennet_oids[i].oid_name);
    FUNCTION_EXIT();
    return NDIS_STATUS_NOT_SUPPORTED;
  }
  FUNCTION_MSG("%s\n", xennet_oids[i].oid_name);
  status = xennet_oids[i].set_routine(adapter_context, information_buffer, information_buffer_length, bytes_read, bytes_needed);
  FUNCTION_EXIT();
  return status;
}
/*
 * This function changes the size of a block of memory that was previously allocated with malloc().
 *
 * Dispatches to the local memory manager (LMM001) instance owning whichever
 * heap bank (1-3) the pointer's address falls into.
 *
 * NOTE(review): if MemPtr lies in none of the three banks - including
 * MemPtr == NULL, where standard realloc would behave like malloc - no
 * reallocation happens and NULL is returned; confirm callers expect this.
 * NOTE(review): the range checks use "<= start + LENGTH", which also
 * accepts the one-past-the-end address - verify that is intentional.
 */
void * GMM001_realloc (uint32_t size, void * MemPtr)
{
  void * MemPtr1 = NULL;

  FUNCTION_ENTRY(GID_GMM001, (uint32_t)GMM001_FUNCTION_ENTRY);

  /*<<<DD_GMM001_API_3_1>>>*/
  /* block lives in heap bank 1 */
  if (((uint32_t)MemPtr >= (uint32_t)Heap_Bank1_Start) &&
      ((uint32_t)MemPtr <= ((uint32_t)Heap_Bank1_Start + LENGTH1)))
  {
    MemPtr1 = LMM001_realloc(&GMM001_handle0, MemPtr, size);
  }
  /*<<<DD_GMM001_API_3_2>>>*/
  /* block lives in heap bank 2 */
  else if (((uint32_t)MemPtr >= (uint32_t)Heap_Bank2_Start) &&
           ((uint32_t)MemPtr <= ((uint32_t)Heap_Bank2_Start + LENGTH2)))
  {
    MemPtr1 = LMM001_realloc(&GMM001_handle1, MemPtr, size);
  }
  /*<<<DD_GMM001_API_3_3>>>*/
  /* block lives in heap bank 3 */
  else if (((uint32_t)MemPtr >= (uint32_t)Heap_Bank3_Start) &&
           ((uint32_t)MemPtr <= ((uint32_t)Heap_Bank3_Start + LENGTH3)))
  {
    MemPtr1 = LMM001_realloc(&GMM001_handle2, MemPtr, size);
  }

  FUNCTION_EXIT(GID_GMM001, (uint32_t)GMM001_FUNCTION_EXIT);

  return MemPtr1;
}
GtkMngView * gtk_mng_view_new (void) { FUNCTION_ENTRY(); FUNCTION_EXIT(); return GTK_MNG_VIEW (g_object_new(GTK_MNG_VIEW_TYPE, NULL)); }
/*
 * Work item run after a Xen suspend/resume cycle: frees the work item that
 * scheduled it, restarts the TX and RX paths around a backend reconnect,
 * then publishes the new suspend/resume state to the PDO and notifies it
 * via its event channel.
 */
static VOID
XenNet_ResumeWorkItem(PDEVICE_OBJECT device_object, PVOID context)
{
  struct xennet_info *xi = context;
  KIRQL old_irql;

  UNREFERENCED_PARAMETER(device_object);

  FUNCTION_ENTER();

  /* the work item has served its purpose once we are running */
  ASSERT(xi->resume_work_item);
  IoFreeWorkItem(xi->resume_work_item);

  /* quiesce both data paths, reconnect the backend, then let them run again
     - the Start/ConnectBackend/End ordering is deliberate */
  XenNet_TxResumeStart(xi);
  XenNet_RxResumeStart(xi);
  XenNet_ConnectBackend(xi);
  XenNet_RxResumeEnd(xi);
  XenNet_TxResumeEnd(xi);

  /* under the resume lock: clear the work item pointer, mirror the pdo's
     suspend/resume state into the fdo state, and kick the pdo's channel */
  KeAcquireSpinLock(&xi->resume_lock, &old_irql);
  xi->resume_work_item = NULL;
  KdPrint((__DRIVER_NAME " *Setting suspend_resume_state_fdo = %d\n", xi->device_state->suspend_resume_state_pdo));
  xi->device_state->suspend_resume_state_fdo = xi->device_state->suspend_resume_state_pdo;
  KdPrint((__DRIVER_NAME " *Notifying event channel %d\n", xi->device_state->pdo_event_channel));
  xi->vectors.EvtChn_Notify(xi->vectors.context, xi->device_state->pdo_event_channel);
  KeReleaseSpinLock(&xi->resume_lock, old_irql);

  FUNCTION_EXIT();
}
/*
 * Run function0 on CPU 0 and functionN on every other CPU with all CPUs
 * rendezvoused at sync_level (HIGH_LEVEL).  Queues one high-importance DPC
 * per processor, waits for the rendezvous to finish, then frees the shared
 * state once no DPC can still touch it.
 *
 * Fixes: the nonpaged-pool allocation was used without a NULL check
 * (NULL dereference under memory pressure), and context was tagged
 * UNREFERENCED_PARAMETER even though it is stored into highsync_info and
 * handed to the callbacks.
 */
VOID
XenPci_HighSync(PXENPCI_HIGHSYNC_FUNCTION function0, PXENPCI_HIGHSYNC_FUNCTION functionN, PVOID context) {
  ULONG ActiveProcessorCount;
  ULONG i;
  highsync_info_t *highsync_info;
  KIRQL old_irql;

  FUNCTION_ENTER();

  highsync_info = ExAllocatePoolWithTag(NonPagedPool, sizeof(highsync_info_t), XENPCI_POOL_TAG);
  if (!highsync_info) {
    /* without the shared rendezvous state we cannot proceed safely */
    KdPrint((__DRIVER_NAME " XenPci_HighSync: allocation failed\n"));
    FUNCTION_EXIT();
    return;
  }
  RtlZeroMemory(highsync_info, sizeof(highsync_info_t));
  KeInitializeEvent(&highsync_info->highsync_complete_event, SynchronizationEvent, FALSE);
  highsync_info->function0 = function0;
  highsync_info->functionN = functionN;
  highsync_info->context = context;
  highsync_info->sync_level = HIGH_LEVEL;

#if (NTDDI_VERSION >= NTDDI_WINXP)
  ActiveProcessorCount = (ULONG)KeNumberProcessors;
#else
  ActiveProcessorCount = (ULONG)*KeNumberProcessors;
#endif

  /* Go to HIGH_LEVEL to prevent any races with Dpc's on the current processor */
  KeRaiseIrql(highsync_info->sync_level, &old_irql);
  highsync_info->do_spin = TRUE;
  for (i = 0; i < ActiveProcessorCount; i++) {
    /* CPU 0 runs function0, every other CPU runs functionN */
    if (i == 0)
      KeInitializeDpc(&highsync_info->dpcs[i], XenPci_HighSyncCallFunction0, highsync_info);
    else
      KeInitializeDpc(&highsync_info->dpcs[i], XenPci_HighSyncCallFunctionN, highsync_info);
    KeSetTargetProcessorDpc(&highsync_info->dpcs[i], (CCHAR)i);
    KeSetImportanceDpc(&highsync_info->dpcs[i], HighImportance);
    KdPrint((__DRIVER_NAME " queuing Dpc for CPU %d\n", i));
    KeInsertQueueDpc(&highsync_info->dpcs[i], NULL, NULL);
  }
  KdPrint((__DRIVER_NAME " All Dpc's queued\n"));

  KeMemoryBarrier();
  KeLowerIrql(old_irql);

  KdPrint((__DRIVER_NAME " Waiting for highsync_complete_event\n"));
  KeWaitForSingleObject(&highsync_info->highsync_complete_event, Executive, KernelMode, FALSE, NULL);

#if (NTDDI_VERSION >= NTDDI_WINXP)
  /* make sure no DPC still references highsync_info before freeing it */
  KeFlushQueuedDpcs();
#else
  {
    /* just wait 1 second until all DPC's finish - not ideal but it's only for W2K */
    LARGE_INTEGER interval;
    interval.QuadPart = -1 * 1000 * 1000 * 10; /* 1 second */
    KeDelayExecutionThread(KernelMode, FALSE, &interval);
  }
#endif
  ExFreePoolWithTag(highsync_info, XENPCI_POOL_TAG);

  FUNCTION_EXIT();
}
/* called with urb ring lock held */
/*
 * Drain the software queue of partial pvurbs onto the shared urb ring,
 * stopping when the ring-id freelist is exhausted, then push the producer
 * index and notify the backend via the event channel if required.
 */
static VOID
PutRequestsOnRing(PXENUSB_DEVICE_DATA xudd) {
  partial_pvurb_t *partial_pvurb;
  uint16_t id;
  int notify;

  FUNCTION_ENTER();
  FUNCTION_MSG("IRQL = %d\n", KeGetCurrentIrql());

  /* pop entries until the queue is empty (head == list anchor) */
  while ((partial_pvurb = (partial_pvurb_t *)RemoveHeadList((PLIST_ENTRY)&xudd->partial_pvurb_queue)) != (partial_pvurb_t *)&xudd->partial_pvurb_queue) {
    FUNCTION_MSG("partial_pvurb = %p\n", partial_pvurb);
    /* if this partial_pvurb is cancelling another we don't need to check if the
       cancelled partial_pvurb is on the ring - that is taken care of in HandleEvent */
    id = get_id_from_freelist(xudd->req_id_ss);
    if (id == (uint16_t)-1) {
      /* ring is full: put the entry back at the head and stop for now */
      FUNCTION_MSG("no free ring slots\n");
      InsertHeadList(&xudd->partial_pvurb_queue, &partial_pvurb->entry);
      break;
    }
    /* track the in-flight request by ring id until the backend responds */
    InsertTailList(&xudd->partial_pvurb_ring, &partial_pvurb->entry);
    xudd->partial_pvurbs[id] = partial_pvurb;
    partial_pvurb->req.id = id;
    /* copy the request into the next free ring slot and advance the producer */
    *RING_GET_REQUEST(&xudd->urb_ring, xudd->urb_ring.req_prod_pvt) = partial_pvurb->req;
    xudd->urb_ring.req_prod_pvt++;
  }

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xudd->urb_ring, notify);
  if (notify) {
    FUNCTION_MSG("Notifying\n");
    XnNotify(xudd->handle, xudd->event_channel);
  }

  FUNCTION_EXIT();
}
/*
 * CPU-0 side of the kernel TPR patching pass: scan the region described by
 * the patch_info and patch every recognised TPR-access instruction in
 * place, recording each patched address.  Addresses that reference
 * LAPIC_TASKPRI but were not patched are recorded separately and logged as
 * "Unpatch" sites.  Runs while all other CPUs are held at HIGH_LEVEL.
 *
 * NOTE(review): writes into patch_positions / potential_patch_positions are
 * not bounds-checked here - confirm both arrays are sized for the worst
 * case over pi->length.
 */
static VOID
XenPci_DoPatchKernel0(PVOID context) {
  patch_info_t *pi = context;
  ULONG i;
  ULONG high_level_tpr;
  ULONG patch_position_index = 0;
  ULONG potential_patch_position_index = 0;

  FUNCTION_ENTER();

  high_level_tpr = SaveTpr();
  /* we know all the other CPUs are at HIGH_LEVEL so set them all to the same as cpu 0 */
  for (i = 1; i < MAX_VIRT_CPUS; i++)
    SaveTprProcValue(i, high_level_tpr);

  /* we can't use KdPrint while patching as it may involve the TPR while we are patching it */
  for (i = 0; i < pi->length; i++) {
    if (XenPci_TestAndPatchInstruction((PUCHAR)pi->base + i)) {
      /* instruction recognised and patched in place */
      patch_positions[patch_position_index++] = (PUCHAR)pi->base + i;
    } else if (*(PULONG)((PUCHAR)pi->base + i) == LAPIC_TASKPRI) {
      /* TPR reference we did not patch - remember it for diagnostics */
      potential_patch_positions[potential_patch_position_index++] = (PUCHAR)pi->base + i;
    }
  }

  /* safe to log again now that patching is finished */
  for (i = 0; i < patch_position_index; i++)
    FUNCTION_MSG("Patch added at %p\n", patch_positions[i]);
  for (i = 0; i < potential_patch_position_index; i++)
    FUNCTION_MSG("Unpatch TPR address found at %p\n", potential_patch_positions[i]);

  FUNCTION_EXIT();
}
/*
 * Per-file-object cleanup: tear down every xenstore watch this file object
 * registered.  The spinlock is dropped around XenBus_RemWatch because that
 * call cannot be made while holding the lock; the loop re-checks the list
 * after each re-acquisition, so entries added or removed in between are
 * handled safely.
 */
static VOID
XenBus_EvtFileCleanup(WDFFILEOBJECT file_object) {
  PXENPCI_DEVICE_INTERFACE_DATA xpdid = GetXpdid(file_object);
  PXENPCI_DEVICE_DATA xpdd = GetXpdd(WdfFileObjectGetDevice(file_object));
  watch_context_t *watch_context;
  KIRQL old_irql;
  PCHAR msg;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xpdid->lock, &old_irql);
  while (!IsListEmpty(&xpdid->xenbus.watch_list_head)) {
    /* detach one watch under the lock, then release it to call RemWatch */
    watch_context = (watch_context_t *)RemoveHeadList(&xpdid->xenbus.watch_list_head);
    KeReleaseSpinLock(&xpdid->lock, old_irql);
    msg = XenBus_RemWatch(xpdd, XBT_NIL, watch_context->path, XenPci_IoWatch, watch_context);
    if (msg != NULL) {
      /* RemWatch reports failure via an allocated message string */
      KdPrint((__DRIVER_NAME " Error freeing watch (%s)\n", msg));
      XenPci_FreeMem(msg);
    }
    ExFreePoolWithTag(watch_context, XENPCI_POOL_TAG);
    /* drop the reference taken when the watch was registered */
    WdfObjectDereference(file_object);
    KeAcquireSpinLock(&xpdid->lock, &old_irql);
  }
  KeReleaseSpinLock(&xpdid->lock, old_irql);

  FUNCTION_EXIT();
}
/*
 * Per-file-object close handler.  Intentionally empty: all per-file
 * teardown happens in XenBus_EvtFileCleanup.
 */
static VOID
XenBus_EvtFileClose(WDFFILEOBJECT file_object) {
  UNREFERENCED_PARAMETER(file_object);

  FUNCTION_ENTER();
  FUNCTION_EXIT();
}
/*
 * NDIS PnP event notification handler: purely informational - logs the
 * event and takes no action.
 *
 * Fix: removed UNREFERENCED_PARAMETER(PnPEvent); the parameter IS used in
 * the switch below, so the macro was misleading.
 */
VOID
XenNet_PnPEventNotify(
  IN NDIS_HANDLE MiniportAdapterContext,
  IN NDIS_DEVICE_PNP_EVENT PnPEvent,
  IN PVOID InformationBuffer,
  IN ULONG InformationBufferLength
)
{
  UNREFERENCED_PARAMETER(MiniportAdapterContext);
  UNREFERENCED_PARAMETER(InformationBuffer);
  UNREFERENCED_PARAMETER(InformationBufferLength);

  FUNCTION_ENTER();
  switch (PnPEvent) {
  case NdisDevicePnPEventSurpriseRemoved:
    KdPrint((__DRIVER_NAME " NdisDevicePnPEventSurpriseRemoved\n"));
    break;
  case NdisDevicePnPEventPowerProfileChanged :
    KdPrint((__DRIVER_NAME " NdisDevicePnPEventPowerProfileChanged\n"));
    break;
  default:
    KdPrint((__DRIVER_NAME " %d\n", PnPEvent));
    break;
  }
  FUNCTION_EXIT();
}
/*
 * Special-file usage notification for a xenpci child PDO.  Logs the
 * notification; for hibernation it also records the flag so the power-down
 * path can treat D3 as prepare-for-hibernation.
 */
static VOID
XenPciPdo_EvtDeviceUsageNotification(WDFDEVICE device, WDF_SPECIAL_FILE_TYPE notification_type, BOOLEAN is_in_notification_path)
{
  PXENPCI_PDO_DEVICE_DATA xppdd = GetXppdd(device);

  FUNCTION_ENTER();
  FUNCTION_MSG("path = %s\n", xppdd->path);

  switch (notification_type) {
  case WdfSpecialFilePaging:
    FUNCTION_MSG("notification_type = Paging, flag = %d\n", is_in_notification_path);
    break;
  case WdfSpecialFileHibernation:
    /* remembered so D0Exit can substitute WdfPowerDevicePrepareForHibernation for D3 */
    xppdd->hiber_usage_kludge = is_in_notification_path;
    FUNCTION_MSG("notification_type = Hibernation, flag = %d\n", is_in_notification_path);
    break;
  case WdfSpecialFileDump:
    FUNCTION_MSG("notification_type = Dump, flag = %d\n", is_in_notification_path);
    break;
  default:
    FUNCTION_MSG("notification_type = %d, flag = %d\n", notification_type, is_in_notification_path);
    break;
  }

  FUNCTION_EXIT();
}
/*
 * WDF cancel callback for an in-flight pvurb request.  Under the urb ring
 * lock: (1) partials still on the software queue are simply removed and
 * freed; (2) partials already on the shared ring cannot be withdrawn, so a
 * paired "unlink" request is queued for each of them.  If any references
 * remain the unlinks are pushed onto the ring and the request completes
 * later via the response path; otherwise the request is completed here
 * with STATUS_CANCELLED.
 *
 * NOTE(review): the first loop drains EVERY entry on partial_pvurb_queue
 * and counts each against this pvurb's ref - confirm the queue can only
 * contain partials belonging to the request being cancelled.
 */
VOID
XenUsb_EvtRequestCancelPvUrb(WDFREQUEST request) {
  WDFDEVICE device = WdfIoQueueGetDevice(WdfRequestGetIoQueue(request));
  PXENUSB_DEVICE_DATA xudd = GetXudd(device);
  WDF_REQUEST_PARAMETERS wrp;
  partial_pvurb_t *partial_pvurb;
  pvurb_t *pvurb;
  KIRQL old_irql;

  FUNCTION_ENTER();
  FUNCTION_MSG("cancelling request %p\n", request);

  WDF_REQUEST_PARAMETERS_INIT(&wrp);
  KeAcquireSpinLock(&xudd->urb_ring_lock, &old_irql);
  WdfRequestGetParameters(request, &wrp);
  /* the pvurb was stashed in Arg1 when the request was formatted */
  pvurb = (pvurb_t *)wrp.Parameters.Others.Arg1;
  FUNCTION_MSG("pvurb = %p\n", pvurb);
  ASSERT(pvurb);

  /* pass 1: partials not yet on the ring can just be discarded */
  partial_pvurb = (partial_pvurb_t *)xudd->partial_pvurb_queue.Flink;
  while (partial_pvurb != (partial_pvurb_t *)&xudd->partial_pvurb_queue) {
    partial_pvurb_t *next_partial_pvurb = (partial_pvurb_t *)partial_pvurb->entry.Flink;
    ASSERT(!partial_pvurb->on_ring);
    FUNCTION_MSG("partial_pvurb = %p is not yet on ring\n", partial_pvurb);
    RemoveEntryList(&partial_pvurb->entry);
    ExFreePoolWithTag(partial_pvurb, XENUSB_POOL_TAG);
    pvurb->ref--;
    partial_pvurb = next_partial_pvurb;
  }

  /* pass 2: partials on the ring need a backend unlink request each */
  partial_pvurb = (partial_pvurb_t *)xudd->partial_pvurb_ring.Flink;
  while (partial_pvurb != (partial_pvurb_t *)&xudd->partial_pvurb_ring) {
    partial_pvurb_t *next_partial_pvurb = (partial_pvurb_t *)partial_pvurb->entry.Flink;
    partial_pvurb_t *partial_pvurb_cancel;
    FUNCTION_MSG("partial_pvurb = %p is on ring\n", partial_pvurb);
    ASSERT(partial_pvurb->on_ring);
    partial_pvurb_cancel = ExAllocatePoolWithTag(NonPagedPool, sizeof(*partial_pvurb_cancel), XENUSB_POOL_TAG); /* todo - use lookaside */
    ASSERT(partial_pvurb_cancel); /* what would we do if this failed? */
    /* clone the original request, convert it to an unlink of that request,
       and cross-link the pair so the response path can match them up */
    partial_pvurb_cancel->req = partial_pvurb->req;
    partial_pvurb_cancel->req.pipe = usbif_setunlink_pipe(partial_pvurb_cancel->req.pipe);
    partial_pvurb_cancel->req.u.unlink.unlink_id = partial_pvurb->req.id;
    partial_pvurb_cancel->pvurb = pvurb;
    partial_pvurb_cancel->mdl = NULL;
    partial_pvurb_cancel->other_partial_pvurb = partial_pvurb;
    partial_pvurb->other_partial_pvurb = partial_pvurb_cancel;
    partial_pvurb_cancel->on_ring = FALSE;
    pvurb->ref++;
    InsertHeadList(&xudd->partial_pvurb_queue, &partial_pvurb_cancel->entry);
    partial_pvurb = next_partial_pvurb;
  }

  if (pvurb->ref) {
    /* unlinks outstanding: push them; completion happens on response */
    PutRequestsOnRing(xudd);
    KeReleaseSpinLock(&xudd->urb_ring_lock, old_irql);
  } else {
    /* nothing left in flight - complete the cancellation immediately */
    KeReleaseSpinLock(&xudd->urb_ring_lock, old_irql);
    WdfRequestComplete(request, STATUS_CANCELLED);
  }

  FUNCTION_EXIT();
}
/*
 * DPC body run on CPU 0 during a highsync rendezvous.  Waits for every
 * other CPU to reach DISPATCH_LEVEL, raises to sync_level with interrupts
 * disabled, waits for the other CPUs to reach sync_level, runs function0,
 * then releases the spinners and waits for them to drain before signalling
 * completion.  The exact ordering of barriers, IRQL changes and counter
 * checks is load-bearing - do not reorder.
 */
static VOID
XenPci_HighSyncCallFunction0(
  PRKDPC Dpc,
  PVOID Context,
  PVOID SystemArgument1,
  PVOID SystemArgument2)
{
  highsync_info_t *highsync_info = Context;
  ULONG ActiveProcessorCount;
  KIRQL old_irql;

  UNREFERENCED_PARAMETER(Dpc);
  UNREFERENCED_PARAMETER(SystemArgument1);
  UNREFERENCED_PARAMETER(SystemArgument2);

  FUNCTION_ENTER();

#if (NTDDI_VERSION >= NTDDI_WINXP)
  ActiveProcessorCount = (ULONG)KeNumberProcessors;
#else
  ActiveProcessorCount = (ULONG)*KeNumberProcessors;
#endif

  InterlockedIncrement(&highsync_info->nr_procs_at_dispatch_level);
  if (highsync_info->sync_level > DISPATCH_LEVEL) {
    /* hold here until every CPU's DPC is running */
    while (highsync_info->nr_procs_at_dispatch_level < (LONG)ActiveProcessorCount) {
      KeStallExecutionProcessor(1);
      KeMemoryBarrier();
    }
  }
  _disable(); //__asm cli;
  KeRaiseIrql(highsync_info->sync_level, &old_irql);
  /* wait for the N-1 other CPUs to spin at sync_level */
  while (highsync_info->nr_spinning_at_sync_level < (LONG)ActiveProcessorCount - 1) {
    KeStallExecutionProcessor(1);
    KeMemoryBarrier();
  }
  /* all CPUs quiescent: safe to run the critical function */
  highsync_info->function0(highsync_info->context);
  KeLowerIrql(old_irql);
  _enable(); //__asm sti;
  /* release the other CPUs from their spin loops */
  highsync_info->do_spin = FALSE;
  KeMemoryBarrier();
  /* wait for all the other processors to complete spinning, just in case it matters */
  while (highsync_info->nr_spinning_at_sync_level) {
    KeStallExecutionProcessor(1);
    KeMemoryBarrier();
  }
  InterlockedDecrement(&highsync_info->nr_procs_at_dispatch_level);
  /* wait until nr_procs_at_dispatch_level drops to 0 indicating that nothing else requires highsync_info */
  while (highsync_info->nr_procs_at_dispatch_level) {
    KeStallExecutionProcessor(1);
    KeMemoryBarrier();
  }
  /* wake the thread waiting in XenPci_HighSync */
  KeSetEvent(&highsync_info->highsync_complete_event, IO_NO_INCREMENT, FALSE);

  FUNCTION_EXIT();
}
/** Function to disable Interrupt
 *
 *  Masks the NVIC interrupt line identified by the handle's NodeID.
 */
void NVIC002_DisableIRQ(const NVIC002_HandleType* Handle)
{
  /*<<<DD_NVIC002_API_3>>>*/
  FUNCTION_ENTRY(GID_NVIC002, NVIC002_FUNC_ENTRY);

  /* Disable Interrupt */
  NVIC_DisableIRQ(Handle->NodeID);

  FUNCTION_EXIT(GID_NVIC002, NVIC002_FUNC_EXIT);
}
/*
 * Per-CPU (N > 0) side of the kernel patching rendezvous.  Intentionally
 * empty: no per-CPU work is required on the non-boot processors.
 */
static VOID
XenPci_DoPatchKernelN(PVOID context) {
  UNREFERENCED_PARAMETER(context);

  FUNCTION_ENTER();
  FUNCTION_EXIT();
}
/*
 * The function gets the status of the card whether the card is initialized,
 * inserted or write protected.  The status is obtained by (re)running the
 * initialisation routine.
 */
uint8_t SDMMC001_GetStatus(void)
{
  uint8_t card_status;

  FUNCTION_ENTRY(GID_SDMMC001, (uint32_t)SDMMC001_FUNCTION_ENTRY);
  card_status = SDMMC001_Initialize();
  FUNCTION_EXIT(GID_SDMMC001, (uint32_t)SDMMC001_FUNCTION_EXIT);

  return card_status;
}
/*
 * NDIS OID-request cancel handler.  Intentionally a no-op: this handler
 * has nothing to cancel.
 */
VOID
XenNet_CancelOidRequest(NDIS_HANDLE adapter_context, PVOID request_id) {
  UNREFERENCED_PARAMETER(adapter_context);
  UNREFERENCED_PARAMETER(request_id);

  FUNCTION_ENTER();
  FUNCTION_EXIT();
}
/*
 * The function writes the data on the card.
 *
 * Writes SectorCount sectors from WriteBuf starting at SectorNumber,
 * choosing single- or multi-block write accordingly.  Returns one of the
 * SDMMC001_RES_* codes: NOTRDY if the card is uninitialised, WRPRT if it
 * is write protected, ERROR on a low-level write failure, OK on success.
 *
 * NOTE(review): WriteBuf is cast from const uint8_t* to uint32_t*, which
 * discards const and assumes 4-byte alignment of the caller's buffer -
 * confirm callers guarantee alignment.
 */
uint32_t SDMMC001_WriteBlock ( const uint8_t *WriteBuf, uint32_t SectorNumber, uint8_t SectorCount )
{
  status_t Status ;
  uint8_t DiskStatus;
  uint32_t Result;

  FUNCTION_ENTRY(GID_SDMMC001, (uint32_t)SDMMC001_FUNCTION_ENTRY);

  do
  {
    DiskStatus = SDMMC001_GetStatus ();

    /*<<<DD_SDMMC001_API_4_1>>>*/
    /* If the card is not initialized */
    if ((DiskStatus & SDMMC001_STA_NOINIT) != 0)
    {
      Result = SDMMC001_RES_NOTRDY;
      ERROR(GID_SDMMC001,Result,0,0);
      break;
    }/* End of "if ((DiskStatus & SDMMC001_STA_NOINIT) != 0)"*/

    /* If the card is read only or write protected */
    /*<<<DD_SDMMC001_API_4_2>>>*/
    if ((DiskStatus & SDMMC001_STA_PROTECT) != 0)
    {
      Result = SDMMC001_RES_WRPRT;
      ERROR(GID_SDMMC001,Result,0,0);
      break;
    }/* End of "if ((DiskStatus & SDMMC001_STA_PROTECT) != 0)"*/

    /*Check for single block write or multiple block write.*/
    /*<<<DD_SDMMC001_API_4_3>>>*/
    if (SectorCount == 1)
    {
      Status = SDMMC003_CardWriteSingleBlock((uint32_t *)WriteBuf, SectorNumber);
    }
    /*<<<DD_SDMMC001_API_4_4>>>*/
    else
    {
      Status = SDMMC003_CardWriteMultipleBlocks((uint32_t *)WriteBuf,\
               SectorNumber, SectorCount );
    } /* End of "if (SectorCount == 1)"*/

    /*<<<DD_SDMMC001_API_4_5>>>*/
    if (Status == (uint32_t)DAVEApp_SUCCESS)
    {
      Result = SDMMC001_RES_OK;
    }
    /*<<<DD_SDMMC001_API_4_6>>>*/
    else
    {
      Result = SDMMC001_RES_ERROR;
      ERROR(GID_SDMMC001,SDMMC001_SDMMC00x_ERROR,0,0);
    } /* End of "if (Status == (uint32_t)DAVEApp_SUCCESS)"*/
  } while(0);

  FUNCTION_EXIT(GID_SDMMC001, (uint32_t)SDMMC001_FUNCTION_EXIT);

  return Result;
}
/*
 * WDF D0-exit (power-down) handler for a xenpci child PDO.  Logs the
 * target power state, converts D3 into prepare-for-hibernation when the
 * hibernate usage kludge was set by the usage notification, and removes
 * the watch on the backend's "state" node.  Always returns STATUS_SUCCESS.
 *
 * Fixes: the WdfPowerDeviceD0 case logged "WdfPowerDeviceD1" (copy/paste
 * bug), and device/target_state were tagged UNREFERENCED_PARAMETER even
 * though both are used.
 */
NTSTATUS
XenPciPdo_EvtDeviceD0Exit(WDFDEVICE device, WDF_POWER_DEVICE_STATE target_state) {
  NTSTATUS status = STATUS_SUCCESS;
  PXENPCI_PDO_DEVICE_DATA xppdd = GetXppdd(device);
  PXENPCI_DEVICE_DATA xpdd = GetXpdd(xppdd->wdf_device_bus_fdo);
  char path[128];

  FUNCTION_ENTER();
  FUNCTION_MSG("path = %s\n", xppdd->path);

  switch (target_state) {
  case WdfPowerDeviceD0:
    FUNCTION_MSG("WdfPowerDeviceD0\n");
    break;
  case WdfPowerDeviceD1:
    FUNCTION_MSG("WdfPowerDeviceD1\n");
    break;
  case WdfPowerDeviceD2:
    FUNCTION_MSG("WdfPowerDeviceD2\n");
    break;
  case WdfPowerDeviceD3:
    FUNCTION_MSG("WdfPowerDeviceD3\n");
    if (xppdd->hiber_usage_kludge) {
      /* device is on the hibernation path: treat D3 as hibernate prep */
      FUNCTION_MSG("(but really WdfPowerDevicePrepareForHibernation)\n");
      target_state = WdfPowerDevicePrepareForHibernation;
    }
    break;
  case WdfPowerDeviceD3Final:
    FUNCTION_MSG("WdfPowerDeviceD3Final\n");
    break;
  case WdfPowerDevicePrepareForHibernation:
    FUNCTION_MSG("WdfPowerDevicePrepareForHibernation\n");
    break;
  default:
    FUNCTION_MSG("Unknown WdfPowerDevice state %d\n", target_state);
    break;
  }

  if (target_state == WdfPowerDevicePrepareForHibernation) {
    FUNCTION_MSG("not powering down as we are hibernating\n");
    // should we set the backend state here so it's correct on resume???
  }

  /* Remove watch on backend state */
  /* even if hibernate */
  if (xppdd->device_callback) {
    FUNCTION_MSG("Removing watch %s\n", xppdd->device);
    RtlStringCbPrintfA(path, ARRAY_SIZE(path), "%s/state", xppdd->backend_path);
    XenBus_RemWatch(xpdd, XBT_NIL, path, XenPci_BackendStateCallback, xppdd);
  }

  FUNCTION_EXIT();
  return status;
}
/*
 * I/O completion routine for the forwarded START_DEVICE IRP: frees the
 * context allocated when the IRP was sent and lets completion continue.
 *
 * Fix: a completion routine that returns STATUS_SUCCESS (continue
 * completion) must propagate a lower driver's pending status by calling
 * IoMarkIrpPending when irp->PendingReturned is set; this was missing.
 */
static NTSTATUS
XenVbd_IoCompletion_START_DEVICE(PDEVICE_OBJECT device, PIRP irp, PVOID context) {
  UNREFERENCED_PARAMETER(device);

  FUNCTION_ENTER();

  if (irp->PendingReturned) {
    IoMarkIrpPending(irp);
  }
  ExFreePoolWithTag(context, XENVBD_POOL_TAG);

  FUNCTION_EXIT();
  return STATUS_SUCCESS;
}
/* Function to configure SCU Interrupts based on user configuration.
 *
 */
void NVIC_SCU001_Init()
{
  FUNCTION_ENTRY(GID_NVIC_SCU001, NVIC_SCU001_FUNC_ENTRY);

  /* IRQ 64: priority group 63, sub-priority 0, then unmask the line. */
  NVIC_SetPriority(64, NVIC_EncodePriority(NVIC_GetPriorityGrouping(), 63, 0));

  /* Enable Interrupt */
  NVIC_EnableIRQ(64);

  FUNCTION_EXIT(GID_NVIC_SCU001, NVIC_SCU001_FUNC_EXIT);
}
/*
 * Quiesce the vbd ring: when suspending, synchronously send an
 * XENVBD_CONTROL_STOP SRB_IO_CONTROL down the filter's I/O target so the
 * miniport stops issuing requests, then move the xenbus frontend state to
 * Closing.
 *
 * Fix: the WdfRequestCreate result was logged but never checked - on
 * failure the invalid request handle was then used (WdfRequestWdmGetIrp,
 * WdfRequestSend, WdfObjectDelete).  The stop SRB is now only built and
 * sent when the request was actually created.
 */
static VOID
XenVbd_StopRing(PXENVBD_DEVICE_DATA xvdd, BOOLEAN suspend) {
  PXENVBD_FILTER_DATA xvfd = (PXENVBD_FILTER_DATA)xvdd->xvfd;
  NTSTATUS status;
  WDFREQUEST request;
  WDF_REQUEST_SEND_OPTIONS send_options;
  IO_STACK_LOCATION stack;
  SCSI_REQUEST_BLOCK srb;
  SRB_IO_CONTROL sic;

  FUNCTION_ENTER();

  /* send a 'stop' down if we are suspending */
  if (suspend) {
    status = WdfRequestCreate(WDF_NO_OBJECT_ATTRIBUTES, xvfd->wdf_target, &request);
    FUNCTION_MSG("WdfRequestCreate = %08x\n", status);
    if (NT_SUCCESS(status)) {
      /* hand-build an IRP_MJ_SCSI stack location carrying the control SRB */
      RtlZeroMemory(&stack, sizeof(IO_STACK_LOCATION));
      stack.MajorFunction = IRP_MJ_SCSI;
      stack.MinorFunction = IRP_MN_SCSI_CLASS;
      stack.Parameters.Scsi.Srb = &srb;

      RtlZeroMemory(&srb, SCSI_REQUEST_BLOCK_SIZE);
      srb.SrbFlags = SRB_FLAGS_BYPASS_FROZEN_QUEUE | SRB_FLAGS_NO_QUEUE_FREEZE;
      srb.Length = SCSI_REQUEST_BLOCK_SIZE;
      srb.PathId = 0;
      srb.TargetId = 0;
      srb.Lun = 0;
      srb.OriginalRequest = WdfRequestWdmGetIrp(request);
      srb.Function = SRB_FUNCTION_IO_CONTROL;
      srb.DataBuffer = &sic;

      /* control payload identifying the private 'stop' command */
      RtlZeroMemory(&sic, sizeof(SRB_IO_CONTROL));
      sic.HeaderLength = sizeof(SRB_IO_CONTROL);
      memcpy(sic.Signature, XENVBD_CONTROL_SIG, 8);
      sic.Timeout = 60;
      sic.ControlCode = XENVBD_CONTROL_STOP;

      WdfRequestWdmFormatUsingStackLocation(request, &stack);
      WDF_REQUEST_SEND_OPTIONS_INIT(&send_options, WDF_REQUEST_SEND_OPTION_SYNCHRONOUS);
      if (!WdfRequestSend(request, xvfd->wdf_target, &send_options)) {
        FUNCTION_MSG("Request was _NOT_ sent\n");
      }
#if DBG
      status = WdfRequestGetStatus(request);
      FUNCTION_MSG("Request Status = %08x\n", status);
      FUNCTION_MSG("SRB Status = %08x\n", srb.SrbStatus);
#endif
      WdfObjectDelete(request);
    }
  }

  status = XnWriteInt32(xvdd->handle, XN_BASE_FRONTEND, "state", XenbusStateClosing);

  FUNCTION_EXIT();
}
/** Function to initialize the NVIC node parameters based on * UI configuration. */ void NVIC002_Init(void) { /*<<<DD_NVIC002_API_1>>>*/ FUNCTION_ENTRY(GID_NVIC002,NVIC002_FUNC_ENTRY); // Set Interrupt Priority for NVIC 5 Node App Instance 0 NVIC_SetPriority(5, NVIC_EncodePriority(NVIC_GetPriorityGrouping(),63,0)); /* Enable Interrupt */ NVIC_EnableIRQ(5); FUNCTION_EXIT(GID_NVIC002,NVIC002_FUNC_EXIT); }