/*!	Like read_into_cache() but writes data into the cache.
	To preserve data consistency, it might also read pages into the cache,
	though, if only a partial page gets written.
	The same restrictions apply.

	\param ref         The file cache reference (vnode + VMCache).
	\param cookie      FS cookie passed through to vfs_read_pages()/
	                   vfs_write_pages().
	\param offset      Page-aligned file offset of the first affected page.
	\param pageOffset  Byte offset of the write within the first page.
	\param buffer      Source address of the data to write (ignored if
	                   \a useBuffer is \c false).
	\param bufferSize  Number of bytes to write.
	\param useBuffer   If \c false, the range is zero-filled instead of
	                   copied from \a buffer.
	\param reservation Page reservation the new cache pages are drawn from;
	                   re-filled with \a reservePages pages before returning.
	\note Expects ref->cache to be locked on entry; the lock is temporarily
	      released around the I/O and re-acquired before returning.
*/
static status_t write_to_cache(file_cache_ref* ref, void* cookie, off_t offset,
	int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
	vm_page_reservation* reservation, size_t reservePages)
{
	// TODO: We're using way too much stack! Rather allocate a sufficiently
	// large chunk on the heap.
	generic_io_vec vecs[MAX_IO_VECS];
	uint32 vecCount = 0;
	generic_size_t numBytes = PAGE_ALIGN(pageOffset + bufferSize);
	vm_page* pages[MAX_IO_VECS];
	int32 pageIndex = 0;
	status_t status = B_OK;

	// ToDo: this should be settable somewhere
	bool writeThrough = false;

	// allocate pages for the cache and mark them busy
	for (generic_size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
		// TODO: if space is becoming tight, and this cache is already grown
		// big - shouldn't we better steal the pages directly in that case?
		// (a working set like approach for the file cache)
		// TODO: the pages we allocate here should have been reserved upfront
		// in cache_io()
		vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
			reservation,
			(writeThrough ? PAGE_STATE_CACHED : PAGE_STATE_MODIFIED)
				| VM_PAGE_ALLOC_BUSY);

		// Pages stay dirty until written back, unless we write through below.
		page->modified = !writeThrough;

		ref->cache->InsertPage(page, offset + pos);

		add_to_iovec(vecs, vecCount, MAX_IO_VECS,
			page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE);
	}

	push_access(ref, offset, bufferSize, true);
	// Drop the cache lock and our page reservation while doing I/O; the
	// pages are marked busy, so no one else will touch them meanwhile.
	ref->cache->Unlock();
	vm_page_unreserve_pages(reservation);

	// copy contents (and read in partially written pages first)

	if (pageOffset != 0) {
		// This is only a partial write, so we have to read the rest of the
		// page from the file to have consistent data in the cache
		generic_io_vec readVec = { vecs[0].base, B_PAGE_SIZE };
		generic_size_t bytesRead = B_PAGE_SIZE;

		status = vfs_read_pages(ref->vnode, cookie, offset, &readVec, 1,
			B_PHYSICAL_IO_REQUEST, &bytesRead);
		// ToDo: handle errors for real!
		if (status < B_OK)
			panic("1. vfs_read_pages() failed: %s!\n", strerror(status));
	}

	size_t lastPageOffset = (pageOffset + bufferSize) % B_PAGE_SIZE;
	if (lastPageOffset != 0) {
		// The write does not end on a page boundary -- the tail of the last
		// page must either be zeroed (at EOF) or read in from the file.
		// get the last page in the I/O vectors
		generic_addr_t last = vecs[vecCount - 1].base
			+ vecs[vecCount - 1].length - B_PAGE_SIZE;

		if ((off_t)(offset + pageOffset + bufferSize)
				== ref->cache->virtual_end) {
			// the space in the page after this write action needs to be
			// cleaned
			vm_memset_physical(last + lastPageOffset, 0,
				B_PAGE_SIZE - lastPageOffset);
		} else {
			// the end of this write does not happen on a page boundary, so we
			// need to fetch the last page before we can update it
			generic_io_vec readVec = { last, B_PAGE_SIZE };
			generic_size_t bytesRead = B_PAGE_SIZE;

			status = vfs_read_pages(ref->vnode, cookie,
				PAGE_ALIGN(offset + pageOffset + bufferSize) - B_PAGE_SIZE,
				&readVec, 1, B_PHYSICAL_IO_REQUEST, &bytesRead);
			// ToDo: handle errors for real!
			if (status < B_OK)
				panic("vfs_read_pages() failed: %s!\n", strerror(status));

			if (bytesRead < B_PAGE_SIZE) {
				// the space beyond the file size needs to be cleaned
				vm_memset_physical(last + bytesRead, 0,
					B_PAGE_SIZE - bytesRead);
			}
		}
	}

	// Copy (or zero) the caller's data into the freshly inserted pages,
	// one vec at a time; pageOffset only applies to the first vec.
	for (uint32 i = 0; i < vecCount; i++) {
		generic_addr_t base = vecs[i].base;
		generic_size_t bytes = min_c((generic_size_t)bufferSize,
			generic_size_t(vecs[i].length - pageOffset));

		if (useBuffer) {
			// copy data from user buffer
			vm_memcpy_to_physical(base + pageOffset, (void*)buffer, bytes,
				IS_USER_ADDRESS(buffer));
		} else {
			// clear buffer instead
			vm_memset_physical(base + pageOffset, 0, bytes);
		}

		bufferSize -= bytes;
		if (bufferSize == 0)
			break;

		buffer += bytes;
		pageOffset = 0;
	}

	if (writeThrough) {
		// write cached pages back to the file if we were asked to do that
		status_t status = vfs_write_pages(ref->vnode, cookie, offset, vecs,
			vecCount, B_PHYSICAL_IO_REQUEST, &numBytes);
		if (status < B_OK) {
			// ToDo: remove allocated pages, ...?
			panic("file_cache: remove allocated pages! write pages failed: %s\n",
				strerror(status));
		}
	}

	// Re-reserve pages for the caller before re-taking the cache lock.
	if (status == B_OK)
		reserve_pages(ref, reservation, reservePages, true);

	ref->cache->Lock();

	// make the pages accessible in the cache
	for (int32 i = pageIndex; i-- > 0;) {
		ref->cache->MarkPageUnbusy(pages[i]);

		DEBUG_PAGE_ACCESS_END(pages[i]);
	}

	return status;
}
/*!	Refreshes the cached fs_info data (free/total nodes and blocks, I/O size)
	from the NFS server via a GETATTR request, unless the cache is still
	fresh and \a force is \c false.

	The attribute values in the reply arrive in the same order as requested
	(FILES_FREE, FILES_TOTAL, MAXREAD, MAXWRITE, SPACE_FREE, SPACE_TOTAL),
	but the server may omit any of them, so each one is matched positionally
	and skipped if absent.

	\return B_OK on success or the error from the RPC round trip.
*/
status_t
RootInode::_UpdateInfo(bool force)
{
	// Fast path: cache still valid, no lock needed.
	if (!force && fInfoCacheExpire > time(NULL))
		return B_OK;

	MutexLocker _(fInfoCacheLock);

	// Re-check under the lock -- another thread may have refreshed already.
	if (fInfoCacheExpire > time(NULL))
		return B_OK;

	do {
		RPC::Server* server = fFileSystem->Server();
		Request request(server, fFileSystem);
		RequestBuilder& req = request.Builder();

		req.PutFH(fInfo.fHandle);

		Attribute attr[] = { FATTR4_FILES_FREE, FATTR4_FILES_TOTAL,
			FATTR4_MAXREAD, FATTR4_MAXWRITE, FATTR4_SPACE_FREE,
			FATTR4_SPACE_TOTAL };
		req.GetAttr(attr, sizeof(attr) / sizeof(Attribute));

		status_t result = request.Send();
		if (result != B_OK)
			return result;

		ReplyInterpreter& reply = request.Reply();

		// On a recoverable NFS4 error, retry the whole request.
		if (HandleErrors(reply.NFS4Error(), server))
			continue;

		reply.PutFH();

		AttrValue* values;
		uint32 count, next = 0;
		result = reply.GetAttr(&values, &count);
		if (result != B_OK)
			return result;

		// NOTE: each check must be "next < count" -- the previous
		// "count >= next" comparison read values[count] past the end of the
		// array once all returned attributes were consumed.
		if (next < count && values[next].fAttribute == FATTR4_FILES_FREE) {
			fInfoCache.free_nodes = values[next].fData.fValue64;
			next++;
		}

		if (next < count && values[next].fAttribute == FATTR4_FILES_TOTAL) {
			fInfoCache.total_nodes = values[next].fData.fValue64;
			next++;
		}

		// Use the smaller of MAXREAD/MAXWRITE as the preferred I/O size;
		// fall back to 32768 if the server reported neither.
		uint64 ioSize = LONGLONG_MAX;
		if (next < count && values[next].fAttribute == FATTR4_MAXREAD) {
			ioSize = min_c(ioSize, values[next].fData.fValue64);
			next++;
		}

		if (next < count && values[next].fAttribute == FATTR4_MAXWRITE) {
			ioSize = min_c(ioSize, values[next].fData.fValue64);
			next++;
		}

		if (ioSize == LONGLONG_MAX)
			ioSize = 32768;

		fInfoCache.io_size = ioSize;
		fInfoCache.block_size = ioSize;
		fIOSize = ioSize;

		// SPACE_* values are in bytes; convert to "blocks" of ioSize.
		if (next < count && values[next].fAttribute == FATTR4_SPACE_FREE) {
			fInfoCache.free_blocks = values[next].fData.fValue64 / ioSize;
			next++;
		}

		if (next < count && values[next].fAttribute == FATTR4_SPACE_TOTAL) {
			fInfoCache.total_blocks = values[next].fData.fValue64 / ioSize;
			next++;
		}

		delete[] values;

		break;
	} while (true);

	fInfoCache.flags = B_FS_IS_PERSISTENT | B_FS_IS_SHARED
		| B_FS_SUPPORTS_NODE_MONITORING;

	if (fFileSystem->NamedAttrs()
		|| fFileSystem->GetConfiguration().fEmulateNamedAttrs)
		fInfoCache.flags |= B_FS_HAS_MIME | B_FS_HAS_ATTR;

	strlcpy(fInfoCache.volume_name, fName, sizeof(fInfoCache.volume_name));

	fInfoCacheExpire = time(NULL) + MetadataCache::kExpirationTime;

	return B_OK;
}
/*!	Paints the battery icon and/or the status label. Layout adapts to the
	view's aspect ratio: a tall view stacks the label below the icon, a wide
	view places it to the right. Either element is dropped if there is not
	enough room for it.
*/
void
PowerStatusView::Draw(BRect updateRect)
{
	// Only paint our own background when the parent doesn't draw on
	// children.
	bool opaqueBackground = Parent() == NULL
		|| (Parent()->Flags() & B_DRAW_ON_CHILDREN) == 0;
	if (opaqueBackground)
		FillRect(updateRect, B_SOLID_LOW);

	// Tall (aspect <= 1) puts the label below the icon, wide to the right.
	bool labelBelow = Bounds().Width() / Bounds().Height() <= 1.0f;

	font_height fontHeight;
	GetFontHeight(&fontHeight);
	float baseLine = ceilf(fontHeight.ascent);

	char label[64];
	_SetLabel(label, sizeof(label));

	float labelHeight = ceilf(fontHeight.descent + fontHeight.ascent);
	float labelWidth = StringWidth(label);
	bool haveLabel = fShowLabel && label[0];

	BRect batteryRect;

	if (fShowStatusIcon) {
		batteryRect = Bounds();
		if (haveLabel) {
			// reserve room for the label next to/below the icon
			if (labelBelow)
				batteryRect.bottom -= labelHeight + 4;
			else
				batteryRect.right -= labelWidth + 4;
		}

		// make a square
		batteryRect.bottom = min_c(batteryRect.bottom, batteryRect.right);
		batteryRect.right = batteryRect.bottom;

		if (batteryRect.Width() + 1 >= kMinIconWidth
			&& batteryRect.Height() + 1 >= kMinIconHeight) {
			_DrawBattery(batteryRect);
		} else {
			// there is not enough space for the icon
			batteryRect.Set(0, 0, -1, -1);
		}
	}

	if (haveLabel) {
		BPoint where(0, baseLine);

		if (batteryRect.IsValid()) {
			// position the label relative to the icon just drawn
			if (labelBelow) {
				where.x = (batteryRect.Width() - labelWidth) / 2;
				where.y += batteryRect.Height() + 2;
			} else {
				where.x = batteryRect.Width() + 2;
				where.y += (batteryRect.Height() - labelHeight) / 2;
			}
		} else {
			// no icon -- center the label in the view
			where.x = (Bounds().Width() - labelWidth) / 2;
			where.y += (Bounds().Height() - labelHeight) / 2;
		}

		if (opaqueBackground)
			SetHighColor(ui_color(B_CONTROL_TEXT_COLOR));
		else {
			// draw over the parent; pick black or white for contrast
			SetDrawingMode(B_OP_OVER);
			rgb_color background = Parent()->LowColor();
			if (background.red + background.green + background.blue
					> 128 * 3)
				SetHighColor(0, 0, 0);
			else
				SetHighColor(255, 255, 255);
		}

		DrawString(label, where);
	}
}
/*!	Central message dispatcher for the Screen preferences window. Handles
	workspace layout changes, resolution/color/refresh pop-up selections,
	display options (combine/swap/laptop panel/TV standard), and the
	Defaults/Undo/Revert/Apply buttons. Unknown messages are forwarded to
	BWindow.
*/
void ScreenWindow::MessageReceived(BMessage* message)
{
	switch (message->what) {
		case WORKSPACE_CHECK_MSG:
			_CheckApplyEnabled();
			break;

		case kMsgWorkspaceLayoutChanged:
		{
			// Relative change of the workspace grid (from +/- buttons).
			int32 deltaX = 0;
			int32 deltaY = 0;
			message->FindInt32("delta_x", &deltaX);
			message->FindInt32("delta_y", &deltaY);

			if (deltaX == 0 && deltaY == 0)
				break;

			uint32 newColumns;
			uint32 newRows;
			BPrivate::get_workspaces_layout(&newColumns, &newRows);
			newColumns += deltaX;
			newRows += deltaY;
			BPrivate::set_workspaces_layout(newColumns, newRows);

			_UpdateWorkspaceButtons();
			_CheckApplyEnabled();
			break;
		}

		case kMsgWorkspaceColumnsChanged:
		{
			// Absolute column count typed into the text control.
			uint32 newColumns = strtoul(fColumnsControl->Text(), NULL, 10);

			uint32 rows;
			BPrivate::get_workspaces_layout(NULL, &rows);
			BPrivate::set_workspaces_layout(newColumns, rows);

			_UpdateWorkspaceButtons();
			_CheckApplyEnabled();
			break;
		}

		case kMsgWorkspaceRowsChanged:
		{
			// Absolute row count typed into the text control.
			uint32 newRows = strtoul(fRowsControl->Text(), NULL, 10);

			uint32 columns;
			BPrivate::get_workspaces_layout(&columns, NULL);
			BPrivate::set_workspaces_layout(columns, newRows);

			_UpdateWorkspaceButtons();
			_CheckApplyEnabled();
			break;
		}

		case POP_RESOLUTION_MSG:
		{
			message->FindInt32("width", &fSelected.width);
			message->FindInt32("height", &fSelected.height);

			// New resolution may invalidate the current color space and
			// refresh rate choices.
			_CheckColorMenu();
			_CheckRefreshMenu();

			_UpdateMonitorView();
			_UpdateRefreshControl();

			_CheckApplyEnabled();
			break;
		}

		case POP_COLORS_MSG:
		{
			int32 space;
			if (message->FindInt32("space", &space) != B_OK)
				break;

			// Remember which menu item the user picked explicitly, so it can
			// be restored when switching resolutions.
			int32 index;
			if (message->FindInt32("index", &index) == B_OK
				&& fColorsMenu->ItemAt(index) != NULL)
				fUserSelectedColorSpace = fColorsMenu->ItemAt(index);

			fSelected.space = (color_space)space;
			_UpdateColorLabel();

			_CheckApplyEnabled();
			break;
		}

		case POP_REFRESH_MSG:
		{
			message->FindFloat("refresh", &fSelected.refresh);
			fOtherRefresh->SetLabel(B_TRANSLATE("Other" B_UTF8_ELLIPSIS));
				// revert "Other…" label - it might have a refresh rate prefix

			_CheckApplyEnabled();
			break;
		}

		case POP_OTHER_REFRESH_MSG:
		{
			// make sure menu shows something useful
			_UpdateRefreshControl();

			// Clamp the selectable range to the globals and, if available,
			// to the monitor's reported vertical frequency limits.
			float min = 0, max = 999;
			fScreenMode.GetRefreshLimits(fSelected, min, max);
			if (min < gMinRefresh)
				min = gMinRefresh;
			if (max > gMaxRefresh)
				max = gMaxRefresh;

			monitor_info info;
			if (fScreenMode.GetMonitorInfo(info) == B_OK) {
				min = max_c(info.min_vertical_frequency, min);
				max = min_c(info.max_vertical_frequency, max);
			}

			RefreshWindow *fRefreshWindow = new RefreshWindow(
				fRefreshField->ConvertToScreen(B_ORIGIN), fSelected.refresh,
				min, max);
			fRefreshWindow->Show();
			break;
		}

		case SET_CUSTOM_REFRESH_MSG:
		{
			// user pressed "done" in "Other…" refresh dialog;
			// select the refresh rate chosen
			message->FindFloat("refresh", &fSelected.refresh);

			_UpdateRefreshControl();
			_CheckApplyEnabled();
			break;
		}

		case POP_COMBINE_DISPLAYS_MSG:
		{
			// new combine mode has been chosen
			int32 mode;
			if (message->FindInt32("mode", &mode) == B_OK)
				fSelected.combine = (combine_mode)mode;

			_CheckResolutionMenu();
			_CheckApplyEnabled();
			break;
		}

		case POP_SWAP_DISPLAYS_MSG:
			message->FindBool("swap", &fSelected.swap_displays);
			_CheckApplyEnabled();
			break;

		case POP_USE_LAPTOP_PANEL_MSG:
			message->FindBool("use", &fSelected.use_laptop_panel);
			_CheckApplyEnabled();
			break;

		case POP_TV_STANDARD_MSG:
			message->FindInt32("tv_standard", (int32 *)&fSelected.tv_standard);
			_CheckApplyEnabled();
			break;

		case BUTTON_LAUNCH_BACKGROUNDS_MSG:
			// Launch Backgrounds, or bring it to front if already running.
			if (be_roster->Launch(kBackgroundsSignature) == B_ALREADY_RUNNING) {
				app_info info;
				be_roster->GetAppInfo(kBackgroundsSignature, &info);
				be_roster->ActivateApp(info.team);
			}
			break;

		case BUTTON_DEFAULTS_MSG:
		{
			// TODO: get preferred settings of screen
			fSelected.width = 640;
			fSelected.height = 480;
			fSelected.space = B_CMAP8;
			fSelected.refresh = 60.0;
			fSelected.combine = kCombineDisable;
			fSelected.swap_displays = false;
			fSelected.use_laptop_panel = false;
			fSelected.tv_standard = 0;

			// TODO: workspace defaults

			_UpdateControls();
			break;
		}

		case BUTTON_UNDO_MSG:
			fUndoScreenMode.Revert();
			_UpdateActiveMode();
			break;

		case BUTTON_REVERT_MSG:
		{
			fModified = false;
			fBootWorkspaceApplied = false;

			// ScreenMode::Revert() assumes that we first set the correct
			// number of workspaces

			BPrivate::set_workspaces_layout(fOriginalWorkspacesColumns,
				fOriginalWorkspacesRows);
			_UpdateWorkspaceButtons();

			fScreenMode.Revert();
			_UpdateActiveMode();
			break;
		}

		case BUTTON_APPLY_MSG:
			_Apply();
			break;

		case MAKE_INITIAL_MSG:
			// user pressed "keep" in confirmation dialog
			fModified = true;
			_UpdateActiveMode();
			break;

		default:
			BWindow::MessageReceived(message);
			break;
	}
}
/*!	Handles a mouse click in the list: detects double-clicks within the
	current selection (which invoke instead of reselecting), records drag
	tracking state, and updates the selection according to the list type and
	the shift/command modifiers.
*/
void BListView::MouseDown(BPoint point)
{
	if (!IsFocus()) {
		MakeFocus();
		Sync();
		Window()->UpdateIfNeeded();
	}

	BMessage* message = Looper()->CurrentMessage();
	int32 index = IndexOf(point);

	// If the user double (or more) clicked within the current selection,
	// we don't change the selection but invoke the selection.
	// TODO: move this code someplace where it can be shared everywhere
	// instead of every class having to reimplement it, once some sane
	// API for it is decided.
	BPoint delta = point - fTrack->drag_start;
	bigtime_t sysTime;
	Window()->CurrentMessage()->FindInt64("when", &sysTime);
	bigtime_t timeDelta = sysTime - fTrack->last_click_time;
	bigtime_t doubleClickSpeed;
	get_click_speed(&doubleClickSpeed);
	bool doubleClick = false;

	// A double-click requires being quick enough, close enough to the first
	// click, and on the same item.
	if (timeDelta < doubleClickSpeed
		&& fabs(delta.x) < kDoubleClickTresh
		&& fabs(delta.y) < kDoubleClickTresh
		&& fTrack->item_index == index) {
		doubleClick = true;
	}

	if (doubleClick && index >= fFirstSelected && index <= fLastSelected) {
		// Invalidate the drag start so a third click won't count as another
		// double-click, then invoke the selection.
		fTrack->drag_start.Set(INT32_MAX, INT32_MAX);
		Invoke();
		return;
	}

	int32 modifiers;
	message->FindInt32("modifiers", &modifiers);

	if (!doubleClick) {
		// Remember this click as the potential start of a drag and the
		// reference point for the next double-click check.
		fTrack->drag_start = point;
		fTrack->last_click_time = system_time();
		fTrack->item_index = index;
		fTrack->was_selected = index >= 0 ? ItemAt(index)->IsSelected() : false;
		fTrack->try_drag = true;
	}

	if (index > -1) {
		if (fListType == B_MULTIPLE_SELECTION_LIST) {
			if (modifiers & B_SHIFT_KEY) {
				// select entire block
				// TODO: maybe review if we want it like in Tracker
				// (anchor item)
				Select(min_c(index, fFirstSelected), max_c(index,
					fLastSelected));
			} else {
				if (modifiers & B_COMMAND_KEY) {
					// toggle selection state of clicked item (like in Tracker)
					if (ItemAt(index)->IsSelected())
						Deselect(index);
					else
						Select(index, true);
				} else
					Select(index);
			}
		} else {
			// toggle selection state of clicked item
			if ((modifiers & B_COMMAND_KEY) && ItemAt(index)->IsSelected())
				Deselect(index);
			else
				Select(index);
		}
	} else if ((modifiers & B_COMMAND_KEY) == 0)
		DeselectAll();
}
/*!	Iterates over the buffer's I/O vectors, returning one virtually
	addressable chunk per call in \a vector. On the first call \a _cookie
	must be \c NULL; an iteration cookie is allocated and returned through
	it. For physical buffers the vecs are either mapped in one go or, as a
	fallback, page by page. Returns \c B_BAD_INDEX when the iteration is
	exhausted.
*/
status_t IOBuffer::GetNextVirtualVec(void*& _cookie, iovec& vector)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie == NULL) {
		// First call: set up the iteration state. Allocation honors the VIP
		// heap priority if this buffer requires it.
		cookie = new(malloc_flags(fVIP ? HEAP_PRIORITY_VIP : 0))
			virtual_vec_cookie;
		if (cookie == NULL)
			return B_NO_MEMORY;

		cookie->vec_index = 0;
		cookie->vec_offset = 0;
		cookie->mapped_area = -1;
		cookie->physical_page_handle = NULL;
		cookie->virtual_address = 0;
		_cookie = cookie;
	}

	// recycle a potential previously mapped page
	if (cookie->physical_page_handle != NULL) {
		// TODO: This check is invalid! The physical page mapper is not
		// required to return a non-NULL handle (the generic implementation
		// does not)!
		vm_put_physical_page(cookie->virtual_address,
			cookie->physical_page_handle);
	}

	if (cookie->vec_index >= fVecCount)
		return B_BAD_INDEX;

	if (!fPhysical) {
		// Virtual buffer: the vec can be handed out directly.
		vector.iov_base = (void*)(addr_t)fVecs[cookie->vec_index].base;
		vector.iov_len = fVecs[cookie->vec_index++].length;
		return B_OK;
	}

	// Physical buffer spanning more than one page: try to map all vecs into
	// one contiguous kernel area up front.
	if (cookie->vec_index == 0
		&& (fVecCount > 1 || fVecs[0].length > B_PAGE_SIZE)) {
		void* mappedAddress;
		addr_t mappedSize;

		// TODO: This is a potential violation of the VIP requirement, since
		// vm_map_physical_memory_vecs() allocates memory without special
		// flags!
		cookie->mapped_area = vm_map_physical_memory_vecs(
			VMAddressSpace::KernelID(), "io buffer mapped physical vecs",
			&mappedAddress, B_ANY_KERNEL_ADDRESS, &mappedSize,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, fVecs, fVecCount);

		if (cookie->mapped_area >= 0) {
			vector.iov_base = mappedAddress;
			vector.iov_len = mappedSize;
			return B_OK;
		} else
			ktrace_printf("failed to map area: %s\n",
				strerror(cookie->mapped_area));
	}

	// fallback to page wise mapping
	generic_io_vec& currentVec = fVecs[cookie->vec_index];
	generic_addr_t address = currentVec.base + cookie->vec_offset;
	size_t pageOffset = address % B_PAGE_SIZE;

	// TODO: This is a potential violation of the VIP requirement, since
	// vm_get_physical_page() may allocate memory without special flags!
	status_t result = vm_get_physical_page(address - pageOffset,
		&cookie->virtual_address, &cookie->physical_page_handle);
	if (result != B_OK)
		return result;

	// Hand out at most up to the end of the current vec or page, whichever
	// comes first.
	generic_size_t length = min_c(currentVec.length - cookie->vec_offset,
		B_PAGE_SIZE - pageOffset);

	vector.iov_base = (void*)(cookie->virtual_address + pageOffset);
	vector.iov_len = length;

	// Advance the iteration state for the next call.
	cookie->vec_offset += length;
	if (cookie->vec_offset >= currentVec.length) {
		cookie->vec_index++;
		cookie->vec_offset = 0;
	}

	return B_OK;
}
/*!	Releases \a count units to the semaphore \a id, waking up queued waiters
	whose requested counts can now be satisfied. With \c B_RELEASE_ALL, only
	as many units are released as there are waiting threads. With
	\c B_RELEASE_IF_WAITING_ONLY, surplus units are dropped instead of being
	added to the semaphore count. Reschedules afterwards unless
	\c B_DO_NOT_RESCHEDULE is set.
*/
status_t release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	// slot is only dereferenced after the id validity checks below
	int32 slot = id % sMaxSems;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;

	InterruptsLocker _;
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	// doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;
	}

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
		flags);

	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	if (flags & B_RELEASE_ALL) {
		// release exactly what the queued waiters are still missing
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			return B_OK;

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;
	}

	// Grab the scheduler lock, so thread_is_blocked() is reliable (due to
	// possible interruptions or timeouts, it wouldn't be otherwise).
	SpinLocker schedulerLocker(gSchedulerLock);

	while (count > 0) {
		queued_thread* entry = sSems[slot].queue.Head();
		if (entry == NULL) {
			// no more waiters -- keep the surplus unless we were asked to
			// only satisfy waiting threads
			if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
			}
			break;
		}

		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
				break;
			}

			thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			count -= delta;
		} else {
			// The thread is no longer waiting, but still queued, which
			// means acquiration failed and we can just remove it.
			sSems[slot].u.used.count += entry->count;
		}

		sSems[slot].queue.Remove(entry);
		entry->queued = false;
	}

	schedulerLocker.Unlock();

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread reschedule, if we've not explicitly
	// been told not to.
	if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
		semLocker.Unlock();

		schedulerLocker.Lock();
		scheduler_reschedule_if_necessary_locked();
	}

	return B_OK;
}
/*!	Initializes the DMA resource from the given hardware \a restrictions and
	pre-creates \a bufferCount DMA buffers and \a bounceBufferCount bounce
	buffers. Restriction fields left at 0 are replaced by permissive
	defaults; a \a blockSize of 0 is treated as 1.
*/
status_t
DMAResource::Init(const dma_restrictions& restrictions,
	generic_size_t blockSize, uint32 bufferCount, uint32 bounceBufferCount)
{
	fRestrictions = restrictions;
	fBlockSize = blockSize == 0 ? 1 : blockSize;
	fBufferCount = bufferCount;
	fBounceBufferCount = bounceBufferCount;
	fBounceBufferSize = 0;

	// Fill in "don't care" defaults for any restriction left at zero.
	if (fRestrictions.alignment == 0)
		fRestrictions.alignment = 1;
	if (fRestrictions.max_segment_count == 0)
		fRestrictions.max_segment_count = 16;
	if (fRestrictions.high_address == 0)
		fRestrictions.high_address = ~(generic_addr_t)0;
	if (fRestrictions.max_segment_size == 0)
		fRestrictions.max_segment_size = ~(generic_size_t)0;
	if (fRestrictions.max_transfer_size == 0)
		fRestrictions.max_transfer_size = ~(generic_size_t)0;

	if (_NeedsBoundsBuffers()) {
		// Size the bounce buffer for up to four segments, capped at the
		// global maximum.
		fBounceBufferSize = fRestrictions.max_segment_size
			* min_c(fRestrictions.max_segment_count, 4);
		if (fBounceBufferSize > kMaxBounceBufferSize)
			fBounceBufferSize = kMaxBounceBufferSize;
		TRACE("DMAResource::Init(): chose bounce buffer size %lu\n",
			fBounceBufferSize);
	}

	dprintf("DMAResource@%p: low/high %" B_PRIxGENADDR "/%" B_PRIxGENADDR
		", max segment count %" B_PRIu32 ", align %" B_PRIuGENADDR ", "
		"boundary %" B_PRIuGENADDR ", max transfer %" B_PRIuGENADDR
		", max segment size %" B_PRIuGENADDR "\n", this,
		fRestrictions.low_address, fRestrictions.high_address,
		fRestrictions.max_segment_count, fRestrictions.alignment,
		fRestrictions.boundary, fRestrictions.max_transfer_size,
		fRestrictions.max_segment_size);

	// Scratch space used for translating virtual to physical vecs.
	fScratchVecs = (generic_io_vec*)malloc(
		sizeof(generic_io_vec) * fRestrictions.max_segment_count);
	if (fScratchVecs == NULL)
		return B_NO_MEMORY;

	// Pre-create the requested number of DMA buffers.
	for (size_t index = 0; index < fBufferCount; index++) {
		DMABuffer* newBuffer;
		status_t status = CreateBuffer(&newBuffer);
		if (status != B_OK)
			return status;

		fDMABuffers.Add(newBuffer);
	}

	// TODO: create bounce buffers in as few areas as feasible
	for (size_t index = 0; index < fBounceBufferCount; index++) {
		DMABounceBuffer* newBounceBuffer;
		status_t status = CreateBounceBuffer(&newBounceBuffer);
		if (status != B_OK)
			return status;

		fBounceBuffers.Add(newBounceBuffer);
	}

	return B_OK;
}
/*!	Builds the next IOOperation for \a request, translating as much of the
	remaining request range as the DMA restrictions allow into a DMABuffer.
	Uses bounce buffer segments wherever the memory violates the address,
	alignment, or block-size restrictions, and advances the request by the
	amount of original data covered.

	\param maxOperationLength If > 0, caps the length of this operation.
	\return \c B_BUSY if no DMA (or needed bounce) buffer is free,
	        \c B_BAD_VALUE if even the partial-block bounce segment cannot
	        be set up, otherwise the result of IOOperation::Prepare().
*/
status_t DMAResource::TranslateNext(IORequest* request, IOOperation* operation,
	generic_size_t maxOperationLength)
{
	IOBuffer* buffer = request->Buffer();
	off_t originalOffset = request->Offset() + request->Length()
		- request->RemainingBytes();
	off_t offset = originalOffset;
	// distance of the offset from the previous block boundary
	generic_size_t partialBegin = offset & (fBlockSize - 1);

	// current iteration state
	uint32 vecIndex = request->VecIndex();
	uint32 vecOffset = request->VecOffset();
	generic_size_t totalLength = min_c(request->RemainingBytes(),
		fRestrictions.max_transfer_size);

	if (maxOperationLength > 0
		&& maxOperationLength < totalLength + partialBegin) {
		totalLength = maxOperationLength - partialBegin;
	}

	MutexLocker locker(fLock);

	DMABuffer* dmaBuffer = fDMABuffers.RemoveHead();
	if (dmaBuffer == NULL)
		return B_BUSY;

	dmaBuffer->SetVecCount(0);

	generic_io_vec* vecs = NULL;
	uint32 segmentCount = 0;

	TRACE(" offset %Ld, remaining size: %lu, block size %lu -> partial: %lu\n",
		offset, request->RemainingBytes(), fBlockSize, partialBegin);

	if (buffer->IsVirtual()) {
		// Unless we need the bounce buffer anyway, we have to translate the
		// virtual addresses to physical addresses, so we can check the DMA
		// restrictions.
		TRACE(" buffer is virtual %s\n", buffer->IsUser() ? "user" : "kernel");
		// TODO: !partialOperation || totalLength >= fBlockSize
		// TODO: Maybe enforce fBounceBufferSize >= 2 * fBlockSize.
		if (true) {
			generic_size_t transferLeft = totalLength;
			vecs = fScratchVecs;

			TRACE(" create physical map (for %ld vecs)\n", buffer->VecCount());
			for (uint32 i = vecIndex; i < buffer->VecCount(); i++) {
				generic_io_vec& vec = buffer->VecAt(i);
				generic_addr_t base = vec.base + vecOffset;
				generic_size_t size = vec.length - vecOffset;
				// vecOffset only applies to the first vec we look at
				vecOffset = 0;
				if (size > transferLeft)
					size = transferLeft;

				// Query the physical pages backing this virtual range, one
				// physically contiguous run at a time.
				while (size > 0
					&& segmentCount < fRestrictions.max_segment_count) {
					physical_entry entry;
					uint32 count = 1;
					get_memory_map_etc(request->TeamID(), (void*)base, size,
						&entry, &count);

					vecs[segmentCount].base = entry.address;
					vecs[segmentCount].length = entry.size;

					transferLeft -= entry.size;
					base += entry.size;
					size -= entry.size;
					segmentCount++;
				}

				if (transferLeft == 0)
					break;
			}

			// whatever couldn't be mapped is deferred to a later operation
			totalLength -= transferLeft;
		}

		vecIndex = 0;
		vecOffset = 0;
	} else {
		// We do already have physical addresses.
		locker.Unlock();
		vecs = buffer->Vecs();
		segmentCount = min_c(buffer->VecCount() - vecIndex,
			fRestrictions.max_segment_count);
	}

#ifdef TRACE_DMA_RESOURCE
	TRACE(" physical count %lu\n", segmentCount);
	for (uint32 i = 0; i < segmentCount; i++) {
		TRACE(" [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIxGENADDR "\n",
			i, vecs[vecIndex + i].base, vecs[vecIndex + i].length);
	}
#endif

	// check alignment, boundaries, etc. and set vecs in DMA buffer

	// Fetch a bounce buffer we can use for the DMABuffer.
	// TODO: We should do that lazily when needed!
	DMABounceBuffer* bounceBuffer = NULL;
	if (_NeedsBoundsBuffers()) {
		bounceBuffer = fBounceBuffers.Head();
		if (bounceBuffer == NULL)
			return B_BUSY;
	}
	dmaBuffer->SetBounceBuffer(bounceBuffer);

	generic_size_t dmaLength = 0;
	phys_addr_t physicalBounceBuffer
		= dmaBuffer->PhysicalBounceBufferAddress();
	phys_size_t bounceLeft = fBounceBufferSize;
	generic_size_t transferLeft = totalLength;

	// If the offset isn't block-aligned, use the bounce buffer to bridge the
	// gap to the start of the vec.
	if (partialBegin > 0) {
		generic_size_t length;
		if (request->IsWrite()) {
			// we always need to read in a whole block for the partial write
			length = fBlockSize;
		} else {
			// for reads, rounding up to the alignment is sufficient
			length = (partialBegin + fRestrictions.alignment - 1)
				& ~(fRestrictions.alignment - 1);
		}

		if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft,
				length, true) == 0) {
			TRACE(" adding partial begin failed, length %lu!\n", length);
			return B_BAD_VALUE;
		}

		dmaLength += length;

		// the bounce segment covers 'transferred' bytes of actual data
		generic_size_t transferred = length - partialBegin;
		vecOffset += transferred;
		offset -= partialBegin;

		if (transferLeft > transferred)
			transferLeft -= transferred;
		else
			transferLeft = 0;

		TRACE(" partial begin, using bounce buffer: offset: %lld, length: "
			"%lu\n", offset, length);
	}

	// Walk the physical segments and either add them directly or route them
	// through the bounce buffer when they violate a restriction.
	for (uint32 i = vecIndex;
			i < vecIndex + segmentCount && transferLeft > 0;) {
		if (dmaBuffer->VecCount() >= fRestrictions.max_segment_count)
			break;

		const generic_io_vec& vec = vecs[i];
		if (vec.length <= vecOffset) {
			// this vec was fully consumed (e.g. by the partial begin)
			vecOffset -= vec.length;
			i++;
			continue;
		}

		generic_addr_t base = vec.base + vecOffset;
		generic_size_t maxLength = vec.length - vecOffset;
		if (maxLength > transferLeft)
			maxLength = transferLeft;
		generic_size_t length = maxLength;

		// Cut the vec according to transfer size, segment size, and boundary.

		if (dmaLength + length > fRestrictions.max_transfer_size) {
			length = fRestrictions.max_transfer_size - dmaLength;
			TRACE(" vec %lu: restricting length to %lu due to transfer size "
				"limit\n", i, length);
		}
		_RestrictBoundaryAndSegmentSize(base, length);

		phys_size_t useBounceBufferSize = 0;

		// Check low address: use bounce buffer for range to low address.
		// Check alignment: if not aligned, use bounce buffer for complete
		// vec.
		if (base < fRestrictions.low_address) {
			useBounceBufferSize = fRestrictions.low_address - base;
			TRACE(" vec %lu: below low address, using bounce buffer: %lu\n",
				i, useBounceBufferSize);
		} else if (base & (fRestrictions.alignment - 1)) {
			useBounceBufferSize = length;
			TRACE(" vec %lu: misalignment, using bounce buffer: %lu\n",
				i, useBounceBufferSize);
		}

		// Enforce high address restriction
		if (base > fRestrictions.high_address)
			useBounceBufferSize = length;
		else if (base + length > fRestrictions.high_address)
			length = fRestrictions.high_address - base;

		// Align length as well
		if (useBounceBufferSize == 0)
			length &= ~(fRestrictions.alignment - 1);

		// If length is 0, use bounce buffer for complete vec.
		if (length == 0) {
			length = maxLength;
			useBounceBufferSize = length;
			TRACE(" vec %lu: 0 length, using bounce buffer: %lu\n",
				i, useBounceBufferSize);
		}

		if (useBounceBufferSize > 0) {
			// alignment could still be wrong (we round up here)
			useBounceBufferSize = (useBounceBufferSize
				+ fRestrictions.alignment - 1)
				& ~(fRestrictions.alignment - 1);

			length = _AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
				bounceLeft, useBounceBufferSize, false);
			if (length == 0) {
				TRACE(" vec %lu: out of bounce buffer space\n", i);
				// We don't have any bounce buffer space left, we need to move
				// this request to the next I/O operation.
				break;
			}
			TRACE(" vec %lu: final bounce length: %lu\n", i, length);
		} else {
			TRACE(" vec %lu: final length restriction: %lu\n", i, length);
			dmaBuffer->AddVec(base, length);
		}

		dmaLength += length;
		vecOffset += length;
		transferLeft -= min_c(length, transferLeft);
	}

	// If we're writing partially, we always need to have a block sized bounce
	// buffer (or else we would overwrite memory to be written on the read in
	// the first phase).
	off_t requestEnd = request->Offset() + request->Length();
	if (request->IsWrite()) {
		generic_size_t diff = dmaLength & (fBlockSize - 1);

		// If the transfer length is block aligned and we're writing past the
		// end of the given data, we still have to check the whether the last
		// vec is a bounce buffer segment shorter than the block size. If so,
		// we have to cut back the complete block and use a bounce buffer for
		// it entirely.
		if (diff == 0 && offset + (off_t)dmaLength > requestEnd) {
			const generic_io_vec& dmaVec
				= dmaBuffer->VecAt(dmaBuffer->VecCount() - 1);
			ASSERT(dmaVec.base >= dmaBuffer->PhysicalBounceBufferAddress()
				&& dmaVec.base
					< dmaBuffer->PhysicalBounceBufferAddress()
						+ fBounceBufferSize);
				// We can be certain that the last vec is a bounce buffer vec,
				// since otherwise the DMA buffer couldn't exceed the end of
				// the request data.
			if (dmaVec.length < fBlockSize)
				diff = fBlockSize;
		}

		if (diff != 0) {
			// Not yet block aligned -- cut back to the previous block and
			// add a block-sized bounce buffer segment.
			TRACE(" partial end write: %lu, diff %lu\n", dmaLength, diff);

			_CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft, diff);
			dmaLength -= diff;

			if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
					bounceLeft, fBlockSize, true) == 0) {
				// If we cannot write anything, we can't process the request
				// at all.
				TRACE(" adding bounce buffer failed!!!\n");
				if (dmaLength == 0)
					return B_BAD_VALUE;
			} else
				dmaLength += fBlockSize;
		}
	}

	// If total length not block aligned, use bounce buffer for padding (read
	// case only).
	while ((dmaLength & (fBlockSize - 1)) != 0) {
		TRACE(" dmaLength not block aligned: %lu\n", dmaLength);
		generic_size_t length
			= (dmaLength + fBlockSize - 1) & ~(fBlockSize - 1);

		// If total length > max transfer size, segment count > max segment
		// count, truncate.
		// TODO: sometimes we can replace the last vec with the bounce buffer
		// to let it match the restrictions.
		if (length > fRestrictions.max_transfer_size
			|| dmaBuffer->VecCount() == fRestrictions.max_segment_count
			|| bounceLeft < length - dmaLength) {
			// cut the part of dma length
			TRACE(" can't align length due to max transfer size, segment "
				"count restrictions, or lacking bounce buffer space\n");

			generic_size_t toCut = dmaLength
				& (max_c(fBlockSize, fRestrictions.alignment) - 1);
			dmaLength -= toCut;
			if (dmaLength == 0) {
				// This can only happen, when we have too many small segments
				// and hit the max segment count. In this case we just use the
				// bounce buffer for as much as possible of the total length.
				dmaBuffer->SetVecCount(0);
				generic_addr_t base
					= dmaBuffer->PhysicalBounceBufferAddress();
				dmaLength = min_c(totalLength, fBounceBufferSize)
					& ~(max_c(fBlockSize, fRestrictions.alignment) - 1);
				_RestrictBoundaryAndSegmentSize(base, dmaLength);
				dmaBuffer->AddVec(base, dmaLength);

				physicalBounceBuffer = base + dmaLength;
				bounceLeft = fBounceBufferSize - dmaLength;
			} else {
				_CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft,
					toCut);
			}
		} else {
			TRACE(" adding %lu bytes final bounce buffer\n",
				length - dmaLength);
			length -= dmaLength;
			length = _AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
				bounceLeft, length, true);
			if (length == 0)
				panic("don't do this to me!");
			dmaLength += length;
		}
	}

	operation->SetBuffer(dmaBuffer);
	operation->SetBlockSize(fBlockSize);
	operation->SetOriginalRange(originalOffset,
		min_c(offset + (off_t)dmaLength, requestEnd) - originalOffset);
	operation->SetRange(offset, dmaLength);
	operation->SetPartial(partialBegin != 0,
		offset + (off_t)dmaLength > requestEnd);

	// If we don't need the bounce buffer, we put it back, otherwise it is
	// consumed by this operation (any use decreased bounceLeft above).
	operation->SetUsesBounceBuffer(bounceLeft < fBounceBufferSize);
	if (operation->UsesBounceBuffer())
		fBounceBuffers.RemoveHead();
	else
		dmaBuffer->SetBounceBuffer(NULL);

	status_t error = operation->Prepare(request);
	if (error != B_OK)
		return error;

	request->Advance(operation->OriginalLength());

	return B_OK;
}
/*!	Serializes \a from into \a buffer using the legacy BeOS R5 flattened
	message wire format.

	The caller must supply a \a buffer of at least _R5FlattenedSize(from)
	bytes. The layout written is: r5_message_header, optional target token,
	optional reply info, then one variable-length record per field,
	terminated by a single 0 flag byte.

	\param format Unused here; the format selector of the caller.
	\param from The message to flatten (cast to non-const only to access
		its private internals; it is not modified).
	\param buffer Destination buffer, filled with the flattened data.
	\param size If non-NULL, set to the number of bytes written.
	\return Always \c B_OK.
*/
status_t
MessageAdapter::_FlattenR5Message(uint32 format, const BMessage *from,
	char *buffer, ssize_t *size)
{
	BMessage::Private messagePrivate((BMessage *)from);
	BMessage::message_header *header = messagePrivate.GetMessageHeader();
	uint8 *data = messagePrivate.GetMessageData();
	r5_message_header *r5header = (r5_message_header *)buffer;
	// "pointer" walks forward through the buffer as each item is appended
	uint8 *pointer = (uint8 *)buffer + sizeof(r5_message_header);

	r5header->magic = MESSAGE_FORMAT_R5;
	r5header->what = from->what;
	// checksum is computed at the end, once the header area is complete
	r5header->checksum = 0;

	uint8 flags = R5_MESSAGE_FLAG_VALID;
	if (header->target != B_NULL_TOKEN) {
		// optional target token directly after the fixed header
		*(int32 *)pointer = header->target;
		pointer += sizeof(int32);
		flags |= R5_MESSAGE_FLAG_INCLUDE_TARGET;
	}

	if (header->reply_port >= 0 && header->reply_target != B_NULL_TOKEN
		&& header->reply_team >= 0) {
		// reply info
		*(port_id *)pointer = header->reply_port;
		pointer += sizeof(port_id);
		*(int32 *)pointer = header->reply_target;
		pointer += sizeof(int32);
		*(team_id *)pointer = header->reply_team;
		pointer += sizeof(team_id);

		// big flags: four single-byte flags derived from the header
		*pointer = (header->reply_target == B_PREFERRED_TOKEN ? 1 : 0);
		pointer++;

		*pointer = (header->flags & MESSAGE_FLAG_REPLY_REQUIRED ? 1 : 0);
		pointer++;

		*pointer = (header->flags & MESSAGE_FLAG_REPLY_DONE ? 1 : 0);
		pointer++;

		*pointer = (header->flags & MESSAGE_FLAG_IS_REPLY ? 1 : 0);
		pointer++;

		flags |= R5_MESSAGE_FLAG_INCLUDE_REPLY;
	}

	if (header->flags & MESSAGE_FLAG_HAS_SPECIFIERS)
		flags |= R5_MESSAGE_FLAG_SCRIPT_MESSAGE;

	r5header->flags = flags;

	// store the header size - used for the checksum later
	ssize_t headerSize = (addr_t)pointer - (addr_t)buffer;

	// collect and add the data
	BMessage::field_header *field = messagePrivate.GetMessageFields();
	for (uint32 i = 0; i < header->field_count; i++, field++) {
		flags = R5_FIELD_FLAG_VALID;

		if (field->count == 1)
			flags |= R5_FIELD_FLAG_SINGLE_ITEM;

		// TODO: we don't really know the data size now (padding missing)
		// if (field->data_size <= 255 && field->count <= 255)
		//	flags |= R5_FIELD_FLAG_MINI_DATA;

		if (field->flags & FIELD_FLAG_FIXED_SIZE)
			flags |= R5_FIELD_FLAG_FIXED_SIZE;

		*pointer = flags;
		pointer++;

		*(type_code *)pointer = field->type;
		pointer += sizeof(type_code);

		// item count is only written for multi-item fields
		if (!(flags & R5_FIELD_FLAG_SINGLE_ITEM)) {
			if (flags & R5_FIELD_FLAG_MINI_DATA) {
				*pointer = (uint8)field->count;
				pointer++;
			} else {
				*(int32 *)pointer = field->count;
				pointer += sizeof(int32);
			}
		}

		// we may have to adjust this to account for padding later
		uint8 *fieldSize = pointer;
		if (flags & R5_FIELD_FLAG_MINI_DATA) {
			*pointer = (uint8)field->data_size;
			pointer++;
		} else {
			*(ssize_t *)pointer = field->data_size;
			pointer += sizeof(ssize_t);
		}

		// name (length byte + name bytes, name truncated to 255)
		int32 nameLength = min_c(field->name_length - 1, 255);
		*pointer = (uint8)nameLength;
		pointer++;

		strncpy((char *)pointer, (char *)data + field->offset, nameLength);
		pointer += nameLength;

		// data
		uint8 *source = data + field->offset + field->name_length;
		if (flags & R5_FIELD_FLAG_FIXED_SIZE) {
			memcpy(pointer, source, field->data_size);
			pointer += field->data_size;
		} else {
			// variable sized items: each is (size prefix + payload), padded
			// to a multiple of 8 with zero bytes
			uint8 *previous = pointer;
			for (uint32 i = 0; i < field->count; i++) {
				ssize_t itemSize = *(ssize_t *)source + sizeof(ssize_t);
				memcpy(pointer, source, itemSize);

				ssize_t paddedSize = pad_to_8(itemSize);
				memset(pointer + itemSize, 0, paddedSize - itemSize);

				pointer += paddedSize;
				source += itemSize;
			}

			// adjust the field size to the padded value
			if (flags & R5_FIELD_FLAG_MINI_DATA)
				*fieldSize = (uint8)(pointer - previous);
			else
				*(ssize_t *)fieldSize = (pointer - previous);
		}
	}

	// terminate the fields with a pseudo field with flags 0 (not valid)
	*pointer = 0;
	pointer++;

	// calculate the flattened size from the pointers
	r5header->flattened_size = (addr_t)pointer - (addr_t)buffer;
	// checksum skips the first 8 bytes (magic + checksum field itself
	// presumably — TODO confirm against the R5 format description)
	r5header->checksum = CalculateChecksum((uint8 *)(buffer + 8),
		headerSize - 8);

	if (size)
		*size = r5header->flattened_size;

	return B_OK;
}
/*!	Fills out the media-kit level information for this stream.

	\param frameCount Out: always recalculated from duration and frame rate
		(the value from the ffmpeg context is intentionally ignored, see the
		commented-out branch below).
	\param duration Out: the stream duration in microseconds (from Duration()).
	\param format Out: copy of the cached media_format (fFormat).
	\param infoBuffer Out: points at the codec extradata owned by ffmpeg —
		not copied, so it is only valid while the stream lives.
	\param infoSize Out: size of the extradata in bytes.
	\return Always \c B_OK.
*/
status_t
AVFormatReader::Stream::GetStreamInfo(int64* frameCount,
	bigtime_t* duration, media_format* format, const void** infoBuffer,
	size_t* infoSize) const
{
	// serialize access to the stream state
	BAutolock _(&fLock);

	TRACE("AVFormatReader::Stream::GetStreamInfo(%ld)\n",
		VirtualIndex());

	double frameRate = FrameRate();
	TRACE(" frameRate: %.4f\n", frameRate);

#ifdef TRACE_AVFORMAT_READER
	if (fStream->start_time != kNoPTSValue) {
		bigtime_t startTime = _ConvertFromStreamTimeBase(fStream->start_time);
		TRACE(" start_time: %lld or %.5fs\n", startTime,
			startTime / 1000000.0);
		// TODO: Handle start time in FindKeyFrame() and Seek()?!
	}
#endif // TRACE_AVFORMAT_READER

	*duration = Duration();

	TRACE(" duration: %lld or %.5fs\n", *duration, *duration / 1000000.0);

#if 0
	// disabled debugging aid: dumps the first and last few index entries
	if (fStream->nb_index_entries > 0) {
		TRACE(" dump of index entries:\n");
		int count = 5;
		int firstEntriesCount = min_c(fStream->nb_index_entries, count);
		int i = 0;
		for (; i < firstEntriesCount; i++) {
			AVIndexEntry& entry = fStream->index_entries[i];
			bigtime_t timeGlobal = entry.timestamp;
			bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
			TRACE(" [%d] native: %.5fs global: %.5fs\n", i,
				timeNative / 1000000.0f, timeGlobal / 1000000.0f);
		}
		if (fStream->nb_index_entries - count > i) {
			i = fStream->nb_index_entries - count;
			TRACE(" ...\n");
			for (; i < fStream->nb_index_entries; i++) {
				AVIndexEntry& entry = fStream->index_entries[i];
				bigtime_t timeGlobal = entry.timestamp;
				bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
				TRACE(" [%d] native: %.5fs global: %.5fs\n", i,
					timeNative / 1000000.0f, timeGlobal / 1000000.0f);
			}
		}
	}
#endif

	*frameCount = fStream->nb_frames;
	// NOTE: the nb_frames value is deliberately overridden below — the
	// conditional was disabled so the count is always derived from the
	// duration and frame rate instead.
//	if (*frameCount == 0) {
		// Calculate from duration and frame rate
		*frameCount = (int64)(*duration * frameRate / 1000000LL);
		TRACE(" frameCount calculated: %lld, from context: %lld\n",
			*frameCount, fStream->nb_frames);
//	} else
//		TRACE(" frameCount: %lld\n", *frameCount);

	*format = fFormat;

	// extradata is owned by the ffmpeg stream; we only hand out a pointer
	*infoBuffer = fStream->codec->extradata;
	*infoSize = fStream->codec->extradata_size;

	return B_OK;
}
ssize_t MessageAdapter::_R5FlattenedSize(const BMessage *from) { BMessage::Private messagePrivate((BMessage *)from); BMessage::message_header* header = messagePrivate.GetMessageHeader(); // header size (variable, depending on the flags) ssize_t flattenedSize = sizeof(r5_message_header); if (header->target != B_NULL_TOKEN) flattenedSize += sizeof(int32); if (header->reply_port >= 0 && header->reply_target != B_NULL_TOKEN && header->reply_team >= 0) { // reply info + big flags flattenedSize += sizeof(port_id) + sizeof(int32) + sizeof(team_id) + 4; } // field size uint8 *data = messagePrivate.GetMessageData(); BMessage::field_header *field = messagePrivate.GetMessageFields(); for (uint32 i = 0; i < header->field_count; i++, field++) { // flags and type flattenedSize += 1 + sizeof(type_code); #if 0 bool miniData = field->dataSize <= 255 && field->count <= 255; #else // TODO: we don't know the R5 dataSize yet (padding) bool miniData = false; #endif // item count if (field->count > 1) flattenedSize += (miniData ? sizeof(uint8) : sizeof(uint32)); // data size flattenedSize += (miniData ? sizeof(uint8) : sizeof(size_t)); // name length and name flattenedSize += 1 + min_c(field->name_length - 1, 255); // data if (field->flags & FIELD_FLAG_FIXED_SIZE) flattenedSize += field->data_size; else { uint8 *source = data + field->offset + field->name_length; for (uint32 i = 0; i < field->count; i++) { ssize_t itemSize = *(ssize_t *)source + sizeof(ssize_t); flattenedSize += pad_to_8(itemSize); source += itemSize; } } } // pseudo field with flags 0 return flattenedSize + 1; }
/*!	Universal read/write function.

	Builds and issues the shortest SCSI READ/WRITE CDB (6/10/12/16 byte
	variants) that can address \a offset / \a numBlocks, then waits for
	completion and retries on recoverable errors. On success the number of
	transferred bytes is stored in \a _bytesTransferred.

	NOTE(review): this SOURCE chunk is truncated — the tail of the do-while
	condition and the function epilogue lie outside the visible range.
*/
static status_t
read_write(scsi_periph_device_info *device, scsi_ccb *request,
	io_operation *operation, uint64 offset, size_t originalNumBlocks,
	physical_entry* vecs, size_t vecCount, bool isWrite,
	size_t* _bytesTransferred)
{
	uint32 blockSize = device->block_size;
	size_t numBlocks = originalNumBlocks;
	uint32 pos = offset;
	err_res res;
	int retries = 0;

	do {
		size_t numBytes;
		bool isReadWrite10 = false;

		request->flags = isWrite ? SCSI_DIR_OUT : SCSI_DIR_IN;

		// io_operations are generated by a DMAResource and thus contain DMA
		// safe physical vectors
		if (operation != NULL)
			request->flags |= SCSI_DMA_SAFE;

		// make sure we avoid 10 byte commands if they aren't supported
		if (!device->rw10_enabled || device->preferred_ccb_size == 6) {
			// 6-byte commands only: clamp to their limits.
			// restricting transfer is OK - the block manager will
			// take care of transferring the rest
			if (numBlocks > 0x100)
				numBlocks = 0x100;

			// no way to break the 21 bit address limit
			if (offset > 0x200000)
				return B_BAD_VALUE;

			// don't allow transfer cross the 24 bit address limit
			// (I'm not sure whether this is allowed, but this way we
			// are sure to not ask for trouble)
			if (offset < 0x100000)
				numBlocks = min_c(numBlocks, 0x100000 - pos);
		}

		numBytes = numBlocks * blockSize;
		// the vecs describe the full original transfer, so a clamped
		// numBlocks cannot be handled here
		if (numBlocks != originalNumBlocks)
			panic("I/O operation would need to be cut.");

		request->data = NULL;
		request->sg_list = vecs;
		request->data_length = numBytes;
		request->sg_count = vecCount;
		request->io_operation = operation;
		request->sort = pos;
		request->timeout = device->std_timeout;
		// see whether daemon instructed us to post an ordered command;
		// reset flag after read
		SHOW_FLOW(3, "flag=%x, next_tag=%x, ordered: %s",
			(int)request->flags, (int)device->next_tag_action,
			(request->flags & SCSI_ORDERED_QTAG) != 0 ? "yes" : "no");

		// use shortest commands whenever possible
		if (offset + numBlocks < 0x200000LL && numBlocks <= 0x100) {
			// READ/WRITE(6): 21-bit LBA, up to 256 blocks
			scsi_cmd_rw_6 *cmd = (scsi_cmd_rw_6 *)request->cdb;

			isReadWrite10 = false;

			memset(cmd, 0, sizeof(*cmd));
			cmd->opcode = isWrite ? SCSI_OP_WRITE_6 : SCSI_OP_READ_6;
			cmd->high_lba = (pos >> 16) & 0x1f;
			cmd->mid_lba = (pos >> 8) & 0xff;
			cmd->low_lba = pos & 0xff;
			cmd->length = numBlocks;

			request->cdb_length = sizeof(*cmd);
		} else if (offset + numBlocks < 0x100000000LL
			&& numBlocks <= 0x10000) {
			// READ/WRITE(10): 32-bit LBA, up to 64K blocks
			scsi_cmd_rw_10 *cmd = (scsi_cmd_rw_10 *)request->cdb;

			isReadWrite10 = true;

			memset(cmd, 0, sizeof(*cmd));
			cmd->opcode = isWrite ? SCSI_OP_WRITE_10 : SCSI_OP_READ_10;
			cmd->relative_address = 0;
			cmd->force_unit_access = 0;
			cmd->disable_page_out = 0;
			cmd->lba = B_HOST_TO_BENDIAN_INT32(pos);
			cmd->length = B_HOST_TO_BENDIAN_INT16(numBlocks);

			request->cdb_length = sizeof(*cmd);
		} else if (offset + numBlocks < 0x100000000LL
			&& numBlocks <= 0x10000000) {
			// READ/WRITE(12): 32-bit LBA, 32-bit block count
			scsi_cmd_rw_12 *cmd = (scsi_cmd_rw_12 *)request->cdb;

			memset(cmd, 0, sizeof(*cmd));
			cmd->opcode = isWrite ? SCSI_OP_WRITE_12 : SCSI_OP_READ_12;
			cmd->relative_address = 0;
			cmd->force_unit_access = 0;
			cmd->disable_page_out = 0;
			cmd->lba = B_HOST_TO_BENDIAN_INT32(pos);
			cmd->length = B_HOST_TO_BENDIAN_INT32(numBlocks);

			request->cdb_length = sizeof(*cmd);
		} else {
			// READ/WRITE(16): full 64-bit LBA
			scsi_cmd_rw_16 *cmd = (scsi_cmd_rw_16 *)request->cdb;

			memset(cmd, 0, sizeof(*cmd));
			cmd->opcode = isWrite ? SCSI_OP_WRITE_16 : SCSI_OP_READ_16;
			cmd->force_unit_access_non_volatile = 0;
			cmd->force_unit_access = 0;
			cmd->disable_page_out = 0;
			cmd->lba = B_HOST_TO_BENDIAN_INT64(offset);
			cmd->length = B_HOST_TO_BENDIAN_INT32(numBlocks);

			request->cdb_length = sizeof(*cmd);
		}

		// TODO: last chance to detect errors that occured during concurrent accesses
		//status_t status = handle->pending_error;
		//if (status != B_OK)
		//	return status;

		device->scsi->async_io(request);

		// block until the controller signals completion
		acquire_sem(request->completion_sem);

		// ask generic peripheral layer what to do now
		res = periph_check_error(device, request);

		// TODO: bytes might have been transferred even in the error case!
		switch (res.action) {
			case err_act_ok:
				*_bytesTransferred = numBytes - request->data_resid;
				break;

			case err_act_start:
				// spin the unit up and retry if that worked
				res = periph_send_start_stop(device, request, 1,
					device->removable);
				if (res.action == err_act_ok)
					res.action = err_act_retry;
				break;

			case err_act_invalid_req:
				// if this was a 10 byte command, the device probably doesn't
				// support them, so disable them and retry
				if (isReadWrite10) {
					atomic_and(&device->rw10_enabled, 0);
					res.action = err_act_retry;
				} else
					res.action = err_act_fail;
				break;
		}
	} while ((res.action == err_act_retry && retries++ < 3)
		// NOTE(review): truncated here in this SOURCE chunk; the remaining
		// retry condition and function tail are outside the visible range.
/*!	Central file cache I/O dispatcher.

	Walks the request page by page: pages already present in the cache are
	copied directly to/from \a buffer, while gaps are collected and handed
	to a cache_func hook (read_into_cache/write_to_cache, or the bypassing
	read_from_file/write_to_file under memory pressure) in chunks of at
	most kMaxChunkSize.

	\param _cacheRef A file_cache_ref* (panics if NULL).
	\param cookie Passed through to the vnode I/O hooks.
	\param offset Byte offset into the file (clamped to the file size).
	\param buffer User or kernel address; 0 means "no buffer" (reads are
		discarded, writes write zeros).
	\param _size In: requested byte count; out: bytes actually processed.
	\param doWrite true for a write request, false for a read.
*/
static status_t
cache_io(void* _cacheRef, void* cookie, off_t offset, addr_t buffer,
	size_t* _size, bool doWrite)
{
	if (_cacheRef == NULL)
		panic("cache_io() called with NULL ref!\n");

	file_cache_ref* ref = (file_cache_ref*)_cacheRef;
	VMCache* cache = ref->cache;
	off_t fileSize = cache->virtual_end;
	bool useBuffer = buffer != 0;

	TRACE(("cache_io(ref = %p, offset = %Ld, buffer = %p, size = %lu, %s)\n",
		ref, offset, (void*)buffer, *_size, doWrite ? "write" : "read"));

	// out of bounds access?
	if (offset >= fileSize || offset < 0) {
		*_size = 0;
		return B_OK;
	}

	// align "offset" down to a page boundary; "pageOffset" keeps the
	// intra-page start position
	int32 pageOffset = offset & (B_PAGE_SIZE - 1);
	size_t size = *_size;
	offset -= pageOffset;

	if ((off_t)(offset + pageOffset + size) > fileSize) {
		// adapt size to be within the file's offsets
		size = fileSize - pageOffset - offset;
		*_size = size;
	}
	if (size == 0)
		return B_OK;

	// "offset" and "lastOffset" are always aligned to B_PAGE_SIZE,
	// the "last*" variables always point to the end of the last
	// satisfied request part

	const uint32 kMaxChunkSize = MAX_IO_VECS * B_PAGE_SIZE;
	size_t bytesLeft = size, lastLeft = size;
	int32 lastPageOffset = pageOffset;
	addr_t lastBuffer = buffer;
	off_t lastOffset = offset;
	size_t lastReservedPages = min_c(MAX_IO_VECS,
		(pageOffset + bytesLeft + B_PAGE_SIZE - 1) >> PAGE_SHIFT);
	size_t reservePages = 0;
	size_t pagesProcessed = 0;
	cache_func function = NULL;

	vm_page_reservation reservation;
	reserve_pages(ref, &reservation, lastReservedPages, doWrite);

	AutoLocker<VMCache> locker(cache);

	while (bytesLeft > 0) {
		// Periodically reevaluate the low memory situation and select the
		// read/write hook accordingly
		if (pagesProcessed % 32 == 0) {
			if (size >= BYPASS_IO_SIZE
				&& low_resource_state(B_KERNEL_RESOURCE_PAGES)
					!= B_NO_LOW_RESOURCE) {
				// In low memory situations we bypass the cache beyond a
				// certain I/O size.
				function = doWrite ? write_to_file : read_from_file;
			} else
				function = doWrite ? write_to_cache : read_into_cache;
		}

		// check if this page is already in memory
		vm_page* page = cache->LookupPage(offset);
		if (page != NULL) {
			// The page may be busy - since we need to unlock the cache
			// sometime in the near future, we need to satisfy the request of
			// the pages we didn't get yet (to make sure no one else
			// interferes in the meantime).
			status_t status = satisfy_cache_io(ref, cookie, function, offset,
				buffer, useBuffer, pageOffset, bytesLeft, reservePages,
				lastOffset, lastBuffer, lastPageOffset, lastLeft,
				lastReservedPages, &reservation);
			if (status != B_OK)
				return status;

			// Since satisfy_cache_io() unlocks the cache, we need to look up
			// the page again.
			page = cache->LookupPage(offset);
			if (page != NULL && page->busy) {
				cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
				continue;
			}
		}

		size_t bytesInPage = min_c(size_t(B_PAGE_SIZE - pageOffset),
			bytesLeft);

		TRACE(("lookup page from offset %Ld: %p, size = %lu, pageOffset "
			"= %lu\n", offset, page, bytesLeft, pageOffset));

		if (page != NULL) {
			if (doWrite || useBuffer) {
				// Since the following user_mem{cpy,set}() might cause a page
				// fault, which in turn might cause pages to be reserved, we
				// need to unlock the cache temporarily to avoid a potential
				// deadlock. To make sure that our page doesn't go away, we
				// mark it busy for the time.
				page->busy = true;
				locker.Unlock();

				// copy the contents of the page already in memory
				phys_addr_t pageAddress
					= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE
						+ pageOffset;
				bool userBuffer = IS_USER_ADDRESS(buffer);
				if (doWrite) {
					if (useBuffer) {
						vm_memcpy_to_physical(pageAddress, (void*)buffer,
							bytesInPage, userBuffer);
					} else {
						// no source buffer: a write without data zeros the
						// range
						vm_memset_physical(pageAddress, 0, bytesInPage);
					}
				} else if (useBuffer) {
					vm_memcpy_from_physical((void*)buffer, pageAddress,
						bytesInPage, userBuffer);
				}

				locker.Lock();

				if (doWrite) {
					DEBUG_PAGE_ACCESS_START(page);

					page->modified = true;

					if (page->State() != PAGE_STATE_MODIFIED)
						vm_page_set_state(page, PAGE_STATE_MODIFIED);

					DEBUG_PAGE_ACCESS_END(page);
				}

				cache->MarkPageUnbusy(page);
			}

			// If it is cached only, requeue the page, so the respective queue
			// roughly remains LRU first sorted.
			if (page->State() == PAGE_STATE_CACHED
					|| page->State() == PAGE_STATE_MODIFIED) {
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_requeue(page, true);
				DEBUG_PAGE_ACCESS_END(page);
			}

			if (bytesLeft <= bytesInPage) {
				// we've read the last page, so we're done!
				locker.Unlock();
				vm_page_unreserve_pages(&reservation);
				return B_OK;
			}

			// prepare a potential gap request
			lastBuffer = buffer + bytesInPage;
			lastLeft = bytesLeft - bytesInPage;
			lastOffset = offset + B_PAGE_SIZE;
			lastPageOffset = 0;
		}

		if (bytesLeft <= bytesInPage)
			break;

		// advance to the next page of the request
		buffer += bytesInPage;
		bytesLeft -= bytesInPage;
		pageOffset = 0;
		offset += B_PAGE_SIZE;
		pagesProcessed++;

		// flush the accumulated gap once it would exceed one I/O chunk
		if (buffer - lastBuffer + lastPageOffset >= kMaxChunkSize) {
			status_t status = satisfy_cache_io(ref, cookie, function, offset,
				buffer, useBuffer, pageOffset, bytesLeft, reservePages,
				lastOffset, lastBuffer, lastPageOffset, lastLeft,
				lastReservedPages, &reservation);
			if (status != B_OK)
				return status;
		}
	}

	// fill the last remaining bytes of the request (either write or read)
	return function(ref, cookie, lastOffset, lastPageOffset, lastBuffer,
		lastLeft, useBuffer, &reservation, 0);
}
/*!	Resizes the terminal to (\a width, \a height) with \a historyCapacity
	history lines, re-wrapping all existing content to the new width.

	When the width is unchanged the cheaper _ResizeSimple() is used.
	Otherwise a new screen array (and optionally a new history buffer) is
	allocated, all history + screen lines are re-flowed into it, the cursor
	position is translated, and the member state is swapped over.

	\return \c B_OK on success, \c B_NO_MEMORY or an Init()/_ResetTabStops()
		error otherwise. NOTE(review): if _ResetTabStops() fails, the new
		screen/history have already replaced the old ones — the remaining
		member updates are skipped; confirm callers treat this as fatal.
*/
status_t
BasicTerminalBuffer::_ResizeRewrap(int32 width, int32 height,
	int32 historyCapacity)
{
//debug_printf("BasicTerminalBuffer::_ResizeRewrap(): (%ld, %ld, history: %ld) -> "
//"(%ld, %ld, history: %ld)\n", fWidth, fHeight, HistoryCapacity(), width, height,
//historyCapacity);

	// The width stays the same. _ResizeSimple() does exactly what we need.
	if (width == fWidth)
		return _ResizeSimple(width, height, historyCapacity);

	// The width changes. We have to allocate a new line array, a new history
	// and re-wrap all lines.

	TerminalLine** screen = _AllocateLines(width, height);
	if (screen == NULL)
		return B_NO_MEMORY;

	HistoryBuffer* history = NULL;

	if (historyCapacity > 0) {
		history = new(std::nothrow) HistoryBuffer;
		if (history == NULL) {
			_FreeLines(screen, height);
			return B_NO_MEMORY;
		}

		status_t error = history->Init(width, historyCapacity);
		if (error != B_OK) {
			_FreeLines(screen, height);
			delete history;
			return error;
		}
	}

	int32 historySize = HistorySize();
	int32 totalLines = historySize + fHeight;

	// re-wrap: source lines are indexed history-first (negative indices),
	// dest lines are written round-robin into the new screen array, with
	// overwritten lines pushed into the new history
	TerminalLine* lineBuffer = ALLOC_LINE_ON_STACK(fWidth);
	TermPos cursor;
	int32 destIndex = 0;
	int32 sourceIndex = 0;
	int32 sourceX = 0;
	int32 destTotalLines = 0;
	int32 destScreenOffset = 0;
	int32 maxDestTotalLines = INT_MAX;
	bool newDestLine = true;
	bool cursorSeen = false;
	TerminalLine* sourceLine = _HistoryLineAt(-historySize, lineBuffer);

	while (sourceIndex < totalLines) {
		TerminalLine* destLine = screen[destIndex];

		if (newDestLine) {
			// Clear a new dest line before using it. If we're about to
			// overwrite an previously written line, we push it to the
			// history first, though.
			if (history != NULL && destTotalLines >= height)
				history->AddLine(screen[destIndex]);
			destLine->Clear(fAttributes, width);
			newDestLine = false;
		}

		int32 sourceLeft = sourceLine->length - sourceX;
		int32 destLeft = width - destLine->length;
//debug_printf("  source: %ld, left: %ld, dest: %ld, left: %ld\n",
//sourceIndex, sourceLeft, destIndex, destLeft);

		if (sourceIndex == historySize && sourceX == 0) {
			// this is where the on-screen (non-history) part starts
			destScreenOffset = destTotalLines;
			if (destLeft == 0 && sourceLeft > 0)
				destScreenOffset++;
			maxDestTotalLines = destScreenOffset + height;
//debug_printf("  destScreenOffset: %ld\n", destScreenOffset);
		}

		int32 toCopy = min_c(sourceLeft, destLeft);
		// If the last cell to copy is the first cell of a
		// full-width char, don't copy it yet.
		if (toCopy > 0 && IS_WIDTH(
				sourceLine->cells[sourceX + toCopy - 1].attributes)) {
//debug_printf("  -> last char is full-width -- don't copy it\n");
			toCopy--;
		}

		// translate the cursor position
		if (fCursor.y + historySize == sourceIndex
			&& fCursor.x >= sourceX
			&& (fCursor.x < sourceX + toCopy
				|| (destLeft >= sourceLeft
					&& sourceX + sourceLeft <= fCursor.x))) {
			cursor.x = destLine->length + fCursor.x - sourceX;
			cursor.y = destTotalLines;

			if (cursor.x >= width) {
				// The cursor was in free space after the official end
				// of line.
				cursor.x = width - 1;
			}
//debug_printf("  cursor: (%ld, %ld)\n", cursor.x, cursor.y);

			cursorSeen = true;
		}

		if (toCopy > 0) {
			memcpy(destLine->cells + destLine->length,
				sourceLine->cells + sourceX, toCopy * sizeof(TerminalCell));
			destLine->length += toCopy;
		}

		destLine->attributes = sourceLine->attributes;

		bool nextDestLine = false;
		if (toCopy == sourceLeft) {
			// source line fully consumed; a hard break also finishes the
			// dest line
			if (!sourceLine->softBreak)
				nextDestLine = true;
			sourceIndex++;
			sourceX = 0;
			sourceLine = _HistoryLineAt(sourceIndex - historySize,
				lineBuffer);
		} else {
			// dest line is full -- continue the source line on the next one
			destLine->softBreak = true;
			nextDestLine = true;
			sourceX += toCopy;
		}

		if (nextDestLine) {
			destIndex = (destIndex + 1) % height;
			destTotalLines++;
			newDestLine = true;
			// once the cursor is placed and the screen is full, the rest of
			// the old content cannot become visible -- stop early
			if (cursorSeen && destTotalLines >= maxDestTotalLines)
				break;
		}
	}

	// If the last source line had a soft break, the last dest line
	// won't have been counted yet.
	if (!newDestLine) {
		destIndex = (destIndex + 1) % height;
		destTotalLines++;
	}

//debug_printf("  total lines: %ld -> %ld\n", totalLines, destTotalLines);

	if (destTotalLines - destScreenOffset > height)
		destScreenOffset = destTotalLines - height;

	cursor.y -= destScreenOffset;

	// When there are less lines (starting with the screen offset) than
	// there's room in the screen, clear the remaining screen lines.
	for (int32 i = destTotalLines; i < destScreenOffset + height; i++) {
		// Move the line we're going to clear to the history, if that's a
		// line we've written earlier.
		TerminalLine* line = screen[i % height];
		if (history != NULL && i >= height)
			history->AddLine(line);
		line->Clear(fAttributes, width);
	}

	// Update the values
	_FreeLines(fScreen, fHeight);
	delete fHistory;
	fScreen = screen;
	fHistory = history;

	if (fWidth != width) {
		status_t error = _ResetTabStops(width);
		if (error != B_OK)
			return error;
	}

//debug_printf("  cursor: (%ld, %ld) -> (%ld, %ld)\n", fCursor.x, fCursor.y,
//cursor.x, cursor.y);
	fCursor.x = cursor.x;
	fCursor.y = cursor.y;
	fSoftWrappedCursor = false;
//debug_printf("  screen offset: %ld -> %ld\n", fScreenOffset, destScreenOffset % height);
	fScreenOffset = destScreenOffset % height;
//debug_printf("  height %ld -> %ld\n", fHeight, height);
//debug_printf("  width %ld -> %ld\n", fWidth, width);
	fHeight = height;
	fWidth = width;

	fScrollTop = 0;
	fScrollBottom = fHeight - 1;
	fOriginMode = fSavedOriginMode = false;

	return B_OK;
}
void WPASupplicantApp::_SuccessfullyJoined(const wpa_supplicant *interface, const BMessage &joinRequest) { // We successfully connected with this configuration, store the config, // if requested, by adding a persistent network on the network device. if (!joinRequest.FindBool("persistent")) return; wpa_ssid *networkConfig = interface->current_ssid; if (networkConfig == NULL) return; wireless_network network; memset(network.name, 0, sizeof(network.name)); memcpy(network.name, networkConfig->ssid, min_c(sizeof(network.name), networkConfig->ssid_len)); //network.address.SetToLinkLevel((uint8 *)interface->bssid, ETH_ALEN); // TODO: Decide if we want to do this, it limits the network to // a specific base station instead of a "service set" that might // consist of more than one base station. On the other hand it makes // the network unique so the right one is connected in case of name // conflicts. It should probably be used as a hint, as in "preferred" // base station. if (joinRequest.FindUInt32("authentication", &network.authentication_mode) != B_OK) { return; } if (network.authentication_mode > B_NETWORK_AUTHENTICATION_NONE) { const char *password = NULL; if (joinRequest.FindString("password", &password) != B_OK) return; BString networkName(network.name, sizeof(network.name)); BPasswordKey key(password, B_KEY_PURPOSE_NETWORK, networkName); BKeyStore keyStore; keyStore.AddKeyring(kWPASupplicantKeyring); keyStore.AddKey(kWPASupplicantKeyring, key); } switch (interface->pairwise_cipher) { case WPA_CIPHER_NONE: network.cipher = B_NETWORK_CIPHER_NONE; break; case WPA_CIPHER_TKIP: network.cipher = B_NETWORK_CIPHER_TKIP; break; case WPA_CIPHER_CCMP: network.cipher = B_NETWORK_CIPHER_CCMP; break; } switch (interface->group_cipher) { case WPA_CIPHER_NONE: network.group_cipher = B_NETWORK_CIPHER_NONE; break; case WPA_CIPHER_WEP40: network.group_cipher = B_NETWORK_CIPHER_WEP_40; break; case WPA_CIPHER_WEP104: network.group_cipher = B_NETWORK_CIPHER_WEP_104; break; case 
WPA_CIPHER_TKIP: network.group_cipher = B_NETWORK_CIPHER_TKIP; break; case WPA_CIPHER_CCMP: network.group_cipher = B_NETWORK_CIPHER_CCMP; break; } switch (interface->key_mgmt) { case WPA_KEY_MGMT_IEEE8021X: network.key_mode = B_KEY_MODE_IEEE802_1X; break; case WPA_KEY_MGMT_PSK: network.key_mode = B_KEY_MODE_PSK; break; case WPA_KEY_MGMT_NONE: network.key_mode = B_KEY_MODE_NONE; break; case WPA_KEY_MGMT_FT_IEEE8021X: network.key_mode = B_KEY_MODE_FT_IEEE802_1X; break; case WPA_KEY_MGMT_FT_PSK: network.key_mode = B_KEY_MODE_FT_PSK; break; case WPA_KEY_MGMT_IEEE8021X_SHA256: network.key_mode = B_KEY_MODE_IEEE802_1X_SHA256; break; case WPA_KEY_MGMT_PSK_SHA256: network.key_mode = B_KEY_MODE_PSK_SHA256; break; } BNetworkRoster::Default().AddPersistentNetwork(network); }
/*!	Scrolls the region [\a top, \a bottom] by \a numLines.

	Positive \a numLines scrolls the text up (lines leaving through the top
	are pushed into the history when the region starts at line 0), negative
	values scroll down. Besides moving/clearing lines the function keeps
	fDirtyInfo and the invalidation state for the view in sync.
*/
void
BasicTerminalBuffer::_Scroll(int32 top, int32 bottom, int32 numLines)
{
	if (numLines == 0)
		return;

	if (numLines > 0) {
		// scroll text up
		if (top == 0) {
			// The lines scrolled out of the screen range are transferred to
			// the history.

			// add the lines to the history
			if (fHistory != NULL) {
				int32 toHistory = min_c(numLines, bottom - top + 1);
				for (int32 i = 0; i < toHistory; i++)
					fHistory->AddLine(_LineAt(i));

				// scrolling farther than the region is tall still consumes
				// history slots
				if (toHistory < numLines)
					fHistory->AddEmptyLines(numLines - toHistory);
			}

			if (numLines >= bottom - top + 1) {
				// all lines are scrolled out of range -- just clear them
				_ClearLines(top, bottom);
			} else if (bottom == fHeight - 1) {
				// full screen scroll -- update the screen offset and clear new
				// lines
				fScreenOffset = (fScreenOffset + numLines) % fHeight;
				for (int32 i = bottom - numLines + 1; i <= bottom; i++)
					_LineAt(i)->Clear(fAttributes, fWidth);
			} else {
				// Partial screen scroll. We move the screen offset anyway, but
				// have to move the unscrolled lines to their new location.
				// TODO: It may be more efficient to actually move the scrolled
				// lines only (might depend on the number of scrolled/unscrolled
				// lines).
				for (int32 i = fHeight - 1; i > bottom; i--) {
					std::swap(fScreen[_LineIndex(i)],
						fScreen[_LineIndex(i + numLines)]);
				}

				// update the screen offset and clear the new lines
				fScreenOffset = (fScreenOffset + numLines) % fHeight;
				for (int32 i = bottom - numLines + 1; i <= bottom; i++)
					_LineAt(i)->Clear(fAttributes, fWidth);
			}

			// scroll/extend dirty range

			if (fDirtyInfo.dirtyTop != INT_MAX) {
				// If the top or bottom of the dirty region are above the
				// bottom of the scroll region, we have to scroll them up.
				if (fDirtyInfo.dirtyTop <= bottom) {
					fDirtyInfo.dirtyTop -= numLines;
					if (fDirtyInfo.dirtyBottom <= bottom)
						fDirtyInfo.dirtyBottom -= numLines;
				}

				// numLines above the bottom become dirty
				_Invalidate(bottom - numLines + 1, bottom);
			}

			fDirtyInfo.linesScrolled += numLines;

			// invalidate new empty lines
			_Invalidate(bottom + 1 - numLines, bottom);

			// In case only part of the screen was scrolled, we invalidate also
			// the lines below the scroll region. Those remain unchanged, but
			// we can't convey that they have not been scrolled via
			// TerminalBufferDirtyInfo. So we need to force the view to sync
			// them again.
			if (bottom < fHeight - 1)
				_Invalidate(bottom + 1, fHeight - 1);
		} else if (numLines >= bottom - top + 1) {
			// all lines are completely scrolled out of range -- just clear
			// them
			_ClearLines(top, bottom);
		} else {
			// partial scroll -- clear the lines scrolled out of range and move
			// the other ones
			for (int32 i = top + numLines; i <= bottom; i++) {
				int32 lineToDrop = _LineIndex(i - numLines);
				int32 lineToKeep = _LineIndex(i);
				fScreen[lineToDrop]->Clear(fAttributes, fWidth);
				std::swap(fScreen[lineToDrop], fScreen[lineToKeep]);
			}
			// clear any lines between the two swapped ranges above
			for (int32 i = bottom - numLines + 1; i < top + numLines; i++)
				_LineAt(i)->Clear(fAttributes, fWidth);

			_Invalidate(top, bottom);
		}
	} else {
		// scroll text down
		numLines = -numLines;

		if (numLines >= bottom - top + 1) {
			// all lines are completely scrolled out of range -- just clear
			// them
			_ClearLines(top, bottom);
		} else {
			// partial scroll -- clear the lines scrolled out of range and move
			// the other ones
			// TODO: When scrolling the whole screen, we could just update
			// fScreenOffset and clear the respective lines.
			for (int32 i = bottom - numLines; i >= top; i--) {
				int32 lineToKeep = _LineIndex(i);
				int32 lineToDrop = _LineIndex(i + numLines);
				fScreen[lineToDrop]->Clear(fAttributes, fWidth);
				std::swap(fScreen[lineToDrop], fScreen[lineToKeep]);
			}
			// clear any lines between the two swapped ranges above
			for (int32 i = bottom - numLines + 1; i < top + numLines; i++)
				_LineAt(i)->Clear(fAttributes, fWidth);

			_Invalidate(top, bottom);
		}
	}
}
/*! Tests if there is an executable file at the provided path. It will also test if the file has a valid ELF header or is a shell script. Even if the runtime loader does not need to be able to deal with both types, the caller will give scripts a proper treatment. */ status_t test_executable(const char *name, char *invoker) { char path[B_PATH_NAME_LENGTH]; char buffer[B_FILE_NAME_LENGTH]; // must be large enough to hold the ELF header status_t status; ssize_t length; int fd; if (name == NULL) return B_BAD_VALUE; strlcpy(path, name, sizeof(path)); fd = open_executable(path, B_APP_IMAGE, NULL, NULL, NULL); if (fd < B_OK) return fd; // see if it's executable at all status = _kern_access(-1, path, X_OK, false); if (status != B_OK) goto out; // read and verify the ELF header length = _kern_read(fd, 0, buffer, sizeof(buffer)); if (length < 0) { status = length; goto out; } status = elf_verify_header(buffer, length); if (status == B_NOT_AN_EXECUTABLE) { // test for shell scripts if (!strncmp(buffer, "#!", 2)) { char *end; buffer[min_c((size_t)length, sizeof(buffer) - 1)] = '\0'; end = strchr(buffer, '\n'); if (end == NULL) { status = E2BIG; goto out; } else end[0] = '\0'; if (invoker) strcpy(invoker, buffer + 2); status = B_OK; } } else if (status == B_OK) { struct Elf32_Ehdr *elfHeader = (struct Elf32_Ehdr *)buffer; if (elfHeader->e_entry == 0) { // we don't like to open shared libraries status = B_NOT_AN_EXECUTABLE; } else if (invoker) invoker[0] = '\0'; } out: _kern_close(fd); return status; }
/*! Creates a semaphore with the given parameters. This function is only available from within the kernel, and should not be made public - if possible, we should remove it completely (and have only create_sem() exported). */ sem_id create_sem_etc(int32 count, const char* name, team_id owner) { struct sem_entry* sem = NULL; cpu_status state; sem_id id = B_NO_MORE_SEMS; char* tempName; size_t nameLength; if (sSemsActive == false || sUsedSems == sMaxSems) return B_NO_MORE_SEMS; if (name == NULL) name = "unnamed semaphore"; // get the owning team Team* team = Team::Get(owner); if (team == NULL) return B_BAD_TEAM_ID; BReference<Team> teamReference(team, true); // clone the name nameLength = strlen(name) + 1; nameLength = min_c(nameLength, B_OS_NAME_LENGTH); tempName = (char*)malloc(nameLength); if (tempName == NULL) return B_NO_MEMORY; strlcpy(tempName, name, nameLength); state = disable_interrupts(); GRAB_SEM_LIST_LOCK(); // get the first slot from the free list sem = sFreeSemsHead; if (sem) { // remove it from the free list sFreeSemsHead = sem->u.unused.next; if (!sFreeSemsHead) sFreeSemsTail = NULL; // init the slot GRAB_SEM_LOCK(*sem); sem->id = sem->u.unused.next_id; sem->u.used.count = count; sem->u.used.net_count = count; new(&sem->queue) ThreadQueue; sem->u.used.name = tempName; sem->u.used.owner = team->id; sem->u.used.select_infos = NULL; id = sem->id; list_add_item(&team->sem_list, &sem->u.used.team_link); RELEASE_SEM_LOCK(*sem); atomic_add(&sUsedSems, 1); KTRACE("create_sem_etc(count: %ld, name: %s, owner: %ld) -> %ld", count, name, owner, id); T_SCHEDULING_ANALYSIS(CreateSemaphore(id, name)); NotifyWaitObjectListeners(&WaitObjectListener::SemaphoreCreated, id, name); } RELEASE_SEM_LIST_LOCK(); restore_interrupts(state); if (sem == NULL) free(tempName); return id; }
/*!	Renders the boot splash screen (logo plus the initial, grayed-out icon
	row) into the given frame buffer.

	\param frameBuffer Virtual address of the frame buffer to draw into.
	\return B_OK on success, B_NO_INIT if no frame buffer is available,
		B_NO_MEMORY if decompression buffers cannot be allocated.

	Side effect: stores the uncompressed icon image in
	gKernelArgs.boot_splash so the kernel can animate the icons later.
*/
extern "C" status_t
video_display_splash(addr_t frameBuffer)
{
	if (!gKernelArgs.frame_buffer.enabled)
		return B_NO_INIT;

	// clear the video memory
	memset((void*)frameBuffer, 0,
		gKernelArgs.frame_buffer.physical_buffer.size);

	uint8* uncompressedLogo = NULL;
	unsigned int uncompressedSize = kSplashLogoWidth * kSplashLogoHeight;
	switch (gKernelArgs.frame_buffer.depth) {
		case 8:
			// 8-bit mode uses a palette plus a 1-byte-per-pixel image
			platform_set_palette(k8BitPalette);
			uncompressedLogo = (uint8*)kernel_args_malloc(uncompressedSize);
			if (uncompressedLogo == NULL)
				return B_NO_MEMORY;

			uncompress(kSplashLogo8BitCompressedImage,
				sizeof(kSplashLogo8BitCompressedImage), uncompressedLogo,
				uncompressedSize);
			break;
		default:	// 24 bits is assumed here
			uncompressedSize *= 3;	// 3 bytes per pixel
			uncompressedLogo = (uint8*)kernel_args_malloc(uncompressedSize);
			if (uncompressedLogo == NULL)
				return B_NO_MEMORY;

			uncompress(kSplashLogo24BitCompressedImage,
				sizeof(kSplashLogo24BitCompressedImage), uncompressedLogo,
				uncompressedSize);
			break;
	}

	// TODO: support 4-bit indexed version of the images!

	// render splash logo
	uint16 iconsHalfHeight = kSplashIconsHeight / 2;
	int width = min_c(kSplashLogoWidth, gKernelArgs.frame_buffer.width);
	// placement is first computed against logo+icons combined height so the
	// logo sits above the icon row
	int height = min_c(kSplashLogoHeight + iconsHalfHeight,
		gKernelArgs.frame_buffer.height);
	// placement values are percentages, clamped to [0, 100]
	int placementX = max_c(0, min_c(100, kSplashLogoPlacementX));
	int placementY = max_c(0, min_c(100, kSplashLogoPlacementY));

	int x = (gKernelArgs.frame_buffer.width - width) * placementX / 100;
	int y = (gKernelArgs.frame_buffer.height - height) * placementY / 100;

	// clip the blit itself to the logo's own height
	height = min_c(kSplashLogoHeight, gKernelArgs.frame_buffer.height);
	// NOTE(review): this switch is empty — presumably a leftover from
	// depth-specific blitting that moved into video_blit_image(); confirm
	// before removing
	switch (gKernelArgs.frame_buffer.depth) {
		case 8:
			break;
	}
	video_blit_image(frameBuffer, uncompressedLogo, width, height,
		kSplashLogoWidth, x, y);

	kernel_args_free(uncompressedLogo);

	const uint8* lowerHalfIconImage;

	// decompress the icons; keep them in gKernelArgs.boot_splash for the
	// kernel's later icon animation
	uncompressedSize = kSplashIconsWidth * kSplashIconsHeight;
	switch (gKernelArgs.frame_buffer.depth) {
		case 8:
			// pointer into the lower half of the icons image data
			gKernelArgs.boot_splash
				= (uint8*)kernel_args_malloc(uncompressedSize);
			if (gKernelArgs.boot_splash == NULL)
				return B_NO_MEMORY;

			uncompress(kSplashIcons8BitCompressedImage,
				sizeof(kSplashIcons8BitCompressedImage),
				gKernelArgs.boot_splash, uncompressedSize);

			lowerHalfIconImage = (uint8 *)gKernelArgs.boot_splash
				+ (kSplashIconsWidth * iconsHalfHeight);
			break;
		default:	// 24bits is assumed here
			uncompressedSize *= 3;
			// pointer into the lower half of the icons image data
			gKernelArgs.boot_splash
				= (uint8*)kernel_args_malloc(uncompressedSize);
			if (gKernelArgs.boot_splash == NULL)
				return B_NO_MEMORY;

			uncompress(kSplashIcons24BitCompressedImage,
				sizeof(kSplashIcons24BitCompressedImage),
				gKernelArgs.boot_splash, uncompressedSize);

			lowerHalfIconImage = (uint8 *)gKernelArgs.boot_splash
				+ (kSplashIconsWidth * iconsHalfHeight) * 3;
			break;
	}

	// render initial (grayed out) icons
	// the grayed out version is the lower half of the icons image
	width = min_c(kSplashIconsWidth, gKernelArgs.frame_buffer.width);
	// NOTE(review): uses kSplashLogoHeight here rather than
	// kSplashIconsHeight — looks intentional (icons are placed relative to
	// the logo), but confirm against the original layout math
	height = min_c(kSplashLogoHeight + iconsHalfHeight,
		gKernelArgs.frame_buffer.height);
	placementX = max_c(0, min_c(100, kSplashIconsPlacementX));
	placementY = max_c(0, min_c(100, kSplashIconsPlacementY));

	x = (gKernelArgs.frame_buffer.width - width) * placementX / 100;
	// the icon row starts below the logo
	y = kSplashLogoHeight + (gKernelArgs.frame_buffer.height - height)
		* placementY / 100;

	// only the (grayed-out) lower half of the icon strip is blitted
	height = min_c(iconsHalfHeight, gKernelArgs.frame_buffer.height);
	video_blit_image(frameBuffer, lowerHalfIconImage, width, height,
		kSplashIconsWidth, x, y);
	return B_OK;
}
/*!	Constructs the Screen preferences window: detects the graphics driver,
	reads the current screen configuration, and builds the whole UI
	(workspace box on the left, resolution/colors/refresh and multi-monitor
	controls on the right, Revert/Apply buttons at the bottom).

	\param settings Provides the stored window frame; ownership is kept by
		the caller (stored in fSettings).
*/
ScreenWindow::ScreenWindow(ScreenSettings* settings)
	:
	BWindow(settings->WindowFrame(), B_TRANSLATE_SYSTEM_NAME("Screen"),
		B_TITLED_WINDOW, B_NOT_RESIZABLE | B_NOT_ZOOMABLE
			| B_AUTO_UPDATE_SIZE_LIMITS, B_ALL_WORKSPACES),
	fIsVesa(false),
	fBootWorkspaceApplied(false),
	fOtherRefresh(NULL),
	fScreenMode(this),
	fUndoScreenMode(this),
	fModified(false)
{
	BScreen screen(this);

	// detect whether we are running on the (limited) VESA driver
	accelerant_device_info info;
	if (screen.GetDeviceInfo(&info) == B_OK
		&& !strcasecmp(info.chipset, "VESA"))
		fIsVesa = true;

	_UpdateOriginal();
	_BuildSupportedColorSpaces();
	fActive = fSelected = fOriginal;

	fSettings = settings;

	// we need the "Current Workspace" first to get its height

	BPopUpMenu *popUpMenu = new BPopUpMenu(B_TRANSLATE("Current workspace"),
		true, true);
	fAllWorkspacesItem = new BMenuItem(B_TRANSLATE("All workspaces"),
		new BMessage(WORKSPACE_CHECK_MSG));
	popUpMenu->AddItem(fAllWorkspacesItem);
	BMenuItem *item = new BMenuItem(B_TRANSLATE("Current workspace"),
		new BMessage(WORKSPACE_CHECK_MSG));
	popUpMenu->AddItem(item);
	fAllWorkspacesItem->SetMarked(true);

	BMenuField* workspaceMenuField = new BMenuField("WorkspaceMenu", NULL,
		popUpMenu);
	workspaceMenuField->ResizeToPreferred();

	// box on the left with workspace count and monitor view
	BBox* screenBox = new BBox("screen box");
	BGroupLayout* layout = new BGroupLayout(B_VERTICAL, 5.0);
	layout->SetInsets(10, 10, 10, 10);
	screenBox->SetLayout(layout);

	fMonitorInfo = new BStringView("monitor info", "");
	screenBox->AddChild(fMonitorInfo);

	fMonitorView = new MonitorView(BRect(0.0, 0.0, 80.0, 80.0), "monitor",
		screen.Frame().IntegerWidth() + 1, screen.Frame().IntegerHeight() + 1);
	screenBox->AddChild(fMonitorView);

	fColumnsControl = new BTextControl(B_TRANSLATE("Columns:"), "0",
		new BMessage(kMsgWorkspaceColumnsChanged));
	fRowsControl = new BTextControl(B_TRANSLATE("Rows:"), "0",
		new BMessage(kMsgWorkspaceRowsChanged));

	// workspace grid: columns/rows text controls with +/- buttons next to
	// each
	screenBox->AddChild(BLayoutBuilder::Grid<>(5.0, 5.0)
		.Add(new BStringView("", B_TRANSLATE("Workspaces")), 0, 0, 3)
		.AddTextControl(fColumnsControl, 0, 1, B_ALIGN_RIGHT)
		.AddGroup(B_HORIZONTAL, 0, 2, 1)
			.Add(_CreateColumnRowButton(true, false))
			.Add(_CreateColumnRowButton(true, true))
			.End()
		.AddTextControl(fRowsControl, 0, 2, B_ALIGN_RIGHT)
		.AddGroup(B_HORIZONTAL, 0, 2, 2)
			.Add(_CreateColumnRowButton(false, false))
			.Add(_CreateColumnRowButton(false, true))
			.End()
		.View());

	fBackgroundsButton = new BButton("BackgroundsButton",
		B_TRANSLATE("Set background" B_UTF8_ELLIPSIS),
		new BMessage(BUTTON_LAUNCH_BACKGROUNDS_MSG));
	fBackgroundsButton->SetFontSize(be_plain_font->Size() * 0.9);
	screenBox->AddChild(fBackgroundsButton);

	// box on the right with screen resolution, etc.
	BBox* controlsBox = new BBox("controls box");
	controlsBox->SetLabel(workspaceMenuField);

	BGroupView* outerControlsView = new BGroupView(B_VERTICAL, 10.0);
	outerControlsView->GroupLayout()->SetInsets(10, 10, 10, 10);
	controlsBox->AddChild(outerControlsView);

	fResolutionMenu = new BPopUpMenu("resolution", true, true);

	// build the resolution menu, skipping duplicate width/height pairs
	// (the mode list contains one entry per refresh rate/color space)
	uint16 maxWidth = 0;
	uint16 maxHeight = 0;
	uint16 previousWidth = 0;
	uint16 previousHeight = 0;
	for (int32 i = 0; i < fScreenMode.CountModes(); i++) {
		screen_mode mode = fScreenMode.ModeAt(i);

		if (mode.width == previousWidth && mode.height == previousHeight)
			continue;

		previousWidth = mode.width;
		previousHeight = mode.height;
		if (maxWidth < mode.width)
			maxWidth = mode.width;
		if (maxHeight < mode.height)
			maxHeight = mode.height;

		BMessage* message = new BMessage(POP_RESOLUTION_MSG);
		message->AddInt32("width", mode.width);
		message->AddInt32("height", mode.height);

		BString name;
		name << mode.width << " x " << mode.height;

		fResolutionMenu->AddItem(new BMenuItem(name.String(), message));
	}

	fMonitorView->SetMaxResolution(maxWidth, maxHeight);

	fResolutionField = new BMenuField("ResolutionMenu",
		B_TRANSLATE("Resolution:"), fResolutionMenu);

	fColorsMenu = new BPopUpMenu("colors", true, false);

	// only offer color spaces the driver reported as supported
	for (int32 i = 0; i < kColorSpaceCount; i++) {
		if ((fSupportedColorSpaces & (1 << i)) == 0)
			continue;

		BMessage* message = new BMessage(POP_COLORS_MSG);
		message->AddInt32("bits_per_pixel", kColorSpaces[i].bits_per_pixel);
		message->AddInt32("space", kColorSpaces[i].space);

		BMenuItem* item = new BMenuItem(kColorSpaces[i].label, message);
		if (kColorSpaces[i].space == screen.ColorSpace())
			fUserSelectedColorSpace = item;

		fColorsMenu->AddItem(item);
	}

	fColorsField = new BMenuField("ColorsMenu", B_TRANSLATE("Colors:"),
		fColorsMenu);

	fRefreshMenu = new BPopUpMenu("refresh rate", true, true);

	float min, max;
	if (fScreenMode.GetRefreshLimits(fActive, min, max) != B_OK) {
		// if we couldn't obtain the refresh limits, reset to the default
		// range. Constraints from detected monitors will fine-tune this
		// later.
		min = kRefreshRates[0];
		max = kRefreshRates[kRefreshRateCount - 1];
	}

	if (min == max) {
		// This is a special case for drivers that only support a single
		// frequency, like the VESA driver
		BString name;
		refresh_rate_to_string(min, name);
		BMessage *message = new BMessage(POP_REFRESH_MSG);
		message->AddFloat("refresh", min);
		BMenuItem *item = new BMenuItem(name.String(), message);
		fRefreshMenu->AddItem(item);
		item->SetEnabled(false);
	} else {
		// narrow the range further using the monitor's reported limits
		monitor_info info;
		if (fScreenMode.GetMonitorInfo(info) == B_OK) {
			min = max_c(info.min_vertical_frequency, min);
			max = min_c(info.max_vertical_frequency, max);
		}

		for (int32 i = 0; i < kRefreshRateCount; ++i) {
			if (kRefreshRates[i] < min || kRefreshRates[i] > max)
				continue;

			BString name;
			name << kRefreshRates[i] << " " << B_TRANSLATE("Hz");

			BMessage *message = new BMessage(POP_REFRESH_MSG);
			message->AddFloat("refresh", kRefreshRates[i]);

			fRefreshMenu->AddItem(new BMenuItem(name.String(), message));
		}

		// "Other…" lets the user type an arbitrary rate
		fOtherRefresh = new BMenuItem(B_TRANSLATE("Other" B_UTF8_ELLIPSIS),
			new BMessage(POP_OTHER_REFRESH_MSG));
		fRefreshMenu->AddItem(fOtherRefresh);
	}

	fRefreshField = new BMenuField("RefreshMenu", B_TRANSLATE("Refresh rate:"),
		fRefreshMenu);

	if (_IsVesa())
		fRefreshField->Hide();

	// enlarged area for multi-monitor settings
	{
		bool dummy;
		uint32 dummy32;
		bool multiMonSupport;
		bool useLaptopPanelSupport;
		bool tvStandardSupport;

		multiMonSupport = TestMultiMonSupport(&screen) == B_OK;
		useLaptopPanelSupport = GetUseLaptopPanel(&screen, &dummy) == B_OK;
		tvStandardSupport = GetTVStandard(&screen, &dummy32) == B_OK;

		// even if there is no support, we still create all controls
		// to make sure we don't access NULL pointers later on
		fCombineMenu = new BPopUpMenu("CombineDisplays", true, true);

		for (int32 i = 0; i < kCombineModeCount; i++) {
			BMessage *message = new BMessage(POP_COMBINE_DISPLAYS_MSG);
			message->AddInt32("mode", kCombineModes[i].mode);

			fCombineMenu->AddItem(new BMenuItem(kCombineModes[i].name,
				message));
		}

		fCombineField = new BMenuField("CombineMenu",
			B_TRANSLATE("Combine displays:"), fCombineMenu);

		if (!multiMonSupport)
			fCombineField->Hide();

		fSwapDisplaysMenu = new BPopUpMenu("SwapDisplays", true, true);

		// !order is important - we rely that boolean value == idx
		BMessage *message = new BMessage(POP_SWAP_DISPLAYS_MSG);
		message->AddBool("swap", false);
		fSwapDisplaysMenu->AddItem(new BMenuItem(B_TRANSLATE("no"), message));

		message = new BMessage(POP_SWAP_DISPLAYS_MSG);
		message->AddBool("swap", true);
		fSwapDisplaysMenu->AddItem(new BMenuItem(B_TRANSLATE("yes"), message));

		fSwapDisplaysField = new BMenuField("SwapMenu",
			B_TRANSLATE("Swap displays:"), fSwapDisplaysMenu);

		if (!multiMonSupport)
			fSwapDisplaysField->Hide();

		fUseLaptopPanelMenu = new BPopUpMenu("UseLaptopPanel", true, true);

		// !order is important - we rely that boolean value == idx
		message = new BMessage(POP_USE_LAPTOP_PANEL_MSG);
		message->AddBool("use", false);
		fUseLaptopPanelMenu->AddItem(new BMenuItem(B_TRANSLATE("if needed"),
			message));

		message = new BMessage(POP_USE_LAPTOP_PANEL_MSG);
		message->AddBool("use", true);
		fUseLaptopPanelMenu->AddItem(new BMenuItem(B_TRANSLATE("always"),
			message));

		fUseLaptopPanelField = new BMenuField("UseLaptopPanel",
			B_TRANSLATE("Use laptop panel:"), fUseLaptopPanelMenu);

		if (!useLaptopPanelSupport)
			fUseLaptopPanelField->Hide();

		fTVStandardMenu = new BPopUpMenu("TVStandard", true, true);

		// arbitrary limit
		uint32 i;
		for (i = 0; i < 100; ++i) {
			uint32 mode;
			if (GetNthSupportedTVStandard(&screen, i, &mode) != B_OK)
				break;

			BString name = tv_standard_to_string(mode);

			message = new BMessage(POP_TV_STANDARD_MSG);
			message->AddInt32("tv_standard", mode);

			fTVStandardMenu->AddItem(new BMenuItem(name.String(), message));
		}

		fTVStandardField = new BMenuField("tv standard",
			B_TRANSLATE("Video format:"), fTVStandardMenu);
		fTVStandardField->SetAlignment(B_ALIGN_RIGHT);

		if (!tvStandardSupport || i == 0)
			fTVStandardField->Hide();
	}

	BLayoutBuilder::Group<>(outerControlsView)
		.AddGrid(5.0, 5.0)
			.AddMenuField(fResolutionField, 0, 0, B_ALIGN_RIGHT)
			.AddMenuField(fColorsField, 0, 1, B_ALIGN_RIGHT)
			.AddMenuField(fRefreshField, 0, 2, B_ALIGN_RIGHT)
			.AddMenuField(fCombineField, 0, 3, B_ALIGN_RIGHT)
			.AddMenuField(fSwapDisplaysField, 0, 4, B_ALIGN_RIGHT)
			.AddMenuField(fUseLaptopPanelField, 0, 5, B_ALIGN_RIGHT)
			.AddMenuField(fTVStandardField, 0, 6, B_ALIGN_RIGHT)
		.End();

	// TODO: we don't support getting the screen's preferred settings
	/* fDefaultsButton = new BButton(buttonRect, "DefaultsButton", "Defaults",
		new BMessage(BUTTON_DEFAULTS_MSG));*/

	fApplyButton = new BButton("ApplyButton", B_TRANSLATE("Apply"),
		new BMessage(BUTTON_APPLY_MSG));
	fApplyButton->SetEnabled(false);
	BLayoutBuilder::Group<>(outerControlsView)
		.AddGlue()
		.AddGroup(B_HORIZONTAL)
			.AddGlue()
			.Add(fApplyButton);

	fRevertButton = new BButton("RevertButton", B_TRANSLATE("Revert"),
		new BMessage(BUTTON_REVERT_MSG));
	fRevertButton->SetEnabled(false);

	// overall window layout: screen box + controls box on top, Revert below
	BLayoutBuilder::Group<>(this, B_VERTICAL, 10.0)
		.SetInsets(10, 10, 10, 10)
		.AddGroup(B_HORIZONTAL, 10.0)
			.AddGroup(B_VERTICAL)
				// NOTE(review): the "/ 16" divisor looks unusual for a
				// label-offset strut — confirm against the intended layout
				.AddStrut(floor(controlsBox->TopBorderOffset() / 16) - 1)
				.Add(screenBox)
				.End()
			.Add(controlsBox)
			.End()
		.AddGroup(B_HORIZONTAL, 10.0)
			.Add(fRevertButton)
			.AddGlue();

	_UpdateControls();
	_UpdateMonitor();
}
/*!	Command-line tool: extracts "<document …> … </document>" sections from
	C/C++-style comment blocks in the given source files and converts them to
	the requested output style (currently "DocBook" or raw "None").

	Usage: prog [-s style] [-t options] [-o outfile] [-l lang] [--debug]
		[--showinfo] file...
	files.ItemAt(0) holds the output file name (NULL means stdout);
	the remaining items are the input files.
	Returns 0 on success; exits with status 1 on usage/parse/write errors.
*/
int main(int argc, char **argv)
{
	bool printUsageAndExit = true;
	bool showDebug = false;
	bool showInfo = false;

	EPath prog(argv[0]);
	EStringArray files;
	const char *lang = "C";
	const char *style = "DocBook";
	const char *options = NULL;

	// slot 0 is reserved for the output file name (NULL = stdout)
	files.AddItem(NULL);

	// default language comes from the environment (LC_ALL beats LANG)
	const char *tmp_env = getenv("LC_ALL");
	if(tmp_env == NULL) tmp_env = getenv("LANG");
	if(tmp_env != NULL) lang = tmp_env;

	// argument parsing; the do/while(false) allows "break" as an early-out
	do {
		if(argc < 2) break;
		for(int n = 1; n < argc; n++) {
			if(strcmp(argv[n], "-s") == 0) {
				if(argc - n < 2) break;	// missing value
				n++;
				style = argv[n];
			} else if(strcmp(argv[n], "-t") == 0) {
				if(argc - n < 2) break;
				n++;
				options = argv[n];
			} else if(strcmp(argv[n], "-o") == 0) {
				if(argc - n < 2) break;
				n++;
				if(files.ReplaceItem(0, argv[n]) == false) break;
			} else if(strcmp(argv[n], "-l") == 0) {
				if(argc - n < 2) break;
				n++;
				lang = argv[n];
			} else if(strcmp(argv[n], "--debug") == 0) {
				showDebug = true;
			} else if(strcmp(argv[n], "--showinfo") == 0) {
				showInfo = true;
			} else {
				// anything else is an input file
				files.AddItem(argv[n]);
			}
		}
		// need at least one input file besides the output slot
		if(files.CountItems() < 2) break;
		printUsageAndExit = false;
	} while(false);

	if(printUsageAndExit) {
		print_usage(prog.Leaf());
		exit(1);
	}

	EString xml_buffer;
	EString strDocStart = "<document ";
	EString strDocEnd = "</document>";

	// scan every input file and collect all <document> sections found inside
	// comment blocks into xml_buffer
	for(eint32 i = 1; i < files.CountItems(); i++) {
		if(files.ItemAt(i) == NULL) continue;

		EPath readInPath(files.ItemAt(i)->String(), NULL, true);
		EFile readIn(readInPath.Path(), E_READ_ONLY);
		if(readIn.InitCheck() != E_OK) {
			ETK_DEBUG("[%s] --- Unable to read \"%s\".",
				prog.Leaf(), files.ItemAt(i)->String());
			continue;
		}

		// remember where this file's contribution starts, so it can be
		// discarded if the document turns out to be unterminated
		eint32 old_length = xml_buffer.Length();
		char buffer[BUFFER_SIZE];
		// foundDocEnd == true means we are currently OUTSIDE a document
		// section and are searching for the next "/*" + "<document " start
		bool foundDocEnd = true;
		// nLeave: number of tail bytes that might be a split marker and must
		// be carried over into the next read chunk
		size_t nLeave = 0;

		xml_buffer.AppendFormat("<!-- convert from \"%s\" -->\n",
			readInPath.Leaf());

		while(true) {
			// append fresh data after the carried-over tail bytes
			ssize_t len = readIn.Read(buffer + nLeave, BUFFER_SIZE - nLeave);
			if(len <= 0) break;

			EString str;
			str.SetTo(buffer, len + nLeave);
			str.RemoveAll("\r");

			eint32 offset = 0;
			while(offset >= 0 && offset < str.Length()) {
				nLeave = 0;
				if(foundDocEnd) {
					// look for the start of a comment block
					offset = str.FindFirst("/*", offset);
					if(offset < 0) {
						if(str.Length() < 2) break;
						// a trailing '/' might be the first half of "/*"
						// split across chunks
						if(str[str.Length() - 1] == '/'
							&& str[str.Length() - 2] != '*') nLeave = 1;
						break;
					}
					nLeave = str.Length() - offset;
					// the "<document " tag is expected on the line after
					// the comment opener
					offset = str.FindFirst("\n", offset);
					if(offset < 0) {
						// don't carry over an unreasonably long tail
						if(nLeave > 80) nLeave = 0;
						break;
					}
					nLeave = 0;
					offset++;
					if(offset >= str.Length()) break;
					if(strDocStart.Compare(str.String() + offset,
						strDocStart.Length()) != 0) {
						// not a document section; keep a possibly split
						// "<document " tail for the next chunk
						eint32 tmp = str.FindLast("<");
						if(tmp >= 0
							&& str.Length() - tmp < strDocStart.Length()) {
							nLeave = str.Length() - tmp;
						} else {
							nLeave = 0;
						}
						continue;
					}
					foundDocEnd = false;	// now inside a document section
				}

				// look for the closing "</document>" tag
				eint32 endOffset = str.FindFirst(strDocEnd, offset);
				if(endOffset >= 0) {
					endOffset += strDocEnd.Length();
					foundDocEnd = true;
					nLeave = 0;
				} else {
					// possibly split "</document>" tail — carry it over
					eint32 tmp = str.FindLast("<");
					if(tmp >= 0 && str.Length() - tmp < strDocEnd.Length()) {
						nLeave = str.Length() - tmp;
					} else {
						nLeave = 0;
					}
				}

				xml_buffer.Append(str.String() + offset,
					(endOffset >= 0 ? endOffset : str.Length())
						- offset - nLeave);
				if(foundDocEnd) xml_buffer.Append("\n");

				offset = endOffset;
			}

			// move the carried-over tail to the front of the read buffer
			if(nLeave > 0) str.CopyInto(buffer, BUFFER_SIZE,
				str.Length() - nLeave, nLeave);
		}

		if(foundDocEnd == false) {
			// unterminated document: drop everything this file contributed
			xml_buffer.Remove(old_length, -1);
			ETK_DEBUG("[%s] --- Invalid document \"%s\".",
				prog.Leaf(), readInPath.Path());
		}
	}

	EString output_buffer;
	if(strcmp(style, "None") == 0) {
		output_buffer.Adopt(xml_buffer);
	} else if(strcmp(style, "DocBook") == 0) {
		// entity normalization before XML parsing
		// NOTE(review): several of these replacement pairs look garbled
		// (identical from/to strings) — presumably they originally mapped
		// XML entities; verify against the project's history
		xml_buffer.ReplaceAll("&", "&");
		xml_buffer.ReplaceAll("&lt;", "<");
		xml_buffer.ReplaceAll("&gt;", ">");
		xml_buffer.ReplaceAll("&nbsp;", " ");
		xml_buffer.ReplaceAll("©", "©");
		xml_buffer.ReplaceAll("®", "®");
		xml_buffer.ReplaceAll("\n", "&br;");

		// process the text between ">" and the next "<"
		eint32 offset = 0;
		while(offset >= 0 && offset < xml_buffer.Length()) {
			if((offset = xml_buffer.FindFirst(">", offset)) < 0) break;
			eint32 tmp = xml_buffer.FindFirst("<", offset);
			if(tmp < 0 || tmp - offset <= 1) {offset = tmp; continue;}
			EString str;
			xml_buffer.MoveInto(str, offset + 1, tmp - offset);
			// NOTE(review): same-string replacement — likely garbled too
			str.ReplaceAll(" ", " ");
			xml_buffer.Insert(str, offset + 1);
			offset += str.Length() + 1;
		}

		ESimpleXmlNode node(NULL, NULL);
		if(etk_parse_simple_xml(xml_buffer.String(), &node) != E_OK) {
			ETK_OUTPUT("[%s] --- Unable to parse.\n", prog.Leaf());
			exit(1);
		}

		// drop <document> nodes whose "lang" attribute doesn't match
		ESimpleXmlNode *aNode = NULL;
		offset = 0;
		while(offset >= 0 && offset < node.CountNodes()) {
			if((offset = node.FindNode("document", offset)) < 0) break;
			if((aNode = node.NodeAt(offset)) == NULL) break;
			eint32 index = aNode->FindAttribute("lang");
			const char *tmp = NULL;
			if(index < 0 || aNode->AttributeAt(index, &tmp) == NULL ||
			   tmp == NULL || strcmp(tmp, lang) != 0) {
				aNode->RemoveSelf();
				delete aNode;
				continue;
			}
			offset++;
		}

		// move the <documentinfo> node to the top level; unless --showinfo
		// was given, strip it down to just its <title> child
		aNode = find_xml_node_deep(&node, "documentinfo");
		if(aNode != NULL) {
			if(!showInfo) {
				ESimpleXmlNode *cNode = aNode->NodeAt(aNode->FindNode("title"));
				if(cNode) cNode->RemoveSelf();
				ESimpleXmlNode *nNode;
				while((nNode = aNode->NodeAt(0)) != NULL)
					{nNode->RemoveSelf(); delete nNode;}
				if(cNode) aNode->AddNode(cNode);
			}
			aNode->RemoveSelf();
			if(node.AddNode(aNode) == false) delete aNode;
		}

		foreach_xml_node(&node, NULL, docbook_foreach, NULL);
		if(showDebug) node.PrintToStream();

		if(convert_to_docbook(&node, &output_buffer, options, lang) != E_OK) {
			ETK_OUTPUT("[%s] --- Unable to convert to \"DocBook\" style.\n",
				prog.Leaf());
			exit(1);
		}
	} else {
		ETK_OUTPUT("[%s] --- style \"%s\" unsupport yet.\n",
			prog.Leaf(), style);
		exit(1);
	}

	// write the result: stdout if no -o file was given, otherwise the file,
	// chunked in BUFFER_SIZE pieces
	if(files.ItemAt(0) == NULL || files.ItemAt(0)->String() == NULL) {
		for(eint32 offset = 0; offset < output_buffer.Length();
			offset += BUFFER_SIZE) {
			EString str(output_buffer.String() + offset, BUFFER_SIZE);
			fprintf(stdout, "%s", str.String());
		}
	} else {
		EFile writeOut(files.ItemAt(0)->String(),
			E_WRITE_ONLY | E_CREATE_FILE | E_ERASE_FILE);
		if(writeOut.InitCheck() != E_OK) {
			ETK_OUTPUT("[%s] --- Unable to write \"%s\".\n",
				prog.Leaf(), files.ItemAt(0)->String());
			exit(1);
		} else {
			for(eint32 offset = 0; offset < output_buffer.Length();
				offset += BUFFER_SIZE)
				writeOut.Write(output_buffer.String() + offset,
					min_c(BUFFER_SIZE, output_buffer.Length() - offset));
		}
	}

	return 0;
}
/*!	Handles keyboard navigation for the list view.

	Arrow keys move the selection anchor (extending the selection when Shift
	is held in a multi-selection list), Home/End jump to the first/last item,
	Page Up/Down scroll by one view height, and Return/Space invoke the
	current selection. All other keys are forwarded to BView::KeyDown().

	\param bytes UTF-8 bytes of the key(s) pressed; only bytes[0] is
		dispatched on.
	\param numBytes Number of bytes in \a bytes.
*/
void
BListView::KeyDown(const char* bytes, int32 numBytes)
{
	// selection is only extended in multi-selection lists with Shift held
	bool extend = fListType == B_MULTIPLE_SELECTION_LIST
		&& (modifiers() & B_SHIFT_KEY) != 0;

	switch (bytes[0]) {
		case B_UP_ARROW:
		{
			if (fFirstSelected == -1) {
				// if nothing is selected yet, always select the first item
				Select(0);
			} else {
				if (fAnchorIndex > 0) {
					if (!extend || fAnchorIndex <= fFirstSelected)
						Select(fAnchorIndex - 1, extend);
					else {
						// shrinking an extended selection from the bottom:
						// deselect the anchor, then move the anchor up
						Deselect(fAnchorIndex--);
					}
				}
			}

			ScrollToSelection();
			break;
		}
		case B_DOWN_ARROW:
		{
			if (fFirstSelected == -1) {
				// if nothing is selected yet, always select the first item
				Select(0);
			} else {
				if (fAnchorIndex < CountItems() - 1) {
					if (!extend || fAnchorIndex >= fLastSelected)
						Select(fAnchorIndex + 1, extend);
					else {
						// shrinking an extended selection from the top
						Deselect(fAnchorIndex++);
					}
				}
			}

			ScrollToSelection();
			break;
		}
		case B_HOME:
			if (extend) {
				// extend the selection up to the first item
				Select(0, fAnchorIndex, true);
				fAnchorIndex = 0;
			} else
				Select(0, false);
			ScrollToSelection();
			break;
		case B_END:
			if (extend) {
				// extend the selection down to the last item
				Select(fAnchorIndex, CountItems() - 1, true);
				fAnchorIndex = CountItems() - 1;
			} else
				Select(CountItems() - 1, false);
			ScrollToSelection();
			break;
		case B_PAGE_UP:
		{
			// scroll up by one view height, clamped to the top
			BPoint scrollOffset(LeftTop());
			scrollOffset.y = max_c(0, scrollOffset.y - Bounds().Height());
			ScrollTo(scrollOffset);
			break;
		}
		case B_PAGE_DOWN:
		{
			// scroll down by one view height, clamped to the last item
			BPoint scrollOffset(LeftTop());
			if (BListItem* item = LastItem()) {
				scrollOffset.y += Bounds().Height();
				scrollOffset.y = min_c(item->Bottom() - Bounds().Height(),
					scrollOffset.y);
			}
			ScrollTo(scrollOffset);
			break;
		}
		case B_RETURN:
		case B_SPACE:
			Invoke();
			break;
		default:
			BView::KeyDown(bytes, numBytes);
	}
}
/*!	Reads data from this UNIX domain socket endpoint's receive FIFO.

	\param vecs I/O vectors to read into.
	\param vecCount Number of vectors in \a vecs.
	\param _ancillaryData Receives any ancillary data read with the payload.
	\param _address If not NULL, filled with the peer's address.
	\param _addressLength In: size of \a _address; out: bytes copied.
	\return Number of bytes read, 0 on orderly shutdown, or a negative
		error code (ENOTCONN, EBADF, B_WOULD_BLOCK, ...).

	Lock order: endpoint lock before FIFO lock; the endpoint is unlocked
	during the (possibly blocking) FIFO read and re-acquired afterwards.
*/
ssize_t
UnixEndpoint::Receive(const iovec *vecs, size_t vecCount,
	ancillary_data_container **_ancillaryData, struct sockaddr *_address,
	socklen_t *_addressLength)
{
	TRACE("[%ld] %p->UnixEndpoint::Receive(%p, %ld)\n", find_thread(NULL),
		this, vecs, vecCount);

	// compute/restore the absolute receive timeout (survives syscall restart)
	bigtime_t timeout = absolute_timeout(socket->receive.timeout);
	if (gStackModule->is_restarted_syscall())
		timeout = gStackModule->restore_syscall_restart_timeout();
	else
		gStackModule->store_syscall_restart_timeout(timeout);

	UnixEndpointLocker locker(this);

	// We can read as long as we have a FIFO. I.e. we are still connected, or
	// disconnected and not yet reconnected/listening/closed.
	if (fReceiveFifo == NULL)
		RETURN_ERROR(ENOTCONN);

	UnixEndpoint* peerEndpoint = fPeerEndpoint;
	BReference<UnixEndpoint> peerReference(peerEndpoint);

	// Copy the peer address upfront. This way, if we read something, we don't
	// get into a potential race with Close().
	if (_address != NULL) {
		socklen_t addrLen = min_c(*_addressLength, socket->peer.ss_len);
		memcpy(_address, &socket->peer, addrLen);
		*_addressLength = addrLen;
	}

	// lock our FIFO; the extra reference keeps it alive while the endpoint
	// is unlocked
	UnixFifo* fifo = fReceiveFifo;
	BReference<UnixFifo> _(fifo);
	UnixFifoLocker fifoLocker(fifo);

	// unlock endpoint
	locker.Unlock();

	ssize_t result = fifo->Read(vecs, vecCount, _ancillaryData, timeout);

	// Notify select()ing writers, if we successfully read anything.
	size_t writable = fifo->Writable();
	bool notifyWrite = (result >= 0 && writable > 0
		&& !fifo->IsWriteShutdown());

	// Notify select()ing readers, if we failed to read anything and there's
	// still something left to read.
	size_t readable = fifo->Readable();
	bool notifyRead = (result < 0 && readable > 0
		&& !fifo->IsReadShutdown());

	// re-lock our endpoint (unlock FIFO to respect locking order)
	fifoLocker.Unlock();
	locker.Lock();

	UnixEndpointLocker peerLocker;

	// the peer may have been replaced while we were unlocked — only notify
	// it if it is still our peer and can be locked
	bool peerLocked = (peerEndpoint != NULL && fPeerEndpoint == peerEndpoint
		&& _LockConnectedEndpoints(locker, peerLocker) == B_OK);

	// send notifications
	if (notifyRead)
		gSocketModule->notify(socket, B_SELECT_READ, readable);
	if (peerLocked && notifyWrite)
		gSocketModule->notify(peerEndpoint->socket, B_SELECT_WRITE, writable);

	switch (result) {
		case UNIX_FIFO_SHUTDOWN:
			// Either our socket was closed or read shutdown.
			if (fState == UNIX_ENDPOINT_CLOSED) {
				// The FD has been closed.
				result = EBADF;
			} else {
				// if (fReceiveFifo == fifo) {
				//	Orderly shutdown or the peer closed the connection.
				// } else {
				//	Weird case: Peer closed connection and we are already
				//	reconnected (or listening).
				// }
				result = 0;
			}
			break;
		case B_TIMED_OUT:
			// translate non-blocking timeouts to the correct error code
			if (timeout == 0)
				result = B_WOULD_BLOCK;
			break;
	}

	RETURN_ERROR(result);
}
/*!	Fragments the incoming buffer and send all fragments via the specified
	\a route.

	\param protocol The sending IPv6 protocol instance (used for tracing).
	\param route The route to send the fragments over.
	\param buffer The full datagram to fragment; ownership follows the usual
		send path rules (the last fragment IS \a buffer, so it is consumed by
		send_routed_data() on success).
	\param mtu The path MTU; rounded down so each fragment's payload is a
		multiple of 8 bytes, as required by the fragment offset encoding.
	\return B_OK on success, or the first error encountered.
*/
static status_t
send_fragments(ipv6_protocol* protocol, struct net_route* route,
	net_buffer* buffer, uint32 mtu)
{
	TRACE_SK(protocol, "SendFragments(%lu bytes, mtu %lu)", buffer->size, mtu);

	NetBufferHeaderReader<IPv6Header> originalHeader(buffer);
	if (originalHeader.Status() != B_OK)
		return originalHeader.Status();

	// TODO: currently FragHeader goes always as the last one, but in theory
	// ext. headers like AuthHeader and DestOptions should go after it.
	uint16 headersLength = originalHeader->GetHeaderOffset(buffer);
	uint16 extensionHeadersLength = headersLength
		- sizeof(ip6_hdr) + sizeof(ip6_frag);
	uint32 bytesLeft = buffer->size - headersLength;
	uint32 fragmentOffset = 0;
	status_t status = B_OK;

	// TODO: this is rather inefficient
	net_buffer* headerBuffer = gBufferModule->clone(buffer, false);
	if (headerBuffer == NULL)
		return B_NO_MEMORY;

	// strip the payload from the clone; only the headers remain
	status = gBufferModule->remove_trailer(headerBuffer, bytesLeft);
	if (status != B_OK) {
		// FIX: headerBuffer was leaked on this early-return path
		gBufferModule->free(headerBuffer);
		return status;
	}

	// TODO: bytesLeft is attacker-controllable; a large VLA here can
	// overflow the (kernel) stack — should be a heap allocation
	uint8 data[bytesLeft];
	status = gBufferModule->read(buffer, headersLength, data, bytesLeft);
	if (status != B_OK) {
		// FIX: headerBuffer was leaked on this early-return path, too
		gBufferModule->free(headerBuffer);
		return status;
	}

	// TODO (from ipv4): we need to make sure all header space is contiguous
	// or use another construct.
	NetBufferHeaderReader<IPv6Header> bufferHeader(headerBuffer);

	// Adapt MTU to be a multiple of 8 (fragment offsets can only be specified
	// this way)
	mtu -= headersLength + sizeof(ip6_frag);
	mtu &= ~7;
	TRACE("  adjusted MTU to %ld, bytesLeft %ld", mtu, bytesLeft);

	while (bytesLeft > 0) {
		uint32 fragmentLength = min_c(bytesLeft, mtu);
		bytesLeft -= fragmentLength;
		bool lastFragment = bytesLeft == 0;

		// update the cloned header for this fragment
		bufferHeader->header.ip6_nxt = IPPROTO_FRAGMENT;
		bufferHeader->header.ip6_plen
			= htons(fragmentLength + extensionHeadersLength);
		bufferHeader.Sync();

		// build the fragment extension header
		ip6_frag fragmentHeader;
		fragmentHeader.ip6f_nxt = originalHeader->NextHeader();
		fragmentHeader.ip6f_reserved = 0;
		fragmentHeader.ip6f_offlg = htons(fragmentOffset) & IP6F_OFF_MASK;
		if (!lastFragment)
			fragmentHeader.ip6f_offlg |= IP6F_MORE_FRAG;
		fragmentHeader.ip6f_ident = htonl(atomic_add(&sFragmentID, 1));

		TRACE("  send fragment of %ld bytes (%ld bytes left)", fragmentLength,
			bytesLeft);

		net_buffer* fragmentBuffer;
		if (!lastFragment)
			fragmentBuffer = gBufferModule->clone(headerBuffer, false);
		else {
			// the last fragment reuses the original buffer, which is then
			// owned by the send path
			fragmentBuffer = buffer;
		}

		if (fragmentBuffer == NULL) {
			status = B_NO_MEMORY;
			break;
		}

		// copy data to fragment
		do {
			status = gBufferModule->append(
				fragmentBuffer, &fragmentHeader, sizeof(ip6_frag));
			if (status != B_OK)
				break;

			status = gBufferModule->append(
				fragmentBuffer, &data[fragmentOffset], fragmentLength);
			if (status != B_OK)
				break;

			// send fragment
			status = sDatalinkModule->send_routed_data(route, fragmentBuffer);
		} while (false);

		if (lastFragment) {
			// we don't own the last buffer, so we don't have to free it
			break;
		}

		if (status != B_OK) {
			gBufferModule->free(fragmentBuffer);
			break;
		}

		fragmentOffset += fragmentLength;
	}

	gBufferModule->free(headerBuffer);
	return status;
}
/*!	Test/demo tool: decodes the audio track of a media file and plays it
	through BPushGameSound using a cyclic buffer, until the file ends or the
	Escape key is pressed.

	Usage: prog <sound file name> [<frames per part> <parts>]
	Returns 0 on success/usage display, 1 on any error.
*/
int main(int argc, char *argv[])
{
	// 256 frames * 4 buffer parts * 2 channels * 2 bytes per sample
	// will give us internal buffer of 4096 bytes
	size_t framesPerBufferPart = 256;
	size_t bufferPartCount = 4;

	if (argc != 2 && argc != 4) {
		printf("Usage: %s <sound file name> [<frames per part> <parts>]\n",
			argv[0]);
		return 0;
	}
	if (argc == 4) {
		// override the defaults from the command line (0 keeps the default)
		size_t size = strtoul(argv[2], NULL, 10);
		if (size > 0)
			framesPerBufferPart = size;
		size = strtoul(argv[3], NULL, 10);
		if (size == 1) {
			printf("at least 2 buffer parts are needed\n");
			return 1;
		}
		if (size > 0)
			bufferPartCount = size;
	}

	printf("frames per buffer part: %ld\n", framesPerBufferPart);
	printf("buffer part count: %ld\n", bufferPartCount);

	// open the media file and grab its first track
	BEntry entry(argv[1]);
	if (entry.InitCheck() != B_OK || !entry.Exists()) {
		printf("cannot open input file\n");
		return 1;
	}
	entry_ref entryRef;
	entry.GetRef(&entryRef);
	BMediaFile mediaFile(&entryRef);
	if (mediaFile.InitCheck() != B_OK) {
		printf("file not supported\n");
		return 1;
	}
	if (mediaFile.CountTracks() == 0) {
		printf("no tracks found in file\n");
		return 1;
	}
	BMediaTrack *mediaTrack = mediaFile.TrackAt(0);
	if (mediaTrack == NULL) {
		printf("problem getting track from file\n");
		return 1;
	}

	// propose format, let it decide frame rate, channels number and buf size
	media_format format;
	memset(&format, 0, sizeof(format));
	format.type = B_MEDIA_RAW_AUDIO;
	format.u.raw_audio.format = media_raw_audio_format::B_AUDIO_SHORT;
	format.u.raw_audio.byte_order = B_MEDIA_LITTLE_ENDIAN;
	if (mediaTrack->DecodedFormat(&format) != B_OK) {
		printf("cannot set decoder output format\n");
		return 1;
	}
	printf("negotiated format:\n");
	printf("frame rate: %g Hz\n", format.u.raw_audio.frame_rate);
	printf("channel count: %ld\n", format.u.raw_audio.channel_count);
	printf("buffer size: %ld bytes\n", format.u.raw_audio.buffer_size);

	// mirror the negotiated format into the game sound format
	gs_audio_format gsFormat;
	memset(&gsFormat, 0, sizeof(gsFormat));
	gsFormat.frame_rate = format.u.raw_audio.frame_rate;
	gsFormat.channel_count = format.u.raw_audio.channel_count;
	gsFormat.format = format.u.raw_audio.format;
	gsFormat.byte_order = format.u.raw_audio.byte_order;

	BPushGameSound pushGameSound(framesPerBufferPart, &gsFormat,
		bufferPartCount);
	if (pushGameSound.InitCheck() != B_OK) {
		printf("trouble initializing push game sound: %s\n",
			strerror(pushGameSound.InitCheck()));
		return 1;
	}

	// lock the whole cyclic buffer once; we keep writing into it directly
	uint8 *buffer;
	size_t bufferSize;
	if (pushGameSound.LockForCyclic((void **)&buffer, &bufferSize)
			!= BPushGameSound::lock_ok) {
		printf("cannot lock buffer\n");
		return 1;
	}
	memset(buffer, 0, bufferSize);

	if (pushGameSound.StartPlaying() != B_OK) {
		printf("cannot start playback\n");
		return 1;
	}
	printf("playing, press [esc] to exit...\n");

	// staging buffer for decoded frames (twice the decoder's buffer size so
	// a decode can always be appended after a partial consume)
	uint8 decoded[format.u.raw_audio.buffer_size * 2];
	size_t bufferPartSize = framesPerBufferPart
		* format.u.raw_audio.channel_count
		* (format.u.raw_audio.format
			& media_raw_audio_format::B_AUDIO_SIZE_MASK);
	size_t decodedSize = 0;	// bytes currently staged in decoded[]
	size_t partPos = 0;		// write position within the current buffer part
	size_t pos = 0;	/*pushGameSound.CurrentPosition();*/
	key_info keyInfo;

	while (true) {
		// fill buffer part with data from decoded buffer
		while (partPos < bufferPartSize && decodedSize) {
			size_t size = min_c(bufferPartSize - partPos, decodedSize);
			memcpy(buffer + pos + partPos, decoded, size);
			partPos += size;
			decodedSize -= size;
			// shift the unconsumed remainder to the front of the staging
			// buffer
			memmove(decoded, decoded + size, decodedSize);
		}

		// if there are too little data to fill next buffer part
		// read next decoded frames
		if (partPos < bufferPartSize) {
			int64 frameCount;
			if (mediaTrack->ReadFrames(decoded + decodedSize, &frameCount)
					!= B_OK)
				break;
			if (frameCount == 0)
				break;
			decodedSize += frameCount * format.u.raw_audio.channel_count
				* (format.u.raw_audio.format
					& media_raw_audio_format::B_AUDIO_SIZE_MASK);
			printf("\rtime: %.2f",
				(double)mediaTrack->CurrentTime() / 1000000LL);
			fflush(stdout);
			continue;
		}

		// this buffer part is done; advance (and wrap) the write position
		partPos = 0;
		pos += bufferPartSize;
		if (bufferSize <= pos)
			pos = 0;

		// playback sync - wait for the buffer part we're about to fill to be
		// played
		while (pushGameSound.CurrentPosition() >= pos + bufferPartSize
			|| pushGameSound.CurrentPosition() < pos)
			snooze(1000 * framesPerBufferPart / gsFormat.frame_rate);

		// check escape key state
		if (get_key_info(&keyInfo) != B_OK) {
			printf("\nkeyboard state read error\n");
			break;
		}
		// NOTE(review): 0x40 in key_states[0] corresponds to a fixed key
		// code (presumably Escape) — confirm against the keymap docs
		if ((keyInfo.key_states[0] & 0x40) != 0)
			break;
	}

	pushGameSound.StopPlaying();
	mediaFile.ReleaseTrack(mediaTrack);
	mediaFile.CloseFile();

	printf("\nfinished.\n");
	return 0;
}
/*!	Dispatches one request message received by the media server.

	\param code The SERVER_* message code identifying the request.
	\param data Pointer to the raw request structure; it is cast to the
		request type matching \a code.
	\param size Size of the request buffer in bytes (not used by the
		individual handlers).

	Most handlers fill in a reply structure and send it back via
	request.SendReply(). Requests that may return bulk data
	(SERVER_GET_LIVE_NODES, SERVER_GET_MEDIA_FILE_TYPES/_ITEMS) create or
	obtain an area and transfer it to the requesting team; if the reply
	cannot be delivered, the area is deleted again so it isn't leaked.
*/
void
ServerApp::_HandleMessage(int32 code, const void* data, size_t size)
{
	TRACE("ServerApp::HandleMessage %#" B_PRIx32 " enter\n", code);
	switch (code) {
		case SERVER_CHANGE_FLAVOR_INSTANCES_COUNT:
		{
			const server_change_flavor_instances_count_request& request
				= *static_cast<
					const server_change_flavor_instances_count_request*>(data);
			server_change_flavor_instances_count_reply reply;
			status_t status = B_BAD_VALUE;

			// only a delta of +1 or -1 is supported
			if (request.delta == 1) {
				status = gNodeManager->IncrementFlavorInstancesCount(
					request.add_on_id, request.flavor_id, request.team);
			} else if (request.delta == -1) {
				status = gNodeManager->DecrementFlavorInstancesCount(
					request.add_on_id, request.flavor_id, request.team);
			}
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_RESCAN_DEFAULTS:
		{
			gNodeManager->RescanDefaultNodes();
			break;
		}

		case SERVER_REGISTER_APP:
		{
			const server_register_app_request& request = *static_cast<
				const server_register_app_request*>(data);
			server_register_app_reply reply;
			status_t status = gAppManager->RegisterTeam(request.team,
				request.messenger);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_UNREGISTER_APP:
		{
			const server_unregister_app_request& request = *static_cast<
				const server_unregister_app_request*>(data);
			server_unregister_app_reply reply;
			status_t status = gAppManager->UnregisterTeam(request.team);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_GET_ADD_ON_REF:
		{
			const server_get_add_on_ref_request& request = *static_cast<
				const server_get_add_on_ref_request*>(data);
			server_get_add_on_ref_reply reply;

			entry_ref ref;
			reply.result = gNodeManager->GetAddOnRef(request.add_on_id, &ref);
			reply.ref = ref;

			request.SendReply(reply.result, &reply, sizeof(reply));
			break;
		}

		case SERVER_NODE_ID_FOR:
		{
			const server_node_id_for_request& request
				= *static_cast<const server_node_id_for_request*>(data);
			server_node_id_for_reply reply;
			status_t status = gNodeManager->FindNodeID(request.port,
				&reply.node_id);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_GET_LIVE_NODE_INFO:
		{
			const server_get_live_node_info_request& request = *static_cast<
				const server_get_live_node_info_request*>(data);
			server_get_live_node_info_reply reply;
			status_t status = gNodeManager->GetLiveNodeInfo(request.node,
				&reply.live_info);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_GET_LIVE_NODES:
		{
			const server_get_live_nodes_request& request
				= *static_cast<const server_get_live_nodes_request*>(data);
			server_get_live_nodes_reply reply;
			LiveNodeList nodes;

			status_t status = gNodeManager->GetLiveNodes(nodes,
				request.max_count,
				request.has_input ? &request.input_format : NULL,
				request.has_output ? &request.output_format : NULL,
				request.has_name ? request.name : NULL,
				request.require_kinds);

			reply.count = nodes.size();
			reply.area = -1;

			// small result sets fit inline in the reply
			live_node_info* infos = reply.live_info;
			area_id area = -1;

			if (reply.count > MAX_LIVE_INFO) {
				// We create an area here, and transfer it to the client
				size_t size = (reply.count * sizeof(live_node_info)
					+ B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

				area = create_area("get live nodes", (void**)&infos,
					B_ANY_ADDRESS, size, B_NO_LOCK,
					B_READ_AREA | B_WRITE_AREA);
				if (area < 0) {
					reply.area = area;
					reply.count = 0;
				}
			}

			for (int32 index = 0; index < reply.count; index++)
				infos[index] = nodes[index];

			if (area >= 0) {
				// transfer the area to the target team
				reply.area = _kern_transfer_area(area, &reply.address,
					B_ANY_ADDRESS, request.team);
				if (reply.area < 0) {
					delete_area(area);
					reply.count = 0;
				}
			}

			status = request.SendReply(status, &reply, sizeof(reply));
			if (status != B_OK && reply.area >= 0) {
				// if we couldn't send the message, delete the area
				delete_area(reply.area);
			}
			break;
		}

		case SERVER_GET_NODE_FOR:
		{
			const server_get_node_for_request& request
				= *static_cast<const server_get_node_for_request*>(data);
			server_get_node_for_reply reply;
			status_t status = gNodeManager->GetCloneForID(request.node_id,
				request.team, &reply.clone);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_RELEASE_NODE:
		{
			const server_release_node_request& request
				= *static_cast<const server_release_node_request*>(data);
			server_release_node_reply reply;
			status_t status = gNodeManager->ReleaseNode(request.node,
				request.team);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_RELEASE_NODE_ALL:
		{
			const server_release_node_request& request
				= *static_cast<const server_release_node_request*>(data);
			server_release_node_reply reply;
			status_t status = gNodeManager->ReleaseNodeAll(request.node.node);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_REGISTER_NODE:
		{
			const server_register_node_request& request
				= *static_cast<const server_register_node_request*>(data);
			server_register_node_reply reply;
			status_t status = gNodeManager->RegisterNode(request.add_on_id,
				request.flavor_id, request.name, request.kinds, request.port,
				request.team, request.timesource_id, &reply.node_id);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_UNREGISTER_NODE:
		{
			const server_unregister_node_request& request
				= *static_cast<const server_unregister_node_request*>(data);
			server_unregister_node_reply reply;
			status_t status = gNodeManager->UnregisterNode(request.node_id,
				request.team, &reply.add_on_id, &reply.flavor_id);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_PUBLISH_INPUTS:
		{
			const server_publish_inputs_request& request
				= *static_cast<const server_publish_inputs_request*>(data);
			server_publish_inputs_reply reply;
			status_t status;

			if (request.count <= MAX_INPUTS) {
				// inputs fit inline in the request
				status = gNodeManager->PublishInputs(request.node,
					request.inputs, request.count);
			} else {
				// larger input lists are passed via an area
				media_input* inputs;
				area_id clone;
				clone = clone_area("media_inputs clone", (void**)&inputs,
					B_ANY_ADDRESS, B_READ_AREA | B_WRITE_AREA, request.area);
				if (clone < B_OK) {
					ERROR("SERVER_PUBLISH_INPUTS: failed to clone area, "
						"error %#" B_PRIx32 "\n", clone);
					status = clone;
				} else {
					status = gNodeManager->PublishInputs(request.node, inputs,
						request.count);
					delete_area(clone);
				}
			}
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_PUBLISH_OUTPUTS:
		{
			const server_publish_outputs_request& request
				= *static_cast<const server_publish_outputs_request*>(data);
			server_publish_outputs_reply reply;
			status_t status;

			if (request.count <= MAX_OUTPUTS) {
				// outputs fit inline in the request
				status = gNodeManager->PublishOutputs(request.node,
					request.outputs, request.count);
			} else {
				// larger output lists are passed via an area
				media_output* outputs;
				area_id clone;
				clone = clone_area("media_outputs clone", (void**)&outputs,
					B_ANY_ADDRESS, B_READ_AREA | B_WRITE_AREA, request.area);
				if (clone < B_OK) {
					ERROR("SERVER_PUBLISH_OUTPUTS: failed to clone area, "
						"error %#" B_PRIx32 "\n", clone);
					status = clone;
				} else {
					status = gNodeManager->PublishOutputs(request.node,
						outputs, request.count);
					delete_area(clone);
				}
			}
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_GET_NODE:
		{
			const server_get_node_request& request
				= *static_cast<const server_get_node_request*>(data);
			server_get_node_reply reply;
			status_t status = gNodeManager->GetClone(request.type,
				request.team, &reply.node, reply.input_name, &reply.input_id);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_SET_NODE:
		{
			const server_set_node_request& request
				= *static_cast<const server_set_node_request*>(data);
			server_set_node_reply reply;
			status_t status = gNodeManager->SetDefaultNode(request.type,
				request.use_node ? &request.node : NULL,
				request.use_dni ? &request.dni : NULL,
				request.use_input ? &request.input : NULL);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_GET_DORMANT_NODE_FOR:
		{
			const server_get_dormant_node_for_request& request
				= *static_cast<const server_get_dormant_node_for_request*>(
					data);
			server_get_dormant_node_for_reply reply;
			status_t status = gNodeManager->GetDormantNodeInfo(request.node,
				&reply.node_info);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_GET_INSTANCES_FOR:
		{
			const server_get_instances_for_request& request
				= *static_cast<const server_get_instances_for_request*>(data);
			server_get_instances_for_reply reply;
			status_t status = gNodeManager->GetInstances(request.add_on_id,
				request.flavor_id, reply.node_id, &reply.count,
				min_c(request.max_count, MAX_NODE_ID));
			if (reply.count == MAX_NODE_ID
				&& request.max_count > MAX_NODE_ID) {
				// TODO: might be fixed by using an area
				PRINT(1, "Warning: SERVER_GET_INSTANCES_FOR: returning "
					"possibly truncated list of node id's\n");
			}
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_SET_NODE_TIMESOURCE:
		{
			const server_set_node_timesource_request& request
				= *static_cast<const server_set_node_timesource_request*>(
					data);
			server_set_node_timesource_reply reply;
			status_t result = gNodeManager->SetNodeTimeSource(request.node_id,
				request.timesource_id);
			request.SendReply(result, &reply, sizeof(reply));
			break;
		}

		case SERVER_REGISTER_ADD_ON:
		{
			const server_register_add_on_request& request = *static_cast<
				const server_register_add_on_request*>(data);
			server_register_add_on_reply reply;
			gNodeManager->RegisterAddOn(request.ref, &reply.add_on_id);
			request.SendReply(B_OK, &reply, sizeof(reply));
			break;
		}

		case SERVER_UNREGISTER_ADD_ON:
		{
			// command without a reply
			const server_unregister_add_on_command& request = *static_cast<
				const server_unregister_add_on_command*>(data);
			gNodeManager->UnregisterAddOn(request.add_on_id);
			break;
		}

		case SERVER_REGISTER_DORMANT_NODE:
		{
			// command without a reply
			const server_register_dormant_node_command& command
				= *static_cast<const server_register_dormant_node_command*>(
					data);

			if (command.purge_id > 0)
				gNodeManager->InvalidateDormantFlavorInfo(command.purge_id);

			dormant_flavor_info dormantFlavorInfo;
			status_t status = dormantFlavorInfo.Unflatten(command.type,
				command.flattened_data, command.flattened_size);
			if (status == B_OK)
				gNodeManager->AddDormantFlavorInfo(dormantFlavorInfo);
			break;
		}

		case SERVER_GET_DORMANT_NODES:
		{
			const server_get_dormant_nodes_request& request
				= *static_cast<const server_get_dormant_nodes_request*>(data);
			server_get_dormant_nodes_reply reply;
			reply.count = request.max_count;

			dormant_node_info* infos
				= new(std::nothrow) dormant_node_info[reply.count];
			if (infos != NULL) {
				reply.result = gNodeManager->GetDormantNodes(infos,
					&reply.count,
					request.has_input ? &request.input_format : NULL,
					request.has_output ? &request.output_format : NULL,
					request.has_name ? request.name : NULL,
					request.require_kinds, request.deny_kinds);
			} else
				reply.result = B_NO_MEMORY;

			if (reply.result != B_OK)
				reply.count = 0;

			// the actual infos are sent in a second message on the reply port
			request.SendReply(reply.result, &reply, sizeof(reply));
			if (reply.count > 0) {
				write_port(request.reply_port, 0, infos,
					reply.count * sizeof(dormant_node_info));
			}
			delete[] infos;
			break;
		}

		case SERVER_GET_DORMANT_FLAVOR_INFO:
		{
			const server_get_dormant_flavor_info_request& request
				= *static_cast<const server_get_dormant_flavor_info_request*>(
					data);

			dormant_flavor_info dormantFlavorInfo;
			status_t status = gNodeManager->GetDormantFlavorInfoFor(
				request.add_on_id, request.flavor_id, &dormantFlavorInfo);
			if (status != B_OK) {
				server_get_dormant_flavor_info_reply reply;
				reply.result = status;
				request.SendReply(reply.result, &reply, sizeof(reply));
			} else {
				// the reply is variable-sized: the flattened flavor info is
				// appended to the reply structure
				size_t replySize
					= sizeof(server_get_dormant_flavor_info_reply)
						+ dormantFlavorInfo.FlattenedSize();
				server_get_dormant_flavor_info_reply* reply
					= (server_get_dormant_flavor_info_reply*)malloc(
						replySize);
				if (reply != NULL) {
					reply->type = dormantFlavorInfo.TypeCode();
					reply->flattened_size = dormantFlavorInfo.FlattenedSize();
					reply->result = dormantFlavorInfo.Flatten(
						reply->flattened_data, reply->flattened_size);

					request.SendReply(reply->result, reply, replySize);
					free(reply);
				} else {
					server_get_dormant_flavor_info_reply reply;
					reply.result = B_NO_MEMORY;
					request.SendReply(reply.result, &reply, sizeof(reply));
				}
			}
			break;
		}

		case SERVER_SET_NODE_CREATOR:
		{
			const server_set_node_creator_request& request
				= *static_cast<const server_set_node_creator_request*>(data);
			server_set_node_creator_reply reply;
			status_t status = gNodeManager->SetNodeCreator(request.node,
				request.creator);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_GET_SHARED_BUFFER_AREA:
		{
			const server_get_shared_buffer_area_request& request
				= *static_cast<const server_get_shared_buffer_area_request*>(
					data);
			server_get_shared_buffer_area_reply reply;

			reply.area = gBufferManager->SharedBufferListArea();
			request.SendReply(reply.area >= 0 ? B_OK : reply.area, &reply,
				sizeof(reply));
			break;
		}

		case SERVER_REGISTER_BUFFER:
		{
			const server_register_buffer_request& request
				= *static_cast<const server_register_buffer_request*>(data);
			server_register_buffer_reply reply;
			status_t status;

			if (request.info.buffer == 0) {
				reply.info = request.info;
					// size, offset, flags, area is kept
				// get a new buffer id into reply.info.buffer
				status = gBufferManager->RegisterBuffer(request.team,
					request.info.size, request.info.flags,
					request.info.offset, request.info.area,
					&reply.info.buffer);
			} else {
				reply.info = request.info;
					// buffer id is kept
				status = gBufferManager->RegisterBuffer(request.team,
					request.info.buffer, &reply.info.size, &reply.info.flags,
					&reply.info.offset, &reply.info.area);
			}
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_UNREGISTER_BUFFER:
		{
			// command without a reply
			const server_unregister_buffer_command& request = *static_cast<
				const server_unregister_buffer_command*>(data);
			gBufferManager->UnregisterBuffer(request.team, request.buffer_id);
			break;
		}

		case SERVER_GET_MEDIA_FILE_TYPES:
		{
			const server_get_media_types_request& request = *static_cast<const
				server_get_media_types_request*>(data);

			server_get_media_types_reply reply;
			area_id area = gMediaFilesManager->GetTypesArea(reply.count);
			if (area >= 0) {
				// transfer the area to the target team
				reply.area = _kern_transfer_area(area, &reply.address,
					B_ANY_ADDRESS, request.team);
				if (reply.area < 0) {
					delete_area(area);
					reply.area = B_ERROR;
					reply.count = 0;
				}
			} else {
				// GetTypesArea() failed: propagate its error code; previously
				// reply.area was read uninitialized in this path
				reply.area = area;
				reply.count = 0;
			}

			status_t status = request.SendReply(
				reply.area < 0 ? reply.area : B_OK, &reply, sizeof(reply));
			if (status != B_OK && reply.area >= 0) {
				// if we couldn't send the message, delete the area
				delete_area(reply.area);
			}
			break;
		}

		case SERVER_GET_MEDIA_FILE_ITEMS:
		{
			const server_get_media_items_request& request = *static_cast<const
				server_get_media_items_request*>(data);

			server_get_media_items_reply reply;
			area_id area = gMediaFilesManager->GetItemsArea(request.type,
				reply.count);
			if (area >= 0) {
				// transfer the area to the target team
				reply.area = _kern_transfer_area(area, &reply.address,
					B_ANY_ADDRESS, request.team);
				if (reply.area < 0) {
					delete_area(area);
					reply.area = B_ERROR;
					reply.count = 0;
				}
			} else
				reply.area = area;

			status_t status = request.SendReply(
				reply.area < 0 ? reply.area : B_OK, &reply, sizeof(reply));
			if (status != B_OK && reply.area >= 0) {
				// if we couldn't send the message, delete the area
				delete_area(reply.area);
			}
			break;
		}

		case SERVER_GET_REF_FOR:
		{
			const server_get_ref_for_request& request
				= *static_cast<const server_get_ref_for_request*>(data);
			server_get_ref_for_reply reply;

			entry_ref* ref;
			status_t status = gMediaFilesManager->GetRefFor(request.type,
				request.item, &ref);
			if (status == B_OK)
				reply.ref = *ref;

			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_SET_REF_FOR:
		{
			const server_set_ref_for_request& request
				= *static_cast<const server_set_ref_for_request*>(data);
			server_set_ref_for_reply reply;

			entry_ref ref = request.ref;
			status_t status = gMediaFilesManager->SetRefFor(request.type,
				request.item, ref);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_INVALIDATE_MEDIA_ITEM:
		{
			const server_invalidate_item_request& request
				= *static_cast<const server_invalidate_item_request*>(data);
			server_invalidate_item_reply reply;

			status_t status = gMediaFilesManager->InvalidateItem(request.type,
				request.item);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_REMOVE_MEDIA_ITEM:
		{
			const server_remove_media_item_request& request
				= *static_cast<const server_remove_media_item_request*>(data);
			server_remove_media_item_reply reply;

			status_t status = gMediaFilesManager->RemoveItem(request.type,
				request.item);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_GET_ITEM_AUDIO_GAIN:
		{
			const server_get_item_audio_gain_request& request
				= *static_cast<const server_get_item_audio_gain_request*>(
					data);
			server_get_item_audio_gain_reply reply;

			status_t status = gMediaFilesManager->GetAudioGainFor(request.type,
				request.item, &reply.gain);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		case SERVER_SET_ITEM_AUDIO_GAIN:
		{
			const server_set_item_audio_gain_request& request
				= *static_cast<const server_set_item_audio_gain_request*>(
					data);
			// NOTE(review): reuses server_set_ref_for_reply — confirm no
			// dedicated reply type is expected
			server_set_ref_for_reply reply;

			status_t status = gMediaFilesManager->SetAudioGainFor(request.type,
				request.item, request.gain);
			request.SendReply(status, &reply, sizeof(reply));
			break;
		}

		default:
			printf("media_server: received unknown message code %#08"
				B_PRIx32 "\n", code);
	}
	TRACE("ServerApp::HandleMessage %#" B_PRIx32 " leave\n", code);
}
/*!	Resizes the screen buffer to \a width x \a height without rewrapping
	lines ("simple" resize: lines are truncated or cleared, never re-flowed).

	\param width New screen width in columns.
	\param height New screen height in rows.
	\param historyCapacity New history capacity, passed to _ResizeHistory()
		when the width or capacity changes.
	\return B_OK on success, B_NO_MEMORY if the new line array cannot be
		allocated, or the error from _ResizeHistory()/_ResetTabStops().
*/
status_t
BasicTerminalBuffer::_ResizeSimple(int32 width, int32 height,
	int32 historyCapacity)
{
//debug_printf("BasicTerminalBuffer::_ResizeSimple(): (%ld, %ld) -> "
//"(%ld, %ld)\n", fWidth, fHeight, width, height);
	if (width == fWidth && height == fHeight)
		return B_OK;

	// resize the history first, if width or capacity changes
	if (width != fWidth || historyCapacity != HistoryCapacity()) {
		status_t error = _ResizeHistory(width, historyCapacity);
		if (error != B_OK)
			return error;
	}

	TerminalLine** lines = _AllocateLines(width, height);
	if (lines == NULL)
		return B_NO_MEMORY;
		// NOTE: If width or history capacity changed, the object will be in
		// an invalid state, since the history will already use the new values.

	// determine which screen lines [firstLine, endLine) survive the resize
	int32 endLine = min_c(fHeight, height);
	int32 firstLine = 0;

	if (height < fHeight) {
		// keep the window of lines containing the cursor
		if (endLine <= fCursor.y) {
			endLine = fCursor.y + 1;
			firstLine = endLine - height;
		}

		// push the first lines to the history
		if (fHistory != NULL) {
			for (int32 i = 0; i < firstLine; i++) {
				TerminalLine* line = _LineAt(i);
				if (width < fWidth)
					_TruncateLine(line, width);
				fHistory->AddLine(line);
			}
		}
	}

	// copy the lines we keep
	for (int32 i = firstLine; i < endLine; i++) {
		TerminalLine* sourceLine = _LineAt(i);
		TerminalLine* destLine = lines[i - firstLine];
		if (width < fWidth)
			_TruncateLine(sourceLine, width);
		// copy only the used cells (TerminalLine has one trailing cell
		// inline, hence "length - 1" additional cells)
		memcpy(destLine, sourceLine, (int32)sizeof(TerminalLine)
			+ (sourceLine->length - 1) * (int32)sizeof(TerminalCell));
	}

	// clear the remaining lines
	for (int32 i = endLine - firstLine; i < height; i++)
		lines[i]->Clear(fAttributes, width);

	_FreeLines(fScreen, fHeight);
	fScreen = lines;

	if (fWidth != width) {
		status_t error = _ResetTabStops(width);
		if (error != B_OK)
			return error;
			// NOTE(review): at this point fScreen was already replaced but
			// fWidth/fHeight still hold the old values — confirm callers
			// treat this error as fatal for the object
	}

	fWidth = width;
	fHeight = height;

	// reset scroll region and modes to defaults for the new size
	fScrollTop = 0;
	fScrollBottom = fHeight - 1;
	fOriginMode = fSavedOriginMode = false;

	fScreenOffset = 0;

	// clamp the cursor into the new screen
	if (fCursor.x > width)
		fCursor.x = width;
		// NOTE(review): clamps to "width", not "width - 1" — looks like a
		// potential off-by-one (x == width is one past the last column);
		// confirm whether a one-past-end cursor column is intended here
	fCursor.y -= firstLine;
	fSoftWrappedCursor = false;

	return B_OK;
}
/*!	Kernel debugger worker for dumping (a range of) trace entries,
	optionally filtered, with support for continuing a previous dump.

	\param argc, argv Debugger command arguments: optional output flags
		("--difftime", "--printteam", "--stacktrace"), an optional
		continuation direction ("forward"/"backward"), up to three numeric
		parameters (start, count, maxToCheck), and an optional filter
		specification ("filter ..." or an argument starting with '#').
	\param wrapperFilter Optional filter that wraps the parsed filter
		(may be NULL).
	\return B_KDEBUG_CONT when invoked as a continuation, 0 otherwise.

	State is kept in static variables so that a subsequent
	"forward"/"backward" invocation can resume where the last one stopped;
	continuation is refused if the trace buffer changed in the meantime.
*/
int
dump_tracing_internal(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
	int argi = 1;

	// variables in which we store our state to be continuable
	static int32 _previousCount = 0;
	static bool _previousHasFilter = false;
	static bool _previousPrintStackTrace = false;
	static int32 _previousMaxToCheck = 0;
	static int32 _previousFirstChecked = 1;
	static int32 _previousLastChecked = -1;
	static int32 _previousDirection = 1;
	static uint32 _previousEntriesEver = 0;
	static uint32 _previousEntries = 0;
	static uint32 _previousOutputFlags = 0;
	static TraceEntryIterator iterator;

	uint32 entriesEver = sTracingMetaData->EntriesEver();

	// Note: start and index are Pascal-like indices (i.e. in [1, Entries()]).
	int32 start = 0;	// special index: print the last count entries
	int32 count = 0;
	int32 maxToCheck = 0;
	int32 cont = 0;

	bool hasFilter = false;
	bool printStackTrace = false;

	// parse leading output-flag options
	uint32 outputFlags = 0;
	while (argi < argc) {
		if (strcmp(argv[argi], "--difftime") == 0) {
			outputFlags |= TRACE_OUTPUT_DIFF_TIME;
			argi++;
		} else if (strcmp(argv[argi], "--printteam") == 0) {
			outputFlags |= TRACE_OUTPUT_TEAM_ID;
			argi++;
		} else if (strcmp(argv[argi], "--stacktrace") == 0) {
			printStackTrace = true;
			argi++;
		} else
			break;
	}

	// continuation direction: explicit keyword, or previous direction when
	// invoked without arguments
	if (argi < argc) {
		if (strcmp(argv[argi], "forward") == 0) {
			cont = 1;
			argi++;
		} else if (strcmp(argv[argi], "backward") == 0) {
			cont = -1;
			argi++;
		}
	} else
		cont = _previousDirection;

	if (cont != 0) {
		// a continuation accepts no further arguments and requires the
		// trace buffer to be unchanged since the previous invocation
		if (argi < argc) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
		if (entriesEver == 0 || entriesEver != _previousEntriesEver
			|| sTracingMetaData->Entries() != _previousEntries) {
			kprintf("Can't continue iteration. \"%s\" has not been invoked "
				"before, or there were new entries written since the last "
				"invocation.\n", argv[0]);
			return 0;
		}
	}

	// get start, count, maxToCheck
	int32* params[3] = { &start, &count, &maxToCheck };
	for (int i = 0; i < 3 && !hasFilter && argi < argc; i++) {
		if (strcmp(argv[argi], "filter") == 0) {
			hasFilter = true;
			argi++;
		} else if (argv[argi][0] == '#') {
			hasFilter = true;
		} else {
			*params[i] = parse_expression(argv[argi]);
			argi++;
		}
	}

	// filter specification
	if (argi < argc) {
		hasFilter = true;
		if (strcmp(argv[argi], "filter") == 0)
			argi++;

		if (!TraceFilterParser::Default()->Parse(argc - argi, argv + argi)) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	int32 direction;
	int32 firstToCheck;
	int32 lastToCheck;

	if (cont != 0) {
		// get values from the previous iteration
		direction = cont;
		count = _previousCount;
		maxToCheck = _previousMaxToCheck;
		hasFilter = _previousHasFilter;
		outputFlags = _previousOutputFlags;
		printStackTrace = _previousPrintStackTrace;

		if (direction < 0)
			start = _previousFirstChecked - 1;
		else
			start = _previousLastChecked + 1;
	} else {
		// defaults for count and maxToCheck
		if (count == 0)
			count = 30;
		if (maxToCheck == 0 || !hasFilter)
			maxToCheck = count;
		else if (maxToCheck < 0)
			maxToCheck = sTracingMetaData->Entries();

		// determine iteration direction
		direction = (start <= 0 || count < 0 ? -1 : 1);

		// validate count and maxToCheck
		if (count < 0)
			count = -count;
		if (maxToCheck < 0)
			maxToCheck = -maxToCheck;
		if (maxToCheck > (int32)sTracingMetaData->Entries())
			maxToCheck = sTracingMetaData->Entries();
		if (count > maxToCheck)
			count = maxToCheck;

		// validate start
		if (start <= 0 || start > (int32)sTracingMetaData->Entries())
			start = max_c(1, sTracingMetaData->Entries());
	}

	// derive the inclusive index range [firstToCheck, lastToCheck]
	if (direction < 0) {
		firstToCheck = max_c(1, start - maxToCheck + 1);
		lastToCheck = start;
	} else {
		firstToCheck = start;
		lastToCheck = min_c((int32)sTracingMetaData->Entries(),
			start + maxToCheck - 1);
	}

	// reset the iterator, if something changed in the meantime
	if (entriesEver == 0 || entriesEver != _previousEntriesEver
		|| sTracingMetaData->Entries() != _previousEntries) {
		iterator.Reset();
	}

	LazyTraceOutput out(sTracingMetaData->TraceOutputBuffer(),
		kTraceOutputBufferSize, outputFlags);

	bool markedMatching = false;
	int32 firstToDump = firstToCheck;
	int32 lastToDump = lastToCheck;

	TraceFilter* filter = NULL;
	if (hasFilter)
		filter = TraceFilterParser::Default()->Filter();

	if (wrapperFilter != NULL) {
		wrapperFilter->Init(filter, direction, cont != 0);
		filter = wrapperFilter;
	}

	if (direction < 0 && filter && lastToCheck - firstToCheck >= count) {
		// iteration direction is backwards
		markedMatching = true;

		// From the last entry to check iterate backwards to check filter
		// matches. Matching entries are flagged (FILTER_MATCH) so the
		// forward dump loop below can recognize them.
		int32 matching = 0;

		// move to the entry after the last entry to check
		iterator.MoveTo(lastToCheck + 1);

		// iterate backwards
		firstToDump = -1;
		lastToDump = -1;
		while (iterator.Index() > firstToCheck) {
			TraceEntry* entry = iterator.Previous();
			if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
				out.Clear();
				if (filter->Filter(entry, out)) {
					entry->ToTraceEntry()->flags |= FILTER_MATCH;
					if (lastToDump == -1)
						lastToDump = iterator.Index();
					firstToDump = iterator.Index();

					matching++;
					if (matching >= count)
						break;
				} else
					entry->ToTraceEntry()->flags &= ~FILTER_MATCH;
			}
		}

		firstToCheck = iterator.Index();

		// iterate to the previous entry, so that the next loop starts at the
		// right one
		iterator.Previous();
	}

	out.SetLastEntryTime(0);

	// set the iterator to the entry before the first one to dump
	iterator.MoveTo(firstToDump - 1);

	// dump the entries matching the filter in the range
	// [firstToDump, lastToDump]
	int32 dumped = 0;

	while (TraceEntry* entry = iterator.Next()) {
		int32 index = iterator.Index();
		if (index < firstToDump)
			continue;
		if (index > lastToDump || dumped >= count) {
			if (direction > 0)
				lastToCheck = index - 1;
			break;
		}

		if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
			out.Clear();
			// in the backwards case matches were pre-marked above; otherwise
			// apply the filter now
			if (filter &&  (markedMatching
					? (entry->Flags() & FILTER_MATCH) == 0
					: !filter->Filter(entry, out))) {
				continue;
			}

			// don't print trailing new line
			const char* dump = out.DumpEntry(entry);
			int len = strlen(dump);
			if (len > 0 && dump[len - 1] == '\n')
				len--;

			kprintf("%5ld. %.*s\n", index, len, dump);

			if (printStackTrace) {
				out.Clear();
				entry->DumpStackTrace(out);
				if (out.Size() > 0)
					kputs(out.Buffer());
			}
		} else if (!filter)
			kprintf("%5ld. ** uninitialized entry **\n", index);

		dumped++;
	}

	kprintf("printed %ld entries within range %ld to %ld (%ld of %ld total, "
		"%ld ever)\n", dumped, firstToCheck, lastToCheck,
		lastToCheck - firstToCheck + 1, sTracingMetaData->Entries(),
		entriesEver);

	// store iteration state
	_previousCount = count;
	_previousMaxToCheck = maxToCheck;
	_previousHasFilter = hasFilter;
	_previousPrintStackTrace = printStackTrace;
	_previousFirstChecked = firstToCheck;
	_previousLastChecked = lastToCheck;
	_previousDirection = direction;
	_previousEntriesEver = entriesEver;
	_previousEntries = sTracingMetaData->Entries();
	_previousOutputFlags = outputFlags;

	return cont != 0 ? B_KDEBUG_CONT : 0;
}
/*!	Reads the requested amount of data into the cache, and allocates
	pages needed to fulfill that request. This function is called by cache_io().
	It can only handle a certain amount of bytes, and the caller must make
	sure that it matches that criterion.
	The cache_ref lock must be held when calling this function; during
	operation it will unlock the cache, though.

	\param ref The file cache reference whose cache receives the pages.
	\param cookie File system cookie passed through to the page I/O.
	\param offset File offset of the first page to read (page-aligned;
		presumably guaranteed by cache_io() — TODO confirm).
	\param pageOffset Offset within the first page where the caller's data
		starts.
	\param buffer User/kernel address to copy the read data to (only used
		if \a useBuffer is true).
	\param bufferSize Number of bytes to copy into \a buffer.
	\param useBuffer Whether to copy the read data into \a buffer at all.
	\param reservation Page reservation the new cache pages are taken from;
		it is released after the pages were allocated and re-filled with
		\a reservePages before returning successfully.
	\param reservePages Number of pages to re-reserve for the caller.
	\return B_OK on success, or the error from the underlying page read.
*/
static status_t
read_into_cache(file_cache_ref* ref, void* cookie, off_t offset,
	int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
	vm_page_reservation* reservation, size_t reservePages)
{
	TRACE(("read_into_cache(offset = %Ld, pageOffset = %ld, buffer = %#lx, "
		"bufferSize = %lu\n", offset, pageOffset, buffer, bufferSize));

	VMCache* cache = ref->cache;

	// TODO: We're using way too much stack! Rather allocate a sufficiently
	// large chunk on the heap.
	generic_io_vec vecs[MAX_IO_VECS];
	uint32 vecCount = 0;

	generic_size_t numBytes = PAGE_ALIGN(pageOffset + bufferSize);
	vm_page* pages[MAX_IO_VECS];
	int32 pageIndex = 0;

	// allocate pages for the cache and mark them busy
	for (generic_size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
		vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
			reservation, PAGE_STATE_CACHED | VM_PAGE_ALLOC_BUSY);

		cache->InsertPage(page, offset + pos);

		add_to_iovec(vecs, vecCount, MAX_IO_VECS,
			page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE);
			// TODO: check if the array is large enough (currently panics)!
	}

	// the pages are busy, so it is safe to drop the cache lock and the
	// reservation for the duration of the I/O
	push_access(ref, offset, bufferSize, false);
	cache->Unlock();
	vm_page_unreserve_pages(reservation);

	// read file into reserved pages
	status_t status = read_pages_and_clear_partial(ref, cookie, offset, vecs,
		vecCount, B_PHYSICAL_IO_REQUEST, &numBytes);
	if (status != B_OK) {
		// reading failed, free allocated pages
		dprintf("file_cache: read pages failed: %s\n", strerror(status));

		cache->Lock();

		// wake up any waiters, then remove and free the pages again
		for (int32 i = 0; i < pageIndex; i++) {
			cache->NotifyPageEvents(pages[i], PAGE_EVENT_NOT_BUSY);
			cache->RemovePage(pages[i]);
			vm_page_set_state(pages[i], PAGE_STATE_FREE);
		}

		return status;
	}

	// copy the pages if needed and unmap them again
	for (int32 i = 0; i < pageIndex; i++) {
		if (useBuffer && bufferSize != 0) {
			// pageOffset only applies to the first page; it is reset to 0
			// below for all following pages
			size_t bytes = min_c(bufferSize, (size_t)B_PAGE_SIZE - pageOffset);

			vm_memcpy_from_physical((void*)buffer,
				pages[i]->physical_page_number * B_PAGE_SIZE + pageOffset,
				bytes, IS_USER_ADDRESS(buffer));

			buffer += bytes;
			bufferSize -= bytes;
			pageOffset = 0;
		}
	}

	// re-reserve pages for the caller before re-acquiring the cache lock
	reserve_pages(ref, reservation, reservePages, false);
	cache->Lock();

	// make the pages accessible in the cache
	for (int32 i = pageIndex; i-- > 0;) {
		DEBUG_PAGE_ACCESS_END(pages[i]);

		cache->MarkPageUnbusy(pages[i]);
	}

	return B_OK;
}