void SharedUiItemWidgetMapper::addReadOnlyMapping(
    QWidget *widget, int section, QVariant valueWhenNull) {
  // Drop any previous mapping for this section or this widget, then register the
  // new pair, remember the fallback value, and populate the widget immediately.
  removeMapping(section);
  removeMapping(widget);
  _sectionToWidget.insert(section, widget);
  _widgetToSection.insert(widget, section);
  _sectionToDefaultValue.insert(section, valueWhenNull);
  populate(section);
}
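// Hypothetical usage sketch, not part of the original source: it assumes a
// SharedUiItemWidgetMapper (`mapper`) and a QLabel (`statusLabel`) constructed
// elsewhere, with the mapper's header already included; only addReadOnlyMapping()
// itself comes from the code above.
#include <QLabel>

static void bindStatusSection(SharedUiItemWidgetMapper *mapper, QLabel *statusLabel) {
  // Section 3 is an arbitrary example index; "n/a" is displayed whenever the
  // mapped item carries no value for that section.
  mapper->addReadOnlyMapping(statusLabel, 3, QStringLiteral("n/a"));
}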
void
xf86UnMapVidMem(int ScreenNum, pointer Base, unsigned long Size)
{
    VidMapPtr vp;
    MappingPtr mp;

    if (!vidMemInfo.initialised || !vidMemInfo.unmapMem) {
        xf86DrvMsg(ScreenNum, X_WARNING,
                   "xf86UnMapVidMem() called before xf86MapVidMem()\n");
        return;
    }
    vp = getVidMapRec(ScreenNum);
    mp = findMapping(vp, Base, Size);
    if (!mp) {
        xf86DrvMsg(ScreenNum, X_WARNING,
                   "xf86UnMapVidMem: cannot find region for [%p,0x%lx]\n",
                   Base, Size);
        return;
    }
    if (vp->mtrrEnabled && vidMemInfo.undoWC && mp)
        vidMemInfo.undoWC(ScreenNum, mp->mtrrInfo);

    vidMemInfo.unmapMem(ScreenNum, Base, Size);
    removeMapping(vp, mp);
}
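/*
 * Hypothetical usage sketch, not from the original source: the usual pairing of
 * xf86MapVidMem()/xf86UnMapVidMem() in a DDX driver. The driver structure, the
 * physical base and the size are placeholders; VIDMEM_MMIO and the headers are
 * assumed to match the same os-support API generation as the function above.
 */
#include "xf86.h"
#include "xf86_OSproc.h"

static pointer mmioBase;

static void
mapRegs(ScrnInfoPtr pScrn, unsigned long physBase, unsigned long size)
{
    mmioBase = xf86MapVidMem(pScrn->scrnIndex, VIDMEM_MMIO, physBase, size);
}

static void
unmapRegs(ScrnInfoPtr pScrn, unsigned long size)
{
    /* Pass the same Base/Size that were mapped so findMapping() can locate the region. */
    xf86UnMapVidMem(pScrn->scrnIndex, mmioBase, size);
    mmioBase = NULL;
}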
int VHostMap::addMaping(HttpVHost *pVHost, const char *pDomain, int optional)
{
    HttpVHost *pOld = exactMatchVHost(pDomain);
    if (pOld)
    {
        if (pOld == pVHost)
            return 0;
        // Same virtual host name but a different object: drop the stale mapping and remap below.
        if (strcmp(pOld->getName(), pVHost->getName()) == 0)
            removeMapping(pDomain);
        else
        {
            if (!optional)
            {
                LS_ERROR("Hostname [%s] on listener [%s] is mapped to virtual host [%s], "
                         "can't map to virtual host [%s]!",
                         pDomain, m_sAddr.c_str(), pOld->getName(), pVHost->getName());
                return LS_FAIL;
            }
            return 0;
        }
    }
    const AutoStr2 *psDomain = pVHost->addMatchName(pDomain);
    if ((!psDomain) || (addMap(psDomain->c_str(), pVHost)))
    {
        LS_ERROR("Associates [%s] with [%s] on hostname/IP [%s] %s!",
                 pVHost->getName(), m_sAddr.c_str(), pDomain, "failed");
        return LS_FAIL;
    }
    else
    {
        LS_DBG_L("Associates [%s] with [%s] on hostname/IP [%s] %s!",
                 pVHost->getName(), m_sAddr.c_str(), pDomain, "succeed");
        return 0;
    }
}
bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t inTask, IOOptionBits options, mach_vm_size_t capacity,
    mach_vm_address_t alignment, mach_vm_address_t physicalMask)
{
    task_t                mapTask = NULL;
    vm_map_t              vmmap = NULL;
    mach_vm_address_t     highestMask = 0;
    IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool                  mapped = false;
    bool                  needZero;

    if (!capacity) return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64) return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options)) {
        IOMapper::checkForSystemMapper();
        mapped = (0 != IOMapper::gSystem);
    }
    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));

    if (physicalMask && (alignment <= 1)) {
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size) alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt))
            && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size) capacity = round_page(capacity);

    if (alignment > page_size) options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((capacity + alignment) < _capacity) return (false);

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped) {
        if (highestMask <= 0xFFFFFFFF)
            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
        else
            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        highestMask = 0;
    }

    // set memory entry cache mode, pageable, purgeable
    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
    if (options & kIOMemoryPageable) {
        iomdOptions |= kIOMemoryBufferPageable;
        if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
    }
    else {
        vmmap = kernel_map;

        // Buffer shouldn't auto prepare they should be prepared explicitly
        // But it never was enforced so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) {
            contig |= (!mapped);
            contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
            contig |= true;
#endif
        }

        if (contig || highestMask || (alignment > page_size)) {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask) {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
                                   capacity, highestMask, alignment, contig);
        }
        else if (needZero && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes))) {
            _internalFlags |= kInternalFlagPageAllocated;
            needZero = false;
            _buffer = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
            if (_buffer) {
                IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
                OSAddAtomic(capacity, &debug_iomalloc_size);
#endif
            }
        }
        else if (alignment > 1) {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else {
            _buffer = IOMalloc(capacity);
        }
        if (!_buffer) {
            return false;
        }
        if (needZero) bzero(_buffer, capacity);
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        vm_size_t size = round_page(capacity);

        // initWithOptions will create memory entry
        iomdOptions |= kIOMemoryPersistent;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
            mapTask = inTask;
            if (NULL == inTask) inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask) {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr) {
                UInt8 dummyVar = *startAddr;
                (void) dummyVar;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
                                                &mapSpec, sizeof(mapSpec)))
        return false;

    if (mapTask) {
        if (!reserved) {
            reserved = IONew(ExpansionData, 1);
            if (!reserved) return (false);
        }
        reserved->map = createMappingInTask(mapTask, 0,
                            kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask),
                            0, 0);
        if (!reserved->map) {
            _buffer = 0;
            return (false);
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}
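// Hypothetical usage sketch, not from the original source: initWithPhysicalMask() is
// normally reached through the public inTaskWithPhysicalMask() factory. The 16 MB
// capacity and the 32-bit DMA mask below are illustrative values only.
#include <IOKit/IOBufferMemoryDescriptor.h>

static IOBufferMemoryDescriptor *allocDMABuffer(void)
{
    IOBufferMemoryDescriptor *bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
        kernel_task,
        kIODirectionInOut | kIOMemoryPhysicallyContiguous,  // wired, physically contiguous
        16 * 1024 * 1024,                                   // capacity
        0x00000000FFFFFFFFULL);                             // restrict to 32-bit physical addresses
    return bmd;                                             // caller releases when done
}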
bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t inTask, IOOptionBits options, mach_vm_size_t capacity,
    mach_vm_address_t alignment, mach_vm_address_t physicalMask)
{
    kern_return_t         kr;
    task_t                mapTask = NULL;
    vm_map_t              vmmap = NULL;
    mach_vm_address_t     highestMask = 0;
    IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool                  mapped = false;
    bool                  needZero;

    if (!capacity) return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64) return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options)) {
        IOMapper::checkForSystemMapper();
        mapped = (0 != IOMapper::gSystem);
    }
    needZero = mapped;

    if (physicalMask && (alignment <= 1)) {
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size) alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt))
            && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size) capacity = round_page(capacity);

    if (alignment > page_size) options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped) {
        if (highestMask <= 0xFFFFFFFF)
            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
        else
            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        highestMask = 0;
    }

    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask) {
        case kIOMapInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
            break;
        case kIOMapWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
            break;
        case kIOMapWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
            break;
        case kIOMapCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
            break;
        case kIOMapCopybackInnerCache:
            SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
            break;
        case kIOMapDefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
            break;
    }

    if (options & kIOMemoryPageable) {
        iomdOptions |= kIOMemoryBufferPageable;

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable) memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else {
        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
        vmmap = kernel_map;

        // Buffer shouldn't auto prepare they should be prepared explicitly
        // But it never was enforced so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) {
            contig |= (!mapped);
            contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
            contig |= true;
#endif
        }

        if (contig || highestMask || (alignment > page_size)) {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask) {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
                                   capacity, highestMask, alignment, contig);
        }
        else if (needZero && ((capacity + alignment) <= (page_size - kIOPageAllocChunkBytes))) {
            _internalFlags |= kInternalFlagPageAllocated;
            needZero = false;
            _buffer = (void *) iopa_alloc(capacity, alignment);
        }
        else if (alignment > 1) {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else {
            _buffer = IOMalloc(capacity);
        }
        if (!_buffer) {
            return false;
        }
        if (needZero) bzero(_buffer, capacity);
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        ipc_port_t sharedMem;
        vm_size_t  size = round_page(capacity);

        kr = mach_make_memory_entry(vmmap,
                                    &size, (vm_offset_t)_buffer,
                                    memEntryCacheMode, &sharedMem,
                                    NULL);

        if ((KERN_SUCCESS == kr) && (size != round_page(capacity))) {
            ipc_port_release_send(sharedMem);
            kr = kIOReturnVMError;
        }
        if (KERN_SUCCESS != kr) return (false);

        _memEntry = (void *) sharedMem;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            debug_iomallocpageable_size += size;
#endif
            mapTask = inTask;
            if (NULL == inTask) inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask) {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr) {
                *startAddr;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
                                                &mapSpec, sizeof(mapSpec)))
        return false;

    if (mapTask) {
        if (!reserved) {
            reserved = IONew(ExpansionData, 1);
            if (!reserved) return (false);
        }
        reserved->map = createMappingInTask(mapTask, 0,
                            kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map) {
            _buffer = 0;
            return (false);
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Destructor
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
CEccAddMonitor1st::~CEccAddMonitor1st()
{
    // Remove event bindings
    removeMapping();
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Enumerate all monitors this device can use
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
void CEccAddMonitor1st::EnumMT(const string &szParentID, const string &szDTName, const string &szNetworkSet)
{
    m_szParentID = szParentID;
    m_szNetworkset = szNetworkSet;
    if (szDTName.empty() || m_szDTName == szDTName)
        return;
    m_szDTName = szDTName;

    // Remove any existing message bindings
    removeMapping();

    if (m_pContent)
    {
        m_pContent->clear();

        list<int> lsMTID;
        CEccMainView::m_pTreeView->GetDevMTList(szDTName, lsMTID);
        list<int>::iterator lstItem;

        // Enumerate each monitor template
        for (lstItem = lsMTID.begin(); lstItem != lsMTID.end(); lstItem++)
        {
            int nMTID = (*lstItem);

            // Open the monitor template
            OBJECT objMonitor = GetMonitorTemplet(nMTID, CEccMainView::m_szIDCUser, CEccMainView::m_szAddr);
            if (objMonitor != INVALID_VALUE)
            {   // Opened successfully
                // Main attribute node
                MAPNODE node = GetMTMainAttribNode(objMonitor);
                if (node != INVALID_VALUE)
                {
                    // Name, hidden flag, description
                    string szLabel(""), szHidden(""), szDesc("");
                    if (FindNodeValue(node, svLabel, szLabel))
                        szLabel = SVResString::getResString(szLabel.c_str());
                    if (FindNodeValue(node, svDescription, szDesc))
                        szDesc = SVResString::getResString(szDesc.c_str());
                    FindNodeValue(node, svHidden, szHidden);

                    if (szHidden != "true")
                    {
                        int nRow = m_pContent->numRows();

                        // Display text for the monitor
                        WText *pName = new WText(szLabel, m_pContent->elementAt(nRow, 0));
                        if (pName)
                        {
                            // Text style
                            sprintf(pName->contextmenu_,
                                    "style='color:#1E5B99;cursor:pointer;' onmouseover='"
                                    "this.style.textDecoration=\"underline\"' "
                                    "onmouseout='this.style.textDecoration=\"none\"'");
                            // Bind the click signal
                            connect(pName, SIGNAL(clicked()), "showbar();", &m_MTMapper, SLOT(map()),
                                    WObject::ConnectionType::JAVASCRIPTDYNAMIC);
                            m_MTMapper.setMapping(pName, nMTID);
                            pName->setToolTip(szLabel);
                            m_lsText.push_back(pName);
                        }
                        new WText(szDesc, m_pContent->elementAt(nRow, 1));
                        m_pContent->GetRow(nRow)->setStyleClass("padding_top");
                        m_pContent->elementAt(nRow, 0)->setStyleClass("widthbold");
                        m_pContent->elementAt(nRow, 1)->setStyleClass("color_2");
                    }
                }
                // Close the monitor template
                CloseMonitorTemplet(objMonitor);
            }
        }
    }
}