// Copies the waveform payload (the RIFF 'data' subchunk) out of the
// in-memory wave image.
//
// pWaveData  In/out. If NULL on entry, a buffer of exactly the data-chunk
//            size is allocated with GlobalAlloc(GMEM_MOVEABLE) and returned
//            locked; the caller owns it (GlobalUnlock + GlobalFree via
//            GlobalHandle). If non-NULL, it is treated as a caller-supplied
//            buffer of capacity dwMaxLen.
// dwMaxLen   Capacity of a caller-supplied buffer; ignored when this
//            function allocates the buffer itself.
// Returns the number of bytes copied, or 0 if the object is invalid or
// the allocation failed.
DWORD CWave::GetData(BYTE*& pWaveData, DWORD dwMaxLen) const
{
    // Check validity
    if (!IsValid())
        return (DWORD)0;

    // Setup and open the MMINFO structure over the in-memory RIFF image
    CMMMemoryIOInfo mmioInfo((HPSTR)m_pImageData, m_dwImageLen);
    CMMIO mmio(mmioInfo);

    // Find the WAVE chunk
    CMMTypeChunk mmckParent('W','A','V','E');
    mmio.Descend(mmckParent, MMIO_FINDRIFF);

    // Find and get the size of the data subchunk
    CMMIdChunk mmckSubchunk('d','a','t','a');
    mmio.Descend(mmckSubchunk, mmckParent, MMIO_FINDCHUNK);
    DWORD dwLenToCopy = mmckSubchunk.cksize;

    if (pWaveData == NULL)
    {
        // Allocate memory for the caller. Check the allocation before
        // locking: the previous code passed a possibly-NULL HGLOBAL
        // straight to GlobalLock, and leaked the handle if the lock failed.
        HGLOBAL hMem = GlobalAlloc(GMEM_MOVEABLE, dwLenToCopy);
        if (hMem == NULL)
            return (DWORD)0;
        pWaveData = (BYTE*)GlobalLock(hMem);
        if (pWaveData == NULL)
        {
            GlobalFree(hMem);   // don't leak the handle on lock failure
            return (DWORD)0;
        }
    }
    else
    {
        // If we didn't allocate our own memory, honor dwMaxLen
        if (dwMaxLen < dwLenToCopy)
            dwLenToCopy = dwMaxLen;
    }

    // Read waveform data into the buffer (pWaveData is non-NULL here:
    // either supplied by the caller or successfully allocated above).
    mmio.Read((HPSTR)pWaveData, dwLenToCopy);
    return dwLenToCopy;
}
// Returns the size in bytes of the 'data' subchunk of the in-memory
// WAVE image, or 0 if this object does not hold a valid wave.
DWORD CWave::GetDataLen() const
{
    if (!IsValid())
        return (DWORD)0;

    // Open a memory-backed MMIO stream over the RIFF image.
    CMMMemoryIOInfo ioInfo((HPSTR)m_pImageData, m_dwImageLen);
    CMMIO io(ioInfo);

    // Descend into the RIFF 'WAVE' parent chunk...
    CMMTypeChunk parentChunk('W','A','V','E');
    io.Descend(parentChunk, MMIO_FINDRIFF);

    // ...then locate the 'data' subchunk inside it.
    CMMIdChunk dataChunk('d','a','t','a');
    io.Descend(dataChunk, parentChunk, MMIO_FINDCHUNK);

    // The chunk header carries the payload length.
    return dataChunk.cksize;
}
// Retrieves the wave format (the RIFF 'fmt ' subchunk) into wfFormat.
//
// wfFormat  Out. Filled with as much of the 'fmt ' chunk as is present;
//           any trailing fields not stored in the chunk are zeroed.
// Returns TRUE on success, FALSE if the object is invalid.
BOOL CWave::GetFormat(WAVEFORMATEX& wfFormat) const
{
    // Check validity
    if (!IsValid())
        return FALSE;

    // Setup and open the MMINFO structure over the in-memory RIFF image
    CMMMemoryIOInfo mmioInfo((HPSTR)m_pImageData, m_dwImageLen);
    CMMIO mmio(mmioInfo);

    // Find the WAVE chunk
    CMMTypeChunk mmckParent('W','A','V','E');
    mmio.Descend(mmckParent, MMIO_FINDRIFF);

    // Find and read the format subchunk
    CMMIdChunk mmckSubchunk('f','m','t',' ');
    mmio.Descend(mmckSubchunk, mmckParent, MMIO_FINDCHUNK);

    // A plain-PCM 'fmt ' chunk is commonly only 16 bytes (no cbSize
    // field), while sizeof(WAVEFORMATEX) is 18. The previous code always
    // read sizeof(WAVEFORMATEX), overrunning short chunks and leaving
    // cbSize uninitialized. Zero the output, then read only what the
    // chunk actually holds (capped at the struct size).
    ZeroMemory(&wfFormat, sizeof(WAVEFORMATEX));
    DWORD dwReadLen = mmckSubchunk.cksize;
    if (dwReadLen > (DWORD)sizeof(WAVEFORMATEX))
        dwReadLen = (DWORD)sizeof(WAVEFORMATEX);
    mmio.Read((HPSTR)&wfFormat, dwReadLen);
    mmio.Ascend(mmckSubchunk);
    return TRUE;
}
/*
 * Common read/write worker for the memory device minors:
 *   M_MEM     - physical memory; offset must fall inside phys_install.
 *   M_KMEM    - kernel virtual memory.
 *   M_ALLKMEM - like M_KMEM but passes an "allow all" flag to mmio().
 *   M_ZERO    - reads zero-fill the user buffer; writes are discarded.
 *   M_NULL    - reads return EOF; writes are discarded.
 *
 * Processes uio one iovec at a time until the request is exhausted or an
 * error occurs. Returns 0 if any progress was made (partial transfers
 * succeed), otherwise the first error encountered.
 */
/*ARGSUSED3*/
static int
mmrw(dev_t dev, struct uio *uio, enum uio_rw rw, cred_t *cred)
{
	pfn_t v;
	struct iovec *iov;
	int error = 0;
	size_t c;
	/* Remember the starting residual so we can tell if we moved data. */
	ssize_t oresid = uio->uio_resid;
	minor_t minor = getminor(dev);

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* Skip exhausted iovecs; a negative count is a bug. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor) {

		case M_MEM:
			/*
			 * Only allow access to physical addresses that are
			 * actually installed memory.
			 */
			memlist_read_lock();
			if (!address_in_memlist(phys_install,
			    (uint64_t)uio->uio_loffset, 1)) {
				memlist_read_unlock();
				error = EFAULT;
				break;
			}
			memlist_read_unlock();

			v = BTOP((u_offset_t)uio->uio_loffset);
			error = mmio(uio, rw, v,
			    uio->uio_loffset & PAGEOFFSET, 0, NULL);
			break;

		case M_KMEM:
		case M_ALLKMEM:
		{
			page_t **ppp = NULL;
			caddr_t vaddr = (caddr_t)uio->uio_offset;
			int try_lock = NEED_LOCK_KVADDR(vaddr);
			int locked = 0;

			/*
			 * Give the platform a chance to handle the I/O
			 * itself; ENOTSUP means "fall through to the
			 * generic path below".
			 */
			if ((error = plat_mem_do_mmio(uio, rw)) != ENOTSUP)
				break;

			/*
			 * If vaddr does not map a valid page, as_pagelock()
			 * will return failure. Hence we can't check the
			 * return value and return EFAULT here as we'd like.
			 * seg_kp and seg_kpm do not properly support
			 * as_pagelock() for this context so we avoid it
			 * using the try_lock set check above. Some day when
			 * the kernel page locking gets redesigned all this
			 * muck can be cleaned up.
			 */
			if (try_lock)
				locked = (as_pagelock(&kas, &ppp, vaddr,
				    PAGESIZE, S_WRITE) == 0);

			/* Translate the kernel VA to a page frame number. */
			v = hat_getpfnum(kas.a_hat,
			    (caddr_t)(uintptr_t)uio->uio_loffset);
			if (v == PFN_INVALID) {
				/* Unwind the page lock before bailing out. */
				if (locked)
					as_pageunlock(&kas, ppp, vaddr,
					    PAGESIZE, S_WRITE);
				error = EFAULT;
				break;
			}
			error = mmio(uio, rw, v, uio->uio_loffset & PAGEOFFSET,
			    minor == M_ALLKMEM || mm_kmem_io_access,
			    (locked && ppp) ? *ppp : NULL);
			if (locked)
				as_pageunlock(&kas, ppp, vaddr, PAGESIZE,
				    S_WRITE);
		}
		break;

		case M_ZERO:
			if (rw == UIO_READ) {
				label_t ljb;

				/*
				 * Zero the user buffer directly under a
				 * fault guard in case the user address is
				 * bad.
				 */
				if (on_fault(&ljb)) {
					no_fault();
					error = EFAULT;
					break;
				}
				uzero(iov->iov_base, iov->iov_len);
				no_fault();
				uio->uio_resid -= iov->iov_len;
				uio->uio_loffset += iov->iov_len;
				break;
			}
			/* else it's a write, fall through to NULL case */
			/*FALLTHROUGH*/
		case M_NULL:
			if (rw == UIO_READ)
				return (0);	/* reads of /dev/null: EOF */
			/* Consume (discard) the entire iovec. */
			c = iov->iov_len;
			iov->iov_base += c;
			iov->iov_len -= c;
			uio->uio_loffset += c;
			uio->uio_resid -= c;
			break;
		}
	}
	/* Partial success counts as success; report error only if no data moved. */
	return (uio->uio_resid == oresid ? error : 0);
}