/*
 * Report the modem control (MCR) and modem status (MSR) lines of a serial
 * port as a TIOCM_* bitmask.
 *
 * psPort      - port whose lines are queried.
 * value       - destination for the TIOCM_* mask.
 * bFromKernel - true: write straight to *value; false: copy to user space.
 *
 * Returns 0 on success, or the error from memcpy_to_user().
 */
static int get_modem_info( SerPort_s* psPort, uint32* value, bool bFromKernel )
{
	uint8 nMcr;
	uint8 nMsr;
	uint32 nBits = 0;
	uint32 nLockFlags;

	/* MCR is shadowed in software; MSR must be read from the chip under the lock. */
	nMcr = psPort->sp_nMCR;

	nLockFlags = spinlock_disable( &g_sSPinLock );
	nMsr = ser_in( psPort, UART_MSR );
	spinunlock_enable( &g_sSPinLock, nLockFlags );

	/* Translate output (MCR) bits. */
	if ( nMcr & UART_MCR_RTS )
		nBits |= TIOCM_RTS;
	if ( nMcr & UART_MCR_DTR )
		nBits |= TIOCM_DTR;
#ifdef TIOCM_OUT1
	if ( nMcr & UART_MCR_OUT1 )
		nBits |= TIOCM_OUT1;
	if ( nMcr & UART_MCR_OUT2 )
		nBits |= TIOCM_OUT2;
#endif
	/* Translate input (MSR) bits. */
	if ( nMsr & UART_MSR_DCD )
		nBits |= TIOCM_CAR;
	if ( nMsr & UART_MSR_RI )
		nBits |= TIOCM_RNG;
	if ( nMsr & UART_MSR_DSR )
		nBits |= TIOCM_DSR;
	if ( nMsr & UART_MSR_CTS )
		nBits |= TIOCM_CTS;

	if ( bFromKernel ) {
		*value = nBits;
		return( 0 );
	}
	return( memcpy_to_user( value, &nBits, sizeof(uint32) ) );
}
/*
 * Interrupt handler shared by the keyboard and AUX (mouse) PS/2 ports.
 *
 * Reads one byte from the controller's output buffer and pushes it into the
 * per-port ring buffer, waking any reader blocked in ps2_read().
 * Returns 0 in all cases.
 */
static int ps2_interrupt( int nIrqNum, void* pData, SysCallRegs_s* psRegs )
{
	uint32 nFlg;
	int nData;
	PS2_Port_s* psPort = (PS2_Port_s*)pData;

	nFlg = spinlock_disable( &g_sLock );
	/* Spurious interrupt, or data not for us: output buffer is empty. */
	if( ( inb( PS2_STATUS_REG ) & PS2_STS_OBF ) != PS2_STS_OBF ) {
		spinunlock_enable( &g_sLock, nFlg );
		return( 0 );
	}
	nData = inb( PS2_DATA_REG );
	if( !psPort->bIsAux ) {
		/* Record ACK/NAK replies (polled by ps2_ioctl()) BEFORE translating,
		 * since ConvertKeyCode() overwrites the raw byte. */
		if( nData == 0xfa ) {
			psPort->nAckReceived = 1; // Received ack
		} else if( nData == 0xfe ) {
			psPort->nAckReceived = -1; // Received noack
		}
		nData = ConvertKeyCode( nData );
	}
	/* Drop the byte silently if the ring buffer is full. */
	if( atomic_read( &psPort->nBytesReceived ) < PS2_BUF_SIZE ) {
		psPort->pBuffer[ atomic_inc_and_read( &psPort->nInPos ) % PS2_BUF_SIZE ] = nData;
		atomic_inc( &psPort->nBytesReceived );
		wakeup_sem( psPort->hWait, false );
	}
	spinunlock_enable( &g_sLock, nFlg );
	return( 0 );
}
/*
 * Power-management resume hook for a PS/2 port.
 *
 * Re-reads the controller command byte and, if the port was open before the
 * suspend, re-enables the clock and interrupt bits for that port, then writes
 * the command byte back. Always returns 0.
 */
status_t device_resume( int nDeviceID, int nDeviceHandle, void* pData )
{
	PS2_Port_s* psPort = (PS2_Port_s*)pData;
	uint32 nFlg;
	uint8 nControl;

	/* Enable interrupt */
	nFlg = spinlock_disable( &g_sLock );
	ps2_read_command( PS2_CMD_RCTR, &nControl );
	/* Only re-enable the port if somebody still has it open. */
	if( atomic_read( &psPort->nOpenCount ) > 0 ) {
		if( psPort->bIsAux ) {
			nControl &= ~PS2_CTR_AUXDIS;	/* clear "aux disabled" */
			nControl |= PS2_CTR_AUXINT;	/* enable aux interrupt */
			printk( "Resume AUX port\n" );
		} else {
			nControl &= ~PS2_CTR_KBDDIS;	/* clear "keyboard disabled" */
			nControl |= PS2_CTR_KBDINT;	/* enable keyboard interrupt */
			printk( "Resume keyboard port\n" );
		}
	}
	/* The command byte is written back unconditionally, even if unchanged. */
	ps2_write_command( PS2_CMD_WCTR, nControl );
	spinunlock_enable( &g_sLock, nFlg );
	return( 0 );
}
/*
 * Set, clear or replace the modem control (MCR) lines of a serial port.
 *
 * cmd         - TIOCMBIS (set bits), TIOCMBIC (clear bits) or TIOCMSET
 *               (replace the RTS/DTR/OUT1/OUT2 group wholesale).
 * value       - TIOCM_* bitmask, read from kernel or user space depending
 *               on bFromKernel.
 *
 * Returns 0 on success, -EINVAL for an unknown cmd, or the error from
 * memcpy_from_user(). The new MCR value is written to the chip under the
 * hardware spinlock.
 *
 * NOTE(review): the read-modify-write of sp_nMCR itself happens outside the
 * spinlock — presumably serialised by a higher-level lock; verify callers.
 */
static int set_modem_info( SerPort_s* psPort, int cmd, uint32* value, bool bFromKernel )
{
	int error;
	unsigned int arg;
	unsigned long flags;

	if ( bFromKernel ) {
		arg = *value;
		error = 0;
	} else {
		error = memcpy_from_user( &arg, value, sizeof(uint32) );
	}
	if (error)
		return error;

	switch (cmd) {
	case TIOCMBIS:	/* set the requested bits, leave the rest alone */
		if (arg & TIOCM_RTS)
			psPort->sp_nMCR |= UART_MCR_RTS;
		if (arg & TIOCM_DTR)
			psPort->sp_nMCR |= UART_MCR_DTR;
#ifdef TIOCM_OUT1
		if (arg & TIOCM_OUT1)
			psPort->sp_nMCR |= UART_MCR_OUT1;
		if (arg & TIOCM_OUT2)
			psPort->sp_nMCR |= UART_MCR_OUT2;
#endif
		break;
	case TIOCMBIC:	/* clear the requested bits, leave the rest alone */
		if (arg & TIOCM_RTS)
			psPort->sp_nMCR &= ~UART_MCR_RTS;
		if (arg & TIOCM_DTR)
			psPort->sp_nMCR &= ~UART_MCR_DTR;
#ifdef TIOCM_OUT1
		if (arg & TIOCM_OUT1)
			psPort->sp_nMCR &= ~UART_MCR_OUT1;
		if (arg & TIOCM_OUT2)
			psPort->sp_nMCR &= ~UART_MCR_OUT2;
#endif
		break;
	case TIOCMSET:	/* replace the whole controllable group atomically */
		psPort->sp_nMCR = ((psPort->sp_nMCR & ~(UART_MCR_RTS |
#ifdef TIOCM_OUT1
			UART_MCR_OUT1 |UART_MCR_OUT2 |
#endif
			UART_MCR_DTR))
			| ((arg & TIOCM_RTS) ? UART_MCR_RTS : 0)
#ifdef TIOCM_OUT1
			| ((arg & TIOCM_OUT1) ? UART_MCR_OUT1 : 0)
			| ((arg & TIOCM_OUT2) ? UART_MCR_OUT2 : 0)
#endif
			| ((arg & TIOCM_DTR) ? UART_MCR_DTR : 0));
		break;
	default:
		return -EINVAL;
	}
	/* Push the updated shadow register to the hardware. */
	flags = spinlock_disable( &g_sSPinLock );
	ser_out( psPort, UART_MCR, psPort->sp_nMCR );
	spinunlock_enable( &g_sSPinLock, flags );
	return 0;
}
/*
 * Blocking read from a PS/2 port's ring buffer.
 *
 * Sleeps (atomically releasing the spinlock via spinunlock_and_suspend)
 * until at least one byte is available, then drains up to nSize bytes.
 *
 * Returns the number of bytes copied into pBuffer, or a negative error
 * code if the suspend was interrupted.
 */
int ps2_read( void* pNode, void* pCookie, off_t nPosition, void* pBuffer, size_t nSize )
{
	PS2_Port_s* psPort = (PS2_Port_s*)pNode;
	uint32 nFlg;
	int nError;
	int nBytesReceived = 0;

again:
	nFlg = spinlock_disable( &g_sLock );
	if ( atomic_read( &psPort->nBytesReceived ) == 0 ) {
		/* Releases g_sLock and sleeps in one atomic step; the lock is NOT
		 * held when this returns, so we must loop back and re-take it. */
		nError = spinunlock_and_suspend( psPort->hWait, &g_sLock, nFlg, INFINITE_TIMEOUT );
		if ( nError < 0 ) {
			goto error;
		}
		goto again;
	}
	/* Copy out as much as is buffered, up to the caller's limit. */
	while( atomic_read( &psPort->nBytesReceived ) > 0 && nBytesReceived < nSize ) {
		((char*)pBuffer)[nBytesReceived++] = psPort->pBuffer[ atomic_inc_and_read( &psPort->nOutPos ) % PS2_BUF_SIZE ];
		atomic_dec( &psPort->nBytesReceived );
	}
	spinunlock_enable( &g_sLock, nFlg );
	return( nBytesReceived );
error:
	/* Lock already released by spinunlock_and_suspend(); just propagate. */
	return( nError );
}
/*
 * Snapshot the kernel allocator's usage counters.
 *
 * _used_pages - receives the number of pages currently held by kmalloc.
 * _alloc_size - receives the total number of bytes currently allocated.
 *
 * Both counters are read under kmalloc_lock so they form a consistent pair.
 */
void kmalloc_get_statistics( uint32_t* _used_pages, uint32_t* _alloc_size )
{
	spinlock_disable( &kmalloc_lock );
	*_alloc_size = alloc_size;
	*_used_pages = used_pages;
	spinunlock_enable( &kmalloc_lock );
}
// ===================================================================================================================== void kprintf(const char* format, ...) { va_list args; spinlock_disable(&s_console_lock); va_start(args, format); do_printf(kprintf_helper, NULL, format, args); va_end(args); spinunlock_enable(&s_console_lock); }
/*
 * Open a serial port: allocate the receive semaphores, reset the ring
 * buffer indices, program the UART baud-rate divisor and enable the
 * receive interrupt.
 *
 * Returns 0 on success, -EBUSY if the port is already open.
 *
 * BUGFIX: the divisor 115200 / sp_nBaudRate was previously computed in the
 * declaration, before any checks — a baud rate of 0 (B0, settable through
 * set_termios()) caused a division by zero. The divisor is now computed
 * only after the busy-check and guarded against a zero rate.
 */
status_t ser_open( void* pNode, uint32 nFlags, void **pCookie )
{
	SerPort_s* psPort = pNode;
	uint nDivisor;
	uint32 nFlg;

	if ( psPort->sp_bOpen == true ) {
		printk( "ser_open(): port is already open\n" );
		return( -EBUSY );
	}
	/* Guard against division by zero when the configured rate is 0 (B0). */
	nDivisor = ( psPort->sp_nBaudRate > 0 ) ? ( 115200 / psPort->sp_nBaudRate ) : 0;

	psPort->sp_bOpen = true;
	psPort->sp_nFlags = nFlags;
	psPort->sp_hRecvMutex = create_semaphore( "ser_recv_mutex", 1, 0 );
	psPort->sp_hRecvWaitQueue = create_semaphore( "ser_recv_queue", 0, 0 );
	psPort->sp_nRecvInPos = 0;
	psPort->sp_nRecvOutPos = 0;
	psPort->sp_nRecvSize = 0;
	psPort->sp_nMCR = 0x0f;	/* DTR | RTS | OUT1 | OUT2 asserted */

	nFlg = spinlock_disable( &g_sSPinLock );
	ser_out( psPort, UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN7 | UART_LCR_STOP );	// Set UART_LCR_DLAB to enable baud rate divisors
	ser_out( psPort, UART_DLL, nDivisor & 0xff );					// Baud rate divisor LSB
	ser_out( psPort, UART_DLM, nDivisor >> 8 );					// Baud rate divisor MSB
	ser_out( psPort, UART_LCR, UART_LCR_WLEN7 | UART_LCR_STOP );			// Clr UART_LCR_DLAB to disable baud rate divisors

	// Enable FIFO, IRQ when 8 bytes received
	ser_out( psPort, UART_FCR, /*UART_FCR_ENABLE_FIFO | UART_FCR6_R_TRIGGER_24*/ 0 );
	ser_out( psPort, UART_IER, UART_IER_RDI );	// receive irq enabled
	ser_out( psPort, UART_MCR, psPort->sp_nMCR );

	// Clear interrupt registers
	ser_in( psPort, UART_LSR );	// Line status (LSR)
	ser_in( psPort, UART_RX );
	ser_in( psPort, UART_IIR );	// Check interrupt type (IIR)
	ser_in( psPort, UART_MSR );	// Check modem status (MSR)
	spinunlock_enable( &g_sSPinLock, nFlg );
	return( 0 );
}
/*
 * Apply a new termios configuration to a serial port (visible fragment:
 * baud-rate handling; the function continues past this chunk).
 *
 * BUGFIX: every case in the baud-rate switch was missing its `break;`, so
 * ALL recognised rates fell through the remaining cases (ending with the
 * baud rate clobbered to 115200) and into `default:`, which rejected the
 * request with -EINVAL. Each case now breaks after setting sp_nBaudRate.
 */
static int set_termios( SerPort_s* psPort, struct termios* psInfo )
{
	struct termios sTermios;
	int nError;

	nError = memcpy_from_user( &sTermios, psInfo, sizeof( sTermios ) );
	if ( nError < 0 ) {
		return( nError );
	}
	/* Only reprogram the divisor when the requested rate actually changed. */
	if ( (psPort->sp_sTermios.c_cflag & CBAUD) != (sTermios.c_cflag & CBAUD) ) {
		uint32 nFlg;
		switch( sTermios.c_cflag & CBAUD ) {
			case B0:      psPort->sp_nBaudRate = 0;      break;
			case B50:     psPort->sp_nBaudRate = 50;     break;
			case B75:     psPort->sp_nBaudRate = 75;     break;
			case B110:    psPort->sp_nBaudRate = 110;    break;
			case B134:    psPort->sp_nBaudRate = 134;    break;
			case B150:    psPort->sp_nBaudRate = 150;    break;
			case B200:    psPort->sp_nBaudRate = 200;    break;
			case B300:    psPort->sp_nBaudRate = 300;    break;
			case B600:    psPort->sp_nBaudRate = 600;    break;
			case B1200:   psPort->sp_nBaudRate = 1200;   break;
			case B1800:   psPort->sp_nBaudRate = 1800;   break;
			case B2400:   psPort->sp_nBaudRate = 2400;   break;
			case B4800:   psPort->sp_nBaudRate = 4800;   break;
			case B9600:   psPort->sp_nBaudRate = 9600;   break;
			case B19200:  psPort->sp_nBaudRate = 19200;  break;
			case B38400:  psPort->sp_nBaudRate = 38400;  break;
			case B57600:  psPort->sp_nBaudRate = 57600;  break;
			case B115200: psPort->sp_nBaudRate = 115200; break;
//			case B230400: psPort->sp_nBaudRate = 230400; break;
//			case B460800: psPort->sp_nBaudRate = 460800; break;
			default:
				printk( "serial: set_termios() invalid baudrate %08x\n", sTermios.c_cflag & CBAUD );
				return( -EINVAL );
		}
		nFlg = spinlock_disable( &g_sSPinLock );
		/* B0 means "hang up": leave the divisor latch untouched. */
		if ( psPort->sp_nBaudRate > 0 ) {
			uint nDivisor = 115200 / psPort->sp_nBaudRate;
			ser_out( psPort, UART_LCR, 0x83 );		// Set bit 7 to enable baud rate divisors
			ser_out( psPort, UART_DLL, nDivisor & 0xff );	// Baud rate divisor LSB
			ser_out( psPort, UART_DLM, nDivisor >> 8 );	// Baud rate divisor MSB
			ser_out( psPort, UART_LCR, 0x03 );		// Clr bit 7 to disable baud rate divisors
		}
/*
 * Report the transmitter-empty (TEMT) state of a serial port as a
 * TIOCSER_TEMT flag.
 *
 * bFromKernel - true: store directly into *value; false: copy to user space.
 *
 * Returns 0 on success, or the error from memcpy_to_user().
 */
static int get_lsr_info( SerPort_s* psPort, uint32* value, bool bFromKernel )
{
	uint32 nLockFlags;
	uint8 nLsr;
	uint32 nResult = 0;

	/* Sample the line status register under the hardware lock. */
	nLockFlags = spinlock_disable( &g_sSPinLock );
	nLsr = ser_in( psPort, UART_LSR );
	spinunlock_enable( &g_sSPinLock, nLockFlags );

	if ( nLsr & UART_LSR_TEMT ) {
		nResult = TIOCSER_TEMT;
	}

	if ( !bFromKernel ) {
		return( memcpy_to_user( value, &nResult, sizeof(uint32) ) );
	}
	*value = nResult;
	return( 0 );
}
/*
 * Open a PS/2 port (keyboard or AUX).
 *
 * Enforces single-open via the atomic open counter, hooks the port's IRQ,
 * enables the device and its interrupt in the controller command byte, and
 * resets the receive ring buffer.
 *
 * Returns 0 on success, -EBUSY if already open or the IRQ is unavailable.
 */
status_t ps2_open( void* pNode, uint32 nFlags, void **pCookie )
{
	uint8 nControl;
	uint32 nFlg;
	PS2_Port_s* psPort = (PS2_Port_s*)pNode;

	printk( "ps2_open()\n" );
	/* atomic inc-and-test makes the single-open check race-free; undo the
	 * increment on every failure path. */
	if ( atomic_inc_and_read( &psPort->nOpenCount ) > 0 ) {
		atomic_dec( &psPort->nOpenCount );
		return( -EBUSY );
	}
	ps2_flush();
	psPort->nIrqHandle = request_irq( psPort->nIrq, ps2_interrupt, NULL, 0, "ps2", psPort );
	if ( psPort->nIrqHandle < 0 ) {
		printk( "PS2: Could not get irq %i\n", psPort->nIrq );
		atomic_dec( &psPort->nOpenCount );
		return( -EBUSY );
	}
	/* Enable device */
	if( psPort->bIsAux )
		ps2_command( PS2_CMD_AUX_ENABLE );
	/* Turn on the port's clock and interrupt in the command byte. */
	nFlg = spinlock_disable( &g_sLock );
	ps2_read_command( PS2_CMD_RCTR, &nControl );
	if( psPort->bIsAux ) {
		nControl &= ~PS2_CTR_AUXDIS;
		nControl |= PS2_CTR_AUXINT;
	} else {
		nControl &= ~PS2_CTR_KBDDIS;
		nControl |= PS2_CTR_KBDINT;
	}
	ps2_write_command( PS2_CMD_WCTR, nControl );
	spinunlock_enable( &g_sLock, nFlg );
	/* Start with an empty ring buffer. */
	atomic_set( &psPort->nBytesReceived, 0 );
	atomic_set( &psPort->nInPos, 0 );
	atomic_set( &psPort->nOutPos, 0 );
	return( 0 );
}
/*
 * Free a block previously returned by the bucket kmalloc().
 *
 * Locates the block's page descriptor and size bucket, puts the block back
 * on the page's free list, and releases the whole page to the page allocator
 * once every block on it is free.
 *
 * Returns 0 on success (including __ptr == NULL), -EINVAL if the pointer
 * does not look like a kmalloc'd block or the page is missing from the
 * bucket's free list.
 */
int __kfree( void *__ptr )
{
	int dma;
	unsigned long flags;
	unsigned int order;
	struct page_descriptor *page, **pg;
	struct size_descriptor *bucket;

	if ( !__ptr )
		goto null_kfree;
/* 'ptr' re-reads __ptr on every use, so it tracks the rebinding below. */
#define ptr ((struct block_header *) __ptr)
	page = PAGE_DESC( ptr );
	/* Step back over the block header that precedes the user data. */
	__ptr = ptr - 1;
	/* A page on a free list has a page-aligned ->next; low bits set means
	 * this was never a kmalloc page. */
	if ( ~PAGE_MASK & ( unsigned long )page->next )
		goto bad_order;
	order = page->order;
	if ( order >= sizeof( sizes ) / sizeof( sizes[0] ) )
		goto bad_order;
	bucket = sizes + order;
	dma = 0;
	pg = &bucket->firstfree;
	if ( ptr->bh_flags == MF_DMA ) {
		dma = 1;
		ptr->bh_flags = MF_USED;	/* normalise so the check below passes */
		pg = &bucket->dmafree;
	}
	if ( ptr->bh_flags != MF_USED ) {
		goto bad_order;	/* double free or corruption */
	}
	/* All checks happen before the lock; bad_order never needs an unlock. */
	flags = spinlock_disable( &g_sMemSpinLock );
	ptr->bh_flags = MF_FREE; /* As of now this block is officially free */
	atomic_sub( &g_sSysBase.ex_nKernelMemSize, ptr->bh_length );
	bucket->nfrees++;
	bucket->nbytesmalloced -= ptr->bh_length;
	/* Push the block onto the page's free list. */
	ptr->bh_next = page->firstfree;
	page->firstfree = ptr;
	if ( !page->nfree++ ) {
		// Page went from full to one free block: put it on the freelist.
		if ( bucket->nblocks == 1 )
			goto free_page;	/* one-block pages skip the list entirely */
		page->next = *pg;
		*pg = page;
	}
	// If page is completely free, free it
	if ( page->nfree == bucket->nblocks ) {
		/* Unlink the page from the bucket's free list first. */
		for ( ;; ) {
			struct page_descriptor *tmp = *pg;
			if ( !tmp ) {
				goto not_on_freelist;
			}
			if ( tmp == page ) {
				break;
			}
			pg = &tmp->next;
		}
		*pg = page->next;
free_page:
		bucket->npages--;
		free_kmalloc_pages( page, bucket->gfporder, dma );
	}
	spinunlock_enable( &g_sMemSpinLock, flags );
null_kfree:
	return ( 0 );
bad_order:
	/* ptr + 1 restores the caller's original pointer for the diagnostic. */
	printk( "kfree of non-kmalloced memory: %p, next= %p, order=%d\n", ptr + 1, page->next, page->order );
	return ( -EINVAL );
not_on_freelist:
	printk( "Ooops. page %p doesn't show on freelist.\n", page );
	spinunlock_enable( &g_sMemSpinLock, flags );
	return ( -EINVAL );
}
/*
 * Ugh, this is ugly, but we want the default case to run
 * straight through, which is why we have the ugly goto's
 */
/*
 * Bucket-based kernel allocator: round the request up to a power-of-two
 * block size, take the first free block from that bucket's page list, and
 * allocate (or pull from the per-order cache) a fresh page when the bucket
 * is empty.
 *
 * size     - number of payload bytes requested.
 * priority - allocation flags (MEMF_NOBLOCK etc.).
 *
 * Returns a zeroed payload pointer (just past the block header), or NULL
 * if the request is too large or no page could be obtained.
 */
void *kmalloc( size_t size, int priority )
{
	unsigned long flags;
	unsigned long type;
	int order, dma;
	struct block_header *p;
	struct page_descriptor *page, **pg;
	struct size_descriptor *bucket = sizes;

	/* Diagnostic hook (disabled): allocating while holding locked cache
	 * blocks could deadlock if the allocation has to block. */
	if ( CURRENT_THREAD != NULL && CURRENT_THREAD->tr_nNumLockedCacheBlocks > 0 && ( priority & MEMF_NOBLOCK ) == 0 ) {
		//printk( "Error: kmalloc() attempt to alloc memory while holding %d cache blocks locked. Could may lead to deadlock\n", CURRENT_THREAD->tr_nNumLockedCacheBlocks );
		//trace_stack( 0, NULL );
	}

	/* Get order */
	order = 0;
	{
		unsigned int realsize = size + sizeof( struct block_header );

		// kmalloc() is inefficient for allocations >= 128K
		//if ( realsize > BLOCKSIZE( 12 ) )
		//{
		//      printk( "Warning: kmalloc() of oversized block (%d bytes). Could cause fragmentation problems\n", size );
		//      trace_stack( 0, NULL );
		//}

		/* Walk the size table; ordersize == 0 marks the end of the table. */
		for ( ;; ) {
			int ordersize = BLOCKSIZE( order );

			if ( realsize <= ordersize )
				break;
			order++;
			bucket++;
			if ( ordersize )
				continue;
			printk( "kmalloc of too large a block (%d bytes).\n", ( int )size );
			return NULL;
		}
	}

	dma = 0;
	type = MF_USED;
	pg = &bucket->firstfree;
#ifndef __ATHEOS__
	if ( priority & GFP_DMA ) {
		dma = 1;
		type = MF_DMA;
		pg = &bucket->dmafree;
	}
#endif

	/* Sanity check: grab a free block from the bucket's first page. */
	flags = spinlock_disable( &g_sMemSpinLock );
	page = *pg;
	if ( !page )
		goto no_bucket_page;
	p = page->firstfree;
	if ( p->bh_flags != MF_FREE )
		goto not_free_on_freelist;

found_it:
	/* Unlink the block; drop the page off the list once it is full. */
	page->firstfree = p->bh_next;
	page->nfree--;
	if ( !page->nfree )
		*pg = page->next;
	spinunlock_enable( &g_sMemSpinLock, flags );
	bucket->nmallocs++;
	bucket->nbytesmalloced += size;
	p->bh_flags = type; /* As of now this block is officially in use */
	p->bh_length = size;
	memset( p +1, 0, size );
	atomic_add( &g_sSysBase.ex_nKernelMemSize, size );
	return ( p +1 );	/* Pointer arithmetic: increments past header */

no_bucket_page:
	/*
	 * If we didn't find a page already allocated for this
	 * bucket size, we need to get one..
	 *
	 * This can be done with ints on: it is private to this invocation
	 */
	spinunlock_enable( &g_sMemSpinLock, flags );
	{
		int i, sz;

		/* sz is the size of the blocks we're dealing with */
		sz = BLOCKSIZE( order );

		page = get_kmalloc_pages( priority, bucket->gfporder, dma );
		if ( !page )
			goto no_free_page;
found_cached_page:
		bucket->npages++;
		page->order = order;
		/* Carve the page into blocks and chain them into a free list. */
		/* Loop for all but last block: */
		i = ( page->nfree = bucket->nblocks ) - 1;
		p = BH( page + 1 );
		while ( i > 0 ) {
			i--;
			p->bh_flags = MF_FREE;
			p->bh_next = BH( ( ( long )p )+sz );
			p = p->bh_next;
		}
		/* Last block: */
		p->bh_flags = MF_FREE;
		p->bh_next = NULL;
		p = BH( page + 1 );	/* rewind to the first block for found_it */
	}
	/*
	 * Now we're going to muck with the "global" freelist
	 * for this size: this should be uninterruptible
	 */
	flags = spinlock_disable( &g_sMemSpinLock );
	page->next = *pg;
	*pg = page;
	goto found_it;

no_free_page:
	/*
	 * No free pages, check the kmalloc cache of
	 * pages to see if maybe we have something available
	 */
	if ( !dma && order < MAX_CACHE_ORDER ) {
		/* Atomically steal the cached page (swapping in NULL via 'page'). */
		page = ( struct page_descriptor * )atomic_swap( ( int * )( kmalloc_cache + order ), ( int )page );
		if ( page ) {
			goto found_cached_page;
		}
	}
	return NULL;

not_free_on_freelist:
	spinunlock_enable( &g_sMemSpinLock, flags );
	printk( "Problem: block on freelist at %08lx isn't free.\n", ( long )p );
	printk( "%p\n%p\n%p\n", __builtin_return_address( 0 ), __builtin_return_address( 1 ), __builtin_return_address( 2 ) );
	return NULL;
}
/*
 * Free a chunk previously returned by the chunk-list kmalloc().
 *
 * Validates the chunk header, marks the chunk free, poisons its payload,
 * and coalesces with free neighbours on both sides. Panics on an invalid
 * pointer or a double free. kfree(NULL) is a no-op.
 */
void kfree( void* p )
{
	kmalloc_chunk_t* chunk;

	if (__unlikely(p == NULL)) {
		return;
	}
#ifdef ENABLE_KMALLOC_BARRIERS
	/* Check and step back over the leading debug barrier. */
	kmalloc_validate_barriers(p);
	p = (uint8_t*)p - KMALLOC_BARRIER_SIZE;
#endif /* ENABLE_KMALLOC_BARRIERS */
	/* The chunk header sits immediately before the payload. */
	chunk = ( kmalloc_chunk_t* )( ( uint8_t* )p - sizeof( kmalloc_chunk_t ) );
	spinlock_disable( &kmalloc_lock );
	if (__unlikely(!kmalloc_chunk_validate(chunk))) {
		panic( "kfree(): Tried to free an invalid memory region! (%x)\n", p );
	}
	if (__unlikely(kmalloc_chunk_is_free(chunk))) {
		panic( "kfree(): Tried to free a non-allocated memory region! (%x)\n", p );
	}
#ifdef ENABLE_KMALLOC_DEBUG
	kfree_debug( p );
#endif
	alloc_size -= chunk->size;
	/* Make the current chunk free. */
	kmalloc_chunk_set_free(chunk, 1);
	/* Destroy the previous data in this memory chunk. */
	memset(chunk + 1, 0xAA, chunk->size);
	/* Merge with the previous chunk if it is free */
	if ((chunk->prev != NULL) && (kmalloc_chunk_is_free(chunk->prev))) {
		kmalloc_chunk_t* prev_chunk = chunk->prev;

		ASSERT(kmalloc_chunk_validate(chunk->prev));
		/* Wipe the absorbed header's magic so stale pointers fail validation. */
		chunk->magic = 0;
		prev_chunk->size += chunk->size;
		prev_chunk->size += sizeof(kmalloc_chunk_t);	/* absorbed header counts too */
		prev_chunk->next = chunk->next;
		chunk = prev_chunk;
		if ( chunk->next != NULL ) {
			chunk->next->prev = chunk;
		}
	}
	/* merge with the next chunk if it is free */
	if ((chunk->next != NULL) && (kmalloc_chunk_is_free(chunk->next))) {
		kmalloc_chunk_t* next_chunk = chunk->next;

		ASSERT(kmalloc_chunk_validate(next_chunk));
		next_chunk->magic = 0;
		chunk->size += next_chunk->size;
		chunk->size += sizeof( kmalloc_chunk_t );
		chunk->next = next_chunk->next;
		if ( chunk->next != NULL ) {
			chunk->next->prev = chunk;
		}
	}
	/* update the biggest free chunk size in the current block */
	if (chunk->size > chunk->block->biggest_free) {
		chunk->block->biggest_free = chunk->size;
	}
	spinunlock_enable( &kmalloc_lock );
}
/*
 * Chunk-list kernel allocator.
 *
 * Walks the block list for one whose biggest free chunk fits the request;
 * creates a new block (at least KMALLOC_BLOCK_SIZE) when none does. The
 * returned payload is filled with the 0xAA poison pattern.
 *
 * Returns the payload pointer, or NULL on a zero-size request or when a
 * new block cannot be created.
 */
void* kmalloc( uint32_t size )
{
	void* p;
	uint32_t min_size;
	uint32_t real_size = 0;
	kmalloc_block_t* block;

	/* Is this an invalid request? */
	if (__unlikely(size == 0)) {
		kprintf(WARNING, "kmalloc(): Called with 0 size!\n");
		return NULL;
	}
	/* Ensure the minimum allocation. */
	if (size < sizeof(ptr_t)) {
		size = sizeof(ptr_t);
	}
#ifdef ENABLE_KMALLOC_BARRIERS
	/* Room for a debug barrier on each side of the payload. */
	size += 2 * KMALLOC_BARRIER_SIZE;
#endif /* ENABLE_KMALLOC_BARRIERS */
	spinlock_disable(&kmalloc_lock);
	/* First-fit over existing blocks, keyed on the cached biggest_free. */
	block = root;
	while (block != NULL) {
		ASSERT(kmalloc_block_validate(block));
		if (block->biggest_free >= size) {
			goto block_found;
		}
		block = block->next;
	}
	/* create a new block */
	min_size = PAGE_ALIGN(size + sizeof(kmalloc_block_t) + sizeof(kmalloc_chunk_t));
	if (min_size < KMALLOC_BLOCK_SIZE) {
		min_size = KMALLOC_BLOCK_SIZE;
	}
	block = kmalloc_block_create(min_size / PAGE_SIZE);
	if (__unlikely(block == NULL)) {
		spinunlock_enable(&kmalloc_lock);
		return NULL;
	}
	/* link the new block to the list */
	block->next = root;
	root = block;
	/* allocate the required memory from the new block */
block_found:
	p = __kmalloc_from_block(block, size, &real_size);
#ifdef ENABLE_KMALLOC_DEBUG
	kmalloc_debug(size, p);
#endif
#ifdef ENABLE_KMALLOC_BARRIERS
	if (p != NULL) {
		/* Advances p past the leading barrier and stamps both barriers. */
		p = kmalloc_create_barriers(p, real_size);
	}
#endif /* ENABLE_KMALLOC_BARRIERS */
	spinunlock_enable(&kmalloc_lock);
	if (__likely(p != NULL)) {
		/* Poison only the payload; the barrier bytes must stay intact. */
#ifdef ENABLE_KMALLOC_BARRIERS
		memset(p, 0xaa, size - 2 * KMALLOC_BARRIER_SIZE);
#else
		memset(p, 0xaa, size);
#endif /* ENABLE_KMALLOC_BARRIERS */
	}
	return p;
}
/*
 * Allocate nPageCount physically consecutive pages.
 *
 * Single-page requests pop the head of the free list; multi-page requests
 * scan the (address-ordered) free list for a run of descriptors that are
 * consecutive in the descriptor array (and hence physically contiguous).
 *
 * nFlags - MEMF_CLEAR zeroes the pages before returning.
 *
 * Returns the physical/linear address of the first page, or 0 on failure.
 */
uint32 get_free_pages( int nPageCount, int nFlags )
{
	Page_s *psPage;
	Page_s **ppsStart;
	Page_s *psPrev;
	uint32 nPage = 0;
	uint32 nEFlg;

	nEFlg = spinlock_disable( &g_sPageListSpinLock );
	if ( NULL != g_psFirstFreePage ) {
		if ( 1 == nPageCount ) {
			/* Fast path: take the first free page. */
			psPage = g_psFirstFreePage;
			g_psFirstFreePage = psPage->p_psNext;
			psPage->p_psNext = NULL;
			kassertw( 0 == atomic_read( &psPage->p_nCount ) );
			atomic_inc( &psPage->p_nCount );
			g_nAllocatedPages++;
			atomic_dec( &g_sSysBase.ex_nFreePageCount );
			nPage = psPage->p_nPageNum * PAGE_SIZE;
		} else {
//			static int nCalls = 0;
//			static int nTotLoops = 0;
//			nCalls++;
			/* ppsStart tracks the link pointing at the first page of the
			 * current consecutive run; descriptor-pointer differences of 1
			 * mean physically adjacent pages. */
			psPrev = g_psFirstFreePage;
			ppsStart = &g_psFirstFreePage;
			for ( psPage = g_psFirstFreePage->p_psNext; NULL != psPage; psPage = psPage->p_psNext ) {
//				nTotLoops++;
				if ( ( psPage - psPrev ) > 1 ) {
					/* Gap in the run: restart it at this page. */
					ppsStart = &psPrev->p_psNext;
				} else {
					if ( ( psPage - ( *ppsStart ) ) >= nPageCount - 1 ) {
						/* Run is long enough: claim every page in it. */
						Page_s *psTmp;

						psTmp = *ppsStart;
						nPage = ( *ppsStart )->p_nPageNum * PAGE_SIZE;
						*ppsStart = psPage->p_psNext;	/* unlink whole run */
						for ( ; psTmp <= psPage; psTmp++ ) {
							if ( 0 != atomic_read( &psTmp->p_nCount ) ) {
								printk( "Page %d present on free list with count of %d\n", psTmp->p_nPageNum, atomic_read( &psTmp->p_nCount ) );
							}
							atomic_inc( &psTmp->p_nCount );
							psTmp->p_psNext = NULL;
							g_nAllocatedPages++;
							atomic_dec( &g_sSysBase.ex_nFreePageCount );
						}
						break;
					}
				}
				psPrev = psPage;
			}
		}
	}
	spinunlock_enable( &g_sPageListSpinLock, nEFlg );
	if ( ( 0 != nPage ) && ( nFlags & MEMF_CLEAR ) ) {
		memset( ( void * )nPage, 0, PAGE_SIZE * nPageCount );
	}
	//flush_tlb_global();
	return ( nPage );
}
/*****************************************************************************
 * NAME:	do_free_pages
 * DESC:	Drop one reference on each of the nCount pages starting at
 *		physical address nPage; pages whose count reaches zero are
 *		returned to the (address-ordered) free list.
 * NOTE:	Runs entirely under g_sPageListSpinLock. ppsNext caches the
 *		free-list insertion point across loop iterations.
 *		(Function is truncated in this view — tail not shown.)
 * SEE ALSO:	get_free_pages()
 ****************************************************************************/
void do_free_pages( uint32 nPage, int nCount )
{
	Page_s *psPage = &g_psFirstPage[nPage >> PAGE_SHIFT];
	Page_s **ppsNext = NULL;
	uint32 nEFlg;
	int i;

	nEFlg = spinlock_disable( &g_sPageListSpinLock );
	for ( i = 0; i < nCount; ++i, ++psPage, nPage += PAGE_SIZE ) {
		atomic_dec( &psPage->p_nCount );
		kassertw( atomic_read( &psPage->p_nCount ) >= 0 );
		if ( 0 == atomic_read( &psPage->p_nCount ) ) {
			g_nAllocatedPages--;
			atomic_inc( &g_sSysBase.ex_nFreePageCount );
			if ( ppsNext == NULL ) {
				/* First freed page: find where it belongs in the list. */
				if ( g_psFirstFreePage == NULL || psPage < g_psFirstFreePage )
/*
 * ioctl handler for the keyboard PS/2 port: toggles the LED state bits and
 * pushes them to the keyboard with the SETLEDS command.
 *
 * AUX (mouse) ports accept no ioctls and return 0 immediately.
 *
 * Protocol: send the command byte, poll nAckReceived (set by the IRQ
 * handler on 0xfa/0xfe) for up to ~200ms, then send the LED data byte and
 * poll again. All outcomes — including failure — return 0.
 *
 * NOTE(review): an unrecognised ioctl only logs a message and still falls
 * through to the LED update below (re-sending the current LED state);
 * confirm this is intentional rather than returning early.
 */
status_t ps2_ioctl( void* pNode, void* pCookie, uint32 nCommand, void* pArgs, bool bFromKernel )
{
	PS2_Port_s* psPort = (PS2_Port_s*)pNode;
	uint32 nFlg;

	if( psPort->bIsAux )
		return( 0 );

	/* Update the global LED shadow state. */
	switch( nCommand ) {
		case IOCTL_KBD_LEDRST:
			g_nKbdLedStatus=0;
		break;
		case IOCTL_KBD_SCRLOC:
			g_nKbdLedStatus ^= 0x01;	/* scroll lock */
		break;
		case IOCTL_KBD_NUMLOC:
			g_nKbdLedStatus ^= 0x02;	/* num lock */
		break;
		case IOCTL_KBD_CAPLOC:
			g_nKbdLedStatus ^= 0x04;	/* caps lock */
		break;
		default:
			printk( "PS2: Unknown IOCTL %x\n",(int)nCommand );
		break;
	}
	/* Write command */
	psPort->nAckReceived = 0;
	nFlg = spinlock_disable( &g_sLock );
	ps2_wait_write();
	outb( PS2_CMD_KBD_SETLEDS, PS2_DATA_REG );
	spinunlock_enable( &g_sLock, nFlg );
	/* Poll for the ACK the IRQ handler records (1ms x 200 = ~200ms budget). */
	int i = 0;
	while( psPort->nAckReceived == 0 && i < 200 )
	{
		snooze( 1000 );
		i++;
	}
	if( psPort->nAckReceived == -1 ) {
		printk( "Could not set LED status: Hardware reported an error for the command!\n" );
		return( 0 );
	}
	if( i == 200 ) {
		printk( "Could not set LED status: Timeout!\n" );
		return( 0 );
	}
	/* Command accepted: now send the LED bitmask as the data byte. */
	psPort->nAckReceived = 0;
	nFlg = spinlock_disable( &g_sLock );
	ps2_wait_write();
	outb( g_nKbdLedStatus, PS2_DATA_REG );
	spinunlock_enable( &g_sLock, nFlg );
	i = 0;
	while( psPort->nAckReceived == 0 && i < 200 )
	{
		snooze( 1000 );
		i++;
	}
	if( psPort->nAckReceived == -1 ) {
		printk( "Could not set LED status: Hardware reported an error for the data!\n" );
		return( 0 );
	}
	if( i == 200 ) {
		printk( "Could not set LED status: Timeout!\n" );
		return( 0 );
	}
	return( 0 );
}