/*
 * Initialize our memory allocator.
 *
 * Arguments:
 *	eup	Pointer to per unit structure
 *
 * Returns:
 *	size	Physical RAM size
 *	-1	failed to initialize memory
 *
 */
int
eni_init_memory(Eni_unit *eup)
{
	/*
	 * Have we (somehow) been called before?
	 */
	if ( eup->eu_memmap != NULL ) {
		/* Oops - it's already been initialized */
		return -1;
	}

	/*
	 * Allocate initial element which will hold all of memory
	 */
	eup->eu_memmap = (Mbd *)KM_ALLOC(sizeof(Mbd), M_DEVBUF, M_WAITOK);

	/*
	 * Test and size memory
	 */
	eup->eu_ramsize = eni_test_memory ( eup );

	/*
	 * Initialize a one element list which contains
	 * all buffer memory
	 */
	eup->eu_memmap->prev = eup->eu_memmap->next = NULL;
	eup->eu_memmap->base = (caddr_t)SEGBUF_BASE;
	eup->eu_memmap->size = eup->eu_ramsize - SEGBUF_BASE;
	eup->eu_memmap->state = MEM_FREE;

	return ( eup->eu_ramsize );
}
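/*
 * Illustrative sketch only - not part of the driver sources. This is
 * what the memory block descriptor plausibly looks like, inferred
 * purely from the fields used above (prev, next, base, size, state).
 * The authoritative Mbd definition and the MEM_* state values live in
 * the ENI driver's header; the field types here are assumptions.
 */
typedef struct mbd_sketch Mbd_sketch;
struct mbd_sketch {
	Mbd_sketch	*prev;	/* previous segment in address order */
	Mbd_sketch	*next;	/* next segment in address order */
	caddr_t		base;	/* adapter-relative base of segment */
	int		size;	/* segment size in bytes */
	int		state;	/* MEM_FREE or MEM_INUSE */
};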
/*
 * Allocate a buffer from adapter RAM. Due to constraints on the card,
 * we may round up the size request to the next largest chunksize. Note
 * also that we must pay attention to address alignment within adapter
 * memory as well.
 *
 * Arguments:
 *	eup	pointer to per unit structure
 *	size	pointer to requested size - in bytes
 *
 * Returns:
 *	addr	address relative to adapter of allocated memory
 *	size	modified to reflect actual size of buffer
 *
 */
caddr_t
eni_allocate_buffer(Eni_unit *eup, u_long *size)
{
	int	nsize;
	int	nclicks;
	Mbd	*eptr = eup->eu_memmap;

	/*
	 * Initial size requested
	 */
	nsize = *size;

	/*
	 * Find the buffer size which will hold this request. There
	 * are 8 possible sizes, each a power of two up, starting at
	 * 256 words or 1024 bytes.
	 */
	for ( nclicks = 0; nclicks < ENI_BUF_NBIT; nclicks++ )
		if ( ( 1 << nclicks ) * ENI_BUF_PGSZ >= nsize )
			break;

	/*
	 * Request was for more than the card supports
	 */
	if ( nclicks >= ENI_BUF_NBIT ) {
		eup->eu_stats.eni_st_drv.drv_mm_toobig++;
		/* Indicate 0 bytes allocated */
		*size = 0;
		/* Return NULL buffer */
		return ( (caddr_t)NULL );
	}

	/*
	 * New size will be buffer size
	 */
	nsize = ( 1 << nclicks ) * ENI_BUF_PGSZ;

	/*
	 * Look through memory for a segment large enough to
	 * hold request
	 */
	while ( eptr ) {
		/*
		 * State must be FREE and size must hold request
		 */
		if ( eptr->state == MEM_FREE && eptr->size >= nsize ) {
			/*
			 * Request will fit - now check if the
			 * alignment needs fixing
			 */
			if ( ((u_int)eptr->base & (nsize-1)) != 0 ) {
				caddr_t	nbase;

				/*
				 * Calculate where the buffer would have to
				 * fall to be aligned.
				 */
				nbase = (caddr_t)((u_int)( eptr->base + nsize )
				    & ~(nsize-1));
				/*
				 * If we use this alignment, will it still
				 * fit? (Comparing against nsize, not zero:
				 * the remainder past the aligned base must
				 * still hold a full buffer.)
				 */
				if ( (eptr->size - (nbase - eptr->base)) >= nsize ) {
					Mbd	*etmp;

					/* Yep - create a new segment */
					etmp = (Mbd *)KM_ALLOC(sizeof(Mbd),
					    M_DEVBUF, M_WAITOK);
					/* Place it in the list */
					etmp->next = eptr->next;
					if ( etmp->next )
						etmp->next->prev = etmp;
					etmp->prev = eptr;
					eptr->next = etmp;
					/* Fill in new base and size */
					etmp->base = nbase;
					etmp->size = eptr->size -
					    ( nbase - eptr->base );
					/* Adjust old size */
					eptr->size -= etmp->size;
					/* Mark its state */
					etmp->state = MEM_FREE;
					eptr = etmp;
					/* Done - outa here */
					break;
				}
			} else
				break;	/* Alignment is okay - we're done */
		}
		/* Haven't found anything yet - keep looking */
		eptr = eptr->next;
	}

	if ( eptr != NULL ) {
		/* Found a usable segment - grab what we need */

		/* Exact fit? */
		if ( eptr->size == nsize )
			/* Mark it as INUSE */
			eptr->state = MEM_INUSE;
		else {
			Mbd	*etmp;

			/* Larger than we need - split it */
			etmp = (Mbd *)KM_ALLOC(sizeof(Mbd), M_DEVBUF,
			    M_WAITOK);
			/* Place new element in list */
			etmp->next = eptr->next;
			if ( etmp->next )
				etmp->next->prev = etmp;
			etmp->prev = eptr;
			eptr->next = etmp;
			/* Set new base, size and state */
			etmp->base = eptr->base + nsize;
			etmp->size = eptr->size - nsize;
			etmp->state = MEM_FREE;
			/* Adjust size and state of element we intend to use */
			eptr->size = nsize;
			eptr->state = MEM_INUSE;
		}
	}

	/* After all that, did we find a usable buffer? */
	if ( eptr ) {
		/* Record another inuse buffer of this size */
		if ( eptr->base )
			eup->eu_memclicks[nclicks]++;

		/*
		 * Return true size of allocated buffer
		 */
		*size = eptr->size;
		/*
		 * The address returned is already relative to the start
		 * of adapter RAM (the free list was seeded at SEGBUF_BASE),
		 * since it's for use by the adapter, not the host.
		 */
		return ((caddr_t)eptr->base);
	} else {
		eup->eu_stats.eni_st_drv.drv_mm_nobuf++;
		/* No buffer to return - indicate zero length */
		*size = 0;
		/* Return NULL buffer */
		return ( (caddr_t)NULL );
	}
}
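/*
 * Standalone illustration (compilable in userland) of the size-class
 * rounding performed by eni_allocate_buffer(). The constant values
 * below are assumptions taken from the comment in that function -
 * eight classes, the smallest 1024 bytes - not from the driver's
 * headers. Alignment handling is omitted; this shows rounding only.
 */
#include <stdio.h>

#define DEMO_NBIT	8	/* assumed value of ENI_BUF_NBIT */
#define DEMO_PGSZ	1024	/* assumed value of ENI_BUF_PGSZ (bytes) */

int
main(void)
{
	unsigned long	req[] = { 100, 1024, 1025, 60000, 200000 };
	int		i, nclicks;

	for (i = 0; i < 5; i++) {
		/* Same search as the driver: smallest class that fits */
		for (nclicks = 0; nclicks < DEMO_NBIT; nclicks++)
			if ((1UL << nclicks) * DEMO_PGSZ >= req[i])
				break;
		if (nclicks >= DEMO_NBIT)
			printf("%lu bytes: too big for the card\n", req[i]);
		else
			printf("%lu bytes -> %lu byte buffer (class %d)\n",
			    req[i], (1UL << nclicks) * DEMO_PGSZ, nclicks);
	}
	return (0);
}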
/*
 * Allocate a Control Block
 *
 * Gets a new control block allocated from the specified storage pool,
 * acquiring memory for new pool chunks if required. The returned control
 * block's contents will be cleared.
 *
 * Arguments:
 *	sip	pointer to sp_info for storage pool
 *
 * Returns:
 *	addr	pointer to allocated control block
 *	0	allocation failed
 *
 */
void *
atm_allocate(struct sp_info *sip)
{
	void		*bp;
	struct sp_chunk	*scp;
	struct sp_link	*slp;

	crit_enter();

	/*
	 * Count calls
	 */
	sip->si_allocs++;

	/*
	 * Are there any free in the pool?
	 */
	if (sip->si_free) {
		/*
		 * Find first chunk with a free block
		 */
		for (scp = sip->si_poolh; scp; scp = scp->sc_next) {
			if (scp->sc_freeh != NULL)
				break;
		}
	} else {
		/*
		 * No free blocks - have to allocate a new
		 * chunk (but put a limit to this)
		 */
		struct sp_link	*slp_next;
		int	i;

		/*
		 * First time for this pool??
		 */
		if (sip->si_chunksiz == 0) {
			size_t	n;

			/*
			 * Initialize pool information
			 */
			n = sizeof(struct sp_chunk) +
			    sip->si_blkcnt *
			    (sip->si_blksiz + sizeof(struct sp_link));
			sip->si_chunksiz = roundup(n, SPOOL_ROUNDUP);

			/*
			 * Place pool on kernel chain
			 */
			LINK2TAIL(sip, struct sp_info, atm_pool_head,
			    si_next);
		}

		if (sip->si_chunks >= sip->si_maxallow) {
			sip->si_fails++;
			crit_exit();
			return (NULL);
		}

		scp = KM_ALLOC(sip->si_chunksiz, M_DEVBUF,
		    M_INTWAIT | M_NULLOK);
		if (scp == NULL) {
			sip->si_fails++;
			crit_exit();
			return (NULL);
		}
		scp->sc_next = NULL;
		scp->sc_info = sip;
		scp->sc_magic = SPOOL_MAGIC;
		scp->sc_used = 0;

		/*
		 * Divvy up chunk into free blocks
		 */
		slp = (struct sp_link *)(scp + 1);
		scp->sc_freeh = slp;

		for (i = sip->si_blkcnt; i > 1; i--) {
			slp_next = (struct sp_link *)((caddr_t)(slp + 1) +
			    sip->si_blksiz);
			slp->sl_u.slu_next = slp_next;
			slp = slp_next;
		}
		slp->sl_u.slu_next = NULL;
		scp->sc_freet = slp;

		/*
		 * Add new chunk to end of pool
		 */
		if (sip->si_poolh)
			sip->si_poolt->sc_next = scp;
		else
			sip->si_poolh = scp;
		sip->si_poolt = scp;

		sip->si_chunks++;
		sip->si_total += sip->si_blkcnt;
		sip->si_free += sip->si_blkcnt;
		if (sip->si_chunks > sip->si_maxused)
			sip->si_maxused = sip->si_chunks;
	}

	/*
	 * Allocate the first free block in chunk
	 */
	slp = scp->sc_freeh;
	scp->sc_freeh = slp->sl_u.slu_next;
	scp->sc_used++;
	sip->si_free--;
	bp = (slp + 1);

	/*
	 * Save link back to pool chunk
	 */
	slp->sl_u.slu_chunk = scp;

	/*
	 * Clear out block
	 */
	KM_ZERO(bp, sip->si_blksiz);

	crit_exit();

	return (bp);
}
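/*
 * Hypothetical usage sketch - not from the driver sources. It shows a
 * storage pool being declared and a zeroed control block drawn from
 * it. The initializer assumes sp_info begins with name, block size,
 * blocks-per-chunk and maximum-chunk fields, as suggested by the
 * fields atm_allocate() reads above; check the HARP headers for the
 * real layout. atm_free() is named as the presumed companion release
 * routine.
 */
struct demo_blk {			/* hypothetical control block type */
	int	db_refs;
	char	db_name[32];
};

static struct sp_info demo_pool = {
	"demo control block pool",	/* si_name */
	sizeof(struct demo_blk),	/* si_blksiz - block size */
	10,				/* si_blkcnt - blocks per chunk */
	100				/* si_maxallow - chunk limit */
};

static struct demo_blk *
demo_get(void)
{
	struct demo_blk	*dbp;

	dbp = (struct demo_blk *)atm_allocate(&demo_pool);
	if (dbp == NULL)
		return (NULL);	/* chunk limit reached or KM_ALLOC failed */
	/* Block arrives zeroed; fill it in, release later via atm_free() */
	return (dbp);
}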