/*
 * Size and test adapter RAM
 *
 * Walk through adapter RAM writing known patterns and reading back
 * for comparison.  We write more than one pattern on the off chance
 * that we "get lucky" and read what we expected.  Sizing stops at the
 * first TEST_STEP-aligned word that fails either pattern.
 *
 * Arguments:
 *	eup	pointer to device unit structure
 *
 * Returns:
 *	size	memory size in bytes
 */
static int
eni_test_memory(Eni_unit *eup)
{
	int	ram_size = 0;
	int	i;
	Eni_mem	mp;

	/*
	 * Walk through to maximum looking for RAM
	 */
	for (i = 0; i < MAX_ENI_MEM; i += TEST_STEP) {
		/*
		 * Advance by byte (caddr_t) arithmetic.  The previous
		 * (int) cast of eu_ram truncated the mapped RAM base
		 * address on 64-bit platforms.
		 */
		mp = (Eni_mem)((caddr_t)eup->eu_ram + i);
		/* write pattern */
		*mp = (u_long)TEST_PAT;
		/* read pattern, match? */
		if (*mp == (u_long)TEST_PAT) {
			/* yes - write inverse pattern */
			*mp = (u_long)~TEST_PAT;
			/* read pattern, match? */
			if (*mp == (u_long)~TEST_PAT) {
				/* yes - assume another TEST_STEP bytes available */
				ram_size = i + TEST_STEP;
			} else
				break;
		} else
			break;
	}

	/*
	 * Clear all RAM to initial value of zero.
	 * This makes sure we don't leave anything funny in the
	 * queues.
	 */
	KM_ZERO(eup->eu_ram, ram_size);

	/*
	 * If we'd like to claim to have less memory, here's where
	 * we do so.  We take the minimum of what we'd like and what
	 * we really found on the adapter.
	 */
	ram_size = MIN(ram_size, eni_mem_max);

	return (ram_size);
}
/* * Process a SPANS VCC timeout * * Called when a previously scheduled SPANS VCCB timer expires. * Processing will based on the current VCC state. * * Called at splnet. * * Arguments: * tip pointer to vccb timer control block * * Returns: * none * */ void spans_vctimer(struct atm_time *tip) { int err; struct spans *spp; struct spans_vccb *svp; /* * Get VCCB and SPANS control block addresses */ svp = (struct spans_vccb *) ((caddr_t)tip - (int)(&((struct vccb *)0)->vc_time)); spp = (struct spans *)svp->sv_pif->pif_siginst; ATM_DEBUG3("spans_vctimer: svp=%p, sstate=%d, ustate=%d\n", svp, svp->sv_sstate, svp->sv_ustate); /* * Process timeout based on protocol state */ switch (svp->sv_sstate) { case SPANS_VC_ABORT: /* * Kill the VCCB and notify the owner */ err = spans_clear_vcc(spp, svp); break; case SPANS_VC_FREE: /* * Free VCCB storage */ svp->sv_ustate = VCCU_CLOSED; svp->sv_sstate = SPANS_VC_FREE; spans_free((struct vccb *)svp); break; case SPANS_VC_POPEN: /* * Issued open request, but didn't get response. */ if (svp->sv_retry < SV_MAX_RETRY) { /* * Retransmit the open request */ err = spans_send_open_req(spp, svp); svp->sv_retry++; SPANS_VC_TIMER((struct vccb *) svp, SV_TIMEOUT); } else { /* * Retry limit exceeded--report the open failed */ svp->sv_ustate = VCCU_CLOSED; svp->sv_sstate = SPANS_VC_FREE; svp->sv_connvc->cvc_attr.cause.tag = T_ATM_PRESENT; svp->sv_connvc->cvc_attr.cause.v.coding_standard = T_ATM_ITU_CODING; svp->sv_connvc->cvc_attr.cause.v.location = T_ATM_LOC_USER; svp->sv_connvc->cvc_attr.cause.v.cause_value = T_ATM_CAUSE_NO_USER_RESPONDING; KM_ZERO(svp->sv_connvc->cvc_attr.cause.v.diagnostics, sizeof(svp->sv_connvc->cvc_attr.cause.v.diagnostics)); atm_cm_cleared(svp->sv_connvc); } break; case SPANS_VC_CLOSE: /* * Issued close request, but didn't get response. 
*/ if (svp->sv_retry < SV_MAX_RETRY) { /* * Retransmit the close request */ err = spans_send_close_req(spp, svp); svp->sv_retry++; SPANS_VC_TIMER((struct vccb *) svp, SV_TIMEOUT); } else { /* * Retry limit exceeded--just finish the close */ svp->sv_sstate = SPANS_VC_FREE; svp->sv_connvc->cvc_attr.cause.tag = T_ATM_PRESENT; svp->sv_connvc->cvc_attr.cause.v.coding_standard = T_ATM_ITU_CODING; svp->sv_connvc->cvc_attr.cause.v.location = T_ATM_LOC_USER; svp->sv_connvc->cvc_attr.cause.v.cause_value = T_ATM_CAUSE_NO_USER_RESPONDING; KM_ZERO(svp->sv_connvc->cvc_attr.cause.v.diagnostics, sizeof(svp->sv_connvc->cvc_attr.cause.v.diagnostics)); atm_cm_cleared(svp->sv_connvc); } break; case SPANS_VC_ACTIVE: case SPANS_VC_ACT_DOWN: /* * Shouldn't happen */ log(LOG_ERR, "spans_vctimer: unexpected state %d\n", svp->sv_sstate); break; default: log(LOG_ERR, "spans: vctimer state: svp=%p, sstate=%d\n", svp, svp->sv_sstate); } }
/*
 * Allocate a Control Block
 *
 * Gets a new control block allocated from the specified storage pool,
 * acquiring memory for new pool chunks if required.  The returned control
 * block's contents will be cleared.
 *
 * Runs inside a critical section (crit_enter/crit_exit); every exit
 * path below releases it.
 *
 * Arguments:
 *	sip	pointer to sp_info for storage pool
 *
 * Returns:
 *	addr	pointer to allocated control block
 *	0	allocation failed (chunk limit reached or out of memory)
 *
 */
void *
atm_allocate(struct sp_info *sip)
{
	void		*bp;
	struct sp_chunk	*scp;
	struct sp_link	*slp;

	crit_enter();

	/*
	 * Count calls
	 */
	sip->si_allocs++;

	/*
	 * Are there any free in the pool?
	 */
	if (sip->si_free) {
		/*
		 * Find first chunk with a free block.
		 * NOTE(review): assumes si_free is consistent with the
		 * chunks' free lists, so the scan always finds one -
		 * scp is dereferenced unconditionally below.
		 */
		for (scp = sip->si_poolh; scp; scp = scp->sc_next) {
			if (scp->sc_freeh != NULL)
				break;
		}
	} else {
		/*
		 * No free blocks - have to allocate a new
		 * chunk (but put a limit to this)
		 */
		struct sp_link	*slp_next;
		int	i;

		/*
		 * First time for this pool??
		 */
		if (sip->si_chunksiz == 0) {
			size_t	n;

			/*
			 * Initialize pool information: a chunk is a
			 * header followed by si_blkcnt blocks, each
			 * preceded by its sp_link.
			 */
			n = sizeof(struct sp_chunk) +
			    sip->si_blkcnt *
			    (sip->si_blksiz + sizeof(struct sp_link));
			sip->si_chunksiz = roundup(n, SPOOL_ROUNDUP);

			/*
			 * Place pool on kernel chain
			 */
			LINK2TAIL(sip, struct sp_info, atm_pool_head, si_next);
		}

		/* enforce the per-pool chunk cap */
		if (sip->si_chunks >= sip->si_maxallow) {
			sip->si_fails++;
			crit_exit();
			return (NULL);
		}

		scp = KM_ALLOC(sip->si_chunksiz, M_DEVBUF,
		    M_INTWAIT | M_NULLOK);
		if (scp == NULL) {
			sip->si_fails++;
			crit_exit();
			return (NULL);
		}
		scp->sc_next = NULL;
		scp->sc_info = sip;
		scp->sc_magic = SPOOL_MAGIC;
		scp->sc_used = 0;

		/*
		 * Divy up chunk into free blocks: thread an sp_link
		 * through the chunk, each link sitting immediately
		 * before its si_blksiz-byte block.
		 */
		slp = (struct sp_link *)(scp + 1);
		scp->sc_freeh = slp;
		for (i = sip->si_blkcnt; i > 1; i--) {
			slp_next = (struct sp_link *)
			    ((caddr_t)(slp + 1) + sip->si_blksiz);
			slp->sl_u.slu_next = slp_next;
			slp = slp_next;
		}
		/* last block terminates the free list and is the tail */
		slp->sl_u.slu_next = NULL;
		scp->sc_freet = slp;

		/*
		 * Add new chunk to end of pool
		 */
		if (sip->si_poolh)
			sip->si_poolt->sc_next = scp;
		else
			sip->si_poolh = scp;
		sip->si_poolt = scp;

		/* update pool accounting and high-water mark */
		sip->si_chunks++;
		sip->si_total += sip->si_blkcnt;
		sip->si_free += sip->si_blkcnt;
		if (sip->si_chunks > sip->si_maxused)
			sip->si_maxused = sip->si_chunks;
	}

	/*
	 * Allocate the first free block in chunk
	 */
	slp = scp->sc_freeh;
	scp->sc_freeh = slp->sl_u.slu_next;
	scp->sc_used++;
	sip->si_free--;
	/* caller's block starts just past the link header */
	bp = (slp + 1);

	/*
	 * Save link back to pool chunk (used when the block is freed)
	 */
	slp->sl_u.slu_chunk = scp;

	/*
	 * Clear out block
	 */
	KM_ZERO(bp, sip->si_blksiz);

	crit_exit();
	return (bp);
}