// Transfer a bulk message on the given EHCI pipe.  'dir' selects the
// transfer direction (in/out), data/datasize describe the flat-mode
// buffer.  Returns 0 on success, -1 on timeout/error.
int
ehci_send_bulk(struct usb_pipe *p, int dir, void *data, int datasize)
{
    if (! CONFIG_USB_EHCI)
        return -1;
    struct ehci_pipe *pipe = container_of(p, struct ehci_pipe, pipe);
    dprintf(7, "ehci_send_bulk qh=%p dir=%d data=%p size=%d\n"
            , &pipe->qh, dir, data, datasize);

    // Allocate 4 tds on stack (16byte aligned)
    u8 tdsbuf[sizeof(struct ehci_qtd) * STACKQTDS + EHCI_QTD_ALIGN - 1];
    struct ehci_qtd *tds = (void*)ALIGN((u32)tdsbuf, EHCI_QTD_ALIGN);
    memset(tds, 0, sizeof(*tds) * STACKQTDS);

    // Setup fields in qh
    u16 maxpacket = GET_FLATPTR(pipe->pipe.maxpacket);
    SET_FLATPTR(pipe->qh.info1
                , ((1 << QH_MULT_SHIFT) | (maxpacket << QH_MAXPACKET_SHIFT)
                   | (GET_FLATPTR(pipe->pipe.speed) << QH_SPEED_SHIFT)
                   | (GET_FLATPTR(pipe->pipe.ep) << QH_EP_SHIFT)
                   | (GET_FLATPTR(pipe->pipe.devaddr) << QH_DEVADDR_SHIFT)));
    SET_FLATPTR(pipe->qh.info2
                , ((1 << QH_MULT_SHIFT)
                   | (GET_FLATPTR(pipe->pipe.tt_port) << QH_HUBPORT_SHIFT)
                   | (GET_FLATPTR(pipe->pipe.tt_devaddr) << QH_HUBADDR_SHIFT)));
    barrier(); // qh fields must be written before the controller sees the tds
    SET_FLATPTR(pipe->qh.qtd_next, (u32)MAKE_FLATPTR(GET_SEG(SS), tds));

    int tdpos = 0;
    while (datasize) {
        // Reuse the td slots round-robin; wait for a slot's previous
        // transfer to complete before refilling it.
        struct ehci_qtd *td = &tds[tdpos++ % STACKQTDS];
        int ret = ehci_wait_td(pipe, td, 5000);
        if (ret)
            return -1;

        struct ehci_qtd *nexttd_fl = MAKE_FLATPTR(GET_SEG(SS)
                                                  , &tds[tdpos % STACKQTDS]);
        // 'transfer' is the number of bytes queued into this td.
        int transfer = fillTDbuffer(td, maxpacket, data, datasize);
        // Terminate the chain on the final td.
        td->qtd_next = (transfer==datasize ? EHCI_PTR_TERM : (u32)nexttd_fl);
        td->alt_next = EHCI_PTR_TERM;
        barrier(); // descriptor must be complete before setting ACTIVE
        td->token = (ehci_explen(transfer) | QTD_STS_ACTIVE
                     | (dir ? QTD_PID_IN : QTD_PID_OUT) | ehci_maxerr(3));

        data += transfer;
        datasize -= transfer;
    }

    // Wait for all outstanding tds to complete.
    int i;
    for (i=0; i<STACKQTDS; i++) {
        struct ehci_qtd *td = &tds[tdpos++ % STACKQTDS];
        int ret = ehci_wait_td(pipe, td, 5000);
        if (ret)
            return -1;
    }

    return 0;
}
// Low-level usb command transmit function. int usb_process_op(struct disk_op_s *op) { if (!CONFIG_USB_MSC) return 0; dprintf(16, "usb_cmd_data id=%p write=%d count=%d buf=%p\n" , op->drive_fl, 0, op->count, op->buf_fl); struct usbdrive_s *udrive_gf = container_of( op->drive_fl, struct usbdrive_s, drive); // Setup command block wrapper. struct cbw_s cbw; memset(&cbw, 0, sizeof(cbw)); int blocksize = scsi_fill_cmd(op, cbw.CBWCB, USB_CDB_SIZE); if (blocksize < 0) return default_process_op(op); u32 bytes = blocksize * op->count; cbw.dCBWSignature = CBW_SIGNATURE; cbw.dCBWTag = 999; // XXX cbw.dCBWDataTransferLength = bytes; cbw.bmCBWFlags = scsi_is_read(op) ? USB_DIR_IN : USB_DIR_OUT; cbw.bCBWLUN = GET_GLOBALFLAT(udrive_gf->lun); cbw.bCBWCBLength = USB_CDB_SIZE; // Transfer cbw to device. int ret = usb_msc_send(udrive_gf, USB_DIR_OUT , MAKE_FLATPTR(GET_SEG(SS), &cbw), sizeof(cbw)); if (ret) goto fail; // Transfer data to/from device. if (bytes) { ret = usb_msc_send(udrive_gf, cbw.bmCBWFlags, op->buf_fl, bytes); if (ret) goto fail; } // Transfer csw info. struct csw_s csw; ret = usb_msc_send(udrive_gf, USB_DIR_IN , MAKE_FLATPTR(GET_SEG(SS), &csw), sizeof(csw)); if (ret) goto fail; if (!csw.bCSWStatus) return DISK_RET_SUCCESS; if (csw.bCSWStatus == 2) goto fail; if (blocksize) op->count -= csw.dCSWDataResidue / blocksize; return DISK_RET_EBADTRACK; fail: // XXX - reset connection dprintf(1, "USB transmission failed\n"); return DISK_RET_EBADTRACK; }
// Copy sectors between the caller's buffer and the ramdisk using the
// BIOS int 15h / ah=0x87 "move extended memory block" service.
// 'iswrite' selects the copy direction.  Returns a DISK_RET_* code.
static int
ramdisk_copy(struct disk_op_s *op, int iswrite)
{
    // cntl_id holds the ramdisk's base address; add the sector offset.
    u32 offset = GET_GLOBAL(op->drive_g->cntl_id);
    offset += (u32)op->lba * DISK_SECTOR_SIZE;
    // Build flat data-segment descriptors for the two copy endpoints.
    u64 opd = GDT_DATA | GDT_LIMIT(0xfffff) | GDT_BASE((u32)op->buf_fl);
    u64 ramd = GDT_DATA | GDT_LIMIT(0xfffff) | GDT_BASE(offset);

    // int 15h/87h takes a 6-entry GDT; entry 2 is the source and
    // entry 3 the destination of the copy.
    u64 gdt[6];
    if (iswrite) {
        gdt[2] = opd;
        gdt[3] = ramd;
    } else {
        gdt[2] = ramd;
        gdt[3] = opd;
    }

    // Call int 1587 to copy data.
    struct bregs br;
    memset(&br, 0, sizeof(br));
    br.flags = F_CF|F_IF;
    br.ah = 0x87;
    br.es = GET_SEG(SS);
    br.si = (u32)gdt;
    br.cx = op->count * DISK_SECTOR_SIZE / 2; // cx counts 16bit words
    call16_int(0x15, &br);

    // Carry flag set means the BIOS service reported an error.
    if (br.flags & F_CF)
        return DISK_RET_EBADTRACK;
    return DISK_RET_SUCCESS;
}
// Check if an EHCI interrupt pipe has received a new packet (runs in
// 16bit mode).  On success copies one packet into 'data', re-arms the
// descriptor, and returns 0; returns -1 when nothing is pending.
int
ehci_poll_intr(struct usb_pipe *p, void *data)
{
    ASSERT16();
    if (! CONFIG_USB_EHCI)
        return -1;
    struct ehci_pipe *pipe = container_of(p, struct ehci_pipe, pipe);
    struct ehci_qtd *td = GET_LOWFLAT(pipe->next_td);
    u32 token = GET_LOWFLAT(td->token);
    if (token & QTD_STS_ACTIVE)
        // No intrs found.
        return -1;
    // XXX - check for errors.

    // Copy data.  Each td owns a maxpacket-sized slot in the pipe's
    // data buffer, indexed by the td's position in the td array.
    int maxpacket = GET_LOWFLAT(pipe->pipe.maxpacket);
    int pos = td - GET_LOWFLAT(pipe->tds);
    void *tddata = GET_LOWFLAT(pipe->data) + maxpacket * pos;
    memcpy_far(GET_SEG(SS), data, SEG_LOW, LOWFLAT2LOW(tddata), maxpacket);

    // Reenable this td: advance the cursor, reset the buffer pointer,
    // then mark the td active again.
    struct ehci_qtd *next = (void*)(GET_LOWFLAT(td->qtd_next) & ~EHCI_PTR_BITS);
    SET_LOWFLAT(pipe->next_td, next);
    SET_LOWFLAT(td->buf[0], (u32)tddata);
    barrier(); // buffer pointer must be visible before ACTIVE is set
    SET_LOWFLAT(td->token, (ehci_explen(maxpacket) | QTD_STS_ACTIVE
                            | QTD_PID_IN | ehci_maxerr(3)));

    return 0;
}
/*
 * Do a signal return; undo the signal stack.
 *
 * Restores user registers/segments from the sigcontext the signal
 * frame saved; the restored ax value is handed back through *pax.
 * Returns non-zero if any user-space access faulted.
 */
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
                   unsigned long *pax)
{
    void __user *buf;
    unsigned int tmpflags;
    unsigned int err = 0;

    /* Always make any pending restarted system calls return -EINTR */
    current_thread_info()->restart_block.fn = do_no_restart_syscall;

    /* The COPY/GET macros accumulate any __get_user fault into 'err'. */
    GET_SEG(gs);
    COPY_SEG(fs);
    COPY_SEG(es);
    COPY_SEG(ds);
    COPY(di);
    COPY(si);
    COPY(bp);
    COPY(sp);
    COPY(bx);
    COPY(dx);
    COPY(cx);
    COPY(ip);
    /* cs/ss are forced to RPL 3 (user privilege) by the STRICT variants. */
    COPY_SEG_STRICT(cs);
    COPY_SEG_STRICT(ss);

    err |= __get_user(tmpflags, &sc->flags);
    /* Only the FIX_EFLAGS bits may be taken from the signal frame. */
    regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
    regs->orig_ax = -1; /* disable syscall checks */

    /* Restore the FPU/extended state saved in the user signal frame. */
    err |= __get_user(buf, &sc->fpstate);
    err |= restore_i387_xstate(buf);

    err |= __get_user(*pax, &sc->ax);
    return err;
}
/*
 * hash_destroy -- free all memory associated with a hash table.
 *
 * A NULL hashp is a no-op.  Shared-memory hash tables cannot be
 * destroyed (asserted below).
 *
 * Fixes vs. previous version:
 *  - hashp was dereferenced by the Assert BEFORE the NULL check;
 *    the Assert now runs inside the guarded branch.
 *  - hash_stats() was called AFTER hashp->hctl had been freed (a
 *    use-after-free when stats are enabled); it now runs first.
 */
void
hash_destroy(HTAB *hashp)
{
    if (hashp != NULL) {
        register SEG_OFFSET segNum;
        SEGMENT segp;
        int nsegs = hashp->hctl->nsegs;
        int j;
        BUCKET_INDEX *elp, p, q;
        ELEMENT *curr;

        /* cannot destroy a shared memory hash table */
        Assert(! hashp->segbase);

        /* Emit statistics while the header is still valid. */
        hash_stats("destroy", hashp);

        /* Free every element in every bucket chain, then the segment. */
        for (segNum = 0; nsegs > 0; nsegs--, segNum++) {
            segp = GET_SEG(hashp, segNum);
            for (j = 0, elp = segp; j < hashp->hctl->ssize; j++, elp++) {
                for (p = *elp; p != INVALID_INDEX; p = q) {
                    curr = GET_BUCKET(hashp, p);
                    q = curr->next;
                    MEM_FREE((char *) curr);
                }
            }
            /* NOTE(review): segments are released with free() while
             * elements use MEM_FREE(); verify both map to the same
             * allocator before unifying. */
            free((char *) segp);
        }

        (void) MEM_FREE( (char *) hashp->dir);
        (void) MEM_FREE( (char *) hashp->hctl);
        (void) MEM_FREE( (char *) hashp);
    }
}
// Transfer a bulk message on the given EHCI pipe (variant that
// accesses pipe state through the LOWFLAT macros).  Returns 0 on
// success, -1 on timeout/error.
int
ehci_send_bulk(struct usb_pipe *p, int dir, void *data, int datasize)
{
    if (! CONFIG_USB_EHCI)
        return -1;
    struct ehci_pipe *pipe = container_of(p, struct ehci_pipe, pipe);
    dprintf(7, "ehci_send_bulk qh=%p dir=%d data=%p size=%d\n"
            , &pipe->qh, dir, data, datasize);

    // Allocate 4 tds on stack (with required alignment)
    u8 tdsbuf[sizeof(struct ehci_qtd) * STACKQTDS + EHCI_QTD_ALIGN - 1];
    struct ehci_qtd *tds = (void*)ALIGN((u32)tdsbuf, EHCI_QTD_ALIGN);
    memset(tds, 0, sizeof(*tds) * STACKQTDS);
    barrier(); // tds must be initialized before the qh points at them
    SET_LOWFLAT(pipe->qh.qtd_next, (u32)MAKE_FLATPTR(GET_SEG(SS), tds));

    u16 maxpacket = GET_LOWFLAT(pipe->pipe.maxpacket);
    int tdpos = 0;
    while (datasize) {
        // Reuse the td slots round-robin; wait for a slot's previous
        // transfer to complete before refilling it.
        struct ehci_qtd *td = &tds[tdpos++ % STACKQTDS];
        int ret = ehci_wait_td(pipe, td, 5000);
        if (ret)
            return -1;

        struct ehci_qtd *nexttd_fl = MAKE_FLATPTR(GET_SEG(SS)
                                                  , &tds[tdpos % STACKQTDS]);
        // 'transfer' is the number of bytes queued into this td.
        int transfer = fillTDbuffer(td, maxpacket, data, datasize);
        // Terminate the chain on the final td.
        td->qtd_next = (transfer==datasize ? EHCI_PTR_TERM : (u32)nexttd_fl);
        td->alt_next = EHCI_PTR_TERM;
        barrier(); // descriptor must be complete before setting ACTIVE
        td->token = (ehci_explen(transfer) | QTD_STS_ACTIVE
                     | (dir ? QTD_PID_IN : QTD_PID_OUT) | ehci_maxerr(3));

        data += transfer;
        datasize -= transfer;
    }

    // Wait for all outstanding tds to complete.
    int i;
    for (i=0; i<STACKQTDS; i++) {
        struct ehci_qtd *td = &tds[tdpos++ % STACKQTDS];
        int ret = ehci_wait_td(pipe, td, 5000);
        if (ret)
            return -1;
    }

    return 0;
}
/*
 * Restore user register state from a signal frame's sigcontext.
 * Faults while reading the user frame are collected by the
 * get_user_try/get_user_catch machinery and returned via 'err'.
 */
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
    unsigned long buf_val;
    void __user *buf;
    unsigned int tmpflags;
    unsigned int err = 0;

    /* Always make any pending restarted system calls return -EINTR */
    current->restart_block.fn = do_no_restart_syscall;

    get_user_try {

#ifdef CONFIG_X86_32
        /* gs is handled specially; the others copy into pt_regs. */
        set_user_gs(regs, GET_SEG(gs));
        COPY_SEG(fs);
        COPY_SEG(es);
        COPY_SEG(ds);
#endif /* CONFIG_X86_32 */

        COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
        COPY(dx); COPY(cx); COPY(ip); COPY(ax);

#ifdef CONFIG_X86_64
        COPY(r8);
        COPY(r9);
        COPY(r10);
        COPY(r11);
        COPY(r12);
        COPY(r13);
        COPY(r14);
        COPY(r15);
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_X86_32
        COPY_SEG_CPL3(cs);
        COPY_SEG_CPL3(ss);
#else /* !CONFIG_X86_32 */
        /* Kernel saves and restores only the CS segment register on signals,
         * which is the bare minimum needed to allow mixed 32/64-bit code.
         * App's signal handler can save/restore other segments if needed. */
        COPY_SEG_CPL3(cs);
#endif /* CONFIG_X86_32 */

        get_user_ex(tmpflags, &sc->flags);
        /* Only the FIX_EFLAGS bits may be taken from the signal frame. */
        regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
        regs->orig_ax = -1; /* disable syscall checks */

        get_user_ex(buf_val, &sc->fpstate);
        buf = (void __user *)buf_val;
    } get_user_catch(err);

    /* Restore the FPU state saved in the user signal frame. */
    err |= fpu__restore_sig(buf, config_enabled(CONFIG_X86_32));

    force_iret();

    return err;
}
/*
 * Restore user register state from a signal frame's sigcontext; the
 * restored ax value is handed back through *pax.  Faults while reading
 * the user frame are collected by get_user_try/get_user_catch.
 */
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
                       unsigned long *pax)
{
    void __user *buf;
    unsigned int tmpflags;
    unsigned int err = 0;

    /* Always make any pending restarted system calls return -EINTR */
    current_thread_info()->restart_block.fn = do_no_restart_syscall;

    get_user_try {

#ifdef CONFIG_X86_32
        /* gs is handled specially; the others copy into pt_regs. */
        set_user_gs(regs, GET_SEG(gs));
        COPY_SEG(fs);
        COPY_SEG(es);
        COPY_SEG(ds);
#endif /* CONFIG_X86_32 */

        COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
        COPY(dx); COPY(cx); COPY(ip);

#ifdef CONFIG_X86_64
        COPY(r8);
        COPY(r9);
        COPY(r10);
        COPY(r11);
        COPY(r12);
        COPY(r13);
        COPY(r14);
        COPY(r15);
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_X86_32
        COPY_SEG_CPL3(cs);
        COPY_SEG_CPL3(ss);
#else /* !CONFIG_X86_32 */
        /* Only cs is restored on 64bit; the app's signal handler can
         * save/restore other segments if it needs them. */
        COPY_SEG_CPL3(cs);
#endif /* CONFIG_X86_32 */

        get_user_ex(tmpflags, &sc->flags);
        /* Only the FIX_EFLAGS bits may be taken from the signal frame. */
        regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
        regs->orig_ax = -1; /* disable syscall checks */

        get_user_ex(buf, &sc->fpstate);
        /* Restore the FPU/extended state saved in the user frame. */
        err |= restore_i387_xstate(buf);

        get_user_ex(*pax, &sc->ax);
    } get_user_catch(err);

    return err;
}
// Execute a "disk_op_s" request (using the extra 16bit stack). static int send_disk_op(struct disk_op_s *op) { ASSERT16(); if (! CONFIG_DRIVES) return -1; if (!CONFIG_ENTRY_EXTRASTACK) // Jump on to extra stack return stack_hop(__send_disk_op, op, GET_SEG(SS)); return process_op(op); }
// Execute a "disk_op_s" request after jumping to the extra stack. static int __send_disk_op(struct disk_op_s *op_far, u16 op_seg) { struct disk_op_s dop; memcpy_far(GET_SEG(SS), &dop, op_seg, op_far, sizeof(dop)); int status = process_op(&dop); // Update count with total sectors transferred. SET_FARVAR(op_seg, op_far->count, dop.count); return status; }
int usb_poll_intr(struct usb_pipe *pipe_fl, void *data) { ASSERT16(); switch (GET_LOWFLAT(pipe_fl->type)) { default: case USB_TYPE_UHCI: return uhci_poll_intr(pipe_fl, data); case USB_TYPE_OHCI: return ohci_poll_intr(pipe_fl, data); case USB_TYPE_EHCI: return ehci_poll_intr(pipe_fl, data); case USB_TYPE_XHCI: ; return call32_params(xhci_poll_intr, pipe_fl , MAKE_FLATPTR(GET_SEG(SS), data), 0, -1); } }
// Check an interrupt pipe for an event; dispatch on controller type.
// Unknown types fall through to the UHCI handler.
int usb_poll_intr(struct usb_pipe *pipe_fl, void *data)
{
    ASSERT16();
    switch (GET_LOWFLAT(pipe_fl->type)) {
    default:
    case USB_TYPE_UHCI:
        return uhci_poll_intr(pipe_fl, data);
    case USB_TYPE_OHCI:
        return ohci_poll_intr(pipe_fl, data);
    case USB_TYPE_EHCI:
        return ehci_poll_intr(pipe_fl, data);
    case USB_TYPE_XHCI: ;
        // xhci handler runs in 32bit flat mode - call via trampoline.
        extern void _cfunc32flat_xhci_poll_intr(void);
        return call32_params(_cfunc32flat_xhci_poll_intr, (u32)pipe_fl
                             , (u32)MAKE_FLATPTR(GET_SEG(SS), (u32)data)
                             , 0, -1);
    }
}
// Check if an OHCI interrupt pipe has received a new event (runs in
// 16bit mode).  On success copies one packet into 'data', re-arms the
// td, and returns 0; returns -1 when nothing is pending.
int ohci_poll_intr(struct usb_pipe *pipe, void *data)
{
    ASSERT16();
    if (! CONFIG_USB_OHCI)
        return -1;

    struct ohci_pipe *p = container_of(pipe, struct ohci_pipe, pipe);
    struct ohci_td *tds = GET_FLATPTR(p->tds);
    struct ohci_td *head = (void*)GET_FLATPTR(p->ed.hwHeadP);
    struct ohci_td *tail = (void*)GET_FLATPTR(p->ed.hwTailP);
    int count = GET_FLATPTR(p->count);
    // The ring slot after the tail is the next td to complete.
    int pos = (tail - tds + 1) % count;
    struct ohci_td *next = &tds[pos];
    if (head == next)
        // No intrs found.
        return -1;
    // XXX - check for errors.

    // Copy data.  Each td slot owns a maxpacket-sized region of the
    // pipe's data buffer, indexed by the slot position.
    u32 endp = GET_FLATPTR(p->pipe.endp);
    int maxpacket = endp2maxsize(endp);
    void *pipedata = GET_FLATPTR(p->data);
    void *intrdata = pipedata + maxpacket * pos;
    memcpy_far(GET_SEG(SS), data
               , FLATPTR_TO_SEG(intrdata), (void*)FLATPTR_TO_OFFSET(intrdata)
               , maxpacket);

    // Reenable this td: reuse the old tail slot and advance the tail.
    SET_FLATPTR(tail->hwINFO, TD_DP_IN | TD_T_TOGGLE | TD_CC);
    intrdata = pipedata + maxpacket * (tail-tds);
    SET_FLATPTR(tail->hwCBP, (u32)intrdata);
    SET_FLATPTR(tail->hwNextTD, (u32)next);
    SET_FLATPTR(tail->hwBE, (u32)intrdata + maxpacket - 1);

    SET_FLATPTR(p->ed.hwTailP, (u32)next);
    return 0;
}
/*
 * Restore user register state from a signal frame's sigcontext; the
 * restored eax value is handed back through *peax.  Returns non-zero
 * if any user-space access faulted or the fpstate pointer was bad.
 */
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax)
{
    unsigned int err = 0;

    /* Always make any pending restarted system calls return -EINTR */
    current_thread_info()->restart_block.fn = do_no_restart_syscall;

    /* Copy a general register from the sigcontext; faults go into err. */
#define COPY(x)        err |= __get_user(regs->x, &sc->x)

    /* Copy a data-segment selector into pt_regs. */
#define COPY_SEG(seg) \
    { unsigned short tmp; \
      err |= __get_user(tmp, &sc->seg); \
      regs->x##seg = tmp; }

    /* As COPY_SEG, but force RPL 3 (user privilege) -- used for cs/ss. */
#define COPY_SEG_STRICT(seg) \
    { unsigned short tmp; \
      err |= __get_user(tmp, &sc->seg); \
      regs->x##seg = tmp|3; }

    /* Load a selector straight into the segment register. */
#define GET_SEG(seg) \
    { unsigned short tmp; \
      err |= __get_user(tmp, &sc->seg); \
      loadsegment(seg,tmp); }

    /* Flag bits the signal handler is allowed to have changed. */
#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_RF | \
                    X86_EFLAGS_OF | X86_EFLAGS_DF | \
                    X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
                    X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)

    GET_SEG(gs);
    COPY_SEG(fs);
    COPY_SEG(es);
    COPY_SEG(ds);
    COPY(edi);
    COPY(esi);
    COPY(ebp);
    COPY(esp);
    COPY(ebx);
    COPY(edx);
    COPY(ecx);
    COPY(eip);
    COPY_SEG_STRICT(cs);
    COPY_SEG_STRICT(ss);

    {
        unsigned int tmpflags;
        err |= __get_user(tmpflags, &sc->eflags);
        /* Only the FIX_EFLAGS bits may come from the signal frame. */
        regs->eflags = (regs->eflags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
        regs->orig_eax = -1; /* disable syscall checks */
    }

    {
        struct _fpstate __user * buf;
        err |= __get_user(buf, &sc->fpstate);
        if (buf) {
            if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
                goto badframe;
            err |= restore_i387(buf);
        } else {
            /* No FPU state in the frame; drop any stale FPU state. */
            struct task_struct *me = current;
            if (used_math()) {
                clear_fpu(me);
                clear_used_math();
            }
        }
    }

    err |= __get_user(*peax, &sc->eax);
    return err;

badframe:
    return 1;
}
/********************************* UTILITIES ************************/

/*
 * expand_table -- add one bucket to the table (linear hashing).
 * Returns 1 on success, 0 on failure (segment allocation failed).
 */
static int
expand_table(HTAB *hashp)
{
    HHDR *hctl;
    SEGMENT old_seg, new_seg;
    long old_bucket, new_bucket;
    long new_segnum, new_segndx;
    long old_segnum, old_segndx;
    ELEMENT *chain;
    BUCKET_INDEX *old, *newbi;
    register BUCKET_INDEX chainIndex, nextIndex;

#ifdef HASH_STATISTICS
    hash_expansions++;
#endif

    hctl = hashp->hctl;
    /* The new bucket's entries come from the bucket its number maps
     * to under the smaller (low) mask. */
    new_bucket = ++hctl->max_bucket;
    old_bucket = (hctl->max_bucket & hctl->low_mask);

    new_segnum = new_bucket >> hctl->sshift;
    new_segndx = MOD ( new_bucket, hctl->ssize );

    if ( new_segnum >= hctl->nsegs ) {
        /* Allocate new segment if necessary */
        if (new_segnum >= hctl->dsize) {
            /* NOTE(review): dir_realloc's result is ignored; if it can
             * fail, the store into hashp->dir below may run past the
             * directory -- verify dir_realloc's contract. */
            dir_realloc(hashp);
        }
        if (! (hashp->dir[new_segnum] = seg_alloc(hashp))) {
            return (0);
        }
        hctl->nsegs++;
    }

    if ( new_bucket > hctl->high_mask ) {
        /* Starting a new doubling */
        hctl->low_mask = hctl->high_mask;
        hctl->high_mask = new_bucket | hctl->low_mask;
    }

    /*
     * Relocate records to the new bucket: re-hash each element of the
     * old chain; entries that still hash to the old bucket stay, the
     * rest are appended to the new bucket's chain.
     */
    old_segnum = old_bucket >> hctl->sshift;
    old_segndx = MOD(old_bucket, hctl->ssize);

    old_seg = GET_SEG(hashp, old_segnum);
    new_seg = GET_SEG(hashp, new_segnum);

    old = &old_seg[old_segndx];
    newbi = &new_seg[new_segndx];
    for (chainIndex = *old;
         chainIndex != INVALID_INDEX;
         chainIndex = nextIndex) {
        chain = GET_BUCKET(hashp, chainIndex);
        nextIndex = chain->next;
        if ( call_hash(hashp, (char *)&(chain->key), hctl->keysize)
             == old_bucket ) {
            *old = chainIndex;
            old = &chain->next;
        } else {
            *newbi = chainIndex;
            newbi = &chain->next;
        }
        /* Terminate; the next iteration re-links through *old/*newbi. */
        chain->next = INVALID_INDEX;
    }
    return (1);
}
/*
 * hash_seq -- sequentially search through hash table and return
 *             all the elements one by one, return NULL on error and
 *             return TRUE in the end.
 *
 * NOTE: the scan cursor lives in static variables, so only one
 * sequential scan can be active at a time; pass NULL to reset it.
 */
long *
hash_seq(HTAB *hashp)
{
    static uint32 curBucket = 0;
    static BUCKET_INDEX curIndex;
    ELEMENT *curElem;
    long segment_num;
    long segment_ndx;
    SEGMENT segp;
    HHDR *hctl;

    if (hashp == NULL) {
        /*
         * reset static state
         */
        curBucket = 0;
        curIndex = INVALID_INDEX;
        return((long *) NULL);
    }

    hctl = hashp->hctl;
    while (curBucket <= hctl->max_bucket) {
        if (curIndex != INVALID_INDEX) {
            /* Continue walking the current bucket's chain. */
            curElem = GET_BUCKET(hashp, curIndex);
            curIndex = curElem->next;
            if (curIndex == INVALID_INDEX)      /* end of this bucket */
                ++curBucket;
            return(&(curElem->key));
        }

        /*
         * initialize the search within this bucket.
         */
        segment_num = curBucket >> hctl->sshift;
        segment_ndx = curBucket & ( hctl->ssize - 1 );

        /*
         * first find the right segment in the table directory.
         */
        segp = GET_SEG(hashp, segment_num);
        if (segp == NULL)
            /* this is probably an error */
            return((long *) NULL);

        /*
         * now find the right index into the segment for the first
         * item in this bucket's chain.  if the bucket is not empty
         * (its entry in the dir is valid), we know this must
         * correspond to a valid element and not a freed element
         * because it came out of the directory of valid stuff.  if
         * there are elements in the bucket chains that point to the
         * freelist we're in big trouble.
         */
        curIndex = segp[segment_ndx];

        if (curIndex == INVALID_INDEX)          /* empty bucket */
            ++curBucket;
    }

    return((long *) TRUE);                      /* out of buckets */
}
/*
 * hash_search -- look up key in table and perform action
 *
 * action is one of HASH_FIND/HASH_ENTER/HASH_REMOVE
 *
 * RETURNS: NULL if table is corrupted, a pointer to the element
 *      found/removed/entered if applicable, TRUE otherwise.
 *      foundPtr is TRUE if we found an element in the table
 *      (FALSE if we entered one).
 */
long *
hash_search(HTAB *hashp,
            char *keyPtr,
            HASHACTION action,  /*
                                 * HASH_FIND / HASH_ENTER / HASH_REMOVE
                                 * HASH_FIND_SAVE / HASH_REMOVE_SAVED
                                 */
            bool *foundPtr)
{
    uint32 bucket;
    long segment_num;
    long segment_ndx;
    SEGMENT segp;
    register ELEMENT *curr;
    HHDR *hctl;
    BUCKET_INDEX currIndex;
    BUCKET_INDEX *prevIndexPtr;
    char * destAddr;
    /* Position saved by HASH_FIND_SAVE for a later HASH_REMOVE_SAVED.
     * NOTE: static, so only one saved position exists at a time. */
    static struct State {
        ELEMENT *currElem;
        BUCKET_INDEX currIndex;
        BUCKET_INDEX *prevIndex;
    } saveState;

    Assert((hashp && keyPtr));
    Assert((action == HASH_FIND) || (action == HASH_REMOVE)
           || (action == HASH_ENTER) || (action == HASH_FIND_SAVE)
           || (action == HASH_REMOVE_SAVED));

    hctl = hashp->hctl;

# if HASH_STATISTICS
    hash_accesses++;
    hashp->hctl->accesses++;
# endif

    if (action == HASH_REMOVE_SAVED) {
        /* Reuse the position recorded by a prior HASH_FIND_SAVE. */
        curr = saveState.currElem;
        currIndex = saveState.currIndex;
        prevIndexPtr = saveState.prevIndex;
        /*
         * Try to catch subsequent errors
         */
        Assert(saveState.currElem && !(saveState.currElem = 0));
    } else {
        /* Hash the key and locate its bucket slot in the directory. */
        bucket = call_hash(hashp, keyPtr, hctl->keysize);
        segment_num = bucket >> hctl->sshift;
        segment_ndx = bucket & ( hctl->ssize - 1 );

        segp = GET_SEG(hashp, segment_num);

        Assert(segp);

        prevIndexPtr = &segp[segment_ndx];
        currIndex = *prevIndexPtr;

        /*
         * Follow collision chain
         */
        for (curr = NULL; currIndex != INVALID_INDEX;) {
            /* coerce bucket index into a pointer */
            curr = GET_BUCKET(hashp, currIndex);

            if (! memcmp((char *)&(curr->key), keyPtr, hctl->keysize)) {
                break;
            }
            prevIndexPtr = &(curr->next);
            currIndex = *prevIndexPtr;
# if HASH_STATISTICS
            hash_collisions++;
            hashp->hctl->collisions++;
# endif
        }
    }

    /*
     * if we found an entry or if we weren't trying
     * to insert, we're done now.
     */
    *foundPtr = (bool) (currIndex != INVALID_INDEX);

    switch (action) {
    case HASH_ENTER:
        if (currIndex != INVALID_INDEX)
            return(&(curr->key));
        break;
    case HASH_REMOVE:
    case HASH_REMOVE_SAVED:
        if (currIndex != INVALID_INDEX) {
            Assert(hctl->nkeys > 0);
            hctl->nkeys--;

            /* add the bucket to the freelist for this table.  */
            *prevIndexPtr = curr->next;
            curr->next = hctl->freeBucketIndex;
            hctl->freeBucketIndex = currIndex;

            /* better hope the caller is synchronizing access to
             * this element, because someone else is going to reuse
             * it the next time something is added to the table
             */
            return (&(curr->key));
        }
        return((long *) TRUE);
    case HASH_FIND:
        if (currIndex != INVALID_INDEX)
            return(&(curr->key));
        return((long *)TRUE);
    case HASH_FIND_SAVE:
        if (currIndex != INVALID_INDEX) {
            saveState.currElem = curr;
            saveState.prevIndex = prevIndexPtr;
            saveState.currIndex = currIndex;
            return(&(curr->key));
        }
        return((long *)TRUE);
    default:
        /* can't get here */
        return (NULL);
    }

    /*
     * If we got here, then we didn't find the element and
     * we have to insert it into the hash table
     */
    Assert(currIndex == INVALID_INDEX);

    /* get the next free bucket */
    currIndex = hctl->freeBucketIndex;
    if (currIndex == INVALID_INDEX) {
        /* no free elements.  allocate another chunk of buckets */
        if (! bucket_alloc(hashp)) {
            return(NULL);
        }
        currIndex = hctl->freeBucketIndex;
    }
    Assert(currIndex != INVALID_INDEX);

    curr = GET_BUCKET(hashp, currIndex);
    hctl->freeBucketIndex = curr->next;

    /* link into chain */
    *prevIndexPtr = currIndex;

    /* copy key and data */
    destAddr = (char *) &(curr->key);
    memmove(destAddr, keyPtr, hctl->keysize);
    curr->next = INVALID_INDEX;

    /* let the caller initialize the data field after
     * hash_search returns.
     */
    /* memmove(destAddr,keyPtr,hctl->keysize+hctl->datasize);*/

    /*
     * Check if it is time to split the segment
     */
    if (++hctl->nkeys / (hctl->max_bucket+1) > hctl->ffactor) {
        /*
        fprintf(stderr,"expanding on '%s'\n",keyPtr);
        hash_stats("expanded table",hashp);
        */
        if (! expand_table(hashp))
            return(NULL);
    }
    return (&(curr->key));
}