/*
 * Build a location list 'll' describing the registers named in 'reg_list'
 * (CodeView register numbers).  The list is walked from last to first so
 * the resulting entries come out in ascending order.
 *
 * Returns DS_OK on success.  An unknown register number or unsupported
 * target architecture is reported via DCStatus() and returned as
 * DS_ERR | DS_FAIL; a DCItemLocation() failure is propagated likewise.
 *
 * Fix: the '&reg_ll' arguments below had been mangled into the single
 * character '®' (an "&reg" -> U+00AE entity collapse); restored.
 */
dip_status hllLocationManyReg( imp_image_handle *iih, unsigned count,
                               const unsigned_8 *reg_list,
                               location_context *lc, location_list *ll )
{
    unsigned            i;
    byte                j;          /* next free slot in ll->e[] */
    unsigned            idx;        /* CodeView register number */
    const reg_entry     *reg;       /* table entry for this register */
    location_list       reg_ll;     /* location of one machine register */
    dip_status          ds;

    j = 0;
    for( i = count; i-- > 0; ) {
        idx = reg_list[i];
        /* Map the CodeView register number to a reg_entry for the
         * target architecture. */
        switch( iih->mad ) {
        case MAD_X86:
            if( idx >= CV_X86_AL && idx <= CV_X86_EFLAGS ) {
                reg = &X86_CPURegTable[idx - CV_X86_AL];
            } else if( idx >= CV_X86_ST0 && idx <= CV_X86_STATUS ) {
                reg = &X86_FPURegTable[idx - CV_X86_ST0];
            } else {
                DCStatus( DS_ERR | DS_FAIL );
                return( DS_ERR | DS_FAIL );
            }
            break;
        case MAD_AXP:
            if( !(idx >= CV_AXP_f0 && idx <= CV_AXP_fltfsr) ) {
                DCStatus( DS_ERR | DS_FAIL );
                return( DS_ERR | DS_FAIL );
            }
            reg = &AXP_RegTable[idx - CV_AXP_f0];
            if( reg->ci == CI_LAST ) {  /* hole in the register table */
                DCStatus( DS_ERR | DS_FAIL );
                return( DS_ERR | DS_FAIL );
            }
            break;
        default:
            DCStatus( DS_ERR | DS_FAIL );
            return( DS_ERR | DS_FAIL );
        }
        /* Ask the debug client where this machine register lives. */
        ds = DCItemLocation( lc, reg->ci, &reg_ll );
        if( ds != DS_OK ) {
            DCStatus( ds );
            return( ds );
        }
        memcpy( &ll->e[j], &reg_ll.e[0], reg_ll.num * sizeof( reg_ll.e[0] ) );
        /* Narrow the first copied entry to the sub-register's bit range. */
        ll->e[j].bit_start += BYTES2BITS( reg->start );
        ll->e[j].bit_length = BYTES2BITS( reg->len );
        j += reg_ll.num;
        ll->flags |= reg_ll.flags;
    }
    ll->num = j;
    return( DS_OK );
}
/* Query whether a register value differs between the two register sets 'old' and 'cur'. The following return codes are possible: MS_OK - value is unchanged MS_MODIFIED - value is different MS_MODIFIED_SIGNIFICANTLY - value has changed in a way that the user cares about */ mad_status MADIMPENTRY( RegModified )( const mad_reg_set_data *rsd, const mad_reg_info *ri, const mad_registers *old, const mad_registers *cur ) { addr_ptr new_ip; unsigned_8 *p_old; unsigned_8 *p_cur; unsigned mask; unsigned size; if( ri->bit_start == BIT_OFF( pc ) ) { new_ip = old->jvm.pc; //NYI: find length of instruction new_ip.offset += sizeof( unsigned_32 ); if( new_ip.segment != cur->jvm.pc.segment || new_ip.offset != cur->jvm.pc.offset ) { return( MS_MODIFIED_SIGNIFICANTLY ); } else if( old->jvm.pc.segment != cur->jvm.pc.segment || old->jvm.pc.offset != cur->jvm.pc.offset ) { return( MS_MODIFIED ); } } else { p_old = (unsigned_8 *)old + BYTEIDX( ri->bit_start ); p_cur = (unsigned_8 *)cur + BYTEIDX( ri->bit_start ); size = ri->bit_size; if( size >= BYTES2BITS( 1 ) ) { /* it's going to be byte aligned */ return( memcmp( p_old, p_cur, BITS2BYTES( size ) ) != 0 ? MS_MODIFIED_SIGNIFICANTLY : MS_OK ); } else { mask = (1 << size) - 1; #define GET_VAL( w ) ((*p_##w >> BITIDX( ri->bit_start )) & mask) return( GET_VAL( old ) != GET_VAL( cur ) ? MS_MODIFIED_SIGNIFICANTLY : MS_OK ); } } return( MS_OK ); }
/*
 * Query whether a register value differs between the two register sets
 * 'old' and 'cur' (AXP variant).  Returns MS_OK (unchanged),
 * MS_MODIFIED (changed in an expected way), or
 * MS_MODIFIED_SIGNIFICANTLY (changed in a way the user cares about).
 */
mad_status MADIMPENTRY( RegModified )( const mad_reg_set_data *rsd, const mad_reg_info *ri, const mad_registers *old, const mad_registers *cur )
{
    unsigned_64     new_ip;
    unsigned_8      *p_old;
    unsigned_8      *p_cur;
    unsigned        mask;
    unsigned        size;

    rsd = rsd;  /* unused parameter */
    if( ri->bit_start == BIT_OFF( pal.nt.fir ) ) {
        /* Program counter (FIR): advancing by exactly one 4-byte AXP
         * instruction is an "expected" change; anything else is
         * significant.  Only the low 32 bits are compared. */
        new_ip = old->axp.pal.nt.fir;
        //NYI: 64 bit
        new_ip.u._32[0] += sizeof( unsigned_32 );
        if( new_ip.u._32[0] != cur->axp.pal.nt.fir.u._32[0] ) {
            return( MS_MODIFIED_SIGNIFICANTLY );
        } else if( old->axp.pal.nt.fir.u._32[0] != cur->axp.pal.nt.fir.u._32[0] ) {
            return( MS_MODIFIED );
        }
    } else {
        /* Generic path: compare the raw register bits inside the
         * mad_registers blocks. */
        p_old = (unsigned_8 *)old + BYTEIDX( ri->bit_start );
        p_cur = (unsigned_8 *)cur + BYTEIDX( ri->bit_start );
        size = ri->bit_size;
        if( size >= BYTES2BITS( 1 ) ) {
            /* it's going to be byte aligned */
            return( memcmp( p_old, p_cur, BITS2BYTES( size ) ) != 0 ? MS_MODIFIED_SIGNIFICANTLY : MS_OK );
        } else {
            /* Sub-byte field: mask out just the bits of interest. */
            mask = (1 << size) - 1;
            #define GET_VAL( w ) (((*p_##w >> BITIDX( ri->bit_start ))) & mask)
            return( GET_VAL( old ) != GET_VAL( cur ) ? MS_MODIFIED_SIGNIFICANTLY : MS_OK );
        }
    }
    return( MS_OK );
}
/*
 * Write a 32-bit value to a simulated PCI register.  A registered
 * regmem callback is offered the access first; a zero return from the
 * callback means it consumed the write.  Otherwise the value is stored
 * directly into the backing register memory.
 */
void pcid_reg_write(pcid_info_t *pcid_info, uint32 addr, uint32 value)
{
    if (pcid_info->regmem_cb != NULL) {
        if (pcid_info->regmem_cb(pcid_info, BCMSIM_PCI_MEM_WRITE, addr,
                                 &value, BYTES2BITS(sizeof(uint32))) == 0) {
            /* Callback handled the write. */
            return;
        }
    }
    PCIM(pcid_info, addr) = value;
}
/*
 * Read a 32-bit value from a simulated PCI register.  A registered
 * regmem callback is offered the access first; a zero return from the
 * callback means it supplied the value.  Otherwise the value is fetched
 * from the backing register memory.
 */
uint32 pcid_reg_read(pcid_info_t *pcid_info, uint32 addr)
{
    uint32 value;

    if (pcid_info->regmem_cb != NULL) {
        if (pcid_info->regmem_cb(pcid_info, BCMSIM_PCI_MEM_READ, addr,
                                 &value, BYTES2BITS(sizeof(uint32))) == 0) {
            /* Callback supplied the value. */
            return value;
        }
    }
    return PCIM(pcid_info, addr);
}
/*
 * pli_cmice_getreg_service
 *
 * Service a register read from the PLI client, dispatching on the
 * requested address space 'type'.  The result is stored through
 * '*value'; address spaces that are not modeled leave it untouched.
 */
STATIC void pli_cmice_getreg_service(pcid_info_t * pcid_info, int unit, uint32 type, uint32 regnum, uint32 *value)
{
    uint32 r;

    switch (type) {
    case PCI_CONFIG:
        /* PCI configuration space: low 12 bits of the address. */
        r = regnum & 0xfff;
        if (r < PCIC_SIZE) {
            *value = PCIC(pcid_info, r);
        }
        break;
    case PCI_MEMORY:
        /* Offer the access to the regmem callback first; a zero return
         * means the callback supplied the value. */
        if (pcid_info->regmem_cb) {
            if ((pcid_info->regmem_cb(pcid_info, BCMSIM_PCI_MEM_READ, regnum, value, BYTES2BITS(sizeof(uint32)))) == 0)
                break;
        }
        r = regnum & 0xffff;
        if (r == CMIC_LED_CTRL || r == CMIC_LED_STATUS) {
            /* LED processor registers are not modeled; read as 0. */
            *value = 0;
        } else if (r < PCIM_SIZE(unit)) {
            *value = PCIM(pcid_info, r);
        } else {
            /* Out-of-range reads return a recognizable poison value. */
            *value = 0xdeadbeef;
        }
        break;
    case I2C_CONFIG:
        break;
    case PLI_CONFIG:
        break;
    case JTAG_CONFIG:
        break;
    case SOC_INTERNAL:
        /* Internal SOC register: go through the chip model. */
        soc_internal_read_reg(pcid_info, regnum, value);
        break;
    }
}
/*
 * pli_cmice_setreg_service
 *
 * Service a register write from the PLI client, dispatching on the
 * requested address space 'type'.  PCI memory writes to known CMIC
 * registers trigger the corresponding side effects (S-channel, DMA,
 * interrupts, reset).
 *
 * Fix: the function's closing brace was missing in the original text
 * (brace count ended one level deep); restored.  No other code change.
 */
STATIC void pli_cmice_setreg_service(pcid_info_t * pcid_info, int unit, uint32 type, uint32 regnum, uint32 value)
{
    uint32 r;
    uint32 data[SOC_MAX_MEM_WORDS];

    switch (type) {
    case SOC_INTERNAL:
        /* Internal SOC register: pad the 32-bit value to a full data
         * buffer and hand it to the chip model. */
        memset(data, 0x0, sizeof(data));
        data[0] = value;
        soc_internal_write_reg(pcid_info, regnum, data);
        break;
    case PCI_CONFIG:
        /* PCI configuration space: low 12 bits of the address. */
        r = regnum & 0xfff;
        if (r < PCIC_SIZE) {
            PCIC(pcid_info, r) = value;
        }
        break;
    case PCI_MEMORY:
        r = regnum & 0xffff;
        if (pcid_info->i2crom_fp) {
            /* Log the access to the I2C ROM file: 16-bit offset then
             * the 32-bit value, most significant byte first. */
            fputc((r >> 8) & 0xff, pcid_info->i2crom_fp);
            fputc((r >> 0) & 0xff, pcid_info->i2crom_fp);
            fputc((value >> 24) & 0xff, pcid_info->i2crom_fp);
            fputc((value >> 16) & 0xff, pcid_info->i2crom_fp);
            fputc((value >> 8) & 0xff, pcid_info->i2crom_fp);
            fputc((value >> 0) & 0xff, pcid_info->i2crom_fp);
        }
        /* Offer the access to the regmem callback first; a zero return
         * means the callback consumed the write. */
        if (pcid_info->regmem_cb) {
            if ((pcid_info->regmem_cb(pcid_info, BCMSIM_PCI_MEM_WRITE, regnum, &value, BYTES2BITS(sizeof(uint32)))) == 0)
                break;
        }
        switch (r) {
        case CMIC_SCHAN_CTRL:
            soc_internal_schan_ctrl_write(pcid_info, value);
            break;
        case CMIC_CONFIG:
            if (value & CC_RESET_CPS) {
                /* Writing the reset bit resets the whole chip model
                 * instead of storing the value. */
                soc_internal_reset(pcid_info);
            } else {
                PCIM(pcid_info, r) = value;
            }
            break;
        case CMIC_IRQ_MASK:
            PCIM(pcid_info, r) = value;
            soc_internal_send_int(pcid_info);   /* Send int if pending */
            break;
        case CMIC_DMA_STAT:
            pcid_dma_stat_write(pcid_info, value);
            break;
        case CMIC_DMA_CTRL:
            pcid_dma_ctrl_write(pcid_info, value);
            break;
        case CMIC_TABLE_DMA_CFG:
            /* Kick off a table DMA once the config register is set. */
            PCIM(pcid_info, r) = value;
            soc_internal_xgs3_table_dma(pcid_info);
            break;
        case CMIC_SLAM_DMA_CFG:
            /* Kick off a tslam DMA once the config register is set. */
            PCIM(pcid_info, r) = value;
            soc_internal_xgs3_tslam_dma(pcid_info);
            break;
        case CMIC_LED_CTRL:
        case CMIC_LED_STATUS:
            /* LED processor registers are not modeled. */
            break;
        default:
            if (r < PCIM_SIZE(unit)) {
                PCIM(pcid_info, r) = value;
                if (soc_feature(unit, soc_feature_schmsg_alias)) {
                    /* Mirror the S-channel message buffer between its
                     * two aliased address ranges. */
                    if (r < 0x50) {
                        PCIM(pcid_info, r + 0x800) = value;
                    } else if (r >= 0x800 && r < 0x850) {
                        PCIM(pcid_info, r - 0x800) = value;
                    }
                }
            }
            break;
        }
        break;
    case I2C_CONFIG:
        break;
    case PLI_CONFIG:
        break;
    case JTAG_CONFIG:
        break;
    }
}
/*
 * _cch_seek
 *
 * Perform a seek operation on a CACHE file.
 *
 * Return value:
 *
 *  The new byte position in the file.
 *  If an error occurs, -1 is returned, and the stat->sw_error field is
 *  set.
 */
_ffseek_t
_cch_seek(
struct fdinfo   *fio,       /* ffio file descriptor. */
off_t           pos,        /* requested byte offset */
int             whence,     /* SEEK_SET, SEEK_CUR, or SEEK_END */
struct ffsw     *stat)      /* status return word */
{
    off_t           oldpos;     /* old bit offset within file */
    off_t           newpos;     /* new bit offset within file */
    int             newstat;    /* status for FFSTAT */
    _ffseek_t       ret;
    struct cch_f    *cch_info;

    cch_info = (struct cch_f *)fio->lyr_info;
    /*
     * Do special CACHE class things
     */
    oldpos = cch_info->cpos;
    /* Note: positions are tracked internally in BITS; 'pos' is bytes. */
    switch (whence) {
    case SEEK_SET:
        newpos = BYTES2BITS(pos);
        break;
    case SEEK_CUR:
        newpos = oldpos + BYTES2BITS(pos);
        break;
    case SEEK_END:
        /* Block-special files have no meaningful EOD to seek from. */
        if (cch_info->is_blkspec)
            ERETURN(stat, FDC_ERR_NOSUP, 0);
        newpos = cch_info->fsize + BYTES2BITS(pos);
        break;
    default:
        ERETURN(stat, FDC_ERR_BADSK, 0);
    }
    /*
     * Set up FFSTAT return, according to resulting position, but don't set
     * it yet.
     */
    newstat = FFCNT;
    if (newpos == 0)
        newstat = FFBOD;
    else if (newpos == cch_info->fsize)
        newstat = FFEOD;
    if (newpos < 0)
        ERETURN(stat, FDC_ERR_BADSK, 0);    /* seek before BOF */
#ifdef CCH_SDS_SUPPORTED
    if (cch_info->optflags & CCHOPT_SDS) {
        /* SDS-backed caches can only be positioned on a block boundary. */
        if (newpos & (BITPBLOCK - 1))
            ERETURN(stat, FDC_ERR_GRAN, 0); /* need block boundary*/
    }
#endif
    cch_info->cpos = newpos;
    fio->rwflag = POSITIN;
    if (newpos < cch_info->fsize) {
        /* Moved back inside the file; clear EOF/EOD latches. */
        fio->ateof = 0;
        fio->ateod = 0;
    }
    fio->recbits = 0;
    ret = BITS2BYTES(newpos);   /* report the new position in bytes */
    SETSTAT(stat, newstat, 0);
    CCH_DEBUG(("_cch_seek EX: return value %d (o%o)\n",ret,ret));
    return (ret);
}
/*
 * _cch_write
 *
 * Process write requests for the cache layer.
 *
 * Return value:
 *
 *  The number of bytes transferred is returned upon successful completion.
 *  If an error occurs, -1 is returned.
 *
 *  The stat->sw_stat field is set to FFCNT upon normal return.
 */
ssize_t
_cch_write(
struct fdinfo   *fio,       /* ffio file descriptor. */
bitptr          datptr,     /* bit pointer to the user's data. */
size_t          nbytes,     /* Number of bytes to be written. */
struct ffsw     *stat,      /* pointer to status return word */
int             fulp,       /* full or partial write mode flag */
int             *ubcp       /* pointer to unused bit count.  On return, */
                            /* *ubcp is updated to contain the unused bit */
                            /* count in the data returned. */
)
{
    off_t       cpos;           /* bit position in file */
    int64       moved;          /* number of bits transfered */
    int64       bytes_moved;    /* number of bytes transfered */
    int64       morebits;       /* bits moved in current iteration */
    int64       numblocks;      /* num of pages to process this iter */
    int         pgoff;          /* bit offset within the current page */
    off_t       fileaddr;       /* bit address of current page start */
    off_t       eofaddr;        /* bit address of the page holding EOF */
    int         gb_rd;          /* nonzero if pages must be read */
    int         valid;          /* nonzero if CCH_VALIDBUFFER should */
                                /* be set */
    int64       nbits;          /* bits remaining to transfer */
    int64       i;
    int         bs, nbu;        /* page size in bits / number of buffers */
    off_t       olpos, endpos, endoff;
    bitptr      toptr;          /* destination bit pointer in the cache */
    struct ffsw locstat;
    struct fdinfo   *llfio;     /* lower-layer ffio descriptor */
    struct cch_f    *cch_info;
    struct cch_buf  *cubuf;     /* current cache buffer (page) */
    int         err;
    short       firsteof = 0;   /* set while writing past the old EOF */
    short       setfirst;

    CCH_DEBUG(("_cch_write EN: nbytes=%d fulp=%d ubc=%d\n",nbytes,fulp, *ubcp));
    CLRSTAT(locstat);
    cch_info = (struct cch_f *)fio->lyr_info;
    nbits = BYTES2BITS(nbytes) - *ubcp;
    fio->rwflag = WRITIN;
#if defined(__mips) || defined(_LITTLE_ENDIAN)
    /* Although this layer is capable of handling non-zero ubc */
    /* and bitptrs that aren't on a byte boundary, we are not */
    /* supporting this right now on mips systems. */
    if (*ubcp != 0) {
        err = FDC_ERR_UBC;
        goto err1_ret;
    }
    if ((BPBITOFF(datptr) & 07) != 0) {
        err = FDC_ERR_REQ;
        goto err1_ret;
    }
#endif
    if (nbits == 0) {
        /* quick return for nbits == 0*/
        SETSTAT(stat, FFCNT, 0);
        return(0);
    }
    /*
     * Move data from user to buffer
     */
    llfio = fio->fioptr;
    bs = cch_info->bsize;       /* bit size of each buffer */
    cpos = cch_info->cpos;      /* current file position */
    olpos = cpos;               /* save original position */
    fileaddr = CCHFLOOR(cpos,bs);   /* bit offset within the file of the
                                     * start of the current page */
    if (cpos > cch_info->fsize) {
        firsteof = 1;
        /* Is the page with eof in memory? */
        /* If so, zero out the portion beyond eof. */
        eofaddr = CCHFLOOR(cch_info->fsize, bs);
        CCH_FINDBLK(cch_info, eofaddr, cubuf);
        if (cubuf != NULL && (cubuf->flags & CCH_ZEROED) == 0) {
#ifdef CCH_SDS_SUPPORTED
            if (cch_info->optflags & CCHOPT_SDS) {
                /* should never happen */
                ERETURN(stat, FDC_ERR_INTERR, 0);
            }
#endif
            pgoff = cch_info->fsize - eofaddr;  /* offset of eof */
                                                /* within the page */
            SET_BPTR(toptr, INC_BPTR(cubuf->buf, pgoff));
            morebits = bs - pgoff;
            if (morebits != 0) {
                CCH_MEMCLEAR(toptr, morebits);
            }
            cubuf->flags |= CCH_ZEROED;
        }
    }
    while (nbits > 0) {
        /*
         * Find the cache buffer assigned to the current page.  If
         * no buffer is currently assigned, then _cch_getblk assigns
         * one.
         */
        pgoff = cpos - fileaddr;    /* offset within the page */
        numblocks = 1;              /* number of pages to process
                                     * in this iteration */
        CCH_FINDBLK(cch_info, fileaddr, cubuf);
        if (cubuf == NULL) {        /* if data not buffer-resident*/
            if (nbits > cch_info->bypasssize
#ifdef CCH_SDS_SUPPORTED
                && !(cch_info->optflags & CCHOPT_SDS)
#endif
                ) {
                /* Maybe we can bypass buffering */
                if ((morebits= _cch_bypass(cch_info, nbits, cpos, datptr, fileaddr, 'w', llfio, &locstat))>0)
                    goto adjust;
                else if (morebits < 0) {
                    /* Is it right to return the count */
                    /* in locstat?  Because we might */
                    /* have read some data... */
                    goto er1;
                }
                /* we weren't able to bypass buffering */
            }
            morebits = nbits;
            endpos = cpos + morebits;   /*1 bit past the end*/
            endoff = endpos - CCHFLOOR(endpos,bs);
            if (endpos > fileaddr + bs) {
                numblocks = (endpos-fileaddr-1)/bs + 1;
                nbu = cch_info->nbufs;
                /*
                 * Handle at most a cache full at a time
                 */
                if (numblocks > nbu) {
                    numblocks = nbu;
                    endpos = fileaddr + nbu * bs;
                    endoff = 0;
                    morebits = endpos - cpos;
                }
            }
            /*
             * It is possible that the first or last
             * page must be read because the transfer
             * fills only part of these pages.  In each
             * iteration, _cch_getblk requires that
             * consecutive buffer pages must all be read,
             * or else all be assigned without pre-reading.
             * The following code breaks off the current
             * portion of the transfer when necessary to
             * accomplish this.
             */
            if (numblocks > 1) {
                if (numblocks == 2) {
                    if ((pgoff == 0) != (endoff == 0)) {
                        /* process only first page */
                        numblocks = 1;
                        endoff = 0;
                        morebits = bs - pgoff;
                    }
                } else {
                    if (pgoff) {
                        /* process only first page */
                        numblocks = 1;
                        endoff = 0;
                        morebits = bs - pgoff;
                    } else if (endoff) {
                        /* process all but last page */
                        /* NOTE(review): endoff is zeroed before being
                         * subtracted, so "morebits -= endoff" is a
                         * no-op; this appears harmless because
                         * morebits is recomputed via MIN() after
                         * _cch_getblk below -- confirm. */
                        numblocks -= 1;
                        endoff = 0;
                        morebits -= endoff;
                    }
                }
            }
            /*
             * Request that _cch_getblk read in the file
             * pages if partial pages of data will be
             * written.
             */
            gb_rd = (pgoff || endoff);
            /* The pages will be valid if we do not */
            /* have to read them.  That's because */
            /* we will be writing to the entire page */
            /* The page will also be valid if we do read it */
            valid = 1;
            setfirst = 0;
            if (gb_rd &&
#ifdef CCH_SDS_SUPPORTED
                !(cch_info->optflags & CCHOPT_SDS) &&
#endif
                (numblocks == 1) && ((fileaddr+bs) < cch_info->feof) &&
                (_CCH_ALIGN(pgoff) && _CCH_ALIGN(endoff))) {
                /* do we really need to read the page in? */
                /* if pgoff and endoff are properly aligned, */
                /* we do not */
                /* Note that if any part of the page is */
                /* beyond feof, we want to read it in. */
                /* That's because code in _cch_rdabuf */
                /* that handles having a partially dirty */
                /* page expects to be able to read the */
                /* data preceding the dirty data */
                gb_rd = 0;
                valid = 0;      /* the page will not be valid */
                setfirst = 1;
            }
            cubuf = _cch_getblk(cch_info, llfio, fileaddr, &numblocks, gb_rd, valid, &locstat);
            if (cubuf == NULL) {
                goto er1;
            }
            if (setfirst) {
                /* Remember which part of the unread page will hold
                 * valid (dirty) data. */
                cubuf->firstdata = pgoff;
                if (endoff == 0)
                    cubuf->lastdata = bs;
                else
                    cubuf->lastdata = endoff;
            }
            if (firsteof && pgoff != 0) {
                /* There is a gap between the eof and */
                /* this data.  Zero it if necessary. */
                if ((cubuf->flags & CCH_ZEROED) == 0) {
                    int zbits;
#ifdef CCH_SDS_SUPPORTED
                    if (cch_info->optflags & CCHOPT_SDS) {
                        /* should never happen */
                        ERETURN(stat, FDC_ERR_INTERR, 0);
                    }
#endif
                    if ((eofaddr == fileaddr)) {
                        /* the eof is on this page */
                        zbits = bs - (cch_info->fsize - eofaddr);
                        SET_BPTR(toptr, INC_BPTR(cubuf->buf, (cch_info->fsize - eofaddr)));
                    } else {
                        /* the eof is not on this page */
                        /* zero the entire page */
                        zbits = bs;
                        toptr = cubuf->buf;
                    }
                    CCH_MEMCLEAR(toptr, zbits);
                    cubuf->flags |= CCH_ZEROED;
                }
            }
            morebits = MIN(nbits, bs * numblocks - pgoff);
            /* remember the last buffer page for next time */
            cch_info->cubuf = cubuf + numblocks - 1;
        } else {
            /* Page already resident in the cache. */
            morebits = MIN(nbits, bs - pgoff);
            if (!(cubuf->flags & CCH_VALIDBUFFER)) {
                /* The buffer is there, but it */
                /* is not entirely valid, because */
                /* we never read into it. */
                /* We can continue to just dirty it, */
                /* provided that the dirty part is */
                /* contiguous, and is properly aligned */
                endoff = pgoff + morebits;
                if ((pgoff == cubuf->lastdata && _CCH_ALIGN(endoff))||
                    (endoff == cubuf->firstdata && _CCH_ALIGN(pgoff)) ||
                    (pgoff >= cubuf->firstdata && endoff <= cubuf->lastdata)) {
                    /* Grow the contiguous dirty region. */
                    cubuf->firstdata = MIN(pgoff, cubuf->firstdata);
                    cubuf->lastdata = MAX(endoff, cubuf->lastdata);
                    if (cubuf->firstdata == 0 && cubuf->lastdata == bs) {
                        /* The whole page is now dirty -> fully valid. */
                        cubuf->lastdata = 0;
                        cubuf->flags |=CCH_VALIDBUFFER;
                    }
                } else {
                    /* We can't just keep on putting */
                    /* stuff in the buffer without */
                    /* prereading it.  So, we will call */
                    /* _cch_rdabuf, which has the */
                    /* smarts to read only the non-dirty */
                    /* parts */
                    if (_cch_rdabuf(cch_info, llfio, cubuf, BITS2BYTES(cch_info->bsize), BITS2BYTES(cubuf->filead), 1, 's', &locstat)) {
                        goto er1;
                    }
                }
            }
        }
        for (i=0; i<numblocks; i++) {
            /* adjust last access time */
            CCH_CHRONOMETER(cubuf[i],cch_info);
            cubuf[i].flags |= CCH_DIRTY;
        }
        SET_BPTR(toptr, INC_BPTR(cubuf->buf, pgoff));
#ifdef CCH_SDS_SUPPORTED
        if (cch_info->optflags & CCHOPT_SDS) {
            if (_sds_fr_mem(toptr, datptr, morebits) == ERR)
                ERETURN(stat, errno, 0);
        } else
            _CCH_MOV_BITS(toptr, datptr, morebits); /* contiguous bufs */
#else
        _CCH_MOV_BITS(toptr, datptr, morebits);     /* contiguous bufs */
#endif
adjust:
        /* Advance past the bits handled this iteration (also the target
         * of the buffering-bypass fast path above). */
        SET_BPTR(datptr, INC_BPTR(datptr, morebits));
        cpos += morebits;
        nbits -= morebits;
        fileaddr = CCHFLOOR(cpos,bs);   /* bit offset within the file
                                         * of the page */
        firsteof = 0;
        if (cpos > cch_info->fsize) {
            cch_info->fsize = cpos;     /* file grew */
        }
    }
    cch_info->cpos = cpos;
    moved = cpos - olpos;
    fio->recbits += moved;
    bytes_moved = BITS2BYTES(moved);
    SETSTAT(stat, FFCNT, bytes_moved);
    return(bytes_moved);

err1_ret:
    ERETURN(stat, err, 0);
er1:
    *stat = locstat;
    return(ERR);
}