/*
 * Write the User/OEM 64-bit segment of the Protection Register (PR).
 * Only supported on the Intel extended command set, 16-bit bus width.
 * XXX should allow writing individual words/bytes
 *
 * Returns 0 on success, EOPNOTSUPP for non-Intel-ECS parts, ENXIO when
 * the kernel was built without CFI_ARMEDANDDANGEROUS, or the error from
 * cfi_wait_ready() if programming a word times out.
 */
int
cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int i, error;
#endif

	/* The PR is an Intel extended-command-set feature. */
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	/*
	 * Program the id one 16-bit word at a time: PR(7) receives the
	 * least significant word first, PR(4) the most significant last
	 * (id is shifted right 16 bits each iteration).  Interrupts are
	 * disabled so the setup command and the data write reach the
	 * chip back-to-back.
	 */
	for (i = 7; i >= 4; i--, id >>= 16) {
		intr = intr_disable();
		cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
		cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
		intr_restore(intr);
		error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS,
		    sc->sc_write_timeout);
		if (error)
			break;
	}
	/* Always return the chip to read-array mode. */
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return error;
#else
	device_printf(sc->sc_dev, "%s: OEM PR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return ENXIO;
#endif
}
/*
 * Write the Protection Lock Register to lock down the
 * user-settable segment of the Protection Register.
 * NOTE: this operation is not reversible.
 *
 * Returns 0 on success, EOPNOTSUPP for non-Intel-ECS parts, ENXIO when
 * the kernel was built without CFI_ARMEDANDDANGEROUS, or the error from
 * cfi_wait_ready() on timeout.
 */
int
cfi_intel_set_plr(struct cfi_softc *sc)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int error;
#endif

	/* The PLR is an Intel extended-command-set feature. */
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	/* worthy of console msg */
	device_printf(sc->sc_dev, "set PLR\n");
	/*
	 * Disable interrupts so the setup command and the PLR write
	 * reach the chip back-to-back.  0xFFFD clears bit 1 of the PLR;
	 * the exact bit semantics come from the Intel datasheet --
	 * NOTE(review): confirm against the part's documentation.
	 */
	intr = intr_disable();
	cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
	cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
	intr_restore(intr);
	error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, sc->sc_write_timeout);
	/* Always return the chip to read-array mode. */
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return error;
#else
	device_printf(sc->sc_dev, "%s: PLR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return ENXIO;
#endif
}
/*
 * Issue an AMD-style flash command: the standard two-cycle unlock
 * sequence followed by the command cycle itself, all relative to
 * the block offset 'ofs'.
 */
static void
cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
{
	u_int base = ofs;

	/* Two unlock cycles at the magic addresses. */
	cfi_write(sc, base + AMD_ADDR_START, CFI_AMD_UNLOCK);
	cfi_write(sc, base + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);

	/* The command cycle proper. */
	cfi_write(sc, base + addr, data);
}
/*
 * Fetch one byte from the CFI Query table: put the chip into query
 * mode, read the byte at the (width-scaled) offset, then restore
 * read-array mode before returning.
 */
uint8_t
cfi_read_qry(struct cfi_softc *sc, u_int ofs)
{
	uint8_t result;

	cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
	result = cfi_read(sc, ofs * sc->sc_width);

	/* Leave the device readable again. */
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);

	return (result);
}
/*
 * Read the contents of the Protection Lock Register.
 * Only valid for Intel extended-command-set parts with a 16-bit bus.
 */
int
cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
{
	uint32_t value;

	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	/* Enter read-identifier mode, sample the PLR, then leave. */
	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	value = cfi_get16(sc, CFI_INTEL_PLR);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);

	*plr = value;
	return 0;
}
/*
 * Read the User/OEM 64-bit segment of the Protection Register.
 * PR(4) holds the most significant 16 bits, PR(7) the least.
 */
int
cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
{
	uint64_t value;
	int i;

	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	/* Assemble the four 16-bit words, high word (PR(4)) first. */
	value = 0;
	for (i = 4; i <= 7; i++)
		value = (value << 16) | cfi_get16(sc, CFI_INTEL_PR(i));
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);

	*id = value;
	return 0;
}
/*
 * Send a single CFI command byte as one bus transaction: the byte is
 * replicated/widened to the bus data width by cfi_get_cmd() and then
 * written as a single unit at the given address.
 */
static vsf_err_t cfi_write_cmd(struct dal_info_t *info, uint8_t cmd,
				uint8_t data_width, uint32_t address)
{
	/* NOTE(review): passing &pattern as bytes assumes the host
	 * endianness matches what cfi_get_cmd() produces — confirm. */
	uint32_t pattern = cfi_get_cmd(cmd, data_width);

	return cfi_write(info, address, data_width, (uint8_t *)&pattern, 1);
}
/*
 * Program one block (or one read-page, when the layer reports a page
 * size) word-by-word.  Each data-width unit needs its own SST
 * three-cycle unlock+program command sequence followed by a fixed
 * settle delay.
 */
static vsf_err_t cfi_drv_writeblock_nb(struct dal_info_t *info,
					uint64_t address, uint8_t *buff)
{
	struct sst32hfxx_drv_param_t *param =
			(struct sst32hfxx_drv_param_t *)info->param;
	struct mal_info_t *mal_info = (struct mal_info_t *)info->extra;
	/* Bus width in bytes (common_info.data_width is in bits). */
	uint8_t unit = param->nor_info.common_info.data_width / 8;
	uint32_t remaining;

	/* Prefer the reported page size; fall back to the block size. */
	if (mal_info->read_page_size)
		remaining = (uint32_t)mal_info->read_page_size / unit;
	else
		remaining = (uint32_t)mal_info->capacity.block_size / unit;

	while (remaining--)
	{
		/* SST byte-program sequence: AA@5555, 55@2AAA, A0@5555,
		 * then the data itself (word addresses shifted for the
		 * byte-addressed bus). */
		cfi_write_cmd(info, 0xAA, unit, 0x5555 << 1);
		cfi_write_cmd(info, 0x55, unit, 0x2AAA << 1);
		cfi_write_cmd(info, 0xA0, unit, 0x5555 << 1);
		cfi_write(info, (uint32_t)address, unit, buff, 1);
		interfaces->delay.delayus(param->delayus);
		address += unit;
		buff += unit;
	}
	return VSFERR_NONE;
}
/*
 * Destructive flash self-test: erase 256 sectors starting at 0xf80000,
 * plant a two-byte marker (0x2c, 0x04) at offsets 0x1fc and 0x1fe of
 * each sector, then read both copies back and report any mismatch.
 * WARNING: overwrites flash contents in the tested range.
 */
void cfi_test()
{
	/* Named constants instead of magic numbers scattered below. */
	enum { TEST_BASE = 0xf80000, TEST_BLOCKS = 256 };
	struct cfi *cfi;
	char test_buff[2] = {0x2c, 0x04};
	char read_buff[2];
	int addr;
	int i;

	delay(5 * 1000);
	printf("cfi test ...\n");
	cfi_init(0);
	cfi = &cfid[0];

	/* Phase 1: erase each sector and write the marker twice. */
	addr = TEST_BASE;
	for (i = 0; i < TEST_BLOCKS; i++) {
		cfi_erase(0, addr, cfi->sectsiz);
		cfi_write(0, addr + 0x01fe, 2, test_buff);
		cfi_write(0, addr + 0x01fc, 2, test_buff);
		addr += cfi->sectsiz;
	}

	/* Phase 2: read back and verify both marker copies. */
	addr = TEST_BASE;
	for (i = 0; i < TEST_BLOCKS; i++) {
		cfi_read(0, addr + 0x01fe, 2, read_buff);
		if ((read_buff[0] != 0x2c) || (read_buff[1] != 0x04)) {
			printf("error block %d\n", i);
		}
		cfi_read(0, addr + 0x01fc, 2, read_buff);
		if ((read_buff[0] != 0x2c) || (read_buff[1] != 0x04)) {
			printf("error block %d\n", i);
		}
		addr += cfi->sectsiz;
	}
	/*
	 * Fix: the original printed "finish" without a newline, so the
	 * completion message could sit in the stdio buffer indefinitely.
	 */
	printf("finish\n");
}
/*
 * Non-blocking page program using the AMD/Spansion "Write to Buffer"
 * command sequence (0x25 ... 0x29).  The command cycles below are
 * order-critical; completion is presumably polled elsewhere (the "_nb"
 * suffix suggests non-blocking) -- NOTE(review): confirm against the
 * driver's isready/waitready path.
 */
static vsf_err_t cfi_drv_writeblock_nb(struct dal_info_t *info,
					uint64_t address, uint8_t *buff)
{
	struct cfi_drv_param_t *param = (struct cfi_drv_param_t *)info->param;
	/* Bus width in bytes (common_info.data_width is in bits). */
	uint8_t data_width = param->nor_info.common_info.data_width / 8;
	struct mal_info_t *mal_info = (struct mal_info_t *)info->extra;
	uint32_t write_page_size = mal_info->write_page_size;

	/* Two-cycle unlock at word addresses 0x555/0x2AA (shifted for
	 * the byte-addressed bus). */
	cfi_write_cmd(info, 0xAA, data_width, 0x0555 << 1);
	cfi_write_cmd(info, 0x55, data_width, 0x02AA << 1);
	/* 0x25: Write-to-Buffer, then the word count minus one. */
	cfi_write_cmd(info, 0x25, data_width, (uint32_t)address);
	cfi_write_cmd(info, (uint8_t)(write_page_size / data_width - 1),
			data_width, (uint32_t)address);
	/* Stream the page data into the chip's write buffer. */
	cfi_write(info, (uint32_t)address, data_width, buff,
			write_page_size / data_width);
	/* 0x29: Program Buffer to Flash (confirm). */
	cfi_write_cmd(info, 0x29, data_width, (uint32_t)address);
	return VSFERR_NONE;
}
/*
 * Erase the block at sc_wrofs and reprogram it from the softc write
 * buffer (sc_wrbuf, sc_wrbufsz bytes), one bus-width unit at a time.
 * Works for both Intel and AMD command sets; returns 0 on success,
 * ENODEV for an unknown command set, or the cfi_wait_ready() error.
 * The chip is returned to read-array mode on every exit path.
 */
int
cfi_write_block(struct cfi_softc *sc)
{
	/* Views of the write buffer at each supported bus width. */
	union {
		uint8_t		*x8;
		uint16_t	*x16;
		uint32_t	*x32;
	} ptr;
	register_t intr;
	int error, i;

	/* Erase the block. */
	switch (sc->sc_cmdset) {
	case CFI_VEND_INTEL_ECS:
	case CFI_VEND_INTEL_SCS:
		cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
		cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
		break;
	case CFI_VEND_AMD_SCS:
	case CFI_VEND_AMD_ECS:
		cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
		    CFI_AMD_ERASE_SECTOR);
		cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
		break;
	default:
		/* Better safe than sorry... */
		return (ENODEV);
	}
	error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_erase_timeout);
	if (error)
		goto out;

	/* Write the block. */
	ptr.x8 = sc->sc_wrbuf;
	for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {
		/*
		 * Make sure the command to start a write and the
		 * actual write happens back-to-back without any
		 * excessive delays.
		 */
		intr = intr_disable();

		/* Per-unit program command. */
		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
			break;
		}

		/* One bus-width unit of data per program command. */
		switch (sc->sc_width) {
		case 1:
			bus_space_write_1(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x8)++);
			break;
		case 2:
			bus_space_write_2(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x16)++);
			break;
		case 4:
			bus_space_write_4(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x32)++);
			break;
		}

		intr_restore(intr);

		error = cfi_wait_ready(sc, sc->sc_wrofs,
		    sc->sc_write_timeout);
		if (error)
			goto out;
	}

	/* error is 0. */
out:
	/* Unconditionally restore read-array mode. */
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (error);
}
/*
 * Attach routine: map the device memory, pull timing and erase-region
 * geometry out of the CFI Query table, create the character device
 * node, and attach the child disk driver.  Returns 0 on success or
 * ENXIO when the memory resource cannot be allocated.
 */
int
cfi_attach(device_t dev)
{
	struct cfi_softc *sc;
	u_int blksz, blocks;
	u_int r, u;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	/*
	 * Get time-out values for erase and write.  Per the query
	 * table these are log2 values: typical timeout scaled by the
	 * maximum-timeout multiplier.
	 */
	sc->sc_write_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
	sc->sc_erase_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
	sc->sc_write_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
	sc->sc_erase_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_ERASE);

	/* Get erase regions. */
	sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
	/* M_WAITOK: sleeps until memory is available, never NULL. */
	sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
	    M_TEMP, M_WAITOK | M_ZERO);
	for (r = 0; r < sc->sc_regions; r++) {
		/* Block count is stored little-endian, minus one. */
		blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
		sc->sc_region[r].r_blocks = blocks + 1;

		/* Block size field is in units of 256 bytes; a raw
		 * value of 0 encodes 128 bytes. */
		blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
		sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
		    blksz * 256;
	}

	/* Reset the device to a default state. */
	cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);

	if (bootverbose) {
		/* e.g. "[8x8KB,127x64KB]" -- region geometry summary. */
		device_printf(dev, "[");
		for (r = 0; r < sc->sc_regions; r++) {
			printf("%ux%s%s", sc->sc_region[r].r_blocks,
			    cfi_fmtsize(sc->sc_region[r].r_blksz),
			    (r == sc->sc_regions - 1) ? "]\n" : ",");
		}
	}

	/* Create /dev/<driver><unit> and hook the softc to it. */
	u = device_get_unit(dev);
	sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
	    "%s%u", cfi_driver_name, u);
	sc->sc_nod->si_drv1 = sc;

	device_add_child(dev, "cfid", -1);
	bus_generic_attach(dev);

	return (0);
}