/* Defined later in this file; declared here so interface_jtag_add_ir_scan() can use it. */
static void cmd_queue_scan_field_clone(struct scan_field *dst, const struct scan_field *src);

/**
 * see jtag_add_ir_scan()
 */
int interface_jtag_add_ir_scan(struct jtag_tap *active,
		const struct scan_field *in_fields, tap_state_t state)
{
	size_t num_taps = jtag_tap_count_enabled();

	struct jtag_command *cmd = cmd_queue_alloc(sizeof(struct jtag_command));
	struct scan_command *scan = cmd_queue_alloc(sizeof(struct scan_command));
	struct scan_field *out_fields = cmd_queue_alloc(num_taps * sizeof(struct scan_field));

	jtag_queue_command(cmd);

	cmd->type = JTAG_SCAN;
	cmd->cmd.scan = scan;

	scan->ir_scan = true;
	scan->num_fields = num_taps;	/* one field per device */
	scan->fields = out_fields;
	scan->end_state = state;

	struct scan_field *field = out_fields;	/* keep track where we insert data */

	/* loop over all enabled TAPs */
	for (struct jtag_tap *tap = jtag_tap_next_enabled(NULL); tap != NULL; tap = jtag_tap_next_enabled(tap)) {
		/* search the input field list for fields for the current TAP */
		if (tap == active) {
			/* if the TAP is listed in the input fields, copy the value */
			tap->bypass = 0;
			cmd_queue_scan_field_clone(field, in_fields);
		} else {
			/* if a TAP isn't listed in the input fields, set it to BYPASS */
			tap->bypass = 1;
			field->num_bits = tap->ir_length;
			field->out_value = buf_set_ones(cmd_queue_alloc(DIV_ROUND_UP(tap->ir_length, 8)), tap->ir_length);
			field->in_value = NULL;	/* do not collect input for TAPs in bypass */
		}

		/* update device information */
		buf_cpy(field->out_value, tap->cur_instr, tap->ir_length);

		field++;
	}

	/* paranoia: jtag_tap_count_enabled() and jtag_tap_next_enabled() not in sync */
	assert(field == out_fields + num_taps);

	return ERROR_OK;
}
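/**
 * Queue a JTAG_STABLECLOCKS command asking the adapter to toggle TCK for
 * @a num_cycles cycles while the TAP remains in its current stable state.
 *
 * A minimal usage sketch, assuming the usual public wrapper jtag_add_clocks()
 * dispatches here and the queue is later flushed with jtag_execute_queue():
 *
 * @code
 *   jtag_add_clocks(100);
 *   jtag_execute_queue();
 * @endcode
 */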
int interface_jtag_add_clocks(int num_cycles)
{
	/* allocate memory for a new list member */
	struct jtag_command *cmd = cmd_queue_alloc(sizeof(struct jtag_command));

	jtag_queue_command(cmd);

	cmd->type = JTAG_STABLECLOCKS;
	cmd->cmd.stableclocks = cmd_queue_alloc(sizeof(struct stableclocks_command));
	cmd->cmd.stableclocks->num_cycles = num_cycles;

	return ERROR_OK;
}
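/**
 * Queue a JTAG_SLEEP command that makes the driver pause for @a us
 * microseconds when the queue is executed (typically a host-side delay,
 * depending on the adapter driver).
 */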
int interface_jtag_add_sleep(uint32_t us)
{
	/* allocate memory for a new list member */
	struct jtag_command *cmd = cmd_queue_alloc(sizeof(struct jtag_command));

	jtag_queue_command(cmd);

	cmd->type = JTAG_SLEEP;
	cmd->cmd.sleep = cmd_queue_alloc(sizeof(struct sleep_command));
	cmd->cmd.sleep->us = us;

	return ERROR_OK;
}
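/**
 * Queue a JTAG_RESET command.  @a req_trst and @a req_srst request new levels
 * for the TRST and SRST lines; how (and whether) the adapter actually drives
 * them depends on the driver and the configured reset mode.
 */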
int interface_jtag_add_reset(int req_trst, int req_srst)
{
	/* allocate memory for a new list member */
	struct jtag_command *cmd = cmd_queue_alloc(sizeof(struct jtag_command));

	jtag_queue_command(cmd);

	cmd->type = JTAG_RESET;
	cmd->cmd.reset = cmd_queue_alloc(sizeof(struct reset_command));
	cmd->cmd.reset->trst = req_trst;
	cmd->cmd.reset->srst = req_srst;

	return ERROR_OK;
}
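/**
 * Queue a JTAG_RUNTEST command: clock TCK for @a num_cycles cycles in
 * Run-Test/Idle, then finish in @a state.
 */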
int interface_jtag_add_runtest(int num_cycles, tap_state_t state)
{
	/* allocate memory for a new list member */
	struct jtag_command *cmd = cmd_queue_alloc(sizeof(struct jtag_command));

	jtag_queue_command(cmd);

	cmd->type = JTAG_RUNTEST;
	cmd->cmd.runtest = cmd_queue_alloc(sizeof(struct runtest_command));
	cmd->cmd.runtest->num_cycles = num_cycles;
	cmd->cmd.runtest->end_state = state;

	return ERROR_OK;
}
/**
 * Copy a struct scan_field for insertion into the queue.
 *
 * This allocates a new copy of out_value using cmd_queue_alloc.
 */
static void cmd_queue_scan_field_clone(struct scan_field *dst, const struct scan_field *src)
{
	dst->tap = src->tap;
	dst->num_bits = src->num_bits;
	dst->out_value = buf_cpy(src->out_value, cmd_queue_alloc(DIV_ROUND_UP(src->num_bits, 8)), src->num_bits);
	dst->in_value = src->in_value;
}
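/**
 * Queue a JTAG_TLR_RESET command that moves the TAP state machine to
 * Test-Logic-Reset (TAP_RESET), typically by holding TMS high for at least
 * five TCK cycles.
 */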
int interface_jtag_add_tlr(void)
{
	tap_state_t state = TAP_RESET;

	/* allocate memory for a new list member */
	struct jtag_command *cmd = cmd_queue_alloc(sizeof(struct jtag_command));

	jtag_queue_command(cmd);

	cmd->type = JTAG_TLR_RESET;
	cmd->cmd.statemove = cmd_queue_alloc(sizeof(struct statemove_command));
	cmd->cmd.statemove->end_state = state;

	return ERROR_OK;
}
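/**
 * Queue a JTAG_PATHMOVE command.  The @a path array of @a num_states entries
 * is copied into queue-owned memory, so the caller's buffer need not persist
 * after this call.  Each entry names the next TAP state to enter, one TCK
 * cycle per entry.
 */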
int interface_jtag_add_pathmove(int num_states, const tap_state_t *path)
{
	/* allocate memory for a new list member */
	struct jtag_command *cmd = cmd_queue_alloc(sizeof(struct jtag_command));

	jtag_queue_command(cmd);

	cmd->type = JTAG_PATHMOVE;
	cmd->cmd.pathmove = cmd_queue_alloc(sizeof(struct pathmove_command));
	cmd->cmd.pathmove->num_states = num_states;
	cmd->cmd.pathmove->path = cmd_queue_alloc(sizeof(tap_state_t) * num_states);

	for (int i = 0; i < num_states; i++)
		cmd->cmd.pathmove->path[i] = path[i];

	return ERROR_OK;
}
/**
 * see jtag_add_plain_dr_scan()
 */
int interface_jtag_add_plain_dr_scan(int in_num_fields, const struct scan_field *in_fields, tap_state_t state)
{
	struct jtag_command *cmd = cmd_queue_alloc(sizeof(struct jtag_command));
	struct scan_command *scan = cmd_queue_alloc(sizeof(struct scan_command));
	struct scan_field *out_fields = cmd_queue_alloc(in_num_fields * sizeof(struct scan_field));

	jtag_queue_command(cmd);

	cmd->type = JTAG_SCAN;
	cmd->cmd.scan = scan;

	scan->ir_scan = false;
	scan->num_fields = in_num_fields;
	scan->fields = out_fields;
	scan->end_state = state;

	for (int i = 0; i < in_num_fields; i++)
		cmd_queue_scan_field_clone(out_fields + i, in_fields + i);

	return ERROR_OK;
}
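/**
 * Queue a single-field JTAG_SCAN command of @a num_bits bits, as either an
 * IR or a DR scan depending on @a ir_scan.  @a out_bits is copied into
 * queue-owned storage; @a in_bits, if non-NULL, is where captured data will
 * be deposited, so it must remain valid until the queue is executed.
 */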
static int jtag_add_plain_scan(int num_bits, const uint8_t *out_bits, uint8_t *in_bits, tap_state_t state, bool ir_scan)
{
	struct jtag_command *cmd = cmd_queue_alloc(sizeof(struct jtag_command));
	struct scan_command *scan = cmd_queue_alloc(sizeof(struct scan_command));
	struct scan_field *out_fields = cmd_queue_alloc(sizeof(struct scan_field));

	jtag_queue_command(cmd);

	cmd->type = JTAG_SCAN;
	cmd->cmd.scan = scan;

	scan->ir_scan = ir_scan;
	scan->num_fields = 1;
	scan->fields = out_fields;
	scan->end_state = state;

	out_fields->num_bits = num_bits;
	out_fields->out_value = buf_cpy(out_bits, cmd_queue_alloc(DIV_ROUND_UP(num_bits, 8)), num_bits);
	out_fields->in_value = in_bits;

	return ERROR_OK;
}
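/**
 * Queue a JTAG_TMS command that clocks @a num_bits raw TMS values from
 * @a seq.  The bits are copied into queue-owned storage, so the caller's
 * buffer need not persist.  (The @a state argument is not used in this
 * implementation.)
 */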
int interface_add_tms_seq(unsigned num_bits, const uint8_t *seq, enum tap_state state)
{
	struct jtag_command *cmd;

	cmd = cmd_queue_alloc(sizeof(struct jtag_command));
	if (cmd == NULL)
		return ERROR_FAIL;

	cmd->type = JTAG_TMS;
	cmd->cmd.tms = cmd_queue_alloc(sizeof(*cmd->cmd.tms));
	if (!cmd->cmd.tms)
		return ERROR_FAIL;

	/* copy the bits; our caller doesn't guarantee they'll persist */
	cmd->cmd.tms->num_bits = num_bits;
	cmd->cmd.tms->bits = buf_cpy(seq, cmd_queue_alloc(DIV_ROUND_UP(num_bits, 8)), num_bits);
	if (!cmd->cmd.tms->bits)
		return ERROR_FAIL;

	jtag_queue_command(cmd);

	return ERROR_OK;
}
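/**
 * Append @a callback, together with its four opaque data words, to the
 * callback list that is walked once the queued commands have executed.
 * Entries are kept in FIFO order via the head/tail pointers.
 */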
/* add callback to end of queue */
void interface_jtag_add_callback4(jtag_callback_t callback,
		jtag_callback_data_t data0, jtag_callback_data_t data1,
		jtag_callback_data_t data2, jtag_callback_data_t data3)
{
	struct jtag_callback_entry *entry = cmd_queue_alloc(sizeof(struct jtag_callback_entry));

	entry->next = NULL;
	entry->callback = callback;
	entry->data0 = data0;
	entry->data1 = data1;
	entry->data2 = data2;
	entry->data3 = data3;

	if (jtag_callback_queue_head == NULL) {
		jtag_callback_queue_head = entry;
		jtag_callback_queue_tail = entry;
	} else {
		jtag_callback_queue_tail->next = entry;
		jtag_callback_queue_tail = entry;
	}
}
/**
 * Generate a DR SCAN using the array of output values passed to the function.
 *
 * This function assumes that the parameter target_tap specifies the one TAP
 * that is not bypassed. All other TAPs must be bypassed and the function will
 * generate a dummy 1-bit field for them.
 *
 * For the target_tap a sequence of output-only fields will be generated,
 * where each field has the size num_bits[i] and its value is taken from
 * value[i].
 *
 * The bypass status of TAPs is set by jtag_add_ir_scan().
 */
void interface_jtag_add_dr_out(struct jtag_tap *target_tap,
		int in_num_fields, const int *num_bits, const uint32_t *value,
		tap_state_t end_state)
{
	/* count devices in bypass */
	size_t bypass_devices = 0;

	for (struct jtag_tap *tap = jtag_tap_next_enabled(NULL); tap != NULL; tap = jtag_tap_next_enabled(tap)) {
		if (tap->bypass)
			bypass_devices++;
	}

	struct jtag_command *cmd = cmd_queue_alloc(sizeof(struct jtag_command));
	struct scan_command *scan = cmd_queue_alloc(sizeof(struct scan_command));
	struct scan_field *out_fields = cmd_queue_alloc((in_num_fields + bypass_devices) * sizeof(struct scan_field));

	jtag_queue_command(cmd);

	cmd->type = JTAG_SCAN;
	cmd->cmd.scan = scan;

	scan->ir_scan = false;
	scan->num_fields = in_num_fields + bypass_devices;
	scan->fields = out_fields;
	scan->end_state = end_state;

	bool target_tap_match = false;

	struct scan_field *field = out_fields;	/* keep track where we insert data */

	/* loop over all enabled TAPs */
	for (struct jtag_tap *tap = jtag_tap_next_enabled(NULL); tap != NULL; tap = jtag_tap_next_enabled(tap)) {
		/* if the TAP is not bypassed, insert matching output fields */
		if (!tap->bypass) {
			/* target_tap must match the one TAP that is not bypassed */
			assert(tap == target_tap);
			target_tap_match = true;

			for (int j = 0; j < in_num_fields; j++) {
				uint8_t out_value[4];
				size_t scan_size = num_bits[j];
				buf_set_u32(out_value, 0, scan_size, value[j]);

				field->num_bits = scan_size;
				field->out_value = buf_cpy(out_value, cmd_queue_alloc(DIV_ROUND_UP(scan_size, 8)), scan_size);
				field->in_value = NULL;
				field++;
			}
		} else {
			/* if a TAP is bypassed, generate a dummy bit */
			field->num_bits = 1;
			field->out_value = NULL;
			field->in_value = NULL;
			field++;
		}
	}

	/* target_tap should be enabled and not bypassed */
	assert(target_tap_match);
}
/**
 * see jtag_add_dr_scan()
 */
int interface_jtag_add_dr_scan(struct jtag_tap *active,
		int in_num_fields, const struct scan_field *in_fields, tap_state_t state)
{
	/* count devices in bypass */
	size_t bypass_devices = 0;

	for (struct jtag_tap *tap = jtag_tap_next_enabled(NULL); tap != NULL; tap = jtag_tap_next_enabled(tap)) {
		if (tap->bypass)
			bypass_devices++;
	}

	struct jtag_command *cmd = cmd_queue_alloc(sizeof(struct jtag_command));
	struct scan_command *scan = cmd_queue_alloc(sizeof(struct scan_command));
	struct scan_field *out_fields = cmd_queue_alloc((in_num_fields + bypass_devices) * sizeof(struct scan_field));

	jtag_queue_command(cmd);

	cmd->type = JTAG_SCAN;
	cmd->cmd.scan = scan;

	scan->ir_scan = false;
	scan->num_fields = in_num_fields + bypass_devices;
	scan->fields = out_fields;
	scan->end_state = state;

	struct scan_field *field = out_fields;	/* keep track where we insert data */

	/* loop over all enabled TAPs */
	for (struct jtag_tap *tap = jtag_tap_next_enabled(NULL); tap != NULL; tap = jtag_tap_next_enabled(tap)) {
		/* if the TAP is not bypassed, insert matching input fields */
		if (!tap->bypass) {
			assert(active == tap);
#ifndef NDEBUG
			/* remember initial position for assert() */
			struct scan_field *start_field = field;
#endif /* NDEBUG */

			for (int j = 0; j < in_num_fields; j++) {
				cmd_queue_scan_field_clone(field, in_fields + j);
				field++;
			}

			/* must have at least one input field per non-bypassed TAP */
			assert(field > start_field);
		} else {
			/* if a TAP is bypassed, generate a dummy bit */
			field->num_bits = 1;
			field->out_value = NULL;
			field->in_value = NULL;
			field++;
		}
	}

	/* no superfluous input fields permitted */
	assert(field == out_fields + scan->num_fields);

	return ERROR_OK;
}
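/**
 * Reset the NAND device behind every supported chip-enable and read its ID
 * bytes into a DMA buffer (the disabled #if 0 block and the id/onfi fields
 * suggest a raw READID at address 0x00 plus the ONFI signature at 0x20).
 * Chip-enables whose IDs match CE0 are kept in nand_cfg.ce_mask; the return
 * value is the number of matching chip-enables.
 */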
int32_t nand_reset_identy(nand_cfg_t *cfg, struct aml_nand_platform *plat, cntl_t *cntl)
{
	clrbits_le32(P_PAD_PULL_UP_REG3, (0xff << 0) | (1 << 16));

	int32_t num, i, max_ce;
	void *addr;

	max_ce = min(cntl->feature & FEATURE_SUPPORT_MAX_CES,
		     plat->ce_num ? plat->ce_num : FEATURE_SUPPORT_MAX_CES);

	struct id_read_s *id;
	addr = dma_alloc_coherent(max_ce * sizeof(struct id_read_s), (dma_addr_t *)&id);

	jobkey_t *job[5];
	for (i = 0; i < 5; i++)
		job[i] = cntl_job_get(i | 0x100);

#if 0
	for (i = 0; i < max_ce; i++)
		cntl_ctrl(i, NAND_CLE(NAND_CMD_RESET));

	for (i = 0; i < max_ce; i++) {
		cntl_nop(i, 1);
		cntl_ctrl(i, NAND_CLE(NAND_CMD_STATUS));
		cntl_wait(i, NAND_RB_IO6, 31);	/* wait for 1M/16 NAND cycles, about 1 sec */
		cntl_sts(job[i], STS_NO_INTERRUPT);

		/* read unique id */
		cntl_ctrl(i, NAND_CLE(NAND_CMD_READID));
		cntl_ctrl(i, NAND_ALE(0));
		cntl_readbytes(&id[i].id, sizeof(id[i].id));

		/* read onfi id */
		cntl_ctrl(i, NAND_CLE(NAND_CMD_READID));
		cntl_ctrl(i, NAND_ALE(0x20));
		cntl_readbytes(&id[i].onfi, sizeof(id[i].onfi));
	}
	cntl_sts(job[4], STS_NO_INTERRUPT);
#else
	cmd_queue_t *pout;
	cmd_queue_t *p = cmd_queue_alloc();
	uint32_t cemask = 0;
	int32_t stat[max_ce];

	assert(p != NULL);

	for (i = 0; i < max_ce; i++)
		stat[i] = NAND_CMD_STAT_START;

	/* drive each chip-enable's reset/identify state machine until all are done */
	while (cemask != ((1 << max_ce) - 1)) {
		for (i = 0; i < max_ce; i++) {
			if (stat[i] == NAND_CMD_STAT_END)
				continue;
			stat[i] = nand_reset_identy_queue(p, stat[i], i, &id[i], job[i]);
			if (stat[i] == NAND_CMD_STAT_END)
				cemask |= (1 << i);
		}
	}
	nand_write_finish(p, job[4]);

	pout = cmd_queue_alloc();
	cntl_write_cmd(p, pout);
	cmd_queue_free(p);
	cmd_queue_free(pout);
#endif

	while (cntl_job_status(job[4], 4 | 0x100) < 0) {
		uint32_t ce;
		if (cntl_error(&ce) == NAND_CNTL_ERROR_TIMEOUT) {
			nanddebug(1, "ce %d timeout", ce);
			cntl_continue();
		}
	}

	amlogic_log_print();
	printf("\n");

	/**
	 * @todo implement this function
	 * if (nand_cfg_set(&nand_cfg, 0, id) < 0)
	 *	return -1;
	 */

	for (i = 0; i < max_ce; i++) {
		nanddebug(1, "CE%d:id=%llx,onfi=%llx,sts=%x", i, id[i].id, id[i].onfi, cntl_job_status(job[i], i | 0x100));
		cntl_job_free(job[i]);
	}
	cntl_job_free(job[4]);

	/* keep only the chip-enables whose IDs match CE0 */
	nand_cfg.ce_mask = 1;
	num = 1;
	for (i = 1; i < max_ce; i++) {
		if (id[i].id != id[0].id || id[i].onfi != id[0].onfi) {
			nand_cfg.ce_mask &= ~(1 << i);
			continue;
		}
		nand_cfg.ce_mask |= (1 << i);
		num++;
	}

	dma_free_coherent(max_ce * sizeof(struct id_read_s), (dma_addr_t)id, addr);

	return num;
}