//! @brief Rebuild the memory and create LUT, Recovery and Free-blocks blocks.
//!
//! TBD
//!
//! It uses s_n_invalid_blocks (number of invalid blocks) and
//! g_curr_block_addr (current block address of each device).
//!
//! @return a status:
//!   PASS if there are no error;
//!   FAIL a programmation error occured: the block is marked as bad. The
//!        function must be recall.
//!
static Status_bool nf_rebuild ( void )
{
  Status_bool status_bool=PASS;
  Bool b_duplicate;                           // TRUE when a duplicated logical block was found (recovery needed)
  U8 i_sub_lut;
  U8 i_dev =0;
  _MEM_TYPE_SLOW_ U16 i_block=0;
  _MEM_TYPE_SLOW_ U16 u16_tmp;
  _MEM_TYPE_SLOW_ U16 sub_lut_log_sz;
  _MEM_TYPE_SLOW_ U16 log_block_addr;
  _MEM_TYPE_SLOW_ U16 log_block_addr_min;
  _MEM_TYPE_SLOW_ U16 log_block_addr_max;

  // Refine the computation: account for the management blocks themselves.
  //
  s_n_invalid_blocks[S_MNGT_DEV] +=
      1                                          // Need a block for the Free-blocks block
    + (G_N_BLOCKS*NF_N_DEVICES)/NF_SUBLUT_SIZE   // and one for each sub-LUT
    ;

  // Take the max number of invalid blocks of each devices
  //
  u16_tmp=s_n_invalid_blocks[0] ;
  for ( i_dev=1 ; i_dev<NF_N_DEVICES ; i_dev++ )
  {
    u16_tmp=Max( u16_tmp, s_n_invalid_blocks[i_dev] );
  }

  // Take the max number of quarantine blocks of each devices
  // (i_sub_lut is reused here as a temporary; it is reassigned before the sub-LUT loop below)
  //
  i_sub_lut=s_n_quarantine_blocks[0] ;
  for ( i_dev=1 ; i_dev<NF_N_DEVICES ; i_dev++ )
  {
    i_sub_lut=Max( i_sub_lut, s_n_quarantine_blocks[i_dev] );
  }

  // Total number of usable logical blocks across all devices.
  sub_lut_log_sz = (U16)NF_N_DEVICES*(G_N_BLOCKS -g_nf_first_block -u16_tmp);

  // Finally compute the number of exportable physical blocks and free blocks
  // (roughly 1000/1024 of the usable blocks are exported; the rest are kept free).
  //
  Assert( u16_tmp<(G_N_BLOCKS -g_nf_first_block) );
  g_n_export_blocks= (U16)( ((U32)( (U32)sub_lut_log_sz ) * 1000) / 1024);
  g_n_export_blocks= Align_down( g_n_export_blocks, NF_N_DEVICES);
  g_n_free_blocks  = (U16)sub_lut_log_sz - g_n_export_blocks;
  g_n_free_blocks -= (U16)NF_N_DEVICES*i_sub_lut;
  if( g_n_free_blocks<=NF_LOW_N_FREE_THRESHOLD )
  {
    // NOTE(review): deliberately hangs when too few free blocks remain; proper
    // handling still to be defined.
    while(1); // TBD
  }
  Assert( g_n_free_blocks>0 );
  Assert( g_n_free_blocks<(1L<<NF_SHIFT_PAGE_BYTE) ); // limit the free blocks in order to fit in 1 page

  // Compute the number of needed sub-LUT.
  // Affect to each management block a free block address.
  //
  Nfc_action(NFC_ACT_DEV_SELECT, S_MNGT_DEV);
  g_fbb_block_addr = nf_fetch_free_block(S_MNGT_DEV);
  nfc_erase_block( nf_block_2_page( g_fbb_block_addr ), TRUE );
  g_n_sub_lut= 0;
  u16_tmp    = g_n_export_blocks;
//#error TODO: the indexes (LUT, FBB, RCV...) must be initialized here
  while(1)
  {
    Assert( g_n_sub_lut<N_SUBLUT );
    g_lut_block_addr [g_n_sub_lut]=nf_fetch_free_block(S_MNGT_DEV);
    g_lut_block_index[g_n_sub_lut]=0;
    nfc_erase_block( nf_block_2_page( g_lut_block_addr [g_n_sub_lut] ), TRUE );
    g_n_sub_lut++;
    if( u16_tmp>NF_SUBLUT_SIZE ) u16_tmp-=NF_SUBLUT_SIZE;
    else break;
  }
  // Remaining blocks make the (smaller) last sub-LUT.
  g_last_sub_lut_log_sz=u16_tmp/NF_N_DEVICES;

  // Build the sub-LUTs
  // (several sub-LUTs are built per pass, as many as fit in the page buffer)
  //
  for ( i_sub_lut=0 ; i_sub_lut<g_n_sub_lut ; )
  {
    U8 n_sublut_in_buf = g_n_sub_lut - i_sub_lut;  // Count remaining sublut to build
    log_block_addr_max =
    log_block_addr_min = (U16)i_sub_lut<<(NF_SHIFT_SUBLUT_PHYS-NF_SHIFT_N_DEVICES);  // first included
    if( n_sublut_in_buf>(NF_PAGE_BUFFER_SIZE/(2*NF_SUBLUT_SIZE)) )
    {
      // More sub-LUTs remain than fit in the buffer: fill it completely.
      n_sublut_in_buf = NF_PAGE_BUFFER_SIZE/(2*NF_SUBLUT_SIZE);
      log_block_addr_max += ((U16)n_sublut_in_buf)*g_sub_lut_log_sz;  // last not included
    }
    else
    {
      log_block_addr_max += ((U16)n_sublut_in_buf-1)*g_sub_lut_log_sz +g_last_sub_lut_log_sz;  // last not included
    }
    nf_init_buffer();

    // Report affected logical blocks: scan every physical block of every device
    // and record, in the page buffer, the physical block assigned to each LBA.
    //
    u16_tmp=g_n_export_blocks/NF_N_DEVICES;  // Number of logical blocks used for the mass storage
    b_duplicate=FALSE;
    for ( i_dev=0 ; i_dev<NF_N_DEVICES ; i_dev++ )
    {
      Nfc_action(NFC_ACT_DEV_SELECT, i_dev);
      g_block_to_kill[i_dev]=0xFFFF;  // 0xFFFF = no duplicated block found on this device
      for ( i_block=g_nf_first_block ; i_block<G_N_BLOCKS ; i_block++ )
      {
        nfc_read_spare_byte( g_byte, 8, nf_block_2_page(i_block) );
        if(( 0xFF            !=g_byte[G_OFST_BLK_STATUS       ] )    // The block is bad
        || ( NFC_BLK_ID_DATA !=g_byte[NFC_SPARE_OFST_1_BLK_ID ] )    // or is not a data block
        || ( ( 0xFF          ==g_byte[NFC_SPARE_OFST_6_LBA    ] )    // or is not affected
          && ( 0xFF          ==g_byte[NFC_SPARE_OFST_6_LBA+1  ] ) ) )
        {
          continue;
        }
        // Extract the logical block address stored in the spare area (big-endian).
        MSB(log_block_addr) = g_byte[NFC_SPARE_OFST_6_LBA   ];
        LSB(log_block_addr) = g_byte[NFC_SPARE_OFST_6_LBA+1 ];
        if( log_block_addr>=u16_tmp )
        {
          // The LBA seems bad: it does not fit in any LUT. This happens when unplugging the player.
          // Block is erased.
          // Anyway, stay in the loop to track similar problems.
          nfc_erase_block( nf_block_2_page(i_block), TRUE );
          status_bool=FAIL;
        }
        if(( log_block_addr>=log_block_addr_min )
        && ( log_block_addr< log_block_addr_max ))
        {
          // Offset of this LBA's entry in the page buffer (2 bytes per entry,
          // entries of the NF_N_DEVICES devices interleaved).
          U16 ofst=2*((U16)i_dev + (log_block_addr%((U16)NF_PAGE_BUFFER_SIZE/2/NF_N_DEVICES))*NF_N_DEVICES) ;
          if( ( 0xFF==g_page_buffer[ ofst    ] )
          &&  ( 0xFF==g_page_buffer[ ofst +1 ] ) )
          { // no redundant phys blocks
            Assert( ( ofst +1 ) < NF_PAGE_BUFFER_SIZE );
            g_page_buffer[ ofst    ] = MSB(i_block);
            g_page_buffer[ ofst +1 ] = LSB(i_block);
          }
          else
          {
            // A duplicated logical block is detected. This happens when unplugging the player.
            // Anyway, stay in the loop to track any other redundant blocks, for that sub-LUT.
            _MEM_TYPE_SLOW_ U16 tmp_addr;  // the physical block previously recorded for this LBA
            MSB(tmp_addr)=g_page_buffer[ ofst    ];
            LSB(tmp_addr)=g_page_buffer[ ofst +1 ];
            //trace("Dupl "); trace_hex32(tmp_addr); trace("-"); trace_hex32(i_block);; trace("\n\r");
            if(0xFFFF!=g_block_to_kill[i_dev])
            {
              // !!! There are more than 1 duplicated block on the device. This should never happen...
              nfc_erase_block( nf_block_2_page(g_block_to_kill[i_dev]), TRUE );
              return FAIL;
            }
            b_duplicate=TRUE;
            g_log_block_id=log_block_addr;
            // Read the block-ID marker to decide which of the two blocks is the
            // source and which is the (partially written) recipient of the copy.
            nfc_open_page_read( nf_block_2_page(i_block) , NF_SPARE_POS+NFC_SPARE_OFST_3_BYTE_3 );
            if( NFC_OFST_3_DATA_DST!=Nfc_rd_data_fetch_next() )
            {
              trace("1. Src block="); trace_hex16(i_block); trace_nl();
              trace("1. Dst block="); trace_hex16(tmp_addr); trace_nl();
              //nfc_print_block(i_block, 0);
              //nfc_print_block(tmp_addr, 0);
              //while(1);
              g_block_to_kill[i_dev]=i_block;                        // source block
              g_phys_page_addr[i_dev] = nf_block_2_page( tmp_addr ); // recipient block
            }
            else
            {
              trace("2. Src block="); trace_hex16(tmp_addr); trace_nl();
              trace("2. Dst block="); trace_hex16(i_block); trace_nl();
              //nfc_print_block(tmp_addr, 0);
              //nfc_print_block(i_block, 0);
              //while(1);
              g_block_to_kill[i_dev]= tmp_addr ;                     // source block
              g_page_buffer[ ofst    ]=MSB(i_block);
              g_page_buffer[ ofst +1 ]=LSB(i_block);
              g_phys_page_addr[i_dev] = nf_block_2_page( i_block );  // recipient block
            }
          }
        }
      } // for ( i_block ../..
    } // for ( i_dev ../..

    if( b_duplicate )
    {
      U8 i_page;
      U8 i_sect;
      trace("recovery\n\r");
      // Test that recovery can be done: recovery requires a duplicated block on
      // every device (the copy was striped over all devices).
      for ( i_dev=0 ; i_dev<NF_N_DEVICES ; i_dev++ )
      {
        if( 0xFFFF==g_block_to_kill[i_dev] )
        {
          // !Ooops... we can not recover from that case since there are duplication
          // only on one device
          for ( i_dev=0 ; i_dev<NF_N_DEVICES ; i_dev++ )
          {
            if( 0xFFFF!=g_block_to_kill[i_dev] )
            {
              nfc_erase_block( nf_block_2_page(g_block_to_kill[i_dev]), TRUE );
            }
          }
          return FAIL;
        }
      }

      // Initialize variable for nf_copy_tail
      g_curr_dev_id=0;
      g_last_log_sector= ((U32)g_log_block_id) << S_SHIFT_LOG_BLOCK_SECTOR;

      // Look for last written sector: walk the recipient block, device-interleaved,
      // until a sector with an unprogrammed (0xFFFF) LBA marker is found.
      for( i_page=0 ; i_page<SIZE_BLOCK_PAGE ; i_page++ )
      {
        Nfc_action(NFC_ACT_DEV_SELECT, g_curr_dev_id);  // open the current device
        for( i_sect=0 ; i_sect<SIZE_PAGE_SECTOR ; i_sect++ )
        {
          nfc_open_page_read( g_phys_page_addr[g_curr_dev_id] , NF_SPARE_POS + (((U16)i_sect)*16) + NFC_SPARE_OFST_6_LBA );
          if(( 0xFF==Nfc_rd_data_fetch_next() )
          && ( 0xFF==Nfc_rd_data_fetch_next() ))
            goto recovery_exit;
          else
          {
            g_last_log_sector++;
            trace("g_last_log_sector="); trace_hex32(g_last_log_sector); trace_nl();
          }
        }
        g_phys_page_addr[g_curr_dev_id]++;  // update the current physical page of the current device
        g_curr_dev_id++;                    // update the current device
        if( g_curr_dev_id==NF_N_DEVICES )
        {
          g_curr_dev_id=0;
        }
        trace("g_curr_dev_id="); trace_hex(g_curr_dev_id); trace_nl();
        trace("g_phys_page_addr="); trace_hex32(g_phys_page_addr[g_curr_dev_id]); trace_nl();
      }

recovery_exit:
      trace("recovery stop on g_last_log_sector="); trace_hex32(g_last_log_sector); trace_nl();
      trace("g_curr_dev_id="); trace_hex(g_curr_dev_id); trace_nl();
      trace("g_phys_page_addr="); trace_hex32(g_phys_page_addr[g_curr_dev_id]); trace_nl();
      //while(1);
      nf_copy_tail();
      return FAIL;
    }

    // At least one bad/redundant block has been found: the LUT must be rebuilt since the fetch of free block
    // may not have seen that affected blocks (redundant) are in fact free.
    if( PASS!=status_bool )
    {
      return FAIL;
    }

    // Affect a free physical block to the logical blocks still unassigned
    // (entries left at 0xFFFF in the page buffer).
    //
    for( i_dev=0 ; i_dev<NF_N_DEVICES ; i_dev++ )
    {
      Nfc_action(NFC_ACT_DEV_SELECT, i_dev);
      for(u16_tmp=0 ; u16_tmp<(log_block_addr_max-log_block_addr_min) ; u16_tmp++ )
      {
        U16 ofst=2*((U16)i_dev + u16_tmp*NF_N_DEVICES);
        if(( 0xFF==g_page_buffer[ofst  ] )
        && ( 0xFF==g_page_buffer[ofst+1] ))
        {
          i_block=nf_fetch_free_block(i_dev);
          Assert( ofst+1<NF_PAGE_BUFFER_SIZE);
          g_page_buffer[ofst  ] = MSB(i_block);
          g_page_buffer[ofst+1] = LSB(i_block);
        }
      }
    } // for ( i_dev ../..

    // Each sub-LUT will fit in a physical page and will be of the same size
    // except the last one which contains less
    //
    for( ; n_sublut_in_buf!=0 ; n_sublut_in_buf--, i_sub_lut++ )
    {
      sub_lut_log_sz= ( i_sub_lut==(g_n_sub_lut-1) )
        ? g_last_sub_lut_log_sz
        : g_sub_lut_log_sz ;

      // Write the sub-LUT in the page
      //
      status_bool = nf_write_lut(i_sub_lut%(NF_PAGE_BUFFER_SIZE/(2*NF_SUBLUT_SIZE)), i_sub_lut, sub_lut_log_sz);
      if ( PASS!=status_bool )
      {
        nfc_mark_bad_block( nf_block_2_page( g_lut_block_addr[i_sub_lut] ) );
        return FAIL;
      }
    }
  }

//#error: if a recovery occurred, the affected LUT must be erased and therefore rebuilt.
//        To do so, free blocks must be found.
//        1st method: also erase the free-blocks block and rebuild it, along with the sub-LUT
//        2nd method: mark the free blocks so they can be recognized, and rebuild the sub-LUT

  // Build the free-blocks block
  // First, fill the internal buffer with the free blocks
  //
  for ( i_dev=0 ; i_dev<NF_N_DEVICES ; i_dev++ )
  {
    Nfc_action(NFC_ACT_DEV_SELECT, i_dev);
    for ( u16_tmp=0 ; u16_tmp<(g_n_free_blocks/NF_N_DEVICES) ; u16_tmp++ )
    {
      // This define is better than using a variable that holds the expression...
      #define OFST (2*(i_dev + u16_tmp*NF_N_DEVICES))
      i_block=nf_fetch_free_block(i_dev);
      nfc_erase_block( nf_block_2_page(i_block), TRUE );
      Assert( OFST   <NF_PAGE_BUFFER_SIZE);
      Assert( OFST +1<NF_PAGE_BUFFER_SIZE);
      Assert( i_block>=g_nf_first_block );
      Assert( i_block< G_N_BLOCKS );
      g_page_buffer[OFST   ] = MSB(i_block);
      g_page_buffer[OFST +1] = LSB(i_block);
      #undef OFST
    }
  }

  // Then write the buffer in the free-blocks block
  // Note that the list of free-blocks holds on one page only; the
  // algo is thus made for both 512B and 2kB pages.
  //
  g_fbb_block_index=0;
  status_bool = nf_write_fbb();
  if ( PASS!=status_bool )
  {
    nfc_mark_bad_block( nf_block_2_page( g_fbb_block_addr ) );
    return FAIL;
  }

//#error TODO: erase the free blocks !!!!!!
//#error TODO: s_lut_index[all] must be determined for the existing sub-LUTs
//#error TODO: if a recovery block exists, the associated LUT is no longer valid
//#error TODO: make the buffer size configurable (currently 2k). If <512 and no partial prog: fatal error
  //nf_init_buffer();  // Cleanup the buffer

  return PASS;
}
//! @brief Set \c nbytes bytes of flash at \c dst to the repeated 64-bit pattern \c src.
//!
//! The destination range is clipped to the flash array and the User page; pages
//! are programmed one at a time through the FLASHCDW page buffer. Note that the
//! page buffer only accepts 32-bit writes on this controller, hence the paired
//! u32 stores throughout.
//!
//! @param dst    Destination address (flash array or User page).
//! @param src    64-bit source pattern to replicate.
//! @param nbytes Number of bytes to program.
//! @param erase  Whether to erase each touched page before programming it.
//!
//! @return The initial \c dst, as the standard \c memset does.
volatile void *flashcdw_memset64(volatile void *dst, uint64_t src, size_t nbytes, bool erase)
{
  // Use aggregated pointers to have several alignments available for a same address.
  UnionCVPtr flash_array_end;
  UnionVPtr dest;
  Union64 source = {0};
  StructCVPtr dest_end;
  UnionCVPtr flash_page_source_end;
  bool incomplete_flash_page_end;
  Union64 flash_dword;
  UnionVPtr tmp;
  unsigned int error_status = 0;
  unsigned int i;

  // Reformat arguments.
  flash_array_end.u8ptr = AVR32_FLASH + flashcdw_get_flash_size();
  dest.u8ptr = dst;
  // Replicate the pattern into a 64-bit buffer, rotated so its bytes line up
  // with the destination's offset within a 64-bit word. The loop stops when all
  // non-zero bytes of src have been consumed; remaining bytes stay 0 from the
  // initializer.
  for (i = (Get_align((uint32_t)dest.u8ptr, sizeof(uint64_t)) - 1) & (sizeof(uint64_t) - 1);
       src;
       i = (i - 1) & (sizeof(uint64_t) - 1)) {
    source.u8[i] = src;
    src >>= 8;
  }
  dest_end.u8ptr = dest.u8ptr + nbytes;

  // If destination is outside flash, go to next flash page if any.
  if (dest.u8ptr < AVR32_FLASH) {
    dest.u8ptr = AVR32_FLASH;
  }
  else if (flash_array_end.u8ptr <= dest.u8ptr && dest.u8ptr < AVR32_FLASHCDW_USER_PAGE) {
    dest.u8ptr = AVR32_FLASHCDW_USER_PAGE;
  }

  // If end of destination is outside flash, move it to the end of the previous flash page if any.
  if (dest_end.u8ptr > AVR32_FLASHCDW_USER_PAGE + AVR32_FLASHCDW_USER_PAGE_SIZE) {
    dest_end.u8ptr = AVR32_FLASHCDW_USER_PAGE + AVR32_FLASHCDW_USER_PAGE_SIZE;
  }
  else if (AVR32_FLASHCDW_USER_PAGE >= dest_end.u8ptr && dest_end.u8ptr > flash_array_end.u8ptr) {
    dest_end.u8ptr = flash_array_end.u8ptr;
  }

  // Align each end of destination pointer with its natural boundary.
  dest_end.u16ptr = (uint16_t *)Align_down((uint32_t)dest_end.u8ptr, sizeof(uint16_t));
  dest_end.u32ptr = (uint32_t *)Align_down((uint32_t)dest_end.u16ptr, sizeof(uint32_t));
  dest_end.u64ptr = (uint64_t *)Align_down((uint32_t)dest_end.u32ptr, sizeof(uint64_t));

  // While end of destination is not reached...
  while (dest.u8ptr < dest_end.u8ptr) {
    // Clear the page buffer in order to prepare data for a flash page write.
    flashcdw_clear_page_buffer();
    error_status |= flashcdw_error_status;

    // Determine where the source data will end in the current flash page.
    flash_page_source_end.u64ptr =
        (uint64_t *)min((uint32_t)dest_end.u64ptr,
                        Align_down((uint32_t)dest.u8ptr, AVR32_FLASHCDW_PAGE_SIZE) + AVR32_FLASHCDW_PAGE_SIZE);

    // Determine if the current destination page has an incomplete end.
    incomplete_flash_page_end = (Align_down((uint32_t)dest.u8ptr, AVR32_FLASHCDW_PAGE_SIZE) >=
                                 Align_down((uint32_t)dest_end.u8ptr, AVR32_FLASHCDW_PAGE_SIZE));

    // Use a flash double-word buffer to manage unaligned accesses.
    flash_dword.u64 = source.u64;

    // If destination does not point to the beginning of the current flash page...
    if (!Test_align((uint32_t)dest.u8ptr, AVR32_FLASHCDW_PAGE_SIZE)) {
      // Fill the beginning of the page buffer with the current flash page data.
      // This is required by the hardware, even if page erase is not requested,
      // in order to be able to write successfully to erased parts of flash
      // pages that have already been written to.
      for (tmp.u8ptr = (uint8_t *)Align_down((uint32_t)dest.u8ptr, AVR32_FLASHCDW_PAGE_SIZE);
           tmp.u64ptr < (uint64_t *)Align_down((uint32_t)dest.u8ptr, sizeof(uint64_t));
           tmp.u64ptr++) {
        // Self-copy each 64-bit word as two 32-bit accesses (page buffer constraint).
        *tmp.u32ptr = *tmp.u32ptr;
        *(tmp.u32ptr+1) = *(tmp.u32ptr+1);
      }

      // If destination is not 64-bit aligned...
      if (!Test_align((uint32_t)dest.u8ptr, sizeof(uint64_t))) {
        // Fill the beginning of the flash double-word buffer with the current
        // flash page data.
        // This is required by the hardware, even if page erase is not
        // requested, in order to be able to write successfully to erased parts
        // of flash pages that have already been written to.
        for (i = 0; i < Get_align((uint32_t)dest.u8ptr, sizeof(uint64_t)); i++) {
          flash_dword.u8[i] = *tmp.u8ptr++;
        }

        // Align the destination pointer with its 64-bit boundary.
        dest.u64ptr = (uint64_t *)Align_down((uint32_t)dest.u8ptr, sizeof(uint64_t));

        // If the current destination double-word is not the last one...
        if (dest.u64ptr < dest_end.u64ptr) {
          // Write the flash double-word buffer to the page buffer and reinitialize it.
          *dest.u32ptr++ = flash_dword.u32[0];
          *dest.u32ptr++ = flash_dword.u32[1];
          flash_dword.u64 = source.u64;
        }
      }
    }

    // Write the source data to the page buffer with 64-bit alignment.
    for (i = flash_page_source_end.u64ptr - dest.u64ptr; i; i--) {
      *dest.u32ptr++ = source.u32[0];
      *dest.u32ptr++ = source.u32[1];
    }

    // If the current destination page has an incomplete end...
    if (incomplete_flash_page_end) {
      // This is required by the hardware, even if page erase is not requested,
      // in order to be able to write successfully to erased parts of flash
      // pages that have already been written to.
      {
        tmp.u8ptr = (volatile uint8_t *)dest_end.u8ptr;

        // If end of destination is not 64-bit aligned...
        if (!Test_align((uint32_t)dest_end.u8ptr, sizeof(uint64_t))) {
          // Fill the end of the flash double-word buffer with the current flash page data.
          for (i = Get_align((uint32_t)dest_end.u8ptr, sizeof(uint64_t)); i < sizeof(uint64_t); i++) {
            flash_dword.u8[i] = *tmp.u8ptr++;
          }

          // Write the flash double-word buffer to the page buffer.
          *dest.u32ptr++ = flash_dword.u32[0];
          *dest.u32ptr++ = flash_dword.u32[1];
        }

        // Fill the end of the page buffer with the current flash page data.
        for (; !Test_align((uint32_t)tmp.u64ptr, AVR32_FLASHCDW_PAGE_SIZE); tmp.u64ptr++) {
          *tmp.u32ptr = *tmp.u32ptr;
          *(tmp.u32ptr+1) = *(tmp.u32ptr+1);
        }
      }
    }

    // If the current flash page is in the flash array...
    if (dest.u8ptr <= AVR32_FLASHCDW_USER_PAGE) {
      // Erase the current page if requested and write it from the page buffer.
      if (erase) {
        flashcdw_erase_page(-1, false);
        error_status |= flashcdw_error_status;
      }
      flashcdw_write_page(-1);
      error_status |= flashcdw_error_status;

      // If the end of the flash array is reached, go to the User page.
      if (dest.u8ptr >= flash_array_end.u8ptr) {
        dest.u8ptr = AVR32_FLASHCDW_USER_PAGE;
      }
    }
    else {
      // Erase the User page if requested and write it from the page buffer.
      if (erase) {
        flashcdw_erase_user_page(false);
        error_status |= flashcdw_error_status;
      }
      flashcdw_write_user_page();
      error_status |= flashcdw_error_status;
    }
  }

  // Update the FLASHC error status.
  flashcdw_error_status = error_status;

  // Return the initial destination pointer as the standard memset function does.
  return dst;
}
//! @brief Copy \c nbytes bytes from RAM at \c src to flash at \c dst through
//! the FLASHC page buffer.
//!
//! The destination range is clipped to the flash array and the User page. Each
//! touched flash page is optionally erased, then programmed; unaligned head and
//! tail bytes are merged with existing flash content via a 64-bit staging word,
//! and the source is read with the widest alignment it allows.
//!
//! @param dst    Destination address (flash array or User page).
//! @param src    Source buffer in RAM.
//! @param nbytes Number of bytes to copy.
//! @param erase  Whether to erase each touched page before programming it.
//!
//! @return The initial \c dst, as the standard \c memcpy does.
volatile void* flashc_memcpy (volatile void* dst, const void* src, size_t nbytes, Bool erase)
{
  // Use aggregated pointers to have several alignments available for a same address.
  UnionCVPtr flash_array_end;
  UnionVPtr dest;
  UnionCPtr source;
  StructCVPtr dest_end;
  UnionCVPtr flash_page_source_end;
  Bool incomplete_flash_page_end;
  Union64 flash_dword;
  Bool flash_dword_pending = FALSE;  // TRUE when a partially-filled staging word must be flushed at the tail
  UnionVPtr tmp;
  unsigned int error_status = 0;
  unsigned int i, j;

  // Reformat arguments.
  flash_array_end.u8ptr = AVR32_FLASH + flashc_get_flash_size ();
  dest.u8ptr = dst;
  source.u8ptr = src;
  dest_end.u8ptr = dest.u8ptr + nbytes;

  // If destination is outside flash, go to next flash page if any.
  if (dest.u8ptr < AVR32_FLASH)
  {
    source.u8ptr += AVR32_FLASH - dest.u8ptr;  // keep source in step with the clipped destination
    dest.u8ptr = AVR32_FLASH;
  }
  else if (flash_array_end.u8ptr <= dest.u8ptr && dest.u8ptr < AVR32_FLASHC_USER_PAGE)
  {
    source.u8ptr += AVR32_FLASHC_USER_PAGE - dest.u8ptr;
    dest.u8ptr = AVR32_FLASHC_USER_PAGE;
  }

  // If end of destination is outside flash, move it to the end of the previous flash page if any.
  if (dest_end.u8ptr > AVR32_FLASHC_USER_PAGE + AVR32_FLASHC_USER_PAGE_SIZE)
  {
    dest_end.u8ptr = AVR32_FLASHC_USER_PAGE + AVR32_FLASHC_USER_PAGE_SIZE;
  }
  else if (AVR32_FLASHC_USER_PAGE >= dest_end.u8ptr && dest_end.u8ptr > flash_array_end.u8ptr)
  {
    dest_end.u8ptr = flash_array_end.u8ptr;
  }

  // Align each end of destination pointer with its natural boundary.
  dest_end.u16ptr = (U16 *) Align_down ((U32) dest_end.u8ptr, sizeof (U16));
  dest_end.u32ptr = (U32 *) Align_down ((U32) dest_end.u16ptr, sizeof (U32));
  dest_end.u64ptr = (U64 *) Align_down ((U32) dest_end.u32ptr, sizeof (U64));

  // While end of destination is not reached...
  while (dest.u8ptr < dest_end.u8ptr)
  {
    // Clear the page buffer in order to prepare data for a flash page write.
    flashc_clear_page_buffer ();
    error_status |= flashc_error_status;

    // Determine where the source data will end in the current flash page.
    flash_page_source_end.u64ptr =
        (U64 *) min ((U32) dest_end.u64ptr,
                     Align_down ((U32) dest.u8ptr, AVR32_FLASHC_PAGE_SIZE) + AVR32_FLASHC_PAGE_SIZE);

    // Determine if the current destination page has an incomplete end.
    incomplete_flash_page_end = (Align_down ((U32) dest.u8ptr, AVR32_FLASHC_PAGE_SIZE) >=
                                 Align_down ((U32) dest_end.u8ptr, AVR32_FLASHC_PAGE_SIZE));

    // If destination does not point to the beginning of the current flash page...
    if (!Test_align ((U32) dest.u8ptr, AVR32_FLASHC_PAGE_SIZE))
    {
      // Fill the beginning of the page buffer with the current flash page data.
      // This is required by the hardware, even if page erase is not requested,
      // in order to be able to write successfully to erased parts of flash
      // pages that have already been written to.
      for (tmp.u8ptr = (U8 *) Align_down ((U32) dest.u8ptr, AVR32_FLASHC_PAGE_SIZE);
           tmp.u64ptr < (U64 *) Align_down ((U32) dest.u8ptr, sizeof (U64));
           tmp.u64ptr++)
        *tmp.u64ptr = *tmp.u64ptr;  // 64-bit self-copy re-latches flash data into the page buffer

      // If destination is not 64-bit aligned...
      if (!Test_align ((U32) dest.u8ptr, sizeof (U64)))
      {
        // Fill the beginning of the flash double-word buffer with the current
        // flash page data.
        // This is required by the hardware, even if page erase is not
        // requested, in order to be able to write successfully to erased parts
        // of flash pages that have already been written to.
        for (i = 0; i < Get_align ((U32) dest.u8ptr, sizeof (U64)); i++)
          flash_dword.u8[i] = *tmp.u8ptr++;

        // Fill the end of the flash double-word buffer with the source data.
        for (; i < sizeof (U64); i++)
          flash_dword.u8[i] = *source.u8ptr++;

        // Align the destination pointer with its 64-bit boundary.
        dest.u64ptr = (U64 *) Align_down ((U32) dest.u8ptr, sizeof (U64));

        // If the current destination double-word is not the last one...
        if (dest.u64ptr < dest_end.u64ptr)
        {
          // Write the flash double-word buffer to the page buffer.
          *dest.u64ptr++ = flash_dword.u64;
        }
        // If the current destination double-word is the last one, the flash
        // double-word buffer must be kept for later.
        else flash_dword_pending = TRUE;
      }
    }

    // Read the source data with the maximal possible alignment and write it to
    // the page buffer with 64-bit alignment.
    switch (Get_align ((U32) source.u8ptr, sizeof (U32)))
    {
    case 0:  // source is 32-bit aligned: copy whole double-words directly
      for (i = flash_page_source_end.u64ptr - dest.u64ptr; i; i--)
        *dest.u64ptr++ = *source.u64ptr++;
      break;

    case sizeof (U16):  // source is only 16-bit aligned: gather 16-bit halves
      for (i = flash_page_source_end.u64ptr - dest.u64ptr; i; i--)
      {
        for (j = 0; j < sizeof (U64) / sizeof (U16); j++)
          flash_dword.u16[j] = *source.u16ptr++;
        *dest.u64ptr++ = flash_dword.u64;
      }
      break;

    default:  // source is byte-aligned only: gather byte by byte
      for (i = flash_page_source_end.u64ptr - dest.u64ptr; i; i--)
      {
        for (j = 0; j < sizeof (U64); j++)
          flash_dword.u8[j] = *source.u8ptr++;
        *dest.u64ptr++ = flash_dword.u64;
      }
    }

    // If the current destination page has an incomplete end...
    if (incomplete_flash_page_end)
    {
      // If the flash double-word buffer is in use, do not initialize it.
      if (flash_dword_pending) i = Get_align ((U32) dest_end.u8ptr, sizeof (U64));
      // If the flash double-word buffer is free...
      else
      {
        // Fill the beginning of the flash double-word buffer with the source data.
        for (i = 0; i < Get_align ((U32) dest_end.u8ptr, sizeof (U64)); i++)
          flash_dword.u8[i] = *source.u8ptr++;
      }

      // This is required by the hardware, even if page erase is not requested,
      // in order to be able to write successfully to erased parts of flash
      // pages that have already been written to.
      {
        tmp.u8ptr = (volatile U8 *) dest_end.u8ptr;

        // If end of destination is not 64-bit aligned...
        if (!Test_align ((U32) dest_end.u8ptr, sizeof (U64)))
        {
          // Fill the end of the flash double-word buffer with the current flash page data.
          for (; i < sizeof (U64); i++)
            flash_dword.u8[i] = *tmp.u8ptr++;

          // Write the flash double-word buffer to the page buffer.
          *dest.u64ptr++ = flash_dword.u64;
        }

        // Fill the end of the page buffer with the current flash page data.
        for (; !Test_align ((U32) tmp.u64ptr, AVR32_FLASHC_PAGE_SIZE); tmp.u64ptr++)
          *tmp.u64ptr = *tmp.u64ptr;
      }
    }

    // If the current flash page is in the flash array...
    if (dest.u8ptr <= AVR32_FLASHC_USER_PAGE)
    {
      // Erase the current page if requested and write it from the page buffer.
      if (erase)
      {
        flashc_erase_page (-1, FALSE);
        error_status |= flashc_error_status;
      }
      flashc_write_page (-1);
      error_status |= flashc_error_status;

      // If the end of the flash array is reached, go to the User page.
      if (dest.u8ptr >= flash_array_end.u8ptr)
      {
        source.u8ptr += AVR32_FLASHC_USER_PAGE - dest.u8ptr;
        dest.u8ptr = AVR32_FLASHC_USER_PAGE;
      }
    }
    // If the current flash page is the User page...
    else
    {
      // Erase the User page if requested and write it from the page buffer.
      if (erase)
      {
        flashc_erase_user_page (FALSE);
        error_status |= flashc_error_status;
      }
      flashc_write_user_page ();
      error_status |= flashc_error_status;
    }
  }

  // Update the FLASHC error status.
  flashc_error_status = error_status;

  // Return the initial destination pointer as the standard memcpy function does.
  return dst;
}
//! host_read_p_rxpacket
//!
//! This function reads the selected pipe FIFO to the buffer pointed to by
//! rxbuf, using as few accesses as possible.
//!
//! Each FIFO read is a volatile access with hardware side effects; the access
//! widths (8/16/32/64-bit) and which of them post-increment the FIFO pointer
//! follow the USBB FIFO access rules, so the access pattern must not change.
//!
//! @param p           Number of the addressed pipe
//! @param rxbuf       Address of buffer to write
//! @param data_length Number of bytes to read
//! @param prxbuf      NULL or pointer to the buffer address to update
//!
//! @return Number of read bytes
//!
//! @note The selected pipe FIFO may be read in several steps by calling
//! host_read_p_rxpacket several times.
//!
//! @warning Invoke Host_reset_pipe_fifo_access before this function when at
//! FIFO beginning whether or not the FIFO is to be read in several steps.
//!
//! @warning Do not mix calls to this function with calls to indexed macros.
//!
U32 host_read_p_rxpacket(U8 p, void *rxbuf, U32 data_length, void **prxbuf)
{
  // Use aggregated pointers to have several alignments available for a same address
  UnionCVPtr p_fifo;
  UnionPtr rxbuf_cur;
#if (!defined __OPTIMIZE_SIZE__) || !__OPTIMIZE_SIZE__  // Auto-generated when GCC's -Os command option is used
  StructCPtr rxbuf_end;
#else
  UnionCPtr rxbuf_end;
#endif  // !__OPTIMIZE_SIZE__

  // Initialize pointers for copy loops and limit the number of bytes to copy
  // (bounded by the number of bytes currently available in the FIFO)
  p_fifo.u8ptr = pep_fifo[p].u8ptr;
  rxbuf_cur.u8ptr = rxbuf;
  rxbuf_end.u8ptr = rxbuf_cur.u8ptr + min(data_length, Host_byte_count(p));
#if (!defined __OPTIMIZE_SIZE__) || !__OPTIMIZE_SIZE__  // Auto-generated when GCC's -Os command option is used
  rxbuf_end.u16ptr = (U16 *)Align_down((U32)rxbuf_end.u8ptr, sizeof(U16));
  rxbuf_end.u32ptr = (U32 *)Align_down((U32)rxbuf_end.u16ptr, sizeof(U32));
  rxbuf_end.u64ptr = (U64 *)Align_down((U32)rxbuf_end.u32ptr, sizeof(U64));

  // If all addresses are aligned the same way with respect to 16-bit boundaries
  if (Get_align((U32)rxbuf_cur.u8ptr, sizeof(U16)) == Get_align((U32)p_fifo.u8ptr, sizeof(U16)))
  {
    // If pointer to reception buffer is not 16-bit aligned
    if (!Test_align((U32)rxbuf_cur.u8ptr, sizeof(U16)))
    {
      // Copy 8-bit data to reach 16-bit alignment
      if (rxbuf_cur.u8ptr < rxbuf_end.u8ptr)
      {
        // 8-bit accesses to FIFO data registers do require pointer post-increment
        *rxbuf_cur.u8ptr++ = *p_fifo.u8ptr++;
      }
    }

    // If all addresses are aligned the same way with respect to 32-bit boundaries
    if (Get_align((U32)rxbuf_cur.u16ptr, sizeof(U32)) == Get_align((U32)p_fifo.u16ptr, sizeof(U32)))
    {
      // If pointer to reception buffer is not 32-bit aligned
      if (!Test_align((U32)rxbuf_cur.u16ptr, sizeof(U32)))
      {
        // Copy 16-bit data to reach 32-bit alignment
        if (rxbuf_cur.u16ptr < rxbuf_end.u16ptr)
        {
          // 16-bit accesses to FIFO data registers do require pointer post-increment
          *rxbuf_cur.u16ptr++ = *p_fifo.u16ptr++;
        }
      }

      // If pointer to reception buffer is not 64-bit aligned
      if (!Test_align((U32)rxbuf_cur.u32ptr, sizeof(U64)))
      {
        // Copy 32-bit data to reach 64-bit alignment
        if (rxbuf_cur.u32ptr < rxbuf_end.u32ptr)
        {
          // 32-bit accesses to FIFO data registers do not require pointer post-increment
          *rxbuf_cur.u32ptr++ = *p_fifo.u32ptr;
        }
      }

      // Copy 64-bit-aligned data
      while (rxbuf_cur.u64ptr < rxbuf_end.u64ptr)
      {
        // 64-bit accesses to FIFO data registers do not require pointer post-increment
        *rxbuf_cur.u64ptr++ = *p_fifo.u64ptr;
      }

      // Copy 32-bit-aligned data
      if (rxbuf_cur.u32ptr < rxbuf_end.u32ptr)
      {
        // 32-bit accesses to FIFO data registers do not require pointer post-increment
        *rxbuf_cur.u32ptr++ = *p_fifo.u32ptr;
      }
    }

    // Copy remaining 16-bit data if some
    while (rxbuf_cur.u16ptr < rxbuf_end.u16ptr)
    {
      // 16-bit accesses to FIFO data registers do require pointer post-increment
      *rxbuf_cur.u16ptr++ = *p_fifo.u16ptr++;
    }
  }
#endif  // !__OPTIMIZE_SIZE__

  // Copy remaining 8-bit data if some
  while (rxbuf_cur.u8ptr < rxbuf_end.u8ptr)
  {
    // 8-bit accesses to FIFO data registers do require pointer post-increment
    *rxbuf_cur.u8ptr++ = *p_fifo.u8ptr++;
  }

  // Save current position in FIFO data register
  pep_fifo[p].u8ptr = (volatile U8 *)p_fifo.u8ptr;

  // Return the updated buffer address and the number of copied bytes
  if (prxbuf) *prxbuf = rxbuf_cur.u8ptr;
  return (rxbuf_cur.u8ptr - (U8 *)rxbuf);
}
//! host_set_p_txpacket
//!
//! This function fills the selected pipe FIFO with a constant byte, using
//! as few accesses as possible.
//!
//! Each FIFO write is a volatile access with hardware side effects; the
//! constant byte is replicated into a 64-bit word so the FIFO can be filled
//! with the widest accesses alignment allows.
//!
//! @param p           Number of the addressed pipe
//! @param txbyte      Byte to fill the pipe with
//! @param data_length Number of bytes to write
//!
//! @return Number of non-written bytes
//!
//! @note The selected pipe FIFO may be filled in several steps by calling
//! host_set_p_txpacket several times.
//!
//! @warning Invoke Host_reset_pipe_fifo_access before this function when at
//! FIFO beginning whether or not the FIFO is to be filled in several steps.
//!
//! @warning Do not mix calls to this function with calls to indexed macros.
//!
U32 host_set_p_txpacket(U8 p, U8 txbyte, U32 data_length)
{
  // Use aggregated pointers to have several alignments available for a same address
  UnionVPtr p_fifo_cur;
#if (!defined __OPTIMIZE_SIZE__) || !__OPTIMIZE_SIZE__  // Auto-generated when GCC's -Os command option is used
  StructCVPtr p_fifo_end;
  Union64 txval;
#else
  UnionCVPtr p_fifo_end;
  union
  {
    U8 u8[1];
  } txval;
#endif  // !__OPTIMIZE_SIZE__

  // Initialize pointers for write loops and limit the number of bytes to write
  // (bounded by the remaining room in the pipe FIFO)
  p_fifo_cur.u8ptr = pep_fifo[p].u8ptr;
  p_fifo_end.u8ptr = p_fifo_cur.u8ptr +
                     min(data_length, Host_get_pipe_size(p) - Host_byte_count(p));
#if (!defined __OPTIMIZE_SIZE__) || !__OPTIMIZE_SIZE__  // Auto-generated when GCC's -Os command option is used
  p_fifo_end.u16ptr = (U16 *)Align_down((U32)p_fifo_end.u8ptr, sizeof(U16));
  p_fifo_end.u32ptr = (U32 *)Align_down((U32)p_fifo_end.u16ptr, sizeof(U32));
  p_fifo_end.u64ptr = (U64 *)Align_down((U32)p_fifo_end.u32ptr, sizeof(U64));
#endif  // !__OPTIMIZE_SIZE__
  txval.u8[0] = txbyte;
#if (!defined __OPTIMIZE_SIZE__) || !__OPTIMIZE_SIZE__  // Auto-generated when GCC's -Os command option is used
  // Replicate the byte: 1 -> 2 -> 4 -> 8 bytes.
  txval.u8[1] = txval.u8[0];
  txval.u16[1] = txval.u16[0];
  txval.u32[1] = txval.u32[0];

  // If pointer to FIFO data register is not 16-bit aligned
  if (!Test_align((U32)p_fifo_cur.u8ptr, sizeof(U16)))
  {
    // Write 8-bit data to reach 16-bit alignment
    if (p_fifo_cur.u8ptr < p_fifo_end.u8ptr)
    {
      *p_fifo_cur.u8ptr++ = txval.u8[0];
    }
  }

  // If pointer to FIFO data register is not 32-bit aligned
  if (!Test_align((U32)p_fifo_cur.u16ptr, sizeof(U32)))
  {
    // Write 16-bit data to reach 32-bit alignment
    if (p_fifo_cur.u16ptr < p_fifo_end.u16ptr)
    {
      *p_fifo_cur.u16ptr++ = txval.u16[0];
    }
  }

  // If pointer to FIFO data register is not 64-bit aligned
  if (!Test_align((U32)p_fifo_cur.u32ptr, sizeof(U64)))
  {
    // Write 32-bit data to reach 64-bit alignment
    if (p_fifo_cur.u32ptr < p_fifo_end.u32ptr)
    {
      *p_fifo_cur.u32ptr++ = txval.u32[0];
    }
  }

  // Write 64-bit-aligned data
  while (p_fifo_cur.u64ptr < p_fifo_end.u64ptr)
  {
    *p_fifo_cur.u64ptr++ = txval.u64;
  }

  // Write remaining 32-bit data if some
  if (p_fifo_cur.u32ptr < p_fifo_end.u32ptr)
  {
    *p_fifo_cur.u32ptr++ = txval.u32[0];
  }

  // Write remaining 16-bit data if some
  if (p_fifo_cur.u16ptr < p_fifo_end.u16ptr)
  {
    *p_fifo_cur.u16ptr++ = txval.u16[0];
  }

  // Write remaining 8-bit data if some
  if (p_fifo_cur.u8ptr < p_fifo_end.u8ptr)
  {
    *p_fifo_cur.u8ptr++ = txval.u8[0];
  }
#else
  // Write remaining 8-bit data if some
  while (p_fifo_cur.u8ptr < p_fifo_end.u8ptr)
  {
    *p_fifo_cur.u8ptr++ = txval.u8[0];
  }
#endif  // !__OPTIMIZE_SIZE__

  // Compute the number of non-written bytes
  data_length -= p_fifo_cur.u8ptr - pep_fifo[p].u8ptr;

  // Save current position in FIFO data register
  pep_fifo[p].u8ptr = p_fifo_cur.u8ptr;

  // Return the number of non-written bytes
  return data_length;
}
//! usb_write_ep_txpacket
//!
//! This function writes the buffer pointed to by txbuf to the selected
//! endpoint FIFO, using as few accesses as possible.
//!
//! Each FIFO write is a volatile access with hardware side effects; the access
//! widths (8/16/32/64-bit) and which of them post-increment the FIFO pointer
//! follow the USBB FIFO access rules, so the access pattern must not change.
//!
//! @param ep          Number of the addressed endpoint
//! @param txbuf       Address of buffer to read
//! @param data_length Number of bytes to write
//! @param ptxbuf      NULL or pointer to the buffer address to update
//!
//! @return Number of non-written bytes
//!
//! @note The selected endpoint FIFO may be written in several steps by calling
//! usb_write_ep_txpacket several times.
//!
//! @warning Invoke Usb_reset_endpoint_fifo_access before this function when at
//! FIFO beginning whether or not the FIFO is to be written in several steps.
//!
//! @warning Do not mix calls to this function with calls to indexed macros.
//!
U32 usb_write_ep_txpacket(U8 ep, const void *txbuf, U32 data_length, const void **ptxbuf)
{
  // Use aggregated pointers to have several alignments available for a same address
  UnionVPtr ep_fifo;
  UnionCPtr txbuf_cur;
#if (!defined __OPTIMIZE_SIZE__) || !__OPTIMIZE_SIZE__  // Auto-generated when GCC's -Os command option is used
  StructCPtr txbuf_end;
#else
  UnionCPtr txbuf_end;
#endif  // !__OPTIMIZE_SIZE__

  // Initialize pointers for copy loops and limit the number of bytes to copy
  // (bounded by the remaining room in the endpoint FIFO)
  ep_fifo.u8ptr = pep_fifo[ep].u8ptr;
  txbuf_cur.u8ptr = txbuf;
  txbuf_end.u8ptr = txbuf_cur.u8ptr +
                    min(data_length, Usb_get_endpoint_size(ep) - Usb_byte_count(ep));
#if (!defined __OPTIMIZE_SIZE__) || !__OPTIMIZE_SIZE__  // Auto-generated when GCC's -Os command option is used
  txbuf_end.u16ptr = (U16 *)Align_down((U32)txbuf_end.u8ptr, sizeof(U16));
  txbuf_end.u32ptr = (U32 *)Align_down((U32)txbuf_end.u16ptr, sizeof(U32));
  txbuf_end.u64ptr = (U64 *)Align_down((U32)txbuf_end.u32ptr, sizeof(U64));

  // If all addresses are aligned the same way with respect to 16-bit boundaries
  if (Get_align((U32)txbuf_cur.u8ptr, sizeof(U16)) == Get_align((U32)ep_fifo.u8ptr, sizeof(U16)))
  {
    // If pointer to transmission buffer is not 16-bit aligned
    if (!Test_align((U32)txbuf_cur.u8ptr, sizeof(U16)))
    {
      // Copy 8-bit data to reach 16-bit alignment
      if (txbuf_cur.u8ptr < txbuf_end.u8ptr)
      {
        // 8-bit accesses to FIFO data registers do require pointer post-increment
        *ep_fifo.u8ptr++ = *txbuf_cur.u8ptr++;
      }
    }

    // If all addresses are aligned the same way with respect to 32-bit boundaries
    if (Get_align((U32)txbuf_cur.u16ptr, sizeof(U32)) == Get_align((U32)ep_fifo.u16ptr, sizeof(U32)))
    {
      // If pointer to transmission buffer is not 32-bit aligned
      if (!Test_align((U32)txbuf_cur.u16ptr, sizeof(U32)))
      {
        // Copy 16-bit data to reach 32-bit alignment
        if (txbuf_cur.u16ptr < txbuf_end.u16ptr)
        {
          // 16-bit accesses to FIFO data registers do require pointer post-increment
          *ep_fifo.u16ptr++ = *txbuf_cur.u16ptr++;
        }
      }

      // If pointer to transmission buffer is not 64-bit aligned
      if (!Test_align((U32)txbuf_cur.u32ptr, sizeof(U64)))
      {
        // Copy 32-bit data to reach 64-bit alignment
        if (txbuf_cur.u32ptr < txbuf_end.u32ptr)
        {
          // 32-bit accesses to FIFO data registers do not require pointer post-increment
          *ep_fifo.u32ptr = *txbuf_cur.u32ptr++;
        }
      }

      // Copy 64-bit-aligned data
      while (txbuf_cur.u64ptr < txbuf_end.u64ptr)
      {
        // 64-bit accesses to FIFO data registers do not require pointer post-increment
        *ep_fifo.u64ptr = *txbuf_cur.u64ptr++;
      }

      // Copy 32-bit-aligned data
      if (txbuf_cur.u32ptr < txbuf_end.u32ptr)
      {
        // 32-bit accesses to FIFO data registers do not require pointer post-increment
        *ep_fifo.u32ptr = *txbuf_cur.u32ptr++;
      }
    }

    // Copy remaining 16-bit data if some
    while (txbuf_cur.u16ptr < txbuf_end.u16ptr)
    {
      // 16-bit accesses to FIFO data registers do require pointer post-increment
      *ep_fifo.u16ptr++ = *txbuf_cur.u16ptr++;
    }
  }
#endif  // !__OPTIMIZE_SIZE__

  // Copy remaining 8-bit data if some
  while (txbuf_cur.u8ptr < txbuf_end.u8ptr)
  {
    // 8-bit accesses to FIFO data registers do require pointer post-increment
    *ep_fifo.u8ptr++ = *txbuf_cur.u8ptr++;
  }

  // Save current position in FIFO data register
  pep_fifo[ep].u8ptr = ep_fifo.u8ptr;

  // Return the updated buffer address and the number of non-copied bytes
  if (ptxbuf) *ptxbuf = txbuf_cur.u8ptr;
  return data_length - (txbuf_cur.u8ptr - (U8 *)txbuf);
}