/*
 * danube_copy_to - bulk copy from system memory into the mapped flash window.
 * @map:  MTD map descriptor; map->map_priv_1 holds the window base address
 * @to:   byte offset into the flash window
 * @from: source buffer in system memory
 * @len:  number of bytes to copy
 *
 * The EBU (External Bus Unit) must be enabled around every flash access.
 */
void danube_copy_to(struct map_info *map, unsigned long to, const void *from,
		    ssize_t len)
{
	/* %p/%lx/%zd match the actual argument types (was %x/%x/%d). */
	DANUBE_MTD_DMSG("from:%p to:%lx len:%zd\n", from, to, len);
	enable_ebu();
	memcpy_toio((void *)(map->map_priv_1 + to), from, len);
	disable_ebu();
}
/*
 * danube_write32 - 32-bit store into the mapped flash window.
 * @map: MTD map descriptor; map->map_priv_1 holds the window base address
 * @d:   value to write
 * @adr: byte offset into the flash window
 */
void danube_write32(struct map_info *map, __u32 d, unsigned long adr)
{
	enable_ebu();
	/* volatile: this is a memory-mapped I/O store and must not be
	 * elided or reordered by the compiler. */
	*(volatile __u32 *)(map->map_priv_1 + adr) = d;
	disable_ebu();
	/* map_priv_1 and adr are unsigned long -> %lx (was %p/%x). */
	DANUBE_MTD_DMSG("32: [%lx + %lx] <== %x\n", map->map_priv_1, adr, d);
}
/*
 * danube_copy_from - bulk copy from the mapped flash window into memory.
 * @map:  MTD map descriptor; map->map_priv_1 holds the window base address
 * @to:   destination buffer in system memory
 * @from: byte offset into the flash window
 * @len:  number of bytes to copy
 */
void danube_copy_from(struct map_info *map, void *to, unsigned long from,
		      ssize_t len)
{
	/* %lx/%p/%zd match the actual argument types (was %x/%x/%d). */
	DANUBE_MTD_DMSG("from:%lx to:%p len:%zd\n", from, to, len);
#if defined(CONFIG_DANUBE) || defined(CONFIG_TWINPASS)
	/*
	 * Workaround for the dual-core parts: an EBU access can raise an
	 * unaligned-access exception when source and destination are not
	 * equally aligned, so fall back to a byte-wise copy in that case.
	 */
	enable_ebu();
	from = (unsigned long)(from + map->map_priv_1);
	if ((((unsigned long)to) & 3) == (from & 3)) {
		/* Same alignment on both sides: the optimized copy is safe. */
		memcpy_fromio(to, (void *)from, len);
	} else {
		const u8 *src = (const u8 *)from;
		u8 *dst = (u8 *)to;

		while (len--)
			*dst++ = *src++;
	}
	disable_ebu();
#else
	memcpy_fromio(to, (void *)(map->map_priv_1 + from), len);
#endif
}
/*
 * danube_read32 - 32-bit load from the mapped flash window.
 * @map: MTD map descriptor; map->map_priv_1 holds the window base address
 * @ofs: byte offset into the flash window
 *
 * Returns the 32-bit value read.
 */
__u32 danube_read32(struct map_info *map, unsigned long ofs)
{
	u32 temp;

	enable_ebu();
	/* volatile: memory-mapped I/O load; must not be cached or elided. */
	temp = *(volatile __u32 *)(map->map_priv_1 + ofs);
	disable_ebu();
	/* map_priv_1 and ofs are unsigned long -> %lx (was %p/%x). */
	DANUBE_MTD_DMSG("32: [%lx + %lx] ==> %x\n", map->map_priv_1, ofs, temp);
	return temp;
}
/*
 * danube_write16 - 16-bit store into the mapped flash window.
 * @map: MTD map descriptor; map->map_priv_1 holds the window base address
 * @d:   value to write
 * @adr: byte offset into the flash window
 */
void danube_write16(struct map_info *map, __u16 d, unsigned long adr)
{
	/*
	 * Modify the address offset to account for the EBU address swap.
	 * Needed only while probing.
	 */
	if (probing)
		adr ^= 2;
	enable_ebu();
	/* volatile: memory-mapped I/O store; must not be elided/reordered. */
	*(volatile __u16 *)(map->map_priv_1 + adr) = d;
	disable_ebu();
	/* map_priv_1 and adr are unsigned long -> %lx (was %p/%x). */
	DANUBE_MTD_DMSG("16: [%lx + %lx] <== %x\n", map->map_priv_1, adr, d);
}
/*
 * danube_read16 - 16-bit load from the mapped flash window.
 * @map: MTD map descriptor; map->map_priv_1 holds the window base address
 * @ofs: byte offset into the flash window
 *
 * Returns the 16-bit value read.
 */
__u16 danube_read16(struct map_info *map, unsigned long ofs)
{
	u16 temp;

	/*
	 * Modify the address offset to account for the EBU address swap.
	 * Needed only while probing.
	 */
	if (probing)
		ofs ^= 2;
	enable_ebu();
	/* volatile: memory-mapped I/O load; must not be cached or elided. */
	temp = *(volatile __u16 *)(map->map_priv_1 + ofs);
	disable_ebu();
	/* map_priv_1 and ofs are unsigned long -> %lx (was %p/%x). */
	DANUBE_MTD_DMSG("16: [%lx + %lx] ==> %x\n", map->map_priv_1, ofs, temp);
	return temp;
}