static int dfs_jffs2_close(struct dfs_fd* file) { int result; cyg_file * jffs2_file; RT_ASSERT(file->data != NULL); jffs2_file = (cyg_file *)(file->data); if (file->flags & DFS_O_DIRECTORY) /* operations about dir */ { rt_mutex_take(&jffs2_lock, RT_WAITING_FOREVER); result = jffs2_dir_colse(jffs2_file); rt_mutex_release(&jffs2_lock); if (result) return jffs2_result_to_dfs(result); rt_free(jffs2_file); return 0; } /* regular file operations */ rt_mutex_take(&jffs2_lock, RT_WAITING_FOREVER); result = jffs2_file_colse(jffs2_file); rt_mutex_release(&jffs2_lock); if (result) return jffs2_result_to_dfs(result); /* release memory */ rt_free(jffs2_file); return 0; }
/* Test thread: takes the mutex twice (nested take by the same owner),
 * counts to 5, and conditionally releases both holds. */
static void thread1_entry(void* parameter)
{
    rt_err_t result;

    /* nested take by the same thread; only the second result is checked */
    result = rt_mutex_take(mutex, RT_WAITING_FOREVER);
    result = rt_mutex_take(mutex, RT_WAITING_FOREVER);
    rt_kprintf("thread1: got mutex\n");
    if (result != RT_EOK)
    {
        return;
    }

    for(t1_count = 0; t1_count < 5;t1_count ++)
    {
        rt_kprintf("thread1:count: %d\n", t1_count);
    }

    /* NOTE(review): the two releases happen only when thread1 and thread2
     * priorities differ — when they are equal the mutex is never released
     * by this thread. This looks like part of a priority-inheritance test
     * scenario; confirm the condition is intentional before changing it. */
    if (t2->current_priority != t1->current_priority)
    {
        rt_kprintf("thread1: released mutex\n");
        rt_mutex_release(mutex);
        rt_mutex_release(mutex);
    }
}
void lcd1602_DispProcessing(uint8_t percent) { uint8_t i, BlockNum = 0; uint8_t abyString[17]; rt_err_t rtResult; //while (1) { BlockNum = ((uint32_t)percent * 16) / 99; for (i = 0; i < 16; i++) { if (i < BlockNum) { abyString [i] = '#'; } else { abyString [i] = ' '; } } abyString[16] = 0; rtResult = rt_mutex_take(LcdMutex, RT_WAITING_FOREVER); if (rtResult == RT_EOK) { LCD_PutStr(abyString, 16, 16); rt_mutex_release(LcdMutex); } } }
/* Write one page (data and/or spare area) to the NOR-backed pseudo-NAND.
 * data_len is documented by the caller as always 2048; writes go out as
 * 16-bit words via Mem_Wr. Always returns RT_EOK. */
static rt_err_t nand_mtd_write(struct rt_mtd_nand_device *dev,
                               rt_off_t page,
                               const rt_uint8_t *data,
                               rt_uint32_t data_len, /* will be 2048 always! */
                               const rt_uint8_t *spare,
                               rt_uint32_t spare_len)
{
    int w;

    rt_mutex_take(&nand, RT_WAITING_FOREVER);

    if (data != RT_NULL && data_len != 0)
    {
        /* main-area offset of this page, counted in 16-bit words */
        rt_uint32_t main_offs = page * 512;
        const rt_uint16_t *src = (const rt_uint16_t *)data;

        for (w = 0; w < data_len / 2; w++)
            Mem_Wr(src[w], main_offs + w);
    }

    if (spare != RT_NULL && spare_len != 0)
    {
        /* spare-area offset of this page, counted in 16-bit words */
        rt_uint32_t spare_offs = page * 16 + NOR_SPARE_BLOCK;
        const rt_uint16_t *src = (const rt_uint16_t *)spare;

        for (w = 0; w < spare_len / 2; w++)
            Mem_Wr(src[w], spare_offs + w);
    }

    rt_mutex_release(&nand);
    return RT_EOK;
}
/* Full-duplex SPI transfer: send 'length' words from send_buf while
 * receiving into recv_buf, with chip-select held for the whole message.
 * Returns the count reported by the bus driver's xfer(), or 0 on error
 * (errno is set to -RT_EIO). */
rt_size_t rt_spi_transfer(struct rt_spi_device *device,
                          const void *send_buf,
                          void *recv_buf,
                          rt_size_t length)
{
    rt_err_t result;
    struct rt_spi_message message;

    RT_ASSERT(device != RT_NULL);
    RT_ASSERT(device->bus != RT_NULL);

    result = rt_mutex_take(&(device->bus->lock), RT_WAITING_FOREVER);
    if (result == RT_EOK)
    {
        if (device->bus->owner != device)
        {
            /* not the same owner as current, re-configure SPI bus */
            result = device->bus->ops->configure(device, &device->config);
            if (result == RT_EOK)
            {
                /* set SPI bus owner */
                device->bus->owner = device;
            }
            else
            {
                /* configure SPI bus failed; report 0 transferred */
                rt_set_errno(-RT_EIO);
                result = 0;
                goto __exit;
            }
        }

        /* initial message: single segment, CS asserted then released */
        message.send_buf = send_buf;
        message.recv_buf = recv_buf;
        message.length = length;
        message.cs_take = 1;
        message.cs_release = 1;
        message.next = RT_NULL;

        /* transfer message; xfer returns the transferred length (0 = error) */
        result = device->bus->ops->xfer(device, &message);
        if (result == 0)
        {
            rt_set_errno(-RT_EIO);
            goto __exit;
        }
    }
    else
    {
        /* could not acquire the bus lock */
        rt_set_errno(-RT_EIO);
        return 0;
    }

__exit:
    rt_mutex_release(&(device->bus->lock));

    return result;
}
/* TODO: If your device need more time to initialize I2C bus or waiting
 * memory write, you can use I2C_AcknowledgePolling avoid I2C bus lose. */
Status I2C_AcknowledgePolling(I2C_TypeDef* I2Cx ,uint8_t Addr)
{
    /* NOTE(review): timeout is unsigned, so 'timeout-- <= 0' can only be
     * true at exactly 0 — it works, but reads as if negatives were possible */
    uint32_t timeout = 0xFFFF, ret;
    uint16_t tmp;

    ret = rt_mutex_take(i2c_mux, RT_WAITING_FOREVER );
    if( ret == RT_EOK )
    {
        /* keep issuing START + address until SR1 bit 0x0002 is set
         * (device acknowledged) or the retry budget runs out */
        do{
            if( timeout-- <= 0 )
            {
                /* give up: clear ack-failure flag, issue STOP, free the lock */
                I2C_ClearFlag(I2Cx,I2C_FLAG_AF);
                I2Cx->CR1 |= CR1_STOP_Set;
                rt_mutex_release(i2c_mux);
                return Error;
            }
            I2Cx->CR1 |= CR1_START_Set;
            tmp = I2Cx->SR1;  /* read SR1 to clear the SB (start) flag */
            I2Cx->DR = Addr;
        }while((I2Cx->SR1&0x0002) != 0x0002);

        /* acknowledged: clean up, send STOP and wait for it to complete */
        I2C_ClearFlag(I2Cx,I2C_FLAG_AF);
        I2Cx->CR1 |= CR1_STOP_Set;
        while ((I2Cx->CR1&0x200) == 0x200);
        rt_kprintf( "AcknowledgePolling OK\n");
        rt_mutex_release(i2c_mux);
        return Success;
    }
    else
        return Error;
}
rt_err_t rt_spi_configure(struct rt_spi_device* device, struct rt_spi_configuration* cfg) { rt_err_t result; RT_ASSERT(device != RT_NULL); /* set configuration */ device->config.data_width = cfg->data_width; device->config.mode = cfg->mode & RT_SPI_MODE_MASK ; device->config.max_hz = cfg->max_hz ; if (device->bus != RT_NULL) { result = rt_mutex_take(&(device->bus->lock), RT_WAITING_FOREVER); if (result == RT_EOK) { if (device->bus->owner == device) { device->bus->ops->configure(device, &device->config); } /* release lock */ rt_mutex_release(&(device->bus->lock)); } } return RT_EOK; }
/* Run 'num' I2C messages through the bus driver under the bus lock.
 * Returns whatever master_xfer reports, or 0 when the bus has no
 * master_xfer operation. */
rt_size_t rt_i2c_transfer(struct rt_i2c_bus_device *bus,
                          struct rt_i2c_msg msgs[],
                          rt_uint32_t num)
{
    rt_size_t ret;

    if (!bus->ops->master_xfer)
    {
        i2c_dbg("I2C bus operation not supported\n");
        return 0;
    }

#ifdef RT_I2C_DEBUG
    for (ret = 0; ret < num; ret++)
    {
        i2c_dbg("msgs[%d] %c, addr=0x%02x, len=%d%s\n", ret,
                (msgs[ret].flags & RT_I2C_RD) ? 'R' : 'W',
                msgs[ret].addr, msgs[ret].len);
    }
#endif

    rt_mutex_take(&bus->lock, RT_WAITING_FOREVER);
    ret = bus->ops->master_xfer(bus, msgs, num);
    rt_mutex_release(&bus->lock);

    return ret;
}
/* FatFs sync hook: grant access when the sync object is obtained
 * within _FS_TIMEOUT ticks; RT_TRUE on success, RT_FALSE otherwise. */
int ff_req_grant(_SYNC_t m)
{
    return (rt_mutex_take(m, _FS_TIMEOUT) == RT_EOK) ? RT_TRUE : RT_FALSE;
}
/* Lock a POSIX mutex, blocking forever until acquired.
 * Returns 0 on success, EINVAL for a NULL mutex or kernel failure, and
 * EDEADLK when a non-recursive mutex is re-locked by its current owner. */
int pthread_mutex_lock(pthread_mutex_t *mutex)
{
    int mtype;
    rt_err_t result;

    if (!mutex)
        return EINVAL;
    if (mutex->attr == -1)
    {
        /* init mutex: attr of -1 marks a not-yet-initialized mutex */
        pthread_mutex_init(mutex, RT_NULL);
    }

    mtype = mutex->attr & MUTEXATTR_TYPE_MASK;

    /* the owner check must not race with a scheduler switch */
    rt_enter_critical();
    if (mutex->lock.owner == rt_thread_self() && mtype != PTHREAD_MUTEX_RECURSIVE)
    {
        rt_exit_critical();
        /* self-deadlock on a non-recursive mutex */
        return EDEADLK;
    }
    rt_exit_critical();

    result = rt_mutex_take(&(mutex->lock), RT_WAITING_FOREVER);
    if (result == RT_EOK)
        return 0;

    return EINVAL;
}
/** Lock a mutex, blocking until it becomes available.
 *  Must not be called from interrupt context.
 * @param mutex the mutex to lock
 */
void sys_mutex_lock(sys_mutex_t *mutex)
{
    RT_DEBUG_NOT_IN_INTERRUPT;
    (void)rt_mutex_take(*mutex, RT_WAITING_FOREVER);
}
/* Write 'len' bytes from buf to a jffs2 file at its current offset.
 * Returns the number of bytes written (derived from the offset advance)
 * or a negative dfs error code. */
static int dfs_jffs2_write(struct dfs_fd *file, const void *buf, rt_size_t len)
{
    cyg_file *fp;
    struct CYG_UIO_TAG uio;
    struct CYG_IOVEC_TAG vec;
    int start_offs;
    int ret;

    RT_ASSERT(file->data != NULL);
    fp = (cyg_file *)(file->data);

    /* describe the caller's buffer as a single-segment uio */
    vec.iov_base = (void *)buf;
    vec.iov_len = len;
    uio.uio_iov = &vec;
    uio.uio_iovcnt = 1;          /* must be 1 */
    /* uio_offset is not used by this jffs2 port */
    uio.uio_resid = vec.iov_len; /* seems unused in jffs2 as well */

    /* remember where the file pointer was to compute bytes written */
    start_offs = fp->f_offset;

    rt_mutex_take(&jffs2_lock, RT_WAITING_FOREVER);
    ret = jffs2_file_write(fp, &uio);
    rt_mutex_release(&jffs2_lock);

    if (ret)
        return jffs2_result_to_dfs(ret);

    /* update dfs position and report how far the offset advanced */
    file->pos = fp->f_offset;
    return fp->f_offset - start_offs;
}
/**
 * this function will lock device file system.
 *
 * @note please don't invoke it on ISR.
 */
void dfs_lock(void)
{
    /* an infinite-wait take can only fail if the kernel object is broken;
     * treat that as fatal */
    if (rt_mutex_take(&fslock, RT_WAITING_FOREVER) != RT_EOK)
    {
        RT_ASSERT(0);
    }
}
/* Device read hook: drain up to 'size' bytes from the telnet session's
 * receive ring buffer; returns the number of bytes actually copied. */
static rt_size_t telnet_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    rt_size_t nread;

    /* the rx ring buffer is shared with the network thread */
    rt_mutex_take(telnet->rx_ringbuffer_lock, RT_WAITING_FOREVER);
    nread = rt_ringbuffer_get(&(telnet->rx_ringbuffer), buffer, size);
    rt_mutex_release(telnet->rx_ringbuffer_lock);

    return nread;
}
/* Test thread: delays briefly so the peer thread can grab the mutex first,
 * then blocks on the mutex and counts once it gets it. */
static void thread2_entry(void* parameter)
{
    rt_thread_delay(5);

    rt_mutex_take(mutex, RT_WAITING_FOREVER);
    rt_kprintf("thread2: got mutex\n");

    for(t2_count = 0; t2_count < 5;t2_count ++)
    {
        rt_kprintf("thread2: count: %d\n", t2_count);
    }
    /* NOTE(review): the mutex is never released here — acceptable only if
     * the test tears down the thread/mutex afterwards; confirm intent. */
}
/* Check the bad-block marker in a block's spare area.
 * Returns RT_EOK when the low marker byte is still 0xff (block good),
 * RT_ERROR otherwise. */
static rt_err_t nand_mtd_check_block(struct rt_mtd_nand_device *device,
                                     rt_uint32_t block)
{
    rt_uint16_t marker;
    rt_uint32_t spare_offs = NOR_SPARE_BLOCK + block * 32;

    rt_mutex_take(&nand, RT_WAITING_FOREVER);
    Mem_Rd(&marker, spare_offs);
    rt_mutex_release(&nand);

    /* 0xff in the low byte means the block was never marked bad */
    return ((marker & 0xff) == 0xff) ? RT_EOK : RT_ERROR;
}
static void rt_thread_entry2(void* parameter) { rt_err_t result; rt_tick_t tick; /* 1. static mutex test */ rt_kprintf("thread2 try to get static mutex\n"); rt_mutex_take(&static_mutex, 10); rt_kprintf("thread2 got static mutex\n"); rt_thread_delay(RT_TICK_PER_SECOND); rt_kprintf("thread2 release static mutex\n"); rt_mutex_release(&static_mutex); /* 2. dynamic mutex test */ rt_kprintf("thread2 try to get dynamic mutex\n"); rt_mutex_take(dynamic_mutex, 10); rt_kprintf("thread2 got dynamic mutex\n"); rt_thread_delay(RT_TICK_PER_SECOND); rt_kprintf("thread2 release dynamic mutex\n"); rt_mutex_release(dynamic_mutex); }
/* Mark a block bad by stamping its spare-area word with 0xff00.
 * Always returns RT_EOK. */
static rt_err_t nand_mtd_mark_bad_block(struct rt_mtd_nand_device *device,
                                        rt_uint32_t block)
{
    const rt_uint16_t bad_marker = 0xff00;
    rt_uint32_t spare_offs = NOR_SPARE_BLOCK + block * 32;

    rt_mutex_take(&nand, RT_WAITING_FOREVER);
    /* write spare word to 0xff00: low byte != 0xff flags the block bad */
    Mem_Wr(bad_marker, spare_offs);
    rt_mutex_release(&nand);

    return RT_EOK;
}
void hm_heatcal_thread_entry(void* parameter) { rt_uint32_t event_set = 0; float heat_used = 0; cal_event = rt_event_create("H_cal", RT_IPC_FLAG_FIFO); RT_ASSERT(cal_event); while(1) { if(rt_event_recv(cal_event, TDC_DATA_FULL_EVENT, RT_EVENT_FLAG_AND | RT_EVENT_FLAG_CLEAR, RT_WAITING_FOREVER, &event_set)==RT_EOK) { if((rt_mutex_take(temp_lock, rt_tick_from_millisecond(LOCK_TACK_WAIT_TIME_MS)) || (rt_mutex_take(tof_lock, rt_tick_from_millisecond(LOCK_TACK_WAIT_TIME_MS)))) != RT_EOK) { rt_kprintf("TOF and temprature take lock error\n"); } heat_used += do_heat_cal(); rt_mutex_release(tof_lock); rt_mutex_release(temp_lock); heat_print(heat_used); } } }
/// Wait until a Mutex becomes available osStatus osMutexWait(osMutexId mutex_id, uint32_t millisec) { rt_err_t result; rt_tick_t ticks; ticks = rt_tick_from_millisecond(millisec); result = rt_mutex_take(mutex_id, ticks); if (result == RT_EOK) return osOK; else return osErrorOS; }
int AddTestIndex(struct TestIndexListStc til) { int pos=ReadAllTestIndex(); struct TestIndexListStc *ti = NULL; ti = &TestIndexListEnd; struct TestIndexListStc *pti=NULL; pti=malloc(sizeof(TestIndexList));//c0 ti->next = pti;//c1 pti = ti;//c2 ti = ti->next;//c3 ti->pre = pti;//c4 ti->next = NULL;//c5 pos++; ti->ti=pos; ti->del=0; ti->time=til.time; memcpy(&ti->unit,&til.time,sizeof(til.time)); memcpy(&ti->TestLocA,&til.TestLocA,sizeof(til.TestLocA)); memcpy(&ti->TestLocB,&til.TestLocB,sizeof(til.TestLocA)); memcpy(&ti->TestLocC,&til.TestLocC,sizeof(til.TestLocA)); ti->a1=til.a1; ti->a2=til.a2; ti->b1=til.b1; ti->b2=til.b2; TestIndexListEnd=*ti; UINT8 tfwrbuf[TFCARD_BLOCK_SIZE]; memcpy(tfwrbuf,&TestIndexListEnd,sizeof(TestIndexListEnd)); rt_uint8_t idxpath[10]; CreatePath(TestIndexListEnd.pos,idxpath); rt_err_t result = rt_mutex_take(&tfio_mutex, RT_WAITING_NO); //update testindex.bin CH378FileOpen(tibin); CH378SecLocate(SECEND); PUINT8 realcnt; CH378SecWrite(tfwrbuf,1,realcnt); if(*realcnt<1) { //write error } //create index.bin status=CH378FileCreate(idxpath); CH378FileClose(1); rt_mutex_release(&tfio_mutex); return -1; }
/* Try to lock a POSIX mutex without blocking.
 * Returns 0 on success, EINVAL for a NULL mutex, EBUSY when the mutex
 * could not be taken immediately. */
int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    if (!mutex)
        return EINVAL;

    /* lazily initialize a mutex whose attr is still the -1 sentinel */
    if (mutex->attr == -1)
    {
        pthread_mutex_init(mutex, RT_NULL);
    }

    /* zero timeout: succeed immediately or not at all */
    return (rt_mutex_take(&(mutex->lock), 0) == RT_EOK) ? 0 : EBUSY;
}
/* send telnet option to remote: queue a 3-byte IAC/option/value sequence
 * into the tx ring buffer, then flush it to the client */
static void send_option_to_client(struct telnet_session *telnet, rt_uint8_t option, rt_uint8_t value)
{
    rt_uint8_t seq[4];

    seq[0] = TELNET_IAC;
    seq[1] = option;
    seq[2] = value;
    seq[3] = 0; /* terminator slot; only 3 bytes are transmitted */

    rt_mutex_take(telnet->tx_ringbuffer_lock, RT_WAITING_FOREVER);
    rt_ringbuffer_put(&telnet->tx_ringbuffer, seq, 3);
    rt_mutex_release(telnet->tx_ringbuffer_lock);

    send_to_client(telnet);
}
/* Blank one display line: address row 'y' at column 0, then write 128
 * zero bytes. Parameters 'x' and 'sizef' are accepted but unused here. */
void OLED_Clear_line(u8 x, u8 y, u8 sizef)
{
    u8 col;

    rt_mutex_take(oled_disp_mut, RT_WAITING_FOREVER);

    OLED_WR_Byte(0xb0 + y, OLED_CMD); /* select page/row y (0..7) */
    OLED_WR_Byte(0x00, OLED_CMD);     /* column start: low nibble */
    OLED_WR_Byte(0x10, OLED_CMD);     /* column start: high nibble */

    for (col = 0; col < 128; col++)
    {
        OLED_WR_Byte(0, OLED_DATA);
    }

    rt_mutex_release(oled_disp_mut);
}
/* Device read hook for memory-mapped data flash: copy 'size' whole
 * sectors starting at sector 'pos' into the caller's buffer.
 * Returns the requested sector count. */
static rt_size_t rt_dflash_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    uint8_t *src;

    rt_mutex_take(mutex, RT_WAITING_FOREVER);
    /* the flash is directly addressable: compute the sector's address */
    src = (uint8_t *)(StartAddr + pos * SectorSize);
    rt_memcpy(buffer, src, size * SectorSize);
    rt_mutex_release(mutex);

    return size;
}
static int dfs_jffs2_stat(struct dfs_filesystem* fs, const char *path, struct stat *st) { int result; struct jffs2_stat s; cyg_mtab_entry * mte; /* deal the path for jffs2 */ RT_ASSERT(!((path[0] == '/') && (path[1] == 0))); if (path[0] == '/') path++; result = _find_fs(&mte, fs->dev_id); if (result) return -DFS_STATUS_ENOENT; rt_mutex_take(&jffs2_lock, RT_WAITING_FOREVER); result = jffs2_porting_stat(mte, mte->root, path, (void *)&s); rt_mutex_release(&jffs2_lock); if (result) return jffs2_result_to_dfs(result); /* convert to dfs stat structure */ switch(s.st_mode & JFFS2_S_IFMT) { case JFFS2_S_IFREG: st->st_mode = DFS_S_IFREG | DFS_S_IRUSR | DFS_S_IRGRP | DFS_S_IROTH | DFS_S_IWUSR | DFS_S_IWGRP | DFS_S_IWOTH; break; case JFFS2_S_IFDIR: st->st_mode = DFS_S_IFDIR | DFS_S_IXUSR | DFS_S_IXGRP | DFS_S_IXOTH; break; default: st->st_mode = DFS_DT_UNKNOWN; //fixme break; } st->st_dev = 0; st->st_size = s.st_size; st->st_mtime = s.st_mtime; st->st_blksize = 1;//fixme: what's this field? return 0; }
void logTask(void * prm) { crtpInitTaskQueue(CRTP_PORT_LOG); while(1) { crtpReceivePacketBlock(CRTP_PORT_LOG, &p); //xSemaphoreTake(logLock, portMAX_DELAY); rt_mutex_take(logLock, RT_WAITING_FOREVER); if (p.channel==TOC_CH) logTOCProcess(p.data[0]); if (p.channel==CONTROL_CH) logControlProcess(); //xSemaphoreGive(logLock); rt_mutex_release(logLock); } }
/* transmit packet.
 *
 * Fix over the original: the tx-buffer wait declared a SECOND `result`
 * variable inside a nested block; the -RT_ERROR it assigned on timeout
 * shadowed — and was discarded with — the outer `result`, so the caller
 * saw RT_EOK even though nothing was transmitted. The nested declaration
 * is removed and the outer `result` is used throughout. */
rt_err_t rt_cme_eth_tx(rt_device_t dev, struct pbuf *p)
{
    rt_err_t result = RT_EOK;
    ETH_TX_DESC *desc;
    struct rt_cme_eth *cme_eth = (struct rt_cme_eth *)dev;

    rt_mutex_take(&cme_eth->lock, RT_WAITING_FOREVER);

#ifdef ETH_TX_DUMP
    packet_dump("TX dump", p);
#endif /* ETH_TX_DUMP */

    /* get free tx buffer (bounded wait so tx cannot hang forever) */
    if (rt_sem_take(&cme_eth->tx_buf_free, RT_TICK_PER_SECOND / 10) != RT_EOK)
    {
        result = -RT_ERROR;
        goto _exit;
    }

    desc = ETH_AcquireFreeTxDesc();
    if (desc == RT_NULL)
    {
        CME_ETH_PRINTF("TxDesc not ready!\n");
        RT_ASSERT(0);
        result = -RT_ERROR;
        goto _exit;
    }

    /* single-descriptor frame: first and last segment in one */
    desc->TX_0.TX0_b.FS = TRUE;
    desc->TX_0.TX0_b.LS = TRUE;
    desc->TX_1.TX1_b.SIZE = p->tot_len;
    pbuf_copy_partial(p, (void *)(desc->bufAddr), p->tot_len, 0);

    ETH_ReleaseTxDesc(desc);
    ETH_ResumeTx();

_exit:
    rt_mutex_release(&cme_eth->lock);
    return result;
}
/* Send 'len' bytes over the RS485 link: switch the transceiver to TX,
 * write through the uart device, wait for the line to drain, then switch
 * back to RX. Always returns RT_EOK. */
rt_err_t rs485_send_data(u8 *data, u16 len)
{
    rt_mutex_take(rs485_send_mut, RT_WAITING_FOREVER);

    RS485_TX_ENABLE;

    /* bind the uart device lazily on first use */
    if (uart1_dev_my->device == RT_NULL)
    {
        uart1_rs485_set_device();
    }

    rt_device_write(uart1_dev_my->device, 0, data, len);
    rt_thread_delay(80); /* allow the last bytes to leave before RX flip */

    RS485_RX_ENABLE;
    rt_mutex_release(rs485_send_mut);

    return RT_EOK;
}
/* "Erase" a block of the NOR-backed pseudo-NAND by filling its main and
 * spare areas with 0xffff, written as 16-bit words. Always returns RT_EOK. */
static rt_err_t nand_mtd_erase_block(struct rt_mtd_nand_device *device,
                                     rt_uint32_t block)
{
    int w;
    const rt_uint16_t blank = 0xffff;
    rt_uint32_t main_offs = block * 32 * 512;
    rt_uint32_t spare_offs = NOR_SPARE_BLOCK + block * 32;

    rt_mutex_take(&nand, RT_WAITING_FOREVER);

    /* main area: one block = 32 pages * 512 bytes, halved for 16-bit writes */
    for (w = 0; w < (32 * 512) / 2; w++)
        Mem_Wr(blank, main_offs + w);

    /* spare area: 32 pages * 16 bytes, halved for 16-bit writes */
    for (w = 0; w < (32 * 16) / 2; w++)
        Mem_Wr(blank, spare_offs + w);

    rt_mutex_release(&nand);

    return RT_EOK;
}