Example #1
/*
 * This routine creates a new partition map and sets it current.  If there
 * was a current map, the new map starts out identical to it.  Otherwise
 * the new map starts out all zeroes.
 */
void
make_partition()
{
	register struct partition_info *pptr, *parts;
	int	i;

	/*
	 * Lock out interrupts so the lists don't get mangled.
	 */
	enter_critical();
	/*
	 * Get space for the new map and link it into the list
	 * of maps for the current disk type.
	 */
	pptr = (struct partition_info *)zalloc(sizeof (struct partition_info));
	parts = cur_dtype->dtype_plist;
	if (parts == NULL) {
		cur_dtype->dtype_plist = pptr;
	} else {
		while (parts->pinfo_next != NULL) {
			parts = parts->pinfo_next;
		}
		parts->pinfo_next = pptr;
		pptr->pinfo_next = NULL;
	}
	/*
	 * If there was a current map, copy its values.
	 */
	if (cur_label == L_TYPE_EFI) {
	    struct dk_gpt	*map;
	    int			nparts;
	    int			size;

	    nparts = cur_parts->etoc->efi_nparts;
	    size = sizeof (struct dk_part) * nparts + sizeof (struct dk_gpt);
	    map = zalloc(size);
	    (void) memcpy(map, cur_parts->etoc, size);
	    pptr->etoc = map;
	    cur_disk->disk_parts = cur_parts = pptr;
	    exit_critical();
	    return;
	}
	if (cur_parts != NULL) {
		for (i = 0; i < NDKMAP; i++) {
			pptr->pinfo_map[i] = cur_parts->pinfo_map[i];
		}
		pptr->vtoc = cur_parts->vtoc;
	} else {
		/*
		 * Otherwise set initial default vtoc values
		 */
		set_vtoc_defaults(pptr);
	}

	/*
	 * Make the new one current.
	 */
	cur_disk->disk_parts = cur_parts = pptr;
	exit_critical();
}
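The enter_critical()/exit_critical() pair recurs in every example on this page, but its implementation is project-specific and not quoted here. As a minimal sketch only, assuming a single-core Cortex-M target and the CMSIS intrinsics __disable_irq()/__enable_irq() (the nesting scheme and all names are illustrative, not taken from any project above):

/* Hypothetical nesting-counter critical section for one core. */
static volatile int critical_nesting = 0;

void enter_critical(void) {
  __disable_irq();            /* mask interrupts via PRIMASK */
  critical_nesting++;         /* safe to modify: interrupts are masked */
}

void exit_critical(void) {
  if (--critical_nesting == 0) {
    __enable_irq();           /* unmask only at the outermost exit */
  }
}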
Example #2
void TASK_stop_timer(task_timer* timer) {
#ifndef CONFIG_TASK_NONCRITICAL_TIMER
  enter_critical();
  TQ_ENTER_CRITICAL;
#endif
  if (!timer->alive) {
#ifndef CONFIG_TASK_NONCRITICAL_TIMER
    TQ_EXIT_CRITICAL;
    exit_critical();
#endif
    return;
  }
  task_sys.tim_lock = TRUE;
  timer->alive = FALSE;

  // wipe all dead instances
  task_timer *cur_timer = task_sys.first_timer;
  task_timer *pre_timer = NULL;
  while (cur_timer) {
    if (cur_timer->alive == FALSE) {
      // unlink cur_timer; pre_timer stays on the last live entry
      if (pre_timer == NULL) {
        task_sys.first_timer = cur_timer->_next;
      } else {
        pre_timer->_next = cur_timer->_next;
      }
    } else {
      pre_timer = cur_timer;
    }
    cur_timer = cur_timer->_next;
  }
  task_sys.tim_lock = FALSE;
#ifndef CONFIG_TASK_NONCRITICAL_TIMER
  TQ_EXIT_CRITICAL;
  exit_critical();
#endif
}
Example #3
void
free_mem(struct os_mem *mem_ptr, void *block_ptr)
{
	cpsr_t cpsr;

	if (mem_ptr == NULL)
		return;

	if (block_ptr == NULL)
		return;

	enter_critical();

	/* Make sure all blocks not already returned */
	if (mem_ptr->num_free >= mem_ptr->num_blocks) {
		exit_critical();
		return;
	}

	/* block_ptr points to the next free block address */
	//*(void **) block_ptr = mem_ptr->mem_free_list;
	//mem_ptr->mem_free_list = block_ptr;

	void *free_block;
	int return_blocks = 0;
	void **link_ptr;

	/* free block address */
	free_block = mem_ptr->mem_free_list;
	mem_ptr->mem_free_list = block_ptr;
	
	link_ptr = mem_ptr->mem_free_list;

	/* Link each returned block to the following one */
	while (TRUE) {
		block_ptr = (u8 *) block_ptr + mem_ptr->block_len;

		if (block_ptr == free_block)
			break;

		*link_ptr = block_ptr;
		link_ptr = (void **) block_ptr;

		++return_blocks;
	}

	/* terminate the returned chain on the old free-list head */
	*link_ptr = free_block;

	mem_ptr->num_free += return_blocks + 1;

	exit_critical();	
}
Example #4
static void stmpe_task_adc(u32_t a, void *b) {
  enter_critical();
  if (!TASK_mutex_lock(&i2c_mutex)) {
    STMPE_DBG("stmpe_impl task adc mutex locked\n");
    exit_critical();
    return;
  }
  STMPE_DBG("stmpe_impl task adc mutex acquired\n");
  int res = stmpe811_handler_adc_read(&stmpe.handler, stmpe.adc_chan);
  if (res != I2C_OK) {
    STMPE_DBG("stmpe_impl task adc ERR mutex unlocked\n");
    TASK_mutex_unlock(&i2c_mutex);
  }
  exit_critical();
}
Example #5
static void stmpe_task_gpio(u32_t a, void *b) {
  enter_critical();
  if (!TASK_mutex_lock(&i2c_mutex)) {
    STMPE_DBG("stmpe_impl task gpio mutex locked\n");
    exit_critical();
    return;
  }
  STMPE_DBG("stmpe_impl task gpio mutex acquired\n");
  int res = stmpe811_handler_gpio_define(&stmpe.handler, stmpe.gpio_set, stmpe.gpio_reset);
  if (res != I2C_OK) {
    STMPE_DBG("stmpe_impl task gpio ERR mutex unlocked\n");
    TASK_mutex_unlock(&i2c_mutex);
  }
  exit_critical();
}
Example #6
static void stmpe_err_cb(stmpe811_handler_state state, int err) {
  enter_critical();
  if (err) DBG(D_APP, D_WARN, "stmpe_impl callback state:%i res:%i\n", state, err);
  STMPE_DBG("stmpe_impl res state:%i res:%i\n", state, err);
  STMPE_DBG("stmpe_impl finished mutex unlocked\n");
  TASK_mutex_unlock(&i2c_mutex);
  if (err) {
    stmpe.req_mask = 0;
  }
  // the order must comply with stmpe_exe_req function order
  if (state == STMPE811_HDL_STATE_GPIO_DEFINE_CLR || state == STMPE811_HDL_STATE_GPIO_DEFINE_SET) {
    STMPE_DBG("stmpe_impl req CLR gpio\n");
    stmpe.req_mask &= ~STMPE_REQ_GPIO;
    stmpe.gpio_set = 0;
    stmpe.gpio_reset = 0;
  }
  else if (state == STMPE811_HDL_STATE_ADC_READ) {
    STMPE_DBG("stmpe_impl req CLR adc\n");
    stmpe.req_mask &= ~STMPE_REQ_ADC;
    stmpe.adc_chan = 0;
  }
  else if (state == STMPE811_HDL_STATE_TEMP_READ || state == STMPE811_HDL_STATE_TEMP_RESULT) {
    STMPE_DBG("stmpe_impl req CLR temp\n");
    stmpe.req_mask &= ~STMPE_REQ_TEMP;
  }
  else if (state == STMPE811_HDL_STATE_INT_STA_READ) {
    STMPE_DBG("stmpe_impl req CLR int_sta\n");
    print("int sta 0b%08b\n", stmpe811_handler_int_sta_get(&stmpe.handler));
    stmpe.req_mask &= ~STMPE_REQ_INT_STA;
  }
  stmpe_exe_req();
  exit_critical();
}
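stmpe_exe_req() is called throughout these examples but is not quoted on this page. A hypothetical sketch, inferred only from the comment above that the clearing order in stmpe_err_cb must match its dispatch order (GPIO, ADC, TEMP, INT_STA); every name beyond that is an assumption, and stmpe_task_temp/stmpe_task_int_sta are assumed counterparts of the stmpe_task_gpio/stmpe_task_adc tasks quoted above:

// Hypothetical: fire the task for the next pending request, in the same
// order that stmpe_err_cb clears the request bits.
static void stmpe_exe_req(void) {
  if (stmpe.req_mask & STMPE_REQ_GPIO) {
    TASK_run(TASK_create(stmpe_task_gpio, 0), 0, NULL);
  } else if (stmpe.req_mask & STMPE_REQ_ADC) {
    TASK_run(TASK_create(stmpe_task_adc, 0), 0, NULL);
  } else if (stmpe.req_mask & STMPE_REQ_TEMP) {
    TASK_run(TASK_create(stmpe_task_temp, 0), 0, NULL);
  } else if (stmpe.req_mask & STMPE_REQ_INT_STA) {
    TASK_run(TASK_create(stmpe_task_int_sta, 0), 0, NULL);
  }
}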
Example #7
File: task.c Project: lemin9538/mirros
static int init_task_struct(struct task_struct *task, u32 flag)
{
	struct task_struct *parent;
	unsigned long flags;

	/* if thread is a kernel thread, his parent is idle */
	if (flag & PROCESS_TYPE_KERNEL)
		parent = idle;
	else
		parent = current;

	/* get a new pid */
	task->pid = get_new_pid(task);
	if ((task->pid) < 0)
		return -EINVAL;

	task->uid = parent->uid;
	task->stack_base = NULL;
	strncpy(task->name, parent->name, PROCESS_NAME_SIZE);
	task->flag = flag;
	task->state = 0;

	/* add task to the child list of his parent. */
	task->parent = parent;
	init_list(&task->p);
	init_list(&task->child);
	init_mutex(&task->mutex);

	enter_critical(&flags);
	list_add(&parent->child, &task->p);
	exit_critical(&flags);
	
	return 0;
}
Example #8
int
p_expand()
{
	uint64_t delta;
	uint_t nparts;
	struct dk_gpt *efi_label = cur_parts->etoc;

	if (cur_parts->etoc->efi_altern_lba == 1 ||
	    (cur_parts->etoc->efi_altern_lba >=
	    cur_parts->etoc->efi_last_lba)) {
		err_print("Warning: No expanded capacity is found.\n");
		return (0);
	}

	delta = efi_label->efi_last_lba - efi_label->efi_altern_lba;
	nparts = efi_label->efi_nparts;

	enter_critical();
	efi_label->efi_parts[nparts - 1].p_start += delta;
	efi_label->efi_last_u_lba += delta;
	efi_label->efi_altern_lba = cur_parts->etoc->efi_last_lba;
	exit_critical();

	fmt_print("The expanded capacity is added to the unallocated space.\n");
	return (0);
}
Example #9
void *
get_mem(struct os_mem *mem_ptr, u32 size)
{
	cpsr_t	cpsr;
	void	*block_ptr;
	
	if (mem_ptr == NULL) {
		errno = ERR_MEM_INVALID_PTR;
		return NULL;
	}

	enter_critical();

/*
	// See if there any free memory blocks
	if (mem_ptr->num_free > 0) {
		// Point to the start free block
		block_ptr = mem_ptr->mem_free_list;
		// mem_free_list = next block addr
		mem_ptr->mem_free_list = *(void **) block_ptr;
		--(mem_ptr->num_free);

		exit_critical();
		return block_ptr;
	}
*/

	int need_blocks;

	/* round the request up to a whole number of blocks */
	need_blocks = (int) ((size + mem_ptr->block_len - 1) / mem_ptr->block_len);
	if (need_blocks > 0 && mem_ptr->num_free >= need_blocks) {
		/* Point to the start free block */
		block_ptr = mem_ptr->mem_free_list;
		/* mem_free_list = next n blocks addr */
		mem_ptr->mem_free_list = *(void **) ((u8 *) block_ptr + \
			(need_blocks - 1) * mem_ptr->block_len);

		mem_ptr->num_free -= need_blocks;

		exit_critical();
		return block_ptr;
	}

	exit_critical();
	errno = ERR_MEM_NO_FREE_BLOCK;
	return NULL;
}
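A short usage sketch of the pool API above; pool setup is assumed to happen elsewhere and all identifiers here are illustrative:

/* Hypothetical usage of get_mem()/free_mem(); 'pool' would be
 * initialized elsewhere with a block array and block_len. */
struct os_mem pool;
void *buf = get_mem(&pool, 100);    /* reserves whole blocks covering 100 bytes */
if (buf != NULL) {
	/* ... use the buffer ... */
	free_mem(&pool, buf);       /* links the blocks back onto the free list */
}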
Example #10
void spin_unlock_irqstore(spin_lock_t *lock, unsigned long *flags)
{
#ifdef CONFIG_SMP
	lock->value = 0;
	lock->cpu = -1;
#endif
	exit_critical(flags);
}
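The matching lock-side routine is not among the quoted examples. A hypothetical counterpart, assuming the flags-based enter_critical()/exit_critical() API used elsewhere in this project and a uniprocessor build unless CONFIG_SMP is set (smp_processor_id() is an assumed helper, not confirmed by the source):

void spin_lock_irqsave(spin_lock_t *lock, unsigned long *flags)
{
	enter_critical(flags);
#ifdef CONFIG_SMP
	/* a real SMP build would need an atomic test-and-set loop here */
	lock->value = 1;
	lock->cpu = smp_processor_id();
#endif
}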
Example #11
bool TASK_mutex_lock(task_mutex *m) {
  task *t = task_sys.current;
  if (!m->taken) {
    m->entries = 1;
    task_take_lock(t, m);
    TRACE_TASK_MUTEX_ENTER(t->_ix);
    return TRUE;
  }
  if (m->reentrant && m->owner == t) {
    m->entries++;
    TRACE_TASK_MUTEX_ENTER_M(t->_ix);
    ASSERT(m->entries < 254);
    return TRUE;
  }
  // taken, mark task still allocated and insert into mutexq
  TRACE_TASK_MUTEX_WAIT(t->_ix);
  enter_critical();
  TQ_ENTER_CRITICAL;
  t->wait_mutex = m;
  if ((t->flags & (TASK_STATIC | TASK_LOOP)) == 0) {
    task_pool.mask[t->_ix/32] &= ~(1<<(t->_ix & 0x1f));
  }
  if ((t->flags & TASK_LOOP)) {
    // looped, remove us from end of queue
    ASSERT(task_sys.last == t);
    ASSERT(task_sys.head);
    if (task_sys.head == t) {
      // the only task in sched queue
      task_sys.head = NULL;
      task_sys.last = NULL;
    } else {
      // find the task pointing to the last task == current task
      task *ct = (task *)task_sys.head;
      while (ct->_next != t) {
        ct = ct->_next;
        ASSERT(ct);
      }
      // remove last task from queue
      ct->_next = NULL;
      task_sys.last = ct;
    }
  }
  TQ_EXIT_CRITICAL;
  exit_critical();

  // insert into mutex queue
  if (m->last == 0) {
    m->head = t;
  } else {
    m->last->_next = t;
  }
  m->last = t;
  t->_next = NULL;
  t->flags &= ~TASK_RUN;
  t->flags |= TASK_WAIT;

  return FALSE;
}
Example #12
static void idle_task(void *parg)
  {
  while(true)
    {
    exit_critical();
    yield();
    }
    //asm volatile("pwrsav #1");
  }
Example #13
void STMPE_req_read_int_sta(void) {
  enter_critical();
  bool do_req = stmpe.req_mask == 0;
  stmpe.req_mask |= STMPE_REQ_INT_STA;
  if (do_req) {
    stmpe_exe_req();
  }
  exit_critical();
}
Example #14
void STMPE_req_read_temp(void) {
  enter_critical();
  bool do_req = stmpe.req_mask == 0;
  stmpe.req_mask |= STMPE_REQ_TEMP;
  if (do_req) {
    stmpe_exe_req();
  }
  exit_critical();
}
Example #15
/*
 * This routine picks the closest partition table which matches the
 * selected disk type.  It is called each time the disk type is
 * changed.  If no match is found, it uses the first element
 * of the partition table.  If no table exists, a dummy is
 * created.
 */
int
get_partition()
{
	register struct partition_info *pptr;
	register struct partition_info *parts;

	/*
	 * If there are no pre-defined maps for this disk type, it's
	 * an error.
	 */
	parts = cur_dtype->dtype_plist;
	if (parts == NULL) {
		err_print("No defined partition tables.\n");
		make_partition();
		return (-1);
	}
	/*
	 * Loop through the pre-defined maps searching for one which matches
	 * the disk type.  If found, copy it into the unnamed partition.
	 */
	enter_critical();
	for (pptr = parts; pptr != NULL; pptr = pptr->pinfo_next) {
	    if (cur_dtype->dtype_asciilabel) {
		if (pptr->pinfo_name != NULL && strcmp(pptr->pinfo_name,
				cur_dtype->dtype_asciilabel) == 0) {
			/*
			 * Set current partition and name it.
			 */
			cur_disk->disk_parts = cur_parts = pptr;
			cur_parts->pinfo_name = pptr->pinfo_name;
			exit_critical();
			return (0);
		}
	    }
	}
	/*
	 * If we couldn't find a match, take the first one.
	 * Set current partition and name it.
	 */
	cur_disk->disk_parts = cur_parts = cur_dtype->dtype_plist;
	cur_parts->pinfo_name = parts->pinfo_name;
	exit_critical();
	return (0);
}
Example #16
int
thr_sigsetmask(int how, const sigset_t *set, sigset_t *oset)
{
	ulwp_t *self = curthread;
	sigset_t saveset;

	if (set == NULL) {
		enter_critical(self);
		if (oset != NULL)
			*oset = self->ul_sigmask;
		exit_critical(self);
	} else {
		switch (how) {
		case SIG_BLOCK:
		case SIG_UNBLOCK:
		case SIG_SETMASK:
			break;
		default:
			return (EINVAL);
		}

		/*
		 * The assignments to self->ul_sigmask must be protected from
		 * signals.  The nuances of this code are subtle.  Be careful.
		 */
		block_all_signals(self);
		if (oset != NULL)
			saveset = self->ul_sigmask;
		switch (how) {
		case SIG_BLOCK:
			self->ul_sigmask.__sigbits[0] |= set->__sigbits[0];
			self->ul_sigmask.__sigbits[1] |= set->__sigbits[1];
			self->ul_sigmask.__sigbits[2] |= set->__sigbits[2];
			self->ul_sigmask.__sigbits[3] |= set->__sigbits[3];
			break;
		case SIG_UNBLOCK:
			self->ul_sigmask.__sigbits[0] &= ~set->__sigbits[0];
			self->ul_sigmask.__sigbits[1] &= ~set->__sigbits[1];
			self->ul_sigmask.__sigbits[2] &= ~set->__sigbits[2];
			self->ul_sigmask.__sigbits[3] &= ~set->__sigbits[3];
			break;
		case SIG_SETMASK:
			self->ul_sigmask.__sigbits[0] = set->__sigbits[0];
			self->ul_sigmask.__sigbits[1] = set->__sigbits[1];
			self->ul_sigmask.__sigbits[2] = set->__sigbits[2];
			self->ul_sigmask.__sigbits[3] = set->__sigbits[3];
			break;
		}
		delete_reserved_signals(&self->ul_sigmask);
		if (oset != NULL)
			*oset = saveset;
		restore_signals(self);
	}

	return (0);
}
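As a usage note, a typical thr_sigsetmask() call pattern blocks a signal around a sensitive region and then restores the saved mask (a hypothetical sketch; error handling omitted):

	sigset_t set, oset;
	(void) sigemptyset(&set);
	(void) sigaddset(&set, SIGINT);
	(void) thr_sigsetmask(SIG_BLOCK, &set, &oset);   /* block SIGINT, save old mask */
	/* ... code that must not be interrupted by SIGINT ... */
	(void) thr_sigsetmask(SIG_SETMASK, &oset, NULL); /* restore the previous mask */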
Example #17
void STMPE_req_gpio_set(u8_t set, u8_t reset) {
  enter_critical();
  bool do_req = stmpe.req_mask == 0;
  stmpe.gpio_set |= set;
  stmpe.gpio_reset |= reset;
  stmpe.req_mask |= STMPE_REQ_GPIO;
  if (do_req) {
    stmpe_exe_req();
  }
  exit_critical();
}
Example #18
void STMPE_req_read_adc(u8_t adc, adc_cb_f fn) {
  enter_critical();
  bool do_req = stmpe.req_mask == 0;
  stmpe.adc_chan = adc;
  stmpe.req_mask |= STMPE_REQ_ADC;
  stmpe.adc_cb = fn;
  if (do_req) {
    stmpe_exe_req();
  }
  exit_critical();
}
Example #19
/**********************************************************************//**
 * \brief	MCLK, SMCLK, and ACLK Init Routine
 *
 * This function uses REFO (the internal reference oscillator) to initialize
 * the DCO to the frequency DCO_FREQ
 *
 * \retval	-1	The clock initialization failed
 * \retval	0	The clock initialization was successful
 *
 *************************************************************************/
int clkInit(void)
{
	unsigned int state;
	long retval;

	enter_critical(state);
		retval = setFLL(DCO_FREQ);
	exit_critical(state);

	if(retval != -1) retval = 0;
	return (int)retval;
}
Example #20
/*
 * Tell the kernel to block all signals.
 * Use the schedctl interface, or failing that, use __lwp_sigmask().
 * This action can be rescinded only by making a system call that
 * sets the signal mask:
 *	__lwp_sigmask(), __sigprocmask(), __setcontext(),
 *	__sigsuspend() or __pollsys().
 * In particular, this action cannot be reversed by assigning
 * scp->sc_sigblock = 0.  That would be a way to lose signals.
 * See the definition of restore_signals(self).
 */
void
block_all_signals(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL)
		scp->sc_sigblock = 1;
	else
		(void) __lwp_sigmask(SIG_SETMASK, &maskset);
	exit_critical(self);
}
Example #21
/***********************************************************
 * Function:	initialize the debug facilities
 * Parameters:
 *      	none
 * Returns:
 *      	none
 */
void InitDebug(void)
{	
	//Initbutten();		// buttons
	// OLED
	enter_critical();
	OLED_Init();
	exit_critical();
	// UART
	uart_init(Debug_UARTx, 115200);
	uart_irq_EN(Debug_UARTx);
	
	gpio_Interrupt_init(DEBUG_PIN, GPI_UP_PF, GPI_DISAB);
}
Example #22
int register_irq(int nr,int (*fn)(void *arg),void *arg)
{
	int err = 0;
	unsigned long flags;

	if ((nr < 0) || (nr > IRQ_NR-1) || (fn == NULL))
		return -EINVAL;

	enter_critical(&flags);
	err = arch_register_irq(nr, fn, arg);
	exit_critical(&flags);

	return err;
}
Example #23
task* TASK_create(task_f f, u8_t flags) {
  enter_critical();
  TQ_ENTER_CRITICAL;
  task* task = TASK_snatch_free(flags & TASK_STATIC);
  TQ_EXIT_CRITICAL;
  exit_critical();
  if (task) {
    task->f = f;
    task->run_requests = 0;
    task->flags = flags & (~(TASK_RUN | TASK_EXE | TASK_WAIT | TASK_KILLED));
    return task;
  } else {
    return 0;
  }
}
Example #24
static void stmpe_task_config(u32_t a, void *b) {
  enter_critical();
  if (!TASK_mutex_lock(&i2c_mutex)) {
    STMPE_DBG("stmpe_impl task config mutex locked\n");
    exit_critical();
    return;
  }
  STMPE_DBG("stmpe_impl task config mutex acquired\n");
  int res = stmpe811_handler_setup(&stmpe.handler,
      STMPE_BLOCK_TEMP | STMPE_BLOCK_GPIO | STMPE_BLOCK_ADC,
      STMPE_GPIO_VBAT_EN |
      STMPE_GPIO2 | STMPE_GPIO3 | STMPE_GPIO4 |  STMPE_GPIO5 | STMPE_GPIO6 | STMPE_GPIO7, // analog / digital
      STMPE_GPIO_VBAT_EN, // direction
      0, // default output
      TRUE, // interrupt
      STMPE_INT_POLARITY_ACTIVE_LOW_FALLING, STMPE_INT_TYPE_LEVEL, // interrupt config
      STMPE_INT_GPIO | STMPE_INT_ADC | STMPE_INT_TEMP_SENS, // interrupt enable
      STMPE_GPIO2 | STMPE_GPIO3 | STMPE_GPIO4 |  STMPE_GPIO5 | STMPE_GPIO6 | STMPE_GPIO7, // gpio interrupt mask
      STMPE_GPIO_ADC_VBAT, // adc interrupt mask
      STMPE_GPIO2 | STMPE_GPIO3 | STMPE_GPIO4 | STMPE_GPIO5 | STMPE_GPIO6 | STMPE_GPIO7, // detect rising
      STMPE_GPIO2 | STMPE_GPIO3 | STMPE_GPIO4 | STMPE_GPIO5 | STMPE_GPIO6 | STMPE_GPIO7, // detect falling
      STMPE_ADC_CLK_3_25MHZ,
      STMPE_ADC_TIM_80,
      STMPE_ADC_RES_12B,
      STMPE_ADC_REF_INT,
      TRUE, // temp enable
      STMPE_TEMP_MODE_ONCE,
      FALSE,
      STMPE_TEMP_THRES_OVER,
      0);
  if (res != I2C_OK) {
    STMPE_DBG("stmpe_impl task config ERR mutex unlocked\n");
    TASK_mutex_unlock(&i2c_mutex);
  }
  exit_critical();
}
Example #25
void TASK_free(task *t) {
  enter_critical();
  TQ_ENTER_CRITICAL;
  if ((t->flags & TASK_RUN) == 0) {
    // not scheduled, so remove it directly from pool
    task_pool.mask[t->_ix/32] |= (1<<(t->_ix & 0x1f));
  }
  // else, scheduled => will be removed in TASK_tick when executed

#ifdef CONFIG_TASKQ_MUTEX
  // check if task is in a mutex wait queue, and remove it if so
  if (t->flags & TASK_WAIT) {
    ASSERT(t->wait_mutex);
    task_mutex *m = t->wait_mutex;
    if (m->head == t) {
      m->head = t->_next;
      if (m->last == t) {
        m->last = NULL;
      }
    } else {
      task *prev_ct = NULL;
      task *ct = (task *)m->head;
      while (ct != NULL) {
        if (ct == t) {
          ASSERT(prev_ct);
          prev_ct->_next = t->_next;
          if (m->last == t) {
            m->last = prev_ct;
            prev_ct->_next = NULL;
          }
          break;
        }
        prev_ct = ct;
        ct = ct->_next;
      }
    }
  }
#endif

  t->flags &= ~(TASK_RUN | TASK_STATIC | TASK_LOOP | TASK_WAIT);
  t->flags |= TASK_KILLED;
  TQ_EXIT_CRITICAL;
  exit_critical();
}
Example #26
result_t subscribe(msg_hook_t *handler)
  {
  if(handler == 0)
    return e_bad_parameter;
  
  enter_critical();
  
  /* initialize both links before publishing the handler */
  handler->prev = 0;
  handler->next = listener;
  
  if(listener != 0)
    listener->prev = handler;
    
  listener = handler;
  
  exit_critical();
  
  return s_ok;
  }
Example #27
result_t register_service(uint8_t service, msg_hook_t *handler)
  {
  if(handler == 0 ||
     service == 0 ||
     service >= num_services)
    return e_bad_parameter;
  
  enter_critical();
  
  /* initialize both links before publishing the handler */
  handler->next = 0;
  handler->prev = services[service];
  
  if(services[service] != 0)
    handler->prev->next = handler;
  
  services[service] = handler;
  
  exit_critical();
  
  return s_ok;
  }
Example #28
result_t unsubscribe(msg_hook_t *handler)
  {
  if(handler == 0)
    return e_bad_parameter;
  
  enter_critical();
  
  if(handler->prev != 0)
    handler->prev->next = handler->next;
  
  if(handler->next != 0)
    handler->next->prev = handler->prev;
  
  if(handler == listener)
    listener = handler->next;
  
  handler->next = 0;
  handler->prev = 0;
  
  exit_critical();
  
  return s_ok;
  }
Example #29
void TASK_run(task* task, u32_t arg, void* arg_p) {
  ASSERT(task);
  ASSERT((task_pool.mask[task->_ix/32] & (1<<(task->_ix & 0x1f))) == 0); // check it is allocated
  ASSERT((task->flags & TASK_RUN) == 0);       // already scheduled
  ASSERT((task->flags & TASK_WAIT) == 0);      // waiting for a mutex
  ASSERT(task >= &task_pool.task[0]);          // mem check
  ASSERT(task < &task_pool.task[CONFIG_TASK_POOL]); // mem check
  task->flags |= TASK_RUN;
  task->arg = arg;
  task->arg_p = arg_p;

  enter_critical();
  TQ_ENTER_CRITICAL;
  if (task_sys.last == 0) {
    task_sys.head = task;
  } else {
    task_sys.last->_next = task;
  }
  task_sys.last = task;
  // should the same task be added twice or more, this at least prevents an endless loop
  task->_next = 0;
  task->run_requests++; // if added again during execution
  TRACE_TASK_RUN(task->_ix);
#if defined(CONFIG_OS) & defined(CONFIG_TASK_QUEUE_IN_THREAD)
  //#if defined(CONFIG_OS)
  // TODO PETER FIX
  // for some reason we sporadically crash when doing OS_cond_sig
  // whilst firing off tasks in IRQs which occur very frequently
  // (eg dumping pics from ADNS3000s) - only when task queue is not in
  // a thread. Fix is to put task queue in a thread or not signalling
  // the condition, but need to sort out why this happens.
  //
  // hardfault:
  //  INVPC: UsaFlt general
  //  FORCED: HardFlt SVC/BKPT within SVC

  OS_cond_signal(&task_sys.cond);
#endif
  TQ_EXIT_CRITICAL;
  exit_critical();
}
Example #30
/*
 * Report a thread usage error.
 * Not called if _THREAD_ERROR_DETECTION=0.
 * Writes message and continues execution if _THREAD_ERROR_DETECTION=1.
 * Writes message and dumps core if _THREAD_ERROR_DETECTION=2.
 */
void
thread_error(const char *msg)
{
	char buf[800];
	uberdata_t *udp;
	ulwp_t *self;
	lwpid_t lwpid;

	/* avoid recursion deadlock */
	if ((self = __curthread()) != NULL) {
		if (assert_thread == self)
			_exit(127);
		enter_critical(self);
		(void) _lwp_mutex_lock(&assert_lock);
		assert_thread = self;
		lwpid = self->ul_lwpid;
		udp = self->ul_uberdata;
	} else {
		self = NULL;
		(void) _lwp_mutex_lock(&assert_lock);
		lwpid = _lwp_self();
		udp = &__uberdata;
	}

	(void) strcpy(buf, "\n*** _THREAD_ERROR_DETECTION: "
	    "thread usage error detected ***\n*** ");
	(void) strcat(buf, msg);

	(void) strcat(buf, "\n*** calling thread is ");
	ultos((uint64_t)(uintptr_t)self, 16, buf + strlen(buf));
	(void) strcat(buf, " thread-id ");
	ultos((uint64_t)lwpid, 10, buf + strlen(buf));
	(void) strcat(buf, "\n\n");
	(void) __write(2, buf, strlen(buf));
	if (udp->uberflags.uf_thread_error_detection >= 2)
		Abort(buf);
	assert_thread = NULL;
	(void) _lwp_mutex_unlock(&assert_lock);
	if (self != NULL)
		exit_critical(self);
}