__LINK_C error_t hw_gpio_configure_interrupt(pin_id_t pin_id, gpio_inthandler_t callback, uint8_t event_mask)
{
    if((GPIO_PIN(pin_id) >= NUM_GPIOINT) || (gpio_callback[GPIO_PIN(pin_id)] != 0x00))
        return EINVAL;

    start_atomic();
    GPio_edge *TGpio = (GPio_edge *) SFRADR_GPIO_EDGE1;
    gpio_callback[GPIO_PIN(pin_id)] = callback;

    TGpio->old_in = TGpio->in;
    TGpio->edge = 0x1;                           // Clear all edges
    TGpio->level_sel |= (1 << GPIO_PIN(pin_id)); // Select pin to interrupt

    if (event_mask == GPIO_RISING_EDGE)
        TGpio->rs_edge_sel = 0x1;
    else if (event_mask == GPIO_FALLING_EDGE)
        TGpio->fl_edge_sel = 0x1;
    else
    {
        end_atomic();
        return FAIL;
    }

    end_atomic();
    return SUCCESS;
}
__LINK_C error_t ubutton_register_callback(button_id_t button_id, ubutton_callback_t callback)
{
    if(button_id >= NUM_USERBUTTONS)
        return ESIZE;
    else if (callback == 0x0)
        return EINVAL;

    uint8_t empty_index = BUTTON_QUEUE_SIZE;
    for(int i = 0; i < BUTTON_QUEUE_SIZE; i++)
    {
        if(empty_index == BUTTON_QUEUE_SIZE && buttons[button_id].callbacks[i] == 0x0)
            empty_index = i;
        else if(buttons[button_id].callbacks[i] == callback)
            return EALREADY;
    }
    if(empty_index >= BUTTON_QUEUE_SIZE)
        return ENOMEM;

    start_atomic();
    buttons[button_id].callbacks[empty_index] = callback;
    buttons[button_id].num_registered_callbacks++;
    if(buttons[button_id].num_registered_callbacks == 1)
    {
        //this is the first listener to register --> enable the GPIO interrupt
        error_t err = hw_gpio_enable_interrupt(buttons[button_id].button_id);
        assert(err == SUCCESS);
    }
    end_atomic();
    return SUCCESS;
}
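// Hedged usage sketch (not part of the original sources): how a listener might be attached
// with ubutton_register_callback(). The handler name and the use of button id 0 are
// illustrative assumptions; the error codes are the ones returned by the function above.
static void example_button_pressed(button_id_t button_id)
{
    // invoked from button_task(), i.e. from task context rather than from the ISR
    led_toggle(0);
}

static void example_register_button(void)
{
    error_t err = ubutton_register_callback(0, &example_button_pressed);
    assert(err == SUCCESS); // ESIZE/EINVAL/EALREADY/ENOMEM signal misuse or a full callback queue
}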
__LINK_C error_t hw_gpio_configure_interrupt(pin_id_t pin_id, gpio_inthandler_t callback, uint8_t event_mask)
{
    if(interrupts[pin_id.pin].interrupt_port != pin_id.port)
        return EOFF;
    else if(callback == 0x0 || event_mask > (GPIO_RISING_EDGE | GPIO_FALLING_EDGE))
        return EINVAL;

    error_t err;
    start_atomic();
    //do this check atomically: interrupts[..] callback is altered by this function
    //so the check belongs in the critical section as well
    if(interrupts[pin_id.pin].callback != 0x0 && interrupts[pin_id.pin].callback != callback)
        err = EBUSY;
    else
    {
        interrupts[pin_id.pin].callback = callback;
        GPIOINT_CallbackRegister(pin_id.pin, &gpio_int_callback);
        GPIO_IntConfig(pin_id.port, pin_id.pin,
                       !!(event_mask & GPIO_RISING_EDGE),
                       !!(event_mask & GPIO_FALLING_EDGE),
                       false);
        err = SUCCESS;
    }
    end_atomic();
    return err;
}
__LINK_C void scheduler_run()
{
    while(1)
    {
        while(NG(current_priority) < NUM_PRIORITIES)
        {
            check_structs_are_valid();
            for(uint8_t id = pop_task((NG(current_priority))); id != NO_TASK; id = pop_task(NG(current_priority)))
            {
                check_structs_are_valid();
                NG(m_info)[id].task();
            }

            //this needs to be done atomically since otherwise we risk moving to a lower priority
            //while a higher-priority task is still waiting in the queue
            start_atomic();
            if (!tasks_waiting(NG(current_priority)))
                NG(current_priority)++;
#ifndef NDEBUG
            for(int i = 0; i < NG(current_priority); i++)
                assert(!tasks_waiting(i));
#endif
            end_atomic();
        }
        hw_enter_lowpower_mode(FRAMEWORK_SCHEDULER_LP_MODE);
    }
}
__LINK_C error_t sched_cancel_task(task_t task)
{
    check_structs_are_valid();
    error_t retVal;
    start_atomic();
    uint8_t id = get_task_id(task);
    if(id == NO_TASK)
        retVal = EINVAL;
    else if(!is_scheduled(id))
        retVal = EALREADY;
    else
    {
        //unlink the task from the doubly linked queue of its priority level
        if (NG(m_info)[id].prev == NO_TASK)
            NG(m_head)[NG(m_info)[id].priority] = NG(m_info)[id].next;
        else
            NG(m_info)[NG(m_info)[id].prev].next = NG(m_info)[id].next;

        if (NG(m_info)[id].next == NO_TASK)
            NG(m_tail)[NG(m_info)[id].priority] = NG(m_info)[id].prev;
        else
            NG(m_info)[NG(m_info)[id].next].prev = NG(m_info)[id].prev;

        NG(m_info)[id].prev = NO_TASK;
        NG(m_info)[id].next = NO_TASK;
        NG(m_info)[id].priority = NOT_SCHEDULED;
        check_structs_are_valid();
        retVal = SUCCESS;
    }
    end_atomic();
    return retVal;
}
__LINK_C error_t ubutton_deregister_callback(button_id_t button_id, ubutton_callback_t callback)
{
    if(button_id >= NUM_USERBUTTONS)
        return ESIZE;
    else if (callback == 0x0)
        return EINVAL;

    uint8_t callback_index = BUTTON_QUEUE_SIZE;
    for(int i = 0; i < BUTTON_QUEUE_SIZE; i++)
    {
        if(buttons[button_id].callbacks[i] == callback)
        {
            callback_index = i;
            break;
        }
    }
    if(callback_index >= BUTTON_QUEUE_SIZE)
        return EALREADY;

    start_atomic();
    buttons[button_id].callbacks[callback_index] = 0x0;
    buttons[button_id].num_registered_callbacks--;
    if(buttons[button_id].num_registered_callbacks == 0)
    {
        //this is the last listener to deregister --> disable the GPIO interrupt
        error_t err = hw_gpio_disable_interrupt(buttons[button_id].button_id);
        assert(err == SUCCESS);
    }
    end_atomic();
    return SUCCESS;
}
__LINK_C error_t sched_register_task(task_t task)
{
    error_t retVal;
    check_structs_are_valid();
    //INT_Disable();
    start_atomic();
    if(NG(num_registered_tasks) >= NUM_TASKS)
        retVal = ENOMEM;
    else if(get_task_id(task) != NO_TASK)
        retVal = EALREADY;
    else
    {
        //insert the task into m_index, keeping the index sorted by task pointer
        for(int i = NG(num_registered_tasks); i >= 0; i--)
        {
            if (i == 0 || ((void*)NG(m_index)[i-1].task) < ((void*)task))
            {
                NG(m_index)[i].task = task;
                NG(m_index)[i].index = NG(num_registered_tasks);
                NG(m_info)[NG(m_index)[i].index].task = task;
                break;
            }
            else
            {
                NG(m_index)[i] = NG(m_index)[i-1];
            }
        }
        NG(num_registered_tasks)++;
        retVal = SUCCESS;
    }
    //INT_Enable();
    end_atomic();
    check_structs_are_valid();
    return retVal;
}
void button_task()
{
    button_id_t button_id = NUM_USERBUTTONS;
    ubutton_callback_t callback = 0x0;

    start_atomic();
    for(int i = 0; i < NUM_USERBUTTONS; i++)
    {
        //skip empty slots in this button's callback queue
        for(; buttons[i].cur_callback_id < BUTTON_QUEUE_SIZE && buttons[i].callbacks[buttons[i].cur_callback_id] == 0x0; buttons[i].cur_callback_id++)
            ;
        if(buttons[i].cur_callback_id < BUTTON_QUEUE_SIZE)
        {
            callback = buttons[i].callbacks[buttons[i].cur_callback_id];
            button_id = i;
            buttons[i].cur_callback_id++;
            break;
        }
    }
    end_atomic();

    if(button_id < NUM_USERBUTTONS && callback != 0x0)
    {
        //reschedule the task to do the next callback (if needed)
        sched_post_task(&button_task);
        callback(button_id);
    }
}
//we override __assert_func to flash the leds (so we know something bad has happened)
//and to print the error message repeatedly (so we have a chance to attach the device to a serial console before the message is gone)
void __assert_func(const char *file, int line, const char *func, const char *failedexpr)
{
    start_atomic();
    led_on(0);
    led_on(2);
    led_on(3);

    while(1)
    {
#if defined(FRAMEWORK_LOG_ENABLED)
        printf("assertion \"%s\" failed: file \"%s\", line %d%s%s\n",
               failedexpr, file, line, func ? ", function: " : "", func ? func : "");
#endif
        __BKPT(0); // break into debugger, when attached
        for(uint32_t j = 0; j < 20; j++)
        {
            //blink at twice the frequency of the _exit call, so we can identify which of the two events has occurred
            for(uint32_t i = 0; i < 0xFFFFF; i++) {}
            led_toggle(0);
            led_toggle(2);
            led_toggle(3);
        }
    }
    end_atomic();
}
void check_structs_are_valid()
{
    start_atomic();
    assert(NG(num_registered_tasks) <= NUM_TASKS);

    bool visited[NUM_TASKS];
    memset(visited, false, NUM_TASKS);
    for(int i = 0; i < NG(num_registered_tasks); i++)
    {
        assert(NG(m_index)[i].task != 0x0);
        assert(NG(m_index)[i].index != NO_TASK);
        assert(NG(m_index)[i].index < NUM_TASKS);
        assert(NG(m_info)[NG(m_index)[i].index].task == NG(m_index)[i].task);
        assert(!visited[NG(m_index)[i].index]);
        visited[NG(m_index)[i].index] = true;
    }
    for(int i = NG(num_registered_tasks); i < NUM_TASKS; i++)
    {
        assert(NG(m_index)[i].task == 0x0);
        assert(NG(m_index)[i].index == NO_TASK);
    }
    for(int i = 0; i < NUM_TASKS; i++)
        assert(visited[i] || NG(m_info)[i].task == 0x0);

    memset(visited, false, NUM_TASKS);
    for(int prio = 0; prio < NUM_PRIORITIES; prio++)
    {
        uint8_t prev_ind = NO_TASK;
        for(uint8_t cur_ind = NG(m_head)[prio]; cur_ind != NO_TASK; cur_ind = NG(m_info)[cur_ind].next)
        {
            assert(cur_ind < NUM_TASKS);
            assert(!visited[cur_ind]);
            visited[cur_ind] = true;
            assert(NG(m_info)[cur_ind].prev == prev_ind);
            if(prev_ind != NO_TASK)
                assert(NG(m_info)[prev_ind].next == cur_ind);
            else
                assert(NG(m_head)[prio] == cur_ind);
            assert(NG(m_info)[cur_ind].task != 0x0);
            assert(NG(m_info)[cur_ind].priority == prio);
            prev_ind = cur_ind;
        }
        assert(NG(m_tail)[prio] == prev_ind);
    }
    for(int i = 0; i < NUM_TASKS; i++)
    {
        assert((visited[i]) || NG(m_info)[i].priority == NOT_SCHEDULED);
    }

    assert(NG(current_priority) <= NUM_PRIORITIES);
    for(int i = 0; i < NG(current_priority); i++)
        assert(NG(m_head)[i] == NO_TASK);
    //INT_Enable();
    end_atomic();
}
bool hw_timer_is_overflow_pending(hwtimer_id_t timer_id)
{
    if(timer_id >= HWTIMER_NUM)
        return false;

    start_atomic();
    //COMP0 is used to limit the RTC to 16 bits -> use this one to check
    bool is_pending = !!((RTC_IntGet() & RTC->IEN) & RTC_IFS_COMP0);
    end_atomic();
    return is_pending;
}
bool hw_timer_is_interrupt_pending(hwtimer_id_t timer_id)
{
    if(timer_id >= HWTIMER_NUM)
        return false;

    start_atomic();
    bool is_pending = !!((RTC_IntGet() & RTC->IEN) & RTC_IFS_COMP1);
    end_atomic();
    return is_pending;
}
error_t hw_timer_cancel(hwtimer_id_t timer_id)
{
    if(timer_id >= HWTIMER_NUM)
        return ESIZE;
    if(!timer_inited)
        return EOFF;

    start_atomic();
    RTC_IntDisable(RTC_IEN_COMP1);
    RTC_IntClear(RTC_IEN_COMP1);
    end_atomic();
    return SUCCESS;
}
__LINK_C bool sched_is_scheduled(task_t task)
{
    //INT_Disable();
    start_atomic();
    uint8_t task_id = get_task_id(task);
    bool retVal = false;
    if(task_id != NO_TASK)
        retVal = is_scheduled(task_id);
    //INT_Enable();
    end_atomic();
    return retVal;
}
//Override _exit so we don't get a fault that's impossible to debug
void _exit(int exit)
{
    start_atomic();
    //wait forever while the interrupts are disabled
    while(1)
    {
        //blink the leds so we know _exit has been called
        for(uint32_t i = 0; i < 0x1FFFFF; i++) {}
        led_toggle(0);
        led_toggle(1);
    }
    end_atomic();
}
__LINK_C error_t timer_post_task_prio(task_t task, timer_tick_t fire_time, uint8_t priority)
{
    error_t status = ENOMEM;
    if(priority > MIN_PRIORITY)
        return EINVAL;

    start_atomic();
    uint32_t empty_index = FRAMEWORK_TIMER_STACK_SIZE;
    for(uint32_t i = 0; i < FRAMEWORK_TIMER_STACK_SIZE; i++)
    {
        if(NG(timers)[i].f == 0x0 && empty_index == FRAMEWORK_TIMER_STACK_SIZE)
        {
            empty_index = i;
        }
        else if(NG(timers)[i].f == task)
        {
            //for now: do not allow an event to be scheduled more than once,
            //otherwise we risk having the same task being scheduled twice and only executed once
            //because the scheduler disallows the same task to be scheduled multiple times
            status = EALREADY;
            break;
        }
    }
    if(status != EALREADY && empty_index != FRAMEWORK_TIMER_STACK_SIZE)
    {
        bool timers_reset = reset_timers();
        NG(timers)[empty_index].f = task;
        NG(timers)[empty_index].next_event = fire_time;
        NG(timers)[empty_index].priority = priority;

        //if there is no event scheduled, this event will run before the next scheduled event,
        //or we reset the timers: trigger a reconfiguration of the next scheduled event
        bool do_config = NG(next_event) == NO_EVENT || timers_reset;
        if(!do_config)
        {
            uint32_t counter = timer_get_counter_value();
            //if the new event should fire sooner than the old event --> trigger reconfig
            //this is done using signed ints (compared to the current counter)
            //to ensure proper handling of timer overflows
            int32_t next_fire_delay = ((int32_t)fire_time) - ((int32_t)counter);
            int32_t old_fire_delay = ((int32_t)NG(timers)[NG(next_event)].next_event) - ((int32_t)counter);
            do_config = next_fire_delay < old_fire_delay;
        }
        if(do_config)
            configure_next_event();
        status = SUCCESS;
    }
    end_atomic();
    return status;
}
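// Minimal standalone sketch (an assumption, not taken from the original sources) showing why the
// signed-delta comparison used above keeps working when the 32-bit tick counter wraps: both fire
// times are turned into signed delays relative to the current counter value, so an event scheduled
// just before the wrap still sorts ahead of one scheduled just after it. It relies on the usual
// two's-complement uint32 -> int32 conversion of the targets in question.
#include <stdint.h>
#include <assert.h>

static int fires_sooner(uint32_t a, uint32_t b, uint32_t counter)
{
    int32_t delay_a = (int32_t)a - (int32_t)counter;
    int32_t delay_b = (int32_t)b - (int32_t)counter;
    return delay_a < delay_b;
}

int main(void)
{
    // counter is about to wrap: event 'a' fires 10 ticks from now (still before the wrap),
    // event 'b' fires 32 ticks from now (just after the wrap) -> 'a' is the sooner one
    assert(fires_sooner(0xFFFFFFFAu, 0x00000010u, 0xFFFFFFF0u));
    // a plain unsigned comparison of the raw fire times would order them the wrong way round
    assert(!(0xFFFFFFFAu < 0x00000010u));
    return 0;
}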
__LINK_C timer_tick_t timer_get_counter_value()
{
    timer_tick_t counter;
    start_atomic();
    counter = NG(timer_offset) + hw_timer_getvalue(HW_TIMER_ID);
    //increase the counter with COUNTER_OVERFLOW_INCREASE if an overflow is pending.
    //(This is to compensate for the fact that NG(timer_offset) is not updated
    //until the overflow interrupt has actually fired.)
    if(hw_timer_is_overflow_pending(HW_TIMER_ID))
        counter += COUNTER_OVERFLOW_INCREASE;
    end_atomic();
    return counter;
}
static void gpio_int_callback(uint8_t pin)
{
    //we use emlib's GPIO interrupt handler, which does NOT disable the interrupts
    //by default --> disable them here to get the same behavior !!
    start_atomic();
    assert(interrupts[pin].callback != 0x0);
    pin_id_t id = { interrupts[pin].interrupt_port, pin };
    //report an event_mask of '0' since the only way to check which event occurred
    //is to check the state of the pin from the interrupt handler, and
    //since the execution of interrupt handlers may be 'delayed', this method is NOT reliable.
    // TODO find out whether there is a way to do this reliably on efm32gg
    interrupts[pin].callback(id, 0);
    end_atomic();
}
__LINK_C error_t hw_gpio_disable_interrupt(pin_id_t pin_id)
{
    start_atomic();
    GPio_edge *TGpio = (GPio_edge*) SFRADR_GPIO_EDGE1;
    // update mask register
    TGpio->mask &= (0x0ffffffff ^ (1 << GPIO_PIN(pin_id)));
    //DPRINT ("Disable: mask = %08x pin_id %04x in %02x", gpio_edge->mask, GPIO_PIN(pin_id), gpio_edge->in);
    end_atomic();
    return SUCCESS;
}
error_t hw_timer_schedule(hwtimer_id_t timer_id, hwtimer_tick_t tick)
{
    if(timer_id >= HWTIMER_NUM)
        return ESIZE;
    if(!timer_inited)
        return EOFF;

    start_atomic();
    RTC_IntDisable(RTC_IEN_COMP1);
    RTC_CompareSet(1, tick);
    RTC_IntClear(RTC_IEN_COMP1);
    RTC_IntEnable(RTC_IEN_COMP1);
    end_atomic();
    return SUCCESS;
}
error_t hw_timer_counter_reset(hwtimer_id_t timer_id)
{
    if(timer_id >= HWTIMER_NUM)
        return ESIZE;
    if(!timer_inited)
        return EOFF;

    start_atomic();
    RTC_IntDisable(RTC_IEN_COMP0 | RTC_IEN_COMP1);
    RTC_IntClear(RTC_IEN_COMP0 | RTC_IEN_COMP1);
    RTC_CounterReset();
    RTC_IntEnable(RTC_IEN_COMP0);
    end_atomic();
    return SUCCESS;
}
//we override __assert_func to flash the leds (so we know something bad has happened)
//and to print the error message repeatedly (so we have a chance to attach the device to a serial console before the message is gone)
void __assert_func(const char *file, int line, const char *func, const char *failedexpr)
{
    start_atomic();
    led_on(0);
    led_on(1);
    while(1)
    {
        printf("assertion \"%s\" failed: file \"%s\", line %d%s%s\n",
               failedexpr, file, line, func ? ", function: " : "", func ? func : "");
        for(uint32_t j = 0; j < 20; j++)
        {
            //blink at twice the frequency of the _exit call, so we can identify which of the two events has occurred
            for(uint32_t i = 0; i < 0xFFFFF; i++) {}
            led_toggle(0);
            led_toggle(1);
        }
    }
    end_atomic();
}
__LINK_C bool timer_is_task_scheduled(task_t task)
{
    bool present = false;
    start_atomic();
    for (uint32_t i = 0; i < FRAMEWORK_TIMER_STACK_SIZE; i++)
    {
        if (NG(timers)[i].f == task)
        {
            present = true;
            break;
        }
    }
    end_atomic();
    return present;
}
//we override __assert_func to flash the leds (so we know something bad has happened)
//and to print the error message repeatedly (so we have a chance to attach the device to a serial console before the message is gone)
void __assert_func(const char *file, int line, const char *func, const char *failedexpr)
{
#if defined FRAMEWORK_DEBUG_ASSERT_REBOOT
    // make sure this parameter is used also when including assert.h instead of debug.h
    hw_reset();
#endif

    start_atomic();
    led_on(0);
    led_on(1);

#ifdef PLATFORM_USE_USB_CDC
    // Disable all IRQs except the one for USB
    for(uint32_t j = 0; j < EMU_IRQn; j++)
        NVIC_DisableIRQ(j);
    NVIC_EnableIRQ(USB_IRQn);
    end_atomic();
#endif

    lcd_clear();
    lcd_write_string("ERROR");
    lcd_write_number(timer_get_counter_value());
    __asm__("BKPT"); // break into debugger

    while(1)
    {
        printf("assertion \"%s\" failed: file \"%s\", line %d%s%s\n",
               failedexpr, file, line, func ? ", function: " : "", func ? func : "");
        for(uint32_t j = 0; j < 20; j++)
        {
            //blink at twice the frequency of the _exit call, so we can identify which of the two events has occurred
            for(uint32_t i = 0; i < 0xFFFFF; i++) {}
            led_toggle(0);
            led_toggle(1);
        }
    }
    end_atomic();
}
/**************************************************************************//**
 * @brief Enables LFACLK and selects LFXO as clock source for RTC.
 *        Sets up the RTC to count at 1024 Hz.
 *        The counter is cleared on a compare 0 match (compare 0 is used to
 *        limit the RTC to 16 bits) and keeps running.
 *        Interrupts are cleared and enabled before the counter is started.
 *****************************************************************************/
error_t hw_timer_init(hwtimer_id_t timer_id, uint8_t frequency, timer_callback_t compare_callback, timer_callback_t overflow_callback)
{
    if(timer_id >= HWTIMER_NUM)
        return ESIZE;
    if(timer_inited)
        return EALREADY;
    if(frequency != HWTIMER_FREQ_1MS && frequency != HWTIMER_FREQ_32K)
        return EINVAL;

    start_atomic();
    compare_f = compare_callback;
    overflow_f = overflow_callback;
    timer_inited = true;

    /* Configuring clocks in the Clock Management Unit (CMU) */
    startLfxoForRtc(frequency);

    RTC_Init_TypeDef rtcInit = RTC_INIT_DEFAULT;
    rtcInit.enable   = false; /* Don't enable RTC after init has run */
    rtcInit.comp0Top = true;  /* Clear counter on compare 0 match: cmp 0 is used to limit the value of the rtc to 0xffff */
    rtcInit.debugRun = false; /* Counter shall not keep running during debug halt. */

    /* Initialize the RTC */
    RTC_Init(&rtcInit);

    //disable all rtc interrupts while we're still configuring
    RTC_IntDisable(RTC_IEN_OF | RTC_IEN_COMP0 | RTC_IEN_COMP1);
    RTC_IntClear(RTC_IFC_OF | RTC_IFC_COMP0 | RTC_IFC_COMP1);
    //Set maximum value for the RTC
    RTC_CompareSet(0, 0x0000FFFF);

    RTC_CounterReset();
    RTC_IntEnable(RTC_IEN_COMP0);
    NVIC_EnableIRQ(RTC_IRQn);
    RTC_Enable(true);
    end_atomic();
    return SUCCESS;
}
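// Hedged bring-up sketch (an assumption, not taken from the original sources): initialising the
// RTC-backed hardware timer and arming the compare interrupt. HW_TIMER_ID, the handler names and
// the assumption that timer_callback_t takes no arguments are illustrative.
static void example_on_compare(void)  { /* COMP1 matched */ }
static void example_on_overflow(void) { /* the 16-bit counter wrapped (COMP0) */ }

static void example_setup_hw_timer(void)
{
    error_t err = hw_timer_init(HW_TIMER_ID, HWTIMER_FREQ_1MS, &example_on_compare, &example_on_overflow);
    assert(err == SUCCESS);

    // arm COMP1 at tick 1024 of the 16-bit counter, i.e. about one second after
    // the counter reset when it counts at 1024 Hz
    err = hw_timer_schedule(HW_TIMER_ID, 1024);
    assert(err == SUCCESS);
}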
static uint8_t pop_task(int priority)
{
    uint8_t id = NO_TASK;
    check_structs_are_valid();
    start_atomic();
    if (NG(m_head)[priority] != NO_TASK)
    {
        id = NG(m_head)[priority];
        NG(m_head)[priority] = NG(m_info)[NG(m_head)[priority]].next;
        if(NG(m_head)[priority] == NO_TASK)
            NG(m_tail)[priority] = NO_TASK;
        else
            NG(m_info)[NG(m_head)[priority]].prev = NO_TASK;

        NG(m_info)[id].next = NO_TASK;
        NG(m_info)[id].prev = NO_TASK;
        NG(m_info)[id].priority = NOT_SCHEDULED;
    }
    end_atomic();
    check_structs_are_valid();
    return id;
}
__LINK_C error_t timer_cancel_task(task_t task)
{
    error_t status = EALREADY;
    start_atomic();
    for(uint32_t i = 0; i < FRAMEWORK_TIMER_STACK_SIZE; i++)
    {
        if(NG(timers)[i].f == task)
        {
            NG(timers)[i].f = 0x0;
            //if we were the first event to fire --> trigger a reconfiguration
            if(NG(next_event) == i)
                configure_next_event();
            status = SUCCESS;
            break;
        }
    }
    end_atomic();
    return status;
}
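// Hedged usage sketch (an assumption, not from the original sources): posting a task on the
// software timer and cancelling it again. example_timed_task and the 1024-tick delay are
// illustrative; fire times are absolute counter values, hence the addition to the current value.
// Before the timer actually fires, the task would also need to be registered with the scheduler.
static void example_timed_task(void) { }

static void example_timer_usage(void)
{
    timer_tick_t fire_at = timer_get_counter_value() + 1024;
    error_t err = timer_post_task_prio(&example_timed_task, fire_at, MIN_PRIORITY);
    assert(err == SUCCESS);

    // a later cancel removes the pending event and reconfigures the next one if needed
    err = timer_cancel_task(&example_timed_task);
    assert(err == SUCCESS);
}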
__LINK_C error_t hw_gpio_set_edge_interrupt(pin_id_t pin_id, uint8_t edge)
{
    GPio_edge *TGpio = (GPio_edge*) PORT_BASE(pin_id);

    start_atomic();
    TGpio->mask &= (0x0ffffffff ^ (1 << GPIO_PIN(pin_id)));
    TGpio->edge = 0x1;                           // Clear all edges
    TGpio->old_in = TGpio->in;
    TGpio->level_sel |= (1 << GPIO_PIN(pin_id)); // Select pin to interrupt
    if (edge == GPIO_RISING_EDGE)
        TGpio->rs_edge_sel = 0x1;
    else
        TGpio->fl_edge_sel = 0x1;
    //DPRINT ("id %04x edge %d level_sel = %02x in %02x old %02x", GPIO_PIN(pin_id), edge, gpio_edge->level_sel, gpio_edge->in, gpio_edge->old_in);
    end_atomic();
    return SUCCESS;
}
__LINK_C error_t sched_post_task_prio(task_t task, uint8_t priority)
{
    error_t retVal;
    start_atomic();
    check_structs_are_valid();
    uint8_t task_id = get_task_id(task);
    if(task_id == NO_TASK)
        retVal = EINVAL;
    else if(priority > MIN_PRIORITY || priority < MAX_PRIORITY)
        retVal = ESIZE;
    else if (is_scheduled(task_id))
        retVal = EALREADY;
    else
    {
        //append the task to the tail of the queue for its priority level
        if(NG(m_head)[priority] == NO_TASK)
        {
            NG(m_head)[priority] = task_id;
            NG(m_tail)[priority] = task_id;
        }
        else
        {
            NG(m_info)[NG(m_tail)[priority]].next = task_id;
            NG(m_info)[task_id].prev = NG(m_tail)[priority];
            NG(m_tail)[priority] = task_id;
        }
        NG(m_info)[task_id].priority = priority;
        //if our priority is higher than the currently known maximum priority
        if(priority < NG(current_priority))
            NG(current_priority) = priority;
        check_structs_are_valid();
        retVal = SUCCESS;
    }
    end_atomic();
    check_structs_are_valid();
    return retVal;
}
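// Hedged usage sketch (an assumption, not from the original sources): a task has to be registered
// once with sched_register_task() before it can be posted; sched_post_task_prio() then queues it
// at the requested priority and scheduler_run() pops and executes it. example_task is illustrative.
static void example_task(void)
{
    // runs to completion from scheduler_run()
}

static void example_bootstrap(void)
{
    error_t err = sched_register_task(&example_task);
    assert(err == SUCCESS);

    err = sched_post_task_prio(&example_task, MIN_PRIORITY);
    assert(err == SUCCESS);
}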
/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool nonblock)
{
	struct msm_drm_private *priv = dev->dev_private;
	int nplanes = dev->mode_config.num_total_plane;
	int ncrtcs = dev->mode_config.num_crtc;
	struct msm_commit *c;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	c = commit_init(state);
	if (!c) {
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Figure out what crtcs we have:
	 */
	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];
		if (!crtc)
			continue;
		c->crtc_mask |= (1 << drm_crtc_index(crtc));
	}

	/*
	 * Figure out what fence to wait for:
	 */
	for (i = 0; i < nplanes; i++) {
		struct drm_plane *plane = state->planes[i];
		struct drm_plane_state *new_state = state->plane_states[i];

		if (!plane)
			continue;

		if ((plane->state->fb != new_state->fb) && new_state->fb) {
			struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
			struct msm_gem_object *msm_obj = to_msm_bo(obj);

			new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
		}
	}

	/*
	 * Wait for pending updates on any of the same crtc's and then
	 * mark our set of crtc's as busy:
	 */
	ret = start_atomic(dev->dev_private, c->crtc_mask);
	if (ret) {
		kfree(c);
		goto error;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */
	drm_atomic_helper_swap_state(dev, state);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */
	if (nonblock) {
		queue_work(priv->atomic_wq, &c->work);
		return 0;
	}

	complete_commit(c, false);

	return 0;

error:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}