void GKI_delay (UINT32 timeout)
{
    UINT8 rtask = GKI_get_taskid();
    struct timespec delay;
    int err;

    GKI_TRACE("GKI_delay %d %d", (int)rtask, (int)timeout);

    delay.tv_sec = timeout / 1000;
    delay.tv_nsec = 1000 * 1000 * (timeout%1000);

    /* [u]sleep can't be used because it uses SIGALRM */

    do {
        err = nanosleep(&delay, &delay);
    } while (err < 0 && errno == EINTR);

    /* Check if the task was killed while sleeping */
    /* NOTE: if you do not implement task killing, you do not need this check */
    if (rtask && gki_cb.com.OSRdyTbl[rtask] == TASK_DEAD)
    {
        /* the task was killed while sleeping; nothing to do in this port */
    }

    GKI_TRACE("GKI_delay %d %d done", (int)rtask, (int)timeout);

    return;
}
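/* Illustrative usage sketch (not part of the port): polling a condition with GKI_delay().
 * EXAMPLE_POLL_INTERVAL_MS and example_condition_met() are hypothetical names used only here. */
#if 0
#define EXAMPLE_POLL_INTERVAL_MS    50

static void example_poll_with_delay (void)
{
    /* sleep in EXAMPLE_POLL_INTERVAL_MS slices until the (hypothetical) condition is met */
    while (!example_condition_met())
    {
        GKI_delay(EXAMPLE_POLL_INTERVAL_MS);
    }
}
#endif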
static void gki_set_timer_scheduling( void )
{
    pid_t               main_pid = getpid();
    struct sched_param  param;
    int                 policy;

    policy = sched_getscheduler(main_pid);

    if ( policy != -1 )
    {
        GKI_TRACE("gki_set_timer_scheduling()::scheduler current policy: %d", policy);

        /* ensure the highest priority in the system, +2 to leave room for the read threads */
        param.sched_priority = GKI_LINUX_TIMER_TICK_PRIORITY;

        if ( 0 != sched_setscheduler(main_pid, GKI_LINUX_TIMER_POLICY, &param ) )
        {
            GKI_TRACE("sched_setscheduler() failed with error: %d", errno);
        }
    }
    else
    {
        GKI_TRACE( "sched_getscheduler() failed: %d", errno);
    }
}
/*******************************************************************************
**
** Function         GKI_resume_task()
**
** Description      This function resumes the task specified in the argument.
**
** Parameters:      task_id  - (input) the id of the task that is to be resumed
**
** Returns          GKI_SUCCESS if all OK
**
** NOTE             This function is NOT called by the Broadcom stack and
**                  profiles. If you want to implement task suspension capability,
**                  put specific code here.
**
*******************************************************************************/
UINT8 GKI_resume_task (UINT8 task_id)
{
    GKI_TRACE("GKI_resume_task %d - NOT implemented", task_id);


    GKI_TRACE("GKI_resume_task %d done", task_id);

    return (GKI_SUCCESS);
}
INT8 *GKI_map_taskname (UINT8 task_id)
{
    GKI_TRACE("GKI_map_taskname %d", task_id);

    if (task_id < GKI_MAX_TASKS)
    {
        GKI_TRACE("GKI_map_taskname %d %s done", task_id, gki_cb.com.OSTName[task_id]);
        return (gki_cb.com.OSTName[task_id]);
    }
    else if (task_id == GKI_MAX_TASKS )
    {
        return (gki_cb.com.OSTName[GKI_get_taskid()]);
    }
    else
    {
        return (INT8*)"BAD";
    }
}
/*******************************************************************************
**
** Function         GKI_get_taskid
**
** Description      This function gets the currently running task ID.
**
** Returns          task ID
**
** NOTE             The Broadcom upper stack and profiles may run as a single task.
**                  If you only have one GKI task, then you can hard-code this
**                  function to return a '1'. Otherwise, you should have some
**                  OS-specific method to determine the current task.
**
*******************************************************************************/
UINT8 GKI_get_taskid (void)
{
    int i;

    pthread_t thread_id = pthread_self( );

    GKI_TRACE("GKI_get_taskid %x", (int)thread_id);

    for (i = 0; i < GKI_MAX_TASKS; i++) {
        if (gki_cb.os.thread_id[i] == thread_id) {
            //GKI_TRACE("GKI_get_taskid %x %d done", thread_id, i);
            return(i);
        }
    }

    GKI_TRACE("GKI_get_taskid: task id = -1");

    return(-1);
}
UINT8 GKI_send_event (UINT8 task_id, UINT16 event)
{
    GKI_TRACE("GKI_send_event %d %x", task_id, event);

    if (task_id < GKI_MAX_TASKS)
    {
        /* protect OSWaitEvt[task_id] from manipulation in GKI_wait() */
        pthread_mutex_lock(&gki_cb.os.thread_evt_mutex[task_id]);

        /* Set the event bit */
        gki_cb.com.OSWaitEvt[task_id] |= event;

        pthread_cond_signal(&gki_cb.os.thread_evt_cond[task_id]);

        pthread_mutex_unlock(&gki_cb.os.thread_evt_mutex[task_id]);

        GKI_TRACE("GKI_send_event %d %x done", task_id, event);
        return ( GKI_SUCCESS );
    }
    GKI_TRACE("############## GKI_send_event FAILED!! ##################");
    return (GKI_FAILURE);
}
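/* Illustrative usage sketch (not part of the port): signalling an application event to another task.
 * EXAMPLE_TASK_ID and EXAMPLE_APP_EVT_MASK are hypothetical values; a real caller would use the
 * task id passed to GKI_create_task() and one of the stack's event masks. */
#if 0
#define EXAMPLE_TASK_ID         3
#define EXAMPLE_APP_EVT_MASK    0x0010

static void example_notify_task (void)
{
    if (GKI_send_event(EXAMPLE_TASK_ID, EXAMPLE_APP_EVT_MASK) != GKI_SUCCESS)
    {
        GKI_TRACE("example_notify_task: GKI_send_event failed");
    }
}
#endif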
void GKI_exception (UINT16 code, char *msg)
{
    UINT8 task_id;
    int i = 0;
    FREE_QUEUE_T  *Q;
    tGKI_COM_CB *p_cb = &gki_cb.com;

    ALOGE( "GKI_exception(): Task State Table");

    for(task_id = 0; task_id < GKI_MAX_TASKS; task_id++)
    {
        ALOGE( "TASK ID [%d] task name [%s] state [%d]",
                         task_id,
                         gki_cb.com.OSTName[task_id],
                         gki_cb.com.OSRdyTbl[task_id]);
    }

    ALOGE("GKI_exception %d %s", code, msg);
    ALOGE( "********************************************************************");
    ALOGE( "* GKI_exception(): %d %s", code, msg);
    ALOGE( "********************************************************************");

#if 0//(GKI_DEBUG == TRUE)
    GKI_disable();

    if (gki_cb.com.ExceptionCnt < GKI_MAX_EXCEPTION)
    {
        EXCEPTION_T *pExp;

        pExp =  &gki_cb.com.Exception[gki_cb.com.ExceptionCnt++];
        pExp->type = code;
        pExp->taskid = GKI_get_taskid();
        strncpy((char *)pExp->msg, msg, GKI_MAX_EXCEPTION_MSGLEN - 1);
    }

    GKI_enable();
#endif
    if (code == GKI_ERROR_OUT_OF_BUFFERS)
    {
        for(i=0; i<p_cb->curr_total_no_of_pools; i++)
        {
            Q = &p_cb->freeq[p_cb->pool_list[i]];
            if (Q != NULL)
                ALOGE("GKI_exception Buffer current cnt:%x, Total:%x", Q->cur_cnt, Q->total);
        }
    }

    GKI_TRACE("GKI_exception %d %s done", code, msg);
    return;
}
static BOOLEAN gki_alloc_free_queue(UINT8 id)
{
    FREE_QUEUE_T  *Q;
    tGKI_COM_CB *p_cb = &gki_cb.com;
    GKI_TRACE("\ngki_alloc_free_queue in, id:%d \n", (int)id );

    Q = &p_cb->freeq[p_cb->pool_list[id]];

    if(Q->p_first == 0)
    {
        void* p_mem = GKI_os_malloc((Q->size + BUFFER_PADDING_SIZE) * Q->total);
        if(p_mem)
        {
            //re-initialize the queue with allocated memory
            GKI_TRACE("\ngki_alloc_free_queue calling gki_init_free_queue, id:%d  size:%d, total:%d\n", id, Q->size, Q->total);
            gki_init_free_queue(id, Q->size, Q->total, p_mem);
            GKI_TRACE("\ngki_alloc_free_queue ret OK, id:%d  size:%d, total:%d\n", id, Q->size, Q->total);
            return TRUE;
        }
        GKI_exception (GKI_ERROR_BUF_SIZE_TOOBIG, "gki_alloc_free_queue: Not enough memory");
    }
    GKI_TRACE("\ngki_alloc_free_queue out failed, id:%d\n", id);
    return FALSE;
}
void GKI_exception (UINT16 code, char *msg)
{
    UINT8 task_id;
    int i = 0;

    GKI_ERROR_LOG( "GKI_exception(): Task State Table\n");

    for(task_id = 0; task_id < GKI_MAX_TASKS; task_id++)
    {
        GKI_ERROR_LOG( "TASK ID [%d] task name [%s] state [%d]\n",
                         task_id,
                         gki_cb.com.OSTName[task_id],
                         gki_cb.com.OSRdyTbl[task_id]);
    }

    GKI_ERROR_LOG("GKI_exception %d %s", code, msg);
    GKI_ERROR_LOG( "\n********************************************************************\n");
    GKI_ERROR_LOG( "* GKI_exception(): %d %s\n", code, msg);
    GKI_ERROR_LOG( "********************************************************************\n");

#if 0//(GKI_DEBUG == TRUE)
    GKI_disable();

    if (gki_cb.com.ExceptionCnt < GKI_MAX_EXCEPTION)
    {
        EXCEPTION_T *pExp;

        pExp =  &gki_cb.com.Exception[gki_cb.com.ExceptionCnt++];
        pExp->type = code;
        pExp->taskid = GKI_get_taskid();
        strncpy((char *)pExp->msg, msg, GKI_MAX_EXCEPTION_MSGLEN - 1);
    }

    GKI_enable();
#endif

    GKI_TRACE("GKI_exception %d %s done", code, msg);
    return;
}
/*******************************************************************************
**
** Function         GKI_wait
**
** Description      This function is called by tasks to wait for a specific
**                  event or set of events. The task may specify the duration
**                  that it wants to wait for, or 0 if infinite.
**
** Parameters:      flag -    (input) the event or set of events to wait for
**                  timeout - (input) the duration that the task wants to wait
**                                    for the specific events (in system ticks)
**
**
** Returns          the event mask of received events or zero if timeout
**
*******************************************************************************/
UINT16 GKI_wait (UINT16 flag, UINT32 timeout)
{
    UINT16 evt;
    UINT8 rtask;
    struct timespec abstime = { 0, 0 };

    int sec;
    int nano_sec;

    rtask = GKI_get_taskid();

    GKI_TRACE("GKI_wait %d %x %d", (int)rtask, (int)flag, (int)timeout);

    gki_cb.com.OSWaitForEvt[rtask] = flag;

    /* protect OSWaitEvt[rtask] from modification by another thread */
    pthread_mutex_lock(&gki_cb.os.thread_evt_mutex[rtask]);

    if (!(gki_cb.com.OSWaitEvt[rtask] & flag))
    {
        if (timeout)
        {
            clock_gettime(CLOCK_MONOTONIC, &abstime);

            /* add timeout */
            sec = timeout / 1000;
            nano_sec = (timeout % 1000) * NANOSEC_PER_MILLISEC;
            abstime.tv_nsec += nano_sec;
            if (abstime.tv_nsec >= NSEC_PER_SEC)
            {
                abstime.tv_sec += (abstime.tv_nsec / NSEC_PER_SEC);
                abstime.tv_nsec = abstime.tv_nsec % NSEC_PER_SEC;
            }
            abstime.tv_sec += sec;

            pthread_cond_timedwait_monotonic(&gki_cb.os.thread_evt_cond[rtask],
                    &gki_cb.os.thread_evt_mutex[rtask], &abstime);

        }
        else
        {
            pthread_cond_wait(&gki_cb.os.thread_evt_cond[rtask], &gki_cb.os.thread_evt_mutex[rtask]);
        }

        /* TODO: check whether this is actually needed; it depends on the pthread_cond_wait()
           implementation, e.g. if the condition is implemented as a counter, multiple
           cond_signal calls should NOT be lost. */

        /* We are waking up after waiting for some events, so refresh the variables.
           No need to call GKI_disable() here: we know there are events, since we woke up
           either because the condition was signalled or because the timeout expired. */

        if (gki_cb.com.OSTaskQFirst[rtask][0])
            gki_cb.com.OSWaitEvt[rtask] |= TASK_MBOX_0_EVT_MASK;
        if (gki_cb.com.OSTaskQFirst[rtask][1])
            gki_cb.com.OSWaitEvt[rtask] |= TASK_MBOX_1_EVT_MASK;
        if (gki_cb.com.OSTaskQFirst[rtask][2])
            gki_cb.com.OSWaitEvt[rtask] |= TASK_MBOX_2_EVT_MASK;
        if (gki_cb.com.OSTaskQFirst[rtask][3])
            gki_cb.com.OSWaitEvt[rtask] |= TASK_MBOX_3_EVT_MASK;

        if (gki_cb.com.OSRdyTbl[rtask] == TASK_DEAD)
        {
            gki_cb.com.OSWaitEvt[rtask] = 0;
            /* unlock thread_evt_mutex, as pthread_cond_wait() re-acquires the mutex when the condition is met */
            pthread_mutex_unlock(&gki_cb.os.thread_evt_mutex[rtask]);
            return (EVENT_MASK(GKI_SHUTDOWN_EVT));
        }
    }

    /* Clear the wait for event mask */
    gki_cb.com.OSWaitForEvt[rtask] = 0;

    /* Return only those bits which user wants... */
    evt = gki_cb.com.OSWaitEvt[rtask] & flag;

    /* Clear only those bits which user wants... */
    gki_cb.com.OSWaitEvt[rtask] &= ~flag;

    /* unlock thread_evt_mutex, as pthread_cond_wait() re-acquires the mutex when the condition is met */
    pthread_mutex_unlock(&gki_cb.os.thread_evt_mutex[rtask]);

    GKI_TRACE("GKI_wait %d %x %d %x done", (int)rtask, (int)flag, (int)timeout, (int)evt);
    return (evt);
}
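/* Illustrative usage sketch (not part of the port): a task main loop built around GKI_wait().
 * EXAMPLE_APP_EVT_MASK and example_process_mbox_0() are hypothetical; the mailbox and shutdown
 * masks are the ones referenced elsewhere in this file. */
#if 0
#define EXAMPLE_APP_EVT_MASK    0x0010

static void example_task_main_loop (void)
{
    UINT16 event;

    for (;;)
    {
        /* wait forever (timeout = 0) for a mailbox 0 message or the hypothetical application event */
        event = GKI_wait(TASK_MBOX_0_EVT_MASK | EXAMPLE_APP_EVT_MASK, 0);

        if (event & EVENT_MASK(GKI_SHUTDOWN_EVT))
            break;                      /* the task is being shut down, see GKI_shutdown() */

        if (event & TASK_MBOX_0_EVT_MASK)
            example_process_mbox_0();   /* hypothetical mailbox handler */
    }
}
#endif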
void GKI_run (void *p_task_id)
{
    struct timespec delay;
    int err;
    volatile int * p_run_cond = &gki_cb.os.no_timer_suspend;

#ifndef GKI_NO_TICK_STOP
    /* adjust btld scheduling scheme now */
    gki_set_timer_scheduling();

    /* register the start/stop function which disables the timer loop in GKI_run() when no timers
     * are active in GKI/BTA/BTU; this should save power when BTLD is idle! */
    GKI_timer_queue_register_callback( gki_system_tick_start_stop_cback );
    GKI_TRACE( "GKI_run(): Start/Stop GKI_timer_update_registered!" );
#endif

#ifdef NO_GKI_RUN_RETURN
    pthread_attr_t timer_attr;

    shutdown_timer = 0;

    pthread_attr_init(&timer_attr);
    if (pthread_create( &timer_thread_id,
              &timer_attr,
              timer_thread,
              NULL) != 0 )
    {
        GKI_ERROR_LOG("pthread_create failed to create timer_thread!\n\r");
        return;
    }

#else
    GKI_TRACE("GKI_run ");
    for (;;)
    {
        do
        {
            /* adjust the heartbeat tick in btld by changing TICKS_PER_SEC; this formula only works
             * for 1-1000 ms heartbeat units! */
            delay.tv_sec = LINUX_SEC / 1000;
            delay.tv_nsec = 1000 * 1000 * (LINUX_SEC % 1000);

            /* [u]sleep can't be used because it uses SIGALRM */
            do
            {
                err = nanosleep(&delay, &delay);
            } while (err < 0 && errno == EINTR);

            /* the unit should always be 1 (1 tick). Only if, for some reason, you vary the heartbeat
             * tick (e.g. for power saving) may you want to provide more ticks.
             */
            GKI_timer_update( 1 );
            /* BT_TRACE_2( TRACE_LAYER_HCI, TRACE_TYPE_DEBUG, "update: tv_sec: %d, tv_nsec: %d", delay.tv_sec, delay.tv_nsec ); */
        } while ( GKI_TIMER_TICK_RUN_COND == *p_run_cond );

        /* currently the only reason to exit the loop above is no_timer_suspend == GKI_TIMER_TICK_STOP_COND;
         * block the timer main thread until it is re-armed (gki_timer_cond signalled) */

        GKI_TIMER_TRACE(">>> SUSPENDED GKI_timer_update()" );

        pthread_mutex_lock( &gki_cb.os.gki_timer_mutex );
        pthread_cond_wait( &gki_cb.os.gki_timer_cond, &gki_cb.os.gki_timer_mutex );
        pthread_mutex_unlock( &gki_cb.os.gki_timer_mutex );

        /* we may need to adjust gki_cb.com.OSTicks here */
        GKI_TIMER_TRACE(">>> RESTARTED GKI_timer_update(): run_cond: %d",
                    *p_run_cond );

    }
#endif
    return;
}
void GKI_run(void)
{
    /* adjust btld scheduling scheme now */
    gki_set_timer_scheduling();
    GKI_TRACE( "GKI_run(): Start/Stop GKI_timer_update_registered!" );
}
void* timer_thread(void *arg)
{
    int timeout_ns=0;
    struct timespec timeout;
    struct timespec previous = {0,0};
    struct timespec current;
    int err;
    int delta_ns;
    int restart;
    tGKI_OS         *p_os = &gki_cb.os;
    int  *p_run_cond = &p_os->no_timer_suspend;

    /* Indicate that tick is just starting */
    restart = 1;

    prctl(PR_SET_NAME, (unsigned long)"gki timer", 0, 0, 0);

    raise_priority_a2dp(TASK_HIGH_GKI_TIMER);

    while(!shutdown_timer)
    {
        /* If the timer has been stopped (no SW timer running) */
        if (*p_run_cond == GKI_TIMER_TICK_STOP_COND)
        {
            /*
             * We will lock/wait on GKI_timer_mutex.
             * This mutex will be unlocked when timer is re-started
             */
            GKI_TRACE("timer_thread lock mutex");
            pthread_mutex_lock(&p_os->gki_timer_mutex);

            /* We are here because the mutex has been released by timer cback */
            /* Let's release it for future use */
            GKI_TRACE("timer_thread unlock mutex");
            pthread_mutex_unlock(&p_os->gki_timer_mutex);

            /* Indicate that tick is just starting */
            restart = 1;
        }

        /* Get time */
        clock_gettime(CLOCK_MONOTONIC, &current);

        /* Check if tick was just restarted, indicating to the compiler that this is
         * unlikely to happen (to help branch prediction) */
        if (__unlikely(restart))
        {
            /* Clear the restart indication */
            restart = 0;

            timeout_ns = (GKI_TICKS_TO_MS(1) * 1000000);
        }
        else
        {
            /* Compute time elapsed since last sleep start */
            delta_ns = current.tv_nsec - previous.tv_nsec;
            delta_ns += (current.tv_sec - previous.tv_sec) * 1000000000;

            /* Compute next timeout:
             *    timeout = (next theoretical expiration) - current time
             *    timeout = (previous time + timeout + delay) - current time
             *    timeout = timeout + delay - (current time - previous time)
             *    timeout += delay - delta */
            timeout_ns += (GKI_TICKS_TO_MS(1) * 1000000) - delta_ns;
        }
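        /* Worked example of the drift compensation above (the 100 ms tick length is an assumption,
         * the real value is GKI_TICKS_TO_MS(1)): if the previous iteration intended to sleep 100 ms
         * but delta_ns shows 103 ms actually elapsed, then timeout_ns += 100 ms - 103 ms, i.e. the
         * next sleep is shortened by 3 ms so the ticks stay aligned with the theoretical schedule. */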
        /* Save the current time for next iteration */
        previous = current;

        timeout.tv_sec = 0;

        /* Sleep until next theoretical tick time.  In case of excessive
           elapsed time since last theoretical tick expiration, it is
           possible that the timeout value is negative.  To protect
           against this error, we set minimum sleep time to 10% of the
           tick period.  We indicate to compiler that this is unlikely to
           happen (to help branch prediction) */

        if (__unlikely(timeout_ns < ((GKI_TICKS_TO_MS(1) * 1000000) * 0.1)))
        {
            timeout.tv_nsec = (GKI_TICKS_TO_MS(1) * 1000000) * 0.1;

            /* Print error message if tick really got delayed
               (more than 5 ticks) */
            if (timeout_ns < GKI_TICKS_TO_MS(-5) * 1000000)
            {
                GKI_ERROR_LOG("tick delayed > 5 slots (%d,%d) -- cpu overload ? ",
                        timeout_ns, GKI_TICKS_TO_MS(-5) * 1000000);
            }
        }
        else
        {
            timeout.tv_nsec = timeout_ns;
        }

        do
        {
            /* [u]sleep can't be used because it uses SIGALRM */
            err = nanosleep(&timeout, &timeout);
        } while (err < 0 && errno == EINTR);

        /* Increment the GKI time value by one tick and update internal timers */
        GKI_timer_update(1);
    }
    GKI_TRACE("gki_ulinux: Exiting timer_thread");
    pthread_exit(NULL);
    return NULL;
}
void GKI_shutdown(void)
{
    UINT8 task_id;
#if ( FALSE == GKI_PTHREAD_JOINABLE )
    int i = 0;
#else
    int result;
#endif

#ifdef GKI_USE_DEFERED_ALLOC_BUF_POOLS
    gki_dealloc_free_queue();
#endif

    /* release threads and set them to TASK_DEAD. Going from low to high priority fixes the
     * GKI_exception problem caused by btu->hci sleep request events  */
    for (task_id = GKI_MAX_TASKS; task_id > 0; task_id--)
    {
        if (gki_cb.com.OSRdyTbl[task_id - 1] != TASK_DEAD)
        {
            gki_cb.com.OSRdyTbl[task_id - 1] = TASK_DEAD;

            /* paranoia setting: make sure that we do not execute any mailbox events */
            gki_cb.com.OSWaitEvt[task_id-1] &= ~(TASK_MBOX_0_EVT_MASK|TASK_MBOX_1_EVT_MASK|
                                                TASK_MBOX_2_EVT_MASK|TASK_MBOX_3_EVT_MASK);
            GKI_send_event(task_id - 1, EVENT_MASK(GKI_SHUTDOWN_EVT));

#if ( FALSE == GKI_PTHREAD_JOINABLE )
            i = 0;

            while ((gki_cb.com.OSWaitEvt[task_id - 1] != 0) && (++i < 10))
                usleep(100 * 1000);
#else
            result = pthread_join( gki_cb.os.thread_id[task_id-1], NULL );

            if ( result < 0 )
            {
                ALOGE( "pthread_join() FAILED: result: %d", result );
            }
#endif
            // GKI_ERROR_LOG( "GKI_shutdown(): task %s dead\n", gki_cb.com.OSTName[task_id]);
            GKI_exit_task(task_id - 1);
        }
    }

    /* Destroy mutex and condition variable objects */
    pthread_mutex_destroy(&gki_cb.os.GKI_mutex);

    /*    pthread_mutex_destroy(&GKI_sched_mutex); */
#if (GKI_DEBUG == TRUE)
    pthread_mutex_destroy(&gki_cb.os.GKI_trace_mutex);
#endif
    /*    pthread_mutex_destroy(&thread_delay_mutex);
     pthread_cond_destroy (&thread_delay_cond); */
#if ( FALSE == GKI_PTHREAD_JOINABLE )
    i = 0;
#endif

#ifdef NO_GKI_RUN_RETURN
    shutdown_timer = 1;
#endif
    if (g_GkiTimerWakeLockOn)
    {
        GKI_TRACE("GKI_shutdown :  release_wake_lock(brcm_btld)");
        release_wake_lock(WAKE_LOCK_ID);
        g_GkiTimerWakeLockOn = 0;
    }
}
/*******************************************************************************
**
** Function         GKI_create_task
**
** Description      This function is called to create a new OSS task.
**
** Parameters:      task_entry  - (input) pointer to the entry function of the task
**                  task_id     - (input) Task id is mapped to priority
**                  taskname    - (input) name given to the task
**                  stack       - (input) pointer to the top of the stack (highest memory location)
**                  stacksize   - (input) size of the stack allocated for the task
**
** Returns          GKI_SUCCESS if all OK, GKI_FAILURE if any problem
**
** NOTE             This function takes some parameters that may not be needed
**                  by your particular OS. They are here for compatibility
**                  of the function prototype.
**
*******************************************************************************/
UINT8 GKI_create_task (TASKPTR task_entry, UINT8 task_id, INT8 *taskname, UINT16 *stack, UINT16 stacksize)
{
    UINT16  i;
    UINT8   *p;
    struct sched_param param;
    int policy, ret = 0;
    pthread_attr_t attr1;

    GKI_TRACE( "GKI_create_task %x %d %s %x %d", (int)task_entry, (int)task_id,
            (char*) taskname, (int) stack, (int)stacksize);

    if (task_id >= GKI_MAX_TASKS)
    {
        GKI_ERROR_LOG("Error! task ID > max task allowed");
        return (GKI_FAILURE);
    }


    gki_cb.com.OSRdyTbl[task_id]    = TASK_READY;
    gki_cb.com.OSTName[task_id]     = taskname;
    gki_cb.com.OSWaitTmr[task_id]   = 0;
    gki_cb.com.OSWaitEvt[task_id]   = 0;

    /* Initialize mutex and condition variable objects for events and timeouts */
    pthread_mutex_init(&gki_cb.os.thread_evt_mutex[task_id], NULL);
    pthread_cond_init (&gki_cb.os.thread_evt_cond[task_id], NULL);
    pthread_mutex_init(&gki_cb.os.thread_timeout_mutex[task_id], NULL);
    pthread_cond_init (&gki_cb.os.thread_timeout_cond[task_id], NULL);

    pthread_attr_init(&attr1);
    /* by default, pthread creates a joinable thread */
#if ( FALSE == GKI_PTHREAD_JOINABLE )
    pthread_attr_setdetachstate(&attr1, PTHREAD_CREATE_DETACHED);

    GKI_TRACE("GKI creating task %i\n", task_id);
#else
    GKI_TRACE("GKI creating JOINABLE task %i\n", task_id);
#endif

    /* On Android, the new task starts running before 'gki_cb.os.thread_id[task_id]' is initialized */
    /* Pass task_id to the new task so it can initialize gki_cb.os.thread_id[task_id] before it calls GKI_wait */
    gki_pthread_info[task_id].task_id = task_id;
    gki_pthread_info[task_id].task_entry = task_entry;
    gki_pthread_info[task_id].params = 0;

    ret = pthread_create( &gki_cb.os.thread_id[task_id],
              &attr1,
              (void *)gki_task_entry,
              &gki_pthread_info[task_id]);

    if (ret != 0)
    {
         GKI_ERROR_LOG("pthread_create failed(%d), %s!\n\r", ret, taskname);
         return GKI_FAILURE;
    }

    if (pthread_getschedparam(gki_cb.os.thread_id[task_id], &policy, &param) == 0)
     {
#if (GKI_LINUX_BASE_POLICY!=GKI_SCHED_NORMAL)
#if defined(PBS_SQL_TASK)
         if (task_id == PBS_SQL_TASK)
         {
             GKI_TRACE("PBS SQL lowest priority task");
             policy = SCHED_NORMAL;
         }
         else
#endif
#endif
         {
             /* check that the #define in gki_int.h is correct for this compile environment! */
             policy = GKI_LINUX_BASE_POLICY;
#if (GKI_LINUX_BASE_POLICY != GKI_SCHED_NORMAL)
             param.sched_priority = GKI_LINUX_BASE_PRIORITY - task_id - 2;
#else
             param.sched_priority = 0;
#endif
         }
         pthread_setschedparam(gki_cb.os.thread_id[task_id], policy, &param);
     }

    GKI_TRACE( "Leaving GKI_create_task %x %d %x %s %x %d\n",
              (int)task_entry,
              (int)task_id,
              (int)gki_cb.os.thread_id[task_id],
              (char*)taskname,
              (int)stack,
              (int)stacksize);

    return (GKI_SUCCESS);
}
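/* Illustrative usage sketch (not part of the port): creating a task with GKI_create_task().
 * EXAMPLE_TASK_ID, example_task_entry() and the task name are hypothetical; the stack pointer and
 * stack size appear unused by this pthread-based port (they are only traced), so NULL/0 are passed. */
#if 0
#define EXAMPLE_TASK_ID     3

static void example_task_entry (UINT32 params)
{
    /* a real task entry typically ends in a GKI_wait() main loop, see GKI_wait() above */
}

static UINT8 example_start_task (void)
{
    return GKI_create_task((TASKPTR)example_task_entry, EXAMPLE_TASK_ID,
                           (INT8 *)"EXAMPLE_TASK", NULL, 0);
}
#endif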
/*******************************************************************************
**
** Function         GKI_sched_unlock
**
** Description      This function is called by tasks to enable scheduler switching.
**
** Returns          void
**
** NOTE             This function is NOT called by the Broadcom stack and
**                  profiles. If you want to use it in your own implementation,
**                  put code here to tell the OS to re-enable context switching.
**
*******************************************************************************/
void GKI_sched_unlock(void)
{
    GKI_TRACE("GKI_sched_unlock");
}
/*******************************************************************************
**
** Function         GKI_sched_lock
**
** Description      This function is called by tasks to disable scheduler
**                  task context switching.
**
** Returns          void
**
** NOTE             This function is NOT called by the Broadcom stack and
**                  profiles. If you want to use it in your own implementation,
**                  put code here to tell the OS to disable context switching.
**
*******************************************************************************/
void GKI_sched_lock(void)
{
    GKI_TRACE("GKI_sched_lock");
    return;
}
/*******************************************************************************
**
** Function         GKI_isend_event
**
** Description      This function is called from ISRs to send events to other
**                  tasks. The only difference between this function and GKI_send_event
**                  is that this function assumes interrupts are already disabled.
**
** Parameters:      task_id -  (input) The destination task Id for the event.
**                  event   -  (input) The event flag
**
** Returns          GKI_SUCCESS if all OK, else GKI_FAILURE
**
** NOTE             This function is NOT called by the Broadcom stack and
**                  profiles. If you want to use it in your own implementation,
**                  put your code here, otherwise you can delete the entire
**                  body of the function.
**
*******************************************************************************/
UINT8 GKI_isend_event (UINT8 task_id, UINT16 event)
{
    GKI_TRACE("GKI_isend_event %d %x", task_id, event);
    GKI_TRACE("GKI_isend_event %d %x done", task_id, event);
    return    GKI_send_event(task_id, event);
}