Example #1
void mmocdbg_print_message
(
     const char *string,
       /* Format in which the string needs to be printed */
     ...
)
{
  AEEVaList   arg_ptr;
  char        msg_str[ MMOC_MAX_STRING_LENGTH];

  /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/

  /* Clear the string buffer 
  */
  memset (msg_str, 0, sizeof(msg_str));

  /*lint -save -e64 -e718 -e746 -e63 */
  AEEVA_START( arg_ptr, string );
  /*lint -restore */

  /* Write at most MMOC_MAX_STRING_LENGTH-1 characters so the terminating
  ** null character stays untouched.
  */
  (void)std_vstrlprintf(msg_str, MMOC_MAX_STRING_LENGTH - 1,
                        string, arg_ptr);
  AEEVA_END( arg_ptr );

  MSG_SPRINTF_1(MSG_SSID_DFLT, MSG_LVL_HIGH, "=MMOC= %s", msg_str);

} /* mmocdbg_print_message() */
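
For comparison, a minimal portable sketch of the same wrapper pattern using only standard <stdarg.h> and vsnprintf; it assumes none of the AEE/std_vstrlprintf dependencies, and MY_MAX_STRING_LENGTH and the "=DBG=" prefix are illustrative stand-ins, not names from the original code.

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define MY_MAX_STRING_LENGTH 256            /* illustrative buffer size */

static void my_print_message(const char *format, ...)
{
  char    msg_str[MY_MAX_STRING_LENGTH];
  va_list arg_ptr;

  /* Clear the string buffer */
  memset(msg_str, 0, sizeof(msg_str));

  va_start(arg_ptr, format);
  /* vsnprintf writes at most sizeof(msg_str)-1 characters and always
  ** null terminates, mirroring the std_vstrlprintf bound above.
  */
  (void)vsnprintf(msg_str, sizeof(msg_str), format, arg_ptr);
  va_end(arg_ptr);

  fprintf(stderr, "=DBG= %s\n", msg_str);   /* stand-in for MSG_SPRINTF_1 */
}
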
static int
diag_printf(const char *buf, uint16_t vdevid, uint16_t level,
            uint32_t optionflag, uint32_t timestamp, FILE *log_out)
{
    char pbuf[512];

    if (vdevid < DBGLOG_MAX_VDEVID)
        snprintf(pbuf, sizeof(pbuf), "FWMSG: [%u] vap-%u %s", timestamp, vdevid, buf);
    else
        snprintf(pbuf, sizeof(pbuf), "FWMSG: [%u] %s", timestamp, buf);

    if (optionflag & QXDM_FLAG) {
        switch (level) {
        case DBGLOG_VERBOSE:
            MSG_SPRINTF_1(MSG_SSID_WLAN, MSG_LEGACY_LOW, "%s", pbuf);
            break;
        case DBGLOG_INFO:
        case DBGLOG_INFO_LVL_1:
        case DBGLOG_INFO_LVL_2:
            MSG_SPRINTF_1(MSG_SSID_WLAN, MSG_LEGACY_MED, "%s", pbuf);
            break;
        case DBGLOG_WARN:
        case DBGLOG_ERR:
            MSG_SPRINTF_1(MSG_SSID_WLAN, MSG_LEGACY_HIGH, "%s", pbuf);
            break;
        case DBGLOG_LVL_MAX:
        default:
            MSG_SPRINTF_1(MSG_SSID_WLAN, MSG_LEGACY_FATAL, "%s", pbuf);
            break;
        }
    } else if (optionflag & CONSOLE_FLAG) {
        android_printf("%s\n", pbuf);
    } else if (optionflag & LOGFILE_FLAG) {
        if (log_out)
            return fprintf(log_out, "%s\n", pbuf);
    }
    return 0;
}
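
A hypothetical invocation, assuming CONSOLE_FLAG is one of the option bit flags and DBGLOG_INFO one of the levels handled above; every value shown is illustrative.

    /* Route one firmware message to the console sink */
    diag_printf("scan completed", 0 /* vdevid */, DBGLOG_INFO,
                CONSOLE_FLAG, 123456 /* timestamp */, NULL /* no log file */);
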
/*===========================================================================

 FUNCTION rcinit_internal_process_groups

 DESCRIPTION
 Processes each RCINIT group in sequence: applies the group policy filter,
 sizes and allocates the group's stack resources, hands stacks out to tasks,
 dispatches the group's init functions, then defines and starts the group's
 tasks, blocking until each started task completes its startup handshake.

 DEPENDENCIES
 none

 RETURN VALUE
 none

 SIDE EFFECTS
 none

 ===========================================================================*/
void rcinit_internal_process_groups(void)
{
   const rcinit_info_t* rcinit_p;

   rcinit_info_p rcinit_info_rcinit_p = rcinit_lookup("rcinit");

   if (RCINIT_NULL == rcinit_info_rcinit_p)
   {
      ERR_FATAL("rcinit task info not available", 0, 0, 0);
   }

   rcinit_internal.policy_base = rcinit_internal_groups[rcinit_internal.policy_curr]; // initialize before a transition

   ////////////////////////////////////////
   // Process Groups
   ////////////////////////////////////////

   while (rcinit_internal.group_curr < RCINIT_GROUP_MAX)
   {
      unsigned long resource_size;

#if defined(RCINIT_TRACER_SWEVT)
      tracer_event_simple(rcinit_swe_event_init[rcinit_internal.group_curr]);    // software event
#endif

      MSG_SPRINTF_1(MSG_SSID_TMS, MSG_LEGACY_HIGH, "initialization begins group %x", rcinit_internal.group_curr);

      ////////////////////////////////////////
      // Policy Filter Application
      ////////////////////////////////////////

      rcinit_internal.process_state = rcinit_process_group_policy_fn;

      rcinit_internal.group_base = rcinit_internal.policy_base[rcinit_internal.group_curr]; // initialize before a transition

      for (rcinit_internal.group_curr_idx = RCINIT_GROUP_0; RCINIT_NULL != rcinit_internal.group_base[rcinit_internal.group_curr_idx]; rcinit_internal.group_curr_idx++)
      {
         rcinit_p = rcinit_internal.group_base[rcinit_internal.group_curr_idx];

         if (RCINIT_NULL != rcinit_p && RCINIT_NULL == rcinit_p->handle &&
             RCINIT_STKSZ_ZERO == rcinit_p->stksz &&
             RCINIT_TASK_POLICYFN == rcinit_p->type &&
             RCINIT_NULL != rcinit_p->entry)
         {
            static boolean policy_oneshot = FALSE;

            if (FALSE == policy_oneshot)
            {
               RCINIT_NAME(*policyfn)(const RCINIT_NAME[]);
               RCINIT_NAME policy_name;
               RCINIT_POLICY policy;

               policyfn = (RCINIT_NAME(*)(const RCINIT_NAME[]))rcinit_p->entry;
               policy_name = policyfn(rcinit_internal_policy_list);
               policy = rcinit_lookup_policy(policy_name);

               if (RCINIT_POLICY_NONE != policy)
               {
                  policy_oneshot = TRUE;                                         // only one policy transition is allowed

                  rcinit_internal.policy_curr = policy;

                  rcinit_internal.policy_base = rcinit_internal_groups[rcinit_internal.policy_curr];
               }
            }

            break;
         }
      }

      rcinit_internal.group_base = rcinit_internal.policy_base[rcinit_internal.group_curr]; // after transition

      ////////////////////////////////////////
      // Static Resource Sizing
      ////////////////////////////////////////

      rcinit_internal.process_state = rcinit_process_group_sizing;

      resource_size = 0;                                                         // initial group resource allocation (scale is rcinit_stack_t)

      for (rcinit_internal.group_curr_idx = RCINIT_GROUP_0; RCINIT_NULL != rcinit_internal.group_base[rcinit_internal.group_curr_idx]; rcinit_internal.group_curr_idx++)
      {
         rcinit_p = rcinit_internal.group_base[rcinit_internal.group_curr_idx];

         if (RCINIT_NULL != rcinit_p && RCINIT_NULL != rcinit_p->handle &&       // must have a context handle
             RCINIT_STKSZ_ZERO != rcinit_p->stksz &&                             // must have a stack size
             rcinit_info_rcinit_p != rcinit_p)                                   // must not be rcinit, rcinit is the bootstrap
         {
            if (RCINIT_TASK_DALTASK == rcinit_p->type)
            {
               // NO STACK ALLOCATIONS; DAL WORKLOOPS INTERNALLY MANAGED
            }

            else if (RCINIT_TASK_POSIX == rcinit_p->type)
            {
               resource_size += rcinit_p->stksz;                                 // tech area specified
               resource_size += sizeof(_rcxh_scope_t);                           // handlers overhead
               resource_size = stack_base_pad(resource_size);                    // pad alignment
            }

            else if (RCINIT_TASK_QURTTASK == rcinit_p->type)
            {
               resource_size += rcinit_p->stksz;                                 // tech area specified
               resource_size += sizeof(_rcxh_scope_t);                           // handlers overhead
               resource_size = stack_base_pad(resource_size);                    // pad alignment
            }

            else if (RCINIT_TASK_REXTASK == rcinit_p->type || RCINIT_TASK_LEGACY == rcinit_p->type)
            {
               resource_size += rcinit_p->stksz;                                 // tech area specified
               resource_size += sizeof(_rcxh_scope_t);                           // handlers overhead
               resource_size = stack_base_pad(resource_size);                    // pad alignment
            }
         }
      }
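
      // Worked example (numbers hypothetical): a QuRT task with stksz 4096 and
      // sizeof(_rcxh_scope_t) == 32 contributes 4096 + 32 = 4128 bytes, which
      // stack_base_pad() then rounds up to the next alignment boundary, e.g.
      // 4160 for a 64 byte alignment.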

      ////////////////////////////////////////
      // Static Resource Allocation
      ////////////////////////////////////////

      // One chunk per group to avoid excessive heap fragmentation. The group
      // configuration is not known until the policy function has run, so no
      // allocation larger than group-based granularity is possible. Less
      // fragmentation comes with a larger allocation.

      // Stack resources are allocated from the heap because the heap is marked
      // as NO EXECUTE space on all products. Stacks must not be supplied from
      // areas that could be EXECUTE capable; stack smashing by buffer overflow
      // should not be able to leave executable code in the stack.

      rcinit_internal.process_state = rcinit_process_group_allocation;

      if (0 != resource_size)                                                    // do not allocate a zero size resource
      {
         resource_size = stack_base_pad(resource_size);                          // pad alignment

         // TODO: save current nhlos context name. set nhlos context name to "STACKS_GROUPx". supply better runtime
         // tracking instruments based on context name. This might require a temporary context be established and
         // reclaimed.

         rcinit_internal.allocs[rcinit_internal.group_curr] = (unsigned char*)rcinit_internal_malloc(resource_size);

         // TODO: restore nhlos context name.

         if (RCINIT_NULL == rcinit_internal.allocs[rcinit_internal.group_curr])
         {
            ERR_FATAL("static resource allocation failure", 0, 0, 0);
         }

         // set for location of next allocation
         rcinit_internal.stacks[rcinit_internal.group_curr] = (rcinit_stack_p)stack_base_pad(rcinit_internal.allocs[rcinit_internal.group_curr]);

         rcinit_internal.stacks_size += resource_size; // measured in KB
      }

      ////////////////////////////////////////
      // Static Resource Assignments
      ////////////////////////////////////////

      // Stack resource allocations are handed out to the tasks; the pointer math
      // is performed both over rcinit_stack_t sizes and as unsigned char* so that
      // a warning check can be computed at the end to ensure everything matches.
      // Diagnostic instruments report issues when the computed sizes do not match.

      // The small pad location between stacks, when available, is initialized to
      // a known canary. If that canary is later found overwritten, a stack has
      // overflowed from the higher address into the lower address. Triage via
      // Trace32 to identify this specific condition.
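
      // Illustrative sketch only (the actual canary value and pad width are
      // build specific): the gap left by stack_base_pad() could be seeded and
      // audited roughly as
      //
      //    memset(pad_start, 0xA5, pad_len);   // seed a known canary
      //    ...
      //    if (0xA5 != pad_start[0])           // canary gone: the stack at the
      //    {                                   // higher address overflowed
      //       /* flag for Trace32 triage */
      //    }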

      rcinit_internal.process_state = rcinit_process_group_assignment;

      if (0 != resource_size)                                                    // nothing to assign when no resource was allocated
      {
         for (rcinit_internal.group_curr_idx = RCINIT_GROUP_0; RCINIT_NULL != rcinit_internal.group_base[rcinit_internal.group_curr_idx]; rcinit_internal.group_curr_idx++)
         {
            rcinit_p = rcinit_internal.group_base[rcinit_internal.group_curr_idx];

            if (RCINIT_NULL != rcinit_p && RCINIT_NULL != rcinit_p->handle &&    // must have a context handle
                RCINIT_STKSZ_ZERO != rcinit_p->stksz &&                          // must have a stack size
                rcinit_info_rcinit_p != rcinit_p)                                // must not be rcinit, rcinit is the bootstrap
            {
               if (RCINIT_TASK_DALTASK == rcinit_p->type)                        // check and handout previously computed allocation
               {
                  // NO STACK ALLOCATIONS; DAL WORKLOOPS INTERNALLY MANAGED
               }

               else if (RCINIT_TASK_POSIX == rcinit_p->type)                     // check and handout previously computed allocation
               {
                  rcinit_p->handle->stack = rcinit_internal.stacks[rcinit_internal.group_curr];

                  rcinit_internal.stacks[rcinit_internal.group_curr] += (rcinit_p->stksz / sizeof(rcinit_stack_t));
                  rcinit_internal.stacks[rcinit_internal.group_curr] += (sizeof(_rcxh_scope_t) / sizeof(rcinit_stack_t));

                  // set for location of next allocation

                  rcinit_internal.stacks[rcinit_internal.group_curr] = (rcinit_stack_p)stack_base_pad(rcinit_internal.stacks[rcinit_internal.group_curr]);

                  if ((unsigned char*)rcinit_internal.stacks[rcinit_internal.group_curr] > rcinit_internal.allocs[rcinit_internal.group_curr] + resource_size)
                  {
                     MSG_SPRINTF_2(MSG_SSID_TMS, MSG_LEGACY_ERROR, "stack pad check in group %x with posix %s", rcinit_internal.group_curr, rcinit_p->name);
                  }
               }

               else if (RCINIT_TASK_QURTTASK == rcinit_p->type)                  // check and handout previously computed allocation
               {
                  rcinit_p->handle->stack = rcinit_internal.stacks[rcinit_internal.group_curr];

                  rcinit_internal.stacks[rcinit_internal.group_curr] += (rcinit_p->stksz / sizeof(rcinit_stack_t));
                  rcinit_internal.stacks[rcinit_internal.group_curr] += (sizeof(_rcxh_scope_t) / sizeof(rcinit_stack_t));

                  // set for location of next allocation

                  rcinit_internal.stacks[rcinit_internal.group_curr] = (rcinit_stack_p)stack_base_pad(rcinit_internal.stacks[rcinit_internal.group_curr]);

                  if ((unsigned char*)rcinit_internal.stacks[rcinit_internal.group_curr] > rcinit_internal.allocs[rcinit_internal.group_curr] + resource_size)
                  {
                     MSG_SPRINTF_2(MSG_SSID_TMS, MSG_LEGACY_ERROR, "stack pad check in group %x with qurt %s", rcinit_internal.group_curr, rcinit_p->name);
                  }
               }

               else if (RCINIT_TASK_REXTASK == rcinit_p->type || RCINIT_TASK_LEGACY == rcinit_p->type) // check and handout previously computed allocation
               {
                  rcinit_p->handle->stack = rcinit_internal.stacks[rcinit_internal.group_curr];

                  rcinit_internal.stacks[rcinit_internal.group_curr] += (rcinit_p->stksz / sizeof(rcinit_stack_t));
                  rcinit_internal.stacks[rcinit_internal.group_curr] += (sizeof(_rcxh_scope_t) / sizeof(rcinit_stack_t));

                  // set for location of next allocation

                  rcinit_internal.stacks[rcinit_internal.group_curr] = (rcinit_stack_p)stack_base_pad(rcinit_internal.stacks[rcinit_internal.group_curr]);

                  if ((unsigned char*)rcinit_internal.stacks[rcinit_internal.group_curr] > rcinit_internal.allocs[rcinit_internal.group_curr] + resource_size)
                  {
                     MSG_SPRINTF_2(MSG_SSID_TMS, MSG_LEGACY_ERROR, "stack pad check in group %x with rex %s", rcinit_internal.group_curr, rcinit_p->name);
                  }
               }
            }
         }
      }

      ////////////////////////////////////////
      // Function Process
      ////////////////////////////////////////

      // Initialization functions are triggered for execution at this point. They
      // are considered callbacks, and executed within the context of the 'rcworker'
      // task. The 'rcworker' task by itself does not supply any heap management, so
      // heap allocations tracked to 'rcworker' context are actually performed by
      // other tech area supplied callbacks.
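
      // For orientation, an init function dispatched this way is just a plain
      // callback (sketch; the function name is illustrative):
      //
      //    void my_driver_init(void)   // runs in 'rcworker' context
      //    {
      //       /* one-time, non-blocking setup */
      //    }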

      rcinit_internal.process_state = rcinit_process_group_init_fn;

      for (rcinit_internal.group_curr_idx = RCINIT_GROUP_0; RCINIT_NULL != rcinit_internal.group_base[rcinit_internal.group_curr_idx]; rcinit_internal.group_curr_idx++)
      {
         rcinit_p = rcinit_internal.group_base[rcinit_internal.group_curr_idx];

         if (IS_INITFN(rcinit_p))
         {
            rcinit_internal.worker_argv = rcinit_internal.group_base;

            if (DAL_SUCCESS != DALSYS_EventCtrl(rcinit_internal.hEventWorkLoop, DALSYS_EVENT_CTRL_TRIGGER) ||
                DAL_SUCCESS != DALSYS_EventWait(rcinit_internal.hEventWorkLoopAck) ||
                DAL_SUCCESS != DALSYS_EventCtrl(rcinit_internal.hEventWorkLoopAck, DALSYS_EVENT_CTRL_RESET))
            {
               ERR_FATAL("rcworker trigger", 0, 0, 0);
            }

            break;
         }
      }

      ////////////////////////////////////////
      // Task Process
      ////////////////////////////////////////

      // Tasks are now started, logically concurrently. Each task is required to
      // call rcinit_handshake_startup to indicate that it is ready to block at
      // the logical barrier with all other tasks performing concurrent
      // initialization.

      // A task context is strongly discouraged from performing any type of
      // blocking during the initialization portion. Blocking calls may occur
      // after the required call to rcinit_handshake_startup; any earlier use
      // can lead to race conditions or other latent bugs caused by startup
      // timing that is not deterministic.
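
      // A task written against this mechanism therefore follows this shape
      // (sketch; rcinit_handshake_startup is the documented requirement, the
      // other names are illustrative):
      //
      //    void my_task(dword unused)
      //    {
      //       my_nonblocking_init();        // no blocking before the handshake
      //       rcinit_handshake_startup();   // report defined; block for start
      //       my_task_loop();               // blocking calls allowed from here
      //    }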

      rcinit_internal.process_state = rcinit_process_group_init_task;

      rcinit_internal.def.curr = 0;                                              // currently defined
      rcinit_internal.def.prev = rcevt_getcount_handle(rcinit_internal.define);  // previously defined

      for (rcinit_internal.group_curr_idx = RCINIT_GROUP_0; RCINIT_NULL != rcinit_internal.group_base[rcinit_internal.group_curr_idx]; rcinit_internal.group_curr_idx++)
      {
         rcinit_p = rcinit_internal.group_base[rcinit_internal.group_curr_idx];

         if (IS_TASK(rcinit_p))
         {
            if (RCINIT_ENTRY_NONE != rcinit_p->handle->entry)
            {
               if (RCINIT_TASK_DALTASK == rcinit_p->type)
               {
                  // NO STACK ALLOCATIONS; DAL WORKLOOPS INTERNALLY MANAGED

                  rcinit_internal_start_daltask(rcinit_p, rcinit_p->entry);      // launches task and fills in overhead

                  rcinit_internal.def.curr++;
               }

               else if (RCINIT_TASK_POSIX == rcinit_p->type)
               {
                  if (RCINIT_STACK_NULL == rcinit_p->handle->stack)
                  {
                     MSG_SPRINTF_2(MSG_SSID_TMS, MSG_LEGACY_ERROR, "stack checks in group %x with posix %s", rcinit_internal.group_curr, rcinit_p->name);
                  }

                  else
                  {
                     rcinit_internal_start_posix(rcinit_p, rcinit_p->entry);     // launches task and fills in overhead

                     rcinit_internal.def.curr++;
                  }
               }

               else if (RCINIT_TASK_QURTTASK == rcinit_p->type)
               {
                  if (RCINIT_STACK_NULL == rcinit_p->handle->stack)
                  {
                     MSG_SPRINTF_2(MSG_SSID_TMS, MSG_LEGACY_ERROR, "stack checks in group %x with qurt %s", rcinit_internal.group_curr, rcinit_p->name);
                  }

                  else
                  {
                     rcinit_internal_start_qurttask(rcinit_p, rcinit_p->entry);  // launches task and fills in overhead

                     rcinit_internal.def.curr++;
                  }
               }

               else if (RCINIT_TASK_REXTASK == rcinit_p->type || RCINIT_TASK_LEGACY == rcinit_p->type)
               {
                  if (RCINIT_STACK_NULL == rcinit_p->handle->stack)
                  {
                     MSG_SPRINTF_2(MSG_SSID_TMS, MSG_LEGACY_ERROR, "stack checks in group %x with rex %s", rcinit_internal.group_curr, rcinit_p->name);
                  }

                  else
                  {
                     rcinit_internal_start_rextask(rcinit_p, rcinit_p->entry);   // launches task and fills in overhead

                     rcinit_internal.def.curr++;
                  }
               }
            }
         }
      }

      if (0 != rcinit_internal.def.curr)                                         // only wait when tasks were started
      {
         rcinit_internal.process_state = rcinit_process_group_init_blocking;

         // async signal arrival order; wait for all to complete that were started

         // STALLING HERE?

         // BLOCKING HERE OCCURS UNTIL ALL THE TASKS STARTED PERFORM THEIR HANDSHAKES.
         // THIS IS THE NORMAL MECHANISM. CHECK WITH THE TECH AREA OWNING A SPECIFIC
         // CONTEXT WHEN BLOCKING DURATION BECOMES EXCESSIVE WAITING FOR THE HANDSHAKE.
         // TASKS ARE LOGICALLY STARTED IN PARALLEL, AND ALLOWED TO BE SCHEDULED AND
         // RUN CONCURRENTLY.

         rcevt_wait_count_handle(rcinit_internal.define, rcinit_internal.def.prev + rcinit_internal.def.curr); // wait for all defined tasks handshake; this group

         // TASKS BLOCKING ON THIS HANDLE ARE WAITING CORRECTLY; ANY TASK BLOCKING
         // ELSEWHERE IS NOT PERFORMING STARTUP CORRECTLY.

         rcinit_internal.process_state = rcinit_process_group_run;

         rcevt_signal_handle(rcinit_internal.defineack);                         // issue start signal to all defined tasks; this group
      }

      else
      {
         // No blocking required, no tasks started
      }

      rcinit_internal.def.curr = 0;                                              // reset instruments
      rcinit_internal.def.prev = 0;                                              // reset instruments

      rcinit_internal.group_curr++;
   }

   ////////////////////////////////////////
   // Group Processing Complete
   ////////////////////////////////////////

   rcinit_internal.group_curr = RCINIT_GROUP_NONE;                               // current group
   rcinit_internal.process_state = rcinit_process_none;
}