/*
 * CondWait();
 *
 * Wait on condition variable 'cond'.  Must be called with 'mutex' held.
 * Atomically releases the mutex, blocks the calling process on the
 * condvar, and reacquires the mutex before returning.  The entire
 * release/block/reacquire sequence runs with interrupts disabled so no
 * wakeup can be lost between releasing the mutex and blocking.
 */
void CondWait (struct Cond *cond, struct Mutex *mutex)
{
	struct Process *proc;
	
	/* Called before the scheduler is up: nothing to block. */
	if (current_process == NULL)
		return;
	
	DisableInterrupts();
	
	/* Release Mutex: hand ownership directly to the first blocked
	   waiter if there is one, otherwise mark the mutex free. */
	if (mutex->locked == TRUE)
	{
		proc = LIST_HEAD (&mutex->blocked_list);
		
		if (proc != NULL)
		{
			LIST_REM_HEAD (&mutex->blocked_list, blocked_entry);
			proc->state = PROC_STATE_READY;
			SchedReady (proc);
		}
		else
		{
			mutex->locked = FALSE;
		}
	}
	else
	{
		/* Caller violated the contract: condvar wait requires the mutex. */
		KPANIC ("CondWait() on a free mutex");
	}
	
	/* Block current process on Condvar */
	LIST_ADD_TAIL (&cond->blocked_list, current_process, blocked_entry);
	current_process->state = PROC_STATE_COND_BLOCKED;
	SchedUnready (current_process);
	Reschedule();
	
	/* Now acquire mutex: if it is held again, queue and block until the
	   holder hands it over; otherwise take it immediately. */
	if (mutex->locked == TRUE)
	{
		LIST_ADD_TAIL (&mutex->blocked_list, current_process, blocked_entry);
		current_process->state = PROC_STATE_MUTEX_BLOCKED;
		SchedUnready (current_process);
		Reschedule();
	}
	else
	{
		mutex->locked = TRUE;
	}
	
	EnableInterrupts();
}
/*
 * DDS_SP_add_topic -- add a default "allow all" topic rule, either for a
 * participant (participant_handle > 0) or for a domain (domain_handle > 0).
 * Removes the implicit default rule (index 0) if it is still present.
 *
 * Returns the index of the new topic rule, or -1 when the handle is
 * invalid or neither handle is positive.  Aborts via fatal_printf() on
 * allocation failure.
 */
TopicHandle_t DDS_SP_add_topic (ParticipantHandle_t participant_handle,
				DomainHandle_t      domain_handle)
{
	MSParticipant_t	*p = NULL;
	MSDomain_t	*d = NULL;
	MSUTopic_t	*tp = NULL;
	MSTopic_t	*dtp = NULL;

	if (participant_handle > 0) {
		/* Add this topic rule for the participant. */
		if (!(p = id_handles [participant_handle]))
			return (-1);

		/* Drop the implicit default rule (index 0) if still there. */
		if (LIST_HEAD (p->topics) &&
		    LIST_HEAD (p->topics)->topic.index == 0)
			DDS_SP_remove_topic (participant_handle, 0, 0);

		if (!(tp = calloc (1, sizeof (MSUTopic_t))))
			fatal_printf ("Out of memory!\r\n");
		LIST_ADD_TAIL (p->topics, *tp);
		tp->id = ~0;			/* all domains */
		tp->topic.name = NULL;		/* name == '*' */
		tp->topic.mode = TA_ALL;
		tp->topic.index = ++topic_counter;
		log_printf ("MSP: add topic (%d, 0, %d);\r\n", participant_handle, tp->topic.index);
		tp->topic.blacklist = 0;
		p->ntopics++;
		return (tp->topic.index);
	}
	else if (domain_handle > 0) {
		/* Add this topic rule for the domain. */
		if (!(d = domain_handles [domain_handle]))
			return (-1);

		if (LIST_HEAD (d->topics) && LIST_HEAD (d->topics)->index == 0)
			DDS_SP_remove_topic (0, domain_handle, 0);

		if (!(dtp = calloc (1, sizeof (MSTopic_t))))
			fatal_printf ("Out of memory!\r\n");
		LIST_ADD_TAIL (d->topics, *dtp);
		dtp->name = NULL;		/* name == '*' */
		dtp->mode = TA_ALL;
		dtp->index = ++topic_counter;
		/* BUGFIX: this trace formerly printed participant_handle and
		   dereferenced 'tp', which is always NULL on the domain path
		   (NULL dereference).  Use the domain handle and dtp. */
		log_printf ("MSP: add topic (0, %d, %d);\r\n", domain_handle, dtp->index);
		dtp->blacklist = 0;
		d->ntopics++;
		return (dtp->index);
	}
	return (-1);
}
/*
 * DDS_SP_add_domain -- allocate and register a catch-all domain policy
 * entry (domain_id == ~0 matches all remaining domains) with a default
 * topic rule and a default partition rule, and return its handle.
 *
 * NOTE(review): comments below disagree -- "allow all policy" vs. the
 * older commented-out "allow none" trace -- while access is DS_SECRET.
 * Confirm which default policy is actually intended.
 */
DomainHandle_t DDS_SP_add_domain (void)
{
	MSDomain_t	*d;
	MSTopic_t	*tp;
	MSPartition_t	*pp;

	/* Lazily initialize the policy database on first use. */
	if (!domains.head)
		msp_init ();

	/* insert a default, allow all policy. */
	/* log_printf (SEC_ID, 0, "MSP: Creating a default 'allow none' policy for all domains\r\n"); */

	d = calloc (1, sizeof (MSDomain_t));
	if (!d)
		fatal_printf ("out-of-memory for domain policy!\r\n");
	d->domain_id = ~0;		/* all other domains */
	d->access = DS_SECRET;
	d->exclusive = 0;		/* open */
#ifdef DDS_SECURITY
	d->transport = TRANS_BOTH_DTLS_UDP;
#else
	d->transport = TRANS_BOTH_NONE;
#endif
	d->blacklist = 0;

	/* allow all topics in the domain */
	LIST_INIT (d->topics);
	if (!(tp = calloc (1, sizeof (MSTopic_t))))
		fatal_printf ("Out of memory!\r\n");
	LIST_ADD_TAIL (d->topics, *tp);
	tp->name = 0;			/* name == '*' */
	tp->mode = TA_ALL;
	tp->index = 0;			/* index 0 marks the implicit default rule */
	tp->blacklist = 0;
	d->ntopics++;

	/* allow all partitions in the domain */
	LIST_INIT (d->partitions);
	if (!(pp = calloc (1, sizeof (MSPartition_t))))
		fatal_printf ("Out of memory!\r\n");
	LIST_ADD_TAIL (d->partitions, *pp);
	pp->name = NULL;		/* name == '*' */
	pp->mode = TA_ALL;
	pp->index = 0;
	pp->blacklist = 0;
	d->npartitions++;

	LIST_ADD_TAIL (domains, *d);
	/* Handles are 1-based; no bounds check on domain_handles[] here --
	   presumably num_domains stays below the array size; verify. */
	d->handle = ++num_domains;
	domain_handles [d->handle] = d;
	return (d->handle);
}
/*
 * Insert 'task' into the active (ready-to-run) list, kept ordered by
 * scheduling priority (higher-priority tasks toward the head).
 *
 * Returns OK when the task was added as the first/current task or became
 * the new head of a non-empty list while the scheduler is unlocked;
 * returns NO otherwise (task queued but not made current).
 */
int Sched::addtoactive(Task &task)
{
    Task *pos;
    uint8_t task_priority;

    task_priority = task.Task_GetSchedPriority();
    DEBUG_PRINT("addtoactive:task_priority:%d\n", task_priority);

    task.Task_SetState(TSTATE_TASK_READYTORUN);

    if (LIST_EMPTY(task_active)) {
        /* First runnable task: it becomes the current task. */
        LIST_ADD(task_active, task);
        DEBUG_PRINT("LIST_EMPTY:LIST_ADD to task_active OK\n");
        Sched_SetCurrentTask(task);
        DEBUG_PRINT("It's the first task.\n");
        return OK;
    } else {
        if (LIST_LAST_ENTRY(task_active).Task_GetSchedPriority() > task_priority) {
            /* Lower priority than everything queued: append at the tail. */
            LIST_ADD_TAIL(task_active, task);
        } else {
            /* Insert before the first entry of equal or lower priority. */
            LIST_FOR_EACH_ENTRY(task_active, pos) {
                if (pos->Task_GetSchedPriority() <= task_priority) {
                    LIST_ADD_BEFORE(task_active, task, (*pos));
                    /* BUGFIX: stop after the first insertion point;
                       without this break the task could be linked at
                       every subsequent matching position, corrupting
                       the list. */
                    break;
                }
            }
            if (!Sched_locked() && IS_LIST_FIRST_ENTRY(task_active, task)) {
                return OK;
            }
        }
    }
    return NO;
}
/*
 * DDS_SP_add_participant -- allocate and register a catch-all participant
 * policy entry (name "*") with a default topic rule and a default
 * partition rule, and return its handle.
 *
 * NOTE(review): the comments say "allow all" while access is set to
 * DS_SECRET -- confirm the intended default access level.
 */
ParticipantHandle_t DDS_SP_add_participant (void)
{
	MSParticipant_t	*p;
	MSUTopic_t	*tp = NULL;
	MSUPartition_t	*pp = NULL;

	msp_init ();

	/* log_printf (SEC_ID, 0, "MSP: Creating a default 'allow all' policy for participant\r\n"); */

	/* insert a default, allow all policy */
	p = calloc (1, sizeof (MSParticipant_t));
	if (!p)
		fatal_printf ("out-of-memory for participant (user) policy!\r\n");
	memcpy (p->name, "*", strlen ("*") + 1);	/* wildcard participant name */
	p->access = DS_SECRET;
	p->blacklist = 0;

	/* Default topic rule: any topic in any domain. */
	LIST_INIT (p->topics);
	if (!(tp = calloc (1, sizeof (MSUTopic_t))))
		fatal_printf ("Out of memory!\r\n");
	LIST_ADD_TAIL (p->topics, *tp);
	tp->id = ~0;			/* all domains */
	tp->topic.name = NULL;		/* name == '*' */
	tp->topic.mode = TA_ALL;
	tp->topic.index = 0;		/* index 0 marks the implicit default rule */
	tp->topic.blacklist = 0;
	p->ntopics++;

	/* Default partition rule: any partition in any domain. */
	LIST_INIT (p->partitions);
	if (!(pp = calloc (1, sizeof (MSUPartition_t))))
		fatal_printf ("Out of memory!\r\n");
	LIST_ADD_TAIL (p->partitions, *pp);
	pp->id = ~0;			/* all domains */
	pp->partition.name = NULL;	/* name == '*' */
	pp->partition.mode = TA_ALL;
	pp->partition.index = 0;
	pp->partition.blacklist = 0;
	p->npartitions++;

	LIST_ADD_TAIL (participants, *p);
	/* Handles are 1-based; no bounds check on id_handles[] here --
	   presumably num_ids stays below the array size; verify. */
	p->handle = ++num_ids;
	id_handles [p->handle] = p;
	return (p->handle);
}
void DoAtaAddCallback (struct BlkReq *blkreq) { struct Ata *ata; ata = blkreq->unitp; LIST_ADD_TAIL (&ata->callback_list, blkreq->callback, callback_entry); blkreq->error = 0; blkreq->rc = 0; }
/*
 * Activate a high-level service routine: bump its pending count, record
 * its argument, and enqueue it on the per-priority list (setting the
 * corresponding bit in hsr_bitmap) unless it is already queued.
 */
void activiate_hsr(hsr_t *hsr, void *data)
{
	BUG_ON(hsr->priority >= HSR_PRIORITY_MAX_NR);

	hsr->data = data;
	hsr->count++;

	/* Already pending on its priority list: nothing more to do. */
	if (LIST_INLIST(&hsr->node))
		return;

	LIST_ADD_TAIL(&hsr_array[hsr->priority], &hsr->node);
	hsr_bitmap |= (1 << hsr->priority);
}
/*
 * Put the calling process to sleep on the given rendezvous point.
 * The process is queued on the rendez's process list, removed from the
 * ready queues and control is yielded via __KernelExit().
 */
void Sleep (struct Rendez *rendez)
{
	struct Process *proc;

	proc = GetCurrentProcess();

	DisablePreemption();

	proc->sleeping_on = rendez;
	proc->state = PROC_STATE_SLEEP;

	LIST_ADD_TAIL (&rendez->process_list, proc, rendez_link);
	SchedUnready (proc);
	__KernelExit();
}
/*
 * PutCachedSegment();
 *
 * Move the caller-owned segment containing 'addr' into the segment cache:
 * the segment id is copied out to *out_segment_id, the segment is linked
 * into the cache hash table and the LRU list, and its mapping is removed
 * from the owner's address space.
 *
 * Returns 0 on success or paramErr when 'addr' does not resolve to an
 * allocated segment owned by the caller.
 */
SYSCALL int PutCachedSegment (vm_addr addr, bits32_t flags, uint64 *out_segment_id)
{
	struct Process *current;
	struct Segment *seg;
	uint64 segment_id;
	uint64 key;

	current = GetCurrentProcess();
	seg = SegmentFind (addr);

	/* Only an allocated segment owned by the caller may be cached. */
	if (seg == NULL || MEM_TYPE(seg->flags) != MEM_ALLOC || seg->owner != current)
		return paramErr;

	segment_id = seg->segment_id;
	key = segment_id % CACHE_HASH_SZ;

	/* Copy the id out before preemption is disabled -- presumably
	   CopyOut may fault/block on user memory; confirm. */
	CopyOut (out_segment_id, &segment_id, sizeof (segment_id));

	DisablePreemption();

	LIST_ADD_HEAD (&cache_hash[key], seg, hash_link);

	if (flags & CACHE_AGE)
	{
		/* Aged entries are inserted just before the previously aged
		   segment, or at the LRU tail if none exists yet. */
		if (last_aged_seg != NULL)
		{
			LIST_INSERT_BEFORE (&cache_lru_list, last_aged_seg, seg, lru_link);
		}
		else
		{
			LIST_ADD_TAIL (&cache_lru_list, seg, lru_link);
		}

		last_aged_seg = seg;
	}
	else
	{
		/* Most-recently-used position. */
		LIST_ADD_HEAD (&cache_lru_list, seg, lru_link);
	}

	/* Unmap the segment from the owner's address space. */
	PmapRemoveRegion (seg);
	return 0;
}
/*
 * AddInterruptHandler();
 *
 * Register the calling process as a handler for 'irq'.  Allocates an
 * ISRHandler from the free pool, binds it to a new object handle, links
 * it onto the per-IRQ handler list and unmasks the IRQ when it is the
 * first handler registered for it.
 *
 * Returns the new handle, privilegeErr when the caller lacks I/O
 * privilege, or resourceErr when handles or ISR handler records are
 * exhausted.
 */
SYSCALL int AddInterruptHandler (int irq)
{
	struct ISRHandler *isr_handler;
	struct Process *current;
	int handle;

	current = GetCurrentProcess();

	/* Registering interrupt handlers requires I/O privilege. */
	if (!(current->flags & PROCF_ALLOW_IO))
		return privilegeErr;

	/* Check resource availability up front so the allocations below
	   cannot fail once preemption is disabled. */
	if (free_handle_cnt < 1 || free_isr_handler_cnt < 1)
		return resourceErr;

	DisablePreemption();

	handle = AllocHandle();

	isr_handler = LIST_HEAD (&free_isr_handler_list);
	LIST_REM_HEAD (&free_isr_handler_list, isr_handler_entry);
	free_isr_handler_cnt --;

	isr_handler->irq = irq;
	isr_handler->handle = handle;
	SetObject (current, handle, HANDLE_TYPE_ISR, isr_handler);

	/* Interrupts disabled while linking -- presumably the handler list
	   is also walked from interrupt context; confirm. */
	DisableInterrupts();
	LIST_ADD_TAIL (&isr_handler_list[isr_handler->irq], isr_handler, isr_handler_entry);
	irq_handler_cnt[irq] ++;

	/* First handler for this IRQ: unmask it at the controller. */
	if (irq_handler_cnt[irq] == 1)
		UnmaskInterrupt(irq);

	EnableInterrupts();

	return handle;
}
struct CDNode *CDAllocNode (struct CDSB *cdsb, struct ISODirEntry *idir) { struct CDNode *node; if ((node = KMalloc (sizeof (struct CDNode))) != NULL) { node->extent_start = Iso733 (idir->extent); node->size = Iso733 (idir->size); node->flags = idir->flags[0]; node->cdsb = cdsb; node->reference_cnt = 1; /* CDSetTime (cdsb, node, idir, ST_ATIME | ST_MTIME | ST_CTIME); */ LIST_ADD_TAIL (&cdsb->node_list, node, node_entry); LIST_INIT (&node->filp_list); } return node; }
/*
 * Allocate a FatNode for the directory entry 'dirent' located at the
 * given sector/offset, link it onto the FatSB's node list and return it.
 * Returns NULL when out of memory.  The node starts with a reference
 * count of 1, cleared cluster/offset hints and an empty filp list.
 */
struct FatNode *AllocNode (struct FatSB *fsb, struct FatDirEntry *dirent, uint32 sector, uint32 offset)
{
	struct FatNode *node;

	node = KMalloc (sizeof (struct FatNode));
	if (node == NULL)
		return NULL;

	MemCpy (&node->dirent, dirent, sizeof (struct FatDirEntry));

	node->fsb = fsb;
	node->reference_cnt = 1;
	node->dirent_sector = sector;
	node->dirent_offset = offset;
	node->hint_cluster = 0;
	node->hint_offset = 0;

	LIST_ADD_TAIL (&fsb->node_list, node, node_entry);
	LIST_INIT (&node->filp_list);

	return node;
}
void InitProcessTables (void) { uint32 t; struct Process *proc; struct Timer *timer; struct ISRHandler *isr_handler; struct Channel *channel; struct Handle *handle; struct Parcel *parcel; KPRINTF ("InitProcessTables()"); for (t=0;t<NIRQ;t++) { LIST_INIT (&isr_handler_list[t]); } for (t=0; t<32; t++) { CIRCLEQ_INIT (&realtime_queue[t]); } LIST_INIT (&stride_queue); LIST_INIT (&free_process_list) for (t=0; t<max_process; t++) { proc = (struct Process *)((uint8 *)process_table + t*PROCESS_SZ); LIST_ADD_TAIL (&free_process_list, proc, free_entry); proc->state = PROC_STATE_UNALLOC; } for (t=0; t<JIFFIES_PER_SECOND; t++) { LIST_INIT(&timing_wheel[t]); } softclock_seconds = hardclock_seconds = 0; softclock_jiffies = hardclock_jiffies = 0; LIST_INIT (&free_timer_list); for (t=0; t < max_timer; t++) { timer = &timer_table[t]; LIST_ADD_TAIL (&free_timer_list, timer, timer_entry); } LIST_INIT (&free_isr_handler_list); for (t=0; t < max_isr_handler; t++) { isr_handler = &isr_handler_table[t]; LIST_ADD_TAIL (&free_isr_handler_list, isr_handler, isr_handler_entry); } LIST_INIT (&free_channel_list); for (t=0; t < max_channel; t++) { channel = &channel_table[t]; LIST_ADD_TAIL (&free_channel_list, channel, link); } LIST_INIT (&free_handle_list); for (t = 0; t < max_handle; t++) { handle = &handle_table[t]; handle->type = HANDLE_TYPE_FREE; handle->pending = 0; handle->owner = NULL; handle->object = NULL; handle->flags = 0; LIST_ADD_TAIL (&free_handle_list, handle, link); } LIST_INIT (&free_parcel_list); for (t=0; t < max_parcel; t++) { parcel = &parcel_table[t]; LIST_ADD_TAIL (&free_parcel_list, parcel, link); } }
/*
 * CondTimedWait();
 *
 * Like CondWait(), but with a timeout: atomically releases 'mutex',
 * blocks on 'cond' with a one-shot timer armed, then reacquires the
 * mutex.  Returns 0 on a normal wakeup, -1 on timeout, and 0 if called
 * before the scheduler is up.
 *
 * Timeout detection protocol: the timer callout is handed &cond -- the
 * address of the *local* pointer variable -- and presumably clears it to
 * NULL when the timer fires; hence the volatile qualifier on 'cond' and
 * the NULL test at the end.  TODO confirm against CondTimedWaitCallout().
 */
int CondTimedWait (volatile struct Cond *cond, struct Mutex *mutex, struct TimeVal *tv)
{
	struct Process *proc;
	struct Timer timer;

	if (current_process == NULL)
		return 0;

	DisableInterrupts();

	if (mutex->locked == TRUE)
	{
		/* Release Mutex: hand ownership to the first blocked waiter
		   if any, otherwise mark it free. */
		proc = LIST_HEAD (&mutex->blocked_list);

		if (proc != NULL)
		{
			LIST_REM_HEAD (&mutex->blocked_list, blocked_entry);
			proc->state = PROC_STATE_READY;
			SchedReady (proc);
		}
		else
		{
			mutex->locked = FALSE;
		}
	}
	else
	{
		KPANIC ("CondTimedWait() on a free mutex");
	}

	/* Block current process on Condvar */
	LIST_ADD_TAIL (&cond->blocked_list, current_process, blocked_entry);
	SetTimer (&timer, TIMER_TYPE_RELATIVE, tv, &CondTimedWaitCallout, &cond);
	current_process->state = PROC_STATE_COND_BLOCKED;
	SchedUnready (current_process);
	Reschedule();
	CancelTimer (&timer);

	/* Now acquire mutex: block until handed over, or take it directly. */
	if (mutex->locked == TRUE)
	{
		LIST_ADD_TAIL (&mutex->blocked_list, current_process, blocked_entry);
		current_process->state = PROC_STATE_MUTEX_BLOCKED;
		SchedUnready (current_process);
		Reschedule();
	}
	else
	{
		mutex->locked = TRUE;
	}

	EnableInterrupts();

	/* 'cond' was zapped by the timer callout if the timeout fired. */
	if (cond == NULL)
		return -1;
	else
		return 0;
}
/*
 * reader_add_fragment -- process an incoming DataFrag submessage for a
 * (best-effort) reader.  Fragments from a given writer are accumulated
 * in a per-writer RemWriter context holding a single change with a
 * FragInfo_t bitmap; once every fragment has arrived the reassembled
 * sample is pushed into the reader cache and the context is torn down.
 * While fragments are outstanding a timer guards against stalls.
 */
void reader_add_fragment (READER       *rp,
			  Change_t     *cp,
			  KeyHash_t    *hp,
			  DataFragSMsg *fragp)
{
	Reader_t	*r = (Reader_t *) rp->endpoint.endpoint;
	RemWriter_t	*rwp, *fwp;
	CCREF		*refp;
	FragInfo_t	*fip;
	Change_t	*ncp;
	unsigned	max_frags;

	/*log_printf (RTPS_ID, 0, "reader_add_fragment: snr:%u, start:%u, num:%u, fsize:%u, tsize:%u\r\n", fragp->writer_sn.low, fragp->frag_start, fragp->num_fragments, fragp->frag_size, fragp->sample_size);*/

	/* Check if we already got fragments from writer. */
	fwp = NULL;
	LIST_FOREACH (rp->rem_writers, rwp)
		if ((refp = LIST_HEAD (rwp->rw_changes)) != NULL &&
		    refp->u.c.change->c_writer == cp->c_writer) {
			fwp = rwp;
			break;
		}

	/* If this is the first, add a new RemWriter for the fragment,
	   allocate all relevant data, and start the fragment timer. */
	if (!fwp) {
		/* Reject fragments beyond the total implied by sample_size. */
		max_frags = (fragp->sample_size + fragp->frag_size - 1) / fragp->frag_size;
		if (fragp->frag_start + fragp->num_fragments - 1 > max_frags)
			return;

		if ((rwp = mds_pool_alloc (&rtps_mem_blocks [MB_REM_WRITER])) == NULL) {
			warn_printf ("reader_add_fragment: no memory for fragment!");
			return;
		}
		memset (rwp, 0, sizeof (RemWriter_t));
		rwp->rw_reader = rp;
		LIST_INIT (rwp->rw_changes);
		rp->rem_writers.count++;
		fwp = rwp;
		LIST_ADD_TAIL (rp->rem_writers, *rwp);

		/* New change that will receive the reassembled sample. */
		ncp = hc_change_new ();
		if (!ncp) {
			warn_printf ("reader_add_fragment: no memory for change!");
			goto no_change_mem;
		}
		ncp->c_kind = cp->c_kind;
		ncp->c_writer = cp->c_writer;
		ncp->c_time = cp->c_time;
		ncp->c_seqnr = cp->c_seqnr;
		refp = ccref_add (&rwp->rw_changes, ncp, 0, 1, CS_RECEIVED);
		if (!refp) {
			warn_printf ("reader_add_fragment: no memory for list element!");
			goto no_ref_mem;
		}
		fip = rfraginfo_create (refp, fragp, max_frags);
		if (!fip) {
			warn_printf ("reader_add_fragment: no memory for fragment info!");
			goto no_frag_mem;
		}
	}
	else {
		/* Existing context for this writer. */
		refp = LIST_HEAD (rwp->rw_changes);
		fip = refp->fragments;
		ncp = refp->u.c.change;
		if (fip->fsize != fragp->frag_size ||
		    fip->length != fragp->sample_size ||
		    !SEQNR_EQ (ncp->c_seqnr, fragp->writer_sn)) {

			/* Incorrect fragment context: reset it. */
			fip = rfraginfo_update (refp, fragp);
			if (!fip)
				goto cleanup;
		}
		else if (fragp->frag_start + fragp->num_fragments - 1 > fip->total)
			return;
	}

	/* Update key info if present. */
	if (hp) {
		fip->hash = *hp;
		fip->hp = &fip->hash;
		fip->key = fip->hash.hash;
		fip->keylen = 12;
	}

	/* Mark the fragment as correctly received. */
	mark_fragment (fip, fragp, cp);

	/* If all fragments received correctly, cleanup the context. */
	if (!fip->num_na) {
		ncp->c_db = fip->data;
		ncp->c_length = fip->length;
		ncp->c_data = fip->data->data;
		rcl_access (fip->data);
		fip->data->nrefs++;
		rcl_done (fip->data);
		rfraginfo_delete (refp);
		reader_cache_add_key (rp, ncp, &fip->hash, fip->key, fip->keylen);

		/* NOTE(review): control falls through into 'cleanup' here, so
		   refp/rwp are also released on the success path and
		   rfraginfo_delete() runs a second time while 'fip' was just
		   referenced above -- verify rfraginfo_delete() is idempotent
		   and that the fragment info remains valid for those uses. */
    cleanup:
		if (fip) {
			FRAGSC_TMR_STOP (rwp, &fip->timer);
			rfraginfo_delete (refp);
		}
		mds_pool_free (&rtps_mem_blocks [MB_CCREF], refp);
		remote_writer_remove (rp, rwp);
		mds_pool_free (&rtps_mem_blocks [MB_REM_WRITER], rwp);
	}
	else {
		/* Fragments still missing: (re)start the guard timer. */
		FRAGSC_TMR_START (rwp,
				  &fip->timer,
				  TICKS_PER_SEC * 2,
				  (uintptr_t) rwp,
				  reader_frag_to,
				  &r->r_lock);
	}
	return;

	/* Unwind partial allocations in reverse order. */
    no_frag_mem:
	mds_pool_free (&rtps_mem_blocks [MB_CCREF], refp);

    no_ref_mem:
	hc_change_free (ncp);

    no_change_mem:
	mds_pool_free (&rtps_mem_blocks [MB_REM_WRITER], rwp);
	return;
}
int AllocDupAddressSpace (struct AddressSpace *src_as, struct AddressSpace *dst_as) { struct MemRegion *src_mr, *dst_mr; int error = 0; MutexLock (&vm_mutex); if (PmapInit (dst_as) == TRUE) { LIST_INIT (&dst_as->sorted_memregion_list); LIST_INIT (&dst_as->free_memregion_list); dst_as->hint = NULL; dst_as->page_cnt = src_as->page_cnt; src_mr = LIST_HEAD (¤t_process->user_as->sorted_memregion_list); while (src_mr != NULL && error == 0) { if ((dst_mr = LIST_HEAD (&unused_memregion_list)) != NULL) { LIST_REM_HEAD (&unused_memregion_list, unused_entry); LIST_ADD_TAIL (&dst_as->sorted_memregion_list, dst_mr, sorted_entry); free_memregion_cnt--; dst_mr->base_addr = src_mr->base_addr; dst_mr->ceiling_addr = src_mr->ceiling_addr; dst_mr->as = dst_as; dst_mr->type = src_mr->type; dst_mr->prot = src_mr->prot; dst_mr->flags = src_mr->flags; dst_mr->pageframe_hint = NULL; LIST_INIT (&dst_mr->pageframe_list); if (dst_mr->type == MR_TYPE_ANON) { if (AllocDupPageframes (dst_mr, src_mr) != 0) { error = -1; } } else if (dst_mr->type == MR_TYPE_FREE) { LIST_ADD_TAIL (&dst_as->free_memregion_list, dst_mr, free_entry); } } else { error = -1; } src_mr = LIST_NEXT (src_mr, sorted_entry); } } else { error = -1; } MutexUnlock (&vm_mutex); KASSERT (error != -1); return error; }
/*
 * Dispatch one ILM message to the IES task.  Only MSG_ID_IES_JOB_REQ is
 * handled: CANCEL requests unlink and cancel the job; START requests
 * either make the job current, or -- when a current job exists -- keep
 * the job with the smaller jobType value as current (presumably smaller
 * value means higher priority; confirm) and queue the other on the
 * 'normal' or 'lowest' pending list according to its jobType.
 */
static void _ies_task_dispatch_message(ilm_struct *pIlm)
{
    /*----------------------------------------------------------------*/
    /* Local Variables                                                */
    /*----------------------------------------------------------------*/
    srv_ies_ilm_job_request *pReq;
    srv_ies_job *pJob;
    srv_ies_job *pLow;

    /*----------------------------------------------------------------*/
    /* Code Body                                                      */
    /*----------------------------------------------------------------*/
    switch(pIlm->msg_id)
    {
    case MSG_ID_IES_JOB_REQ:
        pReq = (srv_ies_ilm_job_request*)pIlm->local_para_ptr;
        ASSERT(NULL != pReq);
        pJob = pReq->pJob;
        ASSERT(NULL != pJob);
        ASSERT(KAL_FALSE == g_srv_ies_job_deinit);

        /* Stale request: the job record was reused since the message
           was posted -- drop it. */
        if(!_srv_ies_job_compare_seq_num(pReq->seqNum, pJob->seqNum))
        {
            kal_trace(MOD_IES, TRACE_GROUP_2, "_ies_task_dispatch_message got inconsistent sequence number %d, %d",
                      pReq->seqNum, pReq->pJob->seqNum);
            break;
        }

        if (SRV_IES_JOB_REQUEST_CANCEL == pReq->type)
        {
            /* Unlink the job from whatever pending list it sits on. */
            if (!LIST_EMPTY(((srv_ies_list_head_struct*)pJob)))
            {
                LIST_DEL(((srv_ies_list_head_struct*)pJob));
            }
            _ies_task_job_handle_cancel(pJob);
            if (pJob == g_ies_task_context.pJob)
            {
                g_ies_task_context.pJob = NULL;
            }
        }
        else
        {
            ASSERT(SRV_IES_JOB_REQUEST_START == pReq->type);
            ASSERT(SRV_IES_JOB_STATE_FINISHED != pJob->state);
            if (g_ies_task_context.pJob)
            {
                if (g_ies_task_context.pJob->jobType > pJob->jobType)
                {
                    /* New job preempts: displace the current one. */
                    pLow = g_ies_task_context.pJob;
                    g_ies_task_context.pJob = pJob;
                    ASSERT(SRV_IES_JOB_STATE_FINISHED != pLow->state);
                    /* Displaced job goes to the head of its queue. */
                    switch(pLow->jobType)
                    {
                    case SRV_IES_JOB_TYPE_RENDER_PREVIEW:
                    case SRV_IES_JOB_TYPE_RENDER_BUFFER:
                    case SRV_IES_JOB_TYPE_RENDER_FILE:
                        LIST_ADD_HEAD(&(g_ies_task_context.normal), (srv_ies_list_head_struct*)pLow);
                        break;
                    case SRV_IES_JOB_TYPE_CREATE_META:
                        LIST_ADD_HEAD(&(g_ies_task_context.lowest), (srv_ies_list_head_struct*)pLow);
                        break;
                    default:
                        ASSERT(0);
                    }
                }
                else
                {
                    /* Current job keeps running: queue the new one at
                       the tail of its priority list. */
                    pLow = pJob;
                    switch(pLow->jobType)
                    {
                    case SRV_IES_JOB_TYPE_RENDER_PREVIEW:
                    case SRV_IES_JOB_TYPE_RENDER_BUFFER:
                    case SRV_IES_JOB_TYPE_RENDER_FILE:
                        LIST_ADD_TAIL(&(g_ies_task_context.normal),
                                      (srv_ies_list_head_struct*)pLow);
                        break;
                    case SRV_IES_JOB_TYPE_CREATE_META:
                        LIST_ADD_TAIL(&(g_ies_task_context.lowest), (srv_ies_list_head_struct*)pLow);
                        break;
                    default:
                        ASSERT(0);
                    }
                }
                /* The queued job becomes PENDING unless it was cancelled
                   meanwhile (state is mutex-protected). */
                kal_take_mutex(g_srv_ies_job_mutex);
                if (SRV_IES_JOB_STATE_CANCELLED != pLow->state)
                {
                    pLow->state = SRV_IES_JOB_STATE_PENDING;
                }
                kal_give_mutex(g_srv_ies_job_mutex);
            }
            else
            {
                ASSERT(SRV_IES_JOB_STATE_FINISHED != pJob->state);
                g_ies_task_context.pJob = pJob;
            }
            /* Kick the current job; a nonzero return clears it --
               presumably it completed or failed immediately; confirm
               _ies_task_job_handle_start() return semantics. */
            if (_ies_task_job_handle_start(g_ies_task_context.pJob))
            {
                g_ies_task_context.pJob = NULL;
            }
        }
        break;
    default:
        break;
    }
}
/*
 * root_beginio -- handle a filesystem request addressed to the root ("/")
 * handler.  All requests complete immediately (IOF_QUICK).  Most file
 * operations are unsupported (ENOSYS); stat commands return synthesized
 * read-only directory attributes and the directory-enumeration commands
 * iterate over the mount list.
 */
void root_beginio (void *ioreq)
{
	struct FSReq *fsreq = ioreq;
	struct RootFilp *filp;
	struct Mount *mount;
	int len;

	KPRINTF ("root_beginio()");

	fsreq->flags |= IOF_QUICK;

	switch (fsreq->cmd)
	{
		case FS_CMD_OPEN:
			fsreq->error = ENOSYS;
			fsreq->rc = -1;
			break;

		case FS_CMD_PIPE:
			fsreq->error = ENOSYS;
			fsreq->rc = -1;
			break;

		case FS_CMD_CLOSE:
			fsreq->error = ENOSYS;
			fsreq->rc = -1;
			break;

		case FS_CMD_DUP:
			/* Might want to dup() the handle? */
			fsreq->error = ENOSYS;
			fsreq->rc = -1;
			break;

		case FS_CMD_READ:
			fsreq->error = ENOSYS;
			fsreq->nbytes_transferred = -1;
			fsreq->rc = -1;
			break;

		case FS_CMD_WRITE:
			fsreq->error = ENOSYS;
			fsreq->nbytes_transferred = -1;
			fsreq->rc = -1;
			break;

		case FS_CMD_LSEEK:
			fsreq->error = ENOSYS;
			fsreq->position = -1;
			break;

		case FS_CMD_UNLINK:
			fsreq->error = ENOSYS;
			fsreq->rc = -1;
			break;

		case FS_CMD_RENAME:
			fsreq->error = ENOSYS;
			fsreq->rc = -1;
			break;

		case FS_CMD_FTRUNCATE:
			fsreq->error = ENOSYS;
			fsreq->rc = -1;
			break;

		case FS_CMD_FSTAT:
			/* Synthesized attributes: world-readable directory. */
			fsreq->stat->st_mode = S_IFDIR | (S_IRUSR | S_IRGRP | S_IROTH);
			fsreq->stat->st_nlink = 1;	/* ??? 0 ??? */
			fsreq->stat->st_uid = 1;
			fsreq->stat->st_gid = 1;
			fsreq->stat->st_rdev = 5;
			fsreq->stat->st_size = 0;
			fsreq->stat->st_atime = 0;
			fsreq->stat->st_mtime = 0;
			fsreq->stat->st_ctime = 0;
			fsreq->stat->st_blocks = 0;
			fsreq->error = 0;
			fsreq->rc = 0;
			break;

		case FS_CMD_STAT:
			/* Same synthesized attributes as FSTAT. */
			fsreq->stat->st_mode = S_IFDIR | (S_IRUSR | S_IRGRP | S_IROTH);
			fsreq->stat->st_nlink = 1;	/* ??? 0 ??? */
			fsreq->stat->st_uid = 1;
			fsreq->stat->st_gid = 1;
			fsreq->stat->st_rdev = 5;
			fsreq->stat->st_size = 0;
			fsreq->stat->st_atime = 0;
			fsreq->stat->st_mtime = 0;
			fsreq->stat->st_ctime = 0;
			fsreq->stat->st_blocks = 0;
			fsreq->error = 0;
			fsreq->rc = 0;
			break;

		case FS_CMD_FSTATFS:
			fsreq->error = ENOSYS;
			fsreq->rc = -1;
			break;

		case FS_CMD_FSYNC:
			fsreq->error = ENOSYS;
			fsreq->rc = -1;
			break;

		case FS_CMD_SYNC:
			fsreq->error = ENOSYS;
			fsreq->rc = -1;
			break;

		case FS_CMD_MKDIR:
			fsreq->error = ENOSYS;
			fsreq->rc = -1;
			break;

		case FS_CMD_RMDIR:
			fsreq->error = ENOSYS;
			fsreq->rc = -1;
			break;

		case FS_CMD_OPENDIR:
		{
			/* Directory read position is a cursor into the mount list. */
			if ((filp = KMalloc (sizeof (struct RootFilp))) != NULL)
			{
				filp->device = &root_handler;
				filp->seek_mount = LIST_HEAD (&mount_list);
				LIST_ADD_TAIL (&root_filp_list, filp, filp_entry);
				fsreq->filp = filp;
				fsreq->device = &root_handler;
				fsreq->error = 0;
				fsreq->rc = 0;
			}
			else
			{
				/* NOTE(review): ENOSYS on allocation failure --
				   ENOMEM looks more accurate; confirm intent. */
				fsreq->error = ENOSYS;
				fsreq->rc = -1;
			}

			break;
		}

		case FS_CMD_CLOSEDIR:
		{
			filp = fsreq->filp;
			LIST_REM_ENTRY (&root_filp_list, filp, filp_entry);
			KFree (filp);
			fsreq->error = 0;
			fsreq->rc = 0;
			break;
		}

		case FS_CMD_READDIR:
			/* Return the next mount-point name and advance the cursor. */
			filp = fsreq->filp;
			mount = filp->seek_mount;

			if (mount != NULL)
			{
				len = StrLen (mount->name);

				if (len + 1 <= NAME_MAX)
				{
					CopyOut (fsreq->as, &fsreq->dirent->d_name, mount->name, len + 1);
					filp->seek_mount = LIST_NEXT (mount, mount_list_entry);
					fsreq->error = 0;
					fsreq->rc = 0;
				}
				else
				{
					fsreq->dirent = NULL;
					fsreq->error = ENAMETOOLONG;
					fsreq->rc = -1;
				}
			}
			else
			{
				/* End of the mount list: no error, rc == -1. */
				fsreq->dirent = NULL;
				fsreq->error = 0;
				fsreq->rc = -1;
			}

			break;

		case FS_CMD_REWINDDIR:
			filp = fsreq->filp;
			filp->seek_mount = LIST_HEAD (&mount_list);
			fsreq->error = 0;
			fsreq->rc = 0;
			break;

		case FS_CMD_ISATTY:
			/* rc == 0: root is not a tty -- presumably; confirm the
			   rc convention for this command. */
			fsreq->error = 0;
			fsreq->rc = 0;
			break;

		case FS_CMD_TCGETATTR:
			fsreq->error = ENOTTY;
			fsreq->rc = -1;
			break;

		case FS_CMD_TCSETATTR:
			fsreq->error = ENOTTY;
			fsreq->rc = -1;
			break;

		case FS_CMD_IOCTL:
			fsreq->error = ENOSYS;
			fsreq->rc = -1;
			break;

		default:
		{
			KPANIC ("ROOT Unknown command");
			fsreq->error = ENOSYS;
			fsreq->rc = -1;
			break;
		}
	}
}
/*
 * ep_create -- look up or create the Endpoint record for a discovered
 * DDS reader/writer identified by 'key'.  Creates the owning Topic
 * record on first sight of 'topic_name' and chains the endpoint onto the
 * topic's writer or reader list.  For writers, the publication data is
 * (re)captured.  May auto-start a local reader when the new writer's
 * topic matches 'auto_filter'.  Returns the endpoint, or NULL when out
 * of memory.
 */
Endpoint *ep_create (DDS_DomainParticipant            part,
		     DDS_BuiltinTopicKey_t           *key,
		     const char                      *topic_name,
		     const char                      *type_name,
		     DDS_PublicationBuiltinTopicData *data,
		     int                              writer)
{
	Topic    *tp;
	Endpoint *ep;

	printf ("* New %s (%s/%s)\r\n", (writer) ? "writer" : "reader", topic_name, type_name);

	/* Already known?  Refresh the writer data and return the entry. */
	ep = ep_lookup (key);
	if (ep) {
		if (writer) {
			ep->data = *data;
			tp = ep->topic;
			/* Point the copied data at the topic's owned strings. */
			ep->data.topic_name = tp->topic_name;
			ep->data.type_name = tp->type_name;
		}
		return (ep);
	}
	ep = malloc (sizeof (Endpoint));
	if (!ep) {
		fprintf (stderr, "Not enough memory to create endpoint!\r\n");
		return (NULL);
	}
	ep->topic = tp = t_lookup (topic_name);
	if (!tp) {
		/* First endpoint on this topic: create the Topic record. */
		ep->link = NULL;
		tp = malloc (sizeof (Topic));
		if (!tp) {
			fprintf (stderr, "Not enough memory to create topic!\r\n");
			free (ep);
			return (NULL);
		}
		if (writer) {
			tp->writers = ep;
			tp->readers = NULL;
		}
		else {
			tp->readers = ep;
			tp->writers = NULL;
		}
		/* NOTE(review): strdup() results are not checked here. */
		tp->topic_name = strdup (topic_name);
		tp->type_name = strdup (type_name);
		tp->active = 0;
		tp->topic = NULL;
		tp->dtype = NULL;
		tp->sub = NULL;
		tp->ts = NULL;
		tp->ndata = tp->ndispose = tp->nnowriter = 0;
		ep->topic = tp;
		LIST_ADD_TAIL (topics, *tp);
	}
	else if (writer) {
		/* Prepend to the topic's singly-linked writer chain. */
		ep->link = tp->writers;
		tp->writers = ep;
	}
	else {
		/* Prepend to the topic's singly-linked reader chain. */
		ep->link = tp->readers;
		tp->readers = ep;
	}
	ep->writer = writer;
	ep->key = *key;
	if (ep->writer) {
		ep->data = *data;
		ep->data.topic_name = tp->topic_name;
		ep->data.type_name = tp->type_name;
	}
	LIST_ADD_TAIL (endpoints, *ep);

	/* Endpoint successfully added -- check if we need a reader.
	   ep->link == NULL means this is the topic's first writer. */
	if (writer && !ep->link && auto_filter [0] &&
	    !nmatch (auto_filter, tp->topic_name, NM_CASEFOLD))
		start_reader (part, tp);
	return (ep);
}