int inet_pton(int af, FAR const char *src, FAR void *dst)
{
#ifndef CONFIG_NET_IPv6
  size_t srcoffset;
  size_t numoffset;
  int value;
  int ndots;
  uint8_t ch;
  char numstr[4];
  uint8_t *ip;

  DEBUGASSERT(src && dst);

  if (af != AF_INET)
    {
      set_errno(EAFNOSUPPORT);
      return -1;
    }

  (void)memset(dst, 0, sizeof(struct in_addr));

  ip        = (uint8_t *)dst;
  srcoffset = 0;
  numoffset = 0;
  ndots     = 0;

  for (; ; )
    {
      ch = (uint8_t)src[srcoffset++];

      if (ch == '.' || ch == '\0')
        {
          if (ch == '.' && ndots >= 3)
            {
              /* Too many dots (a fourth separator would write past the
               * 4-byte address).
               */
              break;
            }

          if (numoffset <= 0)
            {
              /* Empty numeric string */
              break;
            }

          numstr[numoffset] = '\0';
          numoffset = 0;

          value = atoi(numstr);
          if (value < 0 || value > 255)
            {
              /* Out of range value */
              break;
            }

          ip[ndots] = (uint8_t)value;

          if (ch == '\0')
            {
              if (ndots != 3)
                {
                  /* Not enough dots */
                  break;
                }

              /* Return 1 if the conversion succeeds */
              return 1;
            }

          ndots++;
        }
      else if (ch >= '0' && ch <= '9')
        {
          numstr[numoffset++] = ch;
          if (numoffset >= 4)
            {
              /* Number is too long */
              break;
            }
        }
      else
        {
          /* Illegal character */
          break;
        }
    }

  /* Return zero if there is any problem parsing the input */
  return 0;

#else
  size_t srcoffset;
  size_t numoffset;
  long value;
  int nsep;
  int nrsep;
  uint8_t ch;
  char numstr[5];
  uint8_t ip[sizeof(struct in6_addr)];
  uint8_t rip[sizeof(struct in6_addr)];
  bool rtime;

  DEBUGASSERT(src && dst);

  if (af != AF_INET6)
    {
      set_errno(EAFNOSUPPORT);
      return -1;
    }

  (void)memset(dst, 0, sizeof(struct in6_addr));

  srcoffset = 0;
  numoffset = 0;
  nsep      = 0;
  nrsep     = 0;
  rtime     = false;

  for (; ; )
    {
      ch = (uint8_t)src[srcoffset++];

      if (ch == ':' || ch == '\0')
        {
          if (ch == ':' && (nsep + nrsep) >= 8)
            {
              /* Too many separators */
              break;
            }

          if (ch != '\0' && numoffset <= 0)
            {
              /* Empty numeric string:  this is the "::" abbreviation */

              if (rtime && nrsep > 1)
                {
                  /* Duplicate "::" separator */
                  break;
                }

              numoffset = 0;
              rtime     = true;
              continue;
            }

          numstr[numoffset] = '\0';
          numoffset = 0;

          value = strtol(numstr, NULL, 16);
          if (value < 0 || value > 0xffff)
            {
              /* Out of range value */
              break;
            }

          if (!rtime)
            {
              ip[(nsep << 1) + 0] = (uint8_t)((value >> 8) & 0xff);
              ip[(nsep << 1) + 1] = (uint8_t)((value >> 0) & 0xff);
              nsep++;
            }
          else
            {
              rip[(nrsep << 1) + 0] = (uint8_t)((value >> 8) & 0xff);
              rip[(nrsep << 1) + 1] = (uint8_t)((value >> 0) & 0xff);
              nrsep++;
            }

          if (ch == '\0' /* || ch == '/' */)
            {
              if ((nsep <= 1 && nrsep <= 0) ||
                  (nsep + nrsep) < 1 || (nsep + nrsep) > 8)
                {
                  /* Separator count problem */
                  break;
                }

              if (nsep > 0)
                {
                  memcpy(dst, &ip[0], nsep << 1);
                }

              if (nrsep > 0)
                {
                  memcpy(dst + (16 - (nrsep << 1)), &rip[0], nrsep << 1);
                }

              /* Return 1 if the conversion succeeds */
              return 1;
            }
        }
      else if ((ch >= '0' && ch <= '9') ||
               (ch >= 'a' && ch <= 'f') ||
               (ch >= 'A' && ch <= 'F'))
        {
          /* Accumulate hexadecimal digits (mirrors the decimal handling in
           * the IPv4 branch above).
           */

          numstr[numoffset++] = ch;
          if (numoffset >= 5)
            {
              /* Hex string is too long */
              break;
            }
        }
      else
        {
          /* Illegal character */
          break;
        }
    }

  /* Return zero if there is any problem parsing the input */
  return 0;
#endif
}
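/* Illustrative usage sketch (not part of the original sources, names are
 * hypothetical): converts a dotted-decimal string with the IPv4 branch of
 * inet_pton() above and distinguishes the three possible outcomes (1 on
 * success, 0 for a malformed string, -1 with errno == EAFNOSUPPORT for an
 * unsupported address family).
 */

static void inet_pton_usage_example(void)
{
  struct in_addr addr;
  int ret;

  ret = inet_pton(AF_INET, "192.168.1.10", &addr);
  if (ret == 1)
    {
      /* addr.s_addr now holds the address in network byte order */
    }
  else if (ret == 0)
    {
      /* The string was not a valid dotted-decimal address */
    }
  else
    {
      /* ret == -1: the family was not supported; errno is EAFNOSUPPORT */
    }
}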
TEST_END TEST_BEGIN(test_malloc_strtoumax) { struct test_s { const char *input; const char *expected_remainder; int base; int expected_errno; const char *expected_errno_name; uintmax_t expected_x; }; #define ERR(e) e, #e #define UMAX(x) ((uintmax_t)x##ULL) struct test_s tests[] = { {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX}, {"", "", 0, ERR(EINVAL), UINTMAX_MAX}, {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX}, {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX}, {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX}, {"42", "", 0, ERR(0), UMAX(42)}, {"+42", "", 0, ERR(0), UMAX(42)}, {"-42", "", 0, ERR(0), UMAX(-42)}, {"042", "", 0, ERR(0), UMAX(042)}, {"+042", "", 0, ERR(0), UMAX(042)}, {"-042", "", 0, ERR(0), UMAX(-042)}, {"0x42", "", 0, ERR(0), UMAX(0x42)}, {"+0x42", "", 0, ERR(0), UMAX(0x42)}, {"-0x42", "", 0, ERR(0), UMAX(-0x42)}, {"0", "", 0, ERR(0), UMAX(0)}, {"1", "", 0, ERR(0), UMAX(1)}, {"42", "", 0, ERR(0), UMAX(42)}, {" 42", "", 0, ERR(0), UMAX(42)}, {"42 ", " ", 0, ERR(0), UMAX(42)}, {"0x", "x", 0, ERR(0), UMAX(0)}, {"42x", "x", 0, ERR(0), UMAX(42)}, {"07", "", 0, ERR(0), UMAX(7)}, {"010", "", 0, ERR(0), UMAX(8)}, {"08", "8", 0, ERR(0), UMAX(0)}, {"0_", "_", 0, ERR(0), UMAX(0)}, {"0x", "x", 0, ERR(0), UMAX(0)}, {"0X", "X", 0, ERR(0), UMAX(0)}, {"0xg", "xg", 0, ERR(0), UMAX(0)}, {"0XA", "", 0, ERR(0), UMAX(10)}, {"010", "", 10, ERR(0), UMAX(10)}, {"0x3", "x3", 10, ERR(0), UMAX(0)}, {"12", "2", 2, ERR(0), UMAX(1)}, {"78", "8", 8, ERR(0), UMAX(7)}, {"9a", "a", 10, ERR(0), UMAX(9)}, {"9A", "A", 10, ERR(0), UMAX(9)}, {"fg", "g", 16, ERR(0), UMAX(15)}, {"FG", "G", 16, ERR(0), UMAX(15)}, {"0xfg", "g", 16, ERR(0), UMAX(15)}, {"0XFG", "G", 16, ERR(0), UMAX(15)}, {"z_", "_", 36, ERR(0), UMAX(35)}, {"Z_", "_", 36, ERR(0), UMAX(35)} }; #undef ERR #undef UMAX unsigned i; for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) { struct test_s *test = &tests[i]; int err; uintmax_t result; char *remainder; set_errno(0); result = malloc_strtoumax(test->input, &remainder, test->base); err = get_errno(); assert_d_eq(err, test->expected_errno, "Expected errno %s for \"%s\", base %d", test->expected_errno_name, test->input, test->base); assert_str_eq(remainder, test->expected_remainder, "Unexpected remainder for \"%s\", base %d", test->input, test->base); if (err == 0) { assert_ju_eq(result, test->expected_x, "Unexpected result for \"%s\", base %d", test->input, test->base); } } }
int nxtk_drawlinewindow(NXTKWINDOW hfwnd, FAR struct nxgl_vector_s *vector,
                        nxgl_coord_t width,
                        nxgl_mxpixel_t color[CONFIG_NX_NPLANES],
                        uint8_t caps)
{
  struct nxgl_trapezoid_s trap[3];
  struct nxgl_rect_s rect;
  int ret;

#ifdef CONFIG_DEBUG
  if (!hfwnd || !vector || width < 1 || !color)
    {
      set_errno(EINVAL);
      return ERROR;
    }
#endif

  /* Split the line into trapezoids */

  ret = nxgl_splitline(vector, trap, &rect, width);
  switch (ret)
    {
      /* 0: Line successfully broken up into three trapezoids.  Values in
       * traps[0], traps[1], and traps[2] are valid.
       */

      case 0:
        ret = nxtk_filltrapwindow(hfwnd, &trap[0], color);
        if (ret == OK)
          {
            ret = nxtk_filltrapwindow(hfwnd, &trap[1], color);
            if (ret == OK)
              {
                ret = nxtk_filltrapwindow(hfwnd, &trap[2], color);
              }
          }
        break;

      /* 1: Line successfully represented by one trapezoid.  Value in
       * traps[1] is valid.
       */

      case 1:
        ret = nxtk_filltrapwindow(hfwnd, &trap[1], color);
        break;

      /* 2: Line successfully represented by one rectangle.  Value in rect
       * is valid.
       */

      case 2:
        ret = nxtk_fillwindow(hfwnd, &rect, color);
        break;

      /* <0: On errors, a negated errno value is returned. */

      default:
        set_errno(EINVAL);
        return ERROR;
    }

  /* Draw circular caps at each end of the line to support better line joins */

  if (caps != NX_LINECAP_NONE && width >= 3)
    {
      nxgl_coord_t radius = width >> 1;

      /* Draw a circle at pt1 */

      ret = OK;
      if ((caps & NX_LINECAP_PT1) != 0)
        {
          ret = nxtk_fillcirclewindow(hfwnd, &vector->pt1, radius, color);
        }

      /* Draw a circle at pt2 */

      if (ret == OK && (caps & NX_LINECAP_PT2) != 0)
        {
          ret = nxtk_fillcirclewindow(hfwnd, &vector->pt2, radius, color);
        }
    }

  return ret;
}
RTP_BOOL SH7264_SMSC_open(PIFACE pi)
{
    DWORD mac[2], dwCnt, dwReg, dw;
    PSH7264_SMSC_SOFTC sc = iface_to_softc(pi);

    if (!sc)
    {
        RTP_DEBUG_ERROR("SH7264_SMSC_open: softc invalid!\r\n", NOVAR, 0, 0);
        set_errno(ENUMDEVICE);
        return(RTP_FALSE);
    }

    // Set Interface
    sc->iface = pi;
    iface = pi;

    // Setting port
    PJCR0 |= 0x3300;

    if (WORD_SWAP != 0xFFFFFFFF)    // Need at least one read from LAN chip before writing to WORD_SWAP register
    {
        WORD_SWAP = 0xFFFFFFFF;
    }

    ETHERNET_DELAY(10000);

    // Soft reset
    HW_CFG = HWCFG_SRST;
    dwCnt = 100000;
    do
    {
        ETHERNET_DELAY(10);
        dwReg = HW_CFG;
        dwCnt--;
    } while ((dwCnt > 0) && (dwReg & HWCFG_SRST));

    if (dwReg & HWCFG_SRST)
    {
        debug_printf("SH7264_SMSC_open: Error in Soft reset.\r\n");
        return(RTP_FALSE);
    }

    ETHERNET_DELAY(10000);

    // Read MAC address from EEPROM
    mac[0] = BIT_EDIANDW(SMSC9218_Read_Mac_Reg(ADDRL));
    mac[1] = BIT_EDIANDW(SMSC9218_Read_Mac_Reg(ADDRH));

    int macLen = __min(g_NetworkConfig.NetworkInterfaces[NETWORK_INTERFACE_INDEX_SH7264RSK].macAddressLen, sizeof(sc->mac_address));

    dw = (DWORD)&g_NetworkConfig.NetworkInterfaces[NETWORK_INTERFACE_INDEX_SH7264RSK].macAddressBuffer[0];
    if (((dw >= 0x00000000) && (dw <= 0x03000000)) || ((dw >= 0x20000000) && (dw <= 0x23000000)))
    {
        g_AM29DL_16_BS_DeviceTable.InitializeDevice(pBLOCK_CONFIG);
        g_AM29DL_16_BS_DeviceTable.Write(pBLOCK_CONFIG, (DWORD)&g_NetworkConfig.NetworkInterfaces[NETWORK_INTERFACE_INDEX_SH7264RSK].macAddressBuffer[0], 6, (BYTE *)mac, TRUE);
    }
    else
    {
        memcpy((void *)&g_NetworkConfig.NetworkInterfaces[NETWORK_INTERFACE_INDEX_SH7264RSK].macAddressBuffer[0], (void *)&mac[0], 6);
    }

    if (macLen > 0)
    {
        memcpy(&sc->mac_address[0], &g_NetworkConfig.NetworkInterfaces[NETWORK_INTERFACE_INDEX_SH7264RSK].macAddressBuffer[0], macLen);
    }
    else
    {
        RTP_DEBUG_ERROR("Device initialize without MAC address!!!\r\n", NOVAR, 0, 0);
    }

    // Now put in a dummy ethernet address
    rtp_memcpy(&pi->addr.my_hw_addr[0], sc->mac_address, 6);    // Get the ethernet address

    // Clear statistic information
    sc->stats.packets_in  = 0L;
    sc->stats.packets_out = 0L;
    sc->stats.bytes_in    = 0L;
    sc->stats.bytes_out   = 0L;
    sc->stats.errors_in   = 0L;
    sc->stats.errors_out  = 0L;

    if (RTP_FALSE == SH7264_SMSC_SetupDevice())
    {
        return RTP_FALSE;
    }

    rtp_irq_hook_interrupt((RTP_PFVOID) pi, (RTP_IRQ_FN_POINTER)SH7264_SMSC_recv, (RTP_IRQ_FN_POINTER) 0);

    return(RTP_TRUE);
}
int klpd_unreg(int did, idtype_t type, id_t id) { door_handle_t dh; int res = 0; proc_t *p; pid_t pid; projid_t proj; kproject_t *kpp = NULL; credklpd_t *ckp; switch (type) { case P_PID: pid = (pid_t)id; break; case P_PROJID: proj = (projid_t)id; kpp = project_hold_by_id(proj, crgetzone(CRED()), PROJECT_HOLD_FIND); if (kpp == NULL) return (set_errno(ESRCH)); break; default: return (set_errno(ENOTSUP)); } dh = door_ki_lookup(did); if (dh == NULL) { if (kpp != NULL) project_rele(kpp); return (set_errno(EINVAL)); } if (kpp != NULL) { mutex_enter(&klpd_mutex); if (kpp->kpj_klpd == NULL) res = ESRCH; else klpd_freelist(&kpp->kpj_klpd); mutex_exit(&klpd_mutex); project_rele(kpp); goto out; } else if ((int)pid > 0) { mutex_enter(&pidlock); p = prfind(pid); if (p == NULL) { mutex_exit(&pidlock); door_ki_rele(dh); return (set_errno(ESRCH)); } mutex_enter(&p->p_crlock); mutex_exit(&pidlock); } else if (pid == 0) { p = curproc; mutex_enter(&p->p_crlock); } else { res = klpd_unreg_dh(dh); goto out; } ckp = crgetcrklpd(p->p_cred); if (ckp != NULL) { crklpd_setreg(ckp, NULL); } else { res = ESRCH; } mutex_exit(&p->p_crlock); out: door_ki_rele(dh); if (res != 0) return (set_errno(res)); return (0); }
ssize_t sendfile(int outfd, int infd, off_t *offset, size_t count) { FAR uint8_t *iobuffer; FAR uint8_t *wrbuffer; off_t startpos = 0; ssize_t nbytesread; ssize_t nbyteswritten; ssize_t ntransferred; bool endxfr; /* Get the current file position. */ if (offset) { /* Use lseek to get the current file position */ startpos = lseek(infd, 0, SEEK_CUR); if (startpos == (off_t)-1) { return ERROR; } /* Use lseek again to set the new file position */ if (lseek(infd, *offset, SEEK_SET) == (off_t)-1) { return ERROR; } } /* Allocate an I/O buffer */ iobuffer = (FAR void *)lib_malloc(CONFIG_LIB_SENDFILE_BUFSIZE); if (!iobuffer) { set_errno(ENOMEM); return ERROR; } /* Now transfer 'count' bytes from the infd to the outfd */ for (ntransferred = 0, endxfr = false; ntransferred < count && !endxfr;) { /* Loop until the read side of the transfer comes to some conclusion */ do { /* Read a buffer of data from the infd */ nbytesread = read(infd, iobuffer, CONFIG_LIB_SENDFILE_BUFSIZE); /* Check for end of file */ if (nbytesread == 0) { /* End of file. Break out and return current number of bytes * transferred. */ endxfr = true; break; } /* Check for a read ERROR. EINTR is a special case. This function * should break out and return an error if EINTR is returned and * no data has been transferred. But what should it do if some * data has been transferred? I suppose just continue? */ else if (nbytesread < 0) { /* EINTR is not an error (but will still stop the copy) */ #ifndef CONFIG_DISABLE_SIGNALS if (errno != EINTR || ntransferred == 0) #endif { /* Read error. Break out and return the error condition. */ ntransferred = ERROR; endxfr = true; break; } } } while (nbytesread < 0); /* Was anything read? */ if (!endxfr) { /* Yes.. Loop until the read side of the transfer comes to some * conclusion. */ wrbuffer = iobuffer; do { /* Write the buffer of data to the outfd */ nbyteswritten = write(outfd, wrbuffer, nbytesread); /* Check for a complete (or parial) write. write() should not * return zero. */ if (nbyteswritten >= 0) { /* Advance the buffer pointer and decrement the number of bytes * remaining in the iobuffer. Typically, nbytesread will now * be zero. */ wrbuffer += nbyteswritten; nbytesread -= nbyteswritten; /* Increment the total number of bytes successfully transferred. */ ntransferred += nbyteswritten; } /* Otherwise an error occurred */ else { /* Check for a read ERROR. EINTR is a special case. This * function should break out and return an error if EINTR * is returned and no data has been transferred. But what * should it do if some data has been transferred? I * suppose just continue? */ #ifndef CONFIG_DISABLE_SIGNALS if (errno != EINTR || ntransferred == 0) #endif { /* Write error. Break out and return the error condition */ ntransferred = ERROR; endxfr = true; break; } } } while (nbytesread > 0); } } /* Release the I/O buffer */ lib_free(iobuffer); /* Return the current file position */ if (offset) { /* Use lseek to get the current file position */ off_t curpos = lseek(infd, 0, SEEK_CUR); if (curpos == (off_t)-1) { return ERROR; } /* Return the current file position */ *offset = curpos; /* Use lseek again to restore the original file position */ if (lseek(infd, startpos, SEEK_SET) == (off_t)-1) { return ERROR; } } /* Finally return the number of bytes actually transferred (or ERROR * if any failure occurred). */ return ntransferred; }
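/* Illustrative usage sketch (not part of the original sources, names are
 * hypothetical): copies up to 1024 bytes from 'infd', starting at byte 512,
 * into 'outfd'.  Because a non-NULL offset is passed, this implementation
 * reads from *offset, writes the resulting file position back through
 * *offset, and restores the original position of 'infd' before returning.
 * Both descriptors are assumed to be already open.
 */

static ssize_t sendfile_usage_example(int outfd, int infd)
{
  off_t offset = 512;
  ssize_t nsent;

  nsent = sendfile(outfd, infd, &offset, 1024);
  if (nsent < 0)
    {
      /* errno describes the failure (e.g. ENOMEM if the internal I/O buffer
       * could not be allocated).
       */

      return ERROR;
    }

  /* offset now points just past the last byte read from infd */

  return nsent;
}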
int shmctl(int shmid, int cmd, struct shmid_ds *buf)
{
  FAR struct shm_region_s *region;
  int ret;

  DEBUGASSERT(shmid >= 0 && shmid < CONFIG_ARCH_SHM_MAXREGIONS);
  region = &g_shminfo.si_region[shmid];
  DEBUGASSERT((region->sr_flags & SRFLAG_INUSE) != 0);

  /* Get exclusive access to the region data structure */

  ret = sem_wait(&region->sr_sem);
  if (ret < 0)
    {
      shmerr("ERROR: sem_wait failed: %d\n", ret);
      return ret;
    }

  /* Handle the request according to the received cmd */

  switch (cmd)
    {
      case IPC_STAT:
        {
          /* Place the current value of each member of the shmid_ds data
           * structure associated with shmid into the structure pointed to
           * by buf.
           */

          DEBUGASSERT(buf);
          memcpy(buf, &region->sr_ds, sizeof(struct shmid_ds));
        }
        break;

      case IPC_SET:
        {
          /* Set the value of the shm_perm.mode member of the shmid_ds
           * data structure associated with shmid to the corresponding
           * value found in the structure pointed to by buf.
           */

          region->sr_ds.shm_perm.mode = buf->shm_perm.mode;
        }
        break;

      case IPC_RMID:
        {
          /* Are any processes attached to the region? */

          if (region->sr_ds.shm_nattch > 0)
            {
              /* Yes.. just set the UNLINKED flag.  The region will be
               * removed when there are no longer any processes attached to
               * it.
               */

              region->sr_flags |= SRFLAG_UNLINKED;
            }
          else
            {
              /* No.. free the entry now */

              shm_destroy(shmid);

              /* Don't try anything further on the deleted region */

              return OK;
            }
        }
        break;

      default:
        shmerr("ERROR: Unrecognized command: %d\n", cmd);
        ret = -EINVAL;
        goto errout_with_semaphore;
    }

  /* Save the process ID of the last operation */

  region = &g_shminfo.si_region[shmid];
  region->sr_ds.shm_lpid = getpid();

  /* Save the time of the last shmctl() */

  region->sr_ds.shm_ctime = time(NULL);

  /* Release our lock on the entry */

  sem_post(&region->sr_sem);
  return ret;

errout_with_semaphore:
  sem_post(&region->sr_sem);
  set_errno(-ret);
  return ERROR;
}
static inline int file_vfcntl(int fildes, int cmd, va_list ap) { FAR struct filelist *list; FAR struct file *this_file; int err = 0; int ret = OK; /* Get the thread-specific file list */ list = sched_getfiles(); if (!list) { err = EMFILE; goto errout; } /* Was this file opened ? */ this_file = &list->fl_files[fildes]; if (!this_file->f_inode) { err = EBADF; goto errout; } switch (cmd) { case F_DUPFD: /* Return a new file descriptor which shall be the lowest numbered * available (that is, not already open) file descriptor greater than * or equal to the third argument, arg, taken as an integer of type * int. The new file descriptor shall refer to the same open file * description as the original file descriptor, and shall share any * locks. The FD_CLOEXEC flag associated with the new file descriptor * shall be cleared to keep the file open across calls to one of the * exec functions. */ { ret = file_dup(fildes, va_arg(ap, int)); } break; case F_GETFD: /* Get the file descriptor flags defined in <fcntl.h> that are associated * with the file descriptor fildes. File descriptor flags are associated * with a single file descriptor and do not affect other file descriptors * that refer to the same file. */ case F_SETFD: /* Set the file descriptor flags defined in <fcntl.h>, that are associated * with fildes, to the third argument, arg, taken as type int. If the * FD_CLOEXEC flag in the third argument is 0, the file shall remain open * across the exec functions; otherwise, the file shall be closed upon * successful execution of one of the exec functions. */ err = ENOSYS; break; case F_GETFL: /* Get the file status flags and file access modes, defined in <fcntl.h>, * for the file description associated with fildes. The file access modes * can be extracted from the return value using the mask O_ACCMODE, which is * defined in <fcntl.h>. File status flags and file access modes are associated * with the file description and do not affect other file descriptors that * refer to the same file with different open file descriptions. */ { ret = this_file->f_oflags; } break; case F_SETFL: /* Set the file status flags, defined in <fcntl.h>, for the file description * associated with fildes from the corresponding bits in the third argument, * arg, taken as type int. Bits corresponding to the file access mode and * the file creation flags, as defined in <fcntl.h>, that are set in arg shall * be ignored. If any bits in arg other than those mentioned here are changed * by the application, the result is unspecified. */ { int oflags = va_arg(ap, int); oflags &= FFCNTL; this_file->f_oflags &= ~FFCNTL; this_file->f_oflags |= oflags; } break; case F_GETOWN: /* If fildes refers to a socket, get the process or process group ID specified * to receive SIGURG signals when out-of-band data is available. Positive values * indicate a process ID; negative values, other than -1, indicate a process group * ID. If fildes does not refer to a socket, the results are unspecified. */ case F_SETOWN: /* If fildes refers to a socket, set the process or process group ID specified * to receive SIGURG signals when out-of-band data is available, using the value * of the third argument, arg, taken as type int. Positive values indicate a * process ID; negative values, other than -1, indicate a process group ID. If * fildes does not refer to a socket, the results are unspecified. 
*/ err = EBADF; /* Only valid on socket descriptors */ break; case F_GETLK: /* Get the first lock which blocks the lock description pointed to by the third * argument, arg, taken as a pointer to type struct flock, defined in <fcntl.h>. * The information retrieved shall overwrite the information passed to fcntl() in * the structure flock. If no lock is found that would prevent this lock from being * created, then the structure shall be left unchanged except for the lock type * which shall be set to F_UNLCK. */ case F_SETLK: /* Set or clear a file segment lock according to the lock description pointed to * by the third argument, arg, taken as a pointer to type struct flock, defined in * <fcntl.h>. F_SETLK can establish shared (or read) locks (F_RDLCK) or exclusive * (or write) locks (F_WRLCK), as well as to remove either type of lock (F_UNLCK). * F_RDLCK, F_WRLCK, and F_UNLCK are defined in <fcntl.h>. If a shared or exclusive * lock cannot be set, fcntl() shall return immediately with a return value of -1. */ case F_SETLKW: /* This command shall be equivalent to F_SETLK except that if a shared or exclusive * lock is blocked by other locks, the thread shall wait until the request can be * satisfied. If a signal that is to be caught is received while fcntl() is waiting * for a region, fcntl() shall be interrupted. Upon return from the signal handler, * fcntl() shall return -1 with errno set to [EINTR], and the lock operation shall * not be done. */ err = ENOSYS; /* Not implemented */ break; default: err = EINVAL; break; } errout: if (err != 0) { set_errno(err); return ERROR; } return ret; }
int task_restart(pid_t pid) { FAR struct tcb_s *rtcb; FAR struct task_tcb_s *tcb; FAR dq_queue_t *tasklist; irqstate_t flags; int errcode; #ifdef CONFIG_SMP int cpu; #endif int ret; /* Check if the task to restart is the calling task */ rtcb = this_task(); if ((pid == 0) || (pid == rtcb->pid)) { /* Not implemented */ errcode = ENOSYS; goto errout; } /* We are restarting some other task than ourselves. Make sure that the * task does not change its state while we are executing. In the single * CPU state this could be done by disabling pre-emption. But we will * a little stronger medicine on the SMP case: The task make be running * on another CPU. */ flags = enter_critical_section(); /* Find for the TCB associated with matching pid */ tcb = (FAR struct task_tcb_s *)sched_gettcb(pid); #ifndef CONFIG_DISABLE_PTHREAD if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD) #else if (!tcb) #endif { /* There is no TCB with this pid or, if there is, it is not a task. */ errcode = ESRCH; goto errout_with_lock; } #ifdef CONFIG_SMP /* If the task is running on another CPU, then pause that CPU. We can * then manipulate the TCB of the restarted task and when we resume the * that CPU, the restart take effect. */ cpu = sched_cpu_pause(&tcb->cmn); #endif /* CONFIG_SMP */ /* Try to recover from any bad states */ task_recover((FAR struct tcb_s *)tcb); /* Kill any children of this thread */ #ifdef HAVE_GROUP_MEMBERS (void)group_killchildren(tcb); #endif /* Remove the TCB from whatever list it is in. After this point, the TCB * should no longer be accessible to the system */ #ifdef CONFIG_SMP tasklist = TLIST_HEAD(tcb->cmn.task_state, tcb->cmn.cpu); #else tasklist = TLIST_HEAD(tcb->cmn.task_state); #endif dq_rem((FAR dq_entry_t *)tcb, tasklist); tcb->cmn.task_state = TSTATE_TASK_INVALID; /* Deallocate anything left in the TCB's queues */ sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */ /* Reset the current task priority */ tcb->cmn.sched_priority = tcb->cmn.init_priority; /* The task should restart with pre-emption disabled and not in a critical * secton. */ tcb->cmn.lockcount = 0; #ifdef CONFIG_SMP tcb->cmn.irqcount = 0; #endif /* Reset the base task priority and the number of pending reprioritizations */ #ifdef CONFIG_PRIORITY_INHERITANCE tcb->cmn.base_priority = tcb->cmn.init_priority; # if CONFIG_SEM_NNESTPRIO > 0 tcb->cmn.npend_reprio = 0; # endif #endif /* Re-initialize the processor-specific portion of the TCB. This will * reset the entry point and the start-up parameters */ up_initial_state((FAR struct tcb_s *)tcb); /* Add the task to the inactive task list */ dq_addfirst((FAR dq_entry_t *)tcb, (FAR dq_queue_t *)&g_inactivetasks); tcb->cmn.task_state = TSTATE_TASK_INACTIVE; #ifdef CONFIG_SMP /* Resume the paused CPU (if any) */ if (cpu >= 0) { ret = up_cpu_resume(cpu); if (ret < 0) { errcode = -ret; goto errout_with_lock; } } #endif /* CONFIG_SMP */ leave_critical_section(flags); /* Activate the task. */ ret = task_activate((FAR struct tcb_s *)tcb); if (ret != OK) { (void)task_terminate(pid, true); errcode = -ret; goto errout_with_lock; } return OK; errout_with_lock: leave_critical_section(flags); errout: set_errno(errcode); return ERROR; }
int CheckPerm(FCOM_T *comm_ptr, int permissions) { uid_t uid; gid_t gid; /*-------------------------------------------------------------------*/ /* Get group and user ID for current process. */ /*-------------------------------------------------------------------*/ FsGetId(&uid, &gid); /*-------------------------------------------------------------------*/ /* Check if we need to look at read permissions. */ /*-------------------------------------------------------------------*/ if (permissions & F_READ) { /*-----------------------------------------------------------------*/ /* If other has no read permission, check group. */ /*-----------------------------------------------------------------*/ if ((comm_ptr->mode & S_IROTH) == FALSE) { /*---------------------------------------------------------------*/ /* If not in group or group has no read permission, check owner. */ /*---------------------------------------------------------------*/ if ((gid != comm_ptr->group_id) || !(comm_ptr->mode & S_IRGRP)) { /*-------------------------------------------------------------*/ /* If not user or user has no read permissions, return error. */ /*-------------------------------------------------------------*/ if ((uid != comm_ptr->user_id) || !(comm_ptr->mode & S_IRUSR)) { set_errno(EACCES); return -1; } } } } /*-------------------------------------------------------------------*/ /* Check if we need to look at write permissions. */ /*-------------------------------------------------------------------*/ if (permissions & F_WRITE) { /*-----------------------------------------------------------------*/ /* If other has no write permission, check group. */ /*-----------------------------------------------------------------*/ if ((comm_ptr->mode & S_IWOTH) == 0) { /*---------------------------------------------------------------*/ /* If not in group or group has no write permission, check owner.*/ /*---------------------------------------------------------------*/ if ((gid != comm_ptr->group_id) || !(comm_ptr->mode & S_IWGRP)) { /*-------------------------------------------------------------*/ /* If not user or user has no write permissions, return error. */ /*-------------------------------------------------------------*/ if ((uid != comm_ptr->user_id) || !(comm_ptr->mode & S_IWUSR)) { set_errno(EACCES); return -1; } } } } /*-------------------------------------------------------------------*/ /* Check if we need to look at execute permissions. */ /*-------------------------------------------------------------------*/ if (permissions & F_EXECUTE) { /*-----------------------------------------------------------------*/ /* If other has no execute permission, check group. */ /*-----------------------------------------------------------------*/ if ((comm_ptr->mode & S_IXOTH) == 0) { /*---------------------------------------------------------------*/ /* If not in group or group can't execute, check owner. */ /*---------------------------------------------------------------*/ if ((gid != comm_ptr->group_id) || !(comm_ptr->mode & S_IXGRP)) { /*-------------------------------------------------------------*/ /* If not user or user can't execute, return error. */ /*-------------------------------------------------------------*/ if ((uid != comm_ptr->user_id) || !(comm_ptr->mode & S_IXUSR)) { set_errno(EACCES); return -1; } } } } return 0; }
/*ARGSUSED*/ int mach_sysconfig(int which) { return (set_errno(EINVAL)); }
FAR void *shmat(int shmid, FAR const void *shmaddr, int shmflg)
{
  FAR struct shm_region_s *region;
  FAR struct task_group_s *group;
  FAR struct tcb_s *tcb;
  uintptr_t vaddr;
  unsigned int npages;
  int ret;

  /* Get the region associated with the shmid */

  DEBUGASSERT(shmid >= 0 && shmid < CONFIG_ARCH_SHM_MAXREGIONS);
  region = &g_shminfo.si_region[shmid];
  DEBUGASSERT((region->sr_flags & SRFLAG_INUSE) != 0);

  /* Get the TCB and group containing our virtual memory allocator */

  tcb = sched_self();
  DEBUGASSERT(tcb && tcb->group);

  group = tcb->group;
  DEBUGASSERT(group->tg_shm.gs_handle != NULL &&
              group->tg_shm.gs_vaddr[shmid] == 0);

  /* Get exclusive access to the region data structure */

  ret = sem_wait(&region->sr_sem);
  if (ret < 0)
    {
      shmdbg("sem_wait failed: %d\n", ret);
      goto errout;
    }

  /* Set aside a virtual address space to span this physical region */

  vaddr = (uintptr_t)gran_alloc(group->tg_shm.gs_handle,
                                region->sr_ds.shm_segsz);
  if (vaddr == 0)
    {
      shmdbg("gran_alloc() failed\n");
      ret = -ENOMEM;
      goto errout_with_semaphore;
    }

  /* Convert the region size to pages */

  npages = MM_PGALIGNUP(region->sr_ds.shm_segsz);

  /* Attach, i.e., map, the shared memory region at the user virtual
   * address.
   */

  ret = up_shmat(region->sr_pages, npages, vaddr);
  if (ret < 0)
    {
      shmdbg("up_shmat() failed\n");
      goto errout_with_vaddr;
    }

  /* Save the virtual address of the region.  We will need that in shmdt()
   * to do the reverse lookup:  Given the virtual address of the region to
   * detach, we need to get the region table index.
   */

  group->tg_shm.gs_vaddr[shmid] = vaddr;

  /* Increment the count of processes attached to this region */

  region->sr_ds.shm_nattch++;

  /* Save the process ID of the last operation */

  region->sr_ds.shm_lpid = tcb->pid;

  /* Save the time of the last shmat() */

  region->sr_ds.shm_atime = time(NULL);

  /* Release our lock on the entry */

  sem_post(&region->sr_sem);
  return (FAR void *)vaddr;

errout_with_vaddr:
  gran_free(group->tg_shm.gs_handle, (FAR void *)vaddr,
            region->sr_ds.shm_segsz);

errout_with_semaphore:
  sem_post(&region->sr_sem);
  set_errno(-ret);

errout:
  return (FAR void *)ERROR;
}
long fpathconf(int fid, int name) { long rv; #if OS_PARM_CHECK /*-------------------------------------------------------------------*/ /* Ensure file descriptor is valid. */ /*-------------------------------------------------------------------*/ if (fid < 0 || fid >= FOPEN_MAX) { set_errno(EBADF); return -1; } #endif /*-------------------------------------------------------------------*/ /* Return error if file is closed. */ /*-------------------------------------------------------------------*/ if (Files[fid].ioctl == NULL) { set_errno(EBADF); return -1; } /*-------------------------------------------------------------------*/ /* Based on the name variable, get the right value. */ /*-------------------------------------------------------------------*/ switch (name) { /*-----------------------------------------------------------------*/ /* _PC_NAME_MAX and _PC_PATH_MAX have the same value. */ /*-----------------------------------------------------------------*/ case _PC_NAME_MAX: case _PC_PATH_MAX: { rv = PATH_MAX; break; } case _PC_NO_TRUNC: { rv = _PATH_NO_TRUNC; break; } case _PC_LINK_MAX: { /*---------------------------------------------------------------*/ /* The link counter is stored in a "ui8". */ /*---------------------------------------------------------------*/ rv = 255; break; } /*-----------------------------------------------------------------*/ /* All other variables have no limit, so return -1 for them. */ /*-----------------------------------------------------------------*/ default: { rv = -1; break; } } return rv; }
long lx_sysinfo(struct lx_sysinfo *sip) { struct lx_sysinfo si; zone_t *zone = curthread->t_procp->p_zone; uint64_t zphysmem, zfreemem, ztotswap, zfreeswap; si.si_uptime = gethrestime_sec() - zone->zone_boot_time; /* * We scale down the load in avenrun to allow larger load averages * to fit in 32 bits. Linux doesn't, so we remove the scaling * here. */ si.si_loads[0] = zone->zone_avenrun[0] << FSHIFT; si.si_loads[1] = zone->zone_avenrun[1] << FSHIFT; si.si_loads[2] = zone->zone_avenrun[2] << FSHIFT; /* * In linux each thread looks like a process, so we conflate the * two in this stat as well. */ si.si_procs = (int32_t)zone->zone_nlwps; /* * If memory or swap limits are set on the zone, use those, otherwise * use the system values. physmem and freemem are in pages, but the * zone values are in bytes. Likewise, ani_max and ani_free are in * pages. */ if (zone->zone_phys_mem_ctl == UINT64_MAX) { zphysmem = physmem; zfreemem = freemem; } else { zphysmem = btop(zone->zone_phys_mem_ctl); zfreemem = btop(zone->zone_phys_mem_ctl - zone->zone_phys_mem); } if (zone->zone_max_swap_ctl == UINT64_MAX) { ztotswap = k_anoninfo.ani_max; zfreeswap = k_anoninfo.ani_free; } else { /* * See the comment in swapctl for a description of how free is * calculated within a zone. */ rctl_qty_t used; spgcnt_t avail; uint64_t max; avail = MAX((spgcnt_t)(availrmem - swapfs_minfree), 0); max = k_anoninfo.ani_max + k_anoninfo.ani_mem_resv + avail; mutex_enter(&zone->zone_mem_lock); ztotswap = btop(zone->zone_max_swap_ctl); used = btop(zone->zone_max_swap); mutex_exit(&zone->zone_mem_lock); zfreeswap = MIN(ztotswap, max) - used; } /* * If the maximum memory stat is less than 1^20 pages (i.e. 4GB), * then we report the result in bytes. Otherwise we use pages. * Once we start supporting >1TB systems/zones, we'll need a third * option. */ if (MAX(zphysmem, ztotswap) < 1024 * 1024) { si.si_totalram = ptob(zphysmem); si.si_freeram = ptob(zfreemem); si.si_totalswap = ptob(ztotswap); si.si_freeswap = ptob(zfreeswap); si.si_mem_unit = 1; } else { si.si_totalram = zphysmem; si.si_freeram = zfreemem; si.si_totalswap = ztotswap; si.si_freeswap = zfreeswap; si.si_mem_unit = PAGESIZE; } si.si_bufferram = 0; si.si_sharedram = 0; /* * These two stats refer to high physical memory. If an * application running in a Linux zone cares about this, then * either it or we are broken. */ si.si_totalhigh = 0; si.si_freehigh = 0; if (copyout(&si, sip, sizeof (si)) != 0) return (set_errno(EFAULT)); return (0); }
/* * Buy-back from SunOS 4.x * * Like setgid() and setegid() combined -except- that non-root users * can change cr_rgid to cr_gid, and the semantics of cr_sgid are * subtly different. */ int setregid(gid_t rgid, gid_t egid) { proc_t *p; int error = EPERM; int do_nocd = 0; cred_t *cr, *newcr; ksid_t ksid, *ksp; zone_t *zone = crgetzone(CRED()); if ((rgid != -1 && !VALID_GID(rgid, zone)) || (egid != -1 && !VALID_GID(egid, zone))) return (set_errno(EINVAL)); if (egid != -1 && egid > MAXUID) { if (ksid_lookupbygid(zone, egid, &ksid) != 0) return (set_errno(EINVAL)); ksp = &ksid; } else { ksp = NULL; } /* * Need to pre-allocate the new cred structure before grabbing * the p_crlock mutex. */ newcr = cralloc_ksid(); p = ttoproc(curthread); mutex_enter(&p->p_crlock); cr = p->p_cred; if ((rgid == -1 || rgid == cr->cr_rgid || rgid == cr->cr_gid || rgid == cr->cr_sgid) && (egid == -1 || egid == cr->cr_rgid || egid == cr->cr_gid || egid == cr->cr_sgid) || (error = secpolicy_allow_setid(cr, -1, B_FALSE)) == 0) { crhold(cr); crcopy_to(cr, newcr); p->p_cred = newcr; if (egid != -1) { newcr->cr_gid = egid; crsetsid(newcr, ksp, KSID_GROUP); } if (rgid != -1) newcr->cr_rgid = rgid; /* * "If the real gid is being changed, or the effective gid is * being changed to a value not equal to the real gid, the * saved gid is set to the new effective gid." */ if (rgid != -1 || (egid != -1 && newcr->cr_gid != newcr->cr_rgid)) newcr->cr_sgid = newcr->cr_gid; /* * A privileged process that makes itself look like a * set-gid process must be marked to produce no core dump. */ if ((cr->cr_gid != newcr->cr_gid || cr->cr_rgid != newcr->cr_rgid || cr->cr_sgid != newcr->cr_sgid) && error == 0) do_nocd = 1; error = 0; crfree(cr); } mutex_exit(&p->p_crlock); if (error == 0) { if (do_nocd) { mutex_enter(&p->p_lock); p->p_flag |= SNOCD; mutex_exit(&p->p_lock); } crset(p, newcr); /* broadcast to process threads */ return (0); } crfree(newcr); if (ksp != NULL) ksid_rele(ksp); return (set_errno(error)); }
/* * msgctl system call. * * gets q lock (via ipc_lookup), releases before return. * may call users of msg_lock */ static int msgctl(int msgid, int cmd, void *arg) { STRUCT_DECL(msqid_ds, ds); /* SVR4 queue work area */ kmsqid_t *qp; /* ptr to associated q */ int error; struct cred *cr; model_t mdl = get_udatamodel(); struct msqid_ds64 ds64; kmutex_t *lock; proc_t *pp = curproc; STRUCT_INIT(ds, mdl); cr = CRED(); /* * Perform pre- or non-lookup actions (e.g. copyins, RMID). */ switch (cmd) { case IPC_SET: if (copyin(arg, STRUCT_BUF(ds), STRUCT_SIZE(ds))) return (set_errno(EFAULT)); break; case IPC_SET64: if (copyin(arg, &ds64, sizeof (struct msqid_ds64))) return (set_errno(EFAULT)); break; case IPC_RMID: if (error = ipc_rmid(msq_svc, msgid, cr)) return (set_errno(error)); return (0); } /* * get msqid_ds for this msgid */ if ((lock = ipc_lookup(msq_svc, msgid, (kipc_perm_t **)&qp)) == NULL) return (set_errno(EINVAL)); switch (cmd) { case IPC_SET: if (STRUCT_FGET(ds, msg_qbytes) > qp->msg_qbytes && secpolicy_ipc_config(cr) != 0) { mutex_exit(lock); return (set_errno(EPERM)); } if (error = ipcperm_set(msq_svc, cr, &qp->msg_perm, &STRUCT_BUF(ds)->msg_perm, mdl)) { mutex_exit(lock); return (set_errno(error)); } qp->msg_qbytes = STRUCT_FGET(ds, msg_qbytes); qp->msg_ctime = gethrestime_sec(); break; case IPC_STAT: if (error = ipcperm_access(&qp->msg_perm, MSG_R, cr)) { mutex_exit(lock); return (set_errno(error)); } if (qp->msg_rcv_cnt) qp->msg_perm.ipc_mode |= MSG_RWAIT; if (qp->msg_snd_cnt) qp->msg_perm.ipc_mode |= MSG_WWAIT; ipcperm_stat(&STRUCT_BUF(ds)->msg_perm, &qp->msg_perm, mdl); qp->msg_perm.ipc_mode &= ~(MSG_RWAIT|MSG_WWAIT); STRUCT_FSETP(ds, msg_first, NULL); /* kernel addr */ STRUCT_FSETP(ds, msg_last, NULL); STRUCT_FSET(ds, msg_cbytes, qp->msg_cbytes); STRUCT_FSET(ds, msg_qnum, qp->msg_qnum); STRUCT_FSET(ds, msg_qbytes, qp->msg_qbytes); STRUCT_FSET(ds, msg_lspid, qp->msg_lspid); STRUCT_FSET(ds, msg_lrpid, qp->msg_lrpid); STRUCT_FSET(ds, msg_stime, qp->msg_stime); STRUCT_FSET(ds, msg_rtime, qp->msg_rtime); STRUCT_FSET(ds, msg_ctime, qp->msg_ctime); break; case IPC_SET64: mutex_enter(&pp->p_lock); if ((ds64.msgx_qbytes > qp->msg_qbytes) && secpolicy_ipc_config(cr) != 0 && rctl_test(rc_process_msgmnb, pp->p_rctls, pp, ds64.msgx_qbytes, RCA_SAFE) & RCT_DENY) { mutex_exit(&pp->p_lock); mutex_exit(lock); return (set_errno(EPERM)); } mutex_exit(&pp->p_lock); if (error = ipcperm_set64(msq_svc, cr, &qp->msg_perm, &ds64.msgx_perm)) { mutex_exit(lock); return (set_errno(error)); } qp->msg_qbytes = ds64.msgx_qbytes; qp->msg_ctime = gethrestime_sec(); break; case IPC_STAT64: if (qp->msg_rcv_cnt) qp->msg_perm.ipc_mode |= MSG_RWAIT; if (qp->msg_snd_cnt) qp->msg_perm.ipc_mode |= MSG_WWAIT; ipcperm_stat64(&ds64.msgx_perm, &qp->msg_perm); qp->msg_perm.ipc_mode &= ~(MSG_RWAIT|MSG_WWAIT); ds64.msgx_cbytes = qp->msg_cbytes; ds64.msgx_qnum = qp->msg_qnum; ds64.msgx_qbytes = qp->msg_qbytes; ds64.msgx_lspid = qp->msg_lspid; ds64.msgx_lrpid = qp->msg_lrpid; ds64.msgx_stime = qp->msg_stime; ds64.msgx_rtime = qp->msg_rtime; ds64.msgx_ctime = qp->msg_ctime; break; default: mutex_exit(lock); return (set_errno(EINVAL)); } mutex_exit(lock); /* * Do copyout last (after releasing mutex). */ switch (cmd) { case IPC_STAT: if (copyout(STRUCT_BUF(ds), arg, STRUCT_SIZE(ds))) return (set_errno(EFAULT)); break; case IPC_STAT64: if (copyout(&ds64, arg, sizeof (struct msqid_ds64))) return (set_errno(EFAULT)); break; } return (0); }
int setgid(gid_t gid) { proc_t *p; int error; int do_nocd = 0; cred_t *cr, *newcr; ksid_t ksid, *ksp; zone_t *zone = crgetzone(CRED()); if (!VALID_GID(gid, zone)) return (set_errno(EINVAL)); if (gid > MAXUID) { if (ksid_lookupbygid(zone, gid, &ksid) != 0) return (set_errno(EINVAL)); ksp = &ksid; } else { ksp = NULL; } /* * Need to pre-allocate the new cred structure before grabbing * the p_crlock mutex. We cannot hold the mutex across the * secpolicy functions. */ newcr = cralloc_ksid(); p = ttoproc(curthread); mutex_enter(&p->p_crlock); retry: cr = p->p_cred; crhold(cr); mutex_exit(&p->p_crlock); if ((gid == cr->cr_rgid || gid == cr->cr_sgid) && secpolicy_allow_setid(cr, -1, B_TRUE) != 0) { mutex_enter(&p->p_crlock); crfree(cr); if (cr != p->p_cred) goto retry; error = 0; crcopy_to(cr, newcr); p->p_cred = newcr; newcr->cr_gid = gid; crsetsid(newcr, ksp, KSID_GROUP); mutex_exit(&p->p_crlock); } else if ((error = secpolicy_allow_setid(cr, -1, B_FALSE)) == 0) { mutex_enter(&p->p_crlock); crfree(cr); if (cr != p->p_cred) goto retry; /* * A privileged process that makes itself look like a * set-gid process must be marked to produce no core dump. */ if (cr->cr_gid != gid || cr->cr_rgid != gid || cr->cr_sgid != gid) do_nocd = 1; crcopy_to(cr, newcr); p->p_cred = newcr; newcr->cr_gid = gid; newcr->cr_rgid = gid; newcr->cr_sgid = gid; crsetsid(newcr, ksp, KSID_GROUP); mutex_exit(&p->p_crlock); } else { crfree(newcr); crfree(cr); if (ksp != NULL) ksid_rele(ksp); } if (error == 0) { if (do_nocd) { mutex_enter(&p->p_lock); p->p_flag |= SNOCD; mutex_exit(&p->p_lock); } crset(p, newcr); /* broadcast to process threads */ return (0); } return (set_errno(error)); }
int task_restart(pid_t pid)
{
  FAR struct tcb_s *rtcb;
  FAR struct task_tcb_s *tcb;
  FAR dq_queue_t *tasklist;
  irqstate_t state;
  int status;

  /* Make sure this task does not become ready-to-run while
   * we are futzing with its TCB
   */

  sched_lock();

  /* Check if the task to restart is the calling task */

  rtcb = this_task();
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      sched_unlock();
      set_errno(ENOSYS);
      return ERROR;
    }

#ifdef CONFIG_SMP
  /* There is currently no capability to restart a task that is actively
   * running on another CPU either.  This is not the calling task, so if it
   * is running, then it could only be running on a different CPU.
   *
   * Also, we will need some interlocks to assure that no tasks are
   * rescheduled on any other CPU while we do this.
   */

#warning Missing SMP logic
  if (rtcb->task_state == TSTATE_TASK_RUNNING)
    {
      /* Not implemented */

      sched_unlock();
      set_errno(ENOSYS);
      return ERROR;
    }
#endif

  /* We are restarting some other task than ourselves */
  /* Find the TCB associated with the matching pid */

  tcb = (FAR struct task_tcb_s *)sched_gettcb(pid);
#ifndef CONFIG_DISABLE_PTHREAD
  if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
#else
  if (!tcb)
#endif
    {
      /* There is no TCB with this pid or, if there is, it is not a task. */

      sched_unlock();
      set_errno(ESRCH);
      return ERROR;
    }

  /* Try to recover from any bad states */

  task_recover((FAR struct tcb_s *)tcb);

  /* Kill any children of this thread */

#ifdef HAVE_GROUP_MEMBERS
  (void)group_killchildren(tcb);
#endif

  /* Remove the TCB from whatever list it is in.  After this point, the TCB
   * should no longer be accessible to the system
   */

#ifdef CONFIG_SMP
  tasklist = TLIST_HEAD(tcb->cmn.task_state, tcb->cmn.cpu);
#else
  tasklist = TLIST_HEAD(tcb->cmn.task_state);
#endif

  state = irqsave();
  dq_rem((FAR dq_entry_t *)tcb, tasklist);
  tcb->cmn.task_state = TSTATE_TASK_INVALID;
  irqrestore(state);

  /* Deallocate anything left in the TCB's queues */

  sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */

  /* Reset the current task priority */

  tcb->cmn.sched_priority = tcb->init_priority;

  /* Reset the base task priority and the number of pending reprioritizations */

#ifdef CONFIG_PRIORITY_INHERITANCE
  tcb->cmn.base_priority = tcb->init_priority;
# if CONFIG_SEM_NNESTPRIO > 0
  tcb->cmn.npend_reprio = 0;
# endif
#endif

  /* Re-initialize the processor-specific portion of the TCB.  This will
   * reset the entry point and the start-up parameters
   */

  up_initial_state((FAR struct tcb_s *)tcb);

  /* Add the task to the inactive task list */

  dq_addfirst((FAR dq_entry_t *)tcb, (FAR dq_queue_t *)&g_inactivetasks);
  tcb->cmn.task_state = TSTATE_TASK_INACTIVE;

  /* Activate the task */

  status = task_activate((FAR struct tcb_s *)tcb);
  if (status != OK)
    {
      (void)task_delete(pid);

      sched_unlock();
      set_errno(-status);
      return ERROR;
    }

  sched_unlock();
  return OK;
}
int mq_waitsend(mqd_t mqdes)
{
  FAR struct tcb_s *rtcb;
  FAR struct mqueue_inode_s *msgq;

  /* Get a pointer to the message queue */

  msgq = mqdes->msgq;

  /* Verify that the queue is indeed full as the caller thinks */

  if (msgq->nmsgs >= msgq->maxmsgs)
    {
      /* Should we block until there is sufficient space in the
       * message queue?
       */

      if ((mqdes->oflags & O_NONBLOCK) != 0)
        {
          /* No... We will return an error to the caller. */

          set_errno(EAGAIN);
          return ERROR;
        }

      /* Yes... We will not return control until the message queue is
       * available or we receive a signal or a timeout occurs.
       */

      else
        {
          /* Loop until there are fewer than max allowable messages in the
           * receiving message queue
           */

          while (msgq->nmsgs >= msgq->maxmsgs)
            {
              /* Block until the message queue is no longer full.
               * When we are unblocked, we will try again
               */

              rtcb           = (FAR struct tcb_s *)g_readytorun.head;
              rtcb->msgwaitq = msgq;
              msgq->nwaitnotfull++;

              set_errno(OK);
              up_block_task(rtcb, TSTATE_WAIT_MQNOTFULL);

              /* When we resume at this point, either (1) the message queue
               * is no longer full, or (2) the wait has been interrupted by
               * a signal.  We can detect the latter case by examining the
               * errno value (should be EINTR or ETIMEDOUT).
               */

              if (get_errno() != OK)
                {
                  return ERROR;
                }
            }
        }
    }

  return OK;
}
int munmap(FAR void *start, size_t length) { FAR struct fs_rammap_s *prev; FAR struct fs_rammap_s *curr; FAR void *newaddr; unsigned int offset; int ret; int err; /* Find a region containing this start and length in the list of regions */ rammap_initialize(); ret = sem_wait(&g_rammaps.exclsem); if (ret < 0) { return ERROR; } /* Seach the list of regions */ for (prev = NULL, curr = g_rammaps.head; curr; prev = curr, curr = curr->flink) { /* Does this region include any part of the specified range? */ if ((uintptr_t)start < (uintptr_t)curr->addr + curr->length && (uintptr_t)start + length >= (uintptr_t)curr->addr) { break; } } /* Did we find the region */ if (!curr) { fdbg("Region not found\n"); err = EINVAL; goto errout_with_semaphore; } /* Get the offset from the beginning of the region and the actual number * of bytes to "unmap". All mappings must extend to the end of the region. * There is no support for free a block of memory but leaving a block of * memory at the end. This is a consequence of using kumm_realloc() to * simulate the unmapping. */ offset = start - curr->addr; if (offset + length < curr->length) { fdbg("Cannot umap without unmapping to the end\n"); err = ENOSYS; goto errout_with_semaphore; } /* Okay.. the region is beging umapped to the end. Make sure the length * indicates that. */ length = curr->length - offset; /* Are we unmapping the entire region (offset == 0)? */ if (length >= curr->length) { /* Yes.. remove the mapping from the list */ if (prev) { prev->flink = curr->flink; } else { g_rammaps.head = curr->flink; } /* Then free the region */ kumm_free(curr); } /* No.. We have been asked to "unmap' only a portion of the memory * (offset > 0). */ else { newaddr = kumm_realloc(curr->addr, sizeof(struct fs_rammap_s) + length); DEBUGASSERT(newaddr == (FAR void*)(curr->addr)); curr->length = length; } sem_post(&g_rammaps.exclsem); return OK; errout_with_semaphore: sem_post(&g_rammaps.exclsem); set_errno(err); return ERROR; }
int waitid(idtype_t idtype, id_t id, siginfo_t *info, int options) { FAR _TCB *rtcb = (FAR _TCB *)g_readytorun.head; sigset_t sigset; int err; int ret; /* MISSING LOGIC: If WNOHANG is provided in the options, then this function * should returned immediately. However, there is no mechanism available now * know if the thread has child: The children remember their parents (if * CONFIG_SCHED_HAVE_PARENT) but the parents do not remember their children. */ /* None of the options are supported except for WEXITED (which must be * provided. Currently SIGCHILD always reports CLD_EXITED so we cannot * distinguish any other events. */ #ifdef CONFIG_DEBUG if (options != WEXITED) { set_errno(ENOSYS); return ERROR; } #endif /* Create a signal set that contains only SIGCHLD */ (void)sigemptyset(&sigset); (void)sigaddset(&sigset, SIGCHLD); /* Disable pre-emption so that nothing changes while the loop executes */ sched_lock(); /* Verify that this task actually has children and that the the requeste * TCB is actually a child of this task. */ if (rtcb->nchildren == 0) { err = ECHILD; goto errout_with_errno; } else if (idtype == P_PID) { /* Get the TCB corresponding to this PID and make sure it is our child. */ FAR _TCB *ctcb = sched_gettcb((pid_t)id); if (!ctcb || ctcb->parent != rtcb->pid) { err = ECHILD; goto errout_with_errno; } } /* Loop until the child that we are waiting for dies */ for (;;) { /* Check if the task has already died. Signals are not queued in * NuttX. So a possibility is that the child has died and we * missed the death of child signal (we got some other signal * instead). */ if (rtcb->nchildren == 0 || (idtype == P_PID && (ret = kill((pid_t)id, 0)) < 0)) { /* We know that the child task was running okay we stared, * so we must have lost the signal. What can we do? * Let's claim we were interrupted by a signal. */ err = EINTR; goto errout_with_errno; } /* Wait for any death-of-child signal */ ret = sigwaitinfo(&sigset, info); if (ret < 0) { goto errout; } /* Make there this was SIGCHLD */ if (info->si_signo == SIGCHLD) { /* Yes.. Are we waiting for the death of a specific child? */ if (idtype == P_PID) { /* Was this the death of the thread we were waiting for? */ if (info->si_pid == (pid_t)id) { /* Yes... return success */ break; } } /* Are we waiting for any child to change state? */ else if (idtype == P_ALL) { /* Return success */ break; } /* Other ID types are not supported */ else /* if (idtype == P_PGID) */ { set_errno(ENOSYS); goto errout; } } } sched_unlock(); return OK; errout_with_errno: set_errno(err); errout: sched_unlock(); return ERROR; }
int nx_eventhandler(NXHANDLE handle) { FAR struct nxfe_conn_s *conn = (FAR struct nxfe_conn_s *)handle; struct nxsvrmsg_s *msg; struct nxbe_window_s *wnd; char buffer[NX_MXCLIMSGLEN]; int nbytes; /* Get the next message from our incoming message queue */ do { nbytes = mq_receive(conn->crdmq, buffer, NX_MXCLIMSGLEN, 0); if (nbytes < 0) { /* EINTR is not an error. The wait was interrupted by a signal and * we just need to try reading again. */ if (errno != EINTR) { if (errno == EAGAIN) { /* EAGAIN is not an error. It occurs because the MQ is opened with * O_NONBLOCK and there is no message available now. */ return OK; } else { gdbg("mq_receive failed: %d\n", errno); return ERROR; } } } } while (nbytes < 0); DEBUGASSERT(nbytes >= sizeof(struct nxclimsg_s)); /* Dispatch the message appropriately */ msg = (struct nxsvrmsg_s *)buffer; gvdbg("Received msgid=%d\n", msg->msgid); switch (msg->msgid) { case NX_CLIMSG_CONNECTED: nx_connected(conn); break; case NX_CLIMSG_DISCONNECTED: nx_disconnected(conn); set_errno(EHOSTDOWN); return ERROR; case NX_CLIMSG_REDRAW: { FAR struct nxclimsg_redraw_s *redraw = (FAR struct nxclimsg_redraw_s *)buffer; wnd = redraw->wnd; DEBUGASSERT(wnd); if (wnd->cb->redraw) { wnd->cb->redraw((NXWINDOW)wnd, &redraw->rect, redraw->more, wnd->arg); } } break; case NX_CLIMSG_NEWPOSITION: { FAR struct nxclimsg_newposition_s *postn = (FAR struct nxclimsg_newposition_s *)buffer; wnd = postn->wnd; DEBUGASSERT(wnd); if (wnd->cb->position) { wnd->cb->position((NXWINDOW)wnd, &postn->size, &postn->pos, &postn->bounds, wnd->arg); } } break; #ifdef CONFIG_NX_XYINPUT case NX_CLIMSG_MOUSEIN: { FAR struct nxclimsg_mousein_s *mouse = (FAR struct nxclimsg_mousein_s *)buffer; wnd = mouse->wnd; DEBUGASSERT(wnd); if (wnd->cb->mousein) { wnd->cb->mousein((NXWINDOW)wnd, &mouse->pos, mouse->buttons, wnd->arg); } } break; #endif #ifdef CONFIG_NX_KBD case NX_CLIMSG_KBDIN: { FAR struct nxclimsg_kbdin_s *kbd = (FAR struct nxclimsg_kbdin_s *)buffer; wnd = kbd->wnd; DEBUGASSERT(wnd); if (wnd->cb->kbdin) { wnd->cb->kbdin((NXWINDOW)wnd, kbd->nch, kbd->ch, wnd->arg); } } break; #endif case NX_CLIMSG_BLOCKED: { FAR struct nxclimsg_blocked_s *blocked = (FAR struct nxclimsg_blocked_s *)buffer; wnd = blocked->wnd; DEBUGASSERT(wnd); if (wnd->cb->blocked) { wnd->cb->blocked((NXWINDOW)wnd, wnd->arg, blocked->arg); } } break; default: gdbg("Unrecognized message opcode: %d\n", ((FAR struct nxsvrmsg_s *)buffer)->msgid); break; } return OK; }
/* * Register the klpd. * If the pid_t passed in is positive, update the registration for * the specific process; that is only possible if the process already * has a registration on it. This change of registration will affect * all processes which share common ancestry. * * MY_PID (pid 0) can be used to create or change the context for * the current process, typically done after fork(). * * A negative value can be used to register a klpd globally. * * The per-credential klpd needs to be cleaned up when entering * a zone or unsetting the flag. */ int klpd_reg(int did, idtype_t type, id_t id, priv_set_t *psetbuf) { cred_t *cr = CRED(); door_handle_t dh; klpd_reg_t *kpd; priv_set_t pset; door_info_t di; credklpd_t *ckp = NULL; pid_t pid = -1; projid_t proj = -1; kproject_t *kpp = NULL; if (CR_FLAGS(cr) & PRIV_XPOLICY) return (set_errno(EINVAL)); if (copyin(psetbuf, &pset, sizeof (priv_set_t))) return (set_errno(EFAULT)); if (!priv_issubset(&pset, &CR_OEPRIV(cr))) return (set_errno(EPERM)); switch (type) { case P_PID: pid = (pid_t)id; if (pid == P_MYPID) pid = curproc->p_pid; if (pid == curproc->p_pid) ckp = crklpd_alloc(); break; case P_PROJID: proj = (projid_t)id; kpp = project_hold_by_id(proj, crgetzone(cr), PROJECT_HOLD_FIND); if (kpp == NULL) return (set_errno(ESRCH)); break; default: return (set_errno(ENOTSUP)); } /* * Verify the door passed in; it must be a door and we won't * allow processes to be called on their own behalf. */ dh = door_ki_lookup(did); if (dh == NULL || door_ki_info(dh, &di) != 0) { if (ckp != NULL) crklpd_rele(ckp); if (kpp != NULL) project_rele(kpp); return (set_errno(EBADF)); } if (type == P_PID && pid == di.di_target) { if (ckp != NULL) crklpd_rele(ckp); ASSERT(kpp == NULL); return (set_errno(EINVAL)); } kpd = kmem_zalloc(sizeof (*kpd), KM_SLEEP); crhold(kpd->klpd_cred = cr); kpd->klpd_door = dh; kpd->klpd_door_pid = di.di_target; kpd->klpd_ref = 1; kpd->klpd_pset = pset; if (kpp != NULL) { mutex_enter(&klpd_mutex); kpd = klpd_link(kpd, &kpp->kpj_klpd, B_TRUE); mutex_exit(&klpd_mutex); if (kpd != NULL) klpd_rele(kpd); project_rele(kpp); } else if ((int)pid < 0) { /* Global daemon */ mutex_enter(&klpd_mutex); (void) klpd_link(kpd, &klpd_list, B_FALSE); mutex_exit(&klpd_mutex); } else if (pid == curproc->p_pid) { proc_t *p = curproc; cred_t *newcr = cralloc(); /* No need to lock, sole reference to ckp */ kpd = klpd_link(kpd, &ckp->crkl_reg, B_TRUE); if (kpd != NULL) klpd_rele(kpd); mutex_enter(&p->p_crlock); cr = p->p_cred; crdup_to(cr, newcr); crsetcrklpd(newcr, ckp); p->p_cred = newcr; /* Already held for p_cred */ crhold(newcr); /* Hold once for the current thread */ mutex_exit(&p->p_crlock); crfree(cr); /* One for the p_cred */ crset(p, newcr); } else { proc_t *p; cred_t *pcr; mutex_enter(&pidlock); p = prfind(pid); if (p == NULL || !prochasprocperm(p, curproc, CRED())) { mutex_exit(&pidlock); klpd_rele(kpd); return (set_errno(p == NULL ? ESRCH : EPERM)); } mutex_enter(&p->p_crlock); crhold(pcr = p->p_cred); mutex_exit(&pidlock); mutex_exit(&p->p_crlock); /* * We're going to update the credential's ckp in place; * this requires that it exists. */ ckp = crgetcrklpd(pcr); if (ckp == NULL) { crfree(pcr); klpd_rele(kpd); return (set_errno(EINVAL)); } crklpd_setreg(ckp, kpd); crfree(pcr); } return (0); }
int malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { int ret; size_t i; const char *f; #define APPEND_C(c) do { \ if (i < size) \ str[i] = (c); \ i++; \ } while (0) #define APPEND_S(s, slen) do { \ if (i < size) { \ size_t cpylen = (slen <= size - i) ? slen : size - i; \ memcpy(&str[i], s, cpylen); \ } \ i += slen; \ } while (0) #define APPEND_PADDED_S(s, slen, width, left_justify) do { \ /* Left padding. */ \ size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \ (size_t)width - slen : 0); \ if (left_justify == false && pad_len != 0) { \ size_t j; \ for (j = 0; j < pad_len; j++) \ APPEND_C(' '); \ } \ /* Value. */ \ APPEND_S(s, slen); \ /* Right padding. */ \ if (left_justify && pad_len != 0) { \ size_t j; \ for (j = 0; j < pad_len; j++) \ APPEND_C(' '); \ } \ } while (0) #define GET_ARG_NUMERIC(val, len) do { \ switch (len) { \ case '?': \ val = va_arg(ap, int); \ break; \ case '?' | 0x80: \ val = va_arg(ap, unsigned int); \ break; \ case 'l': \ val = va_arg(ap, long); \ break; \ case 'l' | 0x80: \ val = va_arg(ap, unsigned long); \ break; \ case 'q': \ val = va_arg(ap, long long); \ break; \ case 'q' | 0x80: \ val = va_arg(ap, unsigned long long); \ break; \ case 'j': \ val = va_arg(ap, intmax_t); \ break; \ case 't': \ val = va_arg(ap, ptrdiff_t); \ break; \ case 'z': \ val = va_arg(ap, ssize_t); \ break; \ case 'z' | 0x80: \ val = va_arg(ap, size_t); \ break; \ case 'p': /* Synthetic; used for %p. */ \ val = va_arg(ap, uintptr_t); \ break; \ default: not_reached(); \ } \ } while (0) i = 0; f = format; while (true) { switch (*f) { case '\0': goto label_out; case '%': { bool alt_form = false; bool zero_pad = false; bool left_justify = false; bool plus_space = false; bool plus_plus = false; int prec = -1; int width = -1; unsigned char len = '?'; f++; if (*f == '%') { /* %% */ APPEND_C(*f); break; } /* Flags. */ while (true) { switch (*f) { case '#': assert(alt_form == false); alt_form = true; break; case '0': assert(zero_pad == false); zero_pad = true; break; case '-': assert(left_justify == false); left_justify = true; break; case ' ': assert(plus_space == false); plus_space = true; break; case '+': assert(plus_plus == false); plus_plus = true; break; default: goto label_width; } f++; } /* Width. */ label_width: switch (*f) { case '*': width = va_arg(ap, int); f++; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { uintmax_t uwidth; set_errno(0); uwidth = malloc_strtoumax(f, (char **)&f, 10); assert(uwidth != UINTMAX_MAX || get_errno() != ERANGE); width = (int)uwidth; if (*f == '.') { f++; goto label_precision; } else goto label_length; break; } case '.': f++; goto label_precision; default: goto label_length; } /* Precision. */ label_precision: switch (*f) { case '*': prec = va_arg(ap, int); f++; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { uintmax_t uprec; set_errno(0); uprec = malloc_strtoumax(f, (char **)&f, 10); assert(uprec != UINTMAX_MAX || get_errno() != ERANGE); prec = (int)uprec; break; } default: break; } /* Length. */ label_length: switch (*f) { case 'l': f++; if (*f == 'l') { len = 'q'; f++; } else len = 'l'; break; case 'j': len = 'j'; f++; break; case 't': len = 't'; f++; break; case 'z': len = 'z'; f++; break; default: break; } /* Conversion specifier. 
*/ switch (*f) { char *s; size_t slen; case 'd': case 'i': { intmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[D2S_BUFSIZE]; GET_ARG_NUMERIC(val, len); s = d2s(val, (plus_plus ? '+' : (plus_space ? ' ' : '-')), buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'o': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[O2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = o2s(val, alt_form, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'u': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[U2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = u2s(val, 10, false, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'x': case 'X': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[X2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = x2s(val, alt_form, *f == 'X', buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'c': { unsigned char val; char buf[2]; assert(len == '?' || len == 'l'); assert_not_implemented(len != 'l'); val = va_arg(ap, int); buf[0] = val; buf[1] = '\0'; APPEND_PADDED_S(buf, 1, width, left_justify); f++; break; } case 's': assert(len == '?' || len == 'l'); assert_not_implemented(len != 'l'); s = va_arg(ap, char *); slen = (prec == -1) ? strlen(s) : prec; APPEND_PADDED_S(s, slen, width, left_justify); f++; break; case 'p': { uintmax_t val; char buf[X2S_BUFSIZE]; GET_ARG_NUMERIC(val, 'p'); s = x2s(val, true, false, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } default: not_implemented(); } break; } default: { APPEND_C(*f); f++; break; }} } label_out: if (i < size) str[i] = '\0'; else str[size - 1] = '\0'; ret = i; #undef APPEND_C #undef APPEND_S #undef APPEND_PADDED_S #undef GET_ARG_NUMERIC return (ret); }
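/*
 * Usage sketch (not part of the listing above): a varargs front end in the
 * style of the library's own snprintf wrapper, showing that the return value
 * is the untruncated length, as with C99 vsnprintf().  The example_snprintf()
 * name is illustrative.
 */
#include <stdarg.h>
#include <stddef.h>

static int
example_snprintf(char *str, size_t size, const char *format, ...)
{
	int ret;
	va_list ap;

	va_start(ap, format);
	ret = malloc_vsnprintf(str, size, format, ap);
	va_end(ap);

	/*
	 * ret is the number of characters the full formatting would have
	 * produced; str holds at most size - 1 of them plus a terminating
	 * NUL.
	 */
	return (ret);
}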
static int in_sync_sys(char *pathname, uint_t flags) { struct vnode *vp; int error; /* For debugging/testing */ if (inst_sync_disable) return (0); /* * We must have sufficient privilege to do this, since we lock critical * data structures whilst we're doing it .. */ if ((error = secpolicy_sys_devices(CRED())) != 0) return (set_errno(error)); if (flags != INST_SYNC_ALWAYS && flags != INST_SYNC_IF_REQUIRED) return (set_errno(EINVAL)); /* * Only one process is allowed to get the state of the instance * number assignments on the system at any given time. */ e_ddi_enter_instance(); /* * Recreate the instance file only if the device tree has changed * or if the caller explicitly requests so. */ if (e_ddi_instance_is_clean() && flags != INST_SYNC_ALWAYS) { error = EALREADY; goto end; } /* * Create an instance file for writing, giving it a mode that * will only permit reading. Note that we refuse to overwrite * an existing file. */ if ((error = vn_open(pathname, UIO_USERSPACE, FCREAT, 0444, &vp, CRCREAT, 0)) != 0) { if (error == EISDIR) error = EACCES; /* SVID compliance? */ goto end; } /* * So far so good. We're singly threaded, the vnode is beckoning * so let's get on with it. Any error, and we just give up and * hand the first error we get back to userland. */ error = in_write_instance(vp); /* * If there was any sort of error, we deliberately go and * remove the file we just created so that any attempts to * use it will quickly fail. */ if (error) (void) vn_remove(pathname, UIO_USERSPACE, RMFILE); else e_ddi_instance_set_clean(); end: e_ddi_exit_instance(); return (error ? set_errno(error) : 0); }
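/*
 * Caller sketch (an assumption, not taken from the original source): how a
 * userland device-configuration tool might drive in_sync_sys() through the
 * instance-sync system call.  The syscall(SYS_inst_sync, ...) invocation,
 * the header locations, and the temporary file name are illustrative
 * placeholders; the flag and errno semantics come from the checks above.
 */
#include <sys/syscall.h>
#include <sys/instance.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int
flush_path_to_inst(const char *tmpname)
{
	/* Ask the kernel to rewrite the instance data only if it changed. */
	if (syscall(SYS_inst_sync, tmpname, INST_SYNC_IF_REQUIRED) == 0)
		return (0);

	if (errno == EALREADY) {
		/* The on-disk file is already up to date; not a failure. */
		return (0);
	}

	perror("inst_sync");
	return (-1);
}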
uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base) { uintmax_t ret, digit; int b; bool neg; const char *p, *ns; p = nptr; if (base < 0 || base == 1 || base > 36) { ns = p; set_errno(EINVAL); ret = UINTMAX_MAX; goto label_return; } b = base; /* Swallow leading whitespace and get sign, if any. */ neg = false; while (true) { switch (*p) { case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': p++; break; case '-': neg = true; /* Fall through. */ case '+': p++; /* Fall through. */ default: goto label_prefix; } } /* Get prefix, if any. */ label_prefix: /* * Note where the first non-whitespace/sign character is so that it is * possible to tell whether any digits are consumed (e.g., " 0" vs. * " -x"). */ ns = p; if (*p == '0') { switch (p[1]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': if (b == 0) b = 8; if (b == 8) p++; break; case 'X': case 'x': switch (p[2]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': if (b == 0) b = 16; if (b == 16) p += 2; break; default: break; } break; default: /* A lone '0' followed by a non-digit: consume the '0' and stop. */ p++; ret = 0; goto label_return; } } if (b == 0) b = 10; /* Convert. */ ret = 0; while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b) || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b) || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) { uintmax_t pret = ret; ret *= b; ret += digit; if (ret < pret) { /* Overflow. */ set_errno(ERANGE); ret = UINTMAX_MAX; goto label_return; } p++; } if (neg) ret = -ret; if (p == ns) { /* No conversion was performed. */ set_errno(EINVAL); ret = UINTMAX_MAX; } label_return: if (endptr != NULL) { if (p == ns) { /* No characters were converted. */ *endptr = (char *)nptr; } else *endptr = (char *)p; } return (ret); }
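/*
 * Usage sketch, grounded in the behaviors exercised by the
 * test_malloc_strtoumax cases elsewhere in this document: base 0 selects
 * octal or hexadecimal from the prefix, and *endptr is left at the first
 * unconsumed character.  The example_parse() name is illustrative.
 */
#include <assert.h>
#include <stdint.h>

static void
example_parse(void)
{
	char *end;

	assert(malloc_strtoumax("0x42trailing", &end, 0) == 0x42 &&
	    *end == 't');	/* "0x" prefix selects base 16 */
	assert(malloc_strtoumax("042", &end, 0) == 042 &&
	    *end == '\0');	/* leading "0" selects base 8 */
	assert(malloc_strtoumax("08", &end, 0) == 0 &&
	    *end == '8');	/* '8' is not an octal digit */
}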
int getsockname(int sockfd, FAR struct sockaddr *addr, FAR socklen_t *addrlen) { FAR struct socket *psock = sockfd_socket(sockfd); int ret; int errcode; /* Verify that the sockfd corresponds to a valid, allocated socket */ if (!psock || psock->s_crefs <= 0) { errcode = EBADF; goto errout; } /* Some sanity checking... Shouldn't need this on a buckled up embedded * system (?) */ #ifdef CONFIG_DEBUG_FEATURES if (!addr || !addrlen) { errcode = EINVAL; goto errout; } #endif /* Handle by address domain */ switch (psock->s_domain) { #ifdef CONFIG_NET_IPv4 case PF_INET: ret = ipv4_getsockname(psock, addr, addrlen); break; #endif #ifdef CONFIG_NET_IPv6 case PF_INET6: ret = ipv6_getsockname(psock, addr, addrlen); break; #endif case PF_PACKET: default: errcode = EAFNOSUPPORT; goto errout; } /* Check for failure */ if (ret < 0) { errcode = -ret; goto errout; } return OK; errout: set_errno(errcode); return ERROR; }
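/*
 * Usage sketch (not from the original source): the common pattern this call
 * supports, discovering which ephemeral port bind() assigned.  Standard
 * sockets API only; the helper name bound_udp_port() is illustrative.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>

static in_port_t
bound_udp_port(void)
{
	struct sockaddr_in addr;
	socklen_t addrlen = sizeof(addr);
	int sd = socket(AF_INET, SOCK_DGRAM, 0);

	if (sd < 0)
		return 0;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family      = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port        = 0;		/* let the stack pick a port */

	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    getsockname(sd, (struct sockaddr *)&addr, &addrlen) < 0)
	  {
	    close(sd);
	    return 0;
	  }

	close(sd);
	return ntohs(addr.sin_port);		/* the port bind() chose */
}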
int prctl(int option, ...) { va_list ap; int err; va_start(ap, option); switch (option) { case PR_SET_NAME: case PR_GET_NAME: #if CONFIG_TASK_NAME_SIZE > 0 { /* Get the prctl arguments */ char *name = va_arg(ap, char *); int pid = va_arg(ap, int); FAR _TCB *tcb; /* Get the TCB associated with the PID (handling the special case of * pid==0 meaning "this thread") */ if (!pid) { tcb = (FAR _TCB *)g_readytorun.head; } else { tcb = sched_gettcb(pid); } /* An invalid pid will be indicated by a NULL TCB returned from * sched_gettcb() */ if (!tcb) { sdbg("Pid does not correspond to a task: %d\n", pid); err = ESRCH; goto errout; } /* A pointer to the task name storage must also be provided */ if (!name) { sdbg("No name provided\n"); err = EFAULT; goto errout; } /* Now get or set the task name */ if (option == PR_SET_NAME) { /* tcb->name may not be null-terminated */ strncpy(tcb->name, name, CONFIG_TASK_NAME_SIZE); } else { /* The returned value will be null-terminated, truncating if necessary */ strncpy(name, tcb->name, CONFIG_TASK_NAME_SIZE-1); name[CONFIG_TASK_NAME_SIZE-1] = '\0'; } } break; #else sdbg("Option not enabled: %d\n", option); err = ENOSYS; goto errout; #endif default: sdbg("Unrecognized option: %d\n", option); err = EINVAL; goto errout; } va_end(ap); return OK; errout: va_end(ap); set_errno(err); return ERROR; }
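/*
 * Usage sketch (illustrative, not from the original source), following the
 * argument order handled above: the option is followed by the name buffer
 * and then the pid, with pid 0 meaning the calling thread.  Requires
 * CONFIG_TASK_NAME_SIZE > 0; the helper name is made up.
 */
#include <nuttx/config.h>
#include <sys/prctl.h>
#include <stdio.h>

static void
show_and_set_task_name(void)
{
#if CONFIG_TASK_NAME_SIZE > 0
	char name[CONFIG_TASK_NAME_SIZE];

	/* Fetch the calling task's name (pid 0 selects "this thread"). */
	if (prctl(PR_GET_NAME, name, 0) == 0)
	  {
	    printf("Current task name: %s\n", name);
	  }

	/* Rename the calling task; long names are truncated. */
	(void)prctl(PR_SET_NAME, "worker", 0);
#endif
}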
/* * cladm(2) cluster administration system call. */ int cladm(int fac, int cmd, void *arg) { int error = 0; int copyout_bootflags; switch (fac) { case CL_INITIALIZE: if (cmd != CL_GET_BOOTFLAG) { error = EINVAL; break; } /* * The CLUSTER_INSTALLING and CLUSTER_DCS_ENABLED bootflags are * internal flags. We do not want to expose these to the user * level. */ copyout_bootflags = (cluster_bootflags & ~(CLUSTER_INSTALLING | CLUSTER_DCS_ENABLED)); if (copyout(&copyout_bootflags, arg, sizeof (int))) { error = EFAULT; } break; case CL_CONFIG: /* * We handle CL_NODEID here so that the node number * can be returned if the system is configured as part * of a cluster but not booted as part of the cluster. */ if (cmd == CL_NODEID) { nodeid_t nid; /* return error if not configured as a cluster */ if (!(cluster_bootflags & CLUSTER_CONFIGURED)) { error = ENOSYS; break; } nid = clconf_get_nodeid(); error = copyout(&nid, arg, sizeof (nid)); break; } /* FALLTHROUGH */ default: if ((cluster_bootflags & (CLUSTER_CONFIGURED|CLUSTER_BOOTED)) != (CLUSTER_CONFIGURED|CLUSTER_BOOTED)) { error = EINVAL; break; } error = cladmin(fac, cmd, arg); /* * error will be -1 if the cladm module cannot be loaded; * otherwise, it is the errno value returned * (see {i86,sparc}/ml/modstubs.s). */ if (error < 0) error = ENOSYS; break; } return (error ? set_errno(error) : 0); }
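/*
 * Caller sketch (an assumption, not from the original source): checking
 * whether the system was booted as a cluster member through the _cladm()
 * wrapper conventionally declared in <sys/cladm.h>.  The wrapper name and
 * header are assumptions; the facility, command, and flag names come from
 * the code above.
 */
#include <sys/cladm.h>

static int
booted_in_cluster(void)
{
	int bootflags = 0;

	if (_cladm(CL_INITIALIZE, CL_GET_BOOTFLAG, &bootflags) != 0)
		return (0);

	return ((bootflags & CLUSTER_BOOTED) != 0);
}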
ssize_t lib_fread(FAR void *ptr, size_t count, FAR FILE *stream) { unsigned char *dest = (unsigned char*)ptr; ssize_t bytes_read; #if CONFIG_STDIO_BUFFER_SIZE > 0 int ret; #endif /* Make sure that reading from this stream is allowed */ if (!stream || (stream->fs_oflags & O_RDOK) == 0) { set_errno(EBADF); bytes_read = -1; } else { /* The stream must be stable until we complete the read */ lib_take_semaphore(stream); #if CONFIG_NUNGET_CHARS > 0 /* First, re-read any previously ungotten characters */ while ((stream->fs_nungotten > 0) && (count > 0)) { /* Decrement the count of ungotten bytes to get an index */ stream->fs_nungotten--; /* Return the last ungotten byte */ *dest++ = stream->fs_ungotten[stream->fs_nungotten]; /* That's one less byte that we have to read */ count--; } #endif #if CONFIG_STDIO_BUFFER_SIZE > 0 /* If the buffer is currently being used for write access, then * flush all of the buffered write data. We do not support concurrent * buffered read/write access. */ ret = lib_wrflush(stream); if (ret < 0) { lib_give_semaphore(stream); return ret; } /* Now get any other needed chars from the buffer or the file. */ while (count > 0) { /* Is there readable data in the buffer? */ while ((count > 0) && (stream->fs_bufpos < stream->fs_bufread)) { /* Yes, copy a byte into the user buffer */ *dest++ = *stream->fs_bufpos++; count--; } /* The buffer is empty OR we have already supplied the number of * bytes requested in the read. Check if we need to read * more from the file. */ if (count > 0) { size_t buffer_available; /* We need to read more data into the buffer from the file */ /* Mark the buffer empty */ stream->fs_bufpos = stream->fs_bufread = stream->fs_bufstart; /* How much space is available in the buffer? */ buffer_available = stream->fs_bufend - stream->fs_bufread; /* Will the number of bytes that we need to read fit into * the buffer space that is available? If the read size is * larger than the buffer, then read some of the data * directly into the user's buffer. */ if (count > buffer_available) { bytes_read = read(stream->fs_fd, dest, count); if (bytes_read < 0) { /* An error occurred on the read. The error code is * in the 'errno' variable. */ goto errout_with_errno; } else if (bytes_read == 0) { /* We are at the end of the file. But we may already * have buffered data. In that case, we will report * the EOF indication later. */ goto shortread; } else { /* Some bytes were read. Adjust the dest pointer */ dest += bytes_read; /* Were all of the requested bytes read? */ if ((size_t)bytes_read < count) { /* No. We must be at the end of file. */ goto shortread; } else { /* Yes. We are done. */ count = 0; } } } else { /* The number of bytes required to satisfy the read * is less than or equal to the size of the buffer * space that we have left. Read as much as we can * into the buffer. */ bytes_read = read(stream->fs_fd, stream->fs_bufread, buffer_available); if (bytes_read < 0) { /* An error occurred on the read. The error code is * in the 'errno' variable. */ goto errout_with_errno; } else if (bytes_read == 0) { /* We are at the end of the file. But we may already * have buffered data. In that case, we will report * the EOF indication later. */ goto shortread; } else { /* Some bytes were read */ stream->fs_bufread += bytes_read; } } } } #else /* Now get any other needed chars from the file. */ while (count > 0) { bytes_read = read(stream->fs_fd, dest, count); if (bytes_read < 0) { /* An error occurred on the read. The error code is * in the 'errno' variable. 
*/ goto errout_with_errno; } else if (bytes_read == 0) { /* We are at the end of the file. But we may already * have buffered data. In that case, we will report * the EOF indication later. */ break; } else { dest += bytes_read; count -= bytes_read; } } #endif /* Here after a successful (but perhaps short) read */ #if CONFIG_STDIO_BUFFER_SIZE > 0 shortread: #endif bytes_read = dest - (unsigned char*)ptr; /* Set or clear the EOF indicator. If we get here because of a * short read and the total number of bytes read is zero, then * we must be at the end-of-file. */ if (bytes_read > 0) { stream->fs_flags &= ~__FS_FLAG_EOF; } else { stream->fs_flags |= __FS_FLAG_EOF; } lib_give_semaphore(stream); } return bytes_read; /* Error exits */ errout_with_errno: stream->fs_flags |= __FS_FLAG_ERROR; lib_give_semaphore(stream); return -get_errno(); }
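/*
 * Usage sketch (not part of the original source): lib_fread() is the engine
 * behind the standard fread() in this C library, so a typical caller only
 * sees it through stdio.  The file name is a placeholder and the helper
 * name read_block() is illustrative.
 */
#include <stdio.h>
#include <sys/types.h>

static ssize_t
read_block(void *buf, size_t len)
{
	FILE *fp = fopen("/mnt/data.bin", "rb");
	size_t nread;

	if (fp == NULL)
	  {
	    return -1;
	  }

	/* fread() hands size * nitems to lib_fread() as a byte count. */
	nread = fread(buf, 1, len, fp);
	if (nread < len && ferror(fp))
	  {
	    fclose(fp);
	    return -1;
	  }

	fclose(fp);
	return (ssize_t)nread;
}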