/****************************************************************************
 * Name: losetup
 *
 * Description:
 *   Set up a loop block device so that the regular file 'filename' can be
 *   accessed as the block device 'devname'.
 *
 * Input Parameters:
 *   devname  - Path of the block device node to create
 *   filename - Path of the backing regular file
 *   sectsize - Logical sector size to expose (must be non-zero)
 *   offset   - Byte offset into the file where the block device begins
 *   readonly - True: never attempt to open the file for writing
 *
 * Returned Value:
 *   OK on success; a negated errno value on failure.
 *
 ****************************************************************************/

int losetup(FAR const char *devname, FAR const char *filename,
            uint16_t sectsize, off_t offset, bool readonly)
{
  FAR struct loop_struct_s *dev;
  struct stat sb;
  int ret;

  /* Sanity check */

#ifdef CONFIG_DEBUG
  if (!devname || !filename || !sectsize)
    {
      return -EINVAL;
    }
#endif

  /* Get the size of the file */

  ret = stat(filename, &sb);
  if (ret < 0)
    {
      dbg("Failed to stat %s: %d\n", filename, get_errno());
      return -get_errno();
    }

  /* Check if the file system is big enough for one block */

  if (sb.st_size - offset < sectsize)
    {
      dbg("File is too small for blocksize\n");
      return -ERANGE;
    }

  /* Allocate a loop device structure */

  dev = (FAR struct loop_struct_s *)kmm_zalloc(sizeof(struct loop_struct_s));
  if (!dev)
    {
      return -ENOMEM;
    }

  /* Initialize the loop device structure. */

  sem_init(&dev->sem, 0, 1);
  dev->nsectors = (sb.st_size - offset) / sectsize;
  dev->sectsize = sectsize;
  dev->offset   = offset;

  /* Open the file. */

#ifdef CONFIG_FS_WRITABLE
  dev->writeenabled = false; /* Assume failure */
  dev->fd           = -1;

  /* First try to open the device R/W access (unless we are asked
   * to open it readonly).
   */

  if (!readonly)
    {
      dev->fd = open(filename, O_RDWR);
    }

  if (dev->fd >= 0)
    {
      dev->writeenabled = true; /* Success */
    }
  else
#endif
    {
      /* If that fails, then try to open the device read-only.
       *
       * FIX: the fallback must use O_RDONLY.  The original code repeated
       * the O_RDWR open here, which (a) could not succeed after the first
       * R/W open had just failed and (b) wrongly requested write access
       * when the caller asked for a read-only setup.
       */

      dev->fd = open(filename, O_RDONLY);
      if (dev->fd < 0)
        {
          dbg("Failed to open %s: %d\n", filename, get_errno());
          ret = -get_errno();
          goto errout_with_dev;
        }
    }

  /* Inode private data will be reference to the loop device structure */

  ret = register_blockdriver(devname, &g_bops, 0, dev);
  if (ret < 0)
    {
      fdbg("register_blockdriver failed: %d\n", -ret);
      goto errout_with_fd;
    }

  return OK;

errout_with_fd:
  close(dev->fd);

errout_with_dev:
  kmm_free(dev);
  return ret;
}
/****************************************************************************
 * Name: ads7843e_register
 *
 * Description:
 *   Configure the ADS7843E touchscreen controller on the given SPI bus and
 *   register it as /dev/inputN where N is 'minor'.
 *
 * Input Parameters:
 *   spi    - SPI driver instance used to communicate with the ADS7843E
 *   config - Board-specific callbacks (IRQ attach/enable/clear, etc.)
 *   minor  - Input device minor number (0..99)
 *
 * Returned Value:
 *   OK on success; a negated errno value on failure.
 *
 ****************************************************************************/

int ads7843e_register(FAR struct spi_dev_s *spi,
                      FAR struct ads7843e_config_s *config, int minor)
{
  FAR struct ads7843e_dev_s *priv;
  char devname[DEV_NAMELEN];
#ifdef CONFIG_ADS7843E_MULTIPLE
  irqstate_t flags;
#endif
  int ret;

  iinfo("spi: %p minor: %d\n", spi, minor);

  /* Debug-only sanity checks */

  DEBUGASSERT(spi != NULL && config != NULL && minor >= 0 && minor < 100);

  /* Create and initialize a ADS7843E device driver instance */

#ifndef CONFIG_ADS7843E_MULTIPLE
  priv = &g_ads7843e;
#else
  priv = (FAR struct ads7843e_dev_s *)kmm_malloc(sizeof(struct ads7843e_dev_s));
  if (!priv)
    {
      ierr("ERROR: kmm_malloc(%d) failed\n", sizeof(struct ads7843e_dev_s));
      return -ENOMEM;
    }
#endif

  /* Initialize the ADS7843E device driver instance */

  memset(priv, 0, sizeof(struct ads7843e_dev_s));
  priv->spi     = spi;               /* Save the SPI device handle */
  priv->config  = config;            /* Save the board configuration */
  priv->wdog    = wd_create();       /* Create a watchdog timer */
  priv->threshx = INVALID_THRESHOLD; /* Initialize thresholding logic */
  priv->threshy = INVALID_THRESHOLD; /* Initialize thresholding logic */

  /* Initialize semaphores */

  sem_init(&priv->devsem, 0, 1);     /* Initialize device structure semaphore */
  sem_init(&priv->waitsem, 0, 0);    /* Initialize pen event wait semaphore */

  /* The pen event semaphore is used for signaling and, hence, should not
   * have priority inheritance enabled.
   */

  sem_setprotocol(&priv->waitsem, SEM_PRIO_NONE);

  /* Make sure that interrupts are disabled */

  config->clear(config);
  config->enable(config, false);

  /* Attach the interrupt handler */

  ret = config->attach(config, ads7843e_interrupt);
  if (ret < 0)
    {
      ierr("ERROR: Failed to attach interrupt\n");
      goto errout_with_priv;
    }

  iinfo("Mode: %d Bits: 8 Frequency: %d\n",
        CONFIG_ADS7843E_SPIMODE, CONFIG_ADS7843E_FREQUENCY);

  /* Lock the SPI bus so that we have exclusive access */

  ads7843e_lock(spi);

  /* Enable the PEN IRQ */

  ads7843e_sendcmd(priv, ADS7843_CMD_ENABPENIRQ);

  /* Unlock the bus */

  ads7843e_unlock(spi);

  /* Register the device as an input device */

  (void)snprintf(devname, DEV_NAMELEN, DEV_FORMAT, minor);
  iinfo("Registering %s\n", devname);

  ret = register_driver(devname, &ads7843e_fops, 0666, priv);
  if (ret < 0)
    {
      ierr("ERROR: register_driver() failed: %d\n", ret);
      goto errout_with_priv;
    }

  /* If multiple ADS7843E devices are supported, then we will need to add
   * this new instance to a list of device instances so that it can be
   * found by the interrupt handler based on the received IRQ number.
   *
   * FIX: the original code called leave_critical_section(flags) without a
   * matching enter_critical_section(), leaving 'flags' uninitialized
   * (undefined behavior) and the list insertion unprotected against the
   * interrupt handler that traverses it.
   */

#ifdef CONFIG_ADS7843E_MULTIPLE
  flags            = enter_critical_section();
  priv->flink      = g_ads7843elist;
  g_ads7843elist   = priv;
  leave_critical_section(flags);
#endif

  /* Schedule work to perform the initial sampling and to set the data
   * availability conditions.
   */

  ret = work_queue(HPWORK, &priv->work, ads7843e_worker, priv, 0);
  if (ret != 0)
    {
      ierr("ERROR: Failed to queue work: %d\n", ret);
      goto errout_with_priv;
    }

  /* And return success (?) */

  return OK;

errout_with_priv:
  sem_destroy(&priv->devsem);
#ifdef CONFIG_ADS7843E_MULTIPLE
  kmm_free(priv);
#endif
  return ret;
}
/****************************************************************************
 * Name: ajoy_close
 *
 * Description:
 *   Standard file-operations close method for the analog joystick upper
 *   half.  Unlinks this open instance from the device's open list, frees
 *   it, and re-evaluates interrupt enabling for the remaining clients.
 *
 * Returned Value:
 *   OK on success; a negated errno value on failure.
 *
 ****************************************************************************/

static int ajoy_close(FAR struct file *filep)
{
  FAR struct ajoy_upperhalf_s *priv;
  FAR struct ajoy_open_s *opriv;
  FAR struct ajoy_open_s *entry;
  FAR struct ajoy_open_s *pred;
  FAR struct inode *inode;
  irqstate_t flags;
  bool already;
  int ret;

  DEBUGASSERT(filep && filep->f_priv && filep->f_inode);
  opriv = filep->f_priv;
  inode = filep->f_inode;
  DEBUGASSERT(inode->i_private);
  priv  = (FAR struct ajoy_upperhalf_s *)inode->i_private;

  /* Atomic test-and-set of the per-open closing flag.  This is a feeble
   * guard against the improbable case of two threads closing the same
   * open instance concurrently; stale pointers are not really detected.
   */

  flags             = irqsave();
  already           = opriv->ao_closing;
  opriv->ao_closing = true;
  irqrestore(flags);

  if (already)
    {
      /* Another thread is doing the close */

      return OK;
    }

  /* Get exclusive access to the driver structure */

  ret = ajoy_takesem(&priv->au_exclsem);
  if (ret < 0)
    {
      ivdbg("ERROR: ajoy_takesem failed: %d\n", ret);
      return ret;
    }

  /* Walk the device's open list looking for this open instance,
   * remembering its predecessor for the unlink below.
   */

  pred  = NULL;
  entry = priv->au_open;
  while (entry && entry != opriv)
    {
      pred  = entry;
      entry = entry->ao_flink;
    }

  DEBUGASSERT(entry);
  if (!entry)
    {
      ivdbg("ERROR: Failed to find open entry\n");
      ret = -ENOENT;
      goto errout_with_exclsem;
    }

  /* Unlink the open instance from the list */

  if (pred)
    {
      pred->ao_flink = opriv->ao_flink;
    }
  else
    {
      priv->au_open = opriv->ao_flink;
    }

  /* And free the open structure */

  kmm_free(opriv);

  /* Re-evaluate interrupt enabling now that one client is gone */

  ajoy_enable(priv);
  ret = OK;

errout_with_exclsem:
  ajoy_givesem(&priv->au_exclsem);
  return ret;
}
/****************************************************************************
 * Name: group_initialize
 *
 * Description:
 *   Complete initialization of the task group for the main task described
 *   by 'tcb': allocate the member-PID array (if group membership tracking
 *   is enabled), link the group into the global group list, and record the
 *   main task's PID.
 *
 * Input Parameters:
 *   tcb - TCB of the new main task; tcb->cmn.group must already point to
 *         an allocated, zeroed task_group_s.
 *
 * Returned Value:
 *   OK on success; -ENOMEM if the member array cannot be allocated.
 *
 ****************************************************************************/

int group_initialize(FAR struct task_tcb_s *tcb)
{
  FAR struct task_group_s *group;
#if defined(HAVE_GROUP_MEMBERS) || defined(CONFIG_ARCH_ADDRENV)
  irqstate_t flags;
#endif

  DEBUGASSERT(tcb && tcb->cmn.group);
  group = tcb->cmn.group;

#ifdef HAVE_GROUP_MEMBERS
  /* Allocate space to hold GROUP_INITIAL_MEMBERS members of the group */

  group->tg_members = (FAR pid_t *)kmm_malloc(GROUP_INITIAL_MEMBERS * sizeof(pid_t));
  if (!group->tg_members)
    {
      /* NOTE(review): the group structure itself is freed here, but
       * tcb->cmn.group still points at the freed memory — confirm that
       * every caller treats a failed group_initialize() as fatal for the
       * TCB and never dereferences tcb->cmn.group afterwards.
       */

      kmm_free(group);
      return -ENOMEM;
    }

  /* Assign the PID of this new task as a member of the group. */

  group->tg_members[0] = tcb->cmn.pid;

  /* Initialize the non-zero elements of group structure and assign it to
   * the tcb.
   */

  group->tg_mxmembers = GROUP_INITIAL_MEMBERS; /* Number of members in allocation */
#endif

#if defined(HAVE_GROUP_MEMBERS) || defined(CONFIG_ARCH_ADDRENV)
  /* Add the initialized entry to the list of groups (interrupts disabled
   * while the global singly-linked list head is updated).
   */

  flags = irqsave();
  group->flink = g_grouphead;
  g_grouphead = group;
  irqrestore(flags);
#endif

  /* Save the ID of the main task within the group of threads.  This is
   * needed for things like SIGCHILD.  The ID is also saved in the TCB of
   * the main task but is retained in the group as well, which may persist
   * after the main task has exited.
   */

#if !defined(CONFIG_DISABLE_PTHREAD) && defined(CONFIG_SCHED_HAVE_PARENT)
  group->tg_task = tcb->cmn.pid;
#endif

#if defined(CONFIG_SCHED_ATEXIT) && !defined(CONFIG_SCHED_ONEXIT)
  /* atexit support *********************************************************** */

  sq_init(&(group->tg_atexitfunc));
#endif

#ifdef CONFIG_SCHED_ONEXIT
  /* on_exit support ********************************************************** */

  sq_init(&(group->tg_onexitfunc));
#endif

  /* Mark that there is one member in the group, the main task */

  group->tg_nmembers = 1;
  return OK;
}
int pty_register(int minor) { FAR struct pty_devpair_s *devpair; int pipe_a[2]; int pipe_b[2]; char devname[16]; int ret; /* Allocate a device instance */ devpair = kmm_zalloc(sizeof(struct pty_devpair_s)); if (devpair == NULL) { return -ENOMEM; } sem_init(&devpair->pp_slavesem, 0, 0); sem_init(&devpair->pp_exclsem, 0, 1); #ifndef CONFIG_DISABLE_PSEUDOFS_OPERATIONS devpair->pp_minor = minor; #endif devpair->pp_locked = true; devpair->pp_master.pd_devpair = devpair; devpair->pp_master.pd_master = true; devpair->pp_slave.pd_devpair = devpair; /* Create two pipes */ ret = pipe(pipe_a); if (ret < 0) { goto errout_with_devpair; } ret = pipe(pipe_b); if (ret < 0) { goto errout_with_pipea; } /* Detach the pipe file descriptors (closing them in the process) * * fd[0] is for reading; * fd[1] is for writing. */ ret = file_detach(pipe_a[0], &devpair->pp_master.pd_src); if (ret < 0) { goto errout_with_pipeb; } pipe_a[0] = -1; ret = file_detach(pipe_a[1], &devpair->pp_slave.pd_sink); if (ret < 0) { goto errout_with_pipeb; } pipe_a[1] = -1; ret = file_detach(pipe_b[0], &devpair->pp_slave.pd_src); if (ret < 0) { goto errout_with_pipeb; } pipe_b[0] = -1; ret = file_detach(pipe_b[1], &devpair->pp_master.pd_sink); if (ret < 0) { goto errout_with_pipeb; } pipe_b[1] = -1; /* Register the slave device * * BSD style (deprecated): /dev/ttypN * SUSv1 style: /dev/pts/N * * Where N is the minor number */ #ifdef CONFIG_PSEUDOTERM_BSD snprintf(devname, 16, "/dev/ttyp%d", minor); #else snprintf(devname, 16, "/dev/pts/%d", minor); #endif ret = register_driver(devname, &pty_fops, 0666, &devpair->pp_slave); if (ret < 0) { goto errout_with_pipeb; } /* Register the master device * * BSD style (deprecated): /dev/ptyN * SUSv1 style: Master: /dev/ptmx (multiplexor, see ptmx.c) * * Where N is the minor number */ snprintf(devname, 16, "/dev/pty%d", minor); ret = register_driver(devname, &pty_fops, 0666, &devpair->pp_master); if (ret < 0) { goto errout_with_slave; } return OK; errout_with_slave: #ifdef 
CONFIG_PSEUDOTERM_BSD snprintf(devname, 16, "/dev/ttyp%d", minor); #else snprintf(devname, 16, "/dev/pts/%d", minor); #endif (void)unregister_driver(devname); errout_with_pipeb: if (pipe_b[0] >= 0) { close(pipe_b[0]); } else { (void)file_close_detached(&devpair->pp_master.pd_src); } if (pipe_b[1] >= 0) { close(pipe_b[1]); } else { (void)file_close_detached(&devpair->pp_slave.pd_sink); } errout_with_pipea: if (pipe_a[0] >= 0) { close(pipe_a[0]); } else { (void)file_close_detached(&devpair->pp_slave.pd_src); } if (pipe_a[1] >= 0) { close(pipe_a[1]); } else { (void)file_close_detached(&devpair->pp_master.pd_sink); } errout_with_devpair: sem_destroy(&devpair->pp_exclsem); sem_destroy(&devpair->pp_slavesem); kmm_free(devpair); return ret; }
/****************************************************************************
 * Name: exepath_next
 *
 * Description:
 *   Return the next full path from the PATH variable that contains a
 *   regular file matching 'relpath'.  The caller owns the returned string
 *   and must free it with kmm_free().
 *
 * Input Parameters:
 *   handle  - Handle previously returned by exepath_init()
 *   relpath - Relative path of the file to find (no leading '/')
 *
 * Returned Value:
 *   Allocated full path on success; NULL when the PATH list is exhausted
 *   or on allocation failure.
 *
 ****************************************************************************/

FAR char *exepath_next(EXEPATH_HANDLE handle, FAR const char *relpath)
{
  FAR struct exepath_s *exepath = (FAR struct exepath_s *)handle;
  struct stat buf;
  FAR char *endptr;
  FAR char *path;
  FAR char *fullpath;
  int pathlen;
  int ret;

  /* Verify that a valid handle and relative path were provided */

  DEBUGASSERT(exepath && relpath);
  DEBUGASSERT(relpath[0] != '\0' && relpath[0] != '/');

  /* Loop until (1) we find a file with this relative path from one of the
   * absolute paths in the PATH variable, or (2) all of the absolute paths
   * in the PATH variable have been considered.
   */

  for (; ; )
    {
      /* Make sure that exepath->next points to the beginning of a string */

      path = exepath->next;
      if (*path == '\0')
        {
          /* A NUL here means that either (1) the PATH variable is empty,
           * or (2) we have already examined all of the paths in the
           * PATH variable.
           */

          return (FAR char *)NULL;
        }

      /* Okay... 'path' points to the beginning of the string.  The string
       * may be terminated either with (1) ':' which separates the path
       * from the next path in the list, or (2) NUL which marks the end of
       * the list.
       */

      endptr = strchr(path, ':');
      if (!endptr)
        {
          /* strchr returned NULL: ':' does not appear in the string, so
           * this must be the final path in the PATH variable content.
           */

          endptr = &path[strlen(path)];
          exepath->next = endptr;
          DEBUGASSERT(*endptr == '\0');
        }
      else
        {
          DEBUGASSERT(*endptr == ':');
          exepath->next = endptr + 1;
          *endptr = '\0';
        }

      /* "<path>" + "/" + "<relpath>" + NUL */

      pathlen  = strlen(path) + strlen(relpath) + 2;
      fullpath = (FAR char *)kmm_malloc(pathlen);
      if (!fullpath)
        {
          /* Failed to allocate memory */

          return (FAR char *)NULL;
        }

      /* Construct the full path (snprintf is bounded by the exact length
       * computed above; the original used unbounded sprintf).
       */

      snprintf(fullpath, pathlen, "%s/%s", path, relpath);

      /* Verify that a regular file exists at this path */

      ret = stat(fullpath, &buf);
      if (ret == OK && S_ISREG(buf.st_mode))
        {
          return fullpath;
        }

      /* Failed to stat the file.  Just free the allocated memory and
       * continue to try the next path.
       */

      kmm_free(fullpath);
    }

  /* We will not get here */
}
/****************************************************************************
 * Name: composite_initialize
 *
 * Description:
 *   Allocate and initialize the composite USB device: obtain the two
 *   constituent class driver objects, fill in the composite driver
 *   structure, and register it with the USB device controller.
 *
 * Returned Value:
 *   An opaque handle (the allocation) on success; NULL on failure.
 *   The caller passes this handle back to the uninitialize function.
 *
 ****************************************************************************/

FAR void *composite_initialize(void)
{
  FAR struct composite_alloc_s *alloc;
  FAR struct composite_dev_s *priv;
  FAR struct composite_driver_s *drvr;
  int ret;

  /* Allocate the structures needed */

  alloc = (FAR struct composite_alloc_s *)kmm_malloc(sizeof(struct composite_alloc_s));
  if (!alloc)
    {
      usbtrace(TRACE_CLSERROR(USBCOMPOSITE_TRACEERR_ALLOCDEVSTRUCT), 0);
      return NULL;
    }

  /* Convenience pointers into the allocated blob */

  priv = &alloc->dev;
  drvr = &alloc->drvr;

  /* Initialize the USB composite driver structure */

  memset(priv, 0, sizeof(struct composite_dev_s));

  /* Get the constituent class driver objects */

  ret = DEV1_CLASSOBJECT(&priv->dev1);
  if (ret < 0)
    {
      usbtrace(TRACE_CLSERROR(USBCOMPOSITE_TRACEERR_CLASSOBJECT), (uint16_t)-ret);
      goto errout_with_alloc;
    }

  /* NOTE(review): if DEV2_CLASSOBJECT fails below, the dev1 class object
   * obtained above is not released on the error path — confirm whether
   * the class object needs an explicit uninitialize call here or is
   * reclaimed elsewhere.
   */

  ret = DEV2_CLASSOBJECT(&priv->dev2);
  if (ret < 0)
    {
      usbtrace(TRACE_CLSERROR(USBCOMPOSITE_TRACEERR_CLASSOBJECT), (uint16_t)-ret);
      goto errout_with_alloc;
    }

  /* Initialize the USB class driver structure */

#ifdef CONFIG_USBDEV_DUALSPEED
  drvr->drvr.speed = USB_SPEED_HIGH;
#else
  drvr->drvr.speed = USB_SPEED_FULL;
#endif
  drvr->drvr.ops = &g_driverops;
  drvr->dev = priv;

  /* Register the USB composite class driver */

  ret = usbdev_register(&drvr->drvr);
  if (ret)
    {
      usbtrace(TRACE_CLSERROR(USBCOMPOSITE_TRACEERR_DEVREGISTER), (uint16_t)-ret);
      goto errout_with_alloc;
    }

  return (FAR void *)alloc;

errout_with_alloc:
  kmm_free(alloc);
  return NULL;
}
/****************************************************************************
 * Name: select
 *
 * Description:
 *   POSIX select() implemented as a layer on top of poll():  the three
 *   fd_sets are converted into a pollfd array, poll() does the waiting,
 *   and the results are converted back into the fd_sets.
 *
 * Input Parameters:
 *   nfds      - Highest-numbered descriptor in any set, plus 1
 *   readfds   - Set of descriptors to watch for read-readiness (or NULL)
 *   writefds  - Set of descriptors to watch for write-readiness (or NULL)
 *   exceptfds - Set of descriptors to watch for exceptions (or NULL)
 *   timeout   - Maximum wait; NULL means wait forever
 *
 * Returned Value:
 *   Number of ready descriptor-conditions, 0 on timeout, or ERROR with
 *   errno set on failure.
 *
 ****************************************************************************/

int select(int nfds, FAR fd_set *readfds, FAR fd_set *writefds,
           FAR fd_set *exceptfds, FAR struct timeval *timeout)
{
  struct pollfd *pollset = NULL;
  int errcode = OK;
  int fd;
  int npfds;
  int msec;
  int ndx;
  int ret;

  /* select() is cancellation point */

  (void)enter_cancellation_point();

  /* How many pollfd structures do we need to allocate? */

  for (fd = 0, npfds = 0; fd < nfds; fd++)
    {
      /* Check if any monitor operation is requested on this fd */

      if ((readfds && FD_ISSET(fd, readfds)) ||
          (writefds && FD_ISSET(fd, writefds)) ||
          (exceptfds && FD_ISSET(fd, exceptfds)))
        {
          /* Yes.. increment the count of pollfds structures needed */

          npfds++;
        }
    }

  /* Allocate the descriptor list for poll().
   *
   * FIX: only allocate when npfds > 0.  The original unconditionally
   * called kmm_zalloc(0) when no descriptor was set, which may return
   * NULL and caused select() to fail with ENOMEM instead of behaving as
   * a pure timed wait.  poll() accepts a NULL set with nfds == 0.
   */

  if (npfds > 0)
    {
      pollset = (struct pollfd *)kmm_zalloc(npfds * sizeof(struct pollfd));
      if (!pollset)
        {
          set_errno(ENOMEM);
          leave_cancellation_point();
          return ERROR;
        }
    }

  /* Initialize the descriptor list for poll() */

  for (fd = 0, ndx = 0; fd < nfds; fd++)
    {
      int incr = 0;

      /* The readfds set holds the set of FDs that the caller can be
       * assured of reading from without blocking.  Note that POLLHUP is
       * included as a read-able condition.  POLLHUP will be reported at
       * the end-of-file or when a connection is lost.  In either case,
       * the read() can then be performed without blocking.
       */

      if (readfds && FD_ISSET(fd, readfds))
        {
          pollset[ndx].fd      = fd;
          pollset[ndx].events |= POLLIN;
          incr                 = 1;
        }

      /* The writefds set holds the set of FDs that the caller can be
       * assured of writing to without blocking.
       */

      if (writefds && FD_ISSET(fd, writefds))
        {
          pollset[ndx].fd      = fd;
          pollset[ndx].events |= POLLOUT;
          incr                 = 1;
        }

      /* The exceptfds set holds the set of FDs that are watched for
       * exceptions (POLLERR is always reported by poll(), so no event
       * bit needs to be set here).
       */

      if (exceptfds && FD_ISSET(fd, exceptfds))
        {
          pollset[ndx].fd = fd;
          incr            = 1;
        }

      ndx += incr;
    }

  DEBUGASSERT(ndx == npfds);

  /* Convert the timeout to milliseconds */

  if (timeout)
    {
      /* Calculate the timeout in milliseconds */

      msec = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
    }
  else
    {
      /* Any negative value of msec means no timeout */

      msec = -1;
    }

  /* Then let poll do all of the real work. */

  ret = poll(pollset, npfds, msec);
  if (ret < 0)
    {
      /* poll() failed! Save the errno value */

      errcode = get_errno();
    }

  /* Now set up the return values */

  if (readfds)
    {
      memset(readfds, 0, sizeof(fd_set));
    }

  if (writefds)
    {
      memset(writefds, 0, sizeof(fd_set));
    }

  if (exceptfds)
    {
      memset(exceptfds, 0, sizeof(fd_set));
    }

  /* Convert the poll descriptor list back into selects 3 bitsets */

  if (ret > 0)
    {
      ret = 0;
      for (ndx = 0; ndx < npfds; ndx++)
        {
          /* Check for read conditions.  Note that POLLHUP is included as
           * a read condition.  POLLHUP will be reported when no more data
           * will be available (such as when a connection is lost).  In
           * either case, the read() can then be performed without
           * blocking.
           */

          if (readfds)
            {
              if (pollset[ndx].revents & (POLLIN | POLLHUP))
                {
                  FD_SET(pollset[ndx].fd, readfds);
                  ret++;
                }
            }

          /* Check for write conditions */

          if (writefds)
            {
              if (pollset[ndx].revents & POLLOUT)
                {
                  FD_SET(pollset[ndx].fd, writefds);
                  ret++;
                }
            }

          /* Check for exceptions */

          if (exceptfds)
            {
              if (pollset[ndx].revents & POLLERR)
                {
                  FD_SET(pollset[ndx].fd, exceptfds);
                  ret++;
                }
            }
        }
    }

  /* kmm_free() of a NULL pointer is a safe no-op */

  kmm_free(pollset);

  /* Did poll() fail above? */

  if (ret < 0)
    {
      /* Yes.. restore the errno value */

      set_errno(errcode);
    }

  leave_cancellation_point();
  return ret;
}
/****************************************************************************
 * Name: max11802_register
 *
 * Description:
 *   Configure the MAX11802 touchscreen controller on the given SPI bus and
 *   register it as /dev/inputN where N is 'minor'.
 *
 * Input Parameters:
 *   spi    - SPI driver instance used to communicate with the MAX11802
 *   config - Board-specific callbacks (IRQ attach/enable/clear, etc.)
 *   minor  - Input device minor number (0..99)
 *
 * Returned Value:
 *   OK on success; a negated errno value on failure.
 *
 ****************************************************************************/

int max11802_register(FAR struct spi_dev_s *spi,
                      FAR struct max11802_config_s *config, int minor)
{
  FAR struct max11802_dev_s *priv;
  char devname[DEV_NAMELEN];
#ifdef CONFIG_MAX11802_MULTIPLE
  irqstate_t flags;
#endif
  int ret;

  iinfo("spi: %p minor: %d\n", spi, minor);

  /* Debug-only sanity checks */

  DEBUGASSERT(spi != NULL && config != NULL && minor >= 0 && minor < 100);

  /* Create and initialize a MAX11802 device driver instance */

#ifndef CONFIG_MAX11802_MULTIPLE
  priv = &g_max11802;
#else
  priv = (FAR struct max11802_dev_s *)kmm_malloc(sizeof(struct max11802_dev_s));
  if (!priv)
    {
      ierr("ERROR: kmm_malloc(%d) failed\n", sizeof(struct max11802_dev_s));
      return -ENOMEM;
    }
#endif

  /* Initialize the MAX11802 device driver instance */

  memset(priv, 0, sizeof(struct max11802_dev_s));
  priv->spi     = spi;               /* Save the SPI device handle */
  priv->config  = config;            /* Save the board configuration */
  priv->wdog    = wd_create();       /* Create a watchdog timer */
  priv->threshx = INVALID_THRESHOLD; /* Initialize thresholding logic */
  priv->threshy = INVALID_THRESHOLD; /* Initialize thresholding logic */

  sem_init(&priv->devsem, 0, 1);     /* Initialize device structure semaphore */
  sem_init(&priv->waitsem, 0, 0);    /* Initialize pen event wait semaphore */

  /* The pen event semaphore is used for signaling and, hence, should not
   * have priority inheritance enabled.  (FIX: added for consistency with
   * the sibling ads7843e_register(); the original omitted it here.)
   */

  sem_setprotocol(&priv->waitsem, SEM_PRIO_NONE);

  /* Make sure that interrupts are disabled */

  config->clear(config);
  config->enable(config, false);

  /* Attach the interrupt handler */

  ret = config->attach(config, max11802_interrupt);
  if (ret < 0)
    {
      ierr("ERROR: Failed to attach interrupt\n");
      goto errout_with_priv;
    }

  iinfo("Mode: %d Bits: 8 Frequency: %d\n",
        CONFIG_MAX11802_SPIMODE, CONFIG_MAX11802_FREQUENCY);

  /* Lock the SPI bus so that we have exclusive access */

  max11802_lock(spi);

  /* Configure MAX11802 registers */

  SPI_SELECT(priv->spi, SPIDEV_TOUCHSCREEN, true);
  (void)SPI_SEND(priv->spi, MAX11802_CMD_MODE_WR);
  (void)SPI_SEND(priv->spi, MAX11802_MODE);
  SPI_SELECT(priv->spi, SPIDEV_TOUCHSCREEN, false);

  SPI_SELECT(priv->spi, SPIDEV_TOUCHSCREEN, true);
  (void)SPI_SEND(priv->spi, MAX11802_CMD_AVG_WR);
  (void)SPI_SEND(priv->spi, MAX11802_AVG);
  SPI_SELECT(priv->spi, SPIDEV_TOUCHSCREEN, false);

  SPI_SELECT(priv->spi, SPIDEV_TOUCHSCREEN, true);
  (void)SPI_SEND(priv->spi, MAX11802_CMD_TIMING_WR);
  (void)SPI_SEND(priv->spi, MAX11802_TIMING);
  SPI_SELECT(priv->spi, SPIDEV_TOUCHSCREEN, false);

  SPI_SELECT(priv->spi, SPIDEV_TOUCHSCREEN, true);
  (void)SPI_SEND(priv->spi, MAX11802_CMD_DELAY_WR);
  (void)SPI_SEND(priv->spi, MAX11802_DELAY);
  SPI_SELECT(priv->spi, SPIDEV_TOUCHSCREEN, false);

  /* Test that the device access was successful. */

  SPI_SELECT(priv->spi, SPIDEV_TOUCHSCREEN, true);
  (void)SPI_SEND(priv->spi, MAX11802_CMD_MODE_RD);
  ret = SPI_SEND(priv->spi, 0);
  SPI_SELECT(priv->spi, SPIDEV_TOUCHSCREEN, false);

  /* Unlock the bus */

  max11802_unlock(spi);

  if (ret != MAX11802_MODE)
    {
      ierr("ERROR: max11802 mode readback failed: %02x\n", ret);
      goto errout_with_priv;
    }

  /* Register the device as an input device */

  (void)snprintf(devname, DEV_NAMELEN, DEV_FORMAT, minor);
  iinfo("Registering %s\n", devname);

  ret = register_driver(devname, &max11802_fops, 0666, priv);
  if (ret < 0)
    {
      ierr("ERROR: register_driver() failed: %d\n", ret);
      goto errout_with_priv;
    }

  /* If multiple MAX11802 devices are supported, then we will need to add
   * this new instance to a list of device instances so that it can be
   * found by the interrupt handler based on the recieved IRQ number.
   */

#ifdef CONFIG_MAX11802_MULTIPLE
  flags          = enter_critical_section();
  priv->flink    = g_max11802list;
  g_max11802list = priv;
  leave_critical_section(flags);
#endif

  /* Schedule work to perform the initial sampling and to set the data
   * availability conditions.
   */

  ret = work_queue(HPWORK, &priv->work, max11802_worker, priv, 0);
  if (ret != 0)
    {
      ierr("ERROR: Failed to queue work: %d\n", ret);
      goto errout_with_priv;
    }

  /* And return success (?) */

  return OK;

errout_with_priv:
  sem_destroy(&priv->devsem);
#ifdef CONFIG_MAX11802_MULTIPLE
  kmm_free(priv);
#endif
  return ret;
}
/****************************************************************************
 * Name: mtdconfig_consolidate
 *
 * Description:
 *   Reclaim space used by released config entries without a RAM copy of a
 *   whole erase block.  Strategy: copy erase block 0 to the (erased) last
 *   block, erase block 0, re-write the format signature, then walk the
 *   remaining blocks copying only active entries back toward the start of
 *   the device, erasing each source block once it has been emptied.
 *
 * Returned Value:
 *   Offset just past the last consolidated entry on success; 0 on failure
 *   (allocation or I/O error).
 *
 ****************************************************************************/

static off_t mtdconfig_consolidate(FAR struct mtdconfig_struct_s *dev)
{
  off_t src_block, dst_block;
  off_t src_offset, dst_offset;
  /* NOTE(review): 'bytes' is uint16_t but receives MTD_READ()'s return
   * value below — presumably sizeof(hdr) always fits; confirm against the
   * MTD interface's return conventions.
   */
  uint16_t blkper, x, bytes, bytes_left_in_block;
  struct mtdconfig_header_s hdr;
  int ret;
  uint8_t sig[CONFIGDATA_BLOCK_HDR_SIZE];
  uint8_t *pBuf;

  /* Prepare to copy block 0 to the last block (erase blocks) */

  src_block = 0;
  dst_block = dev->neraseblocks - 1;

  /* Ensure the last block is erased */

  MTD_ERASE(dev->mtd, dst_block, 1);
  blkper = dev->erasesize / dev->blocksize;
  dst_block *= blkper; /* Convert to read/write blocks */

  /* Allocate a small buffer for moving data */

  pBuf = (uint8_t *)kmm_malloc(dev->blocksize);
  if (pBuf == NULL)
    {
      return 0;
    }

  /* Now copy block zero to last block */

  for (x = 0; x < blkper; x++)
    {
      ret = MTD_BREAD(dev->mtd, src_block++, 1, dev->buffer);
      if (ret < 0)
        {
          /* I/O Error! */

          goto errout;
        }

      ret = MTD_BWRITE(dev->mtd, dst_block++, 1, dev->buffer);
      if (ret < 0)
        {
          /* I/O Error! */

          goto errout;
        }
    }

  /* Erase block zero and write a format signature. */

  MTD_ERASE(dev->mtd, 0, 1);
  sig[0] = 'C';
  sig[1] = 'D';
  sig[2] = CONFIGDATA_FORMAT_VERSION;

  ret = mtdconfig_writebytes(dev, 0, sig, sizeof(sig));
  if (ret != sizeof(sig))
    {
      /* Cannot write even the signature. */

      ret = -EIO;
      goto errout;
    }

  /* Now consolidate entries.  src scans from erase block 1 (block 0's
   * content now lives in the last block); dst packs entries from the
   * start of block 0.  Both offsets skip the per-block header area.
   */

  src_block = 1;
  dst_block = 0;
  src_offset = src_block * dev->erasesize + CONFIGDATA_BLOCK_HDR_SIZE;
  dst_offset = CONFIGDATA_BLOCK_HDR_SIZE;

  while (src_block < dev->neraseblocks)
    {
      /* Scan all headers and move them to the src_offset */

retry_relocate:
      bytes = MTD_READ(dev->mtd, src_offset, sizeof(hdr), (uint8_t *) &hdr);
      if (bytes != sizeof(hdr))
        {
          /* I/O Error! */

          ret = -EIO;
          goto errout;
        }

      if (hdr.flags == MTD_ERASED_FLAGS)
        {
          /* Test if the source entry is active or if we are at the end
           * of data for this erase block.
           */

          if (hdr.id == MTD_ERASED_ID)
            {
              /* No more data in this erase block.  Advance to the
               * next one.
               */

              src_offset = (src_block + 1) * dev->erasesize +
                           CONFIGDATA_BLOCK_HDR_SIZE;
            }
          else
            {
              /* Test if this entry will fit in the current destination
               * block.
               */

              bytes_left_in_block = (dst_block + 1) * dev->erasesize -
                                    dst_offset;
              if (hdr.len + sizeof(hdr) > bytes_left_in_block)
                {
                  /* Item doesn't fit in the block.  Advance to the
                   * next one.
                   */

                  /* Update control variables */

                  dst_block++;
                  dst_offset = dst_block * dev->erasesize +
                               CONFIGDATA_BLOCK_HDR_SIZE;

                  /* dst must never catch up with src or we would
                   * overwrite unconsumed source data.
                   */

                  DEBUGASSERT(dst_block != src_block);

                  /* Retry the relocate */

                  goto retry_relocate;
                }

              /* Copy this entry to the destination */

              ret = mtdconfig_writebytes(dev, dst_offset, (uint8_t *) &hdr,
                                         sizeof(hdr));
              if (ret != sizeof(hdr))
                {
                  /* I/O Error! */

                  ret = -EIO;
                  goto errout;
                }

              src_offset += sizeof(hdr);
              dst_offset += sizeof(hdr);

              /* Now copy the data, at most one block-size buffer at a
               * time (hdr.len is consumed as a remaining-bytes counter).
               */

              while (hdr.len)
                {
                  bytes = hdr.len;
                  if (bytes > dev->blocksize)
                    {
                      bytes = dev->blocksize;
                    }

                  /* Move the data. */

                  ret = mtdconfig_readbytes(dev, src_offset, pBuf, bytes);
                  if (ret != OK)
                    {
                      /* I/O Error! */

                      ret = -EIO;
                      goto errout;
                    }

                  ret = mtdconfig_writebytes(dev, dst_offset, pBuf, bytes);
                  if (ret != bytes)
                    {
                      /* I/O Error! */

                      ret = -EIO;
                      goto errout;
                    }

                  /* Update control variables */

                  hdr.len    -= bytes;
                  src_offset += bytes;
                  dst_offset += bytes;
                }
            }
        }
      else
        {
          /* This item has been released.  Skip it! */

          src_offset += sizeof(hdr) + hdr.len;
          if (src_offset + sizeof(hdr) >= (src_block + 1) * dev->erasesize ||
              src_offset == (src_block + 1) * dev->erasesize)
            {
              /* No room left at end of source block */

              src_offset = (src_block + 1) * dev->erasesize +
                           CONFIGDATA_BLOCK_HDR_SIZE;
            }
        }

      /* Test if we are out of space in the src block */

      if (src_offset + sizeof(hdr) >= (src_block + 1) * dev->erasesize)
        {
          /* No room at end of src block for another header.  Go to next
           * source block.
           */

          src_offset = (src_block + 1) * dev->erasesize +
                       CONFIGDATA_BLOCK_HDR_SIZE;
        }

      /* Test if we advanced to the next block.  If we did, then erase the
       * old block.
       */

      if (src_block != src_offset / dev->erasesize)
        {
          /* Erase the block ... we have emptied it */

          MTD_ERASE(dev->mtd, src_block, 1);
          src_block++;
        }

      /* Test if we are out of space in the dst block */

      if (dst_offset + sizeof(hdr) >= (dst_block + 1) * dev->erasesize)
        {
          /* No room at end of dst block for another header.  Go to next
           * block.
           */

          dst_block++;
          dst_offset = dst_block * dev->erasesize +
                       CONFIGDATA_BLOCK_HDR_SIZE;
          DEBUGASSERT(dst_block != src_block);
        }
    }

  kmm_free(pBuf);
  return dst_offset;

errout:
  kmm_free(pBuf);
  ferr("ERROR: fail consolidate: %d\n", ret);
  return 0;
}
int pty_register(int minor) { FAR struct pty_devpair_s *devpair; int pipe_a[2]; int pipe_b[2]; char devname[16]; int ret; /* Allocate a device instance */ devpair = kmm_zalloc(sizeof(struct pty_devpair_s)); if (devpair == NULL) { return -ENOMEM; } /* Initialize semaphores */ sem_init(&devpair->pp_slavesem, 0, 0); sem_init(&devpair->pp_exclsem, 0, 1); /* The pp_slavesem semaphore is used for signaling and, hence, should not * have priority inheritance enabled. */ sem_setprotocol(&devpair->pp_slavesem, SEM_PRIO_NONE); #ifndef CONFIG_DISABLE_PSEUDOFS_OPERATIONS devpair->pp_minor = minor; #endif devpair->pp_locked = true; devpair->pp_master.pd_devpair = devpair; devpair->pp_master.pd_master = true; devpair->pp_slave.pd_devpair = devpair; /* Create two pipes: * * pipe_a: Master source, slave sink (TX, slave-to-master) * pipe_b: Master sink, slave source (RX, master-to-slave) */ ret = pipe2(pipe_a, CONFIG_PSEUDOTERM_TXBUFSIZE); if (ret < 0) { goto errout_with_devpair; } ret = pipe2(pipe_b, CONFIG_PSEUDOTERM_RXBUFSIZE); if (ret < 0) { goto errout_with_pipea; } /* Detach the pipe file descriptors (closing them in the process) * * fd[0] is for reading; * fd[1] is for writing. 
*/ ret = file_detach(pipe_a[0], &devpair->pp_master.pd_src); if (ret < 0) { goto errout_with_pipeb; } pipe_a[0] = -1; ret = file_detach(pipe_a[1], &devpair->pp_slave.pd_sink); if (ret < 0) { goto errout_with_pipeb; } pipe_a[1] = -1; ret = file_detach(pipe_b[0], &devpair->pp_slave.pd_src); if (ret < 0) { goto errout_with_pipeb; } pipe_b[0] = -1; ret = file_detach(pipe_b[1], &devpair->pp_master.pd_sink); if (ret < 0) { goto errout_with_pipeb; } pipe_b[1] = -1; /* Register the slave device * * BSD style (deprecated): /dev/ttypN * SUSv1 style: /dev/pts/N * * Where N is the minor number */ #ifdef CONFIG_PSEUDOTERM_BSD snprintf(devname, 16, "/dev/ttyp%d", minor); #else snprintf(devname, 16, "/dev/pts/%d", minor); #endif ret = register_driver(devname, &g_pty_fops, 0666, &devpair->pp_slave); if (ret < 0) { goto errout_with_pipeb; } /* Register the master device * * BSD style (deprecated): /dev/ptyN * SUSv1 style: Master: /dev/ptmx (multiplexor, see ptmx.c) * * Where N is the minor number */ snprintf(devname, 16, "/dev/pty%d", minor); ret = register_driver(devname, &g_pty_fops, 0666, &devpair->pp_master); if (ret < 0) { goto errout_with_slave; } return OK; errout_with_slave: #ifdef CONFIG_PSEUDOTERM_BSD snprintf(devname, 16, "/dev/ttyp%d", minor); #else snprintf(devname, 16, "/dev/pts/%d", minor); #endif (void)unregister_driver(devname); errout_with_pipeb: if (pipe_b[0] >= 0) { close(pipe_b[0]); } else { (void)file_close_detached(&devpair->pp_master.pd_src); } if (pipe_b[1] >= 0) { close(pipe_b[1]); } else { (void)file_close_detached(&devpair->pp_slave.pd_sink); } errout_with_pipea: if (pipe_a[0] >= 0) { close(pipe_a[0]); } else { (void)file_close_detached(&devpair->pp_slave.pd_src); } if (pipe_a[1] >= 0) { close(pipe_a[1]); } else { (void)file_close_detached(&devpair->pp_master.pd_sink); } errout_with_devpair: sem_destroy(&devpair->pp_exclsem); sem_destroy(&devpair->pp_slavesem); kmm_free(devpair); return ret; }
/****************************************************************************
 * Name: mtdconfig_ramconsolidate
 *
 * Description:
 *   Reclaim space used by released config entries using a RAM buffer the
 *   size of one erase block: each block is read into RAM, erased, and
 *   then only the active entries are written back, packed toward the
 *   start of the device.  Used when the device has a single erase block
 *   (or when explicitly configured) since the copy-to-last-block scheme
 *   of mtdconfig_consolidate() is not possible.
 *
 * Returned Value:
 *   Offset just past the last consolidated entry on success; 0 on failure
 *   (allocation or I/O error).
 *
 ****************************************************************************/

static off_t mtdconfig_ramconsolidate(FAR struct mtdconfig_struct_s *dev)
{
  FAR uint8_t *pBuf;
  FAR struct mtdconfig_header_s *phdr;
  struct mtdconfig_header_s hdr;
  uint16_t src_block = 0, dst_block = 0, blkper;
  off_t dst_offset = CONFIGDATA_BLOCK_HDR_SIZE;
  off_t src_offset = CONFIGDATA_BLOCK_HDR_SIZE;
  off_t bytes_left_in_block;
  uint8_t sig[CONFIGDATA_BLOCK_HDR_SIZE];
  int ret;

  /* Allocate a consolidation buffer (one full erase block) */

  pBuf = (uint8_t *)kmm_malloc(dev->erasesize);
  if (pBuf == NULL)
    {
      /* Unable to allocate buffer, can't consolidate! */

      return 0;
    }

  /* Loop for all blocks and consolidate them */

  blkper = dev->erasesize / dev->blocksize;
  while (src_block < dev->neraseblocks)
    {
      /* Point to beginning of pBuf and read the next erase block */

      ret = MTD_BREAD(dev->mtd, src_block * blkper, blkper, pBuf);
      if (ret < 0)
        {
          /* Error doing block read */

          goto errout;
        }

      /* Now erase the block (its content survives in pBuf) */

      ret = MTD_ERASE(dev->mtd, src_block, 1);
      if (ret < 0)
        {
          /* Error erasing the block */

          goto errout;
        }

      /* If this is block zero, then write a format signature */

      if (src_block == 0)
        {
          sig[0] = 'C';
          sig[1] = 'D';
          sig[2] = CONFIGDATA_FORMAT_VERSION;

          ret = mtdconfig_writebytes(dev, 0, sig, sizeof(sig));
          if (ret != sizeof(sig))
            {
              /* Cannot write even the signature. */

              ret = -EIO;
              goto errout;
            }
        }

      /* Copy active items back to the MTD device. */

      while (src_offset < dev->erasesize)
        {
          phdr = (FAR struct mtdconfig_header_s *) &pBuf[src_offset];
          if (phdr->id == MTD_ERASED_ID)
            {
              /* No more data in this erase block.  Force the loop to
               * terminate by pushing src_offset to the block end.
               */

              src_offset = dev->erasesize;
              continue;
            }

          if (phdr->flags == MTD_ERASED_FLAGS)
            {
              /* This is an active entry.  Copy it.  Check if it
               * fits in the current destination block.
               */

              bytes_left_in_block = (dst_block + 1) * dev->erasesize -
                                    dst_offset;
              if (bytes_left_in_block < sizeof(*phdr) + phdr->len)
                {
                  /* Item won't fit in the destination block.  Move to
                   * the next block.
                   */

                  dst_block++;
                  dst_offset = dst_block * dev->erasesize +
                               CONFIGDATA_BLOCK_HDR_SIZE;

                  /* Test for program bug.  We shouldn't ever overflow
                   * even if no entries were inactive.
                   */

                  DEBUGASSERT(dst_block != dev->neraseblocks);
                }

              /* Now write the item to the current dst_offset location:
               * first the header, then the payload that follows it in
               * the RAM image.
               */

              ret = mtdconfig_writebytes(dev, dst_offset, (uint8_t *) phdr,
                                         sizeof(hdr));
              if (ret != sizeof(hdr))
                {
                  /* I/O Error! */

                  ret = -EIO;
                  goto errout;
                }

              dst_offset += sizeof(hdr);
              ret = mtdconfig_writebytes(dev, dst_offset,
                                         &pBuf[src_offset + sizeof(hdr)],
                                         phdr->len);
              if (ret != phdr->len)
                {
                  /* I/O Error! */

                  ret = -EIO;
                  goto errout;
                }

              dst_offset += phdr->len;

              /* Test if enough space in dst block for another header */

              if (dst_offset + sizeof(hdr) >= (dst_block + 1) * dev->erasesize ||
                  dst_offset == (dst_block + 1) * dev->erasesize)
                {
                  dst_block++;
                  dst_offset = dst_block * dev->erasesize +
                               CONFIGDATA_BLOCK_HDR_SIZE;
                }
            }

          /* Increment past the current source item (released items are
           * simply skipped here).
           */

          src_offset += sizeof(hdr) + phdr->len;
          if (src_offset + sizeof(hdr) > dev->erasesize)
            {
              src_offset = dev->erasesize;
            }

          DEBUGASSERT(src_offset <= dev->erasesize);
        }

      /* Increment to next source block */

      src_block++;
      src_offset = CONFIGDATA_BLOCK_HDR_SIZE;
    }

  kmm_free(pBuf);
  return dst_offset;

errout:
  kmm_free(pBuf);
  ferr("ERROR: fail ram consolidate: %d\n", ret);
  return 0;
}
/* Write (or delete) one config item in the MTD config partition.
 *
 * Strategy: format the partition on first use if needed; mark any
 * existing entry with the same id/instance as released; then append a
 * new entry (header + data), consolidating released space when the
 * device is full.  A zero-length request deletes the item.
 *
 * Input:   dev   - MTD config device state
 *          pdata - item id/instance, payload pointer and length
 * Return:  OK on success or a negated errno value on failure.
 */

static int mtdconfig_setconfig(FAR struct mtdconfig_struct_s *dev,
                               FAR struct config_data_s *pdata)
{
  uint8_t sig[CONFIGDATA_BLOCK_HDR_SIZE]; /* Format signature bytes ("CD") */
  char retrycount = 0;
  int ret = -ENOSYS;
  off_t offset, bytes_left_in_block, bytes;
  uint16_t block;
  struct mtdconfig_header_s hdr;
  uint8_t ram_consolidate;

  /* Allocate a temp block buffer */

  dev->buffer = (FAR uint8_t *) kmm_malloc(dev->blocksize);
  if (dev->buffer == NULL)
    {
      return -ENOMEM;
    }

  /* Read and validate the signature bytes */

retry:
  offset = mtdconfig_findfirstentry(dev, &hdr);
  if (offset == 0)
    {
      /* Config Data partition not formatted. */

      if (retrycount)
        {
          /* Formatting was already attempted once; give up. */

          ret = -ENOSYS;
          goto errout;
        }

      /* Try to format the config partition */

      ret = MTD_IOCTL(dev->mtd, MTDIOC_BULKERASE, 0);
      if (ret < 0)
        {
          goto errout;
        }

      /* Write a format signature */

      sig[0] = 'C';
      sig[1] = 'D';
      sig[2] = CONFIGDATA_FORMAT_VERSION;

      ret = mtdconfig_writebytes(dev, 0, sig, sizeof(sig));
      if (ret != sizeof(sig))
        {
          /* Cannot write even the signature. */

          ret = -EIO;
          goto errout;
        }

      /* Now go try to read the signature again (as verification) */

      retrycount++;
      goto retry;
    }

  /* Okay, the Config Data partition is formatted.  Check if the
   * config item being written is already in the database.  If it
   * is, we must mark it as obsolete before creating a new entry.
   */

  offset = mtdconfig_findentry(dev, offset, pdata, &hdr);

  /* Test if the header was found. */

  if (offset > 0 && pdata->id == hdr.id && pdata->instance == hdr.instance)
    {
      /* Mark this entry as released (clear the flags bits; on NOR
       * flash bits can only be programmed from 1 to 0).
       */

      hdr.flags = (uint8_t)~MTD_ERASED_FLAGS;
      mtdconfig_writebytes(dev, offset, &hdr.flags, sizeof(hdr.flags));
    }

  /* Test if the new length is zero.  If it is, then we are
   * deleting the entry and are already done.
   */

  if (pdata->len == 0)
    {
      ret = OK;
      goto errout;
    }

  /* Now find a new entry for this config data */

  retrycount = 0;

retry_find:
  offset = mtdconfig_findfirstentry(dev, &hdr);
  if (offset > 0 && hdr.id == MTD_ERASED_ID)
    {
      block = offset / dev->erasesize;
      bytes_left_in_block = (block + 1) * dev->erasesize - offset;
      if (bytes_left_in_block < sizeof(hdr) + pdata->len)
        {
          /* Simulate an active block to search for the next one
           * in the code below.
           */

          hdr.id = 1;
        }
    }

  if (hdr.id != MTD_ERASED_ID)
    {
      /* Read the next entry */

      offset = mtdconfig_findnextentry(dev, offset, &hdr, pdata->len);
      if (offset == 0)
        {
          /* No free entries left on device! */

#ifdef CONFIG_MTD_CONFIG_RAM_CONSOLIDATE
          ram_consolidate = 1;
#else
          ram_consolidate = dev->neraseblocks == 1;
#endif
          if (ram_consolidate)
            {
              /* If we only have 1 erase block, then we must do a RAM
               * assisted consolidation of released entries.
               */

              if (retrycount)
                {
                  /* Out of space! */

                  ret = -ENOMEM;
                  goto errout;
                }

              mtdconfig_ramconsolidate(dev);
              retrycount++;
              goto retry_find;
            }
#ifndef CONFIG_MTD_CONFIG_RAM_CONSOLIDATE
          else
            {
              if (retrycount)
                {
                  /* Out of space! */

                  ret = -ENOMEM;
                  goto errout;
                }

              mtdconfig_consolidate(dev);
              retrycount++;
              goto retry_find;
            }
#endif
        }
    }

  /* Test if a new entry was found */

  if (offset > 0)
    {
      /* Save the data at this entry */

      hdr.id = pdata->id;
      hdr.instance = pdata->instance;
      hdr.len = pdata->len;
      hdr.flags = MTD_ERASED_FLAGS;

      ret = mtdconfig_writebytes(dev, offset, (uint8_t *)&hdr, sizeof(hdr));
      if (ret != sizeof(hdr))
        {
          /* Cannot write even header! */

          ret = -EIO;
          goto errout;
        }

      bytes = mtdconfig_writebytes(dev, offset + sizeof(hdr),
                                   pdata->configdata, pdata->len);
      if (bytes != pdata->len)
        {
          /* Error writing data!  Mark the half-written entry as
           * released so it will never be returned as valid data.
           * (Previously this wrote MTD_ERASED_FLAGS from &hdr, which
           * is a no-op on flash and left a corrupt "active" entry;
           * use the same released-marking idiom as above.)
           */

          hdr.flags = (uint8_t)~MTD_ERASED_FLAGS;
          mtdconfig_writebytes(dev, offset, &hdr.flags, sizeof(hdr.flags));
          ret = -EIO;
          goto errout;
        }

      ret = OK;
    }

errout:

  /* Free the buffer */

  kmm_free(dev->buffer);
  return ret;
}
/* Open a directory within the procfs task/<pid> tree.
 *
 * Accepted relpath forms:
 *   (1) "<pid>"        - the attribute directory of that task/thread,
 *   (2) "self"         - the PID of the calling task, or
 *   (3) either of the above followed by "/<nodename>" for a sub-dir.
 *
 * Input:   relpath - path relative to the procfs mount point
 *          dir     - directory state; dir->u.procfs receives the
 *                    allocated proc_dir_s (freed by closedir)
 * Return:  OK, or -ENOENT / -ENOTDIR / -ENOMEM on failure.
 */

static int proc_opendir(FAR const char *relpath, FAR struct fs_dirent_s *dir)
{
  FAR struct proc_dir_s *procdir;
  FAR const struct proc_node_s *node;
  FAR struct tcb_s *tcb;
  irqstate_t flags;
  unsigned long tmp;
  FAR char *ptr;
  pid_t pid;

  finfo("relpath: \"%s\"\n", relpath ? relpath : "NULL");
  DEBUGASSERT(relpath != NULL && dir != NULL && dir->u.procfs == NULL);

  /* The relative must be either:
   *
   * (1) "<pid>" - The sub-directory of task/thread attributes,
   * (2) "self"  - Which refers to the PID of the calling task, or
   * (3) The name of a directory node under either of those
   */

  /* Otherwise, the relative path should be a valid task/thread ID */

  ptr = NULL;
  if (strncmp(relpath, "self", 4) == 0)
    {
      tmp = (unsigned long)getpid();  /* Get the PID of the calling task */
      ptr = (FAR char *)relpath + 4;  /* Discard const */
    }
  else
    {
      tmp = strtoul(relpath, &ptr, 10); /* Extract the PID from path */
    }

  if (ptr == NULL || (*ptr != '\0' && *ptr != '/'))
    {
      /* strtoul failed or there is something in the path after the pid */

      ferr("ERROR: Invalid path \"%s\"\n", relpath);
      return -ENOENT;
    }

  /* A valid PID would be in the range of 0-32767 (0 is reserved for the
   * IDLE thread).
   */

  if (tmp >= 32768)
    {
      ferr("ERROR: Invalid PID %ld\n", tmp);
      return -ENOENT;
    }

  /* Now verify that a task with this task/thread ID exists.  The TCB
   * lookup must be done in a critical section so the task cannot exit
   * while we look (the tcb pointer itself is not used afterward).
   */

  pid = (pid_t)tmp;

  flags = enter_critical_section();
  tcb = sched_gettcb(pid);
  leave_critical_section(flags);

  if (tcb == NULL)
    {
      ferr("ERROR: PID %d is not valid\n", (int)pid);
      return -ENOENT;
    }

  /* Allocate the directory structure.  Note that the index and procentry
   * pointer are implicitly nullified by kmm_zalloc().  Only the remaining,
   * non-zero entries will need be initialized.
   */

  procdir = (FAR struct proc_dir_s *)kmm_zalloc(sizeof(struct proc_dir_s));
  if (procdir == NULL)
    {
      ferr("ERROR: Failed to allocate the directory structure\n");
      return -ENOMEM;
    }

  /* Was the <pid> the final element of the path? */

  if (*ptr != '\0' && strcmp(ptr, "/") != 0)
    {
      /* There is something in the path after the pid.  Skip over the path
       * segment delimiter and see if we can identify the node of interest.
       */

      ptr++;
      node = proc_findnode(ptr);
      if (node == NULL)
        {
          ferr("ERROR: Invalid path \"%s\"\n", relpath);
          kmm_free(procdir);
          return -ENOENT;
        }

      /* The node must be a directory, not a file */

      if (!DIRENT_ISDIRECTORY(node->dtype))
        {
          ferr("ERROR: Path \"%s\" is not a directory\n", relpath);
          kmm_free(procdir);
          return -ENOTDIR;
        }

      /* This is a second level directory */

      procdir->base.level    = 2;
      procdir->base.nentries = PROC_NGROUPNODES;
      procdir->node          = node;
    }
  else
    {
      /* Use the special level0 node */

      procdir->base.level    = 1;
      procdir->base.nentries = PROC_NLEVEL0NODES;
      procdir->node          = &g_level0node;
    }

  procdir->pid  = pid;
  dir->u.procfs = (FAR void *)procdir;
  return OK;
}
int stm32_tsc_setup(int minor) { FAR struct tc_dev_s *priv; char devname[DEV_NAMELEN]; #ifdef CONFIG_TOUCHSCREEN_MULTIPLE irqstate_t flags; #endif int ret; iinfo("minor: %d\n", minor); DEBUGASSERT(minor >= 0 && minor < 100); /* If we only have one touchscreen, check if we already did init */ #ifndef CONFIG_TOUCHSCREEN_MULTIPLE if (g_touchinitdone) { return OK; } #endif /* Configure the touchscreen DRIVEA and DRIVEB pins for output */ stm32_configgpio(GPIO_TP_DRIVEA); stm32_configgpio(GPIO_TP_DRIVEB); /* Configure Analog inputs for sampling X and Y coordinates */ stm32_configgpio(GPIO_TP_XL); stm32_configgpio(GPIO_TP_YD); tc_adc_init(); /* Create and initialize a touchscreen device driver instance */ #ifndef CONFIG_TOUCHSCREEN_MULTIPLE priv = &g_touchscreen; #else priv = (FAR struct tc_dev_s *)kmm_malloc(sizeof(struct tc_dev_s)); if (!priv) { ierr("ERROR: kmm_malloc(%d) failed\n", sizeof(struct tc_dev_s)); return -ENOMEM; } #endif /* Initialize the touchscreen device driver instance */ memset(priv, 0, sizeof(struct tc_dev_s)); nxsem_init(&priv->devsem, 0, 1); /* Initialize device structure semaphore */ nxsem_init(&priv->waitsem, 0, 0); /* Initialize pen event wait semaphore */ /* Register the device as an input device */ (void)snprintf(devname, DEV_NAMELEN, DEV_FORMAT, minor); iinfo("Registering %s\n", devname); ret = register_driver(devname, &tc_fops, 0666, priv); if (ret < 0) { ierr("ERROR: register_driver() failed: %d\n", ret); goto errout_with_priv; } /* Schedule work to perform the initial sampling and to set the data * availability conditions. */ priv->state = TC_READY; ret = work_queue(HPWORK, &priv->work, tc_worker, priv, 0); if (ret != 0) { ierr("ERROR: Failed to queue work: %d\n", ret); goto errout_with_priv; } /* And return success (?) */ #ifndef CONFIG_TOUCHSCREEN_MULTIPLE g_touchinitdone = true; #endif return OK; errout_with_priv: nxsem_destroy(&priv->devsem); #ifdef CONFIG_TOUCHSCREEN_MULTIPLE kmm_free(priv); #endif return ret; }
/* Create a FILE MTD device that uses a regular file (or a block device
 * via its character proxy) as the backing store.
 *
 * Input:   path      - path of the backing file/block device
 *          offset    - byte offset into the file where the MTD begins
 *          mtdlen    - size in bytes of the emulated MTD region
 *          sectsize  - block size; <= 0 selects CONFIG_FILEMTD_BLOCKSIZE
 *          erasesize - erase block size; <= 0 selects
 *                      CONFIG_FILEMTD_ERASESIZE
 * Return:  Initialized MTD vtable pointer, or NULL on failure.
 */

FAR struct mtd_dev_s *blockmtd_initialize(FAR const char *path,
                                          size_t offset, size_t mtdlen,
                                          int16_t sectsize,
                                          int32_t erasesize)
{
  FAR struct file_dev_s *priv;
  size_t nblocks;
  int mode;
  int ret;
  int fd;

  /* Create an instance of the FILE MTD device state structure */

  priv = (FAR struct file_dev_s *)kmm_zalloc(sizeof(struct file_dev_s));
  if (!priv)
    {
      ferr("ERROR: Failed to allocate the FILE MTD state structure\n");
      return NULL;
    }

  /* Determine the file open mode: read-only unless the configuration
   * supports writable file systems.  NOTE(review): O_RDOK/O_WROK are
   * NuttX-specific open flags - confirm against fcntl.h.
   */

  mode = O_RDOK;
#ifdef CONFIG_FS_WRITABLE
  mode |= O_WROK;
#endif

  /* Try to open the file.  NOTE that block devices will use a character
   * driver proxy.
   */

  fd = open(path, mode);
  if (fd < 0)
    {
      ferr("ERROR: Failed to open the FILE MTD file %s\n", path);
      kmm_free(priv);
      return NULL;
    }

  /* Detach the file descriptor from the open file so the MTD keeps its
   * own struct file reference.  (NOTE(review): "detail" in the message
   * below looks like a typo for "detach".)
   */

  ret = file_detach(fd, &priv->mtdfile);
  if (ret < 0)
    {
      ferr("ERROR: Failed to detail the FILE MTD file %s\n", path);
      close(fd);
      kmm_free(priv);
      return NULL;
    }

  /* Set the block size based on the provided sectsize parameter */

  if (sectsize <= 0)
    {
      priv->blocksize = CONFIG_FILEMTD_BLOCKSIZE;
    }
  else
    {
      priv->blocksize = sectsize;
    }

  /* Set the erase size based on the provided erasesize parameter */

  if (erasesize <= 0)
    {
      priv->erasesize = CONFIG_FILEMTD_ERASESIZE;
    }
  else
    {
      priv->erasesize = erasesize;
    }

  /* Force the size to be an even number of the erase block size */

  nblocks = mtdlen / priv->erasesize;
  if (nblocks < 3)
    {
      ferr("ERROR: Need to provide at least three full erase block\n");
      file_close_detached(&priv->mtdfile);
      kmm_free(priv);
      return NULL;
    }

  /* Perform initialization as necessary. (unsupported methods were
   * nullified by kmm_zalloc).  NOTE(review): the byte-write method is
   * named file_bytewrite while the others use the filemtd_ prefix -
   * verify the symbol exists under that name.
   */

  priv->mtd.erase  = filemtd_erase;
  priv->mtd.bread  = filemtd_bread;
  priv->mtd.bwrite = filemtd_bwrite;
  priv->mtd.read   = filemtd_byteread;
#ifdef CONFIG_MTD_BYTE_WRITE
  priv->mtd.write  = file_bytewrite;
#endif
  priv->mtd.ioctl  = filemtd_ioctl;
  priv->offset     = offset;
  priv->nblocks    = nblocks;

#ifdef CONFIG_MTD_REGISTRATION
  /* Register the MTD with the procfs system if enabled */

  mtd_register(&priv->mtd, "filemtd");
#endif

  return &priv->mtd;
}
/* Load and execute a program from the file system.
 *
 * Input:   filename - path of the executable
 *          argv     - NULL-terminated argument list (may be NULL)
 *          exports  - symbol table exported to the program
 *          nexports - number of entries in exports
 * Return:  PID of the new task on success (first configuration) or the
 *          exec_module() result; ERROR with errno set on failure.
 *
 * Fix vs. previous version: in the configuration without both
 * CONFIG_SCHED_ONEXIT and CONFIG_SCHED_HAVE_PARENT, argv was silently
 * ignored (binfmt_copyargv was never called).  The argv list is now
 * copied in that branch too, mirroring the primary branch, with a
 * matching binfmt_freeargv() on the error paths.
 */

int exec(FAR const char *filename, FAR char * const *argv,
         FAR const struct symtab_s *exports, int nexports)
{
#if defined(CONFIG_SCHED_ONEXIT) && defined(CONFIG_SCHED_HAVE_PARENT)
  FAR struct binary_s *bin;
  int pid;
  int err;
  int ret;

  /* Allocate the load information */

  bin = (FAR struct binary_s *)kmm_zalloc(sizeof(struct binary_s));
  if (!bin)
    {
      bdbg("ERROR: Failed to allocate binary_s\n");
      err = ENOMEM;
      goto errout;
    }

  /* Initialize the binary structure */

  bin->filename = filename;
  bin->exports  = exports;
  bin->nexports = nexports;

  /* Copy the argv[] list */

  ret = binfmt_copyargv(bin, argv);
  if (ret < 0)
    {
      err = -ret;
      bdbg("ERROR: Failed to copy argv[]: %d\n", err);
      goto errout_with_bin;
    }

  /* Load the module into memory */

  ret = load_module(bin);
  if (ret < 0)
    {
      err = get_errno();
      bdbg("ERROR: Failed to load program '%s': %d\n", filename, err);
      goto errout_with_argv;
    }

  /* Disable pre-emption so that the executed module does
   * not return until we get a chance to connect the on_exit
   * handler.
   */

  sched_lock();

  /* Then start the module */

  pid = exec_module(bin);
  if (pid < 0)
    {
      err = get_errno();
      bdbg("ERROR: Failed to execute program '%s': %d\n", filename, err);
      goto errout_with_lock;
    }

  /* Set up to unload the module (and free the binary_s structure)
   * when the task exists.
   */

  ret = schedule_unload(pid, bin);
  if (ret < 0)
    {
      err = get_errno();
      bdbg("ERROR: Failed to schedule unload '%s': %d\n", filename, err);
    }

  sched_unlock();
  return pid;

errout_with_lock:
  sched_unlock();
  unload_module(bin);
errout_with_argv:
  binfmt_freeargv(bin);
errout_with_bin:
  kmm_free(bin);
errout:
  set_errno(err);
  return ERROR;

#else
  struct binary_s bin;
  int err;
  int ret;

  /* Initialize the binary structure */

  memset(&bin, 0, sizeof(struct binary_s));
  bin.filename = filename;
  bin.exports  = exports;
  bin.nexports = nexports;

  /* Copy the argv[] list (previously omitted in this configuration) */

  ret = binfmt_copyargv(&bin, argv);
  if (ret < 0)
    {
      err = -ret;
      bdbg("ERROR: Failed to copy argv[]: %d\n", err);
      goto errout;
    }

  /* Load the module into memory */

  ret = load_module(&bin);
  if (ret < 0)
    {
      err = get_errno();
      bdbg("ERROR: Failed to load program '%s': %d\n", filename, err);
      goto errout_with_argv;
    }

  /* Then start the module */

  ret = exec_module(&bin);
  if (ret < 0)
    {
      err = get_errno();
      bdbg("ERROR: Failed to execute program '%s': %d\n", filename, err);
      goto errout_with_module;
    }

  /* TODO: How does the module get unloaded in this case? */

  return ret;

errout_with_module:
  unload_module(&bin);
errout_with_argv:
  binfmt_freeargv(&bin);
errout:
  set_errno(err);
  return ERROR;
#endif
}
int nxffs_dump(FAR struct mtd_dev_s *mtd, bool verbose) { #if defined(CONFIG_DEBUG_FEATURES) && defined(CONFIG_DEBUG_FS) struct nxffs_blkinfo_s blkinfo; int ret; /* Get the volume geometry. (casting to uintptr_t first eliminates * complaints on some architectures where the sizeof long is different * from the size of a pointer). */ memset(&blkinfo, 0, sizeof(struct nxffs_blkinfo_s)); ret = MTD_IOCTL(mtd, MTDIOC_GEOMETRY, (unsigned long)((uintptr_t)&blkinfo.geo)); if (ret < 0) { ferr("ERROR: MTD ioctl(MTDIOC_GEOMETRY) failed: %d\n", -ret); return ret; } /* Save the verbose output indication */ blkinfo.verbose = verbose; /* Allocate a buffer to hold one block */ blkinfo.buffer = (FAR uint8_t *)kmm_malloc(blkinfo.geo.blocksize); if (!blkinfo.buffer) { ferr("ERROR: Failed to allocate block cache\n"); return -ENOMEM; } /* Now read every block on the device */ syslog(LOG_NOTICE, "NXFFS Dump:\n"); syslog(LOG_NOTICE, g_hdrformat); blkinfo.nblocks = blkinfo.geo.erasesize * blkinfo.geo.neraseblocks / blkinfo.geo.blocksize; for (blkinfo.block = 0, blkinfo.offset = 0; blkinfo.block < blkinfo.nblocks; blkinfo.block++, blkinfo.offset += blkinfo.geo.blocksize) { /* Read the next block */ ret = MTD_BREAD(mtd, blkinfo.block, 1, blkinfo.buffer); if (ret < 0) { #ifndef CONFIG_NXFFS_NAND /* Read errors are fatal */ ferr("ERROR: Failed to read block %d\n", blkinfo.block); kmm_free(blkinfo.buffer); return ret; #else /* A read error is probably fatal on all media but NAND. * On NAND, the read error probably just signifies a block * with an uncorrectable ECC failure. So, to handle NAND, * just report the read error and continue. */ syslog(LOG_NOTICE, g_format, blkinfo.block, 0, "BLOCK", "RD FAIL", blkinfo.geo.blocksize); #endif } else { /* Analyze the block that we just read */ nxffs_analyze(&blkinfo); } } syslog(LOG_NOTICE, "%d blocks analyzed\n", blkinfo.nblocks); kmm_free(blkinfo.buffer); return OK; #else return -ENOSYS; #endif }
void exepath_release(EXEPATH_HANDLE handle) { kmm_free(handle); }
struct cc1101_dev_s * cc1101_init(struct spi_dev_s * spi, uint8_t isrpin, uint32_t pinset, const struct c1101_rfsettings_s * rfsettings) { struct cc1101_dev_s * dev; ASSERT(spi); if ((dev = kmm_malloc(sizeof(struct cc1101_dev_s))) == NULL) { errno = ENOMEM; return NULL; } dev->rfsettings = rfsettings; dev->spi = spi; dev->isrpin = isrpin; dev->pinset = pinset; dev->flags = 0; dev->channel = rfsettings->CHMIN; dev->power = rfsettings->PAMAX; /* Reset chip, check status bytes */ if (cc1101_reset(dev) < 0) { kmm_free(dev); errno = EFAULT; return NULL; } /* Check part compatibility */ if (cc1101_checkpart(dev) < 0) { kmm_free(dev); errno = ENODEV; return NULL; } /* Configure CC1101: * - disable GDOx for best performance * - load RF * - and packet control */ cc1101_setgdo(dev, CC1101_PIN_GDO0, CC1101_GDO_HIZ); cc1101_setgdo(dev, CC1101_PIN_GDO1, CC1101_GDO_HIZ); cc1101_setgdo(dev, CC1101_PIN_GDO2, CC1101_GDO_HIZ); cc1101_setrf(dev, rfsettings); cc1101_setpacketctrl(dev); /* Set the ISR to be triggerred on falling edge of the: * * 6 (0x06) Asserts when sync word has been sent / received, and * de-asserts at the end of the packet. In RX, the pin will de-assert * when the optional address check fails or the RX FIFO overflows. * In TX the pin will de-assert if the TX FIFO underflows. */ cc1101_setgdo(dev, dev->isrpin, CC1101_GDO_SYNC); /* Bind to external interrupt line */ /* depends on STM32: TODO: Make that config within pinset and * provide general gpio interface * stm32_gpiosetevent(pinset, false, true, true, cc1101_eventcb); */ return dev; }
int group_allocate(FAR struct task_tcb_s *tcb, uint8_t ttype) { FAR struct task_group_s *group; int ret; DEBUGASSERT(tcb && !tcb->cmn.group); /* Allocate the group structure and assign it to the TCB */ group = (FAR struct task_group_s *)kmm_zalloc(sizeof(struct task_group_s)); if (!group) { return -ENOMEM; } #if CONFIG_NFILE_STREAMS > 0 && (defined(CONFIG_BUILD_PROTECTED) || \ defined(CONFIG_BUILD_KERNEL)) && defined(CONFIG_MM_KERNEL_HEAP) /* If this group is being created for a privileged thread, then all elements * of the group must be created for privileged access. */ if ((ttype & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_KERNEL) { group->tg_flags |= GROUP_FLAG_PRIVILEGED; } /* In a flat, single-heap build. The stream list is allocated with the * group structure. But in a kernel build with a kernel allocator, it * must be separately allocated using a user-space allocator. */ group->tg_streamlist = (FAR struct streamlist *)group_zalloc(group, sizeof(struct streamlist)); if (!group->tg_streamlist) { kmm_free(group); return -ENOMEM; } #endif /* Attach the group to the TCB */ tcb->cmn.group = group; #if defined(HAVE_GROUP_MEMBERS) || defined(CONFIG_ARCH_ADDRENV) /* Assign the group a unique ID. If g_gidcounter were to wrap before we * finish with task creation, that would be a problem. */ group_assigngid(group); #endif /* Duplicate the parent tasks environment */ ret = env_dup(group); if (ret < 0) { #if CONFIG_NFILE_STREAMS > 0 && (defined(CONFIG_BUILD_PROTECTED) || \ defined(CONFIG_BUILD_KERNEL)) && defined(CONFIG_MM_KERNEL_HEAP) group_free(group, group->tg_streamlist); #endif kmm_free(group); tcb->cmn.group = NULL; return ret; } /* Initialize the pthread join semaphore */ #ifndef CONFIG_DISABLE_PTHREAD (void)sem_init(&group->tg_joinsem, 0, 1); #endif #if defined(CONFIG_SCHED_WAITPID) && !defined(CONFIG_SCHED_HAVE_PARENT) (void)sem_init(&group->tg_exitsem, 0, 0); #endif return OK; }
/* Set up to monitor a TCP/IP socket for poll()-able events.
 *
 * Input:   psock - the socket being polled (s_conn is the TCP conn)
 *          fds   - poll descriptor; revents/priv are updated here
 * Return:  OK on success; -EINVAL on bad arguments (debug builds);
 *          -ENOMEM / -EBUSY if resources cannot be allocated.
 */

int tcp_pollsetup(FAR struct socket *psock, FAR struct pollfd *fds)
{
  FAR struct tcp_conn_s *conn = psock->s_conn;
  FAR struct tcp_poll_s *info;
  FAR struct devif_callback_s *cb;
  int ret;

  /* Sanity check */

#ifdef CONFIG_DEBUG_FEATURES
  if (!conn || !fds)
    {
      return -EINVAL;
    }
#endif

  /* Allocate a container to hold the poll information */

  info = (FAR struct tcp_poll_s *)kmm_malloc(sizeof(struct tcp_poll_s));
  if (!info)
    {
      return -ENOMEM;
    }

  /* Some of the following must be atomic with respect to network event
   * processing; hold the network lock until setup is complete.
   */

  net_lock();

  /* Allocate a TCP/IP callback structure */

  cb = tcp_callback_alloc(conn);
  if (!cb)
    {
      ret = -EBUSY;
      goto errout_with_lock;
    }

  /* Initialize the poll info container */

  info->psock = psock;
  info->fds   = fds;
  info->cb    = cb;

  /* Initialize the callback structure.  Save the reference to the info
   * structure as callback private data so that it will be available during
   * callback processing.
   */

  cb->flags = (TCP_NEWDATA | TCP_BACKLOG | TCP_POLL | TCP_DISCONN_EVENTS);
  cb->priv  = (FAR void *)info;
  cb->event = tcp_poll_interrupt;

  /* Save the reference in the poll info structure as fds private as well
   * for use during poll teardown as well.
   */

  fds->priv = (FAR void *)info;

#ifdef CONFIG_NET_TCPBACKLOG
  /* Check for read data or backlogged connection availability now */

  if (!IOB_QEMPTY(&conn->readahead) || tcp_backlogavailable(conn))
#else
  /* Check for read data availability now */

  if (!IOB_QEMPTY(&conn->readahead))
#endif
    {
      /* Normal data may be read without blocking. */

      fds->revents |= (POLLRDNORM & fds->events);
    }

  /* Check for a loss of connection events.  We need to be careful here.
   * There are four possibilities:
   *
   * 1) The socket is connected and we are waiting for data availability
   *    events.
   *
   *    __SS_ISCONNECTED(f) == true
   *    __SS_ISLISTENING(f) == false
   *    __SS_ISCLOSED(f)    == false
   *
   *    Action: Wait for data availability events
   *
   * 2) This is a listener socket that was never connected and we are
   *    waiting for connection events.
   *
   *    __SS_ISCONNECTED(f) == false
   *    __SS_ISLISTENING(f) == true
   *    __SS_ISCLOSED(f)    == false
   *
   *    Action: Wait for connection events
   *
   * 3) This socket was previously connected, but the peer has gracefully
   *    closed the connection.
   *
   *    __SS_ISCONNECTED(f) == false
   *    __SS_ISLISTENING(f) == false
   *    __SS_ISCLOSED(f)    == true
   *
   *    Action: Return with POLLHUP|POLLERR events
   *
   * 4) This socket was previously connected, but we lost the connection
   *    due to some exceptional event.
   *
   *    __SS_ISCONNECTED(f) == false
   *    __SS_ISLISTENING(f) == false
   *    __SS_ISCLOSED(f)    == false
   *
   *    Action: Return with POLLHUP|POLLERR events
   */

  if (!_SS_ISCONNECTED(psock->s_flags) && !_SS_ISLISTENING(psock->s_flags))
    {
      /* We were previously connected but lost the connection either due
       * to a graceful shutdown by the remote peer or because of some
       * exceptional event.
       */

      fds->revents |= (POLLERR | POLLHUP);
    }
  else if (_SS_ISCONNECTED(psock->s_flags) && psock_tcp_cansend(psock) >= 0)
    {
      /* Connected and there is TX buffer space available */

      fds->revents |= (POLLWRNORM & fds->events);
    }

  /* Check if any requested events are already in effect */

  if (fds->revents != 0)
    {
      /* Yes.. then signal the poll logic */

      sem_post(fds->sem);
    }

  net_unlock();
  return OK;

errout_with_lock:
  kmm_free(info);
  net_unlock();
  return ret;
}
/* Allocate the D-Space region for an NXFLAT module.
 *
 * With CONFIG_ARCH_ADDRENV this creates a per-task address environment
 * and zeroes it; otherwise the region is simply allocated from the
 * user heap.  On success loadinfo->dspace is set with crefs == 1.
 *
 * Input:   loadinfo - NXFLAT load state receiving the dspace
 *          envsize  - size in bytes of the D-Space to create
 * Return:  OK on success or a negated errno value on failure.
 */

int nxflat_addrenv_alloc(FAR struct nxflat_loadinfo_s *loadinfo,
                         size_t envsize)
{
  FAR struct dspace_s *dspace;
#ifdef CONFIG_ARCH_ADDRENV
  FAR void *vdata;
  save_addrenv_t oldenv;
  size_t heapsize;
  int ret;
#endif

  DEBUGASSERT(!loadinfo->dspace);

  /* Allocate the struct dspace_s container for the D-Space allocation */

  dspace = (FAR struct dspace_s *)kmm_malloc(sizeof(struct dspace_s));
  if (dspace == 0)
    {
      bdbg("ERROR: Failed to allocate DSpace\n");
      return -ENOMEM;
    }

#ifdef CONFIG_ARCH_ADDRENV
  /* Determine the heapsize to allocate.  If there is no dynamic stack then
   * heapsize must at least as big as the fixed stack size since the stack
   * will be allocated from the heap in that case.
   */

#ifdef CONFIG_ARCH_STACK_DYNAMIC
  heapsize = ARCH_HEAP_SIZE;
#else
  heapsize = MIN(loadinfo->stacksize, ARCH_HEAP_SIZE);
#endif

  /* Create a D-Space address environment for the new NXFLAT task */

  ret = up_addrenv_create(0, envsize, heapsize, &loadinfo->addrenv);
  if (ret < 0)
    {
      bdbg("ERROR: up_addrenv_create failed: %d\n", ret);
      goto errout_with_dspace;
    }

  /* Get the virtual address associated with the start of the address
   * environment.  This is the base address that we will need to use to
   * access the D-Space region (but only if the address environment has been
   * selected.
   */

  ret = up_addrenv_vdata(&loadinfo->addrenv, 0, &vdata);
  if (ret < 0)
    {
      bdbg("ERROR: up_addrenv_vdata failed: %d\n", ret);
      goto errout_with_addrenv;
    }

  /* Clear all of the allocated D-Space memory.  We have to temporarily
   * selected the D-Space address environment to do this.
   * NOTE(review): up_addrenv_select() is passed loadinfo->addrenv by
   * value while the other up_addrenv_* calls take &loadinfo->addrenv -
   * confirm that matches the up_addrenv_select() prototype.
   */

  ret = up_addrenv_select(loadinfo->addrenv, &oldenv);
  if (ret < 0)
    {
      bdbg("ERROR: up_addrenv_select failed: %d\n", ret);
      goto errout_with_addrenv;
    }

  memset(vdata, 0, envsize);

  ret = up_addrenv_restore(oldenv);
  if (ret < 0)
    {
      bdbg("ERROR: up_addrenv_restore failed: %d\n", ret);
      goto errout_with_addrenv;
    }

  /* Success... save the fruits of our labor */

  loadinfo->dspace = dspace;
  dspace->crefs    = 1;
  dspace->region   = (FAR uint8_t *)vdata;
  return OK;

errout_with_addrenv:
  (void)up_addrenv_destroy(&loadinfo->addrenv);
  loadinfo->addrenv = 0;

errout_with_dspace:
  kmm_free(dspace);
  return ret;

#else
  /* Allocate (and zero) memory to hold the ELF image */

  dspace->region = (FAR uint8_t *)kumm_zalloc(envsize);
  if (!dspace->region)
    {
      kmm_free(dspace);
      return -ENOMEM;
    }

  loadinfo->dspace = dspace;
  dspace->crefs    = 1;
  return OK;
#endif
}
int icmp_pollsetup(FAR struct socket *psock, FAR struct pollfd *fds) { FAR struct icmp_conn_s *conn = psock->s_conn; FAR struct icmp_poll_s *info; FAR struct devif_callback_s *cb; int ret; DEBUGASSERT(conn != NULL && fds != NULL); /* Allocate a container to hold the poll information */ info = (FAR struct icmp_poll_s *)kmm_malloc(sizeof(struct icmp_poll_s)); if (!info) { return -ENOMEM; } /* Some of the following must be atomic */ net_lock(); /* Get the device that will provide the provide the NETDEV_DOWN event. * NOTE: in the event that the local socket is bound to INADDR_ANY, the * dev value will be zero and there will be no NETDEV_DOWN notifications. */ if (conn->dev == NULL) { conn->dev = netdev_default(); } /* Allocate a ICMP callback structure */ cb = icmp_callback_alloc(conn->dev); if (cb == NULL) { ret = -EBUSY; goto errout_with_lock; } /* Initialize the poll info container */ info->psock = psock; info->fds = fds; info->cb = cb; /* Initialize the callback structure. Save the reference to the info * structure as callback private data so that it will be available during * callback processing. */ cb->flags = 0; cb->priv = (FAR void *)info; cb->event = icmp_poll_eventhandler; if ((info->fds->events & POLLOUT) != 0) { cb->flags |= ICMP_POLL; } if ((info->fds->events & POLLIN) != 0) { cb->flags |= ICMP_NEWDATA; } if ((info->fds->events & (POLLHUP | POLLERR)) != 0) { cb->flags |= NETDEV_DOWN; } /* Save the reference in the poll info structure as fds private as well * for use during poll teardown as well. */ fds->priv = (FAR void *)info; /* Check for read data availability now */ if (!IOB_QEMPTY(&conn->readahead)) { /* Normal data may be read without blocking. */ fds->revents |= (POLLRDNORM & fds->events); } /* Check if any requested events are already in effect */ if (fds->revents != 0) { /* Yes.. then signal the poll logic */ nxsem_post(fds->sem); } net_unlock(); return OK; errout_with_lock: kmm_free(info); net_unlock(); return ret; }
static void usbhost_disconnect_event(FAR void *arg) { FAR struct usbhost_class_s *hubclass = (FAR struct usbhost_class_s *)arg; FAR struct usbhost_hubpriv_s *priv; FAR struct usbhost_hubport_s *hport; FAR struct usbhost_hubport_s *child; irqstate_t flags; int port; uvdbg("Disconnecting\n"); DEBUGASSERT(hubclass != NULL && hubclass->hport != NULL); priv = &((FAR struct usbhost_hubclass_s *)hubclass)->hubpriv; hport = hubclass->hport; uvdbg("Destroying hub on port %d\n", hport->port); /* Set an indication to any users of the device that the device is no * longer available. */ flags = irqsave(); /* Cancel any pending transfers on the interrupt IN pipe */ DRVR_CANCEL(hport->drvr, priv->intin); /* Cancel any pending port status change events */ work_cancel(LPWORK, &priv->work); /* Disable power to all downstream ports */ (void)usbhost_hubpwr(priv, hport, false); /* Free the allocated control request */ DRVR_FREE(hport->drvr, (FAR uint8_t *)priv->ctrlreq); /* Free buffer for status change (INT) endpoint */ DRVR_IOFREE(hport->drvr, priv->buffer); /* Destroy the interrupt IN endpoint */ DRVR_EPFREE(hport->drvr, priv->intin); /* Release per-port resources */ for (port = 0; port < USBHUB_MAX_PORTS; port++) { /* Free any devices classes connect on this hub port */ child = &priv->hport[port]; if (child->devclass != NULL) { CLASS_DISCONNECTED(child->devclass); child->devclass = NULL; } /* Free any resources used by the hub port */ usbhost_hport_deactivate(child); } /* Deactivate the parent hub port (unless it is the root hub port) */ usbhost_hport_deactivate(hport); /* Destroy the semaphores */ sem_destroy(&priv->exclsem); /* Disconnect the USB host device */ DRVR_DISCONNECT(hport->drvr, hport); /* Free the class instance */ kmm_free(hubclass); hport->devclass = NULL; irqrestore(flags); }
int up_fbinitialize(int display) { FAR struct lcdfb_dev_s *priv; FAR struct lcd_dev_s *lcd; struct fb_videoinfo_s vinfo; struct nxgl_rect_s rect; int ret; lcdinfo("display=%d\n", display); DEBUGASSERT((unsigned)display < UINT8_MAX); /* Allocate the framebuffer state structure */ priv = (FAR struct lcdfb_dev_s *)kmm_zalloc(sizeof(struct lcdfb_dev_s)); if (priv == NULL) { lcderr("ERROR: Failed to allocate state structure\n"); return -ENOMEM; } /* Initialize the LCD-independent fields of the state structure */ priv->display = display; priv->vtable.getvideoinfo = lcdfb_getvideoinfo, priv->vtable.getplaneinfo = lcdfb_getplaneinfo, #ifdef CONFIG_FB_CMAP priv->vtable.getcmap = lcdfb_getcmap, priv->vtable.putcmap = lcdfb_putcmap, #endif #ifdef CONFIG_FB_HWCURSOR priv->vtable.getcursor = lcdfb_getcursor, priv->vtable.setcursor = lcdfb_setcursor, #endif #ifdef CONFIG_LCD_EXTERNINIT /* Use external graphics driver initialization */ lcd = board_graphics_setup(display); if (lcd == NULL) { gerr("ERROR: board_graphics_setup failed, devno=%d\n", display); return EXIT_FAILURE; } #else /* Initialize the LCD device */ ret = board_lcd_initialize(); if (ret < 0) { lcderr("ERROR: board_lcd_initialize() failed: %d\n", ret); goto errout_with_state; } /* Get the device instance */ lcd = board_lcd_getdev(display); if (lcd == NULL) { lcderr("ERROR: board_lcd_getdev failed, devno=%d\n", display); ret = -ENODEV; goto errout_with_lcd; } #endif priv->lcd = lcd; /* Initialize the LCD-dependent fields of the state structure */ DEBUGASSERT(lcd->getvideoinfo != NULL); ret = lcd->getvideoinfo(lcd, &vinfo); if (ret < 0) { lcderr("ERROR: LCD getvideoinfo() failed: %d\n", ret); goto errout_with_lcd; } priv->xres = vinfo.xres; priv->yres = vinfo.yres; DEBUGASSERT(lcd->getplaneinfo != NULL); ret = lcd->getplaneinfo(lcd, VIDEO_PLANE, &priv->pinfo); if (ret < 0) { lcderr("ERROR: LCD getplaneinfo() failed: %d\n", ret); goto errout_with_lcd; } /* Allocate (and clear) the framebuffer */ priv->stride = 
((size_t)priv->xres * priv->pinfo.bpp + 7) >> 3; priv->fblen = priv->stride * priv->yres; priv->fbmem = (FAR uint8_t *)kmm_zalloc(priv->fblen); if (priv->fbmem == NULL) { lcderr("ERROR: Failed to allocate frame buffer memory\n"); ret = -ENOMEM; goto errout_with_lcd; } /* Add the state structure to the list of framebuffer interfaces */ priv->flink = g_lcdfb; g_lcdfb = priv; /* Write the entire framebuffer to the LCD */ rect.pt1.x = 0; rect.pt1.y = 0; rect.pt2.x = priv->xres - 1; rect.pt2.y = priv->yres - 1; ret = lcdfb_update(priv, &rect); if (ret < 0) { lcderr("FB update failed: %d\n", ret); } /* Turn the LCD on at 75% power */ (void)priv->lcd->setpower(priv->lcd, ((3*CONFIG_LCD_MAXPOWER + 3)/4)); return OK; errout_with_lcd: #ifndef CONFIG_LCD_EXTERNINIT board_lcd_uninitialize(); errout_with_state: #endif kmm_free(priv); return ret; }