Example #1
/**
 * @brief   EEPROM write routine.
 * @details Writes data to the EEPROM.
 * @pre     The data must fit within a single EEPROM page.
 *
 * @param[in] eepcfg  pointer to the EEPROM file configuration structure
 * @param[in] offset  address of the first byte to be written
 * @param[in] data    pointer to the buffer with data to be written
 * @param[in] len     number of bytes to be written
 *
 * @return            operation status
 */
static msg_t eeprom_write(const I2CEepromFileConfig *eepcfg, uint32_t offset,
                          const uint8_t *data, size_t len) {
    msg_t status = RDY_RESET;
    systime_t tmo = calc_timeout(eepcfg->i2cp, (len + 2), 0);

    chDbgCheck(((len <= eepcfg->size) && ((offset + len) <= eepcfg->size)),
               "out of device bounds");
    chDbgCheck((((offset + eepcfg->barrier_low) / eepcfg->pagesize) ==
                (((offset + eepcfg->barrier_low) + len - 1) / eepcfg->pagesize)),
               "data can not be fitted in single page");

    /* write address bytes */
    eeprom_split_addr(eepcfg->write_buf, (offset + eepcfg->barrier_low));
    /* write data bytes */
    memcpy(&(eepcfg->write_buf[2]), data, len);

#if I2C_USE_MUTUAL_EXCLUSION
    i2cAcquireBus(eepcfg->i2cp);
#endif

    status = i2cMasterTransmitTimeout(eepcfg->i2cp, eepcfg->addr,
                                      eepcfg->write_buf, (len + 2), NULL, 0, tmo);

#if I2C_USE_MUTUAL_EXCLUSION
    i2cReleaseBus(eepcfg->i2cp);
#endif

    /* wait for the EEPROM internal write cycle to complete */
    chThdSleep(eepcfg->write_time);

    return status;
}
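A minimal usage sketch (not part of the original source) for the routine above: the eeprom_cfg instance, offset, and data are hypothetical, and the call is written so that the record stays within one EEPROM page as the precondition requires.

/* Hypothetical caller in the same translation unit: write a 4-byte record
   at offset 0x10; eeprom_cfg is an assumed I2CEepromFileConfig instance and
   the record must lie within a single EEPROM page. */
static msg_t store_record(void) {
    static const uint8_t record[4] = {0xDE, 0xAD, 0xBE, 0xEF};
    return eeprom_write(&eeprom_cfg, 0x10, record, sizeof(record));
}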
Example #2
msg_t i2c_receive(i2caddr_t addr, uint8_t *rxbuf, size_t rxbytes){
  msg_t status = RDY_OK;
  systime_t tmo = calc_timeout(&I2C_BUS, 0, rxbytes);

  i2cAcquireBus(&I2C_BUS);
  status = i2cMasterReceiveTimeout(&I2C_BUS, addr, rxbuf, rxbytes, tmo);
  i2cReleaseBus(&I2C_BUS);
  chDbgAssert(status == RDY_OK, "i2c_receive(), #1", "error in driver");
  if (status == RDY_TIMEOUT){
    //chprintf((BaseChannel *)&SD1, "I2C Timeout, restarting...\r\n");
    i2cStop(&I2C_BUS);
    chThdSleepMilliseconds(1);
    i2cStart(&I2C_BUS, &i2cfg1);
    setGlobalFlag(I2C_RESTARTED_FLAG);
    return status;
  }
  return status;
}
Example #3
msg_t i2c_transmit(i2caddr_t addr, const uint8_t *txbuf, size_t txbytes,
                   uint8_t *rxbuf, size_t rxbytes){
  msg_t status = RDY_OK;
  systime_t tmo = calc_timeout(&I2C_BUS, txbytes, rxbytes);

  i2cAcquireBus(&I2C_BUS);
  status = i2cMasterTransmitTimeout(&I2C_BUS, addr, txbuf, txbytes, rxbuf, rxbytes, tmo);
  i2cReleaseBus(&I2C_BUS);
  if (status == RDY_TIMEOUT){
    //chprintf((BaseChannel *)&SD1, "I2C Timeout, restarting...\r\n");
    i2cStop(&I2C_BUS);
    chThdSleepMilliseconds(1);
    i2cStart(&I2C_BUS, &i2cfg1);
    setGlobalFlag(I2C_RESTARTED_FLAG);
    return status;
  }
  return status;
}
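A minimal usage sketch (not part of the original source) for the wrapper above: it reads a single register from an I2C device; the device address 0x68 and the register layout are illustrative assumptions.

/* Hypothetical caller: write the register address, then read one byte back
   in the same transaction; 0x68 is an assumed device address. */
static uint8_t read_device_register(uint8_t reg) {
  uint8_t rx = 0;
  if (i2c_transmit(0x68, &reg, 1, &rx, 1) != RDY_OK) {
    /* bus error or timeout; the wrapper has already restarted the driver */
  }
  return rx;
}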
Example #4
/**
 * @brief   EEPROM read routine.
 *
 * @param[in] eepcfg    pointer to the EEPROM file configuration structure
 * @param[in] offset    address of the first byte to be read
 * @param[out] data     pointer to the buffer receiving the read data
 * @param[in] len       number of bytes to be read
 *
 * @return              operation status
 */
static msg_t eeprom_read(const I2CEepromFileConfig *eepcfg,
                         uint32_t offset, uint8_t *data, size_t len) {

    msg_t status = RDY_RESET;
    systime_t tmo = calc_timeout(eepcfg->i2cp, 2, len);

    chDbgCheck(((len <= eepcfg->size) && ((offset + len) <= eepcfg->size)),
               "out of device bounds");

    eeprom_split_addr(eepcfg->write_buf, (offset + eepcfg->barrier_low));

#if I2C_USE_MUTUAL_EXCLUSION
    i2cAcquireBus(eepcfg->i2cp);
#endif

    status = i2cMasterTransmitTimeout(eepcfg->i2cp, eepcfg->addr,
                                      eepcfg->write_buf, 2, data, len, tmo);

#if I2C_USE_MUTUAL_EXCLUSION
    i2cReleaseBus(eepcfg->i2cp);
#endif

    return status;
}
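As with the write routine, a minimal usage sketch (not part of the original source) for reading a record back; the eeprom_cfg instance and offset are assumptions for illustration only.

/* Hypothetical caller: read 4 bytes from offset 0x10 into the supplied
   buffer; eeprom_cfg is an assumed I2CEepromFileConfig instance. */
static msg_t load_record(uint8_t buf[4]) {
    return eeprom_read(&eeprom_cfg, 0x10, buf, 4);
}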
Example #5
void ixx_event_loop_update(IxxEventLoop* self)
{
    int timeout = calc_timeout(self);

    FD_ZERO(&self->read_fds);
    FD_ZERO(&self->write_fds);

    int nfd = 0;
    for (IxxEvent* event = self->events.first;
         event;
         event = event->node.next)
    {
        int fd = 0;

        switch (event->type) {
        case IXX_EVENT_UNDEFINED:
            break;
        case IXX_EVENT_TIMER:
            break;
        case IXX_EVENT_THREAD:
            break;
        case IXX_EVENT_SOCKET_CONNECT:
            fd = event->socket_connect.socket;
            FD_SET(fd, &self->write_fds);
            break;
        case IXX_EVENT_SOCKET_ACCEPT:
            fd = event->socket_accept.socket;
            FD_SET(fd, &self->read_fds);
            break;
        case IXX_EVENT_SOCKET_READ:
            fd = event->socket_read.socket;
            FD_SET(fd, &self->read_fds);
            break;
        case IXX_EVENT_SOCKET_WRITE:
            fd = event->socket_write.socket;
            FD_SET(fd, &self->write_fds);
            break;
        default:
            break;
        }

        if (fd > nfd) {
            nfd = fd;
        }
    }

    int err;

    unsigned long long before_tick = ixx_time_tick();

    if (timeout < 0) {
        if (0 == nfd) {
            ixx_sleep(timeout);
            err = IXX_OK;
        }
        else {
            /* select() takes the highest descriptor plus one */
            err = select(nfd + 1, &self->read_fds, &self->write_fds, NULL, NULL);
        }
    }
    else {
        if (0 == nfd) {
            ixx_sleep(timeout);
            err = IXX_OK;
        }
        else {
            struct timeval tv;
            tv.tv_sec = timeout / 1000;
            tv.tv_usec = (timeout % 1000) * 1000;
            err = select(nfd + 1, &self->read_fds, &self->write_fds, NULL, &tv);
        }
    }

    unsigned long long after_tick = ixx_time_tick();

    IxxEvent* next_event = NULL;
    for (IxxEvent* event = self->events.first;
         event;
         event = next_event)
    {
        next_event = event->node.next;

        event->elapsed = after_tick - event->start_tick;

        switch (event->type) {
        case IXX_EVENT_UNDEFINED:
            break;
        case IXX_EVENT_TIMER:
            update_timer(event, self);
            break;
        case IXX_EVENT_THREAD:
            break;
        case IXX_EVENT_SOCKET_CONNECT:
            break;
        case IXX_EVENT_SOCKET_ACCEPT:
            break;
        case IXX_EVENT_SOCKET_READ:
            break;
        case IXX_EVENT_SOCKET_WRITE:
            break;
        default:
            break;
        }
    }
}
Example #6
static int
raw_getbyte(long do_keytmout, char *cptr)
{
    int ret;
    struct ztmout tmout;
#if defined(HAS_TIO) && \
  (defined(sun) || (!defined(HAVE_POLL) && !defined(HAVE_SELECT)))
    struct ttyinfo ti;
#endif
#ifndef HAVE_POLL
# ifdef HAVE_SELECT
    fd_set foofd, errfd;
    FD_ZERO(&errfd);
# endif
#endif

    calc_timeout(&tmout, do_keytmout);

    /*
     * Handle timeouts and watched fd's.  If a watched fd or a function
     * timeout triggers we restart any key timeout.  This is likely to
     * be harmless: the combination is extremely rare and a function
     * is likely to occupy the user for a little while anyway.  We used
     * to make timeouts take precedence, but we can't now that the
     * timeouts may be external, so we may have both a permanent watched
     * fd and a long-term timeout.
     */
    if ((nwatch || tmout.tp != ZTM_NONE)) {
#if defined(HAVE_SELECT) || defined(HAVE_POLL)
	int i, errtry = 0, selret;
# ifdef HAVE_POLL
	int nfds;
	struct pollfd *fds;
# endif
# if defined(HAS_TIO) && defined(sun)
	/*
	 * Yes, I know this is complicated.  Yes, I know we
	 * already have three bits of code to poll the terminal
	 * down below.  No, I don't want to do this either.
	 * However, it turns out on certain OSes, specifically
	 * Solaris, that you can't poll typeahead for love nor
	 * money without actually trying to read it.  But
	 * if we are trying to select (and we need to if we
	 * are watching other fd's) we won't pick that up.
	 * So we just try and read it without blocking in
	 * the time-honoured (i.e. absurdly baroque) termios
	 * fashion.
	 */
	gettyinfo(&ti);
	ti.tio.c_cc[VMIN] = 0;
	settyinfo(&ti);
	winch_unblock();
	ret = read(SHTTY, cptr, 1);
	winch_block();
	ti.tio.c_cc[VMIN] = 1;
	settyinfo(&ti);
	if (ret > 0)
	    return 1;
# endif
# ifdef HAVE_POLL
	nfds = 1 + nwatch;
	/* First pollfd is SHTTY, following are the nwatch fds */
	fds = zalloc(sizeof(struct pollfd) * nfds);
	fds[0].fd = SHTTY;
	/*
	 * POLLIN, POLLIN, POLLIN,
	 * Keep those fd's POLLIN...
	 */
	fds[0].events = POLLIN;
	for (i = 0; i < nwatch; i++) {
	    fds[i+1].fd = watch_fds[i].fd;
	    fds[i+1].events = POLLIN;
	}
# endif
	for (;;) {
# ifdef HAVE_POLL
	    int poll_timeout;

	    if (tmout.tp != ZTM_NONE)
		poll_timeout = tmout.exp100ths * 10;
	    else
		poll_timeout = -1;

	    winch_unblock();
	    selret = poll(fds, errtry ? 1 : nfds, poll_timeout);
	    winch_block();
# else
	    int fdmax = SHTTY;
	    struct timeval *tvptr;
	    struct timeval expire_tv;

	    FD_ZERO(&foofd);
	    FD_SET(SHTTY, &foofd);
	    if (!errtry) {
		for (i = 0; i < nwatch; i++) {
		    int fd = watch_fds[i].fd;
		    if (FD_ISSET(fd, &errfd))
			continue;
		    FD_SET(fd, &foofd);
		    if (fd > fdmax)
			fdmax = fd;
		}
	    }
	    FD_ZERO(&errfd);

	    if (tmout.tp != ZTM_NONE) {
		expire_tv.tv_sec = tmout.exp100ths / 100;
		expire_tv.tv_usec = (tmout.exp100ths % 100) * 10000L;
		tvptr = &expire_tv;
	    }
	    else
		tvptr = NULL;

	    winch_unblock();
	    selret = select(fdmax+1, (SELECT_ARG_2_T) & foofd,
			    NULL, NULL, tvptr);
	    winch_block();
# endif
	    /*
	     * Make sure a user interrupt gets passed on straight away.
	     */
	    if (selret < 0 && (errflag || retflag || breaks || exit_pending))
		break;
	    /*
	     * Try to avoid errors on our special fd's from
	     * messing up reads from the terminal.  Try first
	     * with all fds, then try unsetting the special ones.
	     */
	    if (selret < 0 && !errtry) {
		errtry = 1;
		continue;
	    }
	    if (selret == 0) {
		/*
		 * Nothing ready and no error, so we timed out.
		 */
		switch (tmout.tp) {
		case ZTM_NONE:
		    /* keeps compiler happy if not debugging */
#ifdef DEBUG
		    dputs("BUG: timeout fired with no timeout set.");
#endif
		    /* treat as if a key timeout triggered */
		    /*FALLTHROUGH*/
		case ZTM_KEY:
		    /* Special value -2 signals nothing ready */
		    selret = -2;
		    break;

		case ZTM_FUNC:
		    while (firstnode(timedfns)) {
			Timedfn tfdat = (Timedfn)getdata(firstnode(timedfns));
			/*
			 * It's possible a previous function took
			 * a long time to run (though it can't
			 * call zle recursively), so recalculate
			 * the time on each iteration.
			 */
			time_t now = time(NULL);
			if (tfdat->when > now)
			    break;
			tfdat->func();
		    }
		    /* Function may have messed up the display */
		    if (resetneeded)
			zrefresh();
		    /* We need to recalculate the timeout */
		    /*FALLTHROUGH*/
		case ZTM_MAX:
		    /*
		     * Reached the limit of our range, but not the
		     * actual timeout; recalculate the timeout.
		     * We're cheating with the key timeout here:
		     * if one clashed with a function timeout we
		     * reconsider the key timeout from scratch.
		     * The effect of this is microscopic.
		     */
		    calc_timeout(&tmout, do_keytmout);
		    break;
		}
		/*
		 * If we handled the timeout successfully,
		 * carry on.
		 */
		if (selret == 0)
		    continue;
	    }
	    /* If error or unhandled timeout, give up. */
	    if (selret < 0)
		break;
	    /*
	     * If there's user input handle it straight away.
	     * This improves the user's ability to handle exceptional
	     * conditions like runaway output.
	     */
	    if (
# ifdef HAVE_POLL
		 (fds[0].revents & POLLIN)
# else
		 FD_ISSET(SHTTY, &foofd)
# endif
		 )
		break;
	    if (nwatch && !errtry) {
		/*
		 * Copy the details of the watch fds in case the
		 * user decides to delete one from inside the
		 * handler function.
		 */
		int lnwatch = nwatch;
		Watch_fd lwatch_fds = zalloc(lnwatch*sizeof(struct watch_fd));
		memcpy(lwatch_fds, watch_fds, lnwatch*sizeof(struct watch_fd));
		for (i = 0; i < lnwatch; i++)
		    lwatch_fds[i].func = ztrdup(lwatch_fds[i].func);
		for (i = 0; i < lnwatch; i++) {
		    Watch_fd lwatch_fd = lwatch_fds + i;
		    if (
# ifdef HAVE_POLL
			(fds[i+1].revents & (POLLIN|POLLERR|POLLHUP|POLLNVAL))
# else
			FD_ISSET(lwatch_fd->fd, &foofd) ||
			FD_ISSET(lwatch_fd->fd, &errfd)
# endif
			) {
			/* Handle the fd. */
			char *fdbuf;
			{
			    char buf[BDIGBUFSIZE];
			    convbase(buf, lwatch_fd->fd, 10);
			    fdbuf = ztrdup(buf);
			}

			if (lwatch_fd->widget) {
			    zlecallhook(lwatch_fd->func, fdbuf);
			    zsfree(fdbuf);
			} else {
			    LinkList funcargs = znewlinklist();
			    zaddlinknode(funcargs, ztrdup(lwatch_fd->func));
			    zaddlinknode(funcargs, fdbuf);
# ifdef HAVE_POLL
#  ifdef POLLERR
			    if (fds[i+1].revents & POLLERR)
				zaddlinknode(funcargs, ztrdup("err"));
#  endif
#  ifdef POLLHUP
			    if (fds[i+1].revents & POLLHUP)
				zaddlinknode(funcargs, ztrdup("hup"));
#  endif
#  ifdef POLLNVAL
			    if (fds[i+1].revents & POLLNVAL)
				zaddlinknode(funcargs, ztrdup("nval"));
#  endif
# else
			    if (FD_ISSET(lwatch_fd->fd, &errfd))
				zaddlinknode(funcargs, ztrdup("err"));
# endif
			    callhookfunc(lwatch_fd->func, funcargs, 0, NULL);
			    freelinklist(funcargs, freestr);
			}
			if (errflag) {
			    /* No sensible way of handling errors here */
			    errflag &= ~ERRFLAG_ERROR;
			    /*
			     * Paranoia: don't run the hooks again this
			     * time.
			     */
			    errtry = 1;
			}
		    }
		}
		/* Function may have invalidated the display. */
		if (resetneeded)
		    zrefresh();
		for (i = 0; i < lnwatch; i++)
		    zsfree(lwatch_fds[i].func);
		zfree(lwatch_fds, lnwatch*sizeof(struct watch_fd));

# ifdef HAVE_POLL
		/* Function may have added or removed handlers */
		nfds = 1 + nwatch;
		if (nfds > 1) {
		    fds = zrealloc(fds, sizeof(struct pollfd) * nfds);
		    for (i = 0; i < nwatch; i++) {
			/*
			 * This is imperfect because it assumes fds[] and
			 * watch_fds[] remain in sync, which may be false
			 * if handlers are shuffled.  However, it should
			 * be harmless (e.g., produce one extra pass of
			 * the loop) in the event they fall out of sync.
			 */
			if (fds[i+1].fd == watch_fds[i].fd &&
			    (fds[i+1].revents & (POLLERR|POLLHUP|POLLNVAL))) {
			    fds[i+1].events = 0;	/* Don't poll this */
			} else {
			    fds[i+1].fd = watch_fds[i].fd;
			    fds[i+1].events = POLLIN;
			}
			fds[i+1].revents = 0;
		    }
		}
# endif
	    }
	}
# ifdef HAVE_POLL
	zfree(fds, sizeof(struct pollfd) * nfds);
# endif
	if (selret < 0)
	    return selret;
#else
# ifdef HAS_TIO
	ti = shttyinfo;
	ti.tio.c_lflag &= ~ICANON;
	ti.tio.c_cc[VMIN] = 0;
	ti.tio.c_cc[VTIME] = tmout.exp100ths / 10;
#  ifdef HAVE_TERMIOS_H
	tcsetattr(SHTTY, TCSANOW, &ti.tio);
#  else
	ioctl(SHTTY, TCSETA, &ti.tio);
#  endif
	winch_unblock();
	ret = read(SHTTY, cptr, 1);
	winch_block();
#  ifdef HAVE_TERMIOS_H
	tcsetattr(SHTTY, TCSANOW, &shttyinfo.tio);
#  else
	ioctl(SHTTY, TCSETA, &shttyinfo.tio);
#  endif
	return (ret <= 0) ? ret : *cptr;
# endif
#endif
    }

    winch_unblock();
    ret = read(SHTTY, cptr, 1);
    winch_block();

    return ret;
}