Example #1
static int
uftdi_param(void *vsc, int portno, struct termios *t)
{
	struct uftdi_softc *sc = vsc;
	usb_device_request_t req;
	usbd_status err;
	int rate, data, flow;

	DPRINTF(("uftdi_param: sc=%p\n", sc));

	if (sc->sc_dying)
		return (EIO);

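	/* FTDI_BITMODE_RESET: take the chip out of any special bit mode so it is in plain UART operation before reprogramming it. */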
	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = FTDI_SIO_SET_BITMODE;
	USETW(req.wValue, FTDI_BITMODE_RESET << 8 | 0x00);
	USETW(req.wIndex, portno);
	USETW(req.wLength, 0);
	err = usbd_do_request(sc->sc_udev, &req, NULL);
	if (err)
		return (EIO);

	switch (sc->sc_type) {
	case UFTDI_TYPE_SIO:
		switch (t->c_ospeed) {
		case 300: rate = ftdi_sio_b300; break;
		case 600: rate = ftdi_sio_b600; break;
		case 1200: rate = ftdi_sio_b1200; break;
		case 2400: rate = ftdi_sio_b2400; break;
		case 4800: rate = ftdi_sio_b4800; break;
		case 9600: rate = ftdi_sio_b9600; break;
		case 19200: rate = ftdi_sio_b19200; break;
		case 38400: rate = ftdi_sio_b38400; break;
		case 57600: rate = ftdi_sio_b57600; break;
		case 115200: rate = ftdi_sio_b115200; break;
		default:
			return (EINVAL);
		}
		break;

	case UFTDI_TYPE_8U232AM:
		switch (t->c_ospeed) {
		case 300: rate = ftdi_8u232am_b300; break;
		case 600: rate = ftdi_8u232am_b600; break;
		case 1200: rate = ftdi_8u232am_b1200; break;
		case 2400: rate = ftdi_8u232am_b2400; break;
		case 4800: rate = ftdi_8u232am_b4800; break;
		case 9600: rate = ftdi_8u232am_b9600; break;
		case 19200: rate = ftdi_8u232am_b19200; break;
		case 38400: rate = ftdi_8u232am_b38400; break;
		case 57600: rate = ftdi_8u232am_b57600; break;
		case 115200: rate = ftdi_8u232am_b115200; break;
		case 230400: rate = ftdi_8u232am_b230400; break;
		case 460800: rate = ftdi_8u232am_b460800; break;
		case 921600: rate = ftdi_8u232am_b921600; break;
		default:
			return (EINVAL);
		}
		break;

	default:
		return (EINVAL);
	}
	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = FTDI_SIO_SET_BAUD_RATE;
	USETW(req.wValue, rate);
	USETW(req.wIndex, portno);
	USETW(req.wLength, 0);
	DPRINTFN(2,("uftdi_param: reqtype=0x%02x req=0x%02x value=0x%04x "
		    "index=0x%04x len=%d\n", req.bmRequestType, req.bRequest,
		    UGETW(req.wValue), UGETW(req.wIndex), UGETW(req.wLength)));
	err = usbd_do_request(sc->sc_udev, &req, NULL);
	if (err)
		return (EIO);

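	/* Build the line-control value: stop bits, then parity, then character size. */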
	if (ISSET(t->c_cflag, CSTOPB))
		data = FTDI_SIO_SET_DATA_STOP_BITS_2;
	else
		data = FTDI_SIO_SET_DATA_STOP_BITS_1;
	if (ISSET(t->c_cflag, PARENB)) {
		if (ISSET(t->c_cflag, PARODD))
			data |= FTDI_SIO_SET_DATA_PARITY_ODD;
		else
			data |= FTDI_SIO_SET_DATA_PARITY_EVEN;
	} else
		data |= FTDI_SIO_SET_DATA_PARITY_NONE;
	switch (ISSET(t->c_cflag, CSIZE)) {
	case CS5:
		data |= FTDI_SIO_SET_DATA_BITS(5);
		break;
	case CS6:
		data |= FTDI_SIO_SET_DATA_BITS(6);
		break;
	case CS7:
		data |= FTDI_SIO_SET_DATA_BITS(7);
		break;
	case CS8:
		data |= FTDI_SIO_SET_DATA_BITS(8);
		break;
	}
	sc->last_lcr = data;

	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = FTDI_SIO_SET_DATA;
	USETW(req.wValue, data);
	USETW(req.wIndex, portno);
	USETW(req.wLength, 0);
	DPRINTFN(2,("uftdi_param: reqtype=0x%02x req=0x%02x value=0x%04x "
		    "index=0x%04x len=%d\n", req.bmRequestType, req.bRequest,
		    UGETW(req.wValue), UGETW(req.wIndex), UGETW(req.wLength)));
	err = usbd_do_request(sc->sc_udev, &req, NULL);
	if (err)
		return (EIO);

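	/* Pick the flow control: hardware RTS/CTS, software XON/XOFF with the tty start/stop characters, or none. */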
	if (ISSET(t->c_cflag, CRTSCTS)) {
		flow = FTDI_SIO_RTS_CTS_HS;
		USETW(req.wValue, 0);
	} else if (ISSET(t->c_iflag, IXON) && ISSET(t->c_iflag, IXOFF)) {
		flow = FTDI_SIO_XON_XOFF_HS;
		USETW2(req.wValue, t->c_cc[VSTOP], t->c_cc[VSTART]);
	} else {
		flow = FTDI_SIO_DISABLE_FLOW_CTRL;
		USETW(req.wValue, 0);
	}
	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = FTDI_SIO_SET_FLOW_CTRL;
	USETW2(req.wIndex, flow, portno);
	USETW(req.wLength, 0);
	err = usbd_do_request(sc->sc_udev, &req, NULL);
	if (err)
		return (EIO);

	return (0);
}
Example #2
void
at91usart_attach_subr(struct at91usart_softc *sc, struct at91bus_attach_args *sa)
{
	struct tty *tp;
	int err;

	printf("\n");

	if (bus_space_map(sa->sa_iot, sa->sa_addr, sa->sa_size, 0, &sc->sc_ioh))
		panic("%s: Cannot map registers", device_xname(sc->sc_dev));

	sc->sc_iot = sa->sa_iot;
	sc->sc_hwbase = sa->sa_addr;
	sc->sc_dmat = sa->sa_dmat;
	sc->sc_pid = sa->sa_pid;

	/* allocate fifos */
	err = at91pdc_alloc_fifo(sc->sc_dmat, &sc->sc_rx_fifo, AT91USART_RING_SIZE, BUS_DMA_READ | BUS_DMA_STREAMING);
	if (err)
		panic("%s: cannot allocate rx fifo", device_xname(sc->sc_dev));

	err = at91pdc_alloc_fifo(sc->sc_dmat, &sc->sc_tx_fifo, AT91USART_RING_SIZE, BUS_DMA_WRITE | BUS_DMA_STREAMING);
	if (err)
		panic("%s: cannot allocate tx fifo", device_xname(sc->sc_dev));

	/* initialize uart */
	at91_peripheral_clock(sc->sc_pid, 1);

	at91usart_writereg(sc, US_IDR, -1);
	at91usart_writereg(sc, US_RTOR, 12);	// 12-bit timeout
	at91usart_writereg(sc, US_PDC + PDC_PTCR, PDC_PTCR_TXTDIS | PDC_PTCR_RXTDIS);
	at91_intr_establish(sa->sa_pid, IPL_TTY, INTR_HIGH_LEVEL, at91usart_intr, sc);
	USART_INIT(sc, 115200U);

#ifdef	NOTYET
	if (sc->sc_iot == usart_cn_sc.sc_iot
	    && sc->sc_hwbase == usart_cn_sc.sc_hwbase) {
		usart_cn_sc.sc_attached = 1;
		/* Make sure the console is always "hardwired". */
		delay(10000);	/* wait for output to finish */
		SET(sc->sc_hwflags, COM_HW_CONSOLE);
		SET(sc->sc_swflags, TIOCFLAG_SOFTCAR);
		SET(sc->sc_ier, USART_INT_RXRDY);
		USARTREG(USART_IER) = USART_INT_RXRDY; // @@@@@
	}
#endif	// NOTYET

	tp = ttymalloc();
	tp->t_oproc = at91usart_start;
	tp->t_param = at91usart_param;
	tp->t_hwiflow = at91usart_hwiflow;

	sc->sc_tty = tp;

	tty_attach(tp);

#ifdef	NOTYET
	if (ISSET(sc->sc_hwflags, COM_HW_CONSOLE)) {
		int maj;

		/* locate the major number */
		maj = cdevsw_lookup_major(&at91usart_cdevsw);

		cn_tab->cn_dev = makedev(maj, device_unit(sc->sc_dev));

		aprint_normal("%s: console (maj %u  min %u  cn_dev %u)\n",
		    device_xname(sc->sc_dev), maj, device_unit(sc->sc_dev),
		    cn_tab->cn_dev);
	}
#endif	/* NOTYET */

	sc->sc_si = softint_establish(SOFTINT_SERIAL, at91usart_soft, sc);

#if NRND > 0 && defined(RND_COM)
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
			  RND_TYPE_TTY, 0);
#endif

	/* if there are no enable/disable functions, assume the device
	   is always enabled */
	if (!sc->enable)
		sc->enabled = 1;

	/* XXX configure register */
	/* xxx_config(sc) */

	SET(sc->sc_hwflags, COM_HW_DEV_OK);
}
Example #3
int
at91usart_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct at91usart_softc *sc;
	struct tty *tp;
	int s;
	int error;

	sc = device_lookup_private(&at91usart_cd, COMUNIT(dev));
	if (sc == NULL || !ISSET(sc->sc_hwflags, COM_HW_DEV_OK))
		return (ENXIO);

	if (!device_is_active(sc->sc_dev))
		return (ENXIO);

#ifdef KGDB
	/*
	 * If this is the kgdb port, no other use is permitted.
	 */
	if (ISSET(sc->sc_hwflags, COM_HW_KGDB))
		return (EBUSY);
#endif

	tp = sc->sc_tty;

	if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
		return (EBUSY);

	s = spltty();

	/*
	 * Do the following iff this is a first open.
	 */
	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		struct termios t;

		tp->t_dev = dev;

		if (sc->enable) {
			if ((*sc->enable)(sc)) {
				splx(s);
				printf("%s: device enable failed\n",
				       device_xname(sc->sc_dev));
				return (EIO);
			}
			sc->enabled = 1;
#if 0
/* XXXXXXXXXXXXXXX */
			com_config(sc);
#endif
		}

		/* reset fifos: */
		AT91PDC_RESET_FIFO(sc->sc_iot, sc->sc_ioh, sc->sc_dmat, US_PDC, &sc->sc_rx_fifo, 0);
		AT91PDC_RESET_FIFO(sc->sc_iot, sc->sc_ioh, sc->sc_dmat, US_PDC, &sc->sc_tx_fifo, 1);

		/* reset receive */
		at91usart_writereg(sc, US_CR, US_CR_RSTSTA | US_CR_STTTO);

		/* Turn on interrupts. */
		sc->sc_ier = US_CSR_ENDRX|US_CSR_RXBUFF|US_CSR_TIMEOUT|US_CSR_RXBRK;
		at91usart_writereg(sc, US_IER, sc->sc_ier);

		/* enable DMA: */
		at91usart_writereg(sc, US_PDC + PDC_PTCR, PDC_PTCR_RXTEN);

		/*
		 * Initialize the termios status to the defaults.  Add in the
		 * sticky bits from TIOCSFLAGS.
		 */
		t.c_ispeed = 0;
/*		if (ISSET(sc->sc_hwflags, COM_HW_CONSOLE)) {
			t.c_ospeed = usart_cn_sc.sc_ospeed;
			t.c_cflag = usart_cn_sc.sc_cflag;
		} else*/ {
			t.c_ospeed = TTYDEF_SPEED;
			t.c_cflag = TTYDEF_CFLAG;
		}
		if (ISSET(sc->sc_swflags, TIOCFLAG_CLOCAL))
			SET(t.c_cflag, CLOCAL);
		if (ISSET(sc->sc_swflags, TIOCFLAG_CRTSCTS))
			SET(t.c_cflag, CRTSCTS);
		if (ISSET(sc->sc_swflags, TIOCFLAG_MDMBUF))
			SET(t.c_cflag, MDMBUF);

		/* Make sure at91usart_param() will do something. */
		tp->t_ospeed = 0;
		(void) at91usart_param(tp, &t);
		tp->t_iflag = TTYDEF_IFLAG;
		tp->t_oflag = TTYDEF_OFLAG;
		tp->t_lflag = TTYDEF_LFLAG;
		ttychars(tp);
		ttsetwater(tp);

		/* and unblock. */
		CLR(sc->sc_rx_flags, RX_ANY_BLOCK);

#ifdef COM_DEBUG
		if (at91usart_debug)
			comstatus(sc, "at91usart_open  ");
#endif

	}
	
	splx(s);

	error = ttyopen(tp, COMDIALOUT(dev), ISSET(flag, O_NONBLOCK));
	if (error)
		goto bad;

	error = (*tp->t_linesw->l_open)(dev, tp);
	if (error)
		goto bad;

	return (0);

bad:
	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		/*
		 * We failed to open the device, and nobody else had it opened.
		 * Clean up the state as appropriate.
		 */
		at91usart_shutdown(sc);
	}

	return (error);
}
Example #4
__private_extern__ int
nget_9p(mount_9p *nmp, fid_9p fid, qid_9p qid, vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
{
#pragma unused(ctx)
	struct vnode_fsparam fsp;
	struct hnode_9p *nhp;
	node_9p *np;
	uint32_t vid;
	int e, i;

	TRACE();
	nhp = HASH9P(nmp, qid.path);
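	/* Search the hash chain for an existing node with this qid, waiting out any init or reclaim in progress. */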
loop:
	lck_mtx_lock(nmp->nodelck);
	LIST_FOREACH (np, nhp, next) {
		if(np->dir.qid.path != qid.path)
			continue;
		if (ISSET(np->flags, NODE_INIT)) {
			SET(np->flags, NODE_WAITINIT);
			msleep(np, nmp->nodelck, PINOD|PDROP, "nget_9p_init", NULL);
			goto loop;
		}
		if (ISSET(np->flags, NODE_RECL)) {
			SET(np->flags, NODE_WAITRECL);
			msleep(np, nmp->nodelck, PINOD|PDROP, "nget_9p_reclaim", NULL);
			goto loop;
		}
		vid = vnode_vid(np->vp);
		lck_mtx_unlock(nmp->nodelck);
		if (vnode_getwithvid(np->vp, vid))
			goto loop;
		
		nlock_9p(np, NODE_LCK_EXCLUSIVE);
		if (dvp && cnp && ISSET(cnp->cn_flags, MAKEENTRY) && np->dir.qid.vers!=0) {
			// DEBUG("caching %s", np->dir->name);
			cache_enter(dvp, np->vp, cnp);
		} else {
			// DEBUG("not in cache qid=%d %s", qid.vers, np->dir->name);
		}

		*vpp = np->vp;
		return 0;
	}
	
	if (fid == NOFID)
		return EFAULT;

	np = malloc_9p(sizeof(*np));
	if (np == NULL) {
err0:
		lck_mtx_unlock(nmp->nodelck);
		return ENOMEM;
	}
	np->lck = lck_rw_alloc_init(lck_grp_9p, LCK_ATTR_NULL);
	if (np->lck == NULL) {
		free_9p(np);
		goto err0;
	}

	np->nmp = nmp;
	np->fid = fid;
	np->dir.qid = qid;
	for (i=0; i<3; i++)
		np->openfid[i].fid = NOFID;

	SET(np->flags, NODE_INIT);
	LIST_INSERT_HEAD(nhp, np, next);
	nlock_9p(np, NODE_LCK_EXCLUSIVE);
	lck_mtx_unlock(nmp->nodelck);

	if ((e=ngetdir_9p(np))) {
err1:
		nunlock_9p(np);
		lck_mtx_lock(nmp->nodelck);
		LIST_REMOVE(np, next);
		CLR(np->flags, NODE_INIT);
		if (ISSET(np->flags, NODE_WAITINIT)) {
			CLR(np->flags, NODE_WAITINIT);
			wakeup(np);
		}
		lck_mtx_unlock(nmp->nodelck);
		lck_rw_free(np->lck, lck_grp_9p);
		free_9p(np);
		return e;
	}

	fsp.vnfs_mp			= nmp->mp;
	fsp.vnfs_str		= fsname;
	fsp.vnfs_dvp		= dvp;
	fsp.vnfs_fsnode		= np;
	fsp.vnfs_vops		= vnode_op_9p;
	fsp.vnfs_markroot	= dvp==NULL? TRUE: FALSE;
	fsp.vnfs_marksystem	= FALSE;
	fsp.vnfs_filesize	= np->dir.length;
	fsp.vnfs_cnp		= cnp;
	fsp.vnfs_flags		= VNFS_ADDFSREF;
	dirvtype_9p(&np->dir, ISSET(nmp->flags, F_DOTU), &fsp.vnfs_vtype, &fsp.vnfs_rdev);
	if (!dvp || !cnp || !ISSET(cnp->cn_flags, MAKEENTRY) || qid.vers==0)
		SET(fsp.vnfs_flags, VNFS_NOCACHE);

	if ((e=vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &fsp, &np->vp)))
		goto err1;

	vnode_settag(np->vp, VT_OTHER);
	lck_mtx_lock(nmp->nodelck);
	CLR(np->flags, NODE_INIT);
	if (ISSET(np->flags, NODE_WAITINIT)) {
		CLR(np->flags, NODE_WAITINIT);
		wakeup(np);
	}
	lck_mtx_unlock(nmp->nodelck);
	*vpp = np->vp;

	return 0;
}
Example #5
void AssimpShader::render(const glm::mat4& mv_matrix,
        const glm::mat4& mv_it_matrix, const glm::mat4& mvp_matrix,
        RenderData* render_data, Material* material) {
    Mesh* mesh = render_data->mesh();
    Texture* texture;
    int feature_set = material->get_shader_feature_set();

    /* Get the texture only if the diffuse texture is set */
    if (ISSET(feature_set, AS_DIFFUSE_TEXTURE)) {
        texture = material->getTexture("main_texture");
        if (texture->getTarget() != GL_TEXTURE_2D) {
            std::string error =
                    "TextureShader::render : texture with wrong target.";
            throw error;
        }
    }

    /* Based on the feature set, pick the shader program; the feature set cannot exceed the program count */
    program_ = program_list_[feature_set & (AS_TOTAL_GL_PROGRAM_COUNT - 1)];

    u_mvp_ = glGetUniformLocation(program_->id(), "u_mvp");
    u_texture_ = glGetUniformLocation(program_->id(), "u_texture");
    u_diffuse_color_ = glGetUniformLocation(program_->id(), "u_diffuse_color");
    u_ambient_color_ = glGetUniformLocation(program_->id(), "u_ambient_color");
    u_color_ = glGetUniformLocation(program_->id(), "u_color");
    u_opacity_ = glGetUniformLocation(program_->id(), "u_opacity");

    /* Get common attributes and uniforms from material */
    glm::vec3 color = material->getVec3("color");
    float opacity = material->getFloat("opacity");

#if _GVRF_USE_GLES3_
    mesh->generateVAO();

    glUseProgram(program_->id());
    glUniformMatrix4fv(u_mvp_, 1, GL_FALSE, glm::value_ptr(mvp_matrix));

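    /* Bind the diffuse texture if one is set; otherwise pass the material colors as uniforms. */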
    if (ISSET(feature_set, AS_DIFFUSE_TEXTURE)) {
        glActiveTexture (GL_TEXTURE0);
        glBindTexture(texture->getTarget(), texture->getId());
        glUniform1i(u_texture_, 0);
    } else {
        glm::vec4 diffuse_color = material->getVec4("diffuse_color");
        glm::vec4 ambient_color = material->getVec4("ambient_color");
        glUniform4f(u_diffuse_color_, diffuse_color.r, diffuse_color.g,
                diffuse_color.b, diffuse_color.a);
        glUniform4f(u_ambient_color_, ambient_color.r, ambient_color.g,
                ambient_color.b, ambient_color.a);
    }

    /* Set up bones if AS_SKINNING is set */
    if (ISSET(feature_set, AS_SKINNING)) {
        a_bone_indices_ = glGetAttribLocation(program_->id(), "a_bone_indices");
        a_bone_weights_ = glGetAttribLocation(program_->id(), "a_bone_weights");
        u_bone_matrices_ = glGetUniformLocation(program_->id(), "u_bone_matrix[0]");
        if (u_bone_matrices_ == -1) {
            LOGD("Warning! Unable to get the location of uniform u_bone_matrix[0]\n");
        }

        mesh->setBoneLoc(a_bone_indices_, a_bone_weights_);
        mesh->generateBoneArrayBuffers();

        glm::mat4 finalTransform;
        int nBones = MIN(mesh->getVertexBoneData().getNumBones(), MAX_BONES);
        for (int i = 0; i < nBones; ++i) {
            finalTransform = mesh->getVertexBoneData().getFinalBoneTransform(i);
            glUniformMatrix4fv(u_bone_matrices_ + i, 1, GL_FALSE, glm::value_ptr(finalTransform));
        }
    }

    glUniform3f(u_color_, color.r, color.g, color.b);
    glUniform1f(u_opacity_, opacity);

    glBindVertexArray(mesh->getVAOId(Material::ASSIMP_SHADER));
    glDrawElements(render_data->draw_mode(), mesh->indices().size(), GL_UNSIGNED_SHORT,
            0);
    glBindVertexArray(0);
#else
    glUseProgram(program_->id());

    glVertexAttribPointer(a_position_, 3, GL_FLOAT, GL_FALSE, 0,
            mesh->vertices().data());
    glEnableVertexAttribArray(a_position_);

    glVertexAttribPointer(a_tex_coord_, 2, GL_FLOAT, GL_FALSE, 0,
            mesh->tex_coords().data());
    glEnableVertexAttribArray(a_tex_coord_);

    glUniformMatrix4fv(u_mvp_, 1, GL_FALSE, glm::value_ptr(mvp_matrix));

    if (ISSET(feature_set, AS_DIFFUSE_TEXTURE)) {
        glActiveTexture (GL_TEXTURE0);
        glBindTexture(texture->getTarget(), texture->getId());
        glUniform1i(u_texture_, 0);
    } else {
        glm::vec4 diffuse_color = material->getVec4("diffuse_color");
        glm::vec4 ambient_color = material->getVec4("ambient_color");
        glUniform4f(u_diffuse_color_, diffuse_color.x, diffuse_color.y, diffuse_color.z, diffuse_color.w);
        glUniform4f(u_ambient_color_, ambient_color.x, ambient_color.y, ambient_color.z, ambient_color.w);
    }

    glUniform3f(u_color_, color.r, color.g, color.b);
    glUniform1f(u_opacity_, opacity);

    glDrawElements(render_data->draw_mode(), mesh->indices().size(), GL_UNSIGNED_SHORT,
            mesh->indices().data());
#endif

    checkGlError("AssimpShader::render");
}
Example #6
static int
vnop_readdir_9p(struct vnop_readdir_args *ap)
{
	struct direntry de64;
	struct dirent de32;
	vnode_t vp;
	node_9p *np;
	dir_9p *dp;
	fid_9p fid;
	off_t off;
	uio_t uio;
	uint32_t i, nd, nlen, plen;
	void *p;
	int e;
	
	TRACE();
	vp = ap->a_vp;
	uio = ap->a_uio;
	np = NTO9P(vp);

	if (!vnode_isdir(vp))
		return ENOTDIR;

	if (ISSET(ap->a_flags, VNODE_READDIR_REQSEEKOFF))
		return EINVAL;

	off = uio_offset(uio);
	if (off < 0)
		return EINVAL;
	
	if (uio_resid(uio) == 0)
		return 0;

	e = 0;
	nlock_9p(np, NODE_LCK_EXCLUSIVE);
	fid = np->openfid[OREAD].fid;
	if (fid == NOFID) {
		e = EBADF;
		goto error;
	}

	if (ap->a_eofflag)
		ap->a_eofflag = 0;

	if (off == 0 || np->direntries==NULL) {
		if((e=readdirs_9p(np->nmp, fid, &np->direntries, &np->ndirentries)))
			goto error;
		if (np->ndirentries && np->direntries==NULL)
			panic("bug in readdir");
	}
	
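	/* Emit one entry per cached 9P directory entry, in the 32- or 64-bit dirent format the caller asked for. */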
	dp = np->direntries;
	nd = np->ndirentries;
	for (i=off; i<nd; i++) {
		if (ISSET(ap->a_flags, VNODE_READDIR_EXTENDED)) {
			bzero(&de64, sizeof(de64));
			de64.d_ino = QTOI(dp[i].qid);
			de64.d_type = dp[i].mode&DMDIR? DT_DIR: DT_REG;
			nlen = strlen(dp[i].name);
			de64.d_namlen = MIN(nlen, sizeof(de64.d_name)-1);
			bcopy(dp[i].name, de64.d_name, de64.d_namlen);
			de64.d_reclen = DIRENT64_LEN(de64.d_namlen);
			plen = de64.d_reclen;
			p = &de64;
		} else {
			bzero(&de32, sizeof(de32));
			de32.d_ino = QTOI(dp[i].qid);
			de32.d_type = dp[i].mode&DMDIR? DT_DIR: DT_REG;
			nlen = strlen(dp[i].name);
			de32.d_namlen = MIN(nlen, sizeof(de32.d_name)-1);
			bcopy(dp[i].name, de32.d_name, de32.d_namlen);
			de32.d_reclen = DIRENT32_LEN(de32.d_namlen);
			plen = de32.d_reclen;
			p = &de32;
		}

		if (uio_resid(uio) < plen)
			break;

		if ((e=uiomove(p, plen, uio)))
			goto error;
	}

	uio_setoffset(uio, i);
	if (ap->a_numdirent)
		*ap->a_numdirent = i - off;
	if (i==nd && ap->a_eofflag) {
		*ap->a_eofflag = 1;
		free_9p(np->direntries);
		np->direntries = NULL;
		np->ndirentries = 0;
	}

error:
	nunlock_9p(np);
	return e;
}
Example #7
static int
vnop_open_9p(struct vnop_open_args *ap)
{
	openfid_9p *op;
	node_9p *np;
	fid_9p fid;
	qid_9p qid;
	uint32_t iounit;
	int e, flags, mode;

	TRACE();
	flags = 0;
	if (ap->a_mode)
		flags = OFLAGS(ap->a_mode);

	mode = flags & O_ACCMODE;
	CLR(flags, O_ACCMODE);

	CLR(flags, O_DIRECTORY|O_NONBLOCK|O_NOFOLLOW);
	CLR(flags, O_APPEND);

	/* locks implemented on the vfs layer */
	CLR(flags, O_EXLOCK|O_SHLOCK);

	if (ISSET(flags, O_TRUNC)) {
		SET(mode, OTRUNC);
		CLR(flags, O_TRUNC);
	}

	if (ISSET(flags, O_CLOEXEC)) {
		SET(mode, OCEXEC);
		CLR(flags, O_CLOEXEC);
	}

	if (ISSET(flags, O_EXCL)) {
		SET(mode, OEXCL);
		CLR(flags, O_EXCL);
	}

	/* vnop_creat just called */
	CLR(flags, O_CREAT);

	if (ISSET(flags, O_EVTONLY))
		CLR(flags, O_EVTONLY);
	if (ISSET(flags, FNOCACHE))
		CLR(flags, FNOCACHE);
	if (ISSET(flags, FNORDAHEAD))
		CLR(flags, FNORDAHEAD);

	if (flags) {
		DEBUG("unexpected open mode %x", flags);
		return ENOTSUP;
	}

	np = NTO9P(ap->a_vp);
	nlock_9p(np, NODE_LCK_EXCLUSIVE);
	op = ofidget(np, ap->a_mode);
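	/* No fid is open yet for this access mode: walk to a new fid and open it on the server. */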
	if (op->fid == NOFID) {
		if ((e=walk_9p(np->nmp, np->fid, NULL, 0, &fid, &qid)))
			goto error;	
		if ((e=open_9p(np->nmp, fid, mode, &qid, &iounit)))
			goto error;

		np->iounit = iounit;
		op->fid = fid;
	}

	/* no cache for dirs, .u or synthetic files */
	if (!vnode_isreg(np->vp) || np->dir.qid.vers==0) {
		vnode_setnocache(np->vp);
		vnode_setnoreadahead(np->vp);
	}

	OSIncrementAtomic(&op->ref);
	nunlock_9p(np);
	return 0;

error:
	clunk_9p(np->nmp, fid);
	nunlock_9p(np);
	return e;
}
Example #8
int
ucomopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int unit = UCOMUNIT(dev);
	usbd_status err;
	struct ucom_softc *sc = device_lookup_private(&ucom_cd, unit);
	struct ucom_buffer *ub;
	struct tty *tp;
	int s, i;
	int error;

	if (sc == NULL)
		return (ENXIO);

	if (sc->sc_dying)
		return (EIO);

	if (!device_is_active(sc->sc_dev))
		return (ENXIO);

	tp = sc->sc_tty;

	DPRINTF(("ucomopen: unit=%d, tp=%p\n", unit, tp));

	if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
		return (EBUSY);

	s = spltty();

	/*
	 * Do the following iff this is a first open.
	 */
	while (sc->sc_opening)
		tsleep(&sc->sc_opening, PRIBIO, "ucomop", 0);

	if (sc->sc_dying) {
		splx(s);
		return (EIO);
	}
	sc->sc_opening = 1;

	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		struct termios t;

		tp->t_dev = dev;

		if (sc->sc_methods->ucom_open != NULL) {
			error = sc->sc_methods->ucom_open(sc->sc_parent,
							  sc->sc_portno);
			if (error) {
				ucom_cleanup(sc);
				sc->sc_opening = 0;
				wakeup(&sc->sc_opening);
				splx(s);
				return (error);
			}
		}

		ucom_status_change(sc);

		/* Clear PPS capture state on first open. */
		mutex_spin_enter(&timecounter_lock);
		memset(&sc->sc_pps_state, 0, sizeof(sc->sc_pps_state));
		sc->sc_pps_state.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
		pps_init(&sc->sc_pps_state);
		mutex_spin_exit(&timecounter_lock);

		/*
		 * Initialize the termios status to the defaults.  Add in the
		 * sticky bits from TIOCSFLAGS.
		 */
		t.c_ispeed = 0;
		t.c_ospeed = TTYDEF_SPEED;
		t.c_cflag = TTYDEF_CFLAG;
		if (ISSET(sc->sc_swflags, TIOCFLAG_CLOCAL))
			SET(t.c_cflag, CLOCAL);
		if (ISSET(sc->sc_swflags, TIOCFLAG_CRTSCTS))
			SET(t.c_cflag, CRTSCTS);
		if (ISSET(sc->sc_swflags, TIOCFLAG_MDMBUF))
			SET(t.c_cflag, MDMBUF);
		/* Make sure ucomparam() will do something. */
		tp->t_ospeed = 0;
		(void) ucomparam(tp, &t);
		tp->t_iflag = TTYDEF_IFLAG;
		tp->t_oflag = TTYDEF_OFLAG;
		tp->t_lflag = TTYDEF_LFLAG;
		ttychars(tp);
		ttsetwater(tp);

		/*
		 * Turn on DTR.  We must always do this, even if carrier is not
		 * present, because otherwise we'd have to use TIOCSDTR
		 * immediately after setting CLOCAL, which applications do not
		 * expect.  We always assert DTR while the device is open
		 * unless explicitly requested to deassert it.  Ditto RTS.
		 */
		ucom_dtr(sc, 1);
		ucom_rts(sc, 1);

		DPRINTF(("ucomopen: open pipes in=%d out=%d\n",
			 sc->sc_bulkin_no, sc->sc_bulkout_no));

		/* Open the bulk pipes */
		err = usbd_open_pipe(sc->sc_iface, sc->sc_bulkin_no,
				     USBD_EXCLUSIVE_USE, &sc->sc_bulkin_pipe);
		if (err) {
			DPRINTF(("%s: open bulk in error (addr %d), err=%s\n",
				 device_xname(sc->sc_dev), sc->sc_bulkin_no,
				 usbd_errstr(err)));
			error = EIO;
			goto fail_0;
		}
		err = usbd_open_pipe(sc->sc_iface, sc->sc_bulkout_no,
				     USBD_EXCLUSIVE_USE, &sc->sc_bulkout_pipe);
		if (err) {
			DPRINTF(("%s: open bulk out error (addr %d), err=%s\n",
				 device_xname(sc->sc_dev), sc->sc_bulkout_no,
				 usbd_errstr(err)));
			error = EIO;
			goto fail_1;
		}

		sc->sc_rx_unblock = 0;
		sc->sc_rx_stopped = 0;
		sc->sc_tx_stopped = 0;

		memset(sc->sc_ibuff, 0, sizeof(sc->sc_ibuff));
		memset(sc->sc_obuff, 0, sizeof(sc->sc_obuff));

		SIMPLEQ_INIT(&sc->sc_ibuff_empty);
		SIMPLEQ_INIT(&sc->sc_ibuff_full);
		SIMPLEQ_INIT(&sc->sc_obuff_free);
		SIMPLEQ_INIT(&sc->sc_obuff_full);

		/* Allocate input buffers */
		for (ub = &sc->sc_ibuff[0]; ub != &sc->sc_ibuff[UCOM_IN_BUFFS];
		    ub++) {
			ub->ub_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (ub->ub_xfer == NULL) {
				error = ENOMEM;
				goto fail_2;
			}
			ub->ub_data = usbd_alloc_buffer(ub->ub_xfer,
			    sc->sc_ibufsizepad);
			if (ub->ub_data == NULL) {
				error = ENOMEM;
				goto fail_2;
			}

			if (ucomsubmitread(sc, ub) != USBD_NORMAL_COMPLETION) {
				error = EIO;
				goto fail_2;
			}
		}

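		/* Allocate output buffers; they wait on the free queue until there is data to transmit. */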
		for (ub = &sc->sc_obuff[0]; ub != &sc->sc_obuff[UCOM_OUT_BUFFS];
		    ub++) {
			ub->ub_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (ub->ub_xfer == NULL) {
				error = ENOMEM;
				goto fail_2;
			}
			ub->ub_data = usbd_alloc_buffer(ub->ub_xfer,
			    sc->sc_obufsize);
			if (ub->ub_data == NULL) {
				error = ENOMEM;
				goto fail_2;
			}

			SIMPLEQ_INSERT_TAIL(&sc->sc_obuff_free, ub, ub_link);
		}

	}
	sc->sc_opening = 0;
	wakeup(&sc->sc_opening);
	splx(s);

	error = ttyopen(tp, UCOMDIALOUT(dev), ISSET(flag, O_NONBLOCK));
	if (error)
		goto bad;

	error = (*tp->t_linesw->l_open)(dev, tp);
	if (error)
		goto bad;

	return (0);

fail_2:
	usbd_abort_pipe(sc->sc_bulkin_pipe);
	for (i = 0; i < UCOM_IN_BUFFS; i++) {
		if (sc->sc_ibuff[i].ub_xfer != NULL) {
			usbd_free_xfer(sc->sc_ibuff[i].ub_xfer);
			sc->sc_ibuff[i].ub_xfer = NULL;
			sc->sc_ibuff[i].ub_data = NULL;
		}
	}
	usbd_abort_pipe(sc->sc_bulkout_pipe);
	for (i = 0; i < UCOM_OUT_BUFFS; i++) {
		if (sc->sc_obuff[i].ub_xfer != NULL) {
			usbd_free_xfer(sc->sc_obuff[i].ub_xfer);
			sc->sc_obuff[i].ub_xfer = NULL;
			sc->sc_obuff[i].ub_data = NULL;
		}
	}

	usbd_close_pipe(sc->sc_bulkout_pipe);
	sc->sc_bulkout_pipe = NULL;
fail_1:
	usbd_close_pipe(sc->sc_bulkin_pipe);
	sc->sc_bulkin_pipe = NULL;
fail_0:
	sc->sc_opening = 0;
	wakeup(&sc->sc_opening);
	splx(s);
	return (error);

bad:
	s = spltty();
	CLR(tp->t_state, TS_BUSY);
	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		/*
		 * We failed to open the device, and nobody else had it opened.
		 * Clean up the state as appropriate.
		 */
		ucom_cleanup(sc);
	}
	splx(s);

	return (error);
}
Example #9
static int
ucomparam(struct tty *tp, struct termios *t)
{
	struct ucom_softc *sc = device_lookup_private(&ucom_cd,
	    UCOMUNIT(tp->t_dev));
	int error;

	if (sc == NULL || sc->sc_dying)
		return (EIO);

	/* Check requested parameters. */
	if (t->c_ispeed && t->c_ispeed != t->c_ospeed)
		return (EINVAL);

	/*
	 * For the console, always force CLOCAL and !HUPCL, so that the port
	 * is always active.
	 */
	if (ISSET(sc->sc_swflags, TIOCFLAG_SOFTCAR)) {
		SET(t->c_cflag, CLOCAL);
		CLR(t->c_cflag, HUPCL);
	}

	/*
	 * If there were no changes, don't do anything.  This avoids dropping
	 * input and improves performance when all we did was frob things like
	 * VMIN and VTIME.
	 */
	if (tp->t_ospeed == t->c_ospeed &&
	    tp->t_cflag == t->c_cflag)
		return (0);

	/* XXX lcr = ISSET(sc->sc_lcr, LCR_SBREAK) | cflag2lcr(t->c_cflag); */

	/* And copy to tty. */
	tp->t_ispeed = 0;
	tp->t_ospeed = t->c_ospeed;
	tp->t_cflag = t->c_cflag;

	if (sc->sc_methods->ucom_param != NULL) {
		error = sc->sc_methods->ucom_param(sc->sc_parent, sc->sc_portno,
			    t);
		if (error)
			return (error);
	}

	/* XXX worry about CHWFLOW */

	/*
	 * Update the tty layer's idea of the carrier bit, in case we changed
	 * CLOCAL or MDMBUF.  We don't hang up here; we only do that by
	 * explicit request.
	 */
	DPRINTF(("ucomparam: l_modem\n"));
	(void) (*tp->t_linesw->l_modem)(tp, ISSET(sc->sc_msr, UMSR_DCD));

#if 0
XXX what if the hardware is not open
	if (!ISSET(t->c_cflag, CHWFLOW)) {
		if (sc->sc_tx_stopped) {
			sc->sc_tx_stopped = 0;
			ucomstart(tp);
		}
	}
#endif

	return (0);
}
Example #10
/*
 * This is the tricky part -- do not casually change *anything* in here.  The
 * idea is to build the linked list of entries that are used by yfts_children
 * and yfts_read.  There are lots of special cases.
 *
 * The real slowdown in walking the tree is the stat calls.  If FTS_NOSTAT is
 * set and it's a physical walk (so that symbolic links can't be directories),
 * we can do things quickly.  First, if it's a 4.4BSD file system, the type
 * of the file is in the directory entry.  Otherwise, we assume that the number
 * of subdirectories in a node is equal to the number of links to the parent.
 * The former skips all stat calls.  The latter skips stat calls in any leaf
 * directories and for any files after the subdirectories in the directory have
 * been found, cutting the stat calls by about 2/3.
 */
static FTSENT *
fts_build(FTS * sp, int type)
{
    struct dirent *dp;
    FTSENT *p, *head;
    int nitems;
    FTSENT *cur, *tail;

#ifdef _win_
    dird dirpd;
    struct DIR *dirp;
#else
    DIR *dirp;
#endif

    void *oldaddr;
    int cderrno, descend, len, level, maxlen, nlinks, saved_errno,
        nostat, doadjust;
    char *cp;

    /* Set current node pointer. */
    cur = sp->fts_cur;

    /*
     * Open the directory for reading.  If this fails, we're done.
     * If being called from yfts_read, set the fts_info field.
     */
#ifdef FTS_WHITEOUT
    if (ISSET(FTS_WHITEOUT))
        oflag = DTF_NODUP|DTF_REWIND;
    else
        oflag = DTF_HIDEW|DTF_NODUP|DTF_REWIND;
#else
#define __opendir2(path, flag) opendir(path)
#endif
    if ((dirp = __opendir2(cur->fts_accpath, oflag)) == NULL) {
        if (type == BREAD) {
            cur->fts_info = FTS_DNR;
            cur->fts_errno = errno;
        }
        return (NULL);
    }

#ifdef _win_
    dirpd = get_dird(cur->fts_accpath);
#endif

    /*
     * Nlinks is the number of possible entries of type directory in the
     * directory if we're cheating on stat calls, 0 if we're not doing
     * any stat calls at all, -1 if we're doing stats on everything.
     */
    if (type == BNAMES) {
        nlinks = 0;
        /* Be quiet about nostat, GCC. */
        nostat = 0;
    } else if (ISSET(FTS_NOSTAT) && ISSET(FTS_PHYSICAL)) {
        nlinks = cur->fts_nlink - (ISSET(FTS_SEEDOT) ? 0 : 2);
        nostat = 1;
    } else {
        nlinks = -1;
        nostat = 0;
    }

#ifdef notdef
    (void)printf("nlinks == %d (cur: %d)\n", nlinks, cur->fts_nlink);
    (void)printf("NOSTAT %d PHYSICAL %d SEEDOT %d\n",
                 ISSET(FTS_NOSTAT), ISSET(FTS_PHYSICAL), ISSET(FTS_SEEDOT));
#endif
    /*
     * If we're going to need to stat anything or we want to descend
     * and stay in the directory, chdir.  If this fails we keep going,
     * but set a flag so we don't chdir after the post-order visit.
     * We won't be able to stat anything, but we can still return the
     * names themselves.  Note, that since yfts_read won't be able to
     * chdir into the directory, it will have to return different path
     * names than before, i.e. "a/b" instead of "b".  Since the node
     * has already been visited in pre-order, have to wait until the
     * post-order visit to return the error.  There is a special case
     * here, if there was nothing to stat then it's not an error to
     * not be able to stat.  This is all fairly nasty.  If a program
     * needed sorted entries or stat information, they had better be
     * checking FTS_NS on the returned nodes.
     */
    cderrno = 0;
    if (nlinks || type == BREAD) {
#ifndef _win_
        if (fts_safe_changedir(sp, cur, dirfd(dirp), NULL)) {
#else
        if (fts_safe_changedir(sp, cur, -1, dirpd)) {
#endif

            if (nlinks && type == BREAD)
                cur->fts_errno = errno;
            cur->fts_flags |= FTS_DONTCHDIR;
            descend = 0;
            cderrno = errno;
            (void)closedir(dirp);
            dirp = NULL;
#ifdef _win_
            close_dird(dirpd);
            dirpd = invalidDirD;
#else
            UNUSED(invalidDirD);
#endif
        } else
            descend = 1;
    } else
        descend = 0;

    /*
     * Figure out the max file name length that can be stored in the
     * current path -- the inner loop allocates more path as necessary.
     * We really wouldn't have to do the maxlen calculations here, we
     * could do them in yfts_read before returning the path, but it's a
     * lot easier here since the length is part of the dirent structure.
     *
     * If not changing directories set a pointer so that can just append
     * each new name into the path.
     */
    len = NAPPEND(cur);
    if (ISSET(FTS_NOCHDIR)) {
        cp = sp->fts_path + len;
        *cp++ = LOCSLASH_C;
    } else {
        /* GCC, you're too verbose. */
        cp = NULL;
    }
    len++;
    maxlen = sp->fts_pathlen - len;

    level = cur->fts_level + 1;

    /* Read the directory, attaching each entry to the `link' pointer. */
    doadjust = 0;

    // make sure the buffer is large enough for any directory entry
    TTempBuf dpe;

    for (head = tail = NULL, nitems = 0; dirp && (dp = yreaddir(dirp, (struct dirent*)dpe.Data())) != 0;) {
        if (!ISSET(FTS_SEEDOT) && ISDOT(dp->d_name))
            continue;

        if ((p = fts_alloc(sp, dp->d_name, (int)strlen(dp->d_name))) == NULL)
            goto mem1;
        if (strlen(dp->d_name) >= (size_t)maxlen) {    /* include space for NUL */
            oldaddr = sp->fts_path;
            if (fts_palloc(sp, strlen(dp->d_name) +len + 1)) {
                /*
                 * No more memory for path or structures.  Save
                 * errno, free up the current structure and the
                 * structures already allocated.
                 */
mem1:
                saved_errno = errno;
                if (p)
                    free(p);
                fts_lfree(head);
                (void)closedir(dirp);
#ifdef _win_
                close_dird(dirpd);
#endif
                cur->fts_info = FTS_ERR;
                SET(FTS_STOP);
                errno = saved_errno;
                return (NULL);
            }
            /* Did realloc() change the pointer? */
            if (oldaddr != sp->fts_path) {
                doadjust = 1;
                if (ISSET(FTS_NOCHDIR))
                    cp = sp->fts_path + len;
            }
            maxlen = sp->fts_pathlen - len;
        }

        if (len + strlen(dp->d_name) >= USHRT_MAX) {
            /*
             * In an FTSENT, fts_pathlen is a u_short so it is
             * possible to wraparound here.  If we do, free up
             * the current structure and the structures already
             * allocated, then error out with ENAMETOOLONG.
             */
            free(p);
            fts_lfree(head);
            (void)closedir(dirp);
#ifdef _win_
            close_dird(dirpd);
#endif
            cur->fts_info = FTS_ERR;
            SET(FTS_STOP);
            errno = ENAMETOOLONG;
            return (NULL);
        }
        p->fts_level = (short)level;
        p->fts_parent = sp->fts_cur;
        p->fts_pathlen = u_short(len + strlen(dp->d_name));

#ifdef FTS_WHITEOUT
        if (dp->d_type == DT_WHT)
            p->fts_flags |= FTS_ISW;
#endif

        if (cderrno) {
            if (nlinks) {
                p->fts_info = FTS_NS;
                p->fts_errno = cderrno;
            } else
                p->fts_info = FTS_NSOK;
            p->fts_accpath = cur->fts_accpath;
        } else if (nlinks == 0
#ifdef DT_DIR
                   || (nostat &&
                       dp->d_type != DT_DIR && dp->d_type != DT_UNKNOWN)
#endif
                  ) {
            p->fts_accpath =
                ISSET(FTS_NOCHDIR) ? p->fts_path : p->fts_name;
            p->fts_info = FTS_NSOK;
        } else {
            /* Build a file name for fts_stat to stat. */
            if (ISSET(FTS_NOCHDIR)) {
                p->fts_accpath = p->fts_path;
                memmove((void*)cp, (void*)p->fts_name, (size_t)p->fts_namelen + 1);
            } else
                p->fts_accpath = p->fts_name;
            /* Stat it. */
            p->fts_info = fts_stat(sp, p, 0);

            /* Decrement link count if applicable. */
            if (nlinks > 0 && (p->fts_info == FTS_D ||
                               p->fts_info == FTS_DC || p->fts_info == FTS_DOT))
                --nlinks;
        }

        /* We walk in directory order so "ls -f" doesn't get upset. */
        p->fts_link = NULL;
        if (head == NULL)
            head = tail = p;
        else {
            tail->fts_link = p;
            tail = p;
        }
        ++nitems;
    }
    if (dirp) {
        (void)closedir(dirp);
#ifdef _win_
        close_dird(dirpd);
#endif
    }

    /*
     * If realloc() changed the address of the path, adjust the
     * addresses for the rest of the tree and the dir list.
     */
    if (doadjust)
        fts_padjust(sp);

    /*
     * If not changing directories, reset the path back to original
     * state.
     */
    if (ISSET(FTS_NOCHDIR)) {
        if (len == sp->fts_pathlen || nitems == 0)
            --cp;
        *cp = '\0';
    }

    /*
     * If descended after called from yfts_children or after called from
     * yfts_read and nothing found, get back.  At the root level we use
     * the saved fd; if one of yfts_open()'s arguments is a relative path
     * to an empty directory, we wind up here with no other way back.  If
     * can't get back, we're done.
     */
    if (descend && (type == BCHILD || !nitems) &&
            (cur->fts_level == FTS_ROOTLEVEL ?
             FCHDIR(sp, sp->fts_rfd) :
             fts_safe_changedir(sp, cur->fts_parent, -1, ".."))) {
        cur->fts_info = FTS_ERR;
        SET(FTS_STOP);
        return (NULL);
    }

    /* If didn't find anything, return NULL. */
    if (!nitems) {
        if (type == BREAD)
            cur->fts_info = FTS_DP;
        return (NULL);
    }

    /* Sort the entries. */
    if (sp->fts_compar && nitems > 1)
        head = fts_sort(sp, head, nitems);
    return (head);
}

static u_short
fts_stat(FTS * sp, FTSENT * p, int follow)
{
    dev_t dev;
    ino_t ino;
    struct stat *sbp, sb;
    int saved_errno;
    /* If user needs stat info, stat buffer already allocated. */
    sbp = ISSET(FTS_NOSTAT) ? &sb : p->fts_statp;

#ifdef FTS_WHITEOUT
    /* check for whiteout */
    if (p->fts_flags & FTS_ISW) {
        if (sbp != &sb) {
            memset(sbp, '\0', sizeof (*sbp));
            sbp->st_mode = S_IFWHT;
        }
        return (FTS_W);
    }
#endif

    /*
     * If doing a logical walk, or application requested FTS_FOLLOW, do
     * a stat(2).  If that fails, check for a non-existent symlink.  If
     * fail, set the errno from the stat call.
     */
    if (ISSET(FTS_LOGICAL) || follow) {
        if (stat(p->fts_accpath, sbp)) {
            saved_errno = errno;
            if (!lstat(p->fts_accpath, sbp)) {
                errno = 0;
                return (FTS_SLNONE);
            }
            p->fts_errno = saved_errno;
            memset(sbp, 0, sizeof(struct stat));
            return (FTS_NS);
        }
    }
    else if (lstat(p->fts_accpath, sbp)) {
        p->fts_errno = errno;
        memset(sbp, 0, sizeof(struct stat));
        return (FTS_NS);
    }

    if (S_ISDIR(sbp->st_mode)) {
        /*
         * Set the device/inode.  Used to find cycles and check for
         * crossing mount points.  Also remember the link count, used
         * in fts_build to limit the number of stat calls.  It is
         * understood that these fields are only referenced if fts_info
         * is set to FTS_D.
         */
        dev = p->fts_dev = sbp->st_dev;
        ino = p->fts_ino = sbp->st_ino;
        p->fts_nlink = sbp->st_nlink;

        const char* fts_name_x = p->fts_name;
        if (ISDOT(fts_name_x))
            return (FTS_DOT);

        /*
         * Cycle detection is done by brute force when the directory
         * is first encountered.  If the tree gets deep enough or the
         * number of symbolic links to directories is high enough,
         * something faster might be worthwhile.
         */

        //There is no way to detect symlink or mount cycles on win32

#ifndef _win_
        FTSENT *t;
        for (t = p->fts_parent;
                t->fts_level >= FTS_ROOTLEVEL; t = t->fts_parent)
            if (ino == t->fts_ino && dev == t->fts_dev) {
                p->fts_cycle = t;
                return (FTS_DC);
            }
#endif /*_win_*/
        return (FTS_D);

    }
    if (S_ISLNK(sbp->st_mode))
        return (FTS_SL);
    if (S_ISREG(sbp->st_mode))
        return (FTS_F);
    return (FTS_DEFAULT);
}
Example #11
int file_open_shaper(struct shaper *my, char *fname, char *flags)
{
	char line[1024], group[256]="(unnamed)";
	float sum=0, load=0, peak=0;
	float scale[12][31][7][24];
	char ff[1024];

	/* clear everything */
	memset(scale,0,sizeof(scale));
	linenum=0; 
	file=fname;

	/* "-" means stdin */
	my->fp = (strcmp(fname,"-")==0?stdin:(gl_findfile(fname,NULL,R_OK,ff,sizeof(ff))?fopen(ff,flags):NULL));
	if (my->fp==NULL)
	{
		gl_error("shaper file %s: %s", fname, strerror(errno));
		my->status = TS_DONE;
		return 0;
	}
	my->status=TS_OPEN;
	my->type = FT_FILE;
	/* TODO: these should be read from the shape file, or better yet, inferred from it */
	my->step = 3600; /* default interval step is one hour */
	my->interval = 24; /* default unit shape integrated over one day */
	memset(my->shape,0,sizeof(my->shape));
	/* load the file into the shape */
	while (fgets(line,sizeof(line),my->fp)!=NULL)
	{
		unsigned char *hours, *days, *months, *weekdays;
		char min[256],hour[256],day[256],month[256],weekday[256],value[32];
		char *p=line;
		linenum++;
		while (isspace(*p)) p++;
		if (p[0]=='\0' || p[0]=='#') continue;
		if (strcmp(group,"")!=0 && (isdigit(p[0]) || p[0]=='*'))
		{	/* shape value */
			int h, d, m, w;
			if (sscanf(line,"%s %s %s %s %[^,],%[^,\n]",min,hour,day,month,weekday,value)<6)
			{
				gl_error("%s(%d) : shape '%s' has specification '%s'", file, linenum, group, line);
				continue;
			}
			/* minutes are ignored right now */
			if (min[0]!='*') gl_warning("%s(%d) : minutes are ignored in '%s'", file, linenum, line);
			hours=hourmap(hour);
			days=daymap(day);
			months=monthmap(month);
			weekdays=weekdaymap(weekday);
			load = (float)atof(value);
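			/* Apply this load to every (month, weekday, day, hour) slot selected by the cron-style fields. */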
			for (m=0; m<12; m++)
			{
				if (!ISSET(months,m)) continue;
				for (w=0; w<7; w++)
				{
					if (!ISSET(weekdays,w)) continue;
					for (d=0; d<31; d++)
					{
						if (!ISSET(days,d)) continue;
						for (h=0; h<24; h++)
						{
							if (!ISSET(hours,h)) continue;
							scale[m][d][w][h] = -load; /* negative indicates unscaled value */
						}
					}
				}
			}
			sum += load; /* integrate over shape */
			if (load>peak) peak=load; /* keep the highest load in the shape (that's going to be 255) */
		}
		else if (p[0]=='}')
		{	/* end shape group */
			int h, d, m, w;
			my->scale = peak/255/sum;
			/* rescale group */
			for (m=0; m<12; m++)
			{
				for (w=0; w<7; w++)
				{
					for (d=0; d<31; d++)
					{
						for (h=0; h<24; h++)
						{
							if (scale[m][d][w][h]<0)
								my->shape[m][d][w][h] = (unsigned char)(-scale[m][d][w][h] / peak * 255 +0.5); /* negative removes scaled value indicator */
						}
					}
				}
			}
			strcpy(group,"");
		}
		else if (sscanf(p,"%s {",group)==1)
		{	/* new shape group */
			sum=0;
		}
		else
		{	/* syntax error */
			gl_error("%s(%d) : shape specification '%s' is not valid", file, linenum, line);
		}
	}
	return 1;
}
Example #12
FTSENT *
yfts_children(FTS * sp, int instr)
{
    FTSENT *p;
    dird fd;
    if (instr && instr != FTS_NAMEONLY) {
        errno = EINVAL;
        return (NULL);
    }

    /* Set current node pointer. */
    p = sp->fts_cur;

    /*
     * Errno set to 0 so user can distinguish empty directory from
     * an error.
     */
    errno = 0;

    /* Fatal errors stop here. */
    if (ISSET(FTS_STOP))
        return (NULL);

    /* Return logical hierarchy of user's arguments. */
    if (p->fts_info == FTS_INIT)
        return (p->fts_link);

    /*
     * If not a directory being visited in pre-order, stop here.  Could
     * allow FTS_DNR, assuming the user has fixed the problem, but the
     * same effect is available with FTS_AGAIN.
     */
    if (p->fts_info != FTS_D /* && p->fts_info != FTS_DNR */)
        return (NULL);

    /* Free up any previous child list. */
    if (sp->fts_child)
        fts_lfree(sp->fts_child);

    if (instr == FTS_NAMEONLY) {
        SET(FTS_NAMEONLY);
        instr = BNAMES;
    } else
        instr = BCHILD;

    /*
     * If using chdir on a relative path and called BEFORE yfts_read does
     * its chdir to the root of a traversal, we can lose -- we need to
     * chdir into the subdirectory, and we don't know where the current
     * directory is, so we can't get back so that the upcoming chdir by
     * yfts_read will work.
     */
    if (p->fts_level != FTS_ROOTLEVEL || p->fts_accpath[0] == LOCSLASH_C ||
            ISSET(FTS_NOCHDIR))
        return (sp->fts_child = fts_build(sp, instr));

    if (valid_dird(fd = get_cwdd()))
        return (NULL);
    sp->fts_child = fts_build(sp, instr);
    if (chdir_dird(fd)) {
        close_dird(fd);
        return (NULL);
    }
    close_dird(fd);
    return (sp->fts_child);
}
Example #13
FTSENT *
yfts_read(FTS * sp) {
    FTSENT *p, *tmp;
    int instr;
    char *t;
    int saved_errno;

    ClearLastSystemError();

    /* If finished or unrecoverable error, return NULL. */
    if (sp->fts_cur == NULL || ISSET(FTS_STOP))
        return (NULL);

    /* Set current node pointer. */
    p = sp->fts_cur;

    /* Save and zero out user instructions. */
    instr = p->fts_instr;
    p->fts_instr = FTS_NOINSTR;

    /* Any type of file may be re-visited; re-stat and re-turn. */
    if (instr == FTS_AGAIN) {
        p->fts_info = fts_stat(sp, p, 0);
        return (p);
    }

    /*
     * Following a symlink -- SLNONE test allows application to see
     * SLNONE and recover.  If indirecting through a symlink, have
     * keep a pointer to current location.  If unable to get that
     * pointer, follow fails.
     */
    if (instr == FTS_FOLLOW &&
            (p->fts_info == FTS_SL || p->fts_info == FTS_SLNONE)) {
        p->fts_info = fts_stat(sp, p, 1);
        if (p->fts_info == FTS_D && !ISSET(FTS_NOCHDIR)) {
            if (valid_dird(p->fts_symfd = get_cwdd())) {
                p->fts_errno = errno;
                p->fts_info = FTS_ERR;
            } else
                p->fts_flags |= FTS_SYMFOLLOW;
        }
        return (p);
    }

    /* Directory in pre-order. */
    if (p->fts_info == FTS_D) {
        /* If skipped or crossed mount point, do post-order visit. */
        if (instr == FTS_SKIP ||
                (ISSET(FTS_XDEV) && p->fts_dev != sp->fts_dev)) {
            if (p->fts_flags & FTS_SYMFOLLOW)
                close_dird(p->fts_symfd);
            if (sp->fts_child) {
                fts_lfree(sp->fts_child);
                sp->fts_child = NULL;
            }
            p->fts_info = FTS_DP;
            return (p);
        }

        /* Rebuild if only read the names and now traversing. */
        if (sp->fts_child && ISSET(FTS_NAMEONLY)) {
            CLR(FTS_NAMEONLY);
            fts_lfree(sp->fts_child);
            sp->fts_child = NULL;
        }

        /*
         * Cd to the subdirectory.
         *
         * If have already read and now fail to chdir, whack the list
         * to make the names come out right, and set the parent errno
         * so the application will eventually get an error condition.
         * Set the FTS_DONTCHDIR flag so that when we logically change
         * directories back to the parent we don't do a chdir.
         *
         * If haven't read do so.  If the read fails, fts_build sets
         * FTS_STOP or the fts_info field of the node.
         */
        if (sp->fts_child) {
            if (fts_safe_changedir(sp, p, -1, p->fts_accpath)) {
                p->fts_errno = errno;
                p->fts_flags |= FTS_DONTCHDIR;
                for (p = sp->fts_child; p; p = p->fts_link)
                    p->fts_accpath =
                        p->fts_parent->fts_accpath;
            }
        } else if ((sp->fts_child = fts_build(sp, BREAD)) == NULL) {
            if (ISSET(FTS_STOP))
                return (NULL);
            return (p);
        }
        p = sp->fts_child;
        sp->fts_child = NULL;
        goto name;
    }

    /* Move to the next node on this level. */
next:
    tmp = p;
    if ((p = p->fts_link) != 0) {
        free(tmp);

        /*
         * If reached the top, return to the original directory (or
         * the root of the tree), and load the paths for the next root.
         */
        if (p->fts_level == FTS_ROOTLEVEL) {
            if (FCHDIR(sp, sp->fts_rfd)) {
                SET(FTS_STOP);
                return (NULL);
            }
            fts_load(sp, p);
            return (sp->fts_cur = p);
        }

        /*
         * User may have called yfts_set on the node.  If skipped,
         * ignore.  If followed, get a file descriptor so we can
         * get back if necessary.
         */
        if (p->fts_instr == FTS_SKIP)
            goto next;
        if (p->fts_instr == FTS_FOLLOW) {
            p->fts_info = fts_stat(sp, p, 1);
            if (p->fts_info == FTS_D && !ISSET(FTS_NOCHDIR)) {
                if (valid_dird(p->fts_symfd =
                                   get_cwdd())) {
                    p->fts_errno = errno;
                    p->fts_info = FTS_ERR;
                } else
                    p->fts_flags |= FTS_SYMFOLLOW;
            }
            p->fts_instr = FTS_NOINSTR;
        }

name:
        t = sp->fts_path + NAPPEND(p->fts_parent);
        *t++ = LOCSLASH_C;
        memmove(t, p->fts_name, (size_t)p->fts_namelen + 1);
        return (sp->fts_cur = p);
    }

    /* Move up to the parent node. */
    p = tmp->fts_parent;
    free(tmp);

    if (p->fts_level == FTS_ROOTPARENTLEVEL) {
        /*
         * Done; free everything up and set errno to 0 so the user
         * can distinguish between error and EOF.
         */
        free(p);
        errno = 0;
        return (sp->fts_cur = NULL);
    }

    /* NUL terminate the pathname. */
    sp->fts_path[p->fts_pathlen] = '\0';

    /*
     * Return to the parent directory.  If at a root node or came through
     * a symlink, go back through the file descriptor.  Otherwise, cd up
     * one directory.
     */
    if (p->fts_level == FTS_ROOTLEVEL) {
        if (FCHDIR(sp, sp->fts_rfd)) {
            SET(FTS_STOP);
            return (NULL);
        }
    } else if (p->fts_flags & FTS_SYMFOLLOW) {
        if (FCHDIR(sp, p->fts_symfd)) {
            saved_errno = errno;
            close_dird(p->fts_symfd);
            errno = saved_errno;
            SET(FTS_STOP);
            return (NULL);
        }
        close_dird(p->fts_symfd);
    } else if (!(p->fts_flags & FTS_DONTCHDIR) &&
               fts_safe_changedir(sp, p->fts_parent, -1, "..")) {
        SET(FTS_STOP);
        return (NULL);
    }
    p->fts_info = p->fts_errno ? FTS_ERR : FTS_DP;
    return (sp->fts_cur = p);
}
Example #14
FTS *
yfts_open(char * const * argv, int options, int (*compar) (const FTSENT **, const FTSENT **))
{
    FTS *sp;
    FTSENT *p, *root;
    int nitems;
    FTSENT *parent, *tmp;
    int len;

    errno = 0;

    /* Options check. */
    if (options & ~FTS_OPTIONMASK) {
        errno = EINVAL;
        return (NULL);
    }

    /* Allocate/initialize the stream */
    if ((sp = (FTS*)malloc(sizeof(FTS))) == NULL)
        return (NULL);
    memset(sp, 0, sizeof(FTS));
    sp->fts_compar = compar;
    sp->fts_options = options;

    /* Shush, GCC. */
    tmp = NULL;

    /* Logical walks turn on NOCHDIR; symbolic links are too hard. */
    if (ISSET(FTS_LOGICAL))
        SET(FTS_NOCHDIR);

    /*
     * Start out with 1K of path space, and enough, in any case,
     * to hold the user's paths.
     */
    if (fts_palloc(sp, MAX(fts_maxarglen(argv), MAXPATHLEN)))
        goto mem1;

    /* Allocate/initialize root's parent. */
    if ((parent = fts_alloc(sp, "", 0)) == NULL)
        goto mem2;
    parent->fts_level = FTS_ROOTPARENTLEVEL;

    /* Allocate/initialize root(s). */
    for (root = NULL, nitems = 0; *argv; ++argv, ++nitems) {
        /* Don't allow zero-length paths. */

        len = strlen(*argv);

// Any subsequent Windows call expects no trailing slashes, so remove them here
#ifdef _win_
        while (len && ((*argv)[len-1] == '\\' || (*argv)[len-1] == '/')) {
            --len;
        }
#endif

        if (len == 0) {
            errno = ENOENT;
            goto mem3;
        }

        p = fts_alloc(sp, *argv, len);
        p->fts_level = FTS_ROOTLEVEL;
        p->fts_parent = parent;
        p->fts_accpath = p->fts_name;
        p->fts_info = fts_stat(sp, p, ISSET(FTS_COMFOLLOW));

        /* Command-line "." and ".." are real directories. */
        if (p->fts_info == FTS_DOT)
            p->fts_info = FTS_D;

        /*
         * If comparison routine supplied, traverse in sorted
         * order; otherwise traverse in the order specified.
         */
        if (compar) {
            p->fts_link = root;
            root = p;
        } else {
            p->fts_link = NULL;
            if (root == NULL)
                tmp = root = p;
            else {
                tmp->fts_link = p;
                tmp = p;
            }
        }
    }
    if (compar && nitems > 1)
        root = fts_sort(sp, root, nitems);

    /*
     * Allocate a dummy pointer and make yfts_read think that we've just
     * finished the node before the root(s); set p->fts_info to FTS_INIT
     * so that everything about the "current" node is ignored.
     */
    if ((sp->fts_cur = fts_alloc(sp, "", 0)) == NULL)
        goto mem3;
    sp->fts_cur->fts_level = FTS_ROOTLEVEL;
    sp->fts_cur->fts_link = root;
    sp->fts_cur->fts_info = FTS_INIT;

    /*
     * If using chdir(2), grab a file descriptor pointing to dot to ensure
     * that we can get back here; this could be avoided for some paths,
     * but almost certainly not worth the effort.  Slashes, symbolic links,
     * and ".." are all fairly nasty problems.  Note, if we can't get the
     * descriptor we run anyway, just more slowly.
     */

    if (!ISSET(FTS_NOCHDIR) && valid_dird(sp->fts_rfd = get_cwdd()))
        SET(FTS_NOCHDIR);

    return (sp);

mem3:
    fts_lfree(root);
    free(parent);
mem2:
    free(sp->fts_path);
mem1:
    free(sp);
    return (NULL);
}
Example #15
/*
 * Find a buffer which is available for use.
 *
 * We must notify getblk if we slept during the buffer allocation. When
 * that happens, we allocate a buffer anyway (unless tsleep is interrupted
 * or times out) and return !0.
 */
int
getnewbuf(int slpflag, int slptimeo, struct buf **bpp)
{
	struct buf *bp;
	int s, ret, error;

	*bpp = NULL;
	ret = 0;

start:
	s = splbio();
	/*
	 * Wake up cleaner if we're getting low on buffers.
	 */
	if (numdirtypages >= hidirtypages)
		wakeup(&bd_req);

	if ((numcleanpages <= locleanpages) &&
	    curproc != syncerproc && curproc != cleanerproc) {
		needbuffer++;
		error = tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf",
				slptimeo);
		splx(s);
		if (error)
			return (1);
		ret = 1;
		goto start;
	}
	if ((bp = TAILQ_FIRST(&bufqueues[BQ_CLEAN])) == NULL) {
		/* wait for a free buffer of any kind */
		nobuffers = 1;
		error = tsleep(&nobuffers, slpflag|(PRIBIO-3),
				"getnewbuf", slptimeo);
		splx(s);
		if (error)
			return (1);
		ret = 1;
		goto start;
	}

	bremfree(bp);

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

#ifdef DIAGNOSTIC
	if (ISSET(bp->b_flags, B_DELWRI))
		panic("Dirty buffer on BQ_CLEAN");
#endif

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	splx(s);

#ifdef DIAGNOSTIC
	/* CLEAN buffers must have no dependencies */ 
	if (LIST_FIRST(&bp->b_dep) != NULL)
		panic("BQ_CLEAN has buffer with dependencies");
#endif

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	bremhash(bp);
	*bpp = bp;
	return (ret);
}
Example #16
int
udf_mountfs(struct vnode *devvp, struct mount *mp, uint32_t lb, struct proc *p)
{
	struct buf *bp = NULL;
	struct anchor_vdp avdp;
	struct umount *ump = NULL;
	struct part_desc *pd;
	struct logvol_desc *lvd;
	struct fileset_desc *fsd;
	struct extfile_entry *xfentry;
	struct file_entry *fentry;
	uint32_t sector, size, mvds_start, mvds_end;
	uint32_t fsd_offset = 0;
	uint16_t part_num = 0, fsd_part = 0;
	int error = EINVAL;
	int logvol_found = 0, part_found = 0, fsd_found = 0;
	int bsize;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		return (error);

	error = VOP_OPEN(devvp, FREAD, FSCRED, p);
	if (error)
		return (error);

	ump = malloc(sizeof(*ump), M_UDFMOUNT, M_WAITOK | M_ZERO);

	mp->mnt_data = (qaddr_t) ump;
	mp->mnt_stat.f_fsid.val[0] = devvp->v_rdev;
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	mp->mnt_flag |= MNT_LOCAL;

	ump->um_mountp = mp;
	ump->um_dev = devvp->v_rdev;
	ump->um_devvp = devvp;

	bsize = 2048;	/* Should probe the media for its size. */

	/* 
	 * Get the Anchor Volume Descriptor Pointer from sector 256.
	 * Should also check sector n - 256, n, and 512.
	 */
	sector = 256;
	if ((error = bread(devvp, sector * btodb(bsize), bsize, &bp)) != 0)
		goto bail;
	if ((error = udf_checktag((struct desc_tag *)bp->b_data, TAGID_ANCHOR)))
		goto bail;

	bcopy(bp->b_data, &avdp, sizeof(struct anchor_vdp));
	brelse(bp);
	bp = NULL;

	/*
	 * Extract the Partition Descriptor and Logical Volume Descriptor
	 * from the Volume Descriptor Sequence.
	 * Should we care about the partition type right now?
	 * What about multiple partitions?
	 */
	mvds_start = letoh32(avdp.main_vds_ex.loc);
	mvds_end = mvds_start + (letoh32(avdp.main_vds_ex.len) - 1) / bsize;
	for (sector = mvds_start; sector < mvds_end; sector++) {
		if ((error = bread(devvp, sector * btodb(bsize), bsize, 
				   &bp)) != 0) {
			printf("Can't read sector %d of VDS\n", sector);
			goto bail;
		}
		lvd = (struct logvol_desc *)bp->b_data;
		if (!udf_checktag(&lvd->tag, TAGID_LOGVOL)) {
			ump->um_bsize = letoh32(lvd->lb_size);
			ump->um_bmask = ump->um_bsize - 1;
			ump->um_bshift = ffs(ump->um_bsize) - 1;
			fsd_part = letoh16(lvd->_lvd_use.fsd_loc.loc.part_num);
			fsd_offset = letoh32(lvd->_lvd_use.fsd_loc.loc.lb_num);
			if (udf_find_partmaps(ump, lvd))
				break;
			logvol_found = 1;
		}
		pd = (struct part_desc *)bp->b_data;
		if (!udf_checktag(&pd->tag, TAGID_PARTITION)) {
			part_found = 1;
			part_num = letoh16(pd->part_num);
			ump->um_len = ump->um_reallen = letoh32(pd->part_len);
			ump->um_start = ump->um_realstart = letoh32(pd->start_loc);
		}

		brelse(bp); 
		bp = NULL;
		if ((part_found) && (logvol_found))
			break;
	}

	if (!part_found || !logvol_found) {
		error = EINVAL;
		goto bail;
	}

	if (ISSET(ump->um_flags, UDF_MNT_USES_META)) {
		/* Read Metadata File 'File Entry' to find Metadata file. */
		struct long_ad *la;
		sector = ump->um_start + ump->um_meta_start; /* Set in udf_get_mpartmap() */
		if ((error = RDSECTOR(devvp, sector, ump->um_bsize, &bp)) != 0) {
			printf("Cannot read sector %d for Metadata File Entry\n", sector);
			error = EINVAL;
			goto bail;
		}
		xfentry = (struct extfile_entry *)bp->b_data;
		fentry = (struct file_entry *)bp->b_data;
		if (udf_checktag(&xfentry->tag, TAGID_EXTFENTRY) == 0)
			la = (struct long_ad *)&xfentry->data[letoh32(xfentry->l_ea)];
		else if (udf_checktag(&fentry->tag, TAGID_FENTRY) == 0)
			la = (struct long_ad *)&fentry->data[letoh32(fentry->l_ea)];
		else {
			printf("Invalid Metadata File FE @ sector %d! (tag.id %d)\n",
			    sector, fentry->tag.id);
			error = EINVAL;
			goto bail;
		}
		ump->um_meta_start = letoh32(la->loc.lb_num);
		ump->um_meta_len = letoh32(la->len);
		if (bp != NULL) {
			brelse(bp);
			bp = NULL;
		}
	} else if (fsd_part != part_num) {
		printf("FSD does not lie within the partition!\n");
		error = EINVAL;
		goto bail;
	}

	mtx_init(&ump->um_hashmtx, IPL_NONE);
	ump->um_hashtbl = hashinit(UDF_HASHTBLSIZE, M_UDFMOUNT, M_WAITOK,
	    &ump->um_hashsz);
	arc4random_buf(&ump->um_hashkey, sizeof(ump->um_hashkey));

	/* Get the VAT, if needed */
	if (ump->um_flags & UDF_MNT_FIND_VAT) {
		error = udf_vat_get(ump, lb);
		if (error)
			goto bail;
	}

	/*
	 * Grab the Fileset Descriptor
	 * Thanks to Chuck McCrobie <*****@*****.**> for pointing
	 * me in the right direction here.
	 */

	if (ISSET(ump->um_flags, UDF_MNT_USES_META))
		sector = ump->um_meta_start; 
	else
		sector = fsd_offset;
	udf_vat_map(ump, &sector);
	if ((error = RDSECTOR(devvp, sector, ump->um_bsize, &bp)) != 0) {
		printf("Cannot read sector %d of FSD\n", sector);
		goto bail;
	}
	fsd = (struct fileset_desc *)bp->b_data;
	if (!udf_checktag(&fsd->tag, TAGID_FSD)) {
		fsd_found = 1;
		bcopy(&fsd->rootdir_icb, &ump->um_root_icb,
		    sizeof(struct long_ad));
		if (ISSET(ump->um_flags, UDF_MNT_USES_META)) {
			ump->um_root_icb.loc.lb_num += ump->um_meta_start; 
			ump->um_root_icb.loc.part_num = part_num;
		}
	}

	brelse(bp);
	bp = NULL;

	if (!fsd_found) {
		printf("Couldn't find the fsd\n");
		error = EINVAL;
		goto bail;
	}

	/*
	 * Find the file entry for the root directory.
	 */
	sector = letoh32(ump->um_root_icb.loc.lb_num);
	size = letoh32(ump->um_root_icb.len);
	udf_vat_map(ump, &sector);
	if ((error = udf_readlblks(ump, sector, size, &bp)) != 0) {
		printf("Cannot read sector %d\n", sector);
		goto bail;
	}

	xfentry = (struct extfile_entry *)bp->b_data;
	fentry = (struct file_entry *)bp->b_data;
	error = udf_checktag(&xfentry->tag, TAGID_EXTFENTRY);
	if (error) {
	    	error = udf_checktag(&fentry->tag, TAGID_FENTRY);
		if (error) {
			printf("Invalid root file entry!\n");
			goto bail;
		}
	}

	brelse(bp);
	bp = NULL;

	devvp->v_specmountpoint = mp;

	return (0);

bail:
	if (ump->um_hashtbl != NULL)
		free(ump->um_hashtbl, M_UDFMOUNT, 0);

	if (ump != NULL) {
		free(ump, M_UDFMOUNT, 0);
		mp->mnt_data = NULL;
		mp->mnt_flag &= ~MNT_LOCAL;
	}
	if (bp != NULL)
		brelse(bp);

	vn_lock(devvp, LK_EXCLUSIVE|LK_RETRY, p);
	VOP_CLOSE(devvp, FREAD, FSCRED, p);
	VOP_UNLOCK(devvp, 0, p);

	return (error);
}
Example #17
/*
 * Build a new environment and either clear potentially dangerous
 * variables from the old one or start with a clean slate.
 * Also adds sudo-specific variables (SUDO_*).
 */
void
rebuild_env(void)
{
    char **old_envp, **ep, *cp, *ps1;
    char idbuf[MAX_UID_T_LEN];
    unsigned int didvar;
    int reset_home = FALSE;

    /*
     * Either clean out the environment or reset to a safe default.
     */
    ps1 = NULL;
    didvar = 0;
    env.env_len = 0;
    env.env_size = 128;
    old_envp = env.envp;
    env.envp = emalloc2(env.env_size, sizeof(char *));
#ifdef ENV_DEBUG
    memset(env.envp, 0, env.env_size * sizeof(char *));
#endif

    /* Reset HOME based on target user if configured to. */
    if (ISSET(sudo_mode, MODE_RUN)) {
	if (def_always_set_home ||
	    ISSET(sudo_mode, MODE_RESET_HOME | MODE_LOGIN_SHELL) || 
	    (ISSET(sudo_mode, MODE_SHELL) && def_set_home))
	    reset_home = TRUE;
    }

    if (def_env_reset || ISSET(sudo_mode, MODE_LOGIN_SHELL)) {
	/* Pull in vars we want to keep from the old environment. */
	for (ep = old_envp; *ep; ep++) {
	    int keepit;

	    /* Skip variables with values beginning with () (bash functions) */
	    if ((cp = strchr(*ep, '=')) != NULL) {
		if (strncmp(cp, "=() ", 3) == 0)
		    continue;
	    }

	    /*
	     * First check certain variables for '%' and '/' characters.
	     * If no match there, check the keep list.
	     * If nothing matched, we remove it from the environment.
	     */
	    keepit = matches_env_check(*ep);
	    if (keepit == -1)
		keepit = matches_env_keep(*ep);

	    /* For SUDO_PS1 -> PS1 conversion. */
	    if (strncmp(*ep, "SUDO_PS1=", 8) == 0)
		ps1 = *ep + 5;

	    if (keepit) {
		/* Preserve variable. */
		switch (**ep) {
		    case 'H':
			if (strncmp(*ep, "HOME=", 5) == 0)
			    SET(didvar, DID_HOME);
			break;
		    case 'L':
			if (strncmp(*ep, "LOGNAME=", 8) == 0)
			    SET(didvar, DID_LOGNAME);
			break;
		    case 'M':
			if (strncmp(*ep, "MAIL=", 5) == 0)
			    SET(didvar, DID_MAIL);
			break;
		    case 'P':
			if (strncmp(*ep, "PATH=", 5) == 0)
			    SET(didvar, DID_PATH);
			break;
		    case 'S':
			if (strncmp(*ep, "SHELL=", 6) == 0)
			    SET(didvar, DID_SHELL);
			break;
		    case 'T':
			if (strncmp(*ep, "TERM=", 5) == 0)
			    SET(didvar, DID_TERM);
			break;
		    case 'U':
			if (strncmp(*ep, "USER=", 5) == 0)
			    SET(didvar, DID_USER);
			if (strncmp(*ep, "USERNAME=", 9) == 0)
			    SET(didvar, DID_USERNAME);
			break;
		}
		sudo_putenv(*ep, FALSE, FALSE);
	    }
	}
	didvar |= didvar << 8;		/* convert DID_* bits to KEPT_* bits */

	/*
	 * Add in defaults.  In -i mode these come from the runas user,
	 * otherwise they may be from the invoking user's environment.
	 */
	if (ISSET(sudo_mode, MODE_LOGIN_SHELL)) {
	    sudo_setenv("SHELL", runas_pw->pw_shell, ISSET(didvar, DID_SHELL));
	    sudo_setenv("LOGNAME", runas_pw->pw_name,
		ISSET(didvar, DID_LOGNAME));
	    sudo_setenv("USER", runas_pw->pw_name, ISSET(didvar, DID_USER));
	    sudo_setenv("USERNAME", runas_pw->pw_name,
		ISSET(didvar, DID_USERNAME));
	} else {
	    if (!ISSET(didvar, DID_SHELL))
		sudo_setenv("SHELL", sudo_user.pw->pw_shell, FALSE);
	    if (!ISSET(didvar, DID_LOGNAME))
		sudo_setenv("LOGNAME", user_name, FALSE);
	    if (!ISSET(didvar, DID_USER))
		sudo_setenv("USER", user_name, FALSE);
	    if (!ISSET(didvar, DID_USERNAME))
		sudo_setenv("USERNAME", user_name, FALSE);
	}

	/* If we didn't keep HOME, reset it based on target user. */
	if (!ISSET(didvar, KEPT_HOME))
	    reset_home = TRUE;

	/*
	 * Set MAIL to target user in -i mode or if MAIL is not preserved
	 * from user's environment.
	 */
	if (ISSET(sudo_mode, MODE_LOGIN_SHELL) || !ISSET(didvar, KEPT_MAIL)) {
	    cp = _PATH_MAILDIR;
	    if (cp[sizeof(_PATH_MAILDIR) - 2] == '/')
		easprintf(&cp, "MAIL=%s%s", _PATH_MAILDIR, runas_pw->pw_name);
	    else
		easprintf(&cp, "MAIL=%s/%s", _PATH_MAILDIR, runas_pw->pw_name);
	    sudo_putenv(cp, ISSET(didvar, DID_MAIL), TRUE);
	}
    } else {
	/*
	 * Copy environ entries as long as they don't match env_delete or
	 * env_check.
	 */
	for (ep = old_envp; *ep; ep++) {
	    int okvar;

	    /* Skip variables with values beginning with () (bash functions) */
	    if ((cp = strchr(*ep, '=')) != NULL) {
		if (strncmp(cp, "=() ", 3) == 0)
		    continue;
	    }

	    /*
	     * First check variables against the blacklist in env_delete.
	     * If no match there check for '%' and '/' characters.
	     */
	    okvar = matches_env_delete(*ep) != TRUE;
	    if (okvar)
		okvar = matches_env_check(*ep) != FALSE;

	    if (okvar) {
		if (strncmp(*ep, "SUDO_PS1=", 9) == 0)
		    ps1 = *ep + 5;
		else if (strncmp(*ep, "PATH=", 5) == 0)
		    SET(didvar, DID_PATH);
		else if (strncmp(*ep, "TERM=", 5) == 0)
		    SET(didvar, DID_TERM);
		sudo_putenv(*ep, FALSE, FALSE);
	    }
	}
    }
    /* Replace the PATH envariable with a secure one? */
    if (def_secure_path && !user_is_exempt()) {
	sudo_setenv("PATH", def_secure_path, TRUE);
	SET(didvar, DID_PATH);
    }

    /*
     * Set $USER, $LOGNAME and $USERNAME to target if "set_logname" is not
     * disabled.  We skip this if we are running a login shell (because
     * they have already been set) or sudoedit (because we want the
     * editor to find the user's startup files).
     */
    if (def_set_logname && !ISSET(sudo_mode, MODE_LOGIN_SHELL|MODE_EDIT)) {
	if (!ISSET(didvar, KEPT_LOGNAME))
	    sudo_setenv("LOGNAME", runas_pw->pw_name, TRUE);
	if (!ISSET(didvar, KEPT_USER))
	    sudo_setenv("USER", runas_pw->pw_name, TRUE);
	if (!ISSET(didvar, KEPT_USERNAME))
	    sudo_setenv("USERNAME", runas_pw->pw_name, TRUE);
    }

    /* Set $HOME to target user if not preserving user's value. */
    if (reset_home)
	sudo_setenv("HOME", runas_pw->pw_dir, TRUE);

    /* Provide default values for $TERM and $PATH if they are not set. */
    if (!ISSET(didvar, DID_TERM))
	sudo_putenv("TERM=unknown", FALSE, FALSE);
    if (!ISSET(didvar, DID_PATH))
	sudo_setenv("PATH", _PATH_STDPATH, FALSE);

    /* Set PS1 if SUDO_PS1 is set. */
    if (ps1 != NULL)
	sudo_putenv(ps1, TRUE, TRUE);

    /* Add the SUDO_COMMAND envariable (cmnd + args). */
    if (user_args) {
	easprintf(&cp, "%s %s", user_cmnd, user_args);
	sudo_setenv("SUDO_COMMAND", cp, TRUE);
	efree(cp);
    } else {
	sudo_setenv("SUDO_COMMAND", user_cmnd, TRUE);
    }

    /* Add the SUDO_USER, SUDO_UID, SUDO_GID environment variables. */
    sudo_setenv("SUDO_USER", user_name, TRUE);
    snprintf(idbuf, sizeof(idbuf), "%u", (unsigned int) user_uid);
    sudo_setenv("SUDO_UID", idbuf, TRUE);
    snprintf(idbuf, sizeof(idbuf), "%u", (unsigned int) user_gid);
    sudo_setenv("SUDO_GID", idbuf, TRUE);

    /* Free old environment. */
    efree(old_envp);
}
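
rebuild_env() records which variables were carried over with DID_* bits and later consults the matching KEPT_* bits. A small illustrative sketch of one common way to lay out such a pairing follows; the bit values and the shift trick are assumptions for the demo, not necessarily sudo's exact headers:

#include <stdio.h>

#define SET(t, f)	((t) |= (f))
#define ISSET(t, f)	((t) & (f))

/* Hypothetical layout: each KEPT_* flag is the matching DID_* flag
 * shifted into the high byte, so one shift turns "seen" into "kept". */
#define DID_TERM	0x0001
#define DID_PATH	0x0002
#define DID_HOME	0x0004
#define KEPT_TERM	(DID_TERM << 8)
#define KEPT_PATH	(DID_PATH << 8)
#define KEPT_HOME	(DID_HOME << 8)

int main(void)
{
	unsigned int didvar = 0;

	SET(didvar, DID_PATH);		/* PATH was preserved from the old env */
	didvar |= didvar << 8;		/* every DID_* bit gains its KEPT_* twin */

	printf("kept PATH: %d, kept HOME: %d\n",
	    ISSET(didvar, KEPT_PATH) != 0, ISSET(didvar, KEPT_HOME) != 0);
	return 0;
}

With a layout like this, a single shift at the end of the keep loop is enough to distinguish "variable was preserved from the caller" (KEPT_*) from "variable was set at some point" (DID_*).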
Example #18
/*
 * Handle an exception.
 * In the case of a kernel trap, we return the pc where to resume if
 * pcb_onfault is set, otherwise, return old pc.
 */
void
trap(struct trap_frame *trapframe)
{
	struct cpu_info *ci = curcpu();
	struct proc *p = ci->ci_curproc;
	int type;

	type = (trapframe->cause & CR_EXC_CODE) >> CR_EXC_CODE_SHIFT;

#if defined(CPU_R8000) && !defined(DEBUG_INTERRUPT)
	if (type != T_INT)
#endif
		trapdebug_enter(ci, trapframe, -1);

#ifdef CPU_R8000
	if (type != T_INT && type != T_SYSCALL)
#else
	if (type != T_SYSCALL)
#endif
		atomic_add_int(&uvmexp.traps, 1);
	if (USERMODE(trapframe->sr)) {
		type |= T_USER;
		refreshcreds(p);
	}

	/*
	 * Enable hardware interrupts if they were on before the trap;
	 * enable IPI interrupts only otherwise.
	 */
	switch (type) {
#ifdef CPU_R8000
	case T_INT:
	case T_INT | T_USER:
#endif
	case T_BREAK:
		break;
	default:
		if (ISSET(trapframe->sr, SR_INT_ENAB))
			enableintr();
		else {
#ifdef MULTIPROCESSOR
			ENABLEIPI();
#endif
		}
		break;
	}

#ifdef CPU_R8000
	/*
	 * Some exception causes on R8000 are actually detected by external
	 * circuitry, and as such are reported as external interrupts.
	 * On R8000 kernels, external interrupts vector to trap() instead of
	 * interrupt(), so that we can process these particular exceptions
	 * as if they were triggered as regular exceptions.
	 */
	if ((type & ~T_USER) == T_INT) {
		/*
		 * Similar reality check as done in interrupt(), in case
		 * an interrupt occurred between a write to COP_0_STATUS_REG
		 * and it taking effect.
		 */
		if (!ISSET(trapframe->sr, SR_INT_ENAB))
			return;

		if (trapframe->cause & CR_VCE) {
#ifndef DEBUG_INTERRUPT
			trapdebug_enter(ci, trapframe, -1);
#endif
			panic("VCE or TLBX");
		}
		if (trapframe->cause & CR_FPE) {
#ifndef DEBUG_INTERRUPT
			trapdebug_enter(ci, trapframe, -1);
#endif
			itsa(trapframe, ci, p, T_FPE | (type & T_USER));
			cp0_reset_cause(CR_FPE);
		}
		if (trapframe->cause & CR_INT_MASK)
			interrupt(trapframe);

		return;	/* no userret */
	} else
#endif
		itsa(trapframe, ci, p, type);

	if (type & T_USER)
		userret(p);
}
Example #19
static int
ncreate_9p(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, char *target)
{
	openfid_9p *op;
	mount_9p *nmp;
	node_9p *dnp, *np;
	uint32_t perm, iounit;
	uint8_t mode;
	fid_9p fid, openfid;
	qid_9p qid;
	char *ext, buf[64];
	int e;

	dnp = NTO9P(dvp);
	nmp = dnp->nmp;
	fid = NOFID;
	openfid = NOFID;
	*vpp = NULL;

	if (vnode_vfsisrdonly(dvp))
		return EROFS;

	if (!ISSET(nmp->flags, F_DOTU) && vap->va_type!=VREG && vap->va_type!=VDIR)
		return ENOTSUP;

	if (!ISSET(nmp->flags, FLAG_DSSTORE) &&
		strncmp(".DS_Store", cnp->cn_nameptr, cnp->cn_namelen)==0)
		return EINVAL;

	ext = "";
	mode = ORDWR;
	perm = MAKEIMODE(vap->va_type, vap->va_mode) & 0777;
	switch (vap->va_type) {
	case VREG:
		break;

	case VDIR:
		mode = OREAD;
		SET(perm, DMDIR);
		break;

	case VBLK:
	case VCHR:
		SET(perm, DMDEVICE);
		snprintf(buf, sizeof(buf), "%c %d %d", vap->va_type==VBLK?'b':'c', vap->va_rdev>>20, vap->va_rdev&((1<<20) - 1));
		ext = buf;
		break;

	case VFIFO:
		SET(perm, DMNAMEDPIPE);
		break;

	case VSOCK:
		SET(perm, DMSOCKET);
		break;

	case VLNK:
		SET(perm, DMSYMLINK);
		ext = target;
		break;

	default:
		return EINVAL;
	}
	
	if (ISSET(vap->va_vaflags, VA_EXCLUSIVE))
		SET(mode, OEXCL);

	
	nlock_9p(dnp, NODE_LCK_EXCLUSIVE);
	if ((e=walk_9p(nmp, dnp->fid, NULL, 0, &openfid, &qid)))
		goto error;
	if ((e=create_9p(nmp, openfid, cnp->cn_nameptr, cnp->cn_namelen, mode, perm, ext, &qid, &iounit)))
		goto error;
	if ((e=walk_9p(nmp, dnp->fid, cnp->cn_nameptr, cnp->cn_namelen, &fid, &qid)))
		goto error;
	if ((e=nget_9p(nmp, fid, qid, dvp, vpp, cnp, ctx)))
		goto error;

	cache_purge_negatives(dvp);
	np = NTO9P(*vpp);
	np->iounit = iounit;
	op = &np->openfid[vap->va_type==VDIR? OREAD: ORDWR];
	op->fid = openfid;
	OSIncrementAtomic(&op->ref);
	nunlock_9p(np);
	nunlock_9p(dnp);
	return 0;

error:
	clunk_9p(nmp, openfid);
	clunk_9p(nmp, fid);
	nunlock_9p(dnp);
	return e;
}
Example #20
/*
 - walk - step through the string either quickly or slowly
 == static const char *walk(struct match *m, const char *start, \
 ==	const char *stop, sopno startst, sopno stopst, bool fast);
 */
static const char * /* where it ended, or NULL */
walk(struct match *m, const char *start, const char *stop, sopno startst,
	sopno stopst, bool fast)
{
	states st = m->st;
	states fresh = m->fresh;
	states empty = m->empty;
	states tmp = m->tmp;
	const char *p = start;
	wint_t c;
	wint_t lastc;		/* previous c */
	wint_t flagch;
	int i;
	const char *matchp;	/* last p at which a match ended */
	size_t clen;

	AT("slow", start, stop, startst, stopst);
	CLEAR(st);
	SET1(st, startst);
	SP("sstart", st, *p);
	st = step(m->g, startst, stopst, st, NOTHING, st);
	if (fast)
		ASSIGN(fresh, st);
	matchp = NULL;
	if (start == m->offp || (start == m->beginp && !(m->eflags&REG_NOTBOL)))
		c = OUT;
	else {
		/*
		 * XXX Wrong if the previous character was multi-byte.
		 * Newline never is (in encodings supported by FreeBSD),
		 * so this only breaks the ISWORD tests below.
		 */
		c = (uch)*(start - 1);
	}
	for (;;) {
		/* next character */
		lastc = c;
		if (p == m->endp) {
			c = OUT;
			clen = 0;
		} else
			clen = XMBRTOWC(&c, p, m->endp - p, &m->mbs, BADCHAR);

		if (fast && EQ(st, fresh))
			matchp = p;

		/* is there an EOL and/or BOL between lastc and c? */
		flagch = '\0';
		i = 0;
		if ( (lastc == '\n' && m->g->cflags&REG_NEWLINE) ||
				(lastc == OUT && !(m->eflags&REG_NOTBOL)) ) {
			flagch = BOL;
			i = m->g->nbol;
		}
		if ( (c == '\n' && m->g->cflags&REG_NEWLINE) ||
				(c == OUT && !(m->eflags&REG_NOTEOL)) ) {
			flagch = (flagch == BOL) ? BOLEOL : EOL;
			i += m->g->neol;
		}
		if (i != 0) {
			for (; i > 0; i--)
				st = step(m->g, startst, stopst, st, flagch, st);
			SP("sboleol", st, c);
		}

		/* how about a word boundary? */
		if ( (flagch == BOL || (lastc != OUT && !ISWORD(lastc))) &&
					(c != OUT && ISWORD(c)) ) {
			flagch = BOW;
		}
		if ( (lastc != OUT && ISWORD(lastc)) &&
				(flagch == EOL || (c != OUT && !ISWORD(c))) ) {
			flagch = EOW;
		}
		if (flagch == BOW || flagch == EOW) {
			st = step(m->g, startst, stopst, st, flagch, st);
			SP("sboweow", st, c);
		}

		/* are we done? */
		if (ISSET(st, stopst)) {
			if (fast)
				break;
			else
				matchp = p;
		}
		if (EQ(st, empty) || p == stop || clen > stop - p)
			break;		/* NOTE BREAK OUT */

		/* no, we must deal with this character */
		ASSIGN(tmp, st);
		if (fast)
			ASSIGN(st, fresh);
		else
			ASSIGN(st, empty);
		assert(c != OUT);
		st = step(m->g, startst, stopst, tmp, c, st);
		SP("saft", st, c);
		assert(EQ(step(m->g, startst, stopst, st, NOTHING, st), st));
		p += clen;
	}

	if (fast) {
		assert(matchp != NULL);
		m->coldp = matchp;
		if (ISSET(st, stopst))
			return (p + XMBRTOWC(NULL, p, stop - p, &m->mbs, 0));
		else
			return (NULL);
	} else
		return (matchp);
}
Example #21
static int
vnop_setattr_9p(struct vnop_setattr_args *ap)
{
	struct vnode_attr *vap;
	vnode_t vp;
	node_9p *np;
	dir_9p d;
	int e;

	TRACE();
	vp = ap->a_vp;
	vap = ap->a_vap;
	np = NTO9P(vp);

	if (vnode_vfsisrdonly(vp))
		return EROFS;
	
	if (vnode_isvroot(vp))
		return EACCES;

	nulldir(&d);
	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
		if (vnode_isdir(vp))
			return EISDIR;
		d.length = vap->va_data_size;
	}
	VATTR_SET_SUPPORTED(vap, va_data_size);

	if (VATTR_IS_ACTIVE(vap, va_access_time))
		d.atime = vap->va_access_time.tv_sec;
	VATTR_SET_SUPPORTED(vap, va_access_time);

	if (VATTR_IS_ACTIVE(vap, va_modify_time))
		d.mtime = vap->va_modify_time.tv_sec;
	VATTR_SET_SUPPORTED(vap, va_modify_time);

	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		d.mode = vap->va_mode & 0777;
		if (vnode_isdir(vp))
			SET(d.mode, DMDIR);
		if (ISSET(np->nmp->flags, F_DOTU)) {
			switch (vnode_vtype(vp)) {
			case VBLK:
			case VCHR:
				SET(d.mode, DMDEVICE);
				break;
			case VLNK:
				SET(d.mode, DMSYMLINK);
				break;
			case VSOCK:
				SET(d.mode, DMSOCKET);
				break;
			case VFIFO:
				SET(d.mode, DMNAMEDPIPE);
				break;
			default:
				break;
			}
		}
	}
	VATTR_SET_SUPPORTED(vap, va_mode);

	nlock_9p(np, NODE_LCK_EXCLUSIVE);
	e = wstat_9p(np->nmp, np->fid, &d);
	np->dirtimer = 0;

	if (e==0 && d.length!=~0)
		ubc_setsize(vp, d.length);

	nunlock_9p(np);
	return e;
}
Example #22
/* Our main file browser function.  path is the tilde-expanded path we
 * start browsing from. */
char *do_browser(char *path, DIR *dir)
{
    char *retval = NULL;
    int kbinput;
    bool old_const_update = ISSET(CONST_UPDATE);
    char *prev_dir = NULL;
	/* The directory we were in before backing up to "..". */
    char *ans = NULL;
	/* The last answer the user typed at the statusbar prompt. */
    size_t old_selected;
	/* The selected file we had before the current selected file. */
    functionptrtype func;
	/* The function of the key the user typed in. */

    curs_set(0);
    blank_statusbar();
    bottombars(MBROWSER);
    wnoutrefresh(bottomwin);

    UNSET(CONST_UPDATE);

    ans = mallocstrcpy(NULL, "");

  change_browser_directory:
	/* We go here after we select a new directory. */

    /* Start with no key pressed. */
    kbinput = ERR;

    path = mallocstrassn(path, get_full_path(path));

    /* Save the current path in order to be used later. */
    path_save = path;

    assert(path != NULL && path[strlen(path) - 1] == '/');

    /* Get the file list, and set longest and width in the process. */
    browser_init(path, dir);

    assert(filelist != NULL);

    /* Sort the file list. */
    qsort(filelist, filelist_len, sizeof(char *), diralphasort);

    /* If prev_dir isn't NULL, select the directory saved in it, and
     * then blow it away. */
    if (prev_dir != NULL) {
	browser_select_dirname(prev_dir);

	free(prev_dir);
	prev_dir = NULL;
    /* Otherwise, select the first file or directory in the list. */
    } else
	selected = 0;

    old_selected = (size_t)-1;

    titlebar(path);

    while (TRUE) {
	struct stat st;
	int i;
	size_t fileline = selected / width;
		/* The line number the selected file is on. */
	char *new_path;
		/* The path we switch to at the "Go to Directory"
		 * prompt. */

	/* Display the file list if we don't have a key, or if the
	 * selected file has changed, and set width in the process. */
	if (kbinput == ERR || old_selected != selected)
	    browser_refresh();

	old_selected = selected;

	kbinput = get_kbinput(edit);

#ifndef NANO_TINY
	if (kbinput == KEY_WINCH) {
	    kbinput = ERR;
	    curs_set(0);
	    continue;
	}
#endif

#ifndef DISABLE_MOUSE
	if (kbinput == KEY_MOUSE) {
	    int mouse_x, mouse_y;

	    /* We can click on the edit window to select a
	     * filename. */
	    if (get_mouseinput(&mouse_x, &mouse_y, TRUE) == 0 &&
		wmouse_trafo(edit, &mouse_y, &mouse_x, FALSE)) {
		/* longest is the width of each column.  There
		 * are two spaces between each column. */
		selected = (fileline / editwinrows) *
				(editwinrows * width) + (mouse_y *
				width) + (mouse_x / (longest + 2));

		/* If they clicked beyond the end of a row,
		 * select the last filename in that row. */
		if (mouse_x > width * (longest + 2))
		    selected--;

		/* If we're off the screen, select the last filename. */
		if (selected > filelist_len - 1)
		    selected = filelist_len - 1;

		/* If we selected the same filename as last time,
		 * put back the Enter key so that it's read in. */
		if (old_selected == selected)
		    unget_kbinput(sc_seq_or(do_enter_void, 0), FALSE, FALSE);
	    }
	}
#endif /* !DISABLE_MOUSE */

	func = parse_browser_input(&kbinput);

	if (func == total_refresh) {
	    total_redraw();
	} else if (func == do_help_void) {
#ifndef DISABLE_HELP
	    do_help_void();
	    /* Perhaps the window dimensions have changed. */
	    browser_refresh();
	    curs_set(0);
#else
	    nano_disabled_msg();
#endif
	} else if (func == do_search) {
	    /* Search for a filename. */
	    curs_set(1);
	    do_filesearch();
	    curs_set(0);
	} else if (func == do_research) {
	    /* Search for another filename. */
	    do_fileresearch();
	} else if (func == do_page_up) {
	    if (selected >= (editwinrows + fileline % editwinrows) * width)
		selected -= (editwinrows + fileline % editwinrows) * width;
	    else
		selected = 0;
	} else if (func == do_page_down) {
	    selected += (editwinrows - fileline % editwinrows) * width;
	    if (selected > filelist_len - 1)
		selected = filelist_len - 1;
	} else if (func == do_first_file) {
	    selected = 0;
	} else if (func == do_last_file) {
	    selected = filelist_len - 1;
	} else if (func == goto_dir_void) {
	    /* Go to a specific directory. */
	    curs_set(1);
	    i = do_prompt(TRUE,
#ifndef DISABLE_TABCOMP
			FALSE,
#endif
			MGOTODIR, ans,
#ifndef DISABLE_HISTORIES
			NULL,
#endif
			/* TRANSLATORS: This is a prompt. */
			browser_refresh, _("Go To Directory"));

	    curs_set(0);
	    bottombars(MBROWSER);

	    /* If the directory begins with a newline (i.e. an
	     * encoded null), treat it as though it's blank. */
	    if (i < 0 || *answer == '\n') {
		/* We canceled.  Indicate that on the statusbar, and
		* blank out ans, since we're done with it. */
		statusbar(_("Cancelled"));
		ans = mallocstrcpy(ans, "");
		continue;
	    } else if (i != 0) {
		/* Put back the "Go to Directory" key and save
		 * answer in ans, so that the file list is displayed
		 * again, the prompt is displayed again, and what we
		 * typed before at the prompt is displayed again. */
		unget_kbinput(sc_seq_or(do_gotolinecolumn_void, 0), FALSE, FALSE);
		ans = mallocstrcpy(ans, answer);
		continue;
	    }

	    /* We have a directory.  Blank out ans, since we're done
	     * with it. */
	    ans = mallocstrcpy(ans, "");

	    /* Convert newlines to nulls, just before we go to the
	     * directory. */
	    sunder(answer);
	    align(&answer);

	    new_path = real_dir_from_tilde(answer);

	    if (new_path[0] != '/') {
		new_path = charealloc(new_path, strlen(path) +
				strlen(answer) + 1);
		sprintf(new_path, "%s%s", path, answer);
	    }

#ifndef DISABLE_OPERATINGDIR
	    if (check_operating_dir(new_path, FALSE)) {
		statusbar(_("Can't go outside of %s in restricted mode"),
				operating_dir);
		free(new_path);
		continue;
	    }
#endif

	    dir = opendir(new_path);
	    if (dir == NULL) {
		/* We can't open this directory for some reason.
		* Complain. */
		statusbar(_("Error reading %s: %s"), answer,
				strerror(errno));
		beep();
		free(new_path);
		continue;
	    }

	    /* Start over again with the new path value. */
	    free(path);
	    path = new_path;
	    goto change_browser_directory;
	} else if (func == do_up_void) {
	    if (selected >= width)
		selected -= width;
	} else if (func == do_down_void) {
	    if (selected + width <= filelist_len - 1)
		selected += width;
	} else if (func == do_left) {
	    if (selected > 0)
		selected--;
	} else if (func == do_right) {
	    if (selected < filelist_len - 1)
		selected++;
	} else if (func == do_enter_void) {
	    /* We can't move up from "/". */
	    if (strcmp(filelist[selected], "/..") == 0) {
		statusbar(_("Can't move up a directory"));
		beep();
		continue;
	    }

#ifndef DISABLE_OPERATINGDIR
	    /* Note: The selected file can be outside the operating
	     * directory if it's ".." or if it's a symlink to a
	     * directory outside the operating directory. */
	    if (check_operating_dir(filelist[selected], FALSE)) {
		statusbar(_("Can't go outside of %s in restricted mode"),
				operating_dir);
		beep();
		continue;
	    }
#endif

	    if (stat(filelist[selected], &st) == -1) {
		/* We can't open this file for some reason.
		 * Complain. */
		 statusbar(_("Error reading %s: %s"),
				filelist[selected], strerror(errno));
		 beep();
		 continue;
	    }

	    if (!S_ISDIR(st.st_mode)) {
		/* We've successfully opened a file, we're done, so
		 * get out. */
		retval = mallocstrcpy(NULL, filelist[selected]);
		break;
	    } else if (strcmp(tail(filelist[selected]), "..") == 0)
		/* We've successfully opened the parent directory,
		 * save the current directory in prev_dir, so that
		 * we can easily return to it by hitting Enter. */
		prev_dir = mallocstrcpy(NULL, striponedir(filelist[selected]));

	    dir = opendir(filelist[selected]);
	    if (dir == NULL) {
		/* We can't open this directory for some reason.
		 * Complain. */
		statusbar(_("Error reading %s: %s"),
				filelist[selected], strerror(errno));
		beep();
		continue;
	    }

	    path = mallocstrcpy(path, filelist[selected]);

	    /* Start over again with the new path value. */
	    goto change_browser_directory;
	} else if (func == do_exit) {
	    /* Exit from the file browser. */
	    break;
	}
    }
    titlebar(NULL);
    edit_refresh();
    curs_set(1);
    if (old_const_update)
	SET(CONST_UPDATE);

    free(path);
    free(ans);

    free_chararray(filelist, filelist_len);
    filelist = NULL;
    filelist_len = 0;

    return retval;
}
Example #23
AssimpShader::AssimpShader() :
        program_(0), u_mvp_(0), u_diffuse_color_(0), u_ambient_color_(
                0), u_texture_(0), u_color_(0), u_opacity_(
                0), program_list_(0) {
    program_list_ = new GLProgram*[AS_TOTAL_GL_PROGRAM_COUNT];

    const char* vertex_shader_strings[AS_TOTAL_SHADER_STRINGS_COUNT];
    GLint vertex_shader_string_lengths[AS_TOTAL_SHADER_STRINGS_COUNT];
    const char* fragment_shader_strings[AS_TOTAL_SHADER_STRINGS_COUNT];
    GLint fragment_shader_string_lengths[AS_TOTAL_SHADER_STRINGS_COUNT];

    for (int i = 0; i < AS_TOTAL_GL_PROGRAM_COUNT; i++) {
        int counter = 0;

        vertex_shader_strings[counter] =  GLSL_VERSION;
        vertex_shader_string_lengths[counter] = (GLint) strlen(GLSL_VERSION);
        fragment_shader_strings[counter] = GLSL_VERSION;
        fragment_shader_string_lengths[counter] = (GLint) strlen(GLSL_VERSION);
        counter++;

        // TODO: remove duplicate code
        if (ISSET(i, AS_DIFFUSE_TEXTURE)) {
            vertex_shader_strings[counter] =  DIFFUSE_TEXTURE;
            vertex_shader_string_lengths[counter] = (GLint) strlen(DIFFUSE_TEXTURE);
            fragment_shader_strings[counter] = DIFFUSE_TEXTURE;
            fragment_shader_string_lengths[counter] = (GLint) strlen(DIFFUSE_TEXTURE);
            counter++;
        } else {
            vertex_shader_strings[counter] =  NO_DIFFUSE_TEXTURE;
            vertex_shader_string_lengths[counter] = (GLint) strlen(NO_DIFFUSE_TEXTURE);
            fragment_shader_strings[counter] = NO_DIFFUSE_TEXTURE;
            fragment_shader_string_lengths[counter] = (GLint) strlen(NO_DIFFUSE_TEXTURE);
            counter++;
        }

        if (ISSET(i, AS_SPECULAR_TEXTURE)) {
            vertex_shader_strings[counter] =  SPECULAR_TEXTURE;
            vertex_shader_string_lengths[counter] = (GLint) strlen(SPECULAR_TEXTURE);
            fragment_shader_strings[counter] = SPECULAR_TEXTURE;
            fragment_shader_string_lengths[counter] = (GLint) strlen(SPECULAR_TEXTURE);
            counter++;
        } else {
            vertex_shader_strings[counter] =  NO_SPECULAR_TEXTURE;
            vertex_shader_string_lengths[counter] = (GLint) strlen(NO_SPECULAR_TEXTURE);
            fragment_shader_strings[counter] = NO_SPECULAR_TEXTURE;
            fragment_shader_string_lengths[counter] = (GLint) strlen(NO_SPECULAR_TEXTURE);
            counter++;
        }

        if (ISSET(i, AS_SKINNING)) {
            vertex_shader_strings[counter] =  SKINNING;
            vertex_shader_string_lengths[counter] = (GLint) strlen(SKINNING);
            fragment_shader_strings[counter] = SKINNING;
            fragment_shader_string_lengths[counter] = (GLint) strlen(SKINNING);
            counter++;
        } else {
            vertex_shader_strings[counter] =  NO_SKINNING;
            vertex_shader_string_lengths[counter] = (GLint) strlen(NO_SKINNING);
            fragment_shader_strings[counter] = NO_SKINNING;
            fragment_shader_string_lengths[counter] = (GLint) strlen(NO_SKINNING);
            counter++;
        }

        /* Shader should be added in the last */
        vertex_shader_strings[counter] = VERTEX_SHADER;
        vertex_shader_string_lengths[counter] = (GLint) strlen(VERTEX_SHADER);
        fragment_shader_strings[counter] = FRAGMENT_SHADER;
        fragment_shader_string_lengths[counter] = (GLint) strlen(FRAGMENT_SHADER);
        counter++;

        program_list_[i] = new GLProgram(vertex_shader_strings,
                    vertex_shader_string_lengths, fragment_shader_strings,
                    fragment_shader_string_lengths, counter);
    }
}
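
AssimpShader treats the loop index i itself as a feature bitmask, so every combination of diffuse texture, specular texture and skinning gets its own GLProgram. A minimal C sketch of the same enumeration idea; the FEAT_* names and values below are made up for the demo and are not the engine's real AS_* constants:

#include <stdio.h>

#define ISSET(t, f)	((t) & (f))

/* Hypothetical feature bits; with 3 features there are 2^3 = 8 variants. */
#define FEAT_DIFFUSE_TEX	(1 << 0)
#define FEAT_SPECULAR_TEX	(1 << 1)
#define FEAT_SKINNING		(1 << 2)
#define FEAT_VARIANTS		(1 << 3)	/* total number of combinations */

int main(void)
{
	for (int i = 0; i < FEAT_VARIANTS; i++) {
		/* Each index doubles as a bitmask describing one variant. */
		printf("variant %d: diffuse=%d specular=%d skinning=%d\n", i,
		    ISSET(i, FEAT_DIFFUSE_TEX) != 0,
		    ISSET(i, FEAT_SPECULAR_TEX) != 0,
		    ISSET(i, FEAT_SKINNING) != 0);
	}
	return 0;
}

The upshot is that selecting the right shader at draw time is just an array lookup with the material's feature mask as the index.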
Example #24
/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(struct buf *bp)
{
	int rv, async, wasdelayed, s;
	struct vnode *vp;
	struct mount *mp;

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	async = ISSET(bp->b_flags, B_ASYNC);
	if (!async && bp->b_vp && bp->b_vp->v_mount &&
	    ISSET(bp->b_vp->v_mount->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if ((vp = bp->b_vp) != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
		if (mp != NULL) {
			if (async)
				mp->mnt_stat.f_asyncwrites++;
			else
				mp->mnt_stat.f_syncwrites++;
		}
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	s = splbio();

	/*
	 * If not synchronous, pay for the I/O operation and make
	 * sure the buf is on the correct vnode queue.  We have
	 * to do this now, because if we don't, the vnode may not
	 * be properly notified that its I/O has completed.
	 */
	if (wasdelayed) {
		reassignbuf(bp);
	} else
		curproc->p_stats->p_ru.ru_oublock++;
	

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);
	SET(bp->b_flags, B_WRITEINPROG);
	VOP_STRATEGY(bp);

	if (async)
		return (0);

	/*
	 * If I/O was synchronous, wait for it to complete.
	 */
	rv = biowait(bp);

	/* Release the buffer. */
	brelse(bp);

	return (rv);
}
Example #25
void throttle_info_update(void *throttle_info, int flags)
{
	struct _throttle_io_info_t *info = throttle_info;
	struct uthread	*ut;
	int policy;
	int is_throttleable_io = 0;
	int is_passive_io = 0;
	SInt32 oldValue;

	if (!lowpri_IO_initial_window_msecs || (info == NULL))
		return;
	policy = throttle_get_io_policy(&ut);

	switch (policy) {
	case IOPOL_DEFAULT:
	case IOPOL_NORMAL:
		break;
	case IOPOL_THROTTLE:
		is_throttleable_io = 1;
		break;
	case IOPOL_PASSIVE:
		is_passive_io = 1;
		break;
	default:
		printf("unknown I/O policy %d", policy);
		break;
	}

	if (!is_throttleable_io && ISSET(flags, B_PASSIVE))
		is_passive_io |= 1;

	if (!is_throttleable_io) {
		if (!is_passive_io){
			microuptime(&info->last_normal_IO_timestamp);
		}
	} else if (ut) {
		/*
		 * I'd really like to do the IOSleep here, but
		 * we may be holding all kinds of filesystem related locks
		 * and the pages for this I/O marked 'busy'...
		 * we don't want to cause a normal task to block on
		 * one of these locks while we're throttling a task marked
		 * for low priority I/O... we'll mark the uthread and
		 * do the delay just before we return from the system
		 * call that triggered this I/O or from vnode_pagein
		 */
		if (ut->uu_lowpri_window == 0) {
			ut->uu_throttle_info = info;
			throttle_info_ref(ut->uu_throttle_info);
			DEBUG_ALLOC_THROTTLE_INFO("updating info = %p\n", info, info );

			oldValue = OSIncrementAtomic(&info->numthreads_throttling);
			if (oldValue < 0) {
				panic("%s: numthreads negative", __func__);
			}
			ut->uu_lowpri_window = lowpri_IO_initial_window_msecs;
			ut->uu_lowpri_window += lowpri_IO_window_msecs_inc * oldValue;
		} else {
			/* The thread sends I/Os to different devices within the same system call */
			if (ut->uu_throttle_info != info) {
    				struct _throttle_io_info_t *old_info = ut->uu_throttle_info;

				// keep track of the numthreads in the right device
				OSDecrementAtomic(&old_info->numthreads_throttling);
				OSIncrementAtomic(&info->numthreads_throttling);

    				DEBUG_ALLOC_THROTTLE_INFO("switching from info = %p\n", old_info, old_info );
    				DEBUG_ALLOC_THROTTLE_INFO("switching to info = %p\n", info, info );
				/* This thread no longer needs a reference on that throttle info */
				throttle_info_rel(ut->uu_throttle_info);
				ut->uu_throttle_info = info;
				/* Need to take a reference on this throttle info */
				throttle_info_ref(ut->uu_throttle_info);
			}
			int numthreads = MAX(1, info->numthreads_throttling);
			ut->uu_lowpri_window += lowpri_IO_window_msecs_inc * numthreads;
			if (ut->uu_lowpri_window > lowpri_max_window_msecs * numthreads)
				ut->uu_lowpri_window = lowpri_max_window_msecs * numthreads;
		}
	}
}
Example #26
/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);

		if (ISSET(bp->b_flags, B_DELWRI)) {
			CLR(bp->b_flags, B_DELWRI);
		}

		if (bp->b_vp) {
			reassignbuf(bp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0) {
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
			numemptybufs++;
		} else {
			/* invalid data */
			bufq = &bufqueues[BQ_CLEAN];
			numfreepages += btoc(bp->b_bufsize);
			numcleanpages += btoc(bp->b_bufsize);
		}
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else {
			numfreepages += btoc(bp->b_bufsize);
			if (!ISSET(bp->b_flags, B_DELWRI)) {
				numcleanpages += btoc(bp->b_bufsize);
				bufq = &bufqueues[BQ_CLEAN];
			} else {
				numdirtypages += btoc(bp->b_bufsize);
				bufq = &bufqueues[BQ_DIRTY];
			}
		}
		if (ISSET(bp->b_flags, B_AGE))
			binsheadfree(bp, bufq);
		else
			binstailfree(bp, bufq);
	}

	/* Unlock the buffer. */
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE | B_DEFERRED));


	/* Wake up syncer and cleaner processes waiting for buffers */
	if (nobuffers) {
		wakeup(&nobuffers);
		nobuffers = 0;
	}

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer && (numcleanpages > locleanpages)) {
		needbuffer--;
		wakeup_one(&needbuffer);
	}

	splx(s);

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}
}
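
Note how brelse() performs two different kinds of flag test: ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR) requires both bits to be present, whereas the plain ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)) fires when either bit is set. A small stand-alone illustration of the difference (the flag values are arbitrary demo constants):

#include <stdio.h>

#define ISSET(t, f)	((t) & (f))	/* nonzero if ANY bit of f is set */

#define F_LOCKED	0x01
#define F_ERROR		0x02

int main(void)
{
	int flags = F_LOCKED;		/* only one of the two bits set */

	/* "any" test: true, because F_LOCKED is present */
	printf("any: %d\n", ISSET(flags, F_LOCKED | F_ERROR) != 0);

	/* "all" test: false, because F_ERROR is missing */
	printf("all: %d\n",
	    ISSET(flags, F_LOCKED | F_ERROR) == (F_LOCKED | F_ERROR));
	return 0;
}

Forgetting the trailing comparison is a classic way to turn an "all bits" check into an "any bit" check, which is why the explicit mask comparison shows up in code like this.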
Example #27
static int
at91usart_param(struct tty *tp, struct termios *t)
{
	struct at91usart_softc *sc
		= device_lookup_private(&at91usart_cd, COMUNIT(tp->t_dev));
	int s;

	if (COM_ISALIVE(sc) == 0)
		return (EIO);

	if (t->c_ispeed && t->c_ispeed != t->c_ospeed)
		return (EINVAL);

	/*
	 * For the console, always force CLOCAL and !HUPCL, so that the port
	 * is always active.
	 */
	if (ISSET(sc->sc_swflags, TIOCFLAG_SOFTCAR) ||
	    ISSET(sc->sc_hwflags, COM_HW_CONSOLE)) {
		SET(t->c_cflag, CLOCAL);
		CLR(t->c_cflag, HUPCL);
	}

	/*
	 * If there were no changes, don't do anything.  This avoids dropping
	 * input and improves performance when all we did was frob things like
	 * VMIN and VTIME.
	 */
	if (tp->t_ospeed == t->c_ospeed &&
	    tp->t_cflag == t->c_cflag)
		return (0);

	s = spltty();

	sc->sc_brgr = (AT91_MSTCLK / 16 + t->c_ospeed / 2) / t->c_ospeed;
	
	/* And copy to tty. */
	tp->t_ispeed = 0;
	tp->t_ospeed = t->c_ospeed;
	tp->t_cflag = t->c_cflag;
	at91usart_set(sc);

	splx(s);

	/*
	 * Update the tty layer's idea of the carrier bit.
	 * We tell tty the carrier is always on.
	 */
	(void) (*tp->t_linesw->l_modem)(tp, 1);

#ifdef COM_DEBUG
	if (com_debug)
		comstatus(sc, "comparam ");
#endif

	/* tell the upper layer about hwflow.. */
	if (sc->hwflow)
		(*sc->hwflow)(sc, t->c_cflag);

	return (0);
}
Example #28
/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to insure that the
 * cached blocks be of the correct size.
 */
struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct bufhashhdr *bh;
	struct buf *bp, *nbp = NULL;
	int s, err;

	/*
	 * XXX
	 * The following is an inlined version of 'incore()', but with
	 * the 'invalid' test moved to after the 'busy' test.  It's
	 * necessary because there are some cases in which the NFS
	 * code sets B_INVAL prior to writing data to the server, but
	 * in which the buffers actually contain valid data.  In this
	 * case, we can't allow the system to allocate a new buffer for
	 * the block until the write is finished.
	 */
	bh = BUFHASH(vp, blkno);
start:
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;

		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}

		if (!ISSET(bp->b_flags, B_INVAL)) {
			SET(bp->b_flags, (B_BUSY | B_CACHE));
			bremfree(bp);
			splx(s);
			break;
		}
		splx(s);
	}

	if (bp == NULL) {
		if (nbp == NULL && getnewbuf(slpflag, slptimeo, &nbp) != 0) {
			goto start;
		}
		bp = nbp;
		binshash(bp, bh);
		bp->b_blkno = bp->b_lblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	} else if (nbp != NULL) {
		/*
		 * Set B_AGE so that buffer appear at BQ_CLEAN head
		 * and gets reused ASAP.
		 */
		SET(nbp->b_flags, B_AGE);
		brelse(nbp);
	}
	allocbuf(bp, size);

	return (bp);
}
Example #29
inline static void
at91usart_rxsoft(struct at91usart_softc *sc, struct tty *tp, unsigned csr)
{
	u_char *start, *get, *end;
	int cc;

	AT91PDC_FIFO_POSTREAD(sc->sc_iot, sc->sc_ioh, sc->sc_dmat, US_PDC,
			      &sc->sc_rx_fifo);

	if (ISSET(csr, US_CSR_TIMEOUT | US_CSR_RXBRK))
		at91usart_rx_stopped(sc);

	while ((start = AT91PDC_FIFO_RDPTR(&sc->sc_rx_fifo, &cc)) != NULL) {
		int (*rint)(int, struct tty *) = tp->t_linesw->l_rint;
		int code;

		if (!ISSET(csr, US_CSR_TIMEOUT | US_CSR_RXBRK))
			at91usart_rx_started(sc);

		for (get = start, end = start + cc; get < end; get++) {
			code = *get;
			if ((*rint)(code, tp) == -1) {
				/*
				 * The line discipline's buffer is out of space.
				 */
				if (!ISSET(sc->sc_rx_flags, RX_TTY_BLOCKED)) {
					/*
					 * We're either not using flow control, or the
					 * line discipline didn't tell us to block for
					 * some reason.  Either way, we have no way to
					 * know when there's more space available, so
					 * just drop the rest of the data.
					 */
					get = end;
					printf("%s: receive missing data!\n",
					     device_xname(sc->sc_dev));
				} else {
					/*
					 * Don't schedule any more receive processing
					 * until the line discipline tells us there's
					 * space available (through comhwiflow()).
					 * Leave the rest of the data in the input
					 * buffer.
					 */
					SET(sc->sc_rx_flags, RX_TTY_OVERFLOWED);
				}
				break;
			}
		}

		// tell we've read some bytes...
		AT91PDC_FIFO_READ(&sc->sc_rx_fifo, get - start);

		if (ISSET(sc->sc_rx_flags, RX_TTY_BLOCKED))
			break;
	}

	// h/w flow control hook:
	if (ISSET(sc->sc_swflags, TIOCFLAG_CRTSCTS))
		at91usart_rx_rts_ctl(sc, (AT91PDC_FIFO_SPACE(&sc->sc_rx_fifo) > PDC_BLOCK_SIZE * 2));

	// write next pointer if USART is ready:
	if (AT91PDC_FIFO_PREREAD(sc->sc_iot, sc->sc_ioh, sc->sc_dmat, US_PDC,
				  &sc->sc_rx_fifo, PDC_BLOCK_SIZE)) {
		SET(sc->sc_ier, US_CSR_ENDRX | US_CSR_RXBUFF | US_CSR_TIMEOUT | US_CSR_RXBRK);
	} else {
		CLR(sc->sc_ier, US_CSR_ENDRX | US_CSR_RXBUFF | US_CSR_TIMEOUT | US_CSR_RXBRK);
	}
}
Example #30
static int
obiosdhc_edma_xfer_data(struct sdhc_softc *sdhc_sc, struct sdmmc_command *cmd)
{
    struct obiosdhc_softc *sc = device_private(sdhc_sc->sc_dev);
    kmutex_t *plock = sdhc_host_lock(sc->sc_hosts[0]);
    struct edma_channel *edma;
    uint16_t *edma_param;
    struct edma_param ep;
    size_t seg;
    int error;
    int blksize = MIN(cmd->c_datalen, cmd->c_blklen);

    KASSERT(mutex_owned(plock));

    edma = ISSET(cmd->c_flags, SCF_CMD_READ) ?
           sc->sc_edma_rx : sc->sc_edma_tx;
    edma_param = ISSET(cmd->c_flags, SCF_CMD_READ) ?
                 sc->sc_edma_param_rx : sc->sc_edma_param_tx;

    DPRINTF(1, (sc->sc.sc_dev, "edma xfer: nsegs=%d ch# %d\n",
                cmd->c_dmamap->dm_nsegs, edma_channel_index(edma)));

    if (cmd->c_dmamap->dm_nsegs > EDMA_MAX_PARAMS) {
        return ENOMEM;
    }

    for (seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) {
        ep.ep_opt = __SHIFTIN(2, EDMA_PARAM_OPT_FWID) /* 32-bit */;
        ep.ep_opt |= __SHIFTIN(edma_channel_index(edma),
                               EDMA_PARAM_OPT_TCC);
        if (seg == cmd->c_dmamap->dm_nsegs - 1) {
            ep.ep_opt |= EDMA_PARAM_OPT_TCINTEN;
            ep.ep_link = 0xffff;
        } else {
            ep.ep_link = EDMA_PARAM_BASE(edma_param[seg+1]);
        }
        if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
            ep.ep_opt |= EDMA_PARAM_OPT_SAM;
            ep.ep_src = sc->sc_edma_fifo;
            ep.ep_dst = cmd->c_dmamap->dm_segs[seg].ds_addr;
        } else {
            ep.ep_opt |= EDMA_PARAM_OPT_DAM;
            ep.ep_src = cmd->c_dmamap->dm_segs[seg].ds_addr;
            ep.ep_dst = sc->sc_edma_fifo;
        }

        KASSERT(cmd->c_dmamap->dm_segs[seg].ds_len <= 65536 * 4);

        /*
         * For unknown reasons, the A-DMA transfer never completes for
         * transfers larger than 64 bytes, so use an AB transfer
         * with a 64-byte A length.
         */
        ep.ep_bcntrld = 0;	/* not used for AB-synchronous mode */
        ep.ep_opt |= EDMA_PARAM_OPT_SYNCDIM;
        ep.ep_acnt = min(cmd->c_dmamap->dm_segs[seg].ds_len, 64);
        ep.ep_bcnt = min(cmd->c_dmamap->dm_segs[seg].ds_len, blksize) /
                     ep.ep_acnt;
        ep.ep_ccnt = cmd->c_dmamap->dm_segs[seg].ds_len /
                     (ep.ep_acnt * ep.ep_bcnt);
        ep.ep_srcbidx = ep.ep_dstbidx = 0;
        ep.ep_srccidx = ep.ep_dstcidx = 0;
        if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
            ep.ep_dstbidx = ep.ep_acnt;
            ep.ep_dstcidx = ep.ep_acnt * ep.ep_bcnt;
        } else {
            ep.ep_srcbidx = ep.ep_acnt;
            ep.ep_srccidx = ep.ep_acnt * ep.ep_bcnt;
        }

        edma_set_param(edma, edma_param[seg], &ep);
#ifdef OM3SDHC_DEBUG
        if (om3sdhcdebug >= 1) {
            printf("target OPT: %08x\n", ep.ep_opt);
            edma_dump_param(edma, edma_param[seg]);
        }
#endif
    }

    error = 0;
    sc->sc_edma_pending = true;
    edma_transfer_enable(edma, edma_param[0]);
    while (sc->sc_edma_pending) {
        error = cv_timedwait(&sc->sc_edma_cv, plock, hz*10);
        if (error == EWOULDBLOCK) {
            device_printf(sc->sc.sc_dev, "transfer timeout!\n");
            edma_dump(edma);
            edma_dump_param(edma, edma_param[0]);
            edma_halt(edma);
            sc->sc_edma_pending = false;
            error = ETIMEDOUT;
            break;
        }
    }
    edma_halt(edma);

    return error;
}