Example #1
/*
 * Establish an interrupt handler.
 * Called by driver attach functions.
 */
void *
intr_establish(int (*func)(void *), void *arg, int ipl, int priority)
{
	struct hp300_intrhand *newih, *curih;

	if ((ipl < 0) || (ipl >= NISR))
		panic("intr_establish: bad ipl %d", ipl);

	newih = malloc(sizeof(struct hp300_intrhand), M_DEVBUF, M_NOWAIT);
	if (newih == NULL)
		panic("intr_establish: can't allocate space for handler");

	/* Fill in the new entry. */
	newih->ih_fn = func;
	newih->ih_arg = arg;
	newih->ih_ipl = ipl;
	newih->ih_priority = priority;

	/*
	 * Some devices are particularly sensitive to interrupt
	 * handling latency.  The DCA, for example, can lose many
	 * characters if its interrupt isn't handled with reasonable
	 * speed.  For this reason, we sort ISRs by IPL_* priority,
	 * inserting higher priority interrupts before lower priority
	 * interrupts.
	 */

	/*
	 * Get the appropriate ISR list.  If the list is empty, no
	 * additional work is necessary; we simply insert ourselves
	 * at the head of the list.
	 */

	if (LIST_FIRST(&hp300_intr_list[ipl].hi_q) == NULL) {
		LIST_INSERT_HEAD(&hp300_intr_list[ipl].hi_q, newih, ih_q);
		goto done;
	}

	/*
	 * A little extra work is required.  We traverse the list
	 * and place ourselves after any ISRs with our current (or
	 * higher) priority.
	 */

	for (curih = LIST_FIRST(&hp300_intr_list[ipl].hi_q);
	    LIST_NEXT(curih,ih_q) != NULL;
	    curih = LIST_NEXT(curih,ih_q)) {
		if (newih->ih_priority > curih->ih_priority) {
			LIST_INSERT_BEFORE(curih, newih, ih_q);
			goto done;
		}
	}

	/*
	 * We're the least important entry, it seems.  We just go
	 * on the end.
	 */
	LIST_INSERT_AFTER(curih, newih, ih_q);

 done:
	return newih;
}
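For context, a hedged caller sketch: the driver name, handler, and priority value below are invented for illustration and are not from the hp300 tree; only the intr_establish() signature shown above is assumed.

/*
 * Hypothetical driver attach routine registering its handler at IPL 3.
 * intr_establish() panics on failure, so the returned cookie is always
 * valid; a real driver would stash it in its softc for later teardown.
 */
static int example_isr(void *arg);

static void
example_attach(void *softc)
{
	void *ih;

	ih = intr_establish(example_isr, softc, 3, 1);
	(void)ih;
}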
Example #2
void
i81342_mem_bs_free(void *t, bus_space_handle_t bsh, bus_size_t size)
{

    panic("i81342_mem_bs_free(): not implemented");
}
Example #3
/* create font based on window type, character attributes and
   window device context */
HGDIOBJ mswin_get_font(int win_type, int attr, HDC hdc, BOOL replace)
{
	HFONT fnt = NULL;
	LOGFONT lgfnt;
	int font_size;
	int font_index;
	static BOOL once = FALSE;

	if( !once ) {
		once = TRUE;
		atexit(font_table_cleanup);
	}

	ZeroMemory( &lgfnt, sizeof(lgfnt) );

	/* try find font in the table */
	for(font_index=0; font_index<font_table_size; font_index++)
		if(NHFONT_CODE(win_type, attr)==font_table[font_index].code)
			break;

	if( !replace && font_index<font_table_size )
		return font_table[font_index].hFont;

	switch(win_type) {
	case NHW_STATUS:
		lgfnt.lfHeight			=	-iflags.wc_fontsiz_status*GetDeviceCaps(hdc, LOGPIXELSY)/72;	 // height of font
		lgfnt.lfWidth			=	0;				     // average character width
		lgfnt.lfEscapement		=	0;					 // angle of escapement
		lgfnt.lfOrientation		=	0;					 // base-line orientation angle
		lgfnt.lfWeight			=	FW_NORMAL;           // font weight
		lgfnt.lfItalic			=	FALSE;		         // italic attribute option
		lgfnt.lfUnderline		=	FALSE;			     // underline attribute option
		lgfnt.lfStrikeOut		=	FALSE;			     // strikeout attribute option
		lgfnt.lfCharSet			=	mswin_charset();     // character set identifier
		lgfnt.lfOutPrecision	=	OUT_DEFAULT_PRECIS;  // output precision
		lgfnt.lfClipPrecision	=	CLIP_DEFAULT_PRECIS; // clipping precision
		lgfnt.lfQuality			=	DEFAULT_QUALITY;     // output quality
		if( iflags.wc_font_status &&
			*iflags.wc_font_status ) {
			lgfnt.lfPitchAndFamily = DEFAULT_PITCH;		 // pitch and family
			NH_A2W( iflags.wc_font_status, lgfnt.lfFaceName, LF_FACESIZE);
		} else {
			lgfnt.lfPitchAndFamily = FIXED_PITCH;		 // pitch and family
		}
		break;

	case NHW_MENU:
		lgfnt.lfHeight			=	-iflags.wc_fontsiz_menu*GetDeviceCaps(hdc, LOGPIXELSY)/72;	 // height of font
		lgfnt.lfWidth			=	0;				     // average character width
		lgfnt.lfEscapement		=	0;					 // angle of escapement
		lgfnt.lfOrientation		=	0;					 // base-line orientation angle
		lgfnt.lfWeight			=	(attr==ATR_BOLD || attr==ATR_INVERSE)? FW_BOLD : FW_NORMAL;   // font weight
		lgfnt.lfItalic			=	(attr==ATR_BLINK)? TRUE: FALSE;		     // italic attribute option
		lgfnt.lfUnderline		=	(attr==ATR_ULINE)? TRUE : FALSE;		 // underline attribute option
		lgfnt.lfStrikeOut		=	FALSE;				// strikeout attribute option
		lgfnt.lfCharSet			=	mswin_charset();     // character set identifier
		lgfnt.lfOutPrecision	=	OUT_DEFAULT_PRECIS;  // output precision
		lgfnt.lfClipPrecision	=	CLIP_DEFAULT_PRECIS; // clipping precision
		lgfnt.lfQuality			=	DEFAULT_QUALITY;     // output quality
		if( iflags.wc_font_menu &&
			*iflags.wc_font_menu ) {
			lgfnt.lfPitchAndFamily	= DEFAULT_PITCH;		 // pitch and family
			NH_A2W( iflags.wc_font_menu, lgfnt.lfFaceName, LF_FACESIZE);
		} else {
			lgfnt.lfPitchAndFamily = FIXED_PITCH;		 // pitch and family
		}
		break;

	case NHW_MESSAGE:
		font_size = (attr==ATR_INVERSE)? iflags.wc_fontsiz_message+1 : iflags.wc_fontsiz_message;
		lgfnt.lfHeight			=	-font_size*GetDeviceCaps(hdc, LOGPIXELSY)/72;	 // height of font
		lgfnt.lfWidth			=	0;				     // average character width
		lgfnt.lfEscapement		=	0;					 // angle of escapement
		lgfnt.lfOrientation		=	0;					 // base-line orientation angle
		lgfnt.lfWeight			=	(attr==ATR_BOLD || attr==ATR_INVERSE)? FW_BOLD : FW_NORMAL;   // font weight
		lgfnt.lfItalic			=	(attr==ATR_BLINK)? TRUE: FALSE;		     // italic attribute option
		lgfnt.lfUnderline		=	(attr==ATR_ULINE)? TRUE : FALSE;		 // underline attribute option
		lgfnt.lfStrikeOut		=	FALSE;			     // strikeout attribute option
		lgfnt.lfCharSet			=	mswin_charset();     // character set identifier
		lgfnt.lfOutPrecision	=	OUT_DEFAULT_PRECIS;  // output precision
		lgfnt.lfClipPrecision	=	CLIP_DEFAULT_PRECIS; // clipping precision
		lgfnt.lfQuality			=	DEFAULT_QUALITY;     // output quality
		if( iflags.wc_font_message &&
			*iflags.wc_font_message ) {
			lgfnt.lfPitchAndFamily	= DEFAULT_PITCH;		 // pitch and family
			NH_A2W( iflags.wc_font_message, lgfnt.lfFaceName, LF_FACESIZE);
		} else {
			lgfnt.lfPitchAndFamily	= VARIABLE_PITCH;		 // pitch and family
		}
		break;

	case NHW_TEXT:
		lgfnt.lfHeight			=	-iflags.wc_fontsiz_text*GetDeviceCaps(hdc, LOGPIXELSY)/72;	 // height of font
		lgfnt.lfWidth			=	0;				     // average character width
		lgfnt.lfEscapement		=	0;					 // angle of escapement
		lgfnt.lfOrientation		=	0;					 // base-line orientation angle
		lgfnt.lfWeight			=	(attr==ATR_BOLD || attr==ATR_INVERSE)? FW_BOLD : FW_NORMAL;   // font weight
		lgfnt.lfItalic			=	(attr==ATR_BLINK)? TRUE: FALSE;		     // italic attribute option
		lgfnt.lfUnderline		=	(attr==ATR_ULINE)? TRUE : FALSE;		 // underline attribute option
		lgfnt.lfStrikeOut		=	FALSE;			     // strikeout attribute option
		lgfnt.lfCharSet			=	mswin_charset();     // character set identifier
		lgfnt.lfOutPrecision	=	OUT_DEFAULT_PRECIS;  // output precision
		lgfnt.lfClipPrecision	=	CLIP_DEFAULT_PRECIS; // clipping precision
		lgfnt.lfQuality			=	DEFAULT_QUALITY;     // output quality
		if( iflags.wc_font_text &&
			*iflags.wc_font_text ) {
			lgfnt.lfPitchAndFamily	= DEFAULT_PITCH;		 // pitch and family
			NH_A2W( iflags.wc_font_text, lgfnt.lfFaceName, LF_FACESIZE);
		} else {
			lgfnt.lfPitchAndFamily	= FIXED_PITCH;		 // pitch and family
		}
		break;
	}

	fnt = CreateFontIndirect(&lgfnt);

	/* add font to the table */
	if( font_index==font_table_size ) {
		if( font_table_size>=MAXFONTS ) panic( "font table overflow!" );
		font_table_size++;
	} else {
		DeleteObject(font_table[font_index].hFont);
	}

	font_table[font_index].code = NHFONT_CODE(win_type, attr);
	font_table[font_index].hFont = fnt;
	return fnt;
}
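As a usage note, a brief hedged sketch of drawing with the cached font: SelectObject() and TextOutA() are standard GDI calls, the NHW_MENU/ATR_BOLD constants are the ones handled in the switch above, and the helper name is purely illustrative.

/* Illustrative helper: draw one menu line in the cached bold menu font. */
static void
draw_menu_line(HDC hdc, int x, int y, const char *text)
{
	HGDIOBJ fnt = mswin_get_font(NHW_MENU, ATR_BOLD, hdc, FALSE);
	HGDIOBJ saved = SelectObject(hdc, fnt);

	TextOutA(hdc, x, y, text, (int)strlen(text));
	SelectObject(hdc, saved);	/* the HFONT stays owned by font_table */
}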
Example #4
static __init void prom_meminit(void)
{
    u64 addr, size, type; /* regardless of 64BIT_PHYS_ADDR */
    int mem_flags = 0;
    unsigned int idx;
    int rd_flag;
#ifdef CONFIG_BLK_DEV_INITRD
    unsigned long initrd_pstart;
    unsigned long initrd_pend;

#ifdef CONFIG_EMBEDDED_RAMDISK
    /* If we're using an embedded ramdisk, then __rd_start and __rd_end
       are defined by the linker to be on either side of the ramdisk
       area.  Otherwise, initrd_start should be defined by kernel command
       line arguments */
    if (initrd_start == 0) {
        initrd_start = (unsigned long)&__rd_start;
        initrd_end = (unsigned long)&__rd_end;
    }
#endif

    initrd_pstart = CPHYSADDR(initrd_start);
    initrd_pend = CPHYSADDR(initrd_end);
    if (initrd_start &&
            ((initrd_pstart > MAX_RAM_SIZE)
             || (initrd_pend > MAX_RAM_SIZE))) {
        panic("initrd out of addressable memory");
    }

#endif /* INITRD */

    for (idx = 0; cfe_enummem(idx, mem_flags, &addr, &size, &type) != CFE_ERR_NOMORE;
            idx++) {
        rd_flag = 0;
        if (type == CFE_MI_AVAILABLE) {
            /*
             * See if this block contains (any portion of) the
             * ramdisk
             */
#ifdef CONFIG_BLK_DEV_INITRD
            if (initrd_start) {
                if ((initrd_pstart > addr) &&
                        (initrd_pstart < (addr + size))) {
                    add_memory_region(addr,
                                      initrd_pstart - addr,
                                      BOOT_MEM_RAM);
                    rd_flag = 1;
                }
                if ((initrd_pend > addr) &&
                        (initrd_pend < (addr + size))) {
                    add_memory_region(initrd_pend,
                                      (addr + size) - initrd_pend,
                                      BOOT_MEM_RAM);
                    rd_flag = 1;
                }
            }
#endif
            if (!rd_flag) {
                if (addr > MAX_RAM_SIZE)
                    continue;
                if (addr+size > MAX_RAM_SIZE)
                    size = MAX_RAM_SIZE - (addr+size) + 1;
                /*
                 * memcpy/__copy_user prefetch, which
                 * will cause a bus error for
                 * KSEG/KUSEG addrs not backed by RAM.
                 * Hence, reserve some padding for the
                 * prefetch distance.
                 */
                if (size > 512)
                    size -= 512;
                add_memory_region(addr, size, BOOT_MEM_RAM);
            }
            board_mem_region_addrs[board_mem_region_count] = addr;
            board_mem_region_sizes[board_mem_region_count] = size;
            board_mem_region_count++;
            if (board_mem_region_count ==
                    SIBYTE_MAX_MEM_REGIONS) {
                /*
                 * Too many regions.  Need to configure more
                 */
                while(1);
            }
        }
    }
#ifdef CONFIG_BLK_DEV_INITRD
    if (initrd_start) {
        add_memory_region(initrd_pstart, initrd_pend - initrd_pstart,
                          BOOT_MEM_RESERVED);
    }
#endif
}
Example #5
/*
 * Prepare an Elf binary's exec package
 *
 * First, set up the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error out if
 * this is not possible.  Finally, set up vmcmds for the text, data, bss, and
 * stack segments.
 */
int
ELFNAME2(exec,makecmds)(struct proc *p, struct exec_package *epp)
{
	Elf_Ehdr *eh = epp->ep_hdr;
	Elf_Phdr *ph, *pp;
	Elf_Addr phdr = 0;
	int error, i;
	char interp[MAXPATHLEN];
	u_long pos = 0, phsize;
	u_int8_t os = OOS_NULL;

	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr))
		return (ENOEXEC);

	if (ELFNAME(check_header)(eh, ET_EXEC) &&
	    ELFNAME(olf_check_header)(eh, ET_EXEC, &os))
		return (ENOEXEC);

	/*
	 * Check if the vnode is open for writing, because we want to
	 * demand-page out of it.  If it is, don't do it, for various
	 * reasons.
	 */
	if (epp->ep_vp->v_writecount != 0) {
#ifdef DIAGNOSTIC
		if (epp->ep_vp->v_flag & VTEXT)
			panic("exec: a VTEXT vnode has writecount != 0");
#endif
		return (ETXTBSY);
	}
	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = ELFNAME(read_from)(p, epp->ep_vp, eh->e_phoff, (caddr_t)ph,
	    phsize)) != 0)
		goto bad;

	epp->ep_tsize = ELFDEFNNAME(NO_ADDR);
	epp->ep_dsize = ELFDEFNNAME(NO_ADDR);

	interp[0] = '\0';

	for (i = 0; i < eh->e_phnum; i++) {
		pp = &ph[i];
		if (pp->p_type == PT_INTERP) {
			if (pp->p_filesz >= sizeof(interp))
				goto bad;
			if ((error = ELFNAME(read_from)(p, epp->ep_vp,
			    pp->p_offset, (caddr_t)interp, pp->p_filesz)) != 0)
				goto bad;
			break;
		}
	}

	/*
	 * OK, we want a slightly different twist of the
	 * standard emulation package for "real" elf.
	 */
	epp->ep_emul = &ELFNAMEEND(emul);
	pos = ELFDEFNNAME(NO_ADDR);

	/*
	 * On the same architecture, we may be emulating different systems.
	 * See which one will accept this executable.
	 *
	 * Probe functions would normally see if the interpreter (if any)
	 * exists. Emulation packages may possibly replace the interpreter in
	 * interp[] with a changed path (/emul/xxx/<path>), and also
	 * set the ep_emul field in the exec package structure.
	 */
	error = ENOEXEC;
	p->p_os = OOS_OPENBSD;
#ifdef NATIVE_EXEC_ELF
	if (ELFNAME(os_pt_note)(p, epp, epp->ep_hdr, "OpenBSD", 8, 4) == 0) {
		goto native;
	}
#endif
	for (i = 0;
	    i < sizeof(ELFNAME(probes)) / sizeof(ELFNAME(probes)[0]) && error;
	    i++) {
		if (os == OOS_NULL || ((1 << os) & ELFNAME(probes)[i].os_mask))
			error = ELFNAME(probes)[i].func ?
			    (*ELFNAME(probes)[i].func)(p, epp, interp, &pos, &os) :
			    0;
	}
	if (!error)
		p->p_os = os;
#ifndef NATIVE_EXEC_ELF
	else
		goto bad;
#else
native:
#endif /* NATIVE_EXEC_ELF */
	/*
	 * Load all the necessary sections
	 */
	for (i = 0; i < eh->e_phnum; i++) {
		Elf_Addr addr = ELFDEFNNAME(NO_ADDR), size = 0;
		int prot = 0;

		pp = &ph[i];

		switch (ph[i].p_type) {
		case PT_LOAD:
			/*
			 * Calculates the size of the text and data segments
			 * by starting at the first and going to the end of
			 * the last.  'rwx' sections are treated as data.
			 * This is correct for BSS_PLT, but may not be for
			 * DATA_PLT; it is fine for TEXT_PLT.
			 */
			ELFNAME(load_psection)(&epp->ep_vmcmds, epp->ep_vp,
			    &ph[i], &addr, &size, &prot, 0);
			/*
			 * Decide whether it's text or data by looking
			 * at the protection of the section
			 */
			if (prot & VM_PROT_WRITE) {
				/* data section */
				if (epp->ep_dsize == ELFDEFNNAME(NO_ADDR)) {
					epp->ep_daddr = addr;
					epp->ep_dsize = size;
				} else {
					if (addr < epp->ep_daddr) {
						epp->ep_dsize =
						    epp->ep_dsize +
						    epp->ep_daddr -
						    addr;
						epp->ep_daddr = addr;
					} else
						epp->ep_dsize = addr+size -
						    epp->ep_daddr;
				}
			} else if (prot & VM_PROT_EXECUTE) {
				/* text section */
				if (epp->ep_tsize == ELFDEFNNAME(NO_ADDR)) {
					epp->ep_taddr = addr;
					epp->ep_tsize = size;
				} else {
					if (addr < epp->ep_taddr) {
						epp->ep_tsize =
						    epp->ep_tsize +
						    epp->ep_taddr -
						    addr;
						epp->ep_taddr = addr;
					} else
						epp->ep_tsize = addr+size -
						    epp->ep_taddr;
				}
			}
			break;

		case PT_SHLIB:
			error = ENOEXEC;
			goto bad;

		case PT_INTERP:
			/* Already did this one */
		case PT_DYNAMIC:
		case PT_NOTE:
			break;

		case PT_PHDR:
			/* Note address of program headers (in text segment) */
			phdr = pp->p_vaddr;
			break;

		default:
			/*
			 * Not fatal, we don't need to understand everything
			 * :-)
			 */
			break;
		}
	}

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * its interpreter when the exec file is released.
	 */
	if (interp[0]) {
		char *ip;
		struct elf_args *ap;

		ip = (char *)malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
		ap = (struct elf_args *)
		    malloc(sizeof(struct elf_args), M_TEMP, M_WAITOK);

		bcopy(interp, ip, MAXPATHLEN);
		epp->ep_interp = ip;
		epp->ep_interp_pos = pos;

		ap->arg_phaddr = phdr;
		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry;
		ap->arg_os = os;

		epp->ep_emul_arg = ap;
		epp->ep_entry = eh->e_entry; /* keep check_exec() happy */
	} else {
		epp->ep_interp = NULL;
		epp->ep_entry = eh->e_entry;
	}

#if defined(COMPAT_SVR4) && defined(i386)
#ifndef ELF_MAP_PAGE_ZERO
	/* Dell SVR4 maps page zero, yeuch! */
	if (p->p_os == OOS_DELL)
#endif
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0,
		    epp->ep_vp, 0, VM_PROT_READ);
#endif

	free((char *)ph, M_TEMP);
	vn_marktext(epp->ep_vp);
	return (exec_setup_stack(p, epp));

bad:
	free((char *)ph, M_TEMP);
	kill_vmcmds(&epp->ep_vmcmds);
	return (ENOEXEC);
}
Example #6
/*===========================================================================*
 *				do_exec					     *
 *===========================================================================*/
PUBLIC int do_exec()
{
/* Perform the execve(name, argv, envp) call.  The user library builds a
 * complete stack image, including pointers, args, environ, etc.  The stack
 * is copied to a buffer inside MM, and then to the new core image.
 */

  register struct mproc *rmp;
  struct mproc *sh_mp;
  int m, r, fd, ft, sn;
  static char mbuf[ARG_MAX];	/* buffer for stack and zeroes */
  static char name_buf[PATH_MAX]; /* the name of the file to exec */
  char *new_sp, *name, *basename;
  vir_bytes src, dst, text_bytes, data_bytes, bss_bytes, stk_bytes, vsp;
  phys_bytes tot_bytes;		/* total space for program, including gap */
  long sym_bytes;
  vir_clicks sc;
  struct stat s_buf[2], *s_p;
  vir_bytes pc;

  /* Do some validity checks. */
  rmp = mp;
  stk_bytes = (vir_bytes) stack_bytes;
  if (stk_bytes > ARG_MAX) return(ENOMEM);	/* stack too big */
  if (exec_len <= 0 || exec_len > PATH_MAX) return(EINVAL);

  /* Get the exec file name and see if the file is executable. */
  src = (vir_bytes) exec_name;
  dst = (vir_bytes) name_buf;
  r = sys_copy(who, D, (phys_bytes) src,
		MM_PROC_NR, D, (phys_bytes) dst, (phys_bytes) exec_len);
  if (r != OK) return(r);	/* file name not in user data segment */

  /* Fetch the stack from the user before destroying the old core image. */
  src = (vir_bytes) stack_ptr;
  dst = (vir_bytes) mbuf;
  r = sys_copy(who, D, (phys_bytes) src,
  			MM_PROC_NR, D, (phys_bytes) dst, (phys_bytes)stk_bytes);

  if (r != OK) return(EACCES);	/* can't fetch stack (e.g. bad virtual addr) */

  r = 0;	/* r = 0 (first attempt), or 1 (interpreted script) */
  name = name_buf;	/* name of file to exec. */
  do {
	s_p = &s_buf[r];
	tell_fs(CHDIR, who, FALSE, 0);  /* switch to the user's FS environ */
	fd = allowed(name, s_p, X_BIT);	/* is file executable? */
	if (fd < 0) return(fd);		/* file was not executable */

	/* Read the file header and extract the segment sizes. */
	sc = (stk_bytes + CLICK_SIZE - 1) >> CLICK_SHIFT;

	m = read_header(fd, &ft, &text_bytes, &data_bytes, &bss_bytes, 
					&tot_bytes, &sym_bytes, sc, &pc);
	if (m != ESCRIPT || ++r > 1) break;
  } while ((name = patch_stack(fd, mbuf, &stk_bytes, name_buf)) != NULL);

  if (m < 0) {
	close(fd);		/* something wrong with header */
	return(stk_bytes > ARG_MAX ? ENOMEM : ENOEXEC);
  }

  /* Can the process' text be shared with that of one already running? */
  sh_mp = find_share(rmp, s_p->st_ino, s_p->st_dev, s_p->st_ctime);

  /* Allocate new memory and release old memory.  Fix map and tell kernel. */
  r = new_mem(sh_mp, text_bytes, data_bytes, bss_bytes, stk_bytes, tot_bytes);
  if (r != OK) {
	close(fd);		/* insufficient core or program too big */
	return(r);
  }

  /* Save file identification to allow it to be shared. */
  rmp->mp_ino = s_p->st_ino;
  rmp->mp_dev = s_p->st_dev;
  rmp->mp_ctime = s_p->st_ctime;

  /* Patch up stack and copy it from MM to new core image. */
  vsp = (vir_bytes) rmp->mp_seg[S].mem_vir << CLICK_SHIFT;
  vsp += (vir_bytes) rmp->mp_seg[S].mem_len << CLICK_SHIFT;
  vsp -= stk_bytes;
  patch_ptr(mbuf, vsp);
  src = (vir_bytes) mbuf;
  r = sys_copy(MM_PROC_NR, D, (phys_bytes) src,
  			who, D, (phys_bytes) vsp, (phys_bytes)stk_bytes);
  if (r != OK) panic("do_exec stack copy err on", who);

  /* Read in text and data segments. */
  if (sh_mp != NULL) {
	lseek(fd, (off_t) text_bytes, SEEK_CUR);  /* shared: skip text */
  } else {
	rw_seg(0, fd, who, T, text_bytes);
  }
  rw_seg(0, fd, who, D, data_bytes);

  close(fd);			/* don't need exec file any more */

  /* Take care of setuid/setgid bits. */
  if ((rmp->mp_flags & TRACED) == 0) { /* suppress if tracing */
	if (s_buf[0].st_mode & I_SET_UID_BIT) {
		rmp->mp_effuid = s_buf[0].st_uid;
		tell_fs(SETUID,who, (int)rmp->mp_realuid, (int)rmp->mp_effuid);
	}
	if (s_buf[0].st_mode & I_SET_GID_BIT) {
		rmp->mp_effgid = s_buf[0].st_gid;
		tell_fs(SETGID,who, (int)rmp->mp_realgid, (int)rmp->mp_effgid);
	}
  }

  /* Save offset to initial argc (for ps) */
  rmp->mp_procargs = vsp;

  /* Fix 'mproc' fields, tell kernel that exec is done,  reset caught sigs. */
  for (sn = 1; sn <= _NSIG; sn++) {
	if (sigismember(&rmp->mp_catch, sn)) {
		sigdelset(&rmp->mp_catch, sn);
		rmp->mp_sigact[sn].sa_handler = SIG_DFL;
		sigemptyset(&rmp->mp_sigact[sn].sa_mask);
	}
  }

  rmp->mp_flags &= ~SEPARATE;	/* turn off SEPARATE bit */
  rmp->mp_flags |= ft;		/* turn it on for separate I & D files */
  new_sp = (char *) vsp;

  tell_fs(EXEC, who, 0, 0);	/* allow FS to handle FD_CLOEXEC files */

  /* System will save command line for debugging, ps(1) output, etc. */
  basename = strrchr(name, '/');
  if (basename == NULL) basename = name; else basename++;
  sys_exec(who, new_sp, rmp->mp_flags & TRACED, basename, pc);

  return(SUSPEND);		/* no reply, new program just runs */
}
Example #7
/* return TRUE if mon still alive */
boolean
hmon(struct monst *mon, struct obj *obj, int thrown)
{
	int tmp;
	boolean         hittxt = FALSE;

	if (!obj) {
		tmp = rnd(2);	/* attack with bare hands */
		if (mon->data->mlet == 'c' && !uarmg) {
			pline("You hit the cockatrice with your bare hands.");
			pline("You turn to stone ...");
			done_in_by(mon);
		}
	} else if (obj->olet == WEAPON_SYM || obj->otyp == PICK_AXE) {
		if (obj == uwep && (obj->otyp > SPEAR || obj->otyp < BOOMERANG))
			tmp = rnd(2);
		else {
			if (strchr(mlarge, mon->data->mlet)) {
				tmp = rnd(objects[obj->otyp].wldam);
				if (obj->otyp == TWO_HANDED_SWORD)
					tmp += d(2, 6);
				else if (obj->otyp == FLAIL)
					tmp += rnd(4);
			} else {
				tmp = rnd(objects[obj->otyp].wsdam);
			}
			tmp += obj->spe;
			if (!thrown && obj == uwep && obj->otyp == BOOMERANG
			    && !rn2(3)) {
				pline("As you hit %s, the boomerang breaks into splinters.",
				      monnam(mon));
				freeinv(obj);
				setworn((struct obj *) 0, obj->owornmask);
				obfree(obj, (struct obj *) 0);
				obj = NULL;
				tmp++;
			}
		}
		if (mon->data->mlet == 'O' && obj != NULL &&
		    obj->otyp == TWO_HANDED_SWORD &&
		    !strcmp(ONAME(obj), "Orcrist"))
			tmp += rnd(10);
	} else
		switch (obj->otyp) {
		case HEAVY_IRON_BALL:
			tmp = rnd(25);
			break;
		case EXPENSIVE_CAMERA:
			pline("You succeed in destroying your camera. Congratulations!");
			freeinv(obj);
			if (obj->owornmask)
				setworn((struct obj *) 0, obj->owornmask);
			obfree(obj, (struct obj *) 0);
			return (TRUE);
		case DEAD_COCKATRICE:
			pline("You hit %s with the cockatrice corpse.",
			      monnam(mon));
			if (mon->data->mlet == 'c') {
				tmp = 1;
				hittxt = TRUE;
				break;
			}
			pline("%s is turned to stone!", Monnam(mon));
			killed(mon);
			return (FALSE);
		case CLOVE_OF_GARLIC:	/* no effect against demons */
			if (strchr(UNDEAD, mon->data->mlet))
				mon->mflee = 1;
			tmp = 1;
			break;
		default:
			/* non-weapons can damage because of their weight */
			/* (but not too much) */
			tmp = obj->owt / 10;
			if (tmp < 1)
				tmp = 1;
			else
				tmp = rnd(tmp);
			if (tmp > 6)
				tmp = 6;
		}

	/****** NOTE: perhaps obj is undefined!! (if !thrown && BOOMERANG) */

	tmp += u.udaminc + dbon();
	if (u.uswallow) {
		if ((tmp -= u.uswldtim) <= 0) {
			pline("Your arms are no longer able to hit.");
			return (TRUE);
		}
	}
	if (tmp < 1)
		tmp = 1;
	mon->mhp -= tmp;
	if (mon->mhp < 1) {
		killed(mon);
		return (FALSE);
	}
	if (mon->mtame && (!mon->mflee || mon->mfleetim)) {
		mon->mflee = 1;	/* Rick Richardson */
		mon->mfleetim += 10 * rnd(tmp);
	}
	if (!hittxt) {
		if (thrown) {
			/* this assumes that we cannot throw plural things */
			if (obj == NULL)
				panic("thrown non-object");
			hit(xname(obj) /* or: objects[obj->otyp].oc_name */ ,
			    mon, exclam(tmp));
		} else if (Blind)
			pline("You hit it.");
		else
			pline("You hit %s%s", monnam(mon), exclam(tmp));
	}
	if (u.umconf && !thrown) {
		if (!Blind) {
			pline("Your hands stop glowing blue.");
			if (!mon->mfroz && !mon->msleep)
				pline("%s appears confused.", Monnam(mon));
		}
		mon->mconf = 1;
		u.umconf = 0;
	}
	return (TRUE);		/* mon still alive */
}
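Example #8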
/*
 * This function is very similar to ctl_ioctl_do_datamove().  Is there a
 * way to combine the functionality?
 *
 * XXX KDM may need to move this into a thread.  We're doing a bcopy in the
 * caller's context, which will usually be the backend.  That may not be a
 * good thing.
 */
static void
cfcs_datamove(union ctl_io *io)
{
	union ccb *ccb;
	bus_dma_segment_t cam_sg_entry, *cam_sglist;
	struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
	int cam_sg_count, ctl_sg_count, cam_sg_start;
	int cam_sg_offset;
	int len_to_copy, len_copied;
	int ctl_watermark, cam_watermark;
	int i, j;


	cam_sg_offset = 0;
	cam_sg_start = 0;

	ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	/*
	 * Note that we have a check in cfcs_action() to make sure that any
	 * CCBs with "bad" flags are returned with CAM_REQ_INVALID.  This
	 * is just to make sure no one removes that check without updating
	 * this code to provide the additional functionality necessary to
	 * support those modes of operation.
	 */
	KASSERT(((ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS) == 0), ("invalid "
		  "CAM flags %#x", (ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS)));

	/*
	 * Simplify things on both sides by putting single buffers into a
	 * single entry S/G list.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_SG: {
		int len_seen;

		cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
		cam_sg_count = ccb->csio.sglist_cnt;

		for (i = 0, len_seen = 0; i < cam_sg_count; i++) {
			if ((len_seen + cam_sglist[i].ds_len) >=
			     io->scsiio.kern_rel_offset) {
				cam_sg_start = i;
				cam_sg_offset = io->scsiio.kern_rel_offset -
					len_seen;
				break;
			}
			len_seen += cam_sglist[i].ds_len;
		}
		break;
	}
	case CAM_DATA_VADDR:
		cam_sglist = &cam_sg_entry;
		cam_sglist[0].ds_len = ccb->csio.dxfer_len;
		cam_sglist[0].ds_addr = (bus_addr_t)ccb->csio.data_ptr;
		cam_sg_count = 1;
		cam_sg_start = 0;
		cam_sg_offset = io->scsiio.kern_rel_offset;
		break;
	default:
		panic("Invalid CAM flags %#x", ccb->ccb_h.flags);
	}

	if (io->scsiio.kern_sg_entries > 0) {
		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		ctl_sg_count = io->scsiio.kern_sg_entries;
	} else {
		ctl_sglist = &ctl_sg_entry;
		ctl_sglist->addr = io->scsiio.kern_data_ptr;
		ctl_sglist->len = io->scsiio.kern_data_len;
		ctl_sg_count = 1;
	}

	ctl_watermark = 0;
	cam_watermark = cam_sg_offset;
	len_copied = 0;
	for (i = cam_sg_start, j = 0;
	     i < cam_sg_count && j < ctl_sg_count;) {
		uint8_t *cam_ptr, *ctl_ptr;

		len_to_copy = ctl_min(cam_sglist[i].ds_len - cam_watermark,
				      ctl_sglist[j].len - ctl_watermark);

		cam_ptr = (uint8_t *)cam_sglist[i].ds_addr;
		cam_ptr = cam_ptr + cam_watermark;
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			ctl_ptr = (uint8_t *)ctl_sglist[j].addr;
		ctl_ptr = ctl_ptr + ctl_watermark;

		ctl_watermark += len_to_copy;
		cam_watermark += len_to_copy;

		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		     CTL_FLAG_DATA_IN) {
			CTL_DEBUG_PRINT(("%s: copying %d bytes to CAM\n",
					 __func__, len_to_copy));
			CTL_DEBUG_PRINT(("%s: from %p to %p\n", ctl_ptr,
					 __func__, cam_ptr));
			bcopy(ctl_ptr, cam_ptr, len_to_copy);
		} else {
			CTL_DEBUG_PRINT(("%s: copying %d bytes from CAM\n",
					 __func__, len_to_copy));
			CTL_DEBUG_PRINT(("%s: from %p to %p\n", cam_ptr,
					 __func__, ctl_ptr));
			bcopy(cam_ptr, ctl_ptr, len_to_copy);
		}

		len_copied += len_to_copy;

		if (cam_sglist[i].ds_len == cam_watermark) {
			i++;
			cam_watermark = 0;
		}

		if (ctl_sglist[j].len == ctl_watermark) {
			j++;
			ctl_watermark = 0;
		}
	}

	io->scsiio.ext_data_filled += len_copied;

	io->scsiio.be_move_done(io);
}
Example #9
int crisv32_request_dma(unsigned int dmanr, const char *device_id,
			unsigned options, unsigned int bandwidth,
			enum dma_owner owner)
{
	unsigned long flags;
	reg_config_rw_clk_ctrl clk_ctrl;
	reg_strmux_rw_cfg strmux_cfg;

	if (crisv32_arbiter_allocate_bandwidth(dmanr,
					       options & DMA_INT_MEM ?
					       INT_REGION : EXT_REGION,
					       bandwidth))
		return -ENOMEM;

	spin_lock_irqsave(&dma_lock, flags);

	if (used_dma_channels[dmanr]) {
		spin_unlock_irqrestore(&dma_lock, flags);
		if (options & DMA_VERBOSE_ON_ERROR) {
			printk(KERN_ERR "Failed to request DMA %i for %s, "
				"already allocated by %s\n",
				dmanr,
				device_id,
				used_dma_channels_users[dmanr]);
		}
		if (options & DMA_PANIC_ON_ERROR)
			panic("request_dma error!");
		return -EBUSY;
	}
	clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
	strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg);

	switch (dmanr) {
	case 0:
	case 1:
		clk_ctrl.dma01_eth0 = 1;
		break;
	case 2:
	case 3:
		clk_ctrl.dma23 = 1;
		break;
	case 4:
	case 5:
		clk_ctrl.dma45 = 1;
		break;
	case 6:
	case 7:
		clk_ctrl.dma67 = 1;
		break;
	case 8:
	case 9:
		clk_ctrl.dma89_strcop = 1;
		break;
#if MAX_DMA_CHANNELS-1 != 9
#error Check dma.c
#endif
	default:
		spin_unlock_irqrestore(&dma_lock, flags);
		if (options & DMA_VERBOSE_ON_ERROR) {
			printk(KERN_ERR "Failed to request DMA %i for %s, "
				"only 0-%i valid)\n",
				dmanr, device_id, MAX_DMA_CHANNELS - 1);
		}

		if (options & DMA_PANIC_ON_ERROR)
			panic("request_dma error!");
		return -EINVAL;
	}

	switch (owner) {
	case dma_eth0:
		if (dmanr == 0)
			strmux_cfg.dma0 = regk_strmux_eth0;
		else if (dmanr == 1)
			strmux_cfg.dma1 = regk_strmux_eth0;
		else
			panic("Invalid DMA channel for eth0\n");
		break;
	case dma_eth1:
		if (dmanr == 6)
			strmux_cfg.dma6 = regk_strmux_eth1;
		else if (dmanr == 7)
			strmux_cfg.dma7 = regk_strmux_eth1;
		else
			panic("Invalid DMA channel for eth1\n");
		break;
	case dma_iop0:
		if (dmanr == 2)
			strmux_cfg.dma2 = regk_strmux_iop0;
		else if (dmanr == 3)
			strmux_cfg.dma3 = regk_strmux_iop0;
		else
			panic("Invalid DMA channel for iop0\n");
		break;
	case dma_iop1:
		if (dmanr == 4)
			strmux_cfg.dma4 = regk_strmux_iop1;
		else if (dmanr == 5)
			strmux_cfg.dma5 = regk_strmux_iop1;
		else
			panic("Invalid DMA channel for iop1\n");
		break;
	case dma_ser0:
		if (dmanr == 6)
			strmux_cfg.dma6 = regk_strmux_ser0;
		else if (dmanr == 7)
			strmux_cfg.dma7 = regk_strmux_ser0;
		else
			panic("Invalid DMA channel for ser0\n");
		break;
	case dma_ser1:
		if (dmanr == 4)
			strmux_cfg.dma4 = regk_strmux_ser1;
		else if (dmanr == 5)
			strmux_cfg.dma5 = regk_strmux_ser1;
		else
			panic("Invalid DMA channel for ser1\n");
		break;
	case dma_ser2:
		if (dmanr == 2)
			strmux_cfg.dma2 = regk_strmux_ser2;
		else if (dmanr == 3)
			strmux_cfg.dma3 = regk_strmux_ser2;
		else
			panic("Invalid DMA channel for ser2\n");
		break;
	case dma_ser3:
		if (dmanr == 8)
			strmux_cfg.dma8 = regk_strmux_ser3;
		else if (dmanr == 9)
			strmux_cfg.dma9 = regk_strmux_ser3;
		else
			panic("Invalid DMA channel for ser3\n");
		break;
	case dma_sser0:
		if (dmanr == 4)
			strmux_cfg.dma4 = regk_strmux_sser0;
		else if (dmanr == 5)
			strmux_cfg.dma5 = regk_strmux_sser0;
		else
			panic("Invalid DMA channel for sser0\n");
		break;
	case dma_sser1:
		if (dmanr == 6)
			strmux_cfg.dma6 = regk_strmux_sser1;
		else if (dmanr == 7)
			strmux_cfg.dma7 = regk_strmux_sser1;
		else
			panic("Invalid DMA channel for sser1\n");
		break;
	case dma_ata:
		if (dmanr == 2)
			strmux_cfg.dma2 = regk_strmux_ata;
		else if (dmanr == 3)
			strmux_cfg.dma3 = regk_strmux_ata;
		else
			panic("Invalid DMA channel for ata\n");
		break;
	case dma_strp:
		if (dmanr == 8)
			strmux_cfg.dma8 = regk_strmux_strcop;
		else if (dmanr == 9)
			strmux_cfg.dma9 = regk_strmux_strcop;
		else
			panic("Invalid DMA channel for strp\n");
		break;
	case dma_ext0:
		if (dmanr == 6)
			strmux_cfg.dma6 = regk_strmux_ext0;
		else
			panic("Invalid DMA channel for ext0\n");
		break;
	case dma_ext1:
		if (dmanr == 7)
			strmux_cfg.dma7 = regk_strmux_ext1;
		else
			panic("Invalid DMA channel for ext1\n");
		break;
	case dma_ext2:
		if (dmanr == 2)
			strmux_cfg.dma2 = regk_strmux_ext2;
		else if (dmanr == 8)
			strmux_cfg.dma8 = regk_strmux_ext2;
		else
			panic("Invalid DMA channel for ext2\n");
		break;
	case dma_ext3:
		if (dmanr == 3)
			strmux_cfg.dma3 = regk_strmux_ext3;
		else if (dmanr == 9)
			strmux_cfg.dma9 = regk_strmux_ext3;
		else
			panic("Invalid DMA channel for ext3\n");
		break;
	}

	used_dma_channels[dmanr] = 1;
	used_dma_channels_users[dmanr] = device_id;
	REG_WR(config, regi_config, rw_clk_ctrl, clk_ctrl);
	REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg);
	spin_unlock_irqrestore(&dma_lock, flags);
	return 0;
}
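A brief usage sketch, hedged: the wrapper name is invented, but the option flags, the dma_eth0 owner, and channel 0 are exactly the values the switches above accept; the zero bandwidth argument is a placeholder rather than a recommendation.

/* Hypothetical caller: claim DMA channel 0 for eth0, or panic trying. */
static int example_claim_eth0_dma(void)
{
	return crisv32_request_dma(0, "eth0",
				   DMA_VERBOSE_ON_ERROR | DMA_PANIC_ON_ERROR,
				   0 /* bandwidth (placeholder) */,
				   dma_eth0);
}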
Example #10
SMSCConn *smscconn_create(CfgGroup *grp, int start_as_stopped)
{
    SMSCConn *conn;
    Octstr *smsc_type;
    int ret;
    long throughput;
    Octstr *allowed_smsc_id_regex;
    Octstr *denied_smsc_id_regex;
    Octstr *allowed_prefix_regex;
    Octstr *denied_prefix_regex;
    Octstr *preferred_prefix_regex;

    if (grp == NULL)
        return NULL;

    conn = gw_malloc(sizeof(*conn));
    memset(conn, 0, sizeof(*conn));

    conn->why_killed = SMSCCONN_ALIVE;
    conn->status = SMSCCONN_CONNECTING;
    conn->connect_time = -1;
    conn->is_stopped = start_as_stopped;

    conn->received = counter_create();
    conn->sent = counter_create();
    conn->failed = counter_create();
    conn->flow_mutex = mutex_create();

#define GET_OPTIONAL_VAL(x, n) x = cfg_get(grp, octstr_imm(n))
#define SPLIT_OPTIONAL_VAL(x, n) \
        do { \
                Octstr *tmp = cfg_get(grp, octstr_imm(n)); \
                if (tmp) x = octstr_split(tmp, octstr_imm(";")); \
                else x = NULL; \
                octstr_destroy(tmp); \
        }while(0)

    GET_OPTIONAL_VAL(conn->id, "smsc-id");
    SPLIT_OPTIONAL_VAL(conn->allowed_smsc_id, "allowed-smsc-id");
    SPLIT_OPTIONAL_VAL(conn->denied_smsc_id, "denied-smsc-id");
    SPLIT_OPTIONAL_VAL(conn->preferred_smsc_id, "preferred-smsc-id");
    GET_OPTIONAL_VAL(conn->allowed_prefix, "allowed-prefix");
    GET_OPTIONAL_VAL(conn->denied_prefix, "denied-prefix");
    GET_OPTIONAL_VAL(conn->preferred_prefix, "preferred-prefix");
    GET_OPTIONAL_VAL(conn->unified_prefix, "unified-prefix");
    GET_OPTIONAL_VAL(conn->our_host, "our-host");
    GET_OPTIONAL_VAL(conn->log_file, "log-file");
    cfg_get_bool(&conn->alt_dcs, grp, octstr_imm("alt-dcs"));

    GET_OPTIONAL_VAL(allowed_smsc_id_regex, "allowed-smsc-id-regex");
    if (allowed_smsc_id_regex != NULL)
        if ((conn->allowed_smsc_id_regex = gw_regex_comp(allowed_smsc_id_regex, REG_EXTENDED)) == NULL)
            panic(0, "Could not compile pattern '%s'", octstr_get_cstr(allowed_smsc_id_regex));
    GET_OPTIONAL_VAL(denied_smsc_id_regex, "denied-smsc-id-regex");
    if (denied_smsc_id_regex != NULL)
        if ((conn->denied_smsc_id_regex = gw_regex_comp(denied_smsc_id_regex, REG_EXTENDED)) == NULL)
            panic(0, "Could not compile pattern '%s'", octstr_get_cstr(denied_smsc_id_regex));
    GET_OPTIONAL_VAL(allowed_prefix_regex, "allowed-prefix-regex");
    if (allowed_prefix_regex != NULL)
        if ((conn->allowed_prefix_regex = gw_regex_comp(allowed_prefix_regex, REG_EXTENDED)) == NULL)
            panic(0, "Could not compile pattern '%s'", octstr_get_cstr(allowed_prefix_regex));
    GET_OPTIONAL_VAL(denied_prefix_regex, "denied-prefix-regex");
    if (denied_prefix_regex != NULL)
        if ((conn->denied_prefix_regex = gw_regex_comp(denied_prefix_regex, REG_EXTENDED)) == NULL)
            panic(0, "Could not compile pattern '%s'", octstr_get_cstr(denied_prefix_regex));
    GET_OPTIONAL_VAL(preferred_prefix_regex, "preferred-prefix-regex");
    if (preferred_prefix_regex != NULL)
        if ((conn->preferred_prefix_regex = gw_regex_comp(preferred_prefix_regex, REG_EXTENDED)) == NULL)
            panic(0, "Could not compile pattern '%s'", octstr_get_cstr(preferred_prefix_regex));

    if (cfg_get_integer(&throughput, grp, octstr_imm("throughput")) == -1)
        conn->throughput = 0;   /* defaults to no throughput limitation */
    else
        conn->throughput = (int) throughput;

    /* configure the internal rerouting rules for this smsc id */
    init_reroute(conn, grp);

    if (cfg_get_integer(&conn->log_level, grp, octstr_imm("log-level")) == -1)
        conn->log_level = 0;

    /* open an smsc-id specific log-file in exclusive mode */
    if (conn->log_file)
        conn->log_idx = log_open(octstr_get_cstr(conn->log_file),
                                 conn->log_level, GW_EXCL);
#undef GET_OPTIONAL_VAL
#undef SPLIT_OPTIONAL_VAL

    if (conn->allowed_smsc_id && conn->denied_smsc_id)
        warning(0, "Both 'allowed-smsc-id' and 'denied-smsc-id' set, deny-list "
                "automatically ignored");
    if (conn->allowed_smsc_id_regex && conn->denied_smsc_id_regex)
        warning(0, "Both 'allowed-smsc-id-regex' and 'denied-smsc-id-regex' set, deny-regex "
                "automatically ignored");

    if (cfg_get_integer(&conn->reconnect_delay, grp,
                        octstr_imm("reconnect-delay")) == -1)
        conn->reconnect_delay = SMSCCONN_RECONNECT_DELAY;

    smsc_type = cfg_get(grp, octstr_imm("smsc"));
    if (smsc_type == NULL) {
        error(0, "Required field 'smsc' missing for smsc group.");
        smscconn_destroy(conn);
        octstr_destroy(smsc_type);
        return NULL;
    }

    if (octstr_compare(smsc_type, octstr_imm("fake")) == 0)
        ret = smsc_fake_create(conn, grp);
    else if (octstr_compare(smsc_type, octstr_imm("cimd2")) == 0)
        ret = smsc_cimd2_create(conn, grp);
    else if (octstr_compare(smsc_type, octstr_imm("emi")) == 0)
        ret = smsc_emi2_create(conn, grp);
    else if (octstr_compare(smsc_type, octstr_imm("http")) == 0)
        ret = smsc_http_create(conn, grp);
    else if (octstr_compare(smsc_type, octstr_imm("smpp")) == 0)
        ret = smsc_smpp_create(conn, grp);
    else if (octstr_compare(smsc_type, octstr_imm("at")) == 0)
        ret = smsc_at2_create(conn,grp);
    else if (octstr_compare(smsc_type, octstr_imm("cgw")) == 0)
        ret = smsc_cgw_create(conn,grp);
    else if (octstr_compare(smsc_type, octstr_imm("smasi")) == 0)
        ret = smsc_smasi_create(conn, grp);
    else if (octstr_compare(smsc_type, octstr_imm("oisd")) == 0)
        ret = smsc_oisd_create(conn, grp);
    else
        ret = smsc_wrapper_create(conn, grp);

    octstr_destroy(smsc_type);
    if (ret == -1) {
        smscconn_destroy(conn);
        return NULL;
    }
    gw_assert(conn->send_msg != NULL);

    bb_smscconn_ready(conn);

    return conn;
}
Example #11
/*
 * Bring one cpu online.
 */
int __cpuinit smp_boot_one_cpu(int cpuid)
{
	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
	struct task_struct *idle;
	long timeout;

	/* 
	 * Create an idle task for this CPU.  Note the address we'd give
	 * to kernel_thread is irrelevant -- it's going to start
	 * where OS_BOOT_RENDEVZ vector in SAL says to start.  But
	 * this gets all the other task-y sort of data structures set
	 * up like we wish.   We need to pull the just created idle task 
	 * off the run queue and stuff it into the init_tasks[] array.  
	 * Sheesh . . .
	 */

	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	task_thread_info(idle)->cpu = cpuid;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[],cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/* 
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle ;
	mb();

	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which 
	** is executed after receiving the rendezvous signal (an interrupt to 
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the 
	** contents of memory are valid."
	*/
	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
	mb();

	/* 
	 * OK, wait a bit for that CPU to finish staggering about. 
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if(cpu_online(cpuid)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive ;
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
	smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
		cpuid, timeout * 100);
	return 0;
}
Example #12
long
sys_mmap(
	unsigned long addr,
	unsigned long len,
	unsigned long prot,
	unsigned long flags,
	unsigned long fd,
	unsigned long off
)
{
	struct aspace *as = current->aspace;
	struct file *file;
	struct vm_area_struct vma;
	unsigned long mmap_brk;
	int rv;

	/* printk("[%s] SYS_MMAP: addr=%lx, len=%lu\n", current->name, addr, len); */

	if (len != round_up(len, PAGE_SIZE))
		return -EINVAL;

	/* we only support anonymous private mapping; file-backed
	   private mapping has copy-on-write semantics, which we don't
	   want due to complete lack of any pagefaulting resolution */

	if((flags & MAP_PRIVATE) && !(flags & MAP_ANONYMOUS))
		return -EINVAL;

	/* anonymous mappings (not backed by a file) are handled specially */
	if(flags & MAP_ANONYMOUS) {
		/* anonymous mmap()ed memory is put at the top of the
		   heap region, and grows from high to low addresses,
		   i.e. down towards the current heap end. */
		spin_lock(&as->lock);
		mmap_brk = round_down(as->mmap_brk - len, PAGE_SIZE);

		/* Protect against extending into the UNIX data segment,
		   or becoming negative (which wraps around to large addr) */
		if ((mmap_brk <= as->brk) || (mmap_brk >= as->mmap_brk)) {
			spin_unlock(&as->lock);
			printk("[%s] SYS_MMAP: ENOMEM (len=%lu, heap_brk=%lx, mmap_brk=%lx)\n",
			       current->name, len, as->brk, as->mmap_brk);
			return -ENOMEM;
		}

		as->mmap_brk = mmap_brk;
		spin_unlock(&as->lock);

		/* Zero the memory */
		paddr_t phys;
		if (__aspace_virt_to_phys(as, mmap_brk, & phys))
			panic("sys_mmap() failed to get physical address\n");
		memset(__va(phys), 0, len);

		/* printk("[%s] SYS_MMAP: returning mmap_brk=%lx, heap_brk=%lx\n", current->name, mmap_brk, as->brk); */
		return mmap_brk;
	}

	/* file-backed mappings */

	/* TODO: add a million checks here that we'll simply ignore now */

	file = get_current_file(fd);
	if(NULL == file)
		return -EBADF;

	if(NULL == file->f_op ||
	   NULL == file->f_op->mmap)
		return -ENODEV;

	spin_lock(&as->lock);
	if ((rv = __aspace_find_hole(as, addr, len, PAGE_SIZE, &addr))) {
		spin_unlock(&as->lock);
		return -ENOMEM;
	}

	if ((rv = __aspace_add_region(as, addr, len,
				      VM_READ|VM_WRITE|VM_USER,
				      PAGE_SIZE, "mmap"))) {
		/* assuming there is no race between find_hole and
		   add_region, as we're holding the as->lock, this
		   failure can't be due to someone adding our region
		   in between */
		spin_unlock(&as->lock);
		return -ENOMEM;
	}
	spin_unlock(&as->lock);

	/* fill the vm_area_struct to keep compatible with linux layer */
	vma.vm_start = addr;
	vma.vm_end = addr + len;
	vma.vm_page_prot = __pgprot(VM_READ|VM_WRITE);
	vma.vm_pgoff = 0;

	rv = file->f_op->mmap(file, &vma);
	if(rv) {
		spin_lock(&as->lock);
		__aspace_del_region(as, addr, len);
		spin_unlock(&as->lock);
		return rv;
	}
	return vma.vm_start;
}
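For reference, a short user-space sketch of the anonymous-private path handled above; it assumes the usual libc mmap() wrapper and passes a page-aligned length, since the handler rejects lengths that are not multiples of PAGE_SIZE.

#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

/* Sketch: grab one zero-filled scratch page via MAP_ANONYMOUS | MAP_PRIVATE. */
static void *grab_scratch_page(void)
{
	size_t len = 4096;	/* must be page-aligned for the handler above */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return NULL;
	memset(p, 0, len);	/* redundant: the handler already zeroes the region */
	return p;
}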
Example #13
static void
trap_dispatch(struct trapframe *tf) {
    char c;

    int ret=0;

    switch (tf->tf_trapno) {
    case T_PGFLT:  //page fault
        if ((ret = pgfault_handler(tf)) != 0) {
            print_trapframe(tf);
            if (current == NULL) {
                panic("handle pgfault failed. ret=%d\n", ret);
            }
            else {
                if (trap_in_kernel(tf)) {
                    panic("handle pgfault failed in kernel mode. ret=%d\n", ret);
                }
                cprintf("killed by kernel.\n");
                panic("handle user mode pgfault failed. ret=%d\n", ret); 
                do_exit(-E_KILLED);
            }
        }
        break;
    case T_SYSCALL:
        syscall();
        break;
    case IRQ_OFFSET + IRQ_TIMER:
#if 0
    LAB3 : If some page replacement algorithm (such as CLOCK PRA) needs the tick to change the priority of pages,
    then you can add code here.
#endif
        /* LAB1 YOUR CODE : STEP 3 */
        /* handle the timer interrupt */
        /* (1) After a timer interrupt, you should record this event using a global variable (increase it), such as ticks in kern/driver/clock.c
         * (2) Every TICK_NUM cycles, you can print some info using a function, such as print_ticks().
         * (3) Too Simple? Yes, I think so!
         */
        /* LAB5 YOUR CODE */
        /* you should update your lab1 code (just add ONE or TWO lines of code):
         *    Every TICK_NUM cycle, you should set current process's current->need_resched = 1
         */
        ticks ++;
        /* LAB6 YOUR CODE */
        /* IMPORTANT FUNCTIONS:
	     * run_timer_list
	     *----------------------
	     * you should update your lab5 code (just add ONE or TWO lines of code):
         *    Every tick, you should update the system time, iterate the timers, and trigger the expired timers so that the scheduler gets called.
         *    You can use one function to finish all these things.
         */
        assert(current != NULL);
        run_timer_list();
        break;
    case IRQ_OFFSET + IRQ_COM1:
        c = cons_getc();
        cprintf("serial [%03d] %c\n", c, c);
        break;
    case IRQ_OFFSET + IRQ_KBD:
        c = cons_getc();
        cprintf("kbd [%03d] %c\n", c, c);
        break;
    //LAB1 CHALLENGE 1 : YOUR CODE you should modify below codes.
    case T_SWITCH_TOU:
    case T_SWITCH_TOK:
        panic("T_SWITCH_** ??\n");
        break;
    case IRQ_OFFSET + IRQ_IDE1:
    case IRQ_OFFSET + IRQ_IDE2:
        /* do nothing */
        break;
    default:
        print_trapframe(tf);
        if (current != NULL) {
            cprintf("unhandled trap.\n");
            do_exit(-E_KILLED);
        }
        // in kernel, it must be a mistake
        panic("unexpected trap in kernel.\n");

    }
}
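The LAB comments above describe per-tick work that this copy only partially shows; here is a minimal sketch of the commonly seen ucore answer, assuming the usual TICK_NUM constant and the need_resched field of the current process (both referenced by the lab text, not defined in this excerpt).

/* Sketch of the per-tick work the LAB1/LAB5/LAB6 comments ask for,
 * factored into a helper that the IRQ_TIMER case would call. */
static void on_timer_tick(void) {
    ticks++;
    if (ticks % TICK_NUM == 0) {
        assert(current != NULL);
        current->need_resched = 1;  /* ask for a reschedule on trap return */
    }
    run_timer_list();               /* advance timers; fire the expired ones */
}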
Example #14
/**
 * <Ring 0> Send a message to the dest proc. If dest is blocked waiting for
 * the message, copy the message to it and unblock dest. Otherwise the caller
 * will be blocked and appended to the dest's sending queue.
 * 
 * @param current  The caller, the sender.
 * @param dest     To whom the message is sent.
 * @param m        The message.
 * 
 * @return Zero on success.
 *****************************************************************************/
PRIVATE int msg_send(struct proc* current, int dest, MESSAGE* m)
{
	struct proc* sender = current;
	struct proc* p_dest = proc_table + dest; /* proc dest */

	assert(proc2pid(sender) != dest);

	/* check for deadlock here */
	if (deadlock(proc2pid(sender), dest)) {
		panic(">>DEADLOCK<< %s->%s", sender->name, p_dest->name);
	}

	if ((p_dest->p_flags & RECEIVING) && /* dest is waiting for the msg */
	    (p_dest->p_recvfrom == proc2pid(sender) ||
	     p_dest->p_recvfrom == ANY)) {
		assert(p_dest->p_msg);
		assert(m);

		phys_copy(va2la(dest, p_dest->p_msg),
			  va2la(proc2pid(sender), m),
			  sizeof(MESSAGE));
		p_dest->p_msg = 0;
		p_dest->p_flags &= ~RECEIVING; /* dest has received the msg */
		p_dest->p_recvfrom = NO_TASK;
		unblock(p_dest);

		assert(p_dest->p_flags == 0);
		assert(p_dest->p_msg == 0);
		assert(p_dest->p_recvfrom == NO_TASK);
		assert(p_dest->p_sendto == NO_TASK);
		assert(sender->p_flags == 0);
		assert(sender->p_msg == 0);
		assert(sender->p_recvfrom == NO_TASK);
		assert(sender->p_sendto == NO_TASK);
	}
	else { /* dest is not waiting for the msg */
		sender->p_flags |= SENDING;
		assert(sender->p_flags == SENDING);
		sender->p_sendto = dest;
		sender->p_msg = m;

		/* append to the sending queue */
		struct proc * p;
		if (p_dest->q_sending) {
			p = p_dest->q_sending;
			while (p->next_sending)
				p = p->next_sending;
			p->next_sending = sender;
		}
		else {
			p_dest->q_sending = sender;
		}
		sender->next_sending = 0;

		block(sender);

		assert(sender->p_flags == SENDING);
		assert(sender->p_msg != 0);
		assert(sender->p_recvfrom == NO_TASK);
		assert(sender->p_sendto == dest);
	}

	return 0;
}
Example #15
static void
linux_syscall_fancy(register_t code, struct lwp *l, struct frame *frame)
{
	struct proc *p = l->l_proc;
	const struct sysent *callp;
	int error, nsys;
	size_t argsize;
	register_t args[8], rval[2];

	nsys = p->p_emul->e_nsysent;
	callp = p->p_emul->e_sysent;

	if (code < 0 || code >= nsys)
		callp += p->p_emul->e_nosys;		/* illegal */
	else
		callp += code;

	argsize = callp->sy_argsize;

	/*
	 * Linux passes the args in d1-d5
	 */
	switch (argsize) {
	case 20:
		args[4] = frame->f_regs[D5];
	case 16:
		args[3] = frame->f_regs[D4];
	case 12:
		args[2] = frame->f_regs[D3];
	case 8:
		args[1] = frame->f_regs[D2];
	case 4:
		args[0] = frame->f_regs[D1];
	case 0:
		break;
	default:
		panic("linux syscall %d weird argsize %d",
			code, argsize);
		break;
	}

	if ((error = trace_enter(code, callp, args)) != 0)
		goto out;

	rval[0] = 0;
	rval[1] = frame->f_regs[D1];
	error = sy_call(callp, l, args, rval);
out:
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		frame->f_regs[D0] = rval[0];
		frame->f_regs[D1] = rval[1];
		frame->f_sr &= ~PSL_C;	/* carry bit */
		break;
	case ERESTART:
		/*
		 * We always enter through a `trap' instruction, which is 2
		 * bytes, so adjust the pc by that amount.
		 */
		frame->f_pc = frame->f_pc - 2;
		break;
	case EJUSTRETURN:
		/* nothing to do */
		break;
	default:
		if (p->p_emul->e_errno)
			error = p->p_emul->e_errno[error];
		frame->f_regs[D0] = error;
		frame->f_sr |= PSL_C;	/* carry bit */
		break;
	}

	trace_exit(code, callp, args, rval, error);
}
Example #16
/* convert BFD symbols flags to a printable string */
static char *			/* symbol flags string */
flags2str(unsigned int flags)	/* bfd symbol flags */
{
  static char buf[256];
  char *p;

  if (!flags)
    return "";

  p = buf;
  *p = '\0';

  if (flags & BSF_LOCAL)
    {
      *p++ = 'L';
      *p++ = '|';
    }
  if (flags & BSF_GLOBAL)
    {
      *p++ = 'G';
      *p++ = '|';
    }
  if (flags & BSF_DEBUGGING)
    {
      *p++ = 'D';
      *p++ = '|';
    }
  if (flags & BSF_FUNCTION)
    {
      *p++ = 'F';
      *p++ = '|';
    }
  if (flags & BSF_KEEP)
    {
      *p++ = 'K';
      *p++ = '|';
    }
  if (flags & BSF_KEEP_G)
    {
      *p++ = 'k'; *p++ = '|';
    }
  if (flags & BSF_WEAK)
    {
      *p++ = 'W';
      *p++ = '|';
    }
  if (flags & BSF_SECTION_SYM)
    {
      *p++ = 'S'; *p++ = '|';
    }
  if (flags & BSF_OLD_COMMON)
    {
      *p++ = 'O';
      *p++ = '|';
    }
  if (flags & BSF_NOT_AT_END)
    {
      *p++ = 'N';
      *p++ = '|';
    }
  if (flags & BSF_CONSTRUCTOR)
    {
      *p++ = 'C';
      *p++ = '|';
    }
  if (flags & BSF_WARNING)
    {
      *p++ = 'w';
      *p++ = '|';
    }
  if (flags & BSF_INDIRECT)
    {
      *p++ = 'I';
      *p++ = '|';
    }
  if (flags & BSF_FILE)
    {
      *p++ = 'f';
      *p++ = '|';
    }

  if (p == buf)
    panic("no flags detected");

  *--p = '\0';
  return buf;
}
Example #17
/*
 * Send an interrupt to process.
 */
void
sendsig_sigcontext(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	struct frame *frame = (struct frame *)l->l_md.md_regs;
	int onstack, error;
	int sig = ksi->ksi_signo;
	u_long code = KSI_TRAPCODE(ksi);
	struct sigframe_sigcontext *fp = getframe(l, sig, &onstack), kf;
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	short ft = frame->f_format;

	fp--;

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig(%d): sig %d ssp %p usp %p scp %p ft %d\n",
		    p->p_pid, sig, &onstack, fp, &fp->sf_sc, ft);
#endif

	/* Build stack frame for signal trampoline. */
	switch (ps->sa_sigdesc[sig].sd_vers) {
	case 0:		/* legacy on-stack sigtramp */
		kf.sf_ra = (int)p->p_sigctx.ps_sigcode;
		break;

	case 1:
		kf.sf_ra = (int)ps->sa_sigdesc[sig].sd_tramp;
		break;

	default:
		/* Don't know what trampoline version; kill it. */
		sigexit(l, SIGILL);
	}

	kf.sf_signum = sig;
	kf.sf_code = code;
	kf.sf_scp = &fp->sf_sc;

	/*
	 * Save necessary hardware state.  Currently this includes:
	 *	- general registers
	 *	- original exception frame (if not a "normal" frame)
	 *	- FP coprocessor state
	 */
	kf.sf_state.ss_flags = SS_USERREGS;
	memcpy(kf.sf_state.ss_frame.f_regs, frame->f_regs,
	    sizeof(frame->f_regs));
	if (ft >= FMT4) {
#ifdef DEBUG
		if (ft > 15 || exframesize[ft] < 0)
			panic("sendsig: bogus frame type");
#endif
		kf.sf_state.ss_flags |= SS_RTEFRAME;
		kf.sf_state.ss_frame.f_format = frame->f_format;
		kf.sf_state.ss_frame.f_vector = frame->f_vector;
		memcpy(&kf.sf_state.ss_frame.F_u, &frame->F_u,
		    (size_t) exframesize[ft]);
		/*
		 * Leave an indicator that we need to clean up the kernel
		 * stack.  We do this by setting the "pad word" above the
		 * hardware stack frame to the amount the stack must be
		 * adjusted by.
		 *
		 * N.B. we increment rather than just set f_stackadj in
		 * case we are called from syscall when processing a
		 * sigreturn.  In that case, f_stackadj may be non-zero.
		 */
		frame->f_stackadj += exframesize[ft];
		frame->f_format = frame->f_vector = 0;
#ifdef DEBUG
		if (sigdebug & SDB_FOLLOW)
			printf("sendsig(%d): copy out %d of frame %d\n",
			    p->p_pid, exframesize[ft], ft);
#endif
	}

	if (fputype) {
		kf.sf_state.ss_flags |= SS_FPSTATE;
		m68881_save(&kf.sf_state.ss_fpstate);
	}
#ifdef DEBUG
	if ((sigdebug & SDB_FPSTATE) && *(char *)&kf.sf_state.ss_fpstate)
		printf("sendsig(%d): copy out FP state (%x) to %p\n",
		    p->p_pid, *(u_int *)&kf.sf_state.ss_fpstate,
		    &kf.sf_state.ss_fpstate);
#endif

	/* Build the signal context to be used by sigreturn. */
	kf.sf_sc.sc_sp = frame->f_regs[SP];
	kf.sf_sc.sc_fp = frame->f_regs[A6];
	kf.sf_sc.sc_ap = (int)&fp->sf_state;
	kf.sf_sc.sc_pc = frame->f_pc;
	kf.sf_sc.sc_ps = frame->f_sr;

	/* Save signal stack. */
	kf.sf_sc.sc_onstack = l->l_sigstk.ss_flags & SS_ONSTACK;

	/* Save signal mask. */
	kf.sf_sc.sc_mask = *mask;

#ifdef COMPAT_13
	/*
	 * XXX We always have to save an old style signal mask because
	 * XXX we might be delivering a signal to a process which will
	 * XXX escape from the signal in a non-standard way and invoke
	 * XXX sigreturn() directly.
	 */
	native_sigset_to_sigset13(mask, &kf.sf_sc.__sc_mask13);
#endif
	sendsig_reset(l, sig);
	mutex_exit(p->p_lock);
	error = copyout(&kf, fp, sizeof(kf));
	mutex_enter(p->p_lock);

	if (error != 0) {
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig(%d): copyout failed on sig %d\n",
			    p->p_pid, sig);
#endif
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sendsig(%d): sig %d scp %p fp %p sc_sp %x sc_ap %x\n",
		    p->p_pid, sig, kf.sf_scp, fp,
		    kf.sf_sc.sc_sp, kf.sf_sc.sc_ap);
#endif

	buildcontext(l, catcher, fp);

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig(%d): sig %d returns\n",
		    p->p_pid, sig);
#endif
}
Example #18
/* load symbols out of FNAME */
void
sym_loadsyms(char *fname,	/* file name containing symbols */
	     int load_locals)	/* load local symbols */
{
  int i, debug_cnt;
#ifdef BFD_LOADER
  bfd *abfd;
  asymbol **syms;
  int storage, nsyms;
#else /* !BFD_LOADER */
  int len;
  FILE *fobj;
  struct ecoff_filehdr fhdr;
  struct ecoff_aouthdr ahdr;
  struct ecoff_symhdr_t symhdr;
  char *strtab = NULL;
  struct ecoff_EXTR *extr;
#endif /* BFD_LOADER */

  if (syms_loaded)
    {
      /* symbols are already loaded */
      /* FIXME: can't handle symbols from multiple files */
      return;
    }

#ifdef BFD_LOADER

  /* load the program into memory, try both endians */
  if (!(abfd = bfd_openr(fname, "ss-coff-big")))
    if (!(abfd = bfd_openr(fname, "ss-coff-little")))
      fatal("cannot open executable `%s'", fname);

  /* this call is mainly for its side effect of reading in the sections.
     we follow the traditional behavior of `strings' in that we don't
     complain if we don't recognize a file to be an object file.  */
  if (!bfd_check_format(abfd, bfd_object))
    {
      bfd_close(abfd);
      fatal("cannot open executable `%s'", fname);
    }

  /* sanity check, endian should be the same as loader.c encountered */
  if (abfd->xvec->byteorder_big_p != (unsigned)ld_target_big_endian)
    panic("binary endian changed");

  if ((bfd_get_file_flags(abfd) & (HAS_SYMS|HAS_LOCALS)))
    {
      /* file has locals, read them in */
      storage = bfd_get_symtab_upper_bound(abfd);
      if (storage <= 0)
	fatal("HAS_SYMS is set, but `%s' still lacks symbols", fname);

      syms = (asymbol **)calloc(storage, 1);
      if (!syms)
	fatal("out of virtual memory");

      nsyms = bfd_canonicalize_symtab (abfd, syms);
      if (nsyms <= 0)
	fatal("HAS_SYMS is set, but `%s' still lacks symbols", fname);

      /*
       * convert symbols to local format
       */

      /* first count symbols */
      sym_ndatasyms = 0; sym_ntextsyms = 0;
      for (i=0; i < nsyms; i++)
	{
	  asymbol *sym = syms[i];

	  /* decode symbol type */
	  if (/* from the data section */
	      (!strcmp(sym->section->name, ".rdata")
	       || !strcmp(sym->section->name, ".data")
	       || !strcmp(sym->section->name, ".sdata")
	       || !strcmp(sym->section->name, ".bss")
	       || !strcmp(sym->section->name, ".sbss"))
	      /* from a scope we are interested in */
	      && RELEVANT_SCOPE(sym))
	    {
	      /* data segment symbol */
	      sym_ndatasyms++;
#ifdef PRINT_SYMS
	      fprintf(stderr,
		      "+sym: %s  sect: %s  flags: %s  value: 0x%08lx\n",
		      sym->name, sym->section->name, flags2str(sym->flags),
		      sym->value + sym->section->vma);
#endif /* PRINT_SYMS */
	    }
	  else if (/* from the text section */
		   !strcmp(sym->section->name, ".text")
		   /* from a scope we are interested in */
		   && RELEVANT_SCOPE(sym))
	    {
	      /* text segment symbol */
	      sym_ntextsyms++;
#ifdef PRINT_SYMS
	      fprintf(stderr,
		      "+sym: %s  sect: %s  flags: %s  value: 0x%08lx\n",
		      sym->name, sym->section->name, flags2str(sym->flags),
		      sym->value + sym->section->vma);
#endif /* PRINT_SYMS */
	    }
	  else
	    {
	      /* non-segment sections */
#ifdef PRINT_SYMS
	      fprintf(stderr,
		      "-sym: %s  sect: %s  flags: %s  value: 0x%08lx\n",
		      sym->name, sym->section->name, flags2str(sym->flags),
		      sym->value + sym->section->vma);
#endif /* PRINT_SYMS */
	    }
	}
      sym_nsyms = sym_ntextsyms + sym_ndatasyms;
      if (sym_nsyms <= 0)
	fatal("`%s' has no text or data symbols", fname);

      /* allocate symbol space */
      sym_db = (struct sym_sym_t *)calloc(sym_nsyms, sizeof(struct sym_sym_t));
      if (!sym_db)
	fatal("out of virtual memory");

      /* convert symbols to internal format */
      for (debug_cnt=0, i=0; i < nsyms; i++)
	{
	  asymbol *sym = syms[i];

	  /* decode symbol type */
	  if (/* from the data section */
	      (!strcmp(sym->section->name, ".rdata")
	       || !strcmp(sym->section->name, ".data")
	       || !strcmp(sym->section->name, ".sdata")
	       || !strcmp(sym->section->name, ".bss")
	       || !strcmp(sym->section->name, ".sbss"))
	      /* from a scope we are interested in */
	      && RELEVANT_SCOPE(sym))
	    {
	      /* data segment symbol, insert into symbol database */
	      sym_db[debug_cnt].name = mystrdup((char *)sym->name);
	      sym_db[debug_cnt].seg = ss_data;
	      sym_db[debug_cnt].initialized =
		(!strcmp(sym->section->name, ".rdata")
		 || !strcmp(sym->section->name, ".data")
		 || !strcmp(sym->section->name, ".sdata"));
	      sym_db[debug_cnt].pub = (sym->flags & BSF_GLOBAL);
	      sym_db[debug_cnt].local = (sym->name[0] == '$');
	      sym_db[debug_cnt].addr = sym->value + sym->section->vma;

	      debug_cnt++;
	    }
	  else if (/* from the text section */
		   !strcmp(sym->section->name, ".text")
		   /* from a scope we are interested in */
		   && RELEVANT_SCOPE(sym))
	    {
	      /* text segment symbol, insert into symbol database */
	      sym_db[debug_cnt].name = mystrdup((char *)sym->name);
	      sym_db[debug_cnt].seg = ss_text;
	      sym_db[debug_cnt].initialized = /* seems reasonable */TRUE;
	      sym_db[debug_cnt].pub = (sym->flags & BSF_GLOBAL);
	      sym_db[debug_cnt].local = (sym->name[0] == '$');
	      sym_db[debug_cnt].addr = sym->value + sym->section->vma;

	      debug_cnt++;
	    }
	  else
	    {
	      /* non-segment sections */
	    }
	}
      /* sanity check */
      if (debug_cnt != sym_nsyms)
	panic("could not locate all counted symbols");

      /* release bfd symbol storage */
      free(syms);
    }

  /* done with the file, close it */
  if (!bfd_close(abfd))
    fatal("could not close executable `%s'", fname);

#else /* !BFD_LOADER */

  /* load the program into memory, try both endians */
#if defined(__CYGWIN32__) || defined(_MSC_VER)
  fobj = fopen(fname, "rb");
#else
  fobj = fopen(fname, "r");
#endif
  if (!fobj)
    fatal("cannot open executable `%s'", fname);

  if (fread(&fhdr, sizeof(struct ecoff_filehdr), 1, fobj) < 1)
    fatal("cannot read header from executable `%s'", fname);

  /* record endian of target */
  if (fhdr.f_magic != ECOFF_EB_MAGIC && fhdr.f_magic != ECOFF_EL_MAGIC)
    fatal("bad magic number in executable `%s'", fname);

  if (fread(&ahdr, sizeof(struct ecoff_aouthdr), 1, fobj) < 1)
    fatal("cannot read AOUT header from executable `%s'", fname);

  /* seek to the beginning of the symbolic header */
  fseek(fobj, fhdr.f_symptr, 0);

  if (fread(&symhdr, sizeof(struct ecoff_symhdr_t), 1, fobj) < 1)
    fatal("could not read symbolic header from executable `%s'", fname);

  if (symhdr.magic != ECOFF_magicSym)
    fatal("bad magic number (0x%x) in symbolic header", symhdr.magic);

  /* allocate space for the string table */
  len = symhdr.issMax + symhdr.issExtMax;
  strtab = (char *)calloc(len, sizeof(char));
  if (!strtab)
    fatal("out of virtual memory");

  /* read all the symbol names into memory */
  fseek(fobj, symhdr.cbSsOffset, 0);
  if (fread(strtab, len, 1, fobj) < 1)
    fatal("error while reading symbol table names");

  /* allocate symbol space */
  len = symhdr.isymMax + symhdr.iextMax;
  if (len <= 0)
    fatal("`%s' has no text or data symbols", fname);
  sym_db = (struct sym_sym_t *)calloc(len, sizeof(struct sym_sym_t));
  if (!sym_db)
    fatal("out of virtual memory");

  /* allocate space for the external symbol entries */
  extr =
    (struct ecoff_EXTR *)calloc(symhdr.iextMax, sizeof(struct ecoff_EXTR));
  if (!extr)
    fatal("out of virtual memory");

  fseek(fobj, symhdr.cbExtOffset, 0);
  if (fread(extr, sizeof(struct ecoff_EXTR), symhdr.iextMax, fobj)
      != (size_t)symhdr.iextMax)
    fatal("error reading external symbol entries");

  sym_nsyms = 0; sym_ndatasyms = 0; sym_ntextsyms = 0;

  /* convert symbols to internal format */
  for (i=0; i < symhdr.iextMax; i++)
    {
      int str_offset;

      str_offset = symhdr.issMax + extr[i].asym.iss;

#if 0
      printf("ext %2d: ifd = %2d, iss = %3d, value = %8x, st = %3x, "
	     "sc = %3x, index = %3x\n",
	     i, extr[i].ifd,
	     extr[i].asym.iss, extr[i].asym.value,
	     extr[i].asym.st, extr[i].asym.sc,
	     extr[i].asym.index);
      printf("       %08x %2d %2d %s\n",
	     extr[i].asym.value,
	     extr[i].asym.st,
	     extr[i].asym.sc,
	     &strtab[str_offset]);
#endif

      switch (extr[i].asym.st)
	{
	case ECOFF_stGlobal:
	case ECOFF_stStatic:
	  /* from data segment */
	  sym_db[sym_nsyms].name = mystrdup(&strtab[str_offset]);
	  sym_db[sym_nsyms].seg = ss_data;
	  sym_db[sym_nsyms].initialized = /* FIXME: ??? */TRUE;
	  sym_db[sym_nsyms].pub = /* FIXME: ??? */TRUE;
	  sym_db[sym_nsyms].local = /* FIXME: ??? */FALSE;
	  sym_db[sym_nsyms].addr = extr[i].asym.value;
	  sym_nsyms++;
	  sym_ndatasyms++;
	  break;

	case ECOFF_stProc:
	case ECOFF_stStaticProc:
	case ECOFF_stLabel:
	  /* from text segment */
	  sym_db[sym_nsyms].name = mystrdup(&strtab[str_offset]);
	  sym_db[sym_nsyms].seg = ss_text;
	  sym_db[sym_nsyms].initialized = /* FIXME: ??? */TRUE;
	  sym_db[sym_nsyms].pub = /* FIXME: ??? */TRUE;
	  sym_db[sym_nsyms].local = /* FIXME: ??? */FALSE;
	  sym_db[sym_nsyms].addr = extr[i].asym.value;
	  sym_nsyms++;
	  sym_ntextsyms++;
	  break;

	default:
	  /* FIXME: ignored... */;
	}
    }
  free(extr);

  /* done with the executable, close it */
  if (fclose(fobj))
    fatal("could not close executable `%s'", fname);

#endif /* BFD_LOADER */

  /*
   * generate various sortings
   */

  /* all symbols sorted by address and name */
  sym_syms =
    (struct sym_sym_t **)calloc(sym_nsyms, sizeof(struct sym_sym_t *));
  if (!sym_syms)
    fatal("out of virtual memory");

  sym_syms_by_name =
    (struct sym_sym_t **)calloc(sym_nsyms, sizeof(struct sym_sym_t *));
  if (!sym_syms_by_name)
    fatal("out of virtual memory");

  for (debug_cnt=0, i=0; i<sym_nsyms; i++)
    {
      sym_syms[debug_cnt] = &sym_db[i];
      sym_syms_by_name[debug_cnt] = &sym_db[i];
      debug_cnt++;
    }
  /* sanity check */
  if (debug_cnt != sym_nsyms)
    panic("could not locate all symbols");

  /* sort by address */
  qsort(sym_syms, sym_nsyms, sizeof(struct sym_sym_t *), (void *)acmp);

  /* sort by name */
  qsort(sym_syms_by_name, sym_nsyms, sizeof(struct sym_sym_t *), (void *)ncmp);

  /* text segment sorted by address and name */
  sym_textsyms =
    (struct sym_sym_t **)calloc(sym_ntextsyms, sizeof(struct sym_sym_t *));
  if (!sym_textsyms)
    fatal("out of virtual memory");

  sym_textsyms_by_name =
    (struct sym_sym_t **)calloc(sym_ntextsyms, sizeof(struct sym_sym_t *));
  if (!sym_textsyms_by_name)
    fatal("out of virtual memory");

  for (debug_cnt=0, i=0; i<sym_nsyms; i++)
    {
      if (sym_db[i].seg == ss_text)
	{
	  sym_textsyms[debug_cnt] = &sym_db[i];
	  sym_textsyms_by_name[debug_cnt] = &sym_db[i];
	  debug_cnt++;
	}
    }
  /* sanity check */
  if (debug_cnt != sym_ntextsyms)
    panic("could not locate all text symbols");

  /* sort by address */
  qsort(sym_textsyms, sym_ntextsyms, sizeof(struct sym_sym_t *), (void *)acmp);

  /* sort by name */
  qsort(sym_textsyms_by_name, sym_ntextsyms,
	sizeof(struct sym_sym_t *), (void *)ncmp);

  /* data segment sorted by address and name */
  sym_datasyms =
    (struct sym_sym_t **)calloc(sym_ndatasyms, sizeof(struct sym_sym_t *));
  if (!sym_datasyms)
    fatal("out of virtual memory");

  sym_datasyms_by_name =
    (struct sym_sym_t **)calloc(sym_ndatasyms, sizeof(struct sym_sym_t *));
  if (!sym_datasyms_by_name)
    fatal("out of virtual memory");

  for (debug_cnt=0, i=0; i<sym_nsyms; i++)
    {
      if (sym_db[i].seg == ss_data)
	{
	  sym_datasyms[debug_cnt] = &sym_db[i];
	  sym_datasyms_by_name[debug_cnt] = &sym_db[i];
	  debug_cnt++;
	}
    }
  /* sanity check */
  if (debug_cnt != sym_ndatasyms)
    panic("could not locate all data symbols");
      
  /* sort by address */
  qsort(sym_datasyms, sym_ndatasyms, sizeof(struct sym_sym_t *), (void *)acmp);

  /* sort by name */
  qsort(sym_datasyms_by_name, sym_ndatasyms,
	sizeof(struct sym_sym_t *), (void *)ncmp);

  /* compute symbol sizes */
  for (i=0; i<sym_ntextsyms; i++)
    {
      sym_textsyms[i]->size =
	(i != (sym_ntextsyms - 1)
	 ? (sym_textsyms[i+1]->addr - sym_textsyms[i]->addr)
	 : ((ld_text_base + ld_text_size) - sym_textsyms[i]->addr));
    }
  for (i=0; i<sym_ndatasyms; i++)
    {
      sym_datasyms[i]->size =
	(i != (sym_ndatasyms - 1)
	 ? (sym_datasyms[i+1]->addr - sym_datasyms[i]->addr)
	 : ((ld_data_base + ld_data_size) - sym_datasyms[i]->addr));
    }

  /* symbols are now available for use */
  syms_loaded = TRUE;
}
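The sorting and size-computation step at the end of sym_loadsyms() is self-contained enough to illustrate on its own: sort symbol pointers by address, then let each symbol run up to the next one (or to the end of its segment). Below is a stand-alone sketch under invented names (struct sym, seg_end); unlike the original, the qsort comparator is given a proper prototype instead of being cast through (void *).

/*
 * Sketch of "sort by address, then size each symbol by the gap to its
 * successor".  The struct and the segment end value are made up; the real
 * code uses struct sym_sym_t and ld_text_base/ld_text_size.
 */
#include <stdio.h>
#include <stdlib.h>

struct sym {
	const char *name;
	unsigned long addr;
	unsigned long size;
};

static int
acmp(const void *a, const void *b)
{
	const struct sym *sa = *(const struct sym * const *)a;
	const struct sym *sb = *(const struct sym * const *)b;

	return (sa->addr > sb->addr) - (sa->addr < sb->addr);
}

int
main(void)
{
	static struct sym db[] = {
		{ "main",   0x400100, 0 },
		{ "_start", 0x400000, 0 },
		{ "helper", 0x400040, 0 },
	};
	const unsigned long seg_end = 0x400200;	/* text base + text size */
	const int n = sizeof(db) / sizeof(db[0]);
	struct sym *byaddr[sizeof(db) / sizeof(db[0])];
	int i;

	for (i = 0; i < n; i++)
		byaddr[i] = &db[i];
	qsort(byaddr, n, sizeof(byaddr[0]), acmp);

	/* Each symbol extends to the next symbol, or to the segment end. */
	for (i = 0; i < n; i++) {
		byaddr[i]->size = (i != n - 1)
		    ? byaddr[i + 1]->addr - byaddr[i]->addr
		    : seg_end - byaddr[i]->addr;
		printf("%-8s 0x%lx size %lu\n",
		    byaddr[i]->name, byaddr[i]->addr, byaddr[i]->size);
	}
	return 0;
}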
Exemplo n.º 19
0
void
ehci_arbus_attach(device_t parent, device_t self, void *aux)
{
	ehci_softc_t *sc = device_private(self);
	struct arbus_attach_args * const aa = aux;
	void *ih = NULL;
	int error;

	sc->iot = aa->aa_bst_le;
	sc->sc_size = aa->aa_size;
	//sc->sc_bus.ub_hcpriv = sc;
	sc->sc_bus.ub_dmatag = aa->aa_dmat;
	sc->sc_bus.ub_revision = USBREV_1_0;
	sc->sc_flags |= EHCIF_ETTF;
	sc->sc_vendor_init = ehci_arbus_init;

	error = bus_space_map(aa->aa_bst, aa->aa_addr, aa->aa_size, 0,
	    &sc->ioh);

	if (error) {
		aprint_error(": failed to map registers: %d\n", error);
		return;
	}

	/* The recommended value is 0x20 for both ports and the host */
	REGVAL(AR9344_USB_CONFIG_BASE) = 0x20c00;	/* magic */
	DELAY(1000);

	/* get offset to operational regs */
	uint32_t r = bus_space_read_4(aa->aa_bst, sc->ioh, 0);
	if (r != 0x40) {
		aprint_error(": error: CAPLENGTH (%#x) != 0x40\n", sc->sc_offs);
		return;
	}

	sc->sc_offs = EREAD1(sc, EHCI_CAPLENGTH);

	aprint_normal("\n");

	/* Disable EHCI interrupts */
	EOWRITE4(sc, EHCI_USBINTR, 0);

	/* establish interrupt */
	ih = arbus_intr_establish(aa->aa_cirq, aa->aa_mirq, ehci_intr, sc);
	if (ih == NULL)
		panic("%s: couldn't establish interrupt",
		    device_xname(self));

	/*
	 * There are no companion controllers
	 */
	sc->sc_ncomp = 0;

	error = ehci_init(sc);
	if (error) {
		aprint_error("%s: init failed, error=%d\n", device_xname(self),
		    error);
		if (ih != NULL)
			arbus_intr_disestablish(ih);
		return;
	}

	/* Attach USB device */
	sc->sc_child = config_found(self, &sc->sc_bus, usbctlprint);
}
Exemplo n.º 20
0
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred)
{
	struct vnode *ovp = vp;
	daddr_t lastblock;
	struct inode *oip;
	daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	struct ext2_sb_info *fs;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, blocksreleased = 0;
	int i;
	int aflags, error, allerror;
	off_t osize;
/*
kprintf("ext2_truncate called %d to %d\n", VTOI(ovp)->i_number, length);
*/	/*
	 * negative file sizes will totally break the code below and
	 * are not meaningful anyway.
	 */
	if (length < 0)
	    return EFBIG;

	oip = VTOI(ovp);
	if (ovp->v_type == VLNK &&
	    oip->i_size < ovp->v_mount->mnt_maxsymlinklen) {
#if DIAGNOSTIC
		if (length != 0)
			panic("ext2_truncate: partial truncate of symlink");
#endif
		bzero((char *)&oip->i_shortlink, (u_int)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (EXT2_UPDATE(ovp, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (EXT2_UPDATE(ovp, 0));
	}
#if QUOTA
	if ((error = ext2_getinoquota(oip)) != 0)
		return (error);
#endif
	fs = oip->i_e2fs;
	osize = oip->i_size;
	ext2_discard_prealloc(oip);
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		offset = blkoff(fs, length - 1);
		lbn = lblkno(fs, length - 1);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		vnode_pager_setsize(ovp, length);
		error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, aflags);
		if (error) {
			vnode_pager_setsize(ovp, osize);
			return (error);
		}
		oip->i_size = length;
		if (aflags & IO_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (EXT2_UPDATE(ovp, 1));
	}
	/*
	 * Shorten the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zeroed in case it ever becomes accessible again because
	 * of subsequent file growth.
	 */
	/* I don't understand the comment above */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = ext2_balloc(oip, lbn, offset, cred, &bp, aflags);
		if (error)
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		bzero((char *)bp->b_data + offset, (u_int)(size - offset));
		allocbuf(bp, size);
		if (aflags & IO_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->s_blocksize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->s_blocksize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ext2_indirtrunc below.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = EXT2_UPDATE(ovp, 1);

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
	bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
	oip->i_size = osize;
	error = vtruncbuf(ovp, length, (int)fs->s_blocksize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ext2_indirtrunc(oip, indir_lbn[level],
			    fsbtodoff(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ext2_blkfree(oip, bn, fs->s_frag_size);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ext2_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("itrunc: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ext2_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#if DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("itrunc2");
	if (length == 0 && (!RB_EMPTY(&ovp->v_rbdirty_tree) ||
			    !RB_EMPTY(&ovp->v_rbclean_tree)))
		panic("itrunc3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	oip->i_blocks -= blocksreleased;
	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
	vnode_pager_setsize(ovp, length);
#if QUOTA
	ext2_chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}
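The lastblock / lastiblock arithmetic above decides how much of the direct, single-, double- and triple-indirect ranges survives a truncate. The following sketch runs the same arithmetic for an assumed 1024-byte block size with 12 direct blocks and 256 pointers per indirect block; the numbers are illustrative, not taken from the code above.

/*
 * Arithmetic sketch of the lastblock/lastiblock computation in
 * ext2_truncate().  NDADDR/NINDIR/BSIZE are example values.
 */
#include <stdio.h>

#define NDADDR	12
#define NINDIR	256		/* 1024-byte block / 4-byte pointer */
#define BSIZE	1024LL

int
main(void)
{
	long long length = 5LL * 1024 * 1024;	/* truncate to 5 MiB */
	long long lastblock, single, dbl, triple;

	/* Last logical block to keep; becomes -1 when truncating to 0. */
	lastblock = (length + BSIZE - 1) / BSIZE - 1;
	single = lastblock - NDADDR;
	dbl    = single - NINDIR;
	triple = dbl - (long long)NINDIR * NINDIR;

	printf("lastblock %lld\n", lastblock);
	printf("lastiblock: single %lld double %lld triple %lld\n",
	    single, dbl, triple);
	/* A negative value at some level means that whole level is freed. */
	return 0;
}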
Exemplo n.º 21
0
Arquivo: intr.c Projeto: 0day-ci/xen
void svm_intr_assist(void) 
{
    struct vcpu *v = current;
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    struct hvm_intack intack;
    enum hvm_intblk intblk;

    /* Crank the handle on interrupt state. */
    pt_update_irq(v);

    do {
        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            return;

        intblk = hvm_interrupt_blocked(v, intack);
        if ( intblk == hvm_intblk_svm_gif ) {
            ASSERT(nestedhvm_enabled(v->domain));
            return;
        }

        /* Interrupts for the nested guest are already
         * in the vmcb.
         */
        if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
        {
            int rc;

            /* l2 guest was running when an interrupt for
             * the l1 guest occurred.
             */
            rc = nestedsvm_vcpu_interrupt(v, intack);
            switch (rc) {
            case NSVM_INTR_NOTINTERCEPTED:
                /* Inject interrupt into 2nd level guest directly. */
                break;	
            case NSVM_INTR_NOTHANDLED:
            case NSVM_INTR_FORCEVMEXIT:
                return;
            case NSVM_INTR_MASKED:
                /* Guest already enabled an interrupt window. */
                return;
            default:
                panic("%s: nestedsvm_vcpu_interrupt can't handle value %#x",
                    __func__, rc);
            }
        }

        /*
         * Pending IRQs must be delayed if:
         * 1. An event is already pending. This is despite the fact that SVM
         *    provides a VINTR delivery method quite separate from the EVENTINJ
         *    mechanism. The event delivery can arbitrarily delay the injection
         *    of the vintr (for example, if the exception is handled via an
         *    interrupt gate, hence zeroing RFLAGS.IF). In the meantime:
         *    - the vTPR could be modified upwards, so we need to wait until
         *      the exception is delivered before we can safely decide that an
         *      interrupt is deliverable; and
         *    - the guest might look at the APIC/PIC state, so we ought not to
         *      have cleared the interrupt out of the IRR.
         * 2. The IRQ is masked.
         */
        if ( unlikely(vmcb->eventinj.fields.v) || intblk )
        {
            svm_enable_intr_window(v, intack);
            return;
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );

    if ( intack.source == hvm_intsrc_nmi )
    {
        svm_inject_nmi(v);
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
        svm_inject_extint(v, intack.vector);
        pt_intr_post(v, intack);
    }

    /* Is there another IRQ to queue up behind this one? */
    intack = hvm_vcpu_has_pending_irq(v);
    if ( unlikely(intack.source != hvm_intsrc_none) )
        svm_enable_intr_window(v, intack);
}
Exemplo n.º 22
0
static int
ext2_indirtrunc(struct inode *ip, daddr_t lbn, off_t doffset, daddr_t lastbn,
		int level, long *countp)
{
	int i;
	struct buf *bp;
	struct ext2_sb_info *fs = ip->i_e2fs;
	daddr_t *bap;
	struct vnode *vp;
	daddr_t *copy, nb, nlbn, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error = 0, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->s_blocksize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update the on-disk copy first.  Since
	 * the double (triple) indirect blocks are freed before the single
	 * (double) indirect blocks, calls to bmap on them will fail.
	 * However, we already have
	 * the on disk address, so we have to set the bio_offset field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lblktodoff(fs, lbn), (int)fs->s_blocksize, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		bp->b_flags &= ~(B_ERROR | B_INVAL);
		bp->b_cmd = BUF_CMD_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ext2_indirtrunc: bad buffer size");
		bp->b_bio2.bio_offset = doffset;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vfs_busy_pages(bp->b_vp, bp);
		vn_strategy(vp, &bp->b_bio1);
		error = biowait(&bp->b_bio1, "biord");
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	bap = (daddr_t *)bp->b_data;
	copy = kmalloc(fs->s_blocksize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->s_blocksize);
	bzero((caddr_t)&bap[last + 1],
	  (u_int)(NINDIR(fs) - (last + 1)) * sizeof (daddr_t));
	if (last == -1)
		bp->b_flags |= B_INVAL;
	error = bwrite(bp);
	if (error)
		allerror = error;
	bap = copy;

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ext2_indirtrunc(ip, nlbn,
			    fsbtodoff(fs, nb), (daddr_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ext2_blkfree(ip, nb, fs->s_blocksize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			error = ext2_indirtrunc(ip, nlbn, fsbtodoff(fs, nb),
						last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	kfree(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
}
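The factor computation in ext2_indirtrunc() maps a logical block number to a slot index at the current indirection level: each slot at a given level covers NINDIR^(level-1) data blocks, so the last slot to keep is lastbn / factor. Below is a stand-alone sketch with NINDIR fixed at 256 purely for illustration (the real value depends on the block size), and with the relative-offset details of lastiblock[] simplified away.

/* Sketch of the per-level "factor" index arithmetic. */
#include <stdio.h>

#define NINDIR	256
enum { SINGLE = 0, DOUBLE = 1, TRIPLE = 2 };

int
main(void)
{
	long lastbn = 70000;	/* last indirect-mapped block to keep */
	int level;

	for (level = SINGLE; level <= TRIPLE; level++) {
		long factor = 1, last = lastbn;
		int i;

		/* factor = NINDIR^(level - SINGLE), as in the original. */
		for (i = SINGLE; i < level; i++)
			factor *= NINDIR;
		if (lastbn > 0)
			last /= factor;
		printf("level %d: factor %ld, keep slots 0..%ld\n",
		    level, factor, last);
	}
	return 0;
}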
Exemplo n.º 23
0
void checkb(struct block *b, char *msg)
{
	void *dead = (void *)Bdead;
	struct extra_bdata *ebd;
	size_t extra_len = 0;

	if (b == dead)
		panic("checkb b %s 0x%lx", msg, b);
	if (b->base == dead || b->lim == dead || b->next == dead
		|| b->rp == dead || b->wp == dead) {
		printd("checkb: base 0x%8.8lx lim 0x%8.8lx next 0x%8.8lx\n",
			   b->base, b->lim, b->next);
		printd("checkb: rp 0x%8.8lx wp 0x%8.8lx\n", b->rp, b->wp);
		panic("checkb dead: %s\n", msg);
	}

	if (b->base > b->lim)
		panic("checkb 0 %s 0x%lx 0x%lx", msg, b->base, b->lim);
	if (b->rp < b->base)
		panic("checkb 1 %s 0x%lx 0x%lx", msg, b->base, b->rp);
	if (b->wp < b->base)
		panic("checkb 2 %s 0x%lx 0x%lx", msg, b->base, b->wp);
	if (b->rp > b->lim)
		panic("checkb 3 %s 0x%lx 0x%lx", msg, b->rp, b->lim);
	if (b->wp > b->lim)
		panic("checkb 4 %s 0x%lx 0x%lx", msg, b->wp, b->lim);
	if (b->nr_extra_bufs && !b->extra_data)
		panic("checkb 5 %s missing extra_data", msg);

	for (int i = 0; i < b->nr_extra_bufs; i++) {
		ebd = &b->extra_data[i];
		if (!ebd->base && (ebd->off || ebd->len))
			panic("checkb %s: ebd %d has no base, but has off %d and len %d",
			      msg, i, ebd->off, ebd->len);
		if (ebd->base) {
			if (!kmalloc_refcnt((void*)ebd->base))
				panic("checkb %s: buf %d, base %p has no refcnt!\n", msg, i,
				      ebd->base);
			extra_len += ebd->len;
		}
	}
	if (extra_len != b->extra_len)
		panic("checkb %s: block extra_len %d differs from sum of ebd len %d",
		      msg, b->extra_len, extra_len);
}
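checkb() is essentially an assertion of the block-buffer invariants base <= rp, wp <= lim. The sketch below states the same invariants in userland with a cut-down stand-in for struct block (pointer names kept, everything else simplified).

/* Userland sketch of the pointer invariants enforced by checkb(). */
#include <assert.h>
#include <stdint.h>

struct blk {
	uint8_t *base, *lim;	/* backing buffer [base, lim) */
	uint8_t *rp, *wp;	/* read / write positions */
};

static void
check_blk(const struct blk *b)
{
	assert(b->base <= b->lim);	/* checkb 0 */
	assert(b->rp >= b->base);	/* checkb 1 */
	assert(b->wp >= b->base);	/* checkb 2 */
	assert(b->rp <= b->lim);	/* checkb 3 */
	assert(b->wp <= b->lim);	/* checkb 4 */
}

int
main(void)
{
	uint8_t buf[64];
	struct blk b = { buf, buf + sizeof(buf), buf, buf + 16 };

	check_blk(&b);		/* passes: 16 bytes written, none read */
	return 0;
}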
Exemplo n.º 24
0
void
trap(struct trapframe *tf)
{
  if(tf->trapno == T_SYSCALL){
    if(proc->killed)
      exit();
    proc->tf = tf;
    syscall();
    if(proc->killed)
      exit();
    return;
  }

  switch(tf->trapno){
  case T_IRQ0 + IRQ_TIMER:
    if(cpu->id == 0){
      acquire(&tickslock);
      ticks++;
      wakeup(&ticks);
      release(&tickslock);
    }
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_IDE:
    ideintr();
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_IDE+1:
    // Bochs generates spurious IDE1 interrupts.
    break;
  case T_IRQ0 + IRQ_KBD:
    kbdintr();
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_COM1:
    uartintr();
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_ETH:
    ethintr();
    lapiceoi();
    break;
  case T_IRQ0 + 7:
  case T_IRQ0 + IRQ_SPURIOUS:
    cprintf("cpu%d: spurious interrupt at %x:%x\n",
            cpu->id, tf->cs, tf->eip);
    lapiceoi();
    break;
   
  default:
    if(proc == 0 || (tf->cs&3) == 0){
      // In kernel, it must be our mistake.
      cprintf("unexpected trap %d from cpu %d eip %x (cr2=0x%x)\n",
              tf->trapno, cpu->id, tf->eip, rcr2());
      panic("trap");
    }
    // In user space, assume process misbehaved.
    cprintf("pid %d %s: trap %d err %d on cpu %d "
            "eip 0x%x addr 0x%x--kill proc\n",
            proc->pid, proc->name, tf->trapno, tf->err, cpu->id, tf->eip, 
            rcr2());
    proc->killed = 1;
  }

  // Force process exit if it has been killed and is in user space.
  // (If it is still executing in the kernel, let it keep running 
  // until it gets to the regular system call return.)
  if(proc && proc->killed && (tf->cs&3) == DPL_USER)
    exit();

  // Force process to give up CPU on clock tick.
  // If interrupts were on while locks held, would need to check nlock.
  if(proc && proc->state == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
    yield();

  // Check if the process has been killed since we yielded
  if(proc && proc->killed && (tf->cs&3) == DPL_USER)
    exit();
}
Exemplo n.º 25
0
/*===========================================================================*
 *				do_getset				     *
 *===========================================================================*/
PUBLIC int do_getset()
{
/* Handle GETUID, GETGID, GETPID, GETPGRP, SETUID, SETGID, SETSID.  The four
 * GETs and SETSID return their primary results in 'r'.  GETUID, GETGID, and
 * GETPID also return secondary results (the effective IDs, or the parent
 * process ID) in 'reply_res2', which is returned to the user.
 */

  register struct mproc *rmp = mp;
  int r, proc;

  switch(call_nr) {
	case GETUID:
		r = rmp->mp_realuid;
		rmp->mp_reply.reply_res2 = rmp->mp_effuid;
		break;

	case GETGID:
		r = rmp->mp_realgid;
		rmp->mp_reply.reply_res2 = rmp->mp_effgid;
		break;

	case GETPID:
		r = mproc[who_p].mp_pid;
		rmp->mp_reply.reply_res2 = mproc[rmp->mp_parent].mp_pid;
		if(pm_isokendpt(m_in.endpt, &proc) == OK && proc >= 0)
			rmp->mp_reply.reply_res3 = mproc[proc].mp_pid;
		break;

	case SETEUID:
	case SETUID:
		if (rmp->mp_realuid != (uid_t) m_in.usr_id && 
				rmp->mp_effuid != SUPER_USER)
			return(EPERM);
		if(call_nr == SETUID) rmp->mp_realuid = (uid_t) m_in.usr_id;
		rmp->mp_effuid = (uid_t) m_in.usr_id;

		if (rmp->mp_fs_call != PM_IDLE)
		{
			panic(__FILE__, "do_getset: not idle",
				rmp->mp_fs_call);
		}
		rmp->mp_fs_call= PM_SETUID;
		r= notify(FS_PROC_NR);
		if (r != OK)
			panic(__FILE__, "do_getset: unable to notify FS", r);
		
		/* Do not reply until FS is ready to process the setuid
		 * request
		 */
		r= SUSPEND;
		break;

	case SETEGID:
	case SETGID:
		if (rmp->mp_realgid != (gid_t) m_in.grp_id && 
				rmp->mp_effuid != SUPER_USER)
			return(EPERM);
		if(call_nr == SETGID) rmp->mp_realgid = (gid_t) m_in.grp_id;
		rmp->mp_effgid = (gid_t) m_in.grp_id;

		if (rmp->mp_fs_call != PM_IDLE)
		{
			panic(__FILE__, "do_getset: not idle",
				rmp->mp_fs_call);
		}
		rmp->mp_fs_call= PM_SETGID;
		r= notify(FS_PROC_NR);
		if (r != OK)
			panic(__FILE__, "do_getset: unable to notify FS", r);

		/* Do not reply until FS is ready to process the setgid
		 * request
		 */
		r= SUSPEND;
		break;

	case SETSID:
		if (rmp->mp_procgrp == rmp->mp_pid) return(EPERM);
		rmp->mp_procgrp = rmp->mp_pid;

		if (rmp->mp_fs_call != PM_IDLE)
		{
			panic(__FILE__, "do_getset: not idle",
				rmp->mp_fs_call);
		}
		rmp->mp_fs_call= PM_SETSID;
		r= notify(FS_PROC_NR);
		if (r != OK)
			panic(__FILE__, "do_getset: unable to notify FS", r);

		/* Do not reply until FS is ready to process the setsid
		 * request
		 */
		r= SUSPEND;
		break;

	case GETPGRP:
		r = rmp->mp_procgrp;
		break;

	default:
		r = EINVAL;
		break;	
  }
  return(r);
}
Exemplo n.º 26
0
static int 
ip_fw_chk(struct ip **pip, int hlen,
	struct ifnet *oif, u_int16_t *cookie, struct mbuf **m,
	struct ip_fw_chain **flow_id)
{
	struct ip_fw_chain *chain;
	struct ip_fw *rule = NULL;
	struct ip *ip = NULL ;
	struct ifnet *const rif = (*m)->m_pkthdr.rcvif;
	u_short offset ;
	u_short src_port, dst_port;
#ifdef	IPFW_DIVERT_RESTART
	u_int16_t skipto = *cookie;
#else
	u_int16_t ignport = ntohs(*cookie);
#endif

	if (pip) { /* normal ip packet */
	    ip = *pip;
	    offset = (ip->ip_off & IP_OFFMASK);
	} else { /* bridged or non-ip packet */
	    struct ether_header *eh = mtod(*m, struct ether_header *);
	    switch (ntohs(eh->ether_type)) {
	    case ETHERTYPE_IP :
		if ((*m)->m_len<sizeof(struct ether_header) + sizeof(struct ip))
		    goto non_ip ;
		ip = (struct ip *)(eh + 1 );
		if (ip->ip_v != IPVERSION)
		    goto non_ip ;
		hlen = ip->ip_hl << 2;
		if (hlen < sizeof(struct ip)) /* minimum header length */
		    goto non_ip ;
		if ((*m)->m_len < 14 + hlen + 14) {
		    printf("-- m_len %d, need more...\n", (*m)->m_len);
		    goto non_ip ;
		}
		offset = (ip->ip_off & IP_OFFMASK);
		break ;
	    default :
non_ip:		ip = NULL ;
		break ;
	    }
	}

	if (*flow_id) {
	    if (fw_one_pass)
		return 0 ; /* accept if passed first test */
	    /*
	     * pkt has already been tagged. Look for the next rule
	     * to restart processing
	     */
	    if ( (chain = (*flow_id)->rule->next_rule_ptr) == NULL )
		chain = (*flow_id)->rule->next_rule_ptr =
			lookup_next_rule(*flow_id) ;
	    if (!chain)
		goto dropit;
	} else {
	    chain=LIST_FIRST(&ip_fw_chain);
#ifdef IPFW_DIVERT_RESTART
	    if ( skipto ) {
		/*
		 * If we've been asked to start at a given rule immediately,
		 * do so.
		 */
                if (skipto >= 65535)
                        goto dropit;
                while (chain && (chain->rule->fw_number <= skipto)) {
                        chain = LIST_NEXT(chain, chain);
                }
                if (! chain)
		    goto dropit;
	    }
#endif /* IPFW_DIVERT_RESTART */
        }
	*cookie = 0;
	for (; chain; chain = LIST_NEXT(chain, chain)) {
		register struct ip_fw *f;
again:
		f = chain->rule;

		if (oif) {
			/* Check direction outbound */
			if (!(f->fw_flg & IP_FW_F_OUT))
				continue;
		} else {
			/* Check direction inbound */
			if (!(f->fw_flg & IP_FW_F_IN))
				continue;
		}

		if (ip == NULL ) {
		    /*
		     * do relevant checks for non-ip packets:
		     * after this, only goto got_match or continue
		     */
		    struct ether_header *eh = mtod(*m, struct ether_header *);
		    int i, h, l ;
#if 0
		    printf("-- ip_fw: rule %d(%d) for %6D <- %6D type 0x%04x\n",
			    f->fw_number, IP_FW_GETNSRCP(f),
			    eh->ether_dhost, ".", eh->ether_shost, ".",
			    ntohs(eh->ether_type) );
#endif
		    /*
		     * make default rule always match or we have a panic
		     */
		    if (f->fw_number == 65535)
			goto got_match ;

		    /*
		     * temporary hack: 
		     *   udp from 0.0.0.0 means this rule applies.
		     *   1 src port is match ether type
		     *   2 src ports (interval) is match ether type
		     *   3 src ports is match ether address
		     */
		    if (f->fw_src.s_addr != 0 || f->fw_prot != IPPROTO_UDP)
			continue ;
		    switch (IP_FW_GETNSRCP(f)) {
		    case 1: /* match one type */
			if (  /* ( (f->fw_flg & IP_FW_F_INVSRC) != 0) ^ */
			      ( f->fw_pts[0] == ntohs(eh->ether_type) )  ) {
			    printf("match!\n");
			    goto got_match ;
			}
		    default:
			break ;
		    }
		    continue;
		}
		/* Fragments */
		if ((f->fw_flg & IP_FW_F_FRAG) && !(ip->ip_off & IP_OFFMASK))
			continue;

		/* If src-addr doesn't match, not this rule. */
		if (((f->fw_flg & IP_FW_F_INVSRC) != 0) ^ ((ip->ip_src.s_addr
		    & f->fw_smsk.s_addr) != f->fw_src.s_addr))
			continue;

		/* If dest-addr doesn't match, not this rule. */
		if (((f->fw_flg & IP_FW_F_INVDST) != 0) ^ ((ip->ip_dst.s_addr
		    & f->fw_dmsk.s_addr) != f->fw_dst.s_addr))
			continue;

		/* Interface check */
		if ((f->fw_flg & IF_FW_F_VIAHACK) == IF_FW_F_VIAHACK) {
			struct ifnet *const iface = oif ? oif : rif;

			/* Backwards compatibility hack for "via" */
			if (!iface || !iface_match(iface,
			    &f->fw_in_if, f->fw_flg & IP_FW_F_OIFNAME))
				continue;
		} else {
			/* Check receive interface */
			if ((f->fw_flg & IP_FW_F_IIFACE)
			    && (!rif || !iface_match(rif,
			      &f->fw_in_if, f->fw_flg & IP_FW_F_IIFNAME)))
				continue;
			/* Check outgoing interface */
			if ((f->fw_flg & IP_FW_F_OIFACE)
			    && (!oif || !iface_match(oif,
			      &f->fw_out_if, f->fw_flg & IP_FW_F_OIFNAME)))
				continue;
		}

		/* Check IP options */
		if (f->fw_ipopt != f->fw_ipnopt && !ipopts_match(ip, f))
			continue;

		/* Check protocol; if wildcard, match */
		if (f->fw_prot == IPPROTO_IP)
			goto got_match;

		/* If different, don't match */
		if (ip->ip_p != f->fw_prot) 
			continue;

#define PULLUP_TO(len)	do {						\
			    if ((*m)->m_len < (len) ) {			\
				if ( (*m = m_pullup(*m, (len))) == 0) 	\
				    goto bogusfrag;			\
				*pip = ip = mtod(*m, struct ip *);	\
				offset = (ip->ip_off & IP_OFFMASK);	\
			    }						\
			} while (0)

		/* Protocol specific checks */
		switch (ip->ip_p) {
		case IPPROTO_TCP:
		    {
			struct tcphdr *tcp;

			if (offset == 1)	/* cf. RFC 1858 */
				goto bogusfrag;
			if (offset != 0) {
				/*
				 * TCP flags and ports aren't available in this
				 * packet -- if this rule specified either one,
				 * we consider the rule a non-match.
				 */
				if (f->fw_nports != 0 ||
				    f->fw_tcpf != f->fw_tcpnf)
					continue;

				break;
			}
			PULLUP_TO(hlen + 14);
			tcp = (struct tcphdr *) ((u_long *)ip + ip->ip_hl);
			if (f->fw_tcpf != f->fw_tcpnf && !tcpflg_match(tcp, f))
				continue;
			src_port = ntohs(tcp->th_sport);
			dst_port = ntohs(tcp->th_dport);
			goto check_ports;
		    }

		case IPPROTO_UDP:
		    {
			struct udphdr *udp;

			if (offset != 0) {
				/*
				 * Port specification is unavailable -- if this
				 * rule specifies a port, we consider the rule
				 * a non-match.
				 */
				if (f->fw_nports != 0)
					continue;

				break;
			}
			PULLUP_TO(hlen + 4);
			udp = (struct udphdr *) ((u_long *)ip + ip->ip_hl);
			src_port = ntohs(udp->uh_sport);
			dst_port = ntohs(udp->uh_dport);
check_ports:
			if (!port_match(&f->fw_pts[0],
			    IP_FW_GETNSRCP(f), src_port,
			    f->fw_flg & IP_FW_F_SRNG))
				continue;
			if (!port_match(&f->fw_pts[IP_FW_GETNSRCP(f)],
			    IP_FW_GETNDSTP(f), dst_port,
			    f->fw_flg & IP_FW_F_DRNG)) 
				continue;
			break;
		    }

		case IPPROTO_ICMP:
		    {
			struct icmp *icmp;

			if (offset != 0)	/* Type isn't valid */
				break;
			PULLUP_TO(hlen + 2);
			icmp = (struct icmp *) ((u_long *)ip + ip->ip_hl);
			if (!icmptype_match(icmp, f))
				continue;
			break;
		    }
#undef PULLUP_TO

bogusfrag:
			if (fw_verbose)
				ipfw_report(NULL, ip, rif, oif);
			goto dropit;
		}

got_match:
		*flow_id = chain ; /* XXX set flow id */
#ifndef IPFW_DIVERT_RESTART
		/* Ignore divert/tee rule if socket port is "ignport" */
		switch (f->fw_flg & IP_FW_F_COMMAND) {
		case IP_FW_F_DIVERT:
		case IP_FW_F_TEE:
			if (f->fw_divert_port == ignport)
				continue;       /* ignore this rule */
			break;
		}

#endif /* IPFW_DIVERT_RESTART */
		/* Update statistics */
		f->fw_pcnt += 1;
		/*
		 * note -- bridged-ip packets still have some fields
		 * in network order, including ip_len
		 */
		if (ip) {
		    if (pip)
			f->fw_bcnt += ip->ip_len;
		    else
			f->fw_bcnt += ntohs(ip->ip_len);
		}
		f->timestamp = time.tv_sec;

		/* Log to console if desired */
		if ((f->fw_flg & IP_FW_F_PRN) && fw_verbose)
			ipfw_report(f, ip, rif, oif);

		/* Take appropriate action */
		switch (f->fw_flg & IP_FW_F_COMMAND) {
		case IP_FW_F_ACCEPT:
			return(0);
		case IP_FW_F_COUNT:
			continue;
#ifdef IPDIVERT
		case IP_FW_F_DIVERT:
#ifdef IPFW_DIVERT_RESTART
			*cookie = f->fw_number;
#else
			*cookie = htons(f->fw_divert_port);
#endif /* IPFW_DIVERT_RESTART */
			return(f->fw_divert_port);
#endif
		case IP_FW_F_TEE:
			/*
			 * XXX someday tee packet here, but beware that you
			 * can't use m_copym() or m_copypacket() because
			 * the divert input routine modifies the mbuf
			 * (and these routines only increment reference
			 * counts in the case of mbuf clusters), so need
			 * to write custom routine.
			 */
			continue;
		case IP_FW_F_SKIPTO: /* XXX check */
		    if ( f->next_rule_ptr )
			chain = f->next_rule_ptr ;
		    else
			chain = lookup_next_rule(chain) ;
		    if (!chain)
			goto dropit;
		    goto again ;
#ifdef DUMMYNET
		case IP_FW_F_PIPE:
			return(f->fw_pipe_nr | 0x10000 );
#endif
		}

		/* Deny/reject this packet using this rule */
		rule = f;
		break;
	}

#ifdef DIAGNOSTIC
	/* Rule 65535 should always be there and should always match */
	if (!chain)
		panic("ip_fw: chain");
#endif

	/*
	 * At this point, we're going to drop the packet.
	 * Send a reject notice if all of the following are true:
	 *
	 * - The packet matched a reject rule
	 * - The packet is not an ICMP packet, or is an ICMP query packet
	 * - The packet is not a multicast or broadcast packet
	 */
	if ((rule->fw_flg & IP_FW_F_COMMAND) == IP_FW_F_REJECT
	    && ip
	    && (ip->ip_p != IPPROTO_ICMP || is_icmp_query(ip))
	    && !((*m)->m_flags & (M_BCAST|M_MCAST))
	    && !IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		switch (rule->fw_reject_code) {
		case IP_FW_REJECT_RST:
		  {
			struct tcphdr *const tcp =
				(struct tcphdr *) ((u_long *)ip + ip->ip_hl);
			struct tcpiphdr ti, *const tip = (struct tcpiphdr *) ip;

			if (offset != 0 || (tcp->th_flags & TH_RST))
				break;
			ti.ti_i = *((struct ipovly *) ip);
			ti.ti_t = *tcp;
			bcopy(&ti, ip, sizeof(ti));
			NTOHL(tip->ti_seq);
			NTOHL(tip->ti_ack);
			tip->ti_len = ip->ip_len - hlen - (tip->ti_off << 2);
			if (tcp->th_flags & TH_ACK) {
				tcp_respond(NULL, tip, *m,
				    (tcp_seq)0, ntohl(tcp->th_ack), TH_RST);
			} else {
				if (tcp->th_flags & TH_SYN)
					tip->ti_len++;
				tcp_respond(NULL, tip, *m, tip->ti_seq
				    + tip->ti_len, (tcp_seq)0, TH_RST|TH_ACK);
			}
			*m = NULL;
			break;
		  }
		default:	/* Send an ICMP unreachable using code */
			icmp_error(*m, ICMP_UNREACH,
			    rule->fw_reject_code, 0L, 0);
			*m = NULL;
			break;
		}
	}

dropit:
	/*
	 * Finally, drop the packet.
	 */
	if (*m) {
		m_freem(*m);
		*m = NULL;
	}
	return(0);
}
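The source/destination checks above use a compact idiom: compute whether the masked address differs from the rule's value, then XOR that with the rule's invert flag, so one expression handles both "match this network" and "match anything but this network". Here is a stand-alone sketch with an invented rule struct and flag constant standing in for struct ip_fw and IP_FW_F_INVSRC; addresses are kept in host order for readability.

/* Sketch of masked address matching with an invert flag. */
#include <stdint.h>
#include <stdio.h>

#define RULE_INVSRC	0x01	/* stand-in for IP_FW_F_INVSRC */

struct rule {
	uint32_t src, smsk;	/* value and mask */
	unsigned flags;
};

static int
src_matches(const struct rule *r, uint32_t src)
{
	/* Non-match when the XOR of "inverted?" and "differs?" is true. */
	return !(((r->flags & RULE_INVSRC) != 0) ^
	    ((src & r->smsk) != r->src));
}

int
main(void)
{
	struct rule allow   = { 0xc0a80100, 0xffffff00, 0 };		/* 192.168.1.0/24 */
	struct rule not_lan = { 0xc0a80100, 0xffffff00, RULE_INVSRC };

	/* 192.168.1.5: matches the plain rule, not the inverted one. */
	printf("%d %d\n", src_matches(&allow, 0xc0a80105),
	    src_matches(&not_lan, 0xc0a80105));
	return 0;
}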
Exemplo n.º 27
0
static int sef_cb_init_fresh(__unused int type, __unused sef_init_info_t *info)
{
	int err;
	unsigned hz;

	char my_name[16];
	int my_priv;

	err = sys_whoami(&lwip_ep, my_name, sizeof(my_name), &my_priv);
	if (err != OK)
		panic("Cannot get own endpoint");

	nic_init_all();
	inet_read_conf();

	/* init lwip library */
	stats_init();
	sys_init();
	mem_init();
	memp_init();
	pbuf_init();

	hz = sys_hz();

	arp_ticks = ARP_TMR_INTERVAL / (1000 / hz);
	tcp_fticks = TCP_FAST_INTERVAL / (1000 / hz);
	tcp_sticks = TCP_SLOW_INTERVAL / (1000 / hz);

	etharp_init();
	
	set_timer(&arp_tmr, arp_ticks, arp_watchdog, 0);
	set_timer(&tcp_ftmr, tcp_fticks, tcp_fwatchdog, 0);
	set_timer(&tcp_stmr, tcp_sticks, tcp_swatchdog, 0);
	
	netif_init();
	netif_lo = netif_find("lo0");

	/* Read configuration. */
#if 0
	nw_conf();

	/* Get a random number */
	timerand= 1;
	fd = open(RANDOM_DEV_NAME, O_RDONLY | O_NONBLOCK);
	if (fd != -1)
	{
		err= read(fd, randbits, sizeof(randbits));
		if (err == sizeof(randbits))
			timerand= 0;
		else
		{
			printf("inet: unable to read random data from %s: %s\n",
				RANDOM_DEV_NAME, err == -1 ? strerror(errno) :
				err == 0 ? "EOF" : "not enough data");
		}
		close(fd);
	}
	else
	{
		printf("inet: unable to open random device %s: %s\n",
				RANDOM_DEV_NAME, strerror(errno));
	}
	if (timerand)
	{
		printf("inet: using current time for random-number seed\n");
		err= gettimeofday(&tv, NULL);
		if (err == -1)
		{
			printf("sysutime failed: %s\n", strerror(errno));
			exit(1);
		}
		memcpy(randbits, &tv, sizeof(tv));
	}
	init_rand256(randbits);
#endif

	/* Subscribe to driver events for network drivers. */
	if ((err = ds_subscribe("drv\\.net\\..*",
					DSF_INITIAL | DSF_OVERWRITE)) != OK)
		panic(("inet: can't subscribe to driver events"));

	/* Announce we are up. LWIP announces its presence to VFS just like
	 * any other character driver.
	 */
	chardriver_announce();

	return(OK);
}
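The timer setup above converts millisecond intervals to clock ticks with INTERVAL / (1000 / hz), which silently assumes hz divides 1000. The small sketch below compares that form with interval * hz / 1000, which does not need the assumption; the hz and interval values are purely illustrative.

/* Sketch of millisecond-to-tick conversion, two forms. */
#include <stdio.h>

int
main(void)
{
	unsigned hz = 250;		/* clock ticks per second */
	unsigned arp_ms = 5000;		/* example timer intervals, ms */
	unsigned tcp_fast_ms = 250, tcp_slow_ms = 500;

	/* Original form: fails if hz does not divide 1000 evenly. */
	printf("arp ticks: %u\n", arp_ms / (1000 / hz));

	/* Equivalent form without that assumption. */
	printf("tcp fast:  %u\n", tcp_fast_ms * hz / 1000);
	printf("tcp slow:  %u\n", tcp_slow_ms * hz / 1000);
	return 0;
}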
Exemplo n.º 28
0
/* ARGSUSED */
int
d_shell_command(int f, int n)
{
#ifdef MONA
  ewprintf("shell command is not supported");
  return (FALSE);
#else
	char	 command[512], fname[MAXPATHLEN], buf[BUFSIZ], *bufp, *cp;
	int	 infd, fds[2];
	pid_t	 pid;
	struct	 sigaction olda, newa;
	struct buffer	*bp;
	struct mgwin	*wp;
	FILE	*fin;

	bp = bfind("*Shell Command Output*", TRUE);
	if (bclear(bp) != TRUE)
		return (ABORT);

	if (d_makename(curwp->w_dotp, fname, sizeof(fname)) != FALSE) {
		ewprintf("bad line");
		return (ABORT);
	}

	command[0] = '\0';
	if ((bufp = eread("! on %s: ", command, sizeof(command), EFNEW,
	    basename(fname))) == NULL)
		return (ABORT);
	infd = open(fname, O_RDONLY);
	if (infd == -1) {
		ewprintf("Can't open input file : %s", strerror(errno));
		return (FALSE);
	}
	if (pipe(fds) == -1) {
		ewprintf("Can't create pipe : %s", strerror(errno));
		close(infd);
		return (FALSE);
	}

	newa.sa_handler = reaper;
	newa.sa_flags = 0;
	if (sigaction(SIGCHLD, &newa, &olda) == -1) {
		close(infd);
		close(fds[0]);
		close(fds[1]);
		return (ABORT);
	}
	pid = fork();
	switch (pid) {
	case -1:
		ewprintf("Can't fork");
		return (ABORT);
	case 0:
		close(fds[0]);
		dup2(infd, STDIN_FILENO);
		dup2(fds[1], STDOUT_FILENO);
		dup2(fds[1], STDERR_FILENO);
		execl("/bin/sh", "sh", "-c", bufp, (char *)NULL);
		exit(1);
		break;
	default:
		close(infd);
		close(fds[1]);
		fin = fdopen(fds[0], "r");
		if (fin == NULL)	/* "r" is surely a valid mode! */
			panic("can't happen");
		while (fgets(buf, sizeof(buf), fin) != NULL) {
			cp = strrchr(buf, '\n');
			if (cp == NULL && !feof(fin)) {	/* too long a line */
				int c;
				addlinef(bp, "%s...", buf);
				while ((c = getc(fin)) != EOF && c != '\n')
					;
				continue;
			} else if (cp)
				*cp = '\0';
			addline(bp, buf);
		}
		fclose(fin);
		close(fds[0]);
		break;
	}
	wp = popbuf(bp);
	if (wp == NULL)
		return (ABORT);	/* XXX - free the buffer?? */
	curwp = wp;
	curbp = wp->w_bufp;
	if (sigaction(SIGCHLD, &olda, NULL) == -1)
		ewprintf("Warning, couldn't reset previous signal handler");
	return (TRUE);
#endif
}
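The child side of d_shell_command() is the classic pipe-capture pattern: fork, redirect the child's stdout and stderr into the write end of a pipe with dup2(), exec the shell, and read the output with stdio in the parent. A trimmed stand-alone sketch of the same pattern, with the command string hard-coded and error handling reduced to keep it short:

/* Sketch: run a shell command and capture its output through a pipe. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
	const char *cmd = "ls -l /";
	char buf[BUFSIZ];
	int fds[2];
	pid_t pid;
	FILE *fin;

	if (pipe(fds) == -1) {
		perror("pipe");
		return 1;
	}
	switch (pid = fork()) {
	case -1:
		perror("fork");
		return 1;
	case 0:					/* child */
		close(fds[0]);
		dup2(fds[1], STDOUT_FILENO);
		dup2(fds[1], STDERR_FILENO);
		execl("/bin/sh", "sh", "-c", cmd, (char *)NULL);
		_exit(1);			/* exec failed */
	default:				/* parent */
		close(fds[1]);
		fin = fdopen(fds[0], "r");
		while (fin != NULL && fgets(buf, sizeof(buf), fin) != NULL)
			fputs(buf, stdout);
		if (fin != NULL)
			fclose(fin);
		waitpid(pid, NULL, 0);
	}
	return 0;
}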
Exemplo n.º 29
0
/*
 * Do an "ls" style listing of a directory
 */
static void
printlist(char *name, char *basename)
{
    struct afile *fp, *list, *listp;
    struct direct *dp;
    struct afile single;
    RST_DIR *dirp;
    int entries, len, namelen;
    char locname[MAXPATHLEN + 1];

    dp = pathsearch(name);
    listp = NULL;
    if (dp == NULL || (!dflag && TSTINO(dp->d_ino, dumpmap) == 0) ||
            (!vflag && dp->d_ino == WINO))
        return;
    if ((dirp = rst_opendir(name)) == NULL) {
        entries = 1;
        list = &single;
        mkentry(name, dp, list);
        len = strlen(basename) + 1;
        if (strlen(name) - len > single.len) {
            freename(single.fname);
            single.fname = savename(&name[len]);
            single.len = strlen(single.fname);
        }
    } else {
        entries = 0;
        while ((dp = rst_readdir(dirp)) != NULL)
            entries++;
        rst_closedir(dirp);
        list = (struct afile *)malloc(entries * sizeof(struct afile));
        if (list == NULL) {
            fprintf(stderr, "ls: out of memory\n");
            return;
        }
        if ((dirp = rst_opendir(name)) == NULL)
            panic("directory reopen failed\n");
        fprintf(stderr, "%s:\n", name);
        entries = 0;
        listp = list;
        (void) strncpy(locname, name, MAXPATHLEN);
        (void) strncat(locname, "/", MAXPATHLEN);
        namelen = strlen(locname);
        while ((dp = rst_readdir(dirp)) != NULL) {
            if (!dflag && TSTINO(dp->d_ino, dumpmap) == 0)
                continue;
            if (!vflag && (dp->d_ino == WINO ||
                           strcmp(dp->d_name, ".") == 0 ||
                           strcmp(dp->d_name, "..") == 0))
                continue;
            locname[namelen] = '\0';
            if (namelen + dp->d_namlen >= MAXPATHLEN) {
                fprintf(stderr, "%s%s: name exceeds %d char\n",
                        locname, dp->d_name, MAXPATHLEN);
            } else {
                (void) strncat(locname, dp->d_name,
                               (int)dp->d_namlen);
                mkentry(locname, dp, listp++);
                entries++;
            }
        }
        rst_closedir(dirp);
        if (entries == 0) {
            fprintf(stderr, "\n");
            free(list);
            return;
        }
        qsort((char *)list, entries, sizeof(struct afile), fcmp);
    }
    formatf(list, entries);
    if (dirp != NULL) {
        for (fp = listp - 1; fp >= list; fp--)
            freename(fp->fname);
        fprintf(stderr, "\n");
        free(list);
    }
}
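printlist() reads the directory twice: once to count entries so it can allocate exactly enough struct afile slots, then again to fill them before sorting. The sketch below shows the same two-pass pattern in userland, using POSIX opendir()/readdir() in place of the restore-specific rst_opendir()/rst_readdir().

/* Sketch: count directory entries, allocate, refill, sort, print. */
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
namecmp(const void *a, const void *b)
{
	return strcmp(*(char * const *)a, *(char * const *)b);
}

int
main(void)
{
	const char *path = ".";
	struct dirent *dp;
	DIR *dirp;
	char **names;
	int entries = 0, n = 0, i;

	/* Pass 1: count. */
	if ((dirp = opendir(path)) == NULL)
		return 1;
	while ((dp = readdir(dirp)) != NULL)
		entries++;
	closedir(dirp);

	names = malloc(entries * sizeof(*names));
	if (names == NULL)
		return 1;

	/* Pass 2: fill (the count may have changed; don't overrun). */
	if ((dirp = opendir(path)) == NULL)
		return 1;
	while (n < entries && (dp = readdir(dirp)) != NULL)
		names[n++] = strdup(dp->d_name);
	closedir(dirp);

	qsort(names, n, sizeof(*names), namecmp);
	for (i = 0; i < n; i++) {
		printf("%s\n", names[i]);
		free(names[i]);
	}
	free(names);
	return 0;
}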
Exemplo n.º 30
0
/*
 * Mkdir system call
 */
int
ext2fs_mkdir(void *v)
{
	struct vop_mkdir_args *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vattr *vap = ap->a_vap;
	struct componentname *cnp = ap->a_cnp;
	struct inode *ip, *dp;
	struct vnode *tvp;
	struct ext2fs_dirtemplate dirtemplate;
	mode_t dmode;
	int error;

#ifdef DIAGNOSTIC
	if ((cnp->cn_flags & HASBUF) == 0)
		panic("ext2fs_mkdir: no name");
#endif
	dp = VTOI(dvp);
	if ((nlink_t)dp->i_e2fs_nlink >= LINK_MAX) {
		error = EMLINK;
		goto out;
	}
	dmode = vap->va_mode & ACCESSPERMS;
	dmode |= IFDIR;
	/*
	 * Must simulate part of ext2fs_makeinode here to acquire the inode,
	 * but not have it entered in the parent directory. The entry is
	 * made later after writing "." and ".." entries.
	 */
	if ((error = ext2fs_inode_alloc(dp, dmode, cnp->cn_cred, &tvp)) != 0)
		goto out;
	ip = VTOI(tvp);
	ip->i_e2fs_uid = cnp->cn_cred->cr_uid;
	ip->i_e2fs_gid = dp->i_e2fs_gid;
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	ip->i_e2fs_mode = dmode;
	tvp->v_type = VDIR;	/* Rest init'd in getnewvnode(). */
	ip->i_e2fs_nlink = 2;
	error = ext2fs_update(ip, 1);

	/*
	 * Bump link count in parent directory
	 * to reflect work done below.  Should
	 * be done before reference is created
	 * so reparation is possible if we crash.
	 */
	dp->i_e2fs_nlink++;
	dp->i_flag |= IN_CHANGE;
	if ((error = ext2fs_update(dp, 1)) != 0)
		goto bad;

	/* Initialize directory with "." and ".." from static template. */
	memset(&dirtemplate, 0, sizeof(dirtemplate));
	dirtemplate.dot_ino = h2fs32(ip->i_number);
	dirtemplate.dot_reclen = h2fs16(12);
	dirtemplate.dot_namlen = 1;
	if (ip->i_e2fs->e2fs.e2fs_rev > E2FS_REV0 &&
	    (ip->i_e2fs->e2fs.e2fs_features_incompat & EXT2F_INCOMPAT_FTYPE)) {
		dirtemplate.dot_type = EXT2_FT_DIR;
	}
	dirtemplate.dot_name[0] = '.';
	dirtemplate.dotdot_ino = h2fs32(dp->i_number);
	dirtemplate.dotdot_reclen = h2fs16(VTOI(dvp)->i_e2fs->e2fs_bsize - 12);
	dirtemplate.dotdot_namlen = 2;
	if (ip->i_e2fs->e2fs.e2fs_rev > E2FS_REV0 &&
	    (ip->i_e2fs->e2fs.e2fs_features_incompat & EXT2F_INCOMPAT_FTYPE)) {
		dirtemplate.dotdot_type = EXT2_FT_DIR;
	}
	dirtemplate.dotdot_name[0] = dirtemplate.dotdot_name[1] = '.';
	error = vn_rdwr(UIO_WRITE, tvp, (caddr_t)&dirtemplate,
	    sizeof (dirtemplate), (off_t)0, UIO_SYSSPACE,
	    IO_NODELOCKED|IO_SYNC, cnp->cn_cred, NULL, curproc);
	if (error) {
		dp->i_e2fs_nlink--;
		dp->i_flag |= IN_CHANGE;
		goto bad;
	}
	if (VTOI(dvp)->i_e2fs->e2fs_bsize >
							VFSTOUFS(dvp->v_mount)->um_mountp->mnt_stat.f_bsize)
		panic("ext2fs_mkdir: blksize"); /* XXX should grow with balloc() */
	else {
		error = ext2fs_setsize(ip, VTOI(dvp)->i_e2fs->e2fs_bsize);
		if (error) {
			dp->i_e2fs_nlink--;
			dp->i_flag |= IN_CHANGE;
			goto bad;
		}
		ip->i_flag |= IN_CHANGE;
	}

	/* Directory set up, now install its entry in the parent directory. */
	error = ext2fs_direnter(ip, dvp, cnp);
	if (error != 0) {
		dp->i_e2fs_nlink--;
		dp->i_flag |= IN_CHANGE;
	}
bad:
	/*
	 * No need to do an explicit VOP_TRUNCATE here, vrele will do this
	 * for us because we set the link count to 0.
	 */
	if (error) {
		ip->i_e2fs_nlink = 0;
		ip->i_flag |= IN_CHANGE;
		vput(tvp);
	} else
		*ap->a_vpp = tvp;
out:
	pool_put(&namei_pool, cnp->cn_pnbuf);
	vput(dvp);
	return (error);
}
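The dirtemplate constants above encode ext2's on-disk rule that a directory entry is an 8-byte header plus a name padded to a multiple of 4, and that the last entry's rec_len stretches to the end of the block: hence 12 for "." and blocksize - 12 for "..". A small sketch of that arithmetic, with the 1024-byte block size assumed only for the example:

/* Sketch of the "." / ".." record-length arithmetic for a new directory. */
#include <stdio.h>

#define DIRENT_HDR	8	/* inode(4) + rec_len(2) + namlen(1) + type(1) */
#define DIRENT_ROUNDUP(n)	(((n) + 3) & ~3U)

int
main(void)
{
	unsigned bsize = 1024;
	unsigned dot_reclen, dotdot_reclen;

	/* "." needs 8 + roundup(1) = 12 bytes, which becomes its rec_len. */
	dot_reclen = DIRENT_HDR + DIRENT_ROUNDUP(1);

	/* ".." takes whatever is left of the block. */
	dotdot_reclen = bsize - dot_reclen;

	printf("dot_reclen %u, dotdot_reclen %u, total %u (block %u)\n",
	    dot_reclen, dotdot_reclen, dot_reclen + dotdot_reclen, bsize);
	return 0;
}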