Beispiel #1
0
//----------------------------------------------------------------------
static void TouchArg(op_t &x, int isload)
{
  switch ( x.type )
  {
    case o_displ:
    case o_imm:
      if ( op_adds_xrefs(uFlag, x.n) )
      {
        int outf = x.type != o_imm ? OOF_ADDR : 0;
        ua_add_off_drefs2(x, dr_O, outf|OOF_SIGNED);
      }
      break;

    case o_mem:
    case o_ind_mem:
    case o_reg:
    case o_ind_reg:
      {
        ea_t dea;
        if ( x.type == o_mem || x.type == o_ind_mem )
        {
          dea = map_addr(x.addr, x.n, true);
        }
        else
        {
          if ( x.reg >= rRR0 )
            dea = map_addr(x.reg - rRR0, x.n, true);
          else
            dea = map_addr(x.reg - rR0, x.n, true);
        }
        ua_dodata2(x.offb, dea, x.dtyp);
        if ( !isload )
          doVar(dea);
        ua_add_dref(x.offb, dea, isload ? dr_R : dr_W);
        if ( !has_user_name(get_flags_novalue(dea)) && dea > intmem)
        {
          char buf[10];
          int num = dea - intmem;
          if ( num < 0x100 )
          {
            qsnprintf(buf, sizeof(buf), "R%d", num);
          }
          else if ( num < 0x1000 )
          {
            qsnprintf(buf, sizeof(buf), "ERF_%X_%d", num >> 8, num & 0xFF);
          }
          else
          {
            int reg_no     = ((num >> 4) & 0xF0) + (num & 0xF);
            int subbank_no = ((num >> 4) & 0xF) + 1;
            qsnprintf(buf, sizeof(buf), "R%d_%X", reg_no, subbank_no);
          }
          set_name(dea, buf, SN_NOWARN);
        }
Beispiel #2
0
/*
 * watch a dinode on vfs
 */
void watch_dinode(unsigned int dino) {
	printf("----dinode: %d--------\n", dino);
	struct d_inode_t dinode;
	fseek(fd, map_addr(dino), 0);				
	fread(&dinode, 1, sizeof(dinode), fd);
	printf("PA: %lu\n", map_addr(dino));
	printf("dino: %d\n", dinode.dino);
	printf("type: %c\n", dinode.type);
	printf("addr[0]: %d\n", dinode.addr[0]);
	printf("size: %d\n", dinode.size);
	printf("----------------------\n");
	return;
}
Beispiel #3
0
/*
 * Find address for user to map.
 * With MAP_FIXED the caller's address is used verbatim (any existing
 * mapping there is blown away).  Otherwise the hint in *addrp is tried
 * first, and map_addr() picks a fresh range when the hint is absent,
 * unavailable, or MAP_ALIGN is requested.
 */
int
choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
    int vacalign, uint_t flags)
{
	caddr_t basep = (caddr_t)(uintptr_t)((uintptr_t)*addrp & PAGEMASK);
	size_t lenp = len;

	ASSERT(AS_ISCLAIMGAP(as));	/* searches should be serialized */

	if (flags & MAP_FIXED) {
		/* Fixed mapping: clear out whatever was there before. */
		(void) as_unmap(as, *addrp, len);
		return (0);
	}

	if (basep != NULL && ((flags & MAP_ALIGN) == 0) &&
	    !as_gap(as, len, &basep, &lenp, 0, *addrp)) {
		/* User supplied address was available */
		*addrp = basep;
	} else {
		/* No usable hint; let the platform pick an address. */
		map_addr(addrp, len, off, vacalign, flags);
	}

	return (*addrp == NULL ? ENOMEM : 0);
}
Beispiel #4
0
/*
 * Map `len` bytes of file `ip` at file offset `off` into the current
 * process's address space.  When MAP_FIXED is set the caller-supplied
 * `addr` is used as-is; otherwise map_addr() chooses an address.
 * Returns the address the segment was mapped at.
 */
int do_mmap(inode* ip, uint addr, int len, int prot, int flags, int off) {
  vas_t* vas = proc->vas;
  /*
   * BUG FIX: addrp was previously left uninitialized on the
   * non-MAP_FIXED path, so map_addr() wrote its result through a wild
   * pointer (undefined behavior).  Point it at `addr` unconditionally:
   * on the MAP_FIXED path this matches the old `addrp = &addr`, and on
   * the other path map_addr() now has valid storage to fill in.
   */
  uint *addrp = &addr;

  //cprintf("start map addr:0x%x len:0x%x to proc:%d\n", addr, len, proc->pid);

  if ((flags & MAP_FIXED) == 0) {
    /* Let the VM layer pick an address; the result lands in *addrp. */
    map_addr(addrp, len, off, 1);
    //if ((int)(*addrp) == 0)
      //return (ENOMEM);
  }

  /* Create and fill in the vnode-backed segment for this mapping. */
  seg_vnode_t* nseg = (seg_vnode_t*) seg_t::alloc(vas, *addrp, len);
  nseg->vp = ip;
  nseg->offset = off & PAGEMASK;
  nseg->type = flags | MAP_TYPE;
  nseg->prot = prot;

  vas->seglast = nseg;

  return (int)(*addrp);
}
Beispiel #5
0
/*
 * Interactive menu driver for the virtual-to-main-memory mapping demo.
 * Loops until the user selects "Quit" (option 4) or stdin hits EOF.
 */
int main()
{
  int loop = 1;
  int input = -1;
  while(loop) {
    printf("Virtual memory to Main memory mapping:\n");
    printf("--------------------------------------\n");
    printf("1) Set parameters\n");
    printf("2) Map virtual address\n");
    printf("3) Print page table\n");
    printf("4) Quit\n");
    printf("\n");
    printf("Enter selection: ");
    /*
     * BUG FIX: the scanf() result was previously ignored.  On
     * non-numeric input (or EOF) `input` kept its stale value and the
     * offending characters stayed in stdin, so the menu looped forever.
     * Consume the bad token and treat EOF as "quit".
     */
    if (scanf("%d", &input) != 1) {
      int c;
      if (feof(stdin))
        break;
      while ((c = getchar()) != EOF && c != '\n')
        ;                       /* discard the invalid token */
      input = -1;               /* falls through to "Invalid selection." */
    }
    switch(input) {
    case 1:
      setparameters();
      break;
    case 2:
      map_addr();
      break;
    case 3:
      print_table();
      break;
    case 4:
      loop = 0;
      break;
    default:
      printf("Invalid selection.");
    }
    printf("\n\n");
  }
  return 0;
}
Beispiel #6
0
/*
 * Gather the payload of every PT_NOTE segment of the core into a single
 * malloc'd buffer.  Each segment's notes are validated header by header
 * and only the bytes belonging to well-formed notes are counted.
 * Returns the buffer (caller frees it); *note_bytes receives the number
 * of valid note bytes collected.
 */
static void *collect_notes(
	int fd, Elf64_Ehdr *ehdr, Elf64_Phdr *phdr, size_t *note_bytes)
{
	int i;
	size_t bytes, result_bytes;
	char *notes;

	result_bytes = 0;
	/* Find the worst case note memory usage */
	bytes = 0;
	for(i = 0; i < ehdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_NOTE) {
			bytes += phdr[i].p_filesz;
		}
	}

	/* Allocate the worst case note array */
	notes = xmalloc(bytes);

	/* Walk through and capture the notes */
	for(i = 0; i < ehdr->e_phnum; i++) {
		Elf64_Nhdr *hdr, *lhdr, *nhdr;
		void *pnotes;
		if (phdr[i].p_type != PT_NOTE) {
			continue;
		}
		/* First snapshot the notes */
		pnotes = map_addr(fd, phdr[i].p_filesz, phdr[i].p_offset);
		memcpy(notes + result_bytes, pnotes, phdr[i].p_filesz);
		unmap_addr(pnotes, phdr[i].p_filesz);

		/* Walk through the new notes and find the real length */
		hdr = (Elf64_Nhdr *)(notes + result_bytes);
		lhdr = (Elf64_Nhdr *)(notes + result_bytes + phdr[i].p_filesz);
		for(; hdr < lhdr; hdr = nhdr) {
			size_t hdr_size;
			/* If there is not a name this is a invalid/reserved note
			 * stop here.
			 */
			if (hdr->n_namesz == 0) {
				break;
			}
			/* name and desc are each padded to 4-byte alignment */
			hdr_size = 
				sizeof(*hdr) + 
				((hdr->n_namesz + 3) & ~3) +
				((hdr->n_descsz + 3) & ~3);

			nhdr = (Elf64_Nhdr *)(((char *)hdr) + hdr_size); 
			/* if the note does not fit in the segment stop here */
			if (nhdr > lhdr) {
				break;
			}
			/*
			 * Update result_bytes after each good header.
			 * BUG FIX: must advance past the END of the note just
			 * validated (nhdr), not to its start (hdr); the old
			 * code silently dropped the last valid note of every
			 * segment from the returned byte count.
			 */
			result_bytes = ((char *)nhdr) - notes;
		}
	}
	*note_bytes = result_bytes;
	return notes;
}
Beispiel #7
0
/*ARGSUSED*/
/*
 * Device segmap entry point for an smb clone device: map a page-rounded,
 * zero-filled window into the caller's address space and copy the
 * requested [off, off+len) range of the clone's data (c_eplen + c_stlen
 * bytes total) into it with smb_uiomove().  MAP_FIXED and writable
 * MAP_SHARED mappings are refused.
 */
static int
smb_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred)
{
	/* per-minor clone state; the minor number selects the clone */
	smb_clone_t *cp = &smb_clones[getminor(dev)];

	/* mappings are established in whole pages */
	size_t alen = P2ROUNDUP(len, PAGESIZE);
	caddr_t addr;

	iovec_t iov;
	uio_t uio;
	int err;

	if (len <= 0 || (flags & MAP_FIXED))
		return (EINVAL);

	/* a writable shared mapping would let the app alter the snapshot */
	if ((prot & PROT_WRITE) && (flags & MAP_SHARED))
		return (EACCES);

	/* middle clause catches off + len arithmetic overflow */
	if (off < 0 || off + len < off || off + len > cp->c_eplen + cp->c_stlen)
		return (ENXIO);

	as_rangelock(as);
	map_addr(&addr, alen, 0, 1, 0);

	if (addr != NULL)
		err = as_map(as, addr, alen, segvn_create, zfod_argsp);
	else
		err = ENOMEM;

	as_rangeunlock(as);
	*addrp = addr;

	if (err != 0)
		return (err);

	/* copy the requested window into the fresh zero-fill mapping */
	iov.iov_base = addr;
	iov.iov_len = len;

	bzero(&uio, sizeof (uio_t));
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = off;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_extflg = UIO_COPY_DEFAULT;
	uio.uio_resid = len;

	/* if the copy fails, tear the mapping back down before returning */
	if ((err = smb_uiomove(cp, &uio)) != 0)
		(void) as_unmap(as, addr, alen);

	return (err);
}
Beispiel #8
0
// Perform a non-local return: locate the outermost lexical frame for the
// receiver, push `result` onto that frame's sender, and return the sender
// so the interpreter resumes there.
oop_t ActivationObj::nonlocal_return(oop_t result, oop_t rcvr) {
  ActivationObj* home;
  if (::is_block(rcvr)) {
    // todo unimplemented _OnNonLocalReturn:
    home = ActivationObj::from(home_frame(rcvr))->outermost_lexical_frame();
  } else {
    // todo optimize transmogrify away NLR bytecodes in outer methods
    assert(map_addr()->is_outer_activation_map());
    home = this;
  }
  oop_t sender = home->get_sender();
  ActivationObj::from(sender)->remote_push(result);
  return sender;
}
Beispiel #9
0
/*
 * Reserve and map `rlen` bytes (a multiple of PAGESIZE) of zero-fill
 * memory in address space `as`.  Returns the chosen user address, or
 * NULL when no address could be found or the mapping failed.
 */
static char *
ksyms_asmap(struct as *as, size_t rlen)
{
	char *addr = NULL;
	int failed;

	as_rangelock(as);
	map_addr(&addr, rlen, 0, 1, 0);
	/* short-circuit keeps as_map() from running with a NULL address */
	failed = (addr == NULL ||
	    as_map(as, addr, rlen, segvn_create, zfod_argsp) != 0);
	as_rangeunlock(as);

	return (failed ? NULL : addr);
}
Beispiel #10
0
/*
 * NAT helper: rewrite the address/port found in the given SIP header of
 * this packet, if the header is present.  Returns non-zero on success
 * and also when the header is simply absent (nothing to rewrite).
 */
static int map_sip_addr(struct sk_buff *skb,
            const char **dptr, unsigned int *datalen,
            enum sip_header_types type)
{
    enum ip_conntrack_info ctinfo;
    struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
    unsigned int matchlen, matchoff;
    union nf_inet_addr addr;
    __be16 port;
    int found;

    found = ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL,
                    &matchoff, &matchlen, &addr, &port);
    if (found <= 0)
        return 1;   /* header not present -- treat as success */

    return map_addr(skb, dptr, datalen, matchoff, matchlen, &addr, port);
}
/*ARGSUSED8*/
/*
 * Segmap entry point for the Xen privcmd device: establish a MAP_SHARED
 * mapping at offset 0 backed by segmf; the ioctl interface later points
 * the backing objects at arbitrary machine frames.  Requires the xvm
 * control privilege.
 */
static int
privcmd_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp,
    off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr)
{
	struct segmf_crargs a;
	int error;

	/* mapping foreign frames is a privileged operation */
	if (secpolicy_xvm_control(cr))
		return (EPERM);

	as_rangelock(as);
	if ((flags & MAP_FIXED) == 0) {
		/* let the platform choose a user address for the mapping */
		map_addr(addrp, len, (offset_t)off, 0, flags);
		if (*addrp == NULL) {
			error = ENOMEM;
			goto rangeunlock;
		}
	} else {
		/*
		 * User specified address
		 */
		(void) as_unmap(as, *addrp, len);
	}

	/*
	 * The mapping *must* be MAP_SHARED at offset 0.
	 *
	 * (Foreign pages are treated like device memory; the
	 * ioctl interface allows the backing objects to be
	 * arbitrarily redefined to point at any machine frame.)
	 */
	if ((flags & MAP_TYPE) != MAP_SHARED || off != 0) {
		error = EINVAL;
		goto rangeunlock;
	}

	a.dev = dev;
	a.prot = (uchar_t)prot;
	a.maxprot = (uchar_t)maxprot;
	error = as_map(as, *addrp, len, segmf_create, &a);

rangeunlock:
	as_rangeunlock(as);
	return (error);
}
Beispiel #12
0
/*
 * This function is called when a page needs to be mapped into a
 * process's address space.  Allocate the user address space and
 * set up the mapping to the page.  Assumes the page has already
 * been allocated and locked in memory via schedctl_getpage.
 * On success, *uaddrp receives the chosen user address; returns 0,
 * or ENOMEM/as_map() error on failure.
 */
static int
schedctl_map(struct anon_map *amp, caddr_t *uaddrp, caddr_t kaddr)
{
	caddr_t addr = NULL;
	struct as *as = curproc->p_as;
	struct segvn_crargs vn_a;
	int error;

	as_rangelock(as);
	/* pass address of kernel mapping as offset to avoid VAC conflicts */
	map_addr(&addr, PAGESIZE, (offset_t)(uintptr_t)kaddr, 1, 0);
	if (addr == NULL) {
		as_rangeunlock(as);
		return (ENOMEM);
	}

	/*
	 * Use segvn to set up the mapping to the page.
	 */
	vn_a.vp = NULL;
	vn_a.offset = 0;
	vn_a.cred = NULL;
	vn_a.type = MAP_SHARED;
	vn_a.prot = vn_a.maxprot = PROT_ALL;
	vn_a.flags = 0;
	vn_a.amp = amp;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;
	error = as_map(as, addr, PAGESIZE, segvn_create, &vn_a);
	as_rangeunlock(as);

	if (error)
		return (error);

	/* only report the address once the mapping is known good */
	*uaddrp = addr;
	return (0);
}
Beispiel #13
0
static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
			       const char **dptr, unsigned int *datalen)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned int coff, matchoff, matchlen;
	enum sip_header_types hdr;
	union nf_inet_addr addr;
	__be16 port;
	int request, in_header;

	/* Basic rules: requests and responses. */
	if (strnicmp(*dptr, "SIP/2.0", strlen("SIP/2.0")) != 0) {
		if (ct_sip_parse_request(ct, *dptr, *datalen,
					 &matchoff, &matchlen,
					 &addr, &port) > 0 &&
		    !map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
			      &addr, port))
			return NF_DROP;
		request = 1;
	} else
		request = 0;

	if (nf_ct_protonum(ct) == IPPROTO_TCP)
		hdr = SIP_HDR_VIA_TCP;
	else
		hdr = SIP_HDR_VIA_UDP;

	/* Translate topmost Via header and parameters */
	if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
				    hdr, NULL, &matchoff, &matchlen,
				    &addr, &port) > 0) {
<<<<<<< HEAD
		unsigned int matchend, poff, plen, buflen, n;
=======
<<<<<<< HEAD
Beispiel #14
0
/*
 * Read an ELF64 core dump whose headers live at the physical address
 * given on the command line (or in the "elfcorehdr" environment
 * variable), rebuild the headers and notes, and stream the complete
 * core to stdout in MAP_WINDOW_SIZE chunks.
 */
int main(int argc, char **argv)
{
	char *start_addr_str, *end;
	unsigned long long start_addr;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	void *notes, *headers;
	size_t note_bytes, header_bytes;
	int fd;
	int i;
	start_addr_str = 0;
	if (argc > 2) {
		fprintf(stderr, "Invalid argument count\n");
		exit(9);
	}
	if (argc == 2) {
		start_addr_str = argv[1];
	}
	if (!start_addr_str) {
		start_addr_str = getenv("elfcorehdr");
	}
	if (!start_addr_str) {
		fprintf(stderr, "Cannot find the start of the core dump\n");
		exit(1);
	}
	start_addr = strtoull(start_addr_str, &end, 0);
	if ((start_addr_str == end) || (*end != '\0')) {
		/* BUG FIX: message previously misspelled "address" */
		fprintf(stderr, "Bad core dump start address: %s\n",
			start_addr_str);
		exit(2);
	}
	
	fd = open(DEV_MEM, O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "Cannot open " DEV_MEM ": %s\n",
			strerror(errno));
		exit(3);
	}

	/* Get the elf header */
	ehdr = map_addr(fd, sizeof(*ehdr), start_addr);

	/* Verify the ELF header */
	if (	(ehdr->e_ident[EI_MAG0] != ELFMAG0) ||
		(ehdr->e_ident[EI_MAG1] != ELFMAG1) ||
		(ehdr->e_ident[EI_MAG2] != ELFMAG2) ||
		(ehdr->e_ident[EI_MAG3] != ELFMAG3) ||
		(ehdr->e_ident[EI_CLASS] != ELFCLASS64) ||
		(ehdr->e_ident[EI_DATA] != ELFDATALOCAL) ||
		(ehdr->e_ident[EI_VERSION] != EV_CURRENT) ||
		(ehdr->e_type != ET_CORE) ||
		(ehdr->e_version != EV_CURRENT) ||
		(ehdr->e_ehsize != sizeof(Elf64_Ehdr)) ||
		(ehdr->e_phentsize != sizeof(Elf64_Phdr)) ||
		(ehdr->e_phnum == 0))
	{
		fprintf(stderr, "Invalid Elf header\n");
		exit(4);
	}
	
	/* Get the program header */
	phdr = map_addr(fd, sizeof(*phdr)*(ehdr->e_phnum), ehdr->e_phoff);

	/* Collect up the notes */
	note_bytes = 0;
	notes = collect_notes(fd, ehdr, phdr, &note_bytes);
	
	/* Generate new headers */
	header_bytes = 0;
	headers = generate_new_headers(ehdr, phdr, note_bytes, &header_bytes);

	/* Write out everything */
	write_all(STDOUT_FILENO, headers, header_bytes);
	write_all(STDOUT_FILENO, notes, note_bytes);
	for(i = 0; i < ehdr->e_phnum; i++) {
		unsigned long long offset, size;
		size_t wsize;
		/*
		 * BUG FIX: this loop streamed PT_NOTE segments (again) and
		 * skipped everything else, so the memory image was never
		 * written.  The notes were already emitted above; what must
		 * follow them is the PT_LOAD data, windowed through
		 * map_addr() to bound memory usage (cf. kexec-tools kdump).
		 */
		if (phdr[i].p_type != PT_LOAD) {
			continue;
		}
		offset = phdr[i].p_offset;
		size   = phdr[i].p_filesz;
		wsize  = MAP_WINDOW_SIZE;
		if (wsize > size) {
			wsize = size;
		}
		for(;size > 0; size -= wsize, offset += wsize) {
			void *buf;
			wsize = MAP_WINDOW_SIZE;
			if (wsize > size) {
				wsize = size;
			}
			buf = map_addr(fd, wsize, offset);
			write_all(STDOUT_FILENO, buf, wsize);
			unmap_addr(buf, wsize);
		}
	}
	free(notes);
	close(fd);
	return 0;
}
Beispiel #15
0
// Bootstrap-time slot lookup by C name; the slot must exist.
oop_t MemObj::contents_of_slot_for_bootstrapping(char* n) {
  SlotDesc* slot = map_addr()->find_slot_with_C_name(n);
  assert(slot);
  return slot->contents(this);
}
/*ARGSUSED*/
/*
 * Set up a user mapping for a graphics character device via its d_mmap
 * entry point.  Validates that every page in [offset, offset+len) is
 * mappable, then maps the range with segdev.  Only MAP_SHARED mappings
 * are allowed.
 */
int
gfxp_ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,
    off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred,
    ddi_device_acc_attr_t *accattrp, uint_t rnumber)
{
	struct segdev_crargs dev_a;
	int (*mapfunc)(dev_t dev, off_t off, int prot);
	uint_t hat_attr;
	pfn_t pfn;
	int error, i;

	/* the driver must provide a real d_mmap entry point */
	if ((mapfunc = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap) == nodev)
		return (ENODEV);

	/*
	 * Character devices that support the d_mmap
	 * interface can only be mmap'ed shared.
	 */
	if ((flags & MAP_TYPE) != MAP_SHARED)
		return (EINVAL);

	/*
	 * Check that this region is indeed mappable on this platform.
	 * Use the mapping function.
	 */
	if (ddi_device_mapping_check(dev, accattrp, rnumber, &hat_attr) == -1)
		return (ENXIO);

	/* translate the requested access ordering into HAT attributes */
	if (accattrp != NULL) {
		switch (accattrp->devacc_attr_dataorder) {
		case DDI_STRICTORDER_ACC:
			/* Want UC */
			hat_attr &= ~HAT_ORDER_MASK;
			hat_attr |= (HAT_STRICTORDER | HAT_PLAT_NOCACHE);
			break;
		case DDI_MERGING_OK_ACC:
			/* Want WC */
			hat_attr &= ~HAT_ORDER_MASK;
			hat_attr |= (HAT_MERGING_OK | HAT_PLAT_NOCACHE);
			break;
		}
	}

	/*
	 * Check to ensure that the entire range is
	 * legal and we are not trying to map in
	 * more than the device will let us.
	 */
	for (i = 0; i < len; i += PAGESIZE) {
		if (i == 0) {
			/*
			 * Save the pfn at offset here. This pfn will be
			 * used later to get user address.
			 */
			if ((pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset,
					maxprot)) == PFN_INVALID)
				return (ENXIO);
		} else {
			if (cdev_mmap(mapfunc, dev, offset + i, maxprot) ==
				PFN_INVALID)
				return (ENXIO);
		}
	}

	as_rangelock(as);
	if ((flags & MAP_FIXED) == 0) {
		/*
		 * Pick an address w/o worrying about
		 * any vac alignment constraints.
		 */
		map_addr(addrp, len, ptob(pfn), 0, flags);
		if (*addrp == NULL) {
			as_rangeunlock(as);
			return (ENOMEM);
		}
	} else {
		/*
		 * User-specified address; blow away any previous mappings.
		 */
		(void) as_unmap(as, *addrp, len);
	}

	dev_a.mapfunc = mapfunc;
	dev_a.dev = dev;
	dev_a.offset = (offset_t)offset;
	dev_a.type = flags & MAP_TYPE;
	dev_a.prot = (uchar_t)prot;
	dev_a.maxprot = (uchar_t)maxprot;
	dev_a.hat_attr = hat_attr;
#if DEBUG
	dev_a.hat_flags = 0;
#else
	dev_a.hat_flags = HAT_LOAD_LOCK;
#endif
	dev_a.devmap_data = NULL;

	error = as_map(as, *addrp, len, segdev_create, &dev_a);
	as_rangeunlock(as);

	return (error);
}
Beispiel #17
0
/*
 * format all the on-disk structures including:
 *
 * 1. find the vfs and open
 * 2. twelve bitmaps	#402~#1001
 * 3. on-disk inode		#2~#401
 * 4. root dinode		$1
 * 5. root directory	#403
 * 6. usr info			#0
 * 7. super block		#1
 *
 * which can be easily pictured as:
 * user superblock	on-disk inodes						free blocks and bitmap
 * [0]	[1]					[2]................[401]	[402].......................[1001]
 */
/*
 * Format all the on-disk structures of the vfs image at `path`,
 * creating the root user whose password is `pwd`:
 *
 * 1. find the vfs and open
 * 2. twelve bitmaps	#402~#1001
 * 3. on-disk inode		#2~#401
 * 4. root dinode		$1
 * 5. root directory	#403
 * 6. usr info			#0
 * 7. super block		#1
 *
 * which can be easily pictured as:
 * user superblock	on-disk inodes						free blocks and bitmap
 * [0]	[1]					[2]................[401]	[402].......................[1001]
 *
 * (#N = block number, $N = dinode number.  Uses the file-scope `fd`.)
 */
void format_fs(char* path, char* pwd) {

	/* open vfs */
	fd = fopen(path, "r+w+b");
	
	if (fd == NULL) 
	{
		printf("vfs does not exist\n");
		exit(0);
	}

	/* 
	 * create 12 bitmaps and store them at
	 * #402		#452	#502	#552	#602	#652
	 * #702		#752	#802	#852	#902	#952
	 *
	 * each bitmap manages the next nearby 49 blocks
	 * e.g. the bitmap stored in #402 manages #403~#451
	 */
	struct bmap_t bmap;
	int i, j;
	for (i=NIBLOCK + 2; i<NIBLOCK + NDBLOCK + 2; i+=50) {  /* [402, 1002) */
		for(j=1; j<50; j++) {		/* [1, 49] */
			bmap.use[j] = 0; 
			bmap.addr[j] = i + j;	/* 1 + 402 = 403, 49 + 402 = 451 */
			bmap.free_block_num = 49;
		}
		fseek(fd, i * SBLOCK, 0);	/* SBLOCK = bytes per block */
		fwrite(&bmap, 1, sizeof(bmap), fd);
	}

	/* 
	 * initialize the on-disk inode(dinode)
	 * dinode's number = size of inode block area / size of dinode
	 * the inode block area starts at #2
	 *
	 * NOTE: THE DINODE.NO STARTS FROM 1 BUT NOT 0
	 */
	struct d_inode_t dinode;
	dinode.dino = 1;
	dinode.size = 0;
	dinode.type = 'e';											/* write the empty inodes from $1 */
	for (i=1; i<SBLOCK * NIBLOCK / sizeof(dinode); i++) {		
		fseek(fd, map_addr(i), 0);								/* the dinode starts at #2 */
		fwrite(&dinode, 1, sizeof(dinode), fd);		
		dinode.dino++;							
	}

	/*
	 * create the user(root)'s directory inode
	 * write it at $1
	 */
	dinode.size = 1;	
	dinode.type = 'd';						/* directory */
	dinode.addr[0] = NIBLOCK + 2 + 1;		/* #403 */
	dinode.dino = 1;						/* root inode is $1 */
	fseek(fd, map_addr(dinode.dino), 0);
	fwrite(&dinode, 1, sizeof(dinode), fd);

	/* create root dir at #403 */
	struct directory_t dir;
	dir.size = 0;				/* nothing in the root dir at first */
	fseek(fd, SBLOCK * (NIBLOCK + 2 + 1), 0);
	fwrite(&dir, 1, sizeof(dir), fd);

	/* since the #403 is used, adjust the bitmap in #402 */
	fseek(fd, 402 * SBLOCK, 0);
	fread(&bmap, 1, sizeof(bmap), fd);
	bmap.use[1] = 1;
	bmap.free_block_num--;
	fseek(fd, 402 * SBLOCK, 0);
	fwrite(&bmap, 1, sizeof(bmap), fd);

	/* create usr at #0 */
	struct user_t usr;
	strcpy(usr.name, "root");  
	strcpy(usr.password, pwd);
	usr.dino = 1;				/* $1 */
	fseek(fd, 0, 0);
	fwrite(&usr, 1, sizeof(usr), fd);

	/* create super block at #1 */
	struct super_block_t sb;
	sb.data_block_num = NDBLOCK - ( NDBLOCK / 50 );		/* 600 - 12 = 588 */
	sb.inode_block_num = NIBLOCK;						/* 400 */
	sb.free_block_num = NDBLOCK - ( NDBLOCK / 50 ) - 1;	/* one for root block*/
	sb.free_block_sp = 0;	
	sb.free_inode_num = NIBLOCK * SBLOCK / sizeof(dinode) - 1; /* one for root inode */
	sb.free_inode_sp = 0;
	sb.modified = 0;
	fseek(fd, SBLOCK * 1, 0);  
	fwrite(&sb, 1, sizeof(sb), fd);

	/* close the file handler and return */
	fclose(fd);
	return;
}
Beispiel #18
0
// True when this object's map is the map-map, i.e. the object is itself a map.
bool MemObj::is_map() {
  return map_addr()->is_mapMap();
}
Beispiel #19
0
// Bytecode interpreter main loop for one activation.  Fetches, decodes and
// dispatches bytecodes from this activation's map until either the method
// runs off the end of its codes (normal return), a send/NLR transfers
// control to another activation (its oop is returned), or a backward branch
// forces a pre-emption stop (this_activation is returned so the scheduler
// can resume it later).
oop_t ActivationObj::loop(oop_t this_activation) {

  The::set_active_context( this_activation, this);
  
  DECLARE_STACK;
  smi         bci = get_pc_quickly(io);
  
  // Cache the code vector and literal vector out of the activation's map.
  ActivationMapObj* m_addr = map_addr();
  
  oop_t           codes_oop    = m_addr->codes();
  ByteVectorObj*  codes_addr   = ByteVectorObj::from(codes_oop);
  char*           codes        = codes_addr->bytes();
  fint            codes_length = codes_addr->indexableSize();



  oop_t         literals       = m_addr->literals();
  ObjVectorObj* literals_addr  = ObjVectorObj::from(literals);
  fint          literals_io    = literals_addr->indexableOrigin();
  
  // The UC_* macros below read-and-reset accumulator state that prefix
  // bytecodes (INDEX_CODE, DELEGATEE_CODE, ...) build up for the next
  // "real" bytecode.
  fint index = 0, temp_index;
  # define UC_index ((temp_index = index << INDEXWIDTH), (index = 0), temp_index | bc_index)
  bool undirected_resend = false;
  # define UC_undirected_resend (undirected_resend ? (undirected_resend = false, true) : false)
  
  fint lexical_level = 0;
  
  # define use_lit (literals_addr->read_oop(literals_io + UC_index))
  
  oop_t delegatee = 0, temp_del;
  # define UC_del  ((temp_del = delegatee), (delegatee = 0), temp_del)
  
  fint arg_count = 0, temp_arg_count;
  # define UC_arg_count ((temp_arg_count = arg_count), (arg_count = 0), temp_arg_count)
  
  fint temp_bci;
  // for process pre-emption, stop on backward branches
  // todo optimize should probably just stop every 10 or 100 backward branches, or even just every N bytecodes
  # define set_bci(bci_oop) (temp_bci = value_of_smiOop(assert_smi(bci_oop)), stop = temp_bci < bci, bci = temp_bci)
  
  oop_t self = get_self_quickly(io);
  oop_t rcvr = get_rcvr_quickly(io);
  for ( bool stop = false; !stop; ) {
    // Running past the last bytecode is the normal method return.
    if (bci >= codes_length) {
      oop_t r = pop();
      oop_t s = get_sender_quickly(io);
      if (s != NULL) // it'll be NULL if we're returning from the start method
        ActivationObj::from(s)->remote_push(r);
      // todo optimize time slow; quits this routine just for a return -- dmu 1/06
      return s;
    }
    unsigned char bc = codes[bci++];
    ByteCodeKind kind  = getOp(bc);
    fint         bc_index = getIndex(bc);
    // printf("interpreting a bytecode in activationMap %i, bc is %i, kind is %i, bc_index is %i\n", map_oop(), bc, kind, bc_index);
    switch (kind) {
     default:   fatal("unknown kind of bytecode"); break;
     
     case                   INDEX_CODE:          index = UC_index;     break;
     case           LEXICAL_LEVEL_CODE:  lexical_level = UC_index;     break;
     case          ARGUMENT_COUNT_CODE:      arg_count = UC_index;     break;
  
     case           READ_LOCAL_CODE:   push(local_obj_addr(lexical_level)-> read_arg_or_local(UC_index)      );  lexical_level = 0;               break;
     case          WRITE_LOCAL_CODE:        local_obj_addr(lexical_level)->write_arg_or_local(UC_index, pop());  lexical_level = 0;  push(self);  break;
     
     case          BRANCH_CODE:                                                          set_bci(use_lit);                   break;
     case          BRANCH_TRUE_CODE:     if ( pop() == The::oop_of(The:: true_object))   set_bci(use_lit);  else index = 0;  break;
     case          BRANCH_FALSE_CODE:    if ( pop() == The::oop_of(The::false_object))   set_bci(use_lit);  else index = 0;  break;
     case          BRANCH_INDEXED_CODE:
                                        {
                                         // Jump table: the popped smi selects a target bci
                                         // from a literal vector; out-of-range falls through.
                                         ObjVectorObj* branch_vector_addr = ObjVectorObj::from(assert_objVector(use_lit));
                                         oop_t branch_index_oop = pop();
                                         if ( is_smi(branch_index_oop) ) {
                                            smi branch_index = value_of_smiOop(branch_index_oop);
                                            if (  0 <= branch_index  &&  branch_index < branch_vector_addr->indexableSize()  )   {
                                              oop_t dest_oop = branch_vector_addr->indexable_at(branch_index);
                                              set_bci(dest_oop);
                                            }
                                         }
                                        }
                                        break;
       
     
     case      DELEGATEE_CODE:               delegatee = use_lit;                                     break;


     case LITERAL_CODE:
      {
       oop_t lit = use_lit;
       if (::is_block(lit)) {
         put_sp_quickly(io, sp); // make sure that the sp is stored correctly, because an allocation could trigger a GC
         oop_t cloned_block = BlockObj::clone_block(lit, this_activation);
         ActivationObj* possibly_moved_act_addr = ActivationObj::from(this_activation); // mightHaveScavengedTheActivation
         if (possibly_moved_act_addr != this) {
           possibly_moved_act_addr->remote_push(cloned_block);
           possibly_moved_act_addr->put_pc_quickly( io, bci );
           return this_activation;
         } else {
           push(cloned_block);
         }
       } else {
         push(lit);
       }
      }
      break;
     
     case IMPLICIT_SEND_CODE:
      // fall through
     case SEND_CODE:
     {
      oop_t selector = use_lit;
      if (selector == The::oop_of(The::restart_selector)) {
        put_sp_quickly( io,  sp  = first_stack_offset               );
        put_pc_quickly( io,  bci = get_pc_after_endInit_quickly(io) );
        break;
      }
      put_sp_quickly( io, sp );
      // todo optimize dmu 3/6. This is here for the _Breakpoint primitve to help debugging by storing the PC.
      // But it slows every primitive, sigh.
      put_pc_quickly( io, bci);

      oop_t a = send(kind == IMPLICIT_SEND_CODE, selector, UC_undirected_resend, UC_del, UC_arg_count, this_activation); 
      if (a != this_activation || ActivationObj::from(a) != this) { // mightHaveScavengedTheActivation
        // put_pc_quickly( io, bci); // commented out after I added the put_pc_quickly above, dmu 3/6
        return a;
      }
      sp = get_sp_quickly(io);
     }
     break;
      
     case NO_OPERAND_CODE:
      switch(bc_index) {
       default: fatal("???"); break;
        case               POP_CODE:      pop();                                  break;
        case              SELF_CODE:      push(self);                             break;
        case          END_INIT_CODE:      put_pc_after_endInit_quickly(io, bci);  break;

        case   NONLOCAL_RETURN_CODE:      return nonlocal_return(pop(), rcvr);    break;
        case UNDIRECTED_RESEND_CODE:      undirected_resend = true;               break;
       }
       break;

    }
  }
  // Pre-emption stop: persist the interpreter registers so the
  // activation can be resumed exactly where it left off.
  put_sp_quickly( io, sp  );
  put_pc_quickly( io, bci );
  return this_activation;
}
Beispiel #20
0
bool MemObj::is_objVector()  { oop_t mt = map_addr() -> mapType();
                               return mt == The::oop_of(The::objectVectorMap_mapType)
                                   || mt == The::oop_of(The::mapMap_mapType)
                                   || mt == The::oop_of(The::blockActivationMap_mapType)
                                   || mt == The::oop_of(The::outerActivationMap_mapType); }
Beispiel #21
0
bool MemObj::is_block()      { return (map_addr() -> mapType()) == The::oop_of(The::blockMap_mapType);        }
Beispiel #22
0
// Store `x` into the slot named by oop `n`; the slot must exist.
void MemObj::set_contents_of_slot(oop_t n, oop_t x) {
  SlotDesc* slot = map_addr()->find_slot(n);
  assert(slot);
  slot->set_contents(this, x);
}
Beispiel #23
0
// Walk home-frame links until reaching the frame whose map is an outer
// (method) activation map; that frame lexically encloses this one.
ActivationObj* ActivationObj::outermost_lexical_frame() {
  if (map_addr()->is_outer_activation_map())
    return this;
  ActivationObj* home = ActivationObj::from(home_frame(get_rcvr()));
  return home->outermost_lexical_frame();
}
Beispiel #24
0
/*ARGSUSED*/
/*
 * Segmap entry point for the xpvtap driver.  Offset 0 maps the user ring
 * via devmap; offset PAGESIZE reserves user VA for the gref pages, backed
 * by segmf so grant references can be faulted in and out.  All mappings
 * must be MAP_SHARED.
 */
static int
xpvtap_segmap(dev_t dev, off_t off, struct as *asp, caddr_t *addrp,
    off_t len, unsigned int prot, unsigned int maxprot, unsigned int flags,
    cred_t *cred_p)
{
	struct segmf_crargs a;
	xpvtap_state_t *state;
	int instance;
	int e;


	/* mapping foreign frames is a privileged operation */
	if (secpolicy_xvm_control(cred_p)) {
		return (EPERM);
	}

	instance = getminor(dev);
	state = ddi_get_soft_state(xpvtap_statep, instance);
	if (state == NULL) {
		return (EBADF);
	}

	/* the user app should be doing a MAP_SHARED mapping */
	if ((flags & MAP_TYPE) != MAP_SHARED) {
		return (EINVAL);
	}

	/*
	 * if this is the user ring (offset = 0), devmap it (which ends up in
	 * xpvtap_devmap). devmap will alloc and map the ring into the
	 * app's VA space.
	 */
	if (off == 0) {
		e = devmap_setup(dev, (offset_t)off, asp, addrp, (size_t)len,
		    prot, maxprot, flags, cred_p);
		return (e);
	}

	/* this should be the mmap for the gref pages (offset = PAGESIZE) */
	if (off != PAGESIZE) {
		return (EINVAL);
	}

	/* make sure we get the size we're expecting */
	if (len != XPVTAP_GREF_BUFSIZE) {
		return (EINVAL);
	}

	/*
	 * reserve user app VA space for the gref pages and use segmf to
	 * manage the backing store for the physical memory. segmf will
	 * map in/out the grefs and fault them in/out.
	 */
	ASSERT(asp == state->bt_map.um_as);
	as_rangelock(asp);
	if ((flags & MAP_FIXED) == 0) {
		/* let the platform choose the user address */
		map_addr(addrp, len, 0, 0, flags);
		if (*addrp == NULL) {
			as_rangeunlock(asp);
			return (ENOMEM);
		}
	} else {
		/* User specified address */
		(void) as_unmap(asp, *addrp, len);
	}
	a.dev = dev;
	a.prot = (uchar_t)prot;
	a.maxprot = (uchar_t)maxprot;
	e = as_map(asp, *addrp, len, segmf_create, &a);
	if (e != 0) {
		as_rangeunlock(asp);
		return (e);
	}
	as_rangeunlock(asp);

	/*
	 * Stash user base address, and compute address where the request
	 * array will end up.
	 */
	state->bt_map.um_guest_pages = (caddr_t)*addrp;
	state->bt_map.um_guest_size = (size_t)len;

	/* register an as callback so we can cleanup when the app goes away */
	e = as_add_callback(asp, xpvtap_segmf_unregister, state,
	    AS_UNMAP_EVENT, *addrp, len, KM_SLEEP);
	if (e != 0) {
		(void) as_unmap(asp, *addrp, len);
		return (EINVAL);
	}

	/* wake thread to see if there are requests already queued up */
	mutex_enter(&state->bt_thread.ut_mutex);
	state->bt_thread.ut_wake = B_TRUE;
	cv_signal(&state->bt_thread.ut_wake_cv);
	mutex_exit(&state->bt_thread.ut_mutex);

	return (0);
}
Beispiel #25
0
/*
 * VOP_MAP entry point for xmemfs: map a block-aligned range of a regular
 * xmemfs file into the caller's address space with segxmem.  Offset,
 * length, and (for MAP_FIXED) the address must all be aligned to the
 * filesystem block size; only shared mappings are supported.
 */
static int
xmem_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addrp,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
	struct cred *cred)
{
	struct seg		*seg;
	struct segxmem_crargs	xmem_a;
	struct xmemnode 	*xp = (struct xmemnode *)VTOXN(vp);
	struct xmount 		*xm = (struct xmount *)VTOXM(vp);
	uint_t			blocknumber;
	int 			error;

#ifdef lint
	maxprot = maxprot;
#endif
	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (off < 0)
		return (EINVAL);

	/* offset, length and address has to all be block aligned */

	if (off & (xm->xm_bsize - 1) || len & (xm->xm_bsize - 1) ||
		((ulong_t)*addrp) & (xm->xm_bsize - 1)) {

		return (EINVAL);
	}

	if (vp->v_type != VREG)
		return (ENODEV);

	if (flags & MAP_PRIVATE)
		return (EINVAL);	/* XXX need to be handled */

	/*
	 * Don't allow mapping to locked file
	 */
	if (vn_has_mandatory_locks(vp, xp->xn_mode)) {
		return (EAGAIN);
	}

	/* make sure backing pages exist for the whole mapped range */
	if (error = xmem_fillpages(xp, vp, off, len, 1)) {
		return (error);
	}

	blocknumber = off >> xm->xm_bshift;

	if (flags & MAP_FIXED) {
		/*
		 * User specified address - blow away any previous mappings
		 */
		AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
		seg = as_findseg(as, *addrp, 0);

		/*
		 * Fast path. segxmem_remap will fail if this is the wrong
		 * segment or if the len is beyond end of seg. If it fails,
		 * we do the regular stuff thru as_* routines.
		 */

		if (seg && (segxmem_remap(seg, vp, *addrp, len,
				&xp->xn_ppa[blocknumber], prot) == 0)) {
			AS_LOCK_EXIT(as, &as->a_lock);
			return (0);
		}
		AS_LOCK_EXIT(as, &as->a_lock);
		if (seg)
			(void) as_unmap(as, *addrp, len);

		as_rangelock(as);

		error = valid_usr_range(*addrp, len, prot, as, as->a_userlimit);

		if (error != RANGE_OKAY ||
			as_gap(as, len, addrp, &len, AH_CONTAIN, *addrp)) {
			as_rangeunlock(as);
			return (EINVAL);
		}

	} else {
		as_rangelock(as);
		/* no fixed address: let the platform choose one */
		map_addr(addrp, len, (offset_t)off, 1, flags);
	}

	if (*addrp == NULL) {
		as_rangeunlock(as);
		return (ENOMEM);
	}

	xmem_a.xma_vp = vp;
	xmem_a.xma_offset = (u_offset_t)off;
	xmem_a.xma_prot = prot;
	xmem_a.xma_cred = cred;
	xmem_a.xma_ppa = &xp->xn_ppa[blocknumber];
	xmem_a.xma_bshift = xm->xm_bshift;

	error = as_map(as, *addrp, len, segxmem_create, &xmem_a);

	as_rangeunlock(as);
	return (error);
}
Beispiel #26
0
//----------------------------------------------------------------------
// Process one instruction operand during the emulation (analysis) stage:
// create data/code cross-references, handle immediates converted to
// offsets, and mark written memory locations as variables.
//   x       - operand to handle
//   loading - nonzero if the operand is read, zero if it is written
static void handle_operand(op_t &x,int loading)
{
  switch ( x.type )
  {
    case o_phrase:              // no special handling for these types
    case o_reg:
      break;

    case o_imm:                         // an immediate number as an operand
      if ( !loading ) goto BAD_LOGIC;   // an immediate can never be written to
      doImmdValue();                    // handle immediate number

      // if the value was converted to an offset, then create a data xref:
      if ( isOff(uFlag, x.n) )
        ua_add_off_drefs2(x, dr_O, OOFS_IFSIGN);

      break;

    case o_displ:
      doImmdValue();                    // handle immediate number

      // if the value was converted to an offset, then create a data xref:
      if ( isOff(uFlag, x.n) )
        ua_add_off_drefs2(x, loading?dr_R:dr_W, OOFS_IFSIGN|OOF_ADDR);
      break;

    case o_bit:                         // 8051 specific operand types - bits
    case o_bitnot:
      // Convert the bit number to the address of the byte containing it:
      // if bit 7 of the masked address is clear, the bit lives in the
      // bit-addressable RAM area starting at 0x20; otherwise the masked
      // value is used directly (SFR bit addressing).
      x.addr = (x.reg & 0xF8);
      if( (x.addr & 0x80) == 0 ) x.addr = x.addr/8 + 0x20;
      attach_bit_comment(x.addr, x.reg & 7);  // attach a comment if necessary
      goto MEM_XREF;

    case o_bit251:
      attach_bit_comment(x.addr, x.b251_bit);
      /* fallthrough */

    case o_mem:                         // an ordinary memory data reference
MEM_XREF:
      {
        ea_t dea = map_addr(x.addr, x.n, true); // translate to a linear address
        ua_dodata2(x.offb, dea, x.dtyp);        // create a data item there
        if ( !loading )
          doVar(dea);     // write access: mark the location as a variable
        ua_add_dref(x.offb, dea, loading ? dr_R : dr_W);
      }
      break;

    case o_near:                        // a code reference
      {
        ea_t ea = map_addr(x.addr, x.n, false);
        int iscall = InstrIsSet(cmd.itype, CF_CALL);
        ua_add_cref(x.offb, ea, iscall ? fl_CN : fl_JN);
        // stop the flow after a call to a function that does not return
        if ( flow && iscall )
          flow = func_does_return(ea);
      }
      break;

    default:
BAD_LOGIC:
      warning("%a: %s,%d: bad optype %d", cmd.ea, cmd.get_canon_mnem(), x.n, x.type);
      break;
  }
}
/*
 * SIP NAT helper main hook: rewrite the addresses and ports embedded in
 * the SIP message (*dptr, length *datalen) so that they are consistent
 * with the NAT mapping of the packet's conntrack entry.
 * Returns NF_ACCEPT on success, NF_DROP if any mangling step fails.
 */
static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
			       const char **dptr, unsigned int *datalen)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned int coff, matchoff, matchlen;
	enum sip_header_types hdr;
	union nf_inet_addr addr;
	__be16 port;
	int request, in_header;

	/* Basic rules: responses start with "SIP/2.0", requests start with
	 * a method and a request-URI which must be translated. */
	if (strnicmp(*dptr, "SIP/2.0", strlen("SIP/2.0")) != 0) {
		if (ct_sip_parse_request(ct, *dptr, *datalen,
					 &matchoff, &matchlen,
					 &addr, &port) > 0 &&
		    !map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
			      &addr, port))
			return NF_DROP;
		request = 1;
	} else
		request = 0;

	if (nf_ct_protonum(ct) == IPPROTO_TCP)
		hdr = SIP_HDR_VIA_TCP;
	else
		hdr = SIP_HDR_VIA_UDP;

	/* Translate the topmost Via header and its parameters */
	if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
				    hdr, NULL, &matchoff, &matchlen,
				    &addr, &port) > 0) {
		unsigned int olen, matchend, poff, plen, buflen, n;
		char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];

		/* Only headers that belong to this connection are rewritten:
		 * requests carry our source, responses carry our destination. */
		if (request) {
			if (addr.ip != ct->tuplehash[dir].tuple.src.u3.ip ||
			    port != ct->tuplehash[dir].tuple.src.u.udp.port)
				goto next;
		} else {
			if (addr.ip != ct->tuplehash[dir].tuple.dst.u3.ip ||
			    port != ct->tuplehash[dir].tuple.dst.u.udp.port)
				goto next;
		}

		/* Remember the length before mangling so offsets into the
		 * message can be adjusted afterwards. */
		olen = *datalen;
		if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
			      &addr, port))
			return NF_DROP;

		/* Account for the size change caused by map_addr() */
		matchend = matchoff + matchlen + *datalen - olen;

		/* The maddr= parameter specifies where to send the reply;
		 * rewrite it if it still carries the untranslated address. */
		if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
					       "maddr=", &poff, &plen,
					       &addr) > 0 &&
		    addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
		    addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {
			buflen = sprintf(buffer, "%pI4",
					&ct->tuplehash[!dir].tuple.dst.u3.ip);
			if (!mangle_packet(skb, dataoff, dptr, datalen,
					   poff, plen, buffer, buflen))
				return NF_DROP;
		}

		/* The received= parameter contains the address from which
		 * the server received the request. */
		if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
					       "received=", &poff, &plen,
					       &addr) > 0 &&
		    addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip &&
		    addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {
			buflen = sprintf(buffer, "%pI4",
					&ct->tuplehash[!dir].tuple.src.u3.ip);
			if (!mangle_packet(skb, dataoff, dptr, datalen,
					   poff, plen, buffer, buflen))
				return NF_DROP;
		}

		/* The rport= parameter (RFC 3581) contains the port number
		 * from which the server received the request. */
		if (ct_sip_parse_numerical_param(ct, *dptr, matchend, *datalen,
						 "rport=", &poff, &plen,
						 &n) > 0 &&
		    htons(n) == ct->tuplehash[dir].tuple.dst.u.udp.port &&
		    htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) {
			__be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
			buflen = sprintf(buffer, "%u", ntohs(p));
			if (!mangle_packet(skb, dataoff, dptr, datalen,
					   poff, plen, buffer, buflen))
				return NF_DROP;
		}
	}

next:
	/* Translate every Contact header */
	coff = 0;
	in_header = 0;
	while (ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
				       SIP_HDR_CONTACT, &in_header,
				       &matchoff, &matchlen,
				       &addr, &port) > 0) {
		if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
			      &addr, port))
			return NF_DROP;
	}

	/* Finally translate the From and To headers */
	if (!map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_FROM) ||
	    !map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_TO))
		return NF_DROP;

	return NF_ACCEPT;
}
Beispiel #28
0
//----------------------------------------------------------------------
// Output one instruction operand into the current output buffer.
// Returns 0 for o_void (no operand present), 1 otherwise.
bool idaapi outop(op_t &x)
{
  uval_t v;

  switch ( x.type )
  {
    case o_imm:
      out_symbol('#');
      OutValue(x, OOFW_IMM);
      break;

    case o_ind_reg:
      out_symbol('@');          // indirect access: '@' prefix, then the register
      /* fallthrough */

    case o_reg:
      OutReg(x.reg);
      break;

    case o_phrase:
//ig: out_keyword is better than a plain OutLine()
//    because the color will be correct
      out_keyword(phrases[x.phrase]);
      break;

    case o_displ:
      OutValue(x, OOF_ADDR | OOFW_IMM);  // x.addr
      out_symbol('(');
      OutReg(x.reg);
      out_symbol(')');
      break;

    case o_ind_mem:
      out_symbol('@');          // indirect access: '@' prefix, then the address
      /* fallthrough */

    case o_mem:
    case o_near:
      // map to a linear address (data mapping for o_mem/o_ind_mem)
      v = map_addr(x.addr, x.n, x.type != o_near);
      if ( !out_name_expr(x, v, x.addr) )
      {
        // no name at the target: try a known I/O port name,
        // otherwise print the raw address and flag the problem
        const char *name = z8_find_ioport(v);
        if ( name != NULL )
        {
          out_line(name, COLOR_IMPNAME);
        }
        else
        {
          OutValue(x, OOF_ADDR | OOF_NUMBER | OOFS_NOSIGN | OOFW_16);
          QueueSet(Q_noName, cmd.ea);
        }
      }
      break;

    case o_void:
      return 0;

    default:
      warning("out: %a: bad optype %d", cmd.ea, x.type);
  }

  return 1;
}
Beispiel #29
0
/*
 * SIP NAT helper main hook (earlier variant): rewrite the addresses and
 * ports embedded in the SIP message so they match the conntrack NAT
 * mapping.  Returns NF_ACCEPT on success, NF_DROP if mangling fails.
 */
static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
			       const char **dptr, unsigned int *datalen)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned int coff, matchoff, matchlen;
	enum sip_header_types hdr;
	union nf_inet_addr addr;
	__be16 port;
	int request, in_header;

	/* Basic rules: requests and responses. */
	if (strnicmp(*dptr, "SIP/2.0", strlen("SIP/2.0")) != 0) {
		if (ct_sip_parse_request(ct, *dptr, *datalen,
					 &matchoff, &matchlen,
					 &addr, &port) > 0 &&
		    !map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
			      &addr, port))
			return NF_DROP;
		request = 1;
	} else
		request = 0;

	if (nf_ct_protonum(ct) == IPPROTO_TCP)
		hdr = SIP_HDR_VIA_TCP;
	else
		hdr = SIP_HDR_VIA_UDP;

	/* Translate topmost Via header and parameters */
	if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
				    hdr, NULL, &matchoff, &matchlen,
				    &addr, &port) > 0) {
		unsigned int matchend, poff, plen, buflen, n;
		char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];

		/* We're only interested in headers related to this
		 * connection */
		if (request) {
			if (addr.ip != ct->tuplehash[dir].tuple.src.u3.ip ||
			    port != ct->tuplehash[dir].tuple.src.u.udp.port)
				goto next;
		} else {
			if (addr.ip != ct->tuplehash[dir].tuple.dst.u3.ip ||
			    port != ct->tuplehash[dir].tuple.dst.u.udp.port)
				goto next;
		}

		if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
			      &addr, port))
			return NF_DROP;

		/* NOTE(review): map_addr() may change *datalen, which shifts
		 * every offset past matchoff; this matchend does not account
		 * for that delta.  The other ip_nat_sip variant in this file
		 * corrects it via "matchoff + matchlen + *datalen - olen" --
		 * confirm whether the same fix is needed here. */
		matchend = matchoff + matchlen;

		/* The maddr= parameter (RFC 2543; "2361" in the original
		 * comment was a typo -- RFC 2361 is the WAVE/AVI codec
		 * registry) specifies where to send the reply. */
		if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
					       "maddr=", &poff, &plen,
					       &addr) > 0 &&
		    addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
		    addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {
			buflen = sprintf(buffer, "%pI4",
					&ct->tuplehash[!dir].tuple.dst.u3.ip);
			if (!mangle_packet(skb, dataoff, dptr, datalen,
					   poff, plen, buffer, buflen))
				return NF_DROP;
		}

		/* The received= parameter (RFC 2543) contains the address
		 * from which the server received the request. */
		if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
					       "received=", &poff, &plen,
					       &addr) > 0 &&
		    addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip &&
		    addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {
			buflen = sprintf(buffer, "%pI4",
					&ct->tuplehash[!dir].tuple.src.u3.ip);
			if (!mangle_packet(skb, dataoff, dptr, datalen,
					   poff, plen, buffer, buflen))
				return NF_DROP;
		}

		/* The rport= parameter (RFC 3581) contains the port number
		 * from which the server received the request. */
		if (ct_sip_parse_numerical_param(ct, *dptr, matchend, *datalen,
						 "rport=", &poff, &plen,
						 &n) > 0 &&
		    htons(n) == ct->tuplehash[dir].tuple.dst.u.udp.port &&
		    htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) {
			__be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
			buflen = sprintf(buffer, "%u", ntohs(p));
			if (!mangle_packet(skb, dataoff, dptr, datalen,
					   poff, plen, buffer, buflen))
				return NF_DROP;
		}
	}

next:
	/* Translate Contact headers */
	coff = 0;
	in_header = 0;
	while (ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
				       SIP_HDR_CONTACT, &in_header,
				       &matchoff, &matchlen,
				       &addr, &port) > 0) {
		if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
			      &addr, port))
			return NF_DROP;
	}

	/* Finally translate the From and To headers */
	if (!map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_FROM) ||
	    !map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_TO))
		return NF_DROP;

	return NF_ACCEPT;
}
Beispiel #30
0
// Return the contents of slot n of this object: look the slot
// descriptor up in the object's map, then read its value.
oop_t MemObj::contents_of_slot(oop_t n) {
  SlotDesc* const desc = map_addr()->find_slot(n);
  assert(desc);  // the slot must exist in the map
  return desc->contents(this);
}