Example #1
static void free_module(struct module *mod)
{
	__unlink_module(mod);
	module_unload_free(mod);
	module_free(mod, mod->module_init);
	module_free(mod, mod->module_core);
}
Example #2
void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_free(NULL, fp->bpf_func);

	bpf_prog_unlock_free(fp);
}
Example #3
Module*
module_load (const gchar *path,
             const gchar *channel_name)
{
  void (*init) (XfsmSplashConfig *config);
  Module *module;
  gchar          property_base[512];
  XfconfChannel *channel;
  gchar         *dp;
  gchar         *sp;

  /* load module */
  module = g_new0 (Module, 1);
#if GLIB_CHECK_VERSION(2,4,0)
  module->handle = g_module_open (path, G_MODULE_BIND_LOCAL);
#else
  module->handle = g_module_open (path, 0);
#endif
  if (G_UNLIKELY (module->handle == NULL))
    goto error0;
  if (!g_module_symbol (module->handle, "config_init", (gpointer)&init))
    goto error1;

  /* determine engine name */
  sp = module->engine = g_path_get_basename (path);
  if (sp[0] == 'l' && sp[1] == 'i' && sp[2] == 'b')
    sp += 3;
  for (dp = module->engine; *sp != '\0'; ++dp, ++sp)
    {
      if (*sp == '.')
        break;
      else
        *dp = *sp;
    }
  *dp = '\0';

  g_snprintf (property_base, sizeof (property_base),
              "/splash/engines/%s", module->engine);
  channel = xfconf_channel_new_with_property_base (channel_name,
                                                   property_base);

  /* initialize module */
  module->config.rc = xfsm_splash_rc_new (channel);
  g_object_unref (channel);
  init (&module->config);
  if (G_UNLIKELY (module->config.name == NULL))
    {
      module_free (module);
      return NULL;
    }

  /* succeed */
  return module;

error1:
  g_module_close (module->handle);
error0:
  g_free (module);
  return NULL;
}
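The goto labels above only unwind what was set up before the failure; a fully constructed module is instead released through module_free(), whose xfsm implementation is not part of this example. As a rough sketch of what such a counterpart might look like, assuming the Module struct owns only the handle and the engine string shown above (the config.rc teardown is left as a comment because its API is not visible here):

static void
module_free (Module *module)
{
  /* hypothetical counterpart to module_load(): release resources in
   * reverse order of acquisition */
  if (module->handle != NULL)
    g_module_close (module->handle);
  g_free (module->engine);
  /* module->config.rc would also need to be released here through
   * whatever API XfsmSplashRc provides */
  g_free (module);
}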
Example #4
static void bpf2_jit_free_deferred(struct work_struct *work)
{
	struct bpf_program *prog = container_of(work, struct bpf_program, work);
	unsigned long addr = (unsigned long)prog->jit_image & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	set_memory_rw(addr, header->pages);
	module_free(NULL, header);
	free_bpf_program(prog);
}
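module_free() may sleep, which is presumably why Example #4 defers the work to process context. For the callback to ever run, a caller has to queue the work item; a minimal sketch of that scheduling side, assuming struct bpf_program embeds the work member referenced by container_of() above (the name bpf2_jit_free is illustrative):

void bpf2_jit_free(struct bpf_program *prog)
{
	if (prog->jit_image) {
		/* module_free() can sleep, so hand the teardown to a workqueue */
		INIT_WORK(&prog->work, bpf2_jit_free_deferred);
		schedule_work(&prog->work);
		return;
	}
	free_bpf_program(prog);
}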
Example #5
int do_init_module(void __user * umod, unsigned long len,
		   const char __user * uargs)
{
	struct module *mod;
	int ret = 0;

	// TODO: non-preemptive kernel does not need to lock module mutex

	mod = load_module(umod, len, uargs);
	if (mod == NULL) {
		// TODO: non-preemptive kernel does not need to unlock module mutex
		return -1;
	}
	// TODO: non-preemptive kernel does not need to unlock module mutex
	struct module test_module;
	kprintf("Module size: %d\n", sizeof(test_module));
  kprintf("list: %d\n", (int)&(test_module.list) - (int)&(test_module));
  kprintf("name: %d\n", (int)&(test_module.name) - (int)&(test_module));
  kprintf("kernel_symbol: %d\n", (int)&(test_module.syms) - (int)&(test_module));
  kprintf("num_syms: %d\n",  (int)&(test_module.num_syms) - (int)&(test_module));
  kprintf("init: %d\n", (int)&(test_module.init) - (int)&(test_module));
  kprintf("module_init: %d\n", (int)&(test_module.module_init) - (int)&(test_module));
  kprintf("module_core: %d\n", (int)&(test_module.module_core) - (int)&(test_module));
  kprintf("symtab: %d\n", (int)&(test_module.symtab) - (int)&(test_module));
  kprintf("percpu: %d\n", (int)&(test_module.percpu) - (int)&(test_module));
  kprintf("exit: %d\n", (int)&(test_module.exit) - (int)&(test_module));
	if (mod->init != NULL) {
		ret = (*mod->init) ();
	}
	if (ret < 0) {
		mod->state = MODULE_STATE_GOING;
		// TODO: non-preemptive kernel does not need to lock module mutex
		free_module(mod);
		// TODO: non-preemptive kernel does not need to unlock
		return ret;
	}
	if (ret > 0) {
		kprintf("%s: %s->init suspiciously returned %d\n"
			"%s: loading anyway...\n", __func__, mod->name, ret,
			__func__);
	}
	mod->state = MODULE_STATE_LIVE;

	// TODO: lock?
	module_free(mod, mod->module_init);
	mod->module_init = NULL;
	mod->init_size = 0;
	mod->init_text_size = 0;
	// TODO: unlock

	return 0;
}
Example #6
bool
module_load(const char *path)
{
    Module *module;
    ModuleInfo *moduleInfo;

    module = module_new();

    module->Path = StrDup(path);

    if(uv_dlopen(path, &module->handle) < 0)
    {
        module_free(module);
        return false;
    }

    if(uv_dlsym(&module->handle, "ModuleInfoPtr", (void **)&moduleInfo) < 0)
    {
        module_free(module);
        return false;
    }

    if(moduleInfo == NULL)
    {
        module_free(module);
        return false;
    }

    if(moduleInfo->Load != NULL)
    {
        moduleInfo->Load();
    }

    vector_push_back(ModuleList, module);

    return true;
}
Example #7
int	core_run_no_termcap(t_core *core)
{
  int	ret;

  clear_buffer(core->buffer, 0);
  put_str(BASE_PROMPT);
  while (!core->shutdown && (ret = read(0, core->buffer, BUFF_SIZE)) > 0)
    {
      clear_buffer(core->buffer, ret - 1);
      parser(core, core->buffer);
      if (!core->shutdown)
	put_str(BASE_PROMPT);
      clear_buffer(core->buffer, 0);
    }
  free(core->prompt);
  module_free(core);
  return (NO_ERR);
}
Example #8
/** Allocates the memory for interfaces and variables in a module.
 * \param nof_inputs Number of input interfaces to allocate
 * \param nof_outputs Number of output interfaces to allocate
 * \param nof_variables Number of variables to allocate
 * \returns 0 on success or -1 on error
 */
int module_alloc(module_t *module, int nof_inputs, int nof_outputs, int nof_variables) {
	if (!module) return -1;
	mdebug("module_id=%d, nof_inputs=%d, nof_outputs=%d, nof_variables=%d\n",module->id,
			nof_inputs, nof_outputs, nof_variables);
	if (module->inputs || module->outputs || module->variables) {
		return -1;
	}
	module->inputs = (interface_t*) pool_alloc(nof_inputs,sizeof(interface_t));
	module->outputs = (interface_t*) pool_alloc(nof_outputs,sizeof(interface_t));
	module->variables = (variable_t*) pool_alloc(nof_variables,sizeof(variable_t));
	if (!module->inputs || !module->outputs || !module->variables) {
		module_free(module);
		return -1;
	}
	module->nof_inputs = nof_inputs;
	module->nof_outputs = nof_outputs;
	module->nof_variables = nof_variables;
	return 0;
}
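Per the doc comment, module_alloc() leaves the module with three pools that are later handed back through module_free(); on failure it already releases any partial allocations itself. A minimal usage sketch, assuming a module_t can simply be zero-initialized before its first use:

module_t mod;

memset(&mod, 0, sizeof(mod));  /* inputs/outputs/variables start out NULL */
if (module_alloc(&mod, 2, 1, 4) == -1) {
	return -1;  /* nothing to free: module_alloc() cleaned up after itself */
}

/* ... fill in mod.inputs, mod.outputs and mod.variables ... */

module_free(&mod);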
Example #9
int module_disable(struct np_module* module, int destroy) {
	ncds_free(module->ds);
	module->ds = NULL;

	if (ncds_consolidate() != 0) {
		nc_verb_warning("%s: consolidating libnetconf datastores failed for module %s.", __func__, module->name);
	}

	if (destroy) {
		if (module->next) {
			module->next->prev = module->prev;
		}
		if (module->prev) {
			module->prev->next = module->next;
		}
		if (netopeer_options.modules == module) {
			netopeer_options.modules = module->next;
		}

		module_free(module);
	}
	return(EXIT_SUCCESS);
}
Example #10
int	core_run_termcap(t_core *core)
{
  int	ret;
  char	buffer[BUF_CAPS_SIZE];

  put_str(PROMPT_1(core->prompt));
  while (!core->shutdown && (ret = read(0, buffer, 3)) > 0)
    {
      clear_buffer_caps(buffer, ret);
      if (key_hook(core, buffer) == FALSE)
	core->buf_caps = str_cat(core->buf_caps, buffer, FREE_LEFT);
      clear_buffer_caps(buffer, BUF_CAPS_SIZE);
      if (!core->shutdown && !reset_cursor(core, 0))
	return (ERROR);
      core->prompt = get_prompt(core);
      if (!core->shutdown)
	put_str(PROMPT_1(core->prompt));
      put_str(core->buf_caps);
    }
  free(core->buf_caps);
  free(core->prompt);
  module_free(core);
  return (NO_ERR);
}
Example #11
void mapfile_apply(list_t* names)
{
	TCHAR* undecorated;
	ULONG total = 0, filtered = 0, applied = 0, addr;
	int err, result;
	name_t* nm, *nm_last;
	list_t* rmtable;
	module_t* module = module_info(&err);
	if (!err)
	{
		Addtolist(0, 0, "Applying names from map file to module '%s'", module->name);
		if (!g_Config->collisionchecks)
		{
			rmtable = list_create();
		}
		nm = (name_t*)names->first;
		while (nm)
		{
			if (nm->segment < module->nseg)
			{
				if (g_Config->demangle)
				{
					undecorated = (TCHAR*)malloc(2 * nm->size * sizeof(TCHAR));
					if (result = Demanglename(nm->buffer, NM_LIBRARY, undecorated))
					{
						free(nm->buffer);
						nm->size = result + 1;
						nm->buffer = undecorated;
					}
					else
					{
						free(undecorated);
					}
				}
				addr = module->base + module->segments[nm->segment] + nm->offset;
				if (g_Config->usemasks)
				{
					if (result = mask_filter(nm))
					{
						filtered++;
						if ((result & FILTER_SKIP) && !g_Config->collisionchecks &&
							/* Findname for NM_ANYNAME fails every time, dunno why */
							(Findname(addr, NM_COMMENT, NULL) || Findname(addr, NM_LABEL, NULL))) 
						{
							list_addname(rmtable, NULL, 0, nm->segment, nm->offset);
							total++;
							nm = nm->next;
							continue;
						}
					}
				}
				if (g_Config->comments)
				{
					if (g_Config->collisionchecks)
					{
						if (!Findname(addr, NM_COMMENT, NULL) && !Quickinsertname(addr, NM_COMMENT, nm->buffer))
						{
							applied++;
						}
					}
					else if (!Quickinsertname(addr, NM_COMMENT, nm->buffer))
					{
						applied++;
					}
				}
				if (g_Config->labels)
				{
					if (g_Config->collisionchecks)
					{
						if (!Findlabel(addr, NULL) && !Quickinsertname(addr, NM_LABEL, nm->buffer))
						{
							applied++;
						}
					}
					else if (!Quickinsertname(addr, NM_LABEL, nm->buffer))
					{
						applied++;
					}
				}
			}
			total++;
			Progress(total * 1000 / names->count, "Inserting names");
			nm = nm->next;
		}
		Progress(0, "");
		Infoline("Merging names");
		Mergequicknames();
		if (!g_Config->collisionchecks)
		{
			Infoline("Cleaning skipped entries");
			nm = (name_t*)rmtable->first;
			while (nm)
			{
				addr = module->base + module->segments[nm->segment] + nm->offset;
				if (g_Config->comments)
				{
					Insertname(addr, NM_COMMENT, "");
				}
				if (g_Config->labels)
				{
					Insertname(addr, NM_LABEL, "");
				}
				nm_last = nm;
				nm = nm->next;
				/* Manual list_freenames expansion to speed it up somehow */
				free(nm_last);
			}
		}
		Infoline("Total loaded: %d, Names applied: %d, Names filtered: %d", total, applied, filtered);
		Addtolist(0, -1, "  Total loaded: %d, Names applied: %d, Names filtered: %d", total, applied, filtered);
		module_free(module);
	}
	else
	{
		module_error(err);
	}
}
Example #12
static void jit_free_defer(struct work_struct *arg)
{
	module_free(NULL, arg);
}
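Example #12 is only half of the picture: passing the work_struct pointer straight to module_free() works because the caller places the work item at the very start of the region that module_alloc() returned, i.e. inside the now-dead JIT image itself. A sketch of how such a caller might arrange this, where struct sk_filter, bpf_func and sk_run_filter are assumptions borrowed from older kernels rather than part of this example:

static void jit_free(struct sk_filter *fp)
{
	if (fp->bpf_func != sk_run_filter) {
		/* the generated code is no longer needed, so reuse its first
		 * bytes to hold the work item; module_free(NULL, work) in
		 * jit_free_defer() then releases the whole image
		 */
		struct work_struct *work = (struct work_struct *)fp->bpf_func;

		INIT_WORK(work, jit_free_defer);
		schedule_work(work);
	}
	kfree(fp);
}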
Example #13
static int
module_load(const char *path, const char *name,
	    const struct module_dir_load_settings *set,
	    struct module *all_modules,
	    struct module **module_r, const char **error_r)
{
	void *handle;
	struct module *module;
	const char *const *module_version;

	*module_r = NULL;
	*error_r = NULL;

	if (set->ignore_dlopen_errors) {
		handle = quiet_dlopen(path, RTLD_GLOBAL | RTLD_NOW);
		if (handle == NULL) {
			if (set->debug) {
				i_debug("Skipping module %s, "
					"because dlopen() failed: %s "
					"(this is usually intentional, "
					"so just ignore this message)",
					name, dlerror());
			}
			return 0;
		}
	} else {
		handle = dlopen(path, RTLD_GLOBAL | RTLD_NOW);
		if (handle == NULL) {
			*error_r = t_strdup_printf("dlopen() failed: %s",
						   dlerror());
#ifdef RTLD_LAZY
			/* try to give a better error message by lazily loading
			   the plugin and checking its dependencies */
			handle = dlopen(path, RTLD_LAZY);
			if (handle == NULL)
				return -1;
#else
			return -1;
#endif
		}
	}

	module = i_new(struct module, 1);
	module->path = i_strdup(path);
	module->name = i_strdup(name);
	module->handle = handle;

	module_version = set->abi_version == NULL ? NULL :
		get_symbol(module, t_strconcat(name, "_version", NULL), TRUE);
	if (module_version != NULL &&
	    !versions_equal(*module_version, set->abi_version)) {
		*error_r = t_strdup_printf(
			"Module is for different ABI version %s (we have %s)",
			*module_version, set->abi_version);
		module_free(module);
		return -1;
	}

	/* get our init func */
	module->init = (void (*)(struct module *))
		get_symbol(module, t_strconcat(name, "_init", NULL),
			   !set->require_init_funcs);
	module->deinit = (void (*)(void))
		get_symbol(module, t_strconcat(name, "_deinit", NULL),
			   !set->require_init_funcs);

	if ((module->init == NULL || module->deinit == NULL) &&
	    set->require_init_funcs) {
		*error_r = t_strdup_printf(
			"Module doesn't have %s function",
			module->init == NULL ? "init" : "deinit");
	} else if (!module_check_wrong_binary_dependency(set, module, error_r)) {
		/* failed */
	} else if (!module_check_missing_plugin_dependencies(set, module,
							     all_modules, error_r)) {
		/* failed */
	}

	if (*error_r != NULL) {
		module->deinit = NULL;
		module_free(module);
		return -1;
	}

	if (set->debug)
		i_debug("Module loaded: %s", path);
	*module_r = module;
	return 1;
}
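A brief sketch of how a caller might consume the three possible return values, using only names that appear in the example above (path, name, set and all_modules are assumed to come from the surrounding loader code):

struct module *module;
const char *error;
int ret;

ret = module_load(path, name, set, all_modules, &module, &error);
if (ret < 0) {
	/* hard failure; error describes the reason */
} else if (ret == 0) {
	/* skipped: dlopen() failed and set->ignore_dlopen_errors was set */
} else {
	/* loaded; module stays usable until module_free(module) */
}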
Example #14
void module_free_exec(struct module *mod, void *module_region)
{
	module_free(mod, module_region);
}
Example #15
void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int cleanup_addr, proglen, oldproglen = 0;
	u32 temp[8], *prog, *func, seen = 0, pass;
	const struct sock_filter *filter = fp->insns;
	int i, flen = fp->len, pc_ret0 = -1;
	unsigned int *addrs;
	void *image;

	if (!bpf_jit_enable)
		return;

	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/* Before first pass, make a rough estimation of addrs[]
	 * each bpf instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i < flen; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	cleanup_addr = proglen; /* epilogue address */
	image = NULL;
	for (pass = 0; pass < 10; pass++) {
		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;

		/* no prologue/epilogue for trivial filters (RET something) */
		proglen = 0;
		prog = temp;

		/* Prologue */
		if (seen_or_pass0) {
			if (seen_or_pass0 & SEEN_MEM) {
				unsigned int sz = BASE_STACKFRAME;
				sz += BPF_MEMWORDS * sizeof(u32);
				emit_alloc_stack(sz);
			}

			/* Make sure we don't leak kernel memory. */
			if (seen_or_pass0 & SEEN_XREG)
				emit_clear(r_X);

			/* If this filter needs to access skb data,
			 * load %o4 and %o5 with:
			 *  %o4 = skb->len - skb->data_len
			 *  %o5 = skb->data
			 * And also back up %o7 into r_saved_O7 so we can
			 * invoke the stubs using 'call'.
			 */
			if (seen_or_pass0 & SEEN_DATAREF) {
				emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN);
				emit_load32(r_SKB, struct sk_buff, data_len, r_TMP);
				emit_sub(r_HEADLEN, r_TMP, r_HEADLEN);
				emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA);
			}
		}
		emit_reg_move(O7, r_saved_O7);

		switch (filter[0].code) {
		case BPF_RET | BPF_K:
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			/* The first instruction sets the A register (or is
			 * a "RET 'constant'")
			 */
			break;
		default:
			/* Make sure we don't leak kernel information to the
			 * user.
			 */
			emit_clear(r_A); /* A = 0 */
		}

		for (i = 0; i < flen; i++) {
			unsigned int K = filter[i].k;
			unsigned int t_offset;
			unsigned int f_offset;
			u32 t_op, f_op;
			u16 code = bpf_anc_helper(&filter[i]);
			int ilen;

			switch (code) {
			case BPF_ALU | BPF_ADD | BPF_X:	/* A += X; */
				emit_alu_X(ADD);
				break;
			case BPF_ALU | BPF_ADD | BPF_K:	/* A += K; */
				emit_alu_K(ADD, K);
				break;
			case BPF_ALU | BPF_SUB | BPF_X:	/* A -= X; */
				emit_alu_X(SUB);
				break;
			case BPF_ALU | BPF_SUB | BPF_K:	/* A -= K */
				emit_alu_K(SUB, K);
				break;
			case BPF_ALU | BPF_AND | BPF_X:	/* A &= X */
				emit_alu_X(AND);
				break;
			case BPF_ALU | BPF_AND | BPF_K:	/* A &= K */
				emit_alu_K(AND, K);
				break;
			case BPF_ALU | BPF_OR | BPF_X:	/* A |= X */
				emit_alu_X(OR);
				break;
			case BPF_ALU | BPF_OR | BPF_K:	/* A |= K */
				emit_alu_K(OR, K);
				break;
			case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
			case BPF_ALU | BPF_XOR | BPF_X:
				emit_alu_X(XOR);
				break;
			case BPF_ALU | BPF_XOR | BPF_K:	/* A ^= K */
				emit_alu_K(XOR, K);
				break;
			case BPF_ALU | BPF_LSH | BPF_X:	/* A <<= X */
				emit_alu_X(SLL);
				break;
			case BPF_ALU | BPF_LSH | BPF_K:	/* A <<= K */
				emit_alu_K(SLL, K);
				break;
			case BPF_ALU | BPF_RSH | BPF_X:	/* A >>= X */
				emit_alu_X(SRL);
				break;
			case BPF_ALU | BPF_RSH | BPF_K:	/* A >>= K */
				emit_alu_K(SRL, K);
				break;
			case BPF_ALU | BPF_MUL | BPF_X:	/* A *= X; */
				emit_alu_X(MUL);
				break;
			case BPF_ALU | BPF_MUL | BPF_K:	/* A *= K */
				emit_alu_K(MUL, K);
				break;
			case BPF_ALU | BPF_DIV | BPF_K:	/* A /= K with K != 0*/
				if (K == 1)
					break;
				emit_write_y(G0);
#ifdef CONFIG_SPARC32
				/* The Sparc v8 architecture requires
				 * three instructions between a %y
				 * register write and the first use.
				 */
				emit_nop();
				emit_nop();
				emit_nop();
#endif
				emit_alu_K(DIV, K);
				break;
			case BPF_ALU | BPF_DIV | BPF_X:	/* A /= X; */
				emit_cmpi(r_X, 0);
				if (pc_ret0 > 0) {
					t_offset = addrs[pc_ret0 - 1];
#ifdef CONFIG_SPARC32
					emit_branch(BE, t_offset + 20);
#else
					emit_branch(BE, t_offset + 8);
#endif
					emit_nop(); /* delay slot */
				} else {
					emit_branch_off(BNE, 16);
					emit_nop();
#ifdef CONFIG_SPARC32
					emit_jump(cleanup_addr + 20);
#else
					emit_jump(cleanup_addr + 8);
#endif
					emit_clear(r_A);
				}
				emit_write_y(G0);
#ifdef CONFIG_SPARC32
				/* The Sparc v8 architecture requires
				 * three instructions between a %y
				 * register write and the first use.
				 */
				emit_nop();
				emit_nop();
				emit_nop();
#endif
				emit_alu_X(DIV);
				break;
			case BPF_ALU | BPF_NEG:
				emit_neg();
				break;
			case BPF_RET | BPF_K:
				if (!K) {
					if (pc_ret0 == -1)
						pc_ret0 = i;
					emit_clear(r_A);
				} else {
					emit_loadimm(K, r_A);
				}
				/* Fallthrough */
			case BPF_RET | BPF_A:
				if (seen_or_pass0) {
					if (i != flen - 1) {
						emit_jump(cleanup_addr);
						emit_nop();
						break;
					}
					if (seen_or_pass0 & SEEN_MEM) {
						unsigned int sz = BASE_STACKFRAME;
						sz += BPF_MEMWORDS * sizeof(u32);
						emit_release_stack(sz);
					}
				}
				/* jmpl %r_saved_O7 + 8, %g0 */
				emit_jmpl(r_saved_O7, 8, G0);
				emit_reg_move(r_A, O0); /* delay slot */
				break;
			case BPF_MISC | BPF_TAX:
				seen |= SEEN_XREG;
				emit_reg_move(r_A, r_X);
				break;
			case BPF_MISC | BPF_TXA:
				seen |= SEEN_XREG;
				emit_reg_move(r_X, r_A);
				break;
			case BPF_ANC | SKF_AD_CPU:
				emit_load_cpu(r_A);
				break;
			case BPF_ANC | SKF_AD_PROTOCOL:
				emit_skb_load16(protocol, r_A);
				break;
#if 0
				/* GCC won't let us take the address of
				 * a bit field even though we very much
				 * know what we are doing here.
				 */
			case BPF_ANC | SKF_AD_PKTTYPE:
				__emit_skb_load8(pkt_type, r_A);
				emit_alu_K(SRL, 5);
				break;
#endif
			case BPF_ANC | SKF_AD_IFINDEX:
				emit_skb_loadptr(dev, r_A);
				emit_cmpi(r_A, 0);
				emit_branch(BE_PTR, cleanup_addr + 4);
				emit_nop();
				emit_load32(r_A, struct net_device, ifindex, r_A);
				break;
			case BPF_ANC | SKF_AD_MARK:
				emit_skb_load32(mark, r_A);
				break;
			case BPF_ANC | SKF_AD_QUEUE:
				emit_skb_load16(queue_mapping, r_A);
				break;
			case BPF_ANC | SKF_AD_HATYPE:
				emit_skb_loadptr(dev, r_A);
				emit_cmpi(r_A, 0);
				emit_branch(BE_PTR, cleanup_addr + 4);
				emit_nop();
				emit_load16(r_A, struct net_device, type, r_A);
				break;
			case BPF_ANC | SKF_AD_RXHASH:
				emit_skb_load32(hash, r_A);
				break;
			case BPF_ANC | SKF_AD_VLAN_TAG:
			case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
				emit_skb_load16(vlan_tci, r_A);
				if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
					emit_andi(r_A, VLAN_VID_MASK, r_A);
				} else {
					emit_loadimm(VLAN_TAG_PRESENT, r_TMP);
					emit_and(r_A, r_TMP, r_A);
				}
				break;

			case BPF_LD | BPF_IMM:
				emit_loadimm(K, r_A);
				break;
			case BPF_LDX | BPF_IMM:
				emit_loadimm(K, r_X);
				break;
			case BPF_LD | BPF_MEM:
				emit_ldmem(K * 4, r_A);
				break;
			case BPF_LDX | BPF_MEM:
				emit_ldmem(K * 4, r_X);
				break;
			case BPF_ST:
				emit_stmem(K * 4, r_A);
				break;
			case BPF_STX:
				emit_stmem(K * 4, r_X);
				break;

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

			case BPF_LD | BPF_W | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
common_load:			seen |= SEEN_DATAREF;
				emit_loadimm(K, r_OFF);
				emit_call(func);
				break;
			case BPF_LD | BPF_H | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
				goto common_load;
			case BPF_LD | BPF_B | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
				goto common_load;
			case BPF_LDX | BPF_B | BPF_MSH:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
				goto common_load;
			case BPF_LD | BPF_W | BPF_IND:
				func = bpf_jit_load_word;
common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
				if (K) {
					if (is_simm13(K)) {
						emit_addi(r_X, K, r_OFF);
					} else {
						emit_loadimm(K, r_TMP);
						emit_add(r_X, r_TMP, r_OFF);
					}
				} else {
					emit_reg_move(r_X, r_OFF);
				}
				emit_call(func);
				break;
			case BPF_LD | BPF_H | BPF_IND:
				func = bpf_jit_load_half;
				goto common_load_ind;
			case BPF_LD | BPF_B | BPF_IND:
				func = bpf_jit_load_byte;
				goto common_load_ind;
			case BPF_JMP | BPF_JA:
				emit_jump(addrs[i + K]);
				emit_nop();
				break;

#define COND_SEL(CODE, TOP, FOP)	\
	case CODE:			\
		t_op = TOP;		\
		f_op = FOP;		\
		goto cond_branch

			COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU);
			COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU);
			COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE);
			COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE);
			COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU);
			COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU);
			COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE);
			COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE);

cond_branch:			f_offset = addrs[i + filter[i].jf];
				t_offset = addrs[i + filter[i].jt];

				/* same targets, can avoid doing the test :) */
				if (filter[i].jt == filter[i].jf) {
					emit_jump(t_offset);
					emit_nop();
					break;
				}

				switch (code) {
				case BPF_JMP | BPF_JGT | BPF_X:
				case BPF_JMP | BPF_JGE | BPF_X:
				case BPF_JMP | BPF_JEQ | BPF_X:
					seen |= SEEN_XREG;
					emit_cmp(r_A, r_X);
					break;
				case BPF_JMP | BPF_JSET | BPF_X:
					seen |= SEEN_XREG;
					emit_btst(r_A, r_X);
					break;
				case BPF_JMP | BPF_JEQ | BPF_K:
				case BPF_JMP | BPF_JGT | BPF_K:
				case BPF_JMP | BPF_JGE | BPF_K:
					if (is_simm13(K)) {
						emit_cmpi(r_A, K);
					} else {
						emit_loadimm(K, r_TMP);
						emit_cmp(r_A, r_TMP);
					}
					break;
				case BPF_JMP | BPF_JSET | BPF_K:
					if (is_simm13(K)) {
						emit_btsti(r_A, K);
					} else {
						emit_loadimm(K, r_TMP);
						emit_btst(r_A, r_TMP);
					}
					break;
				}
				if (filter[i].jt != 0) {
					if (filter[i].jf)
						t_offset += 8;
					emit_branch(t_op, t_offset);
					emit_nop(); /* delay slot */
					if (filter[i].jf) {
						emit_jump(f_offset);
						emit_nop();
					}
					break;
				}
				emit_branch(f_op, f_offset);
				emit_nop(); /* delay slot */
				break;

			default:
				/* hmm, too complex filter, give up with jit compiler */
				goto out;
			}
			ilen = (void *) prog - (void *) temp;
			if (image) {
				if (unlikely(proglen + ilen > oldproglen)) {
					pr_err("bpb_jit_compile fatal error\n");
					kfree(addrs);
					module_free(NULL, image);
					return;
				}
				memcpy(image + proglen, temp, ilen);
			}
			proglen += ilen;
			addrs[i] = proglen;
			prog = temp;
		}
		/* last bpf instruction is always a RET :
		 * use it to give the cleanup instruction(s) addr
		 */
		cleanup_addr = proglen - 8; /* jmpl; mov r_A,%o0; */
		if (seen_or_pass0 & SEEN_MEM)
			cleanup_addr -= 4; /* add %sp, X, %sp; */

		if (image) {
			if (proglen != oldproglen)
				pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n",
				       proglen, oldproglen);
			break;
		}
		if (proglen == oldproglen) {
			image = module_alloc(proglen);
			if (!image)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(flen, proglen, pass, image);

	if (image) {
		bpf_flush_icache(image, image + proglen);
		fp->bpf_func = (void *)image;
		fp->jited = 1;
	}
out:
	kfree(addrs);
	return;
}
Example #16
static noinline struct module *load_module(void __user * umod,
					   unsigned long len,
					   const char __user * uargs)
{
	struct elfhdr *hdr;
	struct secthdr *sechdrs;
	char *secstrings, *args, *modmagic, *strtab = NULL;
	//char *staging;

	unsigned int i;
	unsigned int symindex = 0;
	unsigned int strindex = 0;
	unsigned int modindex, versindex, infoindex, pcpuindex;
	struct module *mod;
	long err = 0;
	void *ptr = NULL;

	kprintf("load_module: umod=%p, len=%lu, uargs=%p\n", umod, len, uargs);

	if (len < sizeof(*hdr))
		return NULL;
	if (len > 64 * 1024 * 1024 || (hdr = kmalloc(len)) == NULL)
		return NULL;

	kprintf("load_module: copy_from_user\n");

	struct mm_struct *mm = current->mm;
	lock_mm(mm);
	if (!copy_from_user(mm, hdr, umod, len, 1)) {
		unlock_mm(mm);
		goto free_hdr;
	}
	unlock_mm(mm);

	kprintf("load_module: hdr:%p\n", hdr);
	// sanity check
	if (memcmp(&(hdr->e_magic), ELFMAG, SELFMAG) != 0
	    || hdr->e_type != ET_REL || !elf_check_arch(hdr)
	    || hdr->e_shentsize != sizeof(*sechdrs)) {
		kprintf("load_module: sanity check failed.\n");
		goto free_hdr;
	}

	if (len < hdr->e_shoff + hdr->e_shnum * sizeof(*sechdrs))
		goto truncated;

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	sechdrs[0].sh_addr = 0;

	for (i = 1; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_NOBITS
		    && len < sechdrs[i].sh_offset + sechdrs[i].sh_size)
			goto truncated;

		// mark sh_addr
		sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;

		if (sechdrs[i].sh_type == SHT_SYMTAB) {
			symindex = i;
			strindex = sechdrs[i].sh_link;
			strtab = (char *)hdr + sechdrs[strindex].sh_offset;
		}

	}

	modindex =
	    find_sec(hdr, sechdrs, secstrings, ".gnu.linkonce.this_module");

	if (!modindex) {
		kprintf("load_module: No module found in object.\n");
		goto free_hdr;
	}
	// temp: point mod into copy of data
	mod = (void *)sechdrs[modindex].sh_addr;

	if (symindex == 0) {
		kprintf("load_module: %s module has no symbols (stripped?).\n",
			mod->name);
		goto free_hdr;
	}
	versindex = find_sec(hdr, sechdrs, secstrings, "__versions");
	infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo");
	pcpuindex = 0;//find_pcpusec(hdr, sechdrs, secstrings);

	// don't keep modinfo and version
	sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
	sechdrs[versindex].sh_flags &= ~(unsigned long)SHF_ALLOC;

	// keep symbol and string tables
	sechdrs[symindex].sh_flags |= SHF_ALLOC;
	sechdrs[strindex].sh_flags |= SHF_ALLOC;

	/*if (!check_modstruct_version(sechdrs, versindex, mod)) {
		goto free_hdr;
	}*/

	/*
	   modmagic = get_modinfo(sechdrs, infoindex, "vermagic");

	   if (!modmagic) {
	   kprintf("load_module: bad vermagic\n");
	   goto free_hdr;
	   } else if (!same_magic(modmagic, vermagic, versindex)) {
	   ; 
	   // TODO: module magic is left for future use.
	   }
	 */

	//staging = get_modinfo(sechdrs, infoindex, "staging");
	// TODO: staging is left for future use.

	if (find_module(mod->name)) {
		kprintf("load_module: module %s exists\n", mod->name);
		goto free_mod;
	}

	mod->state = MODULE_STATE_COMING;

	// err = module_frob_arch_sections(hdr, sechdrs, secstrings, mod);
	// TODO: we do not need it for x86 or arm

	// TODO: percpu is no longer needed.

	layout_sections(mod, hdr, sechdrs, secstrings);

	ptr = module_alloc_update_bounds(mod->core_size);

	if (!ptr) {
		goto free_percpu;
	}
	memset(ptr, 0, mod->core_size);
	mod->module_core = ptr;

	ptr = module_alloc_update_bounds(mod->init_size);

	if (!ptr && mod->init_size) {
		goto free_core;
	}
	memset(ptr, 0, mod->init_size);
	mod->module_init = ptr;

	kprintf("load_module: final section addresses:\n");
	for (i = 0; i < hdr->e_shnum; i++) {
		void *dest;
		if (!(sechdrs[i].sh_flags & SHF_ALLOC)) {
			kprintf("\tSkipped %s\n",
				secstrings + sechdrs[i].sh_name);
			continue;
		}
		if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
			dest =
			    mod->module_init +
			    (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
		else
			dest = mod->module_core + sechdrs[i].sh_entsize;
		if (sechdrs[i].sh_type != SHT_NOBITS)
			memcpy(dest, (void *)sechdrs[i].sh_addr,
			       sechdrs[i].sh_size);
		sechdrs[i].sh_addr = (unsigned long)dest;
		kprintf("\t0x%lx %s\n", sechdrs[i].sh_addr,
			secstrings + sechdrs[i].sh_name);
	}
	/* Module has been moved. */
	mod = (void *)sechdrs[modindex].sh_addr;

	/* Now we've moved module, initialize linked lists, etc. */
	module_unload_init(mod);

	/* Set up license info based on the info section */
	set_license(mod, get_modinfo(sechdrs, infoindex, "license"));

	err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
			       mod);

	if (err < 0)
		goto cleanup;

	mod->syms = section_objs(hdr, sechdrs, secstrings, "__ksymtab",
				 sizeof(*mod->syms), &mod->num_syms);
	mod->crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab");

	// relocations
	for (i = 1; i < hdr->e_shnum; i++) {
		const char *strtab = (char *)sechdrs[strindex].sh_addr;
		unsigned int info = sechdrs[i].sh_info;

		/* Not a valid relocation section */
		if (info >= hdr->e_shnum)
			continue;

		/* Don't bother with non-allocated sections */
		if (!(sechdrs[info].sh_flags & SHF_ALLOC))
			continue;

		if (sechdrs[i].sh_type == SHT_REL)
			err = apply_relocate(sechdrs, strtab, symindex, i, mod);
		else if (sechdrs[i].sh_type == SHT_RELA)
			err =
			    apply_relocate_add(sechdrs, strtab, symindex, i,
					       mod);

		if (err < 0)
			goto cleanup;
	}

	err = verify_export_symbols(mod);
	if (err < 0)
		goto cleanup;

	// TODO: kallsyms is left for future use.
	//add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);

	err = module_finalize(hdr, sechdrs, mod);
	if (err < 0)
		goto cleanup;

	list_add(&modules, &mod->list);

	kfree(hdr);
	return mod;

cleanup:
	module_unload_free(mod);

free_init:
	module_free(mod, mod->module_init);

free_core:
	module_free(mod, mod->module_core);

free_percpu:

free_mod:

free_hdr:
	kfree(hdr);
	return NULL;

truncated:
	kprintf("load_module: module len %lu truncated.\n");
	goto free_hdr;
}