Esempio n. 1
0
/* Create (if needed) the trie directory hierarchy for `hash` under the
 * fort's root and return the full entry path in `*path` (caller frees).
 *
 * The path has the shape  <fort->path>/aa/bb/.../<full-hash-string>
 * where each directory level consumes `width` bytes of the hash and the
 * hierarchy is `depth` levels deep.
 *
 * Returns triefort_ok; panics if a mkdir fails.
 */
static S mk_trie_dirs(const TF * const fort, void * hash, size_t hashlen, sds * path) {
  *path = sdsnew(fort->path);
  *path = sdscat(*path, "/");

  /* width hex pairs, one trailing '/', and the NUL terminator */
  char dir_node[(fort->cfg.width * 2) + 2];

  uint8_t * hashb = hash;
  for (size_t i = 0; i < fort->cfg.depth; i++) {
    for (size_t j = 0; j < fort->cfg.width; j++) {
      char * strpos = &dir_node[j * 2];
      size_t hashix = (i * fort->cfg.width) + j;

      /* Bound the write by the space actually remaining at strpos; the
       * previous code passed sizeof(dir_node), overstating the available
       * space for every j > 0. Each write emits "xx/" and the next pair
       * overwrites the '/', so only the last pair's '/' survives as the
       * level separator. */
      snprintf(strpos, sizeof(dir_node) - (j * 2), "%02x/", hashb[hashix]);
    }
    *path = sdscat(*path, dir_node);

    /* Create each level only when missing.
     * NOTE(review): check-then-create is racy if two processes build the
     * same trie concurrently — confirm single-writer assumption. */
    if (dir_exists(*path)) {
      continue;
    } else {
      PANIC_IF(0 != mkdir(*path, DIRMODE));
    }
  }

  sds hash_str = mk_hash_str(hash, hashlen);
  *path = sdscatsds(*path, hash_str);
  sdsfree(hash_str);

  if (!dir_exists(*path)) {
    PANIC_IF(0 != mkdir(*path, DIRMODE));
  }

  return triefort_ok;
}
Esempio n. 2
0
// Wire one redirect for `this_runner`'s standard stream `source_type`.
// Two redirect kinds are handled:
//   - options_class::std  : connect the stream to the spawner's own std handles
//   - options_class::pipe : connect it to another runner's std stream,
//                           identified by redirect.pipe_index / redirect.name
// Panics on an unexpected redirect type, out-of-range pipe index, or unknown
// stream name.
void spawner_new_c::setup_stream_(const options_class::redirect redirect, std_stream_type source_type, runner* this_runner) {
    auto source_pipe = this_runner->get_pipe(source_type);

    if (redirect.type == options_class::std) {
        // stdin is fed FROM the spawner's own stdin; stdout/stderr flow TO
        // the spawner's handles — hence the opposite connect() directions.
        if (source_type == std_stream_input) {
            get_std(std_stream_input, redirect.flags)->connect(source_pipe);
        }
        else {
            source_pipe->connect(get_std(source_type, redirect.flags));
        }
        return;
    }

    PANIC_IF(redirect.type != options_class::pipe);
    auto index = redirect.pipe_index;
    auto stream = redirect.name;
    PANIC_IF(index < 0 || index >= runners.size());

    auto target_runner = runners[index];

    multipipe_ptr target_pipe;
    if (stream == "stdin") {
        // Only an output stream may feed another runner's stdin.
        PANIC_IF(source_type == std_stream_input);
        target_pipe = target_runner->get_pipe(std_stream_input, redirect.flags);
        source_pipe->connect(target_pipe);
    }
    else if (stream == "stdout") {
        // Conversely, only our stdin may consume another runner's output.
        PANIC_IF(source_type != std_stream_input);
        target_pipe = target_runner->get_pipe(std_stream_output, redirect.flags);
        target_pipe->connect(source_pipe);
    }
    else if (stream == "stderr") {
        PANIC_IF(source_type != std_stream_input);
        target_pipe = target_runner->get_pipe(std_stream_error, redirect.flags);
        target_pipe->connect(source_pipe);
    }
    else {
        PANIC("invalid stream name");
    }

    if (control_mode_enabled) {
        // NOTE(review): the two branches test different things (source_type
        // vs. the target stream name) — presumably covering "this runner
        // produces output" and "the target runner produces output"; confirm
        // the asymmetry is intended.
        if (source_type == std_stream_output) {
            setup_stream_in_control_mode_(this_runner, source_pipe);
        }
        else if (stream == "stdout") {
            setup_stream_in_control_mode_(target_runner, target_pipe);
        }
    }
}
Esempio n. 3
0
/* Store `buffer` in the fort, keyed by its hash.
 *
 * On success the content hash (fort->cfg.hash_len bytes) is written to
 * `hash` so the caller can retrieve the entry later.
 *
 * Returns:
 *   triefort_ok                      - content stored
 *   triefort_err_hasher_error        - the configured hasher failed
 *   triefort_err_hash_already_exists - an entry with this hash exists
 *   (or any error propagated from write_file)
 */
S triefort_put(TF * fort,
    const void * const buffer, const size_t bufferlen,
    void * const hash) {
  NULLCHK(fort);
  NULLCHK(buffer);
  NULLCHK(hash);

  const size_t hashlen = fort->cfg.hash_len;
  triefort_hasher_fn * hfn = fort->hcfg->hasher;

  if (0 != hfn(hash, hashlen, buffer, bufferlen)) {
    return triefort_err_hasher_error;
  }

  sds sdata_path = NULL;
  PANIC_IF(triefort_ok != mk_trie_dirs(fort, hash, hashlen, &sdata_path));
  sdata_path = sdscat(sdata_path, "/triefort.data");

  S s = triefort_ok;

  if (file_exists(sdata_path)) {
    s = triefort_err_hash_already_exists;
  } else {
    /* The previous inner `if (!file_exists(sdata_path))` re-check was
     * always true in this branch and has been removed. */
    s = write_file(sdata_path, buffer, bufferlen);
  }

  sdsfree(sdata_path);

  return s;
}
Esempio n. 4
0
// Map a 1-based "normal" process index onto the runners vector, skipping
// over the controller's slot. normal_index must already be validated.
int spawner_new_c::normal_to_runner_index_(int normal_index) {
    const int shifted = normal_index - 1;
    // Indices at or past the controller are displaced by one slot.
    const int result = (shifted >= controller_index_) ? shifted + 1 : shifted;
    PANIC_IF(result <= 0 || result >= (int)runners.size());
    return result;
}
Esempio n. 5
0
// Map a 1-based agent index onto the runners vector, skipping over the
// controller's slot. agent_index must already be validated by the caller.
int spawner_new_c::agent_to_runner_index_(int agent_index) {
    const int shifted = agent_index - 1;
    // Indices at or past the controller are displaced by one slot.
    const int result = (shifted >= controller_index_) ? shifted + 1 : shifted;
    PANIC_IF(result <= 0 || result >= (int)runners.size());
    return result;
}
Esempio n. 6
0
/* Check whether an entry stored under `key` exists in the fort.
 * Hashes the key with the fort's configured hasher, then delegates to
 * triefort_exists. Panics if the hasher fails. */
S triefort_exists_with_key(TF * const fort, const void * const key, const size_t keylen) {
  NULLCHK(fort);
  NULLCHK(key);

  const size_t hlen = fort->cfg.hash_len;
  void * digest = calloc(1, hlen);
  PANIC_IF(0 != fort->hcfg->hasher(digest, hlen, key, keylen));

  const S status = triefort_exists(fort, digest);
  free(digest);
  return status;
}
Esempio n. 7
0
/* Build a heap-allocated INFO record for the entry at `path`.
 *
 * Fills *info with the entry's hash (copied from `hash`), data length, and
 * — if a triefort.key file exists — the key bytes and key length.
 *
 * Returns:
 *   triefort_ok                       - *info populated (caller frees)
 *   triefort_err_hash_does_not_exist  - no triefort.data at `path`
 * Panics on stat/fopen/fread failures.
 */
static S mk_info_from_path(const TF * const fort, sds path, const void * const hash, INFO ** info) {
  sds data_path = sdsdup(path);
  sds key_path = sdsdup(path);

  data_path = sdscat(data_path, "/triefort.data");
  key_path = sdscat(key_path, "/triefort.key");

  S status = triefort_ok;

  if (file_exists(data_path)) {
    struct stat s;
    PANIC_IF(0 != stat(data_path, &s));

    *info = calloc(1, sizeof(**info));
    INFO * inf = *info;

    inf->hash = calloc(1, fort->cfg.hash_len);
    memcpy(inf->hash, hash, fort->cfg.hash_len);
    inf->hashlen = fort->cfg.hash_len;
    inf->length = s.st_size;

    if (file_exists(key_path)) {
      FILE * kh = fopen(key_path, "rb");
      PANIC_IF(NULL == kh);
      PANIC_IF(0 != fstat(fileno(kh), &s));

      inf->keylen = s.st_size;
      if (s.st_size > 0) {
        inf->key = calloc(1, s.st_size);
        PANIC_IF(1 != fread(inf->key, s.st_size, 1, kh));
      } else {
        /* A zero-length key file used to panic here: fread(_, 0, 1, _)
         * returns 0, failing the `1 != fread` check. Treat it as an
         * entry with no key bytes instead. */
        inf->key = NULL;
      }
      fclose(kh);
    } else {
      inf->keylen = 0;
      inf->key = NULL;
    }
  } else {
    status = triefort_err_hash_does_not_exist;
  }

  sdsfree(data_path);
  sdsfree(key_path);

  return status;
}
Esempio n. 8
0
/* Set an interrupt handler routine
 * Parameters:
 *     interrupt - the interrupt number (NOT the IRQ number)
 *     handler - pointer to the handler function
 *
 * Returns:
 *     void
 */
/* Set an interrupt handler routine
 * Parameters:
 *     interrupt - the interrupt number (NOT the IRQ number)
 *     handler - pointer to the handler function; NULL restores the default
 *               (unexpected_int_handler)
 *
 * Returns:
 *     void
 *
 * Panics when asked to replace the syscall or IRQ-aware handlers, which
 * must not be changed through this interface.
 */
void int_register_handler(uint8_t interrupt, int_handler_t handler)
{
    KASSERT(interrupt < INT_NUM_INTERRUPTS);

    // Make sure we're not trying to change any special interrupts
    PANIC_IF(interrupt == INT_SYSCALL_INTERRUPT,
             "tried to change syscall interrupt handler");
    PANIC_IF(interrupt >= FIRST_IRQ_INT && interrupt < FIRST_IRQ_INT + NUM_IRQS,
             "tried to change IRQ-aware interrupt handler");

    // A null handler resets the slot to the default handler.
    g_int_handler_table[interrupt] = handler ? handler : unexpected_int_handler;
}
Esempio n. 9
0
/* Retrieve the entry stored under `key` into `buffer` (at most bufferlen
 * bytes); *readlen receives the number of bytes read. Hashes the key and
 * delegates to triefort_get. Panics if the hasher fails. */
S triefort_get_with_key(TF * const fort, const void * const key, size_t keylen, void * buffer, size_t bufferlen, size_t * readlen) {
  NULLCHK(fort);
  NULLCHK(key);
  NULLCHK(buffer);
  NULLCHK(readlen);

  const size_t hlen = fort->cfg.hash_len;
  void * digest = calloc(1, hlen);
  PANIC_IF(0 != fort->hcfg->hasher(digest, hlen, key, keylen));

  const S status = triefort_get(fort, digest, buffer, bufferlen, readlen);
  free(digest);
  return status;
}
Esempio n. 10
0
void mem_init(struct multiboot_info *boot_record)
{
    struct scan_region_data data = { 0 };

    mem_init_segments();
    mem_create_framelist(boot_record, &s_framelist, &s_numframes);
    mem_scan_regions(boot_record, &mem_scan_region, &data);

    PANIC_IF(!data.heap_created, "Couldn't create kernel heap!");

    cons_printf("Memory: %u bytes in heap, %u available pages\n",
                data.heap_size, data.avail_pages);
}
Esempio n. 11
0
// Register a cooker: map each of its raw and exclusive cooked extensions to
// it, then take ownership of the cooker object. Panics if any extension is
// already claimed by another cooker.
void CookerRegistry::register_cooker(UniquePtr<Cooker> pCooker)
{
    for (const ChefString & rawExt : pCooker->rawExts())
    {
        // NOTE(review): "%s" with a ChefString argument — if PANIC_IF forwards
        // to a printf-style formatter and ChefString is a class type, passing
        // it through varargs is undefined behavior; confirm PANIC_IF's
        // formatting mechanism (it may use type-safe formatting).
        PANIC_IF(sRawExtToCooker.find(rawExt) != sRawExtToCooker.end(),
                 "Multiple cookers registered for same raw extension: %s",
                 rawExt);

        // Store a non-owning pointer; ownership transfers to sCookers below.
        sRawExtToCooker[rawExt] = pCooker.get();
    }

    for (const ChefString & cookedExt : pCooker->cookedExtsExclusive())
    {
        PANIC_IF(sCookedExtToCooker.find(cookedExt) != sCookedExtToCooker.end(),
                 "Multiple cookers registered for same cooked extension: %s",
                 cookedExt);

        sCookedExtToCooker[cookedExt] = pCooker.get();
    }

    // The registry keeps the cooker alive for the lifetime of the maps above.
    sCookers.emplace_back(std::move(pCooker));
}
Esempio n. 12
0
// Return the collision-mask bit assigned to `hash`, allocating the next
// unused bit on first sight. Panics once all 12 bits are taken.
u16 ModelPhysics::maskFromHash(u32 hash)
{
    ASSERT(hash != 0);

    // Reuse a previously assigned bit if this hash was seen before.
    auto existing = mMaskBits.find(hash);
    if (existing != mMaskBits.end())
        return existing->second;

    PANIC_IF(mMaskBits.size() >= 12, "Too many mask bits defined, Bullet only allows for 12(ish)");
    const u16 newMask = (u16)(1 << mMaskBits.size());
    mMaskBits[hash] = newMask;
    return newMask;
}
Esempio n. 13
0
/* Look up metadata for the entry stored under `key`. Hashes the key and
 * delegates to triefort_info; on success *info points at a fresh INFO
 * record. Panics if the hasher fails. */
S triefort_info_with_key(
    const TF * const fort,
    const void * const key,
    const size_t keylen,
    INFO ** const info) {
  NULLCHK(fort);
  NULLCHK(key);
  NULLCHK(info);

  const size_t hlen = fort->cfg.hash_len;
  void * digest = calloc(1, hlen);
  PANIC_IF(0 != fort->hcfg->hasher(digest, hlen, key, keylen));

  const S status = triefort_info(fort, digest, info);
  free(digest);
  return status;
}
Esempio n. 14
0
/* Open a read stream for the entry stored under `key`. Hashes the key and
 * delegates to triefort_get_stream; on success *hdl is an open FILE the
 * caller must close. Panics if the hasher fails. */
S triefort_get_stream_with_key(
    TF * const fort,
    const void * const key,
    const size_t keylen,
    FILE ** const hdl) {
  NULLCHK(fort);
  NULLCHK(key);
  NULLCHK(hdl);

  const size_t hlen = fort->cfg.hash_len;
  void * digest = calloc(1, hlen);
  PANIC_IF(0 != fort->hcfg->hasher(digest, hlen, key, keylen));

  const S status = triefort_get_stream(fort, digest, hdl);
  free(digest);
  return status;
}
Esempio n. 15
0
/* Atomically-intended helper: create `filename` and write `datalen` bytes
 * from `data` into it. Refuses to overwrite an existing file.
 *
 * Returns:
 *   triefort_ok                      - file written (or empty file created)
 *   triefort_err_path_already_exists - filename already exists
 *   triefort_err_write_error         - fwrite or fclose failed
 * Panics if the file cannot be opened for writing.
 */
static S write_file(const char * const filename, const void * const data, const size_t datalen) {
  S s = triefort_ok;

  if (file_exists(filename)) {
    s = triefort_err_path_already_exists;
  } else {
    FILE * fh = fopen(filename, "wb");
    PANIC_IF(NULL == fh);

    /* fwrite(_, 0, 1, _) returns 0, so the old `wlen != 1` check falsely
     * reported a write error for empty payloads; skip the write instead. */
    if (datalen > 0) {
      if (1 != fwrite(data, datalen, 1, fh)) {
        s = triefort_err_write_error;
      }
    }

    /* fclose flushes buffered data; a failure here means bytes were lost
     * even though fwrite succeeded. */
    if (0 != fclose(fh) && triefort_ok == s) {
      s = triefort_err_write_error;
    }
  }

  return s;
}
Esempio n. 16
0
/* Store `buffer` under the hash of `key`, recording the key alongside the
 * data. On success the key's hash (cfg.hash_len bytes) is written to `hash`.
 *
 * Returns:
 *   triefort_ok                      - stored
 *   triefort_err_key_too_long        - keylen exceeds cfg.max_key_len
 *   triefort_err_hasher_error        - the configured hasher failed
 *   triefort_err_hash_already_exists - an entry with this hash exists
 *   (or any error propagated from write_file)
 */
S triefort_put_with_key(TF * fort,
    const void * const key, const size_t keylen,
    const void * const buffer, const size_t bufferlen,
    void * const hash) {
  NULLCHK(fort);
  NULLCHK(key);
  NULLCHK(buffer);
  NULLCHK(hash);

  if (keylen > fort->cfg.max_key_len) {
    return triefort_err_key_too_long;
  }

  triefort_hasher_fn * hfn = fort->hcfg->hasher;
  const size_t hashlen = fort->cfg.hash_len;

  /* Note: the stored hash is of the KEY, not of the data. */
  if (0 != hfn(hash, hashlen, key, keylen)) {
    return triefort_err_hasher_error;
  }

  S s;
  sds dir_path = NULL;
  PANIC_IF(triefort_ok != mk_trie_dirs(fort, hash, hashlen, &dir_path));

  sds skey_path = sdsdup(dir_path);
  sds sdata_path = sdsdup(dir_path);

  sdata_path = sdscat(sdata_path, "/triefort.data");
  skey_path = sdscat(skey_path, "/triefort.key");

  if (file_exists(sdata_path)) {
    s = triefort_err_hash_already_exists;
  } else {
    /* Write the key first, then the data.
     * NOTE(review): if the key write succeeds but the data write fails, a
     * stale triefort.key is left behind; a retry for the same key will then
     * fail inside write_file with path_already_exists. Consider cleanup. */
    s = write_file(skey_path, key, keylen);
    if (triefort_ok == s) {
      s = write_file(sdata_path, buffer, bufferlen);
    }
  }

  sdsfree(skey_path);
  sdsfree(sdata_path);
  sdsfree(dir_path);

  return s;
}
Esempio n. 17
0
/*
 * Poll up to num_entries completions from the completion queue into wc,
 * holding the CQ lock for the duration.
 *
 * Returns the number of completions polled (>= 0), or the negative errno
 * from iwch_poll_cq_one on failure.
 */
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	int npolled;
	int err = 0;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;

	mtx_lock(&chp->lock);
	for (npolled = 0; npolled < num_entries; ++npolled) {
#ifdef DEBUG
		int i=0;
#endif

		/*
		 * Because T3 can post CQEs that are _not_ associated
		 * with a WR, we might have to poll again after removing
		 * one of these.
		 */
		do {
			err = iwch_poll_cq_one(rhp, chp, wc + npolled);
#ifdef DEBUG
			/* Guard against an unbounded -EAGAIN loop in debug builds. */
			PANIC_IF(++i > 1000);
#endif
		} while (err == -EAGAIN);
		/* err == 0: queue drained; err < 0: hard failure. */
		if (err <= 0)
			break;
	}
	mtx_unlock(&chp->lock);

	if (err < 0) {
		return err;
	} else {
		return npolled;
	}
}
Esempio n. 18
0
/* Load a fort configuration from the binary file at `path`.
 *
 * Format (matches store_cfg): depth, width, hash_len, max_key_len, a
 * one-byte name length, then that many bytes of hash_name.
 *
 * Returns:
 *   triefort_ok                              - cfg populated
 *   triefort_err_config_could_not_be_opened  - fopen failed
 * Panics on short reads or a corrupt (oversized) name length.
 */
static S load_cfg(CFG * const cfg, const char * const path) {
  NULLCHK(cfg);
  NULLCHK(path);

  FILE * cfghdl = fopen(path, "rb");
  if (NULL == cfghdl) {
    return triefort_err_config_could_not_be_opened;
  } else {
    PANIC_IF(1 != fread(&cfg->depth, sizeof(cfg->depth), 1, cfghdl));
    PANIC_IF(1 != fread(&cfg->width, sizeof(cfg->width), 1, cfghdl));
    PANIC_IF(1 != fread(&cfg->hash_len, sizeof(cfg->hash_len), 1, cfghdl));
    PANIC_IF(1 != fread(&cfg->max_key_len, sizeof(cfg->max_key_len), 1, cfghdl));
    uint8_t nlenb = 0;
    PANIC_IF(1 != fread(&nlenb, sizeof(nlenb), 1, cfghdl));
    /* A corrupt or hostile config file could otherwise overflow hash_name:
     * the length byte was previously trusted without a bounds check.
     * store_cfg caps the name at sizeof(hash_name) - 1, so require the
     * same here and keep room for the terminator. */
    PANIC_IF(nlenb >= sizeof(cfg->hash_name));
    PANIC_IF(nlenb != fread(&cfg->hash_name, 1, nlenb, cfghdl));
    cfg->hash_name[nlenb] = '\0';
    fclose(cfghdl);
  }

  return triefort_ok;
}
Esempio n. 19
0
/* Persist a fort configuration to the binary file at `path`.
 *
 * Format (matches load_cfg): depth, width, hash_len, max_key_len, a
 * one-byte name length, then that many bytes of hash_name.
 *
 * Returns:
 *   triefort_ok                               - cfg written and flushed
 *   triefort_err_config_could_not_be_created  - fopen failed
 * Panics on short writes or a failed fclose.
 */
static S store_cfg(const CFG * const cfg, const char * const path) {
  NULLCHK(cfg);
  NULLCHK(path);

  FILE * cfghdl = fopen(path, "wb");
  if (NULL == cfghdl) {
    return triefort_err_config_could_not_be_created;
  } else {
    PANIC_IF(1 != fwrite(&cfg->depth, sizeof(cfg->depth), 1, cfghdl));
    PANIC_IF(1 != fwrite(&cfg->width, sizeof(cfg->width), 1, cfghdl));
    PANIC_IF(1 != fwrite(&cfg->hash_len, sizeof(cfg->hash_len), 1, cfghdl));
    PANIC_IF(1 != fwrite(&cfg->max_key_len, sizeof(cfg->max_key_len), 1, cfghdl));

    /* Name is length-prefixed; cap at the buffer size minus terminator. */
    size_t nlen = strnlen(cfg->hash_name, sizeof(cfg->hash_name) - 1);
    uint8_t nlenb = nlen;

    PANIC_IF(1 != fwrite(&nlenb, sizeof(nlenb), 1, cfghdl));
    PANIC_IF(nlen != fwrite(&cfg->hash_name, 1, nlen, cfghdl));
    /* fclose flushes buffered output; ignoring its result could silently
     * drop the tail of the config on a full disk. */
    PANIC_IF(0 != fclose(cfghdl));
  }

  return triefort_ok;
}
Esempio n. 20
0
/* Build and register the initial vCPU context for `cpu` with the Xen
 * hypervisor: allocate and wire up a fresh page-directory tree and boot
 * stack, fill in the vcpu_guest_context, then issue VCPUOP_initialise and
 * VCPUOP_up. Panics if either hypercall fails. */
static void
cpu_initialize_context(unsigned int cpu)
{
	/* vcpu_guest_context_t is too large to allocate on the stack.
	 * Hence we allocate statically and protect it with a lock */
	/* NOTE(review): no lock is visible in this function; confirm the
	 * caller serializes cpu_initialize_context invocations. */
	vm_page_t m[NPGPTD + 2];
	static vcpu_guest_context_t ctxt;
	vm_offset_t boot_stack;
	vm_offset_t newPTD;
	vm_paddr_t ma[NPGPTD];
	int i;

	/*
	 * Page 0,[0-3]	PTD
	 * Page 1, [4]	boot stack
	 * Page [5]	PDPT
	 *
	 */
	for (i = 0; i < NPGPTD + 2; i++) {
		/* NOTE(review): vm_page_alloc can fail; the result is used
		 * unchecked here — confirm allocation cannot fail this early. */
		m[i] = vm_page_alloc(NULL, 0,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);

		pmap_zero_page(m[i]);

	}
	boot_stack = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
	newPTD = kmem_alloc_nofault(kernel_map, NPGPTD * PAGE_SIZE);
	ma[0] = VM_PAGE_TO_MACH(m[0])|PG_V;

#ifdef PAE
	/* Populate the PDPT page with the machine addresses of the four
	 * page-directory pages (PAE layout). */
	pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD + 1]));
	for (i = 0; i < NPGPTD; i++) {
		((vm_paddr_t *)boot_stack)[i] =
		ma[i] = VM_PAGE_TO_MACH(m[i])|PG_V;
	}
#endif

	/*
	 * Copy cpu0 IdlePTD to new IdlePTD - copying only
	 * kernel mappings
	 */
	pmap_qenter(newPTD, m, 4);

	memcpy((uint8_t *)newPTD + KPTDI*sizeof(vm_paddr_t),
	    (uint8_t *)PTOV(IdlePTD) + KPTDI*sizeof(vm_paddr_t),
	    nkpt*sizeof(vm_paddr_t));

	pmap_qremove(newPTD, 4);
	kmem_free(kernel_map, newPTD, 4 * PAGE_SIZE);
	/*
	 * map actual idle stack to boot_stack
	 */
	pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD]));


	xen_pgdpt_pin(VM_PAGE_TO_MACH(m[NPGPTD + 1]));
	rw_wlock(&pvh_global_lock);
	/* Install the recursive page-directory self-mappings via the
	 * hypervisor's queued PT update interface. */
	for (i = 0; i < 4; i++) {
		int pdir = (PTDPTDI + i) / NPDEPG;
		int curoffset = (PTDPTDI + i) % NPDEPG;

		xen_queue_pt_update((vm_paddr_t)
		    ((ma[pdir] & ~PG_V) + (curoffset*sizeof(vm_paddr_t))), 
		    ma[i]);
	}
	PT_UPDATES_FLUSH();
	rw_wunlock(&pvh_global_lock);

	/* Fill in the guest context: segment selectors, entry point, and
	 * callbacks. The new vCPU starts in init_secondary. */
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.flags = VGCF_IN_KERNEL;
	ctxt.user_regs.ds = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.es = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.fs = GSEL(GPRIV_SEL, SEL_KPL);
	ctxt.user_regs.gs = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.cs = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.user_regs.ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.eip = (unsigned long)init_secondary;
	ctxt.user_regs.eflags = PSL_KERNEL | 0x1000; /* IOPL_RING1 */

	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));

	smp_trap_init(ctxt.trap_ctxt);

	ctxt.ldt_ents = 0;
	ctxt.gdt_frames[0] = (uint32_t)((uint64_t)vtomach(bootAPgdt) >> PAGE_SHIFT);
	ctxt.gdt_ents      = 512;

#ifdef __i386__
	ctxt.user_regs.esp = boot_stack + PAGE_SIZE;

	ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.kernel_sp = boot_stack + PAGE_SIZE;

	ctxt.event_callback_cs     = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.event_callback_eip    = (unsigned long)Xhypervisor_callback;
	ctxt.failsafe_callback_cs  = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;

	ctxt.ctrlreg[3] = VM_PAGE_TO_MACH(m[NPGPTD + 1]);
#else /* __x86_64__ */
	ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
	ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.kernel_sp = idle->thread.rsp0;

	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
	ctxt.syscall_callback_eip  = (unsigned long)system_call;

	ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));

	ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
#endif

	printf("gdtpfn=%lx pdptpfn=%lx\n",
	    ctxt.gdt_frames[0],
	    ctxt.ctrlreg[3] >> PAGE_SHIFT);

	/* Hand the context to Xen, then bring the vCPU online. */
	PANIC_IF(HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt));
	DELAY(3000);
	PANIC_IF(HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL));
}
Esempio n. 21
0
File: x86_vm.c Progetto: c00p/geekos
/* Set up and enable kernel paging: verify PSE support, build the kernel
 * page directory (4K pages for the low 4M with page 0 unmapped, 4M pages
 * for the rest of the low 2G), then turn on CR0.PG. Panics if the CPU
 * lacks CPUID or PSE. */
void vm_init_paging(struct multiboot_info *boot_info)
{
	struct x86_cpuid_info cpuid_info;
	struct frame *pgdir_frame;
	struct frame *pgtab_frame;
	pte_t *pgtab;
	ulong_t paddr, mem_max;

	/*
	 * Check CPUID instruction to see if large pages (PSE feature)
	 * is supported.
	 */
	PANIC_IF(!x86_cpuid(&cpuid_info), "GeekOS requires a Pentium-class CPU");
	PANIC_IF(!cpuid_info.feature_info_edx.pse, "Processor does not support PSE");
	cons_printf("CPU supports PSE\n");

	/*
	 * Enable PSE by setting the PSE bit in CR4.
	 */
	x86_set_cr4(x86_get_cr4() | CR4_PSE);

	/*
	 * Allocate kernel page directory.
	 */
	pgdir_frame = mem_alloc_frame(FRAME_KERN, 1);
	s_kernel_pagedir = mem_frame_to_pa(pgdir_frame);
	memset(s_kernel_pagedir, '\0', PAGE_SIZE);

	/*
	 * We will support at most 2G of physical memory.
	 * (1UL << 31): the previous (1 << 31) overflowed a signed int,
	 * which is undefined behavior; the unsigned form is well-defined
	 * and yields the intended 2G threshold.
	 */
	mem_max = ((ulong_t) boot_info->mem_upper) * 1024;
	if (mem_max > (1UL << 31)) {
		mem_max = (ulong_t) (1UL << 31);
	}

	/*
	 * We need a page table for the low 4M of the kernel address space,
	 * since we want to leave the zero page unmapped (to catch null pointer derefs).
	 */
	pgtab_frame = mem_alloc_frame(FRAME_KERN, 1);
	pgtab = mem_frame_to_pa(pgtab_frame);
	memset(pgtab, '\0', PAGE_SIZE);

	/*
	 * Initialize low page table, leaving page 0 unmapped
	 */
	for (paddr = PAGE_SIZE; paddr < VM_PT_SPAN; paddr += PAGE_SIZE) {
		vm_set_pte(pgtab, VM_WRITE|VM_READ|VM_EXEC, paddr, paddr);
	}

	/*
	 * Add low page table to the kernel pagedir.
	 */
	vm_set_pde(s_kernel_pagedir, VM_WRITE|VM_READ|VM_EXEC, 0UL, (u32_t) pgtab);

	/*
	 * Use 4M pages to map the rest of the low 2G of memory
	 */
	for (paddr = VM_PT_SPAN; paddr < mem_max; paddr += VM_PT_SPAN) {
		vm_set_pde_4m(s_kernel_pagedir, VM_WRITE|VM_READ|VM_EXEC, paddr, paddr);
	}

	/*
	 * Turn on paging!
	 */
	x86_set_cr3((u32_t) s_kernel_pagedir); /* set the kernel page directory */
	x86_set_cr0(x86_get_cr0() | CR0_PG);   /* turn on the paging bit in cr0 */

	cons_printf("Paging enabled\n");
}
Esempio n. 22
0
// Initialize all runners and wire their stream redirects.
// If a runner is marked as controller, enter control mode: lock the
// controller's stdin, start every runner suspended, and install
// on_terminate hooks that notify the controller ("<i>T#") when an agent
// dies and release its stdin lock once no agents remain.
// Returns false if runner initialization fails or there are no runners.
bool spawner_new_c::init() {
    if (!init_runner() || !runners.size()) {
        return false;
    }
    for (size_t i = 0; i < runners.size(); i++) {
        if (runners[i]->get_options().controller) {
            // there must be only one controller process
            PANIC_IF(controller_index_ != -1);
            controller_index_ = i;
            control_mode_enabled = true;
            controller_input_lock = multipipe::create_pipe(write_mode, true, 0);
            controller_input_lock->get_pipe()->close();
            // Lock controller stdin
            controller_input_lock->connect(runners[i]->get_pipe(std_stream_input));
            controller_input_ = runners[i]->get_pipe(std_stream_input)->get_pipe();
            controller_output_ = runners[i]->get_pipe(std_stream_output);
        }
    }
    if (controller_index_ != -1) {
        awaited_agents_.resize(runners.size() - 1);
        for (size_t i = 0; i < runners.size(); i++) {
            secure_runner* sr = static_cast<secure_runner*>(runners[i]);
            sr->start_suspended = true;
            if (i != controller_index_) {
                // Agent termination hook. [=] copies `i` and captures `this`
                // by pointer — safe only while this spawner outlives the
                // runner callbacks.
                sr->on_terminate = [=]() {
                    LOG("runner", i, "terminated");
                    on_terminate_mutex_.lock();
                    wait_agent_mutex_.lock();
                    awaited_agents_[i - 1] = false;
                    std::string message = std::to_string(i) + "T#\n";
                    // Send message to controller only.
                    controller_input_->write(message.c_str(), message.size());
                    bool have_running_agents = false;
                    // NOTE(review): `auto j = 0` deduces int, giving a
                    // signed/unsigned comparison with runners.size().
                    for (auto j = 0; j < runners.size(); j++) {
                        if (j != controller_index_ && j != i && runners[j]->is_running()) {
                            have_running_agents = true;
                            break;
                        }
                    }
                    if (!have_running_agents) {
                        // Unlock controller stdin
                        controller_input_lock->finalize();
                    }
                    wait_agent_mutex_.unlock();
                    on_terminate_mutex_.unlock();
                };
            } else {
                // Controller termination resumes all agents so they can
                // run to completion without it.
                sr->on_terminate = [=]() {
                    LOG("controller terminated");
                    for (size_t j = 0; j < runners.size(); j++) {
                        if (j != controller_index_) {
                            runners[j]->resume();
                        }
                    }
                };
            }
        }
    }

    // Wire every non-file redirect of every runner's std streams.
    for (auto runner : runners) {
        options_class runner_options = runner->get_options();
        const struct {
            std::vector<options_class::redirect> &streams;
            std_stream_type type;
        } redirects_all[] = {
            { runner_options.stdinput, std_stream_input },
            { runner_options.stdoutput, std_stream_output },
            { runner_options.stderror, std_stream_error },
        };
        for (const auto& redirects : redirects_all) {
            for (const auto& redirect : redirects.streams) {
                PANIC_IF(redirect.original.size() == 0);
                if (redirect.type == options_class::file) {
                    continue;
                }
                setup_stream_(redirect, redirects.type, runner);
            }
        }
    }
    return true;
}
Esempio n. 23
0
// Initialize all runners and wire their "*<index>.<stream>" redirects.
// If a runner is marked as controller, enter control mode: buffer the
// controller's input, start every other runner suspended, and install
// on_terminate hooks that detach the dead runner's duplex buffers from all
// peers and notify the controller ("<i>I#") when an awaited runner dies.
// Returns false if runner initialization fails or there are no runners.
bool spawner_new_c::init() {
    if (!init_runner() || !runners.size()) {
        return false;
    }
    for (size_t i = 0; i < runners.size(); i++) {
        if (runners[i]->get_options().controller) {
            // there must be only one controller process
            PANIC_IF(controller_index_ != -1);
            controller_index_ = i;
            control_mode_enabled = true;
            controller_buffer_ = std::make_shared<pipe_buffer_c>(runners[i]->get_input_pipe());
        }
    }
    if (controller_index_ != -1) {
        awaited_normals_.resize(runners.size() - 1);
        for (size_t i = 0; i < runners.size(); i++) {
            secure_runner* sr = static_cast<secure_runner*>(runners[i]);
            if (i != controller_index_) {
                sr->start_suspended = true;
            }
            // [=] copies `i` and `sr` and captures `this` by pointer — safe
            // only while this spawner outlives the runner callbacks.
            sr->on_terminate = [=]() {
                on_terminate_mutex_.lock();
                // Detach this runner's duplex buffers from every other
                // runner's std pipes so no one writes into a dead pipe.
                for (auto& r : runners) {
                    if (r == sr) {
                        continue;
                    }
                    for (auto& b : sr->duplex_buffers) {
                        for (int pi = STD_INPUT_PIPE; pi <= STD_ERROR_PIPE; pi++) {
                            auto&& p = r->get_pipe(static_cast<const pipes_t>(pi));
                            p->remove_buffer(b);
                        }
                    }
                }

                // Notify the controller if it was waiting on this runner.
                if (i > 0 && awaited_normals_[i - 1]) {
                    wait_normal_mutex_.lock();
                    awaited_normals_[i - 1] = false;
                    std::string message = std::to_string(i) + "I#\n";
                    controller_buffer_->write(message.c_str(), message.size());
                    wait_normal_mutex_.unlock();
                }
                on_terminate_mutex_.unlock();
            };
        }
    }

    // Wire every "*..."-style stream spec of every runner's std streams.
    for (auto& runner : runners) {
        options_class runner_options = runner->get_options();
        struct {
            std::vector<std::string> &streams;
            pipes_t pipe_type;
        } streams[] = {
            { runner_options.stdinput, STD_INPUT_PIPE },
            { runner_options.stdoutput, STD_OUTPUT_PIPE },
            { runner_options.stderror, STD_ERROR_PIPE },
        };
        for (auto& stream_item : streams) {
            for (auto& stream_str : stream_item.streams) {
                PANIC_IF(stream_str.size() == 0);
                // Only "*index.stream" specs are inter-runner pipes; other
                // values are handled elsewhere.
                if (stream_str[0] != '*') {
                    continue;
                }
                setup_stream_(stream_str, stream_item.pipe_type, runner);
            }
        }
    }
    return true;
}
Esempio n. 24
0
// Wire a "*<index>.<stream>" spec: connect `this_runner`'s pipe of
// `this_pipe_type` to the named std stream of the runner at <index> through
// a shared duplex buffer. Exactly one side must be an input pipe. In control
// mode, also install message-processing hooks on the producing output pipe.
// Panics on a malformed spec, bad index, bad stream name, or invalid mapping.
void spawner_new_c::setup_stream_(const std::string& stream_str, pipes_t this_pipe_type, runner* this_runner) {
    size_t pos = stream_str.find(".");
    // malformed argument
    PANIC_IF(pos == std::string::npos);
    // Parse into a signed value first: the previous code stored stoi's int
    // result straight into a size_t, which made the "index < 0" check below
    // dead and let a negative spec like "*-1.stdout" wrap to a huge index.
    const int parsed_index = stoi(stream_str.substr(1, pos - 1), nullptr, 10);
    std::string stream = stream_str.substr(pos + 1);
    // invalid index
    PANIC_IF(parsed_index < 0 || (size_t)parsed_index >= runners.size());
    const size_t index = (size_t)parsed_index;
    pipes_t other_pipe_type;
    if (stream == "stderr") {
        other_pipe_type = STD_ERROR_PIPE;
    }
    else if (stream == "stdin") {
        other_pipe_type = STD_INPUT_PIPE;
    }
    else if (stream == "stdout") {
        other_pipe_type = STD_OUTPUT_PIPE;
    }
    else {
        PANIC("invalid stream name");
    }
    runner *target_runner = runners[index];
    std::shared_ptr<input_pipe_c> input_pipe = nullptr;
    std::shared_ptr<output_pipe_c> output_pipe = nullptr;
    pipes_t out_pipe_type = STD_ERROR_PIPE;
    runner* out_pipe_runner = nullptr;
    runner* in_pipe_runner = nullptr;

    // Orient the connection: exactly one endpoint must be a stdin pipe.
    if (this_pipe_type == STD_INPUT_PIPE && other_pipe_type != STD_INPUT_PIPE) {

        input_pipe = std::static_pointer_cast<input_pipe_c>(this_runner->get_pipe(this_pipe_type));
        output_pipe = std::static_pointer_cast<output_pipe_c>(target_runner->get_pipe(other_pipe_type));
        out_pipe_type = other_pipe_type;
        out_pipe_runner = target_runner;
        in_pipe_runner = this_runner;

    } else if (this_pipe_type != STD_INPUT_PIPE && other_pipe_type == STD_INPUT_PIPE) {

        input_pipe = std::static_pointer_cast<input_pipe_c>(target_runner->get_pipe(other_pipe_type));
        output_pipe = std::static_pointer_cast<output_pipe_c>(this_runner->get_pipe(this_pipe_type));
        out_pipe_type = this_pipe_type;
        out_pipe_runner = this_runner;
        in_pipe_runner = target_runner;
    } else {
        PANIC("invalid pipe mapping");
    }

    // Bridge the two pipes with a duplex buffer; both runners track it so it
    // can be detached when either terminates.
    std::shared_ptr<duplex_buffer_c> buffer = std::make_shared<duplex_buffer_c>();
    in_pipe_runner->duplex_buffers.push_back(buffer);
    out_pipe_runner->duplex_buffers.push_back(buffer);
    input_pipe->add_input_buffer(buffer);
    output_pipe->add_output_buffer(buffer);

    int out_runner_index = -1;
    int in_runner_index = -1;
    for (size_t i = 0; i < runners.size(); i++) {
        if (runners[i] == out_pipe_runner) {
            out_runner_index = i;
        }
        if (runners[i] == in_pipe_runner) {
            in_runner_index = i;
        }
    }

    // Remember which 1-based "normal" runner each controller-fed buffer
    // belongs to (controller's own slot is skipped in the numbering).
    if (out_runner_index == controller_index_) {
        int index = in_runner_index;
        if (index > controller_index_) {
            index--;
        }
        buffer_to_runner_index_[buffer] = index + 1;
    }

    // In control mode, attach a message processor to the producing stdout
    // pipe (controller messages vs. normal-runner messages).
    if (control_mode_enabled
            && out_pipe_type == STD_OUTPUT_PIPE
            && !output_pipe->process_message) {

        if (out_pipe_runner->get_options().controller) {
            output_pipe->process_message = [=](const std::string& message, output_pipe_c* pipe) {
                process_controller_message_(message, pipe);
            };
        } else {
            int index = out_runner_index;
            if (index > controller_index_) {
                index--;
            }
            output_pipe->process_message = [=](const std::string& message, output_pipe_c* pipe) {
                process_normal_message_(message, pipe, index + 1);
            };
        }
    }
}
Esempio n. 25
0
// Append a named vec4 shader variable to this material's fixed-size table.
// Panics once kMaxVec4Vars entries have been registered.
void Material::registerVec4Var(u32 nameHash, const vec4 & value)
{
    PANIC_IF(mVec4VarCount >= kMaxVec4Vars, "Too many Vec4 material vars");
    const auto slot = mVec4VarCount++;
    mVec4Vars[slot] = Material::Vec4Var(nameHash, value);
}
Esempio n. 26
0
// Append a named texture to this material's fixed-size table and add a
// reference to the backing gimg asset on behalf of the renderer task.
// Panics once kMaxTextures entries have been registered.
void Material::registerTexture(u32 nameHash, const Asset * pGimgAsset)
{
    PANIC_IF(mTextureCount >= kMaxTextures, "Too many textures for material");
    const auto slot = mTextureCount++;
    mTextures[slot] = Material::TextureInfo(nameHash, 0, pGimgAsset);
    AssetMgr::addref_asset(kRendererTaskId, pGimgAsset);
}