Example #1
void rk808_configure_ldo(int ldo, int millivolts)
{
	uint8_t vsel;

	if (!millivolts) {
		rk808_clrsetbits(LDO_EN, 1 << (ldo - 1), 0);
		return;
	}

	switch (ldo) {
	case 1:
	case 2:
	case 4:
	case 5:
	case 8:
		vsel = div_round_up(millivolts, 100) - 18;
		assert(vsel <= 0x10);
		break;
	case 3:
	case 6:
	case 7:
		vsel = div_round_up(millivolts, 100) - 8;
		assert(vsel <= 0x11);
		break;
	default:
		die("Unknown LDO index!");
	}

	rk808_clrsetbits(LDO_ONSEL(ldo), 0x1f, vsel);
	rk808_clrsetbits(LDO_EN, 0, 1 << (ldo - 1));
}
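Every example on this page relies on the same ceiling-division helper; Example #21 further down even defines it inline as a macro, (((v) + (a) - 1) / (a)). A minimal, self-contained sketch of that helper together with the LDO math above (the millivolt values are only illustrative):

#include <assert.h>
#include <stdint.h>

/* Ceiling division for positive operands, matching the macro in Example #21. */
static inline uint32_t div_round_up(uint32_t v, uint32_t a)
{
	return (v + a - 1) / a;
}

int main(void)
{
	/* LDO3/6/7 path from Example #1: 100 mV steps with an offset of 8,
	 * so 1800 mV encodes as ceil(1800 / 100) - 8 = 10 (within 0x11). */
	assert(div_round_up(1800, 100) - 8 == 10);

	/* LDO1/2/4/5/8 path: offset of 18, so 3300 mV encodes as
	 * 33 - 18 = 15 (within 0x10). */
	assert(div_round_up(3300, 100) - 18 == 15);
	return 0;
}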
Example #2
void rk808_configure_buck(int buck, int millivolts)
{
	uint8_t vsel;
	uint8_t buck_reg;

	switch (buck) {
	case 1:
	case 2:
		/* 25mV steps. base = 29 * 25mV = 725 */
		vsel = (div_round_up(millivolts, 25) - 29) * 2 + 1;
		assert(vsel <= 0x3f);
		buck_reg = BUCK1SEL + 4 * (buck - 1);
		break;
	case 4:
		vsel = div_round_up(millivolts, 100) - 18;
		assert(vsel <= 0xf);
		buck_reg = BUCK4SEL;
		break;
	default:
		die("Unknown buck index!");
	}
	rk808_clrsetbits(DCDC_ILMAX, 0, 3 << ((buck - 1) * 2));

	/* undervoltage detection may be wrong, disable it */
	rk808_clrsetbits(DCDC_UV_ACT, 1 << (buck - 1), 0);

	rk808_clrsetbits(buck_reg, 0x3f, vsel);
	rk808_clrsetbits(DCDC_EN, 0, 1 << (buck - 1));
}
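A quick worked check of the BUCK1/BUCK2 encoding above, with an illustrative target voltage (the register semantics are taken from the code, not re-verified against the datasheet):

#include <assert.h>
#include <stdint.h>

static inline uint32_t div_round_up(uint32_t v, uint32_t a)
{
	return (v + a - 1) / a;
}

int main(void)
{
	/* 1200 mV on BUCK1/2: ceil(1200 / 25) = 48 steps, minus the 29-step
	 * (725 mV) base, doubled plus one => vsel = 39, which is <= 0x3f. */
	uint8_t vsel = (uint8_t)((div_round_up(1200, 25) - 29) * 2 + 1);
	assert(vsel == 39);
	return 0;
}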
Example #3
static void init_utmip_pll(void)
{
	int khz = clock_get_pll_input_khz();

	/* Shut off PLL crystal clock while we mess with it */
	clrbits_le32(CLK_RST_REG(utmip_pll_cfg2), 1 << 30); /* PHY_XTAL_CLKEN */
	udelay(1);

	write32(CLK_RST_REG(utmip_pll_cfg0),	/* 960MHz * 1 / 80 == 12 MHz */
		80 << 16 |			/* (rst) phy_divn */
		 1 <<  8);			/* (rst) phy_divm */

	write32(CLK_RST_REG(utmip_pll_cfg1),
		div_round_up(khz, 8000) << 27 |	/* pllu_enbl_cnt / 8 (1us) */
				      0 << 16 | /* PLLU pwrdn */
				      0 << 14 | /* pll_enable pwrdn */
				      0 << 12 | /* pll_active pwrdn */
		 div_round_up(khz, 102) << 0);  /* phy_stbl_cnt / 256 (2.5ms) */

	/* TODO: TRM can't decide if actv is 5us or 10us, keep an eye on it */
	write32(CLK_RST_REG(utmip_pll_cfg2),
				      0 << 24 |	/* SAMP_D/XDEV pwrdn */
		div_round_up(khz, 3200) << 18 |	/* phy_actv_cnt / 16 (5us) */
		 div_round_up(khz, 256) <<  6 |	/* pllu_stbl_cnt / 256 (1ms) */
				      0 <<  4 |	/* SAMP_C/USB3 pwrdn */
				      0 <<  2 |	/* SAMP_B/XHOST pwrdn */
				      0 <<  0);	/* SAMP_A/USBD pwrdn */

	setbits_le32(CLK_RST_REG(utmip_pll_cfg2), 1 << 30); /* PHY_XTAL_CLKEN */
}
Example #4
/** Derive sizes of different filesystem structures.
 *
 * This function concentrates all the different computations of FAT
 * file system params.
 */
static int fat_params_compute(struct fat_cfg *cfg)
{
	uint32_t fat_bytes;
	uint32_t non_data_sectors_lb;

	/*
	 * Make a conservative guess on the FAT size needed for the file
	 * system. The optimum could be potentially smaller since we
	 * do not subtract size of the FAT itself when computing the
	 * size of the data region.
	 */

	cfg->reserved_sectors = 1 + cfg->addt_res_sectors;
	if (cfg->fat_type != FAT32) {
		cfg->rootdir_sectors = div_round_up(cfg->root_ent_max * DIRENT_SIZE,
			cfg->sector_size);
	} else
		cfg->rootdir_sectors = 0;
	non_data_sectors_lb = cfg->reserved_sectors + cfg->rootdir_sectors;

	cfg->total_clusters = div_round_up(cfg->total_sectors - non_data_sectors_lb,
	    cfg->sectors_per_cluster);

	if ((cfg->fat_type == FAT12 && cfg->total_clusters > FAT12_CLST_MAX) ||
	    (cfg->fat_type == FAT16 && (cfg->total_clusters <= FAT12_CLST_MAX ||
	    cfg->total_clusters > FAT16_CLST_MAX)) ||
	    (cfg->fat_type == FAT32 && cfg->total_clusters <= FAT16_CLST_MAX))
		return ENOSPC;

	fat_bytes = div_round_up((cfg->total_clusters + 2) *
	    FAT_CLUSTER_DOUBLE_SIZE(cfg->fat_type), 2);
	cfg->fat_sectors = div_round_up(fat_bytes, cfg->sector_size);

	return EOK;
}
Example #5
size_t verity_tree_blocks(uint64_t data_size, size_t block_size, size_t hash_size,
                          int level)
{
    size_t level_blocks = div_round_up(data_size, block_size);
    int hashes_per_block = div_round_up(block_size, hash_size);

    do {
        level_blocks = div_round_up(level_blocks, hashes_per_block);
    } while (level--);

    return level_blocks;
}
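As a usage sketch: callers sum the per-level block counts until a level fits in a single block, exactly as Example #18 further down does. A stand-alone helper, assuming the function above is linked in:

#include <stddef.h>
#include <stdint.h>

/* Prototype of the function shown in Example #5. */
size_t verity_tree_blocks(uint64_t data_size, size_t block_size,
                          size_t hash_size, int level);

/* Total size of the hash tree in bytes (mirrors the loop in Example #18). */
static uint64_t verity_tree_size(uint64_t data_size, size_t block_size,
                                 size_t hash_size)
{
    size_t verity_blocks = 0;
    size_t level_blocks;
    int levels = 0;

    do {
        level_blocks = verity_tree_blocks(data_size, block_size,
                                          hash_size, levels);
        levels++;
        verity_blocks += level_blocks;
    } while (level_blocks > 1);

    return (uint64_t)verity_blocks * block_size;
}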
Example #6
static void make_subgrid_division(const ivec n, int ovl, int nthread,
                                  ivec nsub)
{
    int   gsize_opt, gsize;
    int   nsx, nsy, nsz;
    char *env;

    gsize_opt = -1;
    for (nsx = 1; nsx <= nthread; nsx++)
    {
        if (nthread % nsx == 0)
        {
            for (nsy = 1; nsy <= nthread; nsy++)
            {
                if (nsx*nsy <= nthread && nthread % (nsx*nsy) == 0)
                {
                    nsz = nthread/(nsx*nsy);

                    /* Determine the number of grid points per thread */
                    gsize =
                        (div_round_up(n[XX], nsx) + ovl)*
                        (div_round_up(n[YY], nsy) + ovl)*
                        (div_round_up(n[ZZ], nsz) + ovl);

                    /* Minimize the number of grids points per thread
                     * and, secondarily, the number of cuts in minor dimensions.
                     */
                    if (gsize_opt == -1 ||
                        gsize < gsize_opt ||
                        (gsize == gsize_opt &&
                         (nsz < nsub[ZZ] || (nsz == nsub[ZZ] && nsy < nsub[YY]))))
                    {
                        nsub[XX]  = nsx;
                        nsub[YY]  = nsy;
                        nsub[ZZ]  = nsz;
                        gsize_opt = gsize;
                    }
                }
            }
        }
    }

    env = getenv("GMX_PME_THREAD_DIVISION");
    if (env != NULL)
    {
        sscanf(env, "%20d %20d %20d", &nsub[XX], &nsub[YY], &nsub[ZZ]);
    }

    if (nsub[XX]*nsub[YY]*nsub[ZZ] != nthread)
    {
        gmx_fatal(FARGS, "PME grid thread division (%d x %d x %d) does not match the total number of threads (%d)", nsub[XX], nsub[YY], nsub[ZZ], nthread);
    }
}
Example #7
void SliceEffect::get_output_size(unsigned *width, unsigned *height,
                                  unsigned *virtual_width, unsigned *virtual_height) const
{
	if (direction == HORIZONTAL) {
		*width = div_round_up(input_width, input_slice_size) * output_slice_size;
		*height = input_height;
	} else {
		*width = input_width;
		*height = div_round_up(input_height, input_slice_size) * output_slice_size;
	}
	*virtual_width = *width;
	*virtual_height = *height;
}
Example #8
/***********************************************************
 * Delay for the given number of microseconds. The driver must
 * be initialized before calling this function.
 ***********************************************************/
void udelay(uint32_t usec)
{
	assert(timer_ops != NULL &&
		(timer_ops->clk_mult != 0) &&
		(timer_ops->clk_div != 0) &&
		(timer_ops->get_timer_value != NULL));

	uint32_t start, delta, total_delta;

	assert(usec < UINT32_MAX / timer_ops->clk_div);

	start = timer_ops->get_timer_value();

	/* Add an extra tick to avoid delaying less than requested. */
	total_delta =
		div_round_up(usec * timer_ops->clk_div, timer_ops->clk_mult) + 1;

	do {
		/*
		 * If the timer value wraps around, the subtraction will
		 * overflow and it will still give the correct result.
		 */
		delta = start - timer_ops->get_timer_value(); /* Decreasing counter */

	} while (delta < total_delta);
}
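The requested delay is converted to timer ticks with a ceiling division plus one guard tick, so the delay is never shorter than asked for. A worked check with hypothetical clock parameters (clk_div = 3, clk_mult = 25):

#include <assert.h>
#include <stdint.h>

static inline uint32_t div_round_up(uint32_t v, uint32_t a)
{
	return (v + a - 1) / a;
}

int main(void)
{
	/* Hypothetical timer: with clk_div = 3 and clk_mult = 25, a 100 us
	 * delay needs ceil(100 * 3 / 25) + 1 = 13 ticks (the +1 is the
	 * extra tick added by udelay above). */
	uint32_t usec = 100, clk_div = 3, clk_mult = 25;
	assert(div_round_up(usec * clk_div, clk_mult) + 1 == 13);
	return 0;
}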
Example #9
		void metadata_size(int size)
		{
			if (m_metadata_size > 0 || size <= 0 || size > 4 * 1024 * 1024) return;
			m_metadata_size = size;
			m_metadata.reset(new char[size]);
			m_requested_metadata.resize(div_round_up(size, 16 * 1024));
		}
Example #10
void tcam::add_flow_hw(uint32_t flow_pos, flow &new_flow)
{
	// get a hardware representation of the flow
	hw_flow new_hw_flow(new_flow);

	memset(tcam_temp_write_data, 0, sizeof(tcam_temp_write_data)); // clear the temp table

	for (uint32_t lut = 0; lut < tcam_fpga_lut_count_per_line; lut++) { // over all 49 LUTs

		uint32_t key_5_bit = get_bits<>(
			new_hw_flow.key,
			lut * tcam_fpga_lut_width,
			std::min(
				lut * tcam_fpga_lut_width + tcam_fpga_lut_width -1,
				hw_flow::hw_tuple_size_bits -1
			)
		);

		uint32_t mask_5_bit = get_bits<>(
			new_hw_flow.mask,
			lut * tcam_fpga_lut_width,
			std::min(
				lut * tcam_fpga_lut_width + tcam_fpga_lut_width -1,
				hw_flow::hw_tuple_size_bits -1
			)
		);

		// the LUT on the FPGA needs 32 shift (write) operations to be filled
		for (uint32_t lut_pos = 0; lut_pos < tcam_fpga_lut_size; lut_pos++) {

			if (((
					~mask_5_bit				// '0' bit in the mask means wildcard (the corresponding key bit does not count anymore)
				|	~(lut_pos ^ key_5_bit)	// or there is a match with the key bit (xnor)
			) & (tcam_fpga_lut_size-1)) == (tcam_fpga_lut_size-1)) { // all 5 bits must be one

				// set this LUT bit to '1'
				tcam_temp_write_data[lut / 32][lut_pos] |= (uint32_t)1 << (lut % 32);
			}
		}
	}

	// write the precalculated LUT contents into one line of the TCAM

	// the shift operations are performed the other way round (see srl32e xilinx documentation)
	for (int lut_pos = tcam_fpga_lut_size-1; lut_pos >= 0; lut_pos--) {

		// 32 LUTs can be shifted simultaneously with one bus access (@ 49 LUTs: 2 32bit bus accesses)
		for (uint32_t word_access = 0; word_access < div_round_up(tcam_fpga_lut_count_per_line, 32); word_access++) {

			axi_write(srl_access[flow_pos][word_access], tcam_temp_write_data[word_access][lut_pos]);
		}
	}

	// add action to tcam
	axi_write(actions[flow_pos], new_flow.get_action_hw_representation());
}
Example #11
static void enable_cache(void)
{
	mmu_init();
	/* Whole space is uncached. */
	mmu_config_range(0, 4096, DCACHE_OFF);
	/* SRAM is cached. MMU code will round size up to page size. */
	mmu_config_range((uintptr_t)_sram/MiB, div_round_up(_sram_size, MiB),
			 DCACHE_WRITEBACK);
	mmu_disable_range(0, 1);
	dcache_mmu_enable();
}
Example #12
static void __attribute__((noinline)) romstage(void)
{
	timestamp_init(0);
	timestamp_add_now(TS_START_ROMSTAGE);

	console_init();
	exception_init();

	sdram_init(get_sdram_config());

	/* used for MMU and CBMEM setup, in MB */
	u32 dram_start_mb = (uintptr_t)_dram/MiB;
	u32 dram_end_mb = sdram_max_addressable_mb();
	u32 dram_size_mb = dram_end_mb - dram_start_mb;

	configure_l2_cache();
	mmu_init();
	/* Device memory below DRAM is uncached. */
	mmu_config_range(0, dram_start_mb, DCACHE_OFF);
	/* SRAM is cached. MMU code will round size up to page size. */
	mmu_config_range((uintptr_t)_sram/MiB, div_round_up(_sram_size, MiB),
			 DCACHE_WRITEBACK);
	/* DRAM is cached. */
	mmu_config_range(dram_start_mb, dram_size_mb, DCACHE_WRITEBACK);
	/* A window for DMA is uncached. */
	mmu_config_range((uintptr_t)_dma_coherent/MiB,
			 _dma_coherent_size/MiB, DCACHE_OFF);
	/* The space above DRAM is uncached. */
	if (dram_end_mb < 4096)
		mmu_config_range(dram_end_mb, 4096 - dram_end_mb, DCACHE_OFF);
	mmu_disable_range(0, 1);
	dcache_mmu_enable();

	/*
	 * A watchdog reset only resets part of the system so it ends up in
	 * a funny state. If that happens, we need to reset the whole machine.
	 */
	if (power_reset_status() == POWER_RESET_WATCHDOG) {
		printk(BIOS_INFO, "Watchdog reset detected, rebooting.\n");
		hard_reset();
	}

	/* FIXME: this may require coordination with moving timestamps */
	cbmem_initialize_empty();

	early_mainboard_init();

	run_ramstage();
}
Example #13
void rk808_configure_buck(uint8_t bus, int buck, int millivolts)
{
	uint8_t vsel;
	uint8_t buck_reg;

	switch (buck) {
	case 1:
	case 2:
		/* based on a 725 mV floor, in 25 mV steps */
		vsel = (div_round_up(millivolts, 25) - 29) * 2 + 1;
		assert(vsel <= 0x3f);
		buck_reg = BUCK1SEL + 4 * (buck - 1);
		break;
	case 4:
		vsel = div_round_up(millivolts, 100) - 18;
		assert(vsel <= 0xf);
		buck_reg = BUCK4SEL;
		break;
	default:
		die("Unknown buck index!");
	}
	rk808_clrsetbits(bus, buck_reg, 0x3f, vsel);
	rk808_clrsetbits(bus, DCDC_EN, 0, 1 << (buck - 1));
}
Example #14
static void stm32_sdmmc2_init(void)
{
	uint32_t clock_div;
	uintptr_t base = sdmmc2_params.reg_base;

	clock_div = div_round_up(sdmmc2_params.clk_rate,
				 STM32MP1_MMC_INIT_FREQ * 2);

	mmio_write_32(base + SDMMC_CLKCR, SDMMC_CLKCR_HWFC_EN | clock_div |
		      sdmmc2_params.negedge |
		      sdmmc2_params.pin_ckin);

	mmio_write_32(base + SDMMC_POWER,
		      SDMMC_POWER_PWRCTRL | sdmmc2_params.dirpol);

	mdelay(1);
}
Example #15
static int stm32_sdmmc2_set_ios(unsigned int clk, unsigned int width)
{
	uintptr_t base = sdmmc2_params.reg_base;
	uint32_t bus_cfg = 0;
	uint32_t clock_div, max_freq;
	uint32_t clk_rate = sdmmc2_params.clk_rate;
	uint32_t max_bus_freq = sdmmc2_params.device_info->max_bus_freq;

	switch (width) {
	case MMC_BUS_WIDTH_1:
		break;
	case MMC_BUS_WIDTH_4:
		bus_cfg |= SDMMC_CLKCR_WIDBUS_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_cfg |= SDMMC_CLKCR_WIDBUS_8;
		break;
	default:
		panic();
		break;
	}

	if (sdmmc2_params.device_info->mmc_dev_type == MMC_IS_EMMC) {
		if (max_bus_freq >= 52000000U) {
			max_freq = STM32MP1_EMMC_HIGH_SPEED_MAX_FREQ;
		} else {
			max_freq = STM32MP1_EMMC_NORMAL_SPEED_MAX_FREQ;
		}
	} else {
		if (max_bus_freq >= 50000000U) {
			max_freq = STM32MP1_SD_HIGH_SPEED_MAX_FREQ;
		} else {
			max_freq = STM32MP1_SD_NORMAL_SPEED_MAX_FREQ;
		}
	}

	clock_div = div_round_up(clk_rate, max_freq * 2);

	mmio_write_32(base + SDMMC_CLKCR,
		      SDMMC_CLKCR_HWFC_EN | clock_div | bus_cfg |
		      sdmmc2_params.negedge |
		      sdmmc2_params.pin_ckin);

	return 0;
}
Example #16
File: ucnv.c Project: MSch/MacRuby
long
str_ucnv_length(rb_str_t *self, bool ucs2_mode)
{
    USE_CONVERTER(cnv, self->encoding);

    const char *pos = self->bytes;
    const char *end = pos + self->length_in_bytes;
    long len = 0;
    bool valid_encoding = true;
    for (;;) {
	const char *character_start_pos = pos;
	// iterate through the string one Unicode code point at a time
	UErrorCode err = U_ZERO_ERROR;
	UChar32 c = ucnv_getNextUChar(cnv, &pos, end, &err);
	if (err == U_INDEX_OUTOFBOUNDS_ERROR) {
	    // end of the string
	    break;
	}
	else if (U_FAILURE(err)) {
	    valid_encoding = false;
	    long min_char_size = self->encoding->min_char_size;
	    long converted_width = pos - character_start_pos;
	    len += div_round_up(converted_width, min_char_size);
	}
	else {
	    if (ucs2_mode && !U_IS_BMP(c)) {
		len += 2;
	    }
	    else {
		++len;
	    }
	}
    }

    ucnv_close(cnv);

    str_set_valid_encoding(self, valid_encoding);

    return len;
}
Example #17
void pmegrids_init(pmegrids_t *grids,
                   int nx, int ny, int nz, int nz_base,
                   int pme_order,
                   gmx_bool bUseThreads,
                   int nthread,
                   int overlap_x,
                   int overlap_y)
{
    ivec n, n_base;
    int  t, x, y, z, d, i, tfac;
    int  max_comm_lines = -1;

    n[XX] = nx - (pme_order - 1);
    n[YY] = ny - (pme_order - 1);
    n[ZZ] = nz - (pme_order - 1);

    copy_ivec(n, n_base);
    n_base[ZZ] = nz_base;

    pmegrid_init(&grids->grid, 0, 0, 0, 0, 0, 0, n[XX], n[YY], n[ZZ], FALSE, pme_order,
                 NULL);

    grids->nthread = nthread;

    make_subgrid_division(n_base, pme_order-1, grids->nthread, grids->nc);

    if (bUseThreads)
    {
        ivec nst;
        int  gridsize;

        for (d = 0; d < DIM; d++)
        {
            nst[d] = div_round_up(n[d], grids->nc[d]) + pme_order - 1;
        }
        set_grid_alignment(&nst[ZZ], pme_order);

        if (debug)
        {
            fprintf(debug, "pmegrid thread local division: %d x %d x %d\n",
                    grids->nc[XX], grids->nc[YY], grids->nc[ZZ]);
            fprintf(debug, "pmegrid %d %d %d max thread pmegrid %d %d %d\n",
                    nx, ny, nz,
                    nst[XX], nst[YY], nst[ZZ]);
        }

        snew(grids->grid_th, grids->nthread);
        t        = 0;
        gridsize = nst[XX]*nst[YY]*nst[ZZ];
        set_gridsize_alignment(&gridsize, pme_order);
        snew_aligned(grids->grid_all,
                     grids->nthread*gridsize+(grids->nthread+1)*GMX_CACHE_SEP,
                     SIMD4_ALIGNMENT);

        for (x = 0; x < grids->nc[XX]; x++)
        {
            for (y = 0; y < grids->nc[YY]; y++)
            {
                for (z = 0; z < grids->nc[ZZ]; z++)
                {
                    pmegrid_init(&grids->grid_th[t],
                                 x, y, z,
                                 (n[XX]*(x  ))/grids->nc[XX],
                                 (n[YY]*(y  ))/grids->nc[YY],
                                 (n[ZZ]*(z  ))/grids->nc[ZZ],
                                 (n[XX]*(x+1))/grids->nc[XX],
                                 (n[YY]*(y+1))/grids->nc[YY],
                                 (n[ZZ]*(z+1))/grids->nc[ZZ],
                                 TRUE,
                                 pme_order,
                                 grids->grid_all+GMX_CACHE_SEP+t*(gridsize+GMX_CACHE_SEP));
                    t++;
                }
            }
        }
    }
    else
    {
        grids->grid_th = NULL;
    }

    snew(grids->g2t, DIM);
    tfac = 1;
    for (d = DIM-1; d >= 0; d--)
    {
        snew(grids->g2t[d], n[d]);
        t = 0;
        for (i = 0; i < n[d]; i++)
        {
            /* The second check should match the parameters
             * of the pmegrid_init call above.
             */
            while (t + 1 < grids->nc[d] && i >= (n[d]*(t+1))/grids->nc[d])
            {
                t++;
            }
            grids->g2t[d][i] = t*tfac;
        }

        tfac *= grids->nc[d];

        switch (d)
        {
            case XX: max_comm_lines = overlap_x;     break;
            case YY: max_comm_lines = overlap_y;     break;
            case ZZ: max_comm_lines = pme_order - 1; break;
        }
        grids->nthread_comm[d] = 0;
        while ((n[d]*grids->nthread_comm[d])/grids->nc[d] < max_comm_lines &&
               grids->nthread_comm[d] < grids->nc[d])
        {
            grids->nthread_comm[d]++;
        }
        if (debug != NULL)
        {
            fprintf(debug, "pmegrid thread grid communication range in %c: %d\n",
                    'x'+d, grids->nthread_comm[d]);
        }
        /* It should be possible to make grids->nthread_comm[d]==grids->nc[d]
         * work, but this is not a problematic restriction.
         */
        if (grids->nc[d] > 1 && grids->nthread_comm[d] > grids->nc[d])
        {
            gmx_fatal(FARGS, "Too many threads for PME (%d) compared to the number of grid lines, reduce the number of threads doing PME", grids->nthread);
        }
    }
}
Example #18
int main(int argc, char **argv)
{
    char *data_filename;
    char *verity_filename;
    unsigned char *salt = NULL;
    size_t salt_size = 0;
    bool sparse = false;
    size_t block_size = 4096;
    uint64_t calculate_size = 0;
    bool verbose = false;

    while (1) {
        const static struct option long_options[] = {
            {"salt-str", required_argument, 0, 'a'},
            {"salt-hex", required_argument, 0, 'A'},
            {"help", no_argument, 0, 'h'},
            {"sparse", no_argument, 0, 'S'},
            {"verity-size", required_argument, 0, 's'},
            {"verbose", no_argument, 0, 'v'},
            {NULL, 0, 0, 0}
        };
        int c = getopt_long(argc, argv, "a:A:hSs:v", long_options, NULL);
        if (c < 0) {
            break;
        }

        switch (c) {
        case 'a':
            salt_size = strlen(optarg);
            salt = new unsigned char[salt_size]();
            if (salt == NULL) {
                FATAL("failed to allocate memory for salt\n");
            }
            memcpy(salt, optarg, salt_size);
            break;
        case 'A': {
                BIGNUM *bn = NULL;
                if(!BN_hex2bn(&bn, optarg)) {
                    FATAL("failed to convert salt from hex\n");
                }
                salt_size = BN_num_bytes(bn);
                salt = new unsigned char[salt_size]();
                if (salt == NULL) {
                    FATAL("failed to allocate memory for salt\n");
                }
                if((size_t)BN_bn2bin(bn, salt) != salt_size) {
                    FATAL("failed to convert salt to bytes\n");
                }
            }
            break;
        case 'h':
            usage();
            return 1;
        case 'S':
            sparse = true;
            break;
        case 's': {
                char* endptr;
                errno = 0;
                unsigned long long int inSize = strtoull(optarg, &endptr, 0);
                if (optarg[0] == '\0' || *endptr != '\0' ||
                        (errno == ERANGE && inSize == ULLONG_MAX)) {
                    FATAL("invalid value of verity-size\n");
                }
                if (inSize > UINT64_MAX) {
                    FATAL("invalid value of verity-size\n");
                }
                calculate_size = (uint64_t)inSize;
            }
            break;
        case 'v':
            verbose = true;
            break;
        case '?':
            usage();
            return 1;
        default:
            abort();
        }
    }

    argc -= optind;
    argv += optind;

    const EVP_MD *md = EVP_sha256();
    if (!md) {
        FATAL("failed to get digest\n");
    }

    size_t hash_size = EVP_MD_size(md);
    assert(hash_size * 2 < block_size);

    if (!salt || !salt_size) {
        salt_size = hash_size;
        salt = new unsigned char[salt_size];
        if (salt == NULL) {
            FATAL("failed to allocate memory for salt\n");
        }

        int random_fd = open("/dev/urandom", O_RDONLY);
        if (random_fd < 0) {
            FATAL("failed to open /dev/urandom\n");
        }

        ssize_t ret = read(random_fd, salt, salt_size);
        if (ret != (ssize_t)salt_size) {
            FATAL("failed to read %zu bytes from /dev/urandom: %zd %d\n", salt_size, ret, errno);
        }
        close(random_fd);
    }

    if (calculate_size) {
        if (argc != 0) {
            usage();
            return 1;
        }
        size_t verity_blocks = 0;
        size_t level_blocks;
        int levels = 0;
        do {
            level_blocks = verity_tree_blocks(calculate_size, block_size, hash_size, levels);
            levels++;
            verity_blocks += level_blocks;
        } while (level_blocks > 1);

        printf("%" PRIu64 "\n", (uint64_t)verity_blocks * block_size);
        return 0;
    }

    if (argc != 2) {
        usage();
        return 1;
    }

    data_filename = argv[0];
    verity_filename = argv[1];

    int fd = open(data_filename, O_RDONLY);
    if (fd < 0) {
        FATAL("failed to open %s\n", data_filename);
    }

    struct sparse_file *file;
    if (sparse) {
        file = sparse_file_import(fd, false, false);
    } else {
        file = sparse_file_import_auto(fd, false, verbose);
    }

    if (!file) {
        FATAL("failed to read file %s\n", data_filename);
    }

    int64_t len = sparse_file_len(file, false, false);
    if (len % block_size != 0) {
        FATAL("file size %" PRIu64 " is not a multiple of %zu bytes\n",
                len, block_size);
    }

    int levels = 0;
    size_t verity_blocks = 0;
    size_t level_blocks;

    do {
        level_blocks = verity_tree_blocks(len, block_size, hash_size, levels);
        levels++;
        verity_blocks += level_blocks;
    } while (level_blocks > 1);

    unsigned char *verity_tree = new unsigned char[verity_blocks * block_size]();
    unsigned char **verity_tree_levels = new unsigned char *[levels + 1]();
    size_t *verity_tree_level_blocks = new size_t[levels]();
    if (verity_tree == NULL || verity_tree_levels == NULL || verity_tree_level_blocks == NULL) {
        FATAL("failed to allocate memory for verity tree\n");
    }

    unsigned char *ptr = verity_tree;
    for (int i = levels - 1; i >= 0; i--) {
        verity_tree_levels[i] = ptr;
        verity_tree_level_blocks[i] = verity_tree_blocks(len, block_size, hash_size, i);
        ptr += verity_tree_level_blocks[i] * block_size;
    }
    assert(ptr == verity_tree + verity_blocks * block_size);
    assert(verity_tree_level_blocks[levels - 1] == 1);

    unsigned char zero_block_hash[hash_size];
    unsigned char zero_block[block_size];
    memset(zero_block, 0, block_size);
    hash_block(md, zero_block, block_size, salt, salt_size, zero_block_hash, NULL);

    unsigned char root_hash[hash_size];
    verity_tree_levels[levels] = root_hash;

    struct sparse_hash_ctx ctx;
    ctx.hashes = verity_tree_levels[0];
    ctx.salt = salt;
    ctx.salt_size = salt_size;
    ctx.hash_size = hash_size;
    ctx.block_size = block_size;
    ctx.zero_block_hash = zero_block_hash;
    ctx.md = md;

    sparse_file_callback(file, false, false, hash_chunk, &ctx);

    sparse_file_destroy(file);
    close(fd);

    for (int i = 0; i < levels; i++) {
        size_t out_size;
        hash_blocks(md,
                verity_tree_levels[i], verity_tree_level_blocks[i] * block_size,
                verity_tree_levels[i + 1], &out_size,
                salt, salt_size, block_size);
        if (i < levels - 1) {
            assert(div_round_up(out_size, block_size) == verity_tree_level_blocks[i + 1]);
        } else {
            assert(out_size == hash_size);
        }
    }

    for (size_t i = 0; i < hash_size; i++) {
        printf("%02x", root_hash[i]);
    }
    printf(" ");
    for (size_t i = 0; i < salt_size; i++) {
        printf("%02x", salt[i]);
    }
    printf("\n");

    fd = open(verity_filename, O_WRONLY|O_CREAT, 0666);
    if (fd < 0) {
        FATAL("failed to open output file '%s'\n", verity_filename);
    }
    write(fd, verity_tree, verity_blocks * block_size);
    close(fd);

    delete[] verity_tree_levels;
    delete[] verity_tree_level_blocks;
    delete[] verity_tree;
    delete[] salt;
}
Example #19
static bool upb_msglayout_init(const upb_msgdef *m,
                               upb_msglayout *l,
                               upb_msgfactory *factory) {
  upb_msg_field_iter it;
  upb_msg_oneof_iter oit;
  size_t hasbit;
  size_t submsg_count = 0;
  const upb_msglayout **submsgs;
  upb_msglayout_field *fields;

  for (upb_msg_field_begin(&it, m);
       !upb_msg_field_done(&it);
       upb_msg_field_next(&it)) {
    const upb_fielddef* f = upb_msg_iter_field(&it);
    if (upb_fielddef_issubmsg(f)) {
      submsg_count++;
    }
  }

  memset(l, 0, sizeof(*l));

  fields = upb_gmalloc(upb_msgdef_numfields(m) * sizeof(*fields));
  submsgs = upb_gmalloc(submsg_count * sizeof(*submsgs));

  if ((!fields && upb_msgdef_numfields(m)) ||
      (!submsgs && submsg_count)) {
    /* OOM. */
    upb_gfree(fields);
    upb_gfree(submsgs);
    return false;
  }

  l->field_count = upb_msgdef_numfields(m);
  l->fields = fields;
  l->submsgs = submsgs;

  /* Allocate data offsets in three stages:
   *
   * 1. hasbits.
   * 2. regular fields.
   * 3. oneof fields.
   *
   * OPT: There is a lot of room for optimization here to minimize the size.
   */

  /* Allocate hasbits and set basic field attributes. */
  submsg_count = 0;
  for (upb_msg_field_begin(&it, m), hasbit = 0;
       !upb_msg_field_done(&it);
       upb_msg_field_next(&it)) {
    const upb_fielddef* f = upb_msg_iter_field(&it);
    upb_msglayout_field *field = &fields[upb_fielddef_index(f)];

    field->number = upb_fielddef_number(f);
    field->descriptortype = upb_fielddef_descriptortype(f);
    field->label = upb_fielddef_label(f);

    if (upb_fielddef_issubmsg(f)) {
      const upb_msglayout *sub_layout =
          upb_msgfactory_getlayout(factory, upb_fielddef_msgsubdef(f));
      field->submsg_index = submsg_count++;
      submsgs[field->submsg_index] = sub_layout;
    }

    if (upb_fielddef_haspresence(f) && !upb_fielddef_containingoneof(f)) {
      field->presence = (hasbit++);
    } else {
      field->presence = 0;
    }
  }

  /* Account for space used by hasbits. */
  l->size = div_round_up(hasbit, 8);

  /* Allocate non-oneof fields. */
  for (upb_msg_field_begin(&it, m); !upb_msg_field_done(&it);
       upb_msg_field_next(&it)) {
    const upb_fielddef* f = upb_msg_iter_field(&it);
    size_t field_size = upb_msg_fielddefsize(f);
    size_t index = upb_fielddef_index(f);

    if (upb_fielddef_containingoneof(f)) {
      /* Oneofs are handled separately below. */
      continue;
    }

    fields[index].offset = upb_msglayout_place(l, field_size);
  }

  /* Allocate oneof fields.  Each oneof field consists of a uint32 for the case
   * and space for the actual data. */
  for (upb_msg_oneof_begin(&oit, m); !upb_msg_oneof_done(&oit);
       upb_msg_oneof_next(&oit)) {
    const upb_oneofdef* o = upb_msg_iter_oneof(&oit);
    upb_oneof_iter fit;

    size_t case_size = sizeof(uint32_t);  /* Could potentially optimize this. */
    size_t field_size = 0;
    uint32_t case_offset;
    uint32_t data_offset;

    /* Calculate field size: the max of all field sizes. */
    for (upb_oneof_begin(&fit, o);
         !upb_oneof_done(&fit);
         upb_oneof_next(&fit)) {
      const upb_fielddef* f = upb_oneof_iter_field(&fit);
      field_size = UPB_MAX(field_size, upb_msg_fielddefsize(f));
    }

    /* Align and allocate case offset. */
    case_offset = upb_msglayout_place(l, case_size);
    data_offset = upb_msglayout_place(l, field_size);

    for (upb_oneof_begin(&fit, o);
         !upb_oneof_done(&fit);
         upb_oneof_next(&fit)) {
      const upb_fielddef* f = upb_oneof_iter_field(&fit);
      fields[upb_fielddef_index(f)].offset = data_offset;
      fields[upb_fielddef_index(f)].presence = ~case_offset;
    }
  }

  /* Size of the entire structure should be a multiple of its greatest
   * alignment.  TODO: track overall alignment for real? */
  l->size = align_up(l->size, 8);

  return true;
}
Example #20
void TopicSender::sendWithFEC()
{
#if WITH_OPENFEC
	uint16_t msg_id = m_sender->allocateMessageID();
	uint64_t dataSize = sizeof(FECHeader) + m_buf.size();

	// If the message fits in a single packet, use that as the buffer size
	uint64_t symbolSize;
	uint64_t sourceSymbols;

	if(dataSize <= FECPacket::MaxDataSize)
	{
		sourceSymbols = 1;
		symbolSize = dataSize;
	}
	else
	{
		// We need to pad the data to a multiple of our packet payload size.
		sourceSymbols = div_round_up(dataSize, FECPacket::MaxDataSize);
		symbolSize = FECPacket::MaxDataSize;
	}

	ROS_DEBUG("dataSize: %lu, symbol size: %lu, sourceSymbols: %lu", dataSize, symbolSize, sourceSymbols);

	uint64_t packetSize = sizeof(FECPacket::Header) + symbolSize;

	ROS_DEBUG("=> packetSize: %lu", packetSize);

	uint64_t repairSymbols = std::ceil(m_sender->fec() * sourceSymbols);

	uint64_t numPackets = sourceSymbols + repairSymbols;

	of_session_t* ses = 0;
	uint32_t prng_seed = rand();
	if(sourceSymbols >= MIN_PACKETS_LDPC)
	{
		ROS_DEBUG("%s: Choosing LDPC-Staircase codec", m_topicName.c_str());

		if(of_create_codec_instance(&ses, OF_CODEC_LDPC_STAIRCASE_STABLE, OF_ENCODER, 1) != OF_STATUS_OK)
		{
			ROS_ERROR("%s: Could not create LDPC codec instance", m_topicName.c_str());
			return;
		}

		of_ldpc_parameters_t params;
		params.nb_source_symbols = sourceSymbols;
		params.nb_repair_symbols = std::ceil(m_sender->fec() * sourceSymbols);
		params.encoding_symbol_length = symbolSize;
		params.prng_seed = prng_seed;
		params.N1 = 7;

		ROS_DEBUG("LDPC seed: 7, 0x%X", params.prng_seed);

		if(of_set_fec_parameters(ses, (of_parameters_t*)&params) != OF_STATUS_OK)
		{
			ROS_ERROR("%s: Could not set FEC parameters", m_topicName.c_str());
			of_release_codec_instance(ses);
			return;
		}
	}
	else
	{
		ROS_DEBUG("%s: Choosing Reed-Solomon codec", m_topicName.c_str());

		if(of_create_codec_instance(&ses, OF_CODEC_REED_SOLOMON_GF_2_M_STABLE, OF_ENCODER, 0) != OF_STATUS_OK)
		{
			ROS_ERROR("%s: Could not create REED_SOLOMON codec instance", m_topicName.c_str());
			return;
		}

		of_rs_2_m_parameters params;
		params.nb_source_symbols = sourceSymbols;
		params.nb_repair_symbols = std::ceil(m_sender->fec() * sourceSymbols);
		params.encoding_symbol_length = symbolSize;
		params.m = 8;

		if(of_set_fec_parameters(ses, (of_parameters_t*)&params) != OF_STATUS_OK)
		{
			ROS_ERROR("%s: Could not set FEC parameters", m_topicName.c_str());
			of_release_codec_instance(ses);
			return;
		}
	}

	std::vector<uint8_t> packetBuffer(numPackets * packetSize);
	std::vector<void*> symbols(sourceSymbols + repairSymbols);

	uint64_t writtenData = 0;

	// Fill the source packets
	for(uint64_t i = 0; i < sourceSymbols; ++i)
	{
		uint8_t* packetPtr = packetBuffer.data() + i * packetSize;

		FECPacket::Header* header = reinterpret_cast<FECPacket::Header*>(packetPtr);

		header->msg_id = msg_id;
		header->symbol_id = i;
		header->symbol_length = symbolSize;
		header->source_symbols = sourceSymbols;
		header->repair_symbols = repairSymbols;
		header->prng_seed = prng_seed;

		uint8_t* dataPtr = packetPtr + sizeof(FECPacket::Header);
		uint64_t remainingSpace = symbolSize;

		symbols[i] = dataPtr;

		if(i == 0)
		{
			// First packet includes the FECHeader
			FECHeader* msgHeader = reinterpret_cast<FECHeader*>(dataPtr);

			// Fill in header fields
			msgHeader->flags = m_flags;
			msgHeader->topic_msg_counter = m_inputMsgCounter;

			strncpy(msgHeader->topic_name, m_topicName.c_str(), sizeof(msgHeader->topic_name));
			if(msgHeader->topic_name[sizeof(msgHeader->topic_name)-1] != 0)
			{
				ROS_ERROR("Topic '%s' is too long. Please shorten the name.", m_topicName.c_str());
				msgHeader->topic_name[sizeof(msgHeader->topic_name)-1] = 0;
			}

			strncpy(msgHeader->topic_type, m_topicType.c_str(), sizeof(msgHeader->topic_type));
			if(msgHeader->topic_type[sizeof(msgHeader->topic_type)-1] != 0)
			{
				ROS_ERROR("Topic type '%s' is too long. Please shorten the name.", m_topicType.c_str());
				msgHeader->topic_type[sizeof(msgHeader->topic_type)-1] = 0;
			}

			for(int i = 0; i < 4; ++i)
				msgHeader->topic_md5[i] = m_md5[i];

			dataPtr += sizeof(FECHeader);
			remainingSpace -= sizeof(FECHeader);
		}

		uint64_t chunkSize = std::min(remainingSpace, m_buf.size() - writtenData);
		memcpy(dataPtr, m_buf.data() + writtenData, chunkSize);
		writtenData += chunkSize;

		// Set any padding to zero
		if(chunkSize < remainingSpace)
			memset(dataPtr + chunkSize, 0, remainingSpace - chunkSize);
	}

	// Fill the repair packets
	for(uint64_t i = sourceSymbols; i < sourceSymbols + repairSymbols; ++i)
	{
		uint8_t* packetPtr = packetBuffer.data() + i * packetSize;

		FECPacket::Header* header = reinterpret_cast<FECPacket::Header*>(packetPtr);

		header->msg_id = msg_id;
		header->symbol_id = i;
		header->symbol_length = symbolSize;
		header->source_symbols = sourceSymbols;
		header->repair_symbols = repairSymbols;
		header->prng_seed = prng_seed;

		uint8_t* dataPtr = packetPtr + sizeof(FECPacket::Header);
		symbols[i] = dataPtr;
	}
	for(uint64_t i = sourceSymbols; i < sourceSymbols + repairSymbols; ++i)
	{
		if(of_build_repair_symbol(ses, symbols.data(), i) != OF_STATUS_OK)
		{
			ROS_ERROR("%s: Could not build repair symbol", m_topicName.c_str());
			of_release_codec_instance(ses);
			return;
		}
	}

	// FEC work is done
	of_release_codec_instance(ses);

	std::vector<unsigned int> packetOrder(numPackets);
	std::iota(packetOrder.begin(), packetOrder.end(), 0);

	// Send the packets in random order
	unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
	std::mt19937 mt(seed);
	std::shuffle(packetOrder.begin(), packetOrder.end(), mt);

	ROS_DEBUG("Sending %d packets", (int)packetOrder.size());
	for(unsigned int idx : packetOrder)
	{
		if(!m_sender->send(packetBuffer.data() + idx * packetSize, packetSize, m_topicName))
			return;
	}
#else
	throw std::runtime_error("Forward error correction requested, but I was not compiled with FEC support...");
#endif
}
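Two ceiling operations drive the packet layout above: the payload is split into ceil(dataSize / FECPacket::MaxDataSize) source symbols, and the repair count is ceil(fec * sourceSymbols). A worked sketch with hypothetical numbers (1400-byte maximum payload, 0.25 repair ratio):

#include <assert.h>
#include <math.h>
#include <stdint.h>

static uint64_t div_round_up_u64(uint64_t v, uint64_t a)
{
	return (v + a - 1) / a;
}

int main(void)
{
	/* Hypothetical values: 10000 bytes of header + serialized message,
	 * a 1400-byte maximum packet payload and a 0.25 repair ratio. */
	uint64_t data_size = 10000;
	uint64_t max_data_size = 1400;
	double fec_ratio = 0.25;

	uint64_t source_symbols = div_round_up_u64(data_size, max_data_size);
	uint64_t repair_symbols = (uint64_t)ceil(fec_ratio * (double)source_symbols);

	/* 8 source symbols of 1400 bytes (the last one zero-padded) plus
	 * 2 repair symbols => 10 packets on the wire. */
	assert(source_symbols == 8 && repair_symbols == 2);
	return 0;
}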
Example #21
static void
calculate_tiles(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_gmem_stateobj *gmem = &ctx->gmem;
	struct pipe_scissor_state *scissor = &batch->max_scissor;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	const uint32_t gmem_alignw = ctx->screen->gmem_alignw;
	const uint32_t gmem_alignh = ctx->screen->gmem_alignh;
	const uint32_t gmem_size = ctx->screen->gmemsize_bytes;
	uint32_t minx, miny, width, height;
	uint32_t nbins_x = 1, nbins_y = 1;
	uint32_t bin_w, bin_h;
	uint32_t max_width = bin_width(ctx->screen);
	uint8_t cbuf_cpp[MAX_RENDER_TARGETS] = {0}, zsbuf_cpp[2] = {0};
	uint32_t i, j, t, xoff, yoff;
	uint32_t tpp_x, tpp_y;
	bool has_zs = !!(batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL));
	int tile_n[ARRAY_SIZE(ctx->pipe)];

	if (has_zs) {
		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
		zsbuf_cpp[0] = rsc->cpp;
		if (rsc->stencil)
			zsbuf_cpp[1] = rsc->stencil->cpp;
	}
	for (i = 0; i < pfb->nr_cbufs; i++) {
		if (pfb->cbufs[i])
			cbuf_cpp[i] = util_format_get_blocksize(pfb->cbufs[i]->format);
		else
			cbuf_cpp[i] = 4;
	}

	if (!memcmp(gmem->zsbuf_cpp, zsbuf_cpp, sizeof(zsbuf_cpp)) &&
		!memcmp(gmem->cbuf_cpp, cbuf_cpp, sizeof(cbuf_cpp)) &&
		!memcmp(&gmem->scissor, scissor, sizeof(gmem->scissor))) {
		/* everything is up-to-date */
		return;
	}

	if (fd_mesa_debug & FD_DBG_NOSCIS) {
		minx = 0;
		miny = 0;
		width = pfb->width;
		height = pfb->height;
	} else {
		/* round down to multiple of alignment: */
		minx = scissor->minx & ~(gmem_alignw - 1);
		miny = scissor->miny & ~(gmem_alignh - 1);
		width = scissor->maxx - minx;
		height = scissor->maxy - miny;
	}

	bin_w = align(width, gmem_alignw);
	bin_h = align(height, gmem_alignh);

	/* first, find a bin width that satisfies the maximum width
	 * restrictions:
	 */
	while (bin_w > max_width) {
		nbins_x++;
		bin_w = align(width / nbins_x, gmem_alignw);
	}

	if (fd_mesa_debug & FD_DBG_MSGS) {
		debug_printf("binning input: cbuf cpp:");
		for (i = 0; i < pfb->nr_cbufs; i++)
			debug_printf(" %d", cbuf_cpp[i]);
		debug_printf(", zsbuf cpp: %d; %dx%d\n",
				zsbuf_cpp[0], width, height);
	}

	/* then find a bin width/height that satisfies the memory
	 * constraints:
	 */
	while (total_size(cbuf_cpp, zsbuf_cpp, bin_w, bin_h, gmem) > gmem_size) {
		if (bin_w > bin_h) {
			nbins_x++;
			bin_w = align(width / nbins_x, gmem_alignw);
		} else {
			nbins_y++;
			bin_h = align(height / nbins_y, gmem_alignh);
		}
	}

	DBG("using %d bins of size %dx%d", nbins_x*nbins_y, bin_w, bin_h);

	gmem->scissor = *scissor;
	memcpy(gmem->cbuf_cpp, cbuf_cpp, sizeof(cbuf_cpp));
	memcpy(gmem->zsbuf_cpp, zsbuf_cpp, sizeof(zsbuf_cpp));
	gmem->bin_h = bin_h;
	gmem->bin_w = bin_w;
	gmem->nbins_x = nbins_x;
	gmem->nbins_y = nbins_y;
	gmem->minx = minx;
	gmem->miny = miny;
	gmem->width = width;
	gmem->height = height;

	/*
	 * Assign tiles and pipes:
	 *
	 * At some point it might be worth playing with different
	 * strategies and seeing if that makes much impact on
	 * performance.
	 */

#define div_round_up(v, a)  (((v) + (a) - 1) / (a))
	/* figure out number of tiles per pipe: */
	tpp_x = tpp_y = 1;
	while (div_round_up(nbins_y, tpp_y) > 8)
		tpp_y += 2;
	while ((div_round_up(nbins_y, tpp_y) *
			div_round_up(nbins_x, tpp_x)) > 8)
		tpp_x += 1;

	/* configure pipes: */
	xoff = yoff = 0;
	for (i = 0; i < ARRAY_SIZE(ctx->pipe); i++) {
		struct fd_vsc_pipe *pipe = &ctx->pipe[i];

		if (xoff >= nbins_x) {
			xoff = 0;
			yoff += tpp_y;
		}

		if (yoff >= nbins_y) {
			break;
		}

		pipe->x = xoff;
		pipe->y = yoff;
		pipe->w = MIN2(tpp_x, nbins_x - xoff);
		pipe->h = MIN2(tpp_y, nbins_y - yoff);

		xoff += tpp_x;
	}

	for (; i < ARRAY_SIZE(ctx->pipe); i++) {
		struct fd_vsc_pipe *pipe = &ctx->pipe[i];
		pipe->x = pipe->y = pipe->w = pipe->h = 0;
	}

#if 0 /* debug */
	printf("%dx%d ... tpp=%dx%d\n", nbins_x, nbins_y, tpp_x, tpp_y);
	for (i = 0; i < 8; i++) {
		struct fd_vsc_pipe *pipe = &ctx->pipe[i];
		printf("pipe[%d]: %ux%u @ %u,%u\n", i,
				pipe->w, pipe->h, pipe->x, pipe->y);
	}
#endif

	/* configure tiles: */
	t = 0;
	yoff = miny;
	memset(tile_n, 0, sizeof(tile_n));
	for (i = 0; i < nbins_y; i++) {
		uint32_t bw, bh;

		xoff = minx;

		/* clip bin height: */
		bh = MIN2(bin_h, miny + height - yoff);

		for (j = 0; j < nbins_x; j++) {
			struct fd_tile *tile = &ctx->tile[t];
			uint32_t p;

			assert(t < ARRAY_SIZE(ctx->tile));

			/* pipe number: */
			p = ((i / tpp_y) * div_round_up(nbins_x, tpp_x)) + (j / tpp_x);

			/* clip bin width: */
			bw = MIN2(bin_w, minx + width - xoff);

			tile->n = tile_n[p]++;
			tile->p = p;
			tile->bin_w = bw;
			tile->bin_h = bh;
			tile->xoff = xoff;
			tile->yoff = yoff;

			t++;

			xoff += bw;
		}

		yoff += bh;
	}

#if 0 /* debug */
	t = 0;
	for (i = 0; i < nbins_y; i++) {
		for (j = 0; j < nbins_x; j++) {
			struct fd_tile *tile = &ctx->tile[t++];
			printf("|p:%u n:%u|", tile->p, tile->n);
		}
		printf("\n");
	}
#endif
}
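The tile-per-pipe fitting above can be checked in isolation. With illustrative bin counts (6 x 10) and the 8-pipe budget implied by the loops, the search settles on 3x3 tiles per pipe:

#include <assert.h>
#include <stdint.h>

#define div_round_up(v, a)  (((v) + (a) - 1) / (a))

int main(void)
{
	/* Illustrative bin counts, not taken from real GPU state. */
	uint32_t nbins_x = 6, nbins_y = 10;
	uint32_t tpp_x = 1, tpp_y = 1;

	/* Same search as calculate_tiles(): grow the tiles-per-pipe until
	 * at most 8 pipes are needed. */
	while (div_round_up(nbins_y, tpp_y) > 8)
		tpp_y += 2;
	while ((div_round_up(nbins_y, tpp_y) *
			div_round_up(nbins_x, tpp_x)) > 8)
		tpp_x += 1;

	/* 3x3 tiles per pipe => ceil(6/3) * ceil(10/3) = 2 * 4 = 8 pipes. */
	assert(tpp_x == 3 && tpp_y == 3);
	return 0;
}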
Example #22
void tcam::deactivate_all_flows_hw()
{
	for (uint32_t i = 0; i < div_round_up(tcam_table.size(), 32); i++)
		axi_write(active_bits[i], 0);
}