Example No. 1
//! Get size of exported data.
uint grit_xp_size(GritRec *gr)
{
	uint size= 0;
	uint extra=0;

	if(gr->bRiff)
	{
		size= 8+8+8+sizeof(GrfHeader);	// RIFF + GRF + HDR overhead.
		extra= 8;
	}

	if(gr->gfxProcMode == GRIT_EXPORT)
		size += ALIGN4(rec_size(&gr->_gfxRec)) + extra;

	if(gr->mapProcMode == GRIT_EXPORT)
		size += ALIGN4(rec_size(&gr->_mapRec)) + extra;

	if(gr->mapProcMode == GRIT_EXPORT && gr->isMetaTiled())
		size += ALIGN4(rec_size(&gr->_metaRec)) + extra;

	if(gr->palProcMode == GRIT_EXPORT)
		size += ALIGN4(rec_size(&gr->_palRec)) + extra;

	return size;
}
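All of the grit examples on this page lean on the same two helpers, ALIGN4 and rec_size. As a point of reference, here is a minimal sketch of what they are assumed to do, inferred purely from how the examples below use them; the RECORD layout, the macro and the function body are assumptions, not grit's actual definitions.

// Hypothetical reconstruction, inferred from usage in these examples:
// a RECORD holds width (bytes per element) * height (element count) bytes of data,
// and ALIGN4 rounds a byte count up to the next multiple of 4.
typedef struct RECORD
{
	int width;              // size of one element in bytes
	int height;             // number of elements
	unsigned char *data;    // payload
} RECORD;

#define ALIGN4(n)	(((n) + 3) & ~3)

static inline unsigned int rec_size(const RECORD *rec)
{
	return (unsigned int)(rec->width * rec->height);	// total payload in bytes
}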
Example No. 2
/*! \note	Still very unsafe, but I need to redo everything later anyway.
*/
void grs_run(GritShared *grs, GritRec *gr_base)
{
	// Make sure we have shared data.
	if( grs->dib==NULL && grs->palRec.data==NULL)
	{
		lprintf(LOG_WARNING, "No shared data to run with!\n");
		return;
	}

	// Make copy of gr_base for flags, etc
	GritRec *gr= grit_alloc();
	grs_free(gr->shared);

	grit_copy_options(gr, gr_base);
	grit_copy_strings(gr, gr_base);

	// Attach shared data
	gr->shared= grs;
	strrepl(&gr->symName, grs->symName);
	strrepl(&gr->dstPath, grs->dstPath);

	if(grs->dib == NULL)
	{
		// Palette only. Create new dib.
		gr->srcDib= dib_alloc(16, 16, 8, NULL);
		memset(dib_get_pal(gr->srcDib), 0, PAL_MAX*RGB_SIZE);
		memcpy(dib_get_pal(gr->srcDib), grs->palRec.data, rec_size(&grs->palRec));
	}
	else
		gr->srcDib= dib_clone(grs->dib);

	// NOTE: aliasing screws up deletion later; detach manually.
	gr->_dib= gr->srcDib;	

	// Run for shared gr
	do 
	{
		if(!grit_validate(gr))
			break;

		// Local forward declarations; the functions are defined elsewhere in grit.
		bool grit_prep_gfx(GritRec *gr);
		bool grit_prep_shared_pal(GritRec *gr);

		if(gr->gfxProcMode != GRIT_EXCLUDE)
			grit_prep_gfx(gr);
		
		if(gr->palProcMode != GRIT_EXCLUDE)
			grit_prep_shared_pal(gr);

		if(gr->bExport)
			grit_export(gr);

	} while(0);

	gr->_dib= NULL;

	// Detach shared data and delete gr
	gr->shared= NULL;
	grit_free(gr);
}
Example No. 3
/*!	Converts palette to 16bit GBA colors, compresses it and fills in
	\a gr._palRec.
*/
bool grit_prep_pal(GritRec *gr)
{
    lprintf(LOG_STATUS, "Palette preparation.\n");

    int ii, nclrs, palS;
    COLOR *palOut;
    RGBQUAD *palIn;

    nclrs= gr->palEnd - gr->palStart;
    if(dib_get_nclrs(gr->_dib) < nclrs && nclrs != 0)
        nclrs= dib_get_nclrs(gr->_dib);

    palS= nclrs*sizeof(COLOR);
    palOut= (COLOR*)malloc(palS);
    palIn= &dib_get_pal(gr->_dib)[gr->palStart];

    for(ii=0; ii<nclrs; ii++)
        palOut[ii]= RGB16(palIn[ii].rgbBlue, palIn[ii].rgbGreen, palIn[ii].rgbRed);

    RECORD rec= { 2, palS/2, (BYTE*)palOut };

    if( BYTE_ORDER == BIG_ENDIAN )
        data_byte_rev(rec.data, rec.data, rec_size(&rec), 2);

    // Attach and compress palette
    grit_compress(&rec, &rec, gr->palCompression);
    rec_alias(&gr->_palRec, &rec);

    lprintf(LOG_STATUS, "Palette preparation complete.\n");
    return true;
}
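For context: GBA palette entries are 15-bit BGR values, 5 bits per channel with red in the low bits. RGB16 above is grit's own macro; the helper below is only an illustrative stand-in for the packing it performs, and its name and argument order are assumptions.

// Hypothetical packer for a GBA color word: x bbbbb ggggg rrrrr.
static inline unsigned short gba_color(unsigned r8, unsigned g8, unsigned b8)
{
	return (unsigned short)( ((r8 >> 3) & 0x1F)
	                       | (((g8 >> 3) & 0x1F) <<  5)
	                       | (((b8 >> 3) & 0x1F) << 10) );
}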
Example No. 4
bool
deq_rec::rcv_decode(rec_hdr_t h, std::ifstream* ifsp, std::size_t& rec_offs)
{
    if (rec_offs == 0)
    {
        //_deq_hdr.hdr_copy(h);
        ::rec_hdr_copy(&_deq_hdr._rhdr, &h);
        ifsp->read((char*)&_deq_hdr._deq_rid, sizeof(uint64_t));
        ifsp->read((char*)&_deq_hdr._xidsize, sizeof(std::size_t));
#if defined(JRNL_32_BIT)
        ifsp->ignore(sizeof(uint32_t)); // _filler0
#endif
        rec_offs = sizeof(_deq_hdr);
        // Read header, allocate (if req'd) for xid
        if (_deq_hdr._xidsize)
        {
            _buff = std::malloc(_deq_hdr._xidsize);
            MALLOC_CHK(_buff, "_buff", "deq_rec", "rcv_decode");
        }
    }
    if (rec_offs < sizeof(_deq_hdr) + _deq_hdr._xidsize)
    {
        // Read xid (or continue reading xid)
        std::size_t offs = rec_offs - sizeof(_deq_hdr);
        ifsp->read((char*)_buff + offs, _deq_hdr._xidsize - offs);
        std::size_t size_read = ifsp->gcount();
        rec_offs += size_read;
        if (size_read < _deq_hdr._xidsize - offs)
        {
            assert(ifsp->eof());
            // As we may have read past eof, turn off fail bit
            ifsp->clear(ifsp->rdstate()&(~std::ifstream::failbit));
            assert(!ifsp->fail() && !ifsp->bad());
            return false;
        }
    }
    if (rec_offs < sizeof(_deq_hdr) +
            (_deq_hdr._xidsize ? _deq_hdr._xidsize + sizeof(rec_tail_t) : 0))
    {
        // Read tail (or continue reading tail)
        std::size_t offs = rec_offs - sizeof(_deq_hdr) - _deq_hdr._xidsize;
        ifsp->read((char*)&_deq_tail + offs, sizeof(rec_tail_t) - offs);
        std::size_t size_read = ifsp->gcount();
        rec_offs += size_read;
        if (size_read < sizeof(rec_tail_t) - offs)
        {
            assert(ifsp->eof());
            // As we may have read past eof, turn off fail bit
            ifsp->clear(ifsp->rdstate()&(~std::ifstream::failbit));
            assert(!ifsp->fail() && !ifsp->bad());
            return false;
        }
    }
    ifsp->ignore(rec_size_dblks() * QLS_DBLK_SIZE_BYTES - rec_size());
    if (_deq_hdr._xidsize)
        chk_tail(); // Throws if tail invalid or record incomplete
    assert(!ifsp->fail() && !ifsp->bad());
    return true;
}
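The ignore() call near the end skips the padding between the record's true byte size and that size rounded up to whole data blocks. A plausible definition of the rounding, given here only as an assumption about what size_dblks()/rec_size_dblks() compute:

#include <cstddef>

// Hypothetical helper: number of fixed-size data blocks needed to hold s bytes.
// (QLS_DBLK_SIZE_BYTES plays the role of dblk_size in the journal code.)
inline std::size_t size_dblks_sketch(std::size_t s, std::size_t dblk_size)
{
    return (s + dblk_size - 1) / dblk_size;     // round up to whole blocks
}
// e.g. size_dblks_sketch(100, 128) == 1, so a 100-byte record is padded out to 128 bytes.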
Example No. 5
/*!	Prepares the work dib for export, i.e. converts to the final
	bitdepth, compresses the data and fills in \a gr._gfxRec.
*/
bool grit_prep_gfx(GritRec *gr)
{
    lprintf(LOG_STATUS, "Graphics preparation.\n");

    int srcB= dib_get_bpp(gr->_dib);	// should be 8 or 16 by now
    int srcP= dib_get_pitch(gr->_dib);
    int srcS= dib_get_size_img(gr->_dib);
    BYTE *srcD= dib_get_img(gr->_dib);

    int dstB= gr->gfxBpp;
    // # dst bytes, with # src pixels as 'width'
    int dstS= dib_align(srcS*8/srcB, dstB);
    dstS= ALIGN4(dstS);
    BYTE *dstD= (BYTE*)malloc(dstS);
    if(dstD == NULL)
    {
        lprintf(LOG_ERROR, "  Can't allocate graphics data.\n");
        return false;
    }

    // Convert to final bitdepth
    // NOTE: do not use dib_convert here, because of potential
    //   problems with padding
    // NOTE: we're already at 8 or 16 bpp here, with 16 bpp already
    //   accounted for. Only have to do 8->1,2,4
    // TODO: offset
    if(srcB == 8 && srcB != dstB)
    {
        lprintf(LOG_STATUS, "  Bitpacking: %d -> %d.\n", srcB, dstB);
        data_bit_pack(dstD, srcD, srcS, srcB, dstB, 0);
    }
    else
        memcpy(dstD, srcD, dstS);

    RECORD rec= { 1, dstS, dstD };

    if( BYTE_ORDER == BIG_ENDIAN && gr->gfxBpp > 8 )
        data_byte_rev(rec.data, rec.data, rec_size(&rec), gr->gfxBpp/8);

    // attach and compress graphics
    grit_compress(&rec, &rec, gr->gfxCompression);
    rec_alias(&gr->_gfxRec, &rec);

    lprintf(LOG_STATUS, "Graphics preparation complete.\n");
    return true;
}
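The only depth conversion left at this point is packing 8 bpp pixels down to 1, 2 or 4 bpp. As an illustration of the 8-to-4 case that data_bit_pack is assumed to cover here (the real function also handles the other depths and a start offset):

// Hypothetical 8->4 bpp packer: two source bytes become one destination byte,
// left pixel in the low nybble (little-endian pixel order, as the GBA expects).
static void pack_8_to_4(unsigned char *dst, const unsigned char *src, int srcLen)
{
	for(int ii = 0; ii < srcLen/2; ii++)
		dst[ii]= (unsigned char)((src[2*ii] & 0x0F) | ((src[2*ii+1] & 0x0F) << 4));
}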
Example No. 6
/*!
	\note Binary files cannot be appended, and are separate files.
*/
bool grit_xp_bin(GritRec *gr)
{
	lprintf(LOG_STATUS, "Export to binary files.\n");		

	char fpath[MAXPATHLEN], str[MAXPATHLEN];
	const char *fmode= gr->bAppend ? "a+b" : "wb";
	const char *exts[4]= {"img.bin", "map.bin", "meta.bin", "pal.bin" };

	path_repl_ext(str, gr->dstPath, NULL, MAXPATHLEN);
	
	DataItem item;
	for(eint id=GRIT_ITEM_GFX; id<GRIT_ITEM_MAX; id++)
	{
		grit_prep_item(gr, id, &item);
		if(item.procMode == GRIT_EXPORT)
		{
			path_repl_ext(fpath, str, exts[id], MAXPATHLEN);
			xp_data_bin(fpath, item.pRec->data, rec_size(item.pRec), fmode);
		}
	}

	return true;
}
Example No. 7
// Initializes InBuf, InSize; allocates OutBuf.
// The rest is done in CompressLZ77.
uint lz77gba_compress(RECORD *dst, const RECORD *src)
{
	// Fail on the obvious
	if(src==NULL || src->data==NULL || dst==NULL)
		return 0;
	
	InSize= rec_size(src);
	OutSize = InSize + InSize/8 + 16;
	OutBuf = (BYTE*)malloc(OutSize);
	if(OutBuf == NULL)
		return 0;
	InBuf= (BYTE*)src->data;

	CompressLZ77();
	OutSize= ALIGN4(OutSize);

	u8 *dstD= (u8*)malloc(OutSize);
	if(dstD == NULL)
	{
		free(OutBuf);
		return 0;
	}
	memcpy(dstD, OutBuf, OutSize);
	rec_attach(dst, dstD, 1, OutSize);

	free(OutBuf);

	return OutSize;
}
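A minimal caller-side sketch, assuming the RECORD layout sketched under Example No. 1; imgData and imgSize are placeholders for a raw graphics buffer, and rec_attach is taken to adopt ownership of the freshly allocated copy.

// Hypothetical usage of the compressor.
RECORD src= { 1, imgSize, imgData };	// 1-byte elements, imgSize of them
RECORD dst= { 0, 0, NULL };

if(lz77gba_compress(&dst, &src) == 0)
{
	// Compression failed; dst was left untouched.
}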
Example No. 8
//! Number of values in one record, i.e. one slice along the leading (record) dimension.
long NcVar::rec_size(void) {
    return rec_size(get_dim(0));
}
Example No. 9
NcValues* NcVar::get_rec(NcDim* rdim, long slice)
{
    int idx = dim_to_index(rdim);
    long size = num_dims();
    size_t* start = new size_t[size];
    long* startl = new long[size];
    // Initialize every dimension (index 0 included); the record index is set just below.
    for (int i=0; i < size ; i++) {
	start[i] = 0;
	startl[i] = 0;
    }
    start[idx] = slice;
    startl[idx] = slice;
    NcBool result = set_cur(startl);
    if (! result ) {
	delete [] start;
	delete [] startl;
	return 0;
    }

    long* edgel = edges();
    size_t* edge = new size_t[size];
    for (int i=0; i < size ; i++) {
	edge[i] = edgel[i];
    }
    edge[idx] = 1;
    edgel[idx] = 1;
    NcValues* valp = get_space(rec_size(rdim));
    int status;
    switch (type()) {
    case ncFloat:
	status = NcError::set_err(
				  nc_get_vara_float(the_file->id(), the_id, start, edge, 
				   (float *)valp->base())
				  );
	break;
    case ncDouble:
	status = NcError::set_err(
				  nc_get_vara_double(the_file->id(), the_id, start, edge, 
				    (double *)valp->base())
				  );
	break;
    case ncInt:
	status = NcError::set_err(
				  nc_get_vara_int(the_file->id(), the_id, start, edge, 
				 (int *)valp->base())
				  );
	break;
    case ncShort:
	status = NcError::set_err(
				  nc_get_vara_short(the_file->id(), the_id, start, edge, 
				   (short *)valp->base())
				  );
	break;
    case ncByte:
	status = NcError::set_err(
				  nc_get_vara_schar(the_file->id(), the_id, start, edge, 
				   (signed char *)valp->base())
				  );
	break;
    case ncChar:
	status = NcError::set_err(
				  nc_get_vara_text(the_file->id(), the_id, start, edge, 
				   (char *)valp->base())
				  );
	break;
    case ncNoType:
    default:
	// Unknown type: release all temporaries before bailing out.
	delete [] start;
	delete [] startl;
	delete [] edge;
	delete [] edgel;
	delete valp;
	return 0;
    }
    delete [] start;
    delete [] startl;
    delete [] edge;
    delete [] edgel;
    if (status != NC_NOERR) {
	delete valp;
	return 0;
    }
    return valp;
} 
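A short usage sketch against the legacy netCDF C++ API; the file and variable names are made up for illustration.

// Hypothetical usage: read record #5 of a variable whose leading dimension
// is the record dimension.
NcFile nc("data.nc");
NcVar *var = nc.get_var("temperature");
if (var) {
    NcValues *vals = var->get_rec(var->get_dim(0), 5);
    if (vals) {
	// vals->num() values are available starting at vals->base().
	delete vals;
    }
}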
Example No. 10
/*!	\param dib	DIB to reduce the palette of. Must be paletted. Pixels 
		will be rearranged to match the palette.
	\param extPal	External palette record. \a dib will use this 
		and its own palette. Can be NULL. The new reduced palette goes here too.
	\return	Number of reduced colors, or 0 if not 8bpp. 
	\note	The order of colors is the order of appearance, except for the 
		first one.
*/
int dib_pal_reduce(CLDIB *dib, RECORD *extPal)
{
	// Only for 8bpp (for now)
	if(dib == NULL || dib_get_bpp(dib) != 8)
		return 0;

	int ii, jj, kk, ix, iy;

	int dibW, dibH, dibP;
	dib_get_attr(dib, &dibW, &dibH, NULL, &dibP);

	BYTE *dibD= dib_get_img(dib);

	// Get palette histogram
	int histo[256];

	memset(histo, 0, sizeof(histo));
	for(iy=0; iy<dibH; iy++)
		for(ix=0; ix<dibW; ix++)
			histo[dibD[iy*dibP+ix]]++;

	// Allocate room for new palette and init with ext pal
	// NOTE: extPal is assumed reduced!
	// NOTE: double-size for rdxPal for worst-case scenario.
	// NOTE: the *Clr things are just to make comparisons easier.
	//		 pointers ftw!

	int count;
	RGBQUAD *rdxPal= (RGBQUAD*)malloc(512*RGB_SIZE);
	COLORREF *rdxClr= (COLORREF*)rdxPal, *dibClr= (COLORREF*)dib_get_pal(dib);

	memset(rdxPal, 0, 512*RGB_SIZE);
	if(extPal != NULL && extPal->data != NULL)
	{
		memcpy(rdxPal, extPal->data, rec_size(extPal));
		count= extPal->height;
	}
	else
	{
		rdxClr[0]= dibClr[0];
		count= 1;
	}

	// PONDER: always keep index 0 ?

	// Create reduced palette and prep tables for pixel conversion.
	DWORD srcIdx[PAL_MAX], dstIdx[PAL_MAX];

	kk=0;
	for(ii=0; ii<PAL_MAX; ii++)
	{
		if(histo[ii])
		{
			for(jj=0; jj<count; jj++)
				if(rdxClr[jj] == dibClr[ii])
					break;
			// No match found: add color to table
			if(jj == count)
			{
				rdxClr[jj]= dibClr[ii];
				count++;
			}
			srcIdx[kk]= jj;
			dstIdx[kk]= ii;
			kk++;
		}
	}

	// PONDER: what *should* happen if count > PAL_MAX ?
	// Fail, trunc or re-quantize?

	//  Update palette and remap pixels
	memcpy(dibClr, rdxClr, PAL_MAX*RGB_SIZE);
	dib_pixel_replace(dib, dstIdx, srcIdx, kk);

	// Update rdxPal's data
	if(extPal)
	{
		extPal->width= RGB_SIZE;
		extPal->height= count;
		free(extPal->data);

		extPal->data= (BYTE*)malloc(count*RGB_SIZE);
		memcpy(extPal->data, rdxClr, count*RGB_SIZE);
	}

	return count;
}
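A small usage sketch (RECORD as sketched under Example No. 1; myDib is a placeholder for an 8 bpp CLDIB loaded elsewhere):

// Hypothetical usage: reduce an 8 bpp dib's palette and keep the reduced
// palette in a record that can be shared with other images.
RECORD pal= { 0, 0, NULL };
int nclrs= dib_pal_reduce(myDib, &pal);
if(nclrs == 0)
{
	// Not an 8 bpp dib (or no dib at all); pal is unchanged.
}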
Example No. 11
uint32_t
deq_rec::encode(void* wptr, uint32_t rec_offs_dblks, uint32_t max_size_dblks)
{
    assert(wptr != 0);
    assert(max_size_dblks > 0);
    if (_xidp == 0)
        assert(_deq_hdr._xidsize == 0);

    std::size_t rec_offs = rec_offs_dblks * QLS_DBLK_SIZE_BYTES;
    std::size_t rem = max_size_dblks * QLS_DBLK_SIZE_BYTES;
    std::size_t wr_cnt = 0;
    if (rec_offs_dblks) // Continuation of split dequeue record (over 2 or more pages)
    {
        if (size_dblks(rec_size()) - rec_offs_dblks > max_size_dblks) // Further split required
        {
            rec_offs -= sizeof(_deq_hdr);
            std::size_t wsize = _deq_hdr._xidsize > rec_offs ? _deq_hdr._xidsize - rec_offs : 0;
            std::size_t wsize2 = wsize;
            if (wsize)
            {
                if (wsize > rem)
                    wsize = rem;
                std::memcpy(wptr, (const char*)_xidp + rec_offs, wsize);
                wr_cnt += wsize;
                rem -= wsize;
            }
            rec_offs -= _deq_hdr._xidsize - wsize2;
            if (rem)
            {
                wsize = sizeof(_deq_tail) > rec_offs ? sizeof(_deq_tail) - rec_offs : 0;
                wsize2 = wsize;
                if (wsize)
                {
                    if (wsize > rem)
                        wsize = rem;
                    std::memcpy((char*)wptr + wr_cnt, (char*)&_deq_tail + rec_offs, wsize);
                    wr_cnt += wsize;
                    rem -= wsize;
                }
                rec_offs -= sizeof(_deq_tail) - wsize2;
            }
            assert(rem == 0);
            assert(rec_offs == 0);
        }
        else // No further split required
        {
            rec_offs -= sizeof(_deq_hdr);
            std::size_t wsize = _deq_hdr._xidsize > rec_offs ? _deq_hdr._xidsize - rec_offs : 0;
            if (wsize)
            {
                std::memcpy(wptr, (const char*)_xidp + rec_offs, wsize);
                wr_cnt += wsize;
            }
            rec_offs -= _deq_hdr._xidsize - wsize;
            wsize = sizeof(_deq_tail) > rec_offs ? sizeof(_deq_tail) - rec_offs : 0;
            if (wsize)
            {
                std::memcpy((char*)wptr + wr_cnt, (char*)&_deq_tail + rec_offs, wsize);
                wr_cnt += wsize;
#ifdef QLS_CLEAN
                std::size_t rec_offs = rec_offs_dblks * QLS_DBLK_SIZE_BYTES;
                std::size_t dblk_rec_size = size_dblks(rec_size() - rec_offs) * QLS_DBLK_SIZE_BYTES;
                std::memset((char*)wptr + wr_cnt, QLS_CLEAN_CHAR, dblk_rec_size - wr_cnt);
#endif
            }
            rec_offs -= sizeof(_deq_tail) - wsize;
            assert(rec_offs == 0);
        }
    }
    else // Start at beginning of data record
    {
        // Assumption: the header will always fit into the first dblk
        std::memcpy(wptr, (void*)&_deq_hdr, sizeof(_deq_hdr));
        wr_cnt = sizeof(_deq_hdr);
        if (size_dblks(rec_size()) > max_size_dblks) // Split required - can only occur with xid
        {
            std::size_t wsize;
            rem -= sizeof(_deq_hdr);
            if (rem)
            {
                wsize = rem >= _deq_hdr._xidsize ? _deq_hdr._xidsize : rem;
                std::memcpy((char*)wptr + wr_cnt, _xidp, wsize);
                wr_cnt += wsize;
                rem -= wsize;
            }
            if (rem)
            {
                wsize = rem >= sizeof(_deq_tail) ? sizeof(_deq_tail) : rem;
                std::memcpy((char*)wptr + wr_cnt, (void*)&_deq_tail, wsize);
                wr_cnt += wsize;
                rem -= wsize;
            }
            assert(rem == 0);
        }
        else // No split required
        {
            if (_deq_hdr._xidsize)
            {
                std::memcpy((char*)wptr + wr_cnt, _xidp, _deq_hdr._xidsize);
                wr_cnt += _deq_hdr._xidsize;
                std::memcpy((char*)wptr + wr_cnt, (void*)&_deq_tail, sizeof(_deq_tail));
                wr_cnt += sizeof(_deq_tail);
            }
#ifdef QLS_CLEAN
            std::size_t dblk_rec_size = size_dblks(rec_size()) * QLS_DBLK_SIZE_BYTES;
            std::memset((char*)wptr + wr_cnt, QLS_CLEAN_CHAR, dblk_rec_size - wr_cnt);
#endif
        }
    }
    return size_dblks(wr_cnt);
}
Example No. 12
/*!	Does map creation and layout, tileset reduction and map
	compression. Updates \a gr._dib with the new tileset, and fills
	in \a gr._mapRec and \a gr._metaRec.
	\note The work bitmap must be 8 bpp here, and already rearranged
	to a tile strip, which are the results of \c grit_prep_work_dib()
	and \c grit_prep_tiles(), respectively.
*/
bool grit_prep_map(GritRec *gr)
{
    if(dib_get_bpp(gr->_dib) < 8)
    {
        lprintf(LOG_ERROR, "  Can't map for bpp<8.\n");
        return false;
    }

    CLDIB *workDib= gr->_dib;

    // --- if SBB-mode, tile to 256x256. ---
    if(gr->mapLayout == GRIT_MAP_REG)
    {
        lprintf(LOG_STATUS, "  tiling to Screenblock size (256x256p).\n");

        int blockW= 256, blockH= 256;
        if(gr->bColMajor)
        {
            blockW= dib_get_width(workDib);
            blockH= dib_get_height(workDib);
            dib_redim(workDib, 256, blockH, 0);
        }

        if(!dib_redim(workDib, blockW, blockH, 0))
        {
            lprintf(LOG_ERROR, "  SBB tiling failed.\n");
            return false;
        }
    }

    ETmapFlags flags;
    Tilemap *metaMap= NULL, *map= NULL;
    RECORD metaRec= { 0, 0, NULL }, mapRec= { 0, 0, NULL };
    MapselFormat mf;

    CLDIB *extDib= NULL;
    int tileN= 0;
    uint extW= 0, extH= 0, tileW= gr->tileWidth, tileH= gr->tileHeight;
    uint mtileW= gr->mtileWidth(), mtileH= gr->mtileHeight();

    if(gr->gfxIsShared)
    {
        extDib= gr->shared->dib;
        extW= extDib ? dib_get_width(extDib) : 0;
        extH= extDib ? dib_get_height(extDib) : 0;
    }

    // --- If metatiled, convert to metatiles. ---
    if(gr->isMetaTiled())
    {
        lprintf(LOG_STATUS, "  Performing metatile reduction: tiles%s\n",
                (gr->mapRedux & GRIT_META_PAL ? ", palette" : "") );

        flags  = TMAP_DEFAULT;
        if(gr->mapRedux & GRIT_META_PAL)
            flags |= TMAP_PBANK;
        if(gr->bColMajor)
            flags |= TMAP_COLMAJOR;

        metaMap= tmap_alloc();
        if(extW == mtileW)
        {
            lprintf(LOG_STATUS, "  Using external metatileset.\n");
            tmap_init_from_dib(metaMap, workDib, mtileW, mtileH, flags, extDib);
        }
        else
            tmap_init_from_dib(metaMap, workDib, mtileW, mtileH, flags, NULL);

        mf= c_mapselGbaText;
        tileN= tmap_get_tilecount(metaMap);
        if(tileN >= (1<<mf.idLen))
            lprintf(LOG_WARNING, "  Number of metatiles (%d) exceeds field limit (%d).\n",
                    tileN, 1<<mf.idLen);

        tmap_pack(metaMap, &metaRec, &mf);
        if( BYTE_ORDER == BIG_ENDIAN && mf.bitDepth > 8 )
            data_byte_rev(metaRec.data, metaRec.data, rec_size(&metaRec), mf.bitDepth/8);

        // Make temp copy for base-tiling and try to avoid aliasing pointers.
        // Gawd, I hate manual mem-mgt >_<.
        dib_free(workDib);
        if(gr->bColMajor)
            workDib= dib_redim_copy(metaMap->tiles, tileN*mtileW, mtileH, 0);
        else
            workDib= dib_clone(metaMap->tiles);
    }

    // ---Convert to base tiles. ---
    flags = 0;
    if(gr->mapRedux & GRIT_RDX_TILE)
        flags |= TMAP_TILE;
    if(gr->mapRedux & GRIT_RDX_FLIP)
        flags |= TMAP_FLIP;
    if(gr->mapRedux & GRIT_RDX_PBANK)
        flags |= TMAP_PBANK;
    if(gr->bColMajor)
        flags |= TMAP_COLMAJOR;

    lprintf(LOG_STATUS, "  Performing tile reduction: %s%s%s\n",
            (flags & TMAP_TILE  ? "unique tiles; " : ""),
            (flags & TMAP_FLIP  ? "flip; " : ""),
            (flags & TMAP_PBANK ? "palswap; " : ""));

    map= tmap_alloc();
    if(extW == tileW)
    {
        lprintf(LOG_STATUS, "  Using external tileset.\n");
        tmap_init_from_dib(map, workDib, tileW, tileH, flags, extDib);
    }
    else
        tmap_init_from_dib(map, workDib, tileW, tileH, flags, NULL);

    // --- Pack/Reformat and compress ---
    //# TODO: allow custom mapsel format.
    mf= gr->msFormat;

    tileN= tmap_get_tilecount(map);
    if(tileN >= (1<<mf.idLen))
        lprintf(LOG_WARNING, "  Number of tiles (%d) exceeds field limit (%d).\n",
                tileN, 1<<mf.idLen);

    tmap_pack(map, &mapRec, &mf);

    if( BYTE_ORDER == BIG_ENDIAN && mf.bitDepth > 8 )
        data_byte_rev(mapRec.data, mapRec.data, rec_size(&mapRec), mf.bitDepth/8);

    grit_compress(&mapRec, &mapRec, gr->mapCompression);

    // --- Cleanup ---

    // Make extra copy for external tile dib.
    if(gr->gfxIsShared)
    {
        dib_free(gr->shared->dib);

        // Use metatileset for external, unless the old external was a
        // base tileset.
        if(gr->isMetaTiled() && extW != tileW)
            gr->shared->dib= dib_clone(metaMap->tiles);
        else
            gr->shared->dib= dib_clone(map->tiles);
    }

    // Attach tileset for later processing.
    gr->_dib= tmap_detach_tiles(map);

    rec_alias(&gr->_mapRec, &mapRec);
    rec_alias(&gr->_metaRec, &metaRec);

    tmap_free(map);
    tmap_free(metaMap);

    lprintf(LOG_STATUS, "Map preparation complete.\n");
    return true;
}
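The c_mapselGbaText format used above corresponds to the GBA's text-background screen entry, a fixed hardware layout: 10 bits of tile index, horizontal and vertical flip bits, and a 4-bit palette bank. The packer below is purely illustrative; the actual MapselFormat fields in grit may be named and combined differently.

// GBA text-mode screen entry: bits 0-9 tile id, bit 10 hflip, bit 11 vflip,
// bits 12-15 palette bank. (Illustrative helper, not part of grit.)
static inline unsigned short gba_screen_entry(unsigned tileId, int hflip, int vflip, unsigned palBank)
{
	return (unsigned short)( (tileId & 0x3FF)
	                       | (hflip ? 1u << 10 : 0u)
	                       | (vflip ? 1u << 11 : 0u)
	                       | ((palBank & 0xF) << 12) );
}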
Example No. 13
//! Export to GNU Assembly
// Yes, it is almost identical to the C exporter. Maybe I'll merge the
// two later.
bool grit_xp_gas(GritRec *gr)
{
	char str[MAXPATHLEN];
	char tag[MAXPATHLEN], fpath[MAXPATHLEN], tmppath[MAXPATHLEN];
	long pos= -1;
	FILE *fin, *fout;
	bool bAppend= gr->bAppend;

	// Prep begin tag
	sprintf(tag, "@{{BLOCK(%s)",gr->symName);

	// Open 'output' file
	strcpy(fpath, gr->dstPath);

	lprintf(LOG_STATUS, "Export to GNU asm: %s into %s .\n", gr->symName, fpath);		

	// File doesn't exist -> write-mode only
	if(!file_exists(fpath))
		bAppend=false;

	if(bAppend)
	{
		// Open temp and input file
		strcpy(tmppath, "gritXXXXXX");	// mkstemp requires a template ending in XXXXXX
		mkstemp(tmppath);
		if( (fout=fopen(tmppath, "w")) == NULL)
			return false;

		fin= fopen(fpath, "r");
		pos= file_find_tag(fout, fin, tag);
	}
	else
		fout= fopen(fpath, "w");

	// Add blank line before new block
	if(pos == -1)
		fputc('\n', fout);

	// --- Start grit-block ---

	fprintf(fout, "%s\n\n", tag);

	grit_preface(gr, fout, "@");


	if(gr->bRiff)	// Single GRF item
	{
		chunk_t *chunk= grit_prep_grf(gr);
		strcat(strcpy(str, gr->symName), "Grf");

		xp_array_gas(fout, str, chunk, chunk->size+8, 
			grit_type_size(gr->gfxDataType));

		chunk_free(chunk);
	}
	else			// Separate items
	{
		DataItem item;
		for(eint id=GRIT_ITEM_GFX; id<GRIT_ITEM_MAX; id++)
		{
			grit_prep_item(gr, id, &item);
			if(item.procMode == GRIT_EXPORT)
				xp_array_gas(fout, item.name, item.pRec->data, rec_size(item.pRec), 
					grit_type_size(item.dataType));					
		}
	}

	sprintf(tag, "@}}BLOCK(%s)",gr->symName);
	fprintf(fout, "%s\n", tag);

	// --- End grit-block ---

	if(bAppend)
	{
		// Skip till end tag and copy rest
		file_find_tag(NULL, fin, tag);
		file_copy(fout, fin, -1);

		// close files and rename
		fclose(fout);
		fclose(fin);
		remove(fpath);
		rename(tmppath, fpath);
	}
	else
	{
		fclose(fout);		// close files
	}

	return true;
}