Example #1
void Gobby::PreferencesDialog::Security::on_credentials_changed()
{
	set_file_error(m_error_trust_file,
	               m_cert_manager.get_trust_error());

	if(m_key_generator_handle.get() == NULL)
	{
		set_file_error(m_error_key_file,
		               m_cert_manager.get_key_error());
	}
	else
	{
		m_error_key_file.set_text(
			_("2048-bit RSA private key is being "
			  "generated, please wait..."));
		m_error_key_file.show();
	}

	if(m_cert_generator_handle.get() == NULL)
		set_file_error(m_error_cert_file,
		               m_cert_manager.get_certificate_error());

	const bool operation_in_progress =
		m_key_generator_handle.get() != NULL ||
		m_cert_generator_handle.get() != NULL;

	m_btn_key_file.set_sensitive(!operation_in_progress);
	m_btn_key_file_create.set_sensitive(!operation_in_progress);
	m_btn_cert_file.set_sensitive(!operation_in_progress);

	m_btn_cert_file_create.set_sensitive(
		!operation_in_progress &&
		m_cert_manager.get_private_key() != NULL);
}
Example #2
/*
   read_directory(archive) -> files dict (new reference)

   Given a path to a Zip archive, build a dict, mapping file names
   (local to the archive, using SEP as a separator) to toc entries.

   A toc_entry is a tuple:

   (__file__,      # value to use for __file__, available for all files,
                   # encoded to the filesystem encoding
    compress,      # compression kind; 0 for uncompressed
    data_size,     # size of compressed data on disk
    file_size,     # size of decompressed data
    file_offset,   # offset of file header from start of archive
    time,          # mod time of file (in dos format)
    date,          # mod date of file (in dos format)
    crc,           # crc checksum of the data
   )

   Directories can be recognized by the trailing SEP in the name;
   their data_size and file_offset are 0.
*/
static PyObject *
read_directory(PyObject *archive)
{
    PyObject *files = NULL;
    FILE *fp;
    unsigned short flags, compress, time, date, name_size;
    unsigned int crc, data_size, file_size, header_size, header_offset;
    unsigned long file_offset, header_position;
    unsigned long arc_offset;  /* Absolute offset to start of the zip-archive. */
    unsigned int count, i;
    unsigned char buffer[46];
    char name[MAXPATHLEN + 5];
    PyObject *nameobj = NULL;
    PyObject *path;
    const char *charset;
    int bootstrap;
    const char *errmsg = NULL;

    fp = _Py_fopen_obj(archive, "rb");
    if (fp == NULL) {
        if (PyErr_ExceptionMatches(PyExc_OSError)) {
            _PyErr_FormatFromCause(ZipImportError,
                                   "can't open Zip file: %R", archive);
        }
        return NULL;
    }

    if (fseek(fp, -22, SEEK_END) == -1) {
        goto file_error;
    }
    header_position = (unsigned long)ftell(fp);
    if (header_position == (unsigned long)-1) {
        goto file_error;
    }
    assert(header_position <= (unsigned long)LONG_MAX);
    if (fread(buffer, 1, 22, fp) != 22) {
        goto file_error;
    }
    if (get_uint32(buffer) != 0x06054B50u) {
        /* Bad: End of Central Dir signature */
        errmsg = "not a Zip file";
        goto invalid_header;
    }

    header_size = get_uint32(buffer + 12);
    header_offset = get_uint32(buffer + 16);
    if (header_position < header_size) {
        errmsg = "bad central directory size";
        goto invalid_header;
    }
    if (header_position < header_offset) {
        errmsg = "bad central directory offset";
        goto invalid_header;
    }
    if (header_position - header_size < header_offset) {
        errmsg = "bad central directory size or offset";
        goto invalid_header;
    }
    header_position -= header_size;
    arc_offset = header_position - header_offset;

    files = PyDict_New();
    if (files == NULL) {
        goto error;
    }
    /* Start of Central Directory */
    count = 0;
    if (fseek(fp, (long)header_position, 0) == -1) {
        goto file_error;
    }
    for (;;) {
        PyObject *t;
        size_t n;
        int err;

        n = fread(buffer, 1, 46, fp);
        if (n < 4) {
            goto eof_error;
        }
        /* Start of file header */
        if (get_uint32(buffer) != 0x02014B50u) {
            break;              /* Bad: Central Dir File Header */
        }
        if (n != 46) {
            goto eof_error;
        }
        flags = get_uint16(buffer + 8);
        compress = get_uint16(buffer + 10);
        time = get_uint16(buffer + 12);
        date = get_uint16(buffer + 14);
        crc = get_uint32(buffer + 16);
        data_size = get_uint32(buffer + 20);
        file_size = get_uint32(buffer + 24);
        name_size = get_uint16(buffer + 28);
        header_size = (unsigned int)name_size +
           get_uint16(buffer + 30) /* extra field */ +
           get_uint16(buffer + 32) /* comment */;

        file_offset = get_uint32(buffer + 42);
        if (file_offset > header_offset) {
            errmsg = "bad local header offset";
            goto invalid_header;
        }
        file_offset += arc_offset;

        if (name_size > MAXPATHLEN) {
            name_size = MAXPATHLEN;
        }
        if (fread(name, 1, name_size, fp) != name_size) {
            goto file_error;
        }
        name[name_size] = '\0';  /* Add terminating null byte */
#if SEP != '/'
        for (i = 0; i < name_size; i++) {
            if (name[i] == '/') {
                name[i] = SEP;
            }
        }
#endif
        /* Skip the rest of the header.
         * On Windows, calling fseek to skip over the fields we don't use is
         * slower than reading the data because fseek flushes stdio's
         * internal buffers.  See issue #8745. */
        assert(header_size <= 3*0xFFFFu);
        for (i = name_size; i < header_size; i++) {
            if (getc(fp) == EOF) {
                goto file_error;
            }
        }

        bootstrap = 0;
        if (flags & 0x0800) {
            charset = "utf-8";
        }
        else if (!PyThreadState_GET()->interp->codecs_initialized) {
            /* During bootstrap, we may need to load the encodings
               package from a ZIP file. But the cp437 encoding is implemented
               in Python in the encodings package.

               Break out of this dependency by assuming that the path to
               the encodings module is ASCII-only. */
            charset = "ascii";
            bootstrap = 1;
        }
        else {
            charset = "cp437";
        }
        nameobj = PyUnicode_Decode(name, name_size, charset, NULL);
        if (nameobj == NULL) {
            if (bootstrap) {
                PyErr_Format(PyExc_NotImplementedError,
                    "bootstrap issue: python%i%i.zip contains non-ASCII "
                    "filenames without the unicode flag",
                    PY_MAJOR_VERSION, PY_MINOR_VERSION);
            }
            goto error;
        }
        if (PyUnicode_READY(nameobj) == -1) {
            goto error;
        }
        path = PyUnicode_FromFormat("%U%c%U", archive, SEP, nameobj);
        if (path == NULL) {
            goto error;
        }
        t = Py_BuildValue("NHIIkHHI", path, compress, data_size,
                          file_size, file_offset, time, date, crc);
        if (t == NULL) {
            goto error;
        }
        err = PyDict_SetItem(files, nameobj, t);
        Py_CLEAR(nameobj);
        Py_DECREF(t);
        if (err != 0) {
            goto error;
        }
        count++;
    }
    fclose(fp);
    if (Py_VerboseFlag) {
        PySys_FormatStderr("# zipimport: found %u names in %R\n",
                           count, archive);
    }
    return files;

eof_error:
    set_file_error(archive, !ferror(fp));
    goto error;

file_error:
    PyErr_Format(ZipImportError, "can't read Zip file: %R", archive);
    goto error;

invalid_header:
    assert(errmsg != NULL);
    PyErr_Format(ZipImportError, "%s: %R", errmsg, archive);
    goto error;

error:
    fclose(fp);
    Py_XDECREF(files);
    Py_XDECREF(nameobj);
    return NULL;
}
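
This example and the next one reach their eof_error labels through set_file_error(archive, !ferror(fp)), but the helper itself is not shown on this page. A minimal definition consistent with that call site (an assumption, not copied from the snippets) would be:

static void
set_file_error(PyObject *archive, int eof)
{
    /* Assumed sketch: raise EOFError on a clean end-of-stream, otherwise
       an OSError carrying errno and the archive path. */
    if (eof) {
        PyErr_SetString(PyExc_EOFError, "EOF read where not expected");
    }
    else {
        PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, archive);
    }
}
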
Example #3
/* Given a path to a Zip file and a toc_entry, return the (uncompressed)
   data as a new reference. */
static PyObject *
get_data(PyObject *archive, PyObject *toc_entry)
{
    PyObject *raw_data = NULL, *data, *decompress;
    char *buf;
    FILE *fp;
    PyObject *datapath;
    unsigned short compress, time, date;
    unsigned int crc;
    Py_ssize_t data_size, file_size, bytes_size;
    long file_offset, header_size;
    unsigned char buffer[30];
    const char *errmsg = NULL;

    if (!PyArg_ParseTuple(toc_entry, "OHnnlHHI", &datapath, &compress,
                          &data_size, &file_size, &file_offset, &time,
                          &date, &crc)) {
        return NULL;
    }
    if (data_size < 0) {
        PyErr_Format(ZipImportError, "negative data size");
        return NULL;
    }

    fp = _Py_fopen_obj(archive, "rb");
    if (!fp) {
        return NULL;
    }
    /* Check to make sure the local file header is correct */
    if (fseek(fp, file_offset, 0) == -1) {
        goto file_error;
    }
    if (fread(buffer, 1, 30, fp) != 30) {
        goto eof_error;
    }
    if (get_uint32(buffer) != 0x04034B50u) {
        /* Bad: Local File Header */
        errmsg = "bad local file header";
        goto invalid_header;
    }

    header_size = (unsigned int)30 +
        get_uint16(buffer + 26) /* file name */ +
        get_uint16(buffer + 28) /* extra field */;
    if (file_offset > LONG_MAX - header_size) {
        errmsg = "bad local file header size";
        goto invalid_header;
    }
    file_offset += header_size;  /* Start of file data */

    if (data_size > LONG_MAX - 1) {
        fclose(fp);
        PyErr_NoMemory();
        return NULL;
    }
    bytes_size = compress == 0 ? data_size : data_size + 1;
    if (bytes_size == 0) {
        bytes_size++;
    }
    raw_data = PyBytes_FromStringAndSize((char *)NULL, bytes_size);
    if (raw_data == NULL) {
        goto error;
    }
    buf = PyBytes_AsString(raw_data);

    if (fseek(fp, file_offset, 0) == -1) {
        goto file_error;
    }
    if (fread(buf, 1, data_size, fp) != (size_t)data_size) {
        PyErr_SetString(PyExc_IOError,
                        "zipimport: can't read data");
        goto error;
    }

    fclose(fp);
    fp = NULL;

    if (compress != 0) {
        buf[data_size] = 'Z';  /* saw this in zipfile.py */
        data_size++;
    }
    buf[data_size] = '\0';

    if (compress == 0) {  /* data is not compressed */
        data = PyBytes_FromStringAndSize(buf, data_size);
        Py_DECREF(raw_data);
        return data;
    }

    /* Decompress with zlib */
    decompress = get_decompress_func();
    if (decompress == NULL) {
        PyErr_SetString(ZipImportError,
                        "can't decompress data; "
                        "zlib not available");
        goto error;
    }
    data = PyObject_CallFunction(decompress, "Oi", raw_data, -15);
    Py_DECREF(decompress);
    Py_DECREF(raw_data);
    return data;

eof_error:
    set_file_error(archive, !ferror(fp));
    goto error;

file_error:
    PyErr_Format(ZipImportError, "can't read Zip file: %R", archive);
    goto error;

invalid_header:
    assert(errmsg != NULL);
    PyErr_Format(ZipImportError, "%s: %R", errmsg, archive);
    goto error;

error:
    if (fp != NULL) {
        fclose(fp);
    }
    Py_XDECREF(raw_data);
    return NULL;
}
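
get_data() obtains zlib's decompress function through get_decompress_func() and calls it with a second argument of -15, which tells zlib to expect raw deflate data with no header or checksum. The lookup helper is not reproduced here; a simplified sketch of what it could look like (an assumption; a real implementation would also guard against recursively importing zlib from the archive being read) is:

static PyObject *
get_decompress_func(void)
{
    /* Simplified, assumed sketch: return zlib.decompress as a new reference,
       or NULL (with the import error cleared) when zlib is unavailable,
       letting the caller raise its own ZipImportError. */
    PyObject *decompress = NULL;
    PyObject *zlib = PyImport_ImportModule("zlib");

    if (zlib != NULL) {
        decompress = PyObject_GetAttrString(zlib, "decompress");
        Py_DECREF(zlib);
    }
    else {
        PyErr_Clear();
    }
    return decompress;
}
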
Example #4
File: m_file.c  Project: camgunz/d2k
static void set_file_error_from_errno(void) {
  set_file_error(
    M_FILE_ERROR, g_file_error_from_errno(errno), strerror(errno)
  );
}
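
In this project, set_file_error takes an error domain, a GLib error code derived from errno via g_file_error_from_errno(), and a message string. A hypothetical call site in the same file (the function name open_data_file is invented for illustration) could look like:

/* Hypothetical usage sketch: report a failed fopen() through
   set_file_error_from_errno(), which reads the errno left by fopen(). */
static FILE* open_data_file(const char *path) {
  FILE *fp = fopen(path, "rb");

  if (fp == NULL) {
    set_file_error_from_errno();
    return NULL;
  }

  return fp;
}
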
Example #5
/*
   read_directory(archive) -> files dict (new reference)

   Given a path to a Zip archive, build a dict, mapping file names
   (local to the archive, using SEP as a separator) to toc entries.

   A toc_entry is a tuple:

   (__file__,      # value to use for __file__, available for all files
    compress,      # compression kind; 0 for uncompressed
    data_size,     # size of compressed data on disk
    file_size,     # size of decompressed data
    file_offset,   # offset of file header from start of archive
    time,          # mod time of file (in dos format)
    date,          # mod date of file (in dos format)
    crc,           # crc checksum of the data
   )

   Directories can be recognized by the trailing SEP in the name;
   their data_size and file_offset are 0.
*/
static PyObject *
read_directory(const char *archive)
{
    PyObject *files = NULL;
    FILE *fp;
    unsigned short compress, time, date, name_size;
    unsigned int crc, data_size, file_size, header_size, header_offset;
    unsigned long file_offset, header_position;
    unsigned long arc_offset;  /* Absolute offset to start of the zip-archive. */
    unsigned int count, i;
    unsigned char buffer[46];
    size_t length;
    char path[MAXPATHLEN + 5];
    char name[MAXPATHLEN + 5];
    const char *errmsg = NULL;

    if (strlen(archive) > MAXPATHLEN) {
        PyErr_SetString(PyExc_OverflowError,
                        "Zip path name is too long");
        return NULL;
    }
    strcpy(path, archive);

    fp = fopen(archive, "rb");
    if (fp == NULL) {
        PyErr_Format(ZipImportError, "can't open Zip file: "
                     "'%.200s'", archive);
        return NULL;
    }

    if (fseek(fp, -22, SEEK_END) == -1) {
        goto file_error;
    }
    header_position = (unsigned long)ftell(fp);
    if (header_position == (unsigned long)-1) {
        goto file_error;
    }
    assert(header_position <= (unsigned long)LONG_MAX);
    if (fread(buffer, 1, 22, fp) != 22) {
        goto file_error;
    }
    if (get_uint32(buffer) != 0x06054B50u) {
        /* Bad: End of Central Dir signature */
        errmsg = "not a Zip file";
        goto invalid_header;
    }

    header_size = get_uint32(buffer + 12);
    header_offset = get_uint32(buffer + 16);
    if (header_position < header_size) {
        errmsg = "bad central directory size";
        goto invalid_header;
    }
    if (header_position < header_offset) {
        errmsg = "bad central directory offset";
        goto invalid_header;
    }
    if (header_position - header_size < header_offset) {
        errmsg = "bad central directory size or offset";
        goto invalid_header;
    }
    header_position -= header_size;
    arc_offset = header_position - header_offset;

    files = PyDict_New();
    if (files == NULL) {
        goto error;
    }

    length = (long)strlen(path);
    path[length] = SEP;

    /* Start of Central Directory */
    count = 0;
    if (fseek(fp, (long)header_position, 0) == -1) {
        goto file_error;
    }
    for (;;) {
        PyObject *t;
        size_t n;
        int err;

        n = fread(buffer, 1, 46, fp);
        if (n < 4) {
            goto eof_error;
        }
        /* Start of file header */
        if (get_uint32(buffer) != 0x02014B50u) {
            break;              /* Bad: Central Dir File Header */
        }
        if (n != 46) {
            goto eof_error;
        }
        compress = get_uint16(buffer + 10);
        time = get_uint16(buffer + 12);
        date = get_uint16(buffer + 14);
        crc = get_uint32(buffer + 16);
        data_size = get_uint32(buffer + 20);
        file_size = get_uint32(buffer + 24);
        name_size = get_uint16(buffer + 28);
        header_size = (unsigned int)name_size +
           get_uint16(buffer + 30) /* extra field */ +
           get_uint16(buffer + 32) /* comment */;

        file_offset = get_uint32(buffer + 42);
        if (file_offset > header_offset) {
            errmsg = "bad local header offset";
            goto invalid_header;
        }
        file_offset += arc_offset;

        if (name_size > MAXPATHLEN) {
            name_size = MAXPATHLEN;
        }
        if (fread(name, 1, name_size, fp) != name_size) {
            goto file_error;
        }
        name[name_size] = '\0';  /* Add terminating null byte */
        if (SEP != '/') {
            for (i = 0; i < name_size; i++) {
                if (name[i] == '/') {
                    name[i] = SEP;
                }
            }
        }
        /* Skip the rest of the header.
         * On Windows, calling fseek to skip over the fields we don't use is
         * slower than reading the data because fseek flushes stdio's
         * internal buffers.  See issue #8745. */
        assert(header_size <= 3*0xFFFFu);
        for (i = name_size; i < header_size; i++) {
            if (getc(fp) == EOF) {
                goto file_error;
            }
        }

        strncpy(path + length + 1, name, MAXPATHLEN - length - 1);

        t = Py_BuildValue("sHIIkHHI", path, compress, data_size,
                          file_size, file_offset, time, date, crc);
        if (t == NULL) {
            goto error;
        }
        err = PyDict_SetItemString(files, name, t);
        Py_DECREF(t);
        if (err != 0) {
            goto error;
        }
        count++;
    }
    fclose(fp);
    if (Py_VerboseFlag) {
        PySys_WriteStderr("# zipimport: found %u names in %.200s\n",
                           count, archive);
    }
    return files;

eof_error:
    set_file_error(archive, !ferror(fp));
    goto error;

file_error:
    PyErr_Format(ZipImportError, "can't read Zip file: %.200s", archive);
    goto error;

invalid_header:
    assert(errmsg != NULL);
    PyErr_Format(ZipImportError, "%s: %.200s", errmsg, archive);
    goto error;

error:
    fclose(fp);
    Py_XDECREF(files);
    return NULL;
}
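
Unlike Example #2, this older variant receives the archive as a plain C string, so its eof_error path implies a set_file_error that takes a char * path rather than a PyObject *. A minimal definition consistent with that call site (again an assumption, not shown on this page) would be:

static void
set_file_error(const char *archive, int eof)
{
    /* Assumed sketch: string-path counterpart of the helper sketched
       after Example #2. */
    if (eof) {
        PyErr_SetString(PyExc_EOFError, "EOF read where not expected");
    }
    else {
        PyErr_SetFromErrnoWithFilename(PyExc_OSError, archive);
    }
}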