/* this should be less naive */
value arc_pathjoin2(arc *c, value p1, value p2)
{
  TYPECHECK(p1, T_STRING);
  TYPECHECK(p2, T_STRING);
  p1 = arc_strcatc(c, p1, DIR_SEP);
  return(arc_strcat(c, p1, p2));
}
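/* The arc_* functions in this excerpt rely on a TYPECHECK(value, type) macro
 * supplied by the interpreter headers, which are not shown here.  The sketch
 * below is only an illustration of the convention the callers assume --
 * "signal an error and bail out of the current C function when the type tag
 * does not match" -- and not the project's actual definition.  TYPE(),
 * arc_err_cstrfmt(), CNIL and the implicit `c' parameter all come from the
 * interpreter, and the expansion only makes sense inside a function that
 * returns a `value'. */
#define TYPECHECK_SKETCH(v, expected_type)                                  \
  do {                                                                      \
    if (TYPE(v) != (expected_type)) {                                       \
      arc_err_cstrfmt(c, "wrong argument type (expected type %d, got %d)",  \
                      (expected_type), TYPE(v));                            \
      return(CNIL);                                                         \
    }                                                                       \
  } while (0)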
SExpr* SPrimScope::tryTypeCheck() {
  // for inlined prims, try to see if primitive will always fail
  if (!InlinePrimitives) return NULL;

  bool fail = false;
  switch (pd->type()) {
    case NotReallyAPrimitive:
    case InternalPrimitive:
      fatal("cannot call an internal primitive from Self code");
      return NULL;

    case IntComparisonPrimitive:
    case IntArithmeticPrimitive:
      // must have two smis
      fail = CHECK_INT(receiver) || CHECK_INT(args->last());
      break;

    case FloatArithmeticPrimitive:
    case FloatComparisonPrimitive:
      // must have two floats
      fail = CHECK_FLOAT(receiver) || CHECK_FLOAT(args->last());
      break;

    case AtPrimitive:
    case AtPutPrimitive:
      // must have array rcvr and smi arg
      fail = TYPECHECK(receiver, is_objVector()) || CHECK_INT(args->last());
      break;

    case SizePrimitive:
      // must have array rcvr
      fail = TYPECHECK(receiver, is_objVector());
      break;

    case ByteAtPutPrimitive:
      // stored value must be 0..255; for now, test only for integer
      fail = CHECK_INT(args->nth(0));
      // fall through

    case ByteAtPrimitive:
      // must have array rcvr and smi arg
      fail |= TYPECHECK(receiver, is_byteVector()) || CHECK_INT(args->last());
      break;

    case ByteSizePrimitive:
      // must have array rcvr
      fail = TYPECHECK(receiver, is_byteVector());
      break;

    default:
      return NULL;
  }

  if (fail) {
    // primitive will always fail
    ConstPReg* error = new_ConstPReg(_sender, VMString[BADTYPEERROR]);
    Node* dummy;
    MergeNode* mdummy = NULL;
    return genPrimFailure(NULL, error, dummy, mdummy, resultPR, false);
  } else {
    return NULL;
  }
}
/* XXX - when a file handle opened by pipe-from is up for gc it uses
   fclose instead of pclose! */
value arc_pipe_from(arc *c, value cmd)
{
  FILE *fp;
  int len;
  char *cmdstr;
  value ffp;

  TYPECHECK(cmd, T_STRING);
  len = FIX2INT(arc_strutflen(c, cmd));
  cmdstr = (char *)alloca(sizeof(char)*(len+1));
  arc_str2cstr(c, cmd, cmdstr);
  fp = popen(cmdstr, "r");
  if (fp == NULL) {
    int en = errno;

    arc_err_cstrfmt(c, "pipe-from: error executing command \"%s\", (%s; errno=%d)", cmdstr, strerror(en), en);
    return(CNIL);
  }
  ffp = mkfio(c, T_INPORT, fp, cmd);
  IO(ffp)->io_ops = VINDEX(VINDEX(c->builtins, BI_io), BI_io_pfp);
  IO(ffp)->io_tfn = &procio_tfn;
  return(ffp);
}
int bind_params(sqlite3_stmt* stmt, const mxArray *params, int column)
{
  TYPECHECK(params, mxSTRUCT_CLASS);
  int i, n = mxGetNumberOfFields(params);
  for (i = 0; i < n; i++) {
    mxArray *array = mxGetFieldByNumber(params, column, i);
    mxClassID cls = mxGetClassID(array);
    int res = SQLITE_OK;   /* default so skipped fields do not trip the check */
    switch (cls) {
      case mxFUNCTION_CLASS:
        /* function handles cannot be bound; skip them */
        break;
      case mxCHAR_CLASS:
        res = bind_string(stmt, i + 1, array);
        break;
      case mxSINGLE_CLASS:
      case mxDOUBLE_CLASS:
        res = bind_double(stmt, i + 1, array);
        break;
      default:
        /* anything else is an integer */
        res = bind_int64(stmt, i + 1, array);
    }
    if (res != SQLITE_OK) {
      return res;
    }
  }
  return SQLITE_OK;
}
static bool wsgi_senditer(Request* request)
{
#define ITER_MAXSEND 1024*4
  PyObject* item = request->iterable_next;
  if(!item)
    return true;

  ssize_t sent = 0;
  while(item && sent < ITER_MAXSEND) {
    TYPECHECK(item, PyString, "wsgi iterable items", true);
    if(!sendall(request, PyString_AS_STRING(item), PyString_GET_SIZE(item)))
      return true;
    sent += PyString_GET_SIZE(item);
    Py_DECREF(item);
    item = PyIter_Next(request->response);
    if(PyErr_Occurred()) {
      /* TODO: What to do here? Parts of the response are already sent */
      return true;
    }
  }
  if(item) {
    request->iterable_next = item;
    return false;
  } else {
    return true;
  }
}
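/* wsgi_senditer() above and wsgi_sendheaders() below both delegate to a
 * sendall() helper that is not part of this excerpt.  Judging from the call
 * sites (a false return aborts the response), it presumably returns true
 * once the whole buffer has been written and false on a socket error.  The
 * sketch below is an assumption about that contract, written against a plain
 * file descriptor rather than the Request struct; it is not the project's
 * actual implementation. */
#include <stdbool.h>
#include <stddef.h>
#include <unistd.h>

static bool sendall_sketch(int fd, const char* data, size_t len)
{
  while(len) {
    ssize_t n = write(fd, data, len);
    if(n < 0)
      return false;       /* socket error: let the caller abort the response */
    data += n;
    len -= (size_t)n;
  }
  return true;            /* everything was written */
}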
value arc_mvfile(arc *c, value oldname, value newname)
{
  char *utf_oldname, *utf_newname;
  int en;

  TYPECHECK(oldname, T_STRING);
  TYPECHECK(newname, T_STRING);
  utf_oldname = alloca(FIX2INT(arc_strutflen(c, oldname)) + 1);
  arc_str2cstr(c, oldname, utf_oldname);
  utf_newname = alloca(FIX2INT(arc_strutflen(c, newname)) + 1);
  arc_str2cstr(c, newname, utf_newname);
  if (rename(utf_oldname, utf_newname) != 0) {
    en = errno;
    arc_err_cstrfmt(c, "mvfile: cannot move file \"%s\" to \"%s\", (%s; errno=%d)", utf_oldname, utf_newname, strerror(en), en);
    return(CNIL);
  }
  return(CNIL);
}
static bool wsgi_sendheaders(Request* request)
{
  char buf[1024*4];
  size_t bufpos = 0;
#define buf_write(src, len)               \
  do {                                    \
    size_t n = len;                       \
    const char* s = src;                  \
    while(n--) buf[bufpos++] = *s++;      \
  } while(0)

  buf_write("HTTP/1.0 ", strlen("HTTP/1.0 "));
  buf_write(PyString_AS_STRING(request->status),
            PyString_GET_SIZE(request->status));

  size_t n_headers = PyList_GET_SIZE(request->headers);
  for(size_t i=0; i<n_headers; ++i) {
    PyObject* tuple = PyList_GET_ITEM(request->headers, i);
    assert(tuple);
    TYPECHECK(tuple, PyTuple, "headers", true);

    if(PyTuple_GET_SIZE(tuple) < 2) {
      PyErr_Format(
        PyExc_TypeError,
        "headers must be tuples of length 2, not %zd",
        PyTuple_GET_SIZE(tuple)
      );
      return true;
    }

    PyObject* field = PyTuple_GET_ITEM(tuple, 0);
    PyObject* value = PyTuple_GET_ITEM(tuple, 1);
    TYPECHECK(field, PyString, "header tuple items", true);
    TYPECHECK(value, PyString, "header tuple items", true);

    buf_write("\r\n", strlen("\r\n"));
    buf_write(PyString_AS_STRING(field), PyString_GET_SIZE(field));
    buf_write(": ", strlen(": "));
    buf_write(PyString_AS_STRING(value), PyString_GET_SIZE(value));
  }
  buf_write("\r\n\r\n", strlen("\r\n\r\n"));

  return !sendall(request, buf, bufpos);
}
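/* The TYPECHECK(obj, TYPE, description, failure_return) macro used in the two
 * WSGI functions above and in start_response() below takes a Python object,
 * the name of a CPython type (so that TYPE##_Check can be token-pasted), a
 * string for the error message, and the value to return from the enclosing
 * function on a mismatch.  The macro below is a minimal sketch of that
 * pattern, not necessarily the project's exact definition; it assumes the
 * description argument is a string literal, as it is at every call site. */
#define TYPECHECK_SKETCH(what, type, description, failure_retval)        \
  do {                                                                    \
    if(!type##_Check(what)) {                                             \
      PyErr_Format(PyExc_TypeError, description " must be of type %s, "   \
                   "not %s", #type, Py_TYPE(what)->tp_name);              \
      return failure_retval;                                              \
    }                                                                     \
  } while(0)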
int bind_string(sqlite3_stmt *stmt, int index, const mxArray *array)
{
  TYPECHECK(array, mxCHAR_CLASS);
  char *s = mxArrayToString(array);
  int res = sqlite3_bind_text(stmt, index, s,
                              -1,               /* ALL the bytes */
                              SQLITE_TRANSIENT  /* make a copy */
                              );
  mxFree(s);
  return res;
}
value arc_dir_exists(arc *c, value dirname)
{
  char *utf_filename;
  struct stat st;

  TYPECHECK(dirname, T_STRING);
  utf_filename = alloca(FIX2INT(arc_strutflen(c, dirname)) + 1);
  arc_str2cstr(c, dirname, utf_filename);
  if (stat(utf_filename, &st) == -1) {
    return(CNIL);
  }
  if (S_ISDIR(st.st_mode))
    return(dirname);
  return(CNIL);
}
value arc_rmfile(arc *c, value filename)
{
  char *utf_filename;
  int en;

  TYPECHECK(filename, T_STRING);
  utf_filename = alloca(FIX2INT(arc_strutflen(c, filename)) + 1);
  arc_str2cstr(c, filename, utf_filename);
  if (unlink(utf_filename) < 0) {
    en = errno;
    arc_err_cstrfmt(c, "rmfile: cannot delete file \"%s\", (%s; errno=%d)", utf_filename, strerror(en), en);
    return(CNIL);
  }
  return(CNIL);
}
value arc_dir(arc *c, value dirname)
{
  char *utf_filename;
  DIR *dirp;
  int en;
  value dirlist;
  struct dirent *entry, *result;
  int delen;

  TYPECHECK(dirname, T_STRING);
  utf_filename = alloca(FIX2INT(arc_strutflen(c, dirname)) + 1);
  arc_str2cstr(c, dirname, utf_filename);
  dirp = opendir(utf_filename);
  if (dirp == NULL) {
    en = errno;
    arc_err_cstrfmt(c, "dir: cannot open directory \"%s\", (%s; errno=%d)", utf_filename, strerror(en), en);
    return(CNIL);
  }
  dirlist = CNIL;
  delen = offsetof(struct dirent, d_name) + pathconf(utf_filename, _PC_NAME_MAX) + 1;
  entry = (struct dirent *)alloca(delen);
  for (;;) {
    /* readdir_r reports errors through its return value, not errno */
    if ((en = readdir_r(dirp, entry, &result)) != 0) {
      arc_err_cstrfmt(c, "dir: error reading directory \"%s\", (%s; errno=%d)", utf_filename, strerror(en), en);
      closedir(dirp);
      return(CNIL);
    }
    /* end of list */
    if (result == NULL)
      break;
    /* ignore the . and .. directories */
    if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0)
      continue;
    dirlist = cons(c, arc_mkstringc(c, entry->d_name), dirlist);
  }
  closedir(dirp);
  return(dirlist);
}
int bind_int64(sqlite3_stmt *stmt, int index, const mxArray *array)
{
  int64_t val = 0;
  TYPECHECK(array, mxINT16_CLASS, mxUINT16_CLASS,
                   mxINT32_CLASS, mxUINT32_CLASS,
                   mxINT64_CLASS, mxUINT64_CLASS);
  mxClassID cls = mxGetClassID(array);

#define GET_VALUE(CLASS, TYPE) \
  if (cls == CLASS) {          \
    TYPE *p = mxGetData(array); \
    val = *p;                  \
  }

  GET_VALUE(mxINT16_CLASS, int16_t);
  GET_VALUE(mxUINT16_CLASS, uint16_t);
  GET_VALUE(mxINT32_CLASS, int32_t);
  GET_VALUE(mxUINT32_CLASS, uint32_t);
  GET_VALUE(mxINT64_CLASS, int64_t);
  GET_VALUE(mxUINT64_CLASS, uint64_t);

  return sqlite3_bind_int64(stmt, index, val);
}
int bind_double(sqlite3_stmt *stmt, int index, const mxArray *array)
{
  TYPECHECK(array, mxSINGLE_CLASS, mxDOUBLE_CLASS);
  double val = mxGetScalar(array);
  return sqlite3_bind_double(stmt, index, val);
}
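/* The MEX-side TYPECHECK(array, class, ...) used by bind_params, bind_string,
 * bind_int64 and bind_double accepts one or more acceptable mxClassID values.
 * How the real macro reports a mismatch is not shown in this excerpt; the
 * sketch below is one plausible shape -- a variadic macro over a sentinel-
 * terminated helper that raises a MATLAB error -- offered only as an
 * illustration, not as the bindings' actual definition. */
#include <stdarg.h>
#include "mex.h"

/* Hypothetical helper: returns silently if mxGetClassID(array) is among the
 * listed classes (list terminated by mxUNKNOWN_CLASS); otherwise raises a
 * MATLAB error and does not return. */
static void typecheck_sketch(const mxArray *array, ...)
{
  mxClassID actual = mxGetClassID(array);
  va_list ap;
  va_start(ap, array);
  for (;;) {
    mxClassID ok = (mxClassID)va_arg(ap, int);
    if (ok == mxUNKNOWN_CLASS)
      break;                      /* end of accepted-class list: no match */
    if (ok == actual) {
      va_end(ap);
      return;                     /* class is acceptable */
    }
  }
  va_end(ap);
  mexErrMsgIdAndTxt("sqlite:typecheck", "unexpected argument class %s",
                    mxGetClassName(array));
}

#define TYPECHECK_SKETCH(array, ...) \
  typecheck_sketch((array), __VA_ARGS__, mxUNKNOWN_CLASS)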
static PyObject* start_response(PyObject* self, PyObject* args, PyObject* kwargs)
{
  Request* request = ((StartResponse*)self)->request;

  if(request->state & REQUEST_START_RESPONSE_CALLED) {
    /* not the first call of start_response --
       throw away any previous status and headers. */
    Py_DECREF(request->status);
    Py_DECREF(request->headers);
    request->status = NULL;
    request->headers = NULL;
  }

  PyObject* status = NULL;
  PyObject* headers = NULL;
  PyObject* exc_info = NULL;
  if(!PyArg_UnpackTuple(args, "start_response", 2, 3, &status, &headers, &exc_info))
    return NULL;

  if(exc_info) {
    TYPECHECK(exc_info, PyTuple, "start_response argument 3", NULL);
    if(PyTuple_GET_SIZE(exc_info) != 3) {
      PyErr_Format(
        PyExc_TypeError,
        "start_response argument 3 must be a tuple of length 3, "
        "not of length %zd",
        PyTuple_GET_SIZE(exc_info)
      );
      return NULL;
    }

    restore_exception_tuple(exc_info, /* incref items? */ true);

    if(request->state & REQUEST_RESPONSE_HEADERS_SENT)
      /* Headers already sent. According to PEP 333, we should
       * let the exception propagate in this case. */
      return NULL;

    /* Headers not yet sent; handle this start_response call as if
       'exc_info' had not been passed, but print the exception and
       clear it ('sys.exc_clear()'). */
    PyErr_Print();
  } else if(request->state & REQUEST_START_RESPONSE_CALLED) {
    PyErr_SetString(PyExc_TypeError, "'start_response' called twice without "
                    "passing 'exc_info' the second time");
    return NULL;
  }

  TYPECHECK(status, PyString, "start_response argument 1", NULL);
  TYPECHECK(headers, PyList, "start_response argument 2", NULL);

  Py_INCREF(status);
  Py_INCREF(headers);
  request->status = status;
  request->headers = headers;
  request->state |= REQUEST_START_RESPONSE_CALLED;

  Py_RETURN_NONE;
}