/*
 * Normalize a URL path in place: strip leading "./", "../" and '/',
 * collapse "//" to "/", and resolve "/./" and "/../" components.
 * Everything from the first '?' or '#' on (query/fragment) is kept
 * verbatim.  Returns the length of the normalized path.
 *
 * NOTE(review): assumes 'path' is non-NULL (enforced by the
 * G_GNUC_MGET_NONNULL_ALL attribute); a NUL-terminated string is required.
 */
static size_t G_GNUC_MGET_NONNULL_ALL _normalize_path(char *path)
{
	/* p1 = write cursor, p2 = read cursor; p1 never passes p2 */
	char *p1 = path, *p2 = path;

	debug_printf("path %s ->\n", path);

	// skip ./ and ../ at the beginning of the path
	for (;;) {
		if (*p2 == '/')
			p2++;
		else if (*p2 == '.') {
			if (p2[1] == '/')
				p2 += 2;
			else if (p2[1] == '.') {
				if (p2[2] == '/')
					p2 += 3;
				else if (!p2[2])
					p2 += 2;
				else
					break;
			}
			else if (!p2[1])
				p2++;
			else
				break;
		} else
			break;
	}

	// normalize path but stop at query or fragment
	while (*p2 && *p2 != '?' && *p2 != '#') {
		if (*p2 == '/') {
			if (p2[1] == '.') {
				if (!strncmp(p2, "/../", 4)) {
					// go one level up
					p2 += 3;
					while (p1 > path && *--p1 != '/');
				} else if (!strcmp(p2, "/..")) {
					// trailing "/.." — go up and keep a trailing '/'
					p2 += 3;
					while (p1 > path && *--p1 != '/');
					if (p1 > path) *p1++='/';
				} else if (!strncmp(p2, "/./", 3)) {
					// "/./" — current directory, drop it
					p2 += 2;
				} else if (!strcmp(p2, "/.")) {
					// trailing "/." — drop, keep trailing '/'
					p2 += 2;
					if (p1 > path) *p1++='/';
				} else
					*p1++ = *p2++;
			} else if (p1 == path)
				p2++; // avoid leading slash
			else if (p2[1] == '/')
				p2++; // double slash to single slash
			else
				*p1++ = *p2++;
		} else
			*p1++ = *p2++;
	}

	// copy any remaining query/fragment untouched, then terminate
	if (p1 != p2) {
		while (*p2)
			*p1++ = *p2++;
		*p1 = 0;
	}

	debug_printf(" %s\n", path);

	return p1 - path;
}
/*
    struct vnop_getattr_args {
	struct vnode *a_vp;
	struct vattr *a_vap;
	struct ucred *a_cred;
	struct thread *a_td;
    };
*/
/*
 * VOP_GETATTR for a FUSE vnode: fetch attributes from the FUSE daemon
 * via FUSE_GETATTR, populate *vap from the cached vnode attributes,
 * and reconcile the cached file size with what the daemon reports.
 * Falls back to a zeroed ("fake") vattr for the root vnode when the
 * session is not yet initialized or the daemon is unreachable.
 */
static int
fuse_vnop_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	struct thread *td = curthread;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	int err = 0;
	int dataflags;
	struct fuse_dispatcher fdi;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	dataflags = fuse_get_mpdata(vnode_mount(vp))->dataflags;

	/* Note that we are not bailing out on a dead file system just yet. */

	if (!(dataflags & FSESS_INITED)) {
		if (!vnode_isvroot(vp)) {
			/* non-root vnode on an uninitialized session: give up */
			fdata_set_dead(fuse_get_mpdata(vnode_mount(vp)));
			err = ENOTCONN;
			debug_printf("fuse_getattr b: returning ENOTCONN\n");
			return err;
		} else {
			goto fake;
		}
	}
	fdisp_init(&fdi, 0);
	if ((err = fdisp_simple_putget_vp(&fdi, FUSE_GETATTR, vp, td, cred))) {
		if ((err == ENOTCONN) && vnode_isvroot(vp)) {
			/* see comment at similar place in fuse_statfs() */
			fdisp_destroy(&fdi);
			goto fake;
		}
		if (err == ENOENT) {
			/* daemon says the node is gone: revoke the vnode */
			fuse_internal_vnode_disappear(vp);
		}
		goto out;
	}
	/* update the per-vnode attribute cache, then copy out to the caller */
	cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
	if (vap != VTOVA(vp)) {
		memcpy(vap, VTOVA(vp), sizeof(*vap));
	}
	if (vap->va_type != vnode_vtype(vp)) {
		/* type changed behind our back: stale vnode */
		fuse_internal_vnode_disappear(vp);
		err = ENOENT;
		goto out;
	}
	/* a locally-extended size (dirty write) overrides the daemon's view */
	if ((fvdat->flag & FN_SIZECHANGE) != 0)
		vap->va_size = fvdat->filesize;

	if (vnode_isreg(vp) && (fvdat->flag & FN_SIZECHANGE) == 0) {
		/*
		 * This is for those cases when the file size changed without us
		 * knowing, and we want to catch up.
		 */
		off_t new_filesize = ((struct fuse_attr_out *)fdi.answ)->attr.size;

		if (fvdat->filesize != new_filesize) {
			fuse_vnode_setsize(vp, cred, new_filesize);
		}
	}
	debug_printf("fuse_getattr e: returning 0\n");

out:
	fdisp_destroy(&fdi);
	return err;

fake:
	/* best-effort answer for the root vnode: zeroed attrs, correct type */
	bzero(vap, sizeof(*vap));
	vap->va_type = vnode_vtype(vp);
	return 0;
}
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \param target       GL buffer target (determines gallium bind flags)
 * \param size         size in bytes (0 frees the backing resource)
 * \param data         initial contents, may be NULL
 * \param usage        GL usage hint (BufferData path)
 * \param storageFlags GL storage flags (BufferStorage path)
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
st_bufferobj_data(struct gl_context *ctx,
		  GLenum target,
		  GLsizeiptrARB size,
		  const GLvoid * data,
		  GLenum usage,
		  GLbitfield storageFlags,
		  struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   unsigned bind, pipe_usage, pipe_flags = 0;

   /* Fast path: same size/usage/flags as the existing resource. */
   if (size && data && st_obj->buffer &&
       st_obj->Base.Size == size &&
       st_obj->Base.Usage == usage &&
       st_obj->Base.StorageFlags == storageFlags) {
      /* Just discard the old contents and write new data.
       * This should be the same as creating a new buffer, but we avoid
       * a lot of validation in Mesa.
       */
      struct pipe_box box;
      u_box_1d(0, size, &box);
      pipe->transfer_inline_write(pipe, st_obj->buffer, 0,
                                  PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                                  &box, data, 0, 0);
      return GL_TRUE;
   }

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;
   st_obj->Base.StorageFlags = storageFlags;

   /* Map the GL buffer target to gallium bind flags. */
   switch (target) {
   case GL_PIXEL_PACK_BUFFER_ARB:
   case GL_PIXEL_UNPACK_BUFFER_ARB:
      bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_VERTEX_BUFFER;
      break;
   case GL_ELEMENT_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_INDEX_BUFFER;
      break;
   case GL_TEXTURE_BUFFER:
      bind = PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      bind = PIPE_BIND_STREAM_OUTPUT;
      break;
   case GL_UNIFORM_BUFFER:
      bind = PIPE_BIND_CONSTANT_BUFFER;
      break;
   case GL_DRAW_INDIRECT_BUFFER:
      bind = PIPE_BIND_COMMAND_ARGS_BUFFER;
      break;
   default:
      bind = 0;
   }

   /* Set usage. */
   if (st_obj->Base.Immutable) {
      /* BufferStorage */
      if (storageFlags & GL_CLIENT_STORAGE_BIT)
         pipe_usage = PIPE_USAGE_STAGING;
      else
         pipe_usage = PIPE_USAGE_DEFAULT;
   }
   else {
      /* BufferData */
      switch (usage) {
      case GL_STATIC_DRAW:
      case GL_STATIC_READ:
      case GL_STATIC_COPY:
      default:
         pipe_usage = PIPE_USAGE_DEFAULT;
         break;
      case GL_DYNAMIC_DRAW:
      case GL_DYNAMIC_READ:
      case GL_DYNAMIC_COPY:
         pipe_usage = PIPE_USAGE_DYNAMIC;
         break;
      case GL_STREAM_DRAW:
      case GL_STREAM_READ:
      case GL_STREAM_COPY:
         pipe_usage = PIPE_USAGE_STREAM;
         break;
      }
   }

   /* Set flags. */
   if (storageFlags & GL_MAP_PERSISTENT_BIT)
      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
   if (storageFlags & GL_MAP_COHERENT_BIT)
      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;

   /* Release the old resource (if any) before creating a new one. */
   pipe_resource_reference( &st_obj->buffer, NULL );

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
                   (int64_t) size, bind);
   }

   if (size != 0) {
      struct pipe_resource buffer;

      memset(&buffer, 0, sizeof buffer);
      buffer.target = PIPE_BUFFER;
      buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
      buffer.bind = bind;
      buffer.usage = pipe_usage;
      buffer.flags = pipe_flags;
      buffer.width0 = size;
      buffer.height0 = 1;
      buffer.depth0 = 1;
      buffer.array_size = 1;

      st_obj->buffer = pipe->screen->resource_create(pipe->screen, &buffer);

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }

      if (data)
         pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
   }

   /* BufferData may change an array or uniform buffer, need to update it */
   st->dirty.st |= ST_NEW_VERTEX_ARRAYS | ST_NEW_UNIFORM_BUFFER;

   return GL_TRUE;
}
int dmarc_write_history_file() { int history_file_fd; ssize_t written_len; int tmp_ans; u_char **rua; /* aggregate report addressees */ uschar *history_buffer = NULL; if (!dmarc_history_file) return DMARC_HIST_DISABLED; history_file_fd = log_create(dmarc_history_file); if (history_file_fd < 0) { log_write(0, LOG_MAIN|LOG_PANIC, "failure to create DMARC history file: %s", dmarc_history_file); return DMARC_HIST_FILE_ERR; } /* Generate the contents of the history file */ history_buffer = string_sprintf( "job %s\nreporter %s\nreceived %ld\nipaddr %s\nfrom %s\nmfrom %s\n", message_id, primary_hostname, time(NULL), sender_host_address, header_from_sender, expand_string(US"$sender_address_domain")); if (spf_response) history_buffer = string_sprintf("%sspf %d\n", history_buffer, dmarc_spf_ares_result); /* history_buffer = string_sprintf("%sspf -1\n", history_buffer); */ history_buffer = string_sprintf( "%s%spdomain %s\npolicy %d\n", history_buffer, dkim_history_buffer, dmarc_used_domain, dmarc_policy); if ((rua = opendmarc_policy_fetch_rua(dmarc_pctx, NULL, 0, 1))) for (tmp_ans = 0; rua[tmp_ans]; tmp_ans++) history_buffer = string_sprintf("%srua %s\n", history_buffer, rua[tmp_ans]); else history_buffer = string_sprintf("%srua -\n", history_buffer); opendmarc_policy_fetch_pct(dmarc_pctx, &tmp_ans); history_buffer = string_sprintf("%spct %d\n", history_buffer, tmp_ans); opendmarc_policy_fetch_adkim(dmarc_pctx, &tmp_ans); history_buffer = string_sprintf("%sadkim %d\n", history_buffer, tmp_ans); opendmarc_policy_fetch_aspf(dmarc_pctx, &tmp_ans); history_buffer = string_sprintf("%saspf %d\n", history_buffer, tmp_ans); opendmarc_policy_fetch_p(dmarc_pctx, &tmp_ans); history_buffer = string_sprintf("%sp %d\n", history_buffer, tmp_ans); opendmarc_policy_fetch_sp(dmarc_pctx, &tmp_ans); history_buffer = string_sprintf("%ssp %d\n", history_buffer, tmp_ans); history_buffer = string_sprintf( "%salign_dkim %d\nalign_spf %d\naction %d\n", history_buffer, da, sa, action); /* Write the 
contents to the history file */ DEBUG(D_receive) debug_printf("DMARC logging history data for opendmarc reporting%s\n", (host_checking || running_in_test_harness) ? " (not really)" : ""); if (host_checking || running_in_test_harness) { DEBUG(D_receive) debug_printf("DMARC history data for debugging:\n%s", history_buffer); } else { written_len = write_to_fd_buf(history_file_fd, history_buffer, Ustrlen(history_buffer)); if (written_len == 0) { log_write(0, LOG_MAIN|LOG_PANIC, "failure to write to DMARC history file: %s", dmarc_history_file); return DMARC_HIST_WRITE_ERR; } (void)close(history_file_fd); } return DMARC_HIST_OK; }
/*
    struct vnop_setattr_args {
	struct vnode *a_vp;
	struct vattr *a_vap;
	struct ucred *a_cred;
	struct thread *a_td;
    };
*/
/*
 * VOP_SETATTR for a FUSE vnode: translate the changed fields of *vap into
 * a FUSE_SETATTR request (uid/gid/size/atime/mtime/mode), perform local
 * permission/read-only checks, send the request, and validate the
 * returned attributes.  A size change is applied to the local vnode only
 * after the daemon confirms it.
 */
static int
fuse_vnop_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	struct thread *td = curthread;
	struct fuse_dispatcher fdi;
	struct fuse_setattr_in *fsai;
	struct fuse_access_param facp;
	int err = 0;
	enum vtype vtyp;
	int sizechanged = 0;
	uint64_t newsize = 0;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	fdisp_init(&fdi, sizeof(*fsai));
	fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
	fsai = fdi.indata;
	fsai->valid = 0;
	bzero(&facp, sizeof(facp));
	facp.xuid = vap->va_uid;
	facp.xgid = vap->va_gid;

	/* For each field actually set by the caller (!= VNOVAL), copy it
	 * into the request and mark the corresponding FATTR_* valid bit. */
	if (vap->va_uid != (uid_t)VNOVAL) {
		facp.facc_flags |= FACCESS_CHOWN;
		fsai->uid = vap->va_uid;
		fsai->valid |= FATTR_UID;
	}
	if (vap->va_gid != (gid_t)VNOVAL) {
		facp.facc_flags |= FACCESS_CHOWN;
		fsai->gid = vap->va_gid;
		fsai->valid |= FATTR_GID;
	}
	if (vap->va_size != VNOVAL) {
		struct fuse_filehandle *fufh = NULL;

		/* Truncate to a new value. */
		fsai->size = vap->va_size;
		sizechanged = 1;
		newsize = vap->va_size;
		fsai->valid |= FATTR_SIZE;

		/* pass along a writable filehandle if one is open */
		fuse_filehandle_getrw(vp, FUFH_WRONLY, &fufh);
		if (fufh) {
			fsai->fh = fufh->fh_id;
			fsai->valid |= FATTR_FH;
		}
	}
	if (vap->va_atime.tv_sec != VNOVAL) {
		fsai->atime = vap->va_atime.tv_sec;
		fsai->atimensec = vap->va_atime.tv_nsec;
		fsai->valid |= FATTR_ATIME;
	}
	if (vap->va_mtime.tv_sec != VNOVAL) {
		fsai->mtime = vap->va_mtime.tv_sec;
		fsai->mtimensec = vap->va_mtime.tv_nsec;
		fsai->valid |= FATTR_MTIME;
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		fsai->mode = vap->va_mode & ALLPERMS;
		fsai->valid |= FATTR_MODE;
	}
	/* nothing to change: succeed without contacting the daemon */
	if (!fsai->valid) {
		goto out;
	}
	vtyp = vnode_vtype(vp);

	if (fsai->valid & FATTR_SIZE && vtyp == VDIR) {
		err = EISDIR;
		goto out;
	}
	if (vfs_isrdonly(vnode_mount(vp)) &&
	    (fsai->valid & ~FATTR_SIZE || vtyp == VREG)) {
		err = EROFS;
		goto out;
	}
	if (fsai->valid & ~FATTR_SIZE) {
		/*err = fuse_internal_access(vp, VADMIN, context, &facp); */
		/*XXX */
		err = 0;
	}
	facp.facc_flags &= ~FACCESS_XQUERIES;

	/* utimes with NULL times only needs write access, not ownership */
	if (err && !(fsai->valid & ~(FATTR_ATIME | FATTR_MTIME)) &&
	    vap->va_vaflags & VA_UTIMES_NULL) {
		err = fuse_internal_access(vp, VWRITE, &facp, td, cred);
	}
	if (err)
		goto out;
	if ((err = fdisp_wait_answ(&fdi)))
		goto out;
	vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode);

	if (vnode_vtype(vp) != vtyp) {
		if (vnode_vtype(vp) == VNON && vtyp != VNON) {
			debug_printf("FUSE: Dang! vnode_vtype is VNON and vtype isn't.\n");
		} else {
			/*
			 * STALE vnode, ditch
			 *
			 * The vnode has changed its type "behind our back". There's
			 * nothing really we can do, so let us just force an internal
			 * revocation and tell the caller to try again, if interested.
			 */
			fuse_internal_vnode_disappear(vp);
			err = EAGAIN;
		}
	}
	/* cache new attrs unless a size change will be applied below */
	if (!err && !sizechanged) {
		cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
	}
out:
	fdisp_destroy(&fdi);
	if (!err && sizechanged) {
		fuse_vnode_setsize(vp, cred, newsize);
		VTOFUD(vp)->flag &= ~FN_SIZECHANGE;
	}
	return err;
}
/*
 * Flush every queued playlist operation to the mpd server as one command
 * list, then clear down and refresh the cached status.
 *
 * Returns MPD_OK on success, or MPD_NOT_CONNECTED / MPD_PLAYLIST_QUEUE_EMPTY /
 * MPD_LOCK_FAILED when the preconditions are not met.
 */
int mpd_playlist_queue_commit(MpdObj *mi)
{
	if (!mpd_check_connected(mi)) {
		debug_printf(DEBUG_WARNING, "not connected\n");
		return MPD_NOT_CONNECTED;
	}
	if (mi->queue == NULL) {
		debug_printf(DEBUG_WARNING, "mi->queue is empty");
		return MPD_PLAYLIST_QUEUE_EMPTY;
	}
	if (mpd_lock_conn(mi)) {
		debug_printf(DEBUG_WARNING, "lock failed\n");
		return MPD_LOCK_FAILED;
	}

	mpd_sendCommandListBegin(mi->connection);

	/* Replay the queue from its first entry; mpd_queue_get_next()
	 * advances (and consumes) mi->queue. */
	for (mi->queue = mi->queue->first; mi->queue != NULL; mpd_queue_get_next(mi)) {
		switch (mi->queue->type) {
		case MPD_QUEUE_ADD:
			if (mi->queue->path != NULL)
				mpd_sendAddCommand(mi->connection, mi->queue->path);
			break;
		case MPD_QUEUE_LOAD:
			if (mi->queue->path != NULL)
				mpd_sendLoadCommand(mi->connection, mi->queue->path);
			break;
		case MPD_QUEUE_DELETE_ID:
			if (mi->queue->id >= 0)
				mpd_sendDeleteIdCommand(mi->connection, mi->queue->id);
			break;
		case MPD_QUEUE_DELETE_POS:
			if (mi->queue->id >= 0)
				mpd_sendDeleteCommand(mi->connection, mi->queue->id);
			break;
		default:
			/* unknown entry types are skipped, as before */
			break;
		}
	}

	mpd_sendCommandListEnd(mi->connection);
	mpd_finishCommand(mi->connection);
	mpd_unlock_conn(mi);
	mpd_status_update(mi);
	return MPD_OK;
}
/*
 * pipe_screen::get_param for softpipe: report the value of a single
 * PIPE_CAP_* capability for this software rasterizer.  Unknown caps fall
 * through the switch and return 0 with a debug message.
 */
static int
softpipe_get_param(struct pipe_screen *screen, enum pipe_cap param)
{
   switch (param) {
   /* basic texture / rasterization features */
   case PIPE_CAP_NPOT_TEXTURES:
   case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
      return 1;
   case PIPE_CAP_TWO_SIDED_STENCIL:
      return 1;
   case PIPE_CAP_SM3:
      return 1;
   case PIPE_CAP_ANISOTROPIC_FILTER:
      return 1;
   case PIPE_CAP_POINT_SPRITE:
      return 1;
   case PIPE_CAP_MAX_RENDER_TARGETS:
      return PIPE_MAX_COLOR_BUFS;
   case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
      return 1;
   /* queries */
   case PIPE_CAP_OCCLUSION_QUERY:
      return 1;
   case PIPE_CAP_QUERY_TIME_ELAPSED:
      return 1;
   case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
      return 1;
   case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
      return 1;
   case PIPE_CAP_TEXTURE_SHADOW_MAP:
      return 1;
   case PIPE_CAP_TEXTURE_SWIZZLE:
      return 1;
   case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
      return 0;
   /* texture size limits (driver-defined constants) */
   case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
      return SP_MAX_TEXTURE_2D_LEVELS;
   case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
      return SP_MAX_TEXTURE_3D_LEVELS;
   case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
      return SP_MAX_TEXTURE_CUBE_LEVELS;
   case PIPE_CAP_BLEND_EQUATION_SEPARATE:
      return 1;
   case PIPE_CAP_INDEP_BLEND_ENABLE:
      return 1;
   case PIPE_CAP_INDEP_BLEND_FUNC:
      return 1;
   /* all fragment coordinate conventions are supported in software */
   case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
   case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
   case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
   case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
      return 1;
   case PIPE_CAP_DEPTH_CLIP_DISABLE:
      return 1;
   /* stream output / geometry limits */
   case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
      return PIPE_MAX_SO_BUFFERS;
   case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
   case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
      return 16*4;
   case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
   case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
      return 1024;
   case PIPE_CAP_MAX_VERTEX_STREAMS:
      return 1;
   case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
      return 2048;
   case PIPE_CAP_PRIMITIVE_RESTART:
      return 1;
   case PIPE_CAP_SHADER_STENCIL_EXPORT:
      return 1;
   case PIPE_CAP_TGSI_INSTANCEID:
   case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
   case PIPE_CAP_START_INSTANCE:
      return 1;
   case PIPE_CAP_SEAMLESS_CUBE_MAP:
   case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
      return 1;
   case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
      return 256; /* for GL3 */
   case PIPE_CAP_MIN_TEXEL_OFFSET:
      return -8;
   case PIPE_CAP_MAX_TEXEL_OFFSET:
      return 7;
   case PIPE_CAP_CONDITIONAL_RENDER:
      return 1;
   case PIPE_CAP_TEXTURE_BARRIER:
      return 0;
   case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
   case PIPE_CAP_VERTEX_COLOR_UNCLAMPED: /* draw module */
   case PIPE_CAP_VERTEX_COLOR_CLAMPED: /* draw module */
      return 1;
   case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
      return 0;
   case PIPE_CAP_GLSL_FEATURE_LEVEL:
      return 330;
   case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
      return 0;
   case PIPE_CAP_COMPUTE:
      return 0;
   /* user-pointer buffers are fine in a software driver */
   case PIPE_CAP_USER_VERTEX_BUFFERS:
   case PIPE_CAP_USER_INDEX_BUFFERS:
   case PIPE_CAP_USER_CONSTANT_BUFFERS:
   case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
   case PIPE_CAP_TGSI_VS_LAYER_VIEWPORT:
      return 1;
   case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
      return 16;
   case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
   case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_TEXTURE_MULTISAMPLE:
      return 0;
   case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
      return 64;
   case PIPE_CAP_QUERY_TIMESTAMP:
   case PIPE_CAP_CUBE_MAP_ARRAY:
      return 1;
   case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
      return 1;
   case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
      return 65536;
   case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
      return 0;
   case PIPE_CAP_TGSI_TEXCOORD:
   case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
      return 0;
   case PIPE_CAP_MAX_VIEWPORTS:
      return 1;
   case PIPE_CAP_ENDIANNESS:
      return PIPE_ENDIAN_NATIVE;
   /* unimplemented features */
   case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
   case PIPE_CAP_TEXTURE_GATHER_SM5:
   case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
   case PIPE_CAP_TEXTURE_QUERY_LOD:
   case PIPE_CAP_SAMPLE_SHADING:
   case PIPE_CAP_TEXTURE_GATHER_OFFSETS:
   case PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION:
   case PIPE_CAP_TGSI_FS_FINE_DERIVATIVE:
   case PIPE_CAP_SAMPLER_VIEW_TARGET:
      return 0;
   case PIPE_CAP_FAKE_SW_MSAA:
      return 1;
   case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
   case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
      return 0;
   case PIPE_CAP_DRAW_INDIRECT:
      return 1;
   /* no real hardware: dummy IDs, not accelerated */
   case PIPE_CAP_VENDOR_ID:
      return 0xFFFFFFFF;
   case PIPE_CAP_DEVICE_ID:
      return 0xFFFFFFFF;
   case PIPE_CAP_ACCELERATED:
      return 0;
   case PIPE_CAP_VIDEO_MEMORY:
   {
      /* XXX: Do we want to return the full amount of system memory ? */
      uint64_t system_memory;

      if (!os_get_total_physical_memory(&system_memory))
         return 0;

      /* report in MiB */
      return (int)(system_memory >> 20);
   }
   case PIPE_CAP_UMA:
      return 0;
   case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
      return 1;
   case PIPE_CAP_CLIP_HALFZ:
      return 1;
   }
   /* should only get here on unhandled cases */
   debug_printf("Unexpected PIPE_CAP %d query\n", param);
   return 0;
}
int test_memset_s (void) { errno_t rc; uint32_t len; uint32_t i; uint8_t value; /*--------------------------------------------------*/ value = 34; rc = memset_s(NULL, LEN, value); if (rc != ESNULLP) { debug_printf("%s %u Error rc=%u \n", __FUNCTION__, __LINE__, rc); } /*--------------------------------------------------*/ value = 34; rc = memset_s(mem1, 0, value); if (rc != ESZEROL) { debug_printf("%s %u Error rc=%u \n", __FUNCTION__, __LINE__, rc); } /*--------------------------------------------------*/ for (i=0; i<LEN; i++) { mem1[i] = 99; } len = 1; value = 34; rc = memset_s(mem1, len, value); if (rc != EOK) { debug_printf("%s %u Error rc=%u \n", __FUNCTION__, __LINE__, rc); } for (i=0; i<len; i++) { if (mem1[i] != value) { printf("%d - %d m1=%d \n", __LINE__, i, mem1[i]); } } /*--------------------------------------------------*/ for (i=0; i<LEN; i++) { mem1[i] = 99; } len = 2; value = 34; rc = memset_s(mem1, len, value); if (rc != EOK) { debug_printf("%s %u Error rc=%u \n", __FUNCTION__, __LINE__, rc); } for (i=0; i<len; i++) { if (mem1[i] != value) { printf("%d - %d m1=%d \n", __LINE__, i, mem1[i]); } } /*--------------------------------------------------*/ for (i=0; i<LEN; i++) { mem1[i] = 99; } len = 12; value = 34; rc = memset_s(mem1, len, value); if (rc != EOK) { debug_printf("%s %u Error rc=%u \n", __FUNCTION__, __LINE__, rc); } for (i=0; i<len; i++) { if (mem1[i] != value) { printf("%d - %d m1=%d \n", __LINE__, i, mem1[i]); } } /*--------------------------------------------------*/ for (i=0; i<LEN; i++) { mem1[i] = 99; } len = 31; value = 34; rc = memset_s(mem1, len, value); if (rc != EOK) { debug_printf("%s %u Error rc=%u \n", __FUNCTION__, __LINE__, rc); } for (i=0; i<len; i++) { if (mem1[i] != value) { printf("%d - %d m1=%d \n", __LINE__, i, mem1[i]); } } /*--------------------------------------------------*/ for (i=0; i<LEN; i++) { mem1[i] = 99; } len = 133; value = 34; rc = memset_s(mem1, len, value); if (rc != EOK) { debug_printf("%s %u Error 
rc=%u \n", __FUNCTION__, __LINE__, rc); } for (i=0; i<len; i++) { if (mem1[i] != value) { printf("%d - %d m1=%d \n", __LINE__, i, mem1[i]); } } /*--------------------------------------------------*/ /*--------------------------------------------------*/ return (0); }
/*
 * Check and emit a range of shader constant registers, trying to coalesce
 * successive shader constant updates in a single command in order to save
 * space on the command buffer. This is a HWv8 feature.
 *
 * Compares 'values' against the cached hardware state and uploads only
 * the dirty runs (at most MAX_CONST_REG_COUNT registers per command),
 * updating the cache on success.
 */
static enum pipe_error
emit_const_range(struct svga_context *svga,
                 unsigned shader,
                 unsigned offset,
                 unsigned count,
                 const float (*values)[4])
{
   unsigned i, j;
   enum pipe_error ret;

   assert(shader == PIPE_SHADER_VERTEX ||
          shader == PIPE_SHADER_FRAGMENT);
   assert(!svga_have_vgpu10(svga));

#ifdef DEBUG
   if (offset + count > SVGA3D_CONSTREG_MAX) {
      debug_printf("svga: too many constants (offset %u + count %u = %u (max = %u))\n",
                   offset, count, offset + count, SVGA3D_CONSTREG_MAX);
   }
#endif

   if (offset > SVGA3D_CONSTREG_MAX) {
      /* This isn't OK, but if we propagate an error all the way up we'll
       * just get into more trouble.
       * XXX note that offset is always zero at this time so this is moot.
       */
      return PIPE_OK;
   }

   if (offset + count > SVGA3D_CONSTREG_MAX) {
      /* Just drop the extra constants for now.
       * Ideally we should not have allowed the app to create a shader
       * that exceeds our constant buffer size but there's no way to
       * express that in gallium at this time.
       */
      count = SVGA3D_CONSTREG_MAX - offset;
   }

   i = 0;
   while (i < count) {
      if (memcmp(svga->state.hw_draw.cb[shader][offset + i],
                 values[i],
                 4 * sizeof(float)) != 0) {
         /* Found one dirty constant
          */
         if (SVGA_DEBUG & DEBUG_CONSTS)
            debug_printf("%s %s %d: %f %f %f %f\n",
                         __FUNCTION__,
                         shader == PIPE_SHADER_VERTEX ? "VERT" : "FRAG",
                         offset + i,
                         values[i][0],
                         values[i][1],
                         values[i][2],
                         values[i][3]);

         /* Look for more consecutive dirty constants.
          */
         j = i + 1;
         while (j < count &&
                j < i + MAX_CONST_REG_COUNT &&
                memcmp(svga->state.hw_draw.cb[shader][offset + j],
                       values[j],
                       4 * sizeof(float)) != 0) {

            if (SVGA_DEBUG & DEBUG_CONSTS)
               debug_printf("%s %s %d: %f %f %f %f\n",
                            __FUNCTION__,
                            shader == PIPE_SHADER_VERTEX ? "VERT" : "FRAG",
                            offset + j,
                            values[j][0],
                            values[j][1],
                            values[j][2],
                            values[j][3]);

            ++j;
         }

         assert(j >= i + 1);

         /* Send them all together.
          */
         if (svga_have_gb_objects(svga)) {
            ret = SVGA3D_SetGBShaderConstsInline(svga->swc,
                                                 offset + i, /* start */
                                                 j - i,  /* count */
                                                 svga_shader_type(shader),
                                                 SVGA3D_CONST_TYPE_FLOAT,
                                                 values + i);
         }
         else {
            ret = SVGA3D_SetShaderConsts(svga->swc,
                                         offset + i, j - i,
                                         svga_shader_type(shader),
                                         SVGA3D_CONST_TYPE_FLOAT,
                                         values + i);
         }
         if (ret != PIPE_OK) {
            return ret;
         }

         /*
          * Local copy of the hardware state.
          */
         memcpy(svga->state.hw_draw.cb[shader][offset + i],
                values[i],
                (j - i) * 4 * sizeof(float));

         /* NOTE(review): j points at a register known to be clean, so
          * skipping to j + 1 is normally fine; but when the inner loop
          * stopped because it hit the MAX_CONST_REG_COUNT batch limit,
          * register j was never compared and is skipped unexamined —
          * confirm against upstream whether 'i = j' was intended there. */
         i = j + 1;
      }
      else {
         ++i;
      }
   }

   return PIPE_OK;
}
/*
 * EXA PrepareCopy hook: decide whether the copy from pSrcPixmap to
 * pDstPixmap can be accelerated, and if so set up either the
 * pipe->surface_copy path (used for same-surface copies) or the
 * texture-blit path.  Returns FALSE to make EXA fall back to software
 * (XORG_FALLBACK logs the reason and returns FALSE).
 *
 * Fix: the "accel not enabled" fallback message was misspelled "accle".
 */
static Bool
ExaPrepareCopy(PixmapPtr pSrcPixmap, PixmapPtr pDstPixmap, int xdir,
	       int ydir, int alu, Pixel planeMask)
{
    ScrnInfoPtr pScrn = xf86Screens[pDstPixmap->drawable.pScreen->myNum];
    modesettingPtr ms = modesettingPTR(pScrn);
    struct exa_context *exa = ms->exa;
    struct exa_pixmap_priv *priv = exaGetPixmapDriverPrivate(pDstPixmap);
    struct exa_pixmap_priv *src_priv = exaGetPixmapDriverPrivate(pSrcPixmap);

#if DEBUG_PRINT
    debug_printf("ExaPrepareCopy\n");
#endif

    if (!exa->accel)
	return FALSE;

    if (!exa->pipe)
	XORG_FALLBACK("accel not enabled");

    if (!priv || !priv->tex)
	XORG_FALLBACK("pDst %s", !priv ? "!priv" : "!priv->tex");

    if (!src_priv || !src_priv->tex)
	XORG_FALLBACK("pSrc %s", !src_priv ? "!priv" : "!priv->tex");

    if (!EXA_PM_IS_SOLID(&pSrcPixmap->drawable, planeMask))
	XORG_FALLBACK("planeMask is not solid");

    if (alu != GXcopy)
	XORG_FALLBACK("alu not GXcopy");

    /* both surfaces must be usable by the 3D engine */
    if (!exa->scrn->is_format_supported(exa->scrn, priv->tex->format,
					priv->tex->target,
					PIPE_TEXTURE_USAGE_RENDER_TARGET, 0))
	XORG_FALLBACK("pDst format %s", util_format_name(priv->tex->format));

    if (!exa->scrn->is_format_supported(exa->scrn, src_priv->tex->format,
					src_priv->tex->target,
					PIPE_TEXTURE_USAGE_SAMPLER, 0))
	XORG_FALLBACK("pSrc format %s", util_format_name(src_priv->tex->format));

    exa->copy.src = src_priv;
    exa->copy.dst = priv;

    /* For same-surface copies, the pipe->surface_copy path is clearly
     * superior, providing it is implemented.  In other cases it's not
     * clear what the better path would be, and eventually we'd
     * probably want to gather timings and choose dynamically.
     */
    if (exa->pipe->surface_copy &&
	exa->copy.src == exa->copy.dst) {

	exa->copy.use_surface_copy = TRUE;

	exa->copy.src_surface =
	    exa->scrn->get_tex_surface(exa->scrn,
				       exa->copy.src->tex,
				       0, 0, 0,
				       PIPE_BUFFER_USAGE_GPU_READ);

	exa->copy.dst_surface =
	    exa->scrn->get_tex_surface(exa->scrn,
				       exa->copy.dst->tex,
				       0, 0, 0,
				       PIPE_BUFFER_USAGE_GPU_WRITE);
    }
    else {
	exa->copy.use_surface_copy = FALSE;

	/* overlapping src/dst without surface_copy: blit from a clone */
	if (exa->copy.dst == exa->copy.src)
	    exa->copy.src_texture = renderer_clone_texture(exa->renderer,
							   exa->copy.src->tex);
	else
	    pipe_texture_reference(&exa->copy.src_texture,
				   exa->copy.src->tex);

	exa->copy.dst_surface =
	    exa->scrn->get_tex_surface(exa->scrn,
				       exa->copy.dst->tex,
				       0, 0, 0,
				       PIPE_BUFFER_USAGE_GPU_WRITE);

	renderer_copy_prepare(exa->renderer,
			      exa->copy.dst_surface,
			      exa->copy.src_texture);
    }

    return TRUE;
}
static Bool ExaPrepareComposite(int op, PicturePtr pSrcPicture, PicturePtr pMaskPicture, PicturePtr pDstPicture, PixmapPtr pSrc, PixmapPtr pMask, PixmapPtr pDst) { ScrnInfoPtr pScrn = xf86Screens[pDst->drawable.pScreen->myNum]; modesettingPtr ms = modesettingPTR(pScrn); struct exa_context *exa = ms->exa; struct exa_pixmap_priv *priv; if (!exa->accel) return FALSE; #if DEBUG_PRINT debug_printf("ExaPrepareComposite(%d, src=0x%p, mask=0x%p, dst=0x%p)\n", op, pSrcPicture, pMaskPicture, pDstPicture); debug_printf("\tFormats: src(%s), mask(%s), dst(%s)\n", pSrcPicture ? render_format_name(pSrcPicture->format) : "none", pMaskPicture ? render_format_name(pMaskPicture->format) : "none", pDstPicture ? render_format_name(pDstPicture->format) : "none"); #endif if (!exa->pipe) XORG_FALLBACK("accle not enabled"); priv = exaGetPixmapDriverPrivate(pDst); if (!priv || !priv->tex) XORG_FALLBACK("pDst %s", !priv ? "!priv" : "!priv->tex"); if (!exa->scrn->is_format_supported(exa->scrn, priv->tex->format, priv->tex->target, PIPE_TEXTURE_USAGE_RENDER_TARGET, 0)) XORG_FALLBACK("pDst format: %s", util_format_name(priv->tex->format)); if (priv->picture_format != pDstPicture->format) XORG_FALLBACK("pDst pic_format: %s != %s", render_format_name(priv->picture_format), render_format_name(pDstPicture->format)); if (pSrc) { priv = exaGetPixmapDriverPrivate(pSrc); if (!priv || !priv->tex) XORG_FALLBACK("pSrc %s", !priv ? "!priv" : "!priv->tex"); if (!exa->scrn->is_format_supported(exa->scrn, priv->tex->format, priv->tex->target, PIPE_TEXTURE_USAGE_SAMPLER, 0)) XORG_FALLBACK("pSrc format: %s", util_format_name(priv->tex->format)); if (!picture_check_formats(priv, pSrcPicture)) XORG_FALLBACK("pSrc pic_format: %s != %s", render_format_name(priv->picture_format), render_format_name(pSrcPicture->format)); } if (pMask) { priv = exaGetPixmapDriverPrivate(pMask); if (!priv || !priv->tex) XORG_FALLBACK("pMask %s", !priv ? 
"!priv" : "!priv->tex"); if (!exa->scrn->is_format_supported(exa->scrn, priv->tex->format, priv->tex->target, PIPE_TEXTURE_USAGE_SAMPLER, 0)) XORG_FALLBACK("pMask format: %s", util_format_name(priv->tex->format)); if (!picture_check_formats(priv, pMaskPicture)) XORG_FALLBACK("pMask pic_format: %s != %s", render_format_name(priv->picture_format), render_format_name(pMaskPicture->format)); } return xorg_composite_bind_state(exa, op, pSrcPicture, pMaskPicture, pDstPicture, pSrc ? exaGetPixmapDriverPrivate(pSrc) : NULL, pMask ? exaGetPixmapDriverPrivate(pMask) : NULL, exaGetPixmapDriverPrivate(pDst)); }
static int perform_ibase_search(uschar * query, uschar * server, uschar ** resultptr, uschar ** errmsg, BOOL * defer_break) { isc_stmt_handle stmth = NULL; XSQLDA *out_sqlda; XSQLVAR *var; char buffer[256]; ISC_STATUS status[20], *statusp = status; int i; int ssize = 0; int offset = 0; int yield = DEFER; uschar *result = NULL; ibase_connection *cn; uschar *server_copy = NULL; uschar *sdata[3]; /* Disaggregate the parameters from the server argument. The order is host, database, user, password. We can write to the string, since it is in a nextinlist temporary buffer. The copy of the string that is used for caching has the password removed. This copy is also used for debugging output. */ for (i = 2; i > 0; i--) { uschar *pp = Ustrrchr(server, '|'); if (pp == NULL) { *errmsg = string_sprintf("incomplete Interbase server data: %s", (i == 3) ? server : server_copy); *defer_break = TRUE; return DEFER; } *pp++ = 0; sdata[i] = pp; if (i == 2) server_copy = string_copy(server); /* sans password */ } sdata[0] = server; /* What's left at the start */ /* See if we have a cached connection to the server */ for (cn = ibase_connections; cn != NULL; cn = cn->next) { if (Ustrcmp(cn->server, server_copy) == 0) { break; } } /* Use a previously cached connection ? */ if (cn != NULL) { static char db_info_options[] = { isc_info_base_level }; /* test if the connection is alive */ if (isc_database_info (status, &cn->dbh, sizeof(db_info_options), db_info_options, sizeof(buffer), buffer)) { /* error occurred: assume connection is down */ DEBUG(D_lookup) debug_printf ("Interbase cleaning up cached connection: %s\n", cn->server); isc_detach_database(status, &cn->dbh); } else { DEBUG(D_lookup) debug_printf("Interbase using cached connection for %s\n", server_copy); } } else { cn = store_get(sizeof(ibase_connection)); cn->server = server_copy; cn->dbh = NULL; cn->transh = NULL; cn->next = ibase_connections; ibase_connections = cn; } /* If no cached connection, we must set one up. 
*/ if (cn->dbh == NULL || cn->transh == NULL) { char *dpb, *p; short dpb_length; static char trans_options[] = { isc_tpb_version3, isc_tpb_read, isc_tpb_read_committed, isc_tpb_rec_version }; /* Construct the database parameter buffer. */ dpb = buffer; *dpb++ = isc_dpb_version1; *dpb++ = isc_dpb_user_name; *dpb++ = strlen(sdata[1]); for (p = sdata[1]; *p;) *dpb++ = *p++; *dpb++ = isc_dpb_password; *dpb++ = strlen(sdata[2]); for (p = sdata[2]; *p;) *dpb++ = *p++; dpb_length = dpb - buffer; DEBUG(D_lookup) debug_printf("new Interbase connection: database=%s user=%s\n", sdata[0], sdata[1]); /* Connect to the database */ if (isc_attach_database (status, 0, sdata[0], &cn->dbh, dpb_length, buffer)) { isc_interprete(buffer, &statusp); *errmsg = string_sprintf("Interbase attach() failed: %s", buffer); *defer_break = FALSE; goto IBASE_EXIT; } /* Now start a read-only read-committed transaction */ if (isc_start_transaction (status, &cn->transh, 1, &cn->dbh, sizeof(trans_options), trans_options)) { isc_interprete(buffer, &statusp); isc_detach_database(status, &cn->dbh); *errmsg = string_sprintf("Interbase start_transaction() failed: %s", buffer); *defer_break = FALSE; goto IBASE_EXIT; } } /* Run the query */ if (isc_dsql_allocate_statement(status, &cn->dbh, &stmth)) { isc_interprete(buffer, &statusp); *errmsg = string_sprintf("Interbase alloc_statement() failed: %s", buffer); *defer_break = FALSE; goto IBASE_EXIT; } out_sqlda = store_get(XSQLDA_LENGTH(1)); out_sqlda->version = SQLDA_VERSION1; out_sqlda->sqln = 1; if (isc_dsql_prepare (status, &cn->transh, &stmth, 0, query, 1, out_sqlda)) { isc_interprete(buffer, &statusp); store_reset(out_sqlda); out_sqlda = NULL; *errmsg = string_sprintf("Interbase prepare_statement() failed: %s", buffer); *defer_break = FALSE; goto IBASE_EXIT; } /* re-allocate the output structure if there's more than one field */ if (out_sqlda->sqln < out_sqlda->sqld) { XSQLDA *new_sqlda = store_get(XSQLDA_LENGTH(out_sqlda->sqld)); if (isc_dsql_describe 
(status, &stmth, out_sqlda->version, new_sqlda)) { isc_interprete(buffer, &statusp); isc_dsql_free_statement(status, &stmth, DSQL_drop); store_reset(out_sqlda); out_sqlda = NULL; *errmsg = string_sprintf("Interbase describe_statement() failed: %s", buffer); *defer_break = FALSE; goto IBASE_EXIT; } out_sqlda = new_sqlda; } /* allocate storage for every returned field */ for (i = 0, var = out_sqlda->sqlvar; i < out_sqlda->sqld; i++, var++) { switch (var->sqltype & ~1) { case SQL_VARYING: var->sqldata = (char *) store_get(sizeof(char) * var->sqllen + 2); break; case SQL_TEXT: var->sqldata = (char *) store_get(sizeof(char) * var->sqllen); break; case SQL_SHORT: var->sqldata = (char *) store_get(sizeof(short)); break; case SQL_LONG: var->sqldata = (char *) store_get(sizeof(ISC_LONG)); break; #ifdef SQL_INT64 case SQL_INT64: var->sqldata = (char *) store_get(sizeof(ISC_INT64)); break; #endif case SQL_FLOAT: var->sqldata = (char *) store_get(sizeof(float)); break; case SQL_DOUBLE: var->sqldata = (char *) store_get(sizeof(double)); break; #ifdef SQL_TIMESTAMP case SQL_DATE: var->sqldata = (char *) store_get(sizeof(ISC_QUAD)); break; #else case SQL_TIMESTAMP: var->sqldata = (char *) store_get(sizeof(ISC_TIMESTAMP)); break; case SQL_TYPE_DATE: var->sqldata = (char *) store_get(sizeof(ISC_DATE)); break; case SQL_TYPE_TIME: var->sqldata = (char *) store_get(sizeof(ISC_TIME)); break; #endif } if (var->sqltype & 1) { var->sqlind = (short *) store_get(sizeof(short)); } } /* finally, we're ready to execute the statement */ if (isc_dsql_execute (status, &cn->transh, &stmth, out_sqlda->version, NULL)) { isc_interprete(buffer, &statusp); *errmsg = string_sprintf("Interbase describe_statement() failed: %s", buffer); isc_dsql_free_statement(status, &stmth, DSQL_drop); *defer_break = FALSE; goto IBASE_EXIT; } while (isc_dsql_fetch(status, &stmth, out_sqlda->version, out_sqlda) != 100L) { /* check if an error occurred */ if (status[0] & status[1]) { isc_interprete(buffer, &statusp); 
*errmsg = string_sprintf("Interbase fetch() failed: %s", buffer); isc_dsql_free_statement(status, &stmth, DSQL_drop); *defer_break = FALSE; goto IBASE_EXIT; } if (result != NULL) result = string_cat(result, &ssize, &offset, US "\n", 1); /* Find the number of fields returned. If this is one, we don't add field names to the data. Otherwise we do. */ if (out_sqlda->sqld == 1) { if (out_sqlda->sqlvar[0].sqlind == NULL || *out_sqlda->sqlvar[0].sqlind != -1) /* NULL value yields nothing */ result = string_cat(result, &ssize, &offset, US buffer, fetch_field(buffer, sizeof(buffer), &out_sqlda->sqlvar[0])); } else for (i = 0; i < out_sqlda->sqld; i++) { int len = fetch_field(buffer, sizeof(buffer), &out_sqlda->sqlvar[i]); result = string_cat(result, &ssize, &offset, US out_sqlda->sqlvar[i].aliasname, out_sqlda->sqlvar[i].aliasname_length); result = string_cat(result, &ssize, &offset, US "=", 1); /* Quote the value if it contains spaces or is empty */ if (*out_sqlda->sqlvar[i].sqlind == -1) { /* NULL value */ result = string_cat(result, &ssize, &offset, US "\"\"", 2); } else if (buffer[0] == 0 || Ustrchr(buffer, ' ') != NULL) { int j; result = string_cat(result, &ssize, &offset, US "\"", 1); for (j = 0; j < len; j++) { if (buffer[j] == '\"' || buffer[j] == '\\') result = string_cat(result, &ssize, &offset, US "\\", 1); result = string_cat(result, &ssize, &offset, US buffer + j, 1); } result = string_cat(result, &ssize, &offset, US "\"", 1); } else { result = string_cat(result, &ssize, &offset, US buffer, len); } result = string_cat(result, &ssize, &offset, US " ", 1); } } /* If result is NULL then no data has been found and so we return FAIL. Otherwise, we must terminate the string which has been built; string_cat() always leaves enough room for a terminating zero. */ if (result == NULL) { yield = FAIL; *errmsg = US "Interbase: no data found"; } else { result[offset] = 0; store_reset(result + offset + 1); } /* Get here by goto from various error checks. 
*/ IBASE_EXIT: if (stmth != NULL) isc_dsql_free_statement(status, &stmth, DSQL_drop); /* Non-NULL result indicates a sucessful result */ if (result != NULL) { *resultptr = result; return OK; } else { DEBUG(D_lookup) debug_printf("%s\n", *errmsg); return yield; /* FAIL or DEFER */ } }
/*
 * Emit the property list of 'prop' to Emacs as an S-expression via a_printf().
 *
 * prop->list is a newline-separated dump where each line is either a
 * "branch<TAB>indication-id<TAB>iconic-label<TAB>label" record or a
 * "leaf<TAB>...<TAB>action-id<TAB>..." record (six tab-separated fields).
 * A branch line caches its labels; the following leaf line decides whether
 * the entry is an IM switcher ("im-name") or a mode ("im-mode").
 *
 * Returns 1 on success, 0 when prop->list is missing (emits " ( e ) ").
 *
 * Bugfix: the ACTION_ID_IMSW test on p[4] is now only evaluated when all six
 * tabs were found.  Previously it ran unconditionally, so a malformed leaf
 * line (fewer than six tabs) left p[4] NULL or stale and strlen(p[4] + 1)
 * dereferenced an invalid pointer.
 */
int
show_prop(property *prop)
{
  char *buf;
  char *head, *tail;
  char *p[6] = {0};
  char *indication_id = NULL, *iconic_label = NULL, *label_string = NULL;
  int check_leaf = 0;

  /* output new prop_list for Emacs */
  if (prop->list == NULL) {
    debug_printf(DEBUG_ERROR, "no prop_list\n");
    a_printf(" ( e ) ");
    return 0;
  }

  a_printf(" ( l ");
  head = buf = uim_strdup(prop->list);

#define PART_BRANCH "branch"
#define PART_LEAF "leaf"
#define ACTION_ID_IMSW "action_imsw_"

  while (head && *head) {
    /*
     * head: beginning of each line
     * tail: end of each line
     * p[n]: token
     */
    tail = strchr(head, '\n');
    if (tail)
      *tail = '\0';
    else
      break; /* head always not equal NULL */

    if (strlen(head) >= strlen(PART_BRANCH)
        && strncmp(head, PART_BRANCH, strlen(PART_BRANCH)) == 0) {
      if ((p[0] = strchr(head, '\t'))
          && (p[1] = strchr(p[0] + 1, '\t'))
          && (p[2] = strchr(p[1] + 1, '\t'))) {
        *p[0] = *p[1] = *p[2] = '\0';
        indication_id = p[0] + 1;
        iconic_label = p[1] + 1;
        label_string = p[2] + 1;
        check_leaf = 1; /* check next leaf */
      }
    } else if (strlen(head) >= strlen(PART_LEAF)
               && strncmp(head, PART_LEAF, strlen(PART_LEAF)) == 0) {
      if (check_leaf && indication_id && iconic_label && label_string) {
        check_leaf = 0;
        /* im_switcher detection: needs all six tab-separated fields */
        if ((p[0] = strchr(head, '\t'))
            && (p[1] = strchr(p[0] + 1, '\t'))
            && (p[2] = strchr(p[1] + 1, '\t'))
            && (p[3] = strchr(p[2] + 1, '\t'))
            && (p[4] = strchr(p[3] + 1, '\t'))
            && (p[5] = strchr(p[4] + 1, '\t'))) {
          *p[0] = *p[1] = *p[2] = *p[3] = *p[4] = *p[5] = '\0';
          /* p[4] + 1 is the action id; only valid inside this branch */
          if (strlen(p[4] + 1) >= strlen(ACTION_ID_IMSW)
              && strncmp(p[4] + 1, ACTION_ID_IMSW,
                         strlen(ACTION_ID_IMSW)) == 0)
            a_printf(" ( \"im-name\" \"%s\" \"%s\" \"%s\" ) ",
                     indication_id, iconic_label, label_string);
          else
            a_printf(" ( \"im-mode\" \"%s\" \"%s\" \"%s\" ) ",
                     indication_id, iconic_label, label_string);
        }
      }
    }
    head = tail + 1;
  }
  free(buf);
  a_printf(" ) ");
  return 1;
#undef PART_BRANCH
#undef PART_LEAF
#undef ACTION_ID_IMSW
}
//char *iri_relative_to_absolute(IRI *iri, const char *tag, const char *val, size_t len, char *dst, size_t dst_size) const char *mget_iri_relative_to_abs(mget_iri_t *base, const char *val, size_t len, mget_buffer_t *buf) { debug_printf("*url = %.*s\n", (int)len, val); if (*val == '/') { if (base) { char path[len + 1]; // strlcpy or snprintf are ineffective here since they do strlen(val), which might be large memcpy(path, val, len); path[len] = 0; if (len >= 2 && val[1] == '/') { char *p; // absolute URI without scheme: //authority/path... if ((p = strchr(path + 2, '/'))) _normalize_path(p + 1); mget_buffer_strcpy(buf, base->scheme); mget_buffer_strcat(buf, ":"); mget_buffer_strcat(buf, path); debug_printf("*1 %s\n", buf->data); } else { // absolute path _normalize_path(path); mget_buffer_strcpy(buf, mget_iri_get_connection_part(base)); mget_buffer_strcat(buf, "/"); mget_buffer_strcat(buf, path); debug_printf("*2 %s\n", buf->data); } } else return NULL; } else { // see if URI begins with a scheme: if (memchr(val, ':', len)) { // absolute URI if (buf) { mget_buffer_memcpy(buf, val, len); debug_printf("*3 %s\n", buf->data); } else { debug_printf("*3 %s\n", val); return val; } } else if (base) { // relative path const char *lastsep = base->path ? strrchr(base->path, '/') : NULL; mget_buffer_strcpy(buf, mget_iri_get_connection_part(base)); mget_buffer_strcat(buf, "/"); size_t tmp_len = buf->length; if (lastsep) mget_buffer_memcat(buf, base->path, lastsep - base->path + 1); if (len) mget_buffer_memcat(buf, val, len); buf->length = _normalize_path(buf->data + tmp_len) + tmp_len; debug_printf("*4 %s %zu\n", buf->data, buf->length); } else if (val[len] == 0) { return val; } else return NULL; } return buf->data; }
/*
 * Register a new wireless network: forwards the ESSID and its ASCII
 * passphrase to network_action() with the 'a' (add) action code.
 */
void add_network(char *essid, char *ascii_password)
{
	debug_printf(2, "%s('%s', '%s')\n", __func__, essid, ascii_password);

	network_action('a', essid, ascii_password);
}
/*
 * One-time initialisation of the SRS (Sender Rewriting Scheme) engine.
 *
 * On the first call (global 'srs' still NULL) this reads the configuration:
 * the legacy colon-list in srs_config (secret, max age, hash length,
 * use-timestamp, use-hash -- each optional, overriding the newer individual
 * options), falls back to the first entry of srs_secrets for the secret,
 * validates ranges, opens the SRS context and loads any extra secrets.
 *
 * Returns OK on success (or when already initialised), DEFER on any
 * configuration or allocation failure.
 */
int eximsrs_init()
{
  uschar *list = srs_config;
  uschar secret_buf[SRS_MAX_SECRET_LENGTH];
  uschar *secret = NULL;
  uschar sbuf[4];      /* numeric options are at most 3 digits + NUL */
  uschar *sbufp;

  /* Check if this instance of Exim has not initialized SRS */
  if(srs == NULL)
  {
    int co = 0;
    int hashlen, maxage;
    BOOL usetimestamp, usehash;

    /* Copy config vars */
    hashlen = srs_hashlength;
    maxage = srs_maxage;
    usetimestamp = srs_usetimestamp;
    usehash = srs_usehash;

    /* Pass srs_config var (overrides new config vars) */
    co = 0;
    if(srs_config != NULL)
    {
      secret = string_nextinlist(&list, &co, secret_buf, SRS_MAX_SECRET_LENGTH);

      if((sbufp = string_nextinlist(&list, &co, sbuf, sizeof(sbuf))) != NULL)
        maxage = atoi(sbuf);

      if((sbufp = string_nextinlist(&list, &co, sbuf, sizeof(sbuf))) != NULL)
        hashlen = atoi(sbuf);

      if((sbufp = string_nextinlist(&list, &co, sbuf, sizeof(sbuf))) != NULL)
        usetimestamp = atoi(sbuf);

      if((sbufp = string_nextinlist(&list, &co, sbuf, sizeof(sbuf))) != NULL)
        usehash = atoi(sbuf);
    }

    if(srs_hashmin == -1)
      srs_hashmin = hashlen;

    /* First secret specified in secrets?
     * Note: 'list'/'co' are re-pointed at srs_secrets here; when the secret
     * came from srs_config, the first srs_secrets entry is NOT consumed and
     * will be picked up by the extra-secrets loop below. */
    co = 0;
    list = srs_secrets;
    if(secret == NULL || *secret == '\0')
    {
      if((secret = string_nextinlist(&list, &co, secret_buf, SRS_MAX_SECRET_LENGTH)) == NULL)
      {
        log_write(0, LOG_MAIN | LOG_PANIC,
          "SRS Configuration Error: No secret specified");
        return DEFER;
      }
    }

    /* Check config */
    if(maxage < 0 || maxage > 365)
    {
      log_write(0, LOG_MAIN | LOG_PANIC,
        "SRS Configuration Error: Invalid maximum timestamp age");
      return DEFER;
    }
    if(hashlen < 1 || hashlen > 20 || srs_hashmin < 1 || srs_hashmin > 20)
    {
      log_write(0, LOG_MAIN | LOG_PANIC,
        "SRS Configuration Error: Invalid hash length");
      return DEFER;
    }

    if((srs = srs_open(secret, Ustrlen(secret), maxage, hashlen, srs_hashmin)) == NULL)
    {
      log_write(0, LOG_MAIN | LOG_PANIC,
        "Failed to allocate SRS memory");
      return DEFER;
    }

    srs_set_option(srs, SRS_OPTION_USETIMESTAMP, usetimestamp);
    srs_set_option(srs, SRS_OPTION_USEHASH, usehash);

    /* Extra secrets? */
    while((secret = string_nextinlist(&list, &co, secret_buf, SRS_MAX_SECRET_LENGTH)) != NULL)
      srs_add_secret(srs, secret,
        (Ustrlen(secret) > SRS_MAX_SECRET_LENGTH) ? SRS_MAX_SECRET_LENGTH : Ustrlen(secret));

    DEBUG(D_any)
      debug_printf("SRS initialized\n");
  }

  return OK;
}
/*
 * Delete the named wireless network: forwards the ESSID to network_action()
 * with the 'd' (delete) action code; no passphrase is needed for removal.
 */
void remove_network(char *essid)
{
	debug_printf(2, "%s()\n", __func__);

	network_action('d', essid, NULL);
}
/*
 * Prompt the user (over the PC USART) for one additional waypoint and append
 * it to wps[].  'nwps' is the current number of stored waypoints; returns the
 * new count (nwps + 1).
 *
 * Bugfix: the entry buffers are now zero-initialized so they are always
 * NUL-terminated before being handed to atof() -- the receive loops fill only
 * UENTLAT/UENTLON characters and never wrote a terminator, so atof() read
 * past the typed digits into indeterminate stack bytes (undefined behavior).
 * NOTE(review): assumes UENTLAT < ULAT_TYPE and UENTLON < ULONG_TYPE so a
 * zero byte remains after the received characters -- confirm against the
 * header definitions.
 */
int userwaypoint(WayPoint wps[], int nwps)
{
	int i = nwps;
	int j = nwps + 1;               /* 1-based waypoint number for prompts */
	int k;
	float uulat;
	float uulat_d;
	float uulon;
	float uulon_d;
	char entlat[ULAT_TYPE] = {0};   /* raw latitude text */
	char entlon[ULONG_TYPE] = {0};  /* raw longitude text */
	char entlon_d[ULONG_TYPE] = {0};/* longitude degrees (leading char dropped) */
	char *ptr_1 = "Please enter Latitude ";
	char *ptr_2 = ":";
	char *ptr_3 = "Please enter Longitude ";
	char *ptr_4 = " ";

	/* ---- latitude -------------------------------------------------- */
	USART_putstring(USART_PC, "\n\r");
	debug_print(ptr_1);
	debug_printi(j);
	debug_print(ptr_2);
	USART_putstring(USART_PC, "\n\r");
	for (k = 0; k < UENTLAT; k++)
		entlat[k] = USART_receive(USART_PC);

	uulat = atof(entlat);
	uulat_d = uulat;                /* degrees field equals the raw value */
	debug_printf(uulat); debug_print(ptr_4); debug_printf(uulat_d);
	wps[i].latitude = uulat;
	wps[i].latitude_deg = uulat;

	/* ---- longitude ------------------------------------------------- */
	USART_putstring(USART_PC, "\n\r");
	debug_print(ptr_3);
	debug_printi(j);
	debug_print(ptr_2);
	USART_putstring(USART_PC, "\n\r");
	for (k = 0; k < UENTLON; k++)
		entlon[k] = USART_receive(USART_PC);

	/* Degrees field: characters 1..8 of the entry (skip the leading one). */
	for (k = 0; k < 8; k++)
		entlon_d[k] = entlon[k + 1];
	entlon_d[8] = 0x00;

	uulon = atof(entlon);
	uulon_d = atof(entlon_d);
	debug_printf(uulon); debug_print(ptr_4); debug_printf(uulon_d);
	wps[i].longitude = uulon;
	wps[i].longitude_deg = uulon_d;

	return i + 1;
}
/* Check if the ball will collide with something if it is moved to the
 * specified coordinates. If so, the direction is changed and 1 is returned
 * to indicate that the caller should recalculate the new coordinates and
 * try again. If there was no collision it returns 0. If something exceptional
 * happens (eg. the last brick is destroyed or the last ball is lost) it
 * returns 2 to indicate that the caller should give up trying to move the
 * ball. */
int check_ball_collision(nbstate *state, coords *c)
{
	int i, bc;
	grid *g = state->grid;

	/* Check for a collision with the top of the game area: */
	if(c->y < state->ball.s->h + (2 * BALLS_BORDER) + state->scores.h) {
		/* Bounce the ball back down and ask the caller to try again: */
		state->ball.d = normalise_angle(M_PI - state->ball.d);
		return 1;
	}

	/* Check for a collision with the bottom of the game area: */
	if(c->y > state->canvasheight - state->ball.s->h) {
		/* If the solidfloor cheat is active, bounce the ball back up
		 * and ask the caller to try again: */
		if(state->flags.sf) {
			state->ball.d = normalise_angle(M_PI - state->ball.d);
			return 1;
		} else {
			/* Otherwise destroy the ball, move the new ball to
			 * the parked position (park_ball() is called by
			 * lost_ball()) and ask the caller to give up trying
			 * to move the ball: */
			lost_ball(state);
			move_ball(state);
			return 2;
		}
	}

	/* Check for a collision with the left hand side of the game area: */
	if(c->x < 0) {
		/* Bounce the ball back and ask the caller to try again: */
		state->ball.d = normalise_angle((2 * M_PI) - state->ball.d);
		return 1;
	}

	/* Check for a collision with the right hand side of the game area: */
	if(c->x > state->canvaswidth - state->ball.s->w) {
		/* Bounce the ball back and ask the caller to try again: */
		state->ball.d = normalise_angle((2 * M_PI) - state->ball.d);
		return 1;
	}

	/* Check for a collision with the bat: */
	if(c->y > state->canvasheight - state->batheight - state->ball.s->h &&
			c->x > state->batx -
			(state->batwidths[state->bat] / 2) -
			state->ball.s->w &&
			c->x < state->batx +
			(state->batwidths[state->bat] / 2)) {
		/* If the collision happened with the side of the bat instead
		 * of the top, we don't care so just tell the caller there
		 * was no collision: */
		if(state->ball.y > state->canvasheight - state->batheight -
				state->ball.s->h)
			return 0;
		/* If the StickyBat power-up is active, park the ball: */
		if(state->powertimes.stickybat) {
			park_ball(state);
			move_ball(state);
			return 2;
		} else {
			/* Otherwise bounce it back up and ask the caller to
			 * try again: */
			/* The new angle depends on where the ball hit the bat
			 * relative to its centre, scaled over the bat width
			 * (left-associative division: x / width / 2 is
			 * x / (2 * width)). */
			state->ball.d = normalise_angle(((c->x +
					(state->ball.s->w / 2) - state->batx) /
					state->batwidths[state->bat] / 2) *
					M_PI);
			return 1;
		}
	}

	/* Check for collisions with the bricks: */
	bc = 0; /* No collisions have happened yet. */
	/* For each brick in the grid: */
	for(i = 0; i < state->width * state->height; i++) {
		/* If there is a brick in this grid position and the ball
		 * intersects it: */
		if(g->b && c->y + state->ball.s->h > g->y &&
				c->y < g->y + state->brickheight &&
				c->x + state->ball.s->w > g->x &&
				c->x < g->x + state->brickwidth) {
			/* Perform the brick collision actions, and if
			 * something exceptional happens (ie. we destroy the
			 * last brick), return straight away asking the caller
			 * to give up trying to move the ball: */
			if(brick_collision(state, g)) return 2;
			/* Unless the NoBounce cheat is active, bounce the
			 * ball off the brick. Only do this on the first brick
			 * collision we find. */
			if(!state->flags.nb && !bc) {
				bc = 1;
				/* The face is deduced from the ball's CURRENT
				 * position (state->ball.x/y), not the proposed
				 * coordinates in 'c'. */
				/* Bounce off the left face: */
				if(state->ball.x + state->ball.s->w < g->x) {
					state->ball.d = normalise_angle((2 *
							M_PI) - state->ball.d);
				/* Bounce off the right face: */
				} else if(state->ball.x >= g->x +
						state->brickwidth) {
					state->ball.d = normalise_angle((2 *
							M_PI) - state->ball.d);
				/* Bounce off the upper face: */
				} else if(state->ball.y + state->ball.s->h <
						g->y) {
					state->ball.d = normalise_angle(M_PI -
							state->ball.d);
				/* Bounce off the lower face: */
				} else if(state->ball.y >= g->y +
						state->brickheight) {
					state->ball.d = normalise_angle(M_PI -
							state->ball.d);
				} else {
					/* This shouldn't happen, but I don't
					 * trust the above algorithm 100%. */
					debug_printf ("Internal error: "
						"couldn't figure out brick "
						"collision face\n");
				}
			}
		}
		g++; /* Increment to the next grid position. */
	}

	/* If a brick collision occured, ask the caller to try again: */
	if(bc) return 1;

	return 0; /* Otherwise tell the caller that no collision occured. */
}
///////////////////////////////////////////////////////////////////////////////// // function definitions int user_waypoint_test (WayPoint wps[],int numwps) { int i; int uj = numwps + 1; int uloop1 = 1; int userchk1; int userchk2; float ulat; float ulon; float ulat_d; float ulat_dist; float ulon_dist; float ulon_d; char entwp[3]; char *ptr_entwp = entwp; char entlat[ULAT_TYPE]; char *ptr_entlat = entlat; char entlat_dist[ULAT_TYPE]; char *ptr_entlat_dist = entlat_dist; char entlon[ULONG_TYPE]; char *ptr_entlon = entlon; char entlon_dist[ULONG_TYPE]; char *ptr_entlon_dist = entlon_dist; char entlon_d[ULONG_TYPE]; char *ptr_e_d = entlon_d; char yes[3] = "y"; char *ptr_yes = yes; char no[3] = "n"; char *ptr_no = no; char *ptr_1 = "Please enter Latitude "; char *ptr_2 = ":"; char *ptr_3 = "Please enter Longitude "; char *ptr_4 = "Would you like to enter another way point? (y or n):"; char *ptr_6 = "Finished entering way points"; char *ptr_7 = "Invalid, please try again"; char *ptr_8 = " "; // user enter first lat USART_putstring(USART_PC,"\n\r"); debug_print(ptr_1); debug_printi(uj); debug_print(ptr_2);USART_putstring(USART_PC,"\n\r"); for (i = 0; i < UENTLAT; i++) { entlat[i] = USART_receive(USART_PC); } /*entlat_dist[0]=entlat[0]; entlat_dist[1]=entlat[1]; entlat_dist[2]=entlat[3]; entlat_dist[3]=entlat[4]; entlat_dist[4]=entlat[5]; entlat_dist[5]=entlat[6]; entlat_dist[6]=entlat[7]; entlat_dist[7]=0x00;*/ ulat = atof (ptr_entlat); //ulat_dist = atof (ptr_entlat_dist); ulat_d = ulat; debug_printf(ulat);debug_print(ptr_8);debug_printf(ulat_d); wps[numwps].latitude = ulat; //wps[numwps].latitude_dist = ulat_dist; wps[numwps].latitude_deg = ulat_d; // user enter first long USART_putstring(USART_PC,"\n\r"); debug_print(ptr_3); debug_printi(uj); debug_print(ptr_2);USART_putstring(USART_PC,"\n\r"); for (i = 0; i < UENTLON; i++) { entlon[i] = USART_receive(USART_PC); } /*entlon_dist[0]=entlon[0]; entlon_dist[1]=entlon[1]; entlon_dist[2]=entlon[2]; entlon_dist[3]=entlon[4]; 
entlon_dist[4]=entlon[5]; entlon_dist[5]=entlon[6]; entlon_dist[6]=entlon[7]; entlon_dist[7]=entlon[8]; entlon_dist[8]=0x00;*/ entlon_d[0]=entlon[1]; entlon_d[1]=entlon[2]; entlon_d[2]=entlon[3]; entlon_d[3]=entlon[4]; entlon_d[4]=entlon[5]; entlon_d[5]=entlon[6]; entlon_d[6]=entlon[7]; entlon_d[7]=entlon[8]; entlon_d[8]=0x00; ulon = atof (ptr_entlon); //ulon_dist = atof (ptr_entlon_dist); ulon_d = atof (ptr_e_d); debug_printf(ulon);debug_print(ptr_8);debug_printf(ulon_d); wps[numwps].longitude = ulon; //wps[numwps].longitude_dist = ulon_dist; wps[numwps].longitude_deg = ulon_d; //----------------------------------------------------------- numwps = numwps + 1; // prompt if user wants to enter another way point USART_putstring(USART_PC,"\n\r"); debug_println(ptr_4); for (i = 0; i < 1; i++) { entwp[i] = USART_receive(USART_PC); } entwp[1]=0x00; debug_println(ptr_entwp); while (uloop1 == 1) { userchk1 = strcmp(ptr_entwp,ptr_yes); userchk2 = strcmp(ptr_entwp,ptr_no); if(userchk1 == 0) { numwps = userwaypoint(wps,numwps); USART_putstring(USART_PC,"\n\r");debug_println(ptr_4); for (i = 0; i < 1; i++) { entwp[i] = USART_receive(USART_PC); } debug_println(ptr_entwp); } else if (userchk2 == 0) { debug_println(ptr_6); uloop1 = 0; } else { debug_println(ptr_7); USART_putstring(USART_PC,"\n\r");debug_println(ptr_4); for (i = 0; i < 1; i++) { entwp[i] = USART_receive(USART_PC); } debug_println(ptr_entwp); } } return(numwps); }
/*
 * Evaluate DMARC for the current message using libopendmarc.
 *
 * Feeds the header-From: domain, the SPF outcome and every DKIM signature
 * result into the opendmarc policy context, queries the published DMARC
 * record, maps the enforced policy onto Exim's dmarc_* expansion variables,
 * logs the result, writes the history file and dispatches forensic reports.
 * Always returns OK; failures are logged and/or recorded in the dmarc_*
 * status variables rather than returned.
 *
 * NOTE(review): libdm_status, dmarc_pctx, da, sa, action,
 * dmarc_spf_ares_result, spf_sender_domain etc. are not declared here --
 * presumably file-scope state shared with the ACL/expansion code; confirm.
 */
int dmarc_process() {
int sr, origin;             /* used in SPF section */
int dmarc_spf_result = 0;   /* stores spf into dmarc conn ctx */
int tmp_ans, c;
pdkim_signature *sig = NULL;
BOOL has_dmarc_record = TRUE;
u_char **ruf; /* forensic report addressees, if called for */

/* ACLs have "control=dmarc_disable_verify" */
if (dmarc_disable_verify)
  {
  dmarc_ar_header = dmarc_auth_results_header(from_header, NULL);
  return OK;
  }

/* Store the header From: sender domain for this part of DMARC.
 * If there is no from_header struct, then it's likely this message
 * is locally generated and relying on fixups to add it.  Just skip
 * the entire DMARC system if we can't find a From: header....or if
 * there was a previous error.  */
if (!from_header || dmarc_abort)
  dmarc_abort = TRUE;
else
  {
  uschar * errormsg;
  int dummy, domain;
  uschar * p;
  uschar saveend;

  parse_allow_group = TRUE;
  /* Temporarily terminate the header at the end of the first address
   * so parse_extract_address() sees a single address. */
  p = parse_find_address_end(from_header->text, FALSE);
  saveend = *p;
  *p = '\0';
  if ((header_from_sender = parse_extract_address(from_header->text, &errormsg,
                              &dummy, &dummy, &domain, FALSE)))
    header_from_sender += domain;   /* advance past local part to the domain */
  *p = saveend;

  /* The opendmarc library extracts the domain from the email address, but
   * only try to store it if it's not empty.  Otherwise, skip out of DMARC. */
  if (!header_from_sender || (strcmp( CCS header_from_sender, "") == 0))
    dmarc_abort = TRUE;
  libdm_status = dmarc_abort ?
    DMARC_PARSE_OKAY :
    opendmarc_policy_store_from_domain(dmarc_pctx, header_from_sender);
  if (libdm_status != DMARC_PARSE_OKAY)
    {
    log_write(0, LOG_MAIN|LOG_PANIC,
      "failure to store header From: in DMARC: %s, header was '%s'",
      opendmarc_policy_status_to_str(libdm_status), from_header->text);
    dmarc_abort = TRUE;
    }
  }

/* Skip DMARC if connection is SMTP Auth. Temporarily, admin should
 * instead do this in the ACLs.  */
if (!dmarc_abort && !sender_host_authenticated)
  {
  /* Use the envelope sender domain for this part of DMARC */
  spf_sender_domain = expand_string(US"$sender_address_domain");
  if (!spf_response)
    {
    /* No spf data means null envelope sender so generate a domain name
     * from the sender_helo_name  */
    if (!spf_sender_domain)
      {
      spf_sender_domain = sender_helo_name;
      log_write(0, LOG_MAIN,
        "DMARC using synthesized SPF sender domain = %s\n", spf_sender_domain);
      DEBUG(D_receive)
        debug_printf("DMARC using synthesized SPF sender domain = %s\n",
          spf_sender_domain);
      }
    dmarc_spf_result = DMARC_POLICY_SPF_OUTCOME_NONE;
    dmarc_spf_ares_result = ARES_RESULT_UNKNOWN;
    origin = DMARC_POLICY_SPF_ORIGIN_HELO;
    spf_human_readable = US"";
    }
  else
    {
    /* Map libspf2 result codes onto the opendmarc outcome and the
     * Authentication-Results code. */
    sr = spf_response->result;
    dmarc_spf_result = sr == SPF_RESULT_NEUTRAL  ? DMARC_POLICY_SPF_OUTCOME_NONE :
                       sr == SPF_RESULT_PASS     ? DMARC_POLICY_SPF_OUTCOME_PASS :
                       sr == SPF_RESULT_FAIL     ? DMARC_POLICY_SPF_OUTCOME_FAIL :
                       sr == SPF_RESULT_SOFTFAIL ? DMARC_POLICY_SPF_OUTCOME_TMPFAIL :
                       DMARC_POLICY_SPF_OUTCOME_NONE;
    dmarc_spf_ares_result = sr == SPF_RESULT_NEUTRAL   ? ARES_RESULT_NEUTRAL :
                            sr == SPF_RESULT_PASS      ? ARES_RESULT_PASS :
                            sr == SPF_RESULT_FAIL      ? ARES_RESULT_FAIL :
                            sr == SPF_RESULT_SOFTFAIL  ? ARES_RESULT_SOFTFAIL :
                            sr == SPF_RESULT_NONE      ? ARES_RESULT_NONE :
                            sr == SPF_RESULT_TEMPERROR ? ARES_RESULT_TEMPERROR :
                            sr == SPF_RESULT_PERMERROR ? ARES_RESULT_PERMERROR :
                            ARES_RESULT_UNKNOWN;
    origin = DMARC_POLICY_SPF_ORIGIN_MAILFROM;
    spf_human_readable = (uschar *)spf_response->header_comment;
    DEBUG(D_receive)
      debug_printf("DMARC using SPF sender domain = %s\n", spf_sender_domain);
    }
  if (strcmp( CCS spf_sender_domain, "") == 0)
    dmarc_abort = TRUE;
  if (!dmarc_abort)
    {
    libdm_status = opendmarc_policy_store_spf(dmarc_pctx, spf_sender_domain,
                     dmarc_spf_result, origin, spf_human_readable);
    if (libdm_status != DMARC_PARSE_OKAY)
      log_write(0, LOG_MAIN|LOG_PANIC, "failure to store spf for DMARC: %s",
        opendmarc_policy_status_to_str(libdm_status));
    }

  /* Now we cycle through the dkim signature results and put into
   * the opendmarc context, further building the DMARC reply.  */
  sig = dkim_signatures;
  dkim_history_buffer = US"";
  while (sig)
    {
    int dkim_result, dkim_ares_result, vs, ves;
    vs = sig->verify_status;
    ves = sig->verify_ext_status;
    dkim_result = vs == PDKIM_VERIFY_PASS    ? DMARC_POLICY_DKIM_OUTCOME_PASS :
                  vs == PDKIM_VERIFY_FAIL    ? DMARC_POLICY_DKIM_OUTCOME_FAIL :
                  vs == PDKIM_VERIFY_INVALID ? DMARC_POLICY_DKIM_OUTCOME_TMPFAIL :
                  DMARC_POLICY_DKIM_OUTCOME_NONE;
    libdm_status = opendmarc_policy_store_dkim(dmarc_pctx, (uschar *)sig->domain,
                     dkim_result, US"");
    DEBUG(D_receive)
      debug_printf("DMARC adding DKIM sender domain = %s\n", sig->domain);
    if (libdm_status != DMARC_PARSE_OKAY)
      log_write(0, LOG_MAIN|LOG_PANIC,
        "failure to store dkim (%s) for DMARC: %s",
        sig->domain, opendmarc_policy_status_to_str(libdm_status));

    /* For an INVALID verification, refine the A-R code from the extended
     * status; all key-related problems map to PERMERROR. */
    dkim_ares_result = vs == PDKIM_VERIFY_PASS    ? ARES_RESULT_PASS :
                       vs == PDKIM_VERIFY_FAIL    ? ARES_RESULT_FAIL :
                       vs == PDKIM_VERIFY_NONE    ? ARES_RESULT_NONE :
                       vs == PDKIM_VERIFY_INVALID ?
                         ves == PDKIM_VERIFY_INVALID_PUBKEY_UNAVAILABLE ? ARES_RESULT_PERMERROR :
                         ves == PDKIM_VERIFY_INVALID_BUFFER_SIZE        ? ARES_RESULT_PERMERROR :
                         ves == PDKIM_VERIFY_INVALID_PUBKEY_PARSING     ? ARES_RESULT_PERMERROR :
                         ARES_RESULT_UNKNOWN :
                       ARES_RESULT_UNKNOWN;
    dkim_history_buffer = string_sprintf("%sdkim %s %d\n", dkim_history_buffer,
                            sig->domain, dkim_ares_result);
    sig = sig->next;
    }

  libdm_status = opendmarc_policy_query_dmarc(dmarc_pctx, US"");
  switch (libdm_status)
    {
    case DMARC_DNS_ERROR_NXDOMAIN:
    case DMARC_DNS_ERROR_NO_RECORD:
      DEBUG(D_receive)
        debug_printf("DMARC no record found for %s\n", header_from_sender);
      has_dmarc_record = FALSE;
      break;
    case DMARC_PARSE_OKAY:
      DEBUG(D_receive)
        debug_printf("DMARC record found for %s\n", header_from_sender);
      break;
    case DMARC_PARSE_ERROR_BAD_VALUE:
      DEBUG(D_receive)
        debug_printf("DMARC record parse error for %s\n", header_from_sender);
      has_dmarc_record = FALSE;
      break;
    default:
      /* everything else, skip dmarc */
      DEBUG(D_receive)
        debug_printf("DMARC skipping (%d), unsure what to do with %s",
          libdm_status, from_header->text);
      has_dmarc_record = FALSE;
      break;
    }

  /* Store the policy string in an expandable variable. */
  libdm_status = opendmarc_policy_fetch_p(dmarc_pctx, &tmp_ans);
  for (c = 0; dmarc_policy_description[c].name; c++)
    if (tmp_ans == dmarc_policy_description[c].value)
      {
      dmarc_domain_policy = string_sprintf("%s",dmarc_policy_description[c].name);
      break;
      }

  /* Can't use exim's string manipulation functions so allocate memory
   * for libopendmarc using its max hostname length definition.  */
  /* NOTE(review): calloc() result is used unchecked -- on OOM the library
   * call below would write through NULL; consider a check. */
  uschar *dmarc_domain = (uschar *)calloc(DMARC_MAXHOSTNAMELEN, sizeof(uschar));
  libdm_status = opendmarc_policy_fetch_utilized_domain(dmarc_pctx,
                   dmarc_domain, DMARC_MAXHOSTNAMELEN-1);
  dmarc_used_domain = string_copy(dmarc_domain);
  free(dmarc_domain);
  if (libdm_status != DMARC_PARSE_OKAY)
    log_write(0, LOG_MAIN|LOG_PANIC,
      "failure to read domainname used for DMARC lookup: %s",
      opendmarc_policy_status_to_str(libdm_status));

  /* Map the enforced policy onto Exim's status/expansion variables. */
  libdm_status = opendmarc_get_policy_to_enforce(dmarc_pctx);
  dmarc_policy = libdm_status;
  switch(libdm_status)
    {
    case DMARC_POLICY_ABSENT:     /* No DMARC record found */
      dmarc_status = US"norecord";
      dmarc_pass_fail = US"none";
      dmarc_status_text = US"No DMARC record";
      action = DMARC_RESULT_ACCEPT;
      break;
    case DMARC_FROM_DOMAIN_ABSENT:    /* No From: domain */
      dmarc_status = US"nofrom";
      dmarc_pass_fail = US"temperror";
      dmarc_status_text = US"No From: domain found";
      action = DMARC_RESULT_ACCEPT;
      break;
    case DMARC_POLICY_NONE:       /* Accept and report */
      dmarc_status = US"none";
      dmarc_pass_fail = US"none";
      dmarc_status_text = US"None, Accept";
      action = DMARC_RESULT_ACCEPT;
      break;
    case DMARC_POLICY_PASS:       /* Explicit accept */
      dmarc_status = US"accept";
      dmarc_pass_fail = US"pass";
      dmarc_status_text = US"Accept";
      action = DMARC_RESULT_ACCEPT;
      break;
    case DMARC_POLICY_REJECT:     /* Explicit reject */
      dmarc_status = US"reject";
      dmarc_pass_fail = US"fail";
      dmarc_status_text = US"Reject";
      action = DMARC_RESULT_REJECT;
      break;
    case DMARC_POLICY_QUARANTINE:     /* Explicit quarantine */
      dmarc_status = US"quarantine";
      dmarc_pass_fail = US"fail";
      dmarc_status_text = US"Quarantine";
      action = DMARC_RESULT_QUARANTINE;
      break;
    default:
      dmarc_status = US"temperror";
      dmarc_pass_fail = US"temperror";
      dmarc_status_text = US"Internal Policy Error";
      action = DMARC_RESULT_TEMPFAIL;
      break;
    }

  libdm_status = opendmarc_policy_fetch_alignment(dmarc_pctx, &da, &sa);
  if (libdm_status != DMARC_PARSE_OKAY)
    log_write(0, LOG_MAIN|LOG_PANIC, "failure to read DMARC alignment: %s",
      opendmarc_policy_status_to_str(libdm_status));

  if (has_dmarc_record == TRUE)
    {
    log_write(0, LOG_MAIN, "DMARC results: spf_domain=%s dmarc_domain=%s "
                           "spf_align=%s dkim_align=%s enforcement='%s'",
                           spf_sender_domain, dmarc_used_domain,
                           (sa==DMARC_POLICY_SPF_ALIGNMENT_PASS) ?"yes":"no",
                           (da==DMARC_POLICY_DKIM_ALIGNMENT_PASS)?"yes":"no",
                           dmarc_status_text);
    history_file_status = dmarc_write_history_file();
    /* Now get the forensic reporting addresses, if any */
    ruf = opendmarc_policy_fetch_ruf(dmarc_pctx, NULL, 0, 1);
    dmarc_send_forensic_report(ruf);
    }
  }

/* set some global variables here */
dmarc_ar_header = dmarc_auth_results_header(from_header, NULL);

/* shut down libopendmarc */
if ( dmarc_pctx != NULL )
  (void) opendmarc_policy_connect_shutdown(dmarc_pctx);
if ( dmarc_disable_verify == FALSE )
  (void) opendmarc_policy_library_shutdown(&dmarc_ctx);

return OK;
}
/*
 * Create the VGPU10 device-side rasterizer state object for 'rast'.
 * Retries the define once after a flush if the first attempt fails
 * (presumably due to a full command buffer -- confirm).
 */
static void
define_rasterizer_object(struct svga_context *svga,
                         struct svga_rasterizer_state *rast)
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   unsigned fill_mode = translate_fill_mode(rast->templ.fill_front);
   const unsigned cull_mode = translate_cull_mode(rast->templ.cull_face);
   const int depth_bias = rast->templ.offset_units;
   const float slope_scaled_depth_bias = rast->templ.offset_scale;
   /* PIPE_CAP_POLYGON_OFFSET_CLAMP not supported: */
   const float depth_bias_clamp = 0.0;
   const float line_width = rast->templ.line_width > 0.0f ?
      rast->templ.line_width : 1.0f;
   const uint8 line_factor = rast->templ.line_stipple_enable ?
      rast->templ.line_stipple_factor : 0;
   const uint16 line_pattern = rast->templ.line_stipple_enable ?
      rast->templ.line_stipple_pattern : 0;
   unsigned try;

   rast->id = util_bitmask_add(svga->rast_object_id_bm);

   if (rast->templ.fill_front != rast->templ.fill_back) {
      /* The VGPU10 device can't handle different front/back fill modes.
       * We'll handle that with a swtnl/draw fallback.  But we need to
       * make sure we always fill triangles in that case.
       */
      fill_mode = SVGA3D_FILLMODE_FILL;
   }

   for (try = 0; try < 2; try++) {
      const uint8 pv_last = !rast->templ.flatshade_first &&
         svgascreen->haveProvokingVertex;
      enum pipe_error ret =
         SVGA3D_vgpu10_DefineRasterizerState(svga->swc,
                                             rast->id,
                                             fill_mode,
                                             cull_mode,
                                             rast->templ.front_ccw,
                                             depth_bias,
                                             depth_bias_clamp,
                                             slope_scaled_depth_bias,
                                             rast->templ.depth_clip_near,
                                             rast->templ.scissor,
                                             rast->templ.multisample,
                                             rast->templ.line_smooth,
                                             line_width,
                                             rast->templ.line_stipple_enable,
                                             line_factor,
                                             line_pattern,
                                             pv_last);
      if (ret == PIPE_OK)
         return;
      svga_context_flush(svga, NULL);
   }
}


/*
 * Gallium create_rasterizer_state hook: translate the generic
 * pipe_rasterizer_state into SVGA hardware state, deciding per feature
 * whether the device can handle it or the swtnl 'draw' fallback pipeline
 * is needed (recorded in rast->need_pipeline with a reason string).
 * Returns the new state object, or NULL on allocation failure.
 */
static void *
svga_create_rasterizer_state(struct pipe_context *pipe,
                             const struct pipe_rasterizer_state *templ)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_rasterizer_state *rast = CALLOC_STRUCT(svga_rasterizer_state);
   struct svga_screen *screen = svga_screen(pipe->screen);

   if (!rast)
      return NULL;

   /* need this for draw module. */
   rast->templ = *templ;

   rast->shademode = svga_translate_flatshade(templ->flatshade);
   rast->cullmode = svga_translate_cullmode(templ->cull_face, templ->front_ccw);
   rast->scissortestenable = templ->scissor;
   rast->multisampleantialias = templ->multisample;
   rast->antialiasedlineenable = templ->line_smooth;
   rast->lastpixel = templ->line_last_pixel;
   rast->pointsprite = templ->point_quad_rasterization;

   if (rast->templ.multisample) {
      /* The OpenGL 3.0 spec says points are always drawn as circles when
       * MSAA is enabled.  Note that our implementation isn't 100% correct,
       * though.  Our smooth point implementation involves drawing a square,
       * computing fragment distance from point center, then attenuating
       * the fragment alpha value.  We should not attenuate alpha if msaa
       * is enabled.  We should kill fragments entirely outside the circle
       * and let the GPU compute per-fragment coverage.
       * But as-is, our implementation gives acceptable results and passes
       * Piglit's MSAA point smooth test.
       */
      rast->templ.point_smooth = TRUE;
   }

   if (templ->point_smooth) {
      /* For smooth points we need to generate fragments for at least
       * a 2x2 region.  Otherwise the quad we draw may be too small and
       * we may generate no fragments at all.
       */
      rast->pointsize = MAX2(2.0f, templ->point_size);
   }
   else {
      rast->pointsize = templ->point_size;
   }

   rast->hw_fillmode = PIPE_POLYGON_MODE_FILL;

   /* Use swtnl + decomposition implement these:
    */
   if (templ->line_width <= screen->maxLineWidth) {
      /* pass line width to device */
      rast->linewidth = MAX2(1.0F, templ->line_width);
   }
   else if (svga->debug.no_line_width) {
      /* nothing */
   }
   else {
      /* use 'draw' pipeline for wide line */
      rast->need_pipeline |= SVGA_PIPELINE_FLAG_LINES;
      rast->need_pipeline_lines_str = "line width";
   }

   if (templ->line_stipple_enable) {
      if (screen->haveLineStipple || svga->debug.force_hw_line_stipple) {
         SVGA3dLinePattern lp;
         lp.repeat = templ->line_stipple_factor + 1;
         lp.pattern = templ->line_stipple_pattern;
         rast->linepattern = lp.uintValue;
      }
      else {
         /* use 'draw' module to decompose into short line segments */
         rast->need_pipeline |= SVGA_PIPELINE_FLAG_LINES;
         rast->need_pipeline_lines_str = "line stipple";
      }
   }

   if (!svga_have_vgpu10(svga) && templ->point_smooth) {
      rast->need_pipeline |= SVGA_PIPELINE_FLAG_POINTS;
      rast->need_pipeline_points_str = "smooth points";
   }

   if (templ->line_smooth && !screen->haveLineSmooth) {
      /*
       * XXX: Enabling the pipeline slows down performance immensely, so ignore
       * line smooth state, where there is very little visual improvement.
       * Smooth lines will still be drawn for wide lines.
       */
#if 0
      rast->need_pipeline |= SVGA_PIPELINE_FLAG_LINES;
      rast->need_pipeline_lines_str = "smooth lines";
#endif
   }

   {
      int fill_front = templ->fill_front;
      int fill_back = templ->fill_back;
      int fill = PIPE_POLYGON_MODE_FILL;
      boolean offset_front = util_get_offset(templ, fill_front);
      boolean offset_back = util_get_offset(templ, fill_back);
      boolean offset = FALSE;

      /* Pick the effective fill mode / polygon offset from whichever face
       * is actually drawn (the non-culled one). */
      switch (templ->cull_face) {
      case PIPE_FACE_FRONT_AND_BACK:
         offset = FALSE;
         fill = PIPE_POLYGON_MODE_FILL;
         break;

      case PIPE_FACE_FRONT:
         offset = offset_back;
         fill = fill_back;
         break;

      case PIPE_FACE_BACK:
         offset = offset_front;
         fill = fill_front;
         break;

      case PIPE_FACE_NONE:
         if (fill_front != fill_back || offset_front != offset_back) {
            /* Always need the draw module to work out different
             * front/back fill modes:
             */
            rast->need_pipeline |= SVGA_PIPELINE_FLAG_TRIS;
            rast->need_pipeline_tris_str = "different front/back fillmodes";
            fill = PIPE_POLYGON_MODE_FILL;
         }
         else {
            offset = offset_front;
            fill = fill_front;
         }
         break;

      default:
         assert(0);
         break;
      }

      /* Unfilled primitive modes aren't implemented on all virtual
       * hardware.  We can do some unfilled processing with index
       * translation, but otherwise need the draw module:
       */
      if (fill != PIPE_POLYGON_MODE_FILL &&
          (templ->flatshade || templ->light_twoside || offset)) {
         fill = PIPE_POLYGON_MODE_FILL;
         rast->need_pipeline |= SVGA_PIPELINE_FLAG_TRIS;
         rast->need_pipeline_tris_str = "unfilled primitives with no index manipulation";
      }

      /* If we are decomposing to lines, and lines need the pipeline,
       * then we also need the pipeline for tris.
       */
      if (fill == PIPE_POLYGON_MODE_LINE &&
          (rast->need_pipeline & SVGA_PIPELINE_FLAG_LINES)) {
         fill = PIPE_POLYGON_MODE_FILL;
         rast->need_pipeline |= SVGA_PIPELINE_FLAG_TRIS;
         rast->need_pipeline_tris_str = "decomposing lines";
      }

      /* Similarly for points:
       */
      if (fill == PIPE_POLYGON_MODE_POINT &&
          (rast->need_pipeline & SVGA_PIPELINE_FLAG_POINTS)) {
         fill = PIPE_POLYGON_MODE_FILL;
         rast->need_pipeline |= SVGA_PIPELINE_FLAG_TRIS;
         rast->need_pipeline_tris_str = "decomposing points";
      }

      if (offset) {
         rast->slopescaledepthbias = templ->offset_scale;
         rast->depthbias = templ->offset_units;
      }

      rast->hw_fillmode = fill;
   }

   if (rast->need_pipeline & SVGA_PIPELINE_FLAG_TRIS) {
      /* Turn off stuff which will get done in the draw module:
       */
      rast->hw_fillmode = PIPE_POLYGON_MODE_FILL;
      rast->slopescaledepthbias = 0;
      rast->depthbias = 0;
   }

   if (0 && rast->need_pipeline) {
      debug_printf("svga: rast need_pipeline = 0x%x\n", rast->need_pipeline);
      debug_printf(" pnts: %s \n", rast->need_pipeline_points_str);
      debug_printf(" lins: %s \n", rast->need_pipeline_lines_str);
      debug_printf(" tris: %s \n", rast->need_pipeline_tris_str);
   }

   if (svga_have_vgpu10(svga)) {
      define_rasterizer_object(svga, rast);
   }

   if (templ->poly_smooth) {
      pipe_debug_message(&svga->debug.callback, CONFORMANCE,
                         "GL_POLYGON_SMOOTH not supported");
   }

   svga->hud.num_rasterizer_objects++;
   SVGA_STATS_COUNT_INC(svga_screen(svga->pipe.screen)->sws,
                        SVGA_STATS_COUNT_RASTERIZERSTATE);

   return rast;
}


/*
 * Gallium bind_rasterizer_state hook: make 'state' current and flag the
 * derived state (stipple / depth-stencil-alpha) that must be revalidated
 * when the relevant template fields changed.
 */
static void
svga_bind_rasterizer_state(struct pipe_context *pipe, void *state)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_rasterizer_state *raster = (struct svga_rasterizer_state *)state;

   if (!raster || !svga->curr.rast) {
      svga->dirty |= SVGA_NEW_STIPPLE | SVGA_NEW_DEPTH_STENCIL_ALPHA;
   }
   else {
      if (raster->templ.poly_stipple_enable !=
          svga->curr.rast->templ.poly_stipple_enable) {
         svga->dirty |= SVGA_NEW_STIPPLE;
      }
      if (raster->templ.rasterizer_discard !=
          svga->curr.rast->templ.rasterizer_discard) {
         svga->dirty |= SVGA_NEW_DEPTH_STENCIL_ALPHA;
      }
   }
   svga->curr.rast = raster;

   svga->dirty |= SVGA_NEW_RAST;
}


/*
 * Gallium delete_rasterizer_state hook: destroy the device object on
 * VGPU10 (retrying once after a flush), release its id and free the state.
 */
static void
svga_delete_rasterizer_state(struct pipe_context *pipe, void *state)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_rasterizer_state *raster =
      (struct svga_rasterizer_state *) state;

   if (svga_have_vgpu10(svga)) {
      enum pipe_error ret =
         SVGA3D_vgpu10_DestroyRasterizerState(svga->swc, raster->id);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_vgpu10_DestroyRasterizerState(svga->swc, raster->id);
      }

      if (raster->id == svga->state.hw_draw.rasterizer_id)
         svga->state.hw_draw.rasterizer_id = SVGA3D_INVALID_ID;

      util_bitmask_clear(svga->rast_object_id_bm, raster->id);
   }

   FREE(state);
   svga->hud.num_rasterizer_objects--;
}


/* Plug the rasterizer-state hooks into the pipe_context vtable. */
void
svga_init_rasterizer_functions(struct svga_context *svga)
{
   svga->pipe.create_rasterizer_state = svga_create_rasterizer_state;
   svga->pipe.bind_rasterizer_state = svga_bind_rasterizer_state;
   svga->pipe.delete_rasterizer_state = svga_delete_rasterizer_state;
}
/**
 * Gallium pipe_screen::get_param hook for virgl.
 *
 * Returns the integer value of a driver capability.  Values are either
 * hard-coded (features the renderer always/never supports) or taken from
 * the capability set (caps.v1) negotiated with the host renderer.
 */
static int virgl_get_param(struct pipe_screen *screen, enum pipe_cap param)
{
   struct virgl_screen *vscreen = virgl_screen(screen);
   switch (param) {
   case PIPE_CAP_NPOT_TEXTURES:
      return 1;
   case PIPE_CAP_TWO_SIDED_STENCIL:
      return 1;
   case PIPE_CAP_SM3:
      return 1;
   case PIPE_CAP_ANISOTROPIC_FILTER:
      return 1;
   case PIPE_CAP_POINT_SPRITE:
      return 1;
   /* Limits advertised by the host renderer: */
   case PIPE_CAP_MAX_RENDER_TARGETS:
      return vscreen->caps.caps.v1.max_render_targets;
   case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
      return vscreen->caps.caps.v1.max_dual_source_render_targets;
   case PIPE_CAP_OCCLUSION_QUERY:
      return vscreen->caps.caps.v1.bset.occlusion_query;
   case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
      return vscreen->caps.caps.v1.bset.mirror_clamp;
   case PIPE_CAP_TEXTURE_SHADOW_MAP:
      return 1;
   case PIPE_CAP_TEXTURE_SWIZZLE:
      return 1;
   case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
      return SP_MAX_TEXTURE_2D_LEVELS;
   case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
      return SP_MAX_TEXTURE_3D_LEVELS;
   case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
      return SP_MAX_TEXTURE_CUBE_LEVELS;
   case PIPE_CAP_BLEND_EQUATION_SEPARATE:
      return 1;
   case PIPE_CAP_INDEP_BLEND_ENABLE:
      return vscreen->caps.caps.v1.bset.indep_blend_enable;
   case PIPE_CAP_INDEP_BLEND_FUNC:
      return vscreen->caps.caps.v1.bset.indep_blend_func;
   /* All four coord-convention caps map to one host feature bit. */
   case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
   case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
   case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
   case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
      return vscreen->caps.caps.v1.bset.fragment_coord_conventions;
   case PIPE_CAP_DEPTH_CLIP_DISABLE:
      return vscreen->caps.caps.v1.bset.depth_clip_disable;
   case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
      return vscreen->caps.caps.v1.max_streamout_buffers;
   case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
   case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
      return 16*4;
   case PIPE_CAP_PRIMITIVE_RESTART:
      return vscreen->caps.caps.v1.bset.primitive_restart;
   case PIPE_CAP_SHADER_STENCIL_EXPORT:
      return vscreen->caps.caps.v1.bset.shader_stencil_export;
   case PIPE_CAP_TGSI_INSTANCEID:
   case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
      return 1;
   case PIPE_CAP_SEAMLESS_CUBE_MAP:
      return vscreen->caps.caps.v1.bset.seamless_cube_map;
   case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
      return vscreen->caps.caps.v1.bset.seamless_cube_map_per_texture;
   case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
      return vscreen->caps.caps.v1.max_texture_array_layers;
   case PIPE_CAP_MIN_TEXEL_OFFSET:
   case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
      return -8;
   case PIPE_CAP_MAX_TEXEL_OFFSET:
   case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
      return 7;
   case PIPE_CAP_CONDITIONAL_RENDER:
      return vscreen->caps.caps.v1.bset.conditional_render;
   case PIPE_CAP_TEXTURE_BARRIER:
      return 0;
   case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
      return 1;
   case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
   case PIPE_CAP_VERTEX_COLOR_CLAMPED:
      return vscreen->caps.caps.v1.bset.color_clamping;
   case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
      return 1;
   case PIPE_CAP_GLSL_FEATURE_LEVEL:
      return vscreen->caps.caps.v1.glsl_level;
   case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
      return 0;
   case PIPE_CAP_COMPUTE:
      return 0;
   case PIPE_CAP_USER_VERTEX_BUFFERS:
      return 0;
   case PIPE_CAP_USER_INDEX_BUFFERS:
   case PIPE_CAP_USER_CONSTANT_BUFFERS:
      return 1;
   case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
      return 16;
   case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
      return vscreen->caps.caps.v1.bset.streamout_pause_resume;
   case PIPE_CAP_START_INSTANCE:
      return vscreen->caps.caps.v1.bset.start_instance;
   case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
   case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
      return 0;
   case PIPE_CAP_QUERY_TIMESTAMP:
      return 1;
   case PIPE_CAP_QUERY_TIME_ELAPSED:
      return 0;
   case PIPE_CAP_TGSI_TEXCOORD:
      return 0;
   case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
      return VIRGL_MAP_BUFFER_ALIGNMENT;
   case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
      return vscreen->caps.caps.v1.max_tbo_size > 0;
   case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
      return 0;
   case PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY:
      return 0;
   case PIPE_CAP_CUBE_MAP_ARRAY:
      return vscreen->caps.caps.v1.bset.cube_map_array;
   case PIPE_CAP_TEXTURE_MULTISAMPLE:
      return vscreen->caps.caps.v1.bset.texture_multisample;
   case PIPE_CAP_MAX_VIEWPORTS:
      return vscreen->caps.caps.v1.max_viewports;
   case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
      return vscreen->caps.caps.v1.max_tbo_size;
   case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
   case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
   case PIPE_CAP_ENDIANNESS:
      return 0;
   case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
   case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
      return 1;
   case PIPE_CAP_TGSI_VS_LAYER_VIEWPORT:
      return 0;
   case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
      return 256;
   case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
      return 16384;
   case PIPE_CAP_TEXTURE_QUERY_LOD:
      return vscreen->caps.caps.v1.bset.texture_query_lod;
   case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
      return vscreen->caps.caps.v1.max_texture_gather_components;
   /* Everything below is not (yet) supported by virgl: */
   case PIPE_CAP_TEXTURE_GATHER_SM5:
   case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
   case PIPE_CAP_SAMPLE_SHADING:
   case PIPE_CAP_FAKE_SW_MSAA:
   case PIPE_CAP_TEXTURE_GATHER_OFFSETS:
   case PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION:
   case PIPE_CAP_MAX_VERTEX_STREAMS:
   case PIPE_CAP_DRAW_INDIRECT:
   case PIPE_CAP_MULTI_DRAW_INDIRECT:
   case PIPE_CAP_MULTI_DRAW_INDIRECT_PARAMS:
   case PIPE_CAP_TGSI_FS_FINE_DERIVATIVE:
   case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
   case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
   case PIPE_CAP_SAMPLER_VIEW_TARGET:
   case PIPE_CAP_CLIP_HALFZ:
   case PIPE_CAP_VERTEXID_NOBASE:
   case PIPE_CAP_POLYGON_OFFSET_CLAMP:
   case PIPE_CAP_MULTISAMPLE_Z_RESOLVE:
   case PIPE_CAP_RESOURCE_FROM_USER_MEMORY:
   case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
   case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
   case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
   case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
   case PIPE_CAP_DEPTH_BOUNDS_TEST:
   case PIPE_CAP_TGSI_TXQS:
   case PIPE_CAP_FORCE_PERSAMPLE_INTERP:
   case PIPE_CAP_SHAREABLE_SHADERS:
   case PIPE_CAP_CLEAR_TEXTURE:
   case PIPE_CAP_DRAW_PARAMETERS:
   case PIPE_CAP_TGSI_PACK_HALF_FLOAT:
   case PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL:
   case PIPE_CAP_TGSI_FS_FACE_IS_INTEGER_SYSVAL:
   case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
   case PIPE_CAP_INVALIDATE_BUFFER:
   case PIPE_CAP_GENERATE_MIPMAP:
   case PIPE_CAP_SURFACE_REINTERPRET_BLOCKS:
   case PIPE_CAP_QUERY_BUFFER_OBJECT:
   case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:
   case PIPE_CAP_STRING_MARKER:
   case PIPE_CAP_QUERY_MEMORY_INFO:
   case PIPE_CAP_PCI_GROUP:
   case PIPE_CAP_PCI_BUS:
   case PIPE_CAP_PCI_DEVICE:
   case PIPE_CAP_PCI_FUNCTION:
   case PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT:
   case PIPE_CAP_ROBUST_BUFFER_ACCESS_BEHAVIOR:
   case PIPE_CAP_CULL_DISTANCE:
   case PIPE_CAP_PRIMITIVE_RESTART_FOR_PATCHES:
   case PIPE_CAP_TGSI_VOTE:
   case PIPE_CAP_MAX_WINDOW_RECTANGLES:
   case PIPE_CAP_POLYGON_OFFSET_UNITS_UNSCALED:
   case PIPE_CAP_VIEWPORT_SUBPIXEL_BITS:
   case PIPE_CAP_TGSI_ARRAY_COMPONENTS:
      return 0;
   /* 0x1af4:0x1010 is the Red Hat/virtio GPU PCI id. */
   case PIPE_CAP_VENDOR_ID:
      return 0x1af4;
   case PIPE_CAP_DEVICE_ID:
      return 0x1010;
   case PIPE_CAP_ACCELERATED:
      return 1;
   case PIPE_CAP_UMA:
   case PIPE_CAP_VIDEO_MEMORY:
      return 0;
   }
   /* should only get here on unhandled cases */
   debug_printf("Unexpected PIPE_CAP %d query\n", param);
   return 0;
}
boolean vmw_ioctl_init(struct vmw_winsys_screen *vws) { struct drm_vmw_getparam_arg gp_arg; struct drm_vmw_get_3d_cap_arg cap_arg; unsigned int size; int ret; uint32_t *cap_buffer; drmVersionPtr version; boolean have_drm_2_5; VMW_FUNC; version = drmGetVersion(vws->ioctl.drm_fd); if (!version) goto out_no_version; have_drm_2_5 = version->version_major > 2 || (version->version_major == 2 && version->version_minor > 4); vws->ioctl.have_drm_2_6 = version->version_major > 2 || (version->version_major == 2 && version->version_minor > 5); memset(&gp_arg, 0, sizeof(gp_arg)); gp_arg.param = DRM_VMW_PARAM_3D; ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM, &gp_arg, sizeof(gp_arg)); if (ret || gp_arg.value == 0) { vmw_error("No 3D enabled (%i, %s).\n", ret, strerror(-ret)); goto out_no_3d; } memset(&gp_arg, 0, sizeof(gp_arg)); gp_arg.param = DRM_VMW_PARAM_FIFO_HW_VERSION; ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM, &gp_arg, sizeof(gp_arg)); if (ret) { vmw_error("Failed to get fifo hw version (%i, %s).\n", ret, strerror(-ret)); goto out_no_3d; } vws->ioctl.hwversion = gp_arg.value; memset(&gp_arg, 0, sizeof(gp_arg)); gp_arg.param = DRM_VMW_PARAM_HW_CAPS; ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM, &gp_arg, sizeof(gp_arg)); if (ret) vws->base.have_gb_objects = FALSE; else vws->base.have_gb_objects = !!(gp_arg.value & (uint64_t) SVGA_CAP_GBOBJECTS); if (vws->base.have_gb_objects && !have_drm_2_5) goto out_no_3d; if (vws->base.have_gb_objects) { memset(&gp_arg, 0, sizeof(gp_arg)); gp_arg.param = DRM_VMW_PARAM_3D_CAPS_SIZE; ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM, &gp_arg, sizeof(gp_arg)); if (ret) size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t); else size = gp_arg.value; if (vws->base.have_gb_objects) vws->ioctl.num_cap_3d = size / sizeof(uint32_t); else vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX; memset(&gp_arg, 0, sizeof(gp_arg)); gp_arg.param = DRM_VMW_PARAM_MAX_MOB_MEMORY; ret = 
drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM, &gp_arg, sizeof(gp_arg)); if (ret) { /* Just guess a large enough value. */ vws->ioctl.max_mob_memory = 256*1024*1024; } else { vws->ioctl.max_mob_memory = gp_arg.value; } memset(&gp_arg, 0, sizeof(gp_arg)); gp_arg.param = DRM_VMW_PARAM_MAX_MOB_SIZE; ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM, &gp_arg, sizeof(gp_arg)); if (ret || gp_arg.value == 0) { vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE; } else { vws->ioctl.max_texture_size = gp_arg.value; } /* Never early flush surfaces, mobs do accounting. */ vws->ioctl.max_surface_memory = -1; } else { vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX; memset(&gp_arg, 0, sizeof(gp_arg)); gp_arg.param = DRM_VMW_PARAM_MAX_SURF_MEMORY; if (have_drm_2_5) ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM, &gp_arg, sizeof(gp_arg)); if (!have_drm_2_5 || ret) { /* Just guess a large enough value, around 800mb. */ vws->ioctl.max_surface_memory = 0x30000000; } else { vws->ioctl.max_surface_memory = gp_arg.value; } vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE; size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t); } cap_buffer = calloc(1, size); if (!cap_buffer) { debug_printf("Failed alloc fifo 3D caps buffer.\n"); goto out_no_3d; } vws->ioctl.cap_3d = calloc(vws->ioctl.num_cap_3d, sizeof(*vws->ioctl.cap_3d)); if (!vws->ioctl.cap_3d) { debug_printf("Failed alloc fifo 3D caps buffer.\n"); goto out_no_caparray; } memset(&cap_arg, 0, sizeof(cap_arg)); cap_arg.buffer = (uint64_t) (unsigned long) (cap_buffer); cap_arg.max_size = size; ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_GET_3D_CAP, &cap_arg, sizeof(cap_arg)); if (ret) { debug_printf("Failed to get 3D capabilities" " (%i, %s).\n", ret, strerror(-ret)); goto out_no_caps; } ret = vmw_ioctl_parse_caps(vws, cap_buffer); if (ret) { debug_printf("Failed to parse 3D capabilities" " (%i, %s).\n", ret, strerror(-ret)); goto out_no_caps; } free(cap_buffer); 
drmFreeVersion(version); vmw_printf("%s OK\n", __FUNCTION__); return TRUE; out_no_caps: free(vws->ioctl.cap_3d); out_no_caparray: free(cap_buffer); out_no_3d: drmFreeVersion(version); out_no_version: vws->ioctl.num_cap_3d = 0; debug_printf("%s Failed\n", __FUNCTION__); return FALSE; }
/* struct vnop_create_args { struct vnode *a_dvp; struct vnode **a_vpp; struct componentname *a_cnp; struct vattr *a_vap; }; */ static int fuse_vnop_create(struct vop_create_args *ap) { struct vnode *dvp = ap->a_dvp; struct vnode **vpp = ap->a_vpp; struct componentname *cnp = ap->a_cnp; struct vattr *vap = ap->a_vap; struct thread *td = cnp->cn_thread; struct ucred *cred = cnp->cn_cred; struct fuse_open_in *foi; struct fuse_entry_out *feo; struct fuse_dispatcher fdi; struct fuse_dispatcher *fdip = &fdi; int err; struct mount *mp = vnode_mount(dvp); uint64_t parentnid = VTOFUD(dvp)->nid; mode_t mode = MAKEIMODE(vap->va_type, vap->va_mode); uint64_t x_fh_id; uint32_t x_open_flags; fuse_trace_printf_vnop(); if (fuse_isdeadfs(dvp)) { return ENXIO; } bzero(&fdi, sizeof(fdi)); /* XXX: Will we ever want devices ? */ if ((vap->va_type != VREG)) { printf("fuse_vnop_create: unsupported va_type %d\n", vap->va_type); return (EINVAL); } debug_printf("parent nid = %ju, mode = %x\n", (uintmax_t)parentnid, mode); fdisp_init(fdip, sizeof(*foi) + cnp->cn_namelen + 1); if (!fsess_isimpl(mp, FUSE_CREATE)) { debug_printf("eh, daemon doesn't implement create?\n"); return (EINVAL); } fdisp_make(fdip, FUSE_CREATE, vnode_mount(dvp), parentnid, td, cred); foi = fdip->indata; foi->mode = mode; foi->flags = O_CREAT | O_RDWR; memcpy((char *)fdip->indata + sizeof(*foi), cnp->cn_nameptr, cnp->cn_namelen); ((char *)fdip->indata)[sizeof(*foi) + cnp->cn_namelen] = '\0'; err = fdisp_wait_answ(fdip); if (err) { if (err == ENOSYS) fsess_set_notimpl(mp, FUSE_CREATE); debug_printf("create: got err=%d from daemon\n", err); goto out; } feo = fdip->answ; if ((err = fuse_internal_checkentry(feo, VREG))) { goto out; } err = fuse_vnode_get(mp, feo->nodeid, dvp, vpp, cnp, VREG); if (err) { struct fuse_release_in *fri; uint64_t nodeid = feo->nodeid; uint64_t fh_id = ((struct fuse_open_out *)(feo + 1))->fh; fdisp_init(fdip, sizeof(*fri)); fdisp_make(fdip, FUSE_RELEASE, mp, nodeid, td, cred); fri = fdip->indata; 
fri->fh = fh_id; fri->flags = OFLAGS(mode); fuse_insert_callback(fdip->tick, fuse_internal_forget_callback); fuse_insert_message(fdip->tick); return err; } ASSERT_VOP_ELOCKED(*vpp, "fuse_vnop_create"); fdip->answ = feo + 1; x_fh_id = ((struct fuse_open_out *)(feo + 1))->fh; x_open_flags = ((struct fuse_open_out *)(feo + 1))->open_flags; fuse_filehandle_init(*vpp, FUFH_RDWR, NULL, x_fh_id); fuse_vnode_open(*vpp, x_open_flags, td); cache_purge_negative(dvp); out: fdisp_destroy(fdip); return err; }
static errval_t bulk_bind_received(struct bulk_channel *channel) { debug_printf("APP: bind received"); return SYS_ERR_OK; }
/**
 * Translate a TGSI shader into LLVM IR using AoS (array-of-structures)
 * register layout.
 *
 * Two passes over the token stream: first collect declarations,
 * instructions and immediates, then emit LLVM IR per instruction via
 * lp_emit_instruction_aos().  Emits IR into gallivm->builder's current
 * insertion point; returns nothing (failures are logged, not reported).
 *
 * \param gallivm    LLVM/gallivm context to emit into
 * \param tokens     TGSI token stream to translate
 * \param type       SoA/AoS vector type of a shader "register"
 * \param swizzles   channel mapping (shader channel -> AoS slot)
 * \param consts_ptr pointer to the constant buffer
 * \param inputs     per-attrib input values (already interpolated)
 * \param outputs    per-attrib output storage
 * \param sampler    texture sampling code generator
 * \param info       pre-computed shader info (indirect files, etc.)
 */
void
lp_build_tgsi_aos(struct gallivm_state *gallivm,
                  const struct tgsi_token *tokens,
                  struct lp_type type,
                  const unsigned char swizzles[4],
                  LLVMValueRef consts_ptr,
                  const LLVMValueRef *inputs,
                  LLVMValueRef *outputs,
                  struct lp_build_sampler_aos *sampler,
                  const struct tgsi_shader_info *info)
{
   struct lp_build_tgsi_aos_context bld;
   struct tgsi_parse_context parse;
   uint num_immediates = 0;
   unsigned chan;
   int pc = 0;

   /* Setup build context */
   memset(&bld, 0, sizeof bld);
   lp_build_context_init(&bld.bld_base.base, gallivm, type);
   lp_build_context_init(&bld.bld_base.uint_bld, gallivm,
                         lp_uint_type(type));
   lp_build_context_init(&bld.bld_base.int_bld, gallivm, lp_int_type(type));
   lp_build_context_init(&bld.int_bld, gallivm, lp_int_type(type));

   /* Record both the forward and the inverse swizzle mapping. */
   for (chan = 0; chan < 4; ++chan) {
      bld.swizzles[chan] = swizzles[chan];
      bld.inv_swizzles[swizzles[chan]] = chan;
   }

   bld.inputs = inputs;
   bld.outputs = outputs;
   bld.consts_ptr = consts_ptr;
   bld.sampler = sampler;
   bld.indirect_files = info->indirect_files;
   bld.bld_base.emit_swizzle = swizzle_aos;
   bld.bld_base.info = info;

   bld.bld_base.emit_fetch_funcs[TGSI_FILE_CONSTANT] = emit_fetch_constant;
   bld.bld_base.emit_fetch_funcs[TGSI_FILE_IMMEDIATE] = emit_fetch_immediate;
   bld.bld_base.emit_fetch_funcs[TGSI_FILE_INPUT] = emit_fetch_input;
   bld.bld_base.emit_fetch_funcs[TGSI_FILE_TEMPORARY] = emit_fetch_temporary;

   /* Set opcode actions */
   lp_set_default_actions_cpu(&bld.bld_base);

   if (!lp_bld_tgsi_list_init(&bld.bld_base)) {
      return;
   }

   /* Pass 1: walk the token stream, gathering declarations, the
    * instruction list, and immediate values. */
   tgsi_parse_init(&parse, tokens);

   while (!tgsi_parse_end_of_tokens(&parse)) {
      tgsi_parse_token(&parse);

      switch(parse.FullToken.Token.Type) {
      case TGSI_TOKEN_TYPE_DECLARATION:
         /* Inputs already interpolated */
         lp_emit_declaration_aos(&bld, &parse.FullToken.FullDeclaration);
         break;

      case TGSI_TOKEN_TYPE_INSTRUCTION:
         /* save expanded instruction */
         lp_bld_tgsi_add_instruction(&bld.bld_base,
                                     &parse.FullToken.FullInstruction);
         break;

      case TGSI_TOKEN_TYPE_IMMEDIATE:
         /* simply copy the immediate values into the next immediates[]
          * slot, applying the AoS swizzle and padding with 0.0f */
         {
            const uint size =
               parse.FullToken.FullImmediate.Immediate.NrTokens - 1;
            float imm[4];
            assert(size <= 4);
            assert(num_immediates < LP_MAX_TGSI_IMMEDIATES);
            for (chan = 0; chan < 4; ++chan) {
               imm[chan] = 0.0f;
            }
            for (chan = 0; chan < size; ++chan) {
               unsigned swizzle = bld.swizzles[chan];
               imm[swizzle] = parse.FullToken.FullImmediate.u[chan].Float;
            }
            bld.immediates[num_immediates] =
               lp_build_const_aos(gallivm, type,
                                  imm[0], imm[1], imm[2], imm[3],
                                  NULL);
            num_immediates++;
         }
         break;

      case TGSI_TOKEN_TYPE_PROPERTY:
         break;

      default:
         assert(0);
      }
   }

   /* Pass 2: emit LLVM IR instruction by instruction; pc is advanced
    * (or redirected, for control flow) by lp_emit_instruction_aos()
    * and becomes -1 at the end of the shader. */
   while (pc != -1) {
      struct tgsi_full_instruction *instr = bld.bld_base.instructions + pc;
      const struct tgsi_opcode_info *opcode_info =
         tgsi_get_opcode_info(instr->Instruction.Opcode);
      if (!lp_emit_instruction_aos(&bld, instr, opcode_info, &pc))
         _debug_printf("warning: failed to translate tgsi opcode %s to LLVM\n",
                       opcode_info->mnemonic);
   }

   /* Disabled debug dump of the generated function (enable the 0). */
   if (0) {
      LLVMBasicBlockRef block = LLVMGetInsertBlock(gallivm->builder);
      LLVMValueRef function = LLVMGetBasicBlockParent(block);
      debug_printf("11111111111111111111111111111 \n");
      tgsi_dump(tokens, 0);
      lp_debug_dump_value(function);
      debug_printf("2222222222222222222222222222 \n");
   }
   tgsi_parse_free(&parse);

   FREE(bld.bld_base.instructions);

   /* Disabled debug dump of the whole module (enable the 0). */
   if (0) {
      LLVMModuleRef module = LLVMGetGlobalParent(
         LLVMGetBasicBlockParent(LLVMGetInsertBlock(gallivm->builder)));
      LLVMDumpModule(module);
   }
}
/**
 * \brief handles the initialization of a new bulk channel
 *
 * Depending on BULK_NET_BACKEND_PROXY, either creates two local
 * channels fronted by bulk-net proxies, or binds directly to the remote
 * RX/TX endpoints described by rx_ep/tx_ep.
 *
 * \param c      per-connection service state
 * \param tpcb   the TCP control block the request arrived on (unused here)
 * \param rx_ep  remote descriptor for our RX side
 * \param tx_ep  remote descriptor for our TX side
 *
 * \return ERR_OK on success or if already initialized; a bulk error
 *         code otherwise
 */
static err_t handle_init(struct block_net_service *c,
                         struct tcp_pcb *tpcb,
                         struct bulk_net_endpoint_descriptor* rx_ep,
                         struct bulk_net_endpoint_descriptor* tx_ep)
{
    BS_NET_DEBUG_TRACE

    errval_t err;

    if (c->rx_chan.state != BULK_STATE_UNINITIALIZED
        || c->tx_chan.state != BULK_STATE_UNINITIALIZED) {
        /* this is an error, already initialized */
        debug_printf("Notice: channels already initialized.\n");
        return ERR_OK;
    }

    c->bound = 0;

#if BULK_NET_BACKEND_PROXY
    BS_NET_DEBUG_NET("%s", "creating bulk net proxy channel");
    debug_printf("initializing endpoints\n");
    bulk_local_init_endpoint(&c->rx_ep, NULL);
    bulk_local_init_endpoint(&c->tx_ep, NULL);

    struct bulk_channel_setup chan_setup = {
        .direction = BULK_DIRECTION_TX,
        .role = BULK_ROLE_MASTER,
        .trust = BULK_TRUST_FULL,
        .meta_size = sizeof(struct bs_meta_data),
        .waitset = get_default_waitset(),
        .user_state = c, };

    err = bulk_channel_create(&c->tx_chan,
                              (struct bulk_endpoint_descriptor *) &c->tx_ep,
                              &bulk_tx_cb, &chan_setup);
    if (err_is_fail(err)) {
        bulk_channel_destroy(&c->tx_chan, BULK_CONT_NOP);
        debug_printf("ERROR: Failed to create the TX channel\n");
        return err;
    }

    /* Same setup, opposite direction, for the RX channel. */
    chan_setup.direction = BULK_DIRECTION_RX;
    err = bulk_channel_create(&c->rx_chan,
                              (struct bulk_endpoint_descriptor *) &c->rx_ep,
                              &bulk_rx_cb, &chan_setup);
    if (err_is_fail(err)) {
        bulk_channel_destroy(&c->tx_chan, BULK_CONT_NOP);
        debug_printf("ERROR: Failed to create the RX channel\n");
        return err;
    }

    bulk_local_init_endpoint(&c->rx_p_ep, &c->rx_chan);
    bulk_local_init_endpoint(&c->tx_p_ep, &c->tx_chan);

    c->tx_proxy.user_state = c;
    c->rx_proxy.user_state = c;

    /* XXX: tx_ep->ip.addr */
    /* NOTE(review): this debug label says "RX" but this is the TX proxy
     * connect — copy-paste in the message; confirm before changing. */
    BS_NET_DEBUG_BULK("bulk net proxy connect RX port=%i\n", tx_ep->port);
    err = bulk_net_proxy_connect(&c->tx_proxy, &c->tx_p_ep.generic,
                                 c->tx_chan.waitset, BLOCK_SIZE, "e10k",
                                 BLOCK_NET_TX_QUEUE, ntohl(tx_ep->ip.addr),
                                 tx_ep->port, net_proxy_connected_cb);
    if (err_is_fail(err)) {
        debug_printf("ERROR: failed to create net proxy\n");
        return err;
    }

    BS_NET_DEBUG_BULK("bulk net proxy connect RX port=%i\n", rx_ep->port);
    /* NOTE(review): the RX proxy also uses tx_ep->ip.addr (see the XXX
     * above) while taking rx_ep->port — verify this is intentional. */
    err = bulk_net_proxy_connect(&c->rx_proxy, &c->rx_p_ep.generic,
                                 c->rx_chan.waitset, BLOCK_SIZE, "e10k",
                                 BLOCK_NET_RX_QUEUE, ntohl(tx_ep->ip.addr),
                                 rx_ep->port, net_proxy_connected_cb);
    if (err_is_fail(err)) {
        debug_printf("ERROR: failed to create net proxy\n");
        return err;
    }
#else
    struct bulk_net_ep_setup ep_setup = {
        .port = rx_ep->port,
        .ip.addr = ntohl(rx_ep->ip.addr),
        .queue = BLOCK_NET_RX_QUEUE,
        .max_queues = BLOCK_NET_MAX_QUEUES,
        .buffer_size = BLOCK_SIZE,
        .buffer_count = BLOCK_NET_BUFFER_COUNT,
        .no_copy = BULK_NET_BACKEND_NOCOPY};

    /* create the RX endpoint */
    err = bulk_net_ep_create_remote(&c->rx_ep, &ep_setup);
    assert(!err_is_fail(err));

    ep_setup.port = tx_ep->port;
    ep_setup.queue = BLOCK_NET_TX_QUEUE;

    /* create the TX endpoint */
    err = bulk_net_ep_create_remote(&c->tx_ep, &ep_setup);
    assert(!err_is_fail(err));

    struct bulk_channel_bind_params params = {
        .role = BULK_ROLE_GENERIC,
        .user_state = c,
        .waitset = get_default_waitset(),
        .trust = BULK_TRUST_FULL};

    /* chan_bind_cb fires once each bind completes. */
    struct bulk_continuation cont = {
        .arg = c,
        .handler = chan_bind_cb};

    err = bulk_channel_bind(&c->rx_chan,
                            (struct bulk_endpoint_descriptor *) &c->rx_ep,
                            &bulk_rx_cb, &params, cont);
    if (err_is_fail(err)) {
        return err;
    }

    err = bulk_channel_bind(&c->tx_chan,
                            (struct bulk_endpoint_descriptor *) &c->tx_ep,
                            &bulk_tx_cb, &params, cont);
    if (err_is_fail(err)) {
        /* TODO: teardown channel */
        return err;
    }
#endif
    BS_NET_DEBUG_NET("%s", "handle init done.");

    return ERR_OK;
}

/**
 * \brief handles the reply of an error in case of unknown request
 *
 * Currently only traces; no error reply is sent yet.
 */
static err_t handle_bad_request(struct block_net_service *c,
                                struct tcp_pcb *tpcb)
{
    BS_NET_DEBUG_TRACE

    return ERR_OK;
}

#if 0
/**
 * \brief handler for disconnect requests (not yet implemented)
 */
static err_t handle_disconnect(struct block_net_service *c,
                               struct tcp_pcb *tpcb)
{
    // free up resources
    // close the network connection
    assert(!"NYI: block_net_init");
    return ERR_OK;
}
static enum pipe_error retrieve_or_generate_indices( struct svga_hwtnl *hwtnl, unsigned prim, unsigned gen_type, unsigned gen_nr, unsigned gen_size, u_generate_func generate, struct pipe_resource **out_buf ) { enum pipe_error ret = PIPE_OK; int i; for (i = 0; i < IDX_CACHE_MAX; i++) { if (hwtnl->index_cache[prim][i].buffer != NULL && hwtnl->index_cache[prim][i].generate == generate) { if (compare(hwtnl->index_cache[prim][i].gen_nr, gen_nr, gen_type)) { pipe_resource_reference( out_buf, hwtnl->index_cache[prim][i].buffer ); if (DBG) debug_printf("%s retrieve %d/%d\n", __FUNCTION__, i, gen_nr); return PIPE_OK; } else if (gen_type == U_GENERATE_REUSABLE) { pipe_resource_reference( &hwtnl->index_cache[prim][i].buffer, NULL ); if (DBG) debug_printf("%s discard %d/%d\n", __FUNCTION__, i, hwtnl->index_cache[prim][i].gen_nr); break; } } } if (i == IDX_CACHE_MAX) { unsigned smallest = 0; unsigned smallest_size = ~0; for (i = 0; i < IDX_CACHE_MAX && smallest_size; i++) { if (hwtnl->index_cache[prim][i].buffer == NULL) { smallest = i; smallest_size = 0; } else if (hwtnl->index_cache[prim][i].gen_nr < smallest) { smallest = i; smallest_size = hwtnl->index_cache[prim][i].gen_nr; } } assert (smallest != IDX_CACHE_MAX); pipe_resource_reference( &hwtnl->index_cache[prim][smallest].buffer, NULL ); if (DBG) debug_printf("%s discard smallest %d/%d\n", __FUNCTION__, smallest, smallest_size); i = smallest; } ret = generate_indices( hwtnl, gen_nr, gen_size, generate, out_buf ); if (ret != PIPE_OK) return ret; hwtnl->index_cache[prim][i].generate = generate; hwtnl->index_cache[prim][i].gen_nr = gen_nr; pipe_resource_reference( &hwtnl->index_cache[prim][i].buffer, *out_buf ); if (DBG) debug_printf("%s cache %d/%d\n", __FUNCTION__, i, hwtnl->index_cache[prim][i].gen_nr); return PIPE_OK; }
mget_iri_t *mget_iri_parse(const char *url, const char *encoding) { mget_iri_t *iri; const char *default_port = NULL; char *p, *s, *authority, c; size_t slen, it; int url_allocated, maybe_scheme; if (!url) return NULL; /* URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ] hier-part = "//" authority path-abempty / path-absolute / path-rootless / path-empty scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) */ while (isspace(*url)) url++; if (!*url) return NULL; // first unescape, than convert to UTF-8 if (strchr(url, '%')) { char *unesc_url = strdup(url); mget_percent_unescape(unesc_url); if (mget_str_needs_encoding(unesc_url)) { if ((url = mget_str_to_utf8(unesc_url, encoding))) xfree(unesc_url); else url = unesc_url; // on error, use what we have } else url = unesc_url; url_allocated = 1; } else { url_allocated = 0; if (mget_str_needs_encoding(url)) { if ((s = mget_str_to_utf8(url, encoding))) { url = s; url_allocated = 1; } } } // just use one block of memory for all parsed URI parts slen = strlen(url); iri = xmalloc(sizeof(mget_iri_t) + slen * 2 + 2); memset(iri, 0, sizeof(mget_iri_t)); strcpy(((char *)iri) + sizeof(mget_iri_t), url); iri->uri = ((char *)iri) + sizeof(mget_iri_t); s = ((char *)iri) + sizeof(mget_iri_t) + slen + 1; strcpy(s, url); if (url_allocated) xfree(url); p = s; if (isalpha(*p)) { maybe_scheme = 1; while (*s && !_iri_isgendelim(*s)) { if (maybe_scheme && !_iri_isscheme(*s)) maybe_scheme = 0; s++; } } else maybe_scheme = 0; if (maybe_scheme && (*s == ':' && (s[1] == '/' || s[1] == 0))) { // found a scheme *s++ = 0; // find the scheme in our static list of supported schemes // for later comparisons we compare pointers (avoiding strcasecmp()) iri->scheme = p; for (it = 0; mget_iri_schemes[it]; it++) { if (!mget_strcasecmp_ascii(mget_iri_schemes[it], p)) { iri->scheme = mget_iri_schemes[it]; default_port = iri_ports[it]; break; } } if (iri->scheme == p) { // convert scheme to lowercase mget_strtolower((char *)iri->scheme); } } else { 
iri->scheme = MGET_IRI_SCHEME_DEFAULT; default_port = iri_ports[0]; // port 80 s = p; // rewind } // this is true for http, https, ftp, file if (s[0] == '/' && s[1] == '/') s += 2; // authority authority = s; while (*s && *s != '/' && *s != '?' && *s != '#') s++; c = *s; if (c) *s++ = 0; // left over: [path][?query][#fragment] if (c == '/') { iri->path = s; while (*s && *s != '?' && *s != '#') s++; c = *s; if (c) *s++ = 0; } if (c == '?') { iri->query = s; while (*s && *s != '#') s++; c = *s; if (c) *s++ = 0; } if (c == '#') { iri->fragment = s; s += strlen(s); } if (*s) { debug_printf("unparsed rest '%s'\n", s); } if (*authority) { s = authority; p = strchr(authority, '@'); if (p) { iri->userinfo = s; *p = 0; s = p + 1; } if (*s == '[') { p = strrchr(s, ']'); if (p) { iri->host = s + 1; *p = 0; s = p + 1; } else { // something is broken iri->host = s + 1; s += strlen(s); } } else { iri->host = s; while (*s && *s != ':') s++; } if (*s == ':') { if (s[1]) { if (!default_port || (strcmp(s + 1, default_port) && atoi(s + 1) != atoi(default_port))) iri->port = s + 1; } } *s = 0; } iri->resolv_port = iri->port ? iri->port : default_port; // now unescape all components (not interested in display, userinfo, password) if (iri->host) { mget_strtolower((char *)iri->host); if ((p = (char *)mget_str_to_ascii(iri->host)) != iri->host) { iri->host = p; iri->host_allocated = 1; } } else { if (iri->scheme == MGET_IRI_SCHEME_HTTP || iri->scheme == MGET_IRI_SCHEME_HTTPS) { error_printf(_("Missing host/domain in URI '%s'\n"), iri->uri); mget_iri_free(&iri); return NULL; } } /* debug_printf("scheme=%s\n",iri->scheme); debug_printf("host=%s\n",iri->host); debug_printf("path=%s\n",iri->path); debug_printf("query=%s\n",iri->query); debug_printf("fragment=%s\n",iri->fragment); */ return iri; }