From be0c69957e7489423606023ad820599652a60e15 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 20 Jun 2023 13:39:35 +0100 Subject: [PATCH 1/7] compiler: remove destination type from cast builtins Resolves: #5909 --- src/AstGen.zig | 215 ++++++++++---- src/Autodoc.zig | 3 - src/BuiltinFn.zig | 28 +- src/Sema.zig | 727 ++++++++++++++++++++++++++++----------------- src/TypedValue.zig | 5 - src/Zir.zig | 44 ++- src/print_zir.zig | 34 ++- 7 files changed, 690 insertions(+), 366 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index f1acd7e3e3..df64d58549 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -335,6 +335,32 @@ const ResultInfo = struct { }, } } + + /// Find the result type for a cast builtin given the result location. + /// If the location does not have a known result type, emits an error on + /// the given node. + fn resultType(rl: Loc, gz: *GenZir, node: Ast.Node.Index, builtin_name: []const u8) !Zir.Inst.Ref { + const astgen = gz.astgen; + switch (rl) { + .discard, .none, .ref, .inferred_ptr => {}, + .ty, .coerced_ty => |ty_ref| return ty_ref, + .ptr => |ptr| { + const ptr_ty = try gz.addUnNode(.typeof, ptr.inst, node); + return gz.addUnNode(.elem_type, ptr_ty, node); + }, + .block_ptr => |block_scope| { + if (block_scope.rl_ty_inst != .none) return block_scope.rl_ty_inst; + if (block_scope.break_result_info.rl == .ptr) { + const ptr_ty = try gz.addUnNode(.typeof, block_scope.break_result_info.rl.ptr.inst, node); + return gz.addUnNode(.elem_type, ptr_ty, node); + } + }, + } + + return astgen.failNodeNotes(node, "{s} must have a known result type", .{builtin_name}, &.{ + try astgen.errNoteNode(node, "use @as to provide explicit result type", .{}), + }); + } }; const Context = enum { @@ -2521,6 +2547,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As .array_type, .array_type_sentinel, .elem_type_index, + .elem_type, .vector_type, .indexable_ptr_len, .anyframe_type, @@ -2662,7 +2689,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As .int_cast, .ptr_cast, .truncate, - .align_cast, .has_decl, .has_field, .clz, @@ -7924,11 +7950,10 @@ fn bitCast( scope: *Scope, ri: ResultInfo, node: Ast.Node.Index, - lhs: Ast.Node.Index, - rhs: Ast.Node.Index, + operand_node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { - const dest_type = try reachableTypeExpr(gz, scope, lhs, node); - const operand = try reachableExpr(gz, scope, .{ .rl = .none }, rhs, node); + const dest_type = try ri.rl.resultType(gz, node, "@bitCast"); + const operand = try reachableExpr(gz, scope, .{ .rl = .none }, operand_node, node); const result = try gz.addPlNode(.bitcast, node, Zir.Inst.Bin{ .lhs = dest_type, .rhs = operand, @@ -7936,6 +7961,116 @@ fn bitCast( return rvalue(gz, ri, result, node); } +/// Handle one or more nested pointer cast builtins: +/// * @ptrCast +/// * @alignCast +/// * @addrSpaceCast +/// * @constCast +/// * @volatileCast +/// Any sequence of such builtins is treated as a single operation. This allowed +/// for sequences like `@ptrCast(@alignCast(ptr))` to work correctly despite the +/// intermediate result type being unknown. 
+fn ptrCast( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + root_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + + var flags: Zir.Inst.FullPtrCastFlags = .{}; + + // Note that all pointer cast builtins have one parameter, so we only need + // to handle `builtin_call_two`. + var node = root_node; + while (true) { + switch (node_tags[node]) { + .builtin_call_two, .builtin_call_two_comma => {}, + .grouped_expression => { + // Handle the chaining even with redundant parentheses + node = node_datas[node].lhs; + continue; + }, + else => break, + } + + if (node_datas[node].lhs == 0) break; // 0 args + if (node_datas[node].rhs != 0) break; // 2 args + + const builtin_token = main_tokens[node]; + const builtin_name = tree.tokenSlice(builtin_token); + const info = BuiltinFn.list.get(builtin_name) orelse break; + if (info.param_count != 1) break; + + switch (info.tag) { + else => break, + inline .ptr_cast, + .align_cast, + .addrspace_cast, + .const_cast, + .volatile_cast, + => |tag| { + if (@field(flags, @tagName(tag))) { + return astgen.failNode(node, "redundant {s}", .{builtin_name}); + } + @field(flags, @tagName(tag)) = true; + }, + } + + node = node_datas[node].lhs; + } + + const flags_i = @bitCast(u5, flags); + assert(flags_i != 0); + + const ptr_only: Zir.Inst.FullPtrCastFlags = .{ .ptr_cast = true }; + if (flags_i == @bitCast(u5, ptr_only)) { + // Special case: simpler representation + return typeCast(gz, scope, ri, root_node, node, .ptr_cast, "@ptrCast"); + } + + const no_result_ty_flags: Zir.Inst.FullPtrCastFlags = .{ + .const_cast = true, + .volatile_cast = true, + }; + if ((flags_i & ~@bitCast(u5, no_result_ty_flags)) == 0) { + // Result type not needed + const cursor = maybeAdvanceSourceCursorToMainToken(gz, root_node); + const operand = try expr(gz, scope, .{ .rl = .none }, node); + try emitDbgStmt(gz, cursor); + const result = try gz.addExtendedPayloadSmall(.ptr_cast_no_dest, flags_i, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(root_node), + .operand = operand, + }); + return rvalue(gz, ri, result, root_node); + } + + // Full cast including result type + const need_result_type_builtin = if (flags.ptr_cast) + "@ptrCast" + else if (flags.align_cast) + "@alignCast" + else if (flags.addrspace_cast) + "@addrSpaceCast" + else + unreachable; + + const cursor = maybeAdvanceSourceCursorToMainToken(gz, root_node); + const result_type = try ri.rl.resultType(gz, root_node, need_result_type_builtin); + const operand = try expr(gz, scope, .{ .rl = .none }, node); + try emitDbgStmt(gz, cursor); + const result = try gz.addExtendedPayloadSmall(.ptr_cast_full, flags_i, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(root_node), + .lhs = result_type, + .rhs = operand, + }); + return rvalue(gz, ri, result, root_node); +} + fn typeOf( gz: *GenZir, scope: *Scope, @@ -8123,7 +8258,7 @@ fn builtinCall( // zig fmt: off .as => return as( gz, scope, ri, node, params[0], params[1]), - .bit_cast => return bitCast( gz, scope, ri, node, params[0], params[1]), + .bit_cast => return bitCast( gz, scope, ri, node, params[0]), .TypeOf => return typeOf( gz, scope, ri, node, params), .union_init => return unionInit(gz, scope, ri, node, params), .c_import => return cImport( gz, scope, node, params[0]), @@ -8308,14 +8443,13 @@ fn builtinCall( .Frame => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, 
params[0], .frame_type), .frame_size => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_size), - .int_from_float => return typeCast(gz, scope, ri, node, params[0], params[1], .int_from_float), - .float_from_int => return typeCast(gz, scope, ri, node, params[0], params[1], .float_from_int), - .ptr_from_int => return typeCast(gz, scope, ri, node, params[0], params[1], .ptr_from_int), - .enum_from_int => return typeCast(gz, scope, ri, node, params[0], params[1], .enum_from_int), - .float_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .float_cast), - .int_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .int_cast), - .ptr_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .ptr_cast), - .truncate => return typeCast(gz, scope, ri, node, params[0], params[1], .truncate), + .int_from_float => return typeCast(gz, scope, ri, node, params[0], .int_from_float, builtin_name), + .float_from_int => return typeCast(gz, scope, ri, node, params[0], .float_from_int, builtin_name), + .ptr_from_int => return typeCast(gz, scope, ri, node, params[0], .ptr_from_int, builtin_name), + .enum_from_int => return typeCast(gz, scope, ri, node, params[0], .enum_from_int, builtin_name), + .float_cast => return typeCast(gz, scope, ri, node, params[0], .float_cast, builtin_name), + .int_cast => return typeCast(gz, scope, ri, node, params[0], .int_cast, builtin_name), + .truncate => return typeCast(gz, scope, ri, node, params[0], .truncate, builtin_name), // zig fmt: on .Type => { @@ -8368,49 +8502,22 @@ fn builtinCall( }); return rvalue(gz, ri, result, node); }, - .align_cast => { - const dest_align = try comptimeExpr(gz, scope, align_ri, params[0]); - const rhs = try expr(gz, scope, .{ .rl = .none }, params[1]); - const result = try gz.addPlNode(.align_cast, node, Zir.Inst.Bin{ - .lhs = dest_align, - .rhs = rhs, - }); - return rvalue(gz, ri, result, node); - }, .err_set_cast => { try emitDbgNode(gz, node); const result = try gz.addExtendedPayload(.err_set_cast, Zir.Inst.BinNode{ - .lhs = try typeExpr(gz, scope, params[0]), - .rhs = try expr(gz, scope, .{ .rl = .none }, params[1]), + .lhs = try ri.rl.resultType(gz, node, "@errSetCast"), + .rhs = try expr(gz, scope, .{ .rl = .none }, params[0]), .node = gz.nodeIndexToRelative(node), }); return rvalue(gz, ri, result, node); }, - .addrspace_cast => { - const result = try gz.addExtendedPayload(.addrspace_cast, Zir.Inst.BinNode{ - .lhs = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .address_space_type } }, params[0]), - .rhs = try expr(gz, scope, .{ .rl = .none }, params[1]), - .node = gz.nodeIndexToRelative(node), - }); - return rvalue(gz, ri, result, node); - }, - .const_cast => { - const operand = try expr(gz, scope, .{ .rl = .none }, params[0]); - const result = try gz.addExtendedPayload(.const_cast, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = operand, - }); - return rvalue(gz, ri, result, node); - }, - .volatile_cast => { - const operand = try expr(gz, scope, .{ .rl = .none }, params[0]); - const result = try gz.addExtendedPayload(.volatile_cast, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = operand, - }); - return rvalue(gz, ri, result, node); - }, + .ptr_cast, + .align_cast, + .addrspace_cast, + .const_cast, + .volatile_cast, + => return ptrCast(gz, scope, ri, node), // zig fmt: off .has_decl => return hasDeclOrField(gz, scope, ri, node, params[0], params[1], .has_decl), @@ -8725,13 +8832,13 @@ fn typeCast( scope: *Scope, ri: ResultInfo, 
node: Ast.Node.Index, - lhs_node: Ast.Node.Index, - rhs_node: Ast.Node.Index, + operand_node: Ast.Node.Index, tag: Zir.Inst.Tag, + builtin_name: []const u8, ) InnerError!Zir.Inst.Ref { const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - const result_type = try typeExpr(gz, scope, lhs_node); - const operand = try expr(gz, scope, .{ .rl = .none }, rhs_node); + const result_type = try ri.rl.resultType(gz, node, builtin_name); + const operand = try expr(gz, scope, .{ .rl = .none }, operand_node); try emitDbgStmt(gz, cursor); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ @@ -9432,6 +9539,7 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index, have_ switch (builtin_info.needs_mem_loc) { .never => return false, .always => return true, + .forward0 => node = node_datas[node].lhs, .forward1 => node = node_datas[node].rhs, } // Missing builtin arg is not a parsing error, expect an error later. @@ -9448,6 +9556,7 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index, have_ switch (builtin_info.needs_mem_loc) { .never => return false, .always => return true, + .forward0 => node = params[0], .forward1 => node = params[1], } // Missing builtin arg is not a parsing error, expect an error later. diff --git a/src/Autodoc.zig b/src/Autodoc.zig index 68ddcc94c4..33c57b1197 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -1529,7 +1529,6 @@ fn walkInstruction( .int_cast, .ptr_cast, .truncate, - .align_cast, .has_decl, .has_field, .div_exact, @@ -3024,8 +3023,6 @@ fn walkInstruction( .int_from_error, .error_from_int, .reify, - .const_cast, - .volatile_cast, => { const extra = file.zir.extraData(Zir.Inst.UnNode, extended.operand).data; const bin_index = self.exprs.items.len; diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig index 27b963f528..9498b8dc83 100644 --- a/src/BuiltinFn.zig +++ b/src/BuiltinFn.zig @@ -129,6 +129,8 @@ pub const MemLocRequirement = enum { never, /// The builtin always needs a memory location. always, + /// The builtin forwards the question to argument at index 0. + forward0, /// The builtin forwards the question to argument at index 1. 
forward1, }; @@ -168,14 +170,14 @@ pub const list = list: { "@addrSpaceCast", .{ .tag = .addrspace_cast, - .param_count = 2, + .param_count = 1, }, }, .{ "@alignCast", .{ .tag = .align_cast, - .param_count = 2, + .param_count = 1, }, }, .{ @@ -226,8 +228,8 @@ pub const list = list: { "@bitCast", .{ .tag = .bit_cast, - .needs_mem_loc = .forward1, - .param_count = 2, + .needs_mem_loc = .forward0, + .param_count = 1, }, }, .{ @@ -457,7 +459,7 @@ pub const list = list: { .{ .tag = .err_set_cast, .eval_to_error = .always, - .param_count = 2, + .param_count = 1, }, }, .{ @@ -502,14 +504,14 @@ pub const list = list: { "@floatCast", .{ .tag = .float_cast, - .param_count = 2, + .param_count = 1, }, }, .{ "@intFromFloat", .{ .tag = .int_from_float, - .param_count = 2, + .param_count = 1, }, }, .{ @@ -572,14 +574,14 @@ pub const list = list: { "@intCast", .{ .tag = .int_cast, - .param_count = 2, + .param_count = 1, }, }, .{ "@enumFromInt", .{ .tag = .enum_from_int, - .param_count = 2, + .param_count = 1, }, }, .{ @@ -594,14 +596,14 @@ pub const list = list: { "@floatFromInt", .{ .tag = .float_from_int, - .param_count = 2, + .param_count = 1, }, }, .{ "@ptrFromInt", .{ .tag = .ptr_from_int, - .param_count = 2, + .param_count = 1, }, }, .{ @@ -685,7 +687,7 @@ pub const list = list: { "@ptrCast", .{ .tag = .ptr_cast, - .param_count = 2, + .param_count = 1, }, }, .{ @@ -938,7 +940,7 @@ pub const list = list: { "@truncate", .{ .tag = .truncate, - .param_count = 2, + .param_count = 1, }, }, .{ diff --git a/src/Sema.zig b/src/Sema.zig index b171c1bcb8..e45cccd43b 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -960,6 +960,7 @@ fn analyzeBodyInner( .elem_val => try sema.zirElemVal(block, inst), .elem_val_node => try sema.zirElemValNode(block, inst), .elem_type_index => try sema.zirElemTypeIndex(block, inst), + .elem_type => try sema.zirElemType(block, inst), .enum_literal => try sema.zirEnumLiteral(block, inst), .int_from_enum => try sema.zirIntFromEnum(block, inst), .enum_from_int => try sema.zirEnumFromInt(block, inst), @@ -1044,7 +1045,6 @@ fn analyzeBodyInner( .int_cast => try sema.zirIntCast(block, inst), .ptr_cast => try sema.zirPtrCast(block, inst), .truncate => try sema.zirTruncate(block, inst), - .align_cast => try sema.zirAlignCast(block, inst), .has_decl => try sema.zirHasDecl(block, inst), .has_field => try sema.zirHasField(block, inst), .byte_swap => try sema.zirByteSwap(block, inst), @@ -1172,13 +1172,12 @@ fn analyzeBodyInner( .reify => try sema.zirReify( block, extended, inst), .builtin_async_call => try sema.zirBuiltinAsyncCall( block, extended), .cmpxchg => try sema.zirCmpxchg( block, extended), - .addrspace_cast => try sema.zirAddrSpaceCast( block, extended), .c_va_arg => try sema.zirCVaArg( block, extended), .c_va_copy => try sema.zirCVaCopy( block, extended), .c_va_end => try sema.zirCVaEnd( block, extended), .c_va_start => try sema.zirCVaStart( block, extended), - .const_cast, => try sema.zirConstCast( block, extended), - .volatile_cast, => try sema.zirVolatileCast( block, extended), + .ptr_cast_full => try sema.zirPtrCastFull( block, extended), + .ptr_cast_no_dest => try sema.zirPtrCastNoDest( block, extended), .work_item_id => try sema.zirWorkItem( block, extended, extended.opcode), .work_group_size => try sema.zirWorkItem( block, extended, extended.opcode), .work_group_id => try sema.zirWorkItem( block, extended, extended.opcode), @@ -1821,6 +1820,24 @@ pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Ins return ty; } +fn resolveCastDestType(sema: *Sema, 
block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, builtin_name: []const u8) !Type { + return sema.resolveType(block, src, zir_ref) catch |err| switch (err) { + error.GenericPoison => { + // Cast builtins use their result type as the destination type, but + // it could be an anytype argument, which we can't catch in AstGen. + const msg = msg: { + const msg = try sema.errMsg(block, src, "{s} must have a known result type", .{builtin_name}); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, src, msg, "result type is unknown due to anytype parameter", .{}); + try sema.errNote(block, src, msg, "use @as to provide explicit result type", .{}); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(msg); + }, + else => |e| return e, + }; +} + fn analyzeAsType( sema: *Sema, block: *Block, @@ -7953,6 +7970,14 @@ fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr } } +fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const un_node = sema.code.instructions.items(.data)[inst].un_node; + const ptr_ty = try sema.resolveType(block, .unneeded, un_node.operand); + assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction + return sema.addType(ptr_ty.childType(mod)); +} + fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -8278,13 +8303,12 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); - const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@enumFromInt"); const operand = try sema.resolveInst(extra.rhs); if (dest_ty.zigTypeTag(mod) != .Enum) { - return sema.fail(block, dest_ty_src, "expected enum, found '{}'", .{dest_ty.fmt(mod)}); + return sema.fail(block, src, "expected enum, found '{}'", .{dest_ty.fmt(mod)}); } _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand)); @@ -9572,14 +9596,14 @@ fn zirIntCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const src = inst_data.src(); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@intCast"); const operand = try sema.resolveInst(extra.rhs); - return sema.intCast(block, inst_data.src(), dest_ty, dest_ty_src, operand, operand_src, true); + return sema.intCast(block, inst_data.src(), dest_ty, src, operand, operand_src, true); } fn intCast( @@ -9733,11 +9757,11 @@ 
fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const src = inst_data.src(); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@bitCast"); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); switch (dest_ty.zigTypeTag(mod)) { @@ -9756,14 +9780,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Type, .Undefined, .Void, - => return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}), + => return sema.fail(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}), .Enum => { const msg = msg: { - const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); + const msg = try sema.errMsg(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); switch (operand_ty.zigTypeTag(mod)) { - .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @enumFromInt to cast from '{}'", .{operand_ty.fmt(mod)}), + .Int, .ComptimeInt => try sema.errNote(block, src, msg, "use @enumFromInt to cast from '{}'", .{operand_ty.fmt(mod)}), else => {}, } @@ -9774,11 +9798,11 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Pointer => { const msg = msg: { - const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); + const msg = try sema.errMsg(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); switch (operand_ty.zigTypeTag(mod)) { - .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @ptrFromInt to cast from '{}'", .{operand_ty.fmt(mod)}), - .Pointer => try sema.errNote(block, dest_ty_src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(mod)}), + .Int, .ComptimeInt => try sema.errNote(block, src, msg, "use @ptrFromInt to cast from '{}'", .{operand_ty.fmt(mod)}), + .Pointer => try sema.errNote(block, src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(mod)}), else => {}, } @@ -9792,7 +9816,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Union => "union", else => unreachable, }; - return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'; {s} does not have a guaranteed in-memory layout", .{ + return sema.fail(block, src, "cannot @bitCast to '{}'; {s} does not have a guaranteed in-memory layout", .{ dest_ty.fmt(mod), container, }); }, @@ -9876,11 +9900,11 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const src = inst_data.src(); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, 
inst_data.payload_index).data; - const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@floatCast"); const operand = try sema.resolveInst(extra.rhs); const target = mod.getTarget(); @@ -9889,7 +9913,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .Float => false, else => return sema.fail( block, - dest_ty_src, + src, "expected float type, found '{}'", .{dest_ty.fmt(mod)}, ), @@ -20552,50 +20576,6 @@ fn reifyStruct( return decl_val; } -fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { - const mod = sema.mod; - const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; - const src = LazySrcLoc.nodeOffset(extra.node); - const addrspace_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; - const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node }; - - const dest_addrspace = try sema.analyzeAddressSpace(block, addrspace_src, extra.lhs, .pointer); - const ptr = try sema.resolveInst(extra.rhs); - const ptr_ty = sema.typeOf(ptr); - - try sema.checkPtrOperand(block, ptr_src, ptr_ty); - - var ptr_info = ptr_ty.ptrInfo(mod); - const src_addrspace = ptr_info.flags.address_space; - if (!target_util.addrSpaceCastIsValid(sema.mod.getTarget(), src_addrspace, dest_addrspace)) { - const msg = msg: { - const msg = try sema.errMsg(block, src, "invalid address space cast", .{}); - errdefer msg.destroy(sema.gpa); - try sema.errNote(block, src, msg, "address space '{s}' is not compatible with address space '{s}'", .{ @tagName(src_addrspace), @tagName(dest_addrspace) }); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); - } - - ptr_info.flags.address_space = dest_addrspace; - const dest_ptr_ty = try mod.ptrType(ptr_info); - const dest_ty = if (ptr_ty.zigTypeTag(mod) == .Optional) - try mod.optionalType(dest_ptr_ty.toIntern()) - else - dest_ptr_ty; - - try sema.requireRuntimeBlock(block, src, ptr_src); - // TODO: Address space cast safety? 
- - return block.addInst(.{ - .tag = .addrspace_cast, - .data = .{ .ty_op = .{ - .ty = try sema.addType(dest_ty), - .operand = ptr, - } }, - }); -} - fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref { const va_list_ty = try sema.getBuiltinType("VaList"); const va_list_ptr = try sema.mod.singleMutPtrType(va_list_ty); @@ -20711,14 +20691,14 @@ fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; + const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const dest_ty = try sema.resolveType(block, ty_src, extra.lhs); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@intFromFloat"); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - _ = try sema.checkIntType(block, ty_src, dest_ty); + _ = try sema.checkIntType(block, src, dest_ty); try sema.checkFloatType(block, operand_src, operand_ty); if (try sema.resolveMaybeUndefVal(operand)) |val| { @@ -20751,14 +20731,14 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; + const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const dest_ty = try sema.resolveType(block, ty_src, extra.lhs); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@floatFromInt"); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - try sema.checkFloatType(block, ty_src, dest_ty); + try sema.checkFloatType(block, src, dest_ty); _ = try sema.checkIntType(block, operand_src, operand_ty); if (try sema.resolveMaybeUndefVal(operand)) |val| { @@ -20779,21 +20759,20 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_res = try sema.resolveInst(extra.rhs); const operand_coerced = try sema.coerce(block, Type.usize, operand_res, operand_src); - const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const ptr_ty = try sema.resolveType(block, src, extra.lhs); - try sema.checkPtrType(block, type_src, ptr_ty); + const ptr_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@ptrFromInt"); + try sema.checkPtrType(block, src, ptr_ty); const elem_ty = ptr_ty.elemType2(mod); const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema); if (ptr_ty.isSlice(mod)) { const msg = msg: { - const msg = try sema.errMsg(block, type_src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - try sema.errNote(block, type_src, msg, "slice length cannot be inferred from address", .{}); + try sema.errNote(block, src, msg, "slice length cannot be inferred from address", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -20841,12 +20820,11 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const ip = &mod.intern_pool; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); - const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node }; - const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@errSetCast"); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - try sema.checkErrorSetType(block, dest_ty_src, dest_ty); + try sema.checkErrorSetType(block, src, dest_ty); try sema.checkErrorSetType(block, operand_src, operand_ty); // operand must be defined since it can be an invalid error value @@ -20869,7 +20847,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat break :disjoint true; } - try sema.resolveInferredErrorSetTy(block, dest_ty_src, dest_ty); + try sema.resolveInferredErrorSetTy(block, src, dest_ty); try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty); for (dest_ty.errorSetNames(mod)) |dest_err_name| { if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name)) @@ -20924,138 +20902,415 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat return block.addBitCast(dest_ty, operand); } +fn zirPtrCastFull(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); + const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; + const src = LazySrcLoc.nodeOffset(extra.node); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; + const operand = try sema.resolveInst(extra.rhs); + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@ptrCast"); // TODO: better error 
message (builtin name) + return sema.ptrCastFull( + block, + flags, + src, + operand, + operand_src, + dest_ty, + ); +} + fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); - const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@ptrCast"); const operand = try sema.resolveInst(extra.rhs); + + return sema.ptrCastFull( + block, + .{ .ptr_cast = true }, + src, + operand, + operand_src, + dest_ty, + ); +} + +fn ptrCastFull( + sema: *Sema, + block: *Block, + flags: Zir.Inst.FullPtrCastFlags, + src: LazySrcLoc, + operand: Air.Inst.Ref, + operand_src: LazySrcLoc, + dest_ty: Type, +) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - try sema.checkPtrType(block, dest_ty_src, dest_ty); + try sema.checkPtrType(block, src, dest_ty); try sema.checkPtrOperand(block, operand_src, operand_ty); - const operand_info = operand_ty.ptrInfo(mod); + const src_info = operand_ty.ptrInfo(mod); const dest_info = dest_ty.ptrInfo(mod); - if (operand_info.flags.is_const and !dest_info.flags.is_const) { - const msg = msg: { - const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{}); - errdefer msg.destroy(sema.gpa); - try sema.errNote(block, src, msg, "consider using '@constCast'", .{}); - break :msg msg; + try sema.resolveTypeLayout(src_info.child.toType()); + try sema.resolveTypeLayout(dest_info.child.toType()); + + const src_slice_like = src_info.flags.size == .Slice or + (src_info.flags.size == .One and src_info.child.toType().zigTypeTag(mod) == .Array); + + const dest_slice_like = dest_info.flags.size == .Slice or + (dest_info.flags.size == .One and dest_info.child.toType().zigTypeTag(mod) == .Array); + + if (dest_info.flags.size == .Slice and !src_slice_like) { + return sema.fail(block, src, "illegal pointer cast to slice", .{}); + } + + if (dest_info.flags.size == .Slice) { + const src_elem_size = switch (src_info.flags.size) { + .Slice => src_info.child.toType().abiSize(mod), + // pointer to array + .One => src_info.child.toType().childType(mod).abiSize(mod), + else => unreachable, }; - return sema.failWithOwnedErrorMsg(msg); - } - if (operand_info.flags.is_volatile and !dest_info.flags.is_volatile) { - const msg = msg: { - const msg = try sema.errMsg(block, src, "cast discards volatile qualifier", .{}); - errdefer msg.destroy(sema.gpa); - - try sema.errNote(block, src, msg, "consider using '@volatileCast'", .{}); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); - } - if (operand_info.flags.address_space != dest_info.flags.address_space) { - const msg = msg: { - const msg = try sema.errMsg(block, src, "cast changes pointer address space", .{}); - errdefer msg.destroy(sema.gpa); - - try sema.errNote(block, src, msg, "consider using '@addrSpaceCast'", .{}); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); + const dest_elem_size = dest_info.child.toType().abiSize(mod); + if (src_elem_size != dest_elem_size) { + return 
sema.fail(block, src, "TODO: implement @ptrCast between slices changing the length", .{}); + } } - const dest_is_slice = dest_ty.isSlice(mod); - const operand_is_slice = operand_ty.isSlice(mod); - if (dest_is_slice and !operand_is_slice) { - return sema.fail(block, dest_ty_src, "illegal pointer cast to slice", .{}); + // The checking logic in this function must stay in sync with Sema.coerceInMemoryAllowedPtrs + + if (!flags.ptr_cast) { + check_size: { + if (src_info.flags.size == dest_info.flags.size) break :check_size; + if (src_slice_like and dest_slice_like) break :check_size; + if (src_info.flags.size == .C) break :check_size; + if (dest_info.flags.size == .C) break :check_size; + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "cannot implicitly convert {s} pointer to {s} pointer", .{ + pointerSizeString(src_info.flags.size), + pointerSizeString(dest_info.flags.size), + }); + errdefer msg.destroy(sema.gpa); + if (dest_info.flags.size == .Many and + (src_info.flags.size == .Slice or + (src_info.flags.size == .One and src_info.child.toType().zigTypeTag(mod) == .Array))) + { + try sema.errNote(block, src, msg, "use 'ptr' field to convert slice to many pointer", .{}); + } else { + try sema.errNote(block, src, msg, "use @ptrCast to change pointer size", .{}); + } + break :msg msg; + }); + } + + check_child: { + const src_child = if (dest_info.flags.size == .Slice and src_info.flags.size == .One) blk: { + // *[n]T -> []T + break :blk src_info.child.toType().childType(mod); + } else src_info.child.toType(); + + const dest_child = dest_info.child.toType(); + + const imc_res = try sema.coerceInMemoryAllowed( + block, + dest_child, + src_child, + !dest_info.flags.is_const, + mod.getTarget(), + src, + operand_src, + ); + if (imc_res == .ok) break :check_child; + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "pointer element type '{}' cannot coerce into element type '{}'", .{ + src_child.fmt(mod), + dest_child.fmt(mod), + }); + errdefer msg.destroy(sema.gpa); + try imc_res.report(sema, block, src, msg); + try sema.errNote(block, src, msg, "use @ptrCast to cast pointer element type", .{}); + break :msg msg; + }); + } + + check_sent: { + if (dest_info.sentinel == .none) break :check_sent; + if (src_info.flags.size == .C) break :check_sent; + if (src_info.sentinel != .none) { + const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, src_info.sentinel, dest_info.child); + if (dest_info.sentinel == coerced_sent) break :check_sent; + } + if (src_slice_like and src_info.flags.size == .One and dest_info.flags.size == .Slice) { + // [*]nT -> []T + const arr_ty = src_info.child.toType(); + if (arr_ty.sentinel(mod)) |src_sentinel| { + const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, src_sentinel.toIntern(), dest_info.child); + if (dest_info.sentinel == coerced_sent) break :check_sent; + } + } + return sema.failWithOwnedErrorMsg(msg: { + const msg = if (src_info.sentinel == .none) blk: { + break :blk try sema.errMsg(block, src, "destination pointer requires '{}' sentinel", .{ + dest_info.sentinel.toValue().fmtValue(dest_info.child.toType(), mod), + }); + } else blk: { + break :blk try sema.errMsg(block, src, "pointer sentinel '{}' cannot coerce into pointer sentinel '{}'", .{ + src_info.sentinel.toValue().fmtValue(src_info.child.toType(), mod), + dest_info.sentinel.toValue().fmtValue(dest_info.child.toType(), mod), + }); + }; + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, src, msg, "use @ptrCast to cast 
pointer sentinel", .{}); + break :msg msg; + }); + } + + if (src_info.packed_offset.host_size != dest_info.packed_offset.host_size) { + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "pointer host size '{}' cannot coerce into pointer host size '{}'", .{ + src_info.packed_offset.host_size, + dest_info.packed_offset.host_size, + }); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, src, msg, "use @ptrCast to cast pointer host size", .{}); + break :msg msg; + }); + } + + if (src_info.packed_offset.bit_offset != dest_info.packed_offset.bit_offset) { + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "pointer bit offset '{}' cannot coerce into pointer bit offset '{}'", .{ + src_info.packed_offset.bit_offset, + dest_info.packed_offset.bit_offset, + }); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, src, msg, "use @ptrCast to cast pointer bit offset", .{}); + break :msg msg; + }); + } + + check_allowzero: { + const src_allows_zero = operand_ty.ptrAllowsZero(mod); + const dest_allows_zero = dest_ty.ptrAllowsZero(mod); + if (!src_allows_zero) break :check_allowzero; + if (dest_allows_zero) break :check_allowzero; + + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "'{}' could have null values which are illegal in type '{}'", .{ + operand_ty.fmt(mod), + dest_ty.fmt(mod), + }); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, src, msg, "use @ptrCast to assert the pointer is not null", .{}); + break :msg msg; + }); + } + + // TODO: vector index? } - const ptr = if (operand_is_slice and !dest_is_slice) - try sema.analyzeSlicePtr(block, operand_src, operand, operand_ty) - else - operand; - const dest_elem_ty = dest_ty.elemType2(mod); - try sema.resolveTypeLayout(dest_elem_ty); - const dest_align = dest_ty.ptrAlignment(mod); + const src_align = src_info.flags.alignment.toByteUnitsOptional() orelse src_info.child.toType().abiAlignment(mod); + const dest_align = dest_info.flags.alignment.toByteUnitsOptional() orelse dest_info.child.toType().abiAlignment(mod); + if (!flags.align_cast) { + if (dest_align > src_align) { + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "cast increases pointer alignment", .{}); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{ + operand_ty.fmt(mod), src_align, + }); + try sema.errNote(block, src, msg, "'{}' has alignment '{d}'", .{ + dest_ty.fmt(mod), dest_align, + }); + try sema.errNote(block, src, msg, "use @alignCast to assert pointer alignment", .{}); + break :msg msg; + }); + } + } - const operand_elem_ty = operand_ty.elemType2(mod); - try sema.resolveTypeLayout(operand_elem_ty); - const operand_align = operand_ty.ptrAlignment(mod); + if (!flags.addrspace_cast) { + if (src_info.flags.address_space != dest_info.flags.address_space) { + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "cast changes pointer address space", .{}); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, operand_src, msg, "'{}' has address space '{s}'", .{ + operand_ty.fmt(mod), @tagName(src_info.flags.address_space), + }); + try sema.errNote(block, src, msg, "'{}' has address space '{s}'", .{ + dest_ty.fmt(mod), @tagName(dest_info.flags.address_space), + }); + try sema.errNote(block, src, msg, "use @addrSpaceCast to cast pointer address space", .{}); + break :msg msg; + }); + } + } else { + // Some address space casts are always 
disallowed + if (!target_util.addrSpaceCastIsValid(mod.getTarget(), src_info.flags.address_space, dest_info.flags.address_space)) { + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "invalid address space cast", .{}); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, operand_src, msg, "address space '{s}' is not compatible with address space '{s}'", .{ + @tagName(src_info.flags.address_space), + @tagName(dest_info.flags.address_space), + }); + break :msg msg; + }); + } + } - // If the destination is less aligned than the source, preserve the source alignment - const aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: { - // Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result - var dest_ptr_info = dest_ty.ptrInfo(mod); - dest_ptr_info.flags.alignment = Alignment.fromNonzeroByteUnits(operand_align); + if (!flags.const_cast) { + if (src_info.flags.is_const and !dest_info.flags.is_const) { + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{}); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, src, msg, "use @constCast to discard const qualifier", .{}); + break :msg msg; + }); + } + } + + if (!flags.volatile_cast) { + if (src_info.flags.is_volatile and !dest_info.flags.is_volatile) { + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "cast discards volatile qualifier", .{}); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, src, msg, "use @volatileCast to discard volatile qualifier", .{}); + break :msg msg; + }); + } + } + + const ptr = if (src_info.flags.size == .Slice and dest_info.flags.size != .Slice) ptr: { + break :ptr try sema.analyzeSlicePtr(block, operand_src, operand, operand_ty); + } else operand; + + const dest_ptr_ty = if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) blk: { + // Only convert to a many-pointer at first + var info = dest_info; + info.flags.size = .Many; + const ty = try mod.ptrType(info); if (dest_ty.zigTypeTag(mod) == .Optional) { - break :blk try mod.optionalType((try mod.ptrType(dest_ptr_info)).toIntern()); + break :blk try mod.optionalType(ty.toIntern()); } else { - break :blk try mod.ptrType(dest_ptr_info); + break :blk ty; } - }; + } else dest_ty; - if (dest_is_slice) { - const operand_elem_size = operand_elem_ty.abiSize(mod); - const dest_elem_size = dest_elem_ty.abiSize(mod); - if (operand_elem_size != dest_elem_size) { - return sema.fail(block, dest_ty_src, "TODO: implement @ptrCast between slices changing the length", .{}); + // Cannot do @addrSpaceCast at comptime + if (!flags.addrspace_cast) { + if (try sema.resolveMaybeUndefVal(ptr)) |ptr_val| { + if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isUndef(mod)) { + return sema.failWithUseOfUndef(block, operand_src); + } + if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isNull(mod)) { + return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}); + } + if (dest_align > src_align) { + if (try ptr_val.getUnsignedIntAdvanced(mod, null)) |addr| { + if (addr % dest_align != 0) { + return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align }); + } + } + } + if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) { + if (ptr_val.isUndef(mod)) return sema.addConstUndef(dest_ty); + const arr_len = try mod.intValue(Type.usize, src_info.child.toType().arrayLen(mod)); + return 
sema.addConstant((try mod.intern(.{ .ptr = .{ + .ty = dest_ty.toIntern(), + .addr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr, + .len = arr_len.toIntern(), + } })).toValue()); + } else { + assert(dest_ptr_ty.eql(dest_ty, mod)); + return sema.addConstant(try mod.getCoerced(ptr_val, dest_ty)); + } } } - if (dest_align > operand_align) { - const msg = msg: { - const msg = try sema.errMsg(block, src, "cast increases pointer alignment", .{}); - errdefer msg.destroy(sema.gpa); - - try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{ - operand_ty.fmt(mod), operand_align, - }); - try sema.errNote(block, dest_ty_src, msg, "'{}' has alignment '{d}'", .{ - dest_ty.fmt(mod), dest_align, - }); - - try sema.errNote(block, src, msg, "consider using '@alignCast'", .{}); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); - } - - if (try sema.resolveMaybeUndefVal(ptr)) |operand_val| { - if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef(mod)) { - return sema.failWithUseOfUndef(block, operand_src); - } - if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) { - return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}); - } - return sema.addConstant(try mod.getCoerced(operand_val, aligned_dest_ty)); - } - try sema.requireRuntimeBlock(block, src, null); + if (block.wantSafety() and operand_ty.ptrAllowsZero(mod) and !dest_ty.ptrAllowsZero(mod) and - (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn)) + (try sema.typeHasRuntimeBits(dest_info.child.toType()) or dest_info.child.toType().zigTypeTag(mod) == .Fn)) { const ptr_int = try block.addUnOp(.int_from_ptr, ptr); const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize); - const ok = if (operand_is_slice) ok: { - const len = try sema.analyzeSliceLen(block, operand_src, operand); + const ok = if (src_info.flags.size == .Slice and dest_info.flags.size == .Slice) ok: { + const len = try sema.analyzeSliceLen(block, operand_src, ptr); const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); break :ok try block.addBinOp(.bit_or, len_zero, is_non_zero); } else is_non_zero; try sema.addSafetyCheck(block, ok, .cast_to_null); } - return block.addBitCast(aligned_dest_ty, ptr); + if (block.wantSafety() and dest_align > src_align and try sema.typeHasRuntimeBits(dest_info.child.toType())) { + const align_minus_1 = try sema.addConstant( + try mod.intValue(Type.usize, dest_align - 1), + ); + const ptr_int = try block.addUnOp(.int_from_ptr, ptr); + const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1); + const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); + const ok = if (src_info.flags.size == .Slice and dest_info.flags.size == .Slice) ok: { + const len = try sema.analyzeSliceLen(block, operand_src, ptr); + const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); + break :ok try block.addBinOp(.bit_or, len_zero, is_aligned); + } else is_aligned; + try sema.addSafetyCheck(block, ok, .incorrect_alignment); + } + + // If we're going from an array pointer to a slice, this will only be the pointer part! 
+ const result_ptr = if (flags.addrspace_cast) ptr: { + // We can't change address spaces with a bitcast, so this requires two instructions + var intermediate_info = src_info; + intermediate_info.flags.address_space = dest_info.flags.address_space; + const intermediate_ptr_ty = try mod.ptrType(intermediate_info); + const intermediate_ty = if (dest_ptr_ty.zigTypeTag(mod) == .Optional) blk: { + break :blk try mod.optionalType(intermediate_ptr_ty.toIntern()); + } else intermediate_ptr_ty; + const intermediate = try block.addInst(.{ + .tag = .addrspace_cast, + .data = .{ .ty_op = .{ + .ty = try sema.addType(intermediate_ty), + .operand = ptr, + } }, + }); + if (intermediate_ty.eql(dest_ptr_ty, mod)) { + // We only changed the address space, so no need for a bitcast + break :ptr intermediate; + } + break :ptr try block.addBitCast(dest_ptr_ty, intermediate); + } else ptr: { + break :ptr try block.addBitCast(dest_ptr_ty, ptr); + }; + + if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) { + // We have to construct a slice using the operand's child's array length + // Note that we know from the check at the start of the function that operand_ty is slice-like + const arr_len = try sema.addConstant( + try mod.intValue(Type.usize, src_info.child.toType().arrayLen(mod)), + ); + return block.addInst(.{ + .tag = .slice, + .data = .{ .ty_pl = .{ + .ty = try sema.addType(dest_ty), + .payload = try sema.addExtra(Air.Bin{ + .lhs = result_ptr, + .rhs = arr_len, + }), + } }, + }); + } else { + assert(dest_ptr_ty.eql(dest_ty, mod)); + return result_ptr; + } } -fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { +fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const mod = sema.mod; + const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -21064,7 +21319,8 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData try sema.checkPtrOperand(block, operand_src, operand_ty); var ptr_info = operand_ty.ptrInfo(mod); - ptr_info.flags.is_const = false; + if (flags.const_cast) ptr_info.flags.is_const = false; + if (flags.volatile_cast) ptr_info.flags.is_volatile = false; const dest_ty = try mod.ptrType(ptr_info); if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { @@ -21075,49 +21331,25 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData return block.addBitCast(dest_ty, operand); } -fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { - const mod = sema.mod; - const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; - const src = LazySrcLoc.nodeOffset(extra.node); - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; - const operand = try sema.resolveInst(extra.operand); - const operand_ty = sema.typeOf(operand); - try sema.checkPtrOperand(block, operand_src, operand_ty); - - var ptr_info = operand_ty.ptrInfo(mod); - ptr_info.flags.is_volatile = false; - const dest_ty = try mod.ptrType(ptr_info); - - if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - return sema.addConstant(operand_val); - } - - try sema.requireRuntimeBlock(block, src, null); - return 
block.addBitCast(dest_ty, operand); -} - fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); - const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const dest_scalar_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@truncate"); + const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, src); const operand = try sema.resolveInst(extra.rhs); - const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_scalar_ty); const operand_ty = sema.typeOf(operand); const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src); - const is_vector = operand_ty.zigTypeTag(mod) == .Vector; - const dest_ty = if (is_vector) - try mod.vectorType(.{ - .len = operand_ty.vectorLen(mod), - .child = dest_scalar_ty.toIntern(), - }) - else - dest_scalar_ty; - if (dest_is_comptime_int) { + const operand_is_vector = operand_ty.zigTypeTag(mod) == .Vector; + const dest_is_vector = dest_ty.zigTypeTag(mod) == .Vector; + if (operand_is_vector != dest_is_vector) { + return sema.fail(block, operand_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(mod), operand_ty.fmt(mod) }); + } + + if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.coerce(block, dest_ty, operand, operand_src); } @@ -21147,7 +21379,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .{ dest_ty.fmt(mod), operand_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); - try sema.errNote(block, dest_ty_src, msg, "destination type has {d} bits", .{ + try sema.errNote(block, src, msg, "destination type has {d} bits", .{ dest_info.bits, }); try sema.errNote(block, operand_src, msg, "operand type has {d} bits", .{ @@ -21161,7 +21393,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (try sema.resolveMaybeUndefValIntable(operand)) |val| { if (val.isUndef(mod)) return sema.addConstUndef(dest_ty); - if (!is_vector) { + if (!dest_is_vector) { return sema.addConstant(try mod.getCoerced( try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, mod), dest_ty, @@ -21182,59 +21414,6 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return block.addTyOp(.trunc, dest_ty, operand); } -fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; - const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const align_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const dest_align = try sema.resolveAlign(block, align_src, extra.lhs); - const ptr = try sema.resolveInst(extra.rhs); - const ptr_ty = sema.typeOf(ptr); - - try sema.checkPtrOperand(block, ptr_src, ptr_ty); - - var ptr_info = ptr_ty.ptrInfo(mod); - ptr_info.flags.alignment = dest_align; - var dest_ty = try 
mod.ptrType(ptr_info); - if (ptr_ty.zigTypeTag(mod) == .Optional) { - dest_ty = try mod.optionalType(dest_ty.toIntern()); - } - - if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |val| { - if (try val.getUnsignedIntAdvanced(mod, null)) |addr| { - const dest_align_bytes = dest_align.toByteUnitsOptional().?; - if (addr % dest_align_bytes != 0) { - return sema.fail(block, ptr_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align_bytes }); - } - } - return sema.addConstant(try mod.getCoerced(val, dest_ty)); - } - - try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src); - if (block.wantSafety() and dest_align.order(Alignment.fromNonzeroByteUnits(1)).compare(.gt) and - try sema.typeHasRuntimeBits(ptr_info.child.toType())) - { - const align_minus_1 = try sema.addConstant( - try mod.intValue(Type.usize, dest_align.toByteUnitsOptional().? - 1), - ); - const actual_ptr = if (ptr_ty.isSlice(mod)) - try sema.analyzeSlicePtr(block, ptr_src, ptr, ptr_ty) - else - ptr; - const ptr_int = try block.addUnOp(.int_from_ptr, actual_ptr); - const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1); - const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); - const ok = if (ptr_ty.isSlice(mod)) ok: { - const len = try sema.analyzeSliceLen(block, ptr_src, ptr); - const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); - break :ok try block.addBinOp(.bit_or, len_zero, is_aligned); - } else is_aligned; - try sema.addSafetyCheck(block, ok, .incorrect_alignment); - } - return sema.bitCast(block, dest_ty, ptr, ptr_src, null); -} - fn zirBitCount( sema: *Sema, block: *Block, @@ -21546,7 +21725,7 @@ fn checkPtrOperand( }; return sema.failWithOwnedErrorMsg(msg); }, - .Optional => if (ty.isPtrLikeOptional(mod)) return, + .Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return, else => {}, } return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)}); @@ -21577,7 +21756,7 @@ fn checkPtrType( }; return sema.failWithOwnedErrorMsg(msg); }, - .Optional => if (ty.isPtrLikeOptional(mod)) return, + .Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return, else => {}, } return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)}); diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 159297a4e8..1e8ab0fd87 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -241,11 +241,6 @@ pub fn print( return; } try writer.writeAll("@enumFromInt("); - try print(.{ - .ty = Type.type, - .val = enum_tag.ty.toValue(), - }, writer, level - 1, mod); - try writer.writeAll(", "); try print(.{ .ty = ip.typeOf(enum_tag.int).toType(), .val = enum_tag.int.toValue(), diff --git a/src/Zir.zig b/src/Zir.zig index 301f50958a..45ee755d6b 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -230,6 +230,9 @@ pub const Inst = struct { /// Given an indexable type, returns the type of the element at given index. /// Uses the `bin` union field. lhs is the indexable type, rhs is the index. elem_type_index, + /// Given a pointer type, returns its element type. + /// Uses the `un_node` field. + elem_type, /// Given a pointer to an indexable object, returns the len property. This is /// used by for loops. This instruction also emits a for-loop specific compile /// error if the indexable object is not indexable. @@ -838,13 +841,12 @@ pub const Inst = struct { int_cast, /// Implements the `@ptrCast` builtin. /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. 
+ /// Not every `@ptrCast` will correspond to this instruction - see also + /// `ptr_cast_full` in `Extended`. ptr_cast, /// Implements the `@truncate` builtin. /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. truncate, - /// Implements the `@alignCast` builtin. - /// Uses `pl_node` with payload `Bin`. `lhs` is dest alignment, `rhs` is operand. - align_cast, /// Implements the `@hasDecl` builtin. /// Uses the `pl_node` union field. Payload is `Bin`. @@ -1005,6 +1007,7 @@ pub const Inst = struct { .array_type_sentinel, .vector_type, .elem_type_index, + .elem_type, .indexable_ptr_len, .anyframe_type, .as, @@ -1172,7 +1175,6 @@ pub const Inst = struct { .int_cast, .ptr_cast, .truncate, - .align_cast, .has_field, .clz, .ctz, @@ -1309,6 +1311,7 @@ pub const Inst = struct { .array_type_sentinel, .vector_type, .elem_type_index, + .elem_type, .indexable_ptr_len, .anyframe_type, .as, @@ -1454,7 +1457,6 @@ pub const Inst = struct { .int_cast, .ptr_cast, .truncate, - .align_cast, .has_field, .clz, .ctz, @@ -1539,6 +1541,7 @@ pub const Inst = struct { .array_type_sentinel = .pl_node, .vector_type = .pl_node, .elem_type_index = .bin, + .elem_type = .un_node, .indexable_ptr_len = .un_node, .anyframe_type = .un_node, .as = .bin, @@ -1717,7 +1720,6 @@ pub const Inst = struct { .int_cast = .pl_node, .ptr_cast = .pl_node, .truncate = .pl_node, - .align_cast = .pl_node, .typeof_builtin = .pl_node, .has_decl = .pl_node, @@ -1948,9 +1950,6 @@ pub const Inst = struct { /// `small` 0=>weak 1=>strong /// `operand` is payload index to `Cmpxchg`. cmpxchg, - /// Implement the builtin `@addrSpaceCast` - /// `operand` is payload index to `BinNode`. `lhs` is dest type, `rhs` is operand. - addrspace_cast, /// Implement builtin `@cVaArg`. /// `operand` is payload index to `BinNode`. c_va_arg, @@ -1963,12 +1962,21 @@ pub const Inst = struct { /// Implement builtin `@cVaStart`. /// `operand` is `src_node: i32`. c_va_start, - /// Implements the `@constCast` builtin. + /// Implements the following builtins: + /// `@ptrCast`, `@alignCast`, `@addrSpaceCast`, `@constCast`, `@volatileCast`. + /// Represents an arbitrary nesting of the above builtins. Such a nesting is treated as a + /// single operation which can modify multiple components of a pointer type. + /// `operand` is payload index to `BinNode`. + /// `small` contains `FullPtrCastFlags`. + /// AST node is the root of the nested casts. + /// `lhs` is dest type, `rhs` is operand. + ptr_cast_full, /// `operand` is payload index to `UnNode`. - const_cast, - /// Implements the `@volatileCast` builtin. - /// `operand` is payload index to `UnNode`. - volatile_cast, + /// `small` contains `FullPtrCastFlags`. + /// Guaranteed to only have flags where no explicit destination type is + /// required (const_cast and volatile_cast). + /// AST node is the root of the nested casts. + ptr_cast_no_dest, /// Implements the `@workItemId` builtin. /// `operand` is payload index to `UnNode`. work_item_id, @@ -2806,6 +2814,14 @@ pub const Inst = struct { dbg_var, }; + pub const FullPtrCastFlags = packed struct(u5) { + ptr_cast: bool = false, + align_cast: bool = false, + addrspace_cast: bool = false, + const_cast: bool = false, + volatile_cast: bool = false, + }; + /// Trailing: /// 0. src_node: i32, // if has_src_node /// 1. 
tag_type: Ref, // if has_tag_type diff --git a/src/print_zir.zig b/src/print_zir.zig index 0291578189..472461cd04 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -154,6 +154,7 @@ const Writer = struct { .alloc, .alloc_mut, .alloc_comptime_mut, + .elem_type, .indexable_ptr_len, .anyframe_type, .bit_not, @@ -329,7 +330,6 @@ const Writer = struct { .int_cast, .ptr_cast, .truncate, - .align_cast, .div_exact, .div_floor, .div_trunc, @@ -507,8 +507,6 @@ const Writer = struct { .reify, .c_va_copy, .c_va_end, - .const_cast, - .volatile_cast, .work_item_id, .work_group_size, .work_group_id, @@ -525,7 +523,6 @@ const Writer = struct { .err_set_cast, .wasm_memory_grow, .prefetch, - .addrspace_cast, .c_va_arg, => { const inst_data = self.code.extraData(Zir.Inst.BinNode, extended.operand).data; @@ -539,6 +536,8 @@ const Writer = struct { .builtin_async_call => try self.writeBuiltinAsyncCall(stream, extended), .cmpxchg => try self.writeCmpxchg(stream, extended), + .ptr_cast_full => try self.writePtrCastFull(stream, extended), + .ptr_cast_no_dest => try self.writePtrCastNoDest(stream, extended), } } @@ -964,6 +963,33 @@ const Writer = struct { try self.writeSrc(stream, src); } + fn writePtrCastFull(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { + const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); + const extra = self.code.extraData(Zir.Inst.BinNode, extended.operand).data; + const src = LazySrcLoc.nodeOffset(extra.node); + if (flags.ptr_cast) try stream.writeAll("ptr_cast, "); + if (flags.align_cast) try stream.writeAll("align_cast, "); + if (flags.addrspace_cast) try stream.writeAll("addrspace_cast, "); + if (flags.const_cast) try stream.writeAll("const_cast, "); + if (flags.volatile_cast) try stream.writeAll("volatile_cast, "); + try self.writeInstRef(stream, extra.lhs); + try stream.writeAll(", "); + try self.writeInstRef(stream, extra.rhs); + try stream.writeAll(")) "); + try self.writeSrc(stream, src); + } + + fn writePtrCastNoDest(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { + const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); + const extra = self.code.extraData(Zir.Inst.UnNode, extended.operand).data; + const src = LazySrcLoc.nodeOffset(extra.node); + if (flags.const_cast) try stream.writeAll("const_cast, "); + if (flags.volatile_cast) try stream.writeAll("volatile_cast, "); + try self.writeInstRef(stream, extra.operand); + try stream.writeAll(")) "); + try self.writeSrc(stream, src); + } + fn writeAtomicLoad(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { const inst_data = self.code.instructions.items(.data)[inst].pl_node; const extra = self.code.extraData(Zir.Inst.AtomicLoad, inst_data.payload_index).data; From 283d6509730a0ca6fae0ed07a1814f5a9237f282 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 20 Jun 2023 14:19:14 +0100 Subject: [PATCH 2/7] fmt: add rewrite for cast builtin type parameters --- lib/std/zig/render.zig | 58 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 2 deletions(-) diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 0c93230d46..72f54b3f4f 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -1390,14 +1390,51 @@ fn renderBuiltinCall( ) Error!void { const token_tags = tree.tokens.items(.tag); - // TODO remove before release of 0.11.0 + // TODO remove before release of 0.12.0 const slice = tree.tokenSlice(builtin_token); + const rewrite_two_param_cast = params.len == 2 
and for ([_][]const u8{ + "@bitCast", + "@errSetCast", + "@floatCast", + "@intCast", + "@ptrCast", + "@intFromFloat", + "@floatToInt", + "@enumFromInt", + "@intToEnum", + "@floatFromInt", + "@intToFloat", + "@ptrFromInt", + "@intToPtr", + "@truncate", + }) |name| { + if (mem.eql(u8, slice, name)) break true; + } else false; + + if (rewrite_two_param_cast) { + const after_last_param_token = tree.lastToken(params[1]) + 1; + if (token_tags[after_last_param_token] != .comma) { + // Render all on one line, no trailing comma. + try ais.writer().writeAll("@as"); + try renderToken(ais, tree, builtin_token + 1, .none); // ( + try renderExpression(gpa, ais, tree, params[0], .comma_space); + } else { + // Render one param per line. + try ais.writer().writeAll("@as"); + ais.pushIndent(); + try renderToken(ais, tree, builtin_token + 1, .newline); // ( + try renderExpression(gpa, ais, tree, params[0], .comma); + } + } + // Corresponding logic below builtin name rewrite below + + // TODO remove before release of 0.11.0 if (mem.eql(u8, slice, "@maximum")) { try ais.writer().writeAll("@max"); } else if (mem.eql(u8, slice, "@minimum")) { try ais.writer().writeAll("@min"); } - // + // TODO remove before release of 0.12.0 else if (mem.eql(u8, slice, "@boolToInt")) { try ais.writer().writeAll("@intFromBool"); } else if (mem.eql(u8, slice, "@enumToInt")) { @@ -1420,6 +1457,23 @@ fn renderBuiltinCall( try renderToken(ais, tree, builtin_token, .none); // @name } + if (rewrite_two_param_cast) { + // Matches with corresponding logic above builtin name rewrite + const after_last_param_token = tree.lastToken(params[1]) + 1; + try ais.writer().writeAll("("); + try renderExpression(gpa, ais, tree, params[1], .none); + try ais.writer().writeAll(")"); + if (token_tags[after_last_param_token] != .comma) { + // Render all on one line, no trailing comma. + return renderToken(ais, tree, after_last_param_token, space); // ) + } else { + // Render one param per line. 
+ ais.popIndent(); + try renderToken(ais, tree, after_last_param_token, .newline); // , + return renderToken(ais, tree, after_last_param_token + 1, space); // ) + } + } + if (params.len == 0) { try renderToken(ais, tree, builtin_token + 1, .none); // ( return renderToken(ais, tree, builtin_token + 2, space); // ) From 447ca4e3fff021f471b748187b53f0a4744ad0bc Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 22 Jun 2023 22:40:13 +0100 Subject: [PATCH 3/7] translate-c: update to new cast builtin syntax --- src/translate_c.zig | 146 +++++++++++++++++++++++----------------- src/translate_c/ast.zig | 88 ++++++++---------------- 2 files changed, 113 insertions(+), 121 deletions(-) diff --git a/src/translate_c.zig b/src/translate_c.zig index 8d5804c5e5..4078bd0f34 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -1010,17 +1010,23 @@ fn buildFlexibleArrayFn( const bit_offset = layout.getFieldOffset(field_index); // this is a target-specific constant based on the struct layout const byte_offset = bit_offset / 8; - const casted_self = try Tag.ptr_cast.create(c.arena, .{ + const casted_self = try Tag.as.create(c.arena, .{ .lhs = intermediate_type_ident, - .rhs = self_param, + .rhs = try Tag.ptr_cast.create(c.arena, self_param), }); const field_offset = try transCreateNodeNumber(c, byte_offset, .int); const field_ptr = try Tag.add.create(c.arena, .{ .lhs = casted_self, .rhs = field_offset }); - const alignment = try Tag.alignof.create(c.arena, element_type); - - const ptr_val = try Tag.align_cast.create(c.arena, .{ .lhs = alignment, .rhs = field_ptr }); - const ptr_cast = try Tag.ptr_cast.create(c.arena, .{ .lhs = return_type_ident, .rhs = ptr_val }); + const ptr_cast = try Tag.as.create(c.arena, .{ + .lhs = return_type_ident, + .rhs = try Tag.ptr_cast.create( + c.arena, + try Tag.align_cast.create( + c.arena, + field_ptr, + ), + ), + }); const return_stmt = try Tag.@"return".create(c.arena, ptr_cast); try block_scope.statements.append(return_stmt); @@ -1579,14 +1585,14 @@ fn transOffsetOfExpr( /// pointer arithmetic expressions, where wraparound will ensure we get the correct value. /// node -> @bitCast(usize, @intCast(isize, node)) fn usizeCastForWrappingPtrArithmetic(gpa: mem.Allocator, node: Node) TransError!Node { - const intcast_node = try Tag.int_cast.create(gpa, .{ + const intcast_node = try Tag.as.create(gpa, .{ .lhs = try Tag.type.create(gpa, "isize"), - .rhs = node, + .rhs = try Tag.int_cast.create(gpa, node), }); - return Tag.bit_cast.create(gpa, .{ + return Tag.as.create(gpa, .{ .lhs = try Tag.type.create(gpa, "usize"), - .rhs = intcast_node, + .rhs = try Tag.bit_cast.create(gpa, intcast_node), }); } @@ -1781,7 +1787,10 @@ fn transBinaryOperator( const elem_type = c_pointer.castTag(.c_pointer).?.data.elem_type; const sizeof = try Tag.sizeof.create(c.arena, elem_type); - const bitcast = try Tag.bit_cast.create(c.arena, .{ .lhs = ptrdiff_type, .rhs = infixOpNode }); + const bitcast = try Tag.as.create(c.arena, .{ + .lhs = ptrdiff_type, + .rhs = try Tag.bit_cast.create(c.arena, infixOpNode), + }); return Tag.div_exact.create(c.arena, .{ .lhs = bitcast, @@ -2310,7 +2319,7 @@ fn transIntegerLiteral( // unsigned char y = 256; // How this gets evaluated is the 256 is an integer, which gets truncated to signed char, then bit-casted // to unsigned char, resulting in 0. 
In order for this to work, we have to emit this zig code: - // var y = @bitCast(u8, @truncate(i8, @as(c_int, 256))); + // var y = @as(u8, @bitCast(@as(i8, @truncate(@as(c_int, 256))))); // Ideally in translate-c we could flatten this out to simply: // var y: u8 = 0; // But the first step is to be correct, and the next step is to make the output more elegant. @@ -2501,7 +2510,10 @@ fn transCCast( .lt => { // @truncate(SameSignSmallerInt, src_int_expr) const ty_node = try transQualTypeIntWidthOf(c, dst_type, src_type_is_signed); - src_int_expr = try Tag.truncate.create(c.arena, .{ .lhs = ty_node, .rhs = src_int_expr }); + src_int_expr = try Tag.as.create(c.arena, .{ + .lhs = ty_node, + .rhs = try Tag.truncate.create(c.arena, src_int_expr), + }); }, .gt => { // @as(SameSignBiggerInt, src_int_expr) @@ -2512,36 +2524,57 @@ fn transCCast( // src_int_expr = src_int_expr }, } - // @bitCast(dest_type, intermediate_value) - return Tag.bit_cast.create(c.arena, .{ .lhs = dst_node, .rhs = src_int_expr }); + // @as(dest_type, @bitCast(intermediate_value)) + return Tag.as.create(c.arena, .{ + .lhs = dst_node, + .rhs = try Tag.bit_cast.create(c.arena, src_int_expr), + }); } if (cIsVector(src_type) or cIsVector(dst_type)) { // C cast where at least 1 operand is a vector requires them to be same size - // @bitCast(dest_type, val) - return Tag.bit_cast.create(c.arena, .{ .lhs = dst_node, .rhs = expr }); + // @as(dest_type, @bitCast(val)) + return Tag.as.create(c.arena, .{ + .lhs = dst_node, + .rhs = try Tag.bit_cast.create(c.arena, expr), + }); } if (cIsInteger(dst_type) and qualTypeIsPtr(src_type)) { // @intCast(dest_type, @intFromPtr(val)) const int_from_ptr = try Tag.int_from_ptr.create(c.arena, expr); - return Tag.int_cast.create(c.arena, .{ .lhs = dst_node, .rhs = int_from_ptr }); + return Tag.as.create(c.arena, .{ + .lhs = dst_node, + .rhs = try Tag.int_cast.create(c.arena, int_from_ptr), + }); } if (cIsInteger(src_type) and qualTypeIsPtr(dst_type)) { - // @ptrFromInt(dest_type, val) - return Tag.ptr_from_int.create(c.arena, .{ .lhs = dst_node, .rhs = expr }); + // @as(dest_type, @ptrFromInt(val)) + return Tag.as.create(c.arena, .{ + .lhs = dst_node, + .rhs = try Tag.ptr_from_int.create(c.arena, expr), + }); } if (cIsFloating(src_type) and cIsFloating(dst_type)) { - // @floatCast(dest_type, val) - return Tag.float_cast.create(c.arena, .{ .lhs = dst_node, .rhs = expr }); + // @as(dest_type, @floatCast(val)) + return Tag.as.create(c.arena, .{ + .lhs = dst_node, + .rhs = try Tag.float_cast.create(c.arena, expr), + }); } if (cIsFloating(src_type) and !cIsFloating(dst_type)) { - // @intFromFloat(dest_type, val) - return Tag.int_from_float.create(c.arena, .{ .lhs = dst_node, .rhs = expr }); + // @as(dest_type, @intFromFloat(val)) + return Tag.as.create(c.arena, .{ + .lhs = dst_node, + .rhs = try Tag.int_from_float.create(c.arena, expr), + }); } if (!cIsFloating(src_type) and cIsFloating(dst_type)) { var rhs = expr; if (qualTypeIsBoolean(src_type)) rhs = try Tag.int_from_bool.create(c.arena, expr); - // @floatFromInt(dest_type, val) - return Tag.float_from_int.create(c.arena, .{ .lhs = dst_node, .rhs = rhs }); + // @as(dest_type, @floatFromInt(val)) + return Tag.as.create(c.arena, .{ + .lhs = dst_node, + .rhs = try Tag.float_from_int.create(c.arena, rhs), + }); } if (qualTypeIsBoolean(src_type) and !qualTypeIsBoolean(dst_type)) { // @intFromBool returns a u1 @@ -3487,9 +3520,9 @@ fn transSignedArrayAccess( const then_value = try Tag.add.create(c.arena, .{ .lhs = container_node, - .rhs = try 
Tag.int_cast.create(c.arena, .{ + .rhs = try Tag.as.create(c.arena, .{ .lhs = try Tag.type.create(c.arena, "usize"), - .rhs = tmp_ref, + .rhs = try Tag.int_cast.create(c.arena, tmp_ref), }), }); @@ -3499,17 +3532,17 @@ fn transSignedArrayAccess( }); const minuend = container_node; - const signed_size = try Tag.int_cast.create(c.arena, .{ + const signed_size = try Tag.as.create(c.arena, .{ .lhs = try Tag.type.create(c.arena, "isize"), - .rhs = tmp_ref, + .rhs = try Tag.int_cast.create(c.arena, tmp_ref), }); const to_cast = try Tag.add_wrap.create(c.arena, .{ .lhs = signed_size, .rhs = try Tag.negate.create(c.arena, Tag.one_literal.init()), }); - const bitcast_node = try Tag.bit_cast.create(c.arena, .{ + const bitcast_node = try Tag.as.create(c.arena, .{ .lhs = try Tag.type.create(c.arena, "usize"), - .rhs = to_cast, + .rhs = try Tag.bit_cast.create(c.arena, to_cast), }); const subtrahend = try Tag.bit_not.create(c.arena, bitcast_node); const difference = try Tag.sub.create(c.arena, .{ @@ -3566,7 +3599,13 @@ fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscrip const rhs = if (is_longlong or is_signed) blk: { // check if long long first so that signed long long doesn't just become unsigned long long const typeid_node = if (is_longlong) try Tag.type.create(c.arena, "usize") else try transQualTypeIntWidthOf(c, subscr_qt, false); - break :blk try Tag.int_cast.create(c.arena, .{ .lhs = typeid_node, .rhs = try transExpr(c, scope, subscr_expr, .used) }); + break :blk try Tag.as.create(c.arena, .{ + .lhs = typeid_node, + .rhs = try Tag.int_cast.create( + c.arena, + try transExpr(c, scope, subscr_expr, .used), + ), + }); } else try transExpr(c, scope, subscr_expr, .used); const node = try Tag.array_access.create(c.arena, .{ @@ -3968,8 +4007,7 @@ fn transCreateCompoundAssign( } if (is_shift) { - const cast_to_type = try qualTypeToLog2IntRef(c, scope, rhs_qt, loc); - rhs_node = try Tag.int_cast.create(c.arena, .{ .lhs = cast_to_type, .rhs = rhs_node }); + rhs_node = try Tag.int_cast.create(c.arena, rhs_node); } else if (requires_int_cast) { rhs_node = try transCCast(c, scope, loc, lhs_qt, rhs_qt, rhs_node); } @@ -4008,8 +4046,7 @@ fn transCreateCompoundAssign( try block_scope.statements.append(assign); } else { if (is_shift) { - const cast_to_type = try qualTypeToLog2IntRef(c, &block_scope.base, rhs_qt, loc); - rhs_node = try Tag.int_cast.create(c.arena, .{ .lhs = cast_to_type, .rhs = rhs_node }); + rhs_node = try Tag.int_cast.create(c.arena, rhs_node); } else if (requires_int_cast) { rhs_node = try transCCast(c, &block_scope.base, loc, lhs_qt, rhs_qt, rhs_node); } @@ -4029,7 +4066,10 @@ fn transCreateCompoundAssign( // Casting away const or volatile requires us to use @ptrFromInt fn removeCVQualifiers(c: *Context, dst_type_node: Node, expr: Node) Error!Node { const int_from_ptr = try Tag.int_from_ptr.create(c.arena, expr); - return Tag.ptr_from_int.create(c.arena, .{ .lhs = dst_type_node, .rhs = int_from_ptr }); + return Tag.as.create(c.arena, .{ + .lhs = dst_type_node, + .rhs = try Tag.ptr_from_int.create(c.arena, int_from_ptr), + }); } fn transCPtrCast( @@ -4062,11 +4102,12 @@ fn transCPtrCast( // For opaque types a ptrCast is enough expr else blk: { - const alignof = try Tag.std_meta_alignment.create(c.arena, dst_type_node); - const align_cast = try Tag.align_cast.create(c.arena, .{ .lhs = alignof, .rhs = expr }); - break :blk align_cast; + break :blk try Tag.align_cast.create(c.arena, expr); }; - return Tag.ptr_cast.create(c.arena, .{ .lhs = dst_type_node, .rhs = rhs 
}); + return Tag.as.create(c.arena, .{ + .lhs = dst_type_node, + .rhs = try Tag.ptr_cast.create(c.arena, rhs), + }); } } @@ -4337,19 +4378,6 @@ fn qualTypeIntBitWidth(c: *Context, qt: clang.QualType) !u32 { } } -fn qualTypeToLog2IntRef(c: *Context, scope: *Scope, qt: clang.QualType, source_loc: clang.SourceLocation) !Node { - const int_bit_width = try qualTypeIntBitWidth(c, qt); - - if (int_bit_width != 0) { - // we can perform the log2 now. - const cast_bit_width = math.log2_int(u64, int_bit_width); - return Tag.log2_int_type.create(c.arena, cast_bit_width); - } - - const zig_type = try transQualType(c, scope, qt, source_loc); - return Tag.std_math_Log2Int.create(c.arena, zig_type); -} - fn qualTypeChildIsFnProto(qt: clang.QualType) bool { const ty = qualTypeCanon(qt); @@ -4731,14 +4759,12 @@ fn transCreateNodeShiftOp( const lhs_expr = stmt.getLHS(); const rhs_expr = stmt.getRHS(); - const rhs_location = rhs_expr.getBeginLoc(); // lhs >> @as(u5, rh) const lhs = try transExpr(c, scope, lhs_expr, .used); - const rhs_type = try qualTypeToLog2IntRef(c, scope, stmt.getType(), rhs_location); const rhs = try transExprCoercing(c, scope, rhs_expr, .used); - const rhs_casted = try Tag.int_cast.create(c.arena, .{ .lhs = rhs_type, .rhs = rhs }); + const rhs_casted = try Tag.int_cast.create(c.arena, rhs); return transCreateNodeInfixOp(c, op, lhs, rhs_casted, used); } @@ -6513,9 +6539,9 @@ fn parseCPostfixExpr(c: *Context, m: *MacroCtx, scope: *Scope, type_name: ?Node) }, .LBracket => { const index_val = try macroIntFromBool(c, try parseCExpr(c, m, scope)); - const index = try Tag.int_cast.create(c.arena, .{ + const index = try Tag.as.create(c.arena, .{ .lhs = try Tag.type.create(c.arena, "usize"), - .rhs = index_val, + .rhs = try Tag.int_cast.create(c.arena, index_val), }); node = try Tag.array_access.create(c.arena, .{ .lhs = node, .rhs = index }); try m.skip(c, .RBracket); diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig index c8ccfa497f..a24bff0176 100644 --- a/src/translate_c/ast.zig +++ b/src/translate_c/ast.zig @@ -115,15 +115,10 @@ pub const Node = extern union { /// @import("std").zig.c_builtins. 
import_c_builtin, - log2_int_type, - /// @import("std").math.Log2Int(operand) - std_math_Log2Int, - /// @intCast(lhs, rhs) + /// @intCast(operand) int_cast, /// @import("std").zig.c_translation.promoteIntLiteral(value, type, base) helpers_promoteIntLiteral, - /// @import("std").meta.alignment(value) - std_meta_alignment, /// @import("std").zig.c_translation.signedRemainder(lhs, rhs) signed_remainder, /// @divTrunc(lhs, rhs) @@ -132,23 +127,23 @@ pub const Node = extern union { int_from_bool, /// @as(lhs, rhs) as, - /// @truncate(lhs, rhs) + /// @truncate(operand) truncate, - /// @bitCast(lhs, rhs) + /// @bitCast(operand) bit_cast, - /// @floatCast(lhs, rhs) + /// @floatCast(operand) float_cast, - /// @intFromFloat(lhs, rhs) + /// @intFromFloat(operand) int_from_float, - /// @floatFromInt(lhs, rhs) + /// @floatFromInt(operand) float_from_int, - /// @ptrFromInt(lhs, rhs) + /// @ptrFromInt(operand) ptr_from_int, /// @intFromPtr(operand) int_from_ptr, - /// @alignCast(lhs, rhs) + /// @alignCast(operand) align_cast, - /// @ptrCast(lhs, rhs) + /// @ptrCast(operand) ptr_cast, /// @divExact(lhs, rhs) div_exact, @@ -254,7 +249,6 @@ pub const Node = extern union { .@"comptime", .@"defer", .asm_simple, - .std_math_Log2Int, .negate, .negate_wrap, .bit_not, @@ -270,12 +264,20 @@ pub const Node = extern union { .switch_else, .block_single, .helpers_sizeof, - .std_meta_alignment, .int_from_bool, .sizeof, .alignof, .typeof, .typeinfo, + .align_cast, + .truncate, + .bit_cast, + .float_cast, + .int_from_float, + .float_from_int, + .ptr_from_int, + .ptr_cast, + .int_cast, => Payload.UnOp, .add, @@ -314,24 +316,15 @@ pub const Node = extern union { .bit_xor_assign, .div_trunc, .signed_remainder, - .int_cast, .as, - .truncate, - .bit_cast, - .float_cast, - .int_from_float, - .float_from_int, - .ptr_from_int, .array_cat, .ellipsis3, .assign, - .align_cast, .array_access, .std_mem_zeroinit, .helpers_flexible_array_type, .helpers_shuffle_vector_index, .vector, - .ptr_cast, .div_exact, .offset_of, .helpers_cast, @@ -367,7 +360,6 @@ pub const Node = extern union { .c_pointer, .single_pointer => Payload.Pointer, .array_type, .null_sentinel_array_type => Payload.Array, .arg_redecl, .alias, .fail_decl => Payload.ArgRedecl, - .log2_int_type => Payload.Log2IntType, .var_simple, .pub_var_simple, .static_local_var, .mut_str => Payload.SimpleVarDecl, .enum_constant => Payload.EnumConstant, .array_filler => Payload.ArrayFiller, @@ -644,11 +636,6 @@ pub const Payload = struct { }, }; - pub const Log2IntType = struct { - base: Payload, - data: std.math.Log2Int(u64), - }; - pub const SimpleVarDecl = struct { base: Payload, data: struct { @@ -885,11 +872,6 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { try c.buf.append('\n'); return @as(NodeIndex, 0); // error: integer value 0 cannot be coerced to type 'std.mem.Allocator.Error!u32' }, - .std_math_Log2Int => { - const payload = node.castTag(.std_math_Log2Int).?.data; - const import_node = try renderStdImport(c, &.{ "math", "Log2Int" }); - return renderCall(c, import_node, &.{payload}); - }, .helpers_cast => { const payload = node.castTag(.helpers_cast).?.data; const import_node = try renderStdImport(c, &.{ "zig", "c_translation", "cast" }); @@ -900,11 +882,6 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { const import_node = try renderStdImport(c, &.{ "zig", "c_translation", "promoteIntLiteral" }); return renderCall(c, import_node, &.{ payload.type, payload.value, payload.base }); }, - .std_meta_alignment => { - const payload = 
node.castTag(.std_meta_alignment).?.data; - const import_node = try renderStdImport(c, &.{ "meta", "alignment" }); - return renderCall(c, import_node, &.{payload}); - }, .helpers_sizeof => { const payload = node.castTag(.helpers_sizeof).?.data; const import_node = try renderStdImport(c, &.{ "zig", "c_translation", "sizeof" }); @@ -1081,14 +1058,6 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { .data = undefined, }); }, - .log2_int_type => { - const payload = node.castTag(.log2_int_type).?.data; - return c.addNode(.{ - .tag = .identifier, - .main_token = try c.addTokenFmt(.identifier, "u{d}", .{payload}), - .data = undefined, - }); - }, .identifier => { const payload = node.castTag(.identifier).?.data; return c.addNode(.{ @@ -1344,7 +1313,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { }, .int_cast => { const payload = node.castTag(.int_cast).?.data; - return renderBuiltinCall(c, "@intCast", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@intCast", &.{payload}); }, .signed_remainder => { const payload = node.castTag(.signed_remainder).?.data; @@ -1365,27 +1334,27 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { }, .truncate => { const payload = node.castTag(.truncate).?.data; - return renderBuiltinCall(c, "@truncate", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@truncate", &.{payload}); }, .bit_cast => { const payload = node.castTag(.bit_cast).?.data; - return renderBuiltinCall(c, "@bitCast", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@bitCast", &.{payload}); }, .float_cast => { const payload = node.castTag(.float_cast).?.data; - return renderBuiltinCall(c, "@floatCast", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@floatCast", &.{payload}); }, .int_from_float => { const payload = node.castTag(.int_from_float).?.data; - return renderBuiltinCall(c, "@intFromFloat", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@intFromFloat", &.{payload}); }, .float_from_int => { const payload = node.castTag(.float_from_int).?.data; - return renderBuiltinCall(c, "@floatFromInt", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@floatFromInt", &.{payload}); }, .ptr_from_int => { const payload = node.castTag(.ptr_from_int).?.data; - return renderBuiltinCall(c, "@ptrFromInt", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@ptrFromInt", &.{payload}); }, .int_from_ptr => { const payload = node.castTag(.int_from_ptr).?.data; @@ -1393,11 +1362,11 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { }, .align_cast => { const payload = node.castTag(.align_cast).?.data; - return renderBuiltinCall(c, "@alignCast", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@alignCast", &.{payload}); }, .ptr_cast => { const payload = node.castTag(.ptr_cast).?.data; - return renderBuiltinCall(c, "@ptrCast", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@ptrCast", &.{payload}); }, .div_exact => { const payload = node.castTag(.div_exact).?.data; @@ -2330,14 +2299,11 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex { .float_from_int, .ptr_from_int, .std_mem_zeroes, - .std_math_Log2Int, - .log2_int_type, .int_from_ptr, .sizeof, .alignof, .typeof, .typeinfo, - .std_meta_alignment, .vector, .helpers_sizeof, .helpers_cast, From f26dda21171e26f44aeec8c59a75bbb3331eeb2e Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 22 Jun 2023 18:46:56 +0100 Subject: [PATCH 4/7] all: migrate code to new 
cast builtin syntax Most of this migration was performed automatically with `zig fmt`. There were a few exceptions which I had to manually fix: * `@alignCast` and `@addrSpaceCast` cannot be automatically rewritten * `@truncate`'s fixup is incorrect for vectors * Test cases are not formatted, and their error locations change --- lib/compiler_rt/addf3.zig | 44 +- lib/compiler_rt/addf3_test.zig | 46 +- lib/compiler_rt/arm.zig | 2 +- lib/compiler_rt/atomics.zig | 6 +- lib/compiler_rt/aulldiv.zig | 4 +- lib/compiler_rt/aullrem.zig | 4 +- lib/compiler_rt/ceil.zig | 16 +- lib/compiler_rt/clear_cache.zig | 4 +- lib/compiler_rt/clzdi2_test.zig | 2 +- lib/compiler_rt/clzsi2_test.zig | 4 +- lib/compiler_rt/clzti2_test.zig | 2 +- lib/compiler_rt/cmptf2.zig | 12 +- lib/compiler_rt/common.zig | 26 +- lib/compiler_rt/comparef.zig | 18 +- lib/compiler_rt/cos.zig | 10 +- lib/compiler_rt/count0bits.zig | 24 +- lib/compiler_rt/ctzdi2_test.zig | 2 +- lib/compiler_rt/ctzsi2_test.zig | 2 +- lib/compiler_rt/ctzti2_test.zig | 2 +- lib/compiler_rt/divdf3.zig | 64 +- lib/compiler_rt/divdf3_test.zig | 2 +- lib/compiler_rt/divhf3.zig | 2 +- lib/compiler_rt/divsf3.zig | 58 +- lib/compiler_rt/divsf3_test.zig | 2 +- lib/compiler_rt/divtf3.zig | 72 +-- lib/compiler_rt/divtf3_test.zig | 6 +- lib/compiler_rt/divti3.zig | 6 +- lib/compiler_rt/divti3_test.zig | 8 +- lib/compiler_rt/divxf3.zig | 76 +-- lib/compiler_rt/divxf3_test.zig | 8 +- lib/compiler_rt/emutls.zig | 57 +- lib/compiler_rt/exp.zig | 22 +- lib/compiler_rt/exp2.zig | 38 +- lib/compiler_rt/extenddftf2.zig | 4 +- lib/compiler_rt/extenddfxf2.zig | 2 +- lib/compiler_rt/extendf.zig | 14 +- lib/compiler_rt/extendf_test.zig | 42 +- lib/compiler_rt/extendhfdf2.zig | 2 +- lib/compiler_rt/extendhfsf2.zig | 6 +- lib/compiler_rt/extendhftf2.zig | 2 +- lib/compiler_rt/extendhfxf2.zig | 2 +- lib/compiler_rt/extendsfdf2.zig | 4 +- lib/compiler_rt/extendsftf2.zig | 4 +- lib/compiler_rt/extendsfxf2.zig | 2 +- lib/compiler_rt/extendxftf2.zig | 4 +- lib/compiler_rt/fabs.zig | 4 +- lib/compiler_rt/ffsdi2_test.zig | 2 +- lib/compiler_rt/ffssi2_test.zig | 2 +- lib/compiler_rt/ffsti2_test.zig | 2 +- lib/compiler_rt/fixdfti.zig | 2 +- lib/compiler_rt/fixhfti.zig | 2 +- lib/compiler_rt/fixsfti.zig | 2 +- lib/compiler_rt/fixtfti.zig | 2 +- lib/compiler_rt/fixunsdfti.zig | 2 +- lib/compiler_rt/fixunshfti.zig | 2 +- lib/compiler_rt/fixunssfti.zig | 2 +- lib/compiler_rt/fixunstfti.zig | 2 +- lib/compiler_rt/fixunsxfti.zig | 2 +- lib/compiler_rt/fixxfti.zig | 2 +- lib/compiler_rt/float_from_int.zig | 12 +- lib/compiler_rt/float_from_int_test.zig | 94 +-- lib/compiler_rt/floattidf.zig | 2 +- lib/compiler_rt/floattihf.zig | 2 +- lib/compiler_rt/floattisf.zig | 2 +- lib/compiler_rt/floattitf.zig | 2 +- lib/compiler_rt/floattixf.zig | 2 +- lib/compiler_rt/floatuntidf.zig | 2 +- lib/compiler_rt/floatuntihf.zig | 2 +- lib/compiler_rt/floatuntisf.zig | 2 +- lib/compiler_rt/floatuntitf.zig | 2 +- lib/compiler_rt/floatuntixf.zig | 2 +- lib/compiler_rt/floor.zig | 22 +- lib/compiler_rt/fma.zig | 38 +- lib/compiler_rt/fmod.zig | 64 +- lib/compiler_rt/int.zig | 82 +-- lib/compiler_rt/int_from_float.zig | 12 +- lib/compiler_rt/log.zig | 28 +- lib/compiler_rt/log10.zig | 36 +- lib/compiler_rt/log2.zig | 36 +- lib/compiler_rt/modti3.zig | 6 +- lib/compiler_rt/modti3_test.zig | 2 +- lib/compiler_rt/mulXi3.zig | 8 +- lib/compiler_rt/mulXi3_test.zig | 16 +- lib/compiler_rt/mulf3.zig | 60 +- lib/compiler_rt/mulf3_test.zig | 52 +- lib/compiler_rt/mulo.zig | 2 +- lib/compiler_rt/mulodi4_test.zig | 48 +- 
lib/compiler_rt/mulosi4_test.zig | 52 +- lib/compiler_rt/muloti4_test.zig | 62 +- lib/compiler_rt/negv.zig | 2 +- lib/compiler_rt/parity.zig | 8 +- lib/compiler_rt/paritydi2_test.zig | 10 +- lib/compiler_rt/paritysi2_test.zig | 10 +- lib/compiler_rt/parityti2_test.zig | 10 +- lib/compiler_rt/popcount.zig | 4 +- lib/compiler_rt/popcountdi2_test.zig | 10 +- lib/compiler_rt/popcountsi2_test.zig | 10 +- lib/compiler_rt/popcountti2_test.zig | 10 +- lib/compiler_rt/powiXf2.zig | 2 +- lib/compiler_rt/powiXf2_test.zig | 248 ++++---- lib/compiler_rt/rem_pio2.zig | 26 +- lib/compiler_rt/rem_pio2_large.zig | 30 +- lib/compiler_rt/rem_pio2f.zig | 10 +- lib/compiler_rt/round.zig | 16 +- lib/compiler_rt/shift.zig | 26 +- lib/compiler_rt/shift_test.zig | 500 +++++++-------- lib/compiler_rt/sin.zig | 14 +- lib/compiler_rt/sincos.zig | 20 +- lib/compiler_rt/sqrt.zig | 32 +- lib/compiler_rt/subdf3.zig | 4 +- lib/compiler_rt/subhf3.zig | 2 +- lib/compiler_rt/subsf3.zig | 4 +- lib/compiler_rt/subtf3.zig | 2 +- lib/compiler_rt/tan.zig | 10 +- lib/compiler_rt/trig.zig | 14 +- lib/compiler_rt/trunc.zig | 28 +- lib/compiler_rt/truncdfhf2.zig | 4 +- lib/compiler_rt/truncf.zig | 40 +- lib/compiler_rt/truncf_test.zig | 42 +- lib/compiler_rt/truncsfhf2.zig | 6 +- lib/compiler_rt/trunctfhf2.zig | 2 +- lib/compiler_rt/trunctfxf2.zig | 8 +- lib/compiler_rt/truncxfhf2.zig | 2 +- lib/compiler_rt/udivmod.zig | 28 +- lib/compiler_rt/udivmodei4.zig | 14 +- lib/compiler_rt/udivmodti4.zig | 2 +- lib/compiler_rt/udivti3.zig | 2 +- lib/compiler_rt/umodti3.zig | 4 +- lib/ssp.zig | 2 +- lib/std/Build.zig | 12 +- lib/std/Build/Cache.zig | 4 +- lib/std/Build/Step.zig | 4 +- lib/std/Build/Step/CheckObject.zig | 14 +- lib/std/Build/Step/Compile.zig | 6 +- lib/std/Build/Step/Run.zig | 4 +- lib/std/Progress.zig | 4 +- lib/std/Thread.zig | 42 +- lib/std/Thread/Futex.zig | 50 +- lib/std/Thread/Mutex.zig | 6 +- lib/std/array_hash_map.zig | 46 +- lib/std/array_list.zig | 12 +- lib/std/atomic/Atomic.zig | 20 +- lib/std/atomic/queue.zig | 2 +- lib/std/atomic/stack.zig | 2 +- lib/std/base64.zig | 10 +- lib/std/bit_set.zig | 42 +- lib/std/bounded_array.zig | 2 +- lib/std/builtin.zig | 2 +- lib/std/c.zig | 2 +- lib/std/c/darwin.zig | 68 +- lib/std/c/dragonfly.zig | 20 +- lib/std/c/freebsd.zig | 22 +- lib/std/c/haiku.zig | 10 +- lib/std/c/linux.zig | 2 +- lib/std/c/netbsd.zig | 16 +- lib/std/c/openbsd.zig | 14 +- lib/std/c/solaris.zig | 28 +- lib/std/child_process.zig | 26 +- lib/std/coff.zig | 32 +- lib/std/compress/deflate/bits_utils.zig | 2 +- lib/std/compress/deflate/compressor.zig | 38 +- lib/std/compress/deflate/compressor_test.zig | 2 +- lib/std/compress/deflate/decompressor.zig | 86 +-- lib/std/compress/deflate/deflate_fast.zig | 92 +-- .../compress/deflate/deflate_fast_test.zig | 8 +- lib/std/compress/deflate/dict_decoder.zig | 20 +- .../compress/deflate/huffman_bit_writer.zig | 110 ++-- lib/std/compress/deflate/huffman_code.zig | 20 +- lib/std/compress/deflate/token.zig | 10 +- lib/std/compress/gzip.zig | 2 +- lib/std/compress/lzma/decode.zig | 10 +- lib/std/compress/lzma2/decode.zig | 6 +- lib/std/compress/xz.zig | 2 +- lib/std/compress/xz/block.zig | 6 +- lib/std/compress/zlib.zig | 6 +- lib/std/compress/zstandard/decode/block.zig | 14 +- lib/std/compress/zstandard/decode/fse.zig | 14 +- lib/std/compress/zstandard/decode/huffman.zig | 10 +- lib/std/compress/zstandard/decompress.zig | 8 +- lib/std/crypto/25519/curve25519.zig | 2 +- lib/std/crypto/25519/edwards25519.zig | 24 +- lib/std/crypto/25519/field.zig | 22 +- 
lib/std/crypto/25519/scalar.zig | 74 +-- lib/std/crypto/Certificate.zig | 22 +- lib/std/crypto/Certificate/Bundle.zig | 6 +- lib/std/crypto/Certificate/Bundle/macos.zig | 6 +- lib/std/crypto/aegis.zig | 2 +- lib/std/crypto/aes/soft.zig | 102 +-- lib/std/crypto/aes_ocb.zig | 8 +- lib/std/crypto/argon2.zig | 22 +- lib/std/crypto/ascon.zig | 4 +- lib/std/crypto/bcrypt.zig | 8 +- lib/std/crypto/benchmark.zig | 52 +- lib/std/crypto/blake2.zig | 18 +- lib/std/crypto/blake3.zig | 14 +- lib/std/crypto/chacha20.zig | 8 +- lib/std/crypto/ecdsa.zig | 6 +- lib/std/crypto/ff.zig | 70 +-- lib/std/crypto/ghash_polyval.zig | 62 +- lib/std/crypto/isap.zig | 2 +- lib/std/crypto/keccak_p.zig | 4 +- lib/std/crypto/kyber_d00.zig | 72 +-- lib/std/crypto/md5.zig | 6 +- lib/std/crypto/pbkdf2.zig | 2 +- lib/std/crypto/pcurves/common.zig | 6 +- lib/std/crypto/pcurves/p256.zig | 20 +- lib/std/crypto/pcurves/p256/p256_64.zig | 72 +-- .../crypto/pcurves/p256/p256_scalar_64.zig | 72 +-- lib/std/crypto/pcurves/p384.zig | 20 +- lib/std/crypto/pcurves/p384/p384_64.zig | 104 +-- .../crypto/pcurves/p384/p384_scalar_64.zig | 104 +-- lib/std/crypto/pcurves/secp256k1.zig | 32 +- .../crypto/pcurves/secp256k1/secp256k1_64.zig | 72 +-- .../pcurves/secp256k1/secp256k1_scalar_64.zig | 72 +-- lib/std/crypto/phc_encoding.zig | 2 +- lib/std/crypto/poly1305.zig | 14 +- lib/std/crypto/salsa20.zig | 4 +- lib/std/crypto/scrypt.zig | 46 +- lib/std/crypto/sha1.zig | 6 +- lib/std/crypto/sha2.zig | 20 +- lib/std/crypto/siphash.zig | 12 +- lib/std/crypto/tlcsprng.zig | 6 +- lib/std/crypto/tls.zig | 20 +- lib/std/crypto/tls/Client.zig | 56 +- lib/std/crypto/utils.zig | 16 +- lib/std/cstr.zig | 4 +- lib/std/debug.zig | 116 ++-- lib/std/dwarf.zig | 12 +- lib/std/dynamic_library.zig | 42 +- lib/std/elf.zig | 30 +- lib/std/enums.zig | 30 +- lib/std/event/lock.zig | 6 +- lib/std/event/loop.zig | 12 +- lib/std/event/rwlock.zig | 8 +- lib/std/fmt.zig | 70 +-- lib/std/fmt/errol.zig | 98 +-- lib/std/fmt/parse_float.zig | 2 +- lib/std/fmt/parse_float/common.zig | 10 +- .../fmt/parse_float/convert_eisel_lemire.zig | 16 +- lib/std/fmt/parse_float/convert_fast.zig | 10 +- lib/std/fmt/parse_float/convert_hex.zig | 2 +- lib/std/fmt/parse_float/convert_slow.zig | 12 +- lib/std/fmt/parse_float/decimal.zig | 20 +- lib/std/fmt/parse_float/parse.zig | 14 +- lib/std/fs.zig | 37 +- lib/std/fs/file.zig | 18 +- lib/std/fs/get_app_data_dir.zig | 2 +- lib/std/fs/wasi.zig | 4 +- lib/std/fs/watch.zig | 16 +- lib/std/hash/adler.zig | 2 +- lib/std/hash/auto_hash.zig | 4 +- lib/std/hash/benchmark.zig | 12 +- lib/std/hash/cityhash.zig | 26 +- lib/std/hash/crc.zig | 24 +- lib/std/hash/murmur.zig | 50 +- lib/std/hash/wyhash.zig | 6 +- lib/std/hash/xxhash.zig | 2 +- lib/std/hash_map.zig | 44 +- lib/std/heap.zig | 50 +- lib/std/heap/PageAllocator.zig | 13 +- lib/std/heap/ThreadSafeAllocator.zig | 6 +- lib/std/heap/WasmAllocator.zig | 20 +- lib/std/heap/WasmPageAllocator.zig | 12 +- lib/std/heap/arena_allocator.zig | 24 +- lib/std/heap/general_purpose_allocator.zig | 56 +- lib/std/heap/log_to_writer_allocator.zig | 6 +- lib/std/heap/logging_allocator.zig | 6 +- lib/std/heap/memory_pool.zig | 8 +- lib/std/http/Client.zig | 14 +- lib/std/http/Server.zig | 12 +- lib/std/http/protocol.zig | 48 +- lib/std/io.zig | 2 +- lib/std/io/bit_reader.zig | 22 +- lib/std/io/bit_writer.zig | 28 +- lib/std/io/c_writer.zig | 2 +- lib/std/io/reader.zig | 2 +- lib/std/json/scanner.zig | 8 +- lib/std/json/static.zig | 20 +- lib/std/json/stringify.zig | 4 +- lib/std/json/write_stream.zig | 6 +- 
lib/std/leb128.zig | 42 +- lib/std/macho.zig | 14 +- lib/std/math.zig | 86 +-- lib/std/math/acos.zig | 16 +- lib/std/math/acosh.zig | 4 +- lib/std/math/asin.zig | 12 +- lib/std/math/asinh.zig | 8 +- lib/std/math/atan.zig | 10 +- lib/std/math/atan2.zig | 16 +- lib/std/math/atanh.zig | 10 +- lib/std/math/big/int.zig | 64 +- lib/std/math/big/int_test.zig | 66 +- lib/std/math/big/rational.zig | 22 +- lib/std/math/cbrt.zig | 22 +- lib/std/math/complex/atan.zig | 4 +- lib/std/math/complex/cosh.zig | 16 +- lib/std/math/complex/exp.zig | 16 +- lib/std/math/complex/ldexp.zig | 24 +- lib/std/math/complex/sinh.zig | 16 +- lib/std/math/complex/sqrt.zig | 8 +- lib/std/math/complex/tanh.zig | 12 +- lib/std/math/copysign.zig | 6 +- lib/std/math/cosh.zig | 10 +- lib/std/math/expm1.zig | 24 +- lib/std/math/expo2.zig | 4 +- lib/std/math/float.zig | 2 +- lib/std/math/frexp.zig | 18 +- lib/std/math/hypot.zig | 18 +- lib/std/math/ilogb.zig | 8 +- lib/std/math/isfinite.zig | 2 +- lib/std/math/isinf.zig | 2 +- lib/std/math/isnormal.zig | 6 +- lib/std/math/ldexp.zig | 30 +- lib/std/math/log.zig | 4 +- lib/std/math/log10.zig | 14 +- lib/std/math/log1p.zig | 24 +- lib/std/math/modf.zig | 28 +- lib/std/math/pow.zig | 4 +- lib/std/math/signbit.zig | 2 +- lib/std/math/sinh.zig | 10 +- lib/std/math/sqrt.zig | 2 +- lib/std/math/tanh.zig | 12 +- lib/std/mem.zig | 223 ++++--- lib/std/mem/Allocator.zig | 18 +- lib/std/meta.zig | 18 +- lib/std/meta/trailer_flags.zig | 6 +- lib/std/meta/trait.zig | 2 +- lib/std/multi_array_list.zig | 33 +- lib/std/net.zig | 78 +-- lib/std/os.zig | 250 ++++---- lib/std/os/linux.zig | 516 +++++++-------- lib/std/os/linux/bpf.zig | 30 +- lib/std/os/linux/bpf/helpers.zig | 282 ++++----- lib/std/os/linux/io_uring.zig | 101 ++- lib/std/os/linux/ioctl.zig | 2 +- lib/std/os/linux/start_pie.zig | 8 +- lib/std/os/linux/test.zig | 16 +- lib/std/os/linux/tls.zig | 6 +- lib/std/os/linux/vdso.zig | 30 +- lib/std/os/plan9.zig | 4 +- lib/std/os/test.zig | 4 +- lib/std/os/uefi.zig | 2 +- lib/std/os/uefi/pool_allocator.zig | 6 +- .../uefi/protocols/device_path_protocol.zig | 26 +- lib/std/os/uefi/protocols/file_protocol.zig | 4 +- lib/std/os/uefi/protocols/hii.zig | 2 +- .../protocols/managed_network_protocol.zig | 2 +- lib/std/os/uefi/protocols/udp6_protocol.zig | 4 +- lib/std/os/uefi/tables/boot_services.zig | 2 +- lib/std/os/wasi.zig | 6 +- lib/std/os/windows.zig | 166 ++--- lib/std/os/windows/user32.zig | 2 +- lib/std/os/windows/ws2_32.zig | 2 +- lib/std/packed_int_array.zig | 32 +- lib/std/pdb.zig | 30 +- lib/std/process.zig | 18 +- lib/std/rand.zig | 59 +- lib/std/rand/Isaac64.zig | 8 +- lib/std/rand/Pcg.zig | 10 +- lib/std/rand/RomuTrio.zig | 8 +- lib/std/rand/Sfc64.zig | 4 +- lib/std/rand/Xoroshiro128.zig | 6 +- lib/std/rand/Xoshiro256.zig | 10 +- lib/std/rand/benchmark.zig | 4 +- lib/std/rand/test.zig | 16 +- lib/std/rand/ziggurat.zig | 6 +- lib/std/segmented_list.zig | 16 +- lib/std/simd.zig | 24 +- lib/std/sort/pdq.zig | 4 +- lib/std/start.zig | 24 +- lib/std/start_windows_tls.zig | 2 +- lib/std/tar.zig | 14 +- lib/std/target.zig | 18 +- lib/std/testing/failing_allocator.zig | 6 +- lib/std/time.zig | 16 +- lib/std/time/epoch.zig | 12 +- lib/std/tz.zig | 4 +- lib/std/unicode.zig | 32 +- lib/std/unicode/throughput_test.zig | 4 +- lib/std/valgrind.zig | 2 +- lib/std/valgrind/callgrind.zig | 2 +- lib/std/valgrind/memcheck.zig | 22 +- lib/std/zig.zig | 2 +- lib/std/zig/Ast.zig | 10 +- lib/std/zig/CrossTarget.zig | 2 +- lib/std/zig/ErrorBundle.zig | 34 +- lib/std/zig/Parse.zig | 30 +- 
lib/std/zig/Server.zig | 28 +- lib/std/zig/c_builtins.zig | 20 +- lib/std/zig/c_translation.zig | 65 +- lib/std/zig/number_literal.zig | 6 +- lib/std/zig/parser_test.zig | 20 +- lib/std/zig/perf_test.zig | 6 +- lib/std/zig/render.zig | 4 +- lib/std/zig/string_literal.zig | 4 +- lib/std/zig/system/NativeTargetInfo.zig | 56 +- lib/std/zig/system/arm.zig | 14 +- lib/std/zig/system/windows.zig | 40 +- lib/std/zig/tokenizer.zig | 2 +- lib/test_runner.zig | 6 +- src/Air.zig | 26 +- src/AstGen.zig | 346 +++++----- src/Autodoc.zig | 98 +-- src/Compilation.zig | 40 +- src/InternPool.zig | 410 ++++++------ src/Liveness.zig | 68 +- src/Liveness/Verify.zig | 22 +- src/Manifest.zig | 22 +- src/Module.zig | 166 ++--- src/Package.zig | 6 +- src/Sema.zig | 590 +++++++++--------- src/TypedValue.zig | 8 +- src/Zir.zig | 44 +- src/arch/aarch64/CodeGen.zig | 176 +++--- src/arch/aarch64/Emit.zig | 44 +- src/arch/aarch64/Mir.zig | 2 +- src/arch/aarch64/bits.zig | 218 +++---- src/arch/arm/CodeGen.zig | 192 +++--- src/arch/arm/Emit.zig | 38 +- src/arch/arm/Mir.zig | 2 +- src/arch/arm/abi.zig | 2 +- src/arch/arm/bits.zig | 64 +- src/arch/riscv64/CodeGen.zig | 38 +- src/arch/riscv64/Emit.zig | 10 +- src/arch/riscv64/Mir.zig | 2 +- src/arch/riscv64/bits.zig | 46 +- src/arch/sparc64/CodeGen.zig | 86 +-- src/arch/sparc64/Emit.zig | 26 +- src/arch/sparc64/Mir.zig | 2 +- src/arch/sparc64/bits.zig | 80 +-- src/arch/wasm/CodeGen.zig | 328 +++++----- src/arch/wasm/Emit.zig | 22 +- src/arch/wasm/Mir.zig | 16 +- src/arch/x86_64/CodeGen.zig | 458 +++++++------- src/arch/x86_64/Emit.zig | 28 +- src/arch/x86_64/Encoding.zig | 4 +- src/arch/x86_64/Lower.zig | 10 +- src/arch/x86_64/Mir.zig | 34 +- src/arch/x86_64/abi.zig | 4 +- src/arch/x86_64/bits.zig | 32 +- src/arch/x86_64/encoder.zig | 14 +- src/clang.zig | 2 +- src/codegen.zig | 38 +- src/codegen/c.zig | 102 +-- src/codegen/c/type.zig | 20 +- src/codegen/llvm.zig | 264 ++++---- src/codegen/llvm/bindings.zig | 2 +- src/codegen/spirv.zig | 50 +- src/codegen/spirv/Assembler.zig | 24 +- src/codegen/spirv/Cache.zig | 124 ++-- src/codegen/spirv/Module.zig | 14 +- src/codegen/spirv/Section.zig | 30 +- src/crash_report.zig | 48 +- src/glibc.zig | 8 +- src/link/C.zig | 8 +- src/link/Coff.zig | 110 ++-- src/link/Coff/ImportTable.zig | 6 +- src/link/Coff/Relocation.zig | 24 +- src/link/Dwarf.zig | 116 ++-- src/link/Elf.zig | 104 +-- src/link/MachO.zig | 98 +-- src/link/MachO/Archive.zig | 2 +- src/link/MachO/CodeSignature.zig | 18 +- src/link/MachO/DebugSymbols.zig | 42 +- src/link/MachO/DwarfInfo.zig | 8 +- src/link/MachO/Dylib.zig | 12 +- src/link/MachO/Object.zig | 64 +- src/link/MachO/Relocation.zig | 46 +- src/link/MachO/Trie.zig | 2 +- src/link/MachO/UnwindInfo.zig | 108 ++-- src/link/MachO/ZldAtom.zig | 120 ++-- src/link/MachO/dead_strip.zig | 24 +- src/link/MachO/dyld_info/Rebase.zig | 10 +- src/link/MachO/dyld_info/bind.zig | 36 +- src/link/MachO/eh_frame.zig | 72 +-- src/link/MachO/load_commands.zig | 24 +- src/link/MachO/thunks.zig | 12 +- src/link/MachO/zld.zig | 144 ++--- src/link/Plan9.zig | 44 +- src/link/Wasm.zig | 276 ++++---- src/link/Wasm/Atom.zig | 18 +- src/link/Wasm/Object.zig | 32 +- src/link/Wasm/types.zig | 2 +- src/link/strtab.zig | 6 +- src/link/table_section.zig | 2 +- src/link/tapi/Tokenizer.zig | 4 +- src/main.zig | 18 +- src/objcopy.zig | 54 +- src/print_air.zig | 22 +- src/print_targets.zig | 4 +- src/print_zir.zig | 144 ++--- src/register_manager.zig | 4 +- src/tracy.zig | 6 +- src/translate_c.zig | 474 +++++++------- src/translate_c/ast.zig | 16 +- 
src/type.zig | 28 +- src/value.zig | 176 +++--- test/behavior/align.zig | 28 +- test/behavior/array.zig | 4 +- test/behavior/async_fn.zig | 10 +- test/behavior/atomics.zig | 2 +- test/behavior/basic.zig | 22 +- test/behavior/bit_shifting.zig | 6 +- test/behavior/bitcast.zig | 74 +-- test/behavior/bitreverse.zig | 28 +- test/behavior/bool.zig | 8 +- test/behavior/bugs/11995.zig | 2 +- test/behavior/bugs/12051.zig | 4 +- test/behavior/bugs/12119.zig | 2 +- test/behavior/bugs/12450.zig | 2 +- test/behavior/bugs/12723.zig | 2 +- test/behavior/bugs/13664.zig | 2 +- test/behavior/bugs/421.zig | 2 +- test/behavior/bugs/6781.zig | 8 +- test/behavior/bugs/718.zig | 2 +- test/behavior/bugs/726.zig | 4 +- ...n_functions_returning_void_or_noreturn.zig | 4 +- test/behavior/byteswap.zig | 32 +- test/behavior/call.zig | 2 +- test/behavior/cast.zig | 120 ++-- test/behavior/cast_int.zig | 2 +- test/behavior/comptime_memory.zig | 68 +- test/behavior/enum.zig | 36 +- test/behavior/error.zig | 4 +- test/behavior/eval.zig | 20 +- test/behavior/export.zig | 2 +- test/behavior/floatop.zig | 6 +- test/behavior/fn.zig | 8 +- test/behavior/fn_in_struct_in_comptime.zig | 2 +- test/behavior/for.zig | 10 +- test/behavior/generics.zig | 6 +- test/behavior/int128.zig | 16 +- test/behavior/math.zig | 20 +- test/behavior/memcpy.zig | 2 +- test/behavior/packed-struct.zig | 10 +- .../packed_struct_explicit_backing_int.zig | 2 +- test/behavior/pointers.zig | 24 +- test/behavior/popcount.zig | 2 +- test/behavior/ptrcast.zig | 44 +- test/behavior/ptrfromint.zig | 8 +- test/behavior/sizeof_and_typeof.zig | 4 +- test/behavior/slice.zig | 20 +- test/behavior/slice_sentinel_comptime.zig | 16 +- test/behavior/struct.zig | 20 +- test/behavior/switch.zig | 10 +- test/behavior/translate_c_macros.zig | 4 +- test/behavior/truncate.zig | 26 +- test/behavior/tuple.zig | 2 +- test/behavior/tuple_declarations.zig | 2 +- test/behavior/type.zig | 16 +- test/behavior/type_info.zig | 16 +- test/behavior/vector.zig | 2 +- test/c_abi/main.zig | 18 +- .../alignCast_expects_pointer_or_slice.zig | 5 +- .../bad_alignCast_at_comptime.zig | 6 +- ...tCast_same_size_but_bit_count_mismatch.zig | 4 +- .../compile_errors/bitCast_to_enum_type.zig | 6 +- ...h_different_sizes_inside_an_expression.zig | 4 +- ...ast_negative_value_to_unsigned_integer.zig | 2 +- ...mpile_log_a_pointer_to_an_opaque_value.zig | 2 +- .../compile_time_null_ptr_cast.zig | 2 +- .../compile_time_undef_ptr_cast.zig | 2 +- .../comptime_call_of_function_pointer.zig | 2 +- ...atch_memory_at_target_index_terminated.zig | 4 +- ...ch_memory_at_target_index_unterminated.zig | 4 +- ...entinel_does_not_match_target-sentinel.zig | 4 +- ...e-sentinel_is_out_of_bounds_terminated.zig | 4 +- ...sentinel_is_out_of_bounds_unterminated.zig | 4 +- ...n-exhaustive_enums_checks_int_in_range.zig | 4 +- ...field_count_range_but_not_matching_tag.zig | 4 +- ..._known_at_comptime_violates_error_sets.zig | 4 +- ...xplicitly_casting_non_tag_type_to_enum.zig | 2 +- ...comptime_field_ptr_not_based_on_struct.zig | 2 +- .../field_access_of_opaque_type.zig | 2 +- .../incorrect_type_to_memset_memcpy.zig | 4 +- .../increase_pointer_alignment_in_ptrCast.zig | 8 +- ...float_conversion_to_comptime_int-float.zig | 12 +- .../intFromFloat_comptime_safety.zig | 12 +- .../intFromPtr_0_to_non_optional_pointer.zig | 2 +- .../int_to_err_non_global_invalid_number.zig | 4 +- .../integer_cast_truncates_bits.zig | 2 +- .../integer_underflow_error.zig | 4 +- .../compile_errors/invalid_float_casts.zig | 16 +- 
.../compile_errors/invalid_int_casts.zig | 16 +- .../invalid_non-exhaustive_enum_to_union.zig | 6 +- ..._3818_bitcast_from_parray-slice_to_u16.zig | 12 +- ...es_from_comptime_reinterpreted_pointer.zig | 2 +- .../missing_builtin_arg_in_initializer.zig | 10 +- .../non_float_passed_to_intFromFloat.zig | 2 +- .../non_int_passed_to_floatFromInt.zig | 2 +- ..._comptime_float_passed_to_intFromFloat.zig | 2 +- .../ptrCast_discards_const_qualifier.zig | 6 +- .../ptrFromInt_non_ptr_type.zig | 10 +- .../ptrFromInt_with_misaligned_address.zig | 2 +- .../compile_errors/ptrcast_to_non-pointer.zig | 4 +- ...ading_past_end_of_pointer_casted_array.zig | 2 +- ...austive_enum_with_non-integer_tag_type.zig | 2 +- ...xhaustive_enum_with_undefined_tag_type.zig | 2 +- ...ce_cannot_have_its_bytes_reinterpreted.zig | 4 +- ...n_invalid_value_of_non-exhaustive_enum.zig | 4 +- .../compile_errors/truncate_sign_mismatch.zig | 16 +- ...ointer_coerced_to_pointer_to_opaque_{}.zig | 2 +- test/cases/enum_values.0.zig | 2 +- test/cases/enum_values.1.zig | 2 +- test/cases/error_in_nested_declaration.zig | 6 +- test/cases/int_to_ptr.0.zig | 4 +- test/cases/int_to_ptr.1.zig | 4 +- ...ment_address_space_reading_and_writing.zig | 2 +- test/cases/llvm/large_slices.zig | 2 +- test/cases/safety/@alignCast misaligned.zig | 3 +- .../@enumFromInt - no matching tag value.zig | 2 +- ...tCast error not present in destination.zig | 2 +- test/cases/safety/@intCast to u0.zig | 2 +- ...oat cannot fit - negative out of range.zig | 2 +- ...loat cannot fit - negative to unsigned.zig | 2 +- ...oat cannot fit - positive out of range.zig | 2 +- ...o to non-optional byte-aligned pointer.zig | 2 +- ...t address zero to non-optional pointer.zig | 2 +- .../@ptrFromInt with misaligned address.zig | 2 +- .../@tagName on corrupted enum value.zig | 2 +- .../@tagName on corrupted union value.zig | 2 +- ...inter casting to null function pointer.zig | 2 +- ...in cast to unsigned integer - widening.zig | 2 +- ...ot fitting in cast to unsigned integer.zig | 2 +- .../safety/signed-unsigned vector cast.zig | 2 +- ... 
sentinel mismatch - optional pointers.zig | 2 +- ...else on corrupt enum value - one prong.zig | 2 +- ...tch else on corrupt enum value - union.zig | 2 +- .../switch else on corrupt enum value.zig | 2 +- .../safety/switch on corrupted enum value.zig | 2 +- .../switch on corrupted union value.zig | 2 +- test/cases/safety/truncating vector cast.zig | 2 +- ...ast to signed integer - same bit count.zig | 2 +- .../safety/unsigned-signed vector cast.zig | 2 +- ...e does not fit in shortening cast - u0.zig | 2 +- .../value does not fit in shortening cast.zig | 2 +- test/cbe.zig | 10 +- test/compare_output.zig | 10 +- test/link/macho/dead_strip_dylibs/build.zig | 2 +- test/nvptx.zig | 2 +- test/standalone/hello_world/hello_libc.zig | 2 +- test/standalone/issue_11595/main.zig | 2 +- .../main_return_error/error_u8_non_zero.zig | 2 +- test/standalone/mix_c_files/main.zig | 2 +- test/standalone/pie/main.zig | 2 +- test/translate_c.zig | 140 ++--- tools/extract-grammar.zig | 2 +- tools/gen_spirv_spec.zig | 2 +- tools/gen_stubs.zig | 10 +- tools/update-linux-headers.zig | 2 +- tools/update_clang_options.zig | 4 +- 651 files changed, 8967 insertions(+), 9039 deletions(-) diff --git a/lib/compiler_rt/addf3.zig b/lib/compiler_rt/addf3.zig index 8edfef9838..e72294a55f 100644 --- a/lib/compiler_rt/addf3.zig +++ b/lib/compiler_rt/addf3.zig @@ -24,28 +24,28 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { const significandMask = (@as(Z, 1) << significandBits) - 1; const absMask = signBit - 1; - const qnanRep = @bitCast(Z, math.nan(T)) | quietBit; + const qnanRep = @as(Z, @bitCast(math.nan(T))) | quietBit; - var aRep = @bitCast(Z, a); - var bRep = @bitCast(Z, b); + var aRep = @as(Z, @bitCast(a)); + var bRep = @as(Z, @bitCast(b)); const aAbs = aRep & absMask; const bAbs = bRep & absMask; - const infRep = @bitCast(Z, math.inf(T)); + const infRep = @as(Z, @bitCast(math.inf(T))); // Detect if a or b is zero, infinity, or NaN. if (aAbs -% @as(Z, 1) >= infRep - @as(Z, 1) or bAbs -% @as(Z, 1) >= infRep - @as(Z, 1)) { // NaN + anything = qNaN - if (aAbs > infRep) return @bitCast(T, @bitCast(Z, a) | quietBit); + if (aAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(a)) | quietBit)); // anything + NaN = qNaN - if (bAbs > infRep) return @bitCast(T, @bitCast(Z, b) | quietBit); + if (bAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(b)) | quietBit)); if (aAbs == infRep) { // +/-infinity + -/+infinity = qNaN - if ((@bitCast(Z, a) ^ @bitCast(Z, b)) == signBit) { - return @bitCast(T, qnanRep); + if ((@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) == signBit) { + return @as(T, @bitCast(qnanRep)); } // +/-infinity + anything remaining = +/- infinity else { @@ -60,7 +60,7 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { if (aAbs == 0) { // but we need to get the sign right for zero + zero if (bAbs == 0) { - return @bitCast(T, @bitCast(Z, a) & @bitCast(Z, b)); + return @as(T, @bitCast(@as(Z, @bitCast(a)) & @as(Z, @bitCast(b)))); } else { return b; } @@ -78,8 +78,8 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { } // Extract the exponent and significand from the (possibly swapped) a and b. 
- var aExponent = @intCast(i32, (aRep >> significandBits) & maxExponent); - var bExponent = @intCast(i32, (bRep >> significandBits) & maxExponent); + var aExponent = @as(i32, @intCast((aRep >> significandBits) & maxExponent)); + var bExponent = @as(i32, @intCast((bRep >> significandBits) & maxExponent)); var aSignificand = aRep & significandMask; var bSignificand = bRep & significandMask; @@ -101,11 +101,11 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { // Shift the significand of b by the difference in exponents, with a sticky // bottom bit to get rounding correct. - const @"align" = @intCast(u32, aExponent - bExponent); + const @"align" = @as(u32, @intCast(aExponent - bExponent)); if (@"align" != 0) { if (@"align" < typeWidth) { - const sticky = if (bSignificand << @intCast(S, typeWidth - @"align") != 0) @as(Z, 1) else 0; - bSignificand = (bSignificand >> @truncate(S, @"align")) | sticky; + const sticky = if (bSignificand << @as(S, @intCast(typeWidth - @"align")) != 0) @as(Z, 1) else 0; + bSignificand = (bSignificand >> @as(S, @truncate(@"align"))) | sticky; } else { bSignificand = 1; // sticky; b is known to be non-zero. } @@ -113,13 +113,13 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { if (subtraction) { aSignificand -= bSignificand; // If a == -b, return +zero. - if (aSignificand == 0) return @bitCast(T, @as(Z, 0)); + if (aSignificand == 0) return @as(T, @bitCast(@as(Z, 0))); // If partial cancellation occured, we need to left-shift the result // and adjust the exponent: if (aSignificand < integerBit << 3) { - const shift = @intCast(i32, @clz(aSignificand)) - @intCast(i32, @clz(integerBit << 3)); - aSignificand <<= @intCast(S, shift); + const shift = @as(i32, @intCast(@clz(aSignificand))) - @as(i32, @intCast(@clz(integerBit << 3))); + aSignificand <<= @as(S, @intCast(shift)); aExponent -= shift; } } else { // addition @@ -135,13 +135,13 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { } // If we have overflowed the type, return +/- infinity: - if (aExponent >= maxExponent) return @bitCast(T, infRep | resultSign); + if (aExponent >= maxExponent) return @as(T, @bitCast(infRep | resultSign)); if (aExponent <= 0) { // Result is denormal; the exponent and round/sticky bits are zero. // All we need to do is shift the significand and apply the correct sign. - aSignificand >>= @intCast(S, 4 - aExponent); - return @bitCast(T, resultSign | aSignificand); + aSignificand >>= @as(S, @intCast(4 - aExponent)); + return @as(T, @bitCast(resultSign | aSignificand)); } // Low three bits are round, guard, and sticky. @@ -151,7 +151,7 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { var result = (aSignificand >> 3) & significandMask; // Insert the exponent and sign. - result |= @intCast(Z, aExponent) << significandBits; + result |= @as(Z, @intCast(aExponent)) << significandBits; result |= resultSign; // Final rounding. 
The result may overflow to infinity, but that is the @@ -164,7 +164,7 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { if ((result >> significandBits) != 0) result |= integerBit; } - return @bitCast(T, result); + return @as(T, @bitCast(result)); } test { diff --git a/lib/compiler_rt/addf3_test.zig b/lib/compiler_rt/addf3_test.zig index 1df87a889f..c020ee0bc9 100644 --- a/lib/compiler_rt/addf3_test.zig +++ b/lib/compiler_rt/addf3_test.zig @@ -5,7 +5,7 @@ const std = @import("std"); const math = std.math; -const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64); +const qnan128 = @as(f128, @bitCast(@as(u128, 0x7fff800000000000) << 64)); const __addtf3 = @import("addtf3.zig").__addtf3; const __addxf3 = @import("addxf3.zig").__addxf3; @@ -14,9 +14,9 @@ const __subtf3 = @import("subtf3.zig").__subtf3; fn test__addtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void { const x = __addtf3(a, b); - const rep = @bitCast(u128, x); - const hi = @intCast(u64, rep >> 64); - const lo = @truncate(u64, rep); + const rep = @as(u128, @bitCast(x)); + const hi = @as(u64, @intCast(rep >> 64)); + const lo = @as(u64, @truncate(rep)); if (hi == expected_hi and lo == expected_lo) { return; @@ -37,7 +37,7 @@ test "addtf3" { try test__addtf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0); // NaN + any = NaN - try test__addtf3(@bitCast(f128, (@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000)), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0); + try test__addtf3(@as(f128, @bitCast((@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000))), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0); // inf + inf = inf try test__addtf3(math.inf(f128), math.inf(f128), 0x7fff000000000000, 0x0); @@ -53,9 +53,9 @@ test "addtf3" { fn test__subtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void { const x = __subtf3(a, b); - const rep = @bitCast(u128, x); - const hi = @intCast(u64, rep >> 64); - const lo = @truncate(u64, rep); + const rep = @as(u128, @bitCast(x)); + const hi = @as(u64, @intCast(rep >> 64)); + const lo = @as(u64, @truncate(rep)); if (hi == expected_hi and lo == expected_lo) { return; @@ -77,7 +77,7 @@ test "subtf3" { try test__subtf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0); // NaN + any = NaN - try test__subtf3(@bitCast(f128, (@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000)), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0); + try test__subtf3(@as(f128, @bitCast((@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000))), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0); // inf - any = inf try test__subtf3(math.inf(f128), 0x1.23456789abcdefp+5, 0x7fff000000000000, 0x0); @@ -87,16 +87,16 @@ test "subtf3" { try test__subtf3(0x1.ee9d7c52354a6936ab8d7654321fp-1, 0x1.234567829a3bcdef5678ade36734p+5, 0xc0041b8af1915166, 0xa44a7bca780a166c); } -const qnan80 = @bitCast(f80, @bitCast(u80, math.nan(f80)) | (1 << (math.floatFractionalBits(f80) - 1))); +const qnan80 = @as(f80, @bitCast(@as(u80, @bitCast(math.nan(f80))) | (1 << (math.floatFractionalBits(f80) - 1)))); fn test__addxf3(a: f80, b: f80, expected: u80) !void { const x = __addxf3(a, b); - const rep = @bitCast(u80, x); + const rep = @as(u80, @bitCast(x)); if (rep == expected) return; - if (math.isNan(@bitCast(f80, expected)) and math.isNan(x)) + if (math.isNan(@as(f80, @bitCast(expected))) and math.isNan(x)) return; // We don't currently test NaN payload propagation return error.TestFailed; @@ -104,33 +104,33 @@ fn test__addxf3(a: f80, b: f80, expected: u80) !void 
{ test "addxf3" { // NaN + any = NaN - try test__addxf3(qnan80, 0x1.23456789abcdefp+5, @bitCast(u80, qnan80)); - try test__addxf3(@bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), 0x1.23456789abcdefp+5, @bitCast(u80, qnan80)); + try test__addxf3(qnan80, 0x1.23456789abcdefp+5, @as(u80, @bitCast(qnan80))); + try test__addxf3(@as(f80, @bitCast(@as(u80, 0x7fff_8000_8000_3000_0000))), 0x1.23456789abcdefp+5, @as(u80, @bitCast(qnan80))); // any + NaN = NaN - try test__addxf3(0x1.23456789abcdefp+5, qnan80, @bitCast(u80, qnan80)); - try test__addxf3(0x1.23456789abcdefp+5, @bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), @bitCast(u80, qnan80)); + try test__addxf3(0x1.23456789abcdefp+5, qnan80, @as(u80, @bitCast(qnan80))); + try test__addxf3(0x1.23456789abcdefp+5, @as(f80, @bitCast(@as(u80, 0x7fff_8000_8000_3000_0000))), @as(u80, @bitCast(qnan80))); // NaN + inf = NaN - try test__addxf3(qnan80, math.inf(f80), @bitCast(u80, qnan80)); + try test__addxf3(qnan80, math.inf(f80), @as(u80, @bitCast(qnan80))); // inf + NaN = NaN - try test__addxf3(math.inf(f80), qnan80, @bitCast(u80, qnan80)); + try test__addxf3(math.inf(f80), qnan80, @as(u80, @bitCast(qnan80))); // inf + inf = inf - try test__addxf3(math.inf(f80), math.inf(f80), @bitCast(u80, math.inf(f80))); + try test__addxf3(math.inf(f80), math.inf(f80), @as(u80, @bitCast(math.inf(f80)))); // inf + -inf = NaN - try test__addxf3(math.inf(f80), -math.inf(f80), @bitCast(u80, qnan80)); + try test__addxf3(math.inf(f80), -math.inf(f80), @as(u80, @bitCast(qnan80))); // -inf + inf = NaN - try test__addxf3(-math.inf(f80), math.inf(f80), @bitCast(u80, qnan80)); + try test__addxf3(-math.inf(f80), math.inf(f80), @as(u80, @bitCast(qnan80))); // inf + any = inf - try test__addxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @bitCast(u80, math.inf(f80))); + try test__addxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @as(u80, @bitCast(math.inf(f80)))); // any + inf = inf - try test__addxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @bitCast(u80, math.inf(f80))); + try test__addxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @as(u80, @bitCast(math.inf(f80)))); // any + any try test__addxf3(0x1.23456789abcdp+5, 0x1.dcba987654321p+5, 0x4005_BFFFFFFFFFFFC400); diff --git a/lib/compiler_rt/arm.zig b/lib/compiler_rt/arm.zig index b358fbfa80..811bb88d46 100644 --- a/lib/compiler_rt/arm.zig +++ b/lib/compiler_rt/arm.zig @@ -192,6 +192,6 @@ pub fn __aeabi_ldivmod() callconv(.Naked) void { } pub fn __aeabi_drsub(a: f64, b: f64) callconv(.AAPCS) f64 { - const neg_a = @bitCast(f64, @bitCast(u64, a) ^ (@as(u64, 1) << 63)); + const neg_a = @as(f64, @bitCast(@as(u64, @bitCast(a)) ^ (@as(u64, 1) << 63))); return b + neg_a; } diff --git a/lib/compiler_rt/atomics.zig b/lib/compiler_rt/atomics.zig index de0c777d45..8e2fdbb54b 100644 --- a/lib/compiler_rt/atomics.zig +++ b/lib/compiler_rt/atomics.zig @@ -232,16 +232,16 @@ fn wideUpdate(comptime T: type, ptr: *T, val: T, update: anytype) T { const addr = @intFromPtr(ptr); const wide_addr = addr & ~(@as(T, smallest_atomic_fetch_exch_size) - 1); - const wide_ptr = @alignCast(smallest_atomic_fetch_exch_size, @ptrFromInt(*WideAtomic, wide_addr)); + const wide_ptr: *align(smallest_atomic_fetch_exch_size) WideAtomic = @alignCast(@as(*WideAtomic, @ptrFromInt(wide_addr))); const inner_offset = addr & (@as(T, smallest_atomic_fetch_exch_size) - 1); - const inner_shift = @intCast(std.math.Log2Int(T), inner_offset * 8); + const inner_shift = @as(std.math.Log2Int(T), @intCast(inner_offset * 8)); const 
mask = @as(WideAtomic, std.math.maxInt(T)) << inner_shift; var wide_old = @atomicLoad(WideAtomic, wide_ptr, .SeqCst); while (true) { - const old = @truncate(T, (wide_old & mask) >> inner_shift); + const old = @as(T, @truncate((wide_old & mask) >> inner_shift)); const new = update(val, old); const wide_new = wide_old & ~mask | (@as(WideAtomic, new) << inner_shift); if (@cmpxchgWeak(WideAtomic, wide_ptr, wide_old, wide_new, .SeqCst, .SeqCst)) |new_wide_old| { diff --git a/lib/compiler_rt/aulldiv.zig b/lib/compiler_rt/aulldiv.zig index 95e1f2eaa2..1ce8f80c9f 100644 --- a/lib/compiler_rt/aulldiv.zig +++ b/lib/compiler_rt/aulldiv.zig @@ -21,9 +21,9 @@ pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 { const an = (a ^ s_a) -% s_a; const bn = (b ^ s_b) -% s_b; - const r = @bitCast(u64, an) / @bitCast(u64, bn); + const r = @as(u64, @bitCast(an)) / @as(u64, @bitCast(bn)); const s = s_a ^ s_b; - return (@bitCast(i64, r) ^ s) -% s; + return (@as(i64, @bitCast(r)) ^ s) -% s; } pub fn _aulldiv() callconv(.Naked) void { diff --git a/lib/compiler_rt/aullrem.zig b/lib/compiler_rt/aullrem.zig index 2bbcf6626c..a87ec26475 100644 --- a/lib/compiler_rt/aullrem.zig +++ b/lib/compiler_rt/aullrem.zig @@ -21,9 +21,9 @@ pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 { const an = (a ^ s_a) -% s_a; const bn = (b ^ s_b) -% s_b; - const r = @bitCast(u64, an) % @bitCast(u64, bn); + const r = @as(u64, @bitCast(an)) % @as(u64, @bitCast(bn)); const s = s_a ^ s_b; - return (@bitCast(i64, r) ^ s) -% s; + return (@as(i64, @bitCast(r)) ^ s) -% s; } pub fn _aullrem() callconv(.Naked) void { diff --git a/lib/compiler_rt/ceil.zig b/lib/compiler_rt/ceil.zig index 2765ed9f74..36ff398916 100644 --- a/lib/compiler_rt/ceil.zig +++ b/lib/compiler_rt/ceil.zig @@ -27,12 +27,12 @@ comptime { pub fn __ceilh(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, ceilf(x)); + return @as(f16, @floatCast(ceilf(x))); } pub fn ceilf(x: f32) callconv(.C) f32 { - var u = @bitCast(u32, x); - var e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F; + var u = @as(u32, @bitCast(x)); + var e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F; var m: u32 = undefined; // TODO: Shouldn't need this explicit check. 
@@ -43,7 +43,7 @@ pub fn ceilf(x: f32) callconv(.C) f32 { if (e >= 23) { return x; } else if (e >= 0) { - m = @as(u32, 0x007FFFFF) >> @intCast(u5, e); + m = @as(u32, 0x007FFFFF) >> @as(u5, @intCast(e)); if (u & m == 0) { return x; } @@ -52,7 +52,7 @@ pub fn ceilf(x: f32) callconv(.C) f32 { u += m; } u &= ~m; - return @bitCast(f32, u); + return @as(f32, @bitCast(u)); } else { math.doNotOptimizeAway(x + 0x1.0p120); if (u >> 31 != 0) { @@ -66,7 +66,7 @@ pub fn ceilf(x: f32) callconv(.C) f32 { pub fn ceil(x: f64) callconv(.C) f64 { const f64_toint = 1.0 / math.floatEps(f64); - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); const e = (u >> 52) & 0x7FF; var y: f64 = undefined; @@ -96,13 +96,13 @@ pub fn ceil(x: f64) callconv(.C) f64 { pub fn __ceilx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, ceilq(x)); + return @as(f80, @floatCast(ceilq(x))); } pub fn ceilq(x: f128) callconv(.C) f128 { const f128_toint = 1.0 / math.floatEps(f128); - const u = @bitCast(u128, x); + const u = @as(u128, @bitCast(x)); const e = (u >> 112) & 0x7FFF; var y: f128 = undefined; diff --git a/lib/compiler_rt/clear_cache.zig b/lib/compiler_rt/clear_cache.zig index e39d726e0f..f2f4fc9bc2 100644 --- a/lib/compiler_rt/clear_cache.zig +++ b/lib/compiler_rt/clear_cache.zig @@ -102,7 +102,7 @@ fn clear_cache(start: usize, end: usize) callconv(.C) void { // If CTR_EL0.IDC is set, data cache cleaning to the point of unification // is not required for instruction to data coherence. if (((ctr_el0 >> 28) & 0x1) == 0x0) { - const dcache_line_size: usize = @as(usize, 4) << @intCast(u6, (ctr_el0 >> 16) & 15); + const dcache_line_size: usize = @as(usize, 4) << @as(u6, @intCast((ctr_el0 >> 16) & 15)); addr = start & ~(dcache_line_size - 1); while (addr < end) : (addr += dcache_line_size) { asm volatile ("dc cvau, %[addr]" @@ -115,7 +115,7 @@ fn clear_cache(start: usize, end: usize) callconv(.C) void { // If CTR_EL0.DIC is set, instruction cache invalidation to the point of // unification is not required for instruction to data coherence. 
if (((ctr_el0 >> 29) & 0x1) == 0x0) { - const icache_line_size: usize = @as(usize, 4) << @intCast(u6, (ctr_el0 >> 0) & 15); + const icache_line_size: usize = @as(usize, 4) << @as(u6, @intCast((ctr_el0 >> 0) & 15)); addr = start & ~(icache_line_size - 1); while (addr < end) : (addr += icache_line_size) { asm volatile ("ic ivau, %[addr]" diff --git a/lib/compiler_rt/clzdi2_test.zig b/lib/compiler_rt/clzdi2_test.zig index 1f12b1bcd0..c713c35755 100644 --- a/lib/compiler_rt/clzdi2_test.zig +++ b/lib/compiler_rt/clzdi2_test.zig @@ -2,7 +2,7 @@ const clz = @import("count0bits.zig"); const testing = @import("std").testing; fn test__clzdi2(a: u64, expected: i64) !void { - var x = @bitCast(i64, a); + var x = @as(i64, @bitCast(a)); var result = clz.__clzdi2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/clzsi2_test.zig b/lib/compiler_rt/clzsi2_test.zig index fc0790ef71..575952241c 100644 --- a/lib/compiler_rt/clzsi2_test.zig +++ b/lib/compiler_rt/clzsi2_test.zig @@ -4,8 +4,8 @@ const testing = @import("std").testing; fn test__clzsi2(a: u32, expected: i32) !void { const nakedClzsi2 = clz.__clzsi2; - const actualClzsi2 = @ptrCast(*const fn (a: i32) callconv(.C) i32, &nakedClzsi2); - const x = @bitCast(i32, a); + const actualClzsi2 = @as(*const fn (a: i32) callconv(.C) i32, @ptrCast(&nakedClzsi2)); + const x = @as(i32, @bitCast(a)); const result = actualClzsi2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/clzti2_test.zig b/lib/compiler_rt/clzti2_test.zig index 171c285a27..ce0f26c32e 100644 --- a/lib/compiler_rt/clzti2_test.zig +++ b/lib/compiler_rt/clzti2_test.zig @@ -2,7 +2,7 @@ const clz = @import("count0bits.zig"); const testing = @import("std").testing; fn test__clzti2(a: u128, expected: i64) !void { - var x = @bitCast(i128, a); + var x = @as(i128, @bitCast(a)); var result = clz.__clzti2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/cmptf2.zig b/lib/compiler_rt/cmptf2.zig index bee0652292..4f8ecc73b9 100644 --- a/lib/compiler_rt/cmptf2.zig +++ b/lib/compiler_rt/cmptf2.zig @@ -75,30 +75,30 @@ fn _Qp_cmp(a: *const f128, b: *const f128) callconv(.C) i32 { } fn _Qp_feq(a: *const f128, b: *const f128) callconv(.C) bool { - return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) == .Equal; + return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Equal; } fn _Qp_fne(a: *const f128, b: *const f128) callconv(.C) bool { - return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) != .Equal; + return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) != .Equal; } fn _Qp_flt(a: *const f128, b: *const f128) callconv(.C) bool { - return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) == .Less; + return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Less; } fn _Qp_fgt(a: *const f128, b: *const f128) callconv(.C) bool { - return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) == .Greater; + return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Greater; } fn _Qp_fge(a: *const f128, b: *const f128) callconv(.C) bool { - return switch (@enumFromInt(SparcFCMP, _Qp_cmp(a, b))) { + return switch (@as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b)))) { .Equal, .Greater => true, .Less, .Unordered => false, }; } fn _Qp_fle(a: *const f128, b: *const f128) callconv(.C) bool { - return switch (@enumFromInt(SparcFCMP, _Qp_cmp(a, b))) { + return switch (@as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b)))) { .Equal, .Less => true, .Greater, .Unordered => false, }; diff --git a/lib/compiler_rt/common.zig b/lib/compiler_rt/common.zig index eaabffa073..752a4a46df 100644 --- 
a/lib/compiler_rt/common.zig +++ b/lib/compiler_rt/common.zig @@ -102,22 +102,22 @@ pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void { u16 => { // 16x16 --> 32 bit multiply const product = @as(u32, a) * @as(u32, b); - hi.* = @intCast(u16, product >> 16); - lo.* = @truncate(u16, product); + hi.* = @as(u16, @intCast(product >> 16)); + lo.* = @as(u16, @truncate(product)); }, u32 => { // 32x32 --> 64 bit multiply const product = @as(u64, a) * @as(u64, b); - hi.* = @truncate(u32, product >> 32); - lo.* = @truncate(u32, product); + hi.* = @as(u32, @truncate(product >> 32)); + lo.* = @as(u32, @truncate(product)); }, u64 => { const S = struct { fn loWord(x: u64) u64 { - return @truncate(u32, x); + return @as(u32, @truncate(x)); } fn hiWord(x: u64) u64 { - return @truncate(u32, x >> 32); + return @as(u32, @truncate(x >> 32)); } }; // 64x64 -> 128 wide multiply for platforms that don't have such an operation; @@ -141,16 +141,16 @@ pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void { const Word_FullMask = @as(u64, 0xffffffffffffffff); const S = struct { fn Word_1(x: u128) u64 { - return @truncate(u32, x >> 96); + return @as(u32, @truncate(x >> 96)); } fn Word_2(x: u128) u64 { - return @truncate(u32, x >> 64); + return @as(u32, @truncate(x >> 64)); } fn Word_3(x: u128) u64 { - return @truncate(u32, x >> 32); + return @as(u32, @truncate(x >> 32)); } fn Word_4(x: u128) u64 { - return @truncate(u32, x); + return @as(u32, @truncate(x)); } }; // 128x128 -> 256 wide multiply for platforms that don't have such an operation; @@ -216,7 +216,7 @@ pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeIn const integerBit = @as(Z, 1) << std.math.floatFractionalBits(T); const shift = @clz(significand.*) - @clz(integerBit); - significand.* <<= @intCast(std.math.Log2Int(Z), shift); + significand.* <<= @as(std.math.Log2Int(Z), @intCast(shift)); return @as(i32, 1) - shift; } @@ -228,8 +228,8 @@ pub inline fn fneg(a: anytype) @TypeOf(a) { .bits = bits, } }); const sign_bit_mask = @as(U, 1) << (bits - 1); - const negated = @bitCast(U, a) ^ sign_bit_mask; - return @bitCast(F, negated); + const negated = @as(U, @bitCast(a)) ^ sign_bit_mask; + return @as(F, @bitCast(negated)); } /// Allows to access underlying bits as two equally sized lower and higher diff --git a/lib/compiler_rt/comparef.zig b/lib/compiler_rt/comparef.zig index d4f4e0504d..94c19dcae4 100644 --- a/lib/compiler_rt/comparef.zig +++ b/lib/compiler_rt/comparef.zig @@ -26,12 +26,12 @@ pub inline fn cmpf2(comptime T: type, comptime RT: type, a: T, b: T) RT { const signBit = (@as(rep_t, 1) << (significandBits + exponentBits)); const absMask = signBit - 1; const infT = comptime std.math.inf(T); - const infRep = @bitCast(rep_t, infT); + const infRep = @as(rep_t, @bitCast(infT)); - const aInt = @bitCast(srep_t, a); - const bInt = @bitCast(srep_t, b); - const aAbs = @bitCast(rep_t, aInt) & absMask; - const bAbs = @bitCast(rep_t, bInt) & absMask; + const aInt = @as(srep_t, @bitCast(a)); + const bInt = @as(srep_t, @bitCast(b)); + const aAbs = @as(rep_t, @bitCast(aInt)) & absMask; + const bAbs = @as(rep_t, @bitCast(bInt)) & absMask; // If either a or b is NaN, they are unordered. 
if (aAbs > infRep or bAbs > infRep) return RT.Unordered; @@ -81,7 +81,7 @@ pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT { return .Equal; } else if (a_rep.exp & sign_bit != b_rep.exp & sign_bit) { // signs are different - if (@bitCast(i16, a_rep.exp) < @bitCast(i16, b_rep.exp)) { + if (@as(i16, @bitCast(a_rep.exp)) < @as(i16, @bitCast(b_rep.exp))) { return .Less; } else { return .Greater; @@ -104,10 +104,10 @@ pub inline fn unordcmp(comptime T: type, a: T, b: T) i32 { const exponentBits = std.math.floatExponentBits(T); const signBit = (@as(rep_t, 1) << (significandBits + exponentBits)); const absMask = signBit - 1; - const infRep = @bitCast(rep_t, std.math.inf(T)); + const infRep = @as(rep_t, @bitCast(std.math.inf(T))); - const aAbs: rep_t = @bitCast(rep_t, a) & absMask; - const bAbs: rep_t = @bitCast(rep_t, b) & absMask; + const aAbs: rep_t = @as(rep_t, @bitCast(a)) & absMask; + const bAbs: rep_t = @as(rep_t, @bitCast(b)) & absMask; return @intFromBool(aAbs > infRep or bAbs > infRep); } diff --git a/lib/compiler_rt/cos.zig b/lib/compiler_rt/cos.zig index 029b6c346a..898c4fa64e 100644 --- a/lib/compiler_rt/cos.zig +++ b/lib/compiler_rt/cos.zig @@ -25,7 +25,7 @@ comptime { pub fn __cosh(a: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, cosf(a)); + return @as(f16, @floatCast(cosf(a))); } pub fn cosf(x: f32) callconv(.C) f32 { @@ -35,7 +35,7 @@ pub fn cosf(x: f32) callconv(.C) f32 { const c3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2 const c4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18 - var ix = @bitCast(u32, x); + var ix = @as(u32, @bitCast(x)); const sign = ix >> 31 != 0; ix &= 0x7fffffff; @@ -86,7 +86,7 @@ pub fn cosf(x: f32) callconv(.C) f32 { } pub fn cos(x: f64) callconv(.C) f64 { - var ix = @bitCast(u64, x) >> 32; + var ix = @as(u64, @bitCast(x)) >> 32; ix &= 0x7fffffff; // |x| ~< pi/4 @@ -116,12 +116,12 @@ pub fn cos(x: f64) callconv(.C) f64 { pub fn __cosx(a: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, cosq(a)); + return @as(f80, @floatCast(cosq(a))); } pub fn cosq(a: f128) callconv(.C) f128 { // TODO: more correct implementation - return cos(@floatCast(f64, a)); + return cos(@as(f64, @floatCast(a))); } pub fn cosl(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/count0bits.zig b/lib/compiler_rt/count0bits.zig index e1500d0102..952d25e146 100644 --- a/lib/compiler_rt/count0bits.zig +++ b/lib/compiler_rt/count0bits.zig @@ -32,9 +32,9 @@ comptime { inline fn clzXi2(comptime T: type, a: T) i32 { var x = switch (@bitSizeOf(T)) { - 32 => @bitCast(u32, a), - 64 => @bitCast(u64, a), - 128 => @bitCast(u128, a), + 32 => @as(u32, @bitCast(a)), + 64 => @as(u64, @bitCast(a)), + 128 => @as(u128, @bitCast(a)), else => unreachable, }; var n: T = @bitSizeOf(T); @@ -49,7 +49,7 @@ inline fn clzXi2(comptime T: type, a: T) i32 { x = y; } } - return @intCast(i32, n - @bitCast(T, x)); + return @as(i32, @intCast(n - @as(T, @bitCast(x)))); } fn __clzsi2_thumb1() callconv(.Naked) void { @@ -169,9 +169,9 @@ pub fn __clzti2(a: i128) callconv(.C) i32 { inline fn ctzXi2(comptime T: type, a: T) i32 { var x = switch (@bitSizeOf(T)) { - 32 => @bitCast(u32, a), - 64 => @bitCast(u64, a), - 128 => @bitCast(u128, a), + 32 => @as(u32, @bitCast(a)), + 64 => @as(u64, @bitCast(a)), + 128 => @as(u128, @bitCast(a)), else => unreachable, }; var n: T = 1; @@ -187,7 +187,7 @@ inline fn ctzXi2(comptime T: type, a: T) i32 { x = x >> shift; } } - return @intCast(i32, n - 
@bitCast(T, (x & 1))); + return @as(i32, @intCast(n - @as(T, @bitCast((x & 1))))); } pub fn __ctzsi2(a: i32) callconv(.C) i32 { @@ -204,9 +204,9 @@ pub fn __ctzti2(a: i128) callconv(.C) i32 { inline fn ffsXi2(comptime T: type, a: T) i32 { var x = switch (@bitSizeOf(T)) { - 32 => @bitCast(u32, a), - 64 => @bitCast(u64, a), - 128 => @bitCast(u128, a), + 32 => @as(u32, @bitCast(a)), + 64 => @as(u64, @bitCast(a)), + 128 => @as(u128, @bitCast(a)), else => unreachable, }; var n: T = 1; @@ -224,7 +224,7 @@ inline fn ffsXi2(comptime T: type, a: T) i32 { } } // return ctz + 1 - return @intCast(i32, n - @bitCast(T, (x & 1))) + @as(i32, 1); + return @as(i32, @intCast(n - @as(T, @bitCast((x & 1))))) + @as(i32, 1); } pub fn __ffssi2(a: i32) callconv(.C) i32 { diff --git a/lib/compiler_rt/ctzdi2_test.zig b/lib/compiler_rt/ctzdi2_test.zig index 4ee1dc0f78..f5b7139b0e 100644 --- a/lib/compiler_rt/ctzdi2_test.zig +++ b/lib/compiler_rt/ctzdi2_test.zig @@ -2,7 +2,7 @@ const ctz = @import("count0bits.zig"); const testing = @import("std").testing; fn test__ctzdi2(a: u64, expected: i32) !void { - var x = @bitCast(i64, a); + var x = @as(i64, @bitCast(a)); var result = ctz.__ctzdi2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/ctzsi2_test.zig b/lib/compiler_rt/ctzsi2_test.zig index 5d9e01f0df..d30a15daac 100644 --- a/lib/compiler_rt/ctzsi2_test.zig +++ b/lib/compiler_rt/ctzsi2_test.zig @@ -2,7 +2,7 @@ const ctz = @import("count0bits.zig"); const testing = @import("std").testing; fn test__ctzsi2(a: u32, expected: i32) !void { - var x = @bitCast(i32, a); + var x = @as(i32, @bitCast(a)); var result = ctz.__ctzsi2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/ctzti2_test.zig b/lib/compiler_rt/ctzti2_test.zig index 4b7fbf8b1c..2d509f5988 100644 --- a/lib/compiler_rt/ctzti2_test.zig +++ b/lib/compiler_rt/ctzti2_test.zig @@ -2,7 +2,7 @@ const ctz = @import("count0bits.zig"); const testing = @import("std").testing; fn test__ctzti2(a: u128, expected: i32) !void { - var x = @bitCast(i128, a); + var x = @as(i128, @bitCast(a)); var result = ctz.__ctzti2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/divdf3.zig b/lib/compiler_rt/divdf3.zig index c71eed6d0f..f6e65f743d 100644 --- a/lib/compiler_rt/divdf3.zig +++ b/lib/compiler_rt/divdf3.zig @@ -47,52 +47,52 @@ inline fn div(a: f64, b: f64) f64 { const absMask = signBit - 1; const exponentMask = absMask ^ significandMask; const qnanRep = exponentMask | quietBit; - const infRep = @bitCast(Z, std.math.inf(f64)); + const infRep = @as(Z, @bitCast(std.math.inf(f64))); - const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent); - const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent); - const quotientSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit; + const aExponent = @as(u32, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent)); + const bExponent = @as(u32, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent)); + const quotientSign: Z = (@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit; - var aSignificand: Z = @bitCast(Z, a) & significandMask; - var bSignificand: Z = @bitCast(Z, b) & significandMask; + var aSignificand: Z = @as(Z, @bitCast(a)) & significandMask; + var bSignificand: Z = @as(Z, @bitCast(b)) & significandMask; var scale: i32 = 0; // Detect if a or b is zero, denormal, infinity, or NaN. 
if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) { - const aAbs: Z = @bitCast(Z, a) & absMask; - const bAbs: Z = @bitCast(Z, b) & absMask; + const aAbs: Z = @as(Z, @bitCast(a)) & absMask; + const bAbs: Z = @as(Z, @bitCast(b)) & absMask; // NaN / anything = qNaN - if (aAbs > infRep) return @bitCast(f64, @bitCast(Z, a) | quietBit); + if (aAbs > infRep) return @as(f64, @bitCast(@as(Z, @bitCast(a)) | quietBit)); // anything / NaN = qNaN - if (bAbs > infRep) return @bitCast(f64, @bitCast(Z, b) | quietBit); + if (bAbs > infRep) return @as(f64, @bitCast(@as(Z, @bitCast(b)) | quietBit)); if (aAbs == infRep) { // infinity / infinity = NaN if (bAbs == infRep) { - return @bitCast(f64, qnanRep); + return @as(f64, @bitCast(qnanRep)); } // infinity / anything else = +/- infinity else { - return @bitCast(f64, aAbs | quotientSign); + return @as(f64, @bitCast(aAbs | quotientSign)); } } // anything else / infinity = +/- 0 - if (bAbs == infRep) return @bitCast(f64, quotientSign); + if (bAbs == infRep) return @as(f64, @bitCast(quotientSign)); if (aAbs == 0) { // zero / zero = NaN if (bAbs == 0) { - return @bitCast(f64, qnanRep); + return @as(f64, @bitCast(qnanRep)); } // zero / anything else = +/- zero else { - return @bitCast(f64, quotientSign); + return @as(f64, @bitCast(quotientSign)); } } // anything else / zero = +/- infinity - if (bAbs == 0) return @bitCast(f64, infRep | quotientSign); + if (bAbs == 0) return @as(f64, @bitCast(infRep | quotientSign)); // one or both of a or b is denormal, the other (if applicable) is a // normal number. Renormalize one or both of a and b, and set scale to @@ -106,13 +106,13 @@ inline fn div(a: f64, b: f64) f64 { // won't hurt anything.) aSignificand |= implicitBit; bSignificand |= implicitBit; - var quotientExponent: i32 = @bitCast(i32, aExponent -% bExponent) +% scale; + var quotientExponent: i32 = @as(i32, @bitCast(aExponent -% bExponent)) +% scale; // Align the significand of b as a Q31 fixed-point number in the range // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This // is accurate to about 3.5 binary digits. - const q31b: u32 = @truncate(u32, bSignificand >> 21); + const q31b: u32 = @as(u32, @truncate(bSignificand >> 21)); var recip32 = @as(u32, 0x7504f333) -% q31b; // Now refine the reciprocal estimate using a Newton-Raphson iteration: @@ -123,12 +123,12 @@ inline fn div(a: f64, b: f64) f64 { // with each iteration, so after three iterations, we have about 28 binary // digits of accuracy. 
var correction32: u32 = undefined; - correction32 = @truncate(u32, ~(@as(u64, recip32) *% q31b >> 32) +% 1); - recip32 = @truncate(u32, @as(u64, recip32) *% correction32 >> 31); - correction32 = @truncate(u32, ~(@as(u64, recip32) *% q31b >> 32) +% 1); - recip32 = @truncate(u32, @as(u64, recip32) *% correction32 >> 31); - correction32 = @truncate(u32, ~(@as(u64, recip32) *% q31b >> 32) +% 1); - recip32 = @truncate(u32, @as(u64, recip32) *% correction32 >> 31); + correction32 = @as(u32, @truncate(~(@as(u64, recip32) *% q31b >> 32) +% 1)); + recip32 = @as(u32, @truncate(@as(u64, recip32) *% correction32 >> 31)); + correction32 = @as(u32, @truncate(~(@as(u64, recip32) *% q31b >> 32) +% 1)); + recip32 = @as(u32, @truncate(@as(u64, recip32) *% correction32 >> 31)); + correction32 = @as(u32, @truncate(~(@as(u64, recip32) *% q31b >> 32) +% 1)); + recip32 = @as(u32, @truncate(@as(u64, recip32) *% correction32 >> 31)); // recip32 might have overflowed to exactly zero in the preceding // computation if the high word of b is exactly 1.0. This would sabotage @@ -138,12 +138,12 @@ inline fn div(a: f64, b: f64) f64 { // We need to perform one more iteration to get us to 56 binary digits; // The last iteration needs to happen with extra precision. - const q63blo: u32 = @truncate(u32, bSignificand << 11); + const q63blo: u32 = @as(u32, @truncate(bSignificand << 11)); var correction: u64 = undefined; var reciprocal: u64 = undefined; correction = ~(@as(u64, recip32) *% q31b +% (@as(u64, recip32) *% q63blo >> 32)) +% 1; - const cHi = @truncate(u32, correction >> 32); - const cLo = @truncate(u32, correction); + const cHi = @as(u32, @truncate(correction >> 32)); + const cLo = @as(u32, @truncate(correction)); reciprocal = @as(u64, recip32) *% cHi +% (@as(u64, recip32) *% cLo >> 32); // We already adjusted the 32-bit estimate, now we need to adjust the final @@ -195,7 +195,7 @@ inline fn div(a: f64, b: f64) f64 { if (writtenExponent >= maxExponent) { // If we have overflowed the exponent, return infinity. - return @bitCast(f64, infRep | quotientSign); + return @as(f64, @bitCast(infRep | quotientSign)); } else if (writtenExponent < 1) { if (writtenExponent == 0) { // Check whether the rounded result is normal. @@ -206,22 +206,22 @@ inline fn div(a: f64, b: f64) f64 { absResult += round; if ((absResult & ~significandMask) != 0) { // The rounded result is normal; return it. - return @bitCast(f64, absResult | quotientSign); + return @as(f64, @bitCast(absResult | quotientSign)); } } // Flush denormals to zero. In the future, it would be nice to add // code to round them correctly. 
- return @bitCast(f64, quotientSign); + return @as(f64, @bitCast(quotientSign)); } else { const round = @intFromBool((residual << 1) > bSignificand); // Clear the implicit bit var absResult = quotient & significandMask; // Insert the exponent - absResult |= @bitCast(Z, @as(SignedZ, writtenExponent)) << significandBits; + absResult |= @as(Z, @bitCast(@as(SignedZ, writtenExponent))) << significandBits; // Round absResult +%= round; // Insert the sign and return - return @bitCast(f64, absResult | quotientSign); + return @as(f64, @bitCast(absResult | quotientSign)); } } diff --git a/lib/compiler_rt/divdf3_test.zig b/lib/compiler_rt/divdf3_test.zig index 28cb0bc4df..93839e1bf7 100644 --- a/lib/compiler_rt/divdf3_test.zig +++ b/lib/compiler_rt/divdf3_test.zig @@ -6,7 +6,7 @@ const __divdf3 = @import("divdf3.zig").__divdf3; const testing = @import("std").testing; fn compareResultD(result: f64, expected: u64) bool { - const rep = @bitCast(u64, result); + const rep = @as(u64, @bitCast(result)); if (rep == expected) { return true; diff --git a/lib/compiler_rt/divhf3.zig b/lib/compiler_rt/divhf3.zig index 6bb607bef9..eaed9d1c2f 100644 --- a/lib/compiler_rt/divhf3.zig +++ b/lib/compiler_rt/divhf3.zig @@ -7,5 +7,5 @@ comptime { pub fn __divhf3(a: f16, b: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, divsf3.__divsf3(a, b)); + return @as(f16, @floatCast(divsf3.__divsf3(a, b))); } diff --git a/lib/compiler_rt/divsf3.zig b/lib/compiler_rt/divsf3.zig index d35220ca26..9e5ade8234 100644 --- a/lib/compiler_rt/divsf3.zig +++ b/lib/compiler_rt/divsf3.zig @@ -44,52 +44,52 @@ inline fn div(a: f32, b: f32) f32 { const absMask = signBit - 1; const exponentMask = absMask ^ significandMask; const qnanRep = exponentMask | quietBit; - const infRep = @bitCast(Z, std.math.inf(f32)); + const infRep = @as(Z, @bitCast(std.math.inf(f32))); - const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent); - const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent); - const quotientSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit; + const aExponent = @as(u32, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent)); + const bExponent = @as(u32, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent)); + const quotientSign: Z = (@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit; - var aSignificand: Z = @bitCast(Z, a) & significandMask; - var bSignificand: Z = @bitCast(Z, b) & significandMask; + var aSignificand: Z = @as(Z, @bitCast(a)) & significandMask; + var bSignificand: Z = @as(Z, @bitCast(b)) & significandMask; var scale: i32 = 0; // Detect if a or b is zero, denormal, infinity, or NaN. 
if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) { - const aAbs: Z = @bitCast(Z, a) & absMask; - const bAbs: Z = @bitCast(Z, b) & absMask; + const aAbs: Z = @as(Z, @bitCast(a)) & absMask; + const bAbs: Z = @as(Z, @bitCast(b)) & absMask; // NaN / anything = qNaN - if (aAbs > infRep) return @bitCast(f32, @bitCast(Z, a) | quietBit); + if (aAbs > infRep) return @as(f32, @bitCast(@as(Z, @bitCast(a)) | quietBit)); // anything / NaN = qNaN - if (bAbs > infRep) return @bitCast(f32, @bitCast(Z, b) | quietBit); + if (bAbs > infRep) return @as(f32, @bitCast(@as(Z, @bitCast(b)) | quietBit)); if (aAbs == infRep) { // infinity / infinity = NaN if (bAbs == infRep) { - return @bitCast(f32, qnanRep); + return @as(f32, @bitCast(qnanRep)); } // infinity / anything else = +/- infinity else { - return @bitCast(f32, aAbs | quotientSign); + return @as(f32, @bitCast(aAbs | quotientSign)); } } // anything else / infinity = +/- 0 - if (bAbs == infRep) return @bitCast(f32, quotientSign); + if (bAbs == infRep) return @as(f32, @bitCast(quotientSign)); if (aAbs == 0) { // zero / zero = NaN if (bAbs == 0) { - return @bitCast(f32, qnanRep); + return @as(f32, @bitCast(qnanRep)); } // zero / anything else = +/- zero else { - return @bitCast(f32, quotientSign); + return @as(f32, @bitCast(quotientSign)); } } // anything else / zero = +/- infinity - if (bAbs == 0) return @bitCast(f32, infRep | quotientSign); + if (bAbs == 0) return @as(f32, @bitCast(infRep | quotientSign)); // one or both of a or b is denormal, the other (if applicable) is a // normal number. Renormalize one or both of a and b, and set scale to @@ -103,7 +103,7 @@ inline fn div(a: f32, b: f32) f32 { // won't hurt anything.) aSignificand |= implicitBit; bSignificand |= implicitBit; - var quotientExponent: i32 = @bitCast(i32, aExponent -% bExponent) +% scale; + var quotientExponent: i32 = @as(i32, @bitCast(aExponent -% bExponent)) +% scale; // Align the significand of b as a Q31 fixed-point number in the range // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax @@ -120,12 +120,12 @@ inline fn div(a: f32, b: f32) f32 { // with each iteration, so after three iterations, we have about 28 binary // digits of accuracy. var correction: u32 = undefined; - correction = @truncate(u32, ~(@as(u64, reciprocal) *% q31b >> 32) +% 1); - reciprocal = @truncate(u32, @as(u64, reciprocal) *% correction >> 31); - correction = @truncate(u32, ~(@as(u64, reciprocal) *% q31b >> 32) +% 1); - reciprocal = @truncate(u32, @as(u64, reciprocal) *% correction >> 31); - correction = @truncate(u32, ~(@as(u64, reciprocal) *% q31b >> 32) +% 1); - reciprocal = @truncate(u32, @as(u64, reciprocal) *% correction >> 31); + correction = @as(u32, @truncate(~(@as(u64, reciprocal) *% q31b >> 32) +% 1)); + reciprocal = @as(u32, @truncate(@as(u64, reciprocal) *% correction >> 31)); + correction = @as(u32, @truncate(~(@as(u64, reciprocal) *% q31b >> 32) +% 1)); + reciprocal = @as(u32, @truncate(@as(u64, reciprocal) *% correction >> 31)); + correction = @as(u32, @truncate(~(@as(u64, reciprocal) *% q31b >> 32) +% 1)); + reciprocal = @as(u32, @truncate(@as(u64, reciprocal) *% correction >> 31)); // Exhaustive testing shows that the error in reciprocal after three steps // is in the interval [-0x1.f58108p-31, 0x1.d0e48cp-29], in line with our @@ -147,7 +147,7 @@ inline fn div(a: f32, b: f32) f32 { // is the error in the reciprocal of b scaled by the maximum // possible value of a. 
As a consequence of this error bound, // either q or nextafter(q) is the correctly rounded - var quotient: Z = @truncate(u32, @as(u64, reciprocal) *% (aSignificand << 1) >> 32); + var quotient: Z = @as(u32, @truncate(@as(u64, reciprocal) *% (aSignificand << 1) >> 32)); // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0). // In either case, we are going to compute a residual of the form @@ -175,7 +175,7 @@ inline fn div(a: f32, b: f32) f32 { if (writtenExponent >= maxExponent) { // If we have overflowed the exponent, return infinity. - return @bitCast(f32, infRep | quotientSign); + return @as(f32, @bitCast(infRep | quotientSign)); } else if (writtenExponent < 1) { if (writtenExponent == 0) { // Check whether the rounded result is normal. @@ -186,22 +186,22 @@ inline fn div(a: f32, b: f32) f32 { absResult += round; if ((absResult & ~significandMask) > 0) { // The rounded result is normal; return it. - return @bitCast(f32, absResult | quotientSign); + return @as(f32, @bitCast(absResult | quotientSign)); } } // Flush denormals to zero. In the future, it would be nice to add // code to round them correctly. - return @bitCast(f32, quotientSign); + return @as(f32, @bitCast(quotientSign)); } else { const round = @intFromBool((residual << 1) > bSignificand); // Clear the implicit bit var absResult = quotient & significandMask; // Insert the exponent - absResult |= @bitCast(Z, writtenExponent) << significandBits; + absResult |= @as(Z, @bitCast(writtenExponent)) << significandBits; // Round absResult +%= round; // Insert the sign and return - return @bitCast(f32, absResult | quotientSign); + return @as(f32, @bitCast(absResult | quotientSign)); } } diff --git a/lib/compiler_rt/divsf3_test.zig b/lib/compiler_rt/divsf3_test.zig index 0c06d4c15a..ff562fe54e 100644 --- a/lib/compiler_rt/divsf3_test.zig +++ b/lib/compiler_rt/divsf3_test.zig @@ -6,7 +6,7 @@ const __divsf3 = @import("divsf3.zig").__divsf3; const testing = @import("std").testing; fn compareResultF(result: f32, expected: u32) bool { - const rep = @bitCast(u32, result); + const rep = @as(u32, @bitCast(result)); if (rep == expected) { return true; diff --git a/lib/compiler_rt/divtf3.zig b/lib/compiler_rt/divtf3.zig index 86a2f30cc8..b979cfce96 100644 --- a/lib/compiler_rt/divtf3.zig +++ b/lib/compiler_rt/divtf3.zig @@ -41,52 +41,52 @@ inline fn div(a: f128, b: f128) f128 { const absMask = signBit - 1; const exponentMask = absMask ^ significandMask; const qnanRep = exponentMask | quietBit; - const infRep = @bitCast(Z, std.math.inf(f128)); + const infRep = @as(Z, @bitCast(std.math.inf(f128))); - const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent); - const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent); - const quotientSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit; + const aExponent = @as(u32, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent)); + const bExponent = @as(u32, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent)); + const quotientSign: Z = (@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit; - var aSignificand: Z = @bitCast(Z, a) & significandMask; - var bSignificand: Z = @bitCast(Z, b) & significandMask; + var aSignificand: Z = @as(Z, @bitCast(a)) & significandMask; + var bSignificand: Z = @as(Z, @bitCast(b)) & significandMask; var scale: i32 = 0; // Detect if a or b is zero, denormal, infinity, or NaN. 
if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) { - const aAbs: Z = @bitCast(Z, a) & absMask; - const bAbs: Z = @bitCast(Z, b) & absMask; + const aAbs: Z = @as(Z, @bitCast(a)) & absMask; + const bAbs: Z = @as(Z, @bitCast(b)) & absMask; // NaN / anything = qNaN - if (aAbs > infRep) return @bitCast(f128, @bitCast(Z, a) | quietBit); + if (aAbs > infRep) return @as(f128, @bitCast(@as(Z, @bitCast(a)) | quietBit)); // anything / NaN = qNaN - if (bAbs > infRep) return @bitCast(f128, @bitCast(Z, b) | quietBit); + if (bAbs > infRep) return @as(f128, @bitCast(@as(Z, @bitCast(b)) | quietBit)); if (aAbs == infRep) { // infinity / infinity = NaN if (bAbs == infRep) { - return @bitCast(f128, qnanRep); + return @as(f128, @bitCast(qnanRep)); } // infinity / anything else = +/- infinity else { - return @bitCast(f128, aAbs | quotientSign); + return @as(f128, @bitCast(aAbs | quotientSign)); } } // anything else / infinity = +/- 0 - if (bAbs == infRep) return @bitCast(f128, quotientSign); + if (bAbs == infRep) return @as(f128, @bitCast(quotientSign)); if (aAbs == 0) { // zero / zero = NaN if (bAbs == 0) { - return @bitCast(f128, qnanRep); + return @as(f128, @bitCast(qnanRep)); } // zero / anything else = +/- zero else { - return @bitCast(f128, quotientSign); + return @as(f128, @bitCast(quotientSign)); } } // anything else / zero = +/- infinity - if (bAbs == 0) return @bitCast(f128, infRep | quotientSign); + if (bAbs == 0) return @as(f128, @bitCast(infRep | quotientSign)); // one or both of a or b is denormal, the other (if applicable) is a // normal number. Renormalize one or both of a and b, and set scale to @@ -100,13 +100,13 @@ inline fn div(a: f128, b: f128) f128 { // won't hurt anything. aSignificand |= implicitBit; bSignificand |= implicitBit; - var quotientExponent: i32 = @bitCast(i32, aExponent -% bExponent) +% scale; + var quotientExponent: i32 = @as(i32, @bitCast(aExponent -% bExponent)) +% scale; // Align the significand of b as a Q63 fixed-point number in the range // [1, 2.0) and get a Q64 approximate reciprocal using a small minimax // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This // is accurate to about 3.5 binary digits. - const q63b = @truncate(u64, bSignificand >> 49); + const q63b = @as(u64, @truncate(bSignificand >> 49)); var recip64 = @as(u64, 0x7504f333F9DE6484) -% q63b; // 0x7504f333F9DE6484 / 2^64 + 1 = 3/4 + 1/sqrt(2) @@ -117,16 +117,16 @@ inline fn div(a: f128, b: f128) f128 { // This doubles the number of correct binary digits in the approximation // with each iteration. 
var correction64: u64 = undefined; - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); // The reciprocal may have overflowed to zero if the upper half of b is // exactly 1.0. This would sabatoge the full-width final stage of the @@ -135,7 +135,7 @@ inline fn div(a: f128, b: f128) f128 { // We need to perform one more iteration to get us to 112 binary digits; // The last iteration needs to happen with extra precision. - const q127blo: u64 = @truncate(u64, bSignificand << 15); + const q127blo: u64 = @as(u64, @truncate(bSignificand << 15)); var correction: u128 = undefined; var reciprocal: u128 = undefined; @@ -151,8 +151,8 @@ inline fn div(a: f128, b: f128) f128 { correction = -%(r64q63 + (r64q127 >> 64)); - const cHi = @truncate(u64, correction >> 64); - const cLo = @truncate(u64, correction); + const cHi = @as(u64, @truncate(correction >> 64)); + const cLo = @as(u64, @truncate(correction)); wideMultiply(u128, recip64, cHi, &dummy, &r64cH); wideMultiply(u128, recip64, cLo, &dummy, &r64cL); @@ -210,7 +210,7 @@ inline fn div(a: f128, b: f128) f128 { if (writtenExponent >= maxExponent) { // If we have overflowed the exponent, return infinity. - return @bitCast(f128, infRep | quotientSign); + return @as(f128, @bitCast(infRep | quotientSign)); } else if (writtenExponent < 1) { if (writtenExponent == 0) { // Check whether the rounded result is normal. @@ -221,22 +221,22 @@ inline fn div(a: f128, b: f128) f128 { absResult += round; if ((absResult & ~significandMask) > 0) { // The rounded result is normal; return it. - return @bitCast(f128, absResult | quotientSign); + return @as(f128, @bitCast(absResult | quotientSign)); } } // Flush denormals to zero. In the future, it would be nice to add // code to round them correctly. 
- return @bitCast(f128, quotientSign); + return @as(f128, @bitCast(quotientSign)); } else { const round = @intFromBool((residual << 1) >= bSignificand); // Clear the implicit bit var absResult = quotient & significandMask; // Insert the exponent - absResult |= @intCast(Z, writtenExponent) << significandBits; + absResult |= @as(Z, @intCast(writtenExponent)) << significandBits; // Round absResult +%= round; // Insert the sign and return - return @bitCast(f128, absResult | quotientSign); + return @as(f128, @bitCast(absResult | quotientSign)); } } diff --git a/lib/compiler_rt/divtf3_test.zig b/lib/compiler_rt/divtf3_test.zig index 62204057d4..43413a9181 100644 --- a/lib/compiler_rt/divtf3_test.zig +++ b/lib/compiler_rt/divtf3_test.zig @@ -5,9 +5,9 @@ const testing = std.testing; const __divtf3 = @import("divtf3.zig").__divtf3; fn compareResultLD(result: f128, expectedHi: u64, expectedLo: u64) bool { - const rep = @bitCast(u128, result); - const hi = @truncate(u64, rep >> 64); - const lo = @truncate(u64, rep); + const rep = @as(u128, @bitCast(result)); + const hi = @as(u64, @truncate(rep >> 64)); + const lo = @as(u64, @truncate(rep)); if (hi == expectedHi and lo == expectedLo) { return true; diff --git a/lib/compiler_rt/divti3.zig b/lib/compiler_rt/divti3.zig index 31302aab4d..43bb951ab9 100644 --- a/lib/compiler_rt/divti3.zig +++ b/lib/compiler_rt/divti3.zig @@ -21,7 +21,7 @@ pub fn __divti3(a: i128, b: i128) callconv(.C) i128 { const v128 = @Vector(2, u64); fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 { - return @bitCast(v128, div(@bitCast(i128, a), @bitCast(i128, b))); + return @as(v128, @bitCast(div(@as(i128, @bitCast(a)), @as(i128, @bitCast(b))))); } inline fn div(a: i128, b: i128) i128 { @@ -31,9 +31,9 @@ inline fn div(a: i128, b: i128) i128 { const an = (a ^ s_a) -% s_a; const bn = (b ^ s_b) -% s_b; - const r = udivmod(u128, @bitCast(u128, an), @bitCast(u128, bn), null); + const r = udivmod(u128, @as(u128, @bitCast(an)), @as(u128, @bitCast(bn)), null); const s = s_a ^ s_b; - return (@bitCast(i128, r) ^ s) -% s; + return (@as(i128, @bitCast(r)) ^ s) -% s; } test { diff --git a/lib/compiler_rt/divti3_test.zig b/lib/compiler_rt/divti3_test.zig index 7992e4312f..bcf45da3f2 100644 --- a/lib/compiler_rt/divti3_test.zig +++ b/lib/compiler_rt/divti3_test.zig @@ -14,8 +14,8 @@ test "divti3" { try test__divti3(-2, 1, -2); try test__divti3(-2, -1, 2); - try test__divti3(@bitCast(i128, @as(u128, 0x8 << 124)), 1, @bitCast(i128, @as(u128, 0x8 << 124))); - try test__divti3(@bitCast(i128, @as(u128, 0x8 << 124)), -1, @bitCast(i128, @as(u128, 0x8 << 124))); - try test__divti3(@bitCast(i128, @as(u128, 0x8 << 124)), -2, @bitCast(i128, @as(u128, 0x4 << 124))); - try test__divti3(@bitCast(i128, @as(u128, 0x8 << 124)), 2, @bitCast(i128, @as(u128, 0xc << 124))); + try test__divti3(@as(i128, @bitCast(@as(u128, 0x8 << 124))), 1, @as(i128, @bitCast(@as(u128, 0x8 << 124)))); + try test__divti3(@as(i128, @bitCast(@as(u128, 0x8 << 124))), -1, @as(i128, @bitCast(@as(u128, 0x8 << 124)))); + try test__divti3(@as(i128, @bitCast(@as(u128, 0x8 << 124))), -2, @as(i128, @bitCast(@as(u128, 0x4 << 124)))); + try test__divti3(@as(i128, @bitCast(@as(u128, 0x8 << 124))), 2, @as(i128, @bitCast(@as(u128, 0xc << 124)))); } diff --git a/lib/compiler_rt/divxf3.zig b/lib/compiler_rt/divxf3.zig index f0e93fa3be..d8e8a0c76d 100644 --- a/lib/compiler_rt/divxf3.zig +++ b/lib/compiler_rt/divxf3.zig @@ -29,53 +29,53 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 { const significandMask = (@as(Z, 1) << significandBits) 
- 1; const absMask = signBit - 1; - const qnanRep = @bitCast(Z, std.math.nan(T)) | quietBit; - const infRep = @bitCast(Z, std.math.inf(T)); + const qnanRep = @as(Z, @bitCast(std.math.nan(T))) | quietBit; + const infRep = @as(Z, @bitCast(std.math.inf(T))); - const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent); - const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent); - const quotientSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit; + const aExponent = @as(u32, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent)); + const bExponent = @as(u32, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent)); + const quotientSign: Z = (@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit; - var aSignificand: Z = @bitCast(Z, a) & significandMask; - var bSignificand: Z = @bitCast(Z, b) & significandMask; + var aSignificand: Z = @as(Z, @bitCast(a)) & significandMask; + var bSignificand: Z = @as(Z, @bitCast(b)) & significandMask; var scale: i32 = 0; // Detect if a or b is zero, denormal, infinity, or NaN. if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) { - const aAbs: Z = @bitCast(Z, a) & absMask; - const bAbs: Z = @bitCast(Z, b) & absMask; + const aAbs: Z = @as(Z, @bitCast(a)) & absMask; + const bAbs: Z = @as(Z, @bitCast(b)) & absMask; // NaN / anything = qNaN - if (aAbs > infRep) return @bitCast(T, @bitCast(Z, a) | quietBit); + if (aAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(a)) | quietBit)); // anything / NaN = qNaN - if (bAbs > infRep) return @bitCast(T, @bitCast(Z, b) | quietBit); + if (bAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(b)) | quietBit)); if (aAbs == infRep) { // infinity / infinity = NaN if (bAbs == infRep) { - return @bitCast(T, qnanRep); + return @as(T, @bitCast(qnanRep)); } // infinity / anything else = +/- infinity else { - return @bitCast(T, aAbs | quotientSign); + return @as(T, @bitCast(aAbs | quotientSign)); } } // anything else / infinity = +/- 0 - if (bAbs == infRep) return @bitCast(T, quotientSign); + if (bAbs == infRep) return @as(T, @bitCast(quotientSign)); if (aAbs == 0) { // zero / zero = NaN if (bAbs == 0) { - return @bitCast(T, qnanRep); + return @as(T, @bitCast(qnanRep)); } // zero / anything else = +/- zero else { - return @bitCast(T, quotientSign); + return @as(T, @bitCast(quotientSign)); } } // anything else / zero = +/- infinity - if (bAbs == 0) return @bitCast(T, infRep | quotientSign); + if (bAbs == 0) return @as(T, @bitCast(infRep | quotientSign)); // one or both of a or b is denormal, the other (if applicable) is a // normal number. Renormalize one or both of a and b, and set scale to @@ -83,13 +83,13 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 { if (aAbs < integerBit) scale +%= normalize(T, &aSignificand); if (bAbs < integerBit) scale -%= normalize(T, &bSignificand); } - var quotientExponent: i32 = @bitCast(i32, aExponent -% bExponent) +% scale; + var quotientExponent: i32 = @as(i32, @bitCast(aExponent -% bExponent)) +% scale; // Align the significand of b as a Q63 fixed-point number in the range // [1, 2.0) and get a Q64 approximate reciprocal using a small minimax // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This // is accurate to about 3.5 binary digits. 
- const q63b = @intCast(u64, bSignificand); + const q63b = @as(u64, @intCast(bSignificand)); var recip64 = @as(u64, 0x7504f333F9DE6484) -% q63b; // 0x7504f333F9DE6484 / 2^64 + 1 = 3/4 + 1/sqrt(2) @@ -100,16 +100,16 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 { // This doubles the number of correct binary digits in the approximation // with each iteration. var correction64: u64 = undefined; - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); // The reciprocal may have overflowed to zero if the upper half of b is // exactly 1.0. This would sabatoge the full-width final stage of the @@ -128,8 +128,8 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 { correction = -%correction; - const cHi = @truncate(u64, correction >> 64); - const cLo = @truncate(u64, correction); + const cHi = @as(u64, @truncate(correction >> 64)); + const cLo = @as(u64, @truncate(correction)); var r64cH: u128 = undefined; var r64cL: u128 = undefined; @@ -164,8 +164,8 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 { // exponent accordingly. var quotient: u64 = if (quotient128 < (integerBit << 1)) b: { quotientExponent -= 1; - break :b @intCast(u64, quotient128); - } else @intCast(u64, quotient128 >> 1); + break :b @as(u64, @intCast(quotient128)); + } else @as(u64, @intCast(quotient128 >> 1)); // We are going to compute a residual of the form // @@ -182,26 +182,26 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 { const writtenExponent = quotientExponent + exponentBias; if (writtenExponent >= maxExponent) { // If we have overflowed the exponent, return infinity. - return @bitCast(T, infRep | quotientSign); + return @as(T, @bitCast(infRep | quotientSign)); } else if (writtenExponent < 1) { if (writtenExponent == 0) { // Check whether the rounded result is normal. if (residual > (bSignificand >> 1)) { // round if (quotient == (integerBit - 1)) // If the rounded result is normal, return it - return @bitCast(T, @bitCast(Z, std.math.floatMin(T)) | quotientSign); + return @as(T, @bitCast(@as(Z, @bitCast(std.math.floatMin(T))) | quotientSign)); } } // Flush denormals to zero. 
In the future, it would be nice to add // code to round them correctly. - return @bitCast(T, quotientSign); + return @as(T, @bitCast(quotientSign)); } else { const round = @intFromBool(residual > (bSignificand >> 1)); // Insert the exponent - var absResult = quotient | (@intCast(Z, writtenExponent) << significandBits); + var absResult = quotient | (@as(Z, @intCast(writtenExponent)) << significandBits); // Round absResult +%= round; // Insert the sign and return - return @bitCast(T, absResult | quotientSign | integerBit); + return @as(T, @bitCast(absResult | quotientSign | integerBit)); } } diff --git a/lib/compiler_rt/divxf3_test.zig b/lib/compiler_rt/divxf3_test.zig index 0ed2b74217..ff1cef089b 100644 --- a/lib/compiler_rt/divxf3_test.zig +++ b/lib/compiler_rt/divxf3_test.zig @@ -5,11 +5,11 @@ const testing = std.testing; const __divxf3 = @import("divxf3.zig").__divxf3; fn compareResult(result: f80, expected: u80) bool { - const rep = @bitCast(u80, result); + const rep = @as(u80, @bitCast(result)); if (rep == expected) return true; // test other possible NaN representations (signal NaN) - if (math.isNan(result) and math.isNan(@bitCast(f80, expected))) return true; + if (math.isNan(result) and math.isNan(@as(f80, @bitCast(expected)))) return true; return false; } @@ -25,9 +25,9 @@ fn test__divxf3(a: f80, b: f80) !void { const x = __divxf3(a, b); // Next float (assuming normal, non-zero result) - const x_plus_eps = @bitCast(f80, (@bitCast(u80, x) + 1) | integerBit); + const x_plus_eps = @as(f80, @bitCast((@as(u80, @bitCast(x)) + 1) | integerBit)); // Prev float (assuming normal, non-zero result) - const x_minus_eps = @bitCast(f80, (@bitCast(u80, x) - 1) | integerBit); + const x_minus_eps = @as(f80, @bitCast((@as(u80, @bitCast(x)) - 1) | integerBit)); // Make sure result is more accurate than the adjacent floats const err_x = @fabs(@mulAdd(f80, x, b, -a)); diff --git a/lib/compiler_rt/emutls.zig b/lib/compiler_rt/emutls.zig index 47c71efadd..70eb479070 100644 --- a/lib/compiler_rt/emutls.zig +++ b/lib/compiler_rt/emutls.zig @@ -33,18 +33,14 @@ pub fn __emutls_get_address(control: *emutls_control) callconv(.C) *anyopaque { const simple_allocator = struct { /// Allocate a memory chunk for requested type. Return a pointer on the data. pub fn alloc(comptime T: type) *T { - return @ptrCast(*T, @alignCast( - @alignOf(T), - advancedAlloc(@alignOf(T), @sizeOf(T)), - )); + return @ptrCast(@alignCast(advancedAlloc(@alignOf(T), @sizeOf(T)))); } /// Allocate a slice of T, with len elements. pub fn allocSlice(comptime T: type, len: usize) []T { - return @ptrCast([*]T, @alignCast( - @alignOf(T), + return @as([*]T, @ptrCast(@alignCast( advancedAlloc(@alignOf(T), @sizeOf(T) * len), - ))[0 .. len - 1]; + )))[0 .. len - 1]; } /// Allocate a memory chunk. @@ -56,22 +52,19 @@ const simple_allocator = struct { abort(); } - return @ptrCast([*]u8, aligned_ptr); + return @as([*]u8, @ptrCast(aligned_ptr)); } /// Resize a slice. pub fn reallocSlice(comptime T: type, slice: []T, len: usize) []T { - var c_ptr: *anyopaque = @ptrCast(*anyopaque, slice.ptr); - var new_array: [*]T = @ptrCast([*]T, @alignCast( - @alignOf(T), - std.c.realloc(c_ptr, @sizeOf(T) * len) orelse abort(), - )); + var c_ptr: *anyopaque = @as(*anyopaque, @ptrCast(slice.ptr)); + var new_array: [*]T = @ptrCast(@alignCast(std.c.realloc(c_ptr, @sizeOf(T) * len) orelse abort())); return new_array[0..len]; } /// Free a memory chunk allocated with simple_allocator. 
pub fn free(ptr: anytype) void { - std.c.free(@ptrCast(*anyopaque, ptr)); + std.c.free(@as(*anyopaque, @ptrCast(ptr))); } }; @@ -132,20 +125,20 @@ const ObjectArray = struct { if (self.slots[index] == null) { // initialize the slot const size = control.size; - const alignment = @truncate(u29, control.alignment); + const alignment = @as(u29, @truncate(control.alignment)); var data = simple_allocator.advancedAlloc(alignment, size); errdefer simple_allocator.free(data); if (control.default_value) |value| { // default value: copy the content to newly allocated object. - @memcpy(data[0..size], @ptrCast([*]const u8, value)); + @memcpy(data[0..size], @as([*]const u8, @ptrCast(value))); } else { // no default: return zeroed memory. @memset(data[0..size], 0); } - self.slots[index] = @ptrCast(*anyopaque, data); + self.slots[index] = @as(*anyopaque, @ptrCast(data)); } return self.slots[index].?; @@ -180,18 +173,12 @@ const current_thread_storage = struct { /// Return casted thread specific value. fn getspecific() ?*ObjectArray { - return @ptrCast( - ?*ObjectArray, - @alignCast( - @alignOf(ObjectArray), - std.c.pthread_getspecific(current_thread_storage.key), - ), - ); + return @ptrCast(@alignCast(std.c.pthread_getspecific(current_thread_storage.key))); } /// Set casted thread specific value. fn setspecific(new: ?*ObjectArray) void { - if (std.c.pthread_setspecific(current_thread_storage.key, @ptrCast(*anyopaque, new)) != 0) { + if (std.c.pthread_setspecific(current_thread_storage.key, @as(*anyopaque, @ptrCast(new))) != 0) { abort(); } } @@ -205,10 +192,7 @@ const current_thread_storage = struct { /// Invoked by pthread specific destructor. the passed argument is the ObjectArray pointer. fn deinit(arrayPtr: *anyopaque) callconv(.C) void { - var array = @ptrCast( - *ObjectArray, - @alignCast(@alignOf(ObjectArray), arrayPtr), - ); + var array: *ObjectArray = @ptrCast(@alignCast(arrayPtr)); array.deinit(); } }; @@ -294,7 +278,7 @@ const emutls_control = extern struct { .size = @sizeOf(T), .alignment = @alignOf(T), .object = .{ .index = 0 }, - .default_value = @ptrCast(?*const anyopaque, default_value), + .default_value = @as(?*const anyopaque, @ptrCast(default_value)), }; } @@ -313,10 +297,7 @@ const emutls_control = extern struct { pub fn get_typed_pointer(self: *emutls_control, comptime T: type) *T { assert(self.size == @sizeOf(T)); assert(self.alignment == @alignOf(T)); - return @ptrCast( - *T, - @alignCast(@alignOf(T), self.getPointer()), - ); + return @ptrCast(@alignCast(self.getPointer())); } }; @@ -343,7 +324,7 @@ test "__emutls_get_address zeroed" { try expect(ctl.object.index == 0); // retrieve a variable from ctl - var x = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl))); + var x: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl))); try expect(ctl.object.index != 0); // index has been allocated for this ctl try expect(x.* == 0); // storage has been zeroed @@ -351,7 +332,7 @@ test "__emutls_get_address zeroed" { x.* = 1234; // retrieve a variable from ctl (same ctl) - var y = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl))); + var y: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl))); try expect(y.* == 1234); // same content that x.* try expect(x == y); // same pointer @@ -364,7 +345,7 @@ test "__emutls_get_address with default_value" { var ctl = emutls_control.init(usize, &value); try expect(ctl.object.index == 0); - var x: *usize = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl))); + var x: *usize = 
@ptrCast(@alignCast(__emutls_get_address(&ctl))); try expect(ctl.object.index != 0); try expect(x.* == 5678); // storage initialized with default value @@ -373,7 +354,7 @@ test "__emutls_get_address with default_value" { try expect(value == 5678); // the default value didn't change - var y = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl))); + var y: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl))); try expect(y.* == 9012); // the modified storage persists } diff --git a/lib/compiler_rt/exp.zig b/lib/compiler_rt/exp.zig index 32a1a84ff9..337376f7fe 100644 --- a/lib/compiler_rt/exp.zig +++ b/lib/compiler_rt/exp.zig @@ -27,7 +27,7 @@ comptime { pub fn __exph(a: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, expf(a)); + return @as(f16, @floatCast(expf(a))); } pub fn expf(x_: f32) callconv(.C) f32 { @@ -39,8 +39,8 @@ pub fn expf(x_: f32) callconv(.C) f32 { const P2 = -2.7667332906e-3; var x = x_; - var hx = @bitCast(u32, x); - const sign = @intCast(i32, hx >> 31); + var hx = @as(u32, @bitCast(x)); + const sign = @as(i32, @intCast(hx >> 31)); hx &= 0x7FFFFFFF; if (math.isNan(x)) { @@ -74,12 +74,12 @@ pub fn expf(x_: f32) callconv(.C) f32 { if (hx > 0x3EB17218) { // |x| > 1.5 * ln2 if (hx > 0x3F851592) { - k = @intFromFloat(i32, invln2 * x + half[@intCast(usize, sign)]); + k = @as(i32, @intFromFloat(invln2 * x + half[@as(usize, @intCast(sign))])); } else { k = 1 - sign - sign; } - const fk = @floatFromInt(f32, k); + const fk = @as(f32, @floatFromInt(k)); hi = x - fk * ln2hi; lo = fk * ln2lo; x = hi - lo; @@ -117,9 +117,9 @@ pub fn exp(x_: f64) callconv(.C) f64 { const P5: f64 = 4.13813679705723846039e-08; var x = x_; - var ux = @bitCast(u64, x); + var ux = @as(u64, @bitCast(x)); var hx = ux >> 32; - const sign = @intCast(i32, hx >> 31); + const sign = @as(i32, @intCast(hx >> 31)); hx &= 0x7FFFFFFF; if (math.isNan(x)) { @@ -157,12 +157,12 @@ pub fn exp(x_: f64) callconv(.C) f64 { if (hx > 0x3FD62E42) { // |x| >= 1.5 * ln2 if (hx > 0x3FF0A2B2) { - k = @intFromFloat(i32, invln2 * x + half[@intCast(usize, sign)]); + k = @as(i32, @intFromFloat(invln2 * x + half[@as(usize, @intCast(sign))])); } else { k = 1 - sign - sign; } - const dk = @floatFromInt(f64, k); + const dk = @as(f64, @floatFromInt(k)); hi = x - dk * ln2hi; lo = dk * ln2lo; x = hi - lo; @@ -191,12 +191,12 @@ pub fn exp(x_: f64) callconv(.C) f64 { pub fn __expx(a: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, expq(a)); + return @as(f80, @floatCast(expq(a))); } pub fn expq(a: f128) callconv(.C) f128 { // TODO: more correct implementation - return exp(@floatCast(f64, a)); + return exp(@as(f64, @floatCast(a))); } pub fn expl(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/exp2.zig b/lib/compiler_rt/exp2.zig index 731fd7013d..acfeff0e35 100644 --- a/lib/compiler_rt/exp2.zig +++ b/lib/compiler_rt/exp2.zig @@ -27,18 +27,18 @@ comptime { pub fn __exp2h(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, exp2f(x)); + return @as(f16, @floatCast(exp2f(x))); } pub fn exp2f(x: f32) callconv(.C) f32 { - const tblsiz = @intCast(u32, exp2ft.len); - const redux: f32 = 0x1.8p23 / @floatFromInt(f32, tblsiz); + const tblsiz = @as(u32, @intCast(exp2ft.len)); + const redux: f32 = 0x1.8p23 / @as(f32, @floatFromInt(tblsiz)); const P1: f32 = 0x1.62e430p-1; const P2: f32 = 0x1.ebfbe0p-3; const P3: f32 = 0x1.c6b348p-5; const P4: f32 = 0x1.3b2c9cp-7; - var u = @bitCast(u32, x); + var u = 
@as(u32, @bitCast(x)); const ix = u & 0x7FFFFFFF; // |x| > 126 @@ -72,32 +72,32 @@ pub fn exp2f(x: f32) callconv(.C) f32 { // intended result but should confirm how GCC/Clang handle this to ensure. var uf = x + redux; - var i_0 = @bitCast(u32, uf); + var i_0 = @as(u32, @bitCast(uf)); i_0 +%= tblsiz / 2; const k = i_0 / tblsiz; - const uk = @bitCast(f64, @as(u64, 0x3FF + k) << 52); + const uk = @as(f64, @bitCast(@as(u64, 0x3FF + k) << 52)); i_0 &= tblsiz - 1; uf -= redux; const z: f64 = x - uf; - var r: f64 = exp2ft[@intCast(usize, i_0)]; + var r: f64 = exp2ft[@as(usize, @intCast(i_0))]; const t: f64 = r * z; r = r + t * (P1 + z * P2) + t * (z * z) * (P3 + z * P4); - return @floatCast(f32, r * uk); + return @as(f32, @floatCast(r * uk)); } pub fn exp2(x: f64) callconv(.C) f64 { - const tblsiz: u32 = @intCast(u32, exp2dt.len / 2); - const redux: f64 = 0x1.8p52 / @floatFromInt(f64, tblsiz); + const tblsiz: u32 = @as(u32, @intCast(exp2dt.len / 2)); + const redux: f64 = 0x1.8p52 / @as(f64, @floatFromInt(tblsiz)); const P1: f64 = 0x1.62e42fefa39efp-1; const P2: f64 = 0x1.ebfbdff82c575p-3; const P3: f64 = 0x1.c6b08d704a0a6p-5; const P4: f64 = 0x1.3b2ab88f70400p-7; const P5: f64 = 0x1.5d88003875c74p-10; - const ux = @bitCast(u64, x); - const ix = @intCast(u32, ux >> 32) & 0x7FFFFFFF; + const ux = @as(u64, @bitCast(x)); + const ix = @as(u32, @intCast(ux >> 32)) & 0x7FFFFFFF; // TODO: This should be handled beneath. if (math.isNan(x)) { @@ -119,7 +119,7 @@ pub fn exp2(x: f64) callconv(.C) f64 { if (ux >> 63 != 0) { // underflow if (x <= -1075 or x - 0x1.0p52 + 0x1.0p52 != x) { - math.doNotOptimizeAway(@floatCast(f32, -0x1.0p-149 / x)); + math.doNotOptimizeAway(@as(f32, @floatCast(-0x1.0p-149 / x))); } if (x <= -1075) { return 0; @@ -139,18 +139,18 @@ pub fn exp2(x: f64) callconv(.C) f64 { // reduce x var uf: f64 = x + redux; // NOTE: musl performs an implicit 64-bit to 32-bit u32 truncation here - var i_0: u32 = @truncate(u32, @bitCast(u64, uf)); + var i_0: u32 = @as(u32, @truncate(@as(u64, @bitCast(uf)))); i_0 +%= tblsiz / 2; const k: u32 = i_0 / tblsiz * tblsiz; - const ik: i32 = @divTrunc(@bitCast(i32, k), tblsiz); + const ik: i32 = @divTrunc(@as(i32, @bitCast(k)), tblsiz); i_0 %= tblsiz; uf -= redux; // r = exp2(y) = exp2t[i_0] * p(z - eps[i]) var z: f64 = x - uf; - const t: f64 = exp2dt[@intCast(usize, 2 * i_0)]; - z -= exp2dt[@intCast(usize, 2 * i_0 + 1)]; + const t: f64 = exp2dt[@as(usize, @intCast(2 * i_0))]; + z -= exp2dt[@as(usize, @intCast(2 * i_0 + 1))]; const r: f64 = t + t * z * (P1 + z * (P2 + z * (P3 + z * (P4 + z * P5)))); return math.scalbn(r, ik); @@ -158,12 +158,12 @@ pub fn exp2(x: f64) callconv(.C) f64 { pub fn __exp2x(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, exp2q(x)); + return @as(f80, @floatCast(exp2q(x))); } pub fn exp2q(x: f128) callconv(.C) f128 { // TODO: more correct implementation - return exp2(@floatCast(f64, x)); + return exp2(@as(f64, @floatCast(x))); } pub fn exp2l(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/extenddftf2.zig b/lib/compiler_rt/extenddftf2.zig index e7b2d8ed70..af293b5ea2 100644 --- a/lib/compiler_rt/extenddftf2.zig +++ b/lib/compiler_rt/extenddftf2.zig @@ -13,9 +13,9 @@ comptime { } pub fn __extenddftf2(a: f64) callconv(.C) f128 { - return extendf(f128, f64, @bitCast(u64, a)); + return extendf(f128, f64, @as(u64, @bitCast(a))); } fn _Qp_dtoq(c: *f128, a: f64) callconv(.C) void { - c.* = extendf(f128, f64, @bitCast(u64, a)); + c.* = extendf(f128, f64, @as(u64, 
@bitCast(a))); } diff --git a/lib/compiler_rt/extenddfxf2.zig b/lib/compiler_rt/extenddfxf2.zig index c9e10d57ec..54232d0b70 100644 --- a/lib/compiler_rt/extenddfxf2.zig +++ b/lib/compiler_rt/extenddfxf2.zig @@ -8,5 +8,5 @@ comptime { } pub fn __extenddfxf2(a: f64) callconv(.C) f80 { - return extend_f80(f64, @bitCast(u64, a)); + return extend_f80(f64, @as(u64, @bitCast(a))); } diff --git a/lib/compiler_rt/extendf.zig b/lib/compiler_rt/extendf.zig index feafbfc893..0d9f295ed0 100644 --- a/lib/compiler_rt/extendf.zig +++ b/lib/compiler_rt/extendf.zig @@ -33,7 +33,7 @@ pub inline fn extendf( const dstMinNormal: dst_rep_t = @as(dst_rep_t, 1) << dstSigBits; // Break a into a sign and representation of the absolute value - const aRep: src_rep_t = @bitCast(src_rep_t, a); + const aRep: src_rep_t = @as(src_rep_t, @bitCast(a)); const aAbs: src_rep_t = aRep & srcAbsMask; const sign: src_rep_t = aRep & srcSignMask; var absResult: dst_rep_t = undefined; @@ -58,10 +58,10 @@ pub inline fn extendf( // the correct adjusted exponent in the destination type. const scale: u32 = @clz(aAbs) - @clz(@as(src_rep_t, srcMinNormal)); - absResult = @as(dst_rep_t, aAbs) << @intCast(DstShift, dstSigBits - srcSigBits + scale); + absResult = @as(dst_rep_t, aAbs) << @as(DstShift, @intCast(dstSigBits - srcSigBits + scale)); absResult ^= dstMinNormal; const resultExponent: u32 = dstExpBias - srcExpBias - scale + 1; - absResult |= @intCast(dst_rep_t, resultExponent) << dstSigBits; + absResult |= @as(dst_rep_t, @intCast(resultExponent)) << dstSigBits; } else { // a is zero. absResult = 0; @@ -69,7 +69,7 @@ pub inline fn extendf( // Apply the signbit to (dst_t)abs(a). const result: dst_rep_t align(@alignOf(dst_t)) = absResult | @as(dst_rep_t, sign) << (dstBits - srcBits); - return @bitCast(dst_t, result); + return @as(dst_t, @bitCast(result)); } pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) f80 { @@ -104,7 +104,7 @@ pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeI // a is a normal number. // Extend to the destination type by shifting the significand and // exponent into the proper position and rebiasing the exponent. 
- dst.exp = @intCast(u16, a_abs >> src_sig_bits); + dst.exp = @as(u16, @intCast(a_abs >> src_sig_bits)); dst.exp += dst_exp_bias - src_exp_bias; dst.fraction = @as(u64, a_abs) << (dst_sig_bits - src_sig_bits); dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers @@ -124,9 +124,9 @@ pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeI const scale: u16 = @clz(a_abs) - @clz(@as(src_rep_t, src_min_normal)); - dst.fraction = @as(u64, a_abs) << @intCast(u6, dst_sig_bits - src_sig_bits + scale); + dst.fraction = @as(u64, a_abs) << @as(u6, @intCast(dst_sig_bits - src_sig_bits + scale)); dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers - dst.exp = @truncate(u16, a_abs >> @intCast(SrcShift, src_sig_bits - scale)); + dst.exp = @as(u16, @truncate(a_abs >> @as(SrcShift, @intCast(src_sig_bits - scale)))); dst.exp ^= 1; dst.exp |= dst_exp_bias - src_exp_bias - scale + 1; } else { diff --git a/lib/compiler_rt/extendf_test.zig b/lib/compiler_rt/extendf_test.zig index e9192ae525..966a0c14a6 100644 --- a/lib/compiler_rt/extendf_test.zig +++ b/lib/compiler_rt/extendf_test.zig @@ -11,12 +11,12 @@ const F16T = @import("./common.zig").F16T; fn test__extenddfxf2(a: f64, expected: u80) !void { const x = __extenddfxf2(a); - const rep = @bitCast(u80, x); + const rep = @as(u80, @bitCast(x)); if (rep == expected) return; // test other possible NaN representation(signal NaN) - if (math.isNan(@bitCast(f80, expected)) and math.isNan(x)) + if (math.isNan(@as(f80, @bitCast(expected))) and math.isNan(x)) return; @panic("__extenddfxf2 test failure"); @@ -25,9 +25,9 @@ fn test__extenddfxf2(a: f64, expected: u80) !void { fn test__extenddftf2(a: f64, expected_hi: u64, expected_lo: u64) !void { const x = __extenddftf2(a); - const rep = @bitCast(u128, x); - const hi = @intCast(u64, rep >> 64); - const lo = @truncate(u64, rep); + const rep = @as(u128, @bitCast(x)); + const hi = @as(u64, @intCast(rep >> 64)); + const lo = @as(u64, @truncate(rep)); if (hi == expected_hi and lo == expected_lo) return; @@ -45,14 +45,14 @@ fn test__extenddftf2(a: f64, expected_hi: u64, expected_lo: u64) !void { } fn test__extendhfsf2(a: u16, expected: u32) !void { - const x = __extendhfsf2(@bitCast(F16T(f32), a)); - const rep = @bitCast(u32, x); + const x = __extendhfsf2(@as(F16T(f32), @bitCast(a))); + const rep = @as(u32, @bitCast(x)); if (rep == expected) { if (rep & 0x7fffffff > 0x7f800000) { return; // NaN is always unequal. 
} - if (x == @bitCast(f32, expected)) { + if (x == @as(f32, @bitCast(expected))) { return; } } @@ -63,9 +63,9 @@ fn test__extendhfsf2(a: u16, expected: u32) !void { fn test__extendsftf2(a: f32, expected_hi: u64, expected_lo: u64) !void { const x = __extendsftf2(a); - const rep = @bitCast(u128, x); - const hi = @intCast(u64, rep >> 64); - const lo = @truncate(u64, rep); + const rep = @as(u128, @bitCast(x)); + const hi = @as(u64, @intCast(rep >> 64)); + const lo = @as(u64, @truncate(rep)); if (hi == expected_hi and lo == expected_lo) return; @@ -184,35 +184,35 @@ test "extendsftf2" { } fn makeQNaN64() f64 { - return @bitCast(f64, @as(u64, 0x7ff8000000000000)); + return @as(f64, @bitCast(@as(u64, 0x7ff8000000000000))); } fn makeInf64() f64 { - return @bitCast(f64, @as(u64, 0x7ff0000000000000)); + return @as(f64, @bitCast(@as(u64, 0x7ff0000000000000))); } fn makeNaN64(rand: u64) f64 { - return @bitCast(f64, 0x7ff0000000000000 | (rand & 0xfffffffffffff)); + return @as(f64, @bitCast(0x7ff0000000000000 | (rand & 0xfffffffffffff))); } fn makeQNaN32() f32 { - return @bitCast(f32, @as(u32, 0x7fc00000)); + return @as(f32, @bitCast(@as(u32, 0x7fc00000))); } fn makeNaN32(rand: u32) f32 { - return @bitCast(f32, 0x7f800000 | (rand & 0x7fffff)); + return @as(f32, @bitCast(0x7f800000 | (rand & 0x7fffff))); } fn makeInf32() f32 { - return @bitCast(f32, @as(u32, 0x7f800000)); + return @as(f32, @bitCast(@as(u32, 0x7f800000))); } fn test__extendhftf2(a: u16, expected_hi: u64, expected_lo: u64) !void { - const x = __extendhftf2(@bitCast(F16T(f128), a)); + const x = __extendhftf2(@as(F16T(f128), @bitCast(a))); - const rep = @bitCast(u128, x); - const hi = @intCast(u64, rep >> 64); - const lo = @truncate(u64, rep); + const rep = @as(u128, @bitCast(x)); + const hi = @as(u64, @intCast(rep >> 64)); + const lo = @as(u64, @truncate(rep)); if (hi == expected_hi and lo == expected_lo) return; diff --git a/lib/compiler_rt/extendhfdf2.zig b/lib/compiler_rt/extendhfdf2.zig index 1a95002883..92aa3591ff 100644 --- a/lib/compiler_rt/extendhfdf2.zig +++ b/lib/compiler_rt/extendhfdf2.zig @@ -8,5 +8,5 @@ comptime { } pub fn __extendhfdf2(a: common.F16T(f64)) callconv(.C) f64 { - return extendf(f64, f16, @bitCast(u16, a)); + return extendf(f64, f16, @as(u16, @bitCast(a))); } diff --git a/lib/compiler_rt/extendhfsf2.zig b/lib/compiler_rt/extendhfsf2.zig index 86ef751c35..eb144cc79a 100644 --- a/lib/compiler_rt/extendhfsf2.zig +++ b/lib/compiler_rt/extendhfsf2.zig @@ -13,13 +13,13 @@ comptime { } pub fn __extendhfsf2(a: common.F16T(f32)) callconv(.C) f32 { - return extendf(f32, f16, @bitCast(u16, a)); + return extendf(f32, f16, @as(u16, @bitCast(a))); } fn __gnu_h2f_ieee(a: common.F16T(f32)) callconv(.C) f32 { - return extendf(f32, f16, @bitCast(u16, a)); + return extendf(f32, f16, @as(u16, @bitCast(a))); } fn __aeabi_h2f(a: u16) callconv(.AAPCS) f32 { - return extendf(f32, f16, @bitCast(u16, a)); + return extendf(f32, f16, @as(u16, @bitCast(a))); } diff --git a/lib/compiler_rt/extendhftf2.zig b/lib/compiler_rt/extendhftf2.zig index 6479a0f52f..3e63b94e42 100644 --- a/lib/compiler_rt/extendhftf2.zig +++ b/lib/compiler_rt/extendhftf2.zig @@ -8,5 +8,5 @@ comptime { } pub fn __extendhftf2(a: common.F16T(f128)) callconv(.C) f128 { - return extendf(f128, f16, @bitCast(u16, a)); + return extendf(f128, f16, @as(u16, @bitCast(a))); } diff --git a/lib/compiler_rt/extendhfxf2.zig b/lib/compiler_rt/extendhfxf2.zig index bd68582766..2858641d43 100644 --- a/lib/compiler_rt/extendhfxf2.zig +++ b/lib/compiler_rt/extendhfxf2.zig @@ -8,5 +8,5 @@ 
comptime { } fn __extendhfxf2(a: common.F16T(f80)) callconv(.C) f80 { - return extend_f80(f16, @bitCast(u16, a)); + return extend_f80(f16, @as(u16, @bitCast(a))); } diff --git a/lib/compiler_rt/extendsfdf2.zig b/lib/compiler_rt/extendsfdf2.zig index 0a7ba8df53..4ab9288818 100644 --- a/lib/compiler_rt/extendsfdf2.zig +++ b/lib/compiler_rt/extendsfdf2.zig @@ -12,9 +12,9 @@ comptime { } fn __extendsfdf2(a: f32) callconv(.C) f64 { - return extendf(f64, f32, @bitCast(u32, a)); + return extendf(f64, f32, @as(u32, @bitCast(a))); } fn __aeabi_f2d(a: f32) callconv(.AAPCS) f64 { - return extendf(f64, f32, @bitCast(u32, a)); + return extendf(f64, f32, @as(u32, @bitCast(a))); } diff --git a/lib/compiler_rt/extendsftf2.zig b/lib/compiler_rt/extendsftf2.zig index a74319745a..3783438771 100644 --- a/lib/compiler_rt/extendsftf2.zig +++ b/lib/compiler_rt/extendsftf2.zig @@ -13,9 +13,9 @@ comptime { } pub fn __extendsftf2(a: f32) callconv(.C) f128 { - return extendf(f128, f32, @bitCast(u32, a)); + return extendf(f128, f32, @as(u32, @bitCast(a))); } fn _Qp_stoq(c: *f128, a: f32) callconv(.C) void { - c.* = extendf(f128, f32, @bitCast(u32, a)); + c.* = extendf(f128, f32, @as(u32, @bitCast(a))); } diff --git a/lib/compiler_rt/extendsfxf2.zig b/lib/compiler_rt/extendsfxf2.zig index 938e65c1bd..f41a921f9e 100644 --- a/lib/compiler_rt/extendsfxf2.zig +++ b/lib/compiler_rt/extendsfxf2.zig @@ -8,5 +8,5 @@ comptime { } fn __extendsfxf2(a: f32) callconv(.C) f80 { - return extend_f80(f32, @bitCast(u32, a)); + return extend_f80(f32, @as(u32, @bitCast(a))); } diff --git a/lib/compiler_rt/extendxftf2.zig b/lib/compiler_rt/extendxftf2.zig index c3243d3018..3ddceb6c63 100644 --- a/lib/compiler_rt/extendxftf2.zig +++ b/lib/compiler_rt/extendxftf2.zig @@ -39,12 +39,12 @@ fn __extendxftf2(a: f80) callconv(.C) f128 { // renormalize the significand and clear the leading bit and integer part, // then insert the correct adjusted exponent in the destination type. const scale: u32 = @clz(a_rep.fraction); - abs_result = @as(u128, a_rep.fraction) << @intCast(u7, dst_sig_bits - src_sig_bits + scale + 1); + abs_result = @as(u128, a_rep.fraction) << @as(u7, @intCast(dst_sig_bits - src_sig_bits + scale + 1)); abs_result ^= dst_min_normal; abs_result |= @as(u128, scale + 1) << dst_sig_bits; } // Apply the signbit to (dst_t)abs(a). 
const result: u128 align(@alignOf(f128)) = abs_result | @as(u128, sign) << (dst_bits - 16); - return @bitCast(f128, result); + return @as(f128, @bitCast(result)); } diff --git a/lib/compiler_rt/fabs.zig b/lib/compiler_rt/fabs.zig index b38e15e593..a58cb1fb08 100644 --- a/lib/compiler_rt/fabs.zig +++ b/lib/compiler_rt/fabs.zig @@ -51,7 +51,7 @@ pub fn fabsl(x: c_longdouble) callconv(.C) c_longdouble { inline fn generic_fabs(x: anytype) @TypeOf(x) { const T = @TypeOf(x); const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); - const float_bits = @bitCast(TBits, x); + const float_bits = @as(TBits, @bitCast(x)); const remove_sign = ~@as(TBits, 0) >> 1; - return @bitCast(T, float_bits & remove_sign); + return @as(T, @bitCast(float_bits & remove_sign)); } diff --git a/lib/compiler_rt/ffsdi2_test.zig b/lib/compiler_rt/ffsdi2_test.zig index 26d8a195e5..135052bf39 100644 --- a/lib/compiler_rt/ffsdi2_test.zig +++ b/lib/compiler_rt/ffsdi2_test.zig @@ -2,7 +2,7 @@ const ffs = @import("count0bits.zig"); const testing = @import("std").testing; fn test__ffsdi2(a: u64, expected: i32) !void { - var x = @bitCast(i64, a); + var x = @as(i64, @bitCast(a)); var result = ffs.__ffsdi2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/ffssi2_test.zig b/lib/compiler_rt/ffssi2_test.zig index 884d7e47fc..38435a9e4b 100644 --- a/lib/compiler_rt/ffssi2_test.zig +++ b/lib/compiler_rt/ffssi2_test.zig @@ -2,7 +2,7 @@ const ffs = @import("count0bits.zig"); const testing = @import("std").testing; fn test__ffssi2(a: u32, expected: i32) !void { - var x = @bitCast(i32, a); + var x = @as(i32, @bitCast(a)); var result = ffs.__ffssi2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/ffsti2_test.zig b/lib/compiler_rt/ffsti2_test.zig index ce473b7f4e..a0686b33e4 100644 --- a/lib/compiler_rt/ffsti2_test.zig +++ b/lib/compiler_rt/ffsti2_test.zig @@ -2,7 +2,7 @@ const ffs = @import("count0bits.zig"); const testing = @import("std").testing; fn test__ffsti2(a: u128, expected: i32) !void { - var x = @bitCast(i128, a); + var x = @as(i128, @bitCast(a)); var result = ffs.__ffsti2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/fixdfti.zig b/lib/compiler_rt/fixdfti.zig index c3513f6bec..8ee7ce40c5 100644 --- a/lib/compiler_rt/fixdfti.zig +++ b/lib/compiler_rt/fixdfti.zig @@ -19,5 +19,5 @@ pub fn __fixdfti(a: f64) callconv(.C) i128 { const v2u64 = @Vector(2, u64); fn __fixdfti_windows_x86_64(a: f64) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(i128, a)); + return @as(v2u64, @bitCast(intFromFloat(i128, a))); } diff --git a/lib/compiler_rt/fixhfti.zig b/lib/compiler_rt/fixhfti.zig index d2b288a52d..50fd26a9fa 100644 --- a/lib/compiler_rt/fixhfti.zig +++ b/lib/compiler_rt/fixhfti.zig @@ -19,5 +19,5 @@ pub fn __fixhfti(a: f16) callconv(.C) i128 { const v2u64 = @Vector(2, u64); fn __fixhfti_windows_x86_64(a: f16) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(i128, a)); + return @as(v2u64, @bitCast(intFromFloat(i128, a))); } diff --git a/lib/compiler_rt/fixsfti.zig b/lib/compiler_rt/fixsfti.zig index 033e5be5b8..9110b3ca38 100644 --- a/lib/compiler_rt/fixsfti.zig +++ b/lib/compiler_rt/fixsfti.zig @@ -19,5 +19,5 @@ pub fn __fixsfti(a: f32) callconv(.C) i128 { const v2u64 = @Vector(2, u64); fn __fixsfti_windows_x86_64(a: f32) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(i128, a)); + return @as(v2u64, @bitCast(intFromFloat(i128, a))); } diff --git a/lib/compiler_rt/fixtfti.zig b/lib/compiler_rt/fixtfti.zig index 
c3f574ed8a..6c7a8170f9 100644 --- a/lib/compiler_rt/fixtfti.zig +++ b/lib/compiler_rt/fixtfti.zig @@ -21,5 +21,5 @@ pub fn __fixtfti(a: f128) callconv(.C) i128 { const v2u64 = @Vector(2, u64); fn __fixtfti_windows_x86_64(a: f128) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(i128, a)); + return @as(v2u64, @bitCast(intFromFloat(i128, a))); } diff --git a/lib/compiler_rt/fixunsdfti.zig b/lib/compiler_rt/fixunsdfti.zig index 67959fb98a..31483d91f9 100644 --- a/lib/compiler_rt/fixunsdfti.zig +++ b/lib/compiler_rt/fixunsdfti.zig @@ -19,5 +19,5 @@ pub fn __fixunsdfti(a: f64) callconv(.C) u128 { const v2u64 = @Vector(2, u64); fn __fixunsdfti_windows_x86_64(a: f64) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(u128, a)); + return @as(v2u64, @bitCast(intFromFloat(u128, a))); } diff --git a/lib/compiler_rt/fixunshfti.zig b/lib/compiler_rt/fixunshfti.zig index 5e767dc36c..97a1541aa3 100644 --- a/lib/compiler_rt/fixunshfti.zig +++ b/lib/compiler_rt/fixunshfti.zig @@ -19,5 +19,5 @@ pub fn __fixunshfti(a: f16) callconv(.C) u128 { const v2u64 = @Vector(2, u64); fn __fixunshfti_windows_x86_64(a: f16) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(u128, a)); + return @as(v2u64, @bitCast(intFromFloat(u128, a))); } diff --git a/lib/compiler_rt/fixunssfti.zig b/lib/compiler_rt/fixunssfti.zig index 947164b369..d99b2bfd38 100644 --- a/lib/compiler_rt/fixunssfti.zig +++ b/lib/compiler_rt/fixunssfti.zig @@ -19,5 +19,5 @@ pub fn __fixunssfti(a: f32) callconv(.C) u128 { const v2u64 = @Vector(2, u64); fn __fixunssfti_windows_x86_64(a: f32) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(u128, a)); + return @as(v2u64, @bitCast(intFromFloat(u128, a))); } diff --git a/lib/compiler_rt/fixunstfti.zig b/lib/compiler_rt/fixunstfti.zig index bf9764b1aa..d796849b68 100644 --- a/lib/compiler_rt/fixunstfti.zig +++ b/lib/compiler_rt/fixunstfti.zig @@ -21,5 +21,5 @@ pub fn __fixunstfti(a: f128) callconv(.C) u128 { const v2u64 = @Vector(2, u64); fn __fixunstfti_windows_x86_64(a: f128) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(u128, a)); + return @as(v2u64, @bitCast(intFromFloat(u128, a))); } diff --git a/lib/compiler_rt/fixunsxfti.zig b/lib/compiler_rt/fixunsxfti.zig index b9ed4d8132..86216aa560 100644 --- a/lib/compiler_rt/fixunsxfti.zig +++ b/lib/compiler_rt/fixunsxfti.zig @@ -19,5 +19,5 @@ pub fn __fixunsxfti(a: f80) callconv(.C) u128 { const v2u64 = @Vector(2, u64); fn __fixunsxfti_windows_x86_64(a: f80) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(u128, a)); + return @as(v2u64, @bitCast(intFromFloat(u128, a))); } diff --git a/lib/compiler_rt/fixxfti.zig b/lib/compiler_rt/fixxfti.zig index c9a32d8ad4..f04c68d239 100644 --- a/lib/compiler_rt/fixxfti.zig +++ b/lib/compiler_rt/fixxfti.zig @@ -19,5 +19,5 @@ pub fn __fixxfti(a: f80) callconv(.C) i128 { const v2u64 = @Vector(2, u64); fn __fixxfti_windows_x86_64(a: f80) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(i128, a)); + return @as(v2u64, @bitCast(intFromFloat(i128, a))); } diff --git a/lib/compiler_rt/float_from_int.zig b/lib/compiler_rt/float_from_int.zig index 8a2c233cba..c9ecba07f8 100644 --- a/lib/compiler_rt/float_from_int.zig +++ b/lib/compiler_rt/float_from_int.zig @@ -25,17 +25,17 @@ pub fn floatFromInt(comptime T: type, x: anytype) T { // Compute significand var exp = int_bits - @clz(abs_val) - 1; if (int_bits <= fractional_bits or exp <= fractional_bits) { - const shift_amt = fractional_bits - @intCast(math.Log2Int(uT), exp); + const shift_amt = fractional_bits - 
@as(math.Log2Int(uT), @intCast(exp)); // Shift up result to line up with the significand - no rounding required - result = (@intCast(uT, abs_val) << shift_amt); + result = (@as(uT, @intCast(abs_val)) << shift_amt); result ^= implicit_bit; // Remove implicit integer bit } else { - var shift_amt = @intCast(math.Log2Int(Z), exp - fractional_bits); + var shift_amt = @as(math.Log2Int(Z), @intCast(exp - fractional_bits)); const exact_tie: bool = @ctz(abs_val) == shift_amt - 1; // Shift down result and remove implicit integer bit - result = @intCast(uT, (abs_val >> (shift_amt - 1))) ^ (implicit_bit << 1); + result = @as(uT, @intCast((abs_val >> (shift_amt - 1)))) ^ (implicit_bit << 1); // Round result, including round-to-even for exact ties result = ((result + 1) >> 1) & ~@as(uT, @intFromBool(exact_tie)); @@ -43,14 +43,14 @@ pub fn floatFromInt(comptime T: type, x: anytype) T { // Compute exponent if ((int_bits > max_exp) and (exp > max_exp)) // If exponent too large, overflow to infinity - return @bitCast(T, sign_bit | @bitCast(uT, inf)); + return @as(T, @bitCast(sign_bit | @as(uT, @bitCast(inf)))); result += (@as(uT, exp) + exp_bias) << math.floatMantissaBits(T); // If the result included a carry, we need to restore the explicit integer bit if (T == f80) result |= 1 << fractional_bits; - return @bitCast(T, sign_bit | result); + return @as(T, @bitCast(sign_bit | result)); } test { diff --git a/lib/compiler_rt/float_from_int_test.zig b/lib/compiler_rt/float_from_int_test.zig index bbc315c745..734168e3c5 100644 --- a/lib/compiler_rt/float_from_int_test.zig +++ b/lib/compiler_rt/float_from_int_test.zig @@ -30,12 +30,12 @@ const __floatuntitf = @import("floatuntitf.zig").__floatuntitf; fn test__floatsisf(a: i32, expected: u32) !void { const r = __floatsisf(a); - try std.testing.expect(@bitCast(u32, r) == expected); + try std.testing.expect(@as(u32, @bitCast(r)) == expected); } fn test_one_floatunsisf(a: u32, expected: u32) !void { const r = __floatunsisf(a); - try std.testing.expect(@bitCast(u32, r) == expected); + try std.testing.expect(@as(u32, @bitCast(r)) == expected); } test "floatsisf" { @@ -43,7 +43,7 @@ test "floatsisf" { try test__floatsisf(1, 0x3f800000); try test__floatsisf(-1, 0xbf800000); try test__floatsisf(0x7FFFFFFF, 0x4f000000); - try test__floatsisf(@bitCast(i32, @intCast(u32, 0x80000000)), 0xcf000000); + try test__floatsisf(@as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 0xcf000000); } test "floatunsisf" { @@ -72,10 +72,10 @@ test "floatdisf" { try test__floatdisf(-2, -2.0); try test__floatdisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62); try test__floatdisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62); - try test__floatdisf(@bitCast(i64, @as(u64, 0x8000008000000000)), -0x1.FFFFFEp+62); - try test__floatdisf(@bitCast(i64, @as(u64, 0x8000010000000000)), -0x1.FFFFFCp+62); - try test__floatdisf(@bitCast(i64, @as(u64, 0x8000000000000000)), -0x1.000000p+63); - try test__floatdisf(@bitCast(i64, @as(u64, 0x8000000000000001)), -0x1.000000p+63); + try test__floatdisf(@as(i64, @bitCast(@as(u64, 0x8000008000000000))), -0x1.FFFFFEp+62); + try test__floatdisf(@as(i64, @bitCast(@as(u64, 0x8000010000000000))), -0x1.FFFFFCp+62); + try test__floatdisf(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), -0x1.000000p+63); + try test__floatdisf(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), -0x1.000000p+63); try test__floatdisf(0x0007FB72E8000000, 0x1.FEDCBAp+50); try test__floatdisf(0x0007FB72EA000000, 0x1.FEDCBAp+50); try test__floatdisf(0x0007FB72EB000000, 0x1.FEDCBAp+50); @@ -228,17 +228,17 @@ test 
"floatuntisf" { try test__floatuntisf(make_uti(0x0000000000001FED, 0xCBE0000000000000), 0x1.FEDCBEp+76); // Test overflow to infinity - try test__floatuntisf(@as(u128, math.maxInt(u128)), @bitCast(f32, math.inf(f32))); + try test__floatuntisf(@as(u128, math.maxInt(u128)), @as(f32, @bitCast(math.inf(f32)))); } fn test_one_floatsidf(a: i32, expected: u64) !void { const r = __floatsidf(a); - try std.testing.expect(@bitCast(u64, r) == expected); + try std.testing.expect(@as(u64, @bitCast(r)) == expected); } fn test_one_floatunsidf(a: u32, expected: u64) !void { const r = __floatunsidf(a); - try std.testing.expect(@bitCast(u64, r) == expected); + try std.testing.expect(@as(u64, @bitCast(r)) == expected); } test "floatsidf" { @@ -246,15 +246,15 @@ test "floatsidf" { try test_one_floatsidf(1, 0x3ff0000000000000); try test_one_floatsidf(-1, 0xbff0000000000000); try test_one_floatsidf(0x7FFFFFFF, 0x41dfffffffc00000); - try test_one_floatsidf(@bitCast(i32, @intCast(u32, 0x80000000)), 0xc1e0000000000000); + try test_one_floatsidf(@as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 0xc1e0000000000000); } test "floatunsidf" { try test_one_floatunsidf(0, 0x0000000000000000); try test_one_floatunsidf(1, 0x3ff0000000000000); try test_one_floatunsidf(0x7FFFFFFF, 0x41dfffffffc00000); - try test_one_floatunsidf(@intCast(u32, 0x80000000), 0x41e0000000000000); - try test_one_floatunsidf(@intCast(u32, 0xFFFFFFFF), 0x41efffffffe00000); + try test_one_floatunsidf(@as(u32, @intCast(0x80000000)), 0x41e0000000000000); + try test_one_floatunsidf(@as(u32, @intCast(0xFFFFFFFF)), 0x41efffffffe00000); } fn test__floatdidf(a: i64, expected: f64) !void { @@ -279,12 +279,12 @@ test "floatdidf" { try test__floatdidf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62); try test__floatdidf(0x7FFFFF0000000000, 0x1.FFFFFCp+62); try test__floatdidf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62); - try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000008000000000)), -0x1.FFFFFEp+62); - try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000000800)), -0x1.FFFFFFFFFFFFEp+62); - try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000010000000000)), -0x1.FFFFFCp+62); - try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000001000)), -0x1.FFFFFFFFFFFFCp+62); - try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000000000)), -0x1.000000p+63); - try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000000001)), -0x1.000000p+63); // 0x8000000000000001 + try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000008000000000)))), -0x1.FFFFFEp+62); + try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000000000000800)))), -0x1.FFFFFFFFFFFFEp+62); + try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000010000000000)))), -0x1.FFFFFCp+62); + try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000000000001000)))), -0x1.FFFFFFFFFFFFCp+62); + try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000000000000000)))), -0x1.000000p+63); + try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000000000000001)))), -0x1.000000p+63); // 0x8000000000000001 try test__floatdidf(0x0007FB72E8000000, 0x1.FEDCBAp+50); try test__floatdidf(0x0007FB72EA000000, 0x1.FEDCBA8p+50); try test__floatdidf(0x0007FB72EB000000, 0x1.FEDCBACp+50); @@ -505,7 +505,7 @@ test "floatuntidf" { fn test__floatsitf(a: i32, expected: u128) !void { const r = __floatsitf(a); - try std.testing.expect(@bitCast(u128, r) == expected); + try std.testing.expect(@as(u128, @bitCast(r)) == expected); } test "floatsitf" { @@ -513,16 +513,16 @@ test 
"floatsitf" { try test__floatsitf(0x7FFFFFFF, 0x401dfffffffc00000000000000000000); try test__floatsitf(0x12345678, 0x401b2345678000000000000000000000); try test__floatsitf(-0x12345678, 0xc01b2345678000000000000000000000); - try test__floatsitf(@bitCast(i32, @intCast(u32, 0xffffffff)), 0xbfff0000000000000000000000000000); - try test__floatsitf(@bitCast(i32, @intCast(u32, 0x80000000)), 0xc01e0000000000000000000000000000); + try test__floatsitf(@as(i32, @bitCast(@as(u32, @intCast(0xffffffff)))), 0xbfff0000000000000000000000000000); + try test__floatsitf(@as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 0xc01e0000000000000000000000000000); } fn test__floatunsitf(a: u32, expected_hi: u64, expected_lo: u64) !void { const x = __floatunsitf(a); - const x_repr = @bitCast(u128, x); - const x_hi = @intCast(u64, x_repr >> 64); - const x_lo = @truncate(u64, x_repr); + const x_repr = @as(u128, @bitCast(x)); + const x_hi = @as(u64, @intCast(x_repr >> 64)); + const x_lo = @as(u64, @truncate(x_repr)); if (x_hi == expected_hi and x_lo == expected_lo) { return; @@ -552,9 +552,9 @@ fn test__floatditf(a: i64, expected: f128) !void { fn test__floatunditf(a: u64, expected_hi: u64, expected_lo: u64) !void { const x = __floatunditf(a); - const x_repr = @bitCast(u128, x); - const x_hi = @intCast(u64, x_repr >> 64); - const x_lo = @truncate(u64, x_repr); + const x_repr = @as(u128, @bitCast(x)); + const x_hi = @as(u64, @intCast(x_repr >> 64)); + const x_lo = @as(u64, @truncate(x_repr)); if (x_hi == expected_hi and x_lo == expected_lo) { return; @@ -575,10 +575,10 @@ test "floatditf" { try test__floatditf(0x2, make_tf(0x4000000000000000, 0x0)); try test__floatditf(0x1, make_tf(0x3fff000000000000, 0x0)); try test__floatditf(0x0, make_tf(0x0, 0x0)); - try test__floatditf(@bitCast(i64, @as(u64, 0xffffffffffffffff)), make_tf(0xbfff000000000000, 0x0)); - try test__floatditf(@bitCast(i64, @as(u64, 0xfffffffffffffffe)), make_tf(0xc000000000000000, 0x0)); + try test__floatditf(@as(i64, @bitCast(@as(u64, 0xffffffffffffffff))), make_tf(0xbfff000000000000, 0x0)); + try test__floatditf(@as(i64, @bitCast(@as(u64, 0xfffffffffffffffe))), make_tf(0xc000000000000000, 0x0)); try test__floatditf(-0x123456789abcdef1, make_tf(0xc03b23456789abcd, 0xef10000000000000)); - try test__floatditf(@bitCast(i64, @as(u64, 0x8000000000000000)), make_tf(0xc03e000000000000, 0x0)); + try test__floatditf(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), make_tf(0xc03e000000000000, 0x0)); } test "floatunditf" { @@ -773,7 +773,7 @@ fn make_ti(high: u64, low: u64) i128 { var result: u128 = high; result <<= 64; result |= low; - return @bitCast(i128, result); + return @as(i128, @bitCast(result)); } fn make_uti(high: u64, low: u64) u128 { @@ -787,7 +787,7 @@ fn make_tf(high: u64, low: u64) f128 { var result: u128 = high; result <<= 64; result |= low; - return @bitCast(f128, result); + return @as(f128, @bitCast(result)); } test "conversion to f16" { @@ -815,22 +815,22 @@ test "conversion to f80" { const floatFromInt = @import("./float_from_int.zig").floatFromInt; try testing.expect(floatFromInt(f80, @as(i80, -12)) == -12); - try testing.expect(@intFromFloat(u80, floatFromInt(f80, @as(u64, math.maxInt(u64)) + 0)) == math.maxInt(u64) + 0); - try testing.expect(@intFromFloat(u80, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 1)) == math.maxInt(u64) + 1); + try testing.expect(@as(u80, @intFromFloat(floatFromInt(f80, @as(u64, math.maxInt(u64)) + 0))) == math.maxInt(u64) + 0); + try testing.expect(@as(u80, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) 
+ 1))) == math.maxInt(u64) + 1); try testing.expect(floatFromInt(f80, @as(u32, 0)) == 0.0); try testing.expect(floatFromInt(f80, @as(u32, 1)) == 1.0); - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u32, math.maxInt(u24)) + 0)) == math.maxInt(u24)); - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 0)) == math.maxInt(u64)); - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 1)) == math.maxInt(u64) + 1); // Exact - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 2)) == math.maxInt(u64) + 1); // Rounds down - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 3)) == math.maxInt(u64) + 3); // Tie - Exact - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 4)) == math.maxInt(u64) + 5); // Rounds up + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u32, math.maxInt(u24)) + 0))) == math.maxInt(u24)); + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 0))) == math.maxInt(u64)); + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 1))) == math.maxInt(u64) + 1); // Exact + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 2))) == math.maxInt(u64) + 1); // Rounds down + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 3))) == math.maxInt(u64) + 3); // Tie - Exact + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 4))) == math.maxInt(u64) + 5); // Rounds up - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 0)) == math.maxInt(u65) + 1); // Rounds up - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 1)) == math.maxInt(u65) + 1); // Exact - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 2)) == math.maxInt(u65) + 1); // Rounds down - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 3)) == math.maxInt(u65) + 1); // Tie - Rounds down - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 4)) == math.maxInt(u65) + 5); // Rounds up - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 5)) == math.maxInt(u65) + 5); // Exact + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 0))) == math.maxInt(u65) + 1); // Rounds up + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 1))) == math.maxInt(u65) + 1); // Exact + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 2))) == math.maxInt(u65) + 1); // Rounds down + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 3))) == math.maxInt(u65) + 1); // Tie - Rounds down + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 4))) == math.maxInt(u65) + 5); // Rounds up + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 5))) == math.maxInt(u65) + 5); // Exact } diff --git a/lib/compiler_rt/floattidf.zig b/lib/compiler_rt/floattidf.zig index c42e8f2974..fa213d4d80 100644 --- a/lib/compiler_rt/floattidf.zig +++ b/lib/compiler_rt/floattidf.zig @@ -17,5 +17,5 @@ pub fn __floattidf(a: i128) callconv(.C) f64 { } fn 
__floattidf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f64 { - return floatFromInt(f64, @bitCast(i128, a)); + return floatFromInt(f64, @as(i128, @bitCast(a))); } diff --git a/lib/compiler_rt/floattihf.zig b/lib/compiler_rt/floattihf.zig index 90003660ec..752e5b8663 100644 --- a/lib/compiler_rt/floattihf.zig +++ b/lib/compiler_rt/floattihf.zig @@ -17,5 +17,5 @@ pub fn __floattihf(a: i128) callconv(.C) f16 { } fn __floattihf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f16 { - return floatFromInt(f16, @bitCast(i128, a)); + return floatFromInt(f16, @as(i128, @bitCast(a))); } diff --git a/lib/compiler_rt/floattisf.zig b/lib/compiler_rt/floattisf.zig index 09c0b12ed0..0f81bfbb85 100644 --- a/lib/compiler_rt/floattisf.zig +++ b/lib/compiler_rt/floattisf.zig @@ -17,5 +17,5 @@ pub fn __floattisf(a: i128) callconv(.C) f32 { } fn __floattisf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f32 { - return floatFromInt(f32, @bitCast(i128, a)); + return floatFromInt(f32, @as(i128, @bitCast(a))); } diff --git a/lib/compiler_rt/floattitf.zig b/lib/compiler_rt/floattitf.zig index ae0ecbb98a..49397d34a3 100644 --- a/lib/compiler_rt/floattitf.zig +++ b/lib/compiler_rt/floattitf.zig @@ -19,5 +19,5 @@ pub fn __floattitf(a: i128) callconv(.C) f128 { } fn __floattitf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f128 { - return floatFromInt(f128, @bitCast(i128, a)); + return floatFromInt(f128, @as(i128, @bitCast(a))); } diff --git a/lib/compiler_rt/floattixf.zig b/lib/compiler_rt/floattixf.zig index 9c2339ff8a..a8fd2d6ae6 100644 --- a/lib/compiler_rt/floattixf.zig +++ b/lib/compiler_rt/floattixf.zig @@ -17,5 +17,5 @@ pub fn __floattixf(a: i128) callconv(.C) f80 { } fn __floattixf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f80 { - return floatFromInt(f80, @bitCast(i128, a)); + return floatFromInt(f80, @as(i128, @bitCast(a))); } diff --git a/lib/compiler_rt/floatuntidf.zig b/lib/compiler_rt/floatuntidf.zig index a2b46506f0..f036ffd7fe 100644 --- a/lib/compiler_rt/floatuntidf.zig +++ b/lib/compiler_rt/floatuntidf.zig @@ -17,5 +17,5 @@ pub fn __floatuntidf(a: u128) callconv(.C) f64 { } fn __floatuntidf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f64 { - return floatFromInt(f64, @bitCast(u128, a)); + return floatFromInt(f64, @as(u128, @bitCast(a))); } diff --git a/lib/compiler_rt/floatuntihf.zig b/lib/compiler_rt/floatuntihf.zig index f493453c91..97ccf7f5fe 100644 --- a/lib/compiler_rt/floatuntihf.zig +++ b/lib/compiler_rt/floatuntihf.zig @@ -17,5 +17,5 @@ pub fn __floatuntihf(a: u128) callconv(.C) f16 { } fn __floatuntihf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f16 { - return floatFromInt(f16, @bitCast(u128, a)); + return floatFromInt(f16, @as(u128, @bitCast(a))); } diff --git a/lib/compiler_rt/floatuntisf.zig b/lib/compiler_rt/floatuntisf.zig index 9df7b833ea..8d8f771e7d 100644 --- a/lib/compiler_rt/floatuntisf.zig +++ b/lib/compiler_rt/floatuntisf.zig @@ -17,5 +17,5 @@ pub fn __floatuntisf(a: u128) callconv(.C) f32 { } fn __floatuntisf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f32 { - return floatFromInt(f32, @bitCast(u128, a)); + return floatFromInt(f32, @as(u128, @bitCast(a))); } diff --git a/lib/compiler_rt/floatuntitf.zig b/lib/compiler_rt/floatuntitf.zig index 55a5ab4da1..e828f12d8b 100644 --- a/lib/compiler_rt/floatuntitf.zig +++ b/lib/compiler_rt/floatuntitf.zig @@ -19,5 +19,5 @@ pub fn __floatuntitf(a: u128) callconv(.C) f128 { } fn __floatuntitf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f128 { - return floatFromInt(f128, @bitCast(u128, a)); + return 
floatFromInt(f128, @as(u128, @bitCast(a))); } diff --git a/lib/compiler_rt/floatuntixf.zig b/lib/compiler_rt/floatuntixf.zig index cbf597ca89..c9016bfa06 100644 --- a/lib/compiler_rt/floatuntixf.zig +++ b/lib/compiler_rt/floatuntixf.zig @@ -17,5 +17,5 @@ pub fn __floatuntixf(a: u128) callconv(.C) f80 { } fn __floatuntixf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f80 { - return floatFromInt(f80, @bitCast(u128, a)); + return floatFromInt(f80, @as(u128, @bitCast(a))); } diff --git a/lib/compiler_rt/floor.zig b/lib/compiler_rt/floor.zig index ea274c0d82..dd73be86fd 100644 --- a/lib/compiler_rt/floor.zig +++ b/lib/compiler_rt/floor.zig @@ -26,8 +26,8 @@ comptime { } pub fn __floorh(x: f16) callconv(.C) f16 { - var u = @bitCast(u16, x); - const e = @intCast(i16, (u >> 10) & 31) - 15; + var u = @as(u16, @bitCast(x)); + const e = @as(i16, @intCast((u >> 10) & 31)) - 15; var m: u16 = undefined; // TODO: Shouldn't need this explicit check. @@ -40,7 +40,7 @@ pub fn __floorh(x: f16) callconv(.C) f16 { } if (e >= 0) { - m = @as(u16, 1023) >> @intCast(u4, e); + m = @as(u16, 1023) >> @as(u4, @intCast(e)); if (u & m == 0) { return x; } @@ -48,7 +48,7 @@ pub fn __floorh(x: f16) callconv(.C) f16 { if (u >> 15 != 0) { u += m; } - return @bitCast(f16, u & ~m); + return @as(f16, @bitCast(u & ~m)); } else { math.doNotOptimizeAway(x + 0x1.0p120); if (u >> 15 == 0) { @@ -60,8 +60,8 @@ pub fn __floorh(x: f16) callconv(.C) f16 { } pub fn floorf(x: f32) callconv(.C) f32 { - var u = @bitCast(u32, x); - const e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F; + var u = @as(u32, @bitCast(x)); + const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F; var m: u32 = undefined; // TODO: Shouldn't need this explicit check. @@ -74,7 +74,7 @@ pub fn floorf(x: f32) callconv(.C) f32 { } if (e >= 0) { - m = @as(u32, 0x007FFFFF) >> @intCast(u5, e); + m = @as(u32, 0x007FFFFF) >> @as(u5, @intCast(e)); if (u & m == 0) { return x; } @@ -82,7 +82,7 @@ pub fn floorf(x: f32) callconv(.C) f32 { if (u >> 31 != 0) { u += m; } - return @bitCast(f32, u & ~m); + return @as(f32, @bitCast(u & ~m)); } else { math.doNotOptimizeAway(x + 0x1.0p120); if (u >> 31 == 0) { @@ -96,7 +96,7 @@ pub fn floorf(x: f32) callconv(.C) f32 { pub fn floor(x: f64) callconv(.C) f64 { const f64_toint = 1.0 / math.floatEps(f64); - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); const e = (u >> 52) & 0x7FF; var y: f64 = undefined; @@ -126,13 +126,13 @@ pub fn floor(x: f64) callconv(.C) f64 { pub fn __floorx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, floorq(x)); + return @as(f80, @floatCast(floorq(x))); } pub fn floorq(x: f128) callconv(.C) f128 { const f128_toint = 1.0 / math.floatEps(f128); - const u = @bitCast(u128, x); + const u = @as(u128, @bitCast(x)); const e = (u >> 112) & 0x7FFF; var y: f128 = undefined; diff --git a/lib/compiler_rt/fma.zig b/lib/compiler_rt/fma.zig index fe2da1c99c..ed0e6649c6 100644 --- a/lib/compiler_rt/fma.zig +++ b/lib/compiler_rt/fma.zig @@ -28,20 +28,20 @@ comptime { pub fn __fmah(x: f16, y: f16, z: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, fmaf(x, y, z)); + return @as(f16, @floatCast(fmaf(x, y, z))); } pub fn fmaf(x: f32, y: f32, z: f32) callconv(.C) f32 { const xy = @as(f64, x) * y; const xy_z = xy + z; - const u = @bitCast(u64, xy_z); + const u = @as(u64, @bitCast(xy_z)); const e = (u >> 52) & 0x7FF; if ((u & 0x1FFFFFFF) != 0x10000000 or e == 0x7FF or (xy_z - xy == z and xy_z - z == xy)) { - return @floatCast(f32, xy_z); + return 
@as(f32, @floatCast(xy_z)); } else { // TODO: Handle inexact case with double-rounding - return @floatCast(f32, xy_z); + return @as(f32, @floatCast(xy_z)); } } @@ -95,7 +95,7 @@ pub fn fma(x: f64, y: f64, z: f64) callconv(.C) f64 { pub fn __fmax(a: f80, b: f80, c: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, fmaq(a, b, c)); + return @as(f80, @floatCast(fmaq(a, b, c))); } /// Fused multiply-add: Compute x * y + z with a single rounding error. @@ -201,12 +201,12 @@ fn dd_mul(a: f64, b: f64) dd { fn add_adjusted(a: f64, b: f64) f64 { var sum = dd_add(a, b); if (sum.lo != 0) { - var uhii = @bitCast(u64, sum.hi); + var uhii = @as(u64, @bitCast(sum.hi)); if (uhii & 1 == 0) { // hibits += copysign(1.0, sum.hi, sum.lo) - const uloi = @bitCast(u64, sum.lo); + const uloi = @as(u64, @bitCast(sum.lo)); uhii += 1 - ((uhii ^ uloi) >> 62); - sum.hi = @bitCast(f64, uhii); + sum.hi = @as(f64, @bitCast(uhii)); } } return sum.hi; @@ -215,12 +215,12 @@ fn add_adjusted(a: f64, b: f64) f64 { fn add_and_denorm(a: f64, b: f64, scale: i32) f64 { var sum = dd_add(a, b); if (sum.lo != 0) { - var uhii = @bitCast(u64, sum.hi); - const bits_lost = -@intCast(i32, (uhii >> 52) & 0x7FF) - scale + 1; + var uhii = @as(u64, @bitCast(sum.hi)); + const bits_lost = -@as(i32, @intCast((uhii >> 52) & 0x7FF)) - scale + 1; if ((bits_lost != 1) == (uhii & 1 != 0)) { - const uloi = @bitCast(u64, sum.lo); + const uloi = @as(u64, @bitCast(sum.lo)); uhii += 1 - (((uhii ^ uloi) >> 62) & 2); - sum.hi = @bitCast(f64, uhii); + sum.hi = @as(f64, @bitCast(uhii)); } } return math.scalbn(sum.hi, scale); @@ -257,12 +257,12 @@ fn dd_add128(a: f128, b: f128) dd128 { fn add_adjusted128(a: f128, b: f128) f128 { var sum = dd_add128(a, b); if (sum.lo != 0) { - var uhii = @bitCast(u128, sum.hi); + var uhii = @as(u128, @bitCast(sum.hi)); if (uhii & 1 == 0) { // hibits += copysign(1.0, sum.hi, sum.lo) - const uloi = @bitCast(u128, sum.lo); + const uloi = @as(u128, @bitCast(sum.lo)); uhii += 1 - ((uhii ^ uloi) >> 126); - sum.hi = @bitCast(f128, uhii); + sum.hi = @as(f128, @bitCast(uhii)); } } return sum.hi; @@ -282,12 +282,12 @@ fn add_and_denorm128(a: f128, b: f128, scale: i32) f128 { // If we are losing only one bit to denormalization, however, we must // break the ties manually. 
if (sum.lo != 0) { - var uhii = @bitCast(u128, sum.hi); - const bits_lost = -@intCast(i32, (uhii >> 112) & 0x7FFF) - scale + 1; + var uhii = @as(u128, @bitCast(sum.hi)); + const bits_lost = -@as(i32, @intCast((uhii >> 112) & 0x7FFF)) - scale + 1; if ((bits_lost != 1) == (uhii & 1 != 0)) { - const uloi = @bitCast(u128, sum.lo); + const uloi = @as(u128, @bitCast(sum.lo)); uhii += 1 - (((uhii ^ uloi) >> 126) & 2); - sum.hi = @bitCast(f128, uhii); + sum.hi = @as(f128, @bitCast(uhii)); } } return math.scalbn(sum.hi, scale); diff --git a/lib/compiler_rt/fmod.zig b/lib/compiler_rt/fmod.zig index b80dffdb82..81706b71e1 100644 --- a/lib/compiler_rt/fmod.zig +++ b/lib/compiler_rt/fmod.zig @@ -22,7 +22,7 @@ comptime { pub fn __fmodh(x: f16, y: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, fmodf(x, y)); + return @as(f16, @floatCast(fmodf(x, y))); } pub fn fmodf(x: f32, y: f32) callconv(.C) f32 { @@ -46,12 +46,12 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 { const signBit = (@as(Z, 1) << (significandBits + exponentBits)); const maxExponent = ((1 << exponentBits) - 1); - var aRep = @bitCast(Z, a); - var bRep = @bitCast(Z, b); + var aRep = @as(Z, @bitCast(a)); + var bRep = @as(Z, @bitCast(b)); const signA = aRep & signBit; - var expA = @intCast(i32, (@bitCast(Z, a) >> significandBits) & maxExponent); - var expB = @intCast(i32, (@bitCast(Z, b) >> significandBits) & maxExponent); + var expA = @as(i32, @intCast((@as(Z, @bitCast(a)) >> significandBits) & maxExponent)); + var expB = @as(i32, @intCast((@as(Z, @bitCast(b)) >> significandBits) & maxExponent)); // There are 3 cases where the answer is undefined, check for: // - fmodx(val, 0) @@ -82,8 +82,8 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 { var highA: u64 = 0; var highB: u64 = 0; - var lowA: u64 = @truncate(u64, aRep); - var lowB: u64 = @truncate(u64, bRep); + var lowA: u64 = @as(u64, @truncate(aRep)); + var lowB: u64 = @as(u64, @truncate(bRep)); while (expA > expB) : (expA -= 1) { var high = highA -% highB; @@ -123,11 +123,11 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 { // Combine the exponent with the sign and significand, normalize if happened to be denormalized if (expA < -fractionalBits) { - return @bitCast(T, signA); + return @as(T, @bitCast(signA)); } else if (expA <= 0) { - return @bitCast(T, (lowA >> @intCast(math.Log2Int(u64), 1 - expA)) | signA); + return @as(T, @bitCast((lowA >> @as(math.Log2Int(u64), @intCast(1 - expA))) | signA)); } else { - return @bitCast(T, lowA | (@as(Z, @intCast(u16, expA)) << significandBits) | signA); + return @as(T, @bitCast(lowA | (@as(Z, @as(u16, @intCast(expA))) << significandBits) | signA)); } } @@ -136,10 +136,10 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 { pub fn fmodq(a: f128, b: f128) callconv(.C) f128 { var amod = a; var bmod = b; - const aPtr_u64 = @ptrCast([*]u64, &amod); - const bPtr_u64 = @ptrCast([*]u64, &bmod); - const aPtr_u16 = @ptrCast([*]u16, &amod); - const bPtr_u16 = @ptrCast([*]u16, &bmod); + const aPtr_u64 = @as([*]u64, @ptrCast(&amod)); + const bPtr_u64 = @as([*]u64, @ptrCast(&bmod)); + const aPtr_u16 = @as([*]u16, @ptrCast(&amod)); + const bPtr_u16 = @as([*]u16, @ptrCast(&bmod)); const exp_and_sign_index = comptime switch (builtin.target.cpu.arch.endian()) { .Little => 7, @@ -155,8 +155,8 @@ pub fn fmodq(a: f128, b: f128) callconv(.C) f128 { }; const signA = aPtr_u16[exp_and_sign_index] & 0x8000; - var expA = @intCast(i32, (aPtr_u16[exp_and_sign_index] & 0x7fff)); - var expB = @intCast(i32, 
(bPtr_u16[exp_and_sign_index] & 0x7fff)); + var expA = @as(i32, @intCast((aPtr_u16[exp_and_sign_index] & 0x7fff))); + var expB = @as(i32, @intCast((bPtr_u16[exp_and_sign_index] & 0x7fff))); // There are 3 cases where the answer is undefined, check for: // - fmodq(val, 0) @@ -173,8 +173,8 @@ pub fn fmodq(a: f128, b: f128) callconv(.C) f128 { } // Remove the sign from both - aPtr_u16[exp_and_sign_index] = @bitCast(u16, @intCast(i16, expA)); - bPtr_u16[exp_and_sign_index] = @bitCast(u16, @intCast(i16, expB)); + aPtr_u16[exp_and_sign_index] = @as(u16, @bitCast(@as(i16, @intCast(expA)))); + bPtr_u16[exp_and_sign_index] = @as(u16, @bitCast(@as(i16, @intCast(expB)))); if (amod <= bmod) { if (amod == bmod) { return 0 * a; @@ -241,10 +241,10 @@ pub fn fmodq(a: f128, b: f128) callconv(.C) f128 { // Combine the exponent with the sign, normalize if happend to be denormalized if (expA <= 0) { - aPtr_u16[exp_and_sign_index] = @truncate(u16, @bitCast(u32, (expA +% 120))) | signA; + aPtr_u16[exp_and_sign_index] = @as(u16, @truncate(@as(u32, @bitCast((expA +% 120))))) | signA; amod *= 0x1p-120; } else { - aPtr_u16[exp_and_sign_index] = @truncate(u16, @bitCast(u32, expA)) | signA; + aPtr_u16[exp_and_sign_index] = @as(u16, @truncate(@as(u32, @bitCast(expA)))) | signA; } return amod; @@ -270,14 +270,14 @@ inline fn generic_fmod(comptime T: type, x: T, y: T) T { const exp_bits = if (T == f32) 9 else 12; const bits_minus_1 = bits - 1; const mask = if (T == f32) 0xff else 0x7ff; - var ux = @bitCast(uint, x); - var uy = @bitCast(uint, y); - var ex = @intCast(i32, (ux >> digits) & mask); - var ey = @intCast(i32, (uy >> digits) & mask); - const sx = if (T == f32) @intCast(u32, ux & 0x80000000) else @intCast(i32, ux >> bits_minus_1); + var ux = @as(uint, @bitCast(x)); + var uy = @as(uint, @bitCast(y)); + var ex = @as(i32, @intCast((ux >> digits) & mask)); + var ey = @as(i32, @intCast((uy >> digits) & mask)); + const sx = if (T == f32) @as(u32, @intCast(ux & 0x80000000)) else @as(i32, @intCast(ux >> bits_minus_1)); var i: uint = undefined; - if (uy << 1 == 0 or math.isNan(@bitCast(T, uy)) or ex == mask) + if (uy << 1 == 0 or math.isNan(@as(T, @bitCast(uy))) or ex == mask) return (x * y) / (x * y); if (ux << 1 <= uy << 1) { @@ -293,7 +293,7 @@ inline fn generic_fmod(comptime T: type, x: T, y: T) T { ex -= 1; i <<= 1; }) {} - ux <<= @intCast(log2uint, @bitCast(u32, -ex + 1)); + ux <<= @as(log2uint, @intCast(@as(u32, @bitCast(-ex + 1)))); } else { ux &= math.maxInt(uint) >> exp_bits; ux |= 1 << digits; @@ -304,7 +304,7 @@ inline fn generic_fmod(comptime T: type, x: T, y: T) T { ey -= 1; i <<= 1; }) {} - uy <<= @intCast(log2uint, @bitCast(u32, -ey + 1)); + uy <<= @as(log2uint, @intCast(@as(u32, @bitCast(-ey + 1)))); } else { uy &= math.maxInt(uint) >> exp_bits; uy |= 1 << digits; @@ -334,16 +334,16 @@ inline fn generic_fmod(comptime T: type, x: T, y: T) T { // scale result up if (ex > 0) { ux -%= 1 << digits; - ux |= @as(uint, @bitCast(u32, ex)) << digits; + ux |= @as(uint, @as(u32, @bitCast(ex))) << digits; } else { - ux >>= @intCast(log2uint, @bitCast(u32, -ex + 1)); + ux >>= @as(log2uint, @intCast(@as(u32, @bitCast(-ex + 1)))); } if (T == f32) { ux |= sx; } else { - ux |= @intCast(uint, sx) << bits_minus_1; + ux |= @as(uint, @intCast(sx)) << bits_minus_1; } - return @bitCast(T, ux); + return @as(T, @bitCast(ux)); } test "fmodf" { diff --git a/lib/compiler_rt/int.zig b/lib/compiler_rt/int.zig index 47ff9e4c0c..d61233e7cf 100644 --- a/lib/compiler_rt/int.zig +++ b/lib/compiler_rt/int.zig @@ -52,8 +52,8 @@ test 
"test_divmodti4" { [_]i128{ -7, 5, -1, -2 }, [_]i128{ 19, 5, 3, 4 }, [_]i128{ 19, -5, -3, 4 }, - [_]i128{ @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 8, @bitCast(i128, @as(u128, 0xf0000000000000000000000000000000)), 0 }, - [_]i128{ @bitCast(i128, @as(u128, 0x80000000000000000000000000000007)), 8, @bitCast(i128, @as(u128, 0xf0000000000000000000000000000001)), -1 }, + [_]i128{ @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 8, @as(i128, @bitCast(@as(u128, 0xf0000000000000000000000000000000))), 0 }, + [_]i128{ @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000007))), 8, @as(i128, @bitCast(@as(u128, 0xf0000000000000000000000000000001))), -1 }, }; for (cases) |case| { @@ -85,8 +85,8 @@ test "test_divmoddi4" { [_]i64{ -7, 5, -1, -2 }, [_]i64{ 19, 5, 3, 4 }, [_]i64{ 19, -5, -3, 4 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 8, @bitCast(i64, @as(u64, 0xf000000000000000)), 0 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000007)), 8, @bitCast(i64, @as(u64, 0xf000000000000001)), -1 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 8, @as(i64, @bitCast(@as(u64, 0xf000000000000000))), 0 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000007))), 8, @as(i64, @bitCast(@as(u64, 0xf000000000000001))), -1 }, }; for (cases) |case| { @@ -110,14 +110,14 @@ test "test_udivmoddi4" { pub fn __divdi3(a: i64, b: i64) callconv(.C) i64 { // Set aside the sign of the quotient. - const sign = @bitCast(u64, (a ^ b) >> 63); + const sign = @as(u64, @bitCast((a ^ b) >> 63)); // Take absolute value of a and b via abs(x) = (x^(x >> 63)) - (x >> 63). const abs_a = (a ^ (a >> 63)) -% (a >> 63); const abs_b = (b ^ (b >> 63)) -% (b >> 63); // Unsigned division - const res = __udivmoddi4(@bitCast(u64, abs_a), @bitCast(u64, abs_b), null); + const res = __udivmoddi4(@as(u64, @bitCast(abs_a)), @as(u64, @bitCast(abs_b)), null); // Apply sign of quotient to result and return. - return @bitCast(i64, (res ^ sign) -% sign); + return @as(i64, @bitCast((res ^ sign) -% sign)); } test "test_divdi3" { @@ -129,10 +129,10 @@ test "test_divdi3" { [_]i64{ -2, 1, -2 }, [_]i64{ -2, -1, 2 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 1, @bitCast(i64, @as(u64, 0x8000000000000000)) }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -1, @bitCast(i64, @as(u64, 0x8000000000000000)) }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -2, 0x4000000000000000 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 2, @bitCast(i64, @as(u64, 0xC000000000000000)) }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))) }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))) }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -2, 0x4000000000000000 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 2, @as(i64, @bitCast(@as(u64, 0xC000000000000000))) }, }; for (cases) |case| { @@ -151,9 +151,9 @@ pub fn __moddi3(a: i64, b: i64) callconv(.C) i64 { const abs_b = (b ^ (b >> 63)) -% (b >> 63); // Unsigned division var r: u64 = undefined; - _ = __udivmoddi4(@bitCast(u64, abs_a), @bitCast(u64, abs_b), &r); + _ = __udivmoddi4(@as(u64, @bitCast(abs_a)), @as(u64, @bitCast(abs_b)), &r); // Apply the sign of the dividend and return. 
- return (@bitCast(i64, r) ^ (a >> 63)) -% (a >> 63); + return (@as(i64, @bitCast(r)) ^ (a >> 63)) -% (a >> 63); } test "test_moddi3" { @@ -165,12 +165,12 @@ test "test_moddi3" { [_]i64{ -5, 3, -2 }, [_]i64{ -5, -3, -2 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 1, 0 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -1, 0 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 2, 0 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -2, 0 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 3, -2 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -3, -2 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1, 0 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -1, 0 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 2, 0 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -2, 0 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 3, -2 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -3, -2 }, }; for (cases) |case| { @@ -225,8 +225,8 @@ test "test_divmodsi4" { [_]i32{ 19, 5, 3, 4 }, [_]i32{ 19, -5, -3, 4 }, - [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), 8, @bitCast(i32, @as(u32, 0xf0000000)), 0 }, - [_]i32{ @bitCast(i32, @as(u32, 0x80000007)), 8, @bitCast(i32, @as(u32, 0xf0000001)), -1 }, + [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), 8, @as(i32, @bitCast(@as(u32, 0xf0000000))), 0 }, + [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000007))), 8, @as(i32, @bitCast(@as(u32, 0xf0000001))), -1 }, }; for (cases) |case| { @@ -242,7 +242,7 @@ fn test_one_divmodsi4(a: i32, b: i32, expected_q: i32, expected_r: i32) !void { pub fn __udivmodsi4(a: u32, b: u32, rem: *u32) callconv(.C) u32 { const d = __udivsi3(a, b); - rem.* = @bitCast(u32, @bitCast(i32, a) -% (@bitCast(i32, d) * @bitCast(i32, b))); + rem.* = @as(u32, @bitCast(@as(i32, @bitCast(a)) -% (@as(i32, @bitCast(d)) * @as(i32, @bitCast(b))))); return d; } @@ -256,14 +256,14 @@ fn __aeabi_idiv(n: i32, d: i32) callconv(.AAPCS) i32 { inline fn div_i32(n: i32, d: i32) i32 { // Set aside the sign of the quotient. - const sign = @bitCast(u32, (n ^ d) >> 31); + const sign = @as(u32, @bitCast((n ^ d) >> 31)); // Take absolute value of a and b via abs(x) = (x^(x >> 31)) - (x >> 31). const abs_n = (n ^ (n >> 31)) -% (n >> 31); const abs_d = (d ^ (d >> 31)) -% (d >> 31); // abs(a) / abs(b) - const res = @bitCast(u32, abs_n) / @bitCast(u32, abs_d); + const res = @as(u32, @bitCast(abs_n)) / @as(u32, @bitCast(abs_d)); // Apply sign of quotient to result and return. 
- return @bitCast(i32, (res ^ sign) -% sign); + return @as(i32, @bitCast((res ^ sign) -% sign)); } test "test_divsi3" { @@ -275,10 +275,10 @@ test "test_divsi3" { [_]i32{ -2, 1, -2 }, [_]i32{ -2, -1, 2 }, - [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), 1, @bitCast(i32, @as(u32, 0x80000000)) }, - [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), -1, @bitCast(i32, @as(u32, 0x80000000)) }, - [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), -2, 0x40000000 }, - [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), 2, @bitCast(i32, @as(u32, 0xC0000000)) }, + [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), 1, @as(i32, @bitCast(@as(u32, 0x80000000))) }, + [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), -1, @as(i32, @bitCast(@as(u32, 0x80000000))) }, + [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), -2, 0x40000000 }, + [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), 2, @as(i32, @bitCast(@as(u32, 0xC0000000))) }, }; for (cases) |case| { @@ -304,7 +304,7 @@ inline fn div_u32(n: u32, d: u32) u32 { // special cases if (d == 0) return 0; // ?! if (n == 0) return 0; - var sr = @bitCast(c_uint, @as(c_int, @clz(d)) - @as(c_int, @clz(n))); + var sr = @as(c_uint, @bitCast(@as(c_int, @clz(d)) - @as(c_int, @clz(n)))); // 0 <= sr <= n_uword_bits - 1 or sr large if (sr > n_uword_bits - 1) { // d > r @@ -317,12 +317,12 @@ inline fn div_u32(n: u32, d: u32) u32 { sr += 1; // 1 <= sr <= n_uword_bits - 1 // Not a special case - var q: u32 = n << @intCast(u5, n_uword_bits - sr); - var r: u32 = n >> @intCast(u5, sr); + var q: u32 = n << @as(u5, @intCast(n_uword_bits - sr)); + var r: u32 = n >> @as(u5, @intCast(sr)); var carry: u32 = 0; while (sr > 0) : (sr -= 1) { // r:q = ((r:q) << 1) | carry - r = (r << 1) | (q >> @intCast(u5, n_uword_bits - 1)); + r = (r << 1) | (q >> @as(u5, @intCast(n_uword_bits - 1))); q = (q << 1) | carry; // carry = 0; // if (r.all >= d.all) @@ -330,9 +330,9 @@ inline fn div_u32(n: u32, d: u32) u32 { // r.all -= d.all; // carry = 1; // } - const s = @bitCast(i32, d -% r -% 1) >> @intCast(u5, n_uword_bits - 1); - carry = @intCast(u32, s & 1); - r -= d & @bitCast(u32, s); + const s = @as(i32, @bitCast(d -% r -% 1)) >> @as(u5, @intCast(n_uword_bits - 1)); + carry = @as(u32, @intCast(s & 1)); + r -= d & @as(u32, @bitCast(s)); } q = (q << 1) | carry; return q; @@ -496,11 +496,11 @@ test "test_modsi3" { [_]i32{ 5, -3, 2 }, [_]i32{ -5, 3, -2 }, [_]i32{ -5, -3, -2 }, - [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), 1, 0x0 }, - [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), 2, 0x0 }, - [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), -2, 0x0 }, - [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), 3, -2 }, - [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), -3, -2 }, + [_]i32{ @as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 1, 0x0 }, + [_]i32{ @as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 2, 0x0 }, + [_]i32{ @as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), -2, 0x0 }, + [_]i32{ @as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 3, -2 }, + [_]i32{ @as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), -3, -2 }, }; for (cases) |case| { diff --git a/lib/compiler_rt/int_from_float.zig b/lib/compiler_rt/int_from_float.zig index 78397a8131..aa2f78f922 100644 --- a/lib/compiler_rt/int_from_float.zig +++ b/lib/compiler_rt/int_from_float.zig @@ -17,9 +17,9 @@ pub inline fn intFromFloat(comptime I: type, a: anytype) I { const sig_mask = (@as(rep_t, 1) << sig_bits) - 1; // Break a into sign, exponent, significand - const a_rep: rep_t = @bitCast(rep_t, a); + const a_rep: rep_t = @as(rep_t, @bitCast(a)); const 
negative = (a_rep >> (float_bits - 1)) != 0; - const exponent = @intCast(i32, (a_rep << 1) >> (sig_bits + 1)) - exp_bias; + const exponent = @as(i32, @intCast((a_rep << 1) >> (sig_bits + 1))) - exp_bias; const significand: rep_t = (a_rep & sig_mask) | implicit_bit; // If the exponent is negative, the result rounds to zero. @@ -29,9 +29,9 @@ pub inline fn intFromFloat(comptime I: type, a: anytype) I { switch (@typeInfo(I).Int.signedness) { .unsigned => { if (negative) return 0; - if (@intCast(c_uint, exponent) >= @min(int_bits, max_exp)) return math.maxInt(I); + if (@as(c_uint, @intCast(exponent)) >= @min(int_bits, max_exp)) return math.maxInt(I); }, - .signed => if (@intCast(c_uint, exponent) >= @min(int_bits - 1, max_exp)) { + .signed => if (@as(c_uint, @intCast(exponent)) >= @min(int_bits - 1, max_exp)) { return if (negative) math.minInt(I) else math.maxInt(I); }, } @@ -40,9 +40,9 @@ pub inline fn intFromFloat(comptime I: type, a: anytype) I { // Otherwise, shift left. var result: I = undefined; if (exponent < fractional_bits) { - result = @intCast(I, significand >> @intCast(Log2Int(rep_t), fractional_bits - exponent)); + result = @as(I, @intCast(significand >> @as(Log2Int(rep_t), @intCast(fractional_bits - exponent)))); } else { - result = @intCast(I, significand) << @intCast(Log2Int(I), exponent - fractional_bits); + result = @as(I, @intCast(significand)) << @as(Log2Int(I), @intCast(exponent - fractional_bits)); } if ((@typeInfo(I).Int.signedness == .signed) and negative) diff --git a/lib/compiler_rt/log.zig b/lib/compiler_rt/log.zig index 622d509a2f..9c4b0096aa 100644 --- a/lib/compiler_rt/log.zig +++ b/lib/compiler_rt/log.zig @@ -27,7 +27,7 @@ comptime { pub fn __logh(a: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, logf(a)); + return @as(f16, @floatCast(logf(a))); } pub fn logf(x_: f32) callconv(.C) f32 { @@ -39,7 +39,7 @@ pub fn logf(x_: f32) callconv(.C) f32 { const Lg4: f32 = 0xf89e26.0p-26; var x = x_; - var ix = @bitCast(u32, x); + var ix = @as(u32, @bitCast(x)); var k: i32 = 0; // x < 2^(-126) @@ -56,7 +56,7 @@ pub fn logf(x_: f32) callconv(.C) f32 { // subnormal, scale x k -= 25; x *= 0x1.0p25; - ix = @bitCast(u32, x); + ix = @as(u32, @bitCast(x)); } else if (ix >= 0x7F800000) { return x; } else if (ix == 0x3F800000) { @@ -65,9 +65,9 @@ pub fn logf(x_: f32) callconv(.C) f32 { // x into [sqrt(2) / 2, sqrt(2)] ix += 0x3F800000 - 0x3F3504F3; - k += @intCast(i32, ix >> 23) - 0x7F; + k += @as(i32, @intCast(ix >> 23)) - 0x7F; ix = (ix & 0x007FFFFF) + 0x3F3504F3; - x = @bitCast(f32, ix); + x = @as(f32, @bitCast(ix)); const f = x - 1.0; const s = f / (2.0 + f); @@ -77,7 +77,7 @@ pub fn logf(x_: f32) callconv(.C) f32 { const t2 = z * (Lg1 + w * Lg3); const R = t2 + t1; const hfsq = 0.5 * f * f; - const dk = @floatFromInt(f32, k); + const dk = @as(f32, @floatFromInt(k)); return s * (hfsq + R) + dk * ln2_lo - hfsq + f + dk * ln2_hi; } @@ -94,8 +94,8 @@ pub fn log(x_: f64) callconv(.C) f64 { const Lg7: f64 = 1.479819860511658591e-01; var x = x_; - var ix = @bitCast(u64, x); - var hx = @intCast(u32, ix >> 32); + var ix = @as(u64, @bitCast(x)); + var hx = @as(u32, @intCast(ix >> 32)); var k: i32 = 0; if (hx < 0x00100000 or hx >> 31 != 0) { @@ -111,7 +111,7 @@ pub fn log(x_: f64) callconv(.C) f64 { // subnormal, scale x k -= 54; x *= 0x1.0p54; - hx = @intCast(u32, @bitCast(u64, ix) >> 32); + hx = @as(u32, @intCast(@as(u64, @bitCast(ix)) >> 32)); } else if (hx >= 0x7FF00000) { return x; } else if (hx == 0x3FF00000 and ix << 32 == 0) { @@ -120,10 
+120,10 @@ pub fn log(x_: f64) callconv(.C) f64 { // x into [sqrt(2) / 2, sqrt(2)] hx += 0x3FF00000 - 0x3FE6A09E; - k += @intCast(i32, hx >> 20) - 0x3FF; + k += @as(i32, @intCast(hx >> 20)) - 0x3FF; hx = (hx & 0x000FFFFF) + 0x3FE6A09E; ix = (@as(u64, hx) << 32) | (ix & 0xFFFFFFFF); - x = @bitCast(f64, ix); + x = @as(f64, @bitCast(ix)); const f = x - 1.0; const hfsq = 0.5 * f * f; @@ -133,19 +133,19 @@ pub fn log(x_: f64) callconv(.C) f64 { const t1 = w * (Lg2 + w * (Lg4 + w * Lg6)); const t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7))); const R = t2 + t1; - const dk = @floatFromInt(f64, k); + const dk = @as(f64, @floatFromInt(k)); return s * (hfsq + R) + dk * ln2_lo - hfsq + f + dk * ln2_hi; } pub fn __logx(a: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, logq(a)); + return @as(f80, @floatCast(logq(a))); } pub fn logq(a: f128) callconv(.C) f128 { // TODO: more correct implementation - return log(@floatCast(f64, a)); + return log(@as(f64, @floatCast(a))); } pub fn logl(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/log10.zig b/lib/compiler_rt/log10.zig index d45a3d8a40..bbd6392d96 100644 --- a/lib/compiler_rt/log10.zig +++ b/lib/compiler_rt/log10.zig @@ -28,7 +28,7 @@ comptime { pub fn __log10h(a: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, log10f(a)); + return @as(f16, @floatCast(log10f(a))); } pub fn log10f(x_: f32) callconv(.C) f32 { @@ -42,7 +42,7 @@ pub fn log10f(x_: f32) callconv(.C) f32 { const Lg4: f32 = 0xf89e26.0p-26; var x = x_; - var u = @bitCast(u32, x); + var u = @as(u32, @bitCast(x)); var ix = u; var k: i32 = 0; @@ -59,7 +59,7 @@ pub fn log10f(x_: f32) callconv(.C) f32 { k -= 25; x *= 0x1.0p25; - ix = @bitCast(u32, x); + ix = @as(u32, @bitCast(x)); } else if (ix >= 0x7F800000) { return x; } else if (ix == 0x3F800000) { @@ -68,9 +68,9 @@ pub fn log10f(x_: f32) callconv(.C) f32 { // x into [sqrt(2) / 2, sqrt(2)] ix += 0x3F800000 - 0x3F3504F3; - k += @intCast(i32, ix >> 23) - 0x7F; + k += @as(i32, @intCast(ix >> 23)) - 0x7F; ix = (ix & 0x007FFFFF) + 0x3F3504F3; - x = @bitCast(f32, ix); + x = @as(f32, @bitCast(ix)); const f = x - 1.0; const s = f / (2.0 + f); @@ -82,11 +82,11 @@ pub fn log10f(x_: f32) callconv(.C) f32 { const hfsq = 0.5 * f * f; var hi = f - hfsq; - u = @bitCast(u32, hi); + u = @as(u32, @bitCast(hi)); u &= 0xFFFFF000; - hi = @bitCast(f32, u); + hi = @as(f32, @bitCast(u)); const lo = f - hi - hfsq + s * (hfsq + R); - const dk = @floatFromInt(f32, k); + const dk = @as(f32, @floatFromInt(k)); return dk * log10_2lo + (lo + hi) * ivln10lo + lo * ivln10hi + hi * ivln10hi + dk * log10_2hi; } @@ -105,8 +105,8 @@ pub fn log10(x_: f64) callconv(.C) f64 { const Lg7: f64 = 1.479819860511658591e-01; var x = x_; - var ix = @bitCast(u64, x); - var hx = @intCast(u32, ix >> 32); + var ix = @as(u64, @bitCast(x)); + var hx = @as(u32, @intCast(ix >> 32)); var k: i32 = 0; if (hx < 0x00100000 or hx >> 31 != 0) { @@ -122,7 +122,7 @@ pub fn log10(x_: f64) callconv(.C) f64 { // subnormal, scale x k -= 54; x *= 0x1.0p54; - hx = @intCast(u32, @bitCast(u64, x) >> 32); + hx = @as(u32, @intCast(@as(u64, @bitCast(x)) >> 32)); } else if (hx >= 0x7FF00000) { return x; } else if (hx == 0x3FF00000 and ix << 32 == 0) { @@ -131,10 +131,10 @@ pub fn log10(x_: f64) callconv(.C) f64 { // x into [sqrt(2) / 2, sqrt(2)] hx += 0x3FF00000 - 0x3FE6A09E; - k += @intCast(i32, hx >> 20) - 0x3FF; + k += @as(i32, @intCast(hx >> 20)) - 0x3FF; hx = (hx & 0x000FFFFF) + 0x3FE6A09E; ix = (@as(u64, hx) << 
32) | (ix & 0xFFFFFFFF); - x = @bitCast(f64, ix); + x = @as(f64, @bitCast(ix)); const f = x - 1.0; const hfsq = 0.5 * f * f; @@ -147,14 +147,14 @@ pub fn log10(x_: f64) callconv(.C) f64 { // hi + lo = f - hfsq + s * (hfsq + R) ~ log(1 + f) var hi = f - hfsq; - var hii = @bitCast(u64, hi); + var hii = @as(u64, @bitCast(hi)); hii &= @as(u64, maxInt(u64)) << 32; - hi = @bitCast(f64, hii); + hi = @as(f64, @bitCast(hii)); const lo = f - hi - hfsq + s * (hfsq + R); // val_hi + val_lo ~ log10(1 + f) + k * log10(2) var val_hi = hi * ivln10hi; - const dk = @floatFromInt(f64, k); + const dk = @as(f64, @floatFromInt(k)); const y = dk * log10_2hi; var val_lo = dk * log10_2lo + (lo + hi) * ivln10lo + lo * ivln10hi; @@ -168,12 +168,12 @@ pub fn log10(x_: f64) callconv(.C) f64 { pub fn __log10x(a: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, log10q(a)); + return @as(f80, @floatCast(log10q(a))); } pub fn log10q(a: f128) callconv(.C) f128 { // TODO: more correct implementation - return log10(@floatCast(f64, a)); + return log10(@as(f64, @floatCast(a))); } pub fn log10l(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/log2.zig b/lib/compiler_rt/log2.zig index 29595d07d9..f3d80879d0 100644 --- a/lib/compiler_rt/log2.zig +++ b/lib/compiler_rt/log2.zig @@ -28,7 +28,7 @@ comptime { pub fn __log2h(a: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, log2f(a)); + return @as(f16, @floatCast(log2f(a))); } pub fn log2f(x_: f32) callconv(.C) f32 { @@ -40,7 +40,7 @@ pub fn log2f(x_: f32) callconv(.C) f32 { const Lg4: f32 = 0xf89e26.0p-26; var x = x_; - var u = @bitCast(u32, x); + var u = @as(u32, @bitCast(x)); var ix = u; var k: i32 = 0; @@ -57,7 +57,7 @@ pub fn log2f(x_: f32) callconv(.C) f32 { k -= 25; x *= 0x1.0p25; - ix = @bitCast(u32, x); + ix = @as(u32, @bitCast(x)); } else if (ix >= 0x7F800000) { return x; } else if (ix == 0x3F800000) { @@ -66,9 +66,9 @@ pub fn log2f(x_: f32) callconv(.C) f32 { // x into [sqrt(2) / 2, sqrt(2)] ix += 0x3F800000 - 0x3F3504F3; - k += @intCast(i32, ix >> 23) - 0x7F; + k += @as(i32, @intCast(ix >> 23)) - 0x7F; ix = (ix & 0x007FFFFF) + 0x3F3504F3; - x = @bitCast(f32, ix); + x = @as(f32, @bitCast(ix)); const f = x - 1.0; const s = f / (2.0 + f); @@ -80,11 +80,11 @@ pub fn log2f(x_: f32) callconv(.C) f32 { const hfsq = 0.5 * f * f; var hi = f - hfsq; - u = @bitCast(u32, hi); + u = @as(u32, @bitCast(hi)); u &= 0xFFFFF000; - hi = @bitCast(f32, u); + hi = @as(f32, @bitCast(u)); const lo = f - hi - hfsq + s * (hfsq + R); - return (lo + hi) * ivln2lo + lo * ivln2hi + hi * ivln2hi + @floatFromInt(f32, k); + return (lo + hi) * ivln2lo + lo * ivln2hi + hi * ivln2hi + @as(f32, @floatFromInt(k)); } pub fn log2(x_: f64) callconv(.C) f64 { @@ -99,8 +99,8 @@ pub fn log2(x_: f64) callconv(.C) f64 { const Lg7: f64 = 1.479819860511658591e-01; var x = x_; - var ix = @bitCast(u64, x); - var hx = @intCast(u32, ix >> 32); + var ix = @as(u64, @bitCast(x)); + var hx = @as(u32, @intCast(ix >> 32)); var k: i32 = 0; if (hx < 0x00100000 or hx >> 31 != 0) { @@ -116,7 +116,7 @@ pub fn log2(x_: f64) callconv(.C) f64 { // subnormal, scale x k -= 54; x *= 0x1.0p54; - hx = @intCast(u32, @bitCast(u64, x) >> 32); + hx = @as(u32, @intCast(@as(u64, @bitCast(x)) >> 32)); } else if (hx >= 0x7FF00000) { return x; } else if (hx == 0x3FF00000 and ix << 32 == 0) { @@ -125,10 +125,10 @@ pub fn log2(x_: f64) callconv(.C) f64 { // x into [sqrt(2) / 2, sqrt(2)] hx += 0x3FF00000 - 0x3FE6A09E; - k += @intCast(i32, hx >> 20) - 
0x3FF; + k += @as(i32, @intCast(hx >> 20)) - 0x3FF; hx = (hx & 0x000FFFFF) + 0x3FE6A09E; ix = (@as(u64, hx) << 32) | (ix & 0xFFFFFFFF); - x = @bitCast(f64, ix); + x = @as(f64, @bitCast(ix)); const f = x - 1.0; const hfsq = 0.5 * f * f; @@ -141,16 +141,16 @@ pub fn log2(x_: f64) callconv(.C) f64 { // hi + lo = f - hfsq + s * (hfsq + R) ~ log(1 + f) var hi = f - hfsq; - var hii = @bitCast(u64, hi); + var hii = @as(u64, @bitCast(hi)); hii &= @as(u64, maxInt(u64)) << 32; - hi = @bitCast(f64, hii); + hi = @as(f64, @bitCast(hii)); const lo = f - hi - hfsq + s * (hfsq + R); var val_hi = hi * ivln2hi; var val_lo = (lo + hi) * ivln2lo + lo * ivln2hi; // spadd(val_hi, val_lo, y) - const y = @floatFromInt(f64, k); + const y = @as(f64, @floatFromInt(k)); const ww = y + val_hi; val_lo += (y - ww) + val_hi; val_hi = ww; @@ -160,12 +160,12 @@ pub fn log2(x_: f64) callconv(.C) f64 { pub fn __log2x(a: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, log2q(a)); + return @as(f80, @floatCast(log2q(a))); } pub fn log2q(a: f128) callconv(.C) f128 { // TODO: more correct implementation - return log2(@floatCast(f64, a)); + return log2(@as(f64, @floatCast(a))); } pub fn log2l(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/modti3.zig b/lib/compiler_rt/modti3.zig index ef02a697bc..97b005481b 100644 --- a/lib/compiler_rt/modti3.zig +++ b/lib/compiler_rt/modti3.zig @@ -24,7 +24,7 @@ pub fn __modti3(a: i128, b: i128) callconv(.C) i128 { const v2u64 = @Vector(2, u64); fn __modti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 { - return @bitCast(v2u64, mod(@bitCast(i128, a), @bitCast(i128, b))); + return @as(v2u64, @bitCast(mod(@as(i128, @bitCast(a)), @as(i128, @bitCast(b))))); } inline fn mod(a: i128, b: i128) i128 { @@ -35,8 +35,8 @@ inline fn mod(a: i128, b: i128) i128 { const bn = (b ^ s_b) -% s_b; // negate if s == -1 var r: u128 = undefined; - _ = udivmod(u128, @bitCast(u128, an), @bitCast(u128, bn), &r); - return (@bitCast(i128, r) ^ s_a) -% s_a; // negate if s == -1 + _ = udivmod(u128, @as(u128, @bitCast(an)), @as(u128, @bitCast(bn)), &r); + return (@as(i128, @bitCast(r)) ^ s_a) -% s_a; // negate if s == -1 } test { diff --git a/lib/compiler_rt/modti3_test.zig b/lib/compiler_rt/modti3_test.zig index c7cee57f8b..cad78f68bf 100644 --- a/lib/compiler_rt/modti3_test.zig +++ b/lib/compiler_rt/modti3_test.zig @@ -33,5 +33,5 @@ fn make_ti(high: u64, low: u64) i128 { var result: u128 = high; result <<= 64; result |= low; - return @bitCast(i128, result); + return @as(i128, @bitCast(result)); } diff --git a/lib/compiler_rt/mulXi3.zig b/lib/compiler_rt/mulXi3.zig index 3999681034..be3a444ce6 100644 --- a/lib/compiler_rt/mulXi3.zig +++ b/lib/compiler_rt/mulXi3.zig @@ -21,8 +21,8 @@ comptime { } pub fn __mulsi3(a: i32, b: i32) callconv(.C) i32 { - var ua = @bitCast(u32, a); - var ub = @bitCast(u32, b); + var ua = @as(u32, @bitCast(a)); + var ub = @as(u32, @bitCast(b)); var r: u32 = 0; while (ua > 0) { @@ -31,7 +31,7 @@ pub fn __mulsi3(a: i32, b: i32) callconv(.C) i32 { ub <<= 1; } - return @bitCast(i32, r); + return @as(i32, @bitCast(r)); } pub fn __muldi3(a: i64, b: i64) callconv(.C) i64 { @@ -93,7 +93,7 @@ pub fn __multi3(a: i128, b: i128) callconv(.C) i128 { const v2u64 = @Vector(2, u64); fn __multi3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 { - return @bitCast(v2u64, mulX(i128, @bitCast(i128, a), @bitCast(i128, b))); + return @as(v2u64, @bitCast(mulX(i128, @as(i128, @bitCast(a)), @as(i128, @bitCast(b))))); } test { diff --git 
a/lib/compiler_rt/mulXi3_test.zig b/lib/compiler_rt/mulXi3_test.zig index 128f428af2..3a360098c4 100644 --- a/lib/compiler_rt/mulXi3_test.zig +++ b/lib/compiler_rt/mulXi3_test.zig @@ -46,14 +46,14 @@ test "mulsi3" { try test_one_mulsi3(-46340, 46340, -2147395600); try test_one_mulsi3(46340, -46340, -2147395600); try test_one_mulsi3(-46340, -46340, 2147395600); - try test_one_mulsi3(4194303, 8192, @truncate(i32, 34359730176)); - try test_one_mulsi3(-4194303, 8192, @truncate(i32, -34359730176)); - try test_one_mulsi3(4194303, -8192, @truncate(i32, -34359730176)); - try test_one_mulsi3(-4194303, -8192, @truncate(i32, 34359730176)); - try test_one_mulsi3(8192, 4194303, @truncate(i32, 34359730176)); - try test_one_mulsi3(-8192, 4194303, @truncate(i32, -34359730176)); - try test_one_mulsi3(8192, -4194303, @truncate(i32, -34359730176)); - try test_one_mulsi3(-8192, -4194303, @truncate(i32, 34359730176)); + try test_one_mulsi3(4194303, 8192, @as(i32, @truncate(34359730176))); + try test_one_mulsi3(-4194303, 8192, @as(i32, @truncate(-34359730176))); + try test_one_mulsi3(4194303, -8192, @as(i32, @truncate(-34359730176))); + try test_one_mulsi3(-4194303, -8192, @as(i32, @truncate(34359730176))); + try test_one_mulsi3(8192, 4194303, @as(i32, @truncate(34359730176))); + try test_one_mulsi3(-8192, 4194303, @as(i32, @truncate(-34359730176))); + try test_one_mulsi3(8192, -4194303, @as(i32, @truncate(-34359730176))); + try test_one_mulsi3(-8192, -4194303, @as(i32, @truncate(34359730176))); } test "muldi3" { diff --git a/lib/compiler_rt/mulf3.zig b/lib/compiler_rt/mulf3.zig index 9652782a49..a0320333ad 100644 --- a/lib/compiler_rt/mulf3.zig +++ b/lib/compiler_rt/mulf3.zig @@ -28,53 +28,53 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T { const significandMask = (@as(Z, 1) << significandBits) - 1; const absMask = signBit - 1; - const qnanRep = @bitCast(Z, math.nan(T)) | quietBit; - const infRep = @bitCast(Z, math.inf(T)); - const minNormalRep = @bitCast(Z, math.floatMin(T)); + const qnanRep = @as(Z, @bitCast(math.nan(T))) | quietBit; + const infRep = @as(Z, @bitCast(math.inf(T))); + const minNormalRep = @as(Z, @bitCast(math.floatMin(T))); const ZExp = if (typeWidth >= 32) u32 else Z; - const aExponent = @truncate(ZExp, (@bitCast(Z, a) >> significandBits) & maxExponent); - const bExponent = @truncate(ZExp, (@bitCast(Z, b) >> significandBits) & maxExponent); - const productSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit; + const aExponent = @as(ZExp, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent)); + const bExponent = @as(ZExp, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent)); + const productSign: Z = (@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit; - var aSignificand: ZSignificand = @intCast(ZSignificand, @bitCast(Z, a) & significandMask); - var bSignificand: ZSignificand = @intCast(ZSignificand, @bitCast(Z, b) & significandMask); + var aSignificand: ZSignificand = @as(ZSignificand, @intCast(@as(Z, @bitCast(a)) & significandMask)); + var bSignificand: ZSignificand = @as(ZSignificand, @intCast(@as(Z, @bitCast(b)) & significandMask)); var scale: i32 = 0; // Detect if a or b is zero, denormal, infinity, or NaN. 
if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) { - const aAbs: Z = @bitCast(Z, a) & absMask; - const bAbs: Z = @bitCast(Z, b) & absMask; + const aAbs: Z = @as(Z, @bitCast(a)) & absMask; + const bAbs: Z = @as(Z, @bitCast(b)) & absMask; // NaN * anything = qNaN - if (aAbs > infRep) return @bitCast(T, @bitCast(Z, a) | quietBit); + if (aAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(a)) | quietBit)); // anything * NaN = qNaN - if (bAbs > infRep) return @bitCast(T, @bitCast(Z, b) | quietBit); + if (bAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(b)) | quietBit)); if (aAbs == infRep) { // infinity * non-zero = +/- infinity if (bAbs != 0) { - return @bitCast(T, aAbs | productSign); + return @as(T, @bitCast(aAbs | productSign)); } else { // infinity * zero = NaN - return @bitCast(T, qnanRep); + return @as(T, @bitCast(qnanRep)); } } if (bAbs == infRep) { //? non-zero * infinity = +/- infinity if (aAbs != 0) { - return @bitCast(T, bAbs | productSign); + return @as(T, @bitCast(bAbs | productSign)); } else { // zero * infinity = NaN - return @bitCast(T, qnanRep); + return @as(T, @bitCast(qnanRep)); } } // zero * anything = +/- zero - if (aAbs == 0) return @bitCast(T, productSign); + if (aAbs == 0) return @as(T, @bitCast(productSign)); // anything * zero = +/- zero - if (bAbs == 0) return @bitCast(T, productSign); + if (bAbs == 0) return @as(T, @bitCast(productSign)); // one or both of a or b is denormal, the other (if applicable) is a // normal number. Renormalize one or both of a and b, and set scale to @@ -99,7 +99,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T { const left_align_shift = ZSignificandBits - fractionalBits - 1; common.wideMultiply(ZSignificand, aSignificand, bSignificand << left_align_shift, &productHi, &productLo); - var productExponent: i32 = @intCast(i32, aExponent + bExponent) - exponentBias + scale; + var productExponent: i32 = @as(i32, @intCast(aExponent + bExponent)) - exponentBias + scale; // Normalize the significand, adjust exponent if needed. if ((productHi & integerBit) != 0) { @@ -110,7 +110,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T { } // If we have overflowed the type, return +/- infinity. - if (productExponent >= maxExponent) return @bitCast(T, infRep | productSign); + if (productExponent >= maxExponent) return @as(T, @bitCast(infRep | productSign)); var result: Z = undefined; if (productExponent <= 0) { @@ -120,8 +120,8 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T { // a zero of the appropriate sign. Mathematically there is no need to // handle this case separately, but we make it a special case to // simplify the shift logic. - const shift: u32 = @truncate(u32, @as(Z, 1) -% @bitCast(u32, productExponent)); - if (shift >= ZSignificandBits) return @bitCast(T, productSign); + const shift: u32 = @as(u32, @truncate(@as(Z, 1) -% @as(u32, @bitCast(productExponent)))); + if (shift >= ZSignificandBits) return @as(T, @bitCast(productSign)); // Otherwise, shift the significand of the result so that the round // bit is the high bit of productLo. @@ -135,7 +135,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T { } else { // Result is normal before rounding; insert the exponent. result = productHi & significandMask; - result |= @intCast(Z, productExponent) << significandBits; + result |= @as(Z, @intCast(productExponent)) << significandBits; } // Final rounding. 
The final result may overflow to infinity, or underflow @@ -156,7 +156,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T { // Insert the sign of the result: result |= productSign; - return @bitCast(T, result); + return @as(T, @bitCast(result)); } /// Returns `true` if the right shift is inexact (i.e. any bit shifted out is non-zero) @@ -168,12 +168,12 @@ fn wideShrWithTruncation(comptime Z: type, hi: *Z, lo: *Z, count: u32) bool { const S = math.Log2Int(Z); var inexact = false; if (count < typeWidth) { - inexact = (lo.* << @intCast(S, typeWidth -% count)) != 0; - lo.* = (hi.* << @intCast(S, typeWidth -% count)) | (lo.* >> @intCast(S, count)); - hi.* = hi.* >> @intCast(S, count); + inexact = (lo.* << @as(S, @intCast(typeWidth -% count))) != 0; + lo.* = (hi.* << @as(S, @intCast(typeWidth -% count))) | (lo.* >> @as(S, @intCast(count))); + hi.* = hi.* >> @as(S, @intCast(count)); } else if (count < 2 * typeWidth) { - inexact = (hi.* << @intCast(S, 2 * typeWidth -% count) | lo.*) != 0; - lo.* = hi.* >> @intCast(S, count -% typeWidth); + inexact = (hi.* << @as(S, @intCast(2 * typeWidth -% count)) | lo.*) != 0; + lo.* = hi.* >> @as(S, @intCast(count -% typeWidth)); hi.* = 0; } else { inexact = (hi.* | lo.*) != 0; @@ -188,7 +188,7 @@ fn normalize(comptime T: type, significand: *PowerOfTwoSignificandZ(T)) i32 { const integerBit = @as(Z, 1) << math.floatFractionalBits(T); const shift = @clz(significand.*) - @clz(integerBit); - significand.* <<= @intCast(math.Log2Int(Z), shift); + significand.* <<= @as(math.Log2Int(Z), @intCast(shift)); return @as(i32, 1) - shift; } diff --git a/lib/compiler_rt/mulf3_test.zig b/lib/compiler_rt/mulf3_test.zig index 203745e632..afaf6cb219 100644 --- a/lib/compiler_rt/mulf3_test.zig +++ b/lib/compiler_rt/mulf3_test.zig @@ -4,8 +4,8 @@ const std = @import("std"); const math = std.math; -const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64); -const inf128 = @bitCast(f128, @as(u128, 0x7fff000000000000) << 64); +const qnan128 = @as(f128, @bitCast(@as(u128, 0x7fff800000000000) << 64)); +const inf128 = @as(f128, @bitCast(@as(u128, 0x7fff000000000000) << 64)); const __multf3 = @import("multf3.zig").__multf3; const __mulxf3 = @import("mulxf3.zig").__mulxf3; @@ -16,9 +16,9 @@ const __mulsf3 = @import("mulsf3.zig").__mulsf3; // use two 64-bit integers intead of one 128-bit integer // because 128-bit integer constant can't be assigned directly fn compareResultLD(result: f128, expectedHi: u64, expectedLo: u64) bool { - const rep = @bitCast(u128, result); - const hi = @intCast(u64, rep >> 64); - const lo = @truncate(u64, rep); + const rep = @as(u128, @bitCast(result)); + const hi = @as(u64, @intCast(rep >> 64)); + const lo = @as(u64, @truncate(rep)); if (hi == expectedHi and lo == expectedLo) { return true; @@ -45,7 +45,7 @@ fn test__multf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void { fn makeNaN128(rand: u64) f128 { const int_result = @as(u128, 0x7fff000000000000 | (rand & 0xffffffffffff)) << 64; - const float_result = @bitCast(f128, int_result); + const float_result = @as(f128, @bitCast(int_result)); return float_result; } test "multf3" { @@ -60,15 +60,15 @@ test "multf3" { // any * any try test__multf3( - @bitCast(f128, @as(u128, 0x40042eab345678439abcdefea5678234)), - @bitCast(f128, @as(u128, 0x3ffeedcb34a235253948765432134675)), + @as(f128, @bitCast(@as(u128, 0x40042eab345678439abcdefea5678234))), + @as(f128, @bitCast(@as(u128, 0x3ffeedcb34a235253948765432134675))), 0x400423e7f9e3c9fc, 0xd906c2c2a85777c4, ); try test__multf3( - @bitCast(f128, 
@as(u128, 0x3fcd353e45674d89abacc3a2ebf3ff50)), - @bitCast(f128, @as(u128, 0x3ff6ed8764648369535adf4be3214568)), + @as(f128, @bitCast(@as(u128, 0x3fcd353e45674d89abacc3a2ebf3ff50))), + @as(f128, @bitCast(@as(u128, 0x3ff6ed8764648369535adf4be3214568))), 0x3fc52a163c6223fc, 0xc94c4bf0430768b4, ); @@ -81,8 +81,8 @@ test "multf3" { ); try test__multf3( - @bitCast(f128, @as(u128, 0x3f154356473c82a9fabf2d22ace345df)), - @bitCast(f128, @as(u128, 0x3e38eda98765476743ab21da23d45679)), + @as(f128, @bitCast(@as(u128, 0x3f154356473c82a9fabf2d22ace345df))), + @as(f128, @bitCast(@as(u128, 0x3e38eda98765476743ab21da23d45679))), 0x3d4f37c1a3137cae, 0xfc6807048bc2836a, ); @@ -108,16 +108,16 @@ test "multf3" { try test__multf3(2.0, math.floatTrueMin(f128), 0x0000_0000_0000_0000, 0x0000_0000_0000_0002); } -const qnan80 = @bitCast(f80, @bitCast(u80, math.nan(f80)) | (1 << (math.floatFractionalBits(f80) - 1))); +const qnan80 = @as(f80, @bitCast(@as(u80, @bitCast(math.nan(f80))) | (1 << (math.floatFractionalBits(f80) - 1)))); fn test__mulxf3(a: f80, b: f80, expected: u80) !void { const x = __mulxf3(a, b); - const rep = @bitCast(u80, x); + const rep = @as(u80, @bitCast(x)); if (rep == expected) return; - if (math.isNan(@bitCast(f80, expected)) and math.isNan(x)) + if (math.isNan(@as(f80, @bitCast(expected))) and math.isNan(x)) return; // We don't currently test NaN payload propagation return error.TestFailed; @@ -125,33 +125,33 @@ fn test__mulxf3(a: f80, b: f80, expected: u80) !void { test "mulxf3" { // NaN * any = NaN - try test__mulxf3(qnan80, 0x1.23456789abcdefp+5, @bitCast(u80, qnan80)); - try test__mulxf3(@bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), 0x1.23456789abcdefp+5, @bitCast(u80, qnan80)); + try test__mulxf3(qnan80, 0x1.23456789abcdefp+5, @as(u80, @bitCast(qnan80))); + try test__mulxf3(@as(f80, @bitCast(@as(u80, 0x7fff_8000_8000_3000_0000))), 0x1.23456789abcdefp+5, @as(u80, @bitCast(qnan80))); // any * NaN = NaN - try test__mulxf3(0x1.23456789abcdefp+5, qnan80, @bitCast(u80, qnan80)); - try test__mulxf3(0x1.23456789abcdefp+5, @bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), @bitCast(u80, qnan80)); + try test__mulxf3(0x1.23456789abcdefp+5, qnan80, @as(u80, @bitCast(qnan80))); + try test__mulxf3(0x1.23456789abcdefp+5, @as(f80, @bitCast(@as(u80, 0x7fff_8000_8000_3000_0000))), @as(u80, @bitCast(qnan80))); // NaN * inf = NaN - try test__mulxf3(qnan80, math.inf(f80), @bitCast(u80, qnan80)); + try test__mulxf3(qnan80, math.inf(f80), @as(u80, @bitCast(qnan80))); // inf * NaN = NaN - try test__mulxf3(math.inf(f80), qnan80, @bitCast(u80, qnan80)); + try test__mulxf3(math.inf(f80), qnan80, @as(u80, @bitCast(qnan80))); // inf * inf = inf - try test__mulxf3(math.inf(f80), math.inf(f80), @bitCast(u80, math.inf(f80))); + try test__mulxf3(math.inf(f80), math.inf(f80), @as(u80, @bitCast(math.inf(f80)))); // inf * -inf = -inf - try test__mulxf3(math.inf(f80), -math.inf(f80), @bitCast(u80, -math.inf(f80))); + try test__mulxf3(math.inf(f80), -math.inf(f80), @as(u80, @bitCast(-math.inf(f80)))); // -inf + inf = -inf - try test__mulxf3(-math.inf(f80), math.inf(f80), @bitCast(u80, -math.inf(f80))); + try test__mulxf3(-math.inf(f80), math.inf(f80), @as(u80, @bitCast(-math.inf(f80)))); // inf * any = inf - try test__mulxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @bitCast(u80, math.inf(f80))); + try test__mulxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @as(u80, @bitCast(math.inf(f80)))); // any * inf = inf - try test__mulxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), 
@bitCast(u80, math.inf(f80))); + try test__mulxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @as(u80, @bitCast(math.inf(f80)))); // any * any try test__mulxf3(0x1.0p+0, 0x1.dcba987654321p+5, 0x4004_ee5d_4c3b_2a19_0800); diff --git a/lib/compiler_rt/mulo.zig b/lib/compiler_rt/mulo.zig index 13e58a7800..d40554da10 100644 --- a/lib/compiler_rt/mulo.zig +++ b/lib/compiler_rt/mulo.zig @@ -45,7 +45,7 @@ inline fn muloXi4_genericFast(comptime ST: type, a: ST, b: ST, overflow: *c_int) //invariant: -2^{bitwidth(EST)} < res < 2^{bitwidth(EST)-1} if (res < min or max < res) overflow.* = 1; - return @truncate(ST, res); + return @as(ST, @truncate(res)); } pub fn __mulosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 { diff --git a/lib/compiler_rt/mulodi4_test.zig b/lib/compiler_rt/mulodi4_test.zig index 3944f62ede..37530b1060 100644 --- a/lib/compiler_rt/mulodi4_test.zig +++ b/lib/compiler_rt/mulodi4_test.zig @@ -54,34 +54,34 @@ test "mulodi4" { try test__mulodi4(0x7FFFFFFFFFFFFFFF, -2, 2, 1); try test__mulodi4(-2, 0x7FFFFFFFFFFFFFFF, 2, 1); - try test__mulodi4(0x7FFFFFFFFFFFFFFF, -1, @bitCast(i64, @as(u64, 0x8000000000000001)), 0); - try test__mulodi4(-1, 0x7FFFFFFFFFFFFFFF, @bitCast(i64, @as(u64, 0x8000000000000001)), 0); + try test__mulodi4(0x7FFFFFFFFFFFFFFF, -1, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0); + try test__mulodi4(-1, 0x7FFFFFFFFFFFFFFF, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0); try test__mulodi4(0x7FFFFFFFFFFFFFFF, 0, 0, 0); try test__mulodi4(0, 0x7FFFFFFFFFFFFFFF, 0, 0); try test__mulodi4(0x7FFFFFFFFFFFFFFF, 1, 0x7FFFFFFFFFFFFFFF, 0); try test__mulodi4(1, 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF, 0); - try test__mulodi4(0x7FFFFFFFFFFFFFFF, 2, @bitCast(i64, @as(u64, 0x8000000000000001)), 1); - try test__mulodi4(2, 0x7FFFFFFFFFFFFFFF, @bitCast(i64, @as(u64, 0x8000000000000001)), 1); + try test__mulodi4(0x7FFFFFFFFFFFFFFF, 2, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1); + try test__mulodi4(2, 0x7FFFFFFFFFFFFFFF, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), -2, @bitCast(i64, @as(u64, 0x8000000000000000)), 1); - try test__mulodi4(-2, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), -1, @bitCast(i64, @as(u64, 0x8000000000000000)), 1); - try test__mulodi4(-1, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), 0, 0, 0); - try test__mulodi4(0, @bitCast(i64, @as(u64, 0x8000000000000000)), 0, 0); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), 1, @bitCast(i64, @as(u64, 0x8000000000000000)), 0); - try test__mulodi4(1, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 0); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), 2, @bitCast(i64, @as(u64, 0x8000000000000000)), 1); - try test__mulodi4(2, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), -2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); + try test__mulodi4(-2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), -1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); + try test__mulodi4(-1, 
@as(i64, @bitCast(@as(u64, 0x8000000000000000))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), 0, 0, 0); + try test__mulodi4(0, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 0, 0); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 0); + try test__mulodi4(1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 0); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), 2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); + try test__mulodi4(2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), -2, @bitCast(i64, @as(u64, 0x8000000000000001)), 1); - try test__mulodi4(-2, @bitCast(i64, @as(u64, 0x8000000000000001)), @bitCast(i64, @as(u64, 0x8000000000000001)), 1); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), -1, 0x7FFFFFFFFFFFFFFF, 0); - try test__mulodi4(-1, @bitCast(i64, @as(u64, 0x8000000000000001)), 0x7FFFFFFFFFFFFFFF, 0); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), 0, 0, 0); - try test__mulodi4(0, @bitCast(i64, @as(u64, 0x8000000000000001)), 0, 0); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), 1, @bitCast(i64, @as(u64, 0x8000000000000001)), 0); - try test__mulodi4(1, @bitCast(i64, @as(u64, 0x8000000000000001)), @bitCast(i64, @as(u64, 0x8000000000000001)), 0); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), 2, @bitCast(i64, @as(u64, 0x8000000000000000)), 1); - try test__mulodi4(2, @bitCast(i64, @as(u64, 0x8000000000000001)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), -2, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1); + try test__mulodi4(-2, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), -1, 0x7FFFFFFFFFFFFFFF, 0); + try test__mulodi4(-1, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0x7FFFFFFFFFFFFFFF, 0); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0, 0, 0); + try test__mulodi4(0, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0, 0); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0); + try test__mulodi4(1, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), 2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); + try test__mulodi4(2, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); } diff --git a/lib/compiler_rt/mulosi4_test.zig b/lib/compiler_rt/mulosi4_test.zig index 523faa490f..a6e8178129 100644 --- a/lib/compiler_rt/mulosi4_test.zig +++ b/lib/compiler_rt/mulosi4_test.zig @@ -37,36 +37,36 @@ test "mulosi4" { try test__mulosi4(1, -0x1234567, -0x1234567, 0); try test__mulosi4(-0x1234567, 1, -0x1234567, 0); - try test__mulosi4(0x7FFFFFFF, -2, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__mulosi4(-2, 0x7FFFFFFF, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__mulosi4(0x7FFFFFFF, -1, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__mulosi4(-1, 0x7FFFFFFF, @bitCast(i32, @as(u32, 
0x80000001)), 0); + try test__mulosi4(0x7FFFFFFF, -2, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__mulosi4(-2, 0x7FFFFFFF, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__mulosi4(0x7FFFFFFF, -1, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__mulosi4(-1, 0x7FFFFFFF, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); try test__mulosi4(0x7FFFFFFF, 0, 0, 0); try test__mulosi4(0, 0x7FFFFFFF, 0, 0); try test__mulosi4(0x7FFFFFFF, 1, 0x7FFFFFFF, 0); try test__mulosi4(1, 0x7FFFFFFF, 0x7FFFFFFF, 0); - try test__mulosi4(0x7FFFFFFF, 2, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__mulosi4(2, 0x7FFFFFFF, @bitCast(i32, @as(u32, 0x80000001)), 1); + try test__mulosi4(0x7FFFFFFF, 2, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__mulosi4(2, 0x7FFFFFFF, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), -2, @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__mulosi4(-2, @bitCast(i32, @as(u32, 0x80000000)), @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), -1, @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__mulosi4(-1, @bitCast(i32, @as(u32, 0x80000000)), @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), 0, 0, 0); - try test__mulosi4(0, @bitCast(i32, @as(u32, 0x80000000)), 0, 0); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), 1, @bitCast(i32, @as(u32, 0x80000000)), 0); - try test__mulosi4(1, @bitCast(i32, @as(u32, 0x80000000)), @bitCast(i32, @as(u32, 0x80000000)), 0); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), 2, @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__mulosi4(2, @bitCast(i32, @as(u32, 0x80000000)), @bitCast(i32, @as(u32, 0x80000000)), 1); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), -2, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); + try test__mulosi4(-2, @as(i32, @bitCast(@as(u32, 0x80000000))), @as(i32, @bitCast(@as(u32, 0x80000000))), 1); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), -1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); + try test__mulosi4(-1, @as(i32, @bitCast(@as(u32, 0x80000000))), @as(i32, @bitCast(@as(u32, 0x80000000))), 1); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), 0, 0, 0); + try test__mulosi4(0, @as(i32, @bitCast(@as(u32, 0x80000000))), 0, 0); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), 1, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); + try test__mulosi4(1, @as(i32, @bitCast(@as(u32, 0x80000000))), @as(i32, @bitCast(@as(u32, 0x80000000))), 0); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), 2, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); + try test__mulosi4(2, @as(i32, @bitCast(@as(u32, 0x80000000))), @as(i32, @bitCast(@as(u32, 0x80000000))), 1); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000001)), -2, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__mulosi4(-2, @bitCast(i32, @as(u32, 0x80000001)), @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000001)), -1, 0x7FFFFFFF, 0); - try test__mulosi4(-1, @bitCast(i32, @as(u32, 0x80000001)), 0x7FFFFFFF, 0); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000001)), 0, 0, 0); - try test__mulosi4(0, @bitCast(i32, @as(u32, 0x80000001)), 0, 0); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000001)), 1, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__mulosi4(1, @bitCast(i32, @as(u32, 0x80000001)), @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__mulosi4(@bitCast(i32, 
@as(u32, 0x80000001)), 2, @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__mulosi4(2, @bitCast(i32, @as(u32, 0x80000001)), @bitCast(i32, @as(u32, 0x80000000)), 1); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), -2, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__mulosi4(-2, @as(i32, @bitCast(@as(u32, 0x80000001))), @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), -1, 0x7FFFFFFF, 0); + try test__mulosi4(-1, @as(i32, @bitCast(@as(u32, 0x80000001))), 0x7FFFFFFF, 0); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), 0, 0, 0); + try test__mulosi4(0, @as(i32, @bitCast(@as(u32, 0x80000001))), 0, 0); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), 1, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__mulosi4(1, @as(i32, @bitCast(@as(u32, 0x80000001))), @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), 2, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); + try test__mulosi4(2, @as(i32, @bitCast(@as(u32, 0x80000001))), @as(i32, @bitCast(@as(u32, 0x80000000))), 1); } diff --git a/lib/compiler_rt/muloti4_test.zig b/lib/compiler_rt/muloti4_test.zig index 6d204ff785..0b5413dba3 100644 --- a/lib/compiler_rt/muloti4_test.zig +++ b/lib/compiler_rt/muloti4_test.zig @@ -52,38 +52,38 @@ test "muloti4" { try test__muloti4(2097152, -4398046511103, -9223372036852678656, 0); try test__muloti4(-2097152, -4398046511103, 9223372036852678656, 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x00000000000000B504F333F9DE5BE000)), @bitCast(i128, @as(u128, 0x000000000000000000B504F333F9DE5B)), @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFF328DF915DA296E8A000)), 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), -2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1); - try test__muloti4(-2, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x00000000000000B504F333F9DE5BE000))), @as(i128, @bitCast(@as(u128, 0x000000000000000000B504F333F9DE5B))), @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFF328DF915DA296E8A000))), 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), -2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1); + try test__muloti4(-2, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1); - try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), -1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0); - try test__muloti4(-1, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0, 0, 0); - try test__muloti4(0, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0, 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 1, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0); - try test__muloti4(1, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1); - try 
test__muloti4(2, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), -1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0); + try test__muloti4(-1, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0, 0, 0); + try test__muloti4(0, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0, 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 1, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0); + try test__muloti4(1, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1); + try test__muloti4(2, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), -2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); - try test__muloti4(-2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), -1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); - try test__muloti4(-1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 0, 0, 0); - try test__muloti4(0, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 0, 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 0); - try test__muloti4(1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); - try test__muloti4(2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), -2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); + try test__muloti4(-2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), -1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); + try test__muloti4(-1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 0, 0, 0); + try test__muloti4(0, @as(i128, @bitCast(@as(u128, 
0x80000000000000000000000000000000))), 0, 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 0); + try test__muloti4(1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); + try test__muloti4(2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), -2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1); - try test__muloti4(-2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), -1, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0); - try test__muloti4(-1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0, 0, 0); - try test__muloti4(0, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0, 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0); - try test__muloti4(1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); - try test__muloti4(2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), -2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1); + try test__muloti4(-2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), -1, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0); + try test__muloti4(-1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0, 0, 0); + try test__muloti4(0, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0, 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0); + try test__muloti4(1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); + try test__muloti4(2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), @as(i128, 
@bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); } diff --git a/lib/compiler_rt/negv.zig b/lib/compiler_rt/negv.zig index 5a26dc65e6..64961255c3 100644 --- a/lib/compiler_rt/negv.zig +++ b/lib/compiler_rt/negv.zig @@ -33,7 +33,7 @@ inline fn negvXi(comptime ST: type, a: ST) ST { else => unreachable, }; const N: UT = @bitSizeOf(ST); - const min: ST = @bitCast(ST, (@as(UT, 1) << (N - 1))); + const min: ST = @as(ST, @bitCast((@as(UT, 1) << (N - 1)))); if (a == min) @panic("compiler_rt negv: overflow"); return -a; diff --git a/lib/compiler_rt/parity.zig b/lib/compiler_rt/parity.zig index ee6abf162e..02050ba6bc 100644 --- a/lib/compiler_rt/parity.zig +++ b/lib/compiler_rt/parity.zig @@ -27,9 +27,9 @@ pub fn __parityti2(a: i128) callconv(.C) i32 { inline fn parityXi2(comptime T: type, a: T) i32 { var x = switch (@bitSizeOf(T)) { - 32 => @bitCast(u32, a), - 64 => @bitCast(u64, a), - 128 => @bitCast(u128, a), + 32 => @as(u32, @bitCast(a)), + 64 => @as(u64, @bitCast(a)), + 128 => @as(u128, @bitCast(a)), else => unreachable, }; // Bit Twiddling Hacks: Compute parity in parallel @@ -39,7 +39,7 @@ inline fn parityXi2(comptime T: type, a: T) i32 { shift = shift >> 1; } x &= 0xf; - return (@intCast(u16, 0x6996) >> @intCast(u4, x)) & 1; // optimization for >>2 and >>1 + return (@as(u16, @intCast(0x6996)) >> @as(u4, @intCast(x))) & 1; // optimization for >>2 and >>1 } test { diff --git a/lib/compiler_rt/paritydi2_test.zig b/lib/compiler_rt/paritydi2_test.zig index 1cf587b1ef..5ae8e2d2e7 100644 --- a/lib/compiler_rt/paritydi2_test.zig +++ b/lib/compiler_rt/paritydi2_test.zig @@ -3,13 +3,13 @@ const parity = @import("parity.zig"); const testing = std.testing; fn paritydi2Naive(a: i64) i32 { - var x = @bitCast(u64, a); + var x = @as(u64, @bitCast(a)); var has_parity: bool = false; while (x > 0) { has_parity = !has_parity; x = x & (x - 1); } - return @intCast(i32, @intFromBool(has_parity)); + return @as(i32, @intCast(@intFromBool(has_parity))); } fn test__paritydi2(a: i64) !void { @@ -22,9 +22,9 @@ test "paritydi2" { try test__paritydi2(0); try test__paritydi2(1); try test__paritydi2(2); - try test__paritydi2(@bitCast(i64, @as(u64, 0xffffffff_fffffffd))); - try test__paritydi2(@bitCast(i64, @as(u64, 0xffffffff_fffffffe))); - try test__paritydi2(@bitCast(i64, @as(u64, 0xffffffff_ffffffff))); + try test__paritydi2(@as(i64, @bitCast(@as(u64, 0xffffffff_fffffffd)))); + try test__paritydi2(@as(i64, @bitCast(@as(u64, 0xffffffff_fffffffe)))); + try test__paritydi2(@as(i64, @bitCast(@as(u64, 0xffffffff_ffffffff)))); const RndGen = std.rand.DefaultPrng; var rnd = RndGen.init(42); diff --git a/lib/compiler_rt/paritysi2_test.zig b/lib/compiler_rt/paritysi2_test.zig index c1bac5eaae..3726170b53 100644 --- a/lib/compiler_rt/paritysi2_test.zig +++ b/lib/compiler_rt/paritysi2_test.zig @@ -3,13 +3,13 @@ const parity = @import("parity.zig"); const testing = std.testing; fn paritysi2Naive(a: i32) i32 { - var x = @bitCast(u32, a); + var x = @as(u32, @bitCast(a)); var has_parity: bool = false; while (x > 0) { has_parity = !has_parity; x = x & (x - 1); } - return @intCast(i32, @intFromBool(has_parity)); + return @as(i32, @intCast(@intFromBool(has_parity))); } fn test__paritysi2(a: i32) !void { @@ -22,9 +22,9 @@ test "paritysi2" { try test__paritysi2(0); try test__paritysi2(1); try test__paritysi2(2); - try test__paritysi2(@bitCast(i32, @as(u32, 0xfffffffd))); - try test__paritysi2(@bitCast(i32, @as(u32, 0xfffffffe))); - try test__paritysi2(@bitCast(i32, @as(u32, 0xffffffff))); + try test__paritysi2(@as(i32, 
@bitCast(@as(u32, 0xfffffffd)))); + try test__paritysi2(@as(i32, @bitCast(@as(u32, 0xfffffffe)))); + try test__paritysi2(@as(i32, @bitCast(@as(u32, 0xffffffff)))); const RndGen = std.rand.DefaultPrng; var rnd = RndGen.init(42); diff --git a/lib/compiler_rt/parityti2_test.zig b/lib/compiler_rt/parityti2_test.zig index 8a869fe718..6f6c2102b1 100644 --- a/lib/compiler_rt/parityti2_test.zig +++ b/lib/compiler_rt/parityti2_test.zig @@ -3,13 +3,13 @@ const parity = @import("parity.zig"); const testing = std.testing; fn parityti2Naive(a: i128) i32 { - var x = @bitCast(u128, a); + var x = @as(u128, @bitCast(a)); var has_parity: bool = false; while (x > 0) { has_parity = !has_parity; x = x & (x - 1); } - return @intCast(i32, @intFromBool(has_parity)); + return @as(i32, @intCast(@intFromBool(has_parity))); } fn test__parityti2(a: i128) !void { @@ -22,9 +22,9 @@ test "parityti2" { try test__parityti2(0); try test__parityti2(1); try test__parityti2(2); - try test__parityti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_fffffffd))); - try test__parityti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_fffffffe))); - try test__parityti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_ffffffff))); + try test__parityti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_fffffffd)))); + try test__parityti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_fffffffe)))); + try test__parityti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_ffffffff)))); const RndGen = std.rand.DefaultPrng; var rnd = RndGen.init(42); diff --git a/lib/compiler_rt/popcount.zig b/lib/compiler_rt/popcount.zig index ddb0b720c7..ab61b0d535 100644 --- a/lib/compiler_rt/popcount.zig +++ b/lib/compiler_rt/popcount.zig @@ -37,7 +37,7 @@ inline fn popcountXi2(comptime ST: type, a: ST) i32 { i128 => u128, else => unreachable, }; - var x = @bitCast(UT, a); + var x = @as(UT, @bitCast(a)); x -= (x >> 1) & (~@as(UT, 0) / 3); // 0x55...55, aggregate duos x = ((x >> 2) & (~@as(UT, 0) / 5)) // 0x33...33, aggregate nibbles + (x & (~@as(UT, 0) / 5)); @@ -46,7 +46,7 @@ inline fn popcountXi2(comptime ST: type, a: ST) i32 { // 8 most significant bits of x + (x<<8) + (x<<16) + .. 
x *%= ~@as(UT, 0) / 255; // 0x01...01 x >>= (@bitSizeOf(ST) - 8); - return @intCast(i32, x); + return @as(i32, @intCast(x)); } test { diff --git a/lib/compiler_rt/popcountdi2_test.zig b/lib/compiler_rt/popcountdi2_test.zig index e02628e636..daf2c1f183 100644 --- a/lib/compiler_rt/popcountdi2_test.zig +++ b/lib/compiler_rt/popcountdi2_test.zig @@ -5,8 +5,8 @@ const testing = std.testing; fn popcountdi2Naive(a: i64) i32 { var x = a; var r: i32 = 0; - while (x != 0) : (x = @bitCast(i64, @bitCast(u64, x) >> 1)) { - r += @intCast(i32, x & 1); + while (x != 0) : (x = @as(i64, @bitCast(@as(u64, @bitCast(x)) >> 1))) { + r += @as(i32, @intCast(x & 1)); } return r; } @@ -21,9 +21,9 @@ test "popcountdi2" { try test__popcountdi2(0); try test__popcountdi2(1); try test__popcountdi2(2); - try test__popcountdi2(@bitCast(i64, @as(u64, 0xffffffff_fffffffd))); - try test__popcountdi2(@bitCast(i64, @as(u64, 0xffffffff_fffffffe))); - try test__popcountdi2(@bitCast(i64, @as(u64, 0xffffffff_ffffffff))); + try test__popcountdi2(@as(i64, @bitCast(@as(u64, 0xffffffff_fffffffd)))); + try test__popcountdi2(@as(i64, @bitCast(@as(u64, 0xffffffff_fffffffe)))); + try test__popcountdi2(@as(i64, @bitCast(@as(u64, 0xffffffff_ffffffff)))); const RndGen = std.rand.DefaultPrng; var rnd = RndGen.init(42); diff --git a/lib/compiler_rt/popcountsi2_test.zig b/lib/compiler_rt/popcountsi2_test.zig index 7606b1a97e..497b62516f 100644 --- a/lib/compiler_rt/popcountsi2_test.zig +++ b/lib/compiler_rt/popcountsi2_test.zig @@ -5,8 +5,8 @@ const testing = std.testing; fn popcountsi2Naive(a: i32) i32 { var x = a; var r: i32 = 0; - while (x != 0) : (x = @bitCast(i32, @bitCast(u32, x) >> 1)) { - r += @intCast(i32, x & 1); + while (x != 0) : (x = @as(i32, @bitCast(@as(u32, @bitCast(x)) >> 1))) { + r += @as(i32, @intCast(x & 1)); } return r; } @@ -21,9 +21,9 @@ test "popcountsi2" { try test__popcountsi2(0); try test__popcountsi2(1); try test__popcountsi2(2); - try test__popcountsi2(@bitCast(i32, @as(u32, 0xfffffffd))); - try test__popcountsi2(@bitCast(i32, @as(u32, 0xfffffffe))); - try test__popcountsi2(@bitCast(i32, @as(u32, 0xffffffff))); + try test__popcountsi2(@as(i32, @bitCast(@as(u32, 0xfffffffd)))); + try test__popcountsi2(@as(i32, @bitCast(@as(u32, 0xfffffffe)))); + try test__popcountsi2(@as(i32, @bitCast(@as(u32, 0xffffffff)))); const RndGen = std.rand.DefaultPrng; var rnd = RndGen.init(42); diff --git a/lib/compiler_rt/popcountti2_test.zig b/lib/compiler_rt/popcountti2_test.zig index fae2beccd4..b873bcd449 100644 --- a/lib/compiler_rt/popcountti2_test.zig +++ b/lib/compiler_rt/popcountti2_test.zig @@ -5,8 +5,8 @@ const testing = std.testing; fn popcountti2Naive(a: i128) i32 { var x = a; var r: i32 = 0; - while (x != 0) : (x = @bitCast(i128, @bitCast(u128, x) >> 1)) { - r += @intCast(i32, x & 1); + while (x != 0) : (x = @as(i128, @bitCast(@as(u128, @bitCast(x)) >> 1))) { + r += @as(i32, @intCast(x & 1)); } return r; } @@ -21,9 +21,9 @@ test "popcountti2" { try test__popcountti2(0); try test__popcountti2(1); try test__popcountti2(2); - try test__popcountti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_fffffffd))); - try test__popcountti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_fffffffe))); - try test__popcountti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_ffffffff))); + try test__popcountti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_fffffffd)))); + try test__popcountti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_fffffffe)))); + try test__popcountti2(@as(i128, 
@bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_ffffffff)))); const RndGen = std.rand.DefaultPrng; var rnd = RndGen.init(42); diff --git a/lib/compiler_rt/powiXf2.zig b/lib/compiler_rt/powiXf2.zig index b0cec3235d..97dc1b77d1 100644 --- a/lib/compiler_rt/powiXf2.zig +++ b/lib/compiler_rt/powiXf2.zig @@ -25,7 +25,7 @@ inline fn powiXf2(comptime FT: type, a: FT, b: i32) FT { const is_recip: bool = b < 0; var r: FT = 1.0; while (true) { - if (@bitCast(u32, x_b) & @as(u32, 1) != 0) { + if (@as(u32, @bitCast(x_b)) & @as(u32, 1) != 0) { r *= x_a; } x_b = @divTrunc(x_b, @as(i32, 2)); diff --git a/lib/compiler_rt/powiXf2_test.zig b/lib/compiler_rt/powiXf2_test.zig index 5f7828c3e3..7014d2a227 100644 --- a/lib/compiler_rt/powiXf2_test.zig +++ b/lib/compiler_rt/powiXf2_test.zig @@ -49,76 +49,76 @@ test "powihf2" { try test__powihf2(0, 2, 0); try test__powihf2(0, 3, 0); try test__powihf2(0, 4, 0); - try test__powihf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powihf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 0); + try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 0); try test__powihf2(-0.0, 1, -0.0); try test__powihf2(-0.0, 2, 0); try test__powihf2(-0.0, 3, -0.0); try test__powihf2(-0.0, 4, 0); - try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0); + try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0); try test__powihf2(1, 1, 1); try test__powihf2(1, 2, 1); try test__powihf2(1, 3, 1); try test__powihf2(1, 4, 1); - try test__powihf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); - try test__powihf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1); + try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1); + try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1); try test__powihf2(inf_f16, 1, inf_f16); try test__powihf2(inf_f16, 2, inf_f16); try test__powihf2(inf_f16, 3, inf_f16); try test__powihf2(inf_f16, 4, inf_f16); - try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f16); - try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f16); + try test__powihf2(inf_f16, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f16); + try test__powihf2(inf_f16, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f16); try test__powihf2(-inf_f16, 1, -inf_f16); try test__powihf2(-inf_f16, 2, inf_f16); try test__powihf2(-inf_f16, 3, -inf_f16); try test__powihf2(-inf_f16, 4, inf_f16); - try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f16); - try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f16); + try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f16); + try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f16); // try test__powihf2(0, -1, inf_f16); try test__powihf2(0, -2, inf_f16); try test__powihf2(0, -3, inf_f16); try test__powihf2(0, -4, inf_f16); - try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f16); // 0 ^ anything = +inf - try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f16); - try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f16); + try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f16); // 0 ^ anything = +inf + try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f16); + try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f16); try 
test__powihf2(-0.0, -1, -inf_f16); try test__powihf2(-0.0, -2, inf_f16); try test__powihf2(-0.0, -3, -inf_f16); try test__powihf2(-0.0, -4, inf_f16); - try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f16); // -0 ^ anything even = +inf - try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f16); // -0 ^ anything odd = -inf - try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f16); + try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f16); // -0 ^ anything even = +inf + try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f16); // -0 ^ anything odd = -inf + try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f16); try test__powihf2(1, -1, 1); try test__powihf2(1, -2, 1); try test__powihf2(1, -3, 1); try test__powihf2(1, -4, 1); - try test__powihf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1); // 1.0 ^ anything = 1 - try test__powihf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__powihf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); + try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1); // 1.0 ^ anything = 1 + try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); try test__powihf2(inf_f16, -1, 0); try test__powihf2(inf_f16, -2, 0); try test__powihf2(inf_f16, -3, 0); try test__powihf2(inf_f16, -4, 0); - try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powihf2(inf_f16, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powihf2(inf_f16, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__powihf2(inf_f16, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); // try test__powihf2(-inf_f16, -1, -0.0); try test__powihf2(-inf_f16, -2, 0); try test__powihf2(-inf_f16, -3, -0.0); try test__powihf2(-inf_f16, -4, 0); - try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0); + try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powihf2(2, 10, 1024.0); try test__powihf2(-2, 10, 1024.0); @@ -158,76 +158,76 @@ test "powisf2" { try test__powisf2(0, 2, 0); try test__powisf2(0, 3, 0); try test__powisf2(0, 4, 0); - try test__powisf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powisf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 0); + try test__powisf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powisf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 0); try test__powisf2(-0.0, 1, -0.0); try test__powisf2(-0.0, 2, 0); try test__powisf2(-0.0, 3, -0.0); try test__powisf2(-0.0, 4, 0); - try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0); + try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0); try test__powisf2(1, 1, 1); try test__powisf2(1, 2, 1); try test__powisf2(1, 3, 1); try test__powisf2(1, 4, 1); - try test__powisf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); - try test__powisf2(1, @bitCast(i32, @as(u32, 
0x7FFFFFFF)), 1); + try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1); + try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1); try test__powisf2(inf_f32, 1, inf_f32); try test__powisf2(inf_f32, 2, inf_f32); try test__powisf2(inf_f32, 3, inf_f32); try test__powisf2(inf_f32, 4, inf_f32); - try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f32); - try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f32); + try test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f32); + try test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f32); try test__powisf2(-inf_f32, 1, -inf_f32); try test__powisf2(-inf_f32, 2, inf_f32); try test__powisf2(-inf_f32, 3, -inf_f32); try test__powisf2(-inf_f32, 4, inf_f32); - try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f32); - try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f32); + try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f32); + try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f32); try test__powisf2(0, -1, inf_f32); try test__powisf2(0, -2, inf_f32); try test__powisf2(0, -3, inf_f32); try test__powisf2(0, -4, inf_f32); - try test__powisf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f32); - try test__powisf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f32); - try test__powisf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f32); + try test__powisf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f32); + try test__powisf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f32); + try test__powisf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f32); try test__powisf2(-0.0, -1, -inf_f32); try test__powisf2(-0.0, -2, inf_f32); try test__powisf2(-0.0, -3, -inf_f32); try test__powisf2(-0.0, -4, inf_f32); - try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f32); - try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f32); - try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f32); + try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f32); + try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f32); + try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f32); try test__powisf2(1, -1, 1); try test__powisf2(1, -2, 1); try test__powisf2(1, -3, 1); try test__powisf2(1, -4, 1); - try test__powisf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1); - try test__powisf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__powisf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); + try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1); + try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); try test__powisf2(inf_f32, -1, 0); try test__powisf2(inf_f32, -2, 0); try test__powisf2(inf_f32, -3, 0); try test__powisf2(inf_f32, -4, 0); - try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powisf2(-inf_f32, -1, -0.0); try test__powisf2(-inf_f32, -2, 0); try test__powisf2(-inf_f32, -3, -0.0); try test__powisf2(-inf_f32, -4, 0); - try 
test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0); + try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powisf2(2.0, 10, 1024.0); try test__powisf2(-2, 10, 1024.0); @@ -263,76 +263,76 @@ test "powidf2" { try test__powidf2(0, 2, 0); try test__powidf2(0, 3, 0); try test__powidf2(0, 4, 0); - try test__powidf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powidf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 0); + try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 0); try test__powidf2(-0.0, 1, -0.0); try test__powidf2(-0.0, 2, 0); try test__powidf2(-0.0, 3, -0.0); try test__powidf2(-0.0, 4, 0); - try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0); + try test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0); try test__powidf2(1, 1, 1); try test__powidf2(1, 2, 1); try test__powidf2(1, 3, 1); try test__powidf2(1, 4, 1); - try test__powidf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); - try test__powidf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1); + try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1); + try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1); try test__powidf2(inf_f64, 1, inf_f64); try test__powidf2(inf_f64, 2, inf_f64); try test__powidf2(inf_f64, 3, inf_f64); try test__powidf2(inf_f64, 4, inf_f64); - try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f64); - try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f64); + try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f64); + try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f64); try test__powidf2(-inf_f64, 1, -inf_f64); try test__powidf2(-inf_f64, 2, inf_f64); try test__powidf2(-inf_f64, 3, -inf_f64); try test__powidf2(-inf_f64, 4, inf_f64); - try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f64); - try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f64); + try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f64); + try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f64); try test__powidf2(0, -1, inf_f64); try test__powidf2(0, -2, inf_f64); try test__powidf2(0, -3, inf_f64); try test__powidf2(0, -4, inf_f64); - try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f64); - try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f64); - try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f64); + try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f64); + try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f64); + try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f64); try test__powidf2(-0.0, -1, -inf_f64); try test__powidf2(-0.0, -2, inf_f64); try test__powidf2(-0.0, -3, -inf_f64); try test__powidf2(-0.0, -4, inf_f64); - try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f64); - try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f64); - try test__powidf2(-0.0, 
@bitCast(i32, @as(u32, 0x80000000)), inf_f64); + try test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f64); + try test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f64); + try test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f64); try test__powidf2(1, -1, 1); try test__powidf2(1, -2, 1); try test__powidf2(1, -3, 1); try test__powidf2(1, -4, 1); - try test__powidf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1); - try test__powidf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__powidf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); + try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1); + try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); try test__powidf2(inf_f64, -1, 0); try test__powidf2(inf_f64, -2, 0); try test__powidf2(inf_f64, -3, 0); try test__powidf2(inf_f64, -4, 0); - try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powidf2(-inf_f64, -1, -0.0); try test__powidf2(-inf_f64, -2, 0); try test__powidf2(-inf_f64, -3, -0.0); try test__powidf2(-inf_f64, -4, 0); - try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0); + try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powidf2(2, 10, 1024.0); try test__powidf2(-2, 10, 1024.0); @@ -368,76 +368,76 @@ test "powitf2" { try test__powitf2(0, 2, 0); try test__powitf2(0, 3, 0); try test__powitf2(0, 4, 0); - try test__powitf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); + try test__powitf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); try test__powitf2(0, 0x7FFFFFFF, 0); try test__powitf2(-0.0, 1, -0.0); try test__powitf2(-0.0, 2, 0); try test__powitf2(-0.0, 3, -0.0); try test__powitf2(-0.0, 4, 0); - try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0); + try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0); try test__powitf2(1, 1, 1); try test__powitf2(1, 2, 1); try test__powitf2(1, 3, 1); try test__powitf2(1, 4, 1); - try test__powitf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); - try test__powitf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1); + try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1); + try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1); try test__powitf2(inf_f128, 1, inf_f128); try test__powitf2(inf_f128, 2, inf_f128); try test__powitf2(inf_f128, 3, inf_f128); try test__powitf2(inf_f128, 4, inf_f128); - try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f128); - try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f128); + try test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f128); + try 
test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f128); try test__powitf2(-inf_f128, 1, -inf_f128); try test__powitf2(-inf_f128, 2, inf_f128); try test__powitf2(-inf_f128, 3, -inf_f128); try test__powitf2(-inf_f128, 4, inf_f128); - try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f128); - try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f128); + try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f128); + try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f128); try test__powitf2(0, -1, inf_f128); try test__powitf2(0, -2, inf_f128); try test__powitf2(0, -3, inf_f128); try test__powitf2(0, -4, inf_f128); - try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f128); - try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f128); - try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f128); + try test__powitf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f128); + try test__powitf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f128); + try test__powitf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f128); try test__powitf2(-0.0, -1, -inf_f128); try test__powitf2(-0.0, -2, inf_f128); try test__powitf2(-0.0, -3, -inf_f128); try test__powitf2(-0.0, -4, inf_f128); - try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f128); - try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f128); - try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f128); + try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f128); + try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f128); + try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f128); try test__powitf2(1, -1, 1); try test__powitf2(1, -2, 1); try test__powitf2(1, -3, 1); try test__powitf2(1, -4, 1); - try test__powitf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1); - try test__powitf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__powitf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); + try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1); + try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); try test__powitf2(inf_f128, -1, 0); try test__powitf2(inf_f128, -2, 0); try test__powitf2(inf_f128, -3, 0); try test__powitf2(inf_f128, -4, 0); - try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powitf2(-inf_f128, -1, -0.0); try test__powitf2(-inf_f128, -2, 0); try test__powitf2(-inf_f128, -3, -0.0); try test__powitf2(-inf_f128, -4, 0); - try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0); + try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powitf2(2, 10, 1024.0); try test__powitf2(-2, 10, 
1024.0); @@ -473,76 +473,76 @@ test "powixf2" { try test__powixf2(0, 2, 0); try test__powixf2(0, 3, 0); try test__powixf2(0, 4, 0); - try test__powixf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powixf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 0); + try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 0); try test__powixf2(-0.0, 1, -0.0); try test__powixf2(-0.0, 2, 0); try test__powixf2(-0.0, 3, -0.0); try test__powixf2(-0.0, 4, 0); - try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0); + try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0); try test__powixf2(1, 1, 1); try test__powixf2(1, 2, 1); try test__powixf2(1, 3, 1); try test__powixf2(1, 4, 1); - try test__powixf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); - try test__powixf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1); + try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1); + try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1); try test__powixf2(inf_f80, 1, inf_f80); try test__powixf2(inf_f80, 2, inf_f80); try test__powixf2(inf_f80, 3, inf_f80); try test__powixf2(inf_f80, 4, inf_f80); - try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f80); - try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f80); + try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f80); + try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f80); try test__powixf2(-inf_f80, 1, -inf_f80); try test__powixf2(-inf_f80, 2, inf_f80); try test__powixf2(-inf_f80, 3, -inf_f80); try test__powixf2(-inf_f80, 4, inf_f80); - try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f80); - try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f80); + try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f80); + try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f80); try test__powixf2(0, -1, inf_f80); try test__powixf2(0, -2, inf_f80); try test__powixf2(0, -3, inf_f80); try test__powixf2(0, -4, inf_f80); - try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f80); - try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f80); - try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f80); + try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f80); + try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f80); + try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f80); try test__powixf2(-0.0, -1, -inf_f80); try test__powixf2(-0.0, -2, inf_f80); try test__powixf2(-0.0, -3, -inf_f80); try test__powixf2(-0.0, -4, inf_f80); - try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f80); - try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f80); - try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f80); + try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f80); + try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f80); + try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f80); try test__powixf2(1, -1, 1); try test__powixf2(1, -2, 1); try test__powixf2(1, -3, 1); try test__powixf2(1, -4, 1); - try test__powixf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1); - try test__powixf2(1, @bitCast(i32, 
@as(u32, 0x80000001)), 1); - try test__powixf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); + try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1); + try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); try test__powixf2(inf_f80, -1, 0); try test__powixf2(inf_f80, -2, 0); try test__powixf2(inf_f80, -3, 0); try test__powixf2(inf_f80, -4, 0); - try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powixf2(-inf_f80, -1, -0.0); try test__powixf2(-inf_f80, -2, 0); try test__powixf2(-inf_f80, -3, -0.0); try test__powixf2(-inf_f80, -4, 0); - try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0); + try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powixf2(2, 10, 1024.0); try test__powixf2(-2, 10, 1024.0); diff --git a/lib/compiler_rt/rem_pio2.zig b/lib/compiler_rt/rem_pio2.zig index 315a99c308..14a8733e66 100644 --- a/lib/compiler_rt/rem_pio2.zig +++ b/lib/compiler_rt/rem_pio2.zig @@ -26,7 +26,7 @@ const pio2_3 = 2.02226624871116645580e-21; // 0x3BA3198A, 0x2E000000 const pio2_3t = 8.47842766036889956997e-32; // 0x397B839A, 0x252049C1 fn U(x: anytype) usize { - return @intCast(usize, x); + return @as(usize, @intCast(x)); } fn medium(ix: u32, x: f64, y: *[2]f64) i32 { @@ -41,7 +41,7 @@ fn medium(ix: u32, x: f64, y: *[2]f64) i32 { // rint(x/(pi/2)) @"fn" = x * invpio2 + toint - toint; - n = @intFromFloat(i32, @"fn"); + n = @as(i32, @intFromFloat(@"fn")); r = x - @"fn" * pio2_1; w = @"fn" * pio2_1t; // 1st round, good to 85 bits // Matters with directed rounding. 
@@ -57,17 +57,17 @@ fn medium(ix: u32, x: f64, y: *[2]f64) i32 { w = @"fn" * pio2_1t; } y[0] = r - w; - ui = @bitCast(u64, y[0]); - ey = @intCast(i32, (ui >> 52) & 0x7ff); - ex = @intCast(i32, ix >> 20); + ui = @as(u64, @bitCast(y[0])); + ey = @as(i32, @intCast((ui >> 52) & 0x7ff)); + ex = @as(i32, @intCast(ix >> 20)); if (ex - ey > 16) { // 2nd round, good to 118 bits t = r; w = @"fn" * pio2_2; r = t - w; w = @"fn" * pio2_2t - ((t - r) - w); y[0] = r - w; - ui = @bitCast(u64, y[0]); - ey = @intCast(i32, (ui >> 52) & 0x7ff); + ui = @as(u64, @bitCast(y[0])); + ey = @as(i32, @intCast((ui >> 52) & 0x7ff)); if (ex - ey > 49) { // 3rd round, good to 151 bits, covers all cases t = r; w = @"fn" * pio2_3; @@ -95,9 +95,9 @@ pub fn rem_pio2(x: f64, y: *[2]f64) i32 { var i: i32 = undefined; var ui: u64 = undefined; - ui = @bitCast(u64, x); + ui = @as(u64, @bitCast(x)); sign = ui >> 63 != 0; - ix = @truncate(u32, (ui >> 32) & 0x7fffffff); + ix = @as(u32, @truncate((ui >> 32) & 0x7fffffff)); if (ix <= 0x400f6a7a) { // |x| ~<= 5pi/4 if ((ix & 0xfffff) == 0x921fb) { // |x| ~= pi/2 or 2pi/2 return medium(ix, x, y); @@ -171,14 +171,14 @@ pub fn rem_pio2(x: f64, y: *[2]f64) i32 { return 0; } // set z = scalbn(|x|,-ilogb(x)+23) - ui = @bitCast(u64, x); + ui = @as(u64, @bitCast(x)); ui &= std.math.maxInt(u64) >> 12; ui |= @as(u64, 0x3ff + 23) << 52; - z = @bitCast(f64, ui); + z = @as(f64, @bitCast(ui)); i = 0; while (i < 2) : (i += 1) { - tx[U(i)] = @floatFromInt(f64, @intFromFloat(i32, z)); + tx[U(i)] = @as(f64, @floatFromInt(@as(i32, @intFromFloat(z)))); z = (z - tx[U(i)]) * 0x1p24; } tx[U(i)] = z; @@ -186,7 +186,7 @@ pub fn rem_pio2(x: f64, y: *[2]f64) i32 { while (tx[U(i)] == 0.0) { i -= 1; } - n = rem_pio2_large(tx[0..], ty[0..], @intCast(i32, (ix >> 20)) - (0x3ff + 23), i + 1, 1); + n = rem_pio2_large(tx[0..], ty[0..], @as(i32, @intCast((ix >> 20))) - (0x3ff + 23), i + 1, 1); if (sign) { y[0] = -ty[0]; y[1] = -ty[1]; diff --git a/lib/compiler_rt/rem_pio2_large.zig b/lib/compiler_rt/rem_pio2_large.zig index afded18387..79262f0e5e 100644 --- a/lib/compiler_rt/rem_pio2_large.zig +++ b/lib/compiler_rt/rem_pio2_large.zig @@ -150,7 +150,7 @@ const PIo2 = [_]f64{ }; fn U(x: anytype) usize { - return @intCast(usize, x); + return @as(usize, @intCast(x)); } /// Returns the last three digits of N with y = x - N*pi/2 so that |y| < pi/2. 
@@ -295,7 +295,7 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 { i += 1; j += 1; }) { - f[U(i)] = if (j < 0) 0.0 else @floatFromInt(f64, ipio2[U(j)]); + f[U(i)] = if (j < 0) 0.0 else @as(f64, @floatFromInt(ipio2[U(j)])); } // compute q[0],q[1],...q[jk] @@ -322,22 +322,22 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 { i += 1; j -= 1; }) { - fw = @floatFromInt(f64, @intFromFloat(i32, 0x1p-24 * z)); - iq[U(i)] = @intFromFloat(i32, z - 0x1p24 * fw); + fw = @as(f64, @floatFromInt(@as(i32, @intFromFloat(0x1p-24 * z)))); + iq[U(i)] = @as(i32, @intFromFloat(z - 0x1p24 * fw)); z = q[U(j - 1)] + fw; } // compute n z = math.scalbn(z, q0); // actual value of z z -= 8.0 * @floor(z * 0.125); // trim off integer >= 8 - n = @intFromFloat(i32, z); - z -= @floatFromInt(f64, n); + n = @as(i32, @intFromFloat(z)); + z -= @as(f64, @floatFromInt(n)); ih = 0; if (q0 > 0) { // need iq[jz-1] to determine n - i = iq[U(jz - 1)] >> @intCast(u5, 24 - q0); + i = iq[U(jz - 1)] >> @as(u5, @intCast(24 - q0)); n += i; - iq[U(jz - 1)] -= i << @intCast(u5, 24 - q0); - ih = iq[U(jz - 1)] >> @intCast(u5, 23 - q0); + iq[U(jz - 1)] -= i << @as(u5, @intCast(24 - q0)); + ih = iq[U(jz - 1)] >> @as(u5, @intCast(23 - q0)); } else if (q0 == 0) { ih = iq[U(jz - 1)] >> 23; } else if (z >= 0.5) { @@ -390,7 +390,7 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 { i = jz + 1; while (i <= jz + k) : (i += 1) { // add q[jz+1] to q[jz+k] - f[U(jx + i)] = @floatFromInt(f64, ipio2[U(jv + i)]); + f[U(jx + i)] = @as(f64, @floatFromInt(ipio2[U(jv + i)])); j = 0; fw = 0; while (j <= jx) : (j += 1) { @@ -414,13 +414,13 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 { } else { // break z into 24-bit if necessary z = math.scalbn(z, -q0); if (z >= 0x1p24) { - fw = @floatFromInt(f64, @intFromFloat(i32, 0x1p-24 * z)); - iq[U(jz)] = @intFromFloat(i32, z - 0x1p24 * fw); + fw = @as(f64, @floatFromInt(@as(i32, @intFromFloat(0x1p-24 * z)))); + iq[U(jz)] = @as(i32, @intFromFloat(z - 0x1p24 * fw)); jz += 1; q0 += 24; - iq[U(jz)] = @intFromFloat(i32, fw); + iq[U(jz)] = @as(i32, @intFromFloat(fw)); } else { - iq[U(jz)] = @intFromFloat(i32, z); + iq[U(jz)] = @as(i32, @intFromFloat(z)); } } @@ -428,7 +428,7 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 { fw = math.scalbn(@as(f64, 1.0), q0); i = jz; while (i >= 0) : (i -= 1) { - q[U(i)] = fw * @floatFromInt(f64, iq[U(i)]); + q[U(i)] = fw * @as(f64, @floatFromInt(iq[U(i)])); fw *= 0x1p-24; } diff --git a/lib/compiler_rt/rem_pio2f.zig b/lib/compiler_rt/rem_pio2f.zig index 9e47bbcb24..2be81313f5 100644 --- a/lib/compiler_rt/rem_pio2f.zig +++ b/lib/compiler_rt/rem_pio2f.zig @@ -30,14 +30,14 @@ pub fn rem_pio2f(x: f32, y: *f64) i32 { var e0: u32 = undefined; var ui: u32 = undefined; - ui = @bitCast(u32, x); + ui = @as(u32, @bitCast(x)); ix = ui & 0x7fffffff; // 25+53 bit pi is good enough for medium size if (ix < 0x4dc90fdb) { // |x| ~< 2^28*(pi/2), medium size // Use a specialized rint() to get fn. - @"fn" = @floatCast(f64, x) * invpio2 + toint - toint; - n = @intFromFloat(i32, @"fn"); + @"fn" = @as(f64, @floatCast(x)) * invpio2 + toint - toint; + n = @as(i32, @intFromFloat(@"fn")); y.* = x - @"fn" * pio2_1 - @"fn" * pio2_1t; // Matters with directed rounding. 
if (y.* < -pio4) { @@ -59,8 +59,8 @@ pub fn rem_pio2f(x: f32, y: *f64) i32 { sign = ui >> 31 != 0; e0 = (ix >> 23) - (0x7f + 23); // e0 = ilogb(|x|)-23, positive ui = ix - (e0 << 23); - tx[0] = @bitCast(f32, ui); - n = rem_pio2_large(&tx, &ty, @intCast(i32, e0), 1, 0); + tx[0] = @as(f32, @bitCast(ui)); + n = rem_pio2_large(&tx, &ty, @as(i32, @intCast(e0)), 1, 0); if (sign) { y.* = -ty[0]; return -n; diff --git a/lib/compiler_rt/round.zig b/lib/compiler_rt/round.zig index 121371fa17..2c7cb8956a 100644 --- a/lib/compiler_rt/round.zig +++ b/lib/compiler_rt/round.zig @@ -27,14 +27,14 @@ comptime { pub fn __roundh(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, roundf(x)); + return @as(f16, @floatCast(roundf(x))); } pub fn roundf(x_: f32) callconv(.C) f32 { const f32_toint = 1.0 / math.floatEps(f32); var x = x_; - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); const e = (u >> 23) & 0xFF; var y: f32 = undefined; @@ -46,7 +46,7 @@ pub fn roundf(x_: f32) callconv(.C) f32 { } if (e < 0x7F - 1) { math.doNotOptimizeAway(x + f32_toint); - return 0 * @bitCast(f32, u); + return 0 * @as(f32, @bitCast(u)); } y = x + f32_toint - f32_toint - x; @@ -69,7 +69,7 @@ pub fn round(x_: f64) callconv(.C) f64 { const f64_toint = 1.0 / math.floatEps(f64); var x = x_; - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); const e = (u >> 52) & 0x7FF; var y: f64 = undefined; @@ -81,7 +81,7 @@ pub fn round(x_: f64) callconv(.C) f64 { } if (e < 0x3ff - 1) { math.doNotOptimizeAway(x + f64_toint); - return 0 * @bitCast(f64, u); + return 0 * @as(f64, @bitCast(u)); } y = x + f64_toint - f64_toint - x; @@ -102,14 +102,14 @@ pub fn round(x_: f64) callconv(.C) f64 { pub fn __roundx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, roundq(x)); + return @as(f80, @floatCast(roundq(x))); } pub fn roundq(x_: f128) callconv(.C) f128 { const f128_toint = 1.0 / math.floatEps(f128); var x = x_; - const u = @bitCast(u128, x); + const u = @as(u128, @bitCast(x)); const e = (u >> 112) & 0x7FFF; var y: f128 = undefined; @@ -121,7 +121,7 @@ pub fn roundq(x_: f128) callconv(.C) f128 { } if (e < 0x3FFF - 1) { math.doNotOptimizeAway(x + f128_toint); - return 0 * @bitCast(f128, u); + return 0 * @as(f128, @bitCast(u)); } y = x + f128_toint - f128_toint - x; diff --git a/lib/compiler_rt/shift.zig b/lib/compiler_rt/shift.zig index 4d8658dbc9..0ca5637d9d 100644 --- a/lib/compiler_rt/shift.zig +++ b/lib/compiler_rt/shift.zig @@ -37,13 +37,13 @@ inline fn ashlXi3(comptime T: type, a: T, b: i32) T { if (b >= word_t.bits) { output.s.low = 0; - output.s.high = input.s.low << @intCast(S, b - word_t.bits); + output.s.high = input.s.low << @as(S, @intCast(b - word_t.bits)); } else if (b == 0) { return a; } else { - output.s.low = input.s.low << @intCast(S, b); - output.s.high = input.s.high << @intCast(S, b); - output.s.high |= input.s.low >> @intCast(S, word_t.bits - b); + output.s.low = input.s.low << @as(S, @intCast(b)); + output.s.high = input.s.high << @as(S, @intCast(b)); + output.s.high |= input.s.low >> @as(S, @intCast(word_t.bits - b)); } return output.all; @@ -60,16 +60,16 @@ inline fn ashrXi3(comptime T: type, a: T, b: i32) T { if (b >= word_t.bits) { output.s.high = input.s.high >> (word_t.bits - 1); - output.s.low = input.s.high >> @intCast(S, b - word_t.bits); + output.s.low = input.s.high >> @as(S, @intCast(b - word_t.bits)); } else if (b == 0) { return a; } else { - output.s.high = input.s.high >> @intCast(S, b); - output.s.low = 
input.s.high << @intCast(S, word_t.bits - b); + output.s.high = input.s.high >> @as(S, @intCast(b)); + output.s.low = input.s.high << @as(S, @intCast(word_t.bits - b)); // Avoid sign-extension here - output.s.low |= @bitCast( + output.s.low |= @as( word_t.HalfT, - @bitCast(word_t.HalfTU, input.s.low) >> @intCast(S, b), + @bitCast(@as(word_t.HalfTU, @bitCast(input.s.low)) >> @as(S, @intCast(b))), ); } @@ -87,13 +87,13 @@ inline fn lshrXi3(comptime T: type, a: T, b: i32) T { if (b >= word_t.bits) { output.s.high = 0; - output.s.low = input.s.high >> @intCast(S, b - word_t.bits); + output.s.low = input.s.high >> @as(S, @intCast(b - word_t.bits)); } else if (b == 0) { return a; } else { - output.s.high = input.s.high >> @intCast(S, b); - output.s.low = input.s.high << @intCast(S, word_t.bits - b); - output.s.low |= input.s.low >> @intCast(S, b); + output.s.high = input.s.high >> @as(S, @intCast(b)); + output.s.low = input.s.high << @as(S, @intCast(word_t.bits - b)); + output.s.low |= input.s.low >> @as(S, @intCast(b)); } return output.all; diff --git a/lib/compiler_rt/shift_test.zig b/lib/compiler_rt/shift_test.zig index b9c5dc64fa..03388bfa1e 100644 --- a/lib/compiler_rt/shift_test.zig +++ b/lib/compiler_rt/shift_test.zig @@ -18,346 +18,346 @@ const __lshrti3 = shift.__lshrti3; fn test__ashlsi3(a: i32, b: i32, expected: u32) !void { const x = __ashlsi3(a, b); - try testing.expectEqual(expected, @bitCast(u32, x)); + try testing.expectEqual(expected, @as(u32, @bitCast(x))); } fn test__ashldi3(a: i64, b: i32, expected: u64) !void { const x = __ashldi3(a, b); - try testing.expectEqual(expected, @bitCast(u64, x)); + try testing.expectEqual(expected, @as(u64, @bitCast(x))); } fn test__ashlti3(a: i128, b: i32, expected: u128) !void { const x = __ashlti3(a, b); - try testing.expectEqual(expected, @bitCast(u128, x)); + try testing.expectEqual(expected, @as(u128, @bitCast(x))); } test "ashlsi3" { - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 0, 0x12ABCDEF); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 1, 0x25579BDE); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 2, 0x4AAF37BC); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 3, 0x955E6F78); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 4, 0x2ABCDEF0); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 0, 0x12ABCDEF); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 1, 0x25579BDE); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 2, 0x4AAF37BC); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 3, 0x955E6F78); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 4, 0x2ABCDEF0); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 28, 0xF0000000); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 29, 0xE0000000); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 30, 0xC0000000); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 31, 0x80000000); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 28, 0xF0000000); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 29, 0xE0000000); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 30, 0xC0000000); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 31, 0x80000000); } test "ashldi3" { - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 0, 0x123456789ABCDEF); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 1, 0x2468ACF13579BDE); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 2, 
0x48D159E26AF37BC); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 3, 0x91A2B3C4D5E6F78); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 4, 0x123456789ABCDEF0); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 0, 0x123456789ABCDEF); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 1, 0x2468ACF13579BDE); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 2, 0x48D159E26AF37BC); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 3, 0x91A2B3C4D5E6F78); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 4, 0x123456789ABCDEF0); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 28, 0x789ABCDEF0000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 29, 0xF13579BDE0000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 30, 0xE26AF37BC0000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 31, 0xC4D5E6F780000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 28, 0x789ABCDEF0000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 29, 0xF13579BDE0000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 30, 0xE26AF37BC0000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 31, 0xC4D5E6F780000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 32, 0x89ABCDEF00000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 32, 0x89ABCDEF00000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 33, 0x13579BDE00000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 34, 0x26AF37BC00000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 35, 0x4D5E6F7800000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 36, 0x9ABCDEF000000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 33, 0x13579BDE00000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 34, 0x26AF37BC00000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 35, 0x4D5E6F7800000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 36, 0x9ABCDEF000000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 60, 0xF000000000000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 61, 0xE000000000000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 62, 0xC000000000000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 63, 0x8000000000000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 60, 0xF000000000000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 61, 0xE000000000000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 62, 0xC000000000000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 63, 0x8000000000000000); } test "ashlti3" { - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 0, 0xFEDCBA9876543215FEDCBA9876543215); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 1, 0xFDB97530ECA8642BFDB97530ECA8642A); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 2, 0xFB72EA61D950C857FB72EA61D950C854); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 3, 
0xF6E5D4C3B2A190AFF6E5D4C3B2A190A8); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 4, 0xEDCBA9876543215FEDCBA98765432150); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 28, 0x876543215FEDCBA98765432150000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 29, 0x0ECA8642BFDB97530ECA8642A0000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 30, 0x1D950C857FB72EA61D950C8540000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 31, 0x3B2A190AFF6E5D4C3B2A190A80000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 32, 0x76543215FEDCBA987654321500000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 33, 0xECA8642BFDB97530ECA8642A00000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 34, 0xD950C857FB72EA61D950C85400000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 35, 0xB2A190AFF6E5D4C3B2A190A800000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 36, 0x6543215FEDCBA9876543215000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 60, 0x5FEDCBA9876543215000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 61, 0xBFDB97530ECA8642A000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 62, 0x7FB72EA61D950C854000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 63, 0xFF6E5D4C3B2A190A8000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 64, 0xFEDCBA98765432150000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 65, 0xFDB97530ECA8642A0000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 66, 0xFB72EA61D950C8540000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 67, 0xF6E5D4C3B2A190A80000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 68, 0xEDCBA987654321500000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 92, 0x87654321500000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 93, 0x0ECA8642A00000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 94, 0x1D950C85400000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 95, 0x3B2A190A800000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 96, 0x76543215000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 97, 0xECA8642A000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 98, 0xD950C854000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 99, 0xB2A190A8000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 100, 0x65432150000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 
0xFEDCBA9876543215FEDCBA9876543215)), 124, 0x50000000000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 125, 0xA0000000000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 126, 0x40000000000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 127, 0x80000000000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 0, 0xFEDCBA9876543215FEDCBA9876543215); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 1, 0xFDB97530ECA8642BFDB97530ECA8642A); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 2, 0xFB72EA61D950C857FB72EA61D950C854); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 3, 0xF6E5D4C3B2A190AFF6E5D4C3B2A190A8); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 4, 0xEDCBA9876543215FEDCBA98765432150); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 28, 0x876543215FEDCBA98765432150000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 29, 0x0ECA8642BFDB97530ECA8642A0000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 30, 0x1D950C857FB72EA61D950C8540000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 31, 0x3B2A190AFF6E5D4C3B2A190A80000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 32, 0x76543215FEDCBA987654321500000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 33, 0xECA8642BFDB97530ECA8642A00000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 34, 0xD950C857FB72EA61D950C85400000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 35, 0xB2A190AFF6E5D4C3B2A190A800000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 36, 0x6543215FEDCBA9876543215000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 60, 0x5FEDCBA9876543215000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 61, 0xBFDB97530ECA8642A000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 62, 0x7FB72EA61D950C854000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 63, 0xFF6E5D4C3B2A190A8000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 64, 0xFEDCBA98765432150000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 65, 0xFDB97530ECA8642A0000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 66, 0xFB72EA61D950C8540000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 67, 0xF6E5D4C3B2A190A80000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 68, 0xEDCBA987654321500000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 92, 0x87654321500000000000000000000000); + try 
test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 93, 0x0ECA8642A00000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 94, 0x1D950C85400000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 95, 0x3B2A190A800000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 96, 0x76543215000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 97, 0xECA8642A000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 98, 0xD950C854000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 99, 0xB2A190A8000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 100, 0x65432150000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 124, 0x50000000000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 125, 0xA0000000000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 126, 0x40000000000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 127, 0x80000000000000000000000000000000); } fn test__ashrsi3(a: i32, b: i32, expected: u32) !void { const x = __ashrsi3(a, b); - try testing.expectEqual(expected, @bitCast(u32, x)); + try testing.expectEqual(expected, @as(u32, @bitCast(x))); } fn test__ashrdi3(a: i64, b: i32, expected: u64) !void { const x = __ashrdi3(a, b); - try testing.expectEqual(expected, @bitCast(u64, x)); + try testing.expectEqual(expected, @as(u64, @bitCast(x))); } fn test__ashrti3(a: i128, b: i32, expected: u128) !void { const x = __ashrti3(a, b); - try testing.expectEqual(expected, @bitCast(u128, x)); + try testing.expectEqual(expected, @as(u128, @bitCast(x))); } test "ashrsi3" { - try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 0, 0xFEDBCA98); - try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 1, 0xFF6DE54C); - try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 2, 0xFFB6F2A6); - try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 3, 0xFFDB7953); - try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 4, 0xFFEDBCA9); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 0, 0xFEDBCA98); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 1, 0xFF6DE54C); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 2, 0xFFB6F2A6); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 3, 0xFFDB7953); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 4, 0xFFEDBCA9); - try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 28, 0xFFFFFFFF); - try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 31, 0xFFFFFFFF); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 28, 0xFFFFFFFF); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 31, 0xFFFFFFFF); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 0, 0x8CEF8CEF); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 1, 0xC677C677); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 2, 0xE33BE33B); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 3, 0xF19DF19D); - try 
test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 4, 0xF8CEF8CE); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 0, 0x8CEF8CEF); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 1, 0xC677C677); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 2, 0xE33BE33B); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 3, 0xF19DF19D); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 4, 0xF8CEF8CE); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 28, 0xFFFFFFF8); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 29, 0xFFFFFFFC); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 30, 0xFFFFFFFE); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 31, 0xFFFFFFFF); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 28, 0xFFFFFFF8); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 29, 0xFFFFFFFC); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 30, 0xFFFFFFFE); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 31, 0xFFFFFFFF); } test "ashrdi3" { - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 0, 0x123456789ABCDEF); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 1, 0x91A2B3C4D5E6F7); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 2, 0x48D159E26AF37B); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 3, 0x2468ACF13579BD); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 4, 0x123456789ABCDE); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 0, 0x123456789ABCDEF); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 1, 0x91A2B3C4D5E6F7); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 2, 0x48D159E26AF37B); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 3, 0x2468ACF13579BD); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 4, 0x123456789ABCDE); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 28, 0x12345678); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 29, 0x91A2B3C); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 30, 0x48D159E); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 31, 0x2468ACF); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 28, 0x12345678); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 29, 0x91A2B3C); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 30, 0x48D159E); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 31, 0x2468ACF); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 32, 0x1234567); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 32, 0x1234567); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 33, 0x91A2B3); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 34, 0x48D159); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 35, 0x2468AC); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 36, 0x123456); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 33, 0x91A2B3); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 34, 0x48D159); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 35, 0x2468AC); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 36, 0x123456); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 60, 0); 
- try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 61, 0); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 62, 0); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 63, 0); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 60, 0); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 61, 0); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 62, 0); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 63, 0); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 0, 0xFEDCBA9876543210); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 1, 0xFF6E5D4C3B2A1908); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 2, 0xFFB72EA61D950C84); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 3, 0xFFDB97530ECA8642); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 4, 0xFFEDCBA987654321); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 0, 0xFEDCBA9876543210); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 1, 0xFF6E5D4C3B2A1908); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 2, 0xFFB72EA61D950C84); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 3, 0xFFDB97530ECA8642); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 4, 0xFFEDCBA987654321); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 28, 0xFFFFFFFFEDCBA987); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 29, 0xFFFFFFFFF6E5D4C3); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 30, 0xFFFFFFFFFB72EA61); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 31, 0xFFFFFFFFFDB97530); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 28, 0xFFFFFFFFEDCBA987); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 29, 0xFFFFFFFFF6E5D4C3); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 30, 0xFFFFFFFFFB72EA61); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 31, 0xFFFFFFFFFDB97530); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 32, 0xFFFFFFFFFEDCBA98); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 32, 0xFFFFFFFFFEDCBA98); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 33, 0xFFFFFFFFFF6E5D4C); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 34, 0xFFFFFFFFFFB72EA6); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 35, 0xFFFFFFFFFFDB9753); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 36, 0xFFFFFFFFFFEDCBA9); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 33, 0xFFFFFFFFFF6E5D4C); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 34, 0xFFFFFFFFFFB72EA6); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 35, 0xFFFFFFFFFFDB9753); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 36, 0xFFFFFFFFFFEDCBA9); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 60, 0xFFFFFFFFFFFFFFFA); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 61, 0xFFFFFFFFFFFFFFFD); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 62, 0xFFFFFFFFFFFFFFFE); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 63, 0xFFFFFFFFFFFFFFFF); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 60, 
0xFFFFFFFFFFFFFFFA); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 61, 0xFFFFFFFFFFFFFFFD); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 62, 0xFFFFFFFFFFFFFFFE); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 63, 0xFFFFFFFFFFFFFFFF); } test "ashrti3" { - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 0, 0xFEDCBA9876543215FEDCBA9876543215); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 1, 0xFF6E5D4C3B2A190AFF6E5D4C3B2A190A); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 2, 0xFFB72EA61D950C857FB72EA61D950C85); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 3, 0xFFDB97530ECA8642BFDB97530ECA8642); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 4, 0xFFEDCBA9876543215FEDCBA987654321); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 0, 0xFEDCBA9876543215FEDCBA9876543215); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 1, 0xFF6E5D4C3B2A190AFF6E5D4C3B2A190A); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 2, 0xFFB72EA61D950C857FB72EA61D950C85); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 3, 0xFFDB97530ECA8642BFDB97530ECA8642); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 4, 0xFFEDCBA9876543215FEDCBA987654321); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 28, 0xFFFFFFFFEDCBA9876543215FEDCBA987); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 29, 0xFFFFFFFFF6E5D4C3B2A190AFF6E5D4C3); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 30, 0xFFFFFFFFFB72EA61D950C857FB72EA61); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 31, 0xFFFFFFFFFDB97530ECA8642BFDB97530); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 28, 0xFFFFFFFFEDCBA9876543215FEDCBA987); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 29, 0xFFFFFFFFF6E5D4C3B2A190AFF6E5D4C3); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 30, 0xFFFFFFFFFB72EA61D950C857FB72EA61); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 31, 0xFFFFFFFFFDB97530ECA8642BFDB97530); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 32, 0xFFFFFFFFFEDCBA9876543215FEDCBA98); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 32, 0xFFFFFFFFFEDCBA9876543215FEDCBA98); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 33, 0xFFFFFFFFFF6E5D4C3B2A190AFF6E5D4C); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 34, 0xFFFFFFFFFFB72EA61D950C857FB72EA6); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 35, 0xFFFFFFFFFFDB97530ECA8642BFDB9753); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 36, 0xFFFFFFFFFFEDCBA9876543215FEDCBA9); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 33, 0xFFFFFFFFFF6E5D4C3B2A190AFF6E5D4C); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 
0xFEDCBA9876543215FEDCBA9876543215))), 34, 0xFFFFFFFFFFB72EA61D950C857FB72EA6); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 35, 0xFFFFFFFFFFDB97530ECA8642BFDB9753); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 36, 0xFFFFFFFFFFEDCBA9876543215FEDCBA9); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 60, 0xFFFFFFFFFFFFFFFFEDCBA9876543215F); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 61, 0xFFFFFFFFFFFFFFFFF6E5D4C3B2A190AF); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 62, 0xFFFFFFFFFFFFFFFFFB72EA61D950C857); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 63, 0xFFFFFFFFFFFFFFFFFDB97530ECA8642B); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 60, 0xFFFFFFFFFFFFFFFFEDCBA9876543215F); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 61, 0xFFFFFFFFFFFFFFFFF6E5D4C3B2A190AF); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 62, 0xFFFFFFFFFFFFFFFFFB72EA61D950C857); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 63, 0xFFFFFFFFFFFFFFFFFDB97530ECA8642B); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 64, 0xFFFFFFFFFFFFFFFFFEDCBA9876543215); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 64, 0xFFFFFFFFFFFFFFFFFEDCBA9876543215); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 65, 0xFFFFFFFFFFFFFFFFFF6E5D4C3B2A190A); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 66, 0xFFFFFFFFFFFFFFFFFFB72EA61D950C85); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 67, 0xFFFFFFFFFFFFFFFFFFDB97530ECA8642); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 68, 0xFFFFFFFFFFFFFFFFFFEDCBA987654321); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 65, 0xFFFFFFFFFFFFFFFFFF6E5D4C3B2A190A); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 66, 0xFFFFFFFFFFFFFFFFFFB72EA61D950C85); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 67, 0xFFFFFFFFFFFFFFFFFFDB97530ECA8642); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 68, 0xFFFFFFFFFFFFFFFFFFEDCBA987654321); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 92, 0xFFFFFFFFFFFFFFFFFFFFFFFFEDCBA987); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 93, 0xFFFFFFFFFFFFFFFFFFFFFFFFF6E5D4C3); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 94, 0xFFFFFFFFFFFFFFFFFFFFFFFFFB72EA61); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 95, 0xFFFFFFFFFFFFFFFFFFFFFFFFFDB97530); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 92, 0xFFFFFFFFFFFFFFFFFFFFFFFFEDCBA987); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 93, 0xFFFFFFFFFFFFFFFFFFFFFFFFF6E5D4C3); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 94, 0xFFFFFFFFFFFFFFFFFFFFFFFFFB72EA61); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 
0xFEDCBA9876543215FEDCBA9876543215))), 95, 0xFFFFFFFFFFFFFFFFFFFFFFFFFDB97530); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 96, 0xFFFFFFFFFFFFFFFFFFFFFFFFFEDCBA98); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 96, 0xFFFFFFFFFFFFFFFFFFFFFFFFFEDCBA98); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 97, 0xFFFFFFFFFFFFFFFFFFFFFFFFFF6E5D4C); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 98, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFB72EA6); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 99, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFDB9753); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 100, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFEDCBA9); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 97, 0xFFFFFFFFFFFFFFFFFFFFFFFFFF6E5D4C); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 98, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFB72EA6); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 99, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFDB9753); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 100, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFEDCBA9); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 124, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 125, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 126, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 127, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 124, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 125, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 126, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 127, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); } fn test__lshrsi3(a: i32, b: i32, expected: u32) !void { const x = __lshrsi3(a, b); - try testing.expectEqual(expected, @bitCast(u32, x)); + try testing.expectEqual(expected, @as(u32, @bitCast(x))); } fn test__lshrdi3(a: i64, b: i32, expected: u64) !void { const x = __lshrdi3(a, b); - try testing.expectEqual(expected, @bitCast(u64, x)); + try testing.expectEqual(expected, @as(u64, @bitCast(x))); } fn test__lshrti3(a: i128, b: i32, expected: u128) !void { const x = __lshrti3(a, b); - try testing.expectEqual(expected, @bitCast(u128, x)); + try testing.expectEqual(expected, @as(u128, @bitCast(x))); } test "lshrsi3" { - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 0, 0xFEDBCA98); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 1, 0x7F6DE54C); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 2, 0x3FB6F2A6); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 3, 0x1FDB7953); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 4, 0xFEDBCA9); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 0, 0xFEDBCA98); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 1, 0x7F6DE54C); + try test__lshrsi3(@as(i32, 
@bitCast(@as(u32, 0xFEDBCA98))), 2, 0x3FB6F2A6); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 3, 0x1FDB7953); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 4, 0xFEDBCA9); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 28, 0xF); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 29, 0x7); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 30, 0x3); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 31, 0x1); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 28, 0xF); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 29, 0x7); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 30, 0x3); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 31, 0x1); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 0, 0x8CEF8CEF); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 1, 0x4677C677); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 2, 0x233BE33B); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 3, 0x119DF19D); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 4, 0x8CEF8CE); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 0, 0x8CEF8CEF); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 1, 0x4677C677); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 2, 0x233BE33B); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 3, 0x119DF19D); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 4, 0x8CEF8CE); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 28, 0x8); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 29, 0x4); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 30, 0x2); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 31, 0x1); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 28, 0x8); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 29, 0x4); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 30, 0x2); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 31, 0x1); } test "lshrdi3" { - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 0, 0x123456789ABCDEF); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 1, 0x91A2B3C4D5E6F7); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 2, 0x48D159E26AF37B); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 3, 0x2468ACF13579BD); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 4, 0x123456789ABCDE); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 0, 0x123456789ABCDEF); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 1, 0x91A2B3C4D5E6F7); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 2, 0x48D159E26AF37B); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 3, 0x2468ACF13579BD); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 4, 0x123456789ABCDE); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 28, 0x12345678); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 29, 0x91A2B3C); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 30, 0x48D159E); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 31, 0x2468ACF); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 28, 0x12345678); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 29, 0x91A2B3C); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 
30, 0x48D159E); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 31, 0x2468ACF); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 32, 0x1234567); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 32, 0x1234567); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 33, 0x91A2B3); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 34, 0x48D159); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 35, 0x2468AC); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 36, 0x123456); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 33, 0x91A2B3); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 34, 0x48D159); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 35, 0x2468AC); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 36, 0x123456); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 60, 0); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 61, 0); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 62, 0); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 63, 0); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 60, 0); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 61, 0); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 62, 0); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 63, 0); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 0, 0xFEDCBA9876543210); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 1, 0x7F6E5D4C3B2A1908); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 2, 0x3FB72EA61D950C84); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 3, 0x1FDB97530ECA8642); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 4, 0xFEDCBA987654321); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 0, 0xFEDCBA9876543210); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 1, 0x7F6E5D4C3B2A1908); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 2, 0x3FB72EA61D950C84); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 3, 0x1FDB97530ECA8642); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 4, 0xFEDCBA987654321); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 28, 0xFEDCBA987); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 29, 0x7F6E5D4C3); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 30, 0x3FB72EA61); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 31, 0x1FDB97530); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 28, 0xFEDCBA987); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 29, 0x7F6E5D4C3); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 30, 0x3FB72EA61); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 31, 0x1FDB97530); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 32, 0xFEDCBA98); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 32, 0xFEDCBA98); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 33, 0x7F6E5D4C); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 34, 0x3FB72EA6); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 35, 0x1FDB9753); - try 
test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 36, 0xFEDCBA9); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 33, 0x7F6E5D4C); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 34, 0x3FB72EA6); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 35, 0x1FDB9753); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 36, 0xFEDCBA9); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 60, 0xA); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 61, 0x5); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 62, 0x2); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 63, 0x1); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 60, 0xA); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 61, 0x5); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 62, 0x2); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 63, 0x1); } test "lshrti3" { - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 0, 0xFEDCBA9876543215FEDCBA987654321F); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 1, 0x7F6E5D4C3B2A190AFF6E5D4C3B2A190F); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 2, 0x3FB72EA61D950C857FB72EA61D950C87); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 3, 0x1FDB97530ECA8642BFDB97530ECA8643); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 4, 0xFEDCBA9876543215FEDCBA987654321); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 28, 0xFEDCBA9876543215FEDCBA987); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 29, 0x7F6E5D4C3B2A190AFF6E5D4C3); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 30, 0x3FB72EA61D950C857FB72EA61); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 31, 0x1FDB97530ECA8642BFDB97530); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 32, 0xFEDCBA9876543215FEDCBA98); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 33, 0x7F6E5D4C3B2A190AFF6E5D4C); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 34, 0x3FB72EA61D950C857FB72EA6); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 35, 0x1FDB97530ECA8642BFDB9753); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 36, 0xFEDCBA9876543215FEDCBA9); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 60, 0xFEDCBA9876543215F); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 61, 0x7F6E5D4C3B2A190AF); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 62, 0x3FB72EA61D950C857); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 63, 0x1FDB97530ECA8642B); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 64, 0xFEDCBA9876543215); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 65, 0x7F6E5D4C3B2A190A); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 66, 0x3FB72EA61D950C85); - try test__lshrti3(@bitCast(i128, @as(u128, 
0xFEDCBA9876543215FEDCBA987654321F)), 67, 0x1FDB97530ECA8642); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 68, 0xFEDCBA987654321); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 92, 0xFEDCBA987); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 93, 0x7F6E5D4C3); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 94, 0x3FB72EA61); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 95, 0x1FDB97530); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 96, 0xFEDCBA98); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 97, 0x7F6E5D4C); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 98, 0x3FB72EA6); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 99, 0x1FDB9753); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 100, 0xFEDCBA9); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 124, 0xF); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 125, 0x7); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 126, 0x3); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 127, 0x1); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 0, 0xFEDCBA9876543215FEDCBA987654321F); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 1, 0x7F6E5D4C3B2A190AFF6E5D4C3B2A190F); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 2, 0x3FB72EA61D950C857FB72EA61D950C87); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 3, 0x1FDB97530ECA8642BFDB97530ECA8643); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 4, 0xFEDCBA9876543215FEDCBA987654321); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 28, 0xFEDCBA9876543215FEDCBA987); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 29, 0x7F6E5D4C3B2A190AFF6E5D4C3); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 30, 0x3FB72EA61D950C857FB72EA61); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 31, 0x1FDB97530ECA8642BFDB97530); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 32, 0xFEDCBA9876543215FEDCBA98); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 33, 0x7F6E5D4C3B2A190AFF6E5D4C); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 34, 0x3FB72EA61D950C857FB72EA6); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 35, 0x1FDB97530ECA8642BFDB9753); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 36, 0xFEDCBA9876543215FEDCBA9); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 60, 0xFEDCBA9876543215F); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 61, 0x7F6E5D4C3B2A190AF); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 62, 0x3FB72EA61D950C857); + try test__lshrti3(@as(i128, 
@bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 63, 0x1FDB97530ECA8642B); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 64, 0xFEDCBA9876543215); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 65, 0x7F6E5D4C3B2A190A); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 66, 0x3FB72EA61D950C85); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 67, 0x1FDB97530ECA8642); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 68, 0xFEDCBA987654321); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 92, 0xFEDCBA987); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 93, 0x7F6E5D4C3); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 94, 0x3FB72EA61); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 95, 0x1FDB97530); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 96, 0xFEDCBA98); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 97, 0x7F6E5D4C); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 98, 0x3FB72EA6); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 99, 0x1FDB9753); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 100, 0xFEDCBA9); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 124, 0xF); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 125, 0x7); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 126, 0x3); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 127, 0x1); } diff --git a/lib/compiler_rt/sin.zig b/lib/compiler_rt/sin.zig index eb3d64b0c8..40c8287b87 100644 --- a/lib/compiler_rt/sin.zig +++ b/lib/compiler_rt/sin.zig @@ -31,7 +31,7 @@ comptime { pub fn __sinh(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, sinf(x)); + return @as(f16, @floatCast(sinf(x))); } pub fn sinf(x: f32) callconv(.C) f32 { @@ -41,7 +41,7 @@ pub fn sinf(x: f32) callconv(.C) f32 { const s3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2 const s4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18 - var ix = @bitCast(u32, x); + var ix = @as(u32, @bitCast(x)); const sign = ix >> 31 != 0; ix &= 0x7fffffff; @@ -90,7 +90,7 @@ pub fn sinf(x: f32) callconv(.C) f32 { } pub fn sin(x: f64) callconv(.C) f64 { - var ix = @bitCast(u64, x) >> 32; + var ix = @as(u64, @bitCast(x)) >> 32; ix &= 0x7fffffff; // |x| ~< pi/4 @@ -120,12 +120,12 @@ pub fn sin(x: f64) callconv(.C) f64 { pub fn __sinx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, sinq(x)); + return @as(f80, @floatCast(sinq(x))); } pub fn sinq(x: f128) callconv(.C) f128 { // TODO: more correct implementation - return sin(@floatCast(f64, x)); + return sin(@as(f64, @floatCast(x))); } pub fn sinl(x: c_longdouble) callconv(.C) c_longdouble { @@ -180,11 +180,11 @@ test "sin64.special" { } test "sin32 #9901" { - const float = @bitCast(f32, @as(u32, 0b11100011111111110000000000000000)); + const float = @as(f32, @bitCast(@as(u32, 0b11100011111111110000000000000000))); _ = sinf(float); 
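    // A minimal illustration (not from the original test) of the pattern applied
    // throughout this patch: the cast builtins no longer take a destination type,
    // so the result type comes from context, here via the enclosing `@as`:
    //   before: @bitCast(u32, @as(f32, 1.0))
    //   after:  @as(u32, @bitCast(@as(f32, 1.0)))
    // Both produce 0x3F800000, the IEEE-754 single-precision encoding of 1.0.
    _ = @as(u32, @bitCast(@as(f32, 1.0)));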
} test "sin64 #9901" { - const float = @bitCast(f64, @as(u64, 0b1111111101000001000000001111110111111111100000000000000000000001)); + const float = @as(f64, @bitCast(@as(u64, 0b1111111101000001000000001111110111111111100000000000000000000001))); _ = sin(float); } diff --git a/lib/compiler_rt/sincos.zig b/lib/compiler_rt/sincos.zig index 769c8d8389..ffe67e0b33 100644 --- a/lib/compiler_rt/sincos.zig +++ b/lib/compiler_rt/sincos.zig @@ -26,8 +26,8 @@ pub fn __sincosh(x: f16, r_sin: *f16, r_cos: *f16) callconv(.C) void { var big_sin: f32 = undefined; var big_cos: f32 = undefined; sincosf(x, &big_sin, &big_cos); - r_sin.* = @floatCast(f16, big_sin); - r_cos.* = @floatCast(f16, big_cos); + r_sin.* = @as(f16, @floatCast(big_sin)); + r_cos.* = @as(f16, @floatCast(big_cos)); } pub fn sincosf(x: f32, r_sin: *f32, r_cos: *f32) callconv(.C) void { @@ -36,7 +36,7 @@ pub fn sincosf(x: f32, r_sin: *f32, r_cos: *f32) callconv(.C) void { const sc3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2 const sc4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18 - const pre_ix = @bitCast(u32, x); + const pre_ix = @as(u32, @bitCast(x)); const sign = pre_ix >> 31 != 0; const ix = pre_ix & 0x7fffffff; @@ -126,7 +126,7 @@ pub fn sincosf(x: f32, r_sin: *f32, r_cos: *f32) callconv(.C) void { } pub fn sincos(x: f64, r_sin: *f64, r_cos: *f64) callconv(.C) void { - const ix = @truncate(u32, @bitCast(u64, x) >> 32) & 0x7fffffff; + const ix = @as(u32, @truncate(@as(u64, @bitCast(x)) >> 32)) & 0x7fffffff; // |x| ~< pi/4 if (ix <= 0x3fe921fb) { @@ -182,8 +182,8 @@ pub fn __sincosx(x: f80, r_sin: *f80, r_cos: *f80) callconv(.C) void { var big_sin: f128 = undefined; var big_cos: f128 = undefined; sincosq(x, &big_sin, &big_cos); - r_sin.* = @floatCast(f80, big_sin); - r_cos.* = @floatCast(f80, big_cos); + r_sin.* = @as(f80, @floatCast(big_sin)); + r_cos.* = @as(f80, @floatCast(big_cos)); } pub fn sincosq(x: f128, r_sin: *f128, r_cos: *f128) callconv(.C) void { @@ -191,7 +191,7 @@ pub fn sincosq(x: f128, r_sin: *f128, r_cos: *f128) callconv(.C) void { //return sincos_generic(f128, x, r_sin, r_cos); var small_sin: f64 = undefined; var small_cos: f64 = undefined; - sincos(@floatCast(f64, x), &small_sin, &small_cos); + sincos(@as(f64, @floatCast(x)), &small_sin, &small_cos); r_sin.* = small_sin; r_cos.* = small_cos; } @@ -217,8 +217,8 @@ inline fn sincos_generic(comptime F: type, x: F, r_sin: *F, r_cos: *F) void { const sc1pio4: F = 1.0 * math.pi / 4.0; const bits = @typeInfo(F).Float.bits; const I = std.meta.Int(.unsigned, bits); - const ix = @bitCast(I, x) & (math.maxInt(I) >> 1); - const se = @truncate(u16, ix >> (bits - 16)); + const ix = @as(I, @bitCast(x)) & (math.maxInt(I) >> 1); + const se = @as(u16, @truncate(ix >> (bits - 16))); if (se == 0x7fff) { const result = x - x; @@ -227,7 +227,7 @@ inline fn sincos_generic(comptime F: type, x: F, r_sin: *F, r_cos: *F) void { return; } - if (@bitCast(F, ix) < sc1pio4) { + if (@as(F, @bitCast(ix)) < sc1pio4) { if (se < 0x3fff - math.floatFractionalBits(F) - 1) { // raise underflow if subnormal if (se == 0) { diff --git a/lib/compiler_rt/sqrt.zig b/lib/compiler_rt/sqrt.zig index 2ec9c39e0b..0dbd673306 100644 --- a/lib/compiler_rt/sqrt.zig +++ b/lib/compiler_rt/sqrt.zig @@ -20,13 +20,13 @@ comptime { pub fn __sqrth(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, sqrtf(x)); + return @as(f16, @floatCast(sqrtf(x))); } pub fn sqrtf(x: f32) callconv(.C) f32 { const tiny: f32 = 1.0e-30; - const sign: i32 = @bitCast(i32, @as(u32, 
0x80000000)); - var ix: i32 = @bitCast(i32, x); + const sign: i32 = @as(i32, @bitCast(@as(u32, 0x80000000))); + var ix: i32 = @as(i32, @bitCast(x)); if ((ix & 0x7F800000) == 0x7F800000) { return x * x + x; // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = snan @@ -96,7 +96,7 @@ pub fn sqrtf(x: f32) callconv(.C) f32 { ix = (q >> 1) + 0x3f000000; ix += m << 23; - return @bitCast(f32, ix); + return @as(f32, @bitCast(ix)); } /// NOTE: The original code is full of implicit signed -> unsigned assumptions and u32 wraparound @@ -105,10 +105,10 @@ pub fn sqrtf(x: f32) callconv(.C) f32 { pub fn sqrt(x: f64) callconv(.C) f64 { const tiny: f64 = 1.0e-300; const sign: u32 = 0x80000000; - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); - var ix0 = @intCast(u32, u >> 32); - var ix1 = @intCast(u32, u & 0xFFFFFFFF); + var ix0 = @as(u32, @intCast(u >> 32)); + var ix1 = @as(u32, @intCast(u & 0xFFFFFFFF)); // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = nan if (ix0 & 0x7FF00000 == 0x7FF00000) { @@ -125,7 +125,7 @@ pub fn sqrt(x: f64) callconv(.C) f64 { } // normalize x - var m = @intCast(i32, ix0 >> 20); + var m = @as(i32, @intCast(ix0 >> 20)); if (m == 0) { // subnormal while (ix0 == 0) { @@ -139,9 +139,9 @@ pub fn sqrt(x: f64) callconv(.C) f64 { while (ix0 & 0x00100000 == 0) : (i += 1) { ix0 <<= 1; } - m -= @intCast(i32, i) - 1; - ix0 |= ix1 >> @intCast(u5, 32 - i); - ix1 <<= @intCast(u5, i); + m -= @as(i32, @intCast(i)) - 1; + ix0 |= ix1 >> @as(u5, @intCast(32 - i)); + ix1 <<= @as(u5, @intCast(i)); } // unbias exponent @@ -225,21 +225,21 @@ pub fn sqrt(x: f64) callconv(.C) f64 { // NOTE: musl here appears to rely on signed twos-complement wraparound. +% has the same // behaviour at least. - var iix0 = @intCast(i32, ix0); + var iix0 = @as(i32, @intCast(ix0)); iix0 = iix0 +% (m << 20); - const uz = (@intCast(u64, iix0) << 32) | ix1; - return @bitCast(f64, uz); + const uz = (@as(u64, @intCast(iix0)) << 32) | ix1; + return @as(f64, @bitCast(uz)); } pub fn __sqrtx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, sqrtq(x)); + return @as(f80, @floatCast(sqrtq(x))); } pub fn sqrtq(x: f128) callconv(.C) f128 { // TODO: more correct implementation - return sqrt(@floatCast(f64, x)); + return sqrt(@as(f64, @floatCast(x))); } pub fn sqrtl(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/subdf3.zig b/lib/compiler_rt/subdf3.zig index a7630b6ea2..31e3447298 100644 --- a/lib/compiler_rt/subdf3.zig +++ b/lib/compiler_rt/subdf3.zig @@ -11,11 +11,11 @@ comptime { } fn __subdf3(a: f64, b: f64) callconv(.C) f64 { - const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63)); + const neg_b = @as(f64, @bitCast(@as(u64, @bitCast(b)) ^ (@as(u64, 1) << 63))); return a + neg_b; } fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 { - const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63)); + const neg_b = @as(f64, @bitCast(@as(u64, @bitCast(b)) ^ (@as(u64, 1) << 63))); return a + neg_b; } diff --git a/lib/compiler_rt/subhf3.zig b/lib/compiler_rt/subhf3.zig index f1d648102b..5f84f32725 100644 --- a/lib/compiler_rt/subhf3.zig +++ b/lib/compiler_rt/subhf3.zig @@ -7,6 +7,6 @@ comptime { } fn __subhf3(a: f16, b: f16) callconv(.C) f16 { - const neg_b = @bitCast(f16, @bitCast(u16, b) ^ (@as(u16, 1) << 15)); + const neg_b = @as(f16, @bitCast(@as(u16, @bitCast(b)) ^ (@as(u16, 1) << 15))); return a + neg_b; } diff --git a/lib/compiler_rt/subsf3.zig b/lib/compiler_rt/subsf3.zig index fbc48ead41..f94d9802d1 100644 --- 
a/lib/compiler_rt/subsf3.zig +++ b/lib/compiler_rt/subsf3.zig @@ -11,11 +11,11 @@ comptime { } fn __subsf3(a: f32, b: f32) callconv(.C) f32 { - const neg_b = @bitCast(f32, @bitCast(u32, b) ^ (@as(u32, 1) << 31)); + const neg_b = @as(f32, @bitCast(@as(u32, @bitCast(b)) ^ (@as(u32, 1) << 31))); return a + neg_b; } fn __aeabi_fsub(a: f32, b: f32) callconv(.AAPCS) f32 { - const neg_b = @bitCast(f32, @bitCast(u32, b) ^ (@as(u32, 1) << 31)); + const neg_b = @as(f32, @bitCast(@as(u32, @bitCast(b)) ^ (@as(u32, 1) << 31))); return a + neg_b; } diff --git a/lib/compiler_rt/subtf3.zig b/lib/compiler_rt/subtf3.zig index 0008905c94..ee6383a07d 100644 --- a/lib/compiler_rt/subtf3.zig +++ b/lib/compiler_rt/subtf3.zig @@ -20,6 +20,6 @@ fn _Qp_sub(c: *f128, a: *const f128, b: *const f128) callconv(.C) void { } inline fn sub(a: f128, b: f128) f128 { - const neg_b = @bitCast(f128, @bitCast(u128, b) ^ (@as(u128, 1) << 127)); + const neg_b = @as(f128, @bitCast(@as(u128, @bitCast(b)) ^ (@as(u128, 1) << 127))); return a + neg_b; } diff --git a/lib/compiler_rt/tan.zig b/lib/compiler_rt/tan.zig index d6ed881afc..79bda60915 100644 --- a/lib/compiler_rt/tan.zig +++ b/lib/compiler_rt/tan.zig @@ -33,7 +33,7 @@ comptime { pub fn __tanh(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, tanf(x)); + return @as(f16, @floatCast(tanf(x))); } pub fn tanf(x: f32) callconv(.C) f32 { @@ -43,7 +43,7 @@ pub fn tanf(x: f32) callconv(.C) f32 { const t3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2 const t4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18 - var ix = @bitCast(u32, x); + var ix = @as(u32, @bitCast(x)); const sign = ix >> 31 != 0; ix &= 0x7fffffff; @@ -81,7 +81,7 @@ pub fn tanf(x: f32) callconv(.C) f32 { } pub fn tan(x: f64) callconv(.C) f64 { - var ix = @bitCast(u64, x) >> 32; + var ix = @as(u64, @bitCast(x)) >> 32; ix &= 0x7fffffff; // |x| ~< pi/4 @@ -106,12 +106,12 @@ pub fn tan(x: f64) callconv(.C) f64 { pub fn __tanx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, tanq(x)); + return @as(f80, @floatCast(tanq(x))); } pub fn tanq(x: f128) callconv(.C) f128 { // TODO: more correct implementation - return tan(@floatCast(f64, x)); + return tan(@as(f64, @floatCast(x))); } pub fn tanl(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/trig.zig b/lib/compiler_rt/trig.zig index 4a9629e5c0..375f70ddff 100644 --- a/lib/compiler_rt/trig.zig +++ b/lib/compiler_rt/trig.zig @@ -70,7 +70,7 @@ pub fn __cosdf(x: f64) f32 { const z = x * x; const w = z * z; const r = C2 + z * C3; - return @floatCast(f32, ((1.0 + z * C0) + w * C1) + (w * z) * r); + return @as(f32, @floatCast(((1.0 + z * C0) + w * C1) + (w * z) * r)); } /// kernel sin function on ~[-pi/4, pi/4] (except on -0), pi/4 ~ 0.7854 @@ -131,7 +131,7 @@ pub fn __sindf(x: f64) f32 { const w = z * z; const r = S3 + z * S4; const s = z * x; - return @floatCast(f32, (x + s * (S1 + z * S2)) + s * w * r); + return @as(f32, @floatCast((x + s * (S1 + z * S2)) + s * w * r)); } /// kernel tan function on ~[-pi/4, pi/4] (except on -0), pi/4 ~ 0.7854 @@ -199,7 +199,7 @@ pub fn __tan(x_: f64, y_: f64, odd: bool) f64 { var hx: u32 = undefined; var sign: bool = undefined; - hx = @intCast(u32, @bitCast(u64, x) >> 32); + hx = @as(u32, @intCast(@as(u64, @bitCast(x)) >> 32)); const big = (hx & 0x7fffffff) >= 0x3FE59428; // |x| >= 0.6744 if (big) { sign = hx >> 31 != 0; @@ -222,7 +222,7 @@ pub fn __tan(x_: f64, y_: f64, odd: bool) f64 { r = y + z * (s * (r + v) + y) + s * 
T[0]; w = x + r; if (big) { - s = 1 - 2 * @floatFromInt(f64, @intFromBool(odd)); + s = 1 - 2 * @as(f64, @floatFromInt(@intFromBool(odd))); v = s - 2.0 * (x + (r - w * w / (w + s))); return if (sign) -v else v; } @@ -231,11 +231,11 @@ pub fn __tan(x_: f64, y_: f64, odd: bool) f64 { } // -1.0/(x+r) has up to 2ulp error, so compute it accurately w0 = w; - w0 = @bitCast(f64, @bitCast(u64, w0) & 0xffffffff00000000); + w0 = @as(f64, @bitCast(@as(u64, @bitCast(w0)) & 0xffffffff00000000)); v = r - (w0 - x); // w0+v = r+x a = -1.0 / w; a0 = a; - a0 = @bitCast(f64, @bitCast(u64, a0) & 0xffffffff00000000); + a0 = @as(f64, @bitCast(@as(u64, @bitCast(a0)) & 0xffffffff00000000)); return a0 + a * (1.0 + a0 * w0 + a0 * v); } @@ -269,5 +269,5 @@ pub fn __tandf(x: f64, odd: bool) f32 { const s = z * x; const u = T[0] + z * T[1]; const r0 = (x + s * u) + (s * w) * (t + w * r); - return @floatCast(f32, if (odd) -1.0 / r0 else r0); + return @as(f32, @floatCast(if (odd) -1.0 / r0 else r0)); } diff --git a/lib/compiler_rt/trunc.zig b/lib/compiler_rt/trunc.zig index 8c66ba69e7..031f2eb65c 100644 --- a/lib/compiler_rt/trunc.zig +++ b/lib/compiler_rt/trunc.zig @@ -27,12 +27,12 @@ comptime { pub fn __trunch(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, truncf(x)); + return @as(f16, @floatCast(truncf(x))); } pub fn truncf(x: f32) callconv(.C) f32 { - const u = @bitCast(u32, x); - var e = @intCast(i32, ((u >> 23) & 0xFF)) - 0x7F + 9; + const u = @as(u32, @bitCast(x)); + var e = @as(i32, @intCast(((u >> 23) & 0xFF))) - 0x7F + 9; var m: u32 = undefined; if (e >= 23 + 9) { @@ -42,18 +42,18 @@ pub fn truncf(x: f32) callconv(.C) f32 { e = 1; } - m = @as(u32, math.maxInt(u32)) >> @intCast(u5, e); + m = @as(u32, math.maxInt(u32)) >> @as(u5, @intCast(e)); if (u & m == 0) { return x; } else { math.doNotOptimizeAway(x + 0x1p120); - return @bitCast(f32, u & ~m); + return @as(f32, @bitCast(u & ~m)); } } pub fn trunc(x: f64) callconv(.C) f64 { - const u = @bitCast(u64, x); - var e = @intCast(i32, ((u >> 52) & 0x7FF)) - 0x3FF + 12; + const u = @as(u64, @bitCast(x)); + var e = @as(i32, @intCast(((u >> 52) & 0x7FF))) - 0x3FF + 12; var m: u64 = undefined; if (e >= 52 + 12) { @@ -63,23 +63,23 @@ pub fn trunc(x: f64) callconv(.C) f64 { e = 1; } - m = @as(u64, math.maxInt(u64)) >> @intCast(u6, e); + m = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(e)); if (u & m == 0) { return x; } else { math.doNotOptimizeAway(x + 0x1p120); - return @bitCast(f64, u & ~m); + return @as(f64, @bitCast(u & ~m)); } } pub fn __truncx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, truncq(x)); + return @as(f80, @floatCast(truncq(x))); } pub fn truncq(x: f128) callconv(.C) f128 { - const u = @bitCast(u128, x); - var e = @intCast(i32, ((u >> 112) & 0x7FFF)) - 0x3FFF + 16; + const u = @as(u128, @bitCast(x)); + var e = @as(i32, @intCast(((u >> 112) & 0x7FFF))) - 0x3FFF + 16; var m: u128 = undefined; if (e >= 112 + 16) { @@ -89,12 +89,12 @@ pub fn truncq(x: f128) callconv(.C) f128 { e = 1; } - m = @as(u128, math.maxInt(u128)) >> @intCast(u7, e); + m = @as(u128, math.maxInt(u128)) >> @as(u7, @intCast(e)); if (u & m == 0) { return x; } else { math.doNotOptimizeAway(x + 0x1p120); - return @bitCast(f128, u & ~m); + return @as(f128, @bitCast(u & ~m)); } } diff --git a/lib/compiler_rt/truncdfhf2.zig b/lib/compiler_rt/truncdfhf2.zig index e76ad2ce62..ce849a8b9e 100644 --- a/lib/compiler_rt/truncdfhf2.zig +++ b/lib/compiler_rt/truncdfhf2.zig @@ -12,9 +12,9 @@ comptime { } pub fn 
__truncdfhf2(a: f64) callconv(.C) common.F16T(f64) { - return @bitCast(common.F16T(f64), truncf(f16, f64, a)); + return @as(common.F16T(f64), @bitCast(truncf(f16, f64, a))); } fn __aeabi_d2h(a: f64) callconv(.AAPCS) u16 { - return @bitCast(common.F16T(f64), truncf(f16, f64, a)); + return @as(common.F16T(f64), @bitCast(truncf(f16, f64, a))); } diff --git a/lib/compiler_rt/truncf.zig b/lib/compiler_rt/truncf.zig index 3de342fc99..49c7cd11e1 100644 --- a/lib/compiler_rt/truncf.zig +++ b/lib/compiler_rt/truncf.zig @@ -38,7 +38,7 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t const dstNaNCode = dstQNaN - 1; // Break a into a sign and representation of the absolute value - const aRep: src_rep_t = @bitCast(src_rep_t, a); + const aRep: src_rep_t = @as(src_rep_t, @bitCast(a)); const aAbs: src_rep_t = aRep & srcAbsMask; const sign: src_rep_t = aRep & srcSignMask; var absResult: dst_rep_t = undefined; @@ -47,7 +47,7 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t // The exponent of a is within the range of normal numbers in the // destination format. We can convert by simply right-shifting with // rounding and adjusting the exponent. - absResult = @truncate(dst_rep_t, aAbs >> (srcSigBits - dstSigBits)); + absResult = @as(dst_rep_t, @truncate(aAbs >> (srcSigBits - dstSigBits))); absResult -%= @as(dst_rep_t, srcExpBias - dstExpBias) << dstSigBits; const roundBits: src_rep_t = aAbs & roundMask; @@ -62,18 +62,18 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t // a is NaN. // Conjure the result by beginning with infinity, setting the qNaN // bit and inserting the (truncated) trailing NaN field. - absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits; + absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits; absResult |= dstQNaN; - absResult |= @intCast(dst_rep_t, ((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode); + absResult |= @as(dst_rep_t, @intCast(((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode)); } else if (aAbs >= overflow) { // a overflows to infinity. - absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits; + absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits; } else { // a underflows on conversion to the destination type or is an exact // zero. The result may be a denormal or zero. Extract the exponent // to get the shift amount for the denormalization. 
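        // A worked instance of the arithmetic below, assuming an f32 -> f16
        // truncation (srcExpBias = 127, dstExpBias = 15, srcSigBits = 23,
        // dstSigBits = 10): for a = 2^-20 the stored exponent is aExp = 107,
        // so shift = 127 - 15 - 107 + 1 = 6. The significand 0x800000, shifted
        // right by 6 and then by srcSigBits - dstSigBits = 13 bits, leaves
        // absResult = 0x10, the f16 subnormal encoding of 2^-20.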
- const aExp = @intCast(u32, aAbs >> srcSigBits); - const shift = @intCast(u32, srcExpBias - dstExpBias - aExp + 1); + const aExp = @as(u32, @intCast(aAbs >> srcSigBits)); + const shift = @as(u32, @intCast(srcExpBias - dstExpBias - aExp + 1)); const significand: src_rep_t = (aRep & srcSignificandMask) | srcMinNormal; @@ -81,9 +81,9 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t if (shift > srcSigBits) { absResult = 0; } else { - const sticky: src_rep_t = @intFromBool(significand << @intCast(SrcShift, srcBits - shift) != 0); - const denormalizedSignificand: src_rep_t = significand >> @intCast(SrcShift, shift) | sticky; - absResult = @intCast(dst_rep_t, denormalizedSignificand >> (srcSigBits - dstSigBits)); + const sticky: src_rep_t = @intFromBool(significand << @as(SrcShift, @intCast(srcBits - shift)) != 0); + const denormalizedSignificand: src_rep_t = significand >> @as(SrcShift, @intCast(shift)) | sticky; + absResult = @as(dst_rep_t, @intCast(denormalizedSignificand >> (srcSigBits - dstSigBits))); const roundBits: src_rep_t = denormalizedSignificand & roundMask; if (roundBits > halfway) { // Round to nearest @@ -96,8 +96,8 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t } const result: dst_rep_t align(@alignOf(dst_t)) = absResult | - @truncate(dst_rep_t, sign >> @intCast(SrcShift, srcBits - dstBits)); - return @bitCast(dst_t, result); + @as(dst_rep_t, @truncate(sign >> @as(SrcShift, @intCast(srcBits - dstBits)))); + return @as(dst_t, @bitCast(result)); } pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t { @@ -133,7 +133,7 @@ pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t { // destination format. We can convert by simply right-shifting with // rounding and adjusting the exponent. abs_result = @as(dst_rep_t, a_rep.exp) << dst_sig_bits; - abs_result |= @truncate(dst_rep_t, a_rep.fraction >> (src_sig_bits - dst_sig_bits)); + abs_result |= @as(dst_rep_t, @truncate(a_rep.fraction >> (src_sig_bits - dst_sig_bits))); abs_result -%= @as(dst_rep_t, src_exp_bias - dst_exp_bias) << dst_sig_bits; const round_bits = a_rep.fraction & round_mask; @@ -148,12 +148,12 @@ pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t { // a is NaN. // Conjure the result by beginning with infinity, setting the qNaN // bit and inserting the (truncated) trailing NaN field. - abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits; + abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits; abs_result |= dst_qnan; - abs_result |= @intCast(dst_rep_t, (a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask); + abs_result |= @as(dst_rep_t, @intCast((a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask)); } else if (a_rep.exp >= overflow) { // a overflows to infinity. - abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits; + abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits; } else { // a underflows on conversion to the destination type or is an exact // zero. The result may be a denormal or zero. 
Extract the exponent @@ -164,9 +164,9 @@ pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t { if (shift > src_sig_bits) { abs_result = 0; } else { - const sticky = @intFromBool(a_rep.fraction << @intCast(u6, shift) != 0); - const denormalized_significand = a_rep.fraction >> @intCast(u6, shift) | sticky; - abs_result = @intCast(dst_rep_t, denormalized_significand >> (src_sig_bits - dst_sig_bits)); + const sticky = @intFromBool(a_rep.fraction << @as(u6, @intCast(shift)) != 0); + const denormalized_significand = a_rep.fraction >> @as(u6, @intCast(shift)) | sticky; + abs_result = @as(dst_rep_t, @intCast(denormalized_significand >> (src_sig_bits - dst_sig_bits))); const round_bits = denormalized_significand & round_mask; if (round_bits > halfway) { // Round to nearest @@ -179,7 +179,7 @@ pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t { } const result align(@alignOf(dst_t)) = abs_result | @as(dst_rep_t, sign) << dst_bits - 16; - return @bitCast(dst_t, result); + return @as(dst_t, @bitCast(result)); } test { diff --git a/lib/compiler_rt/truncf_test.zig b/lib/compiler_rt/truncf_test.zig index d4e93cd114..fd1ee9b38a 100644 --- a/lib/compiler_rt/truncf_test.zig +++ b/lib/compiler_rt/truncf_test.zig @@ -10,7 +10,7 @@ const __trunctfdf2 = @import("trunctfdf2.zig").__trunctfdf2; const __trunctfxf2 = @import("trunctfxf2.zig").__trunctfxf2; fn test__truncsfhf2(a: u32, expected: u16) !void { - const actual = @bitCast(u16, __truncsfhf2(@bitCast(f32, a))); + const actual = @as(u16, @bitCast(__truncsfhf2(@as(f32, @bitCast(a))))); if (actual == expected) { return; @@ -73,7 +73,7 @@ test "truncsfhf2" { } fn test__truncdfhf2(a: f64, expected: u16) void { - const rep = @bitCast(u16, __truncdfhf2(a)); + const rep = @as(u16, @bitCast(__truncdfhf2(a))); if (rep == expected) { return; @@ -89,7 +89,7 @@ fn test__truncdfhf2(a: f64, expected: u16) void { } fn test__truncdfhf2_raw(a: u64, expected: u16) void { - const actual = @bitCast(u16, __truncdfhf2(@bitCast(f64, a))); + const actual = @as(u16, @bitCast(__truncdfhf2(@as(f64, @bitCast(a))))); if (actual == expected) { return; @@ -141,7 +141,7 @@ test "truncdfhf2" { fn test__trunctfsf2(a: f128, expected: u32) void { const x = __trunctfsf2(a); - const rep = @bitCast(u32, x); + const rep = @as(u32, @bitCast(x)); if (rep == expected) { return; } @@ -157,11 +157,11 @@ fn test__trunctfsf2(a: f128, expected: u32) void { test "trunctfsf2" { // qnan - test__trunctfsf2(@bitCast(f128, @as(u128, 0x7fff800000000000 << 64)), 0x7fc00000); + test__trunctfsf2(@as(f128, @bitCast(@as(u128, 0x7fff800000000000 << 64))), 0x7fc00000); // nan - test__trunctfsf2(@bitCast(f128, @as(u128, (0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64)), 0x7fc08000); + test__trunctfsf2(@as(f128, @bitCast(@as(u128, (0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64))), 0x7fc08000); // inf - test__trunctfsf2(@bitCast(f128, @as(u128, 0x7fff000000000000 << 64)), 0x7f800000); + test__trunctfsf2(@as(f128, @bitCast(@as(u128, 0x7fff000000000000 << 64))), 0x7f800000); // zero test__trunctfsf2(0.0, 0x0); @@ -174,7 +174,7 @@ test "trunctfsf2" { fn test__trunctfdf2(a: f128, expected: u64) void { const x = __trunctfdf2(a); - const rep = @bitCast(u64, x); + const rep = @as(u64, @bitCast(x)); if (rep == expected) { return; } @@ -190,11 +190,11 @@ fn test__trunctfdf2(a: f128, expected: u64) void { test "trunctfdf2" { // qnan - test__trunctfdf2(@bitCast(f128, @as(u128, 0x7fff800000000000 << 64)), 0x7ff8000000000000); + test__trunctfdf2(@as(f128, @bitCast(@as(u128, 
0x7fff800000000000 << 64))), 0x7ff8000000000000); // nan - test__trunctfdf2(@bitCast(f128, @as(u128, (0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64)), 0x7ff8100000000000); + test__trunctfdf2(@as(f128, @bitCast(@as(u128, (0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64))), 0x7ff8100000000000); // inf - test__trunctfdf2(@bitCast(f128, @as(u128, 0x7fff000000000000 << 64)), 0x7ff0000000000000); + test__trunctfdf2(@as(f128, @bitCast(@as(u128, 0x7fff000000000000 << 64))), 0x7ff0000000000000); // zero test__trunctfdf2(0.0, 0x0); @@ -207,7 +207,7 @@ test "trunctfdf2" { fn test__truncdfsf2(a: f64, expected: u32) void { const x = __truncdfsf2(a); - const rep = @bitCast(u32, x); + const rep = @as(u32, @bitCast(x)); if (rep == expected) { return; } @@ -225,11 +225,11 @@ fn test__truncdfsf2(a: f64, expected: u32) void { test "truncdfsf2" { // nan & qnan - test__truncdfsf2(@bitCast(f64, @as(u64, 0x7ff8000000000000)), 0x7fc00000); - test__truncdfsf2(@bitCast(f64, @as(u64, 0x7ff0000000000001)), 0x7fc00000); + test__truncdfsf2(@as(f64, @bitCast(@as(u64, 0x7ff8000000000000))), 0x7fc00000); + test__truncdfsf2(@as(f64, @bitCast(@as(u64, 0x7ff0000000000001))), 0x7fc00000); // inf - test__truncdfsf2(@bitCast(f64, @as(u64, 0x7ff0000000000000)), 0x7f800000); - test__truncdfsf2(@bitCast(f64, @as(u64, 0xfff0000000000000)), 0xff800000); + test__truncdfsf2(@as(f64, @bitCast(@as(u64, 0x7ff0000000000000))), 0x7f800000); + test__truncdfsf2(@as(f64, @bitCast(@as(u64, 0xfff0000000000000))), 0xff800000); test__truncdfsf2(0.0, 0x0); test__truncdfsf2(1.0, 0x3f800000); @@ -242,7 +242,7 @@ test "truncdfsf2" { fn test__trunctfhf2(a: f128, expected: u16) void { const x = __trunctfhf2(a); - const rep = @bitCast(u16, x); + const rep = @as(u16, @bitCast(x)); if (rep == expected) { return; } @@ -254,12 +254,12 @@ fn test__trunctfhf2(a: f128, expected: u16) void { test "trunctfhf2" { // qNaN - test__trunctfhf2(@bitCast(f128, @as(u128, 0x7fff8000000000000000000000000000)), 0x7e00); + test__trunctfhf2(@as(f128, @bitCast(@as(u128, 0x7fff8000000000000000000000000000))), 0x7e00); // NaN - test__trunctfhf2(@bitCast(f128, @as(u128, 0x7fff0000000000000000000000000001)), 0x7e00); + test__trunctfhf2(@as(f128, @bitCast(@as(u128, 0x7fff0000000000000000000000000001))), 0x7e00); // inf - test__trunctfhf2(@bitCast(f128, @as(u128, 0x7fff0000000000000000000000000000)), 0x7c00); - test__trunctfhf2(-@bitCast(f128, @as(u128, 0x7fff0000000000000000000000000000)), 0xfc00); + test__trunctfhf2(@as(f128, @bitCast(@as(u128, 0x7fff0000000000000000000000000000))), 0x7c00); + test__trunctfhf2(-@as(f128, @bitCast(@as(u128, 0x7fff0000000000000000000000000000))), 0xfc00); // zero test__trunctfhf2(0.0, 0x0); test__trunctfhf2(-0.0, 0x8000); diff --git a/lib/compiler_rt/truncsfhf2.zig b/lib/compiler_rt/truncsfhf2.zig index 77dd0ba642..c747d8e37a 100644 --- a/lib/compiler_rt/truncsfhf2.zig +++ b/lib/compiler_rt/truncsfhf2.zig @@ -13,13 +13,13 @@ comptime { } pub fn __truncsfhf2(a: f32) callconv(.C) common.F16T(f32) { - return @bitCast(common.F16T(f32), truncf(f16, f32, a)); + return @as(common.F16T(f32), @bitCast(truncf(f16, f32, a))); } fn __gnu_f2h_ieee(a: f32) callconv(.C) common.F16T(f32) { - return @bitCast(common.F16T(f32), truncf(f16, f32, a)); + return @as(common.F16T(f32), @bitCast(truncf(f16, f32, a))); } fn __aeabi_f2h(a: f32) callconv(.AAPCS) u16 { - return @bitCast(common.F16T(f32), truncf(f16, f32, a)); + return @as(common.F16T(f32), @bitCast(truncf(f16, f32, a))); } diff --git a/lib/compiler_rt/trunctfhf2.zig 
b/lib/compiler_rt/trunctfhf2.zig index e9cc19da18..9c7a3b6dba 100644 --- a/lib/compiler_rt/trunctfhf2.zig +++ b/lib/compiler_rt/trunctfhf2.zig @@ -8,5 +8,5 @@ comptime { } pub fn __trunctfhf2(a: f128) callconv(.C) common.F16T(f128) { - return @bitCast(common.F16T(f128), truncf(f16, f128, a)); + return @as(common.F16T(f128), @bitCast(truncf(f16, f128, a))); } diff --git a/lib/compiler_rt/trunctfxf2.zig b/lib/compiler_rt/trunctfxf2.zig index 018057f213..8478446b51 100644 --- a/lib/compiler_rt/trunctfxf2.zig +++ b/lib/compiler_rt/trunctfxf2.zig @@ -25,7 +25,7 @@ pub fn __trunctfxf2(a: f128) callconv(.C) f80 { const halfway = 1 << (src_sig_bits - dst_sig_bits - 1); // Break a into a sign and representation of the absolute value - const a_rep = @bitCast(u128, a); + const a_rep = @as(u128, @bitCast(a)); const a_abs = a_rep & src_abs_mask; const sign: u16 = if (a_rep & src_sign_mask != 0) 0x8000 else 0; const integer_bit = 1 << 63; @@ -38,13 +38,13 @@ pub fn __trunctfxf2(a: f128) callconv(.C) f80 { // bit and inserting the (truncated) trailing NaN field. res.exp = 0x7fff; res.fraction = 0x8000000000000000; - res.fraction |= @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits)); + res.fraction |= @as(u64, @truncate(a_abs >> (src_sig_bits - dst_sig_bits))); } else { // The exponent of a is within the range of normal numbers in the // destination format. We can convert by simply right-shifting with // rounding, adding the explicit integer bit, and adjusting the exponent - res.fraction = @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits)) | integer_bit; - res.exp = @truncate(u16, a_abs >> src_sig_bits); + res.fraction = @as(u64, @truncate(a_abs >> (src_sig_bits - dst_sig_bits))) | integer_bit; + res.exp = @as(u16, @truncate(a_abs >> src_sig_bits)); const round_bits = a_abs & round_mask; if (round_bits > halfway) { diff --git a/lib/compiler_rt/truncxfhf2.zig b/lib/compiler_rt/truncxfhf2.zig index 31965d3e2a..6dbeca7637 100644 --- a/lib/compiler_rt/truncxfhf2.zig +++ b/lib/compiler_rt/truncxfhf2.zig @@ -8,5 +8,5 @@ comptime { } fn __truncxfhf2(a: f80) callconv(.C) common.F16T(f80) { - return @bitCast(common.F16T(f80), trunc_f80(f16, a)); + return @as(common.F16T(f80), @bitCast(trunc_f80(f16, a))); } diff --git a/lib/compiler_rt/udivmod.zig b/lib/compiler_rt/udivmod.zig index a83ece8ada..0e2a7d9ed1 100644 --- a/lib/compiler_rt/udivmod.zig +++ b/lib/compiler_rt/udivmod.zig @@ -21,11 +21,11 @@ fn divwide_generic(comptime T: type, _u1: T, _u0: T, v_: T, r: *T) T { var un64: T = undefined; var un10: T = undefined; - const s = @intCast(Log2Int(T), @clz(v)); + const s = @as(Log2Int(T), @intCast(@clz(v))); if (s > 0) { // Normalize divisor v <<= s; - un64 = (_u1 << s) | (_u0 >> @intCast(Log2Int(T), (@bitSizeOf(T) - @intCast(T, s)))); + un64 = (_u1 << s) | (_u0 >> @as(Log2Int(T), @intCast((@bitSizeOf(T) - @as(T, @intCast(s)))))); un10 = _u0 << s; } else { // Avoid undefined behavior of (u0 >> @bitSizeOf(T)) @@ -101,8 +101,8 @@ pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T { return 0; } - var a = @bitCast([2]HalfT, a_); - var b = @bitCast([2]HalfT, b_); + var a = @as([2]HalfT, @bitCast(a_)); + var b = @as([2]HalfT, @bitCast(b_)); var q: [2]HalfT = undefined; var r: [2]HalfT = undefined; @@ -119,16 +119,16 @@ pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T { q[lo] = divwide(HalfT, a[hi] % b[lo], a[lo], b[lo], &r[lo]); } if (maybe_rem) |rem| { - rem.* = @bitCast(T, r); + rem.* = @as(T, @bitCast(r)); } - return @bitCast(T, q); + return @as(T, @bitCast(q)); } // 0 <= shift <= 
63 var shift: Log2Int(T) = @clz(b[hi]) - @clz(a[hi]); - var af = @bitCast(T, a); - var bf = @bitCast(T, b) << shift; - q = @bitCast([2]HalfT, @as(T, 0)); + var af = @as(T, @bitCast(a)); + var bf = @as(T, @bitCast(b)) << shift; + q = @as([2]HalfT, @bitCast(@as(T, 0))); for (0..shift + 1) |_| { q[lo] <<= 1; @@ -137,13 +137,13 @@ pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T { // af -= bf; // q[lo] |= 1; // } - const s = @bitCast(SignedT, bf -% af -% 1) >> (@bitSizeOf(T) - 1); - q[lo] |= @intCast(HalfT, s & 1); - af -= bf & @bitCast(T, s); + const s = @as(SignedT, @bitCast(bf -% af -% 1)) >> (@bitSizeOf(T) - 1); + q[lo] |= @as(HalfT, @intCast(s & 1)); + af -= bf & @as(T, @bitCast(s)); bf >>= 1; } if (maybe_rem) |rem| { - rem.* = @bitCast(T, af); + rem.* = @as(T, @bitCast(af)); } - return @bitCast(T, q); + return @as(T, @bitCast(q)); } diff --git a/lib/compiler_rt/udivmodei4.zig b/lib/compiler_rt/udivmodei4.zig index 354a926b81..f222c13a4c 100644 --- a/lib/compiler_rt/udivmodei4.zig +++ b/lib/compiler_rt/udivmodei4.zig @@ -83,23 +83,23 @@ fn divmod(q: ?[]u32, r: ?[]u32, u: []const u32, v: []const u32) !void { i = 0; while (i <= n) : (i += 1) { const p = qhat * limb(&vn, i); - const t = limb(&un, i + j) - carry - @truncate(u32, p); - limb_set(&un, i + j, @truncate(u32, @bitCast(u64, t))); - carry = @intCast(i64, p >> 32) - @intCast(i64, t >> 32); + const t = limb(&un, i + j) - carry - @as(u32, @truncate(p)); + limb_set(&un, i + j, @as(u32, @truncate(@as(u64, @bitCast(t))))); + carry = @as(i64, @intCast(p >> 32)) - @as(i64, @intCast(t >> 32)); } const t = limb(&un, j + n + 1) -% carry; - limb_set(&un, j + n + 1, @truncate(u32, @bitCast(u64, t))); - if (q) |q_| limb_set(q_, j, @truncate(u32, qhat)); + limb_set(&un, j + n + 1, @as(u32, @truncate(@as(u64, @bitCast(t))))); + if (q) |q_| limb_set(q_, j, @as(u32, @truncate(qhat))); if (t < 0) { if (q) |q_| limb_set(q_, j, limb(q_, j) - 1); var carry2: u64 = 0; i = 0; while (i <= n) : (i += 1) { const t2 = @as(u64, limb(&un, i + j)) + @as(u64, limb(&vn, i)) + carry2; - limb_set(&un, i + j, @truncate(u32, t2)); + limb_set(&un, i + j, @as(u32, @truncate(t2))); carry2 = t2 >> 32; } - limb_set(&un, j + n + 1, @truncate(u32, limb(&un, j + n + 1) + carry2)); + limb_set(&un, j + n + 1, @as(u32, @truncate(limb(&un, j + n + 1) + carry2))); } if (j == 0) break; } diff --git a/lib/compiler_rt/udivmodti4.zig b/lib/compiler_rt/udivmodti4.zig index 29523fc6e8..8f4748fa7d 100644 --- a/lib/compiler_rt/udivmodti4.zig +++ b/lib/compiler_rt/udivmodti4.zig @@ -20,7 +20,7 @@ pub fn __udivmodti4(a: u128, b: u128, maybe_rem: ?*u128) callconv(.C) u128 { const v2u64 = @Vector(2, u64); fn __udivmodti4_windows_x86_64(a: v2u64, b: v2u64, maybe_rem: ?*u128) callconv(.C) v2u64 { - return @bitCast(v2u64, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), maybe_rem)); + return @as(v2u64, @bitCast(udivmod(u128, @as(u128, @bitCast(a)), @as(u128, @bitCast(b)), maybe_rem))); } test { diff --git a/lib/compiler_rt/udivti3.zig b/lib/compiler_rt/udivti3.zig index 748e9b6599..1205d65336 100644 --- a/lib/compiler_rt/udivti3.zig +++ b/lib/compiler_rt/udivti3.zig @@ -20,5 +20,5 @@ pub fn __udivti3(a: u128, b: u128) callconv(.C) u128 { const v2u64 = @Vector(2, u64); fn __udivti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 { - return @bitCast(v2u64, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), null)); + return @as(v2u64, @bitCast(udivmod(u128, @as(u128, @bitCast(a)), @as(u128, @bitCast(b)), null))); } diff --git a/lib/compiler_rt/umodti3.zig 
b/lib/compiler_rt/umodti3.zig index 097f9a3855..41de97d2bb 100644 --- a/lib/compiler_rt/umodti3.zig +++ b/lib/compiler_rt/umodti3.zig @@ -23,6 +23,6 @@ const v2u64 = @Vector(2, u64); fn __umodti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 { var r: u128 = undefined; - _ = udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), &r); - return @bitCast(v2u64, r); + _ = udivmod(u128, @as(u128, @bitCast(a)), @as(u128, @bitCast(b)), &r); + return @as(v2u64, @bitCast(r)); } diff --git a/lib/ssp.zig b/lib/ssp.zig index f75c4d1a55..4f8eba567f 100644 --- a/lib/ssp.zig +++ b/lib/ssp.zig @@ -46,7 +46,7 @@ export var __stack_chk_guard: usize = blk: { var buf = [1]u8{0} ** @sizeOf(usize); buf[@sizeOf(usize) - 1] = 255; buf[@sizeOf(usize) - 2] = '\n'; - break :blk @bitCast(usize, buf); + break :blk @as(usize, @bitCast(buf)); }; export fn __strcpy_chk(dest: [*:0]u8, src: [*:0]const u8, dest_n: usize) callconv(.C) [*:0]u8 { diff --git a/lib/std/Build.zig b/lib/std/Build.zig index c569e0074a..a411ddc500 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1111,7 +1111,7 @@ pub fn standardTargetOptions(self: *Build, args: StandardTargetOptionsArgs) Cros var populated_cpu_features = whitelist_cpu.model.features; populated_cpu_features.populateDependencies(all_features); for (all_features, 0..) |feature, i_usize| { - const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize); + const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize)); const in_cpu_set = populated_cpu_features.isEnabled(i); if (in_cpu_set) { log.err("{s} ", .{feature.name}); @@ -1119,7 +1119,7 @@ pub fn standardTargetOptions(self: *Build, args: StandardTargetOptionsArgs) Cros } log.err(" Remove: ", .{}); for (all_features, 0..) |feature, i_usize| { - const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize); + const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize)); const in_cpu_set = populated_cpu_features.isEnabled(i); const in_actual_set = selected_cpu.features.isEnabled(i); if (in_actual_set and !in_cpu_set) { @@ -1442,13 +1442,13 @@ pub fn execAllowFail( switch (term) { .Exited => |code| { if (code != 0) { - out_code.* = @truncate(u8, code); + out_code.* = @as(u8, @truncate(code)); return error.ExitCodeFailure; } return stdout; }, .Signal, .Stopped, .Unknown => |code| { - out_code.* = @truncate(u8, code); + out_code.* = @as(u8, @truncate(code)); return error.ProcessTerminated; }, } @@ -1815,7 +1815,7 @@ pub fn serializeCpu(allocator: Allocator, cpu: std.Target.Cpu) ![]const u8 { try mcpu_buffer.appendSlice(cpu.model.name); for (all_features, 0..) 
|feature, i_usize| { - const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize); + const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize)); const in_cpu_set = populated_cpu_features.isEnabled(i); const in_actual_set = cpu.features.isEnabled(i); if (in_cpu_set and !in_actual_set) { @@ -1852,7 +1852,7 @@ pub fn hex64(x: u64) [16]u8 { var result: [16]u8 = undefined; var i: usize = 0; while (i < 8) : (i += 1) { - const byte = @truncate(u8, x >> @intCast(u6, 8 * i)); + const byte = @as(u8, @truncate(x >> @as(u6, @intCast(8 * i)))); result[i * 2 + 0] = hex_charset[byte >> 4]; result[i * 2 + 1] = hex_charset[byte & 15]; } diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 3b7f180ae8..b0db88692c 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -128,7 +128,7 @@ fn findPrefixResolved(cache: *const Cache, resolved_path: []u8) !PrefixedPath { const sub_path = try gpa.dupe(u8, resolved_path[p.len + 1 ..]); gpa.free(resolved_path); return PrefixedPath{ - .prefix = @intCast(u8, i), + .prefix = @as(u8, @intCast(i)), .sub_path = sub_path, }; } @@ -653,7 +653,7 @@ pub const Manifest = struct { return error.FileTooBig; } - const contents = try self.cache.gpa.alloc(u8, @intCast(usize, ch_file.stat.size)); + const contents = try self.cache.gpa.alloc(u8, @as(usize, @intCast(ch_file.stat.size))); errdefer self.cache.gpa.free(contents); // Hash while reading from disk, to keep the contents in the cpu cache while diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index a0d7a6a296..f21ef8bc8f 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -355,7 +355,7 @@ pub fn evalZigProcess( }, .error_bundle => { const EbHdr = std.zig.Server.Message.ErrorBundle; - const eb_hdr = @ptrCast(*align(1) const EbHdr, body); + const eb_hdr = @as(*align(1) const EbHdr, @ptrCast(body)); const extra_bytes = body[@sizeOf(EbHdr)..][0 .. 
@sizeOf(u32) * eb_hdr.extra_len]; const string_bytes = @@ -377,7 +377,7 @@ pub fn evalZigProcess( }, .emit_bin_path => { const EbpHdr = std.zig.Server.Message.EmitBinPath; - const ebp_hdr = @ptrCast(*align(1) const EbpHdr, body); + const ebp_hdr = @as(*align(1) const EbpHdr, @ptrCast(body)); s.result_cached = ebp_hdr.flags.cache_hit; result = try arena.dupe(u8, body[@sizeOf(EbpHdr)..]); }, diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig index 1c2d86e4e3..171734c450 100644 --- a/lib/std/Build/Step/CheckObject.zig +++ b/lib/std/Build/Step/CheckObject.zig @@ -449,9 +449,9 @@ const MachODumper = struct { }, .SYMTAB => if (opts.dump_symtab) { const lc = cmd.cast(macho.symtab_command).?; - symtab = @ptrCast( + symtab = @as( [*]const macho.nlist_64, - @alignCast(@alignOf(macho.nlist_64), &bytes[lc.symoff]), + @ptrCast(@alignCast(&bytes[lc.symoff])), )[0..lc.nsyms]; strtab = bytes[lc.stroff..][0..lc.strsize]; }, @@ -474,7 +474,7 @@ const MachODumper = struct { try writer.print("{s}\n", .{symtab_label}); for (symtab) |sym| { if (sym.stab()) continue; - const sym_name = mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + sym.n_strx), 0); + const sym_name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + sym.n_strx)), 0); if (sym.sect()) { const sect = sections.items[sym.n_sect - 1]; try writer.print("{x} ({s},{s})", .{ @@ -487,7 +487,7 @@ const MachODumper = struct { } try writer.print(" {s}\n", .{sym_name}); } else if (sym.undf()) { - const ordinal = @divTrunc(@bitCast(i16, sym.n_desc), macho.N_SYMBOL_RESOLVER); + const ordinal = @divTrunc(@as(i16, @bitCast(sym.n_desc)), macho.N_SYMBOL_RESOLVER); const import_name = blk: { if (ordinal <= 0) { if (ordinal == macho.BIND_SPECIAL_DYLIB_SELF) @@ -498,7 +498,7 @@ const MachODumper = struct { break :blk "flat lookup"; unreachable; } - const full_path = imports.items[@bitCast(u16, ordinal) - 1]; + const full_path = imports.items[@as(u16, @bitCast(ordinal)) - 1]; const basename = fs.path.basename(full_path); assert(basename.len > 0); const ext = mem.lastIndexOfScalar(u8, basename, '.') orelse basename.len; @@ -950,8 +950,8 @@ const WasmDumper = struct { switch (opcode) { .i32_const => try writer.print("i32.const {x}\n", .{try std.leb.readILEB128(i32, reader)}), .i64_const => try writer.print("i64.const {x}\n", .{try std.leb.readILEB128(i64, reader)}), - .f32_const => try writer.print("f32.const {x}\n", .{@bitCast(f32, try reader.readIntLittle(u32))}), - .f64_const => try writer.print("f64.const {x}\n", .{@bitCast(f64, try reader.readIntLittle(u64))}), + .f32_const => try writer.print("f32.const {x}\n", .{@as(f32, @bitCast(try reader.readIntLittle(u32)))}), + .f64_const => try writer.print("f64.const {x}\n", .{@as(f64, @bitCast(try reader.readIntLittle(u64)))}), .global_get => try writer.print("global.get {x}\n", .{try std.leb.readULEB128(u32, reader)}), else => unreachable, } diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index 89576c15fa..58973d08d0 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -321,7 +321,7 @@ pub const BuildId = union(enum) { pub fn initHexString(bytes: []const u8) BuildId { var result: BuildId = .{ .hexstring = .{ .bytes = undefined, - .len = @intCast(u8, bytes.len), + .len = @as(u8, @intCast(bytes.len)), } }; @memcpy(result.hexstring.bytes[0..bytes.len], bytes); return result; @@ -342,7 +342,7 @@ pub const BuildId = union(enum) { } else if (mem.startsWith(u8, text, "0x")) { var result: BuildId = .{ .hexstring = undefined }; const 
slice = try std.fmt.hexToBytes(&result.hexstring.bytes, text[2..]); - result.hexstring.len = @intCast(u8, slice.len); + result.hexstring.len = @as(u8, @intCast(slice.len)); return result; } return error.InvalidBuildIdStyle; @@ -2059,7 +2059,7 @@ fn findVcpkgRoot(allocator: Allocator) !?[]const u8 { const file = fs.cwd().openFile(path_file, .{}) catch return null; defer file.close(); - const size = @intCast(usize, try file.getEndPos()); + const size = @as(usize, @intCast(try file.getEndPos())); const vcpkg_path = try allocator.alloc(u8, size); const size_read = try file.read(vcpkg_path); std.debug.assert(size == size_read); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index c574dbb5af..3d81873308 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -998,7 +998,7 @@ fn evalZigTest( }, .test_metadata => { const TmHdr = std.zig.Server.Message.TestMetadata; - const tm_hdr = @ptrCast(*align(1) const TmHdr, body); + const tm_hdr = @as(*align(1) const TmHdr, @ptrCast(body)); test_count = tm_hdr.tests_len; const names_bytes = body[@sizeOf(TmHdr)..][0 .. test_count * @sizeOf(u32)]; @@ -1034,7 +1034,7 @@ fn evalZigTest( const md = metadata.?; const TrHdr = std.zig.Server.Message.TestResults; - const tr_hdr = @ptrCast(*align(1) const TrHdr, body); + const tr_hdr = @as(*align(1) const TrHdr, @ptrCast(body)); fail_count += @intFromBool(tr_hdr.flags.fail); skip_count += @intFromBool(tr_hdr.flags.skip); leak_count += @intFromBool(tr_hdr.flags.leak); diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index e3c5fc20dd..e0bb28569d 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -232,14 +232,14 @@ fn clearWithHeldLock(p: *Progress, end_ptr: *usize) void { } var cursor_pos = windows.COORD{ - .X = info.dwCursorPosition.X - @intCast(windows.SHORT, p.columns_written), + .X = info.dwCursorPosition.X - @as(windows.SHORT, @intCast(p.columns_written)), .Y = info.dwCursorPosition.Y, }; if (cursor_pos.X < 0) cursor_pos.X = 0; - const fill_chars = @intCast(windows.DWORD, info.dwSize.X - cursor_pos.X); + const fill_chars = @as(windows.DWORD, @intCast(info.dwSize.X - cursor_pos.X)); var written: windows.DWORD = undefined; if (windows.kernel32.FillConsoleOutputAttribute( diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index f16f8a9a79..a3b469ad6f 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -66,7 +66,7 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void { if (self.getHandle() == std.c.pthread_self()) { // Set the name of the calling thread (no thread id required). const err = try os.prctl(.SET_NAME, .{@intFromPtr(name_with_terminator.ptr)}); - switch (@enumFromInt(os.E, err)) { + switch (@as(os.E, @enumFromInt(err))) { .SUCCESS => return, else => |e| return os.unexpectedErrno(e), } @@ -176,7 +176,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co if (self.getHandle() == std.c.pthread_self()) { // Get the name of the calling thread (no thread id required). 
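These Thread hunks apply the same migration to @enumFromInt: the enum type now comes from the result location rather than a builtin argument. A rough sketch using a stand-in enum (E below is illustrative, not os.E):

const std = @import("std");

// E is a hypothetical stand-in for an errno enum such as os.E.
const E = enum(u16) { SUCCESS = 0, PERM = 1, INVAL = 22 };

fn check(raw_errno: usize) !void {
    // The enum type is supplied by @as rather than passed to the builtin.
    switch (@as(E, @enumFromInt(raw_errno))) {
        .SUCCESS => return,
        else => return error.UnexpectedErrno,
    }
}

test "errno 0 maps to SUCCESS" {
    try check(0);
    try std.testing.expectError(error.UnexpectedErrno, check(22));
}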
const err = try os.prctl(.GET_NAME, .{@intFromPtr(buffer.ptr)}); - switch (@enumFromInt(os.E, err)) { + switch (@as(os.E, @enumFromInt(err))) { .SUCCESS => return std.mem.sliceTo(buffer, 0), else => |e| return os.unexpectedErrno(e), } @@ -211,7 +211,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co null, )) { .SUCCESS => { - const string = @ptrCast(*const os.windows.UNICODE_STRING, &buf); + const string = @as(*const os.windows.UNICODE_STRING, @ptrCast(&buf)); const len = try std.unicode.utf16leToUtf8(buffer, string.Buffer[0 .. string.Length / 2]); return if (len > 0) buffer[0..len] else null; }, @@ -510,7 +510,7 @@ const WindowsThreadImpl = struct { thread: ThreadCompletion, fn entryFn(raw_ptr: windows.PVOID) callconv(.C) windows.DWORD { - const self = @ptrCast(*@This(), @alignCast(@alignOf(@This()), raw_ptr)); + const self: *@This() = @ptrCast(@alignCast(raw_ptr)); defer switch (self.thread.completion.swap(.completed, .SeqCst)) { .running => {}, .completed => unreachable, @@ -525,7 +525,7 @@ const WindowsThreadImpl = struct { const alloc_ptr = windows.kernel32.HeapAlloc(heap_handle, 0, alloc_bytes) orelse return error.OutOfMemory; errdefer assert(windows.kernel32.HeapFree(heap_handle, 0, alloc_ptr) != 0); - const instance_bytes = @ptrCast([*]u8, alloc_ptr)[0..alloc_bytes]; + const instance_bytes = @as([*]u8, @ptrCast(alloc_ptr))[0..alloc_bytes]; var fba = std.heap.FixedBufferAllocator.init(instance_bytes); const instance = fba.allocator().create(Instance) catch unreachable; instance.* = .{ @@ -547,7 +547,7 @@ const WindowsThreadImpl = struct { null, stack_size, Instance.entryFn, - @ptrCast(*anyopaque, instance), + @as(*anyopaque, @ptrCast(instance)), 0, null, ) orelse { @@ -596,19 +596,19 @@ const PosixThreadImpl = struct { return thread_id; }, .dragonfly => { - return @bitCast(u32, c.lwp_gettid()); + return @as(u32, @bitCast(c.lwp_gettid())); }, .netbsd => { - return @bitCast(u32, c._lwp_self()); + return @as(u32, @bitCast(c._lwp_self())); }, .freebsd => { - return @bitCast(u32, c.pthread_getthreadid_np()); + return @as(u32, @bitCast(c.pthread_getthreadid_np())); }, .openbsd => { - return @bitCast(u32, c.getthrid()); + return @as(u32, @bitCast(c.getthrid())); }, .haiku => { - return @bitCast(u32, c.find_thread(null)); + return @as(u32, @bitCast(c.find_thread(null))); }, else => { return @intFromPtr(c.pthread_self()); @@ -629,7 +629,7 @@ const PosixThreadImpl = struct { error.NameTooLong, error.UnknownName => unreachable, else => |e| return e, }; - return @intCast(usize, count); + return @as(usize, @intCast(count)); }, .solaris => { // The "proper" way to get the cpu count would be to query @@ -637,7 +637,7 @@ const PosixThreadImpl = struct { // cpu. 
const rc = c.sysconf(os._SC.NPROCESSORS_ONLN); return switch (os.errno(rc)) { - .SUCCESS => @intCast(usize, rc), + .SUCCESS => @as(usize, @intCast(rc)), else => |err| os.unexpectedErrno(err), }; }, @@ -645,7 +645,7 @@ const PosixThreadImpl = struct { var system_info: os.system.system_info = undefined; const rc = os.system.get_system_info(&system_info); // always returns B_OK return switch (os.errno(rc)) { - .SUCCESS => @intCast(usize, system_info.cpu_count), + .SUCCESS => @as(usize, @intCast(system_info.cpu_count)), else => |err| os.unexpectedErrno(err), }; }, @@ -657,7 +657,7 @@ const PosixThreadImpl = struct { error.NameTooLong, error.UnknownName => unreachable, else => |e| return e, }; - return @intCast(usize, count); + return @as(usize, @intCast(count)); }, } } @@ -675,7 +675,7 @@ const PosixThreadImpl = struct { return callFn(f, @as(Args, undefined)); } - const args_ptr = @ptrCast(*Args, @alignCast(@alignOf(Args), raw_arg)); + const args_ptr: *Args = @ptrCast(@alignCast(raw_arg)); defer allocator.destroy(args_ptr); return callFn(f, args_ptr.*); } @@ -699,7 +699,7 @@ const PosixThreadImpl = struct { &handle, &attr, Instance.entryFn, - if (@sizeOf(Args) > 1) @ptrCast(*anyopaque, args_ptr) else undefined, + if (@sizeOf(Args) > 1) @as(*anyopaque, @ptrCast(args_ptr)) else undefined, )) { .SUCCESS => return Impl{ .handle = handle }, .AGAIN => return error.SystemResources, @@ -742,7 +742,7 @@ const LinuxThreadImpl = struct { fn getCurrentId() Id { return tls_thread_id orelse { - const tid = @bitCast(u32, linux.gettid()); + const tid = @as(u32, @bitCast(linux.gettid())); tls_thread_id = tid; return tid; }; @@ -911,7 +911,7 @@ const LinuxThreadImpl = struct { thread: ThreadCompletion, fn entryFn(raw_arg: usize) callconv(.C) u8 { - const self = @ptrFromInt(*@This(), raw_arg); + const self = @as(*@This(), @ptrFromInt(raw_arg)); defer switch (self.thread.completion.swap(.completed, .SeqCst)) { .running => {}, .completed => unreachable, @@ -969,7 +969,7 @@ const LinuxThreadImpl = struct { // map everything but the guard page as read/write os.mprotect( - @alignCast(page_size, mapped[guard_offset..]), + @alignCast(mapped[guard_offset..]), os.PROT.READ | os.PROT.WRITE, ) catch |err| switch (err) { error.AccessDenied => unreachable, @@ -994,7 +994,7 @@ const LinuxThreadImpl = struct { }; } - const instance = @ptrCast(*Instance, @alignCast(@alignOf(Instance), &mapped[instance_offset])); + const instance: *Instance = @ptrCast(@alignCast(&mapped[instance_offset])); instance.* = .{ .fn_args = args, .thread = .{ .mapped = mapped }, diff --git a/lib/std/Thread/Futex.zig b/lib/std/Thread/Futex.zig index 61e39eba27..768442539b 100644 --- a/lib/std/Thread/Futex.zig +++ b/lib/std/Thread/Futex.zig @@ -128,14 +128,14 @@ const WindowsImpl = struct { // NTDLL functions work with time in units of 100 nanoseconds. // Positive values are absolute deadlines while negative values are relative durations. 
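To make the 100-nanosecond unit convention concrete, a small sketch of the conversion (the 250 ms delay is an arbitrary example, and i64 stands in for os.windows.LARGE_INTEGER):

const std = @import("std");

test "a relative NT timeout is a negative count of 100ns intervals" {
    const delay_ns: u64 = 250 * std.time.ns_per_ms; // hypothetical 250 ms wait
    var timeout_value = @as(i64, @intCast(delay_ns / 100));
    timeout_value = -timeout_value; // negative means relative; positive would be an absolute deadline
    try std.testing.expectEqual(@as(i64, -2_500_000), timeout_value);
}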
if (timeout) |delay| { - timeout_value = @intCast(os.windows.LARGE_INTEGER, delay / 100); + timeout_value = @as(os.windows.LARGE_INTEGER, @intCast(delay / 100)); timeout_value = -timeout_value; timeout_ptr = &timeout_value; } const rc = os.windows.ntdll.RtlWaitOnAddress( - @ptrCast(?*const anyopaque, ptr), - @ptrCast(?*const anyopaque, &expect), + @as(?*const anyopaque, @ptrCast(ptr)), + @as(?*const anyopaque, @ptrCast(&expect)), @sizeOf(@TypeOf(expect)), timeout_ptr, ); @@ -151,7 +151,7 @@ const WindowsImpl = struct { } fn wake(ptr: *const Atomic(u32), max_waiters: u32) void { - const address = @ptrCast(?*const anyopaque, ptr); + const address = @as(?*const anyopaque, @ptrCast(ptr)); assert(max_waiters != 0); switch (max_waiters) { @@ -186,7 +186,7 @@ const DarwinImpl = struct { // true so that we we know to ignore the ETIMEDOUT result. var timeout_overflowed = false; - const addr = @ptrCast(*const anyopaque, ptr); + const addr = @as(*const anyopaque, @ptrCast(ptr)); const flags = os.darwin.UL_COMPARE_AND_WAIT | os.darwin.ULF_NO_ERRNO; const status = blk: { if (supports_ulock_wait2) { @@ -202,7 +202,7 @@ const DarwinImpl = struct { }; if (status >= 0) return; - switch (@enumFromInt(std.os.E, -status)) { + switch (@as(std.os.E, @enumFromInt(-status))) { // Wait was interrupted by the OS or other spurious signalling. .INTR => {}, // Address of the futex was paged out. This is unlikely, but possible in theory, and @@ -225,11 +225,11 @@ const DarwinImpl = struct { } while (true) { - const addr = @ptrCast(*const anyopaque, ptr); + const addr = @as(*const anyopaque, @ptrCast(ptr)); const status = os.darwin.__ulock_wake(flags, addr, 0); if (status >= 0) return; - switch (@enumFromInt(std.os.E, -status)) { + switch (@as(std.os.E, @enumFromInt(-status))) { .INTR => continue, // spurious wake() .FAULT => unreachable, // __ulock_wake doesn't generate EFAULT according to darwin pthread_cond_t .NOENT => return, // nothing was woken up @@ -245,14 +245,14 @@ const LinuxImpl = struct { fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void { var ts: os.timespec = undefined; if (timeout) |timeout_ns| { - ts.tv_sec = @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s); - ts.tv_nsec = @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s); + ts.tv_sec = @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s)); + ts.tv_nsec = @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s)); } const rc = os.linux.futex_wait( - @ptrCast(*const i32, &ptr.value), + @as(*const i32, @ptrCast(&ptr.value)), os.linux.FUTEX.PRIVATE_FLAG | os.linux.FUTEX.WAIT, - @bitCast(i32, expect), + @as(i32, @bitCast(expect)), if (timeout != null) &ts else null, ); @@ -272,7 +272,7 @@ const LinuxImpl = struct { fn wake(ptr: *const Atomic(u32), max_waiters: u32) void { const rc = os.linux.futex_wake( - @ptrCast(*const i32, &ptr.value), + @as(*const i32, @ptrCast(&ptr.value)), os.linux.FUTEX.PRIVATE_FLAG | os.linux.FUTEX.WAKE, std.math.cast(i32, max_waiters) orelse std.math.maxInt(i32), ); @@ -299,8 +299,8 @@ const FreebsdImpl = struct { tm._flags = 0; // use relative time not UMTX_ABSTIME tm._clockid = os.CLOCK.MONOTONIC; - tm._timeout.tv_sec = @intCast(@TypeOf(tm._timeout.tv_sec), timeout_ns / std.time.ns_per_s); - tm._timeout.tv_nsec = @intCast(@TypeOf(tm._timeout.tv_nsec), timeout_ns % std.time.ns_per_s); + tm._timeout.tv_sec = @as(@TypeOf(tm._timeout.tv_sec), @intCast(timeout_ns / std.time.ns_per_s)); + tm._timeout.tv_nsec = @as(@TypeOf(tm._timeout.tv_nsec), 
@intCast(timeout_ns % std.time.ns_per_s)); } const rc = os.freebsd._umtx_op( @@ -347,14 +347,14 @@ const OpenbsdImpl = struct { fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void { var ts: os.timespec = undefined; if (timeout) |timeout_ns| { - ts.tv_sec = @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s); - ts.tv_nsec = @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s); + ts.tv_sec = @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s)); + ts.tv_nsec = @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s)); } const rc = os.openbsd.futex( - @ptrCast(*const volatile u32, &ptr.value), + @as(*const volatile u32, @ptrCast(&ptr.value)), os.openbsd.FUTEX_WAIT | os.openbsd.FUTEX_PRIVATE_FLAG, - @bitCast(c_int, expect), + @as(c_int, @bitCast(expect)), if (timeout != null) &ts else null, null, // FUTEX_WAIT takes no requeue address ); @@ -377,7 +377,7 @@ const OpenbsdImpl = struct { fn wake(ptr: *const Atomic(u32), max_waiters: u32) void { const rc = os.openbsd.futex( - @ptrCast(*const volatile u32, &ptr.value), + @as(*const volatile u32, @ptrCast(&ptr.value)), os.openbsd.FUTEX_WAKE | os.openbsd.FUTEX_PRIVATE_FLAG, std.math.cast(c_int, max_waiters) orelse std.math.maxInt(c_int), null, // FUTEX_WAKE takes no timeout ptr @@ -411,8 +411,8 @@ const DragonflyImpl = struct { } } - const value = @bitCast(c_int, expect); - const addr = @ptrCast(*const volatile c_int, &ptr.value); + const value = @as(c_int, @bitCast(expect)); + const addr = @as(*const volatile c_int, @ptrCast(&ptr.value)); const rc = os.dragonfly.umtx_sleep(addr, value, timeout_us); switch (os.errno(rc)) { @@ -441,7 +441,7 @@ const DragonflyImpl = struct { // https://man.dragonflybsd.org/?command=umtx§ion=2 // > umtx_wakeup() will generally return 0 unless the address is bad. // We are fine with the address being bad (e.g. for Semaphore.post() where Semaphore.wait() frees the Semaphore) - const addr = @ptrCast(*const volatile c_int, &ptr.value); + const addr = @as(*const volatile c_int, @ptrCast(&ptr.value)); _ = os.dragonfly.umtx_wakeup(addr, to_wake); } }; @@ -488,8 +488,8 @@ const PosixImpl = struct { var ts: os.timespec = undefined; if (timeout) |timeout_ns| { os.clock_gettime(os.CLOCK.REALTIME, &ts) catch unreachable; - ts.tv_sec +|= @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s); - ts.tv_nsec += @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s); + ts.tv_sec +|= @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s)); + ts.tv_nsec += @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s)); if (ts.tv_nsec >= std.time.ns_per_s) { ts.tv_sec +|= 1; diff --git a/lib/std/Thread/Mutex.zig b/lib/std/Thread/Mutex.zig index 9114caaa12..0f618516b5 100644 --- a/lib/std/Thread/Mutex.zig +++ b/lib/std/Thread/Mutex.zig @@ -242,12 +242,12 @@ const NonAtomicCounter = struct { value: [2]u64 = [_]u64{ 0, 0 }, fn get(self: NonAtomicCounter) u128 { - return @bitCast(u128, self.value); + return @as(u128, @bitCast(self.value)); } fn inc(self: *NonAtomicCounter) void { - for (@bitCast([2]u64, self.get() + 1), 0..) |v, i| { - @ptrCast(*volatile u64, &self.value[i]).* = v; + for (@as([2]u64, @bitCast(self.get() + 1)), 0..) 
|v, i| { + @as(*volatile u64, @ptrCast(&self.value[i])).* = v; } } }; diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index d3ad94324e..df4c95cbca 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -49,7 +49,7 @@ pub fn eqlString(a: []const u8, b: []const u8) bool { } pub fn hashString(s: []const u8) u32 { - return @truncate(u32, std.hash.Wyhash.hash(0, s)); + return @as(u32, @truncate(std.hash.Wyhash.hash(0, s))); } /// Insertion order is preserved. @@ -617,7 +617,7 @@ pub fn ArrayHashMapUnmanaged( return .{ .keys = slice.items(.key).ptr, .values = slice.items(.value).ptr, - .len = @intCast(u32, slice.len), + .len = @as(u32, @intCast(slice.len)), }; } pub const Iterator = struct { @@ -1409,7 +1409,7 @@ pub fn ArrayHashMapUnmanaged( indexes: []Index(I), ) void { const slot = self.getSlotByIndex(old_entry_index, ctx, header, I, indexes); - indexes[slot].entry_index = @intCast(I, new_entry_index); + indexes[slot].entry_index = @as(I, @intCast(new_entry_index)); } fn removeFromIndexByIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader) void { @@ -1508,7 +1508,7 @@ pub fn ArrayHashMapUnmanaged( const new_index = self.entries.addOneAssumeCapacity(); indexes[slot] = .{ .distance_from_start_index = distance_from_start_index, - .entry_index = @intCast(I, new_index), + .entry_index = @as(I, @intCast(new_index)), }; // update the hash if applicable @@ -1549,7 +1549,7 @@ pub fn ArrayHashMapUnmanaged( const new_index = self.entries.addOneAssumeCapacity(); if (store_hash) hashes_array.ptr[new_index] = h; indexes[slot] = .{ - .entry_index = @intCast(I, new_index), + .entry_index = @as(I, @intCast(new_index)), .distance_from_start_index = distance_from_start_index, }; distance_from_start_index = slot_data.distance_from_start_index; @@ -1639,7 +1639,7 @@ pub fn ArrayHashMapUnmanaged( const start_index = safeTruncate(usize, h); const end_index = start_index +% indexes.len; var index = start_index; - var entry_index = @intCast(I, i); + var entry_index = @as(I, @intCast(i)); var distance_from_start_index: I = 0; while (index != end_index) : ({ index +%= 1; @@ -1776,7 +1776,7 @@ fn capacityIndexSize(bit_index: u8) usize { fn safeTruncate(comptime T: type, val: anytype) T { if (@bitSizeOf(T) >= @bitSizeOf(@TypeOf(val))) return val; - return @truncate(T, val); + return @as(T, @truncate(val)); } /// A single entry in the lookup acceleration structure. These structs @@ -1852,13 +1852,13 @@ const IndexHeader = struct { fn constrainIndex(header: IndexHeader, i: usize) usize { // This is an optimization for modulo of power of two integers; // it requires `indexes_len` to always be a power of two. - return @intCast(usize, i & header.mask()); + return @as(usize, @intCast(i & header.mask())); } /// Returns the attached array of indexes. I must match the type /// returned by capacityIndexType. 
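The rewritten indexes body below chains @ptrCast and @alignCast to step past the header; a standalone sketch of that layout trick, using a hypothetical Header/Entry pair rather than the real IndexHeader:

const std = @import("std");

// Hypothetical types: the entries live immediately after the header in one allocation.
const Header = extern struct { len: u32 };
const Entry = extern struct { value: u32 };

fn entries(header: *Header) []Entry {
    const base: [*]u8 = @ptrCast(header);
    const start: [*]Entry = @alignCast(@ptrCast(base + @sizeOf(Header)));
    return start[0..header.len];
}

test "entries trail the header in one buffer" {
    var buf align(@alignOf(Header)) = [_]u8{0} ** (@sizeOf(Header) + 2 * @sizeOf(Entry));
    const header: *Header = @ptrCast(&buf);
    header.* = .{ .len = 2 };
    const es = entries(header);
    es[0] = .{ .value = 7 };
    es[1] = .{ .value = 9 };
    try std.testing.expectEqual(@as(u32, 7), es[0].value);
    try std.testing.expectEqual(@as(u32, 9), es[1].value);
}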
fn indexes(header: *IndexHeader, comptime I: type) []Index(I) { - const start_ptr = @ptrCast([*]Index(I), @ptrCast([*]u8, header) + @sizeOf(IndexHeader)); + const start_ptr: [*]Index(I) = @alignCast(@ptrCast(@as([*]u8, @ptrCast(header)) + @sizeOf(IndexHeader))); return start_ptr[0..header.length()]; } @@ -1871,15 +1871,15 @@ const IndexHeader = struct { return index_capacities[self.bit_index]; } fn length(self: IndexHeader) usize { - return @as(usize, 1) << @intCast(math.Log2Int(usize), self.bit_index); + return @as(usize, 1) << @as(math.Log2Int(usize), @intCast(self.bit_index)); } fn mask(self: IndexHeader) u32 { - return @intCast(u32, self.length() - 1); + return @as(u32, @intCast(self.length() - 1)); } fn findBitIndex(desired_capacity: usize) !u8 { if (desired_capacity > max_capacity) return error.OutOfMemory; - var new_bit_index = @intCast(u8, std.math.log2_int_ceil(usize, desired_capacity)); + var new_bit_index = @as(u8, @intCast(std.math.log2_int_ceil(usize, desired_capacity))); if (desired_capacity > index_capacities[new_bit_index]) new_bit_index += 1; if (new_bit_index < min_bit_index) new_bit_index = min_bit_index; assert(desired_capacity <= index_capacities[new_bit_index]); @@ -1889,12 +1889,12 @@ const IndexHeader = struct { /// Allocates an index header, and fills the entryIndexes array with empty. /// The distance array contents are undefined. fn alloc(allocator: Allocator, new_bit_index: u8) !*IndexHeader { - const len = @as(usize, 1) << @intCast(math.Log2Int(usize), new_bit_index); + const len = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(new_bit_index)); const index_size = hash_map.capacityIndexSize(new_bit_index); const nbytes = @sizeOf(IndexHeader) + index_size * len; const bytes = try allocator.alignedAlloc(u8, @alignOf(IndexHeader), nbytes); @memset(bytes[@sizeOf(IndexHeader)..], 0xff); - const result = @ptrCast(*IndexHeader, bytes.ptr); + const result: *IndexHeader = @alignCast(@ptrCast(bytes.ptr)); result.* = .{ .bit_index = new_bit_index, }; @@ -1904,7 +1904,7 @@ const IndexHeader = struct { /// Releases the memory for a header and its associated arrays. fn free(header: *IndexHeader, allocator: Allocator) void { const index_size = hash_map.capacityIndexSize(header.bit_index); - const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header); + const ptr: [*]align(@alignOf(IndexHeader)) u8 = @ptrCast(header); const slice = ptr[0 .. @sizeOf(IndexHeader) + header.length() * index_size]; allocator.free(slice); } @@ -1912,7 +1912,7 @@ const IndexHeader = struct { /// Puts an IndexHeader into the state that it would be in after being freshly allocated. fn reset(header: *IndexHeader) void { const index_size = hash_map.capacityIndexSize(header.bit_index); - const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header); + const ptr: [*]align(@alignOf(IndexHeader)) u8 = @ptrCast(header); const nbytes = @sizeOf(IndexHeader) + header.length() * index_size; @memset(ptr[@sizeOf(IndexHeader)..nbytes], 0xff); } @@ -2020,25 +2020,25 @@ test "iterator hash map" { var count: usize = 0; while (it.next()) |entry| : (count += 1) { - buffer[@intCast(usize, entry.key_ptr.*)] = entry.value_ptr.*; + buffer[@as(usize, @intCast(entry.key_ptr.*))] = entry.value_ptr.*; } try testing.expect(count == 3); try testing.expect(it.next() == null); for (buffer, 0..) 
|_, i| { - try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]); + try testing.expect(buffer[@as(usize, @intCast(keys[i]))] == values[i]); } it.reset(); count = 0; while (it.next()) |entry| { - buffer[@intCast(usize, entry.key_ptr.*)] = entry.value_ptr.*; + buffer[@as(usize, @intCast(entry.key_ptr.*))] = entry.value_ptr.*; count += 1; if (count >= 2) break; } for (buffer[0..2], 0..) |_, i| { - try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]); + try testing.expect(buffer[@as(usize, @intCast(keys[i]))] == values[i]); } it.reset(); @@ -2336,11 +2336,11 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K) fn hash(ctx: Context, key: K) u32 { _ = ctx; if (comptime trait.hasUniqueRepresentation(K)) { - return @truncate(u32, Wyhash.hash(0, std.mem.asBytes(&key))); + return @as(u32, @truncate(Wyhash.hash(0, std.mem.asBytes(&key)))); } else { var hasher = Wyhash.init(0); autoHash(&hasher, key); - return @truncate(u32, hasher.final()); + return @as(u32, @truncate(hasher.final())); } } }.hash; @@ -2380,7 +2380,7 @@ pub fn getAutoHashStratFn(comptime K: type, comptime Context: type, comptime str _ = ctx; var hasher = Wyhash.init(0); std.hash.autoHashStrat(&hasher, key, strategy); - return @truncate(u32, hasher.final()); + return @as(u32, @truncate(hasher.final())); } }.hash; } diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index c2a2486dfa..8f3458481c 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -1123,19 +1123,19 @@ test "std.ArrayList/ArrayListUnmanaged.basic" { { var i: usize = 0; while (i < 10) : (i += 1) { - list.append(@intCast(i32, i + 1)) catch unreachable; + list.append(@as(i32, @intCast(i + 1))) catch unreachable; } } { var i: usize = 0; while (i < 10) : (i += 1) { - try testing.expect(list.items[i] == @intCast(i32, i + 1)); + try testing.expect(list.items[i] == @as(i32, @intCast(i + 1))); } } for (list.items, 0..) |v, i| { - try testing.expect(v == @intCast(i32, i + 1)); + try testing.expect(v == @as(i32, @intCast(i + 1))); } try testing.expect(list.pop() == 10); @@ -1173,19 +1173,19 @@ test "std.ArrayList/ArrayListUnmanaged.basic" { { var i: usize = 0; while (i < 10) : (i += 1) { - list.append(a, @intCast(i32, i + 1)) catch unreachable; + list.append(a, @as(i32, @intCast(i + 1))) catch unreachable; } } { var i: usize = 0; while (i < 10) : (i += 1) { - try testing.expect(list.items[i] == @intCast(i32, i + 1)); + try testing.expect(list.items[i] == @as(i32, @intCast(i + 1))); } } for (list.items, 0..) 
|v, i| { - try testing.expect(v == @intCast(i32, i + 1)); + try testing.expect(v == @as(i32, @intCast(i + 1))); } try testing.expect(list.pop() == 10); diff --git a/lib/std/atomic/Atomic.zig b/lib/std/atomic/Atomic.zig index c3f17421f3..b9e1b18f77 100644 --- a/lib/std/atomic/Atomic.zig +++ b/lib/std/atomic/Atomic.zig @@ -46,7 +46,7 @@ pub fn Atomic(comptime T: type) type { extern "c" fn __tsan_release(addr: *anyopaque) void; }; - const addr = @ptrCast(*anyopaque, self); + const addr = @as(*anyopaque, @ptrCast(self)); return switch (ordering) { .Unordered, .Monotonic => @compileError(@tagName(ordering) ++ " only applies to atomic loads and stores"), .Acquire => tsan.__tsan_acquire(addr), @@ -307,7 +307,7 @@ pub fn Atomic(comptime T: type) type { // TODO: emit appropriate tsan fence if compiling with tsan _ = ordering; - return @intCast(u1, old_bit); + return @as(u1, @intCast(old_bit)); } }); }; @@ -392,8 +392,8 @@ test "Atomic.swap" { try testing.expectEqual(a.load(.SeqCst), true); var b = Atomic(?*u8).init(null); - try testing.expectEqual(b.swap(@ptrFromInt(?*u8, @alignOf(u8)), ordering), null); - try testing.expectEqual(b.load(.SeqCst), @ptrFromInt(?*u8, @alignOf(u8))); + try testing.expectEqual(b.swap(@as(?*u8, @ptrFromInt(@alignOf(u8))), ordering), null); + try testing.expectEqual(b.load(.SeqCst), @as(?*u8, @ptrFromInt(@alignOf(u8)))); } } @@ -544,7 +544,7 @@ test "Atomic.bitSet" { var x = Atomic(Int).init(0); for (0..@bitSizeOf(Int)) |bit_index| { - const bit = @intCast(std.math.Log2Int(Int), bit_index); + const bit = @as(std.math.Log2Int(Int), @intCast(bit_index)); const mask = @as(Int, 1) << bit; // setting the bit should change the bit @@ -558,7 +558,7 @@ test "Atomic.bitSet" { // all the previous bits should have not changed (still be set) for (0..bit_index) |prev_bit_index| { - const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index); + const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index)); const prev_mask = @as(Int, 1) << prev_bit; try testing.expect(x.load(.SeqCst) & prev_mask != 0); } @@ -573,7 +573,7 @@ test "Atomic.bitReset" { var x = Atomic(Int).init(0); for (0..@bitSizeOf(Int)) |bit_index| { - const bit = @intCast(std.math.Log2Int(Int), bit_index); + const bit = @as(std.math.Log2Int(Int), @intCast(bit_index)); const mask = @as(Int, 1) << bit; x.storeUnchecked(x.loadUnchecked() | mask); @@ -588,7 +588,7 @@ test "Atomic.bitReset" { // all the previous bits should have not changed (still be reset) for (0..bit_index) |prev_bit_index| { - const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index); + const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index)); const prev_mask = @as(Int, 1) << prev_bit; try testing.expect(x.load(.SeqCst) & prev_mask == 0); } @@ -603,7 +603,7 @@ test "Atomic.bitToggle" { var x = Atomic(Int).init(0); for (0..@bitSizeOf(Int)) |bit_index| { - const bit = @intCast(std.math.Log2Int(Int), bit_index); + const bit = @as(std.math.Log2Int(Int), @intCast(bit_index)); const mask = @as(Int, 1) << bit; // toggling the bit should change the bit @@ -617,7 +617,7 @@ test "Atomic.bitToggle" { // all the previous bits should have not changed (still be toggled back) for (0..bit_index) |prev_bit_index| { - const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index); + const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index)); const prev_mask = @as(Int, 1) << prev_bit; try testing.expect(x.load(.SeqCst) & prev_mask == 0); } diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig index 
70cb293cf4..78eb746347 100644 --- a/lib/std/atomic/queue.zig +++ b/lib/std/atomic/queue.zig @@ -248,7 +248,7 @@ fn startPuts(ctx: *Context) u8 { const random = prng.random(); while (put_count != 0) : (put_count -= 1) { std.time.sleep(1); // let the os scheduler be our fuzz - const x = @bitCast(i32, random.int(u32)); + const x = @as(i32, @bitCast(random.int(u32))); const node = ctx.allocator.create(Queue(i32).Node) catch unreachable; node.* = .{ .prev = undefined, diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig index 9ad7c76d81..1289217652 100644 --- a/lib/std/atomic/stack.zig +++ b/lib/std/atomic/stack.zig @@ -151,7 +151,7 @@ fn startPuts(ctx: *Context) u8 { const random = prng.random(); while (put_count != 0) : (put_count -= 1) { std.time.sleep(1); // let the os scheduler be our fuzz - const x = @bitCast(i32, random.int(u32)); + const x = @as(i32, @bitCast(random.int(u32))); const node = ctx.allocator.create(Stack(i32).Node) catch unreachable; node.* = Stack(i32).Node{ .next = undefined, diff --git a/lib/std/base64.zig b/lib/std/base64.zig index 869fa47e5e..16e6aa7e8e 100644 --- a/lib/std/base64.zig +++ b/lib/std/base64.zig @@ -108,12 +108,12 @@ pub const Base64Encoder = struct { acc_len += 8; while (acc_len >= 6) { acc_len -= 6; - dest[out_idx] = encoder.alphabet_chars[@truncate(u6, (acc >> acc_len))]; + dest[out_idx] = encoder.alphabet_chars[@as(u6, @truncate((acc >> acc_len)))]; out_idx += 1; } } if (acc_len > 0) { - dest[out_idx] = encoder.alphabet_chars[@truncate(u6, (acc << 6 - acc_len))]; + dest[out_idx] = encoder.alphabet_chars[@as(u6, @truncate((acc << 6 - acc_len)))]; out_idx += 1; } if (encoder.pad_char) |pad_char| { @@ -144,7 +144,7 @@ pub const Base64Decoder = struct { assert(!char_in_alphabet[c]); assert(pad_char == null or c != pad_char.?); - result.char_to_index[c] = @intCast(u8, i); + result.char_to_index[c] = @as(u8, @intCast(i)); char_in_alphabet[c] = true; } return result; @@ -196,7 +196,7 @@ pub const Base64Decoder = struct { acc_len += 6; if (acc_len >= 8) { acc_len -= 8; - dest[dest_idx] = @truncate(u8, acc >> acc_len); + dest[dest_idx] = @as(u8, @truncate(acc >> acc_len)); dest_idx += 1; } } @@ -271,7 +271,7 @@ pub const Base64DecoderWithIgnore = struct { if (acc_len >= 8) { if (dest_idx == dest.len) return error.NoSpaceLeft; acc_len -= 8; - dest[dest_idx] = @truncate(u8, acc >> acc_len); + dest[dest_idx] = @as(u8, @truncate(acc >> acc_len)); dest_idx += 1; } } diff --git a/lib/std/bit_set.zig b/lib/std/bit_set.zig index 4b83e8e057..9e5c707b84 100644 --- a/lib/std/bit_set.zig +++ b/lib/std/bit_set.zig @@ -119,19 +119,19 @@ pub fn IntegerBitSet(comptime size: u16) type { if (range.start == range.end) return; if (MaskInt == u0) return; - const start_bit = @intCast(ShiftInt, range.start); + const start_bit = @as(ShiftInt, @intCast(range.start)); var mask = std.math.boolMask(MaskInt, true) << start_bit; if (range.end != bit_length) { - const end_bit = @intCast(ShiftInt, range.end); - mask &= std.math.boolMask(MaskInt, true) >> @truncate(ShiftInt, @as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit)); + const end_bit = @as(ShiftInt, @intCast(range.end)); + mask &= std.math.boolMask(MaskInt, true) >> @as(ShiftInt, @truncate(@as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit))); } self.mask &= ~mask; mask = std.math.boolMask(MaskInt, value) << start_bit; if (range.end != bit_length) { - const end_bit = @intCast(ShiftInt, range.end); - mask &= std.math.boolMask(MaskInt, value) >> @truncate(ShiftInt, @as(usize, @bitSizeOf(MaskInt)) - @as(usize, 
end_bit)); + const end_bit = @as(ShiftInt, @intCast(range.end)); + mask &= std.math.boolMask(MaskInt, value) >> @as(ShiftInt, @truncate(@as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit))); } self.mask |= mask; } @@ -292,7 +292,7 @@ pub fn IntegerBitSet(comptime size: u16) type { .reverse => { const leading_zeroes = @clz(self.bits_remain); const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes; - self.bits_remain &= (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1; + self.bits_remain &= (@as(MaskInt, 1) << @as(ShiftInt, @intCast(top_bit))) - 1; return top_bit; }, } @@ -302,11 +302,11 @@ pub fn IntegerBitSet(comptime size: u16) type { fn maskBit(index: usize) MaskInt { if (MaskInt == u0) return 0; - return @as(MaskInt, 1) << @intCast(ShiftInt, index); + return @as(MaskInt, 1) << @as(ShiftInt, @intCast(index)); } fn boolMaskBit(index: usize, value: bool) MaskInt { if (MaskInt == u0) return 0; - return @as(MaskInt, @intFromBool(value)) << @intCast(ShiftInt, index); + return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index)); } }; } @@ -442,10 +442,10 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type { if (num_masks == 0) return; const start_mask_index = maskIndex(range.start); - const start_bit = @truncate(ShiftInt, range.start); + const start_bit = @as(ShiftInt, @truncate(range.start)); const end_mask_index = maskIndex(range.end); - const end_bit = @truncate(ShiftInt, range.end); + const end_bit = @as(ShiftInt, @truncate(range.end)); if (start_mask_index == end_mask_index) { var mask1 = std.math.boolMask(MaskInt, true) << start_bit; @@ -634,13 +634,13 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type { } fn maskBit(index: usize) MaskInt { - return @as(MaskInt, 1) << @truncate(ShiftInt, index); + return @as(MaskInt, 1) << @as(ShiftInt, @truncate(index)); } fn maskIndex(index: usize) usize { return index >> @bitSizeOf(ShiftInt); } fn boolMaskBit(index: usize, value: bool) MaskInt { - return @as(MaskInt, @intFromBool(value)) << @intCast(ShiftInt, index); + return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index)); } }; } @@ -731,7 +731,7 @@ pub const DynamicBitSetUnmanaged = struct { // set the padding bits in the old last item to 1 if (fill and old_masks > 0) { const old_padding_bits = old_masks * @bitSizeOf(MaskInt) - old_len; - const old_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, old_padding_bits); + const old_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(old_padding_bits)); self.masks[old_masks - 1] |= ~old_mask; } @@ -745,7 +745,7 @@ pub const DynamicBitSetUnmanaged = struct { // Zero out the padding bits if (new_len > 0) { const padding_bits = new_masks * @bitSizeOf(MaskInt) - new_len; - const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits); + const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits)); self.masks[new_masks - 1] &= last_item_mask; } @@ -816,10 +816,10 @@ pub const DynamicBitSetUnmanaged = struct { if (range.start == range.end) return; const start_mask_index = maskIndex(range.start); - const start_bit = @truncate(ShiftInt, range.start); + const start_bit = @as(ShiftInt, @truncate(range.start)); const end_mask_index = maskIndex(range.end); - const end_bit = @truncate(ShiftInt, range.end); + const end_bit = @as(ShiftInt, @truncate(range.end)); if (start_mask_index == end_mask_index) { var mask1 = std.math.boolMask(MaskInt, true) << start_bit; @@ -887,7 +887,7 @@ pub const DynamicBitSetUnmanaged = struct { } const 
padding_bits = num_masks * @bitSizeOf(MaskInt) - bit_length; - const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits); + const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits)); self.masks[num_masks - 1] &= last_item_mask; } @@ -996,7 +996,7 @@ pub const DynamicBitSetUnmanaged = struct { pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) { const num_masks = numMasks(self.bit_length); const padding_bits = num_masks * @bitSizeOf(MaskInt) - self.bit_length; - const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits); + const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits)); return Iterator(options).init(self.masks[0..num_masks], last_item_mask); } @@ -1005,13 +1005,13 @@ pub const DynamicBitSetUnmanaged = struct { } fn maskBit(index: usize) MaskInt { - return @as(MaskInt, 1) << @truncate(ShiftInt, index); + return @as(MaskInt, 1) << @as(ShiftInt, @truncate(index)); } fn maskIndex(index: usize) usize { return index >> @bitSizeOf(ShiftInt); } fn boolMaskBit(index: usize, value: bool) MaskInt { - return @as(MaskInt, @intFromBool(value)) << @intCast(ShiftInt, index); + return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index)); } fn numMasks(bit_length: usize) usize { return (bit_length + (@bitSizeOf(MaskInt) - 1)) / @bitSizeOf(MaskInt); @@ -1255,7 +1255,7 @@ fn BitSetIterator(comptime MaskInt: type, comptime options: IteratorOptions) typ .reverse => { const leading_zeroes = @clz(self.bits_remain); const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes; - const no_top_bit_mask = (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1; + const no_top_bit_mask = (@as(MaskInt, 1) << @as(ShiftInt, @intCast(top_bit))) - 1; self.bits_remain &= no_top_bit_mask; return top_bit + self.bit_offset; }, diff --git a/lib/std/bounded_array.zig b/lib/std/bounded_array.zig index 0e0b601af6..6986414a24 100644 --- a/lib/std/bounded_array.zig +++ b/lib/std/bounded_array.zig @@ -394,7 +394,7 @@ test "BoundedArrayAligned" { try a.append(255); try a.append(255); - const b = @ptrCast(*const [2]u16, a.constSlice().ptr); + const b = @as(*const [2]u16, @ptrCast(a.constSlice().ptr)); try testing.expectEqual(@as(u16, 0), b[0]); try testing.expectEqual(@as(u16, 65535), b[1]); } diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 54781e4465..99761b146d 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -784,7 +784,7 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr exit_size.* = 256; - return @ptrCast([*:0]u16, utf16.ptr); + return @as([*:0]u16, @ptrCast(utf16.ptr)); } }; diff --git a/lib/std/c.zig b/lib/std/c.zig index 3b4bfef826..149f3ab7e1 100644 --- a/lib/std/c.zig +++ b/lib/std/c.zig @@ -113,7 +113,7 @@ pub usingnamespace switch (builtin.os.tag) { pub fn getErrno(rc: anytype) c.E { if (rc == -1) { - return @enumFromInt(c.E, c._errno().*); + return @as(c.E, @enumFromInt(c._errno().*)); } else { return .SUCCESS; } diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig index 6dd517eada..0f60c2f841 100644 --- a/lib/std/c/darwin.zig +++ b/lib/std/c/darwin.zig @@ -1177,10 +1177,10 @@ pub const sigset_t = u32; pub const empty_sigset: sigset_t = 0; pub const SIG = struct { - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); - pub const HOLD = @ptrFromInt(?Sigaction.handler_fn, 5); + pub const ERR 
= @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); + pub const HOLD = @as(?Sigaction.handler_fn, @ptrFromInt(5)); /// block specified signal set pub const _BLOCK = 1; @@ -1411,7 +1411,7 @@ pub const MAP = struct { pub const NOCACHE = 0x0400; /// don't reserve needed swap area pub const NORESERVE = 0x0040; - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); }; pub const MSF = struct { @@ -1879,7 +1879,7 @@ pub const W = struct { pub const UNTRACED = 0x00000002; pub fn EXITSTATUS(x: u32) u8 { - return @intCast(u8, x >> 8); + return @as(u8, @intCast(x >> 8)); } pub fn TERMSIG(x: u32) u32 { return status(x); @@ -2463,7 +2463,7 @@ pub const KernE = enum(u32) { pub const mach_msg_return_t = kern_return_t; pub fn getMachMsgError(err: mach_msg_return_t) MachMsgE { - return @enumFromInt(MachMsgE, @truncate(u32, @intCast(usize, err))); + return @as(MachMsgE, @enumFromInt(@as(u32, @truncate(@as(usize, @intCast(err)))))); } /// All special error code bits defined below. @@ -2665,10 +2665,10 @@ pub const RTLD = struct { pub const NODELETE = 0x80; pub const FIRST = 0x100; - pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1))); - pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2))); - pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3))); - pub const MAIN_ONLY = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -5))); + pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))); + pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2))))); + pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3))))); + pub const MAIN_ONLY = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -5))))); }; pub const F = struct { @@ -3238,14 +3238,14 @@ pub const PosixSpawn = struct { pub fn get(self: Attr) Error!u16 { var flags: c_short = undefined; switch (errno(posix_spawnattr_getflags(&self.attr, &flags))) { - .SUCCESS => return @bitCast(u16, flags), + .SUCCESS => return @as(u16, @bitCast(flags)), .INVAL => unreachable, else => |err| return unexpectedErrno(err), } } pub fn set(self: *Attr, flags: u16) Error!void { - switch (errno(posix_spawnattr_setflags(&self.attr, @bitCast(c_short, flags)))) { + switch (errno(posix_spawnattr_setflags(&self.attr, @as(c_short, @bitCast(flags))))) { .SUCCESS => return, .INVAL => unreachable, else => |err| return unexpectedErrno(err), @@ -3281,7 +3281,7 @@ pub const PosixSpawn = struct { } pub fn openZ(self: *Actions, fd: fd_t, path: [*:0]const u8, flags: u32, mode: mode_t) Error!void { - switch (errno(posix_spawn_file_actions_addopen(&self.actions, fd, path, @bitCast(c_int, flags), mode))) { + switch (errno(posix_spawn_file_actions_addopen(&self.actions, fd, path, @as(c_int, @bitCast(flags)), mode))) { .SUCCESS => return, .BADF => return error.InvalidFileDescriptor, .NOMEM => return error.SystemResources, @@ -3402,11 +3402,11 @@ pub const PosixSpawn = struct { pub fn waitpid(pid: pid_t, flags: u32) Error!std.os.WaitPidResult { var status: c_int = undefined; while (true) { - const rc = waitpid(pid, &status, @intCast(c_int, flags)); + const rc = waitpid(pid, &status, @as(c_int, @intCast(flags))); switch (errno(rc)) { .SUCCESS => return std.os.WaitPidResult{ - .pid = @intCast(pid_t, rc), - .status = @bitCast(u32, status), + .pid = @as(pid_t, 
@intCast(rc)), + .status = @as(u32, @bitCast(status)), }, .INTR => continue, .CHILD => return error.ChildExecFailed, @@ -3418,7 +3418,7 @@ pub const PosixSpawn = struct { }; pub fn getKernError(err: kern_return_t) KernE { - return @enumFromInt(KernE, @truncate(u32, @intCast(usize, err))); + return @as(KernE, @enumFromInt(@as(u32, @truncate(@as(usize, @intCast(err)))))); } pub fn unexpectedKernError(err: KernE) std.os.UnexpectedError { @@ -3585,9 +3585,9 @@ pub const MachTask = extern struct { .top => VM_REGION_TOP_INFO, }, switch (tag) { - .basic => @ptrCast(vm_region_info_t, &info.info.basic), - .extended => @ptrCast(vm_region_info_t, &info.info.extended), - .top => @ptrCast(vm_region_info_t, &info.info.top), + .basic => @as(vm_region_info_t, @ptrCast(&info.info.basic)), + .extended => @as(vm_region_info_t, @ptrCast(&info.info.extended)), + .top => @as(vm_region_info_t, @ptrCast(&info.info.top)), }, &count, &objname, @@ -3640,8 +3640,8 @@ pub const MachTask = extern struct { &base_len, &nesting, switch (tag) { - .short => @ptrCast(vm_region_recurse_info_t, &info.info.short), - .full => @ptrCast(vm_region_recurse_info_t, &info.info.full), + .short => @as(vm_region_recurse_info_t, @ptrCast(&info.info.short)), + .full => @as(vm_region_recurse_info_t, @ptrCast(&info.info.full)), }, &count, ))) { @@ -3701,7 +3701,7 @@ pub const MachTask = extern struct { task.port, curr_addr, @intFromPtr(out_buf.ptr), - @intCast(mach_msg_type_number_t, curr_size), + @as(mach_msg_type_number_t, @intCast(curr_size)), ))) { .SUCCESS => {}, .FAILURE => return error.PermissionDenied, @@ -3752,7 +3752,7 @@ pub const MachTask = extern struct { else => |err| return unexpectedKernError(err), } - @memcpy(out_buf[0..curr_bytes_read], @ptrFromInt([*]const u8, vm_memory)); + @memcpy(out_buf[0..curr_bytes_read], @as([*]const u8, @ptrFromInt(vm_memory))); _ = vm_deallocate(mach_task_self(), vm_memory, curr_bytes_read); out_buf = out_buf[curr_bytes_read..]; @@ -3782,10 +3782,10 @@ pub const MachTask = extern struct { switch (getKernError(task_info( task.port, TASK_VM_INFO, - @ptrCast(task_info_t, &vm_info), + @as(task_info_t, @ptrCast(&vm_info)), &info_count, ))) { - .SUCCESS => return @intCast(usize, vm_info.page_size), + .SUCCESS => return @as(usize, @intCast(vm_info.page_size)), else => {}, } } @@ -3802,7 +3802,7 @@ pub const MachTask = extern struct { switch (getKernError(task_info( task.port, MACH_TASK_BASIC_INFO, - @ptrCast(task_info_t, &info), + @as(task_info_t, @ptrCast(&info)), &count, ))) { .SUCCESS => return info, @@ -3832,7 +3832,7 @@ pub const MachTask = extern struct { _ = vm_deallocate( self_task.port, @intFromPtr(list.buf.ptr), - @intCast(vm_size_t, list.buf.len * @sizeOf(mach_port_t)), + @as(vm_size_t, @intCast(list.buf.len * @sizeOf(mach_port_t))), ); } }; @@ -3841,7 +3841,7 @@ pub const MachTask = extern struct { var thread_list: mach_port_array_t = undefined; var thread_count: mach_msg_type_number_t = undefined; switch (getKernError(task_threads(task.port, &thread_list, &thread_count))) { - .SUCCESS => return ThreadList{ .buf = @ptrCast([*]MachThread, thread_list)[0..thread_count] }, + .SUCCESS => return ThreadList{ .buf = @as([*]MachThread, @ptrCast(thread_list))[0..thread_count] }, else => |err| return unexpectedKernError(err), } } @@ -3860,7 +3860,7 @@ pub const MachThread = extern struct { switch (getKernError(thread_info( thread.port, THREAD_BASIC_INFO, - @ptrCast(thread_info_t, &info), + @as(thread_info_t, @ptrCast(&info)), &count, ))) { .SUCCESS => return info, @@ -3874,7 +3874,7 @@ pub const 
MachThread = extern struct { switch (getKernError(thread_info( thread.port, THREAD_IDENTIFIER_INFO, - @ptrCast(thread_info_t, &info), + @as(thread_info_t, @ptrCast(&info)), &count, ))) { .SUCCESS => return info, @@ -3962,7 +3962,7 @@ pub const thread_affinity_policy_t = [*]thread_affinity_policy; pub const THREAD_AFFINITY = struct { pub const POLICY = 0; - pub const POLICY_COUNT = @intCast(mach_msg_type_number_t, @sizeOf(thread_affinity_policy_data_t) / @sizeOf(integer_t)); + pub const POLICY_COUNT = @as(mach_msg_type_number_t, @intCast(@sizeOf(thread_affinity_policy_data_t) / @sizeOf(integer_t))); }; /// cpu affinity api @@ -4041,7 +4041,7 @@ pub const host_preferred_user_arch_data_t = host_preferred_user_arch; pub const host_preferred_user_arch_t = *host_preferred_user_arch; fn HostCount(comptime HT: type) mach_msg_type_number_t { - return @intCast(mach_msg_type_number_t, @sizeOf(HT) / @sizeOf(integer_t)); + return @as(mach_msg_type_number_t, @intCast(@sizeOf(HT) / @sizeOf(integer_t))); } pub const HOST = struct { diff --git a/lib/std/c/dragonfly.zig b/lib/std/c/dragonfly.zig index 912bb99056..6782aa098a 100644 --- a/lib/std/c/dragonfly.zig +++ b/lib/std/c/dragonfly.zig @@ -172,7 +172,7 @@ pub const PROT = struct { pub const MAP = struct { pub const FILE = 0; - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); pub const ANONYMOUS = ANON; pub const COPY = PRIVATE; pub const SHARED = 1; @@ -208,7 +208,7 @@ pub const W = struct { pub const TRAPPED = 0x0020; pub fn EXITSTATUS(s: u32) u8 { - return @intCast(u8, (s & 0xff00) >> 8); + return @as(u8, @intCast((s & 0xff00) >> 8)); } pub fn TERMSIG(s: u32) u32 { return s & 0x7f; @@ -220,7 +220,7 @@ pub const W = struct { return TERMSIG(s) == 0; } pub fn IFSTOPPED(s: u32) bool { - return @truncate(u16, (((s & 0xffff) *% 0x10001) >> 8)) > 0x7f00; + return @as(u16, @truncate((((s & 0xffff) *% 0x10001) >> 8))) > 0x7f00; } pub fn IFSIGNALED(s: u32) bool { return (s & 0xffff) -% 1 < 0xff; @@ -620,9 +620,9 @@ pub const S = struct { pub const BADSIG = SIG.ERR; pub const SIG = struct { - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); pub const BLOCK = 1; pub const UNBLOCK = 2; @@ -871,10 +871,10 @@ pub const RTLD = struct { pub const NODELETE = 0x01000; pub const NOLOAD = 0x02000; - pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1))); - pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2))); - pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3))); - pub const ALL = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -4))); + pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))); + pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2))))); + pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3))))); + pub const ALL = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -4))))); }; pub const dl_phdr_info = extern struct { diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig index 7a265ac2b3..deec41493d 100644 --- a/lib/std/c/freebsd.zig +++ b/lib/std/c/freebsd.zig @@ -20,11 +20,11 @@ fn 
__BIT_COUNT(bits: []const c_long) c_long { fn __BIT_MASK(s: usize) c_long { var x = s % CPU_SETSIZE; - return @bitCast(c_long, @intCast(c_ulong, 1) << @intCast(u6, x)); + return @as(c_long, @bitCast(@as(c_ulong, @intCast(1)) << @as(u6, @intCast(x)))); } pub fn CPU_COUNT(set: cpuset_t) c_int { - return @intCast(c_int, __BIT_COUNT(set.__bits[0..])); + return @as(c_int, @intCast(__BIT_COUNT(set.__bits[0..]))); } pub fn CPU_ZERO(set: *cpuset_t) void { @@ -529,7 +529,7 @@ pub const cap_rights_t = extern struct { pub const CAP = struct { pub fn RIGHT(idx: u6, bit: u64) u64 { - return (@intCast(u64, 1) << (57 + idx)) | bit; + return (@as(u64, @intCast(1)) << (57 + idx)) | bit; } pub const READ = CAP.RIGHT(0, 0x0000000000000001); pub const WRITE = CAP.RIGHT(0, 0x0000000000000002); @@ -961,7 +961,7 @@ pub const CLOCK = struct { }; pub const MAP = struct { - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); pub const SHARED = 0x0001; pub const PRIVATE = 0x0002; pub const FIXED = 0x0010; @@ -1013,7 +1013,7 @@ pub const W = struct { pub const TRAPPED = 32; pub fn EXITSTATUS(s: u32) u8 { - return @intCast(u8, (s & 0xff00) >> 8); + return @as(u8, @intCast((s & 0xff00) >> 8)); } pub fn TERMSIG(s: u32) u32 { return s & 0x7f; @@ -1025,7 +1025,7 @@ pub const W = struct { return TERMSIG(s) == 0; } pub fn IFSTOPPED(s: u32) bool { - return @truncate(u16, (((s & 0xffff) *% 0x10001) >> 8)) > 0x7f00; + return @as(u16, @truncate((((s & 0xffff) *% 0x10001) >> 8))) > 0x7f00; } pub fn IFSIGNALED(s: u32) bool { return (s & 0xffff) -% 1 < 0xff; @@ -1086,9 +1086,9 @@ pub const SIG = struct { pub const UNBLOCK = 2; pub const SETMASK = 3; - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); pub const WORDS = 4; pub const MAXSIG = 128; @@ -2626,7 +2626,7 @@ pub const domainset_t = extern struct { }; pub fn DOMAINSET_COUNT(set: domainset_t) c_int { - return @intCast(c_int, __BIT_COUNT(set.__bits[0..])); + return @as(c_int, @intCast(__BIT_COUNT(set.__bits[0..]))); } pub const domainset = extern struct { @@ -2650,7 +2650,7 @@ const ioctl_cmd = enum(u32) { }; fn ioImpl(cmd: ioctl_cmd, op: u8, nr: u8, comptime IT: type) u32 { - return @bitCast(u32, @intFromEnum(cmd) | @intCast(u32, @truncate(u8, @sizeOf(IT))) << 16 | @intCast(u32, op) << 8 | nr); + return @as(u32, @bitCast(@intFromEnum(cmd) | @as(u32, @intCast(@as(u8, @truncate(@sizeOf(IT))))) << 16 | @as(u32, @intCast(op)) << 8 | nr)); } pub fn IO(op: u8, nr: u8) u32 { diff --git a/lib/std/c/haiku.zig b/lib/std/c/haiku.zig index 2f9917a0f3..c47ceeb003 100644 --- a/lib/std/c/haiku.zig +++ b/lib/std/c/haiku.zig @@ -414,7 +414,7 @@ pub const CLOCK = struct { pub const MAP = struct { /// mmap() error return code - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); /// changes are seen by others pub const SHARED = 0x01; /// changes are only seen by caller @@ -443,7 +443,7 @@ pub const W = struct { pub const NOWAIT = 0x20; pub fn EXITSTATUS(s: u32) u8 { - return @intCast(u8, s & 0xff); + return @as(u8, @intCast(s & 0xff)); } pub fn TERMSIG(s: u32) u32 { @@ -481,9 +481,9 @@ pub const SA = struct { }; pub const SIG = 
struct { - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); pub const HUP = 1; pub const INT = 2; diff --git a/lib/std/c/linux.zig b/lib/std/c/linux.zig index d3a3bfdeba..ddc488e115 100644 --- a/lib/std/c/linux.zig +++ b/lib/std/c/linux.zig @@ -32,7 +32,7 @@ pub const MADV = linux.MADV; pub const MAP = struct { pub usingnamespace linux.MAP; /// Only used by libc to communicate failure. - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); }; pub const MSF = linux.MSF; pub const MMAP2_UNIT = linux.MMAP2_UNIT; diff --git a/lib/std/c/netbsd.zig b/lib/std/c/netbsd.zig index 2c7c236ed0..1fc0784287 100644 --- a/lib/std/c/netbsd.zig +++ b/lib/std/c/netbsd.zig @@ -172,9 +172,9 @@ pub const RTLD = struct { pub const NODELETE = 0x01000; pub const NOLOAD = 0x02000; - pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1))); - pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2))); - pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3))); + pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))); + pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2))))); + pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3))))); }; pub const dl_phdr_info = extern struct { @@ -597,7 +597,7 @@ pub const CLOCK = struct { }; pub const MAP = struct { - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); pub const SHARED = 0x0001; pub const PRIVATE = 0x0002; pub const REMAPDUP = 0x0004; @@ -653,7 +653,7 @@ pub const W = struct { pub const TRAPPED = 0x00000040; pub fn EXITSTATUS(s: u32) u8 { - return @intCast(u8, (s >> 8) & 0xff); + return @as(u8, @intCast((s >> 8) & 0xff)); } pub fn TERMSIG(s: u32) u32 { return s & 0x7f; @@ -1106,9 +1106,9 @@ pub const winsize = extern struct { const NSIG = 32; pub const SIG = struct { - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); pub const WORDS = 4; pub const MAXSIG = 128; diff --git a/lib/std/c/openbsd.zig b/lib/std/c/openbsd.zig index 47c1aec862..06085903e4 100644 --- a/lib/std/c/openbsd.zig +++ b/lib/std/c/openbsd.zig @@ -449,7 +449,7 @@ pub const CLOCK = struct { }; pub const MAP = struct { - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); pub const SHARED = 0x0001; pub const PRIVATE = 0x0002; pub const FIXED = 0x0010; @@ -488,7 +488,7 @@ pub const W = struct { pub const CONTINUED = 8; pub fn EXITSTATUS(s: u32) u8 { - return @intCast(u8, (s >> 8) & 0xff); + return @as(u8, @intCast((s >> 8) & 0xff)); } pub fn TERMSIG(s: u32) u32 { return (s & 0x7f); @@ -1000,11 +1000,11 @@ pub const winsize = extern struct { const NSIG = 33; pub const SIG = struct { - pub const DFL = 
@ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); - pub const CATCH = @ptrFromInt(?Sigaction.handler_fn, 2); - pub const HOLD = @ptrFromInt(?Sigaction.handler_fn, 3); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); + pub const CATCH = @as(?Sigaction.handler_fn, @ptrFromInt(2)); + pub const HOLD = @as(?Sigaction.handler_fn, @ptrFromInt(3)); pub const HUP = 1; pub const INT = 2; diff --git a/lib/std/c/solaris.zig b/lib/std/c/solaris.zig index 511bf9ccc5..cbca1805bb 100644 --- a/lib/std/c/solaris.zig +++ b/lib/std/c/solaris.zig @@ -111,10 +111,10 @@ pub const RTLD = struct { pub const FIRST = 0x02000; pub const CONFGEN = 0x10000; - pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1))); - pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2))); - pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3))); - pub const PROBE = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -4))); + pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))); + pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2))))); + pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3))))); + pub const PROBE = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -4))))); }; pub const Flock = extern struct { @@ -524,7 +524,7 @@ pub const CLOCK = struct { }; pub const MAP = struct { - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); pub const SHARED = 0x0001; pub const PRIVATE = 0x0002; pub const TYPE = 0x000f; @@ -583,7 +583,7 @@ pub const W = struct { pub const NOWAIT = 0o200; pub fn EXITSTATUS(s: u32) u8 { - return @intCast(u8, (s >> 8) & 0xff); + return @as(u8, @intCast((s >> 8) & 0xff)); } pub fn TERMSIG(s: u32) u32 { return s & 0x7f; @@ -886,10 +886,10 @@ pub const winsize = extern struct { const NSIG = 75; pub const SIG = struct { - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); - pub const HOLD = @ptrFromInt(?Sigaction.handler_fn, 2); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); + pub const HOLD = @as(?Sigaction.handler_fn, @ptrFromInt(2)); pub const WORDS = 4; pub const MAXSIG = 75; @@ -1441,7 +1441,7 @@ pub const AT = struct { /// Magic value that specify the use of the current working directory /// to determine the target of relative file paths in the openat() and /// similar syscalls. 
- pub const FDCWD = @bitCast(fd_t, @as(u32, 0xffd19553)); + pub const FDCWD = @as(fd_t, @bitCast(@as(u32, 0xffd19553))); /// Do not follow symbolic links pub const SYMLINK_NOFOLLOW = 0x1000; @@ -1907,9 +1907,9 @@ const IoCtlCommand = enum(u32) { }; fn ioImpl(cmd: IoCtlCommand, io_type: u8, nr: u8, comptime IOT: type) i32 { - const size = @intCast(u32, @truncate(u8, @sizeOf(IOT))) << 16; - const t = @intCast(u32, io_type) << 8; - return @bitCast(i32, @intFromEnum(cmd) | size | t | nr); + const size = @as(u32, @intCast(@as(u8, @truncate(@sizeOf(IOT))))) << 16; + const t = @as(u32, @intCast(io_type)) << 8; + return @as(i32, @bitCast(@intFromEnum(cmd) | size | t | nr)); } pub fn IO(io_type: u8, nr: u8) i32 { diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index 636ef7f4d7..9f4d75084f 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -93,7 +93,7 @@ pub const ChildProcess = struct { switch (builtin.os.tag) { .linux => { if (rus.rusage) |ru| { - return @intCast(usize, ru.maxrss) * 1024; + return @as(usize, @intCast(ru.maxrss)) * 1024; } else { return null; } @@ -108,7 +108,7 @@ pub const ChildProcess = struct { .macos, .ios => { if (rus.rusage) |ru| { // Darwin oddly reports in bytes instead of kilobytes. - return @intCast(usize, ru.maxrss); + return @as(usize, @intCast(ru.maxrss)); } else { return null; } @@ -376,7 +376,7 @@ pub const ChildProcess = struct { if (windows.kernel32.GetExitCodeProcess(self.id, &exit_code) == 0) { break :x Term{ .Unknown = 0 }; } else { - break :x Term{ .Exited = @truncate(u8, exit_code) }; + break :x Term{ .Exited = @as(u8, @truncate(exit_code)) }; } }); @@ -449,7 +449,7 @@ pub const ChildProcess = struct { // has a value greater than 0 if ((fd[0].revents & std.os.POLL.IN) != 0) { const err_int = try readIntFd(err_pipe[0]); - return @errSetCast(SpawnError, @errorFromInt(err_int)); + return @as(SpawnError, @errSetCast(@errorFromInt(err_int))); } } else { // Write maxInt(ErrInt) to the write end of the err_pipe. This is after @@ -462,7 +462,7 @@ pub const ChildProcess = struct { // Here we potentially return the fork child's error from the parent // pid. if (err_int != maxInt(ErrInt)) { - return @errSetCast(SpawnError, @errorFromInt(err_int)); + return @as(SpawnError, @errSetCast(@errorFromInt(err_int))); } } } @@ -542,7 +542,7 @@ pub const ChildProcess = struct { } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. // TODO type-safety for null-termination of `os.environ`. - break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr); + break :m @as([*:null]const ?[*:0]const u8, @ptrCast(os.environ.ptr)); } else { // TODO come up with a solution for this. 
@compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process"); @@ -605,7 +605,7 @@ pub const ChildProcess = struct { } // we are the parent - const pid = @intCast(i32, pid_result); + const pid = @as(i32, @intCast(pid_result)); if (self.stdin_behavior == StdIo.Pipe) { self.stdin = File{ .handle = stdin_pipe[1] }; } else { @@ -1015,11 +1015,11 @@ fn windowsCreateProcessPathExt( else => return windows.unexpectedStatus(rc), } - const dir_info = @ptrCast(*windows.FILE_DIRECTORY_INFORMATION, &file_information_buf); + const dir_info = @as(*windows.FILE_DIRECTORY_INFORMATION, @ptrCast(&file_information_buf)); if (dir_info.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY != 0) { break :found_name null; } - break :found_name @ptrCast([*]u16, &dir_info.FileName)[0 .. dir_info.FileNameLength / 2]; + break :found_name @as([*]u16, @ptrCast(&dir_info.FileName))[0 .. dir_info.FileNameLength / 2]; }; const unappended_err = unappended: { @@ -1104,7 +1104,7 @@ fn windowsCreateProcessPathExt( else => return windows.unexpectedStatus(rc), } - const dir_info = @ptrCast(*windows.FILE_DIRECTORY_INFORMATION, &file_information_buf); + const dir_info = @as(*windows.FILE_DIRECTORY_INFORMATION, @ptrCast(&file_information_buf)); // Skip directories if (dir_info.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY != 0) continue; @@ -1164,7 +1164,7 @@ fn windowsCreateProcess(app_name: [*:0]u16, cmd_line: [*:0]u16, envp_ptr: ?[*]u1 null, windows.TRUE, windows.CREATE_UNICODE_ENVIRONMENT, - @ptrCast(?*anyopaque, envp_ptr), + @as(?*anyopaque, @ptrCast(envp_ptr)), cwd_ptr, lpStartupInfo, lpProcessInformation, @@ -1376,7 +1376,7 @@ fn writeIntFd(fd: i32, value: ErrInt) !void { .capable_io_mode = .blocking, .intended_io_mode = .blocking, }; - file.writer().writeIntNative(u64, @intCast(u64, value)) catch return error.SystemResources; + file.writer().writeIntNative(u64, @as(u64, @intCast(value))) catch return error.SystemResources; } fn readIntFd(fd: i32) !ErrInt { @@ -1385,7 +1385,7 @@ fn readIntFd(fd: i32) !ErrInt { .capable_io_mode = .blocking, .intended_io_mode = .blocking, }; - return @intCast(ErrInt, file.reader().readIntNative(u64) catch return error.SystemResources); + return @as(ErrInt, @intCast(file.reader().readIntNative(u64) catch return error.SystemResources)); } /// Caller must free result. 
diff --git a/lib/std/coff.zig b/lib/std/coff.zig index d28e54b94c..a08c2c514d 100644 --- a/lib/std/coff.zig +++ b/lib/std/coff.zig @@ -457,12 +457,12 @@ pub const ImportLookupEntry32 = struct { pub fn getImportByName(raw: u32) ?ByName { if (mask & raw != 0) return null; - return @bitCast(ByName, raw); + return @as(ByName, @bitCast(raw)); } pub fn getImportByOrdinal(raw: u32) ?ByOrdinal { if (mask & raw == 0) return null; - return @bitCast(ByOrdinal, raw); + return @as(ByOrdinal, @bitCast(raw)); } }; @@ -483,12 +483,12 @@ pub const ImportLookupEntry64 = struct { pub fn getImportByName(raw: u64) ?ByName { if (mask & raw != 0) return null; - return @bitCast(ByName, raw); + return @as(ByName, @bitCast(raw)); } pub fn getImportByOrdinal(raw: u64) ?ByOrdinal { if (mask & raw == 0) return null; - return @bitCast(ByOrdinal, raw); + return @as(ByOrdinal, @bitCast(raw)); } }; @@ -1146,25 +1146,25 @@ pub const Coff = struct { } pub fn getCoffHeader(self: Coff) CoffHeader { - return @ptrCast(*align(1) const CoffHeader, self.data[self.coff_header_offset..][0..@sizeOf(CoffHeader)]).*; + return @as(*align(1) const CoffHeader, @ptrCast(self.data[self.coff_header_offset..][0..@sizeOf(CoffHeader)])).*; } pub fn getOptionalHeader(self: Coff) OptionalHeader { assert(self.is_image); const offset = self.coff_header_offset + @sizeOf(CoffHeader); - return @ptrCast(*align(1) const OptionalHeader, self.data[offset..][0..@sizeOf(OptionalHeader)]).*; + return @as(*align(1) const OptionalHeader, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeader)])).*; } pub fn getOptionalHeader32(self: Coff) OptionalHeaderPE32 { assert(self.is_image); const offset = self.coff_header_offset + @sizeOf(CoffHeader); - return @ptrCast(*align(1) const OptionalHeaderPE32, self.data[offset..][0..@sizeOf(OptionalHeaderPE32)]).*; + return @as(*align(1) const OptionalHeaderPE32, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeaderPE32)])).*; } pub fn getOptionalHeader64(self: Coff) OptionalHeaderPE64 { assert(self.is_image); const offset = self.coff_header_offset + @sizeOf(CoffHeader); - return @ptrCast(*align(1) const OptionalHeaderPE64, self.data[offset..][0..@sizeOf(OptionalHeaderPE64)]).*; + return @as(*align(1) const OptionalHeaderPE64, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeaderPE64)])).*; } pub fn getImageBase(self: Coff) u64 { @@ -1193,7 +1193,7 @@ pub const Coff = struct { else => unreachable, // We assume we have validated the header already }; const offset = self.coff_header_offset + @sizeOf(CoffHeader) + size; - return @ptrCast([*]align(1) const ImageDataDirectory, self.data[offset..])[0..self.getNumberOfDataDirectories()]; + return @as([*]align(1) const ImageDataDirectory, @ptrCast(self.data[offset..]))[0..self.getNumberOfDataDirectories()]; } pub fn getSymtab(self: *const Coff) ?Symtab { @@ -1217,7 +1217,7 @@ pub const Coff = struct { pub fn getSectionHeaders(self: *const Coff) []align(1) const SectionHeader { const coff_header = self.getCoffHeader(); const offset = self.coff_header_offset + @sizeOf(CoffHeader) + coff_header.size_of_optional_header; - return @ptrCast([*]align(1) const SectionHeader, self.data.ptr + offset)[0..coff_header.number_of_sections]; + return @as([*]align(1) const SectionHeader, @ptrCast(self.data.ptr + offset))[0..coff_header.number_of_sections]; } pub fn getSectionHeadersAlloc(self: *const Coff, allocator: mem.Allocator) ![]SectionHeader { @@ -1303,9 +1303,9 @@ pub const Symtab = struct { return .{ .name = raw[0..8].*, .value = mem.readIntLittle(u32, raw[8..12]), - .section_number = 
@enumFromInt(SectionNumber, mem.readIntLittle(u16, raw[12..14])), - .type = @bitCast(SymType, mem.readIntLittle(u16, raw[14..16])), - .storage_class = @enumFromInt(StorageClass, raw[16]), + .section_number = @as(SectionNumber, @enumFromInt(mem.readIntLittle(u16, raw[12..14]))), + .type = @as(SymType, @bitCast(mem.readIntLittle(u16, raw[14..16]))), + .storage_class = @as(StorageClass, @enumFromInt(raw[16])), .number_of_aux_symbols = raw[17], }; } @@ -1333,7 +1333,7 @@ pub const Symtab = struct { fn asWeakExtDef(raw: []const u8) WeakExternalDefinition { return .{ .tag_index = mem.readIntLittle(u32, raw[0..4]), - .flag = @enumFromInt(WeakExternalFlag, mem.readIntLittle(u32, raw[4..8])), + .flag = @as(WeakExternalFlag, @enumFromInt(mem.readIntLittle(u32, raw[4..8]))), .unused = raw[8..18].*, }; } @@ -1351,7 +1351,7 @@ pub const Symtab = struct { .number_of_linenumbers = mem.readIntLittle(u16, raw[6..8]), .checksum = mem.readIntLittle(u32, raw[8..12]), .number = mem.readIntLittle(u16, raw[12..14]), - .selection = @enumFromInt(ComdatSelection, raw[14]), + .selection = @as(ComdatSelection, @enumFromInt(raw[14])), .unused = raw[15..18].*, }; } @@ -1384,6 +1384,6 @@ pub const Strtab = struct { pub fn get(self: Strtab, off: u32) []const u8 { assert(off < self.buffer.len); - return mem.sliceTo(@ptrCast([*:0]const u8, self.buffer.ptr + off), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.buffer.ptr + off)), 0); } }; diff --git a/lib/std/compress/deflate/bits_utils.zig b/lib/std/compress/deflate/bits_utils.zig index 85bae95bc8..4b440dc44e 100644 --- a/lib/std/compress/deflate/bits_utils.zig +++ b/lib/std/compress/deflate/bits_utils.zig @@ -3,7 +3,7 @@ const math = @import("std").math; // Reverse bit-by-bit a N-bit code. pub fn bitReverse(comptime T: type, value: T, N: usize) T { const r = @bitReverse(value); - return r >> @intCast(math.Log2Int(T), @typeInfo(T).Int.bits - N); + return r >> @as(math.Log2Int(T), @intCast(@typeInfo(T).Int.bits - N)); } test "bitReverse" { diff --git a/lib/std/compress/deflate/compressor.zig b/lib/std/compress/deflate/compressor.zig index e2cbafe520..72de63f162 100644 --- a/lib/std/compress/deflate/compressor.zig +++ b/lib/std/compress/deflate/compressor.zig @@ -160,7 +160,7 @@ fn matchLen(a: []u8, b: []u8, max: u32) u32 { var bounded_b = b[0..max]; for (bounded_a, 0..) |av, i| { if (bounded_b[i] != av) { - return @intCast(u32, i); + return @as(u32, @intCast(i)); } } return max; @@ -313,14 +313,14 @@ pub fn Compressor(comptime WriterType: anytype) type { // the entire table onto the stack (https://golang.org/issue/18625). for (self.hash_prev, 0..) |v, i| { if (v > delta) { - self.hash_prev[i] = @intCast(u32, v - delta); + self.hash_prev[i] = @as(u32, @intCast(v - delta)); } else { self.hash_prev[i] = 0; } } for (self.hash_head, 0..) |v, i| { if (v > delta) { - self.hash_head[i] = @intCast(u32, v - delta); + self.hash_head[i] = @as(u32, @intCast(v - delta)); } else { self.hash_head[i] = 0; } @@ -329,7 +329,7 @@ pub fn Compressor(comptime WriterType: anytype) type { } const n = std.compress.deflate.copy(self.window[self.window_end..], b); self.window_end += n; - return @intCast(u32, n); + return @as(u32, @intCast(n)); } fn writeBlock(self: *Self, tokens: []token.Token, index: usize) !void { @@ -398,13 +398,13 @@ pub fn Compressor(comptime WriterType: anytype) type { // Our chain should point to the previous value. self.hash_prev[di & window_mask] = hh.*; // Set the head of the hash chain to us. 
- hh.* = @intCast(u32, di + self.hash_offset); + hh.* = @as(u32, @intCast(di + self.hash_offset)); } self.hash = new_h; } // Update window information. self.window_end = n; - self.index = @intCast(u32, n); + self.index = @as(u32, @intCast(n)); } const Match = struct { @@ -471,11 +471,11 @@ pub fn Compressor(comptime WriterType: anytype) type { break; } - if (@intCast(u32, self.hash_prev[i & window_mask]) < self.hash_offset) { + if (@as(u32, @intCast(self.hash_prev[i & window_mask])) < self.hash_offset) { break; } - i = @intCast(u32, self.hash_prev[i & window_mask]) - self.hash_offset; + i = @as(u32, @intCast(self.hash_prev[i & window_mask])) - self.hash_offset; if (i < min_index) { break; } @@ -576,7 +576,7 @@ pub fn Compressor(comptime WriterType: anytype) type { // Flush current output block if any. if (self.byte_available) { // There is still one pending token that needs to be flushed - self.tokens[self.tokens_count] = token.literalToken(@intCast(u32, self.window[self.index - 1])); + self.tokens[self.tokens_count] = token.literalToken(@as(u32, @intCast(self.window[self.index - 1]))); self.tokens_count += 1; self.byte_available = false; } @@ -591,9 +591,9 @@ pub fn Compressor(comptime WriterType: anytype) type { // Update the hash self.hash = hash4(self.window[self.index .. self.index + min_match_length]); var hh = &self.hash_head[self.hash & hash_mask]; - self.chain_head = @intCast(u32, hh.*); - self.hash_prev[self.index & window_mask] = @intCast(u32, self.chain_head); - hh.* = @intCast(u32, self.index + self.hash_offset); + self.chain_head = @as(u32, @intCast(hh.*)); + self.hash_prev[self.index & window_mask] = @as(u32, @intCast(self.chain_head)); + hh.* = @as(u32, @intCast(self.index + self.hash_offset)); } var prev_length = self.length; var prev_offset = self.offset; @@ -614,7 +614,7 @@ pub fn Compressor(comptime WriterType: anytype) type { self.index, self.chain_head -| self.hash_offset, min_match_length - 1, - @intCast(u32, lookahead), + @as(u32, @intCast(lookahead)), ); if (fmatch.ok) { self.length = fmatch.length; @@ -631,12 +631,12 @@ pub fn Compressor(comptime WriterType: anytype) type { // There was a match at the previous step, and the current match is // not better. Output the previous match. if (self.compression_level.fast_skip_hashshing != skip_never) { - self.tokens[self.tokens_count] = token.matchToken(@intCast(u32, self.length - base_match_length), @intCast(u32, self.offset - base_match_offset)); + self.tokens[self.tokens_count] = token.matchToken(@as(u32, @intCast(self.length - base_match_length)), @as(u32, @intCast(self.offset - base_match_offset))); self.tokens_count += 1; } else { self.tokens[self.tokens_count] = token.matchToken( - @intCast(u32, prev_length - base_match_length), - @intCast(u32, prev_offset -| base_match_offset), + @as(u32, @intCast(prev_length - base_match_length)), + @as(u32, @intCast(prev_offset -| base_match_offset)), ); self.tokens_count += 1; } @@ -661,7 +661,7 @@ pub fn Compressor(comptime WriterType: anytype) type { var hh = &self.hash_head[self.hash & hash_mask]; self.hash_prev[index & window_mask] = hh.*; // Set the head of the hash chain to us. 
- hh.* = @intCast(u32, index + self.hash_offset); + hh.* = @as(u32, @intCast(index + self.hash_offset)); } } self.index = index; @@ -689,7 +689,7 @@ pub fn Compressor(comptime WriterType: anytype) type { if (self.compression_level.fast_skip_hashshing != skip_never) { i = self.index; } - self.tokens[self.tokens_count] = token.literalToken(@intCast(u32, self.window[i])); + self.tokens[self.tokens_count] = token.literalToken(@as(u32, @intCast(self.window[i]))); self.tokens_count += 1; if (self.tokens_count == max_flate_block_tokens) { try self.writeBlock(self.tokens[0..self.tokens_count], i + 1); @@ -707,7 +707,7 @@ pub fn Compressor(comptime WriterType: anytype) type { fn fillStore(self: *Self, b: []const u8) u32 { const n = std.compress.deflate.copy(self.window[self.window_end..], b); self.window_end += n; - return @intCast(u32, n); + return @as(u32, @intCast(n)); } fn store(self: *Self) !void { diff --git a/lib/std/compress/deflate/compressor_test.zig b/lib/std/compress/deflate/compressor_test.zig index 858da8d8b5..5012bb3c07 100644 --- a/lib/std/compress/deflate/compressor_test.zig +++ b/lib/std/compress/deflate/compressor_test.zig @@ -172,7 +172,7 @@ test "deflate/inflate" { defer testing.allocator.free(large_data_chunk); // fill with random data for (large_data_chunk, 0..) |_, i| { - large_data_chunk[i] = @truncate(u8, i) *% @truncate(u8, i); + large_data_chunk[i] = @as(u8, @truncate(i)) *% @as(u8, @truncate(i)); } try testToFromWithLimit(large_data_chunk, limits); } diff --git a/lib/std/compress/deflate/decompressor.zig b/lib/std/compress/deflate/decompressor.zig index 40bde67326..3f6ee151ba 100644 --- a/lib/std/compress/deflate/decompressor.zig +++ b/lib/std/compress/deflate/decompressor.zig @@ -130,30 +130,30 @@ const HuffmanDecoder = struct { // Exception: To be compatible with zlib, we also need to // accept degenerate single-code codings. See also // TestDegenerateHuffmanCoding. 
- if (code != @as(u32, 1) << @intCast(u5, max) and !(code == 1 and max == 1)) { + if (code != @as(u32, 1) << @as(u5, @intCast(max)) and !(code == 1 and max == 1)) { return false; } self.min = min; if (max > huffman_chunk_bits) { - var num_links = @as(u32, 1) << @intCast(u5, max - huffman_chunk_bits); - self.link_mask = @intCast(u32, num_links - 1); + var num_links = @as(u32, 1) << @as(u5, @intCast(max - huffman_chunk_bits)); + self.link_mask = @as(u32, @intCast(num_links - 1)); // create link tables var link = next_code[huffman_chunk_bits + 1] >> 1; self.links = try self.allocator.alloc([]u16, huffman_num_chunks - link); self.sub_chunks = ArrayList(u32).init(self.allocator); self.initialized = true; - var j = @intCast(u32, link); + var j = @as(u32, @intCast(link)); while (j < huffman_num_chunks) : (j += 1) { - var reverse = @intCast(u32, bu.bitReverse(u16, @intCast(u16, j), 16)); - reverse >>= @intCast(u32, 16 - huffman_chunk_bits); - var off = j - @intCast(u32, link); + var reverse = @as(u32, @intCast(bu.bitReverse(u16, @as(u16, @intCast(j)), 16))); + reverse >>= @as(u32, @intCast(16 - huffman_chunk_bits)); + var off = j - @as(u32, @intCast(link)); if (sanity) { // check we are not overwriting an existing chunk assert(self.chunks[reverse] == 0); } - self.chunks[reverse] = @intCast(u16, off << huffman_value_shift | (huffman_chunk_bits + 1)); + self.chunks[reverse] = @as(u16, @intCast(off << huffman_value_shift | (huffman_chunk_bits + 1))); self.links[off] = try self.allocator.alloc(u16, num_links); if (sanity) { // initialize to a known invalid chunk code (0) to see if we overwrite @@ -170,12 +170,12 @@ const HuffmanDecoder = struct { } var ncode = next_code[n]; next_code[n] += 1; - var chunk = @intCast(u16, (li << huffman_value_shift) | n); - var reverse = @intCast(u16, bu.bitReverse(u16, @intCast(u16, ncode), 16)); - reverse >>= @intCast(u4, 16 - n); + var chunk = @as(u16, @intCast((li << huffman_value_shift) | n)); + var reverse = @as(u16, @intCast(bu.bitReverse(u16, @as(u16, @intCast(ncode)), 16))); + reverse >>= @as(u4, @intCast(16 - n)); if (n <= huffman_chunk_bits) { var off = reverse; - while (off < self.chunks.len) : (off += @as(u16, 1) << @intCast(u4, n)) { + while (off < self.chunks.len) : (off += @as(u16, 1) << @as(u4, @intCast(n))) { // We should never need to overwrite // an existing chunk. 
Also, 0 is // never a valid chunk, because the @@ -198,12 +198,12 @@ const HuffmanDecoder = struct { var link_tab = self.links[value]; reverse >>= huffman_chunk_bits; var off = reverse; - while (off < link_tab.len) : (off += @as(u16, 1) << @intCast(u4, n - huffman_chunk_bits)) { + while (off < link_tab.len) : (off += @as(u16, 1) << @as(u4, @intCast(n - huffman_chunk_bits))) { if (sanity) { // check we are not overwriting an existing chunk assert(link_tab[off] == 0); } - link_tab[off] = @intCast(u16, chunk); + link_tab[off] = @as(u16, @intCast(chunk)); } } } @@ -494,21 +494,21 @@ pub fn Decompressor(comptime ReaderType: type) type { while (self.nb < 5 + 5 + 4) { try self.moreBits(); } - var nlit = @intCast(u32, self.b & 0x1F) + 257; + var nlit = @as(u32, @intCast(self.b & 0x1F)) + 257; if (nlit > max_num_lit) { corrupt_input_error_offset = self.roffset; self.err = InflateError.CorruptInput; return InflateError.CorruptInput; } self.b >>= 5; - var ndist = @intCast(u32, self.b & 0x1F) + 1; + var ndist = @as(u32, @intCast(self.b & 0x1F)) + 1; if (ndist > max_num_dist) { corrupt_input_error_offset = self.roffset; self.err = InflateError.CorruptInput; return InflateError.CorruptInput; } self.b >>= 5; - var nclen = @intCast(u32, self.b & 0xF) + 4; + var nclen = @as(u32, @intCast(self.b & 0xF)) + 4; // num_codes is 19, so nclen is always valid. self.b >>= 4; self.nb -= 5 + 5 + 4; @@ -519,7 +519,7 @@ pub fn Decompressor(comptime ReaderType: type) type { while (self.nb < 3) { try self.moreBits(); } - self.codebits[code_order[i]] = @intCast(u32, self.b & 0x7); + self.codebits[code_order[i]] = @as(u32, @intCast(self.b & 0x7)); self.b >>= 3; self.nb -= 3; } @@ -575,8 +575,8 @@ pub fn Decompressor(comptime ReaderType: type) type { while (self.nb < nb) { try self.moreBits(); } - rep += @intCast(u32, self.b & (@as(u32, 1) << @intCast(u5, nb)) - 1); - self.b >>= @intCast(u5, nb); + rep += @as(u32, @intCast(self.b & (@as(u32, 1) << @as(u5, @intCast(nb))) - 1)); + self.b >>= @as(u5, @intCast(nb)); self.nb -= nb; if (i + rep > n) { corrupt_input_error_offset = self.roffset; @@ -623,7 +623,7 @@ pub fn Decompressor(comptime ReaderType: type) type { var length: u32 = 0; switch (v) { 0...255 => { - self.dict.writeByte(@intCast(u8, v)); + self.dict.writeByte(@as(u8, @intCast(v))); if (self.dict.availWrite() == 0) { self.to_read = self.dict.readFlush(); self.step = huffmanBlock; @@ -676,8 +676,8 @@ pub fn Decompressor(comptime ReaderType: type) type { while (self.nb < n) { try self.moreBits(); } - length += @intCast(u32, self.b) & ((@as(u32, 1) << @intCast(u5, n)) - 1); - self.b >>= @intCast(u5, n); + length += @as(u32, @intCast(self.b)) & ((@as(u32, 1) << @as(u5, @intCast(n))) - 1); + self.b >>= @as(u5, @intCast(n)); self.nb -= n; } @@ -686,9 +686,9 @@ pub fn Decompressor(comptime ReaderType: type) type { while (self.nb < 5) { try self.moreBits(); } - dist = @intCast( + dist = @as( u32, - bu.bitReverse(u8, @intCast(u8, (self.b & 0x1F) << 3), 8), + @intCast(bu.bitReverse(u8, @as(u8, @intCast((self.b & 0x1F) << 3)), 8)), ); self.b >>= 5; self.nb -= 5; @@ -699,16 +699,16 @@ pub fn Decompressor(comptime ReaderType: type) type { switch (dist) { 0...3 => dist += 1, 4...max_num_dist - 1 => { // 4...29 - var nb = @intCast(u32, dist - 2) >> 1; + var nb = @as(u32, @intCast(dist - 2)) >> 1; // have 1 bit in bottom of dist, need nb more. 
- var extra = (dist & 1) << @intCast(u5, nb); + var extra = (dist & 1) << @as(u5, @intCast(nb)); while (self.nb < nb) { try self.moreBits(); } - extra |= @intCast(u32, self.b & (@as(u32, 1) << @intCast(u5, nb)) - 1); - self.b >>= @intCast(u5, nb); + extra |= @as(u32, @intCast(self.b & (@as(u32, 1) << @as(u5, @intCast(nb))) - 1)); + self.b >>= @as(u5, @intCast(nb)); self.nb -= nb; - dist = (@as(u32, 1) << @intCast(u5, nb + 1)) + 1 + extra; + dist = (@as(u32, 1) << @as(u5, @intCast(nb + 1))) + 1 + extra; }, else => { corrupt_input_error_offset = self.roffset; @@ -762,10 +762,10 @@ pub fn Decompressor(comptime ReaderType: type) type { self.err = InflateError.UnexpectedEndOfStream; return InflateError.UnexpectedEndOfStream; }; - self.roffset += @intCast(u64, nr); - var n = @intCast(u32, self.buf[0]) | @intCast(u32, self.buf[1]) << 8; - var nn = @intCast(u32, self.buf[2]) | @intCast(u32, self.buf[3]) << 8; - if (@intCast(u16, nn) != @truncate(u16, ~n)) { + self.roffset += @as(u64, @intCast(nr)); + var n = @as(u32, @intCast(self.buf[0])) | @as(u32, @intCast(self.buf[1])) << 8; + var nn = @as(u32, @intCast(self.buf[2])) | @as(u32, @intCast(self.buf[3])) << 8; + if (@as(u16, @intCast(nn)) != @as(u16, @truncate(~n))) { corrupt_input_error_offset = self.roffset; self.err = InflateError.CorruptInput; return InflateError.CorruptInput; @@ -793,9 +793,9 @@ pub fn Decompressor(comptime ReaderType: type) type { if (cnt < buf.len) { self.err = InflateError.UnexpectedEndOfStream; } - self.roffset += @intCast(u64, cnt); - self.copy_len -= @intCast(u32, cnt); - self.dict.writeMark(@intCast(u32, cnt)); + self.roffset += @as(u64, @intCast(cnt)); + self.copy_len -= @as(u32, @intCast(cnt)); + self.dict.writeMark(@as(u32, @intCast(cnt))); if (self.err != null) { return InflateError.UnexpectedEndOfStream; } @@ -826,7 +826,7 @@ pub fn Decompressor(comptime ReaderType: type) type { return InflateError.BadReaderState; }; self.roffset += 1; - self.b |= @as(u32, c) << @intCast(u5, self.nb); + self.b |= @as(u32, c) << @as(u5, @intCast(self.nb)); self.nb += 8; return; } @@ -854,14 +854,14 @@ pub fn Decompressor(comptime ReaderType: type) type { return InflateError.BadReaderState; }; self.roffset += 1; - b |= @intCast(u32, c) << @intCast(u5, nb & 31); + b |= @as(u32, @intCast(c)) << @as(u5, @intCast(nb & 31)); nb += 8; } var chunk = h.chunks[b & (huffman_num_chunks - 1)]; - n = @intCast(u32, chunk & huffman_count_mask); + n = @as(u32, @intCast(chunk & huffman_count_mask)); if (n > huffman_chunk_bits) { chunk = h.links[chunk >> huffman_value_shift][(b >> huffman_chunk_bits) & h.link_mask]; - n = @intCast(u32, chunk & huffman_count_mask); + n = @as(u32, @intCast(chunk & huffman_count_mask)); } if (n <= nb) { if (n == 0) { @@ -871,9 +871,9 @@ pub fn Decompressor(comptime ReaderType: type) type { self.err = InflateError.CorruptInput; return InflateError.CorruptInput; } - self.b = b >> @intCast(u5, n & 31); + self.b = b >> @as(u5, @intCast(n & 31)); self.nb = nb - n; - return @intCast(u32, chunk >> huffman_value_shift); + return @as(u32, @intCast(chunk >> huffman_value_shift)); } } } diff --git a/lib/std/compress/deflate/deflate_fast.zig b/lib/std/compress/deflate/deflate_fast.zig index c86d181cb5..a11548fa1f 100644 --- a/lib/std/compress/deflate/deflate_fast.zig +++ b/lib/std/compress/deflate/deflate_fast.zig @@ -30,23 +30,23 @@ const table_size = 1 << table_bits; // Size of the table. const buffer_reset = math.maxInt(i32) - max_store_block_size * 2; fn load32(b: []u8, i: i32) u32 { - var s = b[@intCast(usize, i) .. 
@intCast(usize, i) + 4]; - return @intCast(u32, s[0]) | - @intCast(u32, s[1]) << 8 | - @intCast(u32, s[2]) << 16 | - @intCast(u32, s[3]) << 24; + var s = b[@as(usize, @intCast(i)) .. @as(usize, @intCast(i)) + 4]; + return @as(u32, @intCast(s[0])) | + @as(u32, @intCast(s[1])) << 8 | + @as(u32, @intCast(s[2])) << 16 | + @as(u32, @intCast(s[3])) << 24; } fn load64(b: []u8, i: i32) u64 { - var s = b[@intCast(usize, i)..@intCast(usize, i + 8)]; - return @intCast(u64, s[0]) | - @intCast(u64, s[1]) << 8 | - @intCast(u64, s[2]) << 16 | - @intCast(u64, s[3]) << 24 | - @intCast(u64, s[4]) << 32 | - @intCast(u64, s[5]) << 40 | - @intCast(u64, s[6]) << 48 | - @intCast(u64, s[7]) << 56; + var s = b[@as(usize, @intCast(i))..@as(usize, @intCast(i + 8))]; + return @as(u64, @intCast(s[0])) | + @as(u64, @intCast(s[1])) << 8 | + @as(u64, @intCast(s[2])) << 16 | + @as(u64, @intCast(s[3])) << 24 | + @as(u64, @intCast(s[4])) << 32 | + @as(u64, @intCast(s[5])) << 40 | + @as(u64, @intCast(s[6])) << 48 | + @as(u64, @intCast(s[7])) << 56; } fn hash(u: u32) u32 { @@ -117,7 +117,7 @@ pub const DeflateFast = struct { // s_limit is when to stop looking for offset/length copies. The input_margin // lets us use a fast path for emitLiteral in the main loop, while we are // looking for copies. - var s_limit = @intCast(i32, src.len - input_margin); + var s_limit = @as(i32, @intCast(src.len - input_margin)); // next_emit is where in src the next emitLiteral should start from. var next_emit: i32 = 0; @@ -170,7 +170,7 @@ pub const DeflateFast = struct { // A 4-byte match has been found. We'll later see if more than 4 bytes // match. But, prior to the match, src[next_emit..s] are unmatched. Emit // them as literal bytes. - emitLiteral(dst, tokens_count, src[@intCast(usize, next_emit)..@intCast(usize, s)]); + emitLiteral(dst, tokens_count, src[@as(usize, @intCast(next_emit))..@as(usize, @intCast(s))]); // Call emitCopy, and then see if another emitCopy could be our next // move. Repeat until we find no match for the input immediately after @@ -192,8 +192,8 @@ pub const DeflateFast = struct { // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) dst[tokens_count.*] = token.matchToken( - @intCast(u32, l + 4 - base_match_length), - @intCast(u32, s - t - base_match_offset), + @as(u32, @intCast(l + 4 - base_match_length)), + @as(u32, @intCast(s - t - base_match_offset)), ); tokens_count.* += 1; s += l; @@ -209,22 +209,22 @@ pub const DeflateFast = struct { // are faster as one load64 call (with some shifts) instead of // three load32 calls. 
var x = load64(src, s - 1); - var prev_hash = hash(@truncate(u32, x)); + var prev_hash = hash(@as(u32, @truncate(x))); self.table[prev_hash & table_mask] = TableEntry{ .offset = self.cur + s - 1, - .val = @truncate(u32, x), + .val = @as(u32, @truncate(x)), }; x >>= 8; - var curr_hash = hash(@truncate(u32, x)); + var curr_hash = hash(@as(u32, @truncate(x))); candidate = self.table[curr_hash & table_mask]; self.table[curr_hash & table_mask] = TableEntry{ .offset = self.cur + s, - .val = @truncate(u32, x), + .val = @as(u32, @truncate(x)), }; var offset = s - (candidate.offset - self.cur); - if (offset > max_match_offset or @truncate(u32, x) != candidate.val) { - cv = @truncate(u32, x >> 8); + if (offset > max_match_offset or @as(u32, @truncate(x)) != candidate.val) { + cv = @as(u32, @truncate(x >> 8)); next_hash = hash(cv); s += 1; break; @@ -232,18 +232,18 @@ pub const DeflateFast = struct { } } - if (@intCast(u32, next_emit) < src.len) { - emitLiteral(dst, tokens_count, src[@intCast(usize, next_emit)..]); + if (@as(u32, @intCast(next_emit)) < src.len) { + emitLiteral(dst, tokens_count, src[@as(usize, @intCast(next_emit))..]); } - self.cur += @intCast(i32, src.len); - self.prev_len = @intCast(u32, src.len); + self.cur += @as(i32, @intCast(src.len)); + self.prev_len = @as(u32, @intCast(src.len)); @memcpy(self.prev[0..self.prev_len], src); return; } fn emitLiteral(dst: []token.Token, tokens_count: *u16, lit: []u8) void { for (lit) |v| { - dst[tokens_count.*] = token.literalToken(@intCast(u32, v)); + dst[tokens_count.*] = token.literalToken(@as(u32, @intCast(v))); tokens_count.* += 1; } return; @@ -253,60 +253,60 @@ pub const DeflateFast = struct { // t can be negative to indicate the match is starting in self.prev. // We assume that src[s-4 .. s] and src[t-4 .. t] already match. fn matchLen(self: *Self, s: i32, t: i32, src: []u8) i32 { - var s1 = @intCast(u32, s) + max_match_length - 4; + var s1 = @as(u32, @intCast(s)) + max_match_length - 4; if (s1 > src.len) { - s1 = @intCast(u32, src.len); + s1 = @as(u32, @intCast(src.len)); } // If we are inside the current block if (t >= 0) { - var b = src[@intCast(usize, t)..]; - var a = src[@intCast(usize, s)..@intCast(usize, s1)]; + var b = src[@as(usize, @intCast(t))..]; + var a = src[@as(usize, @intCast(s))..@as(usize, @intCast(s1))]; b = b[0..a.len]; // Extend the match to be as long as possible. for (a, 0..) |_, i| { if (a[i] != b[i]) { - return @intCast(i32, i); + return @as(i32, @intCast(i)); } } - return @intCast(i32, a.len); + return @as(i32, @intCast(a.len)); } // We found a match in the previous block. - var tp = @intCast(i32, self.prev_len) + t; + var tp = @as(i32, @intCast(self.prev_len)) + t; if (tp < 0) { return 0; } // Extend the match to be as long as possible. - var a = src[@intCast(usize, s)..@intCast(usize, s1)]; - var b = self.prev[@intCast(usize, tp)..@intCast(usize, self.prev_len)]; + var a = src[@as(usize, @intCast(s))..@as(usize, @intCast(s1))]; + var b = self.prev[@as(usize, @intCast(tp))..@as(usize, @intCast(self.prev_len))]; if (b.len > a.len) { b = b[0..a.len]; } a = a[0..b.len]; for (b, 0..) |_, i| { if (a[i] != b[i]) { - return @intCast(i32, i); + return @as(i32, @intCast(i)); } } // If we reached our limit, we matched everything we are // allowed to in the previous block and we return. - var n = @intCast(i32, b.len); - if (@intCast(u32, s + n) == s1) { + var n = @as(i32, @intCast(b.len)); + if (@as(u32, @intCast(s + n)) == s1) { return n; } // Continue looking for more matches in the current block. 
- a = src[@intCast(usize, s + n)..@intCast(usize, s1)]; + a = src[@as(usize, @intCast(s + n))..@as(usize, @intCast(s1))]; b = src[0..a.len]; for (a, 0..) |_, i| { if (a[i] != b[i]) { - return @intCast(i32, i) + n; + return @as(i32, @intCast(i)) + n; } } - return @intCast(i32, a.len) + n; + return @as(i32, @intCast(a.len)) + n; } // Reset resets the encoding history. @@ -574,7 +574,7 @@ test "best speed match 2/2" { var e = DeflateFast{ .prev = previous, - .prev_len = @intCast(u32, previous.len), + .prev_len = @as(u32, @intCast(previous.len)), .table = undefined, .allocator = undefined, .cur = 0, @@ -617,7 +617,7 @@ test "best speed shift offsets" { try expect(want_first_tokens > want_second_tokens); // Forward the current indicator to before wraparound. - enc.cur = buffer_reset - @intCast(i32, test_data.len); + enc.cur = buffer_reset - @as(i32, @intCast(test_data.len)); // Part 1 before wrap, should match clean state. tokens_count = 0; diff --git a/lib/std/compress/deflate/deflate_fast_test.zig b/lib/std/compress/deflate/deflate_fast_test.zig index 1c771d925a..08f6079aa5 100644 --- a/lib/std/compress/deflate/deflate_fast_test.zig +++ b/lib/std/compress/deflate/deflate_fast_test.zig @@ -19,7 +19,7 @@ test "best speed" { defer testing.allocator.free(abcabc); for (abcabc, 0..) |_, i| { - abcabc[i] = @intCast(u8, i % 128); + abcabc[i] = @as(u8, @intCast(i % 128)); } var tc_01 = [_]u32{ 65536, 0 }; @@ -119,16 +119,16 @@ test "best speed max match offset" { // zeros1 is between 0 and 30 zeros. // The difference between the two abc's will be offset, which // is max_match_offset plus or minus a small adjustment. - var src_len: usize = @intCast(usize, offset + @as(i32, abc.len) + @intCast(i32, extra)); + var src_len: usize = @as(usize, @intCast(offset + @as(i32, abc.len) + @as(i32, @intCast(extra)))); var src = try testing.allocator.alloc(u8, src_len); defer testing.allocator.free(src); @memcpy(src[0..abc.len], abc); if (!do_match_before) { - const src_offset: usize = @intCast(usize, offset - @as(i32, xyz.len)); + const src_offset: usize = @as(usize, @intCast(offset - @as(i32, xyz.len))); @memcpy(src[src_offset..][0..xyz.len], xyz); } - const src_offset: usize = @intCast(usize, offset); + const src_offset: usize = @as(usize, @intCast(offset)); @memcpy(src[src_offset..][0..abc.len], abc); var compressed = ArrayList(u8).init(testing.allocator); diff --git a/lib/std/compress/deflate/dict_decoder.zig b/lib/std/compress/deflate/dict_decoder.zig index d9f240e7b4..75fdd359dd 100644 --- a/lib/std/compress/deflate/dict_decoder.zig +++ b/lib/std/compress/deflate/dict_decoder.zig @@ -49,7 +49,7 @@ pub const DictDecoder = struct { if (dict != null) { const src = dict.?[dict.?.len -| self.hist.len..]; @memcpy(self.hist[0..src.len], src); - self.wr_pos = @intCast(u32, dict.?.len); + self.wr_pos = @as(u32, @intCast(dict.?.len)); } if (self.wr_pos == self.hist.len) { @@ -66,7 +66,7 @@ pub const DictDecoder = struct { // Reports the total amount of historical data in the dictionary. pub fn histSize(self: *Self) u32 { if (self.full) { - return @intCast(u32, self.hist.len); + return @as(u32, @intCast(self.hist.len)); } return self.wr_pos; } @@ -78,7 +78,7 @@ pub const DictDecoder = struct { // Reports the available amount of output buffer space. pub fn availWrite(self: *Self) u32 { - return @intCast(u32, self.hist.len - self.wr_pos); + return @as(u32, @intCast(self.hist.len - self.wr_pos)); } // Returns a slice of the available buffer to write data to. 
@@ -110,10 +110,10 @@ pub const DictDecoder = struct { fn copy(dst: []u8, src: []const u8) u32 { if (src.len > dst.len) { mem.copyForwards(u8, dst, src[0..dst.len]); - return @intCast(u32, dst.len); + return @as(u32, @intCast(dst.len)); } mem.copyForwards(u8, dst[0..src.len], src); - return @intCast(u32, src.len); + return @as(u32, @intCast(src.len)); } // Copies a string at a given (dist, length) to the output. @@ -125,10 +125,10 @@ pub const DictDecoder = struct { assert(0 < dist and dist <= self.histSize()); var dst_base = self.wr_pos; var dst_pos = dst_base; - var src_pos: i32 = @intCast(i32, dst_pos) - @intCast(i32, dist); + var src_pos: i32 = @as(i32, @intCast(dst_pos)) - @as(i32, @intCast(dist)); var end_pos = dst_pos + length; if (end_pos > self.hist.len) { - end_pos = @intCast(u32, self.hist.len); + end_pos = @as(u32, @intCast(self.hist.len)); } // Copy non-overlapping section after destination position. @@ -139,8 +139,8 @@ pub const DictDecoder = struct { // Thus, a backwards copy is performed here; that is, the exact bytes in // the source prior to the copy is placed in the destination. if (src_pos < 0) { - src_pos += @intCast(i32, self.hist.len); - dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@intCast(usize, src_pos)..]); + src_pos += @as(i32, @intCast(self.hist.len)); + dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@as(usize, @intCast(src_pos))..]); src_pos = 0; } @@ -160,7 +160,7 @@ pub const DictDecoder = struct { // dst_pos = end_pos; // while (dst_pos < end_pos) { - dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@intCast(usize, src_pos)..dst_pos]); + dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@as(usize, @intCast(src_pos))..dst_pos]); } self.wr_pos = dst_pos; diff --git a/lib/std/compress/deflate/huffman_bit_writer.zig b/lib/std/compress/deflate/huffman_bit_writer.zig index a852287b53..5204435106 100644 --- a/lib/std/compress/deflate/huffman_bit_writer.zig +++ b/lib/std/compress/deflate/huffman_bit_writer.zig @@ -107,7 +107,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { } var n = self.nbytes; while (self.nbits != 0) { - self.bytes[n] = @truncate(u8, self.bits); + self.bytes[n] = @as(u8, @truncate(self.bits)); self.bits >>= 8; if (self.nbits > 8) { // Avoid underflow self.nbits -= 8; @@ -132,7 +132,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { if (self.err) { return; } - self.bits |= @intCast(u64, b) << @intCast(u6, self.nbits); + self.bits |= @as(u64, @intCast(b)) << @as(u6, @intCast(self.nbits)); self.nbits += nb; if (self.nbits >= 48) { var bits = self.bits; @@ -140,12 +140,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { self.nbits -= 48; var n = self.nbytes; var bytes = self.bytes[n..][0..6]; - bytes[0] = @truncate(u8, bits); - bytes[1] = @truncate(u8, bits >> 8); - bytes[2] = @truncate(u8, bits >> 16); - bytes[3] = @truncate(u8, bits >> 24); - bytes[4] = @truncate(u8, bits >> 32); - bytes[5] = @truncate(u8, bits >> 40); + bytes[0] = @as(u8, @truncate(bits)); + bytes[1] = @as(u8, @truncate(bits >> 8)); + bytes[2] = @as(u8, @truncate(bits >> 16)); + bytes[3] = @as(u8, @truncate(bits >> 24)); + bytes[4] = @as(u8, @truncate(bits >> 32)); + bytes[5] = @as(u8, @truncate(bits >> 40)); n += 6; if (n >= buffer_flush_size) { try self.write(self.bytes[0..n]); @@ -165,7 +165,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { return; } while (self.nbits != 0) { - self.bytes[n] = @truncate(u8, self.bits); + self.bytes[n] = @as(u8, @truncate(self.bits)); self.bits >>= 8; self.nbits -= 
8; n += 1; @@ -209,12 +209,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { // Copy the concatenated code sizes to codegen. Put a marker at the end. var cgnl = codegen[0..num_literals]; for (cgnl, 0..) |_, i| { - cgnl[i] = @intCast(u8, lit_enc.codes[i].len); + cgnl[i] = @as(u8, @intCast(lit_enc.codes[i].len)); } cgnl = codegen[num_literals .. num_literals + num_offsets]; for (cgnl, 0..) |_, i| { - cgnl[i] = @intCast(u8, off_enc.codes[i].len); + cgnl[i] = @as(u8, @intCast(off_enc.codes[i].len)); } codegen[num_literals + num_offsets] = bad_code; @@ -243,7 +243,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { } codegen[out_index] = 16; out_index += 1; - codegen[out_index] = @intCast(u8, n - 3); + codegen[out_index] = @as(u8, @intCast(n - 3)); out_index += 1; self.codegen_freq[16] += 1; count -= n; @@ -256,7 +256,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { } codegen[out_index] = 18; out_index += 1; - codegen[out_index] = @intCast(u8, n - 11); + codegen[out_index] = @as(u8, @intCast(n - 11)); out_index += 1; self.codegen_freq[18] += 1; count -= n; @@ -265,7 +265,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { // 3 <= count <= 10 codegen[out_index] = 17; out_index += 1; - codegen[out_index] = @intCast(u8, count - 3); + codegen[out_index] = @as(u8, @intCast(count - 3)); out_index += 1; self.codegen_freq[17] += 1; count = 0; @@ -307,8 +307,8 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { extra_bits; return DynamicSize{ - .size = @intCast(u32, size), - .num_codegens = @intCast(u32, num_codegens), + .size = @as(u32, @intCast(size)), + .num_codegens = @as(u32, @intCast(num_codegens)), }; } @@ -328,7 +328,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { return .{ .size = 0, .storable = false }; } if (in.?.len <= deflate_const.max_store_block_size) { - return .{ .size = @intCast(u32, (in.?.len + 5) * 8), .storable = true }; + return .{ .size = @as(u32, @intCast((in.?.len + 5) * 8)), .storable = true }; } return .{ .size = 0, .storable = false }; } @@ -337,20 +337,20 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { if (self.err) { return; } - self.bits |= @intCast(u64, c.code) << @intCast(u6, self.nbits); - self.nbits += @intCast(u32, c.len); + self.bits |= @as(u64, @intCast(c.code)) << @as(u6, @intCast(self.nbits)); + self.nbits += @as(u32, @intCast(c.len)); if (self.nbits >= 48) { var bits = self.bits; self.bits >>= 48; self.nbits -= 48; var n = self.nbytes; var bytes = self.bytes[n..][0..6]; - bytes[0] = @truncate(u8, bits); - bytes[1] = @truncate(u8, bits >> 8); - bytes[2] = @truncate(u8, bits >> 16); - bytes[3] = @truncate(u8, bits >> 24); - bytes[4] = @truncate(u8, bits >> 32); - bytes[5] = @truncate(u8, bits >> 40); + bytes[0] = @as(u8, @truncate(bits)); + bytes[1] = @as(u8, @truncate(bits >> 8)); + bytes[2] = @as(u8, @truncate(bits >> 16)); + bytes[3] = @as(u8, @truncate(bits >> 24)); + bytes[4] = @as(u8, @truncate(bits >> 32)); + bytes[5] = @as(u8, @truncate(bits >> 40)); n += 6; if (n >= buffer_flush_size) { try self.write(self.bytes[0..n]); @@ -381,36 +381,36 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { first_bits = 5; } try self.writeBits(first_bits, 3); - try self.writeBits(@intCast(u32, num_literals - 257), 5); - try self.writeBits(@intCast(u32, num_offsets - 1), 5); - try self.writeBits(@intCast(u32, num_codegens - 4), 4); + try self.writeBits(@as(u32, @intCast(num_literals - 257)), 5); + try self.writeBits(@as(u32, @intCast(num_offsets - 1)), 5); + try 
self.writeBits(@as(u32, @intCast(num_codegens - 4)), 4); var i: u32 = 0; while (i < num_codegens) : (i += 1) { - var value = @intCast(u32, self.codegen_encoding.codes[codegen_order[i]].len); - try self.writeBits(@intCast(u32, value), 3); + var value = @as(u32, @intCast(self.codegen_encoding.codes[codegen_order[i]].len)); + try self.writeBits(@as(u32, @intCast(value)), 3); } i = 0; while (true) { - var code_word: u32 = @intCast(u32, self.codegen[i]); + var code_word: u32 = @as(u32, @intCast(self.codegen[i])); i += 1; if (code_word == bad_code) { break; } - try self.writeCode(self.codegen_encoding.codes[@intCast(u32, code_word)]); + try self.writeCode(self.codegen_encoding.codes[@as(u32, @intCast(code_word))]); switch (code_word) { 16 => { - try self.writeBits(@intCast(u32, self.codegen[i]), 2); + try self.writeBits(@as(u32, @intCast(self.codegen[i])), 2); i += 1; }, 17 => { - try self.writeBits(@intCast(u32, self.codegen[i]), 3); + try self.writeBits(@as(u32, @intCast(self.codegen[i])), 3); i += 1; }, 18 => { - try self.writeBits(@intCast(u32, self.codegen[i]), 7); + try self.writeBits(@as(u32, @intCast(self.codegen[i])), 7); i += 1; }, else => {}, @@ -428,8 +428,8 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { } try self.writeBits(flag, 3); try self.flush(); - try self.writeBits(@intCast(u32, length), 16); - try self.writeBits(@intCast(u32, ~@intCast(u16, length)), 16); + try self.writeBits(@as(u32, @intCast(length)), 16); + try self.writeBits(@as(u32, @intCast(~@as(u16, @intCast(length)))), 16); } fn writeFixedHeader(self: *Self, is_eof: bool) Error!void { @@ -476,14 +476,14 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { var length_code: u32 = length_codes_start + 8; while (length_code < num_literals) : (length_code += 1) { // First eight length codes have extra size = 0. - extra_bits += @intCast(u32, self.literal_freq[length_code]) * - @intCast(u32, length_extra_bits[length_code - length_codes_start]); + extra_bits += @as(u32, @intCast(self.literal_freq[length_code])) * + @as(u32, @intCast(length_extra_bits[length_code - length_codes_start])); } var offset_code: u32 = 4; while (offset_code < num_offsets) : (offset_code += 1) { // First four offset codes have extra size = 0. 
- extra_bits += @intCast(u32, self.offset_freq[offset_code]) * - @intCast(u32, offset_extra_bits[offset_code]); + extra_bits += @as(u32, @intCast(self.offset_freq[offset_code])) * + @as(u32, @intCast(offset_extra_bits[offset_code])); } } @@ -621,12 +621,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { self.literal_freq[token.literal(deflate_const.end_block_marker)] += 1; // get the number of literals - num_literals = @intCast(u32, self.literal_freq.len); + num_literals = @as(u32, @intCast(self.literal_freq.len)); while (self.literal_freq[num_literals - 1] == 0) { num_literals -= 1; } // get the number of offsets - num_offsets = @intCast(u32, self.offset_freq.len); + num_offsets = @as(u32, @intCast(self.offset_freq.len)); while (num_offsets > 0 and self.offset_freq[num_offsets - 1] == 0) { num_offsets -= 1; } @@ -664,18 +664,18 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { var length = token.length(t); var length_code = token.lengthCode(length); try self.writeCode(le_codes[length_code + length_codes_start]); - var extra_length_bits = @intCast(u32, length_extra_bits[length_code]); + var extra_length_bits = @as(u32, @intCast(length_extra_bits[length_code])); if (extra_length_bits > 0) { - var extra_length = @intCast(u32, length - length_base[length_code]); + var extra_length = @as(u32, @intCast(length - length_base[length_code])); try self.writeBits(extra_length, extra_length_bits); } // Write the offset var offset = token.offset(t); var offset_code = token.offsetCode(offset); try self.writeCode(oe_codes[offset_code]); - var extra_offset_bits = @intCast(u32, offset_extra_bits[offset_code]); + var extra_offset_bits = @as(u32, @intCast(offset_extra_bits[offset_code])); if (extra_offset_bits > 0) { - var extra_offset = @intCast(u32, offset - offset_base[offset_code]); + var extra_offset = @as(u32, @intCast(offset - offset_base[offset_code])); try self.writeBits(extra_offset, extra_offset_bits); } } @@ -742,8 +742,8 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { for (input) |t| { // Bitwriting inlined, ~30% speedup var c = encoding[t]; - self.bits |= @intCast(u64, c.code) << @intCast(u6, self.nbits); - self.nbits += @intCast(u32, c.len); + self.bits |= @as(u64, @intCast(c.code)) << @as(u6, @intCast(self.nbits)); + self.nbits += @as(u32, @intCast(c.len)); if (self.nbits < 48) { continue; } @@ -752,12 +752,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { self.bits >>= 48; self.nbits -= 48; var bytes = self.bytes[n..][0..6]; - bytes[0] = @truncate(u8, bits); - bytes[1] = @truncate(u8, bits >> 8); - bytes[2] = @truncate(u8, bits >> 16); - bytes[3] = @truncate(u8, bits >> 24); - bytes[4] = @truncate(u8, bits >> 32); - bytes[5] = @truncate(u8, bits >> 40); + bytes[0] = @as(u8, @truncate(bits)); + bytes[1] = @as(u8, @truncate(bits >> 8)); + bytes[2] = @as(u8, @truncate(bits >> 16)); + bytes[3] = @as(u8, @truncate(bits >> 24)); + bytes[4] = @as(u8, @truncate(bits >> 32)); + bytes[5] = @as(u8, @truncate(bits >> 40)); n += 6; if (n < buffer_flush_size) { continue; diff --git a/lib/std/compress/deflate/huffman_code.zig b/lib/std/compress/deflate/huffman_code.zig index 689ac1441a..4fea45f863 100644 --- a/lib/std/compress/deflate/huffman_code.zig +++ b/lib/std/compress/deflate/huffman_code.zig @@ -73,7 +73,7 @@ pub const HuffmanEncoder = struct { // Set list to be the set of all non-zero literals and their frequencies for (freq, 0..) 
|f, i| { if (f != 0) { - list[count] = LiteralNode{ .literal = @intCast(u16, i), .freq = f }; + list[count] = LiteralNode{ .literal = @as(u16, @intCast(i)), .freq = f }; count += 1; } else { list[count] = LiteralNode{ .literal = 0x00, .freq = 0 }; @@ -88,7 +88,7 @@ pub const HuffmanEncoder = struct { // two or fewer literals, everything has bit length 1. for (list, 0..) |node, i| { // "list" is in order of increasing literal value. - self.codes[node.literal].set(@intCast(u16, i), 1); + self.codes[node.literal].set(@as(u16, @intCast(i)), 1); } return; } @@ -105,7 +105,7 @@ pub const HuffmanEncoder = struct { var total: u32 = 0; for (freq, 0..) |f, i| { if (f != 0) { - total += @intCast(u32, f) * @intCast(u32, self.codes[i].len); + total += @as(u32, @intCast(f)) * @as(u32, @intCast(self.codes[i].len)); } } return total; @@ -167,7 +167,7 @@ pub const HuffmanEncoder = struct { } // We need a total of 2*n - 2 items at top level and have already generated 2. - levels[max_bits].needed = 2 * @intCast(u32, n) - 4; + levels[max_bits].needed = 2 * @as(u32, @intCast(n)) - 4; { var level = max_bits; @@ -267,19 +267,19 @@ pub const HuffmanEncoder = struct { // are encoded using "bits" bits, and get the values // code, code + 1, .... The code values are // assigned in literal order (not frequency order). - var chunk = list[list.len - @intCast(u32, bits) ..]; + var chunk = list[list.len - @as(u32, @intCast(bits)) ..]; self.lns = chunk; mem.sort(LiteralNode, self.lns, {}, byLiteral); for (chunk) |node| { self.codes[node.literal] = HuffCode{ - .code = bu.bitReverse(u16, code, @intCast(u5, n)), - .len = @intCast(u16, n), + .code = bu.bitReverse(u16, code, @as(u5, @intCast(n))), + .len = @as(u16, @intCast(n)), }; code += 1; } - list = list[0 .. list.len - @intCast(u32, bits)]; + list = list[0 .. list.len - @as(u32, @intCast(bits))]; } } }; @@ -332,7 +332,7 @@ pub fn generateFixedLiteralEncoding(allocator: Allocator) !HuffmanEncoder { size = 8; }, } - codes[ch] = HuffCode{ .code = bu.bitReverse(u16, bits, @intCast(u5, size)), .len = size }; + codes[ch] = HuffCode{ .code = bu.bitReverse(u16, bits, @as(u5, @intCast(size))), .len = size }; } return h; } @@ -341,7 +341,7 @@ pub fn generateFixedOffsetEncoding(allocator: Allocator) !HuffmanEncoder { var h = try newHuffmanEncoder(allocator, 30); var codes = h.codes; for (codes, 0..) 
|_, ch| { - codes[ch] = HuffCode{ .code = bu.bitReverse(u16, @intCast(u16, ch), 5), .len = 5 }; + codes[ch] = HuffCode{ .code = bu.bitReverse(u16, @as(u16, @intCast(ch)), 5), .len = 5 }; } return h; } diff --git a/lib/std/compress/deflate/token.zig b/lib/std/compress/deflate/token.zig index d0e9a23647..744fcdeb12 100644 --- a/lib/std/compress/deflate/token.zig +++ b/lib/std/compress/deflate/token.zig @@ -70,16 +70,16 @@ pub fn matchToken(xlength: u32, xoffset: u32) Token { // Returns the literal of a literal token pub fn literal(t: Token) u32 { - return @intCast(u32, t - literal_type); + return @as(u32, @intCast(t - literal_type)); } // Returns the extra offset of a match token pub fn offset(t: Token) u32 { - return @intCast(u32, t) & offset_mask; + return @as(u32, @intCast(t)) & offset_mask; } pub fn length(t: Token) u32 { - return @intCast(u32, (t - match_type) >> length_shift); + return @as(u32, @intCast((t - match_type) >> length_shift)); } pub fn lengthCode(len: u32) u32 { @@ -88,10 +88,10 @@ pub fn lengthCode(len: u32) u32 { // Returns the offset code corresponding to a specific offset pub fn offsetCode(off: u32) u32 { - if (off < @intCast(u32, offset_codes.len)) { + if (off < @as(u32, @intCast(offset_codes.len))) { return offset_codes[off]; } - if (off >> 7 < @intCast(u32, offset_codes.len)) { + if (off >> 7 < @as(u32, @intCast(offset_codes.len))) { return offset_codes[off >> 7] + 14; } return offset_codes[off >> 14] + 28; diff --git a/lib/std/compress/gzip.zig b/lib/std/compress/gzip.zig index 7e9fea6814..f6fb038ae3 100644 --- a/lib/std/compress/gzip.zig +++ b/lib/std/compress/gzip.zig @@ -89,7 +89,7 @@ pub fn Decompress(comptime ReaderType: type) type { if (FLG & FHCRC != 0) { const hash = try source.readIntLittle(u16); - if (hash != @truncate(u16, hasher.hasher.final())) + if (hash != @as(u16, @truncate(hasher.hasher.final()))) return error.WrongChecksum; } diff --git a/lib/std/compress/lzma/decode.zig b/lib/std/compress/lzma/decode.zig index a6adb941a4..0dae9281e8 100644 --- a/lib/std/compress/lzma/decode.zig +++ b/lib/std/compress/lzma/decode.zig @@ -52,11 +52,11 @@ pub const Params = struct { return error.CorruptInput; } - const lc = @intCast(u4, props % 9); + const lc = @as(u4, @intCast(props % 9)); props /= 9; - const lp = @intCast(u3, props % 5); + const lp = @as(u3, @intCast(props % 5)); props /= 5; - const pb = @intCast(u3, props); + const pb = @as(u3, @intCast(props)); const dict_size_provided = try reader.readIntLittle(u32); const dict_size = @max(0x1000, dict_size_provided); @@ -342,7 +342,7 @@ pub const DecoderState = struct { result = (result << 1) ^ @intFromBool(try decoder.decodeBit(reader, &probs[result], update)); } - return @truncate(u8, result - 0x100); + return @as(u8, @truncate(result - 0x100)); } fn decodeDistance( @@ -358,7 +358,7 @@ pub const DecoderState = struct { if (pos_slot < 4) return pos_slot; - const num_direct_bits = @intCast(u5, (pos_slot >> 1) - 1); + const num_direct_bits = @as(u5, @intCast((pos_slot >> 1) - 1)); var result = (2 ^ (pos_slot & 1)) << num_direct_bits; if (pos_slot < 14) { diff --git a/lib/std/compress/lzma2/decode.zig b/lib/std/compress/lzma2/decode.zig index 7297a1a51b..a23007d42a 100644 --- a/lib/std/compress/lzma2/decode.zig +++ b/lib/std/compress/lzma2/decode.zig @@ -119,11 +119,11 @@ pub const Decoder = struct { return error.CorruptInput; } - const lc = @intCast(u4, props % 9); + const lc = @as(u4, @intCast(props % 9)); props /= 9; - const lp = @intCast(u3, props % 5); + const lp = @as(u3, @intCast(props % 5)); props /= 5; - 
const pb = @intCast(u3, props); + const pb = @as(u3, @intCast(props)); if (lc + lp > 4) { return error.CorruptInput; diff --git a/lib/std/compress/xz.zig b/lib/std/compress/xz.zig index 5debc81835..3ceec90a7a 100644 --- a/lib/std/compress/xz.zig +++ b/lib/std/compress/xz.zig @@ -18,7 +18,7 @@ fn readStreamFlags(reader: anytype, check: *Check) !void { if (reserved1 != 0) return error.CorruptInput; - check.* = @enumFromInt(Check, try bit_reader.readBitsNoEof(u4, 4)); + check.* = @as(Check, @enumFromInt(try bit_reader.readBitsNoEof(u4, 4))); const reserved2 = try bit_reader.readBitsNoEof(u4, 4); if (reserved2 != 0) diff --git a/lib/std/compress/xz/block.zig b/lib/std/compress/xz/block.zig index 2a034011c2..6f4fad1c7f 100644 --- a/lib/std/compress/xz/block.zig +++ b/lib/std/compress/xz/block.zig @@ -108,7 +108,7 @@ pub fn Decoder(comptime ReaderType: type) type { has_unpacked_size: bool, }; - const flags = @bitCast(Flags, try header_reader.readByte()); + const flags = @as(Flags, @bitCast(try header_reader.readByte())); const filter_count = @as(u3, flags.last_filter_index) + 1; if (filter_count > 1) return error.Unsupported; @@ -124,9 +124,9 @@ pub fn Decoder(comptime ReaderType: type) type { _, }; - const filter_id = @enumFromInt( + const filter_id = @as( FilterId, - try std.leb.readULEB128(u64, header_reader), + @enumFromInt(try std.leb.readULEB128(u64, header_reader)), ); if (@intFromEnum(filter_id) >= 0x4000_0000_0000_0000) diff --git a/lib/std/compress/zlib.zig b/lib/std/compress/zlib.zig index 98cabb4732..5580192537 100644 --- a/lib/std/compress/zlib.zig +++ b/lib/std/compress/zlib.zig @@ -41,7 +41,7 @@ pub fn DecompressStream(comptime ReaderType: type) type { // verify the header checksum if (header_u16 % 31 != 0) return error.BadHeader; - const header = @bitCast(ZLibHeader, header_u16); + const header = @as(ZLibHeader, @bitCast(header_u16)); // The CM field must be 8 to indicate the use of DEFLATE if (header.compression_method != ZLibHeader.DEFLATE) @@ -130,9 +130,9 @@ pub fn CompressStream(comptime WriterType: type) type { .preset_dict = 0, .checksum = 0, }; - header.checksum = @truncate(u5, 31 - @bitCast(u16, header) % 31); + header.checksum = @as(u5, @truncate(31 - @as(u16, @bitCast(header)) % 31)); - try dest.writeIntBig(u16, @bitCast(u16, header)); + try dest.writeIntBig(u16, @as(u16, @bitCast(header))); const compression_level: deflate.Compression = switch (options.level) { .no_compression => .no_compression, diff --git a/lib/std/compress/zstandard/decode/block.zig b/lib/std/compress/zstandard/decode/block.zig index 40f5903a24..bbf8492f04 100644 --- a/lib/std/compress/zstandard/decode/block.zig +++ b/lib/std/compress/zstandard/decode/block.zig @@ -894,7 +894,7 @@ pub fn decodeBlockReader( /// Decode the header of a block. 
pub fn decodeBlockHeader(src: *const [3]u8) frame.Zstandard.Block.Header { const last_block = src[0] & 1 == 1; - const block_type = @enumFromInt(frame.Zstandard.Block.Type, (src[0] & 0b110) >> 1); + const block_type = @as(frame.Zstandard.Block.Type, @enumFromInt((src[0] & 0b110) >> 1)); const block_size = ((src[0] & 0b11111000) >> 3) + (@as(u21, src[1]) << 5) + (@as(u21, src[2]) << 13); return .{ .last_block = last_block, @@ -1008,7 +1008,7 @@ pub fn decodeLiteralsSection( try huffman.decodeHuffmanTree(counting_reader.reader(), buffer) else null; - const huffman_tree_size = @intCast(usize, counting_reader.bytes_read); + const huffman_tree_size = @as(usize, @intCast(counting_reader.bytes_read)); const total_streams_size = std.math.sub(usize, header.compressed_size.?, huffman_tree_size) catch return error.MalformedLiteralsSection; @@ -1058,8 +1058,8 @@ fn decodeStreams(size_format: u2, stream_data: []const u8) !LiteralsSection.Stre /// - `error.EndOfStream` if there are not enough bytes in `source` pub fn decodeLiteralsHeader(source: anytype) !LiteralsSection.Header { const byte0 = try source.readByte(); - const block_type = @enumFromInt(LiteralsSection.BlockType, byte0 & 0b11); - const size_format = @intCast(u2, (byte0 & 0b1100) >> 2); + const block_type = @as(LiteralsSection.BlockType, @enumFromInt(byte0 & 0b11)); + const size_format = @as(u2, @intCast((byte0 & 0b1100) >> 2)); var regenerated_size: u20 = undefined; var compressed_size: ?u18 = null; switch (block_type) { @@ -1132,9 +1132,9 @@ pub fn decodeSequencesHeader( const compression_modes = try source.readByte(); - const matches_mode = @enumFromInt(SequencesSection.Header.Mode, (compression_modes & 0b00001100) >> 2); - const offsets_mode = @enumFromInt(SequencesSection.Header.Mode, (compression_modes & 0b00110000) >> 4); - const literal_mode = @enumFromInt(SequencesSection.Header.Mode, (compression_modes & 0b11000000) >> 6); + const matches_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b00001100) >> 2)); + const offsets_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b00110000) >> 4)); + const literal_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b11000000) >> 6)); if (compression_modes & 0b11 != 0) return error.ReservedBitSet; return SequencesSection.Header{ diff --git a/lib/std/compress/zstandard/decode/fse.zig b/lib/std/compress/zstandard/decode/fse.zig index 232af39ccf..6e987f9c6f 100644 --- a/lib/std/compress/zstandard/decode/fse.zig +++ b/lib/std/compress/zstandard/decode/fse.zig @@ -69,7 +69,7 @@ pub fn decodeFseTable( } fn buildFseTable(values: []const u16, entries: []Table.Fse) !void { - const total_probability = @intCast(u16, entries.len); + const total_probability = @as(u16, @intCast(entries.len)); const accuracy_log = std.math.log2_int(u16, total_probability); assert(total_probability <= 1 << 9); @@ -77,7 +77,7 @@ fn buildFseTable(values: []const u16, entries: []Table.Fse) !void { for (values, 0..) 
|value, i| { if (value == 0) { entries[entries.len - 1 - less_than_one_count] = Table.Fse{ - .symbol = @intCast(u8, i), + .symbol = @as(u8, @intCast(i)), .baseline = 0, .bits = accuracy_log, }; @@ -99,7 +99,7 @@ fn buildFseTable(values: []const u16, entries: []Table.Fse) !void { const share_size_log = std.math.log2_int(u16, share_size); for (0..probability) |i| { - temp_states[i] = @intCast(u16, position); + temp_states[i] = @as(u16, @intCast(position)); position += (entries.len >> 1) + (entries.len >> 3) + 3; position &= entries.len - 1; while (position >= entries.len - less_than_one_count) { @@ -110,13 +110,13 @@ fn buildFseTable(values: []const u16, entries: []Table.Fse) !void { std.mem.sort(u16, temp_states[0..probability], {}, std.sort.asc(u16)); for (0..probability) |i| { entries[temp_states[i]] = if (i < double_state_count) Table.Fse{ - .symbol = @intCast(u8, symbol), + .symbol = @as(u8, @intCast(symbol)), .bits = share_size_log + 1, - .baseline = single_state_count * share_size + @intCast(u16, i) * 2 * share_size, + .baseline = single_state_count * share_size + @as(u16, @intCast(i)) * 2 * share_size, } else Table.Fse{ - .symbol = @intCast(u8, symbol), + .symbol = @as(u8, @intCast(symbol)), .bits = share_size_log, - .baseline = (@intCast(u16, i) - double_state_count) * share_size, + .baseline = (@as(u16, @intCast(i)) - double_state_count) * share_size, }; } } diff --git a/lib/std/compress/zstandard/decode/huffman.zig b/lib/std/compress/zstandard/decode/huffman.zig index f5e977d0da..13fb1ac5f2 100644 --- a/lib/std/compress/zstandard/decode/huffman.zig +++ b/lib/std/compress/zstandard/decode/huffman.zig @@ -109,8 +109,8 @@ fn decodeDirectHuffmanTree(source: anytype, encoded_symbol_count: usize, weights const weights_byte_count = (encoded_symbol_count + 1) / 2; for (0..weights_byte_count) |i| { const byte = try source.readByte(); - weights[2 * i] = @intCast(u4, byte >> 4); - weights[2 * i + 1] = @intCast(u4, byte & 0xF); + weights[2 * i] = @as(u4, @intCast(byte >> 4)); + weights[2 * i + 1] = @as(u4, @intCast(byte & 0xF)); } return encoded_symbol_count + 1; } @@ -118,7 +118,7 @@ fn decodeDirectHuffmanTree(source: anytype, encoded_symbol_count: usize, weights fn assignSymbols(weight_sorted_prefixed_symbols: []LiteralsSection.HuffmanTree.PrefixedSymbol, weights: [256]u4) usize { for (0..weight_sorted_prefixed_symbols.len) |i| { weight_sorted_prefixed_symbols[i] = .{ - .symbol = @intCast(u8, i), + .symbol = @as(u8, @intCast(i)), .weight = undefined, .prefix = undefined, }; @@ -167,7 +167,7 @@ fn buildHuffmanTree(weights: *[256]u4, symbol_count: usize) error{MalformedHuffm weight_power_sum_big += (@as(u16, 1) << value) >> 1; } if (weight_power_sum_big >= 1 << 11) return error.MalformedHuffmanTree; - const weight_power_sum = @intCast(u16, weight_power_sum_big); + const weight_power_sum = @as(u16, @intCast(weight_power_sum_big)); // advance to next power of two (even if weight_power_sum is a power of 2) // TODO: is it valid to have weight_power_sum == 0? 
@@ -179,7 +179,7 @@ fn buildHuffmanTree(weights: *[256]u4, symbol_count: usize) error{MalformedHuffm const prefixed_symbol_count = assignSymbols(weight_sorted_prefixed_symbols[0..symbol_count], weights.*); const tree = LiteralsSection.HuffmanTree{ .max_bit_count = max_number_of_bits, - .symbol_count_minus_one = @intCast(u8, prefixed_symbol_count - 1), + .symbol_count_minus_one = @as(u8, @intCast(prefixed_symbol_count - 1)), .nodes = weight_sorted_prefixed_symbols, }; return tree; diff --git a/lib/std/compress/zstandard/decompress.zig b/lib/std/compress/zstandard/decompress.zig index a2ba59e688..bc977d1fba 100644 --- a/lib/std/compress/zstandard/decompress.zig +++ b/lib/std/compress/zstandard/decompress.zig @@ -260,7 +260,7 @@ pub fn decodeFrameArrayList( /// Returns the frame checksum corresponding to the data fed into `hasher` pub fn computeChecksum(hasher: *std.hash.XxHash64) u32 { const hash = hasher.final(); - return @intCast(u32, hash & 0xFFFFFFFF); + return @as(u32, @intCast(hash & 0xFFFFFFFF)); } const FrameError = error{ @@ -398,7 +398,7 @@ pub const FrameContext = struct { const window_size = if (window_size_raw > window_size_max) return error.WindowTooLarge else - @intCast(usize, window_size_raw); + @as(usize, @intCast(window_size_raw)); const should_compute_checksum = frame_header.descriptor.content_checksum_flag and verify_checksum; @@ -585,7 +585,7 @@ pub fn frameWindowSize(header: ZstandardHeader) ?u64 { const exponent = (descriptor & 0b11111000) >> 3; const mantissa = descriptor & 0b00000111; const window_log = 10 + exponent; - const window_base = @as(u64, 1) << @intCast(u6, window_log); + const window_base = @as(u64, 1) << @as(u6, @intCast(window_log)); const window_add = (window_base / 8) * mantissa; return window_base + window_add; } else return header.content_size; @@ -599,7 +599,7 @@ pub fn frameWindowSize(header: ZstandardHeader) ?u64 { pub fn decodeZstandardHeader( source: anytype, ) (@TypeOf(source).Error || error{ EndOfStream, ReservedBitSet })!ZstandardHeader { - const descriptor = @bitCast(ZstandardHeader.Descriptor, try source.readByte()); + const descriptor = @as(ZstandardHeader.Descriptor, @bitCast(try source.readByte())); if (descriptor.reserved) return error.ReservedBitSet; diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig index f5938dd218..7c3343ba8c 100644 --- a/lib/std/crypto/25519/curve25519.zig +++ b/lib/std/crypto/25519/curve25519.zig @@ -54,7 +54,7 @@ pub const Curve25519 = struct { var swap: u8 = 0; var pos: usize = bits - 1; while (true) : (pos -= 1) { - const bit = (s[pos >> 3] >> @truncate(u3, pos)) & 1; + const bit = (s[pos >> 3] >> @as(u3, @truncate(pos))) & 1; swap ^= bit; Fe.cSwap2(&x2, &x3, &z2, &z3, swap); swap = bit; diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig index 50f34c45f3..bf0c62f9de 100644 --- a/lib/std/crypto/25519/edwards25519.zig +++ b/lib/std/crypto/25519/edwards25519.zig @@ -162,8 +162,8 @@ pub const Edwards25519 = struct { const reduced = if ((s[s.len - 1] & 0x80) == 0) s else scalar.reduce(s); var e: [2 * 32]i8 = undefined; for (reduced, 0..) 
|x, i| { - e[i * 2 + 0] = @as(i8, @truncate(u4, x)); - e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4)); + e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x))); + e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4))); } // Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7 var carry: i8 = 0; @@ -190,9 +190,9 @@ pub const Edwards25519 = struct { while (true) : (pos -= 1) { const slot = e[pos]; if (slot > 0) { - q = q.add(pc[@intCast(usize, slot)]); + q = q.add(pc[@as(usize, @intCast(slot))]); } else if (slot < 0) { - q = q.sub(pc[@intCast(usize, -slot)]); + q = q.sub(pc[@as(usize, @intCast(-slot))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); @@ -206,7 +206,7 @@ pub const Edwards25519 = struct { var q = Edwards25519.identityElement; var pos: usize = 252; while (true) : (pos -= 4) { - const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos))); + const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos))))); if (vartime) { if (slot != 0) { q = q.add(pc[slot]); @@ -283,15 +283,15 @@ pub const Edwards25519 = struct { while (true) : (pos -= 1) { const slot1 = e1[pos]; if (slot1 > 0) { - q = q.add(pc1[@intCast(usize, slot1)]); + q = q.add(pc1[@as(usize, @intCast(slot1))]); } else if (slot1 < 0) { - q = q.sub(pc1[@intCast(usize, -slot1)]); + q = q.sub(pc1[@as(usize, @intCast(-slot1))]); } const slot2 = e2[pos]; if (slot2 > 0) { - q = q.add(pc2[@intCast(usize, slot2)]); + q = q.add(pc2[@as(usize, @intCast(slot2))]); } else if (slot2 < 0) { - q = q.sub(pc2[@intCast(usize, -slot2)]); + q = q.sub(pc2[@as(usize, @intCast(-slot2))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); @@ -326,9 +326,9 @@ pub const Edwards25519 = struct { for (es, 0..) |e, i| { const slot = e[pos]; if (slot > 0) { - q = q.add(pcs[i][@intCast(usize, slot)]); + q = q.add(pcs[i][@as(usize, @intCast(slot))]); } else if (slot < 0) { - q = q.sub(pcs[i][@intCast(usize, -slot)]); + q = q.sub(pcs[i][@as(usize, @intCast(-slot))]); } } if (pos == 0) break; @@ -427,7 +427,7 @@ pub const Edwards25519 = struct { } const empty_block = [_]u8{0} ** H.block_length; var t = [3]u8{ 0, n * h_l, 0 }; - var xctx_len_u8 = [1]u8{@intCast(u8, xctx.len)}; + var xctx_len_u8 = [1]u8{@as(u8, @intCast(xctx.len))}; var st = H.init(.{}); st.update(empty_block[0..]); st.update(s); diff --git a/lib/std/crypto/25519/field.zig b/lib/std/crypto/25519/field.zig index eec83f3d2e..627df9d4cb 100644 --- a/lib/std/crypto/25519/field.zig +++ b/lib/std/crypto/25519/field.zig @@ -254,11 +254,11 @@ pub const Fe = struct { var rs: [5]u64 = undefined; comptime var i = 0; inline while (i < 4) : (i += 1) { - rs[i] = @truncate(u64, r[i]) & MASK51; - r[i + 1] += @intCast(u64, r[i] >> 51); + rs[i] = @as(u64, @truncate(r[i])) & MASK51; + r[i + 1] += @as(u64, @intCast(r[i] >> 51)); } - rs[4] = @truncate(u64, r[4]) & MASK51; - var carry = @intCast(u64, r[4] >> 51); + rs[4] = @as(u64, @truncate(r[4])) & MASK51; + var carry = @as(u64, @intCast(r[4] >> 51)); rs[0] += 19 * carry; carry = rs[0] >> 51; rs[0] &= MASK51; @@ -278,8 +278,8 @@ pub const Fe = struct { var r: [5]u128 = undefined; comptime var i = 0; inline while (i < 5) : (i += 1) { - ax[i] = @intCast(u128, a.limbs[i]); - bx[i] = @intCast(u128, b.limbs[i]); + ax[i] = @as(u128, @intCast(a.limbs[i])); + bx[i] = @as(u128, @intCast(b.limbs[i])); } i = 1; inline while (i < 5) : (i += 1) { @@ -299,7 +299,7 @@ pub const Fe = struct { var r: [5]u128 = undefined; comptime var i = 0; inline while (i < 5) : (i += 1) { - ax[i] = @intCast(u128, a.limbs[i]); + ax[i] = @as(u128, @intCast(a.limbs[i])); } const 
a0_2 = 2 * ax[0]; const a1_2 = 2 * ax[1]; @@ -334,15 +334,15 @@ pub const Fe = struct { /// Multiply a field element with a small (32-bit) integer pub inline fn mul32(a: Fe, comptime n: u32) Fe { - const sn = @intCast(u128, n); + const sn = @as(u128, @intCast(n)); var fe: Fe = undefined; var x: u128 = 0; comptime var i = 0; inline while (i < 5) : (i += 1) { x = a.limbs[i] * sn + (x >> 51); - fe.limbs[i] = @truncate(u64, x) & MASK51; + fe.limbs[i] = @as(u64, @truncate(x)) & MASK51; } - fe.limbs[0] += @intCast(u64, x >> 51) * 19; + fe.limbs[0] += @as(u64, @intCast(x >> 51)) * 19; return fe; } @@ -402,7 +402,7 @@ pub const Fe = struct { const t2 = t.sqn(30).mul(t); const t3 = t2.sqn(60).mul(t2); const t4 = t3.sqn(120).mul(t3).sqn(10).mul(u).sqn(3).mul(_11).sq(); - return @bitCast(bool, @truncate(u1, ~(t4.toBytes()[1] & 1))); + return @as(bool, @bitCast(@as(u1, @truncate(~(t4.toBytes()[1] & 1))))); } fn uncheckedSqrt(x2: Fe) Fe { diff --git a/lib/std/crypto/25519/scalar.zig b/lib/std/crypto/25519/scalar.zig index fd6d42aebe..1699c68e12 100644 --- a/lib/std/crypto/25519/scalar.zig +++ b/lib/std/crypto/25519/scalar.zig @@ -27,8 +27,8 @@ pub fn rejectNonCanonical(s: CompressedScalar) NonCanonicalError!void { while (true) : (i -= 1) { const xs = @as(u16, s[i]); const xfield_order_s = @as(u16, field_order_s[i]); - c |= @intCast(u8, ((xs -% xfield_order_s) >> 8) & n); - n &= @intCast(u8, ((xs ^ xfield_order_s) -% 1) >> 8); + c |= @as(u8, @intCast(((xs -% xfield_order_s) >> 8) & n)); + n &= @as(u8, @intCast(((xs ^ xfield_order_s) -% 1) >> 8)); if (i == 0) break; } if (c == 0) { @@ -89,7 +89,7 @@ pub fn neg(s: CompressedScalar) CompressedScalar { var i: usize = 0; while (i < 64) : (i += 1) { carry = @as(u32, fs[i]) -% sx[i] -% @as(u32, carry); - sx[i] = @truncate(u8, carry); + sx[i] = @as(u8, @truncate(carry)); carry = (carry >> 8) & 1; } return reduce64(sx); @@ -129,7 +129,7 @@ pub const Scalar = struct { while (i < 4) : (i += 1) { mem.writeIntLittle(u64, bytes[i * 7 ..][0..8], expanded.limbs[i]); } - mem.writeIntLittle(u32, bytes[i * 7 ..][0..4], @intCast(u32, expanded.limbs[i])); + mem.writeIntLittle(u32, bytes[i * 7 ..][0..4], @as(u32, @intCast(expanded.limbs[i]))); return bytes; } @@ -234,42 +234,42 @@ pub const Scalar = struct { const z80 = xy440; const carry0 = z00 >> 56; - const t10 = @truncate(u64, z00) & 0xffffffffffffff; + const t10 = @as(u64, @truncate(z00)) & 0xffffffffffffff; const c00 = carry0; const t00 = t10; const carry1 = (z10 + c00) >> 56; - const t11 = @truncate(u64, (z10 + c00)) & 0xffffffffffffff; + const t11 = @as(u64, @truncate((z10 + c00))) & 0xffffffffffffff; const c10 = carry1; const t12 = t11; const carry2 = (z20 + c10) >> 56; - const t13 = @truncate(u64, (z20 + c10)) & 0xffffffffffffff; + const t13 = @as(u64, @truncate((z20 + c10))) & 0xffffffffffffff; const c20 = carry2; const t20 = t13; const carry3 = (z30 + c20) >> 56; - const t14 = @truncate(u64, (z30 + c20)) & 0xffffffffffffff; + const t14 = @as(u64, @truncate((z30 + c20))) & 0xffffffffffffff; const c30 = carry3; const t30 = t14; const carry4 = (z40 + c30) >> 56; - const t15 = @truncate(u64, (z40 + c30)) & 0xffffffffffffff; + const t15 = @as(u64, @truncate((z40 + c30))) & 0xffffffffffffff; const c40 = carry4; const t40 = t15; const carry5 = (z50 + c40) >> 56; - const t16 = @truncate(u64, (z50 + c40)) & 0xffffffffffffff; + const t16 = @as(u64, @truncate((z50 + c40))) & 0xffffffffffffff; const c50 = carry5; const t50 = t16; const carry6 = (z60 + c50) >> 56; - const t17 = @truncate(u64, (z60 + c50)) & 0xffffffffffffff; 
+ const t17 = @as(u64, @truncate((z60 + c50))) & 0xffffffffffffff; const c60 = carry6; const t60 = t17; const carry7 = (z70 + c60) >> 56; - const t18 = @truncate(u64, (z70 + c60)) & 0xffffffffffffff; + const t18 = @as(u64, @truncate((z70 + c60))) & 0xffffffffffffff; const c70 = carry7; const t70 = t18; const carry8 = (z80 + c70) >> 56; - const t19 = @truncate(u64, (z80 + c70)) & 0xffffffffffffff; + const t19 = @as(u64, @truncate((z80 + c70))) & 0xffffffffffffff; const c80 = carry8; const t80 = t19; - const t90 = (@truncate(u64, c80)); + const t90 = (@as(u64, @truncate(c80))); const r0 = t00; const r1 = t12; const r2 = t20; @@ -356,26 +356,26 @@ pub const Scalar = struct { const carry12 = (z32 + c21) >> 56; const c31 = carry12; const carry13 = (z42 + c31) >> 56; - const t24 = @truncate(u64, z42 + c31) & 0xffffffffffffff; + const t24 = @as(u64, @truncate(z42 + c31)) & 0xffffffffffffff; const c41 = carry13; const t41 = t24; const carry14 = (z5 + c41) >> 56; - const t25 = @truncate(u64, z5 + c41) & 0xffffffffffffff; + const t25 = @as(u64, @truncate(z5 + c41)) & 0xffffffffffffff; const c5 = carry14; const t5 = t25; const carry15 = (z6 + c5) >> 56; - const t26 = @truncate(u64, z6 + c5) & 0xffffffffffffff; + const t26 = @as(u64, @truncate(z6 + c5)) & 0xffffffffffffff; const c6 = carry15; const t6 = t26; const carry16 = (z7 + c6) >> 56; - const t27 = @truncate(u64, z7 + c6) & 0xffffffffffffff; + const t27 = @as(u64, @truncate(z7 + c6)) & 0xffffffffffffff; const c7 = carry16; const t7 = t27; const carry17 = (z8 + c7) >> 56; - const t28 = @truncate(u64, z8 + c7) & 0xffffffffffffff; + const t28 = @as(u64, @truncate(z8 + c7)) & 0xffffffffffffff; const c8 = carry17; const t8 = t28; - const t9 = @truncate(u64, c8); + const t9 = @as(u64, @truncate(c8)); const qmu4_ = t41; const qmu5_ = t5; @@ -425,22 +425,22 @@ pub const Scalar = struct { const xy31 = @as(u128, qdiv3) * @as(u128, m1); const xy40 = @as(u128, qdiv4) * @as(u128, m0); const carry18 = xy00 >> 56; - const t29 = @truncate(u64, xy00) & 0xffffffffffffff; + const t29 = @as(u64, @truncate(xy00)) & 0xffffffffffffff; const c0 = carry18; const t01 = t29; const carry19 = (xy01 + xy10 + c0) >> 56; - const t31 = @truncate(u64, xy01 + xy10 + c0) & 0xffffffffffffff; + const t31 = @as(u64, @truncate(xy01 + xy10 + c0)) & 0xffffffffffffff; const c12 = carry19; const t110 = t31; const carry20 = (xy02 + xy11 + xy20 + c12) >> 56; - const t32 = @truncate(u64, xy02 + xy11 + xy20 + c12) & 0xffffffffffffff; + const t32 = @as(u64, @truncate(xy02 + xy11 + xy20 + c12)) & 0xffffffffffffff; const c22 = carry20; const t210 = t32; const carry = (xy03 + xy12 + xy21 + xy30 + c22) >> 56; - const t33 = @truncate(u64, xy03 + xy12 + xy21 + xy30 + c22) & 0xffffffffffffff; + const t33 = @as(u64, @truncate(xy03 + xy12 + xy21 + xy30 + c22)) & 0xffffffffffffff; const c32 = carry; const t34 = t33; - const t42 = @truncate(u64, xy04 + xy13 + xy22 + xy31 + xy40 + c32) & 0xffffffffff; + const t42 = @as(u64, @truncate(xy04 + xy13 + xy22 + xy31 + xy40 + c32)) & 0xffffffffff; const qmul0 = t01; const qmul1 = t110; @@ -498,7 +498,7 @@ pub const Scalar = struct { const t = ((b << 56) + s4) -% (y41 + b3); const b4 = b; const t4 = t; - const mask = (b4 -% @intCast(u64, ((1)))); + const mask = (b4 -% @as(u64, @intCast(((1))))); const z04 = s0 ^ (mask & (s0 ^ t0)); const z14 = s1 ^ (mask & (s1 ^ t1)); const z24 = s2 ^ (mask & (s2 ^ t2)); @@ -691,26 +691,26 @@ const ScalarDouble = struct { const carry3 = (z31 + c20) >> 56; const c30 = carry3; const carry4 = (z41 + c30) >> 56; - const t103 = 
@as(u64, @truncate(u64, z41 + c30)) & 0xffffffffffffff; + const t103 = @as(u64, @as(u64, @truncate(z41 + c30))) & 0xffffffffffffff; const c40 = carry4; const t410 = t103; const carry5 = (z5 + c40) >> 56; - const t104 = @as(u64, @truncate(u64, z5 + c40)) & 0xffffffffffffff; + const t104 = @as(u64, @as(u64, @truncate(z5 + c40))) & 0xffffffffffffff; const c5 = carry5; const t51 = t104; const carry6 = (z6 + c5) >> 56; - const t105 = @as(u64, @truncate(u64, z6 + c5)) & 0xffffffffffffff; + const t105 = @as(u64, @as(u64, @truncate(z6 + c5))) & 0xffffffffffffff; const c6 = carry6; const t61 = t105; const carry7 = (z7 + c6) >> 56; - const t106 = @as(u64, @truncate(u64, z7 + c6)) & 0xffffffffffffff; + const t106 = @as(u64, @as(u64, @truncate(z7 + c6))) & 0xffffffffffffff; const c7 = carry7; const t71 = t106; const carry8 = (z8 + c7) >> 56; - const t107 = @as(u64, @truncate(u64, z8 + c7)) & 0xffffffffffffff; + const t107 = @as(u64, @as(u64, @truncate(z8 + c7))) & 0xffffffffffffff; const c8 = carry8; const t81 = t107; - const t91 = @as(u64, @truncate(u64, c8)); + const t91 = @as(u64, @as(u64, @truncate(c8))); const qmu4_ = t410; const qmu5_ = t51; @@ -760,22 +760,22 @@ const ScalarDouble = struct { const xy31 = @as(u128, qdiv3) * @as(u128, m1); const xy40 = @as(u128, qdiv4) * @as(u128, m0); const carry9 = xy00 >> 56; - const t108 = @truncate(u64, xy00) & 0xffffffffffffff; + const t108 = @as(u64, @truncate(xy00)) & 0xffffffffffffff; const c0 = carry9; const t010 = t108; const carry10 = (xy01 + xy10 + c0) >> 56; - const t109 = @truncate(u64, xy01 + xy10 + c0) & 0xffffffffffffff; + const t109 = @as(u64, @truncate(xy01 + xy10 + c0)) & 0xffffffffffffff; const c11 = carry10; const t110 = t109; const carry11 = (xy02 + xy11 + xy20 + c11) >> 56; - const t1010 = @truncate(u64, xy02 + xy11 + xy20 + c11) & 0xffffffffffffff; + const t1010 = @as(u64, @truncate(xy02 + xy11 + xy20 + c11)) & 0xffffffffffffff; const c21 = carry11; const t210 = t1010; const carry = (xy03 + xy12 + xy21 + xy30 + c21) >> 56; - const t1011 = @truncate(u64, xy03 + xy12 + xy21 + xy30 + c21) & 0xffffffffffffff; + const t1011 = @as(u64, @truncate(xy03 + xy12 + xy21 + xy30 + c21)) & 0xffffffffffffff; const c31 = carry; const t310 = t1011; - const t411 = @truncate(u64, xy04 + xy13 + xy22 + xy31 + xy40 + c31) & 0xffffffffff; + const t411 = @as(u64, @truncate(xy04 + xy13 + xy22 + xy31 + xy40 + c31)) & 0xffffffffff; const qmul0 = t010; const qmul1 = t110; diff --git a/lib/std/crypto/Certificate.zig b/lib/std/crypto/Certificate.zig index 51eb97ab32..a4f0ff604b 100644 --- a/lib/std/crypto/Certificate.zig +++ b/lib/std/crypto/Certificate.zig @@ -312,7 +312,7 @@ pub const Parsed = struct { while (name_i < general_names.slice.end) { const general_name = try der.Element.parse(subject_alt_name, name_i); name_i = general_name.slice.end; - switch (@enumFromInt(GeneralNameTag, @intFromEnum(general_name.identifier.tag))) { + switch (@as(GeneralNameTag, @enumFromInt(@intFromEnum(general_name.identifier.tag)))) { .dNSName => { const dns_name = subject_alt_name[general_name.slice.start..general_name.slice.end]; if (checkHostName(host_name, dns_name)) return; @@ -379,7 +379,7 @@ pub fn parse(cert: Certificate) ParseError!Parsed { const tbs_certificate = try der.Element.parse(cert_bytes, certificate.slice.start); const version_elem = try der.Element.parse(cert_bytes, tbs_certificate.slice.start); const version = try parseVersion(cert_bytes, version_elem); - const serial_number = if (@bitCast(u8, version_elem.identifier) == 0xa0) + const serial_number = if (@as(u8, 
@bitCast(version_elem.identifier)) == 0xa0) try der.Element.parse(cert_bytes, version_elem.slice.end) else version_elem; @@ -597,8 +597,8 @@ const Date = struct { var month: u4 = 1; while (month < date.month) : (month += 1) { const days: u64 = std.time.epoch.getDaysInMonth( - @enumFromInt(std.time.epoch.YearLeapKind, @intFromBool(is_leap)), - @enumFromInt(std.time.epoch.Month, month), + @as(std.time.epoch.YearLeapKind, @enumFromInt(@intFromBool(is_leap))), + @as(std.time.epoch.Month, @enumFromInt(month)), ); sec += days * std.time.epoch.secs_per_day; } @@ -685,7 +685,7 @@ fn parseEnum(comptime E: type, bytes: []const u8, element: der.Element) ParseEnu pub const ParseVersionError = error{ UnsupportedCertificateVersion, CertificateFieldHasInvalidLength }; pub fn parseVersion(bytes: []const u8, version_elem: der.Element) ParseVersionError!Version { - if (@bitCast(u8, version_elem.identifier) != 0xa0) + if (@as(u8, @bitCast(version_elem.identifier)) != 0xa0) return .v1; if (version_elem.slice.end - version_elem.slice.start != 3) @@ -864,7 +864,7 @@ pub const der = struct { pub fn parse(bytes: []const u8, index: u32) ParseElementError!Element { var i = index; - const identifier = @bitCast(Identifier, bytes[i]); + const identifier = @as(Identifier, @bitCast(bytes[i])); i += 1; const size_byte = bytes[i]; i += 1; @@ -878,7 +878,7 @@ pub const der = struct { }; } - const len_size = @truncate(u7, size_byte); + const len_size = @as(u7, @truncate(size_byte)); if (len_size > @sizeOf(u32)) { return error.CertificateFieldHasInvalidLength; } @@ -1042,10 +1042,10 @@ pub const rsa = struct { var hashed: [Hash.digest_length]u8 = undefined; while (idx < len) { - c[0] = @intCast(u8, (counter >> 24) & 0xFF); - c[1] = @intCast(u8, (counter >> 16) & 0xFF); - c[2] = @intCast(u8, (counter >> 8) & 0xFF); - c[3] = @intCast(u8, counter & 0xFF); + c[0] = @as(u8, @intCast((counter >> 24) & 0xFF)); + c[1] = @as(u8, @intCast((counter >> 16) & 0xFF)); + c[2] = @as(u8, @intCast((counter >> 8) & 0xFF)); + c[3] = @as(u8, @intCast(counter & 0xFF)); std.mem.copyForwards(u8, hash[seed.len..], &c); Hash.hash(&hash, &hashed, .{}); diff --git a/lib/std/crypto/Certificate/Bundle.zig b/lib/std/crypto/Certificate/Bundle.zig index 434de6e0a8..2a5555e301 100644 --- a/lib/std/crypto/Certificate/Bundle.zig +++ b/lib/std/crypto/Certificate/Bundle.zig @@ -131,7 +131,7 @@ pub fn rescanWindows(cb: *Bundle, gpa: Allocator) RescanWindowsError!void { var ctx = w.crypt32.CertEnumCertificatesInStore(store, null); while (ctx) |context| : (ctx = w.crypt32.CertEnumCertificatesInStore(store, ctx)) { - const decoded_start = @intCast(u32, cb.bytes.items.len); + const decoded_start = @as(u32, @intCast(cb.bytes.items.len)); const encoded_cert = context.pbCertEncoded[0..context.cbCertEncoded]; try cb.bytes.appendSlice(gpa, encoded_cert); try cb.parseCert(gpa, decoded_start, now_sec); @@ -213,7 +213,7 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFrom const needed_capacity = std.math.cast(u32, decoded_size_upper_bound + size) orelse return error.CertificateAuthorityBundleTooBig; try cb.bytes.ensureUnusedCapacity(gpa, needed_capacity); - const end_reserved = @intCast(u32, cb.bytes.items.len + decoded_size_upper_bound); + const end_reserved = @as(u32, @intCast(cb.bytes.items.len + decoded_size_upper_bound)); const buffer = cb.bytes.allocatedSlice()[end_reserved..]; const end_index = try file.readAll(buffer); const encoded_bytes = buffer[0..end_index]; @@ -230,7 +230,7 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, 
file: fs.File) AddCertsFrom return error.MissingEndCertificateMarker; start_index = cert_end + end_marker.len; const encoded_cert = mem.trim(u8, encoded_bytes[cert_start..cert_end], " \t\r\n"); - const decoded_start = @intCast(u32, cb.bytes.items.len); + const decoded_start = @as(u32, @intCast(cb.bytes.items.len)); const dest_buf = cb.bytes.allocatedSlice()[decoded_start..]; cb.bytes.items.len += try base64.decode(dest_buf, encoded_cert); try cb.parseCert(gpa, decoded_start, now_sec); diff --git a/lib/std/crypto/Certificate/Bundle/macos.zig b/lib/std/crypto/Certificate/Bundle/macos.zig index bd7100eb46..028275a06b 100644 --- a/lib/std/crypto/Certificate/Bundle/macos.zig +++ b/lib/std/crypto/Certificate/Bundle/macos.zig @@ -21,7 +21,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void { const reader = stream.reader(); const db_header = try reader.readStructBig(ApplDbHeader); - assert(mem.eql(u8, "kych", &@bitCast([4]u8, db_header.signature))); + assert(mem.eql(u8, "kych", &@as([4]u8, @bitCast(db_header.signature)))); try stream.seekTo(db_header.schema_offset); @@ -42,7 +42,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void { const table_header = try reader.readStructBig(TableHeader); - if (@enumFromInt(std.os.darwin.cssm.DB_RECORDTYPE, table_header.table_id) != .X509_CERTIFICATE) { + if (@as(std.os.darwin.cssm.DB_RECORDTYPE, @enumFromInt(table_header.table_id)) != .X509_CERTIFICATE) { continue; } @@ -61,7 +61,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void { try cb.bytes.ensureUnusedCapacity(gpa, cert_header.cert_size); - const cert_start = @intCast(u32, cb.bytes.items.len); + const cert_start = @as(u32, @intCast(cb.bytes.items.len)); const dest_buf = cb.bytes.allocatedSlice()[cert_start..]; cb.bytes.items.len += try reader.readAtLeast(dest_buf, cert_header.cert_size); diff --git a/lib/std/crypto/aegis.zig b/lib/std/crypto/aegis.zig index 9709a3a958..75633f7c69 100644 --- a/lib/std/crypto/aegis.zig +++ b/lib/std/crypto/aegis.zig @@ -625,7 +625,7 @@ test "Aegis MAC" { const key = [_]u8{0x00} ** Aegis128LMac.key_length; var msg: [64]u8 = undefined; for (&msg, 0..) 
|*m, i| { - m.* = @truncate(u8, i); + m.* = @as(u8, @truncate(i)); } const st_init = Aegis128LMac.init(&key); var st = st_init; diff --git a/lib/std/crypto/aes/soft.zig b/lib/std/crypto/aes/soft.zig index 4c2a8ff80d..0b15555ad0 100644 --- a/lib/std/crypto/aes/soft.zig +++ b/lib/std/crypto/aes/soft.zig @@ -51,13 +51,13 @@ pub const Block = struct { const s3 = block.repr[3]; var x: [4]u32 = undefined; - x = table_lookup(&table_encrypt, @truncate(u8, s0), @truncate(u8, s1 >> 8), @truncate(u8, s2 >> 16), @truncate(u8, s3 >> 24)); + x = table_lookup(&table_encrypt, @as(u8, @truncate(s0)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s3 >> 24))); var t0 = x[0] ^ x[1] ^ x[2] ^ x[3]; - x = table_lookup(&table_encrypt, @truncate(u8, s1), @truncate(u8, s2 >> 8), @truncate(u8, s3 >> 16), @truncate(u8, s0 >> 24)); + x = table_lookup(&table_encrypt, @as(u8, @truncate(s1)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s0 >> 24))); var t1 = x[0] ^ x[1] ^ x[2] ^ x[3]; - x = table_lookup(&table_encrypt, @truncate(u8, s2), @truncate(u8, s3 >> 8), @truncate(u8, s0 >> 16), @truncate(u8, s1 >> 24)); + x = table_lookup(&table_encrypt, @as(u8, @truncate(s2)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s1 >> 24))); var t2 = x[0] ^ x[1] ^ x[2] ^ x[3]; - x = table_lookup(&table_encrypt, @truncate(u8, s3), @truncate(u8, s0 >> 8), @truncate(u8, s1 >> 16), @truncate(u8, s2 >> 24)); + x = table_lookup(&table_encrypt, @as(u8, @truncate(s3)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s2 >> 24))); var t3 = x[0] ^ x[1] ^ x[2] ^ x[3]; t0 ^= round_key.repr[0]; @@ -77,31 +77,31 @@ pub const Block = struct { var x: [4]u32 = undefined; x = .{ - table_encrypt[0][@truncate(u8, s0)], - table_encrypt[1][@truncate(u8, s1 >> 8)], - table_encrypt[2][@truncate(u8, s2 >> 16)], - table_encrypt[3][@truncate(u8, s3 >> 24)], + table_encrypt[0][@as(u8, @truncate(s0))], + table_encrypt[1][@as(u8, @truncate(s1 >> 8))], + table_encrypt[2][@as(u8, @truncate(s2 >> 16))], + table_encrypt[3][@as(u8, @truncate(s3 >> 24))], }; var t0 = x[0] ^ x[1] ^ x[2] ^ x[3]; x = .{ - table_encrypt[0][@truncate(u8, s1)], - table_encrypt[1][@truncate(u8, s2 >> 8)], - table_encrypt[2][@truncate(u8, s3 >> 16)], - table_encrypt[3][@truncate(u8, s0 >> 24)], + table_encrypt[0][@as(u8, @truncate(s1))], + table_encrypt[1][@as(u8, @truncate(s2 >> 8))], + table_encrypt[2][@as(u8, @truncate(s3 >> 16))], + table_encrypt[3][@as(u8, @truncate(s0 >> 24))], }; var t1 = x[0] ^ x[1] ^ x[2] ^ x[3]; x = .{ - table_encrypt[0][@truncate(u8, s2)], - table_encrypt[1][@truncate(u8, s3 >> 8)], - table_encrypt[2][@truncate(u8, s0 >> 16)], - table_encrypt[3][@truncate(u8, s1 >> 24)], + table_encrypt[0][@as(u8, @truncate(s2))], + table_encrypt[1][@as(u8, @truncate(s3 >> 8))], + table_encrypt[2][@as(u8, @truncate(s0 >> 16))], + table_encrypt[3][@as(u8, @truncate(s1 >> 24))], }; var t2 = x[0] ^ x[1] ^ x[2] ^ x[3]; x = .{ - table_encrypt[0][@truncate(u8, s3)], - table_encrypt[1][@truncate(u8, s0 >> 8)], - table_encrypt[2][@truncate(u8, s1 >> 16)], - table_encrypt[3][@truncate(u8, s2 >> 24)], + table_encrypt[0][@as(u8, @truncate(s3))], + table_encrypt[1][@as(u8, @truncate(s0 >> 8))], + table_encrypt[2][@as(u8, @truncate(s1 >> 16))], + table_encrypt[3][@as(u8, @truncate(s2 >> 24))], }; var t3 = x[0] ^ x[1] ^ x[2] ^ x[3]; @@ -122,13 +122,13 @@ pub const Block = struct { // Last round uses s-box directly and XORs to produce output. 
var x: [4]u8 = undefined; - x = sbox_lookup(&sbox_encrypt, @truncate(u8, s3 >> 24), @truncate(u8, s2 >> 16), @truncate(u8, s1 >> 8), @truncate(u8, s0)); + x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s3 >> 24)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s0))); var t0 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); - x = sbox_lookup(&sbox_encrypt, @truncate(u8, s0 >> 24), @truncate(u8, s3 >> 16), @truncate(u8, s2 >> 8), @truncate(u8, s1)); + x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s0 >> 24)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s1))); var t1 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); - x = sbox_lookup(&sbox_encrypt, @truncate(u8, s1 >> 24), @truncate(u8, s0 >> 16), @truncate(u8, s3 >> 8), @truncate(u8, s2)); + x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s1 >> 24)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s2))); var t2 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); - x = sbox_lookup(&sbox_encrypt, @truncate(u8, s2 >> 24), @truncate(u8, s1 >> 16), @truncate(u8, s0 >> 8), @truncate(u8, s3)); + x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s2 >> 24)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s3))); var t3 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); t0 ^= round_key.repr[0]; @@ -147,13 +147,13 @@ pub const Block = struct { const s3 = block.repr[3]; var x: [4]u32 = undefined; - x = table_lookup(&table_decrypt, @truncate(u8, s0), @truncate(u8, s3 >> 8), @truncate(u8, s2 >> 16), @truncate(u8, s1 >> 24)); + x = table_lookup(&table_decrypt, @as(u8, @truncate(s0)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s1 >> 24))); var t0 = x[0] ^ x[1] ^ x[2] ^ x[3]; - x = table_lookup(&table_decrypt, @truncate(u8, s1), @truncate(u8, s0 >> 8), @truncate(u8, s3 >> 16), @truncate(u8, s2 >> 24)); + x = table_lookup(&table_decrypt, @as(u8, @truncate(s1)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s2 >> 24))); var t1 = x[0] ^ x[1] ^ x[2] ^ x[3]; - x = table_lookup(&table_decrypt, @truncate(u8, s2), @truncate(u8, s1 >> 8), @truncate(u8, s0 >> 16), @truncate(u8, s3 >> 24)); + x = table_lookup(&table_decrypt, @as(u8, @truncate(s2)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s3 >> 24))); var t2 = x[0] ^ x[1] ^ x[2] ^ x[3]; - x = table_lookup(&table_decrypt, @truncate(u8, s3), @truncate(u8, s2 >> 8), @truncate(u8, s1 >> 16), @truncate(u8, s0 >> 24)); + x = table_lookup(&table_decrypt, @as(u8, @truncate(s3)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s0 >> 24))); var t3 = x[0] ^ x[1] ^ x[2] ^ x[3]; t0 ^= round_key.repr[0]; @@ -173,31 +173,31 @@ pub const Block = struct { var x: [4]u32 = undefined; x = .{ - table_decrypt[0][@truncate(u8, s0)], - table_decrypt[1][@truncate(u8, s3 >> 8)], - table_decrypt[2][@truncate(u8, s2 >> 16)], - table_decrypt[3][@truncate(u8, s1 >> 24)], + table_decrypt[0][@as(u8, @truncate(s0))], + table_decrypt[1][@as(u8, @truncate(s3 >> 8))], + table_decrypt[2][@as(u8, @truncate(s2 >> 16))], + table_decrypt[3][@as(u8, @truncate(s1 >> 24))], }; var t0 = x[0] ^ x[1] ^ x[2] ^ x[3]; x = .{ - table_decrypt[0][@truncate(u8, s1)], - table_decrypt[1][@truncate(u8, s0 >> 8)], - table_decrypt[2][@truncate(u8, s3 >> 16)], - 
table_decrypt[3][@truncate(u8, s2 >> 24)], + table_decrypt[0][@as(u8, @truncate(s1))], + table_decrypt[1][@as(u8, @truncate(s0 >> 8))], + table_decrypt[2][@as(u8, @truncate(s3 >> 16))], + table_decrypt[3][@as(u8, @truncate(s2 >> 24))], }; var t1 = x[0] ^ x[1] ^ x[2] ^ x[3]; x = .{ - table_decrypt[0][@truncate(u8, s2)], - table_decrypt[1][@truncate(u8, s1 >> 8)], - table_decrypt[2][@truncate(u8, s0 >> 16)], - table_decrypt[3][@truncate(u8, s3 >> 24)], + table_decrypt[0][@as(u8, @truncate(s2))], + table_decrypt[1][@as(u8, @truncate(s1 >> 8))], + table_decrypt[2][@as(u8, @truncate(s0 >> 16))], + table_decrypt[3][@as(u8, @truncate(s3 >> 24))], }; var t2 = x[0] ^ x[1] ^ x[2] ^ x[3]; x = .{ - table_decrypt[0][@truncate(u8, s3)], - table_decrypt[1][@truncate(u8, s2 >> 8)], - table_decrypt[2][@truncate(u8, s1 >> 16)], - table_decrypt[3][@truncate(u8, s0 >> 24)], + table_decrypt[0][@as(u8, @truncate(s3))], + table_decrypt[1][@as(u8, @truncate(s2 >> 8))], + table_decrypt[2][@as(u8, @truncate(s1 >> 16))], + table_decrypt[3][@as(u8, @truncate(s0 >> 24))], }; var t3 = x[0] ^ x[1] ^ x[2] ^ x[3]; @@ -218,13 +218,13 @@ pub const Block = struct { // Last round uses s-box directly and XORs to produce output. var x: [4]u8 = undefined; - x = sbox_lookup(&sbox_decrypt, @truncate(u8, s1 >> 24), @truncate(u8, s2 >> 16), @truncate(u8, s3 >> 8), @truncate(u8, s0)); + x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s1 >> 24)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s0))); var t0 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); - x = sbox_lookup(&sbox_decrypt, @truncate(u8, s2 >> 24), @truncate(u8, s3 >> 16), @truncate(u8, s0 >> 8), @truncate(u8, s1)); + x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s2 >> 24)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s1))); var t1 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); - x = sbox_lookup(&sbox_decrypt, @truncate(u8, s3 >> 24), @truncate(u8, s0 >> 16), @truncate(u8, s1 >> 8), @truncate(u8, s2)); + x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s3 >> 24)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s2))); var t2 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); - x = sbox_lookup(&sbox_decrypt, @truncate(u8, s0 >> 24), @truncate(u8, s1 >> 16), @truncate(u8, s2 >> 8), @truncate(u8, s3)); + x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s0 >> 24)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s3))); var t3 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); t0 ^= round_key.repr[0]; @@ -348,7 +348,7 @@ fn KeySchedule(comptime Aes: type) type { const subw = struct { // Apply sbox_encrypt to each byte in w. 
fn func(w: u32) u32 { - const x = sbox_lookup(&sbox_key_schedule, @truncate(u8, w), @truncate(u8, w >> 8), @truncate(u8, w >> 16), @truncate(u8, w >> 24)); + const x = sbox_lookup(&sbox_key_schedule, @as(u8, @truncate(w)), @as(u8, @truncate(w >> 8)), @as(u8, @truncate(w >> 16)), @as(u8, @truncate(w >> 24))); return @as(u32, x[3]) << 24 | @as(u32, x[2]) << 16 | @as(u32, x[1]) << 8 | @as(u32, x[0]); } }.func; @@ -386,7 +386,7 @@ fn KeySchedule(comptime Aes: type) type { inline while (j < 4) : (j += 1) { var rk = round_keys[(ei + j) / 4].repr[(ei + j) % 4]; if (i > 0 and i + 4 < total_words) { - const x = sbox_lookup(&sbox_key_schedule, @truncate(u8, rk >> 24), @truncate(u8, rk >> 16), @truncate(u8, rk >> 8), @truncate(u8, rk)); + const x = sbox_lookup(&sbox_key_schedule, @as(u8, @truncate(rk >> 24)), @as(u8, @truncate(rk >> 16)), @as(u8, @truncate(rk >> 8)), @as(u8, @truncate(rk))); const y = table_lookup(&table_decrypt, x[3], x[2], x[1], x[0]); rk = y[0] ^ y[1] ^ y[2] ^ y[3]; } @@ -664,7 +664,7 @@ fn mul(a: u8, b: u8) u8 { } } - return @truncate(u8, s); + return @as(u8, @truncate(s)); } const cache_line_bytes = 64; diff --git a/lib/std/crypto/aes_ocb.zig b/lib/std/crypto/aes_ocb.zig index 6d5ce3779a..a05e8a7248 100644 --- a/lib/std/crypto/aes_ocb.zig +++ b/lib/std/crypto/aes_ocb.zig @@ -86,18 +86,18 @@ fn AesOcb(comptime Aes: anytype) type { fn getOffset(aes_enc_ctx: EncryptCtx, npub: [nonce_length]u8) Block { var nx = [_]u8{0} ** 16; - nx[0] = @intCast(u8, @truncate(u7, tag_length * 8) << 1); + nx[0] = @as(u8, @intCast(@as(u7, @truncate(tag_length * 8)) << 1)); nx[16 - nonce_length - 1] = 1; nx[nx.len - nonce_length ..].* = npub; - const bottom = @truncate(u6, nx[15]); + const bottom = @as(u6, @truncate(nx[15])); nx[15] &= 0xc0; var ktop_: Block = undefined; aes_enc_ctx.encrypt(&ktop_, &nx); const ktop = mem.readIntBig(u128, &ktop_); - var stretch = (@as(u192, ktop) << 64) | @as(u192, @truncate(u64, ktop >> 64) ^ @truncate(u64, ktop >> 56)); + var stretch = (@as(u192, ktop) << 64) | @as(u192, @as(u64, @truncate(ktop >> 64)) ^ @as(u64, @truncate(ktop >> 56))); var offset: Block = undefined; - mem.writeIntBig(u128, &offset, @truncate(u128, stretch >> (64 - @as(u7, bottom)))); + mem.writeIntBig(u128, &offset, @as(u128, @truncate(stretch >> (64 - @as(u7, bottom))))); return offset; } diff --git a/lib/std/crypto/argon2.zig b/lib/std/crypto/argon2.zig index 40df3290c0..898bc24e6f 100644 --- a/lib/std/crypto/argon2.zig +++ b/lib/std/crypto/argon2.zig @@ -95,7 +95,7 @@ pub const Params = struct { pub fn fromLimits(ops_limit: u32, mem_limit: usize) Self { const m = mem_limit / 1024; std.debug.assert(m <= max_int); - return .{ .t = ops_limit, .m = @intCast(u32, m), .p = 1 }; + return .{ .t = ops_limit, .m = @as(u32, @intCast(m)), .p = 1 }; } }; @@ -111,26 +111,26 @@ fn initHash( var tmp: [4]u8 = undefined; var b2 = Blake2b512.init(.{}); mem.writeIntLittle(u32, parameters[0..4], params.p); - mem.writeIntLittle(u32, parameters[4..8], @intCast(u32, dk_len)); + mem.writeIntLittle(u32, parameters[4..8], @as(u32, @intCast(dk_len))); mem.writeIntLittle(u32, parameters[8..12], params.m); mem.writeIntLittle(u32, parameters[12..16], params.t); mem.writeIntLittle(u32, parameters[16..20], version); mem.writeIntLittle(u32, parameters[20..24], @intFromEnum(mode)); b2.update(¶meters); - mem.writeIntLittle(u32, &tmp, @intCast(u32, password.len)); + mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(password.len))); b2.update(&tmp); b2.update(password); - mem.writeIntLittle(u32, &tmp, @intCast(u32, salt.len)); + 
mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(salt.len))); b2.update(&tmp); b2.update(salt); const secret = params.secret orelse ""; std.debug.assert(secret.len <= max_int); - mem.writeIntLittle(u32, &tmp, @intCast(u32, secret.len)); + mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(secret.len))); b2.update(&tmp); b2.update(secret); const ad = params.ad orelse ""; std.debug.assert(ad.len <= max_int); - mem.writeIntLittle(u32, &tmp, @intCast(u32, ad.len)); + mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(ad.len))); b2.update(&tmp); b2.update(ad); b2.final(h0[0..Blake2b512.digest_length]); @@ -140,7 +140,7 @@ fn initHash( fn blake2bLong(out: []u8, in: []const u8) void { const H = Blake2b512; var outlen_bytes: [4]u8 = undefined; - mem.writeIntLittle(u32, &outlen_bytes, @intCast(u32, out.len)); + mem.writeIntLittle(u32, &outlen_bytes, @as(u32, @intCast(out.len))); var out_buf: [H.digest_length]u8 = undefined; @@ -391,7 +391,7 @@ fn Rp(a: usize, b: usize, c: usize, d: usize) QuarterRound { } fn fBlaMka(x: u64, y: u64) u64 { - const xy = @as(u64, @truncate(u32, x)) * @as(u64, @truncate(u32, y)); + const xy = @as(u64, @as(u32, @truncate(x))) * @as(u64, @as(u32, @truncate(y))); return x +% y +% 2 *% xy; } @@ -448,7 +448,7 @@ fn indexAlpha( lane: u24, index: u32, ) u32 { - var ref_lane = @intCast(u32, rand >> 32) % threads; + var ref_lane = @as(u32, @intCast(rand >> 32)) % threads; if (n == 0 and slice == 0) { ref_lane = lane; } @@ -467,10 +467,10 @@ fn indexAlpha( if (index == 0 or lane == ref_lane) { m -= 1; } - var p = @as(u64, @truncate(u32, rand)); + var p = @as(u64, @as(u32, @truncate(rand))); p = (p * p) >> 32; p = (p * m) >> 32; - return ref_lane * lanes + @intCast(u32, ((s + m - (p + 1)) % lanes)); + return ref_lane * lanes + @as(u32, @intCast(((s + m - (p + 1)) % lanes))); } /// Derives a key from the password, salt, and argon2 parameters. diff --git a/lib/std/crypto/ascon.zig b/lib/std/crypto/ascon.zig index ae4bb57d29..8aa0b109f2 100644 --- a/lib/std/crypto/ascon.zig +++ b/lib/std/crypto/ascon.zig @@ -95,8 +95,8 @@ pub fn State(comptime endian: builtin.Endian) type { /// XOR a byte into the state at a given offset. 
pub fn addByte(self: *Self, byte: u8, offset: usize) void { const z = switch (endian) { - .Big => 64 - 8 - 8 * @truncate(u6, offset % 8), - .Little => 8 * @truncate(u6, offset % 8), + .Big => 64 - 8 - 8 * @as(u6, @truncate(offset % 8)), + .Little => 8 * @as(u6, @truncate(offset % 8)), }; self.st[offset / 8] ^= @as(u64, byte) << z; } diff --git a/lib/std/crypto/bcrypt.zig b/lib/std/crypto/bcrypt.zig index 7bd140d584..87d2eef79a 100644 --- a/lib/std/crypto/bcrypt.zig +++ b/lib/std/crypto/bcrypt.zig @@ -376,10 +376,10 @@ pub const State = struct { const Halves = struct { l: u32, r: u32 }; fn halfRound(state: *const State, i: u32, j: u32, n: usize) u32 { - var r = state.sboxes[0][@truncate(u8, j >> 24)]; - r +%= state.sboxes[1][@truncate(u8, j >> 16)]; - r ^= state.sboxes[2][@truncate(u8, j >> 8)]; - r +%= state.sboxes[3][@truncate(u8, j)]; + var r = state.sboxes[0][@as(u8, @truncate(j >> 24))]; + r +%= state.sboxes[1][@as(u8, @truncate(j >> 16))]; + r ^= state.sboxes[2][@as(u8, @truncate(j >> 8))]; + r +%= state.sboxes[3][@as(u8, @truncate(j))]; return i ^ r ^ state.subkeys[n]; } diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig index f47c334ee9..17f11382ca 100644 --- a/lib/std/crypto/benchmark.zig +++ b/lib/std/crypto/benchmark.zig @@ -54,8 +54,8 @@ pub fn benchmarkHash(comptime Hash: anytype, comptime bytes: comptime_int) !u64 const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, bytes / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(bytes / elapsed_s)); return throughput; } @@ -95,8 +95,8 @@ pub fn benchmarkMac(comptime Mac: anytype, comptime bytes: comptime_int) !u64 { } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, bytes / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(bytes / elapsed_s)); return throughput; } @@ -125,8 +125,8 @@ pub fn benchmarkKeyExchange(comptime DhKeyExchange: anytype, comptime exchange_c } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, exchange_count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(exchange_count / elapsed_s)); return throughput; } @@ -148,8 +148,8 @@ pub fn benchmarkSignature(comptime Signature: anytype, comptime signatures_count } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, signatures_count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(signatures_count / elapsed_s)); return throughput; } @@ -172,8 +172,8 @@ pub fn benchmarkSignatureVerification(comptime Signature: anytype, comptime sign } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, signatures_count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(signatures_count / elapsed_s)); return throughput; } @@ -201,8 +201,8 @@ pub fn benchmarkBatchSignatureVerification(comptime Signature: anytype, comptime } const end = timer.read(); - const 
elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = batch.len * @intFromFloat(u64, signatures_count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = batch.len * @as(u64, @intFromFloat(signatures_count / elapsed_s)); return throughput; } @@ -227,8 +227,8 @@ pub fn benchmarkKem(comptime Kem: anytype, comptime kems_count: comptime_int) !u } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, kems_count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(kems_count / elapsed_s)); return throughput; } @@ -249,8 +249,8 @@ pub fn benchmarkKemDecaps(comptime Kem: anytype, comptime kems_count: comptime_i } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, kems_count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(kems_count / elapsed_s)); return throughput; } @@ -267,8 +267,8 @@ pub fn benchmarkKemKeyGen(comptime Kem: anytype, comptime kems_count: comptime_i } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, kems_count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(kems_count / elapsed_s)); return throughput; } @@ -309,8 +309,8 @@ pub fn benchmarkAead(comptime Aead: anytype, comptime bytes: comptime_int) !u64 mem.doNotOptimizeAway(&in); const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, 2 * bytes / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(2 * bytes / elapsed_s)); return throughput; } @@ -338,8 +338,8 @@ pub fn benchmarkAes(comptime Aes: anytype, comptime count: comptime_int) !u64 { mem.doNotOptimizeAway(&in); const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(count / elapsed_s)); return throughput; } @@ -367,8 +367,8 @@ pub fn benchmarkAes8(comptime Aes: anytype, comptime count: comptime_int) !u64 { mem.doNotOptimizeAway(&in); const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, 8 * count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(8 * count / elapsed_s)); return throughput; } @@ -406,7 +406,7 @@ fn benchmarkPwhash( const password = "testpass" ** 2; const opts = .{ .allocator = allocator, - .params = @ptrCast(*const ty.Params, @alignCast(std.meta.alignment(ty.Params), params)).*, + .params = @as(*const ty.Params, @ptrCast(@alignCast(params))).*, .encoding = .phc, }; var buf: [256]u8 = undefined; @@ -422,7 +422,7 @@ fn benchmarkPwhash( } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; const throughput = elapsed_s / count; return throughput; diff 
--git a/lib/std/crypto/blake2.zig b/lib/std/crypto/blake2.zig index 316ea5e6b7..ba07226d08 100644 --- a/lib/std/crypto/blake2.zig +++ b/lib/std/crypto/blake2.zig @@ -80,7 +80,7 @@ pub fn Blake2s(comptime out_bits: usize) type { const key_len = if (options.key) |key| key.len else 0; // default parameters - d.h[0] ^= 0x01010000 ^ @truncate(u32, key_len << 8) ^ @intCast(u32, options.expected_out_bits >> 3); + d.h[0] ^= 0x01010000 ^ @as(u32, @truncate(key_len << 8)) ^ @as(u32, @intCast(options.expected_out_bits >> 3)); d.t = 0; d.buf_len = 0; @@ -127,7 +127,7 @@ pub fn Blake2s(comptime out_bits: usize) type { // Copy any remainder for next pass. const b_slice = b[off..]; @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice); - d.buf_len += @intCast(u8, b_slice.len); + d.buf_len += @as(u8, @intCast(b_slice.len)); } pub fn final(d: *Self, out: *[digest_length]u8) void { @@ -135,7 +135,7 @@ pub fn Blake2s(comptime out_bits: usize) type { d.t += d.buf_len; d.round(d.buf[0..], true); for (&d.h) |*x| x.* = mem.nativeToLittle(u32, x.*); - out.* = @ptrCast(*[digest_length]u8, &d.h).*; + out.* = @as(*[digest_length]u8, @ptrCast(&d.h)).*; } fn round(d: *Self, b: *const [64]u8, last: bool) void { @@ -152,8 +152,8 @@ pub fn Blake2s(comptime out_bits: usize) type { v[k + 8] = iv[k]; } - v[12] ^= @truncate(u32, d.t); - v[13] ^= @intCast(u32, d.t >> 32); + v[12] ^= @as(u32, @truncate(d.t)); + v[13] ^= @as(u32, @intCast(d.t >> 32)); if (last) v[14] = ~v[14]; const rounds = comptime [_]RoundParam{ @@ -563,7 +563,7 @@ pub fn Blake2b(comptime out_bits: usize) type { // Copy any remainder for next pass. const b_slice = b[off..]; @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice); - d.buf_len += @intCast(u8, b_slice.len); + d.buf_len += @as(u8, @intCast(b_slice.len)); } pub fn final(d: *Self, out: *[digest_length]u8) void { @@ -571,7 +571,7 @@ pub fn Blake2b(comptime out_bits: usize) type { d.t += d.buf_len; d.round(d.buf[0..], true); for (&d.h) |*x| x.* = mem.nativeToLittle(u64, x.*); - out.* = @ptrCast(*[digest_length]u8, &d.h).*; + out.* = @as(*[digest_length]u8, @ptrCast(&d.h)).*; } fn round(d: *Self, b: *const [128]u8, last: bool) void { @@ -588,8 +588,8 @@ pub fn Blake2b(comptime out_bits: usize) type { v[k + 8] = iv[k]; } - v[12] ^= @truncate(u64, d.t); - v[13] ^= @intCast(u64, d.t >> 64); + v[12] ^= @as(u64, @truncate(d.t)); + v[13] ^= @as(u64, @intCast(d.t >> 64)); if (last) v[14] = ~v[14]; const rounds = comptime [_]RoundParam{ diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig index 7ad1511e79..fc1859b99d 100644 --- a/lib/std/crypto/blake3.zig +++ b/lib/std/crypto/blake3.zig @@ -89,7 +89,7 @@ const CompressVectorized = struct { counter: u64, flags: u8, ) [16]u32 { - const md = Lane{ @truncate(u32, counter), @truncate(u32, counter >> 32), block_len, @as(u32, flags) }; + const md = Lane{ @as(u32, @truncate(counter)), @as(u32, @truncate(counter >> 32)), block_len, @as(u32, flags) }; var rows = Rows{ chaining_value[0..4].*, chaining_value[4..8].*, IV[0..4].*, md }; var m = Rows{ block_words[0..4].*, block_words[4..8].*, block_words[8..12].*, block_words[12..16].* }; @@ -134,7 +134,7 @@ const CompressVectorized = struct { rows[2] ^= @Vector(4, u32){ chaining_value[0], chaining_value[1], chaining_value[2], chaining_value[3] }; rows[3] ^= @Vector(4, u32){ chaining_value[4], chaining_value[5], chaining_value[6], chaining_value[7] }; - return @bitCast([16]u32, rows); + return @as([16]u32, @bitCast(rows)); } }; @@ -184,8 +184,8 @@ const CompressGeneric = struct { IV[1], IV[2], IV[3], - 
@truncate(u32, counter), - @truncate(u32, counter >> 32), + @as(u32, @truncate(counter)), + @as(u32, @truncate(counter >> 32)), block_len, flags, }; @@ -206,7 +206,7 @@ else CompressGeneric.compress; fn first8Words(words: [16]u32) [8]u32 { - return @ptrCast(*const [8]u32, &words).*; + return @as(*const [8]u32, @ptrCast(&words)).*; } fn wordsFromLittleEndianBytes(comptime count: usize, bytes: [count * 4]u8) [count]u32 { @@ -285,7 +285,7 @@ const ChunkState = struct { const want = BLOCK_LEN - self.block_len; const take = @min(want, input.len); @memcpy(self.block[self.block_len..][0..take], input[0..take]); - self.block_len += @truncate(u8, take); + self.block_len += @as(u8, @truncate(take)); return input[take..]; } @@ -658,7 +658,7 @@ fn testBlake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) !void { // Setup input pattern var input_pattern: [251]u8 = undefined; - for (&input_pattern, 0..) |*e, i| e.* = @truncate(u8, i); + for (&input_pattern, 0..) |*e, i| e.* = @as(u8, @truncate(i)); // Write repeating input pattern to hasher var input_counter = input_len; diff --git a/lib/std/crypto/chacha20.zig b/lib/std/crypto/chacha20.zig index 776387cbd9..6688fb97fa 100644 --- a/lib/std/crypto/chacha20.zig +++ b/lib/std/crypto/chacha20.zig @@ -587,8 +587,8 @@ fn ChaChaWith64BitNonce(comptime rounds_nb: usize) type { const k = keyToWords(key); var c: [4]u32 = undefined; - c[0] = @truncate(u32, counter); - c[1] = @truncate(u32, counter >> 32); + c[0] = @as(u32, @truncate(counter)); + c[1] = @as(u32, @truncate(counter >> 32)); c[2] = mem.readIntLittle(u32, nonce[0..4]); c[3] = mem.readIntLittle(u32, nonce[4..8]); ChaChaImpl(rounds_nb).chacha20Xor(out, in, k, c, true); @@ -600,8 +600,8 @@ fn ChaChaWith64BitNonce(comptime rounds_nb: usize) type { const k = keyToWords(key); var c: [4]u32 = undefined; - c[0] = @truncate(u32, counter); - c[1] = @truncate(u32, counter >> 32); + c[0] = @as(u32, @truncate(counter)); + c[1] = @as(u32, @truncate(counter >> 32)); c[2] = mem.readIntLittle(u32, nonce[0..4]); c[3] = mem.readIntLittle(u32, nonce[4..8]); ChaChaImpl(rounds_nb).chacha20Stream(out, k, c, true); diff --git a/lib/std/crypto/ecdsa.zig b/lib/std/crypto/ecdsa.zig index e552af2e26..1a5335b07e 100644 --- a/lib/std/crypto/ecdsa.zig +++ b/lib/std/crypto/ecdsa.zig @@ -122,9 +122,9 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type { pub fn toDer(self: Signature, buf: *[der_encoded_max_length]u8) []u8 { var fb = io.fixedBufferStream(buf); const w = fb.writer(); - const r_len = @intCast(u8, self.r.len + (self.r[0] >> 7)); - const s_len = @intCast(u8, self.s.len + (self.s[0] >> 7)); - const seq_len = @intCast(u8, 2 + r_len + 2 + s_len); + const r_len = @as(u8, @intCast(self.r.len + (self.r[0] >> 7))); + const s_len = @as(u8, @intCast(self.s.len + (self.s[0] >> 7))); + const seq_len = @as(u8, @intCast(2 + r_len + 2 + s_len)); w.writeAll(&[_]u8{ 0x30, seq_len }) catch unreachable; w.writeAll(&[_]u8{ 0x02, r_len }) catch unreachable; if (self.r[0] >> 7 != 0) { diff --git a/lib/std/crypto/ff.zig b/lib/std/crypto/ff.zig index 7b298c71c2..0a99058b21 100644 --- a/lib/std/crypto/ff.zig +++ b/lib/std/crypto/ff.zig @@ -100,7 +100,7 @@ pub fn Uint(comptime max_bits: comptime_int) type { var x = x_; var out = Self.zero; for (0..out.limbs.capacity()) |i| { - const t = if (@bitSizeOf(T) > t_bits) @truncate(TLimb, x) else x; + const t = if (@bitSizeOf(T) > t_bits) @as(TLimb, @truncate(x)) else x; out.limbs.set(i, t); x = math.shr(T, x, t_bits); } @@ -143,9 +143,9 @@ pub fn Uint(comptime max_bits: 
comptime_int) type { var remaining_bits = t_bits; var limb = self.limbs.get(i); while (remaining_bits >= 8) { - bytes[out_i] |= math.shl(u8, @truncate(u8, limb), shift); + bytes[out_i] |= math.shl(u8, @as(u8, @truncate(limb)), shift); const consumed = 8 - shift; - limb >>= @truncate(u4, consumed); + limb >>= @as(u4, @truncate(consumed)); remaining_bits -= consumed; shift = 0; switch (endian) { @@ -169,7 +169,7 @@ pub fn Uint(comptime max_bits: comptime_int) type { }, } } - bytes[out_i] |= @truncate(u8, limb); + bytes[out_i] |= @as(u8, @truncate(limb)); shift = remaining_bits; } } @@ -190,7 +190,7 @@ pub fn Uint(comptime max_bits: comptime_int) type { shift += 8; if (shift >= t_bits) { shift -= t_bits; - out.limbs.set(out_i, @truncate(TLimb, out.limbs.get(out_i))); + out.limbs.set(out_i, @as(TLimb, @truncate(out.limbs.get(out_i)))); const overflow = math.shr(Limb, bi, 8 - shift); out_i += 1; if (out_i >= out.limbs.len) { @@ -242,7 +242,7 @@ pub fn Uint(comptime max_bits: comptime_int) type { /// Returns `true` if the integer is odd. pub fn isOdd(x: Self) bool { - return @bitCast(bool, @truncate(u1, x.limbs.get(0))); + return @as(bool, @bitCast(@as(u1, @truncate(x.limbs.get(0))))); } /// Adds `y` to `x`, and returns `true` if the operation overflowed. @@ -273,8 +273,8 @@ pub fn Uint(comptime max_bits: comptime_int) type { var carry: u1 = 0; for (0..x.limbs_count()) |i| { const res = x_limbs[i] + y_limbs[i] + carry; - x_limbs[i] = ct.select(on, @truncate(TLimb, res), x_limbs[i]); - carry = @truncate(u1, res >> t_bits); + x_limbs[i] = ct.select(on, @as(TLimb, @truncate(res)), x_limbs[i]); + carry = @as(u1, @truncate(res >> t_bits)); } return carry; } @@ -288,8 +288,8 @@ pub fn Uint(comptime max_bits: comptime_int) type { var borrow: u1 = 0; for (0..x.limbs_count()) |i| { const res = x_limbs[i] -% y_limbs[i] -% borrow; - x_limbs[i] = ct.select(on, @truncate(TLimb, res), x_limbs[i]); - borrow = @truncate(u1, res >> t_bits); + x_limbs[i] = ct.select(on, @as(TLimb, @truncate(res)), x_limbs[i]); + borrow = @as(u1, @truncate(res >> t_bits)); } return borrow; } @@ -432,7 +432,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type { inline for (0..comptime math.log2_int(usize, t_bits)) |_| { y = y *% (2 -% lo *% y); } - const m0inv = (@as(Limb, 1) << t_bits) - (@truncate(TLimb, y)); + const m0inv = (@as(Limb, 1) << t_bits) - (@as(TLimb, @truncate(y))); const zero = Fe{ .v = FeUint.zero }; @@ -508,18 +508,18 @@ pub fn Modulus(comptime max_bits: comptime_int) type { var need_sub = false; var i: usize = t_bits - 1; while (true) : (i -= 1) { - var carry = @truncate(u1, math.shr(Limb, y, i)); + var carry = @as(u1, @truncate(math.shr(Limb, y, i))); var borrow: u1 = 0; for (0..self.limbs_count()) |j| { const l = ct.select(need_sub, d_limbs[j], x_limbs[j]); var res = (l << 1) + carry; - x_limbs[j] = @truncate(TLimb, res); - carry = @truncate(u1, res >> t_bits); + x_limbs[j] = @as(TLimb, @truncate(res)); + carry = @as(u1, @truncate(res >> t_bits)); res = x_limbs[j] -% m_limbs[j] -% borrow; - d_limbs[j] = @truncate(TLimb, res); + d_limbs[j] = @as(TLimb, @truncate(res)); - borrow = @truncate(u1, res >> t_bits); + borrow = @as(u1, @truncate(res >> t_bits)); } need_sub = ct.eql(carry, borrow); if (i == 0) break; @@ -531,7 +531,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type { pub fn add(self: Self, x: Fe, y: Fe) Fe { var out = x; const overflow = out.v.addWithOverflow(y.v); - const underflow = @bitCast(u1, ct.limbsCmpLt(out.v, self.v)); + const underflow = @as(u1, @bitCast(ct.limbsCmpLt(out.v, 
self.v))); const need_sub = ct.eql(overflow, underflow); _ = out.v.conditionalSubWithOverflow(need_sub, self.v); return out; @@ -540,7 +540,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type { /// Subtracts two field elements (mod m). pub fn sub(self: Self, x: Fe, y: Fe) Fe { var out = x; - const underflow = @bitCast(bool, out.v.subWithOverflow(y.v)); + const underflow = @as(bool, @bitCast(out.v.subWithOverflow(y.v))); _ = out.v.conditionalAddWithOverflow(underflow, self.v); return out; } @@ -601,7 +601,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type { var wide = ct.mulWide(a_limbs[i], b_limbs[0]); var z_lo = @addWithOverflow(d_limbs[0], wide.lo); - const f = @truncate(TLimb, z_lo[0] *% self.m0inv); + const f = @as(TLimb, @truncate(z_lo[0] *% self.m0inv)); var z_hi = wide.hi +% z_lo[1]; wide = ct.mulWide(f, m_limbs[0]); z_lo = @addWithOverflow(z_lo[0], wide.lo); @@ -620,13 +620,13 @@ pub fn Modulus(comptime max_bits: comptime_int) type { z_lo = @addWithOverflow(z_lo[0], carry); z_hi +%= z_lo[1]; if (j > 0) { - d_limbs[j - 1] = @truncate(TLimb, z_lo[0]); + d_limbs[j - 1] = @as(TLimb, @truncate(z_lo[0])); } carry = (z_hi << 1) | (z_lo[0] >> t_bits); } const z = overflow + carry; - d_limbs[self.limbs_count() - 1] = @truncate(TLimb, z); - overflow = @truncate(u1, z >> t_bits); + d_limbs[self.limbs_count() - 1] = @as(TLimb, @truncate(z)); + overflow = @as(u1, @truncate(z >> t_bits)); } return overflow; } @@ -735,7 +735,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type { t0 = pc[k - 1]; } else { for (pc, 0..) |t, i| { - t0.v.cmov(ct.eql(k, @truncate(u8, i + 1)), t.v); + t0.v.cmov(ct.eql(k, @as(u8, @truncate(i + 1))), t.v); } } const t1 = self.montgomeryMul(out, t0); @@ -771,7 +771,7 @@ const ct_protected = struct { fn eql(x: anytype, y: @TypeOf(x)) bool { const c1 = @subWithOverflow(x, y)[1]; const c2 = @subWithOverflow(y, x)[1]; - return @bitCast(bool, 1 - (c1 | c2)); + return @as(bool, @bitCast(1 - (c1 | c2))); } // Compares two big integers in constant time, returning true if x < y. @@ -782,28 +782,28 @@ const ct_protected = struct { var c: u1 = 0; for (0..x.limbs_count()) |i| { - c = @truncate(u1, (x_limbs[i] -% y_limbs[i] -% c) >> t_bits); + c = @as(u1, @truncate((x_limbs[i] -% y_limbs[i] -% c) >> t_bits)); } - return @bitCast(bool, c); + return @as(bool, @bitCast(c)); } // Compares two big integers in constant time, returning true if x >= y. fn limbsCmpGeq(x: anytype, y: @TypeOf(x)) bool { - return @bitCast(bool, 1 - @intFromBool(ct.limbsCmpLt(x, y))); + return @as(bool, @bitCast(1 - @intFromBool(ct.limbsCmpLt(x, y)))); } // Multiplies two limbs and returns the result as a wide limb. 
fn mulWide(x: Limb, y: Limb) WideLimb { const half_bits = @typeInfo(Limb).Int.bits / 2; const Half = meta.Int(.unsigned, half_bits); - const x0 = @truncate(Half, x); - const x1 = @truncate(Half, x >> half_bits); - const y0 = @truncate(Half, y); - const y1 = @truncate(Half, y >> half_bits); + const x0 = @as(Half, @truncate(x)); + const x1 = @as(Half, @truncate(x >> half_bits)); + const y0 = @as(Half, @truncate(y)); + const y1 = @as(Half, @truncate(y >> half_bits)); const w0 = math.mulWide(Half, x0, y0); const t = math.mulWide(Half, x1, y0) + (w0 >> half_bits); - var w1: Limb = @truncate(Half, t); - const w2 = @truncate(Half, t >> half_bits); + var w1: Limb = @as(Half, @truncate(t)); + const w2 = @as(Half, @truncate(t >> half_bits)); w1 += math.mulWide(Half, x0, y1); const hi = math.mulWide(Half, x1, y1) + w2 + (w1 >> half_bits); const lo = x *% y; @@ -847,8 +847,8 @@ const ct_unprotected = struct { fn mulWide(x: Limb, y: Limb) WideLimb { const wide = math.mulWide(Limb, x, y); return .{ - .hi = @truncate(Limb, wide >> @typeInfo(Limb).Int.bits), - .lo = @truncate(Limb, wide), + .hi = @as(Limb, @truncate(wide >> @typeInfo(Limb).Int.bits)), + .lo = @as(Limb, @truncate(wide)), }; } }; diff --git a/lib/std/crypto/ghash_polyval.zig b/lib/std/crypto/ghash_polyval.zig index 2fbff25f72..11379cc8e3 100644 --- a/lib/std/crypto/ghash_polyval.zig +++ b/lib/std/crypto/ghash_polyval.zig @@ -96,28 +96,28 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { const product = asm ( \\ vpclmulqdq $0x11, %[x], %[y], %[out] : [out] "=x" (-> @Vector(2, u64)), - : [x] "x" (@bitCast(@Vector(2, u64), x)), - [y] "x" (@bitCast(@Vector(2, u64), y)), + : [x] "x" (@as(@Vector(2, u64), @bitCast(x))), + [y] "x" (@as(@Vector(2, u64), @bitCast(y))), ); - return @bitCast(u128, product); + return @as(u128, @bitCast(product)); }, .lo => { const product = asm ( \\ vpclmulqdq $0x00, %[x], %[y], %[out] : [out] "=x" (-> @Vector(2, u64)), - : [x] "x" (@bitCast(@Vector(2, u64), x)), - [y] "x" (@bitCast(@Vector(2, u64), y)), + : [x] "x" (@as(@Vector(2, u64), @bitCast(x))), + [y] "x" (@as(@Vector(2, u64), @bitCast(y))), ); - return @bitCast(u128, product); + return @as(u128, @bitCast(product)); }, .hi_lo => { const product = asm ( \\ vpclmulqdq $0x10, %[x], %[y], %[out] : [out] "=x" (-> @Vector(2, u64)), - : [x] "x" (@bitCast(@Vector(2, u64), x)), - [y] "x" (@bitCast(@Vector(2, u64), y)), + : [x] "x" (@as(@Vector(2, u64), @bitCast(x))), + [y] "x" (@as(@Vector(2, u64), @bitCast(y))), ); - return @bitCast(u128, product); + return @as(u128, @bitCast(product)); }, } } @@ -129,28 +129,28 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { const product = asm ( \\ pmull2 %[out].1q, %[x].2d, %[y].2d : [out] "=w" (-> @Vector(2, u64)), - : [x] "w" (@bitCast(@Vector(2, u64), x)), - [y] "w" (@bitCast(@Vector(2, u64), y)), + : [x] "w" (@as(@Vector(2, u64), @bitCast(x))), + [y] "w" (@as(@Vector(2, u64), @bitCast(y))), ); - return @bitCast(u128, product); + return @as(u128, @bitCast(product)); }, .lo => { const product = asm ( \\ pmull %[out].1q, %[x].1d, %[y].1d : [out] "=w" (-> @Vector(2, u64)), - : [x] "w" (@bitCast(@Vector(2, u64), x)), - [y] "w" (@bitCast(@Vector(2, u64), y)), + : [x] "w" (@as(@Vector(2, u64), @bitCast(x))), + [y] "w" (@as(@Vector(2, u64), @bitCast(y))), ); - return @bitCast(u128, product); + return @as(u128, @bitCast(product)); }, .hi_lo => { const product = asm ( \\ pmull %[out].1q, %[x].1d, %[y].1d : [out] "=w" (-> @Vector(2, u64)), - : [x] "w" (@bitCast(@Vector(2, u64), 
x >> 64)), - [y] "w" (@bitCast(@Vector(2, u64), y)), + : [x] "w" (@as(@Vector(2, u64), @bitCast(x >> 64))), + [y] "w" (@as(@Vector(2, u64), @bitCast(y))), ); - return @bitCast(u128, product); + return @as(u128, @bitCast(product)); }, } } @@ -167,8 +167,8 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { // Software carryless multiplication of two 64-bit integers using native 128-bit registers. fn clmulSoft128(x_: u128, y_: u128, comptime half: Selector) u128 { - const x = @truncate(u64, if (half == .hi or half == .hi_lo) x_ >> 64 else x_); - const y = @truncate(u64, if (half == .hi) y_ >> 64 else y_); + const x = @as(u64, @truncate(if (half == .hi or half == .hi_lo) x_ >> 64 else x_)); + const y = @as(u64, @truncate(if (half == .hi) y_ >> 64 else y_)); const x0 = x & 0x1111111111111110; const x1 = x & 0x2222222222222220; @@ -216,12 +216,12 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { // Software carryless multiplication of two 128-bit integers using 64-bit registers. fn clmulSoft128_64(x_: u128, y_: u128, comptime half: Selector) u128 { - const a = @truncate(u64, if (half == .hi or half == .hi_lo) x_ >> 64 else x_); - const b = @truncate(u64, if (half == .hi) y_ >> 64 else y_); - const a0 = @truncate(u32, a); - const a1 = @truncate(u32, a >> 32); - const b0 = @truncate(u32, b); - const b1 = @truncate(u32, b >> 32); + const a = @as(u64, @truncate(if (half == .hi or half == .hi_lo) x_ >> 64 else x_)); + const b = @as(u64, @truncate(if (half == .hi) y_ >> 64 else y_)); + const a0 = @as(u32, @truncate(a)); + const a1 = @as(u32, @truncate(a >> 32)); + const b0 = @as(u32, @truncate(b)); + const b1 = @as(u32, @truncate(b >> 32)); const lo = clmulSoft32(a0, b0); const hi = clmulSoft32(a1, b1); const mid = clmulSoft32(a0 ^ a1, b0 ^ b1) ^ lo ^ hi; @@ -256,8 +256,8 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { // Multiply two 128-bit integers in GF(2^128). inline fn clmul128(x: u128, y: u128) I256 { if (mul_algorithm == .karatsuba) { - const x_hi = @truncate(u64, x >> 64); - const y_hi = @truncate(u64, y >> 64); + const x_hi = @as(u64, @truncate(x >> 64)); + const y_hi = @as(u64, @truncate(y >> 64)); const r_lo = clmul(x, y, .lo); const r_hi = clmul(x, y, .hi); const r_mid = clmul(x ^ x_hi, y ^ y_hi, .lo) ^ r_lo ^ r_hi; @@ -407,7 +407,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { st.pad(); mem.writeInt(u128, out[0..16], st.acc, endian); - utils.secureZero(u8, @ptrCast([*]u8, st)[0..@sizeOf(Self)]); + utils.secureZero(u8, @as([*]u8, @ptrCast(st))[0..@sizeOf(Self)]); } /// Compute the GHASH of a message. 
@@ -442,7 +442,7 @@ test "ghash2" { var key: [16]u8 = undefined; var i: usize = 0; while (i < key.len) : (i += 1) { - key[i] = @intCast(u8, i * 15 + 1); + key[i] = @as(u8, @intCast(i * 15 + 1)); } const tvs = [_]struct { len: usize, hash: [:0]const u8 }{ .{ .len = 5263, .hash = "b9395f37c131cd403a327ccf82ec016a" }, @@ -461,7 +461,7 @@ test "ghash2" { var m: [tv.len]u8 = undefined; i = 0; while (i < m.len) : (i += 1) { - m[i] = @truncate(u8, i % 254 + 1); + m[i] = @as(u8, @truncate(i % 254 + 1)); } var st = Ghash.init(&key); st.update(&m); diff --git a/lib/std/crypto/isap.zig b/lib/std/crypto/isap.zig index 5b0da739de..1d17e32be8 100644 --- a/lib/std/crypto/isap.zig +++ b/lib/std/crypto/isap.zig @@ -67,7 +67,7 @@ pub const IsapA128A = struct { var i: usize = 0; while (i < y.len * 8 - 1) : (i += 1) { const cur_byte_pos = i / 8; - const cur_bit_pos = @truncate(u3, 7 - (i % 8)); + const cur_bit_pos = @as(u3, @truncate(7 - (i % 8))); const cur_bit = ((y[cur_byte_pos] >> cur_bit_pos) & 1) << 7; isap.st.addByte(cur_bit, 0); isap.st.permuteR(1); diff --git a/lib/std/crypto/keccak_p.zig b/lib/std/crypto/keccak_p.zig index ddc9b1b847..d8130bc87a 100644 --- a/lib/std/crypto/keccak_p.zig +++ b/lib/std/crypto/keccak_p.zig @@ -33,7 +33,7 @@ pub fn KeccakF(comptime f: u11) type { 0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008, }; var rc: [max_rounds]T = undefined; - for (&rc, RC64[0..max_rounds]) |*t, c| t.* = @truncate(T, c); + for (&rc, RC64[0..max_rounds]) |*t, c| t.* = @as(T, @truncate(c)); break :rc rc; }; @@ -75,7 +75,7 @@ pub fn KeccakF(comptime f: u11) type { /// XOR a byte into the state at a given offset. pub fn addByte(self: *Self, byte: u8, offset: usize) void { - const z = @sizeOf(T) * @truncate(math.Log2Int(T), offset % @sizeOf(T)); + const z = @sizeOf(T) * @as(math.Log2Int(T), @truncate(offset % @sizeOf(T))); self.st[offset / @sizeOf(T)] ^= @as(T, byte) << z; } diff --git a/lib/std/crypto/kyber_d00.zig b/lib/std/crypto/kyber_d00.zig index 3cb0f02c0d..390ff8e7f2 100644 --- a/lib/std/crypto/kyber_d00.zig +++ b/lib/std/crypto/kyber_d00.zig @@ -579,7 +579,7 @@ test "invNTTReductions bounds" { if (j < 0) { break; } - xs[@intCast(usize, j)] = 1; + xs[@as(usize, @intCast(j))] = 1; } } } @@ -615,7 +615,7 @@ fn invertMod(a: anytype, p: @TypeOf(a)) @TypeOf(a) { // Reduce mod q for testing. fn modQ32(x: i32) i16 { - var y = @intCast(i16, @rem(x, @as(i32, Q))); + var y = @as(i16, @intCast(@rem(x, @as(i32, Q)))); if (y < 0) { y += Q; } @@ -638,7 +638,7 @@ fn montReduce(x: i32) i16 { // Note that x q' might be as big as 2³² and could overflow the int32 // multiplication in the last line. However for any int32s a and b, // we have int32(int64(a)*int64(b)) = int32(a*b) and so the result is ok. - const m = @truncate(i16, @truncate(i32, x *% qInv)); + const m = @as(i16, @truncate(@as(i32, @truncate(x *% qInv)))); // Note that x - m q is divisible by R; indeed modulo R we have // @@ -652,7 +652,7 @@ fn montReduce(x: i32) i16 { // and as both 2¹⁵ q ≤ m q, x < 2¹⁵ q, we have // 2¹⁶ q ≤ x - m q < 2¹⁶ and so q ≤ (x - m q) / R < q as desired. 
const yR = x - @as(i32, m) * @as(i32, Q); - return @bitCast(i16, @truncate(u16, @bitCast(u32, yR) >> 16)); + return @as(i16, @bitCast(@as(u16, @truncate(@as(u32, @bitCast(yR)) >> 16)))); } test "Test montReduce" { @@ -676,7 +676,7 @@ fn feToMont(x: i16) i16 { test "Test feToMont" { var x: i32 = -(1 << 15); while (x < 1 << 15) : (x += 1) { - const y = feToMont(@intCast(i16, x)); + const y = feToMont(@as(i16, @intCast(x))); try testing.expectEqual(modQ32(@as(i32, y)), modQ32(x * r_mod_q)); } } @@ -703,14 +703,14 @@ fn feBarrettReduce(x: i16) i16 { // To actually compute this, note that // // ⌊x 20156/2²⁶⌋ = (20159 x) >> 26. - return x -% @intCast(i16, (@as(i32, x) * 20159) >> 26) *% Q; + return x -% @as(i16, @intCast((@as(i32, x) * 20159) >> 26)) *% Q; } test "Test Barrett reduction" { var x: i32 = -(1 << 15); while (x < 1 << 15) : (x += 1) { - var y1 = feBarrettReduce(@intCast(i16, x)); - const y2 = @mod(@intCast(i16, x), Q); + var y1 = feBarrettReduce(@as(i16, @intCast(x))); + const y2 = @mod(@as(i16, @intCast(x)), Q); if (x < 0 and @rem(-x, Q) == 0) { y1 -= Q; } @@ -729,9 +729,9 @@ fn csubq(x: i16) i16 { test "Test csubq" { var x: i32 = -29439; while (x < 1 << 15) : (x += 1) { - const y1 = csubq(@intCast(i16, x)); - var y2 = @intCast(i16, x); - if (@intCast(i16, x) >= Q) { + const y1 = csubq(@as(i16, @intCast(x))); + var y2 = @as(i16, @intCast(x)); + if (@as(i16, @intCast(x)) >= Q) { y2 -= Q; } try testing.expectEqual(y1, y2); @@ -762,7 +762,7 @@ fn computeZetas() [128]i16 { @setEvalBranchQuota(10000); var ret: [128]i16 = undefined; for (&ret, 0..) |*r, i| { - const t = @intCast(i16, mpow(@as(i32, zeta), @bitReverse(@intCast(u7, i)), Q)); + const t = @as(i16, @intCast(mpow(@as(i32, zeta), @bitReverse(@as(u7, @intCast(i))), Q))); r.* = csubq(feBarrettReduce(feToMont(t))); } return ret; @@ -945,7 +945,7 @@ const Poly = struct { if (i < 0) { break; } - p.cs[@intCast(usize, i)] = feBarrettReduce(p.cs[@intCast(usize, i)]); + p.cs[@as(usize, @intCast(i))] = feBarrettReduce(p.cs[@as(usize, @intCast(i))]); } } @@ -1020,8 +1020,8 @@ const Poly = struct { // = ⌊(2ᵈ/q)x+½⌋ mod⁺ 2ᵈ // = ⌊((x << d) + q/2) / q⌋ mod⁺ 2ᵈ // = DIV((x << d) + q/2, q) & ((1< 0) { const out_shift = comptime 8 - todo; - out[out_off + j] |= @truncate(u8, (in[i] >> in_shift) << out_shift); + out[out_off + j] |= @as(u8, @truncate((in[i] >> in_shift) << out_shift)); const done = comptime @min(@min(d, todo), d - in_shift); todo -= done; @@ -1094,7 +1094,7 @@ const Poly = struct { // = ⌊(qx + 2ᵈ⁻¹)/2ᵈ⌋ // = (qx + (1<<(d-1))) >> d const qx = @as(u32, out) * @as(u32, Q); - ret.cs[out_off + i] = @intCast(i16, (qx + (1 << (d - 1))) >> d); + ret.cs[out_off + i] = @as(i16, @intCast((qx + (1 << (d - 1))) >> d)); } in_off += in_batch_size; @@ -1209,8 +1209,8 @@ const Poly = struct { // Extract each a and b separately and set coefficient in polynomial. 
inline for (0..batch_count) |j| { const mask2 = comptime (1 << eta) - 1; - const a = @intCast(i16, (d >> (comptime (2 * j * eta))) & mask2); - const b = @intCast(i16, (d >> (comptime ((2 * j + 1) * eta))) & mask2); + const a = @as(i16, @intCast((d >> (comptime (2 * j * eta))) & mask2)); + const b = @as(i16, @intCast((d >> (comptime ((2 * j + 1) * eta))) & mask2)); ret.cs[batch_count * i + j] = a - b; } } @@ -1246,7 +1246,7 @@ const Poly = struct { inline for (ts) |t| { if (t < Q) { - ret.cs[i] = @intCast(i16, t); + ret.cs[i] = @as(i16, @intCast(t)); i += 1; if (i == N) { @@ -1266,11 +1266,11 @@ const Poly = struct { fn toBytes(p: Poly) [bytes_length]u8 { var ret: [bytes_length]u8 = undefined; for (0..comptime N / 2) |i| { - const t0 = @intCast(u16, p.cs[2 * i]); - const t1 = @intCast(u16, p.cs[2 * i + 1]); - ret[3 * i] = @truncate(u8, t0); - ret[3 * i + 1] = @truncate(u8, (t0 >> 8) | (t1 << 4)); - ret[3 * i + 2] = @truncate(u8, t1 >> 4); + const t0 = @as(u16, @intCast(p.cs[2 * i])); + const t1 = @as(u16, @intCast(p.cs[2 * i + 1])); + ret[3 * i] = @as(u8, @truncate(t0)); + ret[3 * i + 1] = @as(u8, @truncate((t0 >> 8) | (t1 << 4))); + ret[3 * i + 2] = @as(u8, @truncate(t1 >> 4)); } return ret; } @@ -1356,7 +1356,7 @@ fn Vec(comptime K: u8) type { fn noise(comptime eta: u8, nonce: u8, seed: *const [32]u8) Self { var ret: Self = undefined; for (0..K) |i| { - ret.ps[i] = Poly.noise(eta, nonce + @intCast(u8, i), seed); + ret.ps[i] = Poly.noise(eta, nonce + @as(u8, @intCast(i)), seed); } return ret; } @@ -1534,7 +1534,7 @@ test "Compression" { test "noise" { var seed: [32]u8 = undefined; for (&seed, 0..) |*s, i| { - s.* = @intCast(u8, i); + s.* = @as(u8, @intCast(i)); } try testing.expectEqual(Poly.noise(3, 37, &seed).cs, .{ 0, 0, 1, -1, 0, 2, 0, -1, -1, 3, 0, 1, -2, -2, 0, 1, -2, @@ -1580,7 +1580,7 @@ test "noise" { test "uniform sampling" { var seed: [32]u8 = undefined; for (&seed, 0..) |*s, i| { - s.* = @intCast(u8, i); + s.* = @as(u8, @intCast(i)); } try testing.expectEqual(Poly.uniform(seed, 1, 0).cs, .{ 797, 993, 161, 6, 2608, 2385, 2096, 2661, 1676, 247, 2440, @@ -1623,17 +1623,17 @@ test "Test inner PKE" { var seed: [32]u8 = undefined; var pt: [32]u8 = undefined; for (&seed, &pt, 0..) |*s, *p, i| { - s.* = @intCast(u8, i); - p.* = @intCast(u8, i + 32); + s.* = @as(u8, @intCast(i)); + p.* = @as(u8, @intCast(i + 32)); } inline for (modes) |mode| { for (0..100) |i| { var pk: mode.InnerPk = undefined; var sk: mode.InnerSk = undefined; - seed[0] = @intCast(u8, i); + seed[0] = @as(u8, @intCast(i)); mode.innerKeyFromSeed(seed, &pk, &sk); for (0..10) |j| { - seed[1] = @intCast(u8, j); + seed[1] = @as(u8, @intCast(j)); try testing.expectEqual(sk.decrypt(&pk.encrypt(&pt, &seed)), pt); } } @@ -1643,18 +1643,18 @@ test "Test inner PKE" { test "Test happy flow" { var seed: [64]u8 = undefined; for (&seed, 0..) 
|*s, i| { - s.* = @intCast(u8, i); + s.* = @as(u8, @intCast(i)); } inline for (modes) |mode| { for (0..100) |i| { - seed[0] = @intCast(u8, i); + seed[0] = @as(u8, @intCast(i)); const kp = try mode.KeyPair.create(seed); const sk = try mode.SecretKey.fromBytes(&kp.secret_key.toBytes()); try testing.expectEqual(sk, kp.secret_key); const pk = try mode.PublicKey.fromBytes(&kp.public_key.toBytes()); try testing.expectEqual(pk, kp.public_key); for (0..10) |j| { - seed[1] = @intCast(u8, j); + seed[1] = @as(u8, @intCast(j)); const e = pk.encaps(seed[0..32].*); try testing.expectEqual(e.shared_secret, try sk.decaps(&e.ciphertext)); } @@ -1675,7 +1675,7 @@ test "NIST KAT test" { const mode = modeHash[0]; var seed: [48]u8 = undefined; for (&seed, 0..) |*s, i| { - s.* = @intCast(u8, i); + s.* = @as(u8, @intCast(i)); } var f = sha2.Sha256.init(.{}); const fw = f.writer(); diff --git a/lib/std/crypto/md5.zig b/lib/std/crypto/md5.zig index bd4a78c032..b480cbcd8e 100644 --- a/lib/std/crypto/md5.zig +++ b/lib/std/crypto/md5.zig @@ -80,7 +80,7 @@ pub const Md5 = struct { // Copy any remainder for next pass. const b_slice = b[off..]; @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice); - d.buf_len += @intCast(u8, b_slice.len); + d.buf_len += @as(u8, @intCast(b_slice.len)); // Md5 uses the bottom 64-bits for length padding d.total_len +%= b.len; @@ -103,9 +103,9 @@ pub const Md5 = struct { // Append message length. var i: usize = 1; var len = d.total_len >> 5; - d.buf[56] = @intCast(u8, d.total_len & 0x1f) << 3; + d.buf[56] = @as(u8, @intCast(d.total_len & 0x1f)) << 3; while (i < 8) : (i += 1) { - d.buf[56 + i] = @intCast(u8, len & 0xff); + d.buf[56 + i] = @as(u8, @intCast(len & 0xff)); len >>= 8; } diff --git a/lib/std/crypto/pbkdf2.zig b/lib/std/crypto/pbkdf2.zig index 115fd38b3d..2e0318369b 100644 --- a/lib/std/crypto/pbkdf2.zig +++ b/lib/std/crypto/pbkdf2.zig @@ -74,7 +74,7 @@ pub fn pbkdf2(dk: []u8, password: []const u8, salt: []const u8, rounds: u32, com // block // - const blocks_count = @intCast(u32, std.math.divCeil(usize, dk_len, h_len) catch unreachable); + const blocks_count = @as(u32, @intCast(std.math.divCeil(usize, dk_len, h_len) catch unreachable)); var r = dk_len % h_len; if (r == 0) { r = h_len; diff --git a/lib/std/crypto/pcurves/common.zig b/lib/std/crypto/pcurves/common.zig index 5d41bc190a..edc437517c 100644 --- a/lib/std/crypto/pcurves/common.zig +++ b/lib/std/crypto/pcurves/common.zig @@ -120,7 +120,7 @@ pub fn Field(comptime params: FieldParams) type { /// Return true if the element is odd. pub fn isOdd(fe: Fe) bool { const s = fe.toBytes(.Little); - return @truncate(u1, s[0]) != 0; + return @as(u1, @truncate(s[0])) != 0; } /// Conditonally replace a field element with `a` if `c` is positive. 
@@ -179,7 +179,7 @@ pub fn Field(comptime params: FieldParams) type { var x: T = n; var t = a; while (true) { - if (@truncate(u1, x) != 0) fe = fe.mul(t); + if (@as(u1, @truncate(x)) != 0) fe = fe.mul(t); x >>= 1; if (x == 0) break; t = t.sq(); @@ -233,7 +233,7 @@ pub fn Field(comptime params: FieldParams) type { } var v_opp: Limbs = undefined; fiat.opp(&v_opp, v); - fiat.selectznz(&v, @truncate(u1, f[f.len - 1] >> (@bitSizeOf(Word) - 1)), v, v_opp); + fiat.selectznz(&v, @as(u1, @truncate(f[f.len - 1] >> (@bitSizeOf(Word) - 1))), v, v_opp); const precomp = blk: { var precomp: Limbs = undefined; diff --git a/lib/std/crypto/pcurves/p256.zig b/lib/std/crypto/pcurves/p256.zig index a797fbce3e..668c0115b2 100644 --- a/lib/std/crypto/pcurves/p256.zig +++ b/lib/std/crypto/pcurves/p256.zig @@ -318,7 +318,7 @@ pub const P256 = struct { var t = P256.identityElement; comptime var i: u8 = 1; inline while (i < pc.len) : (i += 1) { - t.cMov(pc[i], @truncate(u1, (@as(usize, b ^ i) -% 1) >> 8)); + t.cMov(pc[i], @as(u1, @truncate((@as(usize, b ^ i) -% 1) >> 8))); } return t; } @@ -326,8 +326,8 @@ pub const P256 = struct { fn slide(s: [32]u8) [2 * 32 + 1]i8 { var e: [2 * 32 + 1]i8 = undefined; for (s, 0..) |x, i| { - e[i * 2 + 0] = @as(i8, @truncate(u4, x)); - e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4)); + e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x))); + e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4))); } // Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7 var carry: i8 = 0; @@ -351,9 +351,9 @@ pub const P256 = struct { while (true) : (pos -= 1) { const slot = e[pos]; if (slot > 0) { - q = q.add(pc[@intCast(usize, slot)]); + q = q.add(pc[@as(usize, @intCast(slot))]); } else if (slot < 0) { - q = q.sub(pc[@intCast(usize, -slot)]); + q = q.sub(pc[@as(usize, @intCast(-slot))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); @@ -366,7 +366,7 @@ pub const P256 = struct { var q = P256.identityElement; var pos: usize = 252; while (true) : (pos -= 4) { - const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos))); + const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos))))); if (vartime) { if (slot != 0) { q = q.add(pc[slot]); @@ -445,15 +445,15 @@ pub const P256 = struct { while (true) : (pos -= 1) { const slot1 = e1[pos]; if (slot1 > 0) { - q = q.add(pc1[@intCast(usize, slot1)]); + q = q.add(pc1[@as(usize, @intCast(slot1))]); } else if (slot1 < 0) { - q = q.sub(pc1[@intCast(usize, -slot1)]); + q = q.sub(pc1[@as(usize, @intCast(-slot1))]); } const slot2 = e2[pos]; if (slot2 > 0) { - q = q.add(pc2[@intCast(usize, slot2)]); + q = q.add(pc2[@as(usize, @intCast(slot2))]); } else if (slot2 < 0) { - q = q.sub(pc2[@intCast(usize, -slot2)]); + q = q.sub(pc2[@as(usize, @intCast(-slot2))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); diff --git a/lib/std/crypto/pcurves/p256/p256_64.zig b/lib/std/crypto/pcurves/p256/p256_64.zig index e8ba37e845..e8dbaead33 100644 --- a/lib/std/crypto/pcurves/p256/p256_64.zig +++ b/lib/std/crypto/pcurves/p256/p256_64.zig @@ -119,8 +119,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); - out1.* = @truncate(u64, x); - out2.* = @truncate(u64, x >> 64); + out1.* = @as(u64, @truncate(x)); + out2.* = @as(u64, @truncate(x >> 64)); } /// The function cmovznzU64 is a single-word conditional move. 
@@ -1355,62 +1355,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void { const x2 = (arg1[2]); const x3 = (arg1[1]); const x4 = (arg1[0]); - const x5 = @truncate(u8, (x4 & @as(u64, 0xff))); + const x5 = @as(u8, @truncate((x4 & @as(u64, 0xff)))); const x6 = (x4 >> 8); - const x7 = @truncate(u8, (x6 & @as(u64, 0xff))); + const x7 = @as(u8, @truncate((x6 & @as(u64, 0xff)))); const x8 = (x6 >> 8); - const x9 = @truncate(u8, (x8 & @as(u64, 0xff))); + const x9 = @as(u8, @truncate((x8 & @as(u64, 0xff)))); const x10 = (x8 >> 8); - const x11 = @truncate(u8, (x10 & @as(u64, 0xff))); + const x11 = @as(u8, @truncate((x10 & @as(u64, 0xff)))); const x12 = (x10 >> 8); - const x13 = @truncate(u8, (x12 & @as(u64, 0xff))); + const x13 = @as(u8, @truncate((x12 & @as(u64, 0xff)))); const x14 = (x12 >> 8); - const x15 = @truncate(u8, (x14 & @as(u64, 0xff))); + const x15 = @as(u8, @truncate((x14 & @as(u64, 0xff)))); const x16 = (x14 >> 8); - const x17 = @truncate(u8, (x16 & @as(u64, 0xff))); - const x18 = @truncate(u8, (x16 >> 8)); - const x19 = @truncate(u8, (x3 & @as(u64, 0xff))); + const x17 = @as(u8, @truncate((x16 & @as(u64, 0xff)))); + const x18 = @as(u8, @truncate((x16 >> 8))); + const x19 = @as(u8, @truncate((x3 & @as(u64, 0xff)))); const x20 = (x3 >> 8); - const x21 = @truncate(u8, (x20 & @as(u64, 0xff))); + const x21 = @as(u8, @truncate((x20 & @as(u64, 0xff)))); const x22 = (x20 >> 8); - const x23 = @truncate(u8, (x22 & @as(u64, 0xff))); + const x23 = @as(u8, @truncate((x22 & @as(u64, 0xff)))); const x24 = (x22 >> 8); - const x25 = @truncate(u8, (x24 & @as(u64, 0xff))); + const x25 = @as(u8, @truncate((x24 & @as(u64, 0xff)))); const x26 = (x24 >> 8); - const x27 = @truncate(u8, (x26 & @as(u64, 0xff))); + const x27 = @as(u8, @truncate((x26 & @as(u64, 0xff)))); const x28 = (x26 >> 8); - const x29 = @truncate(u8, (x28 & @as(u64, 0xff))); + const x29 = @as(u8, @truncate((x28 & @as(u64, 0xff)))); const x30 = (x28 >> 8); - const x31 = @truncate(u8, (x30 & @as(u64, 0xff))); - const x32 = @truncate(u8, (x30 >> 8)); - const x33 = @truncate(u8, (x2 & @as(u64, 0xff))); + const x31 = @as(u8, @truncate((x30 & @as(u64, 0xff)))); + const x32 = @as(u8, @truncate((x30 >> 8))); + const x33 = @as(u8, @truncate((x2 & @as(u64, 0xff)))); const x34 = (x2 >> 8); - const x35 = @truncate(u8, (x34 & @as(u64, 0xff))); + const x35 = @as(u8, @truncate((x34 & @as(u64, 0xff)))); const x36 = (x34 >> 8); - const x37 = @truncate(u8, (x36 & @as(u64, 0xff))); + const x37 = @as(u8, @truncate((x36 & @as(u64, 0xff)))); const x38 = (x36 >> 8); - const x39 = @truncate(u8, (x38 & @as(u64, 0xff))); + const x39 = @as(u8, @truncate((x38 & @as(u64, 0xff)))); const x40 = (x38 >> 8); - const x41 = @truncate(u8, (x40 & @as(u64, 0xff))); + const x41 = @as(u8, @truncate((x40 & @as(u64, 0xff)))); const x42 = (x40 >> 8); - const x43 = @truncate(u8, (x42 & @as(u64, 0xff))); + const x43 = @as(u8, @truncate((x42 & @as(u64, 0xff)))); const x44 = (x42 >> 8); - const x45 = @truncate(u8, (x44 & @as(u64, 0xff))); - const x46 = @truncate(u8, (x44 >> 8)); - const x47 = @truncate(u8, (x1 & @as(u64, 0xff))); + const x45 = @as(u8, @truncate((x44 & @as(u64, 0xff)))); + const x46 = @as(u8, @truncate((x44 >> 8))); + const x47 = @as(u8, @truncate((x1 & @as(u64, 0xff)))); const x48 = (x1 >> 8); - const x49 = @truncate(u8, (x48 & @as(u64, 0xff))); + const x49 = @as(u8, @truncate((x48 & @as(u64, 0xff)))); const x50 = (x48 >> 8); - const x51 = @truncate(u8, (x50 & @as(u64, 0xff))); + const x51 = @as(u8, @truncate((x50 & @as(u64, 0xff)))); const x52 = (x50 >> 8); - const 
x53 = @truncate(u8, (x52 & @as(u64, 0xff))); + const x53 = @as(u8, @truncate((x52 & @as(u64, 0xff)))); const x54 = (x52 >> 8); - const x55 = @truncate(u8, (x54 & @as(u64, 0xff))); + const x55 = @as(u8, @truncate((x54 & @as(u64, 0xff)))); const x56 = (x54 >> 8); - const x57 = @truncate(u8, (x56 & @as(u64, 0xff))); + const x57 = @as(u8, @truncate((x56 & @as(u64, 0xff)))); const x58 = (x56 >> 8); - const x59 = @truncate(u8, (x58 & @as(u64, 0xff))); - const x60 = @truncate(u8, (x58 >> 8)); + const x59 = @as(u8, @truncate((x58 & @as(u64, 0xff)))); + const x60 = @as(u8, @truncate((x58 >> 8))); out1[0] = x5; out1[1] = x7; out1[2] = x9; @@ -1593,7 +1593,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ var x1: u64 = undefined; var x2: u1 = undefined; addcarryxU64(&x1, &x2, 0x0, (~arg1), @as(u64, 0x1)); - const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & @as(u64, 0x1)))); + const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & @as(u64, 0x1))))); var x4: u64 = undefined; var x5: u1 = undefined; addcarryxU64(&x4, &x5, 0x0, (~arg1), @as(u64, 0x1)); @@ -1707,7 +1707,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ cmovznzU64(&x72, x3, (arg5[2]), x66); var x73: u64 = undefined; cmovznzU64(&x73, x3, (arg5[3]), x68); - const x74 = @truncate(u1, (x22 & @as(u64, 0x1))); + const x74 = @as(u1, @truncate((x22 & @as(u64, 0x1)))); var x75: u64 = undefined; cmovznzU64(&x75, x74, @as(u64, 0x0), x7); var x76: u64 = undefined; diff --git a/lib/std/crypto/pcurves/p256/p256_scalar_64.zig b/lib/std/crypto/pcurves/p256/p256_scalar_64.zig index ea102360cf..152c2b8787 100644 --- a/lib/std/crypto/pcurves/p256/p256_scalar_64.zig +++ b/lib/std/crypto/pcurves/p256/p256_scalar_64.zig @@ -119,8 +119,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); - out1.* = @truncate(u64, x); - out2.* = @truncate(u64, x >> 64); + out1.* = @as(u64, @truncate(x)); + out2.* = @as(u64, @truncate(x >> 64)); } /// The function cmovznzU64 is a single-word conditional move. 
@@ -1559,62 +1559,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void { const x2 = (arg1[2]); const x3 = (arg1[1]); const x4 = (arg1[0]); - const x5 = @truncate(u8, (x4 & @as(u64, 0xff))); + const x5 = @as(u8, @truncate((x4 & @as(u64, 0xff)))); const x6 = (x4 >> 8); - const x7 = @truncate(u8, (x6 & @as(u64, 0xff))); + const x7 = @as(u8, @truncate((x6 & @as(u64, 0xff)))); const x8 = (x6 >> 8); - const x9 = @truncate(u8, (x8 & @as(u64, 0xff))); + const x9 = @as(u8, @truncate((x8 & @as(u64, 0xff)))); const x10 = (x8 >> 8); - const x11 = @truncate(u8, (x10 & @as(u64, 0xff))); + const x11 = @as(u8, @truncate((x10 & @as(u64, 0xff)))); const x12 = (x10 >> 8); - const x13 = @truncate(u8, (x12 & @as(u64, 0xff))); + const x13 = @as(u8, @truncate((x12 & @as(u64, 0xff)))); const x14 = (x12 >> 8); - const x15 = @truncate(u8, (x14 & @as(u64, 0xff))); + const x15 = @as(u8, @truncate((x14 & @as(u64, 0xff)))); const x16 = (x14 >> 8); - const x17 = @truncate(u8, (x16 & @as(u64, 0xff))); - const x18 = @truncate(u8, (x16 >> 8)); - const x19 = @truncate(u8, (x3 & @as(u64, 0xff))); + const x17 = @as(u8, @truncate((x16 & @as(u64, 0xff)))); + const x18 = @as(u8, @truncate((x16 >> 8))); + const x19 = @as(u8, @truncate((x3 & @as(u64, 0xff)))); const x20 = (x3 >> 8); - const x21 = @truncate(u8, (x20 & @as(u64, 0xff))); + const x21 = @as(u8, @truncate((x20 & @as(u64, 0xff)))); const x22 = (x20 >> 8); - const x23 = @truncate(u8, (x22 & @as(u64, 0xff))); + const x23 = @as(u8, @truncate((x22 & @as(u64, 0xff)))); const x24 = (x22 >> 8); - const x25 = @truncate(u8, (x24 & @as(u64, 0xff))); + const x25 = @as(u8, @truncate((x24 & @as(u64, 0xff)))); const x26 = (x24 >> 8); - const x27 = @truncate(u8, (x26 & @as(u64, 0xff))); + const x27 = @as(u8, @truncate((x26 & @as(u64, 0xff)))); const x28 = (x26 >> 8); - const x29 = @truncate(u8, (x28 & @as(u64, 0xff))); + const x29 = @as(u8, @truncate((x28 & @as(u64, 0xff)))); const x30 = (x28 >> 8); - const x31 = @truncate(u8, (x30 & @as(u64, 0xff))); - const x32 = @truncate(u8, (x30 >> 8)); - const x33 = @truncate(u8, (x2 & @as(u64, 0xff))); + const x31 = @as(u8, @truncate((x30 & @as(u64, 0xff)))); + const x32 = @as(u8, @truncate((x30 >> 8))); + const x33 = @as(u8, @truncate((x2 & @as(u64, 0xff)))); const x34 = (x2 >> 8); - const x35 = @truncate(u8, (x34 & @as(u64, 0xff))); + const x35 = @as(u8, @truncate((x34 & @as(u64, 0xff)))); const x36 = (x34 >> 8); - const x37 = @truncate(u8, (x36 & @as(u64, 0xff))); + const x37 = @as(u8, @truncate((x36 & @as(u64, 0xff)))); const x38 = (x36 >> 8); - const x39 = @truncate(u8, (x38 & @as(u64, 0xff))); + const x39 = @as(u8, @truncate((x38 & @as(u64, 0xff)))); const x40 = (x38 >> 8); - const x41 = @truncate(u8, (x40 & @as(u64, 0xff))); + const x41 = @as(u8, @truncate((x40 & @as(u64, 0xff)))); const x42 = (x40 >> 8); - const x43 = @truncate(u8, (x42 & @as(u64, 0xff))); + const x43 = @as(u8, @truncate((x42 & @as(u64, 0xff)))); const x44 = (x42 >> 8); - const x45 = @truncate(u8, (x44 & @as(u64, 0xff))); - const x46 = @truncate(u8, (x44 >> 8)); - const x47 = @truncate(u8, (x1 & @as(u64, 0xff))); + const x45 = @as(u8, @truncate((x44 & @as(u64, 0xff)))); + const x46 = @as(u8, @truncate((x44 >> 8))); + const x47 = @as(u8, @truncate((x1 & @as(u64, 0xff)))); const x48 = (x1 >> 8); - const x49 = @truncate(u8, (x48 & @as(u64, 0xff))); + const x49 = @as(u8, @truncate((x48 & @as(u64, 0xff)))); const x50 = (x48 >> 8); - const x51 = @truncate(u8, (x50 & @as(u64, 0xff))); + const x51 = @as(u8, @truncate((x50 & @as(u64, 0xff)))); const x52 = (x50 >> 8); - const 
x53 = @truncate(u8, (x52 & @as(u64, 0xff))); + const x53 = @as(u8, @truncate((x52 & @as(u64, 0xff)))); const x54 = (x52 >> 8); - const x55 = @truncate(u8, (x54 & @as(u64, 0xff))); + const x55 = @as(u8, @truncate((x54 & @as(u64, 0xff)))); const x56 = (x54 >> 8); - const x57 = @truncate(u8, (x56 & @as(u64, 0xff))); + const x57 = @as(u8, @truncate((x56 & @as(u64, 0xff)))); const x58 = (x56 >> 8); - const x59 = @truncate(u8, (x58 & @as(u64, 0xff))); - const x60 = @truncate(u8, (x58 >> 8)); + const x59 = @as(u8, @truncate((x58 & @as(u64, 0xff)))); + const x60 = @as(u8, @truncate((x58 >> 8))); out1[0] = x5; out1[1] = x7; out1[2] = x9; @@ -1797,7 +1797,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ var x1: u64 = undefined; var x2: u1 = undefined; addcarryxU64(&x1, &x2, 0x0, (~arg1), @as(u64, 0x1)); - const x3 = @truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & @as(u64, 0x1))); + const x3 = @as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & @as(u64, 0x1)))); var x4: u64 = undefined; var x5: u1 = undefined; addcarryxU64(&x4, &x5, 0x0, (~arg1), @as(u64, 0x1)); @@ -1911,7 +1911,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ cmovznzU64(&x72, x3, (arg5[2]), x66); var x73: u64 = undefined; cmovznzU64(&x73, x3, (arg5[3]), x68); - const x74 = @truncate(u1, (x22 & @as(u64, 0x1))); + const x74 = @as(u1, @truncate((x22 & @as(u64, 0x1)))); var x75: u64 = undefined; cmovznzU64(&x75, x74, @as(u64, 0x0), x7); var x76: u64 = undefined; diff --git a/lib/std/crypto/pcurves/p384.zig b/lib/std/crypto/pcurves/p384.zig index 3d96592f50..d5afd6eb4d 100644 --- a/lib/std/crypto/pcurves/p384.zig +++ b/lib/std/crypto/pcurves/p384.zig @@ -318,7 +318,7 @@ pub const P384 = struct { var t = P384.identityElement; comptime var i: u8 = 1; inline while (i < pc.len) : (i += 1) { - t.cMov(pc[i], @truncate(u1, (@as(usize, b ^ i) -% 1) >> 8)); + t.cMov(pc[i], @as(u1, @truncate((@as(usize, b ^ i) -% 1) >> 8))); } return t; } @@ -326,8 +326,8 @@ pub const P384 = struct { fn slide(s: [48]u8) [2 * 48 + 1]i8 { var e: [2 * 48 + 1]i8 = undefined; for (s, 0..) 
|x, i| { - e[i * 2 + 0] = @as(i8, @truncate(u4, x)); - e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4)); + e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x))); + e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4))); } // Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7 var carry: i8 = 0; @@ -351,9 +351,9 @@ pub const P384 = struct { while (true) : (pos -= 1) { const slot = e[pos]; if (slot > 0) { - q = q.add(pc[@intCast(usize, slot)]); + q = q.add(pc[@as(usize, @intCast(slot))]); } else if (slot < 0) { - q = q.sub(pc[@intCast(usize, -slot)]); + q = q.sub(pc[@as(usize, @intCast(-slot))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); @@ -366,7 +366,7 @@ pub const P384 = struct { var q = P384.identityElement; var pos: usize = 380; while (true) : (pos -= 4) { - const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos))); + const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos))))); if (vartime) { if (slot != 0) { q = q.add(pc[slot]); @@ -445,15 +445,15 @@ pub const P384 = struct { while (true) : (pos -= 1) { const slot1 = e1[pos]; if (slot1 > 0) { - q = q.add(pc1[@intCast(usize, slot1)]); + q = q.add(pc1[@as(usize, @intCast(slot1))]); } else if (slot1 < 0) { - q = q.sub(pc1[@intCast(usize, -slot1)]); + q = q.sub(pc1[@as(usize, @intCast(-slot1))]); } const slot2 = e2[pos]; if (slot2 > 0) { - q = q.add(pc2[@intCast(usize, slot2)]); + q = q.add(pc2[@as(usize, @intCast(slot2))]); } else if (slot2 < 0) { - q = q.sub(pc2[@intCast(usize, -slot2)]); + q = q.sub(pc2[@as(usize, @intCast(-slot2))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); diff --git a/lib/std/crypto/pcurves/p384/p384_64.zig b/lib/std/crypto/pcurves/p384/p384_64.zig index 45c12835b3..f25a7d65b5 100644 --- a/lib/std/crypto/pcurves/p384/p384_64.zig +++ b/lib/std/crypto/pcurves/p384/p384_64.zig @@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); - out1.* = @truncate(u64, x); - out2.* = @truncate(u64, x >> 64); + out1.* = @as(u64, @truncate(x)); + out2.* = @as(u64, @truncate(x >> 64)); } /// The function cmovznzU64 is a single-word conditional move. 
@@ -2928,90 +2928,90 @@ pub fn toBytes(out1: *[48]u8, arg1: [6]u64) void { const x4 = (arg1[2]); const x5 = (arg1[1]); const x6 = (arg1[0]); - const x7 = @truncate(u8, (x6 & 0xff)); + const x7 = @as(u8, @truncate((x6 & 0xff))); const x8 = (x6 >> 8); - const x9 = @truncate(u8, (x8 & 0xff)); + const x9 = @as(u8, @truncate((x8 & 0xff))); const x10 = (x8 >> 8); - const x11 = @truncate(u8, (x10 & 0xff)); + const x11 = @as(u8, @truncate((x10 & 0xff))); const x12 = (x10 >> 8); - const x13 = @truncate(u8, (x12 & 0xff)); + const x13 = @as(u8, @truncate((x12 & 0xff))); const x14 = (x12 >> 8); - const x15 = @truncate(u8, (x14 & 0xff)); + const x15 = @as(u8, @truncate((x14 & 0xff))); const x16 = (x14 >> 8); - const x17 = @truncate(u8, (x16 & 0xff)); + const x17 = @as(u8, @truncate((x16 & 0xff))); const x18 = (x16 >> 8); - const x19 = @truncate(u8, (x18 & 0xff)); - const x20 = @truncate(u8, (x18 >> 8)); - const x21 = @truncate(u8, (x5 & 0xff)); + const x19 = @as(u8, @truncate((x18 & 0xff))); + const x20 = @as(u8, @truncate((x18 >> 8))); + const x21 = @as(u8, @truncate((x5 & 0xff))); const x22 = (x5 >> 8); - const x23 = @truncate(u8, (x22 & 0xff)); + const x23 = @as(u8, @truncate((x22 & 0xff))); const x24 = (x22 >> 8); - const x25 = @truncate(u8, (x24 & 0xff)); + const x25 = @as(u8, @truncate((x24 & 0xff))); const x26 = (x24 >> 8); - const x27 = @truncate(u8, (x26 & 0xff)); + const x27 = @as(u8, @truncate((x26 & 0xff))); const x28 = (x26 >> 8); - const x29 = @truncate(u8, (x28 & 0xff)); + const x29 = @as(u8, @truncate((x28 & 0xff))); const x30 = (x28 >> 8); - const x31 = @truncate(u8, (x30 & 0xff)); + const x31 = @as(u8, @truncate((x30 & 0xff))); const x32 = (x30 >> 8); - const x33 = @truncate(u8, (x32 & 0xff)); - const x34 = @truncate(u8, (x32 >> 8)); - const x35 = @truncate(u8, (x4 & 0xff)); + const x33 = @as(u8, @truncate((x32 & 0xff))); + const x34 = @as(u8, @truncate((x32 >> 8))); + const x35 = @as(u8, @truncate((x4 & 0xff))); const x36 = (x4 >> 8); - const x37 = @truncate(u8, (x36 & 0xff)); + const x37 = @as(u8, @truncate((x36 & 0xff))); const x38 = (x36 >> 8); - const x39 = @truncate(u8, (x38 & 0xff)); + const x39 = @as(u8, @truncate((x38 & 0xff))); const x40 = (x38 >> 8); - const x41 = @truncate(u8, (x40 & 0xff)); + const x41 = @as(u8, @truncate((x40 & 0xff))); const x42 = (x40 >> 8); - const x43 = @truncate(u8, (x42 & 0xff)); + const x43 = @as(u8, @truncate((x42 & 0xff))); const x44 = (x42 >> 8); - const x45 = @truncate(u8, (x44 & 0xff)); + const x45 = @as(u8, @truncate((x44 & 0xff))); const x46 = (x44 >> 8); - const x47 = @truncate(u8, (x46 & 0xff)); - const x48 = @truncate(u8, (x46 >> 8)); - const x49 = @truncate(u8, (x3 & 0xff)); + const x47 = @as(u8, @truncate((x46 & 0xff))); + const x48 = @as(u8, @truncate((x46 >> 8))); + const x49 = @as(u8, @truncate((x3 & 0xff))); const x50 = (x3 >> 8); - const x51 = @truncate(u8, (x50 & 0xff)); + const x51 = @as(u8, @truncate((x50 & 0xff))); const x52 = (x50 >> 8); - const x53 = @truncate(u8, (x52 & 0xff)); + const x53 = @as(u8, @truncate((x52 & 0xff))); const x54 = (x52 >> 8); - const x55 = @truncate(u8, (x54 & 0xff)); + const x55 = @as(u8, @truncate((x54 & 0xff))); const x56 = (x54 >> 8); - const x57 = @truncate(u8, (x56 & 0xff)); + const x57 = @as(u8, @truncate((x56 & 0xff))); const x58 = (x56 >> 8); - const x59 = @truncate(u8, (x58 & 0xff)); + const x59 = @as(u8, @truncate((x58 & 0xff))); const x60 = (x58 >> 8); - const x61 = @truncate(u8, (x60 & 0xff)); - const x62 = @truncate(u8, (x60 >> 8)); - const x63 = @truncate(u8, (x2 & 0xff)); + const x61 = 
@as(u8, @truncate((x60 & 0xff))); + const x62 = @as(u8, @truncate((x60 >> 8))); + const x63 = @as(u8, @truncate((x2 & 0xff))); const x64 = (x2 >> 8); - const x65 = @truncate(u8, (x64 & 0xff)); + const x65 = @as(u8, @truncate((x64 & 0xff))); const x66 = (x64 >> 8); - const x67 = @truncate(u8, (x66 & 0xff)); + const x67 = @as(u8, @truncate((x66 & 0xff))); const x68 = (x66 >> 8); - const x69 = @truncate(u8, (x68 & 0xff)); + const x69 = @as(u8, @truncate((x68 & 0xff))); const x70 = (x68 >> 8); - const x71 = @truncate(u8, (x70 & 0xff)); + const x71 = @as(u8, @truncate((x70 & 0xff))); const x72 = (x70 >> 8); - const x73 = @truncate(u8, (x72 & 0xff)); + const x73 = @as(u8, @truncate((x72 & 0xff))); const x74 = (x72 >> 8); - const x75 = @truncate(u8, (x74 & 0xff)); - const x76 = @truncate(u8, (x74 >> 8)); - const x77 = @truncate(u8, (x1 & 0xff)); + const x75 = @as(u8, @truncate((x74 & 0xff))); + const x76 = @as(u8, @truncate((x74 >> 8))); + const x77 = @as(u8, @truncate((x1 & 0xff))); const x78 = (x1 >> 8); - const x79 = @truncate(u8, (x78 & 0xff)); + const x79 = @as(u8, @truncate((x78 & 0xff))); const x80 = (x78 >> 8); - const x81 = @truncate(u8, (x80 & 0xff)); + const x81 = @as(u8, @truncate((x80 & 0xff))); const x82 = (x80 >> 8); - const x83 = @truncate(u8, (x82 & 0xff)); + const x83 = @as(u8, @truncate((x82 & 0xff))); const x84 = (x82 >> 8); - const x85 = @truncate(u8, (x84 & 0xff)); + const x85 = @as(u8, @truncate((x84 & 0xff))); const x86 = (x84 >> 8); - const x87 = @truncate(u8, (x86 & 0xff)); + const x87 = @as(u8, @truncate((x86 & 0xff))); const x88 = (x86 >> 8); - const x89 = @truncate(u8, (x88 & 0xff)); - const x90 = @truncate(u8, (x88 >> 8)); + const x89 = @as(u8, @truncate((x88 & 0xff))); + const x90 = @as(u8, @truncate((x88 >> 8))); out1[0] = x7; out1[1] = x9; out1[2] = x11; @@ -3246,7 +3246,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[ var x1: u64 = undefined; var x2: u1 = undefined; addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1); - const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1))); + const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1)))); var x4: u64 = undefined; var x5: u1 = undefined; addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1); @@ -3408,7 +3408,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[ cmovznzU64(&x102, x3, (arg5[4]), x94); var x103: u64 = undefined; cmovznzU64(&x103, x3, (arg5[5]), x96); - const x104 = @truncate(u1, (x28 & 0x1)); + const x104 = @as(u1, @truncate((x28 & 0x1))); var x105: u64 = undefined; cmovznzU64(&x105, x104, 0x0, x7); var x106: u64 = undefined; diff --git a/lib/std/crypto/pcurves/p384/p384_scalar_64.zig b/lib/std/crypto/pcurves/p384/p384_scalar_64.zig index 0ce7727148..fc787ba7b9 100644 --- a/lib/std/crypto/pcurves/p384/p384_scalar_64.zig +++ b/lib/std/crypto/pcurves/p384/p384_scalar_64.zig @@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); - out1.* = @truncate(u64, x); - out2.* = @truncate(u64, x >> 64); + out1.* = @as(u64, @truncate(x)); + out2.* = @as(u64, @truncate(x >> 64)); } /// The function cmovznzU64 is a single-word conditional move. 
@@ -2982,90 +2982,90 @@ pub fn toBytes(out1: *[48]u8, arg1: [6]u64) void { const x4 = (arg1[2]); const x5 = (arg1[1]); const x6 = (arg1[0]); - const x7 = @truncate(u8, (x6 & 0xff)); + const x7 = @as(u8, @truncate((x6 & 0xff))); const x8 = (x6 >> 8); - const x9 = @truncate(u8, (x8 & 0xff)); + const x9 = @as(u8, @truncate((x8 & 0xff))); const x10 = (x8 >> 8); - const x11 = @truncate(u8, (x10 & 0xff)); + const x11 = @as(u8, @truncate((x10 & 0xff))); const x12 = (x10 >> 8); - const x13 = @truncate(u8, (x12 & 0xff)); + const x13 = @as(u8, @truncate((x12 & 0xff))); const x14 = (x12 >> 8); - const x15 = @truncate(u8, (x14 & 0xff)); + const x15 = @as(u8, @truncate((x14 & 0xff))); const x16 = (x14 >> 8); - const x17 = @truncate(u8, (x16 & 0xff)); + const x17 = @as(u8, @truncate((x16 & 0xff))); const x18 = (x16 >> 8); - const x19 = @truncate(u8, (x18 & 0xff)); - const x20 = @truncate(u8, (x18 >> 8)); - const x21 = @truncate(u8, (x5 & 0xff)); + const x19 = @as(u8, @truncate((x18 & 0xff))); + const x20 = @as(u8, @truncate((x18 >> 8))); + const x21 = @as(u8, @truncate((x5 & 0xff))); const x22 = (x5 >> 8); - const x23 = @truncate(u8, (x22 & 0xff)); + const x23 = @as(u8, @truncate((x22 & 0xff))); const x24 = (x22 >> 8); - const x25 = @truncate(u8, (x24 & 0xff)); + const x25 = @as(u8, @truncate((x24 & 0xff))); const x26 = (x24 >> 8); - const x27 = @truncate(u8, (x26 & 0xff)); + const x27 = @as(u8, @truncate((x26 & 0xff))); const x28 = (x26 >> 8); - const x29 = @truncate(u8, (x28 & 0xff)); + const x29 = @as(u8, @truncate((x28 & 0xff))); const x30 = (x28 >> 8); - const x31 = @truncate(u8, (x30 & 0xff)); + const x31 = @as(u8, @truncate((x30 & 0xff))); const x32 = (x30 >> 8); - const x33 = @truncate(u8, (x32 & 0xff)); - const x34 = @truncate(u8, (x32 >> 8)); - const x35 = @truncate(u8, (x4 & 0xff)); + const x33 = @as(u8, @truncate((x32 & 0xff))); + const x34 = @as(u8, @truncate((x32 >> 8))); + const x35 = @as(u8, @truncate((x4 & 0xff))); const x36 = (x4 >> 8); - const x37 = @truncate(u8, (x36 & 0xff)); + const x37 = @as(u8, @truncate((x36 & 0xff))); const x38 = (x36 >> 8); - const x39 = @truncate(u8, (x38 & 0xff)); + const x39 = @as(u8, @truncate((x38 & 0xff))); const x40 = (x38 >> 8); - const x41 = @truncate(u8, (x40 & 0xff)); + const x41 = @as(u8, @truncate((x40 & 0xff))); const x42 = (x40 >> 8); - const x43 = @truncate(u8, (x42 & 0xff)); + const x43 = @as(u8, @truncate((x42 & 0xff))); const x44 = (x42 >> 8); - const x45 = @truncate(u8, (x44 & 0xff)); + const x45 = @as(u8, @truncate((x44 & 0xff))); const x46 = (x44 >> 8); - const x47 = @truncate(u8, (x46 & 0xff)); - const x48 = @truncate(u8, (x46 >> 8)); - const x49 = @truncate(u8, (x3 & 0xff)); + const x47 = @as(u8, @truncate((x46 & 0xff))); + const x48 = @as(u8, @truncate((x46 >> 8))); + const x49 = @as(u8, @truncate((x3 & 0xff))); const x50 = (x3 >> 8); - const x51 = @truncate(u8, (x50 & 0xff)); + const x51 = @as(u8, @truncate((x50 & 0xff))); const x52 = (x50 >> 8); - const x53 = @truncate(u8, (x52 & 0xff)); + const x53 = @as(u8, @truncate((x52 & 0xff))); const x54 = (x52 >> 8); - const x55 = @truncate(u8, (x54 & 0xff)); + const x55 = @as(u8, @truncate((x54 & 0xff))); const x56 = (x54 >> 8); - const x57 = @truncate(u8, (x56 & 0xff)); + const x57 = @as(u8, @truncate((x56 & 0xff))); const x58 = (x56 >> 8); - const x59 = @truncate(u8, (x58 & 0xff)); + const x59 = @as(u8, @truncate((x58 & 0xff))); const x60 = (x58 >> 8); - const x61 = @truncate(u8, (x60 & 0xff)); - const x62 = @truncate(u8, (x60 >> 8)); - const x63 = @truncate(u8, (x2 & 0xff)); + const x61 = 
@as(u8, @truncate((x60 & 0xff))); + const x62 = @as(u8, @truncate((x60 >> 8))); + const x63 = @as(u8, @truncate((x2 & 0xff))); const x64 = (x2 >> 8); - const x65 = @truncate(u8, (x64 & 0xff)); + const x65 = @as(u8, @truncate((x64 & 0xff))); const x66 = (x64 >> 8); - const x67 = @truncate(u8, (x66 & 0xff)); + const x67 = @as(u8, @truncate((x66 & 0xff))); const x68 = (x66 >> 8); - const x69 = @truncate(u8, (x68 & 0xff)); + const x69 = @as(u8, @truncate((x68 & 0xff))); const x70 = (x68 >> 8); - const x71 = @truncate(u8, (x70 & 0xff)); + const x71 = @as(u8, @truncate((x70 & 0xff))); const x72 = (x70 >> 8); - const x73 = @truncate(u8, (x72 & 0xff)); + const x73 = @as(u8, @truncate((x72 & 0xff))); const x74 = (x72 >> 8); - const x75 = @truncate(u8, (x74 & 0xff)); - const x76 = @truncate(u8, (x74 >> 8)); - const x77 = @truncate(u8, (x1 & 0xff)); + const x75 = @as(u8, @truncate((x74 & 0xff))); + const x76 = @as(u8, @truncate((x74 >> 8))); + const x77 = @as(u8, @truncate((x1 & 0xff))); const x78 = (x1 >> 8); - const x79 = @truncate(u8, (x78 & 0xff)); + const x79 = @as(u8, @truncate((x78 & 0xff))); const x80 = (x78 >> 8); - const x81 = @truncate(u8, (x80 & 0xff)); + const x81 = @as(u8, @truncate((x80 & 0xff))); const x82 = (x80 >> 8); - const x83 = @truncate(u8, (x82 & 0xff)); + const x83 = @as(u8, @truncate((x82 & 0xff))); const x84 = (x82 >> 8); - const x85 = @truncate(u8, (x84 & 0xff)); + const x85 = @as(u8, @truncate((x84 & 0xff))); const x86 = (x84 >> 8); - const x87 = @truncate(u8, (x86 & 0xff)); + const x87 = @as(u8, @truncate((x86 & 0xff))); const x88 = (x86 >> 8); - const x89 = @truncate(u8, (x88 & 0xff)); - const x90 = @truncate(u8, (x88 >> 8)); + const x89 = @as(u8, @truncate((x88 & 0xff))); + const x90 = @as(u8, @truncate((x88 >> 8))); out1[0] = x7; out1[1] = x9; out1[2] = x11; @@ -3300,7 +3300,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[ var x1: u64 = undefined; var x2: u1 = undefined; addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1); - const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1))); + const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1)))); var x4: u64 = undefined; var x5: u1 = undefined; addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1); @@ -3462,7 +3462,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[ cmovznzU64(&x102, x3, (arg5[4]), x94); var x103: u64 = undefined; cmovznzU64(&x103, x3, (arg5[5]), x96); - const x104 = @truncate(u1, (x28 & 0x1)); + const x104 = @as(u1, @truncate((x28 & 0x1))); var x105: u64 = undefined; cmovznzU64(&x105, x104, 0x0, x7); var x106: u64 = undefined; diff --git a/lib/std/crypto/pcurves/secp256k1.zig b/lib/std/crypto/pcurves/secp256k1.zig index f0b086f974..cd7f1faf75 100644 --- a/lib/std/crypto/pcurves/secp256k1.zig +++ b/lib/std/crypto/pcurves/secp256k1.zig @@ -67,8 +67,8 @@ pub const Secp256k1 = struct { const t1 = math.mulWide(u256, k, 21949224512762693861512883645436906316123769664773102907882521278123970637873); const t2 = math.mulWide(u256, k, 103246583619904461035481197785446227098457807945486720222659797044629401272177); - const c1 = @truncate(u128, t1 >> 384) + @truncate(u1, t1 >> 383); - const c2 = @truncate(u128, t2 >> 384) + @truncate(u1, t2 >> 383); + const c1 = @as(u128, @truncate(t1 >> 384)) + @as(u1, @truncate(t1 >> 383)); + const c2 = @as(u128, @truncate(t2 >> 384)) + @as(u1, @truncate(t2 >> 383)); var buf: [32]u8 = undefined; @@ -346,7 +346,7 @@ pub const Secp256k1 = struct { var t = Secp256k1.identityElement; comptime 
var i: u8 = 1; inline while (i < pc.len) : (i += 1) { - t.cMov(pc[i], @truncate(u1, (@as(usize, b ^ i) -% 1) >> 8)); + t.cMov(pc[i], @as(u1, @truncate((@as(usize, b ^ i) -% 1) >> 8))); } return t; } @@ -354,8 +354,8 @@ pub const Secp256k1 = struct { fn slide(s: [32]u8) [2 * 32 + 1]i8 { var e: [2 * 32 + 1]i8 = undefined; for (s, 0..) |x, i| { - e[i * 2 + 0] = @as(i8, @truncate(u4, x)); - e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4)); + e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x))); + e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4))); } // Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7 var carry: i8 = 0; @@ -379,9 +379,9 @@ pub const Secp256k1 = struct { while (true) : (pos -= 1) { const slot = e[pos]; if (slot > 0) { - q = q.add(pc[@intCast(usize, slot)]); + q = q.add(pc[@as(usize, @intCast(slot))]); } else if (slot < 0) { - q = q.sub(pc[@intCast(usize, -slot)]); + q = q.sub(pc[@as(usize, @intCast(-slot))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); @@ -394,7 +394,7 @@ pub const Secp256k1 = struct { var q = Secp256k1.identityElement; var pos: usize = 252; while (true) : (pos -= 4) { - const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos))); + const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos))))); if (vartime) { if (slot != 0) { q = q.add(pc[slot]); @@ -482,15 +482,15 @@ pub const Secp256k1 = struct { while (true) : (pos -= 1) { const slot1 = e1[pos]; if (slot1 > 0) { - q = q.add(pc1[@intCast(usize, slot1)]); + q = q.add(pc1[@as(usize, @intCast(slot1))]); } else if (slot1 < 0) { - q = q.sub(pc1[@intCast(usize, -slot1)]); + q = q.sub(pc1[@as(usize, @intCast(-slot1))]); } const slot2 = e2[pos]; if (slot2 > 0) { - q = q.add(pc2[@intCast(usize, slot2)]); + q = q.add(pc2[@as(usize, @intCast(slot2))]); } else if (slot2 < 0) { - q = q.sub(pc2[@intCast(usize, -slot2)]); + q = q.sub(pc2[@as(usize, @intCast(-slot2))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); @@ -523,15 +523,15 @@ pub const Secp256k1 = struct { while (true) : (pos -= 1) { const slot1 = e1[pos]; if (slot1 > 0) { - q = q.add(pc1[@intCast(usize, slot1)]); + q = q.add(pc1[@as(usize, @intCast(slot1))]); } else if (slot1 < 0) { - q = q.sub(pc1[@intCast(usize, -slot1)]); + q = q.sub(pc1[@as(usize, @intCast(-slot1))]); } const slot2 = e2[pos]; if (slot2 > 0) { - q = q.add(pc2[@intCast(usize, slot2)]); + q = q.add(pc2[@as(usize, @intCast(slot2))]); } else if (slot2 < 0) { - q = q.sub(pc2[@intCast(usize, -slot2)]); + q = q.sub(pc2[@as(usize, @intCast(-slot2))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); diff --git a/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig b/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig index 5643ea88d5..ae3e97c619 100644 --- a/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig +++ b/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig @@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); - out1.* = @truncate(u64, x); - out2.* = @truncate(u64, x >> 64); + out1.* = @as(u64, @truncate(x)); + out2.* = @as(u64, @truncate(x >> 64)); } /// The function cmovznzU64 is a single-word conditional move. 
@@ -1488,62 +1488,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void { const x2 = (arg1[2]); const x3 = (arg1[1]); const x4 = (arg1[0]); - const x5 = @truncate(u8, (x4 & 0xff)); + const x5 = @as(u8, @truncate((x4 & 0xff))); const x6 = (x4 >> 8); - const x7 = @truncate(u8, (x6 & 0xff)); + const x7 = @as(u8, @truncate((x6 & 0xff))); const x8 = (x6 >> 8); - const x9 = @truncate(u8, (x8 & 0xff)); + const x9 = @as(u8, @truncate((x8 & 0xff))); const x10 = (x8 >> 8); - const x11 = @truncate(u8, (x10 & 0xff)); + const x11 = @as(u8, @truncate((x10 & 0xff))); const x12 = (x10 >> 8); - const x13 = @truncate(u8, (x12 & 0xff)); + const x13 = @as(u8, @truncate((x12 & 0xff))); const x14 = (x12 >> 8); - const x15 = @truncate(u8, (x14 & 0xff)); + const x15 = @as(u8, @truncate((x14 & 0xff))); const x16 = (x14 >> 8); - const x17 = @truncate(u8, (x16 & 0xff)); - const x18 = @truncate(u8, (x16 >> 8)); - const x19 = @truncate(u8, (x3 & 0xff)); + const x17 = @as(u8, @truncate((x16 & 0xff))); + const x18 = @as(u8, @truncate((x16 >> 8))); + const x19 = @as(u8, @truncate((x3 & 0xff))); const x20 = (x3 >> 8); - const x21 = @truncate(u8, (x20 & 0xff)); + const x21 = @as(u8, @truncate((x20 & 0xff))); const x22 = (x20 >> 8); - const x23 = @truncate(u8, (x22 & 0xff)); + const x23 = @as(u8, @truncate((x22 & 0xff))); const x24 = (x22 >> 8); - const x25 = @truncate(u8, (x24 & 0xff)); + const x25 = @as(u8, @truncate((x24 & 0xff))); const x26 = (x24 >> 8); - const x27 = @truncate(u8, (x26 & 0xff)); + const x27 = @as(u8, @truncate((x26 & 0xff))); const x28 = (x26 >> 8); - const x29 = @truncate(u8, (x28 & 0xff)); + const x29 = @as(u8, @truncate((x28 & 0xff))); const x30 = (x28 >> 8); - const x31 = @truncate(u8, (x30 & 0xff)); - const x32 = @truncate(u8, (x30 >> 8)); - const x33 = @truncate(u8, (x2 & 0xff)); + const x31 = @as(u8, @truncate((x30 & 0xff))); + const x32 = @as(u8, @truncate((x30 >> 8))); + const x33 = @as(u8, @truncate((x2 & 0xff))); const x34 = (x2 >> 8); - const x35 = @truncate(u8, (x34 & 0xff)); + const x35 = @as(u8, @truncate((x34 & 0xff))); const x36 = (x34 >> 8); - const x37 = @truncate(u8, (x36 & 0xff)); + const x37 = @as(u8, @truncate((x36 & 0xff))); const x38 = (x36 >> 8); - const x39 = @truncate(u8, (x38 & 0xff)); + const x39 = @as(u8, @truncate((x38 & 0xff))); const x40 = (x38 >> 8); - const x41 = @truncate(u8, (x40 & 0xff)); + const x41 = @as(u8, @truncate((x40 & 0xff))); const x42 = (x40 >> 8); - const x43 = @truncate(u8, (x42 & 0xff)); + const x43 = @as(u8, @truncate((x42 & 0xff))); const x44 = (x42 >> 8); - const x45 = @truncate(u8, (x44 & 0xff)); - const x46 = @truncate(u8, (x44 >> 8)); - const x47 = @truncate(u8, (x1 & 0xff)); + const x45 = @as(u8, @truncate((x44 & 0xff))); + const x46 = @as(u8, @truncate((x44 >> 8))); + const x47 = @as(u8, @truncate((x1 & 0xff))); const x48 = (x1 >> 8); - const x49 = @truncate(u8, (x48 & 0xff)); + const x49 = @as(u8, @truncate((x48 & 0xff))); const x50 = (x48 >> 8); - const x51 = @truncate(u8, (x50 & 0xff)); + const x51 = @as(u8, @truncate((x50 & 0xff))); const x52 = (x50 >> 8); - const x53 = @truncate(u8, (x52 & 0xff)); + const x53 = @as(u8, @truncate((x52 & 0xff))); const x54 = (x52 >> 8); - const x55 = @truncate(u8, (x54 & 0xff)); + const x55 = @as(u8, @truncate((x54 & 0xff))); const x56 = (x54 >> 8); - const x57 = @truncate(u8, (x56 & 0xff)); + const x57 = @as(u8, @truncate((x56 & 0xff))); const x58 = (x56 >> 8); - const x59 = @truncate(u8, (x58 & 0xff)); - const x60 = @truncate(u8, (x58 >> 8)); + const x59 = @as(u8, @truncate((x58 & 0xff))); + const x60 = 
@as(u8, @truncate((x58 >> 8))); out1[0] = x5; out1[1] = x7; out1[2] = x9; @@ -1726,7 +1726,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ var x1: u64 = undefined; var x2: u1 = undefined; addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1); - const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1))); + const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1)))); var x4: u64 = undefined; var x5: u1 = undefined; addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1); @@ -1840,7 +1840,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ cmovznzU64(&x72, x3, (arg5[2]), x66); var x73: u64 = undefined; cmovznzU64(&x73, x3, (arg5[3]), x68); - const x74 = @truncate(u1, (x22 & 0x1)); + const x74 = @as(u1, @truncate((x22 & 0x1))); var x75: u64 = undefined; cmovznzU64(&x75, x74, 0x0, x7); var x76: u64 = undefined; diff --git a/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig b/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig index aca1bd3063..12c833bb33 100644 --- a/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig +++ b/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig @@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); - out1.* = @truncate(u64, x); - out2.* = @truncate(u64, x >> 64); + out1.* = @as(u64, @truncate(x)); + out2.* = @as(u64, @truncate(x >> 64)); } /// The function cmovznzU64 is a single-word conditional move. @@ -1548,62 +1548,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void { const x2 = (arg1[2]); const x3 = (arg1[1]); const x4 = (arg1[0]); - const x5 = @truncate(u8, (x4 & 0xff)); + const x5 = @as(u8, @truncate((x4 & 0xff))); const x6 = (x4 >> 8); - const x7 = @truncate(u8, (x6 & 0xff)); + const x7 = @as(u8, @truncate((x6 & 0xff))); const x8 = (x6 >> 8); - const x9 = @truncate(u8, (x8 & 0xff)); + const x9 = @as(u8, @truncate((x8 & 0xff))); const x10 = (x8 >> 8); - const x11 = @truncate(u8, (x10 & 0xff)); + const x11 = @as(u8, @truncate((x10 & 0xff))); const x12 = (x10 >> 8); - const x13 = @truncate(u8, (x12 & 0xff)); + const x13 = @as(u8, @truncate((x12 & 0xff))); const x14 = (x12 >> 8); - const x15 = @truncate(u8, (x14 & 0xff)); + const x15 = @as(u8, @truncate((x14 & 0xff))); const x16 = (x14 >> 8); - const x17 = @truncate(u8, (x16 & 0xff)); - const x18 = @truncate(u8, (x16 >> 8)); - const x19 = @truncate(u8, (x3 & 0xff)); + const x17 = @as(u8, @truncate((x16 & 0xff))); + const x18 = @as(u8, @truncate((x16 >> 8))); + const x19 = @as(u8, @truncate((x3 & 0xff))); const x20 = (x3 >> 8); - const x21 = @truncate(u8, (x20 & 0xff)); + const x21 = @as(u8, @truncate((x20 & 0xff))); const x22 = (x20 >> 8); - const x23 = @truncate(u8, (x22 & 0xff)); + const x23 = @as(u8, @truncate((x22 & 0xff))); const x24 = (x22 >> 8); - const x25 = @truncate(u8, (x24 & 0xff)); + const x25 = @as(u8, @truncate((x24 & 0xff))); const x26 = (x24 >> 8); - const x27 = @truncate(u8, (x26 & 0xff)); + const x27 = @as(u8, @truncate((x26 & 0xff))); const x28 = (x26 >> 8); - const x29 = @truncate(u8, (x28 & 0xff)); + const x29 = @as(u8, @truncate((x28 & 0xff))); const x30 = (x28 >> 8); - const x31 = @truncate(u8, (x30 & 0xff)); - const x32 = @truncate(u8, (x30 >> 8)); - const x33 = @truncate(u8, (x2 & 0xff)); + const x31 = @as(u8, @truncate((x30 & 0xff))); + const x32 = @as(u8, @truncate((x30 >> 8))); + const x33 = @as(u8, @truncate((x2 & 0xff))); const x34 = (x2 >> 8); - const x35 = 
@truncate(u8, (x34 & 0xff)); + const x35 = @as(u8, @truncate((x34 & 0xff))); const x36 = (x34 >> 8); - const x37 = @truncate(u8, (x36 & 0xff)); + const x37 = @as(u8, @truncate((x36 & 0xff))); const x38 = (x36 >> 8); - const x39 = @truncate(u8, (x38 & 0xff)); + const x39 = @as(u8, @truncate((x38 & 0xff))); const x40 = (x38 >> 8); - const x41 = @truncate(u8, (x40 & 0xff)); + const x41 = @as(u8, @truncate((x40 & 0xff))); const x42 = (x40 >> 8); - const x43 = @truncate(u8, (x42 & 0xff)); + const x43 = @as(u8, @truncate((x42 & 0xff))); const x44 = (x42 >> 8); - const x45 = @truncate(u8, (x44 & 0xff)); - const x46 = @truncate(u8, (x44 >> 8)); - const x47 = @truncate(u8, (x1 & 0xff)); + const x45 = @as(u8, @truncate((x44 & 0xff))); + const x46 = @as(u8, @truncate((x44 >> 8))); + const x47 = @as(u8, @truncate((x1 & 0xff))); const x48 = (x1 >> 8); - const x49 = @truncate(u8, (x48 & 0xff)); + const x49 = @as(u8, @truncate((x48 & 0xff))); const x50 = (x48 >> 8); - const x51 = @truncate(u8, (x50 & 0xff)); + const x51 = @as(u8, @truncate((x50 & 0xff))); const x52 = (x50 >> 8); - const x53 = @truncate(u8, (x52 & 0xff)); + const x53 = @as(u8, @truncate((x52 & 0xff))); const x54 = (x52 >> 8); - const x55 = @truncate(u8, (x54 & 0xff)); + const x55 = @as(u8, @truncate((x54 & 0xff))); const x56 = (x54 >> 8); - const x57 = @truncate(u8, (x56 & 0xff)); + const x57 = @as(u8, @truncate((x56 & 0xff))); const x58 = (x56 >> 8); - const x59 = @truncate(u8, (x58 & 0xff)); - const x60 = @truncate(u8, (x58 >> 8)); + const x59 = @as(u8, @truncate((x58 & 0xff))); + const x60 = @as(u8, @truncate((x58 >> 8))); out1[0] = x5; out1[1] = x7; out1[2] = x9; @@ -1786,7 +1786,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ var x1: u64 = undefined; var x2: u1 = undefined; addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1); - const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1))); + const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1)))); var x4: u64 = undefined; var x5: u1 = undefined; addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1); @@ -1900,7 +1900,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ cmovznzU64(&x72, x3, (arg5[2]), x66); var x73: u64 = undefined; cmovznzU64(&x73, x3, (arg5[3]), x68); - const x74 = @truncate(u1, (x22 & 0x1)); + const x74 = @as(u1, @truncate((x22 & 0x1))); var x75: u64 = undefined; cmovznzU64(&x75, x74, 0x0, x7); var x76: u64 = undefined; diff --git a/lib/std/crypto/phc_encoding.zig b/lib/std/crypto/phc_encoding.zig index 1eeee39a5a..fecd7f1239 100644 --- a/lib/std/crypto/phc_encoding.zig +++ b/lib/std/crypto/phc_encoding.zig @@ -193,7 +193,7 @@ pub fn serialize(params: anytype, str: []u8) Error![]const u8 { pub fn calcSize(params: anytype) usize { var buf = io.countingWriter(io.null_writer); serializeTo(params, buf.writer()) catch unreachable; - return @intCast(usize, buf.bytes_written); + return @as(usize, @intCast(buf.bytes_written)); } fn serializeTo(params: anytype, out: anytype) !void { diff --git a/lib/std/crypto/poly1305.zig b/lib/std/crypto/poly1305.zig index 51e1c2ab24..5bcb75169d 100644 --- a/lib/std/crypto/poly1305.zig +++ b/lib/std/crypto/poly1305.zig @@ -76,12 +76,12 @@ pub const Poly1305 = struct { const m1 = h1r0 +% h0r1; const m2 = h2r0 +% h1r1; - const t0 = @truncate(u64, m0); - v = @addWithOverflow(@truncate(u64, m1), @truncate(u64, m0 >> 64)); + const t0 = @as(u64, @truncate(m0)); + v = @addWithOverflow(@as(u64, @truncate(m1)), @as(u64, @truncate(m0 >> 64))); const t1 = v[0]; - 
v = add(@truncate(u64, m2), @truncate(u64, m1 >> 64), v[1]); + v = add(@as(u64, @truncate(m2)), @as(u64, @truncate(m1 >> 64)), v[1]); const t2 = v[0]; - v = add(@truncate(u64, m3), @truncate(u64, m2 >> 64), v[1]); + v = add(@as(u64, @truncate(m3)), @as(u64, @truncate(m2 >> 64)), v[1]); const t3 = v[0]; // Partial reduction @@ -98,9 +98,9 @@ pub const Poly1305 = struct { h1 = v[0]; h2 +%= v[1]; const cc = (cclo | (@as(u128, cchi) << 64)) >> 2; - v = @addWithOverflow(h0, @truncate(u64, cc)); + v = @addWithOverflow(h0, @as(u64, @truncate(cc))); h0 = v[0]; - v = add(h1, @truncate(u64, cc >> 64), v[1]); + v = add(h1, @as(u64, @truncate(cc >> 64)), v[1]); h1 = v[0]; h2 +%= v[1]; } @@ -185,7 +185,7 @@ pub const Poly1305 = struct { mem.writeIntLittle(u64, out[0..8], st.h[0]); mem.writeIntLittle(u64, out[8..16], st.h[1]); - utils.secureZero(u8, @ptrCast([*]u8, st)[0..@sizeOf(Poly1305)]); + utils.secureZero(u8, @as([*]u8, @ptrCast(st))[0..@sizeOf(Poly1305)]); } pub fn create(out: *[mac_length]u8, msg: []const u8, key: *const [key_length]u8) void { diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig index c8a639ad0b..231f941099 100644 --- a/lib/std/crypto/salsa20.zig +++ b/lib/std/crypto/salsa20.zig @@ -337,8 +337,8 @@ pub fn Salsa(comptime rounds: comptime_int) type { var d: [4]u32 = undefined; d[0] = mem.readIntLittle(u32, nonce[0..4]); d[1] = mem.readIntLittle(u32, nonce[4..8]); - d[2] = @truncate(u32, counter); - d[3] = @truncate(u32, counter >> 32); + d[2] = @as(u32, @truncate(counter)); + d[3] = @as(u32, @truncate(counter >> 32)); SalsaImpl(rounds).salsaXor(out, in, keyToWords(key), d); } }; diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig index 97dd9b95d0..8745a3b34e 100644 --- a/lib/std/crypto/scrypt.zig +++ b/lib/std/crypto/scrypt.zig @@ -73,11 +73,11 @@ fn salsaXor(tmp: *align(16) [16]u32, in: []align(16) const u32, out: []align(16) } fn blockMix(tmp: *align(16) [16]u32, in: []align(16) const u32, out: []align(16) u32, r: u30) void { - blockCopy(tmp, @alignCast(16, in[(2 * r - 1) * 16 ..]), 1); + blockCopy(tmp, @alignCast(in[(2 * r - 1) * 16 ..]), 1); var i: usize = 0; while (i < 2 * r) : (i += 2) { - salsaXor(tmp, @alignCast(16, in[i * 16 ..]), @alignCast(16, out[i * 8 ..])); - salsaXor(tmp, @alignCast(16, in[i * 16 + 16 ..]), @alignCast(16, out[i * 8 + r * 16 ..])); + salsaXor(tmp, @alignCast(in[i * 16 ..]), @alignCast(out[i * 8 ..])); + salsaXor(tmp, @alignCast(in[i * 16 + 16 ..]), @alignCast(out[i * 8 + r * 16 ..])); } } @@ -87,8 +87,8 @@ fn integerify(b: []align(16) const u32, r: u30) u64 { } fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16) u32) void { - var x = @alignCast(16, xy[0 .. 32 * r]); - var y = @alignCast(16, xy[32 * r ..]); + var x: []align(16) u32 = @alignCast(xy[0 .. 32 * r]); + var y: []align(16) u32 = @alignCast(xy[32 * r ..]); for (x, 0..) 
|*v1, j| { v1.* = mem.readIntSliceLittle(u32, b[4 * j ..]); @@ -97,21 +97,21 @@ fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16) var tmp: [16]u32 align(16) = undefined; var i: usize = 0; while (i < n) : (i += 2) { - blockCopy(@alignCast(16, v[i * (32 * r) ..]), x, 2 * r); + blockCopy(@alignCast(v[i * (32 * r) ..]), x, 2 * r); blockMix(&tmp, x, y, r); - blockCopy(@alignCast(16, v[(i + 1) * (32 * r) ..]), y, 2 * r); + blockCopy(@alignCast(v[(i + 1) * (32 * r) ..]), y, 2 * r); blockMix(&tmp, y, x, r); } i = 0; while (i < n) : (i += 2) { - var j = @intCast(usize, integerify(x, r) & (n - 1)); - blockXor(x, @alignCast(16, v[j * (32 * r) ..]), 2 * r); + var j = @as(usize, @intCast(integerify(x, r) & (n - 1))); + blockXor(x, @alignCast(v[j * (32 * r) ..]), 2 * r); blockMix(&tmp, x, y, r); - j = @intCast(usize, integerify(y, r) & (n - 1)); - blockXor(y, @alignCast(16, v[j * (32 * r) ..]), 2 * r); + j = @as(usize, @intCast(integerify(y, r) & (n - 1))); + blockXor(y, @alignCast(v[j * (32 * r) ..]), 2 * r); blockMix(&tmp, y, x, r); } @@ -147,12 +147,12 @@ pub const Params = struct { const r: u30 = 8; if (ops < mem_limit / 32) { const max_n = ops / (r * 4); - return Self{ .r = r, .p = 1, .ln = @intCast(u6, math.log2(max_n)) }; + return Self{ .r = r, .p = 1, .ln = @as(u6, @intCast(math.log2(max_n))) }; } else { - const max_n = mem_limit / (@intCast(usize, r) * 128); - const ln = @intCast(u6, math.log2(max_n)); + const max_n = mem_limit / (@as(usize, @intCast(r)) * 128); + const ln = @as(u6, @intCast(math.log2(max_n))); const max_rp = @min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln)); - return Self{ .r = r, .p = @intCast(u30, max_rp / @as(u64, r)), .ln = ln }; + return Self{ .r = r, .p = @as(u30, @intCast(max_rp / @as(u64, r))), .ln = ln }; } } }; @@ -185,7 +185,7 @@ pub fn kdf( const n64 = @as(u64, 1) << params.ln; if (n64 > max_size) return KdfError.WeakParameters; - const n = @intCast(usize, n64); + const n = @as(usize, @intCast(n64)); if (@as(u64, params.r) * @as(u64, params.p) >= 1 << 30 or params.r > max_int / 128 / @as(u64, params.p) or params.r > max_int / 256 or @@ -201,7 +201,7 @@ pub fn kdf( try pwhash.pbkdf2(dk, password, salt, 1, HmacSha256); var i: u32 = 0; while (i < params.p) : (i += 1) { - smix(@alignCast(16, dk[i * 128 * params.r ..]), params.r, n, v, xy); + smix(@alignCast(dk[i * 128 * params.r ..]), params.r, n, v, xy); } try pwhash.pbkdf2(derived_key, password, dk, 1, HmacSha256); } @@ -309,7 +309,7 @@ const crypt_format = struct { pub fn calcSize(params: anytype) usize { var buf = io.countingWriter(io.null_writer); serializeTo(params, buf.writer()) catch unreachable; - return @intCast(usize, buf.bytes_written); + return @as(usize, @intCast(buf.bytes_written)); } fn serializeTo(params: anytype, out: anytype) !void { @@ -343,7 +343,7 @@ const crypt_format = struct { fn intEncode(dst: []u8, src: anytype) void { var n = src; for (dst) |*x| { - x.* = map64[@truncate(u6, n)]; + x.* = map64[@as(u6, @truncate(n))]; n = math.shr(@TypeOf(src), n, 6); } } @@ -352,7 +352,7 @@ const crypt_format = struct { var v: T = 0; for (src, 0..) |x, i| { const vi = mem.indexOfScalar(u8, &map64, x) orelse return EncodingError.InvalidEncoding; - v |= @intCast(T, vi) << @intCast(math.Log2Int(T), i * 6); + v |= @as(T, @intCast(vi)) << @as(math.Log2Int(T), @intCast(i * 6)); } return v; } @@ -366,10 +366,10 @@ const crypt_format = struct { const leftover = src[i * 4 ..]; var v: u24 = 0; for (leftover, 0..) 
|_, j| { - v |= @as(u24, try intDecode(u6, leftover[j..][0..1])) << @intCast(u5, j * 6); + v |= @as(u24, try intDecode(u6, leftover[j..][0..1])) << @as(u5, @intCast(j * 6)); } for (dst[i * 3 ..], 0..) |*x, j| { - x.* = @truncate(u8, v >> @intCast(u5, j * 8)); + x.* = @as(u8, @truncate(v >> @as(u5, @intCast(j * 8)))); } } @@ -382,7 +382,7 @@ const crypt_format = struct { const leftover = src[i * 3 ..]; var v: u24 = 0; for (leftover, 0..) |x, j| { - v |= @as(u24, x) << @intCast(u5, j * 8); + v |= @as(u24, x) << @as(u5, @intCast(j * 8)); } intEncode(dst[i * 4 ..], v); } diff --git a/lib/std/crypto/sha1.zig b/lib/std/crypto/sha1.zig index 1f5f3eaae2..82e23e0647 100644 --- a/lib/std/crypto/sha1.zig +++ b/lib/std/crypto/sha1.zig @@ -75,7 +75,7 @@ pub const Sha1 = struct { // Copy any remainder for next pass. @memcpy(d.buf[d.buf_len..][0 .. b.len - off], b[off..]); - d.buf_len += @intCast(u8, b[off..].len); + d.buf_len += @as(u8, @intCast(b[off..].len)); d.total_len += b.len; } @@ -97,9 +97,9 @@ pub const Sha1 = struct { // Append message length. var i: usize = 1; var len = d.total_len >> 5; - d.buf[63] = @intCast(u8, d.total_len & 0x1f) << 3; + d.buf[63] = @as(u8, @intCast(d.total_len & 0x1f)) << 3; while (i < 8) : (i += 1) { - d.buf[63 - i] = @intCast(u8, len & 0xff); + d.buf[63 - i] = @as(u8, @intCast(len & 0xff)); len >>= 8; } diff --git a/lib/std/crypto/sha2.zig b/lib/std/crypto/sha2.zig index bd5a7cc5d4..ce543d3906 100644 --- a/lib/std/crypto/sha2.zig +++ b/lib/std/crypto/sha2.zig @@ -132,7 +132,7 @@ fn Sha2x32(comptime params: Sha2Params32) type { // Copy any remainder for next pass. const b_slice = b[off..]; @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice); - d.buf_len += @intCast(u8, b[off..].len); + d.buf_len += @as(u8, @intCast(b[off..].len)); d.total_len += b.len; } @@ -159,9 +159,9 @@ fn Sha2x32(comptime params: Sha2Params32) type { // Append message length. var i: usize = 1; var len = d.total_len >> 5; - d.buf[63] = @intCast(u8, d.total_len & 0x1f) << 3; + d.buf[63] = @as(u8, @intCast(d.total_len & 0x1f)) << 3; while (i < 8) : (i += 1) { - d.buf[63 - i] = @intCast(u8, len & 0xff); + d.buf[63 - i] = @as(u8, @intCast(len & 0xff)); len >>= 8; } @@ -194,7 +194,7 @@ fn Sha2x32(comptime params: Sha2Params32) type { fn round(d: *Self, b: *const [64]u8) void { var s: [64]u32 align(16) = undefined; - for (@ptrCast(*align(1) const [16]u32, b), 0..) |*elem, i| { + for (@as(*align(1) const [16]u32, @ptrCast(b)), 0..) 
|*elem, i| { s[i] = mem.readIntBig(u32, mem.asBytes(elem)); } @@ -203,7 +203,7 @@ fn Sha2x32(comptime params: Sha2Params32) type { .aarch64 => if (builtin.zig_backend != .stage2_c and comptime std.Target.aarch64.featureSetHas(builtin.cpu.features, .sha2)) { var x: v4u32 = d.s[0..4].*; var y: v4u32 = d.s[4..8].*; - const s_v = @ptrCast(*[16]v4u32, &s); + const s_v = @as(*[16]v4u32, @ptrCast(&s)); comptime var k: u8 = 0; inline while (k < 16) : (k += 1) { @@ -241,7 +241,7 @@ fn Sha2x32(comptime params: Sha2Params32) type { .x86_64 => if (builtin.zig_backend != .stage2_c and comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sha)) { var x: v4u32 = [_]u32{ d.s[5], d.s[4], d.s[1], d.s[0] }; var y: v4u32 = [_]u32{ d.s[7], d.s[6], d.s[3], d.s[2] }; - const s_v = @ptrCast(*[16]v4u32, &s); + const s_v = @as(*[16]v4u32, @ptrCast(&s)); comptime var k: u8 = 0; inline while (k < 16) : (k += 1) { @@ -273,7 +273,7 @@ fn Sha2x32(comptime params: Sha2Params32) type { : [x] "=x" (-> v4u32), : [_] "0" (x), [y] "x" (y), - [_] "{xmm0}" (@bitCast(v4u32, @bitCast(u128, w) >> 64)), + [_] "{xmm0}" (@as(v4u32, @bitCast(@as(u128, @bitCast(w)) >> 64))), ); } @@ -624,7 +624,7 @@ fn Sha2x64(comptime params: Sha2Params64) type { // Copy any remainder for next pass. const b_slice = b[off..]; @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice); - d.buf_len += @intCast(u8, b[off..].len); + d.buf_len += @as(u8, @intCast(b[off..].len)); d.total_len += b.len; } @@ -651,9 +651,9 @@ fn Sha2x64(comptime params: Sha2Params64) type { // Append message length. var i: usize = 1; var len = d.total_len >> 5; - d.buf[127] = @intCast(u8, d.total_len & 0x1f) << 3; + d.buf[127] = @as(u8, @intCast(d.total_len & 0x1f)) << 3; while (i < 16) : (i += 1) { - d.buf[127 - i] = @intCast(u8, len & 0xff); + d.buf[127 - i] = @as(u8, @intCast(len & 0xff)); len >>= 8; } diff --git a/lib/std/crypto/siphash.zig b/lib/std/crypto/siphash.zig index 70f4f2fd53..4399587397 100644 --- a/lib/std/crypto/siphash.zig +++ b/lib/std/crypto/siphash.zig @@ -83,13 +83,13 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round @call(.always_inline, round, .{ self, blob }); } - self.msg_len +%= @truncate(u8, b.len); + self.msg_len +%= @as(u8, @truncate(b.len)); } fn final(self: *Self, b: []const u8) T { std.debug.assert(b.len < 8); - self.msg_len +%= @truncate(u8, b.len); + self.msg_len +%= @as(u8, @truncate(b.len)); var buf = [_]u8{0} ** 8; @memcpy(buf[0..b.len], b); @@ -202,7 +202,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize) const b_slice = b[off + aligned_len ..]; @memcpy(self.buf[self.buf_len..][0..b_slice.len], b_slice); - self.buf_len += @intCast(u8, b_slice.len); + self.buf_len += @as(u8, @intCast(b_slice.len)); } pub fn peek(self: Self) [mac_length]u8 { @@ -329,7 +329,7 @@ test "siphash64-2-4 sanity" { var buffer: [64]u8 = undefined; for (vectors, 0..) |vector, i| { - buffer[i] = @intCast(u8, i); + buffer[i] = @as(u8, @intCast(i)); var out: [siphash.mac_length]u8 = undefined; siphash.create(&out, buffer[0..i], test_key); @@ -409,7 +409,7 @@ test "siphash128-2-4 sanity" { var buffer: [64]u8 = undefined; for (vectors, 0..) |vector, i| { - buffer[i] = @intCast(u8, i); + buffer[i] = @as(u8, @intCast(i)); var out: [siphash.mac_length]u8 = undefined; siphash.create(&out, buffer[0..i], test_key[0..]); @@ -420,7 +420,7 @@ test "siphash128-2-4 sanity" { test "iterative non-divisible update" { var buf: [1024]u8 = undefined; for (&buf, 0..) 
|*e, i| { - e.* = @truncate(u8, i); + e.* = @as(u8, @truncate(i)); } const key = "0x128dad08f12307"; diff --git a/lib/std/crypto/tlcsprng.zig b/lib/std/crypto/tlcsprng.zig index 54a30cfaba..344da9745d 100644 --- a/lib/std/crypto/tlcsprng.zig +++ b/lib/std/crypto/tlcsprng.zig @@ -102,7 +102,7 @@ fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void { wipe_mem = mem.asBytes(&S.buf); } } - const ctx = @ptrCast(*Context, wipe_mem.ptr); + const ctx = @as(*Context, @ptrCast(wipe_mem.ptr)); switch (ctx.init_state) { .uninitialized => { @@ -158,7 +158,7 @@ fn childAtForkHandler() callconv(.C) void { } fn fillWithCsprng(buffer: []u8) void { - const ctx = @ptrCast(*Context, wipe_mem.ptr); + const ctx = @as(*Context, @ptrCast(wipe_mem.ptr)); return ctx.rng.fill(buffer); } @@ -174,7 +174,7 @@ fn initAndFill(buffer: []u8) void { // the `std.options.cryptoRandomSeed` function is provided. std.options.cryptoRandomSeed(&seed); - const ctx = @ptrCast(*Context, wipe_mem.ptr); + const ctx = @as(*Context, @ptrCast(wipe_mem.ptr)); ctx.rng = Rng.init(seed); std.crypto.utils.secureZero(u8, &seed); diff --git a/lib/std/crypto/tls.zig b/lib/std/crypto/tls.zig index 4c03c48973..eb5a6b4c1a 100644 --- a/lib/std/crypto/tls.zig +++ b/lib/std/crypto/tls.zig @@ -371,12 +371,12 @@ pub fn hkdfExpandLabel( const tls13 = "tls13 "; var buf: [2 + 1 + tls13.len + max_label_len + 1 + max_context_len]u8 = undefined; mem.writeIntBig(u16, buf[0..2], len); - buf[2] = @intCast(u8, tls13.len + label.len); + buf[2] = @as(u8, @intCast(tls13.len + label.len)); buf[3..][0..tls13.len].* = tls13.*; var i: usize = 3 + tls13.len; @memcpy(buf[i..][0..label.len], label); i += label.len; - buf[i] = @intCast(u8, context.len); + buf[i] = @as(u8, @intCast(context.len)); i += 1; @memcpy(buf[i..][0..context.len], context); i += context.len; @@ -411,24 +411,24 @@ pub inline fn enum_array(comptime E: type, comptime tags: []const E) [2 + @sizeO assert(@sizeOf(E) == 2); var result: [tags.len * 2]u8 = undefined; for (tags, 0..) |elem, i| { - result[i * 2] = @truncate(u8, @intFromEnum(elem) >> 8); - result[i * 2 + 1] = @truncate(u8, @intFromEnum(elem)); + result[i * 2] = @as(u8, @truncate(@intFromEnum(elem) >> 8)); + result[i * 2 + 1] = @as(u8, @truncate(@intFromEnum(elem))); } return array(2, result); } pub inline fn int2(x: u16) [2]u8 { return .{ - @truncate(u8, x >> 8), - @truncate(u8, x), + @as(u8, @truncate(x >> 8)), + @as(u8, @truncate(x)), }; } pub inline fn int3(x: u24) [3]u8 { return .{ - @truncate(u8, x >> 16), - @truncate(u8, x >> 8), - @truncate(u8, x), + @as(u8, @truncate(x >> 16)), + @as(u8, @truncate(x >> 8)), + @as(u8, @truncate(x)), }; } @@ -513,7 +513,7 @@ pub const Decoder = struct { .Enum => |info| { const int = d.decode(info.tag_type); if (info.is_exhaustive) @compileError("exhaustive enum cannot be used"); - return @enumFromInt(T, int); + return @as(T, @enumFromInt(int)); }, else => @compileError("unsupported type: " ++ @typeName(T)), } diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig index 94ecf0d3ef..6d5bb86fed 100644 --- a/lib/std/crypto/tls/Client.zig +++ b/lib/std/crypto/tls/Client.zig @@ -140,7 +140,7 @@ pub fn InitError(comptime Stream: type) type { /// /// `host` is only borrowed during this function call. 
pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) InitError(@TypeOf(stream))!Client { - const host_len = @intCast(u16, host.len); + const host_len = @as(u16, @intCast(host.len)); var random_buffer: [128]u8 = undefined; crypto.random.bytes(&random_buffer); @@ -194,7 +194,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In int2(host_len); const extensions_header = - int2(@intCast(u16, extensions_payload.len + host_len)) ++ + int2(@as(u16, @intCast(extensions_payload.len + host_len))) ++ extensions_payload; const legacy_compression_methods = 0x0100; @@ -209,13 +209,13 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In const out_handshake = [_]u8{@intFromEnum(tls.HandshakeType.client_hello)} ++ - int3(@intCast(u24, client_hello.len + host_len)) ++ + int3(@as(u24, @intCast(client_hello.len + host_len))) ++ client_hello; const plaintext_header = [_]u8{ @intFromEnum(tls.ContentType.handshake), 0x03, 0x01, // legacy_record_version - } ++ int2(@intCast(u16, out_handshake.len + host_len)) ++ out_handshake; + } ++ int2(@as(u16, @intCast(out_handshake.len + host_len))) ++ out_handshake; { var iovecs = [_]std.os.iovec_const{ @@ -457,7 +457,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In const auth_tag = record_decoder.array(P.AEAD.tag_length).*; const V = @Vector(P.AEAD.nonce_length, u8); const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8); - const operand: V = pad ++ @bitCast([8]u8, big(read_seq)); + const operand: V = pad ++ @as([8]u8, @bitCast(big(read_seq))); read_seq += 1; const nonce = @as(V, p.server_handshake_iv) ^ operand; P.AEAD.decrypt(cleartext, ciphertext, auth_tag, record_header, nonce, p.server_handshake_key) catch @@ -466,7 +466,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In }, }; - const inner_ct = @enumFromInt(tls.ContentType, cleartext[cleartext.len - 1]); + const inner_ct = @as(tls.ContentType, @enumFromInt(cleartext[cleartext.len - 1])); if (inner_ct != .handshake) return error.TlsUnexpectedMessage; var ctd = tls.Decoder.fromTheirSlice(cleartext[0 .. 
cleartext.len - 1]); @@ -520,7 +520,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In const subject_cert: Certificate = .{ .buffer = certd.buf, - .index = @intCast(u32, certd.idx), + .index = @as(u32, @intCast(certd.idx)), }; const subject = try subject_cert.parse(); if (cert_index == 0) { @@ -534,7 +534,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In if (pub_key.len > main_cert_pub_key_buf.len) return error.CertificatePublicKeyInvalid; @memcpy(main_cert_pub_key_buf[0..pub_key.len], pub_key); - main_cert_pub_key_len = @intCast(@TypeOf(main_cert_pub_key_len), pub_key.len); + main_cert_pub_key_len = @as(@TypeOf(main_cert_pub_key_len), @intCast(pub_key.len)); } else { try prev_cert.verify(subject, now_sec); } @@ -679,7 +679,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In .write_seq = 0, .partial_cleartext_idx = 0, .partial_ciphertext_idx = 0, - .partial_ciphertext_end = @intCast(u15, leftover.len), + .partial_ciphertext_end = @as(u15, @intCast(leftover.len)), .received_close_notify = false, .application_cipher = app_cipher, .partially_read_buffer = undefined, @@ -797,11 +797,11 @@ fn prepareCiphertextRecord( const overhead_len = tls.record_header_len + P.AEAD.tag_length + 1; const close_notify_alert_reserved = tls.close_notify_alert.len + overhead_len; while (true) { - const encrypted_content_len = @intCast(u16, @min( + const encrypted_content_len = @as(u16, @intCast(@min( @min(bytes.len - bytes_i, max_ciphertext_len - 1), ciphertext_buf.len - close_notify_alert_reserved - overhead_len - ciphertext_end, - )); + ))); if (encrypted_content_len == 0) return .{ .iovec_end = iovec_end, .ciphertext_end = ciphertext_end, @@ -826,7 +826,7 @@ fn prepareCiphertextRecord( const auth_tag = ciphertext_buf[ciphertext_end..][0..P.AEAD.tag_length]; ciphertext_end += auth_tag.len; const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8); - const operand: V = pad ++ @bitCast([8]u8, big(c.write_seq)); + const operand: V = pad ++ @as([8]u8, @bitCast(big(c.write_seq))); c.write_seq += 1; // TODO send key_update on overflow const nonce = @as(V, p.client_iv) ^ operand; P.AEAD.encrypt(ciphertext, auth_tag, cleartext, ad, nonce, p.client_key); @@ -920,7 +920,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) // Give away the buffered cleartext we have, if any. 
const partial_cleartext = c.partially_read_buffer[c.partial_cleartext_idx..c.partial_ciphertext_idx]; if (partial_cleartext.len > 0) { - const amt = @intCast(u15, vp.put(partial_cleartext)); + const amt = @as(u15, @intCast(vp.put(partial_cleartext))); c.partial_cleartext_idx += amt; if (c.partial_cleartext_idx == c.partial_ciphertext_idx and @@ -1037,7 +1037,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) in = 0; continue; } - const ct = @enumFromInt(tls.ContentType, frag[in]); + const ct = @as(tls.ContentType, @enumFromInt(frag[in])); in += 1; const legacy_version = mem.readIntBig(u16, frag[in..][0..2]); in += 2; @@ -1070,8 +1070,8 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) switch (ct) { .alert => { if (in + 2 > frag.len) return error.TlsDecodeError; - const level = @enumFromInt(tls.AlertLevel, frag[in]); - const desc = @enumFromInt(tls.AlertDescription, frag[in + 1]); + const level = @as(tls.AlertLevel, @enumFromInt(frag[in])); + const desc = @as(tls.AlertDescription, @enumFromInt(frag[in + 1])); _ = level; try desc.toError(); @@ -1089,7 +1089,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) in += ciphertext_len; const auth_tag = frag[in..][0..P.AEAD.tag_length].*; const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8); - const operand: V = pad ++ @bitCast([8]u8, big(c.read_seq)); + const operand: V = pad ++ @as([8]u8, @bitCast(big(c.read_seq))); const nonce: [P.AEAD.nonce_length]u8 = @as(V, p.server_iv) ^ operand; const out_buf = vp.peek(); const cleartext_buf = if (ciphertext.len <= out_buf.len) @@ -1105,11 +1105,11 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) c.read_seq = try std.math.add(u64, c.read_seq, 1); - const inner_ct = @enumFromInt(tls.ContentType, cleartext[cleartext.len - 1]); + const inner_ct = @as(tls.ContentType, @enumFromInt(cleartext[cleartext.len - 1])); switch (inner_ct) { .alert => { - const level = @enumFromInt(tls.AlertLevel, cleartext[0]); - const desc = @enumFromInt(tls.AlertDescription, cleartext[1]); + const level = @as(tls.AlertLevel, @enumFromInt(cleartext[0])); + const desc = @as(tls.AlertDescription, @enumFromInt(cleartext[1])); if (desc == .close_notify) { c.received_close_notify = true; c.partial_ciphertext_end = c.partial_ciphertext_idx; @@ -1124,7 +1124,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) .handshake => { var ct_i: usize = 0; while (true) { - const handshake_type = @enumFromInt(tls.HandshakeType, cleartext[ct_i]); + const handshake_type = @as(tls.HandshakeType, @enumFromInt(cleartext[ct_i])); ct_i += 1; const handshake_len = mem.readIntBig(u24, cleartext[ct_i..][0..3]); ct_i += 3; @@ -1148,7 +1148,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) } c.read_seq = 0; - switch (@enumFromInt(tls.KeyUpdateRequest, handshake[0])) { + switch (@as(tls.KeyUpdateRequest, @enumFromInt(handshake[0]))) { .update_requested => { switch (c.application_cipher) { inline else => |*p| { @@ -1186,13 +1186,13 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) c.partially_read_buffer[c.partial_ciphertext_idx..][0..msg.len], msg, ); - c.partial_ciphertext_idx = @intCast(@TypeOf(c.partial_ciphertext_idx), c.partial_ciphertext_idx + msg.len); + c.partial_ciphertext_idx = @as(@TypeOf(c.partial_ciphertext_idx), @intCast(c.partial_ciphertext_idx + msg.len)); } else { const amt = vp.put(msg); if (amt < msg.len) { const rest = 
msg[amt..]; c.partial_cleartext_idx = 0; - c.partial_ciphertext_idx = @intCast(@TypeOf(c.partial_ciphertext_idx), rest.len); + c.partial_ciphertext_idx = @as(@TypeOf(c.partial_ciphertext_idx), @intCast(rest.len)); @memcpy(c.partially_read_buffer[0..rest.len], rest); } } @@ -1220,12 +1220,12 @@ fn finishRead(c: *Client, frag: []const u8, in: usize, out: usize) usize { const saved_buf = frag[in..]; if (c.partial_ciphertext_idx > c.partial_cleartext_idx) { // There is cleartext at the beginning already which we need to preserve. - c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), c.partial_ciphertext_idx + saved_buf.len); + c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(c.partial_ciphertext_idx + saved_buf.len)); @memcpy(c.partially_read_buffer[c.partial_ciphertext_idx..][0..saved_buf.len], saved_buf); } else { c.partial_cleartext_idx = 0; c.partial_ciphertext_idx = 0; - c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), saved_buf.len); + c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(saved_buf.len)); @memcpy(c.partially_read_buffer[0..saved_buf.len], saved_buf); } return out; @@ -1235,14 +1235,14 @@ fn finishRead(c: *Client, frag: []const u8, in: usize, out: usize) usize { fn finishRead2(c: *Client, first: []const u8, frag1: []const u8, out: usize) usize { if (c.partial_ciphertext_idx > c.partial_cleartext_idx) { // There is cleartext at the beginning already which we need to preserve. - c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), c.partial_ciphertext_idx + first.len + frag1.len); + c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(c.partial_ciphertext_idx + first.len + frag1.len)); // TODO: eliminate this call to copyForwards std.mem.copyForwards(u8, c.partially_read_buffer[c.partial_ciphertext_idx..][0..first.len], first); @memcpy(c.partially_read_buffer[c.partial_ciphertext_idx + first.len ..][0..frag1.len], frag1); } else { c.partial_cleartext_idx = 0; c.partial_ciphertext_idx = 0; - c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), first.len + frag1.len); + c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(first.len + frag1.len)); // TODO: eliminate this call to copyForwards std.mem.copyForwards(u8, c.partially_read_buffer[0..first.len], first); @memcpy(c.partially_read_buffer[first.len..][0..frag1.len], frag1); diff --git a/lib/std/crypto/utils.zig b/lib/std/crypto/utils.zig index 14a235e418..ab1b6eab6a 100644 --- a/lib/std/crypto/utils.zig +++ b/lib/std/crypto/utils.zig @@ -24,7 +24,7 @@ pub fn timingSafeEql(comptime T: type, a: T, b: T) bool { const s = @typeInfo(C).Int.bits; const Cu = std.meta.Int(.unsigned, s); const Cext = std.meta.Int(.unsigned, s + 1); - return @bitCast(bool, @truncate(u1, (@as(Cext, @bitCast(Cu, acc)) -% 1) >> s)); + return @as(bool, @bitCast(@as(u1, @truncate((@as(Cext, @as(Cu, @bitCast(acc))) -% 1) >> s)))); }, .Vector => |info| { const C = info.child; @@ -35,7 +35,7 @@ pub fn timingSafeEql(comptime T: type, a: T, b: T) bool { const s = @typeInfo(C).Int.bits; const Cu = std.meta.Int(.unsigned, s); const Cext = std.meta.Int(.unsigned, s + 1); - return @bitCast(bool, @truncate(u1, (@as(Cext, @bitCast(Cu, acc)) -% 1) >> s)); + return @as(bool, @bitCast(@as(u1, @truncate((@as(Cext, @as(Cu, @bitCast(acc))) -% 1) >> s)))); }, else => { @compileError("Only arrays and vectors can be compared"); @@ -60,14 +60,14 @@ pub fn timingSafeCompare(comptime T: type, a: []const T, b: 
[]const T, endian: E i -= 1; const x1 = a[i]; const x2 = b[i]; - gt |= @truncate(T, (@as(Cext, x2) -% @as(Cext, x1)) >> bits) & eq; - eq &= @truncate(T, (@as(Cext, (x2 ^ x1)) -% 1) >> bits); + gt |= @as(T, @truncate((@as(Cext, x2) -% @as(Cext, x1)) >> bits)) & eq; + eq &= @as(T, @truncate((@as(Cext, (x2 ^ x1)) -% 1) >> bits)); } } else { for (a, 0..) |x1, i| { const x2 = b[i]; - gt |= @truncate(T, (@as(Cext, x2) -% @as(Cext, x1)) >> bits) & eq; - eq &= @truncate(T, (@as(Cext, (x2 ^ x1)) -% 1) >> bits); + gt |= @as(T, @truncate((@as(Cext, x2) -% @as(Cext, x1)) >> bits)) & eq; + eq &= @as(T, @truncate((@as(Cext, (x2 ^ x1)) -% 1) >> bits)); } } if (gt != 0) { @@ -102,7 +102,7 @@ pub fn timingSafeAdd(comptime T: type, a: []const T, b: []const T, result: []T, carry = ov1[1] | ov2[1]; } } - return @bitCast(bool, carry); + return @as(bool, @bitCast(carry)); } /// Subtract two integers serialized as arrays of the same size, in constant time. @@ -129,7 +129,7 @@ pub fn timingSafeSub(comptime T: type, a: []const T, b: []const T, result: []T, borrow = ov1[1] | ov2[1]; } } - return @bitCast(bool, borrow); + return @as(bool, @bitCast(borrow)); } /// Sets a slice to zeroes. diff --git a/lib/std/cstr.zig b/lib/std/cstr.zig index 9bd98a72b7..0888edf10d 100644 --- a/lib/std/cstr.zig +++ b/lib/std/cstr.zig @@ -89,12 +89,12 @@ pub const NullTerminated2DArray = struct { return NullTerminated2DArray{ .allocator = allocator, .byte_count = byte_count, - .ptr = @ptrCast(?[*:null]?[*:0]u8, buf.ptr), + .ptr = @as(?[*:null]?[*:0]u8, @ptrCast(buf.ptr)), }; } pub fn deinit(self: *NullTerminated2DArray) void { - const buf = @ptrCast([*]u8, self.ptr); + const buf = @as([*]u8, @ptrCast(self.ptr)); self.allocator.free(buf[0..self.byte_count]); } }; diff --git a/lib/std/debug.zig b/lib/std/debug.zig index e0726d5444..44f6ce1367 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -460,8 +460,8 @@ pub const StackIterator = struct { // We are unable to determine validity of memory for freestanding targets if (native_os == .freestanding) return true; - const aligned_address = address & ~@intCast(usize, (mem.page_size - 1)); - const aligned_memory = @ptrFromInt([*]align(mem.page_size) u8, aligned_address)[0..mem.page_size]; + const aligned_address = address & ~@as(usize, @intCast((mem.page_size - 1))); + const aligned_memory = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_address))[0..mem.page_size]; if (native_os != .windows) { if (native_os != .wasi) { @@ -511,7 +511,7 @@ pub const StackIterator = struct { if (fp == 0 or !mem.isAligned(fp, @alignOf(usize)) or !isValidMemory(fp)) return null; - const new_fp = math.add(usize, @ptrFromInt(*const usize, fp).*, fp_bias) catch return null; + const new_fp = math.add(usize, @as(*const usize, @ptrFromInt(fp)).*, fp_bias) catch return null; // Sanity check: the stack grows down thus all the parent frames must be // be at addresses that are greater (or equal) than the previous one. 
@@ -520,9 +520,9 @@ pub const StackIterator = struct { if (new_fp != 0 and new_fp < self.fp) return null; - const new_pc = @ptrFromInt( + const new_pc = @as( *const usize, - math.add(usize, fp, pc_offset) catch return null, + @ptrFromInt(math.add(usize, fp, pc_offset) catch return null), ).*; self.fp = new_fp; @@ -555,10 +555,10 @@ pub fn writeCurrentStackTrace( pub noinline fn walkStackWindows(addresses: []usize) usize { if (builtin.cpu.arch == .x86) { // RtlVirtualUnwind doesn't exist on x86 - return windows.ntdll.RtlCaptureStackBackTrace(0, addresses.len, @ptrCast(**anyopaque, addresses.ptr), null); + return windows.ntdll.RtlCaptureStackBackTrace(0, addresses.len, @as(**anyopaque, @ptrCast(addresses.ptr)), null); } - const tib = @ptrCast(*const windows.NT_TIB, &windows.teb().Reserved1); + const tib = @as(*const windows.NT_TIB, @ptrCast(&windows.teb().Reserved1)); var context: windows.CONTEXT = std.mem.zeroes(windows.CONTEXT); windows.ntdll.RtlCaptureContext(&context); @@ -584,7 +584,7 @@ pub noinline fn walkStackWindows(addresses: []usize) usize { ); } else { // leaf function - context.setIp(@ptrFromInt(*u64, current_regs.sp).*); + context.setIp(@as(*u64, @ptrFromInt(current_regs.sp)).*); context.setSp(current_regs.sp + @sizeOf(usize)); } @@ -734,7 +734,7 @@ fn printLineInfo( if (printLineFromFile(out_stream, li)) { if (li.column > 0) { // The caret already takes one char - const space_needed = @intCast(usize, li.column - 1); + const space_needed = @as(usize, @intCast(li.column - 1)); try out_stream.writeByteNTimes(' ', space_needed); try tty_config.setColor(out_stream, .green); @@ -883,7 +883,7 @@ fn chopSlice(ptr: []const u8, offset: u64, size: u64) error{Overflow}![]const u8 pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugInfo { nosuspend { const mapped_mem = try mapWholeFile(elf_file); - const hdr = @ptrCast(*const elf.Ehdr, &mapped_mem[0]); + const hdr = @as(*const elf.Ehdr, @ptrCast(&mapped_mem[0])); if (!mem.eql(u8, hdr.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic; if (hdr.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion; @@ -896,14 +896,13 @@ pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugIn const shoff = hdr.e_shoff; const str_section_off = shoff + @as(u64, hdr.e_shentsize) * @as(u64, hdr.e_shstrndx); - const str_shdr = @ptrCast( - *const elf.Shdr, - @alignCast(@alignOf(elf.Shdr), &mapped_mem[math.cast(usize, str_section_off) orelse return error.Overflow]), - ); + const str_shdr: *const elf.Shdr = @ptrCast(@alignCast( + &mapped_mem[math.cast(usize, str_section_off) orelse return error.Overflow], + )); const header_strings = mapped_mem[str_shdr.sh_offset .. 
str_shdr.sh_offset + str_shdr.sh_size]; - const shdrs = @ptrCast( + const shdrs = @as( [*]const elf.Shdr, - @alignCast(@alignOf(elf.Shdr), &mapped_mem[shoff]), + @ptrCast(@alignCast(&mapped_mem[shoff])), )[0..hdr.e_shnum]; var opt_debug_info: ?[]const u8 = null; @@ -982,10 +981,7 @@ pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugIn fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugInfo { const mapped_mem = try mapWholeFile(macho_file); - const hdr = @ptrCast( - *const macho.mach_header_64, - @alignCast(@alignOf(macho.mach_header_64), mapped_mem.ptr), - ); + const hdr: *const macho.mach_header_64 = @ptrCast(@alignCast(mapped_mem.ptr)); if (hdr.magic != macho.MH_MAGIC_64) return error.InvalidDebugInfo; @@ -998,9 +994,9 @@ fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugIn else => {}, } else return error.MissingDebugInfo; - const syms = @ptrCast( + const syms = @as( [*]const macho.nlist_64, - @alignCast(@alignOf(macho.nlist_64), &mapped_mem[symtab.symoff]), + @ptrCast(@alignCast(&mapped_mem[symtab.symoff])), )[0..symtab.nsyms]; const strings = mapped_mem[symtab.stroff..][0 .. symtab.strsize - 1 :0]; @@ -1055,7 +1051,7 @@ fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugIn }, .fun_strx => { state = .fun_size; - last_sym.size = @intCast(u32, sym.n_value); + last_sym.size = @as(u32, @intCast(sym.n_value)); }, else => return error.InvalidDebugInfo, } @@ -1283,10 +1279,10 @@ pub const DebugInfo = struct { var it = macho.LoadCommandIterator{ .ncmds = header.ncmds, - .buffer = @alignCast(@alignOf(u64), @ptrFromInt( + .buffer = @alignCast(@as( [*]u8, - @intFromPtr(header) + @sizeOf(macho.mach_header_64), - ))[0..header.sizeofcmds], + @ptrFromInt(@intFromPtr(header) + @sizeOf(macho.mach_header_64)), + )[0..header.sizeofcmds]), }; while (it.next()) |cmd| switch (cmd.cmd()) { .SEGMENT_64 => { @@ -1332,7 +1328,7 @@ pub const DebugInfo = struct { return obj_di; } - const mapped_module = @ptrFromInt([*]const u8, module.base_address)[0..module.size]; + const mapped_module = @as([*]const u8, @ptrFromInt(module.base_address))[0..module.size]; const obj_di = try self.allocator.create(ModuleDebugInfo); errdefer self.allocator.destroy(obj_di); @@ -1465,10 +1461,7 @@ pub const ModuleDebugInfo = switch (native_os) { const o_file = try fs.cwd().openFile(o_file_path, .{ .intended_io_mode = .blocking }); const mapped_mem = try mapWholeFile(o_file); - const hdr = @ptrCast( - *const macho.mach_header_64, - @alignCast(@alignOf(macho.mach_header_64), mapped_mem.ptr), - ); + const hdr: *const macho.mach_header_64 = @ptrCast(@alignCast(mapped_mem.ptr)); if (hdr.magic != std.macho.MH_MAGIC_64) return error.InvalidDebugInfo; @@ -1487,21 +1480,18 @@ pub const ModuleDebugInfo = switch (native_os) { if (segcmd == null or symtabcmd == null) return error.MissingDebugInfo; // Parse symbols - const strtab = @ptrCast( + const strtab = @as( [*]const u8, - &mapped_mem[symtabcmd.?.stroff], + @ptrCast(&mapped_mem[symtabcmd.?.stroff]), )[0 .. 
symtabcmd.?.strsize - 1 :0]; - const symtab = @ptrCast( + const symtab = @as( [*]const macho.nlist_64, - @alignCast( - @alignOf(macho.nlist_64), - &mapped_mem[symtabcmd.?.symoff], - ), + @ptrCast(@alignCast(&mapped_mem[symtabcmd.?.symoff])), )[0..symtabcmd.?.nsyms]; // TODO handle tentative (common) symbols var addr_table = std.StringHashMap(u64).init(allocator); - try addr_table.ensureTotalCapacity(@intCast(u32, symtab.len)); + try addr_table.ensureTotalCapacity(@as(u32, @intCast(symtab.len))); for (symtab) |sym| { if (sym.n_strx == 0) continue; if (sym.undf() or sym.tentative() or sym.abs()) continue; @@ -1943,49 +1933,49 @@ fn dumpSegfaultInfoPosix(sig: i32, addr: usize, ctx_ptr: ?*const anyopaque) void switch (native_arch) { .x86 => { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); - const ip = @intCast(usize, ctx.mcontext.gregs[os.REG.EIP]); - const bp = @intCast(usize, ctx.mcontext.gregs[os.REG.EBP]); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); + const ip = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EIP])); + const bp = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EBP])); dumpStackTraceFromBase(bp, ip); }, .x86_64 => { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); const ip = switch (native_os) { - .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]), - .freebsd => @intCast(usize, ctx.mcontext.rip), - .openbsd => @intCast(usize, ctx.sc_rip), - .macos => @intCast(usize, ctx.mcontext.ss.rip), + .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RIP])), + .freebsd => @as(usize, @intCast(ctx.mcontext.rip)), + .openbsd => @as(usize, @intCast(ctx.sc_rip)), + .macos => @as(usize, @intCast(ctx.mcontext.ss.rip)), else => unreachable, }; const bp = switch (native_os) { - .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RBP]), - .openbsd => @intCast(usize, ctx.sc_rbp), - .freebsd => @intCast(usize, ctx.mcontext.rbp), - .macos => @intCast(usize, ctx.mcontext.ss.rbp), + .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RBP])), + .openbsd => @as(usize, @intCast(ctx.sc_rbp)), + .freebsd => @as(usize, @intCast(ctx.mcontext.rbp)), + .macos => @as(usize, @intCast(ctx.mcontext.ss.rbp)), else => unreachable, }; dumpStackTraceFromBase(bp, ip); }, .arm => { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); - const ip = @intCast(usize, ctx.mcontext.arm_pc); - const bp = @intCast(usize, ctx.mcontext.arm_fp); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); + const ip = @as(usize, @intCast(ctx.mcontext.arm_pc)); + const bp = @as(usize, @intCast(ctx.mcontext.arm_fp)); dumpStackTraceFromBase(bp, ip); }, .aarch64 => { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); const ip = switch (native_os) { - .macos => @intCast(usize, ctx.mcontext.ss.pc), - .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.PC]), - .freebsd => @intCast(usize, ctx.mcontext.gpregs.elr), - else => @intCast(usize, ctx.mcontext.pc), + .macos => @as(usize, @intCast(ctx.mcontext.ss.pc)), + .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.PC])), + .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.elr)), + else => @as(usize, @intCast(ctx.mcontext.pc)), }; // x29 is the ABI-designated frame 
pointer const bp = switch (native_os) { - .macos => @intCast(usize, ctx.mcontext.ss.fp), - .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.FP]), - .freebsd => @intCast(usize, ctx.mcontext.gpregs.x[os.REG.FP]), - else => @intCast(usize, ctx.mcontext.regs[29]), + .macos => @as(usize, @intCast(ctx.mcontext.ss.fp)), + .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.FP])), + .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.x[os.REG.FP])), + else => @as(usize, @intCast(ctx.mcontext.regs[29])), }; dumpStackTraceFromBase(bp, ip); }, diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig index 4de08b25d7..aa1ac6959f 100644 --- a/lib/std/dwarf.zig +++ b/lib/std/dwarf.zig @@ -462,7 +462,7 @@ const LineNumberProgram = struct { }); return debug.LineInfo{ - .line = if (self.prev_line >= 0) @intCast(u64, self.prev_line) else 0, + .line = if (self.prev_line >= 0) @as(u64, @intCast(self.prev_line)) else 0, .column = self.prev_column, .file_name = file_name, }; @@ -533,7 +533,7 @@ fn parseFormValueConstant(in_stream: anytype, signed: bool, endian: std.builtin. -1 => blk: { if (signed) { const x = try nosuspend leb.readILEB128(i64, in_stream); - break :blk @bitCast(u64, x); + break :blk @as(u64, @bitCast(x)); } else { const x = try nosuspend leb.readULEB128(u64, in_stream); break :blk x; @@ -939,12 +939,12 @@ pub const DwarfInfo = struct { .Const => |c| try c.asUnsignedLe(), .RangeListOffset => |idx| off: { if (compile_unit.is_64) { - const offset_loc = @intCast(usize, compile_unit.rnglists_base + 8 * idx); + const offset_loc = @as(usize, @intCast(compile_unit.rnglists_base + 8 * idx)); if (offset_loc + 8 > debug_ranges.len) return badDwarf(); const offset = mem.readInt(u64, debug_ranges[offset_loc..][0..8], di.endian); break :off compile_unit.rnglists_base + offset; } else { - const offset_loc = @intCast(usize, compile_unit.rnglists_base + 4 * idx); + const offset_loc = @as(usize, @intCast(compile_unit.rnglists_base + 4 * idx)); if (offset_loc + 4 > debug_ranges.len) return badDwarf(); const offset = mem.readInt(u32, debug_ranges[offset_loc..][0..4], di.endian); break :off compile_unit.rnglists_base + offset; @@ -1134,7 +1134,7 @@ pub const DwarfInfo = struct { ), }; if (attr.form_id == FORM.implicit_const) { - result.attrs.items[i].value.Const.payload = @bitCast(u64, attr.payload); + result.attrs.items[i].value.Const.payload = @as(u64, @bitCast(attr.payload)); } } return result; @@ -1438,7 +1438,7 @@ pub const DwarfInfo = struct { const addr_size = debug_addr[compile_unit.addr_base - 2]; const seg_size = debug_addr[compile_unit.addr_base - 1]; - const byte_offset = @intCast(usize, compile_unit.addr_base + (addr_size + seg_size) * index); + const byte_offset = @as(usize, @intCast(compile_unit.addr_base + (addr_size + seg_size) * index)); if (byte_offset + addr_size > debug_addr.len) return badDwarf(); return switch (addr_size) { 1 => debug_addr[byte_offset], diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index 38c5de9cad..3342ac3f6d 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -71,18 +71,18 @@ pub fn linkmap_iterator(phdrs: []elf.Phdr) !LinkMap.Iterator { while (_DYNAMIC[i].d_tag != elf.DT_NULL) : (i += 1) { switch (_DYNAMIC[i].d_tag) { elf.DT_DEBUG => { - const ptr = @ptrFromInt(?*RDebug, _DYNAMIC[i].d_val); + const ptr = @as(?*RDebug, @ptrFromInt(_DYNAMIC[i].d_val)); if (ptr) |r_debug| { if (r_debug.r_version != 1) return error.InvalidExe; break :init r_debug.r_map; } }, elf.DT_PLTGOT => { - const ptr = @ptrFromInt(?[*]usize, 
_DYNAMIC[i].d_val); + const ptr = @as(?[*]usize, @ptrFromInt(_DYNAMIC[i].d_val)); if (ptr) |got_table| { // The address to the link_map structure is stored in // the second slot - break :init @ptrFromInt(?*LinkMap, got_table[1]); + break :init @as(?*LinkMap, @ptrFromInt(got_table[1])); } }, else => {}, @@ -132,7 +132,7 @@ pub const ElfDynLib = struct { ); defer os.munmap(file_bytes); - const eh = @ptrCast(*elf.Ehdr, file_bytes.ptr); + const eh = @as(*elf.Ehdr, @ptrCast(file_bytes.ptr)); if (!mem.eql(u8, eh.e_ident[0..4], elf.MAGIC)) return error.NotElfFile; if (eh.e_type != elf.ET.DYN) return error.NotDynamicLibrary; @@ -149,10 +149,10 @@ pub const ElfDynLib = struct { i += 1; ph_addr += eh.e_phentsize; }) { - const ph = @ptrFromInt(*elf.Phdr, ph_addr); + const ph = @as(*elf.Phdr, @ptrFromInt(ph_addr)); switch (ph.p_type) { elf.PT_LOAD => virt_addr_end = @max(virt_addr_end, ph.p_vaddr + ph.p_memsz), - elf.PT_DYNAMIC => maybe_dynv = @ptrFromInt([*]usize, elf_addr + ph.p_offset), + elf.PT_DYNAMIC => maybe_dynv = @as([*]usize, @ptrFromInt(elf_addr + ph.p_offset)), else => {}, } } @@ -180,7 +180,7 @@ pub const ElfDynLib = struct { i += 1; ph_addr += eh.e_phentsize; }) { - const ph = @ptrFromInt(*elf.Phdr, ph_addr); + const ph = @as(*elf.Phdr, @ptrFromInt(ph_addr)); switch (ph.p_type) { elf.PT_LOAD => { // The VirtAddr may not be page-aligned; in such case there will be @@ -188,7 +188,7 @@ pub const ElfDynLib = struct { const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1); const extra_bytes = (base + ph.p_vaddr) - aligned_addr; const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, mem.page_size); - const ptr = @ptrFromInt([*]align(mem.page_size) u8, aligned_addr); + const ptr = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_addr)); const prot = elfToMmapProt(ph.p_flags); if ((ph.p_flags & elf.PF_W) == 0) { // If it does not need write access, it can be mapped from the fd. 
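The debug.zig and dynamic_library.zig hunks above follow the same two pointer rewrites: @ptrCast(T, @alignCast(@alignOf(T), p)) collapses to @ptrCast(@alignCast(p)), with the target type and alignment taken from the result location, and @ptrFromInt(T, addr) becomes @ptrFromInt(addr) under an annotated declaration or an @as wrapper. A minimal sketch of both forms, using a hypothetical Header struct rather than a type from this patch:

const std = @import("std");

const Header = extern struct { magic: u32, len: u32 };

test "pointer cast rewrites" {
    const h = Header{ .magic = 0x7f45_4c46, .len = 16 };
    const bytes: []const u8 = std.mem.asBytes(&h);
    // Old: @ptrCast(*const Header, @alignCast(@alignOf(Header), bytes.ptr))
    const view: *const Header = @ptrCast(@alignCast(bytes.ptr));
    try std.testing.expectEqual(h.magic, view.magic);
    // Old: @ptrFromInt(*const Header, addr); the new builtin takes only the address.
    const again: *const Header = @ptrFromInt(@intFromPtr(&h));
    try std.testing.expectEqual(h.len, again.len);
}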
@@ -228,11 +228,11 @@ pub const ElfDynLib = struct { while (dynv[i] != 0) : (i += 2) { const p = base + dynv[i + 1]; switch (dynv[i]) { - elf.DT_STRTAB => maybe_strings = @ptrFromInt([*:0]u8, p), - elf.DT_SYMTAB => maybe_syms = @ptrFromInt([*]elf.Sym, p), - elf.DT_HASH => maybe_hashtab = @ptrFromInt([*]os.Elf_Symndx, p), - elf.DT_VERSYM => maybe_versym = @ptrFromInt([*]u16, p), - elf.DT_VERDEF => maybe_verdef = @ptrFromInt(*elf.Verdef, p), + elf.DT_STRTAB => maybe_strings = @as([*:0]u8, @ptrFromInt(p)), + elf.DT_SYMTAB => maybe_syms = @as([*]elf.Sym, @ptrFromInt(p)), + elf.DT_HASH => maybe_hashtab = @as([*]os.Elf_Symndx, @ptrFromInt(p)), + elf.DT_VERSYM => maybe_versym = @as([*]u16, @ptrFromInt(p)), + elf.DT_VERDEF => maybe_verdef = @as(*elf.Verdef, @ptrFromInt(p)), else => {}, } } @@ -261,7 +261,7 @@ pub const ElfDynLib = struct { pub fn lookup(self: *ElfDynLib, comptime T: type, name: [:0]const u8) ?T { if (self.lookupAddress("", name)) |symbol| { - return @ptrFromInt(T, symbol); + return @as(T, @ptrFromInt(symbol)); } else { return null; } @@ -276,8 +276,8 @@ pub const ElfDynLib = struct { var i: usize = 0; while (i < self.hashtab[1]) : (i += 1) { - if (0 == (@as(u32, 1) << @intCast(u5, self.syms[i].st_info & 0xf) & OK_TYPES)) continue; - if (0 == (@as(u32, 1) << @intCast(u5, self.syms[i].st_info >> 4) & OK_BINDS)) continue; + if (0 == (@as(u32, 1) << @as(u5, @intCast(self.syms[i].st_info & 0xf)) & OK_TYPES)) continue; + if (0 == (@as(u32, 1) << @as(u5, @intCast(self.syms[i].st_info >> 4)) & OK_BINDS)) continue; if (0 == self.syms[i].st_shndx) continue; if (!mem.eql(u8, name, mem.sliceTo(self.strings + self.syms[i].st_name, 0))) continue; if (maybe_versym) |versym| { @@ -301,15 +301,15 @@ pub const ElfDynLib = struct { fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [*:0]u8) bool { var def = def_arg; - const vsym = @bitCast(u32, vsym_arg) & 0x7fff; + const vsym = @as(u32, @bitCast(vsym_arg)) & 0x7fff; while (true) { if (0 == (def.vd_flags & elf.VER_FLG_BASE) and (def.vd_ndx & 0x7fff) == vsym) break; if (def.vd_next == 0) return false; - def = @ptrFromInt(*elf.Verdef, @intFromPtr(def) + def.vd_next); + def = @as(*elf.Verdef, @ptrFromInt(@intFromPtr(def) + def.vd_next)); } - const aux = @ptrFromInt(*elf.Verdaux, @intFromPtr(def) + def.vd_aux); + const aux = @as(*elf.Verdaux, @ptrFromInt(@intFromPtr(def) + def.vd_aux)); return mem.eql(u8, vername, mem.sliceTo(strings + aux.vda_name, 0)); } @@ -347,7 +347,7 @@ pub const WindowsDynLib = struct { pub fn lookup(self: *WindowsDynLib, comptime T: type, name: [:0]const u8) ?T { if (windows.kernel32.GetProcAddress(self.dll, name.ptr)) |addr| { - return @ptrCast(T, @alignCast(@alignOf(@typeInfo(T).Pointer.child), addr)); + return @as(T, @ptrCast(@alignCast(addr))); } else { return null; } @@ -381,7 +381,7 @@ pub const DlDynlib = struct { // dlsym (and other dl-functions) secretly take shadow parameter - return address on stack // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66826 if (@call(.never_tail, system.dlsym, .{ self.handle, name.ptr })) |symbol| { - return @ptrCast(T, @alignCast(@alignOf(@typeInfo(T).Pointer.child), symbol)); + return @as(T, @ptrCast(@alignCast(symbol))); } else { return null; } diff --git a/lib/std/elf.zig b/lib/std/elf.zig index 9a71f73e05..d464d7d12b 100644 --- a/lib/std/elf.zig +++ b/lib/std/elf.zig @@ -434,8 +434,8 @@ pub const Header = struct { } pub fn parse(hdr_buf: *align(@alignOf(Elf64_Ehdr)) const [@sizeOf(Elf64_Ehdr)]u8) !Header { - const hdr32 = @ptrCast(*const Elf32_Ehdr, 
hdr_buf); - const hdr64 = @ptrCast(*const Elf64_Ehdr, hdr_buf); + const hdr32 = @as(*const Elf32_Ehdr, @ptrCast(hdr_buf)); + const hdr64 = @as(*const Elf64_Ehdr, @ptrCast(hdr_buf)); if (!mem.eql(u8, hdr32.e_ident[0..4], MAGIC)) return error.InvalidElfMagic; if (hdr32.e_ident[EI_VERSION] != 1) return error.InvalidElfVersion; @@ -454,7 +454,7 @@ pub const Header = struct { const machine = if (need_bswap) blk: { const value = @intFromEnum(hdr32.e_machine); - break :blk @enumFromInt(EM, @byteSwap(value)); + break :blk @as(EM, @enumFromInt(@byteSwap(value))); } else hdr32.e_machine; return @as(Header, .{ @@ -725,10 +725,10 @@ pub const Elf32_Sym = extern struct { st_shndx: Elf32_Section, pub inline fn st_type(self: @This()) u4 { - return @truncate(u4, self.st_info); + return @as(u4, @truncate(self.st_info)); } pub inline fn st_bind(self: @This()) u4 { - return @truncate(u4, self.st_info >> 4); + return @as(u4, @truncate(self.st_info >> 4)); } }; pub const Elf64_Sym = extern struct { @@ -740,10 +740,10 @@ pub const Elf64_Sym = extern struct { st_size: Elf64_Xword, pub inline fn st_type(self: @This()) u4 { - return @truncate(u4, self.st_info); + return @as(u4, @truncate(self.st_info)); } pub inline fn st_bind(self: @This()) u4 { - return @truncate(u4, self.st_info >> 4); + return @as(u4, @truncate(self.st_info >> 4)); } }; pub const Elf32_Syminfo = extern struct { @@ -759,10 +759,10 @@ pub const Elf32_Rel = extern struct { r_info: Elf32_Word, pub inline fn r_sym(self: @This()) u24 { - return @truncate(u24, self.r_info >> 8); + return @as(u24, @truncate(self.r_info >> 8)); } pub inline fn r_type(self: @This()) u8 { - return @truncate(u8, self.r_info); + return @as(u8, @truncate(self.r_info)); } }; pub const Elf64_Rel = extern struct { @@ -770,10 +770,10 @@ pub const Elf64_Rel = extern struct { r_info: Elf64_Xword, pub inline fn r_sym(self: @This()) u32 { - return @truncate(u32, self.r_info >> 32); + return @as(u32, @truncate(self.r_info >> 32)); } pub inline fn r_type(self: @This()) u32 { - return @truncate(u32, self.r_info); + return @as(u32, @truncate(self.r_info)); } }; pub const Elf32_Rela = extern struct { @@ -782,10 +782,10 @@ pub const Elf32_Rela = extern struct { r_addend: Elf32_Sword, pub inline fn r_sym(self: @This()) u24 { - return @truncate(u24, self.r_info >> 8); + return @as(u24, @truncate(self.r_info >> 8)); } pub inline fn r_type(self: @This()) u8 { - return @truncate(u8, self.r_info); + return @as(u8, @truncate(self.r_info)); } }; pub const Elf64_Rela = extern struct { @@ -794,10 +794,10 @@ pub const Elf64_Rela = extern struct { r_addend: Elf64_Sxword, pub inline fn r_sym(self: @This()) u32 { - return @truncate(u32, self.r_info >> 32); + return @as(u32, @truncate(self.r_info >> 32)); } pub inline fn r_type(self: @This()) u32 { - return @truncate(u32, self.r_info); + return @as(u32, @truncate(self.r_info)); } }; pub const Elf32_Dyn = extern struct { diff --git a/lib/std/enums.zig b/lib/std/enums.zig index a5ceebc9b1..9931b1d7c1 100644 --- a/lib/std/enums.zig +++ b/lib/std/enums.zig @@ -16,7 +16,7 @@ pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_def fields = fields ++ &[_]StructField{.{ .name = field.name, .type = Data, - .default_value = if (field_default) |d| @ptrCast(?*const anyopaque, &d) else null, + .default_value = if (field_default) |d| @as(?*const anyopaque, @ptrCast(&d)) else null, .is_comptime = false, .alignment = if (@sizeOf(Data) > 0) @alignOf(Data) else 0, }}; @@ -61,7 +61,7 @@ test tagName { const E = enum(u8) { a, b, _ }; try 
testing.expect(tagName(E, .a) != null); try testing.expectEqualStrings("a", tagName(E, .a).?); - try testing.expect(tagName(E, @enumFromInt(E, 42)) == null); + try testing.expect(tagName(E, @as(E, @enumFromInt(42))) == null); } /// Determines the length of a direct-mapped enum array, indexed by @@ -156,7 +156,7 @@ pub fn directEnumArrayDefault( var result: [len]Data = if (default) |d| [_]Data{d} ** len else undefined; inline for (@typeInfo(@TypeOf(init_values)).Struct.fields) |f| { const enum_value = @field(E, f.name); - const index = @intCast(usize, @intFromEnum(enum_value)); + const index = @as(usize, @intCast(@intFromEnum(enum_value))); result[index] = @field(init_values, f.name); } return result; @@ -341,7 +341,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { var self = initWithCount(0); inline for (@typeInfo(E).Enum.fields) |field| { const c = @field(init_counts, field.name); - const key = @enumFromInt(E, field.value); + const key = @as(E, @enumFromInt(field.value)); self.counts.set(key, c); } return self; @@ -412,7 +412,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { /// asserts operation will not overflow any key. pub fn addSetAssertSafe(self: *Self, other: Self) void { inline for (@typeInfo(E).Enum.fields) |field| { - const key = @enumFromInt(E, field.value); + const key = @as(E, @enumFromInt(field.value)); self.addAssertSafe(key, other.getCount(key)); } } @@ -420,7 +420,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { /// Increases the all key counts by given multiset. pub fn addSet(self: *Self, other: Self) error{Overflow}!void { inline for (@typeInfo(E).Enum.fields) |field| { - const key = @enumFromInt(E, field.value); + const key = @as(E, @enumFromInt(field.value)); try self.add(key, other.getCount(key)); } } @@ -430,7 +430,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { /// then that key will have a key count of zero. pub fn removeSet(self: *Self, other: Self) void { inline for (@typeInfo(E).Enum.fields) |field| { - const key = @enumFromInt(E, field.value); + const key = @as(E, @enumFromInt(field.value)); self.remove(key, other.getCount(key)); } } @@ -439,7 +439,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { /// given multiset. pub fn eql(self: Self, other: Self) bool { inline for (@typeInfo(E).Enum.fields) |field| { - const key = @enumFromInt(E, field.value); + const key = @as(E, @enumFromInt(field.value)); if (self.getCount(key) != other.getCount(key)) { return false; } @@ -451,7 +451,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { /// equal to the given multiset. pub fn subsetOf(self: Self, other: Self) bool { inline for (@typeInfo(E).Enum.fields) |field| { - const key = @enumFromInt(E, field.value); + const key = @as(E, @enumFromInt(field.value)); if (self.getCount(key) > other.getCount(key)) { return false; } @@ -463,7 +463,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { /// equal to the given multiset. 
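The std.enums hunks above apply the same mechanical rewrite to enum conversions: @enumFromInt(E, x) becomes @as(E, @enumFromInt(x)), and integer indexes previously produced with @intCast(usize, @intFromEnum(e)) become @as(usize, @intCast(@intFromEnum(e))). A small sketch of the pattern with an illustrative enum (not one from the standard library):

const std = @import("std");

const Color = enum(u8) { red, green, blue, _ };

test "enum cast rewrites" {
    const raw: u8 = 2;
    // Old: @enumFromInt(Color, raw); the enum type now comes from @as
    // (or any other known result type).
    const c = @as(Color, @enumFromInt(raw));
    try std.testing.expect(c == .blue);
    // Old: @intCast(usize, @intFromEnum(c))
    const index = @as(usize, @intCast(@intFromEnum(c)));
    try std.testing.expectEqual(@as(usize, 2), index);
}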
pub fn supersetOf(self: Self, other: Self) bool { inline for (@typeInfo(E).Enum.fields) |field| { - const key = @enumFromInt(E, field.value); + const key = @as(E, @enumFromInt(field.value)); if (self.getCount(key) < other.getCount(key)) { return false; } @@ -1281,10 +1281,10 @@ test "std.enums.ensureIndexer" { pub const Key = u32; pub const count: usize = 8; pub fn indexOf(k: Key) usize { - return @intCast(usize, k); + return @as(usize, @intCast(k)); } pub fn keyForIndex(index: usize) Key { - return @intCast(Key, index); + return @as(Key, @intCast(index)); } }); } @@ -1323,14 +1323,14 @@ pub fn EnumIndexer(comptime E: type) type { pub const Key = E; pub const count = fields_len; pub fn indexOf(e: E) usize { - return @intCast(usize, @intFromEnum(e) - min); + return @as(usize, @intCast(@intFromEnum(e) - min)); } pub fn keyForIndex(i: usize) E { // TODO fix addition semantics. This calculation // gives up some safety to avoid artificially limiting // the range of signed enum values to max_isize. - const enum_value = if (min < 0) @bitCast(isize, i) +% min else i + min; - return @enumFromInt(E, @intCast(std.meta.Tag(E), enum_value)); + const enum_value = if (min < 0) @as(isize, @bitCast(i)) +% min else i + min; + return @as(E, @enumFromInt(@as(std.meta.Tag(E), @intCast(enum_value)))); } }; } diff --git a/lib/std/event/lock.zig b/lib/std/event/lock.zig index 9da3943d5d..8608298c29 100644 --- a/lib/std/event/lock.zig +++ b/lib/std/event/lock.zig @@ -55,7 +55,7 @@ pub const Lock = struct { const head = switch (self.head) { UNLOCKED => unreachable, LOCKED => null, - else => @ptrFromInt(*Waiter, self.head), + else => @as(*Waiter, @ptrFromInt(self.head)), }; if (head) |h| { @@ -102,7 +102,7 @@ pub const Lock = struct { break :blk null; }, else => { - const waiter = @ptrFromInt(*Waiter, self.lock.head); + const waiter = @as(*Waiter, @ptrFromInt(self.lock.head)); self.lock.head = if (waiter.next == null) LOCKED else @intFromPtr(waiter.next); if (waiter.next) |next| next.tail = waiter.tail; @@ -130,7 +130,7 @@ test "std.event.Lock" { var lock = Lock{}; testLock(&lock); - const expected_result = [1]i32{3 * @intCast(i32, shared_test_data.len)} ** shared_test_data.len; + const expected_result = [1]i32{3 * @as(i32, @intCast(shared_test_data.len))} ** shared_test_data.len; try testing.expectEqualSlices(i32, &expected_result, &shared_test_data); } fn testLock(lock: *Lock) void { diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig index 7eec26a2b1..b5021a5378 100644 --- a/lib/std/event/loop.zig +++ b/lib/std/event/loop.zig @@ -556,7 +556,7 @@ pub const Loop = struct { self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.IN); }, .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => { - self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_READ, os.system.EV_ONESHOT); + self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_READ, os.system.EV_ONESHOT); }, else => @compileError("Unsupported OS"), } @@ -568,7 +568,7 @@ pub const Loop = struct { self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.OUT); }, .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => { - self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_WRITE, os.system.EV_ONESHOT); + self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_WRITE, os.system.EV_ONESHOT); }, else => @compileError("Unsupported OS"), } @@ -580,8 +580,8 @@ pub const Loop = struct { self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.OUT 
| os.linux.EPOLL.IN); }, .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => { - self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_READ, os.system.EV_ONESHOT); - self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_WRITE, os.system.EV_ONESHOT); + self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_READ, os.system.EV_ONESHOT); + self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_WRITE, os.system.EV_ONESHOT); }, else => @compileError("Unsupported OS"), } @@ -1415,7 +1415,7 @@ pub const Loop = struct { var events: [1]os.linux.epoll_event = undefined; const count = os.epoll_wait(self.os_data.epollfd, events[0..], -1); for (events[0..count]) |ev| { - const resume_node = @ptrFromInt(*ResumeNode, ev.data.ptr); + const resume_node = @as(*ResumeNode, @ptrFromInt(ev.data.ptr)); const handle = resume_node.handle; const resume_node_id = resume_node.id; switch (resume_node_id) { @@ -1439,7 +1439,7 @@ pub const Loop = struct { const empty_kevs = &[0]os.Kevent{}; const count = os.kevent(self.os_data.kqfd, empty_kevs, eventlist[0..], null) catch unreachable; for (eventlist[0..count]) |ev| { - const resume_node = @ptrFromInt(*ResumeNode, ev.udata); + const resume_node = @as(*ResumeNode, @ptrFromInt(ev.udata)); const handle = resume_node.handle; const resume_node_id = resume_node.id; switch (resume_node_id) { diff --git a/lib/std/event/rwlock.zig b/lib/std/event/rwlock.zig index c19330d5a9..47ddf74fd5 100644 --- a/lib/std/event/rwlock.zig +++ b/lib/std/event/rwlock.zig @@ -223,7 +223,7 @@ test "std.event.RwLock" { _ = testLock(std.heap.page_allocator, &lock); - const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len; + const expected_result = [1]i32{shared_it_count * @as(i32, @intCast(shared_test_data.len))} ** shared_test_data.len; try testing.expectEqualSlices(i32, expected_result, shared_test_data); } fn testLock(allocator: Allocator, lock: *RwLock) callconv(.Async) void { @@ -244,12 +244,12 @@ fn testLock(allocator: Allocator, lock: *RwLock) callconv(.Async) void { } for (write_nodes) |*write_node| { - const casted = @ptrCast(*const @Frame(writeRunner), write_node.data); + const casted = @as(*const @Frame(writeRunner), @ptrCast(write_node.data)); await casted; allocator.destroy(casted); } for (read_nodes) |*read_node| { - const casted = @ptrCast(*const @Frame(readRunner), read_node.data); + const casted = @as(*const @Frame(readRunner), @ptrCast(read_node.data)); await casted; allocator.destroy(casted); } @@ -287,6 +287,6 @@ fn readRunner(lock: *RwLock) callconv(.Async) void { defer handle.release(); try testing.expect(shared_test_index == 0); - try testing.expect(shared_test_data[i] == @intCast(i32, shared_count)); + try testing.expect(shared_test_data[i] == @as(i32, @intCast(shared_count))); } } diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index d983aba369..7af21c86df 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -396,7 +396,7 @@ pub const ArgState = struct { } // Mark this argument as used - self.used_args |= @as(ArgSetType, 1) << @intCast(u5, next_index); + self.used_args |= @as(ArgSetType, 1) << @as(u5, @intCast(next_index)); return next_index; } }; @@ -1056,7 +1056,7 @@ pub fn formatFloatScientific( options: FormatOptions, writer: anytype, ) !void { - var x = @floatCast(f64, value); + var x = @as(f64, @floatCast(value)); // Errol doesn't handle these special cases. 
     if (math.signbit(x)) {
@@ -1167,9 +1167,9 @@ pub fn formatFloatHexadecimal(
     const exponent_mask = (1 << exponent_bits) - 1;
     const exponent_bias = (1 << (exponent_bits - 1)) - 1;
 
-    const as_bits = @bitCast(TU, value);
+    const as_bits = @as(TU, @bitCast(value));
     var mantissa = as_bits & mantissa_mask;
-    var exponent: i32 = @truncate(u16, (as_bits >> mantissa_bits) & exponent_mask);
+    var exponent: i32 = @as(u16, @truncate((as_bits >> mantissa_bits) & exponent_mask));
 
     const is_denormal = exponent == 0 and mantissa != 0;
     const is_zero = exponent == 0 and mantissa == 0;
@@ -1218,7 +1218,7 @@ pub fn formatFloatHexadecimal(
         // Drop the excess bits.
         mantissa >>= 2;
         // Restore the alignment.
-        mantissa <<= @intCast(math.Log2Int(TU), (mantissa_digits - precision) * 4);
+        mantissa <<= @as(math.Log2Int(TU), @intCast((mantissa_digits - precision) * 4));
 
         const overflow = mantissa & (1 << 1 + mantissa_digits * 4) != 0;
         // Prefer a normalized result in case of overflow.
@@ -1296,7 +1296,7 @@ pub fn formatFloatDecimal(
         errol.roundToPrecision(&float_decimal, precision, errol.RoundMode.Decimal);
 
         // exp < 0 means the leading is always 0 as errol result is normalized.
-        var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0;
+        var num_digits_whole = if (float_decimal.exp > 0) @as(usize, @intCast(float_decimal.exp)) else 0;
 
         // the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this.
         var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len);
@@ -1325,7 +1325,7 @@ pub fn formatFloatDecimal(
 
         // Zero-fill until we reach significant digits or run out of precision.
         if (float_decimal.exp <= 0) {
-            const zero_digit_count = @intCast(usize, -float_decimal.exp);
+            const zero_digit_count = @as(usize, @intCast(-float_decimal.exp));
             const zeros_to_print = @min(zero_digit_count, precision);
 
             var i: usize = 0;
@@ -1354,7 +1354,7 @@
         }
     } else {
         // exp < 0 means the leading is always 0 as errol result is normalized.
-        var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0;
+        var num_digits_whole = if (float_decimal.exp > 0) @as(usize, @intCast(float_decimal.exp)) else 0;
 
         // the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this.
         var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len);
@@ -1380,7 +1380,7 @@ pub fn formatFloatDecimal(
 
         // Zero-fill until we reach significant digits or run out of precision.
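One detail in the formatFloatHexadecimal hunk above is worth noting: the rewritten line keeps an inner @as(u16, ...) even though the declaration already names a type, because the @as pins the truncation width to 16 bits, matching the old @truncate(u16, ...), before the value widens to i32. A reduced sketch of that behaviour, with illustrative values:

const std = @import("std");

test "truncation width is part of the cast" {
    const bits: u64 = 0x1_0005;
    // Old: @truncate(u16, bits); the inner @as keeps the 16-bit width, and the
    // declared i32 only widens the already-truncated value.
    const exponent: i32 = @as(u16, @truncate(bits));
    try std.testing.expectEqual(@as(i32, 5), exponent);
}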
if (float_decimal.exp < 0) { - const zero_digit_count = @intCast(usize, -float_decimal.exp); + const zero_digit_count = @as(usize, @intCast(-float_decimal.exp)); var i: usize = 0; while (i < zero_digit_count) : (i += 1) { @@ -1423,21 +1423,21 @@ pub fn formatInt( if (base == 10) { while (a >= 100) : (a = @divTrunc(a, 100)) { index -= 2; - buf[index..][0..2].* = digits2(@intCast(usize, a % 100)); + buf[index..][0..2].* = digits2(@as(usize, @intCast(a % 100))); } if (a < 10) { index -= 1; - buf[index] = '0' + @intCast(u8, a); + buf[index] = '0' + @as(u8, @intCast(a)); } else { index -= 2; - buf[index..][0..2].* = digits2(@intCast(usize, a)); + buf[index..][0..2].* = digits2(@as(usize, @intCast(a))); } } else { while (true) { const digit = a % base; index -= 1; - buf[index] = digitToChar(@intCast(u8, digit), case); + buf[index] = digitToChar(@as(u8, @intCast(digit)), case); a /= base; if (a == 0) break; } @@ -1595,10 +1595,10 @@ test "fmtDuration" { fn formatDurationSigned(ns: i64, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { if (ns < 0) { - const data = FormatDurationData{ .ns = @intCast(u64, -ns), .negative = true }; + const data = FormatDurationData{ .ns = @as(u64, @intCast(-ns)), .negative = true }; try formatDuration(data, fmt, options, writer); } else { - const data = FormatDurationData{ .ns = @intCast(u64, ns) }; + const data = FormatDurationData{ .ns = @as(u64, @intCast(ns)) }; try formatDuration(data, fmt, options, writer); } } @@ -1846,7 +1846,7 @@ fn parseWithSign( // The first digit of a negative number. // Consider parsing "-4" as an i3. // This should work, but positive 4 overflows i3, so we can't cast the digit to T and subtract. - x = math.cast(T, -@intCast(i8, digit)) orelse return error.Overflow; + x = math.cast(T, -@as(i8, @intCast(digit))) orelse return error.Overflow; continue; } x = try add(T, x, math.cast(T, digit) orelse return error.Overflow); @@ -2099,7 +2099,7 @@ test "optional" { try expectFmt("optional: null\n", "optional: {?}\n", .{value}); } { - const value = @ptrFromInt(?*i32, 0xf000d000); + const value = @as(?*i32, @ptrFromInt(0xf000d000)); try expectFmt("optional: *i32@f000d000\n", "optional: {*}\n", .{value}); } } @@ -2218,7 +2218,7 @@ test "slice" { } { var runtime_zero: usize = 0; - const value = @ptrFromInt([*]align(1) const []const u8, 0xdeadbeef)[runtime_zero..runtime_zero]; + const value = @as([*]align(1) const []const u8, @ptrFromInt(0xdeadbeef))[runtime_zero..runtime_zero]; try expectFmt("slice: []const u8@deadbeef\n", "slice: {*}\n", .{value}); } { @@ -2248,17 +2248,17 @@ test "escape non-printable" { test "pointer" { { - const value = @ptrFromInt(*align(1) i32, 0xdeadbeef); + const value = @as(*align(1) i32, @ptrFromInt(0xdeadbeef)); try expectFmt("pointer: i32@deadbeef\n", "pointer: {}\n", .{value}); try expectFmt("pointer: i32@deadbeef\n", "pointer: {*}\n", .{value}); } const FnPtr = *align(1) const fn () void; { - const value = @ptrFromInt(FnPtr, 0xdeadbeef); + const value = @as(FnPtr, @ptrFromInt(0xdeadbeef)); try expectFmt("pointer: fn() void@deadbeef\n", "pointer: {}\n", .{value}); } { - const value = @ptrFromInt(FnPtr, 0xdeadbeef); + const value = @as(FnPtr, @ptrFromInt(0xdeadbeef)); try expectFmt("pointer: fn() void@deadbeef\n", "pointer: {}\n", .{value}); } } @@ -2267,12 +2267,12 @@ test "cstr" { try expectFmt( "cstr: Test C\n", "cstr: {s}\n", - .{@ptrCast([*c]const u8, "Test C")}, + .{@as([*c]const u8, @ptrCast("Test C"))}, ); try expectFmt( "cstr: Test C\n", "cstr: {s:10}\n", - .{@ptrCast([*c]const 
u8, "Test C")}, + .{@as([*c]const u8, @ptrCast("Test C"))}, ); } @@ -2360,11 +2360,11 @@ test "non-exhaustive enum" { }; try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.One\n", "enum: {}\n", .{Enum.One}); try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.Two\n", "enum: {}\n", .{Enum.Two}); - try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(4660)\n", "enum: {}\n", .{@enumFromInt(Enum, 0x1234)}); + try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(4660)\n", "enum: {}\n", .{@as(Enum, @enumFromInt(0x1234))}); try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.One\n", "enum: {x}\n", .{Enum.One}); try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.Two\n", "enum: {x}\n", .{Enum.Two}); try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.Two\n", "enum: {X}\n", .{Enum.Two}); - try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(1234)\n", "enum: {x}\n", .{@enumFromInt(Enum, 0x1234)}); + try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(1234)\n", "enum: {x}\n", .{@as(Enum, @enumFromInt(0x1234))}); } test "float.scientific" { @@ -2376,11 +2376,11 @@ test "float.scientific" { test "float.scientific.precision" { try expectFmt("f64: 1.40971e-42", "f64: {e:.5}", .{@as(f64, 1.409706e-42)}); - try expectFmt("f64: 1.00000e-09", "f64: {e:.5}", .{@as(f64, @bitCast(f32, @as(u32, 814313563)))}); - try expectFmt("f64: 7.81250e-03", "f64: {e:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1006632960)))}); + try expectFmt("f64: 1.00000e-09", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 814313563))))}); + try expectFmt("f64: 7.81250e-03", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1006632960))))}); // libc rounds 1.000005e+05 to 1.00000e+05 but zig does 1.00001e+05. // In fact, libc doesn't round a lot of 5 cases up when one past the precision point. - try expectFmt("f64: 1.00001e+05", "f64: {e:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1203982400)))}); + try expectFmt("f64: 1.00001e+05", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1203982400))))}); } test "float.special" { @@ -2472,22 +2472,22 @@ test "float.decimal" { } test "float.libc.sanity" { - try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 916964781)))}); - try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 925353389)))}); - try expectFmt("f64: 0.10000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1036831278)))}); - try expectFmt("f64: 1.00000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1065353133)))}); - try expectFmt("f64: 10.00000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1092616192)))}); + try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 916964781))))}); + try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 925353389))))}); + try expectFmt("f64: 0.10000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1036831278))))}); + try expectFmt("f64: 1.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1065353133))))}); + try expectFmt("f64: 10.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1092616192))))}); // libc differences // // This is 0.015625 exactly according to gdb. We thus round down, // however glibc rounds up for some reason. This occurs for all // floats of the form x.yyyy25 on a precision point. - try expectFmt("f64: 0.01563", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1015021568)))}); + try expectFmt("f64: 0.01563", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1015021568))))}); // errol3 rounds to ... 
630 but libc rounds to ...632. Grisu3 // also rounds to 630 so I'm inclined to believe libc is not // optimal here. - try expectFmt("f64: 18014400656965630.00000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1518338049)))}); + try expectFmt("f64: 18014400656965630.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1518338049))))}); } test "custom" { diff --git a/lib/std/fmt/errol.zig b/lib/std/fmt/errol.zig index b438733589..af686d6448 100644 --- a/lib/std/fmt/errol.zig +++ b/lib/std/fmt/errol.zig @@ -29,11 +29,11 @@ pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: Ro switch (mode) { RoundMode.Decimal => { if (float_decimal.exp >= 0) { - round_digit = precision + @intCast(usize, float_decimal.exp); + round_digit = precision + @as(usize, @intCast(float_decimal.exp)); } else { // if a small negative exp, then adjust we need to offset by the number // of leading zeros that will occur. - const min_exp_required = @intCast(usize, -float_decimal.exp); + const min_exp_required = @as(usize, @intCast(-float_decimal.exp)); if (precision > min_exp_required) { round_digit = precision - min_exp_required; } @@ -59,7 +59,7 @@ pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: Ro float_decimal.exp += 1; // Re-size the buffer to use the reserved leading byte. - const one_before = @ptrFromInt([*]u8, @intFromPtr(&float_decimal.digits[0]) - 1); + const one_before = @as([*]u8, @ptrFromInt(@intFromPtr(&float_decimal.digits[0]) - 1)); float_decimal.digits = one_before[0 .. float_decimal.digits.len + 1]; float_decimal.digits[0] = '1'; return; @@ -80,7 +80,7 @@ pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: Ro /// Corrected Errol3 double to ASCII conversion. pub fn errol3(value: f64, buffer: []u8) FloatDecimal { - const bits = @bitCast(u64, value); + const bits = @as(u64, @bitCast(value)); const i = tableLowerBound(bits); if (i < enum3.len and enum3[i] == bits) { const data = enum3_data[i]; @@ -113,16 +113,16 @@ fn errolSlow(val: f64, buffer: []u8) FloatDecimal { // normalize the midpoint const e = math.frexp(val).exponent; - var exp = @intFromFloat(i16, @floor(307 + @floatFromInt(f64, e) * 0.30103)); + var exp = @as(i16, @intFromFloat(@floor(307 + @as(f64, @floatFromInt(e)) * 0.30103))); if (exp < 20) { exp = 20; - } else if (@intCast(usize, exp) >= lookup_table.len) { - exp = @intCast(i16, lookup_table.len - 1); + } else if (@as(usize, @intCast(exp)) >= lookup_table.len) { + exp = @as(i16, @intCast(lookup_table.len - 1)); } - var mid = lookup_table[@intCast(usize, exp)]; + var mid = lookup_table[@as(usize, @intCast(exp))]; mid = hpProd(mid, val); - const lten = lookup_table[@intCast(usize, exp)].val; + const lten = lookup_table[@as(usize, @intCast(exp))].val; exp -= 307; @@ -171,25 +171,25 @@ fn errolSlow(val: f64, buffer: []u8) FloatDecimal { var buf_index: usize = 0; const bound = buffer.len - 1; while (buf_index < bound) { - var hdig = @intFromFloat(u8, @floor(high.val)); - if ((high.val == @floatFromInt(f64, hdig)) and (high.off < 0)) hdig -= 1; + var hdig = @as(u8, @intFromFloat(@floor(high.val))); + if ((high.val == @as(f64, @floatFromInt(hdig))) and (high.off < 0)) hdig -= 1; - var ldig = @intFromFloat(u8, @floor(low.val)); - if ((low.val == @floatFromInt(f64, ldig)) and (low.off < 0)) ldig -= 1; + var ldig = @as(u8, @intFromFloat(@floor(low.val))); + if ((low.val == @as(f64, @floatFromInt(ldig))) and (low.off < 0)) ldig -= 1; if (ldig != hdig) break; buffer[buf_index] = hdig + '0'; buf_index += 1; - 
high.val -= @floatFromInt(f64, hdig); - low.val -= @floatFromInt(f64, ldig); + high.val -= @as(f64, @floatFromInt(hdig)); + low.val -= @as(f64, @floatFromInt(ldig)); hpMul10(&high); hpMul10(&low); } const tmp = (high.val + low.val) / 2.0; - var mdig = @intFromFloat(u8, @floor(tmp + 0.5)); - if ((@floatFromInt(f64, mdig) - tmp) == 0.5 and (mdig & 0x1) != 0) mdig -= 1; + var mdig = @as(u8, @intFromFloat(@floor(tmp + 0.5))); + if ((@as(f64, @floatFromInt(mdig)) - tmp) == 0.5 and (mdig & 0x1) != 0) mdig -= 1; buffer[buf_index] = mdig + '0'; buf_index += 1; @@ -248,9 +248,9 @@ fn split(val: f64, hi: *f64, lo: *f64) void { } fn gethi(in: f64) f64 { - const bits = @bitCast(u64, in); + const bits = @as(u64, @bitCast(in)); const new_bits = bits & 0xFFFFFFFFF8000000; - return @bitCast(f64, new_bits); + return @as(f64, @bitCast(new_bits)); } /// Normalize the number by factoring in the error. @@ -303,21 +303,21 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal { assert((val > 9.007199254740992e15) and val < (3.40282366920938e38)); - var mid = @intFromFloat(u128, val); + var mid = @as(u128, @intFromFloat(val)); var low: u128 = mid - fpeint((fpnext(val) - val) / 2.0); var high: u128 = mid + fpeint((val - fpprev(val)) / 2.0); - if (@bitCast(u64, val) & 0x1 != 0) { + if (@as(u64, @bitCast(val)) & 0x1 != 0) { high -= 1; } else { low -= 1; } - var l64 = @intCast(u64, low % pow19); - const lf = @intCast(u64, (low / pow19) % pow19); + var l64 = @as(u64, @intCast(low % pow19)); + const lf = @as(u64, @intCast((low / pow19) % pow19)); - var h64 = @intCast(u64, high % pow19); - const hf = @intCast(u64, (high / pow19) % pow19); + var h64 = @as(u64, @intCast(high % pow19)); + const hf = @as(u64, @intCast((high / pow19) % pow19)); if (lf != hf) { l64 = lf; @@ -333,7 +333,7 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal { x *= 10; } } - const m64 = @truncate(u64, @divTrunc(mid, x)); + const m64 = @as(u64, @truncate(@divTrunc(mid, x))); if (lf != hf) mi += 19; @@ -349,7 +349,7 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal { return FloatDecimal{ .digits = buffer[0..buf_index], - .exp = @intCast(i32, buf_index) + mi, + .exp = @as(i32, @intCast(buf_index)) + mi, }; } @@ -360,33 +360,33 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal { fn errolFixed(val: f64, buffer: []u8) FloatDecimal { assert((val >= 16.0) and (val < 9.007199254740992e15)); - const u = @intFromFloat(u64, val); - const n = @floatFromInt(f64, u); + const u = @as(u64, @intFromFloat(val)); + const n = @as(f64, @floatFromInt(u)); var mid = val - n; var lo = ((fpprev(val) - n) + mid) / 2.0; var hi = ((fpnext(val) - n) + mid) / 2.0; var buf_index = u64toa(u, buffer); - var exp = @intCast(i32, buf_index); + var exp = @as(i32, @intCast(buf_index)); var j = buf_index; buffer[j] = 0; if (mid != 0.0) { while (mid != 0.0) { lo *= 10.0; - const ldig = @intFromFloat(i32, lo); - lo -= @floatFromInt(f64, ldig); + const ldig = @as(i32, @intFromFloat(lo)); + lo -= @as(f64, @floatFromInt(ldig)); mid *= 10.0; - const mdig = @intFromFloat(i32, mid); - mid -= @floatFromInt(f64, mdig); + const mdig = @as(i32, @intFromFloat(mid)); + mid -= @as(f64, @floatFromInt(mdig)); hi *= 10.0; - const hdig = @intFromFloat(i32, hi); - hi -= @floatFromInt(f64, hdig); + const hdig = @as(i32, @intFromFloat(hi)); + hi -= @as(f64, @floatFromInt(hdig)); - buffer[j] = @intCast(u8, mdig + '0'); + buffer[j] = @as(u8, @intCast(mdig + '0')); j += 1; if (hdig != ldig or j > 50) break; @@ -413,11 +413,11 @@ fn errolFixed(val: f64, buffer: []u8) FloatDecimal { } fn fpnext(val: f64) f64 
{ - return @bitCast(f64, @bitCast(u64, val) +% 1); + return @as(f64, @bitCast(@as(u64, @bitCast(val)) +% 1)); } fn fpprev(val: f64) f64 { - return @bitCast(f64, @bitCast(u64, val) -% 1); + return @as(f64, @bitCast(@as(u64, @bitCast(val)) -% 1)); } pub const c_digits_lut = [_]u8{ @@ -453,7 +453,7 @@ fn u64toa(value_param: u64, buffer: []u8) usize { var buf_index: usize = 0; if (value < kTen8) { - const v = @intCast(u32, value); + const v = @as(u32, @intCast(value)); if (v < 10000) { const d1: u32 = (v / 100) << 1; const d2: u32 = (v % 100) << 1; @@ -508,8 +508,8 @@ fn u64toa(value_param: u64, buffer: []u8) usize { buf_index += 1; } } else if (value < kTen16) { - const v0: u32 = @intCast(u32, value / kTen8); - const v1: u32 = @intCast(u32, value % kTen8); + const v0: u32 = @as(u32, @intCast(value / kTen8)); + const v1: u32 = @as(u32, @intCast(value % kTen8)); const b0: u32 = v0 / 10000; const c0: u32 = v0 % 10000; @@ -579,11 +579,11 @@ fn u64toa(value_param: u64, buffer: []u8) usize { buffer[buf_index] = c_digits_lut[d8 + 1]; buf_index += 1; } else { - const a = @intCast(u32, value / kTen16); // 1 to 1844 + const a = @as(u32, @intCast(value / kTen16)); // 1 to 1844 value %= kTen16; if (a < 10) { - buffer[buf_index] = '0' + @intCast(u8, a); + buffer[buf_index] = '0' + @as(u8, @intCast(a)); buf_index += 1; } else if (a < 100) { const i: u32 = a << 1; @@ -592,7 +592,7 @@ fn u64toa(value_param: u64, buffer: []u8) usize { buffer[buf_index] = c_digits_lut[i + 1]; buf_index += 1; } else if (a < 1000) { - buffer[buf_index] = '0' + @intCast(u8, a / 100); + buffer[buf_index] = '0' + @as(u8, @intCast(a / 100)); buf_index += 1; const i: u32 = (a % 100) << 1; @@ -613,8 +613,8 @@ fn u64toa(value_param: u64, buffer: []u8) usize { buf_index += 1; } - const v0 = @intCast(u32, value / kTen8); - const v1 = @intCast(u32, value % kTen8); + const v0 = @as(u32, @intCast(value / kTen8)); + const v1 = @as(u32, @intCast(value % kTen8)); const b0: u32 = v0 / 10000; const c0: u32 = v0 % 10000; @@ -672,10 +672,10 @@ fn u64toa(value_param: u64, buffer: []u8) usize { } fn fpeint(from: f64) u128 { - const bits = @bitCast(u64, from); + const bits = @as(u64, @bitCast(from)); assert((bits & ((1 << 52) - 1)) == 0); - return @as(u128, 1) << @truncate(u7, (bits >> 52) -% 1023); + return @as(u128, 1) << @as(u7, @truncate((bits >> 52) -% 1023)); } /// Given two different integers with the same length in terms of the number diff --git a/lib/std/fmt/parse_float.zig b/lib/std/fmt/parse_float.zig index b14fe5ca3c..98fbe28032 100644 --- a/lib/std/fmt/parse_float.zig +++ b/lib/std/fmt/parse_float.zig @@ -78,7 +78,7 @@ test "fmt.parseFloat nan and inf" { inline for ([_]type{ f16, f32, f64, f128 }) |T| { const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); - try expectEqual(@bitCast(Z, try parseFloat(T, "nAn")), @bitCast(Z, std.math.nan(T))); + try expectEqual(@as(Z, @bitCast(try parseFloat(T, "nAn"))), @as(Z, @bitCast(std.math.nan(T)))); try expectEqual(try parseFloat(T, "inF"), std.math.inf(T)); try expectEqual(try parseFloat(T, "-INF"), -std.math.inf(T)); } diff --git a/lib/std/fmt/parse_float/common.zig b/lib/std/fmt/parse_float/common.zig index c1b34a081b..8dba3b4498 100644 --- a/lib/std/fmt/parse_float/common.zig +++ b/lib/std/fmt/parse_float/common.zig @@ -32,7 +32,7 @@ pub fn BiasedFp(comptime T: type) type { pub fn toFloat(self: Self, comptime FloatT: type, negative: bool) FloatT { var word = self.f; - word |= @intCast(MantissaT, self.e) << std.math.floatMantissaBits(FloatT); + word |= @as(MantissaT, @intCast(self.e)) << 
std.math.floatMantissaBits(FloatT); var f = floatFromUnsigned(FloatT, MantissaT, word); if (negative) f = -f; return f; @@ -42,10 +42,10 @@ pub fn BiasedFp(comptime T: type) type { pub fn floatFromUnsigned(comptime T: type, comptime MantissaT: type, v: MantissaT) T { return switch (T) { - f16 => @bitCast(f16, @truncate(u16, v)), - f32 => @bitCast(f32, @truncate(u32, v)), - f64 => @bitCast(f64, @truncate(u64, v)), - f128 => @bitCast(f128, v), + f16 => @as(f16, @bitCast(@as(u16, @truncate(v)))), + f32 => @as(f32, @bitCast(@as(u32, @truncate(v)))), + f64 => @as(f64, @bitCast(@as(u64, @truncate(v)))), + f128 => @as(f128, @bitCast(v)), else => unreachable, }; } diff --git a/lib/std/fmt/parse_float/convert_eisel_lemire.zig b/lib/std/fmt/parse_float/convert_eisel_lemire.zig index 5c49553a14..6831a308ea 100644 --- a/lib/std/fmt/parse_float/convert_eisel_lemire.zig +++ b/lib/std/fmt/parse_float/convert_eisel_lemire.zig @@ -36,7 +36,7 @@ pub fn convertEiselLemire(comptime T: type, q: i64, w_: u64) ?BiasedFp(f64) { } // Normalize our significant digits, so the most-significant bit is set. - const lz = @clz(@bitCast(u64, w)); + const lz = @clz(@as(u64, @bitCast(w))); w = math.shl(u64, w, lz); const r = computeProductApprox(q, w, float_info.mantissa_explicit_bits + 3); @@ -62,9 +62,9 @@ pub fn convertEiselLemire(comptime T: type, q: i64, w_: u64) ?BiasedFp(f64) { } } - const upper_bit = @intCast(i32, r.hi >> 63); - var mantissa = math.shr(u64, r.hi, upper_bit + 64 - @intCast(i32, float_info.mantissa_explicit_bits) - 3); - var power2 = power(@intCast(i32, q)) + upper_bit - @intCast(i32, lz) - float_info.minimum_exponent; + const upper_bit = @as(i32, @intCast(r.hi >> 63)); + var mantissa = math.shr(u64, r.hi, upper_bit + 64 - @as(i32, @intCast(float_info.mantissa_explicit_bits)) - 3); + var power2 = power(@as(i32, @intCast(q))) + upper_bit - @as(i32, @intCast(lz)) - float_info.minimum_exponent; if (power2 <= 0) { if (-power2 + 1 >= 64) { // Have more than 64 bits below the minimum exponent, must be 0. @@ -93,7 +93,7 @@ pub fn convertEiselLemire(comptime T: type, q: i64, w_: u64) ?BiasedFp(f64) { q >= float_info.min_exponent_round_to_even and q <= float_info.max_exponent_round_to_even and mantissa & 3 == 1 and - math.shl(u64, mantissa, (upper_bit + 64 - @intCast(i32, float_info.mantissa_explicit_bits) - 3)) == r.hi) + math.shl(u64, mantissa, (upper_bit + 64 - @as(i32, @intCast(float_info.mantissa_explicit_bits)) - 3)) == r.hi) { // Zero the lowest bit, so we don't round up. mantissa &= ~@as(u64, 1); @@ -139,8 +139,8 @@ const U128 = struct { pub fn mul(a: u64, b: u64) U128 { const x = @as(u128, a) * b; return .{ - .hi = @truncate(u64, x >> 64), - .lo = @truncate(u64, x), + .hi = @as(u64, @truncate(x >> 64)), + .lo = @as(u64, @truncate(x)), }; } }; @@ -161,7 +161,7 @@ fn computeProductApprox(q: i64, w: u64, comptime precision: usize) U128 { // 5^q < 2^64, then the multiplication always provides an exact value. // That means whenever we need to round ties to even, we always have // an exact value. 
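The floatFromUnsigned rewrite above shows how nested two-argument casts map onto the new prefix form: each old @bitCast(T, e) or @truncate(T, e) becomes @as(T, @bitCast(e)) or @as(T, @truncate(e)), and the chain still reads inside-out. A self-contained sketch of the u64-to-f32 path, with an illustrative bit pattern:

const std = @import("std");

test "nested bit-pattern casts read inside-out" {
    // IEEE-754 bit pattern of 1.0 as an f32, held in a wider integer.
    const word: u64 = 0x3f80_0000;
    // Old: @bitCast(f32, @truncate(u32, word))
    // New: each cast names only its operand; @as supplies the result types.
    const x = @as(f32, @bitCast(@as(u32, @truncate(word))));
    try std.testing.expectEqual(@as(f32, 1.0), x);
}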
- const index = @intCast(usize, q - @intCast(i64, eisel_lemire_smallest_power_of_five)); + const index = @as(usize, @intCast(q - @as(i64, @intCast(eisel_lemire_smallest_power_of_five)))); const pow5 = eisel_lemire_table_powers_of_five_128[index]; // Only need one multiplication as long as there is 1 zero but diff --git a/lib/std/fmt/parse_float/convert_fast.zig b/lib/std/fmt/parse_float/convert_fast.zig index 2124e436ab..a148d3946f 100644 --- a/lib/std/fmt/parse_float/convert_fast.zig +++ b/lib/std/fmt/parse_float/convert_fast.zig @@ -108,19 +108,19 @@ pub fn convertFast(comptime T: type, n: Number(T)) ?T { var value: T = 0; if (n.exponent <= info.max_exponent_fast_path) { // normal fast path - value = @floatFromInt(T, n.mantissa); + value = @as(T, @floatFromInt(n.mantissa)); value = if (n.exponent < 0) - value / fastPow10(T, @intCast(usize, -n.exponent)) + value / fastPow10(T, @as(usize, @intCast(-n.exponent))) else - value * fastPow10(T, @intCast(usize, n.exponent)); + value * fastPow10(T, @as(usize, @intCast(n.exponent))); } else { // disguised fast path const shift = n.exponent - info.max_exponent_fast_path; - const mantissa = math.mul(MantissaT, n.mantissa, fastIntPow10(MantissaT, @intCast(usize, shift))) catch return null; + const mantissa = math.mul(MantissaT, n.mantissa, fastIntPow10(MantissaT, @as(usize, @intCast(shift)))) catch return null; if (mantissa > info.max_mantissa_fast_path) { return null; } - value = @floatFromInt(T, mantissa) * fastPow10(T, info.max_exponent_fast_path); + value = @as(T, @floatFromInt(mantissa)) * fastPow10(T, info.max_exponent_fast_path); } if (n.negative) { diff --git a/lib/std/fmt/parse_float/convert_hex.zig b/lib/std/fmt/parse_float/convert_hex.zig index 3b3f797216..815331347c 100644 --- a/lib/std/fmt/parse_float/convert_hex.zig +++ b/lib/std/fmt/parse_float/convert_hex.zig @@ -81,7 +81,7 @@ pub fn convertHex(comptime T: type, n_: Number(T)) T { } var bits = n.mantissa & ((1 << mantissa_bits) - 1); - bits |= @intCast(MantissaT, (n.exponent - exp_bias) & ((1 << exp_bits) - 1)) << mantissa_bits; + bits |= @as(MantissaT, @intCast((n.exponent - exp_bias) & ((1 << exp_bits) - 1))) << mantissa_bits; if (n.negative) { bits |= 1 << (mantissa_bits + exp_bits); } diff --git a/lib/std/fmt/parse_float/convert_slow.zig b/lib/std/fmt/parse_float/convert_slow.zig index 225a1e208c..53cb12ef13 100644 --- a/lib/std/fmt/parse_float/convert_slow.zig +++ b/lib/std/fmt/parse_float/convert_slow.zig @@ -48,13 +48,13 @@ pub fn convertSlow(comptime T: type, s: []const u8) BiasedFp(T) { var exp2: i32 = 0; // Shift right toward (1/2 .. 1] while (d.decimal_point > 0) { - const n = @intCast(usize, d.decimal_point); + const n = @as(usize, @intCast(d.decimal_point)); const shift = getShift(n); d.rightShift(shift); if (d.decimal_point < -Decimal(T).decimal_point_range) { return BiasedFp(T).zero(); } - exp2 += @intCast(i32, shift); + exp2 += @as(i32, @intCast(shift)); } // Shift left toward (1/2 .. 1] while (d.decimal_point <= 0) { @@ -66,7 +66,7 @@ pub fn convertSlow(comptime T: type, s: []const u8) BiasedFp(T) { else => 1, }; } else { - const n = @intCast(usize, -d.decimal_point); + const n = @as(usize, @intCast(-d.decimal_point)); break :blk getShift(n); } }; @@ -74,17 +74,17 @@ pub fn convertSlow(comptime T: type, s: []const u8) BiasedFp(T) { if (d.decimal_point > Decimal(T).decimal_point_range) { return BiasedFp(T).inf(T); } - exp2 -= @intCast(i32, shift); + exp2 -= @as(i32, @intCast(shift)); } // We are now in the range [1/2 .. 1] but the binary format uses [1 .. 
2] exp2 -= 1; while (min_exponent + 1 > exp2) { - var n = @intCast(usize, (min_exponent + 1) - exp2); + var n = @as(usize, @intCast((min_exponent + 1) - exp2)); if (n > max_shift) { n = max_shift; } d.rightShift(n); - exp2 += @intCast(i32, n); + exp2 += @as(i32, @intCast(n)); } if (exp2 - min_exponent >= infinite_power) { return BiasedFp(T).inf(T); diff --git a/lib/std/fmt/parse_float/decimal.zig b/lib/std/fmt/parse_float/decimal.zig index 5bb5fa8d5e..f8d736a065 100644 --- a/lib/std/fmt/parse_float/decimal.zig +++ b/lib/std/fmt/parse_float/decimal.zig @@ -114,7 +114,7 @@ pub fn Decimal(comptime T: type) type { return math.maxInt(MantissaT); } - const dp = @intCast(usize, self.decimal_point); + const dp = @as(usize, @intCast(self.decimal_point)); var n: MantissaT = 0; var i: usize = 0; @@ -155,7 +155,7 @@ pub fn Decimal(comptime T: type) type { const quotient = n / 10; const remainder = n - (10 * quotient); if (write_index < max_digits) { - self.digits[write_index] = @intCast(u8, remainder); + self.digits[write_index] = @as(u8, @intCast(remainder)); } else if (remainder > 0) { self.truncated = true; } @@ -167,7 +167,7 @@ pub fn Decimal(comptime T: type) type { const quotient = n / 10; const remainder = n - (10 * quotient); if (write_index < max_digits) { - self.digits[write_index] = @intCast(u8, remainder); + self.digits[write_index] = @as(u8, @intCast(remainder)); } else if (remainder > 0) { self.truncated = true; } @@ -178,7 +178,7 @@ pub fn Decimal(comptime T: type) type { if (self.num_digits > max_digits) { self.num_digits = max_digits; } - self.decimal_point += @intCast(i32, num_new_digits); + self.decimal_point += @as(i32, @intCast(num_new_digits)); self.trim(); } @@ -202,7 +202,7 @@ pub fn Decimal(comptime T: type) type { } } - self.decimal_point -= @intCast(i32, read_index) - 1; + self.decimal_point -= @as(i32, @intCast(read_index)) - 1; if (self.decimal_point < -decimal_point_range) { self.num_digits = 0; self.decimal_point = 0; @@ -212,14 +212,14 @@ pub fn Decimal(comptime T: type) type { const mask = math.shl(MantissaT, 1, shift) - 1; while (read_index < self.num_digits) { - const new_digit = @intCast(u8, math.shr(MantissaT, n, shift)); + const new_digit = @as(u8, @intCast(math.shr(MantissaT, n, shift))); n = (10 * (n & mask)) + self.digits[read_index]; read_index += 1; self.digits[write_index] = new_digit; write_index += 1; } while (n > 0) { - const new_digit = @intCast(u8, math.shr(MantissaT, n, shift)); + const new_digit = @as(u8, @intCast(math.shr(MantissaT, n, shift))); n = 10 * (n & mask); if (write_index < max_digits) { self.digits[write_index] = new_digit; @@ -268,7 +268,7 @@ pub fn Decimal(comptime T: type) type { while (stream.scanDigit(10)) |digit| { d.tryAddDigit(digit); } - d.decimal_point = @intCast(i32, marker) - @intCast(i32, stream.offsetTrue()); + d.decimal_point = @as(i32, @intCast(marker)) - @as(i32, @intCast(stream.offsetTrue())); } if (d.num_digits != 0) { // Ignore trailing zeros if any @@ -284,9 +284,9 @@ pub fn Decimal(comptime T: type) type { i -= 1; if (i == 0) break; } - d.decimal_point += @intCast(i32, n_trailing_zeros); + d.decimal_point += @as(i32, @intCast(n_trailing_zeros)); d.num_digits -= n_trailing_zeros; - d.decimal_point += @intCast(i32, d.num_digits); + d.decimal_point += @as(i32, @intCast(d.num_digits)); if (d.num_digits > max_digits) { d.truncated = true; d.num_digits = max_digits; diff --git a/lib/std/fmt/parse_float/parse.zig b/lib/std/fmt/parse_float/parse.zig index 9f6e75b29a..a31df31312 100644 --- a/lib/std/fmt/parse_float/parse.zig 
+++ b/lib/std/fmt/parse_float/parse.zig @@ -21,7 +21,7 @@ fn parse8Digits(v_: u64) u64 { v = (v * 10) + (v >> 8); // will not overflow, fits in 63 bits const v1 = (v & mask) *% mul1; const v2 = ((v >> 16) & mask) *% mul2; - return @as(u64, @truncate(u32, (v1 +% v2) >> 32)); + return @as(u64, @as(u32, @truncate((v1 +% v2) >> 32))); } /// Parse digits until a non-digit character is found. @@ -106,7 +106,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool var mantissa: MantissaT = 0; tryParseDigits(MantissaT, stream, &mantissa, info.base); var int_end = stream.offsetTrue(); - var n_digits = @intCast(isize, stream.offsetTrue()); + var n_digits = @as(isize, @intCast(stream.offsetTrue())); // the base being 16 implies a 0x prefix, which shouldn't be included in the digit count if (info.base == 16) n_digits -= 2; @@ -117,8 +117,8 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool const marker = stream.offsetTrue(); tryParseDigits(MantissaT, stream, &mantissa, info.base); const n_after_dot = stream.offsetTrue() - marker; - exponent = -@intCast(i64, n_after_dot); - n_digits += @intCast(isize, n_after_dot); + exponent = -@as(i64, @intCast(n_after_dot)); + n_digits += @as(isize, @intCast(n_after_dot)); } // adjust required shift to offset mantissa for base-16 (2^4) @@ -163,7 +163,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool // '0' = '.' + 2 const next = stream.firstUnchecked(); if (next != '_') { - n_digits -= @intCast(isize, next -| ('0' - 1)); + n_digits -= @as(isize, @intCast(next -| ('0' - 1))); } else { stream.underscore_count += 1; } @@ -179,7 +179,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool exponent = blk: { if (mantissa >= min_n_digit_int(MantissaT, info.max_mantissa_digits)) { // big int - break :blk @intCast(i64, int_end) - @intCast(i64, stream.offsetTrue()); + break :blk @as(i64, @intCast(int_end)) - @as(i64, @intCast(stream.offsetTrue())); } else { // the next byte must be present and be '.' 
// We know this is true because we had more than 19 @@ -190,7 +190,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool stream.advance(1); var marker = stream.offsetTrue(); tryParseNDigits(MantissaT, stream, &mantissa, info.base, info.max_mantissa_digits); - break :blk @intCast(i64, marker) - @intCast(i64, stream.offsetTrue()); + break :blk @as(i64, @intCast(marker)) - @as(i64, @intCast(stream.offsetTrue())); } }; // add back the explicit part diff --git a/lib/std/fs.zig b/lib/std/fs.zig index 8e828fd334..cb6ce2032e 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -373,13 +373,13 @@ pub const IterableDir = struct { } } self.index = 0; - self.end_index = @intCast(usize, rc); + self.end_index = @as(usize, @intCast(rc)); } - const darwin_entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]); + const darwin_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index])); const next_index = self.index + darwin_entry.reclen(); self.index = next_index; - const name = @ptrCast([*]u8, &darwin_entry.d_name)[0..darwin_entry.d_namlen]; + const name = @as([*]u8, @ptrCast(&darwin_entry.d_name))[0..darwin_entry.d_namlen]; if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..") or (darwin_entry.d_ino == 0)) { continue :start_over; @@ -421,13 +421,13 @@ pub const IterableDir = struct { } if (rc == 0) return null; self.index = 0; - self.end_index = @intCast(usize, rc); + self.end_index = @as(usize, @intCast(rc)); } - const entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]); + const entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index])); const next_index = self.index + entry.reclen(); self.index = next_index; - const name = mem.sliceTo(@ptrCast([*:0]u8, &entry.d_name), 0); + const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&entry.d_name)), 0); if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) continue :start_over; @@ -485,13 +485,13 @@ pub const IterableDir = struct { } if (rc == 0) return null; self.index = 0; - self.end_index = @intCast(usize, rc); + self.end_index = @as(usize, @intCast(rc)); } - const bsd_entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]); + const bsd_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index])); const next_index = self.index + bsd_entry.reclen(); self.index = next_index; - const name = @ptrCast([*]u8, &bsd_entry.d_name)[0..bsd_entry.d_namlen]; + const name = @as([*]u8, @ptrCast(&bsd_entry.d_name))[0..bsd_entry.d_namlen]; const skip_zero_fileno = switch (builtin.os.tag) { // d_fileno=0 is used to mark invalid entries or deleted files. 
@@ -567,12 +567,12 @@ pub const IterableDir = struct { } } self.index = 0; - self.end_index = @intCast(usize, rc); + self.end_index = @as(usize, @intCast(rc)); } - const haiku_entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]); + const haiku_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index])); const next_index = self.index + haiku_entry.reclen(); self.index = next_index; - const name = mem.sliceTo(@ptrCast([*:0]u8, &haiku_entry.d_name), 0); + const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&haiku_entry.d_name)), 0); if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..") or (haiku_entry.d_ino == 0)) { continue :start_over; @@ -672,11 +672,11 @@ pub const IterableDir = struct { self.index = 0; self.end_index = rc; } - const linux_entry = @ptrCast(*align(1) linux.dirent64, &self.buf[self.index]); + const linux_entry = @as(*align(1) linux.dirent64, @ptrCast(&self.buf[self.index])); const next_index = self.index + linux_entry.reclen(); self.index = next_index; - const name = mem.sliceTo(@ptrCast([*:0]u8, &linux_entry.d_name), 0); + const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&linux_entry.d_name)), 0); // skip . and .. entries if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) { @@ -750,15 +750,14 @@ pub const IterableDir = struct { } } - const aligned_ptr = @alignCast(@alignOf(w.FILE_BOTH_DIR_INFORMATION), &self.buf[self.index]); - const dir_info = @ptrCast(*w.FILE_BOTH_DIR_INFORMATION, aligned_ptr); + const dir_info: *w.FILE_BOTH_DIR_INFORMATION = @ptrCast(@alignCast(&self.buf[self.index])); if (dir_info.NextEntryOffset != 0) { self.index += dir_info.NextEntryOffset; } else { self.index = self.buf.len; } - const name_utf16le = @ptrCast([*]u16, &dir_info.FileName)[0 .. dir_info.FileNameLength / 2]; + const name_utf16le = @as([*]u16, @ptrCast(&dir_info.FileName))[0 .. dir_info.FileNameLength / 2]; if (mem.eql(u16, name_utf16le, &[_]u16{'.'}) or mem.eql(u16, name_utf16le, &[_]u16{ '.', '.' 
})) continue; @@ -835,7 +834,7 @@ pub const IterableDir = struct { self.index = 0; self.end_index = bufused; } - const entry = @ptrCast(*align(1) w.dirent_t, &self.buf[self.index]); + const entry = @as(*align(1) w.dirent_t, @ptrCast(&self.buf[self.index])); const entry_size = @sizeOf(w.dirent_t); const name_index = self.index + entry_size; if (name_index + entry.d_namlen > self.end_index) { @@ -1789,7 +1788,7 @@ pub const Dir = struct { .fd = undefined, }; - const path_len_bytes = @intCast(u16, mem.sliceTo(sub_path_w, 0).len * 2); + const path_len_bytes = @as(u16, @intCast(mem.sliceTo(sub_path_w, 0).len * 2)); var nt_name = w.UNICODE_STRING{ .Length = path_len_bytes, .MaximumLength = path_len_bytes, diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig index 0c6e8a24f7..e9448aa5d3 100644 --- a/lib/std/fs/file.zig +++ b/lib/std/fs/file.zig @@ -368,7 +368,7 @@ pub const File = struct { return Stat{ .inode = st.ino, - .size = @bitCast(u64, st.size), + .size = @as(u64, @bitCast(st.size)), .mode = st.mode, .kind = kind, .atime = @as(i128, atime.tv_sec) * std.time.ns_per_s + atime.tv_nsec, @@ -398,7 +398,7 @@ pub const File = struct { } return Stat{ .inode = info.InternalInformation.IndexNumber, - .size = @bitCast(u64, info.StandardInformation.EndOfFile), + .size = @as(u64, @bitCast(info.StandardInformation.EndOfFile)), .mode = 0, .kind = if (info.StandardInformation.Directory == 0) .file else .directory, .atime = windows.fromSysTime(info.BasicInformation.LastAccessTime), @@ -650,7 +650,7 @@ pub const File = struct { /// Returns the size of the file pub fn size(self: Self) u64 { - return @intCast(u64, self.stat.size); + return @as(u64, @intCast(self.stat.size)); } /// Returns a `Permissions` struct, representing the permissions on the file @@ -855,7 +855,7 @@ pub const File = struct { if (info.BasicInformation.FileAttributes & windows.FILE_ATTRIBUTE_REPARSE_POINT != 0) { var reparse_buf: [windows.MAXIMUM_REPARSE_DATA_BUFFER_SIZE]u8 = undefined; try windows.DeviceIoControl(self.handle, windows.FSCTL_GET_REPARSE_POINT, null, reparse_buf[0..]); - const reparse_struct = @ptrCast(*const windows.REPARSE_DATA_BUFFER, @alignCast(@alignOf(windows.REPARSE_DATA_BUFFER), &reparse_buf[0])); + const reparse_struct: *const windows.REPARSE_DATA_BUFFER = @ptrCast(@alignCast(&reparse_buf[0])); break :reparse_blk reparse_struct.ReparseTag; } break :reparse_blk 0; @@ -864,7 +864,7 @@ pub const File = struct { break :blk MetadataWindows{ .attributes = info.BasicInformation.FileAttributes, .reparse_tag = reparse_tag, - ._size = @bitCast(u64, info.StandardInformation.EndOfFile), + ._size = @as(u64, @bitCast(info.StandardInformation.EndOfFile)), .access_time = windows.fromSysTime(info.BasicInformation.LastAccessTime), .modified_time = windows.fromSysTime(info.BasicInformation.LastWriteTime), .creation_time = windows.fromSysTime(info.BasicInformation.CreationTime), @@ -881,16 +881,16 @@ pub const File = struct { .NOSYS => { const st = try os.fstat(self.handle); - stx.mode = @intCast(u16, st.mode); + stx.mode = @as(u16, @intCast(st.mode)); // Hacky conversion from timespec to statx_timestamp stx.atime = std.mem.zeroes(os.linux.statx_timestamp); stx.atime.tv_sec = st.atim.tv_sec; - stx.atime.tv_nsec = @intCast(u32, st.atim.tv_nsec); // Guaranteed to succeed (tv_nsec is always below 10^9) + stx.atime.tv_nsec = @as(u32, @intCast(st.atim.tv_nsec)); // Guaranteed to succeed (tv_nsec is always below 10^9) stx.mtime = std.mem.zeroes(os.linux.statx_timestamp); stx.mtime.tv_sec = st.mtim.tv_sec; - stx.mtime.tv_nsec = 
@intCast(u32, st.mtim.tv_nsec); + stx.mtime.tv_nsec = @as(u32, @intCast(st.mtim.tv_nsec)); stx.mask = os.linux.STATX_BASIC_STATS | os.linux.STATX_MTIME; }, @@ -1414,7 +1414,7 @@ pub const File = struct { amt = try os.sendfile(out_fd, in_fd, offset + off, count - off, zero_iovec, trailers, flags); off += amt; } - amt = @intCast(usize, off - count); + amt = @as(usize, @intCast(off - count)); } var i: usize = 0; while (i < trailers.len) { diff --git a/lib/std/fs/get_app_data_dir.zig b/lib/std/fs/get_app_data_dir.zig index 4f7ba9af62..2f599c3213 100644 --- a/lib/std/fs/get_app_data_dir.zig +++ b/lib/std/fs/get_app_data_dir.zig @@ -23,7 +23,7 @@ pub fn getAppDataDir(allocator: mem.Allocator, appname: []const u8) GetAppDataDi &dir_path_ptr, )) { os.windows.S_OK => { - defer os.windows.ole32.CoTaskMemFree(@ptrCast(*anyopaque, dir_path_ptr)); + defer os.windows.ole32.CoTaskMemFree(@as(*anyopaque, @ptrCast(dir_path_ptr))); const global_dir = unicode.utf16leToUtf8Alloc(allocator, mem.sliceTo(dir_path_ptr, 0)) catch |err| switch (err) { error.UnexpectedSecondSurrogateHalf => return error.AppDataDirUnavailable, error.ExpectedSecondSurrogateHalf => return error.AppDataDirUnavailable, diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig index 75c9b1df78..7ed7a75ea9 100644 --- a/lib/std/fs/wasi.zig +++ b/lib/std/fs/wasi.zig @@ -17,7 +17,7 @@ pub const Preopens = struct { pub fn find(p: Preopens, name: []const u8) ?os.fd_t { for (p.names, 0..) |elem_name, i| { if (mem.eql(u8, elem_name, name)) { - return @intCast(os.fd_t, i); + return @as(os.fd_t, @intCast(i)); } } return null; @@ -34,7 +34,7 @@ pub fn preopensAlloc(gpa: Allocator) Allocator.Error!Preopens { names.appendAssumeCapacity("stdout"); // 1 names.appendAssumeCapacity("stderr"); // 2 while (true) { - const fd = @intCast(wasi.fd_t, names.items.len); + const fd = @as(wasi.fd_t, @intCast(names.items.len)); var prestat: prestat_t = undefined; switch (wasi.fd_prestat_get(fd, &prestat)) { .SUCCESS => {}, diff --git a/lib/std/fs/watch.zig b/lib/std/fs/watch.zig index 0deaa86468..280c8888e6 100644 --- a/lib/std/fs/watch.zig +++ b/lib/std/fs/watch.zig @@ -279,7 +279,7 @@ pub fn Watch(comptime V: type) type { while (!put.cancelled) { kev.* = os.Kevent{ - .ident = @intCast(usize, fd), + .ident = @as(usize, @intCast(fd)), .filter = os.EVFILT_VNODE, .flags = os.EV_ADD | os.EV_ENABLE | os.EV_CLEAR | os.EV_ONESHOT | os.NOTE_WRITE | os.NOTE_DELETE | os.NOTE_REVOKE, @@ -487,14 +487,14 @@ pub fn Watch(comptime V: type) type { var ptr: [*]u8 = &event_buf; const end_ptr = ptr + bytes_transferred; while (@intFromPtr(ptr) < @intFromPtr(end_ptr)) { - const ev = @ptrCast(*const windows.FILE_NOTIFY_INFORMATION, ptr); + const ev = @as(*const windows.FILE_NOTIFY_INFORMATION, @ptrCast(ptr)); const emit = switch (ev.Action) { windows.FILE_ACTION_REMOVED => WatchEventId.Delete, windows.FILE_ACTION_MODIFIED => .CloseWrite, else => null, }; if (emit) |id| { - const basename_ptr = @ptrCast([*]u16, ptr + @sizeOf(windows.FILE_NOTIFY_INFORMATION)); + const basename_ptr = @as([*]u16, @ptrCast(ptr + @sizeOf(windows.FILE_NOTIFY_INFORMATION))); const basename_utf16le = basename_ptr[0 .. ev.FileNameLength / 2]; var basename_data: [std.fs.MAX_PATH_BYTES]u8 = undefined; const basename = basename_data[0 .. 
std.unicode.utf16leToUtf8(&basename_data, basename_utf16le) catch unreachable]; @@ -510,7 +510,7 @@ pub fn Watch(comptime V: type) type { } if (ev.NextEntryOffset == 0) break; - ptr = @alignCast(@alignOf(windows.FILE_NOTIFY_INFORMATION), ptr + ev.NextEntryOffset); + ptr = @alignCast(ptr + ev.NextEntryOffset); } } } @@ -586,10 +586,10 @@ pub fn Watch(comptime V: type) type { var ptr: [*]u8 = &event_buf; const end_ptr = ptr + bytes_read; while (@intFromPtr(ptr) < @intFromPtr(end_ptr)) { - const ev = @ptrCast(*const os.linux.inotify_event, ptr); + const ev = @as(*const os.linux.inotify_event, @ptrCast(ptr)); if (ev.mask & os.linux.IN_CLOSE_WRITE == os.linux.IN_CLOSE_WRITE) { const basename_ptr = ptr + @sizeOf(os.linux.inotify_event); - const basename = std.mem.span(@ptrCast([*:0]u8, basename_ptr)); + const basename = std.mem.span(@as([*:0]u8, @ptrCast(basename_ptr))); const dir = &self.os_data.wd_table.get(ev.wd).?; if (dir.file_table.getEntry(basename)) |file_value| { @@ -615,7 +615,7 @@ pub fn Watch(comptime V: type) type { } else if (ev.mask & os.linux.IN_DELETE == os.linux.IN_DELETE) { // File or directory was removed or deleted const basename_ptr = ptr + @sizeOf(os.linux.inotify_event); - const basename = std.mem.span(@ptrCast([*:0]u8, basename_ptr)); + const basename = std.mem.span(@as([*:0]u8, @ptrCast(basename_ptr))); const dir = &self.os_data.wd_table.get(ev.wd).?; if (dir.file_table.getEntry(basename)) |file_value| { @@ -628,7 +628,7 @@ pub fn Watch(comptime V: type) type { } } - ptr = @alignCast(@alignOf(os.linux.inotify_event), ptr + @sizeOf(os.linux.inotify_event) + ev.len); + ptr = @alignCast(ptr + @sizeOf(os.linux.inotify_event) + ev.len); } } } diff --git a/lib/std/hash/adler.zig b/lib/std/hash/adler.zig index 78f52b539b..200dc9aafe 100644 --- a/lib/std/hash/adler.zig +++ b/lib/std/hash/adler.zig @@ -118,7 +118,7 @@ test "adler32 very long with variation" { var i: usize = 0; while (i < result.len) : (i += 1) { - result[i] = @truncate(u8, i); + result[i] = @as(u8, @truncate(i)); } break :blk result; diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig index f33bd635fc..c5c6c585eb 100644 --- a/lib/std/hash/auto_hash.zig +++ b/lib/std/hash/auto_hash.zig @@ -92,10 +92,10 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void { // Help the optimizer see that hashing an int is easy by inlining! // TODO Check if the situation is better after #561 is resolved. 
.Int => |int| switch (int.signedness) { - .signed => hash(hasher, @bitCast(@Type(.{ .Int = .{ + .signed => hash(hasher, @as(@Type(.{ .Int = .{ .bits = int.bits, .signedness = .unsigned, - } }), key), strat), + } }), @bitCast(key)), strat), .unsigned => { if (comptime meta.trait.hasUniqueRepresentation(Key)) { @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key) }); diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig index 62df89f0ae..699de5ceb4 100644 --- a/lib/std/hash/benchmark.zig +++ b/lib/std/hash/benchmark.zig @@ -122,13 +122,13 @@ pub fn benchmarkHash(comptime H: anytype, bytes: usize, allocator: std.mem.Alloc for (0..blocks_count) |i| { h.update(blocks[i * alignment ..][0..block_size]); } - const final = if (H.has_crypto_api) @truncate(u64, h.finalInt()) else h.final(); + const final = if (H.has_crypto_api) @as(u64, @truncate(h.finalInt())) else h.final(); std.mem.doNotOptimizeAway(final); const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s)); return Result{ .hash = final, @@ -152,7 +152,7 @@ pub fn benchmarkHashSmallKeys(comptime H: anytype, key_size: usize, bytes: usize const final = blk: { if (H.init_u8s) |init| { if (H.has_crypto_api) { - break :blk @truncate(u64, H.ty.toInt(small_key, init[0..H.ty.key_length])); + break :blk @as(u64, @truncate(H.ty.toInt(small_key, init[0..H.ty.key_length]))); } else { break :blk H.ty.hash(init, small_key); } @@ -166,8 +166,8 @@ pub fn benchmarkHashSmallKeys(comptime H: anytype, key_size: usize, bytes: usize } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s)); std.mem.doNotOptimizeAway(sum); diff --git a/lib/std/hash/cityhash.zig b/lib/std/hash/cityhash.zig index d0884b135f..8040c99b84 100644 --- a/lib/std/hash/cityhash.zig +++ b/lib/std/hash/cityhash.zig @@ -2,7 +2,7 @@ const std = @import("std"); inline fn offsetPtr(ptr: [*]const u8, offset: usize) [*]const u8 { // ptr + offset doesn't work at comptime so we need this instead. 
- return @ptrCast([*]const u8, &ptr[offset]); + return @as([*]const u8, @ptrCast(&ptr[offset])); } fn fetch32(ptr: [*]const u8, offset: usize) u32 { @@ -49,18 +49,18 @@ pub const CityHash32 = struct { } fn hash32Len0To4(str: []const u8) u32 { - const len: u32 = @truncate(u32, str.len); + const len: u32 = @as(u32, @truncate(str.len)); var b: u32 = 0; var c: u32 = 9; for (str) |v| { - b = b *% c1 +% @bitCast(u32, @intCast(i32, @bitCast(i8, v))); + b = b *% c1 +% @as(u32, @bitCast(@as(i32, @intCast(@as(i8, @bitCast(v)))))); c ^= b; } return fmix(mur(b, mur(len, c))); } fn hash32Len5To12(str: []const u8) u32 { - var a: u32 = @truncate(u32, str.len); + var a: u32 = @as(u32, @truncate(str.len)); var b: u32 = a *% 5; var c: u32 = 9; const d: u32 = b; @@ -73,7 +73,7 @@ pub const CityHash32 = struct { } fn hash32Len13To24(str: []const u8) u32 { - const len: u32 = @truncate(u32, str.len); + const len: u32 = @as(u32, @truncate(str.len)); const a: u32 = fetch32(str.ptr, (str.len >> 1) - 4); const b: u32 = fetch32(str.ptr, 4); const c: u32 = fetch32(str.ptr, str.len - 8); @@ -95,7 +95,7 @@ pub const CityHash32 = struct { } } - const len: u32 = @truncate(u32, str.len); + const len: u32 = @as(u32, @truncate(str.len)); var h: u32 = len; var g: u32 = c1 *% len; var f: u32 = g; @@ -220,9 +220,9 @@ pub const CityHash64 = struct { const a: u8 = str[0]; const b: u8 = str[str.len >> 1]; const c: u8 = str[str.len - 1]; - const y: u32 = @intCast(u32, a) +% (@intCast(u32, b) << 8); - const z: u32 = @truncate(u32, str.len) +% (@intCast(u32, c) << 2); - return shiftmix(@intCast(u64, y) *% k2 ^ @intCast(u64, z) *% k0) *% k2; + const y: u32 = @as(u32, @intCast(a)) +% (@as(u32, @intCast(b)) << 8); + const z: u32 = @as(u32, @truncate(str.len)) +% (@as(u32, @intCast(c)) << 2); + return shiftmix(@as(u64, @intCast(y)) *% k2 ^ @as(u64, @intCast(z)) *% k0) *% k2; } return k2; } @@ -309,7 +309,7 @@ pub const CityHash64 = struct { var w: WeakPair = weakHashLen32WithSeeds(offsetPtr(str.ptr, str.len - 32), y +% k1, x); x = x *% k1 +% fetch64(str.ptr, 0); - len = (len - 1) & ~@intCast(u64, 63); + len = (len - 1) & ~@as(u64, @intCast(63)); var ptr: [*]const u8 = str.ptr; while (true) { @@ -353,19 +353,19 @@ fn SMHasherTest(comptime hash_fn: anytype) u32 { var i: u32 = 0; while (i < 256) : (i += 1) { - key[i] = @intCast(u8, i); + key[i] = @as(u8, @intCast(i)); var h: HashResult = hash_fn(key[0..i], 256 - i); // comptime can't really do reinterpret casting yet, // so we need to write the bytes manually. 
for (hashes_bytes[i * @sizeOf(HashResult) ..][0..@sizeOf(HashResult)]) |*byte| { - byte.* = @truncate(u8, h); + byte.* = @as(u8, @truncate(h)); h = h >> 8; } } - return @truncate(u32, hash_fn(&hashes_bytes, 0)); + return @as(u32, @truncate(hash_fn(&hashes_bytes, 0))); } fn CityHash32hashIgnoreSeed(str: []const u8, seed: u32) u32 { diff --git a/lib/std/hash/crc.zig b/lib/std/hash/crc.zig index da250af1bf..3e1e458ffc 100644 --- a/lib/std/hash/crc.zig +++ b/lib/std/hash/crc.zig @@ -65,7 +65,7 @@ pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type { } inline fn tableEntry(index: I) I { - return lookup_table[@intCast(u8, index & 0xFF)]; + return lookup_table[@as(u8, @intCast(index & 0xFF))]; } pub fn update(self: *Self, bytes: []const u8) void { @@ -95,7 +95,7 @@ pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type { if (!algorithm.reflect_output) { c >>= @bitSizeOf(I) - @bitSizeOf(W); } - return @intCast(W, c ^ algorithm.xor_output); + return @as(W, @intCast(c ^ algorithm.xor_output)); } pub fn hash(bytes: []const u8) W { @@ -125,7 +125,7 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type { var tables: [8][256]u32 = undefined; for (&tables[0], 0..) |*e, i| { - var crc = @intCast(u32, i); + var crc = @as(u32, @intCast(i)); var j: usize = 0; while (j < 8) : (j += 1) { if (crc & 1 == 1) { @@ -142,7 +142,7 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type { var crc = tables[0][i]; var j: usize = 1; while (j < 8) : (j += 1) { - const index = @truncate(u8, crc); + const index = @as(u8, @truncate(crc)); crc = tables[0][index] ^ (crc >> 8); tables[j][i] = crc; } @@ -170,14 +170,14 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type { lookup_tables[1][p[6]] ^ lookup_tables[2][p[5]] ^ lookup_tables[3][p[4]] ^ - lookup_tables[4][@truncate(u8, self.crc >> 24)] ^ - lookup_tables[5][@truncate(u8, self.crc >> 16)] ^ - lookup_tables[6][@truncate(u8, self.crc >> 8)] ^ - lookup_tables[7][@truncate(u8, self.crc >> 0)]; + lookup_tables[4][@as(u8, @truncate(self.crc >> 24))] ^ + lookup_tables[5][@as(u8, @truncate(self.crc >> 16))] ^ + lookup_tables[6][@as(u8, @truncate(self.crc >> 8))] ^ + lookup_tables[7][@as(u8, @truncate(self.crc >> 0))]; } while (i < input.len) : (i += 1) { - const index = @truncate(u8, self.crc) ^ input[i]; + const index = @as(u8, @truncate(self.crc)) ^ input[i]; self.crc = (self.crc >> 8) ^ lookup_tables[0][index]; } } @@ -218,7 +218,7 @@ pub fn Crc32SmallWithPoly(comptime poly: Polynomial) type { var table: [16]u32 = undefined; for (&table, 0..) 
|*e, i| { - var crc = @intCast(u32, i * 16); + var crc = @as(u32, @intCast(i * 16)); var j: usize = 0; while (j < 8) : (j += 1) { if (crc & 1 == 1) { @@ -241,8 +241,8 @@ pub fn Crc32SmallWithPoly(comptime poly: Polynomial) type { pub fn update(self: *Self, input: []const u8) void { for (input) |b| { - self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 0))] ^ (self.crc >> 4); - self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 4))] ^ (self.crc >> 4); + self.crc = lookup_table[@as(u4, @truncate(self.crc ^ (b >> 0)))] ^ (self.crc >> 4); + self.crc = lookup_table[@as(u4, @truncate(self.crc ^ (b >> 4)))] ^ (self.crc >> 4); } } diff --git a/lib/std/hash/murmur.zig b/lib/std/hash/murmur.zig index 753439a4cf..bd433874ed 100644 --- a/lib/std/hash/murmur.zig +++ b/lib/std/hash/murmur.zig @@ -14,9 +14,9 @@ pub const Murmur2_32 = struct { pub fn hashWithSeed(str: []const u8, seed: u32) u32 { const m: u32 = 0x5bd1e995; - const len = @truncate(u32, str.len); + const len = @as(u32, @truncate(str.len)); var h1: u32 = seed ^ len; - for (@ptrCast([*]align(1) const u32, str.ptr)[0..(len >> 2)]) |v| { + for (@as([*]align(1) const u32, @ptrCast(str.ptr))[0..(len >> 2)]) |v| { var k1: u32 = v; if (native_endian == .Big) k1 = @byteSwap(k1); @@ -29,13 +29,13 @@ pub const Murmur2_32 = struct { const offset = len & 0xfffffffc; const rest = len & 3; if (rest >= 3) { - h1 ^= @intCast(u32, str[offset + 2]) << 16; + h1 ^= @as(u32, @intCast(str[offset + 2])) << 16; } if (rest >= 2) { - h1 ^= @intCast(u32, str[offset + 1]) << 8; + h1 ^= @as(u32, @intCast(str[offset + 1])) << 8; } if (rest >= 1) { - h1 ^= @intCast(u32, str[offset + 0]); + h1 ^= @as(u32, @intCast(str[offset + 0])); h1 *%= m; } h1 ^= h1 >> 13; @@ -73,12 +73,12 @@ pub const Murmur2_32 = struct { const len: u32 = 8; var h1: u32 = seed ^ len; var k1: u32 = undefined; - k1 = @truncate(u32, v) *% m; + k1 = @as(u32, @truncate(v)) *% m; k1 ^= k1 >> 24; k1 *%= m; h1 *%= m; h1 ^= k1; - k1 = @truncate(u32, v >> 32) *% m; + k1 = @as(u32, @truncate(v >> 32)) *% m; k1 ^= k1 >> 24; k1 *%= m; h1 *%= m; @@ -100,7 +100,7 @@ pub const Murmur2_64 = struct { pub fn hashWithSeed(str: []const u8, seed: u64) u64 { const m: u64 = 0xc6a4a7935bd1e995; var h1: u64 = seed ^ (@as(u64, str.len) *% m); - for (@ptrCast([*]align(1) const u64, str.ptr)[0 .. str.len / 8]) |v| { + for (@as([*]align(1) const u64, @ptrCast(str.ptr))[0 .. 
str.len / 8]) |v| { var k1: u64 = v; if (native_endian == .Big) k1 = @byteSwap(k1); @@ -114,7 +114,7 @@ pub const Murmur2_64 = struct { const offset = str.len - rest; if (rest > 0) { var k1: u64 = 0; - @memcpy(@ptrCast([*]u8, &k1)[0..rest], str[offset..]); + @memcpy(@as([*]u8, @ptrCast(&k1))[0..rest], str[offset..]); if (native_endian == .Big) k1 = @byteSwap(k1); h1 ^= k1; @@ -178,9 +178,9 @@ pub const Murmur3_32 = struct { pub fn hashWithSeed(str: []const u8, seed: u32) u32 { const c1: u32 = 0xcc9e2d51; const c2: u32 = 0x1b873593; - const len = @truncate(u32, str.len); + const len = @as(u32, @truncate(str.len)); var h1: u32 = seed; - for (@ptrCast([*]align(1) const u32, str.ptr)[0..(len >> 2)]) |v| { + for (@as([*]align(1) const u32, @ptrCast(str.ptr))[0..(len >> 2)]) |v| { var k1: u32 = v; if (native_endian == .Big) k1 = @byteSwap(k1); @@ -197,13 +197,13 @@ pub const Murmur3_32 = struct { const offset = len & 0xfffffffc; const rest = len & 3; if (rest == 3) { - k1 ^= @intCast(u32, str[offset + 2]) << 16; + k1 ^= @as(u32, @intCast(str[offset + 2])) << 16; } if (rest >= 2) { - k1 ^= @intCast(u32, str[offset + 1]) << 8; + k1 ^= @as(u32, @intCast(str[offset + 1])) << 8; } if (rest >= 1) { - k1 ^= @intCast(u32, str[offset + 0]); + k1 ^= @as(u32, @intCast(str[offset + 0])); k1 *%= c1; k1 = rotl32(k1, 15); k1 *%= c2; @@ -255,14 +255,14 @@ pub const Murmur3_32 = struct { const len: u32 = 8; var h1: u32 = seed; var k1: u32 = undefined; - k1 = @truncate(u32, v) *% c1; + k1 = @as(u32, @truncate(v)) *% c1; k1 = rotl32(k1, 15); k1 *%= c2; h1 ^= k1; h1 = rotl32(h1, 13); h1 *%= 5; h1 +%= 0xe6546b64; - k1 = @truncate(u32, v >> 32) *% c1; + k1 = @as(u32, @truncate(v >> 32)) *% c1; k1 = rotl32(k1, 15); k1 *%= c2; h1 ^= k1; @@ -286,15 +286,15 @@ fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 { var i: u32 = 0; while (i < 256) : (i += 1) { - key[i] = @truncate(u8, i); + key[i] = @as(u8, @truncate(i)); var h = hash_fn(key[0..i], 256 - i); if (native_endian == .Big) h = @byteSwap(h); - @memcpy(hashes[i * hashbytes ..][0..hashbytes], @ptrCast([*]u8, &h)); + @memcpy(hashes[i * hashbytes ..][0..hashbytes], @as([*]u8, @ptrCast(&h))); } - return @truncate(u32, hash_fn(&hashes, 0)); + return @as(u32, @truncate(hash_fn(&hashes, 0))); } test "murmur2_32" { @@ -307,8 +307,8 @@ test "murmur2_32" { v0le = @byteSwap(v0le); v1le = @byteSwap(v1le); } - try testing.expectEqual(Murmur2_32.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur2_32.hashUint32(v0)); - try testing.expectEqual(Murmur2_32.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur2_32.hashUint64(v1)); + try testing.expectEqual(Murmur2_32.hash(@as([*]u8, @ptrCast(&v0le))[0..4]), Murmur2_32.hashUint32(v0)); + try testing.expectEqual(Murmur2_32.hash(@as([*]u8, @ptrCast(&v1le))[0..8]), Murmur2_32.hashUint64(v1)); } test "murmur2_64" { @@ -321,8 +321,8 @@ test "murmur2_64" { v0le = @byteSwap(v0le); v1le = @byteSwap(v1le); } - try testing.expectEqual(Murmur2_64.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur2_64.hashUint32(v0)); - try testing.expectEqual(Murmur2_64.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur2_64.hashUint64(v1)); + try testing.expectEqual(Murmur2_64.hash(@as([*]u8, @ptrCast(&v0le))[0..4]), Murmur2_64.hashUint32(v0)); + try testing.expectEqual(Murmur2_64.hash(@as([*]u8, @ptrCast(&v1le))[0..8]), Murmur2_64.hashUint64(v1)); } test "murmur3_32" { @@ -335,6 +335,6 @@ test "murmur3_32" { v0le = @byteSwap(v0le); v1le = @byteSwap(v1le); } - try testing.expectEqual(Murmur3_32.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur3_32.hashUint32(v0)); - try 
testing.expectEqual(Murmur3_32.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur3_32.hashUint64(v1)); + try testing.expectEqual(Murmur3_32.hash(@as([*]u8, @ptrCast(&v0le))[0..4]), Murmur3_32.hashUint32(v0)); + try testing.expectEqual(Murmur3_32.hash(@as([*]u8, @ptrCast(&v1le))[0..8]), Murmur3_32.hashUint64(v1)); } diff --git a/lib/std/hash/wyhash.zig b/lib/std/hash/wyhash.zig index 3573745444..aced3be66e 100644 --- a/lib/std/hash/wyhash.zig +++ b/lib/std/hash/wyhash.zig @@ -132,8 +132,8 @@ pub const Wyhash = struct { inline fn mum(a: *u64, b: *u64) void { const x = @as(u128, a.*) *% b.*; - a.* = @truncate(u64, x); - b.* = @truncate(u64, x >> 64); + a.* = @as(u64, @truncate(x)); + b.* = @as(u64, @truncate(x >> 64)); } inline fn mix(a_: u64, b_: u64) u64 { @@ -252,7 +252,7 @@ test "test ensure idempotent final call" { test "iterative non-divisible update" { var buf: [8192]u8 = undefined; for (&buf, 0..) |*e, i| { - e.* = @truncate(u8, i); + e.* = @as(u8, @truncate(i)); } const seed = 0x128dad08f; diff --git a/lib/std/hash/xxhash.zig b/lib/std/hash/xxhash.zig index 3122406488..f1d1da429d 100644 --- a/lib/std/hash/xxhash.zig +++ b/lib/std/hash/xxhash.zig @@ -212,7 +212,7 @@ pub const XxHash32 = struct { rotl(u32, self.acc3, 12) +% rotl(u32, self.acc4, 18); } - acc = acc +% @intCast(u32, self.byte_count) +% @intCast(u32, self.buf_len); + acc = acc +% @as(u32, @intCast(self.byte_count)) +% @as(u32, @intCast(self.buf_len)); var pos: usize = 0; while (pos + 4 <= self.buf_len) : (pos += 4) { diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 4f1639cd60..0afe6f9643 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -101,7 +101,7 @@ pub const StringIndexContext = struct { } pub fn hash(self: @This(), x: u32) u64 { - const x_slice = mem.sliceTo(@ptrCast([*:0]const u8, self.bytes.items.ptr) + x, 0); + const x_slice = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.bytes.items.ptr)) + x, 0); return hashString(x_slice); } }; @@ -110,7 +110,7 @@ pub const StringIndexAdapter = struct { bytes: *std.ArrayListUnmanaged(u8), pub fn eql(self: @This(), a_slice: []const u8, b: u32) bool { - const b_slice = mem.sliceTo(@ptrCast([*:0]const u8, self.bytes.items.ptr) + b, 0); + const b_slice = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.bytes.items.ptr)) + b, 0); return mem.eql(u8, a_slice, b_slice); } @@ -777,25 +777,25 @@ pub fn HashMapUnmanaged( fingerprint: FingerPrint = free, used: u1 = 0, - const slot_free = @bitCast(u8, Metadata{ .fingerprint = free }); - const slot_tombstone = @bitCast(u8, Metadata{ .fingerprint = tombstone }); + const slot_free = @as(u8, @bitCast(Metadata{ .fingerprint = free })); + const slot_tombstone = @as(u8, @bitCast(Metadata{ .fingerprint = tombstone })); pub fn isUsed(self: Metadata) bool { return self.used == 1; } pub fn isTombstone(self: Metadata) bool { - return @bitCast(u8, self) == slot_tombstone; + return @as(u8, @bitCast(self)) == slot_tombstone; } pub fn isFree(self: Metadata) bool { - return @bitCast(u8, self) == slot_free; + return @as(u8, @bitCast(self)) == slot_free; } pub fn takeFingerprint(hash: Hash) FingerPrint { const hash_bits = @typeInfo(Hash).Int.bits; const fp_bits = @typeInfo(FingerPrint).Int.bits; - return @truncate(FingerPrint, hash >> (hash_bits - fp_bits)); + return @as(FingerPrint, @truncate(hash >> (hash_bits - fp_bits))); } pub fn fill(self: *Metadata, fp: FingerPrint) void { @@ -899,7 +899,7 @@ pub fn HashMapUnmanaged( } fn capacityForSize(size: Size) Size { - var new_cap = @truncate(u32, (@as(u64, size) * 100) / max_load_percentage + 1); 
+ var new_cap = @as(u32, @truncate((@as(u64, size) * 100) / max_load_percentage + 1)); new_cap = math.ceilPowerOfTwo(u32, new_cap) catch unreachable; return new_cap; } @@ -927,7 +927,7 @@ pub fn HashMapUnmanaged( if (self.metadata) |_| { self.initMetadatas(); self.size = 0; - self.available = @truncate(u32, (self.capacity() * max_load_percentage) / 100); + self.available = @as(u32, @truncate((self.capacity() * max_load_percentage) / 100)); } } @@ -942,7 +942,7 @@ pub fn HashMapUnmanaged( } fn header(self: *const Self) *Header { - return @ptrCast(*Header, @ptrCast([*]Header, @alignCast(@alignOf(Header), self.metadata.?)) - 1); + return @ptrCast(@as([*]Header, @ptrCast(@alignCast(self.metadata.?))) - 1); } fn keys(self: *const Self) [*]K { @@ -1033,7 +1033,7 @@ pub fn HashMapUnmanaged( const hash = ctx.hash(key); const mask = self.capacity() - 1; - var idx = @truncate(usize, hash & mask); + var idx = @as(usize, @truncate(hash & mask)); var metadata = self.metadata.? + idx; while (metadata[0].isUsed()) { @@ -1147,7 +1147,7 @@ pub fn HashMapUnmanaged( const fingerprint = Metadata.takeFingerprint(hash); // Don't loop indefinitely when there are no empty slots. var limit = self.capacity(); - var idx = @truncate(usize, hash & mask); + var idx = @as(usize, @truncate(hash & mask)); var metadata = self.metadata.? + idx; while (!metadata[0].isFree() and limit != 0) { @@ -1325,7 +1325,7 @@ pub fn HashMapUnmanaged( const mask = self.capacity() - 1; const fingerprint = Metadata.takeFingerprint(hash); var limit = self.capacity(); - var idx = @truncate(usize, hash & mask); + var idx = @as(usize, @truncate(hash & mask)); var first_tombstone_idx: usize = self.capacity(); // invalid index var metadata = self.metadata.? + idx; @@ -1450,7 +1450,7 @@ pub fn HashMapUnmanaged( } fn initMetadatas(self: *Self) void { - @memset(@ptrCast([*]u8, self.metadata.?)[0 .. @sizeOf(Metadata) * self.capacity()], 0); + @memset(@as([*]u8, @ptrCast(self.metadata.?))[0 .. 
@sizeOf(Metadata) * self.capacity()], 0); } // This counts the number of occupied slots (not counting tombstones), which is @@ -1458,7 +1458,7 @@ pub fn HashMapUnmanaged( fn load(self: *const Self) Size { const max_load = (self.capacity() * max_load_percentage) / 100; assert(max_load >= self.available); - return @truncate(Size, max_load - self.available); + return @as(Size, @truncate(max_load - self.available)); } fn growIfNeeded(self: *Self, allocator: Allocator, new_count: Size, ctx: Context) Allocator.Error!void { @@ -1480,7 +1480,7 @@ pub fn HashMapUnmanaged( const new_cap = capacityForSize(self.size); try other.allocate(allocator, new_cap); other.initMetadatas(); - other.available = @truncate(u32, (new_cap * max_load_percentage) / 100); + other.available = @as(u32, @truncate((new_cap * max_load_percentage) / 100)); var i: Size = 0; var metadata = self.metadata.?; @@ -1515,7 +1515,7 @@ pub fn HashMapUnmanaged( defer map.deinit(allocator); try map.allocate(allocator, new_cap); map.initMetadatas(); - map.available = @truncate(u32, (new_cap * max_load_percentage) / 100); + map.available = @as(u32, @truncate((new_cap * max_load_percentage) / 100)); if (self.size != 0) { const old_capacity = self.capacity(); @@ -1558,15 +1558,15 @@ pub fn HashMapUnmanaged( const metadata = ptr + @sizeOf(Header); - const hdr = @ptrFromInt(*Header, ptr); + const hdr = @as(*Header, @ptrFromInt(ptr)); if (@sizeOf([*]V) != 0) { - hdr.values = @ptrFromInt([*]V, ptr + vals_start); + hdr.values = @as([*]V, @ptrFromInt(ptr + vals_start)); } if (@sizeOf([*]K) != 0) { - hdr.keys = @ptrFromInt([*]K, ptr + keys_start); + hdr.keys = @as([*]K, @ptrFromInt(ptr + keys_start)); } hdr.capacity = new_capacity; - self.metadata = @ptrFromInt([*]Metadata, metadata); + self.metadata = @as([*]Metadata, @ptrFromInt(metadata)); } fn deallocate(self: *Self, allocator: Allocator) void { @@ -1589,7 +1589,7 @@ pub fn HashMapUnmanaged( const total_size = std.mem.alignForward(usize, vals_end, max_align); - const slice = @ptrFromInt([*]align(max_align) u8, @intFromPtr(self.header()))[0..total_size]; + const slice = @as([*]align(max_align) u8, @ptrFromInt(@intFromPtr(self.header())))[0..total_size]; allocator.free(slice); self.metadata = null; diff --git a/lib/std/heap.zig b/lib/std/heap.zig index fd5b0754fe..d04f959345 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -61,11 +61,11 @@ const CAllocator = struct { pub const supports_posix_memalign = @hasDecl(c, "posix_memalign"); fn getHeader(ptr: [*]u8) *[*]u8 { - return @ptrFromInt(*[*]u8, @intFromPtr(ptr) - @sizeOf(usize)); + return @as(*[*]u8, @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize))); } fn alignedAlloc(len: usize, log2_align: u8) ?[*]u8 { - const alignment = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_align); + const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align)); if (supports_posix_memalign) { // The posix_memalign only accepts alignment values that are a // multiple of the pointer size @@ -75,13 +75,13 @@ const CAllocator = struct { if (c.posix_memalign(&aligned_ptr, eff_alignment, len) != 0) return null; - return @ptrCast([*]u8, aligned_ptr); + return @as([*]u8, @ptrCast(aligned_ptr)); } // Thin wrapper around regular malloc, overallocate to account for // alignment padding and store the original malloc()'ed pointer before // the aligned address. 
- var unaligned_ptr = @ptrCast([*]u8, c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null); + var unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null)); const unaligned_addr = @intFromPtr(unaligned_ptr); const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment); var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); @@ -195,7 +195,7 @@ fn rawCAlloc( // type in C that is size 8 and has 16 byte alignment, so the alignment may // be 8 bytes rather than 16. Similarly if only 1 byte is requested, malloc // is allowed to return a 1-byte aligned pointer. - return @ptrCast(?[*]u8, c.malloc(len)); + return @as(?[*]u8, @ptrCast(c.malloc(len))); } fn rawCResize( @@ -283,7 +283,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { } fn getRecordPtr(buf: []u8) *align(1) usize { - return @ptrFromInt(*align(1) usize, @intFromPtr(buf.ptr) + buf.len); + return @as(*align(1) usize, @ptrFromInt(@intFromPtr(buf.ptr) + buf.len)); } fn alloc( @@ -293,9 +293,9 @@ pub const HeapAllocator = switch (builtin.os.tag) { return_address: usize, ) ?[*]u8 { _ = return_address; - const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx)); + const self: *HeapAllocator = @ptrCast(@alignCast(ctx)); - const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); + const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); const amt = n + ptr_align - 1 + @sizeOf(usize); const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst); const heap_handle = optional_heap_handle orelse blk: { @@ -308,7 +308,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return null; const root_addr = @intFromPtr(ptr); const aligned_addr = mem.alignForward(usize, root_addr, ptr_align); - const buf = @ptrFromInt([*]u8, aligned_addr)[0..n]; + const buf = @as([*]u8, @ptrFromInt(aligned_addr))[0..n]; getRecordPtr(buf).* = root_addr; return buf.ptr; } @@ -322,7 +322,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { ) bool { _ = log2_buf_align; _ = return_address; - const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx)); + const self: *HeapAllocator = @ptrCast(@alignCast(ctx)); const root_addr = getRecordPtr(buf).*; const align_offset = @intFromPtr(buf.ptr) - root_addr; @@ -330,10 +330,10 @@ pub const HeapAllocator = switch (builtin.os.tag) { const new_ptr = os.windows.kernel32.HeapReAlloc( self.heap_handle.?, os.windows.HEAP_REALLOC_IN_PLACE_ONLY, - @ptrFromInt(*anyopaque, root_addr), + @as(*anyopaque, @ptrFromInt(root_addr)), amt, ) orelse return false; - assert(new_ptr == @ptrFromInt(*anyopaque, root_addr)); + assert(new_ptr == @as(*anyopaque, @ptrFromInt(root_addr))); getRecordPtr(buf.ptr[0..new_size]).* = root_addr; return true; } @@ -346,8 +346,8 @@ pub const HeapAllocator = switch (builtin.os.tag) { ) void { _ = log2_buf_align; _ = return_address; - const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx)); - os.windows.HeapFree(self.heap_handle.?, 0, @ptrFromInt(*anyopaque, getRecordPtr(buf).*)); + const self: *HeapAllocator = @ptrCast(@alignCast(ctx)); + os.windows.HeapFree(self.heap_handle.?, 0, @as(*anyopaque, @ptrFromInt(getRecordPtr(buf).*))); } }, else => @compileError("Unsupported OS"), @@ -415,9 +415,9 @@ pub const FixedBufferAllocator = struct { } fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) 
?[*]u8 { - const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx)); + const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx)); _ = ra; - const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); + const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null; const adjusted_index = self.end_index + adjust_off; const new_end_index = adjusted_index + n; @@ -433,7 +433,7 @@ pub const FixedBufferAllocator = struct { new_size: usize, return_address: usize, ) bool { - const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx)); + const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx)); _ = log2_buf_align; _ = return_address; assert(self.ownsSlice(buf)); // sanity check @@ -462,7 +462,7 @@ pub const FixedBufferAllocator = struct { log2_buf_align: u8, return_address: usize, ) void { - const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx)); + const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx)); _ = log2_buf_align; _ = return_address; assert(self.ownsSlice(buf)); // sanity check @@ -473,9 +473,9 @@ pub const FixedBufferAllocator = struct { } fn threadSafeAlloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { - const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx)); + const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx)); _ = ra; - const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); + const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); var end_index = @atomicLoad(usize, &self.end_index, .SeqCst); while (true) { const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null; @@ -537,7 +537,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type { log2_ptr_align: u8, ra: usize, ) ?[*]u8 { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, log2_ptr_align, ra) orelse return self.fallback_allocator.rawAlloc(len, log2_ptr_align, ra); } @@ -549,7 +549,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type { new_len: usize, ra: usize, ) bool { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, log2_buf_align, new_len, ra); } else { @@ -563,7 +563,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type { log2_buf_align: u8, ra: usize, ) void { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, log2_buf_align, ra); } else { @@ -728,14 +728,14 @@ pub fn testAllocator(base_allocator: mem.Allocator) !void { try testing.expect(slice.len == 100); for (slice, 0..) |*item, i| { item.* = try allocator.create(i32); - item.*.* = @intCast(i32, i); + item.*.* = @as(i32, @intCast(i)); } slice = try allocator.realloc(slice, 20000); try testing.expect(slice.len == 20000); for (slice[0..100], 0..) 
|item, i| { - try testing.expect(item.* == @intCast(i32, i)); + try testing.expect(item.* == @as(i32, @intCast(i))); allocator.destroy(item); } diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig index 12a0bdcf30..3e92aa5eec 100644 --- a/lib/std/heap/PageAllocator.zig +++ b/lib/std/heap/PageAllocator.zig @@ -27,7 +27,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { w.MEM_COMMIT | w.MEM_RESERVE, w.PAGE_READWRITE, ) catch return null; - return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, addr)); + return @ptrCast(addr); } const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .Unordered); @@ -40,7 +40,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { 0, ) catch return null; assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size)); - const new_hint = @alignCast(mem.page_size, slice.ptr + aligned_len); + const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len); _ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic); return slice.ptr; } @@ -66,7 +66,7 @@ fn resize( // For shrinking that is not releasing, we will only // decommit the pages not needed anymore. w.VirtualFree( - @ptrFromInt(*anyopaque, new_addr_end), + @as(*anyopaque, @ptrFromInt(new_addr_end)), old_addr_end - new_addr_end, w.MEM_DECOMMIT, ); @@ -85,9 +85,9 @@ fn resize( return true; if (new_size_aligned < buf_aligned_len) { - const ptr = @alignCast(mem.page_size, buf_unaligned.ptr + new_size_aligned); + const ptr = buf_unaligned.ptr + new_size_aligned; // TODO: if the next_mmap_addr_hint is within the unmapped range, update it - os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]); + os.munmap(@alignCast(ptr[0 .. 
buf_aligned_len - new_size_aligned])); return true; } @@ -104,7 +104,6 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v os.windows.VirtualFree(slice.ptr, 0, os.windows.MEM_RELEASE); } else { const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size); - const ptr = @alignCast(mem.page_size, slice.ptr); - os.munmap(ptr[0..buf_aligned_len]); + os.munmap(@alignCast(slice.ptr[0..buf_aligned_len])); } } diff --git a/lib/std/heap/ThreadSafeAllocator.zig b/lib/std/heap/ThreadSafeAllocator.zig index fe10eb2fdb..12bb095b30 100644 --- a/lib/std/heap/ThreadSafeAllocator.zig +++ b/lib/std/heap/ThreadSafeAllocator.zig @@ -15,7 +15,7 @@ pub fn allocator(self: *ThreadSafeAllocator) Allocator { } fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { - const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx)); + const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx)); self.mutex.lock(); defer self.mutex.unlock(); @@ -23,7 +23,7 @@ fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { } fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool { - const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx)); + const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx)); self.mutex.lock(); defer self.mutex.unlock(); @@ -32,7 +32,7 @@ fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_ad } fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void { - const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx)); + const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx)); self.mutex.lock(); defer self.mutex.unlock(); diff --git a/lib/std/heap/WasmAllocator.zig b/lib/std/heap/WasmAllocator.zig index e3e436fd2b..60051b688a 100644 --- a/lib/std/heap/WasmAllocator.zig +++ b/lib/std/heap/WasmAllocator.zig @@ -47,7 +47,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[* _ = ctx; _ = return_address; // Make room for the freelist next pointer. - const alignment = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_align); + const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align)); const actual_len = @max(len +| @sizeOf(usize), alignment); const slot_size = math.ceilPowerOfTwo(usize, actual_len) catch return null; const class = math.log2(slot_size) - min_class; @@ -55,7 +55,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[* const addr = a: { const top_free_ptr = frees[class]; if (top_free_ptr != 0) { - const node = @ptrFromInt(*usize, top_free_ptr + (slot_size - @sizeOf(usize))); + const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size - @sizeOf(usize)))); frees[class] = node.*; break :a top_free_ptr; } @@ -74,11 +74,11 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[* break :a next_addr; } }; - return @ptrFromInt([*]u8, addr); + return @as([*]u8, @ptrFromInt(addr)); } const bigpages_needed = bigPagesNeeded(actual_len); const addr = allocBigPages(bigpages_needed); - return @ptrFromInt([*]u8, addr); + return @as([*]u8, @ptrFromInt(addr)); } fn resize( @@ -92,7 +92,7 @@ fn resize( _ = return_address; // We don't want to move anything from one size class to another, but we // can recover bytes in between powers of two. 
- const buf_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_buf_align); + const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align)); const old_actual_len = @max(buf.len + @sizeOf(usize), buf_align); const new_actual_len = @max(new_len +| @sizeOf(usize), buf_align); const old_small_slot_size = math.ceilPowerOfTwoAssert(usize, old_actual_len); @@ -117,20 +117,20 @@ fn free( ) void { _ = ctx; _ = return_address; - const buf_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_buf_align); + const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align)); const actual_len = @max(buf.len + @sizeOf(usize), buf_align); const slot_size = math.ceilPowerOfTwoAssert(usize, actual_len); const class = math.log2(slot_size) - min_class; const addr = @intFromPtr(buf.ptr); if (class < size_class_count) { - const node = @ptrFromInt(*usize, addr + (slot_size - @sizeOf(usize))); + const node = @as(*usize, @ptrFromInt(addr + (slot_size - @sizeOf(usize)))); node.* = frees[class]; frees[class] = addr; } else { const bigpages_needed = bigPagesNeeded(actual_len); const pow2_pages = math.ceilPowerOfTwoAssert(usize, bigpages_needed); const big_slot_size_bytes = pow2_pages * bigpage_size; - const node = @ptrFromInt(*usize, addr + (big_slot_size_bytes - @sizeOf(usize))); + const node = @as(*usize, @ptrFromInt(addr + (big_slot_size_bytes - @sizeOf(usize)))); const big_class = math.log2(pow2_pages); node.* = big_frees[big_class]; big_frees[big_class] = addr; @@ -148,14 +148,14 @@ fn allocBigPages(n: usize) usize { const top_free_ptr = big_frees[class]; if (top_free_ptr != 0) { - const node = @ptrFromInt(*usize, top_free_ptr + (slot_size_bytes - @sizeOf(usize))); + const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size_bytes - @sizeOf(usize)))); big_frees[class] = node.*; return top_free_ptr; } const page_index = @wasmMemoryGrow(0, pow2_pages * pages_per_bigpage); if (page_index <= 0) return 0; - const addr = @intCast(u32, page_index) * wasm.page_size; + const addr = @as(u32, @intCast(page_index)) * wasm.page_size; return addr; } diff --git a/lib/std/heap/WasmPageAllocator.zig b/lib/std/heap/WasmPageAllocator.zig index c77164ee2d..8f484c52f6 100644 --- a/lib/std/heap/WasmPageAllocator.zig +++ b/lib/std/heap/WasmPageAllocator.zig @@ -40,7 +40,7 @@ const FreeBlock = struct { fn getBit(self: FreeBlock, idx: usize) PageStatus { const bit_offset = 0; - return @enumFromInt(PageStatus, Io.get(mem.sliceAsBytes(self.data), idx, bit_offset)); + return @as(PageStatus, @enumFromInt(Io.get(mem.sliceAsBytes(self.data), idx, bit_offset))); } fn setBits(self: FreeBlock, start_idx: usize, len: usize, val: PageStatus) void { @@ -63,7 +63,7 @@ const FreeBlock = struct { fn useRecycled(self: FreeBlock, num_pages: usize, log2_align: u8) usize { @setCold(true); for (self.data, 0..) 
|segment, i| { - const spills_into_next = @bitCast(i128, segment) < 0; + const spills_into_next = @as(i128, @bitCast(segment)) < 0; const has_enough_bits = @popCount(segment) >= num_pages; if (!spills_into_next and !has_enough_bits) continue; @@ -109,7 +109,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 { if (len > maxInt(usize) - (mem.page_size - 1)) return null; const page_count = nPages(len); const page_idx = allocPages(page_count, log2_align) catch return null; - return @ptrFromInt([*]u8, page_idx * mem.page_size); + return @as([*]u8, @ptrFromInt(page_idx * mem.page_size)); } fn allocPages(page_count: usize, log2_align: u8) !usize { @@ -129,7 +129,7 @@ fn allocPages(page_count: usize, log2_align: u8) !usize { const next_page_addr = next_page_idx * mem.page_size; const aligned_addr = mem.alignForwardLog2(next_page_addr, log2_align); const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size); - const result = @wasmMemoryGrow(0, @intCast(u32, drop_page_count + page_count)); + const result = @wasmMemoryGrow(0, @as(u32, @intCast(drop_page_count + page_count))); if (result <= 0) return error.OutOfMemory; assert(result == next_page_idx); @@ -137,7 +137,7 @@ fn allocPages(page_count: usize, log2_align: u8) !usize { if (drop_page_count > 0) { freePages(next_page_idx, aligned_page_idx); } - return @intCast(usize, aligned_page_idx); + return @as(usize, @intCast(aligned_page_idx)); } fn freePages(start: usize, end: usize) void { @@ -151,7 +151,7 @@ fn freePages(start: usize, end: usize) void { // TODO: would it be better if we use the first page instead? new_end -= 1; - extended.data = @ptrFromInt([*]u128, new_end * mem.page_size)[0 .. mem.page_size / @sizeOf(u128)]; + extended.data = @as([*]u128, @ptrFromInt(new_end * mem.page_size))[0 .. mem.page_size / @sizeOf(u128)]; // Since this is the first page being freed and we consume it, assume *nothing* is free. 
@memset(extended.data, PageStatus.none_free); } diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index a8d6641d8d..d547987f63 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -48,7 +48,7 @@ pub const ArenaAllocator = struct { // this has to occur before the free because the free frees node const next_it = node.next; const align_bits = std.math.log2_int(usize, @alignOf(BufNode)); - const alloc_buf = @ptrCast([*]u8, node)[0..node.data]; + const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data]; self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress()); it = next_it; } @@ -128,7 +128,7 @@ pub const ArenaAllocator = struct { const next_it = node.next; if (next_it == null) break node; - const alloc_buf = @ptrCast([*]u8, node)[0..node.data]; + const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data]; self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress()); it = next_it; } else null; @@ -140,7 +140,7 @@ pub const ArenaAllocator = struct { // perfect, no need to invoke the child_allocator if (first_node.data == total_size) return true; - const first_alloc_buf = @ptrCast([*]u8, first_node)[0..first_node.data]; + const first_alloc_buf = @as([*]u8, @ptrCast(first_node))[0..first_node.data]; if (self.child_allocator.rawResize(first_alloc_buf, align_bits, total_size, @returnAddress())) { // successful resize first_node.data = total_size; @@ -151,7 +151,7 @@ pub const ArenaAllocator = struct { return false; }; self.child_allocator.rawFree(first_alloc_buf, align_bits, @returnAddress()); - const node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), new_ptr)); + const node: *BufNode = @ptrCast(@alignCast(new_ptr)); node.* = .{ .data = total_size }; self.state.buffer_list.first = node; } @@ -166,7 +166,7 @@ pub const ArenaAllocator = struct { const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode)); const ptr = self.child_allocator.rawAlloc(len, log2_align, @returnAddress()) orelse return null; - const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), ptr)); + const buf_node: *BufNode = @ptrCast(@alignCast(ptr)); buf_node.* = .{ .data = len }; self.state.buffer_list.prepend(buf_node); self.state.end_index = 0; @@ -174,16 +174,16 @@ pub const ArenaAllocator = struct { } fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { - const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx)); + const self: *ArenaAllocator = @ptrCast(@alignCast(ctx)); _ = ra; - const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); + const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); var cur_node = if (self.state.buffer_list.first) |first_node| first_node else (self.createNode(0, n + ptr_align) orelse return null); while (true) { - const cur_alloc_buf = @ptrCast([*]u8, cur_node)[0..cur_node.data]; + const cur_alloc_buf = @as([*]u8, @ptrCast(cur_node))[0..cur_node.data]; const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..]; const addr = @intFromPtr(cur_buf.ptr) + self.state.end_index; const adjusted_addr = mem.alignForward(usize, addr, ptr_align); @@ -208,12 +208,12 @@ pub const ArenaAllocator = struct { } fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool { - const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx)); + const self: *ArenaAllocator = @ptrCast(@alignCast(ctx)); _ = log2_buf_align; _ = ret_addr; const cur_node = 
self.state.buffer_list.first orelse return false; - const cur_buf = @ptrCast([*]u8, cur_node)[@sizeOf(BufNode)..cur_node.data]; + const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data]; if (@intFromPtr(cur_buf.ptr) + self.state.end_index != @intFromPtr(buf.ptr) + buf.len) { // It's not the most recent allocation, so it cannot be expanded, // but it's fine if they want to make it smaller. @@ -235,10 +235,10 @@ pub const ArenaAllocator = struct { _ = log2_buf_align; _ = ret_addr; - const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx)); + const self: *ArenaAllocator = @ptrCast(@alignCast(ctx)); const cur_node = self.state.buffer_list.first orelse return; - const cur_buf = @ptrCast([*]u8, cur_node)[@sizeOf(BufNode)..cur_node.data]; + const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data]; if (@intFromPtr(cur_buf.ptr) + self.state.end_index == @intFromPtr(buf.ptr) + buf.len) { self.state.end_index -= buf.len; diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index 98375c850e..11f7d9dd27 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -250,7 +250,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { used_count: SlotIndex, fn usedBits(bucket: *BucketHeader, index: usize) *u8 { - return @ptrFromInt(*u8, @intFromPtr(bucket) + @sizeOf(BucketHeader) + index); + return @as(*u8, @ptrFromInt(@intFromPtr(bucket) + @sizeOf(BucketHeader) + index)); } fn stackTracePtr( @@ -259,10 +259,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { slot_index: SlotIndex, trace_kind: TraceKind, ) *[stack_n]usize { - const start_ptr = @ptrCast([*]u8, bucket) + bucketStackFramesStart(size_class); + const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketStackFramesStart(size_class); const addr = start_ptr + one_trace_size * traces_per_slot * slot_index + @intFromEnum(trace_kind) * @as(usize, one_trace_size); - return @ptrCast(*[stack_n]usize, @alignCast(@alignOf(usize), addr)); + return @ptrCast(@alignCast(addr)); } fn captureStackTrace( @@ -338,9 +338,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { if (used_byte != 0) { var bit_index: u3 = 0; while (true) : (bit_index += 1) { - const is_used = @truncate(u1, used_byte >> bit_index) != 0; + const is_used = @as(u1, @truncate(used_byte >> bit_index)) != 0; if (is_used) { - const slot_index = @intCast(SlotIndex, used_bits_byte * 8 + bit_index); + const slot_index = @as(SlotIndex, @intCast(used_bits_byte * 8 + bit_index)); const stack_trace = bucketStackTrace(bucket, size_class, slot_index, .alloc); const addr = bucket.page + slot_index * size_class; log.err("memory address 0x{x} leaked: {}", .{ @@ -361,7 +361,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { var leaks = false; for (self.buckets, 0..) 
|optional_bucket, bucket_i| { const first_bucket = optional_bucket orelse continue; - const size_class = @as(usize, 1) << @intCast(math.Log2Int(usize), bucket_i); + const size_class = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(bucket_i)); const used_bits_count = usedBitsCount(size_class); var bucket = first_bucket; while (true) { @@ -385,7 +385,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { fn freeBucket(self: *Self, bucket: *BucketHeader, size_class: usize) void { const bucket_size = bucketSize(size_class); - const bucket_slice = @ptrCast([*]align(@alignOf(BucketHeader)) u8, bucket)[0..bucket_size]; + const bucket_slice = @as([*]align(@alignOf(BucketHeader)) u8, @ptrCast(bucket))[0..bucket_size]; self.backing_allocator.free(bucket_slice); } @@ -444,7 +444,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { self.small_allocations.deinit(self.backing_allocator); } self.* = undefined; - return @enumFromInt(Check, @intFromBool(leaks)); + return @as(Check, @enumFromInt(@intFromBool(leaks))); } fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void { @@ -496,7 +496,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { bucket.alloc_cursor += 1; var used_bits_byte = bucket.usedBits(slot_index / 8); - const used_bit_index: u3 = @intCast(u3, slot_index % 8); // TODO cast should be unnecessary + const used_bit_index: u3 = @as(u3, @intCast(slot_index % 8)); // TODO cast should be unnecessary used_bits_byte.* |= (@as(u8, 1) << used_bit_index); bucket.used_count += 1; bucket.captureStackTrace(trace_addr, size_class, slot_index, .alloc); @@ -667,8 +667,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { new_size: usize, ret_addr: usize, ) bool { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); - const log2_old_align = @intCast(Allocator.Log2Align, log2_old_align_u8); + const self: *Self = @ptrCast(@alignCast(ctx)); + const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8)); self.mutex.lock(); defer self.mutex.unlock(); @@ -704,11 +704,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr); }; const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page); - const slot_index = @intCast(SlotIndex, byte_offset / size_class); + const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class)); const used_byte_index = slot_index / 8; - const used_bit_index = @intCast(u3, slot_index % 8); + const used_bit_index = @as(u3, @intCast(slot_index % 8)); const used_byte = bucket.usedBits(used_byte_index); - const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0; + const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0; if (!is_used) { if (config.safety) { reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free)); @@ -739,8 +739,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } if (log2_old_align != entry.value_ptr.log2_ptr_align) { log.err("Allocation alignment {d} does not match resize alignment {d}. 
Allocation: {} Resize: {}", .{ - @as(usize, 1) << @intCast(math.Log2Int(usize), entry.value_ptr.log2_ptr_align), - @as(usize, 1) << @intCast(math.Log2Int(usize), log2_old_align), + @as(usize, 1) << @as(math.Log2Int(usize), @intCast(entry.value_ptr.log2_ptr_align)), + @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_old_align)), bucketStackTrace(bucket, size_class, slot_index, .alloc), free_stack_trace, }); @@ -786,8 +786,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { log2_old_align_u8: u8, ret_addr: usize, ) void { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); - const log2_old_align = @intCast(Allocator.Log2Align, log2_old_align_u8); + const self: *Self = @ptrCast(@alignCast(ctx)); + const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8)); self.mutex.lock(); defer self.mutex.unlock(); @@ -825,11 +825,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { return; }; const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page); - const slot_index = @intCast(SlotIndex, byte_offset / size_class); + const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class)); const used_byte_index = slot_index / 8; - const used_bit_index = @intCast(u3, slot_index % 8); + const used_bit_index = @as(u3, @intCast(slot_index % 8)); const used_byte = bucket.usedBits(used_byte_index); - const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0; + const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0; if (!is_used) { if (config.safety) { reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free)); @@ -861,8 +861,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } if (log2_old_align != entry.value_ptr.log2_ptr_align) { log.err("Allocation alignment {d} does not match free alignment {d}. 
Allocation: {} Free: {}", .{ - @as(usize, 1) << @intCast(math.Log2Int(usize), entry.value_ptr.log2_ptr_align), - @as(usize, 1) << @intCast(math.Log2Int(usize), log2_old_align), + @as(usize, 1) << @as(math.Log2Int(usize), @intCast(entry.value_ptr.log2_ptr_align)), + @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_old_align)), bucketStackTrace(bucket, size_class, slot_index, .alloc), free_stack_trace, }); @@ -896,7 +896,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } else { // move alloc_cursor to end so we can tell size_class later const slot_count = @divExact(page_size, size_class); - bucket.alloc_cursor = @truncate(SlotIndex, slot_count); + bucket.alloc_cursor = @as(SlotIndex, @truncate(slot_count)); if (self.empty_buckets) |prev_bucket| { // empty_buckets is ordered newest to oldest through prev so that if // config.never_unmap is false and backing_allocator reuses freed memory @@ -936,11 +936,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); self.mutex.lock(); defer self.mutex.unlock(); if (!self.isAllocationAllowed(len)) return null; - return allocInner(self, len, @intCast(Allocator.Log2Align, log2_ptr_align), ret_addr) catch return null; + return allocInner(self, len, @as(Allocator.Log2Align, @intCast(log2_ptr_align)), ret_addr) catch return null; } fn allocInner( @@ -949,7 +949,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { log2_ptr_align: Allocator.Log2Align, ret_addr: usize, ) Allocator.Error![*]u8 { - const new_aligned_size = @max(len, @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align)); + const new_aligned_size = @max(len, @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align))); if (new_aligned_size > largest_bucket_object_size) { try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1); const ptr = self.backing_allocator.rawAlloc(len, log2_ptr_align, ret_addr) orelse @@ -1002,7 +1002,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { const bucket_size = bucketSize(size_class); const bucket_bytes = try self.backing_allocator.alignedAlloc(u8, @alignOf(BucketHeader), bucket_size); - const ptr = @ptrCast(*BucketHeader, bucket_bytes.ptr); + const ptr = @as(*BucketHeader, @ptrCast(bucket_bytes.ptr)); ptr.* = BucketHeader{ .prev = ptr, .next = ptr, diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig index b2d83c416b..b5c86c9beb 100644 --- a/lib/std/heap/log_to_writer_allocator.zig +++ b/lib/std/heap/log_to_writer_allocator.zig @@ -34,7 +34,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { log2_ptr_align: u8, ra: usize, ) ?[*]u8 { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); self.writer.print("alloc : {}", .{len}) catch {}; const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra); if (result != null) { @@ -52,7 +52,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { new_len: usize, ra: usize, ) bool { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); if (new_len <= buf.len) { self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {}; } else { @@ -77,7 +77,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { log2_buf_align: u8, 
ra: usize, ) void { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); self.writer.print("free : {}\n", .{buf.len}) catch {}; self.parent_allocator.rawFree(buf, log2_buf_align, ra); } diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig index 0d32b5405e..6924a284e3 100644 --- a/lib/std/heap/logging_allocator.zig +++ b/lib/std/heap/logging_allocator.zig @@ -59,7 +59,7 @@ pub fn ScopedLoggingAllocator( log2_ptr_align: u8, ra: usize, ) ?[*]u8 { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra); if (result != null) { logHelper( @@ -84,7 +84,7 @@ pub fn ScopedLoggingAllocator( new_len: usize, ra: usize, ) bool { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); if (self.parent_allocator.rawResize(buf, log2_buf_align, new_len, ra)) { if (new_len <= buf.len) { logHelper( @@ -118,7 +118,7 @@ pub fn ScopedLoggingAllocator( log2_buf_align: u8, ra: usize, ) void { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); self.parent_allocator.rawFree(buf, log2_buf_align, ra); logHelper(success_log_level, "free - len: {}", .{buf.len}); } diff --git a/lib/std/heap/memory_pool.zig b/lib/std/heap/memory_pool.zig index 3fc7dfbfca..b56a15d006 100644 --- a/lib/std/heap/memory_pool.zig +++ b/lib/std/heap/memory_pool.zig @@ -70,7 +70,7 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type var i: usize = 0; while (i < initial_size) : (i += 1) { const raw_mem = try pool.allocNew(); - const free_node = @ptrCast(NodePtr, raw_mem); + const free_node = @as(NodePtr, @ptrCast(raw_mem)); free_node.* = Node{ .next = pool.free_list, }; @@ -106,11 +106,11 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type pool.free_list = item.next; break :blk item; } else if (pool_options.growable) - @ptrCast(NodePtr, try pool.allocNew()) + @as(NodePtr, @ptrCast(try pool.allocNew())) else return error.OutOfMemory; - const ptr = @ptrCast(ItemPtr, node); + const ptr = @as(ItemPtr, @ptrCast(node)); ptr.* = undefined; return ptr; } @@ -120,7 +120,7 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type pub fn destroy(pool: *Pool, ptr: ItemPtr) void { ptr.* = undefined; - const node = @ptrCast(NodePtr, ptr); + const node = @as(NodePtr, @ptrCast(ptr)); node.* = Node{ .next = pool.free_list, }; diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig index 942ff4904d..db576e72b2 100644 --- a/lib/std/http/Client.zig +++ b/lib/std/http/Client.zig @@ -187,7 +187,7 @@ pub const Connection = struct { const nread = try conn.rawReadAtLeast(conn.read_buf[0..], 1); if (nread == 0) return error.EndOfStream; conn.read_start = 0; - conn.read_end = @intCast(u16, nread); + conn.read_end = @as(u16, @intCast(nread)); } pub fn peek(conn: *Connection) []const u8 { @@ -208,8 +208,8 @@ pub const Connection = struct { if (available_read > available_buffer) { // partially read buffered data @memcpy(buffer[out_index..], conn.read_buf[conn.read_start..conn.read_end][0..available_buffer]); - out_index += @intCast(u16, available_buffer); - conn.read_start += @intCast(u16, available_buffer); + out_index += @as(u16, @intCast(available_buffer)); + conn.read_start += @as(u16, @intCast(available_buffer)); break; } else if 
(available_read > 0) { // fully read buffered data @@ -343,7 +343,7 @@ pub const Response = struct { else => return error.HttpHeadersInvalid, }; if (first_line[8] != ' ') return error.HttpHeadersInvalid; - const status = @enumFromInt(http.Status, parseInt3(first_line[9..12].*)); + const status = @as(http.Status, @enumFromInt(parseInt3(first_line[9..12].*))); const reason = mem.trimLeft(u8, first_line[12..], " "); res.version = version; @@ -415,7 +415,7 @@ pub const Response = struct { } inline fn int64(array: *const [8]u8) u64 { - return @bitCast(u64, array.*); + return @as(u64, @bitCast(array.*)); } fn parseInt3(nnn: @Vector(3, u8)) u10 { @@ -649,7 +649,7 @@ pub const Request = struct { try req.connection.?.data.fill(); const nchecked = try req.response.parser.checkCompleteHead(req.client.allocator, req.connection.?.data.peek()); - req.connection.?.data.drop(@intCast(u16, nchecked)); + req.connection.?.data.drop(@as(u16, @intCast(nchecked))); if (req.response.parser.state.isContent()) break; } @@ -768,7 +768,7 @@ pub const Request = struct { try req.connection.?.data.fill(); const nchecked = try req.response.parser.checkCompleteHead(req.client.allocator, req.connection.?.data.peek()); - req.connection.?.data.drop(@intCast(u16, nchecked)); + req.connection.?.data.drop(@as(u16, @intCast(nchecked))); } if (has_trail) { diff --git a/lib/std/http/Server.zig b/lib/std/http/Server.zig index fe57b5735d..8c8661ee21 100644 --- a/lib/std/http/Server.zig +++ b/lib/std/http/Server.zig @@ -46,7 +46,7 @@ pub const Connection = struct { const nread = try conn.rawReadAtLeast(conn.read_buf[0..], 1); if (nread == 0) return error.EndOfStream; conn.read_start = 0; - conn.read_end = @intCast(u16, nread); + conn.read_end = @as(u16, @intCast(nread)); } pub fn peek(conn: *Connection) []const u8 { @@ -67,8 +67,8 @@ pub const Connection = struct { if (available_read > available_buffer) { // partially read buffered data @memcpy(buffer[out_index..], conn.read_buf[conn.read_start..conn.read_end][0..available_buffer]); - out_index += @intCast(u16, available_buffer); - conn.read_start += @intCast(u16, available_buffer); + out_index += @as(u16, @intCast(available_buffer)); + conn.read_start += @as(u16, @intCast(available_buffer)); break; } else if (available_read > 0) { // fully read buffered data @@ -268,7 +268,7 @@ pub const Request = struct { } inline fn int64(array: *const [8]u8) u64 { - return @bitCast(u64, array.*); + return @as(u64, @bitCast(array.*)); } method: http.Method, @@ -493,7 +493,7 @@ pub const Response = struct { try res.connection.fill(); const nchecked = try res.request.parser.checkCompleteHead(res.allocator, res.connection.peek()); - res.connection.drop(@intCast(u16, nchecked)); + res.connection.drop(@as(u16, @intCast(nchecked))); if (res.request.parser.state.isContent()) break; } @@ -560,7 +560,7 @@ pub const Response = struct { try res.connection.fill(); const nchecked = try res.request.parser.checkCompleteHead(res.allocator, res.connection.peek()); - res.connection.drop(@intCast(u16, nchecked)); + res.connection.drop(@as(u16, @intCast(nchecked))); } if (has_trail) { diff --git a/lib/std/http/protocol.zig b/lib/std/http/protocol.zig index 6bafb08483..604267bf16 100644 --- a/lib/std/http/protocol.zig +++ b/lib/std/http/protocol.zig @@ -83,7 +83,7 @@ pub const HeadersParser = struct { /// first byte of content is located at `bytes[result]`. 
pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 { const vector_len: comptime_int = comptime @max(std.simd.suggestVectorSize(u8) orelse 1, 8); - const len = @intCast(u32, bytes.len); + const len = @as(u32, @intCast(bytes.len)); var index: u32 = 0; while (true) { @@ -182,8 +182,8 @@ pub const HeadersParser = struct { const chunk = bytes[index..][0..vector_len]; const v: Vector = chunk.*; - const matches_r = @bitCast(BitVector, v == @splat(vector_len, @as(u8, '\r'))); - const matches_n = @bitCast(BitVector, v == @splat(vector_len, @as(u8, '\n'))); + const matches_r = @as(BitVector, @bitCast(v == @splat(vector_len, @as(u8, '\r')))); + const matches_n = @as(BitVector, @bitCast(v == @splat(vector_len, @as(u8, '\n')))); const matches_or: SizeVector = matches_r | matches_n; const matches = @reduce(.Add, matches_or); @@ -234,7 +234,7 @@ pub const HeadersParser = struct { }, 4...vector_len => { inline for (0..vector_len - 3) |i_usize| { - const i = @truncate(u32, i_usize); + const i = @as(u32, @truncate(i_usize)); const b32 = int32(chunk[i..][0..4]); const b16 = intShift(u16, b32); @@ -405,10 +405,10 @@ pub const HeadersParser = struct { /// If the amount returned is less than `bytes.len`, you may assume that the parser is in the `chunk_data` state /// and that the first byte of the chunk is at `bytes[result]`. pub fn findChunkedLen(r: *HeadersParser, bytes: []const u8) u32 { - const len = @intCast(u32, bytes.len); + const len = @as(u32, @intCast(bytes.len)); for (bytes[0..], 0..) |c, i| { - const index = @intCast(u32, i); + const index = @as(u32, @intCast(i)); switch (r.state) { .chunk_data_suffix => switch (c) { '\r' => r.state = .chunk_data_suffix_r, @@ -529,7 +529,7 @@ pub const HeadersParser = struct { try conn.fill(); const nread = @min(conn.peek().len, data_avail); - conn.drop(@intCast(u16, nread)); + conn.drop(@as(u16, @intCast(nread))); r.next_chunk_length -= nread; if (r.next_chunk_length == 0) r.done = true; @@ -538,7 +538,7 @@ pub const HeadersParser = struct { } else { const out_avail = buffer.len; - const can_read = @intCast(usize, @min(data_avail, out_avail)); + const can_read = @as(usize, @intCast(@min(data_avail, out_avail))); const nread = try conn.read(buffer[0..can_read]); r.next_chunk_length -= nread; @@ -551,7 +551,7 @@ pub const HeadersParser = struct { try conn.fill(); const i = r.findChunkedLen(conn.peek()); - conn.drop(@intCast(u16, i)); + conn.drop(@as(u16, @intCast(i))); switch (r.state) { .invalid => return error.HttpChunkInvalid, @@ -579,10 +579,10 @@ pub const HeadersParser = struct { try conn.fill(); const nread = @min(conn.peek().len, data_avail); - conn.drop(@intCast(u16, nread)); + conn.drop(@as(u16, @intCast(nread))); r.next_chunk_length -= nread; } else if (out_avail > 0) { - const can_read = @intCast(usize, @min(data_avail, out_avail)); + const can_read: usize = @intCast(@min(data_avail, out_avail)); const nread = try conn.read(buffer[out_index..][0..can_read]); r.next_chunk_length -= nread; out_index += nread; @@ -601,21 +601,21 @@ pub const HeadersParser = struct { }; inline fn int16(array: *const [2]u8) u16 { - return @bitCast(u16, array.*); + return @as(u16, @bitCast(array.*)); } inline fn int24(array: *const [3]u8) u24 { - return @bitCast(u24, array.*); + return @as(u24, @bitCast(array.*)); } inline fn int32(array: *const [4]u8) u32 { - return @bitCast(u32, array.*); + return @as(u32, @bitCast(array.*)); } inline fn intShift(comptime T: type, x: anytype) T { switch (@import("builtin").cpu.arch.endian()) { - .Little => return @truncate(T, x 
>> (@bitSizeOf(@TypeOf(x)) - @bitSizeOf(T))), - .Big => return @truncate(T, x), + .Little => return @as(T, @truncate(x >> (@bitSizeOf(@TypeOf(x)) - @bitSizeOf(T)))), + .Big => return @as(T, @truncate(x)), } } @@ -634,7 +634,7 @@ const MockBufferedConnection = struct { const nread = try conn.conn.read(conn.buf[0..]); if (nread == 0) return error.EndOfStream; conn.start = 0; - conn.end = @truncate(u16, nread); + conn.end = @as(u16, @truncate(nread)); } pub fn peek(conn: *MockBufferedConnection) []const u8 { @@ -652,7 +652,7 @@ const MockBufferedConnection = struct { const left = buffer.len - out_index; if (available > 0) { - const can_read = @truncate(u16, @min(available, left)); + const can_read = @as(u16, @truncate(@min(available, left))); @memcpy(buffer[out_index..][0..can_read], conn.buf[conn.start..][0..can_read]); out_index += can_read; @@ -705,8 +705,8 @@ test "HeadersParser.findHeadersEnd" { for (0..36) |i| { r = HeadersParser.initDynamic(0); - try std.testing.expectEqual(@intCast(u32, i), r.findHeadersEnd(data[0..i])); - try std.testing.expectEqual(@intCast(u32, 35 - i), r.findHeadersEnd(data[i..])); + try std.testing.expectEqual(@as(u32, @intCast(i)), r.findHeadersEnd(data[0..i])); + try std.testing.expectEqual(@as(u32, @intCast(35 - i)), r.findHeadersEnd(data[i..])); } } @@ -761,7 +761,7 @@ test "HeadersParser.read length" { try conn.fill(); const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek()); - conn.drop(@intCast(u16, nchecked)); + conn.drop(@as(u16, @intCast(nchecked))); if (r.state.isContent()) break; } @@ -792,7 +792,7 @@ test "HeadersParser.read chunked" { try conn.fill(); const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek()); - conn.drop(@intCast(u16, nchecked)); + conn.drop(@as(u16, @intCast(nchecked))); if (r.state.isContent()) break; } @@ -822,7 +822,7 @@ test "HeadersParser.read chunked trailer" { try conn.fill(); const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek()); - conn.drop(@intCast(u16, nchecked)); + conn.drop(@as(u16, @intCast(nchecked))); if (r.state.isContent()) break; } @@ -837,7 +837,7 @@ test "HeadersParser.read chunked trailer" { try conn.fill(); const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek()); - conn.drop(@intCast(u16, nchecked)); + conn.drop(@as(u16, @intCast(nchecked))); if (r.state.isContent()) break; } diff --git a/lib/std/io.zig b/lib/std/io.zig index f2804a3107..e7a4476c0f 100644 --- a/lib/std/io.zig +++ b/lib/std/io.zig @@ -275,7 +275,7 @@ pub fn Poller(comptime StreamEnum: type) type { )) { .pending => { self.windows.active.handles_buf[self.windows.active.count] = handle; - self.windows.active.stream_map[self.windows.active.count] = @enumFromInt(StreamEnum, i); + self.windows.active.stream_map[self.windows.active.count] = @as(StreamEnum, @enumFromInt(i)); self.windows.active.count += 1; }, .closed => {}, // don't add to the wait_objects list diff --git a/lib/std/io/bit_reader.zig b/lib/std/io/bit_reader.zig index 4bdb0b9194..7ea2ff5009 100644 --- a/lib/std/io/bit_reader.zig +++ b/lib/std/io/bit_reader.zig @@ -60,7 +60,7 @@ pub fn BitReader(comptime endian: std.builtin.Endian, comptime ReaderType: type) var out_buffer = @as(Buf, 0); if (self.bit_count > 0) { - const n = if (self.bit_count >= bits) @intCast(u3, bits) else self.bit_count; + const n = if (self.bit_count >= bits) @as(u3, @intCast(bits)) else self.bit_count; const shift = u7_bit_count - n; switch (endian) { .Big => { @@ -88,45 +88,45 @@ pub fn BitReader(comptime endian: std.builtin.Endian, 
comptime ReaderType: type) while (out_bits.* < bits) { const n = bits - out_bits.*; const next_byte = self.forward_reader.readByte() catch |err| switch (err) { - error.EndOfStream => return @intCast(U, out_buffer), + error.EndOfStream => return @as(U, @intCast(out_buffer)), else => |e| return e, }; switch (endian) { .Big => { if (n >= u8_bit_count) { - out_buffer <<= @intCast(u3, u8_bit_count - 1); + out_buffer <<= @as(u3, @intCast(u8_bit_count - 1)); out_buffer <<= 1; out_buffer |= @as(Buf, next_byte); out_bits.* += u8_bit_count; continue; } - const shift = @intCast(u3, u8_bit_count - n); - out_buffer <<= @intCast(BufShift, n); + const shift = @as(u3, @intCast(u8_bit_count - n)); + out_buffer <<= @as(BufShift, @intCast(n)); out_buffer |= @as(Buf, next_byte >> shift); out_bits.* += n; - self.bit_buffer = @truncate(u7, next_byte << @intCast(u3, n - 1)); + self.bit_buffer = @as(u7, @truncate(next_byte << @as(u3, @intCast(n - 1)))); self.bit_count = shift; }, .Little => { if (n >= u8_bit_count) { - out_buffer |= @as(Buf, next_byte) << @intCast(BufShift, out_bits.*); + out_buffer |= @as(Buf, next_byte) << @as(BufShift, @intCast(out_bits.*)); out_bits.* += u8_bit_count; continue; } - const shift = @intCast(u3, u8_bit_count - n); + const shift = @as(u3, @intCast(u8_bit_count - n)); const value = (next_byte << shift) >> shift; - out_buffer |= @as(Buf, value) << @intCast(BufShift, out_bits.*); + out_buffer |= @as(Buf, value) << @as(BufShift, @intCast(out_bits.*)); out_bits.* += n; - self.bit_buffer = @truncate(u7, next_byte >> @intCast(u3, n)); + self.bit_buffer = @as(u7, @truncate(next_byte >> @as(u3, @intCast(n)))); self.bit_count = shift; }, } } - return @intCast(U, out_buffer); + return @as(U, @intCast(out_buffer)); } pub fn alignToByte(self: *Self) void { diff --git a/lib/std/io/bit_writer.zig b/lib/std/io/bit_writer.zig index 0be2e7ab08..ef8f007264 100644 --- a/lib/std/io/bit_writer.zig +++ b/lib/std/io/bit_writer.zig @@ -47,27 +47,27 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type) const Buf = std.meta.Int(.unsigned, buf_bit_count); const BufShift = math.Log2Int(Buf); - const buf_value = @intCast(Buf, value); + const buf_value = @as(Buf, @intCast(value)); - const high_byte_shift = @intCast(BufShift, buf_bit_count - u8_bit_count); + const high_byte_shift = @as(BufShift, @intCast(buf_bit_count - u8_bit_count)); var in_buffer = switch (endian) { - .Big => buf_value << @intCast(BufShift, buf_bit_count - bits), + .Big => buf_value << @as(BufShift, @intCast(buf_bit_count - bits)), .Little => buf_value, }; var in_bits = bits; if (self.bit_count > 0) { const bits_remaining = u8_bit_count - self.bit_count; - const n = @intCast(u3, if (bits_remaining > bits) bits else bits_remaining); + const n = @as(u3, @intCast(if (bits_remaining > bits) bits else bits_remaining)); switch (endian) { .Big => { - const shift = @intCast(BufShift, high_byte_shift + self.bit_count); - const v = @intCast(u8, in_buffer >> shift); + const shift = @as(BufShift, @intCast(high_byte_shift + self.bit_count)); + const v = @as(u8, @intCast(in_buffer >> shift)); self.bit_buffer |= v; in_buffer <<= n; }, .Little => { - const v = @truncate(u8, in_buffer) << @intCast(u3, self.bit_count); + const v = @as(u8, @truncate(in_buffer)) << @as(u3, @intCast(self.bit_count)); self.bit_buffer |= v; in_buffer >>= n; }, @@ -87,15 +87,15 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type) while (in_bits >= u8_bit_count) { switch (endian) { .Big => { - const v = @intCast(u8, 
in_buffer >> high_byte_shift); + const v = @as(u8, @intCast(in_buffer >> high_byte_shift)); try self.forward_writer.writeByte(v); - in_buffer <<= @intCast(u3, u8_bit_count - 1); + in_buffer <<= @as(u3, @intCast(u8_bit_count - 1)); in_buffer <<= 1; }, .Little => { - const v = @truncate(u8, in_buffer); + const v = @as(u8, @truncate(in_buffer)); try self.forward_writer.writeByte(v); - in_buffer >>= @intCast(u3, u8_bit_count - 1); + in_buffer >>= @as(u3, @intCast(u8_bit_count - 1)); in_buffer >>= 1; }, } @@ -103,10 +103,10 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type) } if (in_bits > 0) { - self.bit_count = @intCast(u4, in_bits); + self.bit_count = @as(u4, @intCast(in_bits)); self.bit_buffer = switch (endian) { - .Big => @truncate(u8, in_buffer >> high_byte_shift), - .Little => @truncate(u8, in_buffer), + .Big => @as(u8, @truncate(in_buffer >> high_byte_shift)), + .Little => @as(u8, @truncate(in_buffer)), }; } } diff --git a/lib/std/io/c_writer.zig b/lib/std/io/c_writer.zig index 62c73d3714..ee87a28dc6 100644 --- a/lib/std/io/c_writer.zig +++ b/lib/std/io/c_writer.zig @@ -13,7 +13,7 @@ pub fn cWriter(c_file: *std.c.FILE) CWriter { fn cWriterWrite(c_file: *std.c.FILE, bytes: []const u8) std.fs.File.WriteError!usize { const amt_written = std.c.fwrite(bytes.ptr, 1, bytes.len, c_file); if (amt_written >= 0) return amt_written; - switch (@enumFromInt(os.E, std.c._errno().*)) { + switch (@as(os.E, @enumFromInt(std.c._errno().*))) { .SUCCESS => unreachable, .INVAL => unreachable, .FAULT => unreachable, diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig index abdca56d3c..4dde51838b 100644 --- a/lib/std/io/reader.zig +++ b/lib/std/io/reader.zig @@ -246,7 +246,7 @@ pub fn Reader( /// Same as `readByte` except the returned byte is signed. pub fn readByteSigned(self: Self) (Error || error{EndOfStream})!i8 { - return @bitCast(i8, try self.readByte()); + return @as(i8, @bitCast(try self.readByte())); } /// Reads exactly `num_bytes` bytes and returns as an array. diff --git a/lib/std/json/scanner.zig b/lib/std/json/scanner.zig index 4fb7c1da01..274faba2ff 100644 --- a/lib/std/json/scanner.zig +++ b/lib/std/json/scanner.zig @@ -193,7 +193,7 @@ pub const TokenType = enum { /// to get meaningful information from this. pub const Diagnostics = struct { line_number: u64 = 1, - line_start_cursor: usize = @bitCast(usize, @as(isize, -1)), // Start just "before" the input buffer to get a 1-based column for line 1. + line_start_cursor: usize = @as(usize, @bitCast(@as(isize, -1))), // Start just "before" the input buffer to get a 1-based column for line 1. 
total_bytes_before_current_input: u64 = 0, cursor_pointer: *const usize = undefined, @@ -1719,7 +1719,7 @@ const BitStack = struct { pub fn push(self: *@This(), b: u1) Allocator.Error!void { const byte_index = self.bit_len >> 3; - const bit_index = @intCast(u3, self.bit_len & 7); + const bit_index = @as(u3, @intCast(self.bit_len & 7)); if (self.bytes.items.len <= byte_index) { try self.bytes.append(0); @@ -1733,8 +1733,8 @@ const BitStack = struct { pub fn peek(self: *const @This()) u1 { const byte_index = (self.bit_len - 1) >> 3; - const bit_index = @intCast(u3, (self.bit_len - 1) & 7); - return @intCast(u1, (self.bytes.items[byte_index] >> bit_index) & 1); + const bit_index = @as(u3, @intCast((self.bit_len - 1) & 7)); + return @as(u1, @intCast((self.bytes.items[byte_index] >> bit_index) & 1)); } pub fn pop(self: *@This()) u1 { diff --git a/lib/std/json/static.zig b/lib/std/json/static.zig index fd3d12d73a..f1926660f3 100644 --- a/lib/std/json/static.zig +++ b/lib/std/json/static.zig @@ -442,7 +442,7 @@ fn internalParse( } if (ptrInfo.sentinel) |some| { - const sentinel_value = @ptrCast(*align(1) const ptrInfo.child, some).*; + const sentinel_value = @as(*align(1) const ptrInfo.child, @ptrCast(some)).*; return try arraylist.toOwnedSliceSentinel(sentinel_value); } @@ -456,7 +456,7 @@ fn internalParse( // Use our own array list so we can append the sentinel. var value_list = ArrayList(u8).init(allocator); _ = try source.allocNextIntoArrayList(&value_list, .alloc_always); - return try value_list.toOwnedSliceSentinel(@ptrCast(*const u8, sentinel_ptr).*); + return try value_list.toOwnedSliceSentinel(@as(*const u8, @ptrCast(sentinel_ptr)).*); } if (ptrInfo.is_const) { switch (try source.nextAllocMax(allocator, .alloc_if_needed, options.max_value_len.?)) { @@ -518,8 +518,8 @@ fn internalParseFromValue( }, .Float, .ComptimeFloat => { switch (source) { - .float => |f| return @floatCast(T, f), - .integer => |i| return @floatFromInt(T, i), + .float => |f| return @as(T, @floatCast(f)), + .integer => |i| return @as(T, @floatFromInt(i)), .number_string, .string => |s| return std.fmt.parseFloat(T, s), else => return error.UnexpectedToken, } @@ -530,12 +530,12 @@ fn internalParseFromValue( if (@round(f) != f) return error.InvalidNumber; if (f > std.math.maxInt(T)) return error.Overflow; if (f < std.math.minInt(T)) return error.Overflow; - return @intFromFloat(T, f); + return @as(T, @intFromFloat(f)); }, .integer => |i| { if (i > std.math.maxInt(T)) return error.Overflow; if (i < std.math.minInt(T)) return error.Overflow; - return @intCast(T, i); + return @as(T, @intCast(i)); }, .number_string, .string => |s| { return sliceToInt(T, s); @@ -686,7 +686,7 @@ fn internalParseFromValue( switch (source) { .array => |array| { const r = if (ptrInfo.sentinel) |sentinel_ptr| - try allocator.allocSentinel(ptrInfo.child, array.items.len, @ptrCast(*align(1) const ptrInfo.child, sentinel_ptr).*) + try allocator.allocSentinel(ptrInfo.child, array.items.len, @as(*align(1) const ptrInfo.child, @ptrCast(sentinel_ptr)).*) else try allocator.alloc(ptrInfo.child, array.items.len); @@ -701,7 +701,7 @@ fn internalParseFromValue( // Dynamic length string. 
const r = if (ptrInfo.sentinel) |sentinel_ptr| - try allocator.allocSentinel(ptrInfo.child, s.len, @ptrCast(*align(1) const ptrInfo.child, sentinel_ptr).*) + try allocator.allocSentinel(ptrInfo.child, s.len, @as(*align(1) const ptrInfo.child, @ptrCast(sentinel_ptr)).*) else try allocator.alloc(ptrInfo.child, s.len); @memcpy(r[0..], s); @@ -743,7 +743,7 @@ fn sliceToInt(comptime T: type, slice: []const u8) !T { const float = try std.fmt.parseFloat(f128, slice); if (@round(float) != float) return error.InvalidNumber; if (float > std.math.maxInt(T) or float < std.math.minInt(T)) return error.Overflow; - return @intCast(T, @intFromFloat(i128, float)); + return @as(T, @intCast(@as(i128, @intFromFloat(float)))); } fn sliceToEnum(comptime T: type, slice: []const u8) !T { @@ -759,7 +759,7 @@ fn fillDefaultStructValues(comptime T: type, r: *T, fields_seen: *[@typeInfo(T). inline for (@typeInfo(T).Struct.fields, 0..) |field, i| { if (!fields_seen[i]) { if (field.default_value) |default_ptr| { - const default = @ptrCast(*align(1) const field.type, default_ptr).*; + const default = @as(*align(1) const field.type, @ptrCast(default_ptr)).*; @field(r, field.name) = default; } else { return error.MissingField; diff --git a/lib/std/json/stringify.zig b/lib/std/json/stringify.zig index 6d10e95330..5de5db54b9 100644 --- a/lib/std/json/stringify.zig +++ b/lib/std/json/stringify.zig @@ -78,8 +78,8 @@ fn outputUnicodeEscape( assert(codepoint <= 0x10FFFF); // To escape an extended character that is not in the Basic Multilingual Plane, // the character is represented as a 12-character sequence, encoding the UTF-16 surrogate pair. - const high = @intCast(u16, (codepoint - 0x10000) >> 10) + 0xD800; - const low = @intCast(u16, codepoint & 0x3FF) + 0xDC00; + const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800; + const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00; try out_stream.writeAll("\\u"); try std.fmt.formatIntValue(high, "x", std.fmt.FormatOptions{ .width = 4, .fill = '0' }, out_stream); try out_stream.writeAll("\\u"); diff --git a/lib/std/json/write_stream.zig b/lib/std/json/write_stream.zig index 760bad13fd..3a2750f5a1 100644 --- a/lib/std/json/write_stream.zig +++ b/lib/std/json/write_stream.zig @@ -176,8 +176,8 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type { .ComptimeInt => { return self.emitNumber(@as(std.math.IntFittingRange(value, value), value)); }, - .Float, .ComptimeFloat => if (@floatCast(f64, value) == value) { - try self.stream.print("{}", .{@floatCast(f64, value)}); + .Float, .ComptimeFloat => if (@as(f64, @floatCast(value)) == value) { + try self.stream.print("{}", .{@as(f64, @floatCast(value))}); self.popState(); return; }, @@ -294,7 +294,7 @@ test "json write stream" { fn getJsonObject(allocator: std.mem.Allocator) !Value { var value = Value{ .object = ObjectMap.init(allocator) }; - try value.object.put("one", Value{ .integer = @intCast(i64, 1) }); + try value.object.put("one", Value{ .integer = @as(i64, @intCast(1)) }); try value.object.put("two", Value{ .float = 2.0 }); return value; } diff --git a/lib/std/leb128.zig b/lib/std/leb128.zig index 859d753a6a..33555caec5 100644 --- a/lib/std/leb128.zig +++ b/lib/std/leb128.zig @@ -30,17 +30,17 @@ pub fn readULEB128(comptime T: type, reader: anytype) !T { if (value > std.math.maxInt(T)) return error.Overflow; } - return @truncate(T, value); + return @as(T, @truncate(value)); } /// Write a single unsigned integer as unsigned LEB128 to the given writer. 
pub fn writeULEB128(writer: anytype, uint_value: anytype) !void { const T = @TypeOf(uint_value); const U = if (@typeInfo(T).Int.bits < 8) u8 else T; - var value = @intCast(U, uint_value); + var value = @as(U, @intCast(uint_value)); while (true) { - const byte = @truncate(u8, value & 0x7f); + const byte = @as(u8, @truncate(value & 0x7f)); value >>= 7; if (value == 0) { try writer.writeByte(byte); @@ -71,18 +71,18 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T { if (ov[1] != 0) { // Overflow is ok so long as the sign bit is set and this is the last byte if (byte & 0x80 != 0) return error.Overflow; - if (@bitCast(S, ov[0]) >= 0) return error.Overflow; + if (@as(S, @bitCast(ov[0])) >= 0) return error.Overflow; // and all the overflowed bits are 1 - const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift)); - const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift; + const remaining_shift = @as(u3, @intCast(@typeInfo(U).Int.bits - @as(u16, shift))); + const remaining_bits = @as(i8, @bitCast(byte | 0x80)) >> remaining_shift; if (remaining_bits != -1) return error.Overflow; } else { // If we don't overflow and this is the last byte and the number being decoded // is negative, check that the remaining bits are 1 - if ((byte & 0x80 == 0) and (@bitCast(S, ov[0]) < 0)) { - const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift)); - const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift; + if ((byte & 0x80 == 0) and (@as(S, @bitCast(ov[0])) < 0)) { + const remaining_shift = @as(u3, @intCast(@typeInfo(U).Int.bits - @as(u16, shift))); + const remaining_bits = @as(i8, @bitCast(byte | 0x80)) >> remaining_shift; if (remaining_bits != -1) return error.Overflow; } } @@ -92,7 +92,7 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T { const needs_sign_ext = group + 1 < max_group; if (byte & 0x40 != 0 and needs_sign_ext) { const ones = @as(S, -1); - value |= @bitCast(U, ones) << (shift + 7); + value |= @as(U, @bitCast(ones)) << (shift + 7); } break; } @@ -100,13 +100,13 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T { return error.Overflow; } - const result = @bitCast(S, value); + const result = @as(S, @bitCast(value)); // Only applies if we extended to i8 if (S != T) { if (result > std.math.maxInt(T) or result < std.math.minInt(T)) return error.Overflow; } - return @truncate(T, result); + return @as(T, @truncate(result)); } /// Write a single signed integer as signed LEB128 to the given writer. 
@@ -115,11 +115,11 @@ pub fn writeILEB128(writer: anytype, int_value: anytype) !void { const S = if (@typeInfo(T).Int.bits < 8) i8 else T; const U = std.meta.Int(.unsigned, @typeInfo(S).Int.bits); - var value = @intCast(S, int_value); + var value = @as(S, @intCast(int_value)); while (true) { - const uvalue = @bitCast(U, value); - const byte = @truncate(u8, uvalue); + const uvalue = @as(U, @bitCast(value)); + const byte = @as(u8, @truncate(uvalue)); value >>= 6; if (value == -1 or value == 0) { try writer.writeByte(byte & 0x7F); @@ -141,15 +141,15 @@ pub fn writeILEB128(writer: anytype, int_value: anytype) !void { pub fn writeUnsignedFixed(comptime l: usize, ptr: *[l]u8, int: std.meta.Int(.unsigned, l * 7)) void { const T = @TypeOf(int); const U = if (@typeInfo(T).Int.bits < 8) u8 else T; - var value = @intCast(U, int); + var value = @as(U, @intCast(int)); comptime var i = 0; inline while (i < (l - 1)) : (i += 1) { - const byte = @truncate(u8, value) | 0b1000_0000; + const byte = @as(u8, @truncate(value)) | 0b1000_0000; value >>= 7; ptr[i] = byte; } - ptr[i] = @truncate(u8, value); + ptr[i] = @as(u8, @truncate(value)); } test "writeUnsignedFixed" { @@ -245,7 +245,7 @@ test "deserialize signed LEB128" { try testing.expect((try test_read_ileb128(i16, "\xff\xff\x7f")) == -1); try testing.expect((try test_read_ileb128(i32, "\xff\xff\xff\xff\x7f")) == -1); try testing.expect((try test_read_ileb128(i32, "\x80\x80\x80\x80\x78")) == -0x80000000); - try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == @bitCast(i64, @intCast(u64, 0x8000000000000000))); + try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == @as(i64, @bitCast(@as(u64, @intCast(0x8000000000000000))))); try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x40")) == -0x4000000000000000); try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == -0x8000000000000000); @@ -356,7 +356,7 @@ test "serialize unsigned LEB128" { const max = std.math.maxInt(T); var i = @as(std.meta.Int(.unsigned, @typeInfo(T).Int.bits + 1), min); - while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i)); + while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i))); } } @@ -374,6 +374,6 @@ test "serialize signed LEB128" { const max = std.math.maxInt(T); var i = @as(std.meta.Int(.signed, @typeInfo(T).Int.bits + 1), min); - while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i)); + while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i))); } } diff --git a/lib/std/macho.zig b/lib/std/macho.zig index 8bddd67023..1b886e2d90 100644 --- a/lib/std/macho.zig +++ b/lib/std/macho.zig @@ -787,7 +787,7 @@ pub const section_64 = extern struct { } pub fn @"type"(sect: section_64) u8 { - return @truncate(u8, sect.flags & 0xff); + return @as(u8, @truncate(sect.flags & 0xff)); } pub fn attrs(sect: section_64) u32 { @@ -1870,7 +1870,7 @@ pub const LoadCommandIterator = struct { pub fn cast(lc: LoadCommand, comptime Cmd: type) ?Cmd { if (lc.data.len < @sizeOf(Cmd)) return null; - return @ptrCast(*const Cmd, @alignCast(@alignOf(Cmd), &lc.data[0])).*; + return @as(*const Cmd, @ptrCast(@alignCast(&lc.data[0]))).*; } /// Asserts LoadCommand is of type segment_command_64. 
@@ -1878,9 +1878,9 @@ pub const LoadCommandIterator = struct { const segment_lc = lc.cast(segment_command_64).?; if (segment_lc.nsects == 0) return &[0]section_64{}; const data = lc.data[@sizeOf(segment_command_64)..]; - const sections = @ptrCast( + const sections = @as( [*]const section_64, - @alignCast(@alignOf(section_64), &data[0]), + @ptrCast(@alignCast(&data[0])), )[0..segment_lc.nsects]; return sections; } @@ -1903,16 +1903,16 @@ pub const LoadCommandIterator = struct { pub fn next(it: *LoadCommandIterator) ?LoadCommand { if (it.index >= it.ncmds) return null; - const hdr = @ptrCast( + const hdr = @as( *const load_command, - @alignCast(@alignOf(load_command), &it.buffer[0]), + @ptrCast(@alignCast(&it.buffer[0])), ).*; const cmd = LoadCommand{ .hdr = hdr, .data = it.buffer[0..hdr.cmdsize], }; - it.buffer = @alignCast(@alignOf(u64), it.buffer[hdr.cmdsize..]); + it.buffer = @alignCast(it.buffer[hdr.cmdsize..]); it.index += 1; return cmd; diff --git a/lib/std/math.zig b/lib/std/math.zig index c7d354f787..2a6c24bcb4 100644 --- a/lib/std/math.zig +++ b/lib/std/math.zig @@ -85,31 +85,31 @@ pub const inf_f128 = @compileError("Deprecated: use `inf(f128)` instead"); pub const epsilon = @compileError("Deprecated: use `floatEps` instead"); pub const nan_u16 = @as(u16, 0x7C01); -pub const nan_f16 = @bitCast(f16, nan_u16); +pub const nan_f16 = @as(f16, @bitCast(nan_u16)); pub const qnan_u16 = @as(u16, 0x7E00); -pub const qnan_f16 = @bitCast(f16, qnan_u16); +pub const qnan_f16 = @as(f16, @bitCast(qnan_u16)); pub const nan_u32 = @as(u32, 0x7F800001); -pub const nan_f32 = @bitCast(f32, nan_u32); +pub const nan_f32 = @as(f32, @bitCast(nan_u32)); pub const qnan_u32 = @as(u32, 0x7FC00000); -pub const qnan_f32 = @bitCast(f32, qnan_u32); +pub const qnan_f32 = @as(f32, @bitCast(qnan_u32)); pub const nan_u64 = @as(u64, 0x7FF << 52) | 1; -pub const nan_f64 = @bitCast(f64, nan_u64); +pub const nan_f64 = @as(f64, @bitCast(nan_u64)); pub const qnan_u64 = @as(u64, 0x7ff8000000000000); -pub const qnan_f64 = @bitCast(f64, qnan_u64); +pub const qnan_f64 = @as(f64, @bitCast(qnan_u64)); pub const nan_f80 = make_f80(F80{ .fraction = 0xA000000000000000, .exp = 0x7fff }); pub const qnan_f80 = make_f80(F80{ .fraction = 0xC000000000000000, .exp = 0x7fff }); pub const nan_u128 = @as(u128, 0x7fff0000000000000000000000000001); -pub const nan_f128 = @bitCast(f128, nan_u128); +pub const nan_f128 = @as(f128, @bitCast(nan_u128)); pub const qnan_u128 = @as(u128, 0x7fff8000000000000000000000000000); -pub const qnan_f128 = @bitCast(f128, qnan_u128); +pub const qnan_f128 = @as(f128, @bitCast(qnan_u128)); pub const nan = @import("math/nan.zig").nan; pub const snan = @import("math/nan.zig").snan; @@ -508,10 +508,10 @@ pub fn shl(comptime T: type, a: T, shift_amt: anytype) T { const C = @typeInfo(T).Vector.child; const len = @typeInfo(T).Vector.len; if (abs_shift_amt >= @typeInfo(C).Int.bits) return @splat(len, @as(C, 0)); - break :blk @splat(len, @intCast(Log2Int(C), abs_shift_amt)); + break :blk @splat(len, @as(Log2Int(C), @intCast(abs_shift_amt))); } else { if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0; - break :blk @intCast(Log2Int(T), abs_shift_amt); + break :blk @as(Log2Int(T), @intCast(abs_shift_amt)); } }; @@ -552,10 +552,10 @@ pub fn shr(comptime T: type, a: T, shift_amt: anytype) T { const C = @typeInfo(T).Vector.child; const len = @typeInfo(T).Vector.len; if (abs_shift_amt >= @typeInfo(C).Int.bits) return @splat(len, @as(C, 0)); - break :blk @splat(len, @intCast(Log2Int(C), abs_shift_amt)); + break :blk @splat(len, 
@as(Log2Int(C), @intCast(abs_shift_amt))); } else { if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0; - break :blk @intCast(Log2Int(T), abs_shift_amt); + break :blk @as(Log2Int(T), @intCast(abs_shift_amt)); } }; @@ -596,7 +596,7 @@ pub fn rotr(comptime T: type, x: T, r: anytype) T { if (@typeInfo(C).Int.signedness == .signed) { @compileError("cannot rotate signed integers"); } - const ar = @intCast(Log2Int(C), @mod(r, @typeInfo(C).Int.bits)); + const ar = @as(Log2Int(C), @intCast(@mod(r, @typeInfo(C).Int.bits))); return (x >> @splat(@typeInfo(T).Vector.len, ar)) | (x << @splat(@typeInfo(T).Vector.len, 1 + ~ar)); } else if (@typeInfo(T).Int.signedness == .signed) { @compileError("cannot rotate signed integer"); @@ -604,7 +604,7 @@ pub fn rotr(comptime T: type, x: T, r: anytype) T { if (T == u0) return 0; if (isPowerOfTwo(@typeInfo(T).Int.bits)) { - const ar = @intCast(Log2Int(T), @mod(r, @typeInfo(T).Int.bits)); + const ar = @as(Log2Int(T), @intCast(@mod(r, @typeInfo(T).Int.bits))); return x >> ar | x << (1 +% ~ar); } else { const ar = @mod(r, @typeInfo(T).Int.bits); @@ -640,7 +640,7 @@ pub fn rotl(comptime T: type, x: T, r: anytype) T { if (@typeInfo(C).Int.signedness == .signed) { @compileError("cannot rotate signed integers"); } - const ar = @intCast(Log2Int(C), @mod(r, @typeInfo(C).Int.bits)); + const ar = @as(Log2Int(C), @intCast(@mod(r, @typeInfo(C).Int.bits))); return (x << @splat(@typeInfo(T).Vector.len, ar)) | (x >> @splat(@typeInfo(T).Vector.len, 1 +% ~ar)); } else if (@typeInfo(T).Int.signedness == .signed) { @compileError("cannot rotate signed integer"); @@ -648,7 +648,7 @@ pub fn rotl(comptime T: type, x: T, r: anytype) T { if (T == u0) return 0; if (isPowerOfTwo(@typeInfo(T).Int.bits)) { - const ar = @intCast(Log2Int(T), @mod(r, @typeInfo(T).Int.bits)); + const ar = @as(Log2Int(T), @intCast(@mod(r, @typeInfo(T).Int.bits))); return x << ar | x >> 1 +% ~ar; } else { const ar = @mod(r, @typeInfo(T).Int.bits); @@ -1029,9 +1029,9 @@ pub fn absCast(x: anytype) switch (@typeInfo(@TypeOf(x))) { if (int_info.signedness == .unsigned) return x; const Uint = std.meta.Int(.unsigned, int_info.bits); if (x < 0) { - return ~@bitCast(Uint, x +% -1); + return ~@as(Uint, @bitCast(x +% -1)); } else { - return @intCast(Uint, x); + return @as(Uint, @intCast(x)); } }, else => unreachable, @@ -1056,7 +1056,7 @@ pub fn negateCast(x: anytype) !std.meta.Int(.signed, @bitSizeOf(@TypeOf(x))) { if (x == -minInt(int)) return minInt(int); - return -@intCast(int, x); + return -@as(int, @intCast(x)); } test "negateCast" { @@ -1080,7 +1080,7 @@ pub fn cast(comptime T: type, x: anytype) ?T { } else if ((is_comptime or minInt(@TypeOf(x)) < minInt(T)) and x < minInt(T)) { return null; } else { - return @intCast(T, x); + return @as(T, @intCast(x)); } } @@ -1102,13 +1102,19 @@ test "cast" { pub const AlignCastError = error{UnalignedMemory}; +fn AlignCastResult(comptime alignment: u29, comptime Ptr: type) type { + var ptr_info = @typeInfo(Ptr); + ptr_info.Pointer.alignment = alignment; + return @Type(ptr_info); +} + /// Align cast a pointer but return an error if it's the wrong alignment -pub fn alignCast(comptime alignment: u29, ptr: anytype) AlignCastError!@TypeOf(@alignCast(alignment, ptr)) { +pub fn alignCast(comptime alignment: u29, ptr: anytype) AlignCastError!AlignCastResult(alignment, @TypeOf(ptr)) { const addr = @intFromPtr(ptr); if (addr % alignment != 0) { return error.UnalignedMemory; } - return @alignCast(alignment, ptr); + return @alignCast(ptr); } /// Asserts `int > 0`. 
@@ -1172,7 +1178,7 @@ pub inline fn floor(value: anytype) @TypeOf(value) { pub fn floorPowerOfTwo(comptime T: type, value: T) T { const uT = std.meta.Int(.unsigned, @typeInfo(T).Int.bits); if (value <= 0) return 0; - return @as(T, 1) << log2_int(uT, @intCast(uT, value)); + return @as(T, 1) << log2_int(uT, @as(uT, @intCast(value))); } test "floorPowerOfTwo" { @@ -1211,7 +1217,7 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(@typeInfo( assert(value != 0); const PromotedType = std.meta.Int(@typeInfo(T).Int.signedness, @typeInfo(T).Int.bits + 1); const ShiftType = std.math.Log2Int(PromotedType); - return @as(PromotedType, 1) << @intCast(ShiftType, @typeInfo(T).Int.bits - @clz(value - 1)); + return @as(PromotedType, 1) << @as(ShiftType, @intCast(@typeInfo(T).Int.bits - @clz(value - 1))); } /// Returns the next power of two (if the value is not already a power of two). @@ -1227,7 +1233,7 @@ pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) { if (overflowBit & x != 0) { return error.Overflow; } - return @intCast(T, x); + return @as(T, @intCast(x)); } /// Returns the next power of two (if the value is not already a power @@ -1277,7 +1283,7 @@ pub fn log2_int(comptime T: type, x: T) Log2Int(T) { if (@typeInfo(T) != .Int or @typeInfo(T).Int.signedness != .unsigned) @compileError("log2_int requires an unsigned integer, found " ++ @typeName(T)); assert(x != 0); - return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(x)); + return @as(Log2Int(T), @intCast(@typeInfo(T).Int.bits - 1 - @clz(x))); } /// Return the log base 2 of integer value x, rounding up to the @@ -1311,8 +1317,8 @@ pub fn lossyCast(comptime T: type, value: anytype) T { switch (@typeInfo(T)) { .Float => { switch (@typeInfo(@TypeOf(value))) { - .Int => return @floatFromInt(T, value), - .Float => return @floatCast(T, value), + .Int => return @as(T, @floatFromInt(value)), + .Float => return @as(T, @floatCast(value)), .ComptimeInt => return @as(T, value), .ComptimeFloat => return @as(T, value), else => @compileError("bad type"), @@ -1326,7 +1332,7 @@ pub fn lossyCast(comptime T: type, value: anytype) T { } else if (value <= minInt(T)) { return @as(T, minInt(T)); } else { - return @intCast(T, value); + return @as(T, @intCast(value)); } }, .Float, .ComptimeFloat => { @@ -1335,7 +1341,7 @@ pub fn lossyCast(comptime T: type, value: anytype) T { } else if (value <= minInt(T)) { return @as(T, minInt(T)); } else { - return @intFromFloat(T, value); + return @as(T, @intFromFloat(value)); } }, else => @compileError("bad type"), @@ -1594,7 +1600,7 @@ test "compare between signed and unsigned" { try testing.expect(compare(@as(u8, 255), .gt, @as(i9, -1))); try testing.expect(!compare(@as(u8, 255), .lte, @as(i9, -1))); try testing.expect(compare(@as(u8, 1), .lt, @as(u8, 2))); - try testing.expect(@bitCast(u8, @as(i8, -1)) == @as(u8, 255)); + try testing.expect(@as(u8, @bitCast(@as(i8, -1))) == @as(u8, 255)); try testing.expect(!compare(@as(u8, 255), .eq, @as(i8, -1))); try testing.expect(compare(@as(u8, 1), .eq, @as(u8, 1))); } @@ -1624,7 +1630,7 @@ test "order.compare" { test "compare.reverse" { inline for (@typeInfo(CompareOperator).Enum.fields) |op_field| { - const op = @enumFromInt(CompareOperator, op_field.value); + const op = @as(CompareOperator, @enumFromInt(op_field.value)); try testing.expect(compare(2, op, 3) == compare(3, op.reverse(), 2)); try testing.expect(compare(3, op, 3) == compare(3, op.reverse(), 3)); try testing.expect(compare(4, op, 3) == compare(3, op.reverse(), 4)); @@ -1646,10 
+1652,10 @@ pub inline fn boolMask(comptime MaskInt: type, value: bool) MaskInt { if (MaskInt == u1) return @intFromBool(value); if (MaskInt == i1) { // The @as here is a workaround for #7950 - return @bitCast(i1, @as(u1, @intFromBool(value))); + return @as(i1, @bitCast(@as(u1, @intFromBool(value)))); } - return -%@intCast(MaskInt, @intFromBool(value)); + return -%@as(MaskInt, @intCast(@intFromBool(value))); } test "boolMask" { @@ -1680,7 +1686,7 @@ test "boolMask" { /// Return the mod of `num` with the smallest integer type pub fn comptimeMod(num: anytype, comptime denom: comptime_int) IntFittingRange(0, denom - 1) { - return @intCast(IntFittingRange(0, denom - 1), @mod(num, denom)); + return @as(IntFittingRange(0, denom - 1), @intCast(@mod(num, denom))); } pub const F80 = struct { @@ -1690,14 +1696,14 @@ pub const F80 = struct { pub fn make_f80(repr: F80) f80 { const int = (@as(u80, repr.exp) << 64) | repr.fraction; - return @bitCast(f80, int); + return @as(f80, @bitCast(int)); } pub fn break_f80(x: f80) F80 { - const int = @bitCast(u80, x); + const int = @as(u80, @bitCast(x)); return .{ - .fraction = @truncate(u64, int), - .exp = @truncate(u16, int >> 64), + .fraction = @as(u64, @truncate(int)), + .exp = @as(u16, @truncate(int >> 64)), }; } @@ -1709,7 +1715,7 @@ pub inline fn sign(i: anytype) @TypeOf(i) { const T = @TypeOf(i); return switch (@typeInfo(T)) { .Int, .ComptimeInt => @as(T, @intFromBool(i > 0)) - @as(T, @intFromBool(i < 0)), - .Float, .ComptimeFloat => @floatFromInt(T, @intFromBool(i > 0)) - @floatFromInt(T, @intFromBool(i < 0)), + .Float, .ComptimeFloat => @as(T, @floatFromInt(@intFromBool(i > 0))) - @as(T, @floatFromInt(@intFromBool(i < 0))), .Vector => |vinfo| blk: { switch (@typeInfo(vinfo.child)) { .Int, .Float => { diff --git a/lib/std/math/acos.zig b/lib/std/math/acos.zig index e88bed7227..1a29ca7b54 100644 --- a/lib/std/math/acos.zig +++ b/lib/std/math/acos.zig @@ -36,7 +36,7 @@ fn acos32(x: f32) f32 { const pio2_hi = 1.5707962513e+00; const pio2_lo = 7.5497894159e-08; - const hx: u32 = @bitCast(u32, x); + const hx: u32 = @as(u32, @bitCast(x)); const ix: u32 = hx & 0x7FFFFFFF; // |x| >= 1 or nan @@ -72,8 +72,8 @@ fn acos32(x: f32) f32 { // x > 0.5 const z = (1.0 - x) * 0.5; const s = @sqrt(z); - const jx = @bitCast(u32, s); - const df = @bitCast(f32, jx & 0xFFFFF000); + const jx = @as(u32, @bitCast(s)); + const df = @as(f32, @bitCast(jx & 0xFFFFF000)); const c = (z - df * df) / (s + df); const w = r32(z) * s + c; return 2 * (df + w); @@ -100,13 +100,13 @@ fn acos64(x: f64) f64 { const pio2_hi: f64 = 1.57079632679489655800e+00; const pio2_lo: f64 = 6.12323399573676603587e-17; - const ux = @bitCast(u64, x); - const hx = @intCast(u32, ux >> 32); + const ux = @as(u64, @bitCast(x)); + const hx = @as(u32, @intCast(ux >> 32)); const ix = hx & 0x7FFFFFFF; // |x| >= 1 or nan if (ix >= 0x3FF00000) { - const lx = @intCast(u32, ux & 0xFFFFFFFF); + const lx = @as(u32, @intCast(ux & 0xFFFFFFFF)); // acos(1) = 0, acos(-1) = pi if ((ix - 0x3FF00000) | lx == 0) { @@ -141,8 +141,8 @@ fn acos64(x: f64) f64 { // x > 0.5 const z = (1.0 - x) * 0.5; const s = @sqrt(z); - const jx = @bitCast(u64, s); - const df = @bitCast(f64, jx & 0xFFFFFFFF00000000); + const jx = @as(u64, @bitCast(s)); + const df = @as(f64, @bitCast(jx & 0xFFFFFFFF00000000)); const c = (z - df * df) / (s + df); const w = r64(z) * s + c; return 2 * (df + w); diff --git a/lib/std/math/acosh.zig b/lib/std/math/acosh.zig index a78130d2ef..0c6de9933e 100644 --- a/lib/std/math/acosh.zig +++ b/lib/std/math/acosh.zig @@ -24,7 
+24,7 @@ pub fn acosh(x: anytype) @TypeOf(x) { // acosh(x) = log(x + sqrt(x * x - 1)) fn acosh32(x: f32) f32 { - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); const i = u & 0x7FFFFFFF; // |x| < 2, invalid if x < 1 or nan @@ -42,7 +42,7 @@ fn acosh32(x: f32) f32 { } fn acosh64(x: f64) f64 { - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); const e = (u >> 52) & 0x7FF; // |x| < 2, invalid if x < 1 or nan diff --git a/lib/std/math/asin.zig b/lib/std/math/asin.zig index 48ad04c579..ac1d01ff55 100644 --- a/lib/std/math/asin.zig +++ b/lib/std/math/asin.zig @@ -36,7 +36,7 @@ fn r32(z: f32) f32 { fn asin32(x: f32) f32 { const pio2 = 1.570796326794896558e+00; - const hx: u32 = @bitCast(u32, x); + const hx: u32 = @as(u32, @bitCast(x)); const ix: u32 = hx & 0x7FFFFFFF; // |x| >= 1 @@ -92,13 +92,13 @@ fn asin64(x: f64) f64 { const pio2_hi: f64 = 1.57079632679489655800e+00; const pio2_lo: f64 = 6.12323399573676603587e-17; - const ux = @bitCast(u64, x); - const hx = @intCast(u32, ux >> 32); + const ux = @as(u64, @bitCast(x)); + const hx = @as(u32, @intCast(ux >> 32)); const ix = hx & 0x7FFFFFFF; // |x| >= 1 or nan if (ix >= 0x3FF00000) { - const lx = @intCast(u32, ux & 0xFFFFFFFF); + const lx = @as(u32, @intCast(ux & 0xFFFFFFFF)); // asin(1) = +-pi/2 with inexact if ((ix - 0x3FF00000) | lx == 0) { @@ -128,8 +128,8 @@ fn asin64(x: f64) f64 { if (ix >= 0x3FEF3333) { fx = pio2_hi - 2 * (s + s * r); } else { - const jx = @bitCast(u64, s); - const df = @bitCast(f64, jx & 0xFFFFFFFF00000000); + const jx = @as(u64, @bitCast(s)); + const df = @as(f64, @bitCast(jx & 0xFFFFFFFF00000000)); const c = (z - df * df) / (s + df); fx = 0.5 * pio2_hi - (2 * s * r - (pio2_lo - 2 * c) - (0.5 * pio2_hi - 2 * df)); } diff --git a/lib/std/math/asinh.zig b/lib/std/math/asinh.zig index 65028ef5d9..13b1045bf6 100644 --- a/lib/std/math/asinh.zig +++ b/lib/std/math/asinh.zig @@ -26,11 +26,11 @@ pub fn asinh(x: anytype) @TypeOf(x) { // asinh(x) = sign(x) * log(|x| + sqrt(x * x + 1)) ~= x - x^3/6 + o(x^5) fn asinh32(x: f32) f32 { - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); const i = u & 0x7FFFFFFF; const s = i >> 31; - var rx = @bitCast(f32, i); // |x| + var rx = @as(f32, @bitCast(i)); // |x| // TODO: Shouldn't need this explicit check. 
if (math.isNegativeInf(x)) { @@ -58,11 +58,11 @@ fn asinh32(x: f32) f32 { } fn asinh64(x: f64) f64 { - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); const e = (u >> 52) & 0x7FF; const s = e >> 63; - var rx = @bitCast(f64, u & (maxInt(u64) >> 1)); // |x| + var rx = @as(f64, @bitCast(u & (maxInt(u64) >> 1))); // |x| if (math.isNegativeInf(x)) { return x; diff --git a/lib/std/math/atan.zig b/lib/std/math/atan.zig index 41caae11a6..75be6ea746 100644 --- a/lib/std/math/atan.zig +++ b/lib/std/math/atan.zig @@ -46,7 +46,7 @@ fn atan32(x_: f32) f32 { }; var x = x_; - var ix: u32 = @bitCast(u32, x); + var ix: u32 = @as(u32, @bitCast(x)); const sign = ix >> 31; ix &= 0x7FFFFFFF; @@ -143,8 +143,8 @@ fn atan64(x_: f64) f64 { }; var x = x_; - var ux = @bitCast(u64, x); - var ix = @intCast(u32, ux >> 32); + var ux = @as(u64, @bitCast(x)); + var ix = @as(u32, @intCast(ux >> 32)); const sign = ix >> 31; ix &= 0x7FFFFFFF; @@ -165,7 +165,7 @@ fn atan64(x_: f64) f64 { // |x| < 2^(-27) if (ix < 0x3E400000) { if (ix < 0x00100000) { - math.doNotOptimizeAway(@floatCast(f32, x)); + math.doNotOptimizeAway(@as(f32, @floatCast(x))); } return x; } @@ -212,7 +212,7 @@ fn atan64(x_: f64) f64 { } test "math.atan" { - try expect(@bitCast(u32, atan(@as(f32, 0.2))) == @bitCast(u32, atan32(0.2))); + try expect(@as(u32, @bitCast(atan(@as(f32, 0.2)))) == @as(u32, @bitCast(atan32(0.2)))); try expect(atan(@as(f64, 0.2)) == atan64(0.2)); } diff --git a/lib/std/math/atan2.zig b/lib/std/math/atan2.zig index b9b37e7da4..026c76b5b2 100644 --- a/lib/std/math/atan2.zig +++ b/lib/std/math/atan2.zig @@ -44,8 +44,8 @@ fn atan2_32(y: f32, x: f32) f32 { return x + y; } - var ix = @bitCast(u32, x); - var iy = @bitCast(u32, y); + var ix = @as(u32, @bitCast(x)); + var iy = @as(u32, @bitCast(y)); // x = 1.0 if (ix == 0x3F800000) { @@ -129,13 +129,13 @@ fn atan2_64(y: f64, x: f64) f64 { return x + y; } - var ux = @bitCast(u64, x); - var ix = @intCast(u32, ux >> 32); - var lx = @intCast(u32, ux & 0xFFFFFFFF); + var ux = @as(u64, @bitCast(x)); + var ix = @as(u32, @intCast(ux >> 32)); + var lx = @as(u32, @intCast(ux & 0xFFFFFFFF)); - var uy = @bitCast(u64, y); - var iy = @intCast(u32, uy >> 32); - var ly = @intCast(u32, uy & 0xFFFFFFFF); + var uy = @as(u64, @bitCast(y)); + var iy = @as(u32, @intCast(uy >> 32)); + var ly = @as(u32, @intCast(uy & 0xFFFFFFFF)); // x = 1.0 if ((ix -% 0x3FF00000) | lx == 0) { diff --git a/lib/std/math/atanh.zig b/lib/std/math/atanh.zig index aed5d8bca8..58b56ac8fa 100644 --- a/lib/std/math/atanh.zig +++ b/lib/std/math/atanh.zig @@ -26,11 +26,11 @@ pub fn atanh(x: anytype) @TypeOf(x) { // atanh(x) = log((1 + x) / (1 - x)) / 2 = log1p(2x / (1 - x)) / 2 ~= x + x^3 / 3 + o(x^5) fn atanh_32(x: f32) f32 { - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); const i = u & 0x7FFFFFFF; const s = u >> 31; - var y = @bitCast(f32, i); // |x| + var y = @as(f32, @bitCast(i)); // |x| if (y == 1.0) { return math.copysign(math.inf(f32), x); @@ -55,11 +55,11 @@ fn atanh_32(x: f32) f32 { } fn atanh_64(x: f64) f64 { - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); const e = (u >> 52) & 0x7FF; const s = u >> 63; - var y = @bitCast(f64, u & (maxInt(u64) >> 1)); // |x| + var y = @as(f64, @bitCast(u & (maxInt(u64) >> 1))); // |x| if (y == 1.0) { return math.copysign(math.inf(f64), x); @@ -69,7 +69,7 @@ fn atanh_64(x: f64) f64 { if (e < 0x3FF - 32) { // underflow if (e == 0) { - math.doNotOptimizeAway(@floatCast(f32, y)); + math.doNotOptimizeAway(@as(f32, @floatCast(y))); } } // |x| < 0.5 diff --git 
a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index 846a809e05..213876ccad 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -30,7 +30,7 @@ pub fn calcLimbLen(scalar: anytype) usize { } const w_value = std.math.absCast(scalar); - return @intCast(usize, @divFloor(@intCast(Limb, math.log2(w_value)), limb_bits) + 1); + return @as(usize, @intCast(@divFloor(@as(Limb, @intCast(math.log2(w_value))), limb_bits) + 1)); } pub fn calcToStringLimbsBufferLen(a_len: usize, base: u8) usize { @@ -87,8 +87,8 @@ pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb { // r2 = b * c const bc = @as(DoubleLimb, math.mulWide(Limb, b, c)); - const r2 = @truncate(Limb, bc); - const c2 = @truncate(Limb, bc >> limb_bits); + const r2 = @as(Limb, @truncate(bc)); + const c2 = @as(Limb, @truncate(bc >> limb_bits)); // ov2[0] = ov1[0] + r2 const ov2 = @addWithOverflow(ov1[0], r2); @@ -107,8 +107,8 @@ fn subMulLimbWithBorrow(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb { // r2 = b * c const bc = @as(DoubleLimb, std.math.mulWide(Limb, b, c)); - const r2 = @truncate(Limb, bc); - const c2 = @truncate(Limb, bc >> limb_bits); + const r2 = @as(Limb, @truncate(bc)); + const c2 = @as(Limb, @truncate(bc >> limb_bits)); // ov2[0] = ov1[0] - r2 const ov2 = @subWithOverflow(ov1[0], r2); @@ -244,7 +244,7 @@ pub const Mutable = struct { } else { var i: usize = 0; while (true) : (i += 1) { - self.limbs[i] = @truncate(Limb, w_value); + self.limbs[i] = @as(Limb, @truncate(w_value)); w_value >>= limb_bits; if (w_value == 0) break; @@ -340,7 +340,7 @@ pub const Mutable = struct { } const req_limbs = calcTwosCompLimbCount(bit_count); - const bit = @truncate(Log2Limb, bit_count - 1); + const bit = @as(Log2Limb, @truncate(bit_count - 1)); const signmask = @as(Limb, 1) << bit; // 0b0..010..0 where 1 is the sign bit. const mask = (signmask << 1) -% 1; // 0b0..011..1 where the leftmost 1 is the sign bit. @@ -365,7 +365,7 @@ pub const Mutable = struct { r.set(0); } else { const new_req_limbs = calcTwosCompLimbCount(bit_count - 1); - const msb = @truncate(Log2Limb, bit_count - 2); + const msb = @as(Log2Limb, @truncate(bit_count - 2)); const new_signmask = @as(Limb, 1) << msb; // 0b0..010..0 where 1 is the sign bit. const new_mask = (new_signmask << 1) -% 1; // 0b0..001..1 where the rightmost 0 is the sign bit. @@ -1153,7 +1153,7 @@ pub const Mutable = struct { // const msb = @truncate(Log2Limb, checkbit); // const checkmask = (@as(Limb, 1) << msb) -% 1; - if (a.limbs[a.limbs.len - 1] >> @truncate(Log2Limb, checkbit) != 0) { + if (a.limbs[a.limbs.len - 1] >> @as(Log2Limb, @truncate(checkbit)) != 0) { // Need to saturate. r.setTwosCompIntLimit(if (a.positive) .max else .min, signedness, bit_count); return; @@ -1554,7 +1554,7 @@ pub const Mutable = struct { // Optimization for small divisor. By using a half limb we can avoid requiring DoubleLimb // divisions in the hot code path. This may often require compiler_rt software-emulation. 
if (divisor < maxInt(HalfLimb)) { - lldiv0p5(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], @intCast(HalfLimb, divisor)); + lldiv0p5(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], @as(HalfLimb, @intCast(divisor))); } else { lldiv1(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], divisor); } @@ -1671,7 +1671,7 @@ pub const Mutable = struct { } else { const q0 = (@as(DoubleLimb, x.limbs[i]) << limb_bits) | @as(DoubleLimb, x.limbs[i - 1]); const n0 = @as(DoubleLimb, y.limbs[t]); - q.limbs[k] = @intCast(Limb, q0 / n0); + q.limbs[k] = @as(Limb, @intCast(q0 / n0)); } // 3.2 @@ -1750,7 +1750,7 @@ pub const Mutable = struct { return; } - const bit = @truncate(Log2Limb, bit_count - 1); + const bit = @as(Log2Limb, @truncate(bit_count - 1)); const signmask = @as(Limb, 1) << bit; const mask = (signmask << 1) -% 1; @@ -1781,7 +1781,7 @@ pub const Mutable = struct { return; } - const bit = @truncate(Log2Limb, bit_count - 1); + const bit = @as(Log2Limb, @truncate(bit_count - 1)); const signmask = @as(Limb, 1) << bit; // 0b0..010...0 where 1 is the sign bit. const mask = (signmask << 1) -% 1; // 0b0..01..1 where the leftmost 1 is the sign bit. @@ -1912,7 +1912,7 @@ pub const Mutable = struct { .Big => buffer.len - ((total_bits + 7) / 8), }; - const sign_bit = @as(u8, 1) << @intCast(u3, (total_bits - 1) % 8); + const sign_bit = @as(u8, 1) << @as(u3, @intCast((total_bits - 1) % 8)); positive = ((buffer[last_byte] & sign_bit) == 0); } @@ -1942,7 +1942,7 @@ pub const Mutable = struct { .signed => b: { const SLimb = std.meta.Int(.signed, @bitSizeOf(Limb)); const limb = mem.readVarPackedInt(SLimb, buffer, bit_index + bit_offset, bit_count - bit_index, endian, .signed); - break :b @bitCast(Limb, limb); + break :b @as(Limb, @bitCast(limb)); }, }; @@ -2170,7 +2170,7 @@ pub const Const = struct { var r: UT = 0; if (@sizeOf(UT) <= @sizeOf(Limb)) { - r = @intCast(UT, self.limbs[0]); + r = @as(UT, @intCast(self.limbs[0])); } else { for (self.limbs[0..self.limbs.len], 0..) 
|_, ri| { const limb = self.limbs[self.limbs.len - ri - 1]; @@ -2180,10 +2180,10 @@ pub const Const = struct { } if (info.signedness == .unsigned) { - return if (self.positive) @intCast(T, r) else error.NegativeIntoUnsigned; + return if (self.positive) @as(T, @intCast(r)) else error.NegativeIntoUnsigned; } else { if (self.positive) { - return @intCast(T, r); + return @as(T, @intCast(r)); } else { if (math.cast(T, r)) |ok| { return -ok; @@ -2292,7 +2292,7 @@ pub const Const = struct { outer: for (self.limbs[0..self.limbs.len]) |limb| { var shift: usize = 0; while (shift < limb_bits) : (shift += base_shift) { - const r = @intCast(u8, (limb >> @intCast(Log2Limb, shift)) & @as(Limb, base - 1)); + const r = @as(u8, @intCast((limb >> @as(Log2Limb, @intCast(shift))) & @as(Limb, base - 1))); const ch = std.fmt.digitToChar(r, case); string[digits_len] = ch; digits_len += 1; @@ -2340,7 +2340,7 @@ pub const Const = struct { var r_word = r.limbs[0]; var i: usize = 0; while (i < digits_per_limb) : (i += 1) { - const ch = std.fmt.digitToChar(@intCast(u8, r_word % base), case); + const ch = std.fmt.digitToChar(@as(u8, @intCast(r_word % base)), case); r_word /= base; string[digits_len] = ch; digits_len += 1; @@ -2352,7 +2352,7 @@ pub const Const = struct { var r_word = q.limbs[0]; while (r_word != 0) { - const ch = std.fmt.digitToChar(@intCast(u8, r_word % base), case); + const ch = std.fmt.digitToChar(@as(u8, @intCast(r_word % base)), case); r_word /= base; string[digits_len] = ch; digits_len += 1; @@ -3680,13 +3680,13 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void { rem.* = 0; } else if (pdiv < b) { quo[i] = 0; - rem.* = @truncate(Limb, pdiv); + rem.* = @as(Limb, @truncate(pdiv)); } else if (pdiv == b) { quo[i] = 1; rem.* = 0; } else { - quo[i] = @truncate(Limb, @divTrunc(pdiv, b)); - rem.* = @truncate(Limb, pdiv - (quo[i] *% b)); + quo[i] = @as(Limb, @truncate(@divTrunc(pdiv, b))); + rem.* = @as(Limb, @truncate(pdiv - (quo[i] *% b))); } } } @@ -3719,7 +3719,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void { @setRuntimeSafety(debug_safety); assert(a.len >= 1); - const interior_limb_shift = @truncate(Log2Limb, shift); + const interior_limb_shift = @as(Log2Limb, @truncate(shift)); // We only need the extra limb if the shift of the last element overflows. // This is useful for the implementation of `shiftLeftSat`. @@ -3741,7 +3741,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void { r[dst_i] = carry | @call(.always_inline, math.shr, .{ Limb, src_digit, - limb_bits - @intCast(Limb, interior_limb_shift), + limb_bits - @as(Limb, @intCast(interior_limb_shift)), }); carry = (src_digit << interior_limb_shift); } @@ -3756,7 +3756,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void { assert(r.len >= a.len - (shift / limb_bits)); const limb_shift = shift / limb_bits; - const interior_limb_shift = @truncate(Log2Limb, shift); + const interior_limb_shift = @as(Log2Limb, @truncate(shift)); var carry: Limb = 0; var i: usize = 0; @@ -3769,7 +3769,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void { carry = @call(.always_inline, math.shl, .{ Limb, src_digit, - limb_bits - @intCast(Limb, interior_limb_shift), + limb_bits - @as(Limb, @intCast(interior_limb_shift)), }); } } @@ -4150,7 +4150,7 @@ fn llpow(r: []Limb, a: []const Limb, b: u32, tmp_limbs: []Limb) void { // Square the result if the current bit is zero, square and multiply by a if // it is one. 
var exp_bits = 32 - 1 - b_leading_zeros; - var exp = b << @intCast(u5, 1 + b_leading_zeros); + var exp = b << @as(u5, @intCast(1 + b_leading_zeros)); var i: usize = 0; while (i < exp_bits) : (i += 1) { @@ -4174,9 +4174,9 @@ fn fixedIntFromSignedDoubleLimb(A: SignedDoubleLimb, storage: []Limb) Mutable { assert(storage.len >= 2); const A_is_positive = A >= 0; - const Au = @intCast(DoubleLimb, if (A < 0) -A else A); - storage[0] = @truncate(Limb, Au); - storage[1] = @truncate(Limb, Au >> limb_bits); + const Au = @as(DoubleLimb, @intCast(if (A < 0) -A else A)); + storage[0] = @as(Limb, @truncate(Au)); + storage[1] = @as(Limb, @truncate(Au >> limb_bits)); return .{ .limbs = storage[0..2], .positive = A_is_positive, diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig index 9c3c1b6881..3eaa46d7c1 100644 --- a/lib/std/math/big/int_test.zig +++ b/lib/std/math/big/int_test.zig @@ -2898,19 +2898,19 @@ test "big int conversion write twos complement with padding" { buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xaa }; m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned); - try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaa_02030405_06070809_0a0b0c0d)) == .eq); + try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaa_02030405_06070809_0a0b0c0d))) == .eq); buffer = &[_]u8{ 0xaa, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }; m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned); - try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaa_02030405_06070809_0a0b0c0d)) == .eq); + try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaa_02030405_06070809_0a0b0c0d))) == .eq); buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xaa, 0xaa, 0xaa, 0xaa }; m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned); - try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaaaaaaaa_02030405_06070809_0a0b0c0d)) == .eq); + try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaaaaaaaa_02030405_06070809_0a0b0c0d))) == .eq); buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0xaa, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }; m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned); - try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaaaaaaaa_02030405_06070809_0a0b0c0d)) == .eq); + try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaaaaaaaa_02030405_06070809_0a0b0c0d))) == .eq); bit_count = 12 * 8 + 2; @@ -3014,20 +3014,20 @@ test "big int bit reverse" { try bitReverseTest(u96, 0x123456789abcdef111213141, 0x828c84888f7b3d591e6a2c48); try bitReverseTest(u128, 0x123456789abcdef11121314151617181, 0x818e868a828c84888f7b3d591e6a2c48); - try bitReverseTest(i8, @bitCast(i8, @as(u8, 0x92)), @bitCast(i8, @as(u8, 0x49))); - try bitReverseTest(i16, @bitCast(i16, @as(u16, 0x1234)), @bitCast(i16, @as(u16, 0x2c48))); - try bitReverseTest(i24, @bitCast(i24, @as(u24, 0x123456)), @bitCast(i24, @as(u24, 0x6a2c48))); - try bitReverseTest(i24, @bitCast(i24, @as(u24, 0x12345f)), @bitCast(i24, @as(u24, 0xfa2c48))); - try bitReverseTest(i24, @bitCast(i24, @as(u24, 0xf23456)), @bitCast(i24, @as(u24, 0x6a2c4f))); - try bitReverseTest(i32, @bitCast(i32, @as(u32, 0x12345678)), @bitCast(i32, @as(u32, 0x1e6a2c48))); - try bitReverseTest(i32, @bitCast(i32, @as(u32, 0xf2345678)), @bitCast(i32, @as(u32, 0x1e6a2c4f))); - try bitReverseTest(i32, @bitCast(i32, @as(u32, 0x1234567f)), 
@bitCast(i32, @as(u32, 0xfe6a2c48))); - try bitReverseTest(i40, @bitCast(i40, @as(u40, 0x123456789a)), @bitCast(i40, @as(u40, 0x591e6a2c48))); - try bitReverseTest(i48, @bitCast(i48, @as(u48, 0x123456789abc)), @bitCast(i48, @as(u48, 0x3d591e6a2c48))); - try bitReverseTest(i56, @bitCast(i56, @as(u56, 0x123456789abcde)), @bitCast(i56, @as(u56, 0x7b3d591e6a2c48))); - try bitReverseTest(i64, @bitCast(i64, @as(u64, 0x123456789abcdef1)), @bitCast(i64, @as(u64, 0x8f7b3d591e6a2c48))); - try bitReverseTest(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141)), @bitCast(i96, @as(u96, 0x828c84888f7b3d591e6a2c48))); - try bitReverseTest(i128, @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181)), @bitCast(i128, @as(u128, 0x818e868a828c84888f7b3d591e6a2c48))); + try bitReverseTest(i8, @as(i8, @bitCast(@as(u8, 0x92))), @as(i8, @bitCast(@as(u8, 0x49)))); + try bitReverseTest(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x2c48)))); + try bitReverseTest(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x6a2c48)))); + try bitReverseTest(i24, @as(i24, @bitCast(@as(u24, 0x12345f))), @as(i24, @bitCast(@as(u24, 0xfa2c48)))); + try bitReverseTest(i24, @as(i24, @bitCast(@as(u24, 0xf23456))), @as(i24, @bitCast(@as(u24, 0x6a2c4f)))); + try bitReverseTest(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x1e6a2c48)))); + try bitReverseTest(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), @as(i32, @bitCast(@as(u32, 0x1e6a2c4f)))); + try bitReverseTest(i32, @as(i32, @bitCast(@as(u32, 0x1234567f))), @as(i32, @bitCast(@as(u32, 0xfe6a2c48)))); + try bitReverseTest(i40, @as(i40, @bitCast(@as(u40, 0x123456789a))), @as(i40, @bitCast(@as(u40, 0x591e6a2c48)))); + try bitReverseTest(i48, @as(i48, @bitCast(@as(u48, 0x123456789abc))), @as(i48, @bitCast(@as(u48, 0x3d591e6a2c48)))); + try bitReverseTest(i56, @as(i56, @bitCast(@as(u56, 0x123456789abcde))), @as(i56, @bitCast(@as(u56, 0x7b3d591e6a2c48)))); + try bitReverseTest(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0x8f7b3d591e6a2c48)))); + try bitReverseTest(i96, @as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141))), @as(i96, @bitCast(@as(u96, 0x828c84888f7b3d591e6a2c48)))); + try bitReverseTest(i128, @as(i128, @bitCast(@as(u128, 0x123456789abcdef11121314151617181))), @as(i128, @bitCast(@as(u128, 0x818e868a828c84888f7b3d591e6a2c48)))); } fn byteSwapTest(comptime T: type, comptime input: comptime_int, comptime expected_output: comptime_int) !void { @@ -3063,16 +3063,16 @@ test "big int byte swap" { try byteSwapTest(u128, 0x123456789abcdef11121314151617181, 0x8171615141312111f1debc9a78563412); try byteSwapTest(i8, -50, -50); - try byteSwapTest(i16, @bitCast(i16, @as(u16, 0x1234)), @bitCast(i16, @as(u16, 0x3412))); - try byteSwapTest(i24, @bitCast(i24, @as(u24, 0x123456)), @bitCast(i24, @as(u24, 0x563412))); - try byteSwapTest(i32, @bitCast(i32, @as(u32, 0x12345678)), @bitCast(i32, @as(u32, 0x78563412))); - try byteSwapTest(i40, @bitCast(i40, @as(u40, 0x123456789a)), @bitCast(i40, @as(u40, 0x9a78563412))); - try byteSwapTest(i48, @bitCast(i48, @as(u48, 0x123456789abc)), @bitCast(i48, @as(u48, 0xbc9a78563412))); - try byteSwapTest(i56, @bitCast(i56, @as(u56, 0x123456789abcde)), @bitCast(i56, @as(u56, 0xdebc9a78563412))); - try byteSwapTest(i64, @bitCast(i64, @as(u64, 0x123456789abcdef1)), @bitCast(i64, @as(u64, 0xf1debc9a78563412))); - try byteSwapTest(i88, @bitCast(i88, @as(u88, 0x123456789abcdef1112131)), @bitCast(i88, @as(u88, 0x312111f1debc9a78563412))); - 
try byteSwapTest(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141)), @bitCast(i96, @as(u96, 0x41312111f1debc9a78563412))); - try byteSwapTest(i128, @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181)), @bitCast(i128, @as(u128, 0x8171615141312111f1debc9a78563412))); + try byteSwapTest(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x3412)))); + try byteSwapTest(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x563412)))); + try byteSwapTest(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x78563412)))); + try byteSwapTest(i40, @as(i40, @bitCast(@as(u40, 0x123456789a))), @as(i40, @bitCast(@as(u40, 0x9a78563412)))); + try byteSwapTest(i48, @as(i48, @bitCast(@as(u48, 0x123456789abc))), @as(i48, @bitCast(@as(u48, 0xbc9a78563412)))); + try byteSwapTest(i56, @as(i56, @bitCast(@as(u56, 0x123456789abcde))), @as(i56, @bitCast(@as(u56, 0xdebc9a78563412)))); + try byteSwapTest(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412)))); + try byteSwapTest(i88, @as(i88, @bitCast(@as(u88, 0x123456789abcdef1112131))), @as(i88, @bitCast(@as(u88, 0x312111f1debc9a78563412)))); + try byteSwapTest(i96, @as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141))), @as(i96, @bitCast(@as(u96, 0x41312111f1debc9a78563412)))); + try byteSwapTest(i128, @as(i128, @bitCast(@as(u128, 0x123456789abcdef11121314151617181))), @as(i128, @bitCast(@as(u128, 0x8171615141312111f1debc9a78563412)))); try byteSwapTest(u512, 0x80, 1 << 511); try byteSwapTest(i512, 0x80, minInt(i512)); @@ -3080,11 +3080,11 @@ test "big int byte swap" { try byteSwapTest(i512, -0x100, (1 << 504) - 1); try byteSwapTest(i400, -0x100, (1 << 392) - 1); try byteSwapTest(i400, -0x2, -(1 << 392) - 1); - try byteSwapTest(i24, @bitCast(i24, @as(u24, 0xf23456)), 0x5634f2); - try byteSwapTest(i24, 0x1234f6, @bitCast(i24, @as(u24, 0xf63412))); - try byteSwapTest(i32, @bitCast(i32, @as(u32, 0xf2345678)), 0x785634f2); - try byteSwapTest(i32, 0x123456f8, @bitCast(i32, @as(u32, 0xf8563412))); - try byteSwapTest(i48, 0x123456789abc, @bitCast(i48, @as(u48, 0xbc9a78563412))); + try byteSwapTest(i24, @as(i24, @bitCast(@as(u24, 0xf23456))), 0x5634f2); + try byteSwapTest(i24, 0x1234f6, @as(i24, @bitCast(@as(u24, 0xf63412)))); + try byteSwapTest(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), 0x785634f2); + try byteSwapTest(i32, 0x123456f8, @as(i32, @bitCast(@as(u32, 0xf8563412)))); + try byteSwapTest(i48, 0x123456789abc, @as(i48, @bitCast(@as(u48, 0xbc9a78563412)))); } test "big.int mul multi-multi alias r with a and b" { diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig index 22f7ba183f..5313380c27 100644 --- a/lib/std/math/big/rational.zig +++ b/lib/std/math/big/rational.zig @@ -137,7 +137,7 @@ pub const Rational = struct { debug.assert(@typeInfo(T) == .Float); const UnsignedInt = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); - const f_bits = @bitCast(UnsignedInt, f); + const f_bits = @as(UnsignedInt, @bitCast(f)); const exponent_bits = math.floatExponentBits(T); const exponent_bias = (1 << (exponent_bits - 1)) - 1; @@ -146,7 +146,7 @@ pub const Rational = struct { const exponent_mask = (1 << exponent_bits) - 1; const mantissa_mask = (1 << mantissa_bits) - 1; - var exponent = @intCast(i16, (f_bits >> mantissa_bits) & exponent_mask); + var exponent = @as(i16, @intCast((f_bits >> mantissa_bits) & exponent_mask)); var mantissa = f_bits & mantissa_mask; switch (exponent) { @@ -177,9 +177,9 @@ pub const Rational = struct 
{ try self.q.set(1); if (shift >= 0) { - try self.q.shiftLeft(&self.q, @intCast(usize, shift)); + try self.q.shiftLeft(&self.q, @as(usize, @intCast(shift))); } else { - try self.p.shiftLeft(&self.p, @intCast(usize, -shift)); + try self.p.shiftLeft(&self.p, @as(usize, @intCast(-shift))); } try self.reduce(); @@ -210,7 +210,7 @@ pub const Rational = struct { } // 1. left-shift a or sub so that a/b is in [1 << msize1, 1 << (msize2 + 1)] - var exp = @intCast(isize, self.p.bitCountTwosComp()) - @intCast(isize, self.q.bitCountTwosComp()); + var exp = @as(isize, @intCast(self.p.bitCountTwosComp())) - @as(isize, @intCast(self.q.bitCountTwosComp())); var a2 = try self.p.clone(); defer a2.deinit(); @@ -220,9 +220,9 @@ pub const Rational = struct { const shift = msize2 - exp; if (shift >= 0) { - try a2.shiftLeft(&a2, @intCast(usize, shift)); + try a2.shiftLeft(&a2, @as(usize, @intCast(shift))); } else { - try b2.shiftLeft(&b2, @intCast(usize, -shift)); + try b2.shiftLeft(&b2, @as(usize, @intCast(-shift))); } // 2. compute quotient and remainder @@ -254,8 +254,8 @@ pub const Rational = struct { // 4. Rounding if (emin - msize <= exp and exp <= emin) { // denormal - const shift1 = @intCast(math.Log2Int(BitReprType), emin - (exp - 1)); - const lost_bits = mantissa & ((@intCast(BitReprType, 1) << shift1) - 1); + const shift1 = @as(math.Log2Int(BitReprType), @intCast(emin - (exp - 1))); + const lost_bits = mantissa & ((@as(BitReprType, @intCast(1)) << shift1) - 1); have_rem = have_rem or lost_bits != 0; mantissa >>= shift1; exp = 2 - ebias; @@ -276,7 +276,7 @@ pub const Rational = struct { } mantissa >>= 1; - const f = math.scalbn(@floatFromInt(T, mantissa), @intCast(i32, exp - msize1)); + const f = math.scalbn(@as(T, @floatFromInt(mantissa)), @as(i32, @intCast(exp - msize1))); if (math.isInf(f)) { exact = false; } @@ -477,7 +477,7 @@ fn extractLowBits(a: Int, comptime T: type) T { const t_bits = @typeInfo(T).Int.bits; const limb_bits = @typeInfo(Limb).Int.bits; if (t_bits <= limb_bits) { - return @truncate(T, a.limbs[0]); + return @as(T, @truncate(a.limbs[0])); } else { var r: T = 0; comptime var i: usize = 0; diff --git a/lib/std/math/cbrt.zig b/lib/std/math/cbrt.zig index 1ff1818e8d..737757b817 100644 --- a/lib/std/math/cbrt.zig +++ b/lib/std/math/cbrt.zig @@ -27,7 +27,7 @@ fn cbrt32(x: f32) f32 { const B1: u32 = 709958130; // (127 - 127.0 / 3 - 0.03306235651) * 2^23 const B2: u32 = 642849266; // (127 - 127.0 / 3 - 24 / 3 - 0.03306235651) * 2^23 - var u = @bitCast(u32, x); + var u = @as(u32, @bitCast(x)); var hx = u & 0x7FFFFFFF; // cbrt(nan, inf) = itself @@ -41,7 +41,7 @@ fn cbrt32(x: f32) f32 { if (hx == 0) { return x; } - u = @bitCast(u32, x * 0x1.0p24); + u = @as(u32, @bitCast(x * 0x1.0p24)); hx = u & 0x7FFFFFFF; hx = hx / 3 + B2; } else { @@ -52,7 +52,7 @@ fn cbrt32(x: f32) f32 { u |= hx; // first step newton to 16 bits - var t: f64 = @bitCast(f32, u); + var t: f64 = @as(f32, @bitCast(u)); var r: f64 = t * t * t; t = t * (@as(f64, x) + x + r) / (x + r + r); @@ -60,7 +60,7 @@ fn cbrt32(x: f32) f32 { r = t * t * t; t = t * (@as(f64, x) + x + r) / (x + r + r); - return @floatCast(f32, t); + return @as(f32, @floatCast(t)); } fn cbrt64(x: f64) f64 { @@ -74,8 +74,8 @@ fn cbrt64(x: f64) f64 { const P3: f64 = -0.758397934778766047437; const P4: f64 = 0.145996192886612446982; - var u = @bitCast(u64, x); - var hx = @intCast(u32, u >> 32) & 0x7FFFFFFF; + var u = @as(u64, @bitCast(x)); + var hx = @as(u32, @intCast(u >> 32)) & 0x7FFFFFFF; // cbrt(nan, inf) = itself if (hx >= 0x7FF00000) { @@ -84,8 +84,8 @@ fn 
cbrt64(x: f64) f64 { // cbrt to ~5bits if (hx < 0x00100000) { - u = @bitCast(u64, x * 0x1.0p54); - hx = @intCast(u32, u >> 32) & 0x7FFFFFFF; + u = @as(u64, @bitCast(x * 0x1.0p54)); + hx = @as(u32, @intCast(u >> 32)) & 0x7FFFFFFF; // cbrt(0) is itself if (hx == 0) { @@ -98,7 +98,7 @@ fn cbrt64(x: f64) f64 { u &= 1 << 63; u |= @as(u64, hx) << 32; - var t = @bitCast(f64, u); + var t = @as(f64, @bitCast(u)); // cbrt to 23 bits // cbrt(x) = t * cbrt(x / t^3) ~= t * P(t^3 / x) @@ -106,9 +106,9 @@ fn cbrt64(x: f64) f64 { t = t * ((P0 + r * (P1 + r * P2)) + ((r * r) * r) * (P3 + r * P4)); // Round t away from 0 to 23 bits - u = @bitCast(u64, t); + u = @as(u64, @bitCast(t)); u = (u + 0x80000000) & 0xFFFFFFFFC0000000; - t = @bitCast(f64, u); + t = @as(f64, @bitCast(u)); // one step newton to 53 bits const s = t * t; diff --git a/lib/std/math/complex/atan.zig b/lib/std/math/complex/atan.zig index 56c199016d..381fc43f7d 100644 --- a/lib/std/math/complex/atan.zig +++ b/lib/std/math/complex/atan.zig @@ -32,7 +32,7 @@ fn redupif32(x: f32) f32 { t -= 0.5; } - const u = @floatFromInt(f32, @intFromFloat(i32, t)); + const u = @as(f32, @floatFromInt(@as(i32, @intFromFloat(t)))); return ((x - u * DP1) - u * DP2) - t * DP3; } @@ -81,7 +81,7 @@ fn redupif64(x: f64) f64 { t -= 0.5; } - const u = @floatFromInt(f64, @intFromFloat(i64, t)); + const u = @as(f64, @floatFromInt(@as(i64, @intFromFloat(t)))); return ((x - u * DP1) - u * DP2) - t * DP3; } diff --git a/lib/std/math/complex/cosh.zig b/lib/std/math/complex/cosh.zig index b3ffab5175..413279db2d 100644 --- a/lib/std/math/complex/cosh.zig +++ b/lib/std/math/complex/cosh.zig @@ -26,10 +26,10 @@ fn cosh32(z: Complex(f32)) Complex(f32) { const x = z.re; const y = z.im; - const hx = @bitCast(u32, x); + const hx = @as(u32, @bitCast(x)); const ix = hx & 0x7fffffff; - const hy = @bitCast(u32, y); + const hy = @as(u32, @bitCast(y)); const iy = hy & 0x7fffffff; if (ix < 0x7f800000 and iy < 0x7f800000) { @@ -89,14 +89,14 @@ fn cosh64(z: Complex(f64)) Complex(f64) { const x = z.re; const y = z.im; - const fx = @bitCast(u64, x); - const hx = @intCast(u32, fx >> 32); - const lx = @truncate(u32, fx); + const fx = @as(u64, @bitCast(x)); + const hx = @as(u32, @intCast(fx >> 32)); + const lx = @as(u32, @truncate(fx)); const ix = hx & 0x7fffffff; - const fy = @bitCast(u64, y); - const hy = @intCast(u32, fy >> 32); - const ly = @truncate(u32, fy); + const fy = @as(u64, @bitCast(y)); + const hy = @as(u32, @intCast(fy >> 32)); + const ly = @as(u32, @truncate(fy)); const iy = hy & 0x7fffffff; // nearly non-exceptional case where x, y are finite diff --git a/lib/std/math/complex/exp.zig b/lib/std/math/complex/exp.zig index 84ee251d0e..4644ea4be7 100644 --- a/lib/std/math/complex/exp.zig +++ b/lib/std/math/complex/exp.zig @@ -30,13 +30,13 @@ fn exp32(z: Complex(f32)) Complex(f32) { const x = z.re; const y = z.im; - const hy = @bitCast(u32, y) & 0x7fffffff; + const hy = @as(u32, @bitCast(y)) & 0x7fffffff; // cexp(x + i0) = exp(x) + i0 if (hy == 0) { return Complex(f32).init(@exp(x), y); } - const hx = @bitCast(u32, x); + const hx = @as(u32, @bitCast(x)); // cexp(0 + iy) = cos(y) + isin(y) if ((hx & 0x7fffffff) == 0) { return Complex(f32).init(@cos(y), @sin(y)); @@ -75,18 +75,18 @@ fn exp64(z: Complex(f64)) Complex(f64) { const x = z.re; const y = z.im; - const fy = @bitCast(u64, y); - const hy = @intCast(u32, (fy >> 32) & 0x7fffffff); - const ly = @truncate(u32, fy); + const fy = @as(u64, @bitCast(y)); + const hy = @as(u32, @intCast((fy >> 32) & 0x7fffffff)); + const ly = @as(u32, 
@truncate(fy)); // cexp(x + i0) = exp(x) + i0 if (hy | ly == 0) { return Complex(f64).init(@exp(x), y); } - const fx = @bitCast(u64, x); - const hx = @intCast(u32, fx >> 32); - const lx = @truncate(u32, fx); + const fx = @as(u64, @bitCast(x)); + const hx = @as(u32, @intCast(fx >> 32)); + const lx = @as(u32, @truncate(fx)); // cexp(0 + iy) = cos(y) + isin(y) if ((hx & 0x7fffffff) | lx == 0) { diff --git a/lib/std/math/complex/ldexp.zig b/lib/std/math/complex/ldexp.zig index c196d4afe6..201b6305af 100644 --- a/lib/std/math/complex/ldexp.zig +++ b/lib/std/math/complex/ldexp.zig @@ -27,10 +27,10 @@ fn frexp_exp32(x: f32, expt: *i32) f32 { const kln2 = 162.88958740; // k * ln2 const exp_x = @exp(x - kln2); - const hx = @bitCast(u32, exp_x); + const hx = @as(u32, @bitCast(exp_x)); // TODO zig should allow this cast implicitly because it should know the value is in range - expt.* = @intCast(i32, hx >> 23) - (0x7f + 127) + k; - return @bitCast(f32, (hx & 0x7fffff) | ((0x7f + 127) << 23)); + expt.* = @as(i32, @intCast(hx >> 23)) - (0x7f + 127) + k; + return @as(f32, @bitCast((hx & 0x7fffff) | ((0x7f + 127) << 23))); } fn ldexp_cexp32(z: Complex(f32), expt: i32) Complex(f32) { @@ -39,10 +39,10 @@ fn ldexp_cexp32(z: Complex(f32), expt: i32) Complex(f32) { const exptf = expt + ex_expt; const half_expt1 = @divTrunc(exptf, 2); - const scale1 = @bitCast(f32, (0x7f + half_expt1) << 23); + const scale1 = @as(f32, @bitCast((0x7f + half_expt1) << 23)); const half_expt2 = exptf - half_expt1; - const scale2 = @bitCast(f32, (0x7f + half_expt2) << 23); + const scale2 = @as(f32, @bitCast((0x7f + half_expt2) << 23)); return Complex(f32).init( @cos(z.im) * exp_x * scale1 * scale2, @@ -56,14 +56,14 @@ fn frexp_exp64(x: f64, expt: *i32) f64 { const exp_x = @exp(x - kln2); - const fx = @bitCast(u64, exp_x); - const hx = @intCast(u32, fx >> 32); - const lx = @truncate(u32, fx); + const fx = @as(u64, @bitCast(exp_x)); + const hx = @as(u32, @intCast(fx >> 32)); + const lx = @as(u32, @truncate(fx)); - expt.* = @intCast(i32, hx >> 20) - (0x3ff + 1023) + k; + expt.* = @as(i32, @intCast(hx >> 20)) - (0x3ff + 1023) + k; const high_word = (hx & 0xfffff) | ((0x3ff + 1023) << 20); - return @bitCast(f64, (@as(u64, high_word) << 32) | lx); + return @as(f64, @bitCast((@as(u64, high_word) << 32) | lx)); } fn ldexp_cexp64(z: Complex(f64), expt: i32) Complex(f64) { @@ -72,10 +72,10 @@ fn ldexp_cexp64(z: Complex(f64), expt: i32) Complex(f64) { const exptf = @as(i64, expt + ex_expt); const half_expt1 = @divTrunc(exptf, 2); - const scale1 = @bitCast(f64, (0x3ff + half_expt1) << (20 + 32)); + const scale1 = @as(f64, @bitCast((0x3ff + half_expt1) << (20 + 32))); const half_expt2 = exptf - half_expt1; - const scale2 = @bitCast(f64, (0x3ff + half_expt2) << (20 + 32)); + const scale2 = @as(f64, @bitCast((0x3ff + half_expt2) << (20 + 32))); return Complex(f64).init( @cos(z.im) * exp_x * scale1 * scale2, diff --git a/lib/std/math/complex/sinh.zig b/lib/std/math/complex/sinh.zig index 9afb7faf30..c9ea0d04fc 100644 --- a/lib/std/math/complex/sinh.zig +++ b/lib/std/math/complex/sinh.zig @@ -26,10 +26,10 @@ fn sinh32(z: Complex(f32)) Complex(f32) { const x = z.re; const y = z.im; - const hx = @bitCast(u32, x); + const hx = @as(u32, @bitCast(x)); const ix = hx & 0x7fffffff; - const hy = @bitCast(u32, y); + const hy = @as(u32, @bitCast(y)); const iy = hy & 0x7fffffff; if (ix < 0x7f800000 and iy < 0x7f800000) { @@ -89,14 +89,14 @@ fn sinh64(z: Complex(f64)) Complex(f64) { const x = z.re; const y = z.im; - const fx = @bitCast(u64, x); - const hx = 
@intCast(u32, fx >> 32); - const lx = @truncate(u32, fx); + const fx = @as(u64, @bitCast(x)); + const hx = @as(u32, @intCast(fx >> 32)); + const lx = @as(u32, @truncate(fx)); const ix = hx & 0x7fffffff; - const fy = @bitCast(u64, y); - const hy = @intCast(u32, fy >> 32); - const ly = @truncate(u32, fy); + const fy = @as(u64, @bitCast(y)); + const hy = @as(u32, @intCast(fy >> 32)); + const ly = @as(u32, @truncate(fy)); const iy = hy & 0x7fffffff; if (ix < 0x7ff00000 and iy < 0x7ff00000) { diff --git a/lib/std/math/complex/sqrt.zig b/lib/std/math/complex/sqrt.zig index 456d10aa85..fe2e8e6531 100644 --- a/lib/std/math/complex/sqrt.zig +++ b/lib/std/math/complex/sqrt.zig @@ -58,14 +58,14 @@ fn sqrt32(z: Complex(f32)) Complex(f32) { if (dx >= 0) { const t = @sqrt((dx + math.hypot(f64, dx, dy)) * 0.5); return Complex(f32).init( - @floatCast(f32, t), - @floatCast(f32, dy / (2.0 * t)), + @as(f32, @floatCast(t)), + @as(f32, @floatCast(dy / (2.0 * t))), ); } else { const t = @sqrt((-dx + math.hypot(f64, dx, dy)) * 0.5); return Complex(f32).init( - @floatCast(f32, @fabs(y) / (2.0 * t)), - @floatCast(f32, math.copysign(t, y)), + @as(f32, @floatCast(@fabs(y) / (2.0 * t))), + @as(f32, @floatCast(math.copysign(t, y))), ); } } diff --git a/lib/std/math/complex/tanh.zig b/lib/std/math/complex/tanh.zig index 92e197e308..a90f141741 100644 --- a/lib/std/math/complex/tanh.zig +++ b/lib/std/math/complex/tanh.zig @@ -24,7 +24,7 @@ fn tanh32(z: Complex(f32)) Complex(f32) { const x = z.re; const y = z.im; - const hx = @bitCast(u32, x); + const hx = @as(u32, @bitCast(x)); const ix = hx & 0x7fffffff; if (ix >= 0x7f800000) { @@ -32,7 +32,7 @@ fn tanh32(z: Complex(f32)) Complex(f32) { const r = if (y == 0) y else x * y; return Complex(f32).init(x, r); } - const xx = @bitCast(f32, hx - 0x40000000); + const xx = @as(f32, @bitCast(hx - 0x40000000)); const r = if (math.isInf(y)) y else @sin(y) * @cos(y); return Complex(f32).init(xx, math.copysign(@as(f32, 0.0), r)); } @@ -62,11 +62,11 @@ fn tanh64(z: Complex(f64)) Complex(f64) { const x = z.re; const y = z.im; - const fx = @bitCast(u64, x); + const fx = @as(u64, @bitCast(x)); // TODO: zig should allow this conversion implicitly because it can notice that the value necessarily // fits in range. 
- const hx = @intCast(u32, fx >> 32); - const lx = @truncate(u32, fx); + const hx = @as(u32, @intCast(fx >> 32)); + const lx = @as(u32, @truncate(fx)); const ix = hx & 0x7fffffff; if (ix >= 0x7ff00000) { @@ -75,7 +75,7 @@ fn tanh64(z: Complex(f64)) Complex(f64) { return Complex(f64).init(x, r); } - const xx = @bitCast(f64, (@as(u64, hx - 0x40000000) << 32) | lx); + const xx = @as(f64, @bitCast((@as(u64, hx - 0x40000000) << 32) | lx)); const r = if (math.isInf(y)) y else @sin(y) * @cos(y); return Complex(f64).init(xx, math.copysign(@as(f64, 0.0), r)); } diff --git a/lib/std/math/copysign.zig b/lib/std/math/copysign.zig index b5fd6d4d9a..3cefc0471f 100644 --- a/lib/std/math/copysign.zig +++ b/lib/std/math/copysign.zig @@ -7,9 +7,9 @@ pub fn copysign(magnitude: anytype, sign: @TypeOf(magnitude)) @TypeOf(magnitude) const T = @TypeOf(magnitude); const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); const sign_bit_mask = @as(TBits, 1) << (@bitSizeOf(T) - 1); - const mag = @bitCast(TBits, magnitude) & ~sign_bit_mask; - const sgn = @bitCast(TBits, sign) & sign_bit_mask; - return @bitCast(T, mag | sgn); + const mag = @as(TBits, @bitCast(magnitude)) & ~sign_bit_mask; + const sgn = @as(TBits, @bitCast(sign)) & sign_bit_mask; + return @as(T, @bitCast(mag | sgn)); } test "math.copysign" { diff --git a/lib/std/math/cosh.zig b/lib/std/math/cosh.zig index d633f2fa0c..085d6fd2f9 100644 --- a/lib/std/math/cosh.zig +++ b/lib/std/math/cosh.zig @@ -29,9 +29,9 @@ pub fn cosh(x: anytype) @TypeOf(x) { // = 1 + 0.5 * (exp(x) - 1) * (exp(x) - 1) / exp(x) // = 1 + (x * x) / 2 + o(x^4) fn cosh32(x: f32) f32 { - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); const ux = u & 0x7FFFFFFF; - const ax = @bitCast(f32, ux); + const ax = @as(f32, @bitCast(ux)); // |x| < log(2) if (ux < 0x3F317217) { @@ -54,9 +54,9 @@ fn cosh32(x: f32) f32 { } fn cosh64(x: f64) f64 { - const u = @bitCast(u64, x); - const w = @intCast(u32, u >> 32) & (maxInt(u32) >> 1); - const ax = @bitCast(f64, u & (maxInt(u64) >> 1)); + const u = @as(u64, @bitCast(x)); + const w = @as(u32, @intCast(u >> 32)) & (maxInt(u32) >> 1); + const ax = @as(f64, @bitCast(u & (maxInt(u64) >> 1))); // TODO: Shouldn't need this explicit check. 
if (x == 0.0) { diff --git a/lib/std/math/expm1.zig b/lib/std/math/expm1.zig index 5c4052db56..8192573a88 100644 --- a/lib/std/math/expm1.zig +++ b/lib/std/math/expm1.zig @@ -38,7 +38,7 @@ fn expm1_32(x_: f32) f32 { const Q2: f32 = 1.5807170421e-3; var x = x_; - const ux = @bitCast(u32, x); + const ux = @as(u32, @bitCast(x)); const hx = ux & 0x7FFFFFFF; const sign = hx >> 31; @@ -88,8 +88,8 @@ fn expm1_32(x_: f32) f32 { kf += 0.5; } - k = @intFromFloat(i32, kf); - const t = @floatFromInt(f32, k); + k = @as(i32, @intFromFloat(kf)); + const t = @as(f32, @floatFromInt(k)); hi = x - t * ln2_hi; lo = t * ln2_lo; } @@ -133,7 +133,7 @@ fn expm1_32(x_: f32) f32 { } } - const twopk = @bitCast(f32, @intCast(u32, (0x7F +% k) << 23)); + const twopk = @as(f32, @bitCast(@as(u32, @intCast((0x7F +% k) << 23)))); if (k < 0 or k > 56) { var y = x - e + 1.0; @@ -146,7 +146,7 @@ fn expm1_32(x_: f32) f32 { return y - 1.0; } - const uf = @bitCast(f32, @intCast(u32, 0x7F -% k) << 23); + const uf = @as(f32, @bitCast(@as(u32, @intCast(0x7F -% k)) << 23)); if (k < 23) { return (x - e + (1 - uf)) * twopk; } else { @@ -169,8 +169,8 @@ fn expm1_64(x_: f64) f64 { const Q5: f64 = -2.01099218183624371326e-07; var x = x_; - const ux = @bitCast(u64, x); - const hx = @intCast(u32, ux >> 32) & 0x7FFFFFFF; + const ux = @as(u64, @bitCast(x)); + const hx = @as(u32, @intCast(ux >> 32)) & 0x7FFFFFFF; const sign = ux >> 63; if (math.isNegativeInf(x)) { @@ -219,8 +219,8 @@ fn expm1_64(x_: f64) f64 { kf += 0.5; } - k = @intFromFloat(i32, kf); - const t = @floatFromInt(f64, k); + k = @as(i32, @intFromFloat(kf)); + const t = @as(f64, @floatFromInt(k)); hi = x - t * ln2_hi; lo = t * ln2_lo; } @@ -231,7 +231,7 @@ fn expm1_64(x_: f64) f64 { // |x| < 2^(-54) else if (hx < 0x3C900000) { if (hx < 0x00100000) { - math.doNotOptimizeAway(@floatCast(f32, x)); + math.doNotOptimizeAway(@as(f32, @floatCast(x))); } return x; } else { @@ -264,7 +264,7 @@ fn expm1_64(x_: f64) f64 { } } - const twopk = @bitCast(f64, @intCast(u64, 0x3FF +% k) << 52); + const twopk = @as(f64, @bitCast(@as(u64, @intCast(0x3FF +% k)) << 52)); if (k < 0 or k > 56) { var y = x - e + 1.0; @@ -277,7 +277,7 @@ fn expm1_64(x_: f64) f64 { return y - 1.0; } - const uf = @bitCast(f64, @intCast(u64, 0x3FF -% k) << 52); + const uf = @as(f64, @bitCast(@as(u64, @intCast(0x3FF -% k)) << 52)); if (k < 20) { return (x - e + (1 - uf)) * twopk; } else { diff --git a/lib/std/math/expo2.zig b/lib/std/math/expo2.zig index 4345233173..b451e46865 100644 --- a/lib/std/math/expo2.zig +++ b/lib/std/math/expo2.zig @@ -21,7 +21,7 @@ fn expo2f(x: f32) f32 { const kln2 = 0x1.45C778p+7; const u = (0x7F + k / 2) << 23; - const scale = @bitCast(f32, u); + const scale = @as(f32, @bitCast(u)); return @exp(x - kln2) * scale * scale; } @@ -30,6 +30,6 @@ fn expo2d(x: f64) f64 { const kln2 = 0x1.62066151ADD8BP+10; const u = (0x3FF + k / 2) << 20; - const scale = @bitCast(f64, @as(u64, u) << 32); + const scale = @as(f64, @bitCast(@as(u64, u) << 32)); return @exp(x - kln2) * scale * scale; } diff --git a/lib/std/math/float.zig b/lib/std/math/float.zig index 768cc03285..5552ec5c9c 100644 --- a/lib/std/math/float.zig +++ b/lib/std/math/float.zig @@ -11,7 +11,7 @@ inline fn mantissaOne(comptime T: type) comptime_int { inline fn reconstructFloat(comptime T: type, comptime exponent: comptime_int, comptime mantissa: comptime_int) T { const TBits = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = @bitSizeOf(T) } }); const biased_exponent = @as(TBits, exponent + floatExponentMax(T)); - return @bitCast(T, 
(biased_exponent << floatMantissaBits(T)) | @as(TBits, mantissa)); + return @as(T, @bitCast((biased_exponent << floatMantissaBits(T)) | @as(TBits, mantissa))); } /// Returns the number of bits in the exponent of floating point type T. diff --git a/lib/std/math/frexp.zig b/lib/std/math/frexp.zig index 31168d28d4..f295b959cb 100644 --- a/lib/std/math/frexp.zig +++ b/lib/std/math/frexp.zig @@ -38,8 +38,8 @@ pub fn frexp(x: anytype) Frexp(@TypeOf(x)) { fn frexp32(x: f32) Frexp(f32) { var result: Frexp(f32) = undefined; - var y = @bitCast(u32, x); - const e = @intCast(i32, y >> 23) & 0xFF; + var y = @as(u32, @bitCast(x)); + const e = @as(i32, @intCast(y >> 23)) & 0xFF; if (e == 0) { if (x != 0) { @@ -68,15 +68,15 @@ fn frexp32(x: f32) Frexp(f32) { result.exponent = e - 0x7E; y &= 0x807FFFFF; y |= 0x3F000000; - result.significand = @bitCast(f32, y); + result.significand = @as(f32, @bitCast(y)); return result; } fn frexp64(x: f64) Frexp(f64) { var result: Frexp(f64) = undefined; - var y = @bitCast(u64, x); - const e = @intCast(i32, y >> 52) & 0x7FF; + var y = @as(u64, @bitCast(x)); + const e = @as(i32, @intCast(y >> 52)) & 0x7FF; if (e == 0) { if (x != 0) { @@ -105,15 +105,15 @@ fn frexp64(x: f64) Frexp(f64) { result.exponent = e - 0x3FE; y &= 0x800FFFFFFFFFFFFF; y |= 0x3FE0000000000000; - result.significand = @bitCast(f64, y); + result.significand = @as(f64, @bitCast(y)); return result; } fn frexp128(x: f128) Frexp(f128) { var result: Frexp(f128) = undefined; - var y = @bitCast(u128, x); - const e = @intCast(i32, y >> 112) & 0x7FFF; + var y = @as(u128, @bitCast(x)); + const e = @as(i32, @intCast(y >> 112)) & 0x7FFF; if (e == 0) { if (x != 0) { @@ -142,7 +142,7 @@ fn frexp128(x: f128) Frexp(f128) { result.exponent = e - 0x3FFE; y &= 0x8000FFFFFFFFFFFFFFFFFFFFFFFFFFFF; y |= 0x3FFE0000000000000000000000000000; - result.significand = @bitCast(f128, y); + result.significand = @as(f128, @bitCast(y)); return result; } diff --git a/lib/std/math/hypot.zig b/lib/std/math/hypot.zig index 981f6143fe..9fb569667b 100644 --- a/lib/std/math/hypot.zig +++ b/lib/std/math/hypot.zig @@ -25,8 +25,8 @@ pub fn hypot(comptime T: type, x: T, y: T) T { } fn hypot32(x: f32, y: f32) f32 { - var ux = @bitCast(u32, x); - var uy = @bitCast(u32, y); + var ux = @as(u32, @bitCast(x)); + var uy = @as(u32, @bitCast(y)); ux &= maxInt(u32) >> 1; uy &= maxInt(u32) >> 1; @@ -36,8 +36,8 @@ fn hypot32(x: f32, y: f32) f32 { uy = tmp; } - var xx = @bitCast(f32, ux); - var yy = @bitCast(f32, uy); + var xx = @as(f32, @bitCast(ux)); + var yy = @as(f32, @bitCast(uy)); if (uy == 0xFF << 23) { return yy; } @@ -56,7 +56,7 @@ fn hypot32(x: f32, y: f32) f32 { yy *= 0x1.0p-90; } - return z * @sqrt(@floatCast(f32, @as(f64, x) * x + @as(f64, y) * y)); + return z * @sqrt(@as(f32, @floatCast(@as(f64, x) * x + @as(f64, y) * y))); } fn sq(hi: *f64, lo: *f64, x: f64) void { @@ -69,8 +69,8 @@ fn sq(hi: *f64, lo: *f64, x: f64) void { } fn hypot64(x: f64, y: f64) f64 { - var ux = @bitCast(u64, x); - var uy = @bitCast(u64, y); + var ux = @as(u64, @bitCast(x)); + var uy = @as(u64, @bitCast(y)); ux &= maxInt(u64) >> 1; uy &= maxInt(u64) >> 1; @@ -82,8 +82,8 @@ fn hypot64(x: f64, y: f64) f64 { const ex = ux >> 52; const ey = uy >> 52; - var xx = @bitCast(f64, ux); - var yy = @bitCast(f64, uy); + var xx = @as(f64, @bitCast(ux)); + var yy = @as(f64, @bitCast(uy)); // hypot(inf, nan) == inf if (ey == 0x7FF) { diff --git a/lib/std/math/ilogb.zig b/lib/std/math/ilogb.zig index 7c58be2ec5..735a2250c9 100644 --- a/lib/std/math/ilogb.zig +++ b/lib/std/math/ilogb.zig @@ 
-38,8 +38,8 @@ fn ilogbX(comptime T: type, x: T) i32 { const absMask = signBit - 1; - var u = @bitCast(Z, x) & absMask; - var e = @intCast(i32, u >> significandBits); + var u = @as(Z, @bitCast(x)) & absMask; + var e = @as(i32, @intCast(u >> significandBits)); if (e == 0) { if (u == 0) { @@ -49,12 +49,12 @@ fn ilogbX(comptime T: type, x: T) i32 { // offset sign bit, exponent bits, and integer bit (if present) + bias const offset = 1 + exponentBits + @as(comptime_int, @intFromBool(T == f80)) - exponentBias; - return offset - @intCast(i32, @clz(u)); + return offset - @as(i32, @intCast(@clz(u))); } if (e == maxExponent) { math.raiseInvalid(); - if (u > @bitCast(Z, math.inf(T))) { + if (u > @as(Z, @bitCast(math.inf(T)))) { return fp_ilogbnan; // u is a NaN } else return maxInt(i32); } diff --git a/lib/std/math/isfinite.zig b/lib/std/math/isfinite.zig index 556f8a2378..36c6cdd062 100644 --- a/lib/std/math/isfinite.zig +++ b/lib/std/math/isfinite.zig @@ -7,7 +7,7 @@ pub fn isFinite(x: anytype) bool { const T = @TypeOf(x); const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); const remove_sign = ~@as(TBits, 0) >> 1; - return @bitCast(TBits, x) & remove_sign < @bitCast(TBits, math.inf(T)); + return @as(TBits, @bitCast(x)) & remove_sign < @as(TBits, @bitCast(math.inf(T))); } test "math.isFinite" { diff --git a/lib/std/math/isinf.zig b/lib/std/math/isinf.zig index ac30470f31..9b3a0a8f4a 100644 --- a/lib/std/math/isinf.zig +++ b/lib/std/math/isinf.zig @@ -7,7 +7,7 @@ pub inline fn isInf(x: anytype) bool { const T = @TypeOf(x); const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); const remove_sign = ~@as(TBits, 0) >> 1; - return @bitCast(TBits, x) & remove_sign == @bitCast(TBits, math.inf(T)); + return @as(TBits, @bitCast(x)) & remove_sign == @as(TBits, @bitCast(math.inf(T))); } /// Returns whether x is an infinity with a positive sign. diff --git a/lib/std/math/isnormal.zig b/lib/std/math/isnormal.zig index 08f848f5df..38b459b54e 100644 --- a/lib/std/math/isnormal.zig +++ b/lib/std/math/isnormal.zig @@ -15,7 +15,7 @@ pub fn isNormal(x: anytype) bool { // The sign bit is removed because all ones would overflow into it. // For f80, even though it has an explicit integer part stored, // the exponent effectively takes priority if mismatching. 
- const value = @bitCast(TBits, x) +% increment_exp; + const value = @as(TBits, @bitCast(x)) +% increment_exp; return value & remove_sign >= (increment_exp << 1); } @@ -35,7 +35,7 @@ test "math.isNormal" { try expect(!isNormal(@as(T, math.floatTrueMin(T)))); // largest subnormal - try expect(!isNormal(@bitCast(T, ~(~@as(TBits, 0) << math.floatFractionalBits(T))))); + try expect(!isNormal(@as(T, @bitCast(~(~@as(TBits, 0) << math.floatFractionalBits(T)))))); // non-finite numbers try expect(!isNormal(-math.inf(T))); @@ -43,6 +43,6 @@ test "math.isNormal" { try expect(!isNormal(math.nan(T))); // overflow edge-case (described in implementation, also see #10133) - try expect(!isNormal(@bitCast(T, ~@as(TBits, 0)))); + try expect(!isNormal(@as(T, @bitCast(~@as(TBits, 0))))); } } diff --git a/lib/std/math/ldexp.zig b/lib/std/math/ldexp.zig index 448e94f8e5..d32a8189b6 100644 --- a/lib/std/math/ldexp.zig +++ b/lib/std/math/ldexp.zig @@ -16,53 +16,53 @@ pub fn ldexp(x: anytype, n: i32) @TypeOf(x) { const max_biased_exponent = 2 * math.floatExponentMax(T); const mantissa_mask = @as(TBits, (1 << mantissa_bits) - 1); - const repr = @bitCast(TBits, x); + const repr = @as(TBits, @bitCast(x)); const sign_bit = repr & (1 << (exponent_bits + mantissa_bits)); if (math.isNan(x) or !math.isFinite(x)) return x; - var exponent: i32 = @intCast(i32, (repr << 1) >> (mantissa_bits + 1)); + var exponent: i32 = @as(i32, @intCast((repr << 1) >> (mantissa_bits + 1))); if (exponent == 0) exponent += (@as(i32, exponent_bits) + @intFromBool(T == f80)) - @clz(repr << 1); if (n >= 0) { if (n > max_biased_exponent - exponent) { // Overflow. Return +/- inf - return @bitCast(T, @bitCast(TBits, math.inf(T)) | sign_bit); + return @as(T, @bitCast(@as(TBits, @bitCast(math.inf(T))) | sign_bit)); } else if (exponent + n <= 0) { // Result is subnormal - return @bitCast(T, (repr << @intCast(Log2Int(TBits), n)) | sign_bit); + return @as(T, @bitCast((repr << @as(Log2Int(TBits), @intCast(n))) | sign_bit)); } else if (exponent <= 0) { // Result is normal, but needs shifting - var result = @intCast(TBits, n + exponent) << mantissa_bits; - result |= (repr << @intCast(Log2Int(TBits), 1 - exponent)) & mantissa_mask; - return @bitCast(T, result | sign_bit); + var result = @as(TBits, @intCast(n + exponent)) << mantissa_bits; + result |= (repr << @as(Log2Int(TBits), @intCast(1 - exponent))) & mantissa_mask; + return @as(T, @bitCast(result | sign_bit)); } // Result needs no shifting - return @bitCast(T, repr + (@intCast(TBits, n) << mantissa_bits)); + return @as(T, @bitCast(repr + (@as(TBits, @intCast(n)) << mantissa_bits))); } else { if (n <= -exponent) { if (n < -(mantissa_bits + exponent)) - return @bitCast(T, sign_bit); // Severe underflow. Return +/- 0 + return @as(T, @bitCast(sign_bit)); // Severe underflow. 
Return +/- 0 // Result underflowed, we need to shift and round - const shift = @intCast(Log2Int(TBits), @min(-n, -(exponent + n) + 1)); + const shift = @as(Log2Int(TBits), @intCast(@min(-n, -(exponent + n) + 1))); const exact_tie: bool = @ctz(repr) == shift - 1; var result = repr & mantissa_mask; if (T != f80) // Include integer bit result |= @as(TBits, @intFromBool(exponent > 0)) << fractional_bits; - result = @intCast(TBits, (result >> (shift - 1))); + result = @as(TBits, @intCast((result >> (shift - 1)))); // Round result, including round-to-even for exact ties result = ((result + 1) >> 1) & ~@as(TBits, @intFromBool(exact_tie)); - return @bitCast(T, result | sign_bit); + return @as(T, @bitCast(result | sign_bit)); } // Result is exact, and needs no shifting - return @bitCast(T, repr - (@intCast(TBits, -n) << mantissa_bits)); + return @as(T, @bitCast(repr - (@as(TBits, @intCast(-n)) << mantissa_bits))); } } @@ -105,8 +105,8 @@ test "math.ldexp" { // Multiplications might flush the denormals to zero, esp. at // runtime, so we manually construct the constants here instead. const Z = std.meta.Int(.unsigned, @bitSizeOf(T)); - const EightTimesTrueMin = @bitCast(T, @as(Z, 8)); - const TwoTimesTrueMin = @bitCast(T, @as(Z, 2)); + const EightTimesTrueMin = @as(T, @bitCast(@as(Z, 8))); + const TwoTimesTrueMin = @as(T, @bitCast(@as(Z, 2))); // subnormals -> subnormals try expect(ldexp(math.floatTrueMin(T), 3) == EightTimesTrueMin); diff --git a/lib/std/math/log.zig b/lib/std/math/log.zig index c1a0f5c8e4..9f27130ce1 100644 --- a/lib/std/math/log.zig +++ b/lib/std/math/log.zig @@ -30,12 +30,12 @@ pub fn log(comptime T: type, base: T, x: T) T { // TODO implement integer log without using float math .Int => |IntType| switch (IntType.signedness) { .signed => @compileError("log not implemented for signed integers"), - .unsigned => return @intFromFloat(T, @floor(@log(@floatFromInt(f64, x)) / @log(float_base))), + .unsigned => return @as(T, @intFromFloat(@floor(@log(@as(f64, @floatFromInt(x))) / @log(float_base)))), }, .Float => { switch (T) { - f32 => return @floatCast(f32, @log(@as(f64, x)) / @log(float_base)), + f32 => return @as(f32, @floatCast(@log(@as(f64, x)) / @log(float_base))), f64 => return @log(x) / @log(float_base), else => @compileError("log not implemented for " ++ @typeName(T)), } diff --git a/lib/std/math/log10.zig b/lib/std/math/log10.zig index 44e5a88445..785f11771c 100644 --- a/lib/std/math/log10.zig +++ b/lib/std/math/log10.zig @@ -49,9 +49,9 @@ pub fn log10_int(x: anytype) Log2Int(@TypeOf(x)) { const bit_size = @typeInfo(T).Int.bits; if (bit_size <= 8) { - return @intCast(OutT, log10_int_u8(x)); + return @as(OutT, @intCast(log10_int_u8(x))); } else if (bit_size <= 16) { - return @intCast(OutT, less_than_5(x)); + return @as(OutT, @intCast(less_than_5(x))); } var val = x; @@ -71,7 +71,7 @@ pub fn log10_int(x: anytype) Log2Int(@TypeOf(x)) { log += 5; } - return @intCast(OutT, log + less_than_5(@intCast(u32, val))); + return @as(OutT, @intCast(log + less_than_5(@as(u32, @intCast(val))))); } fn pow10(comptime y: comptime_int) comptime_int { @@ -134,7 +134,7 @@ inline fn less_than_5(x: u32) u32 { } fn oldlog10(x: anytype) u8 { - return @intFromFloat(u8, @log10(@floatFromInt(f64, x))); + return @as(u8, @intFromFloat(@log10(@as(f64, @floatFromInt(x))))); } test "oldlog10 doesn't work" { @@ -158,7 +158,7 @@ test "log10_int vs old implementation" { inline for (int_types) |T| { const last = @min(maxInt(T), 100_000); for (1..last) |i| { - const x = @intCast(T, i); + const x = @as(T, 
@intCast(i)); try testing.expectEqual(oldlog10(x), log10_int(x)); } @@ -185,10 +185,10 @@ test "log10_int close to powers of 10" { try testing.expectEqual(expected_max_ilog, log10_int(max_val)); for (0..(expected_max_ilog + 1)) |idx| { - const i = @intCast(T, idx); + const i = @as(T, @intCast(idx)); const p: T = try math.powi(T, 10, i); - const b = @intCast(Log2Int(T), i); + const b = @as(Log2Int(T), @intCast(i)); if (p >= 10) { try testing.expectEqual(b - 1, log10_int(p - 9)); diff --git a/lib/std/math/log1p.zig b/lib/std/math/log1p.zig index ad67955a8d..1f986a20c8 100644 --- a/lib/std/math/log1p.zig +++ b/lib/std/math/log1p.zig @@ -33,7 +33,7 @@ fn log1p_32(x: f32) f32 { const Lg3: f32 = 0x91e9ee.0p-25; const Lg4: f32 = 0xf89e26.0p-26; - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); var ix = u; var k: i32 = 1; var f: f32 = undefined; @@ -72,9 +72,9 @@ fn log1p_32(x: f32) f32 { if (k != 0) { const uf = 1 + x; - var iu = @bitCast(u32, uf); + var iu = @as(u32, @bitCast(uf)); iu += 0x3F800000 - 0x3F3504F3; - k = @intCast(i32, iu >> 23) - 0x7F; + k = @as(i32, @intCast(iu >> 23)) - 0x7F; // correction to avoid underflow in c / u if (k < 25) { @@ -86,7 +86,7 @@ fn log1p_32(x: f32) f32 { // u into [sqrt(2)/2, sqrt(2)] iu = (iu & 0x007FFFFF) + 0x3F3504F3; - f = @bitCast(f32, iu) - 1; + f = @as(f32, @bitCast(iu)) - 1; } const s = f / (2.0 + f); @@ -96,7 +96,7 @@ fn log1p_32(x: f32) f32 { const t2 = z * (Lg1 + w * Lg3); const R = t2 + t1; const hfsq = 0.5 * f * f; - const dk = @floatFromInt(f32, k); + const dk = @as(f32, @floatFromInt(k)); return s * (hfsq + R) + (dk * ln2_lo + c) - hfsq + f + dk * ln2_hi; } @@ -112,8 +112,8 @@ fn log1p_64(x: f64) f64 { const Lg6: f64 = 1.531383769920937332e-01; const Lg7: f64 = 1.479819860511658591e-01; - var ix = @bitCast(u64, x); - var hx = @intCast(u32, ix >> 32); + var ix = @as(u64, @bitCast(x)); + var hx = @as(u32, @intCast(ix >> 32)); var k: i32 = 1; var c: f64 = undefined; var f: f64 = undefined; @@ -150,10 +150,10 @@ fn log1p_64(x: f64) f64 { if (k != 0) { const uf = 1 + x; - const hu = @bitCast(u64, uf); - var iu = @intCast(u32, hu >> 32); + const hu = @as(u64, @bitCast(uf)); + var iu = @as(u32, @intCast(hu >> 32)); iu += 0x3FF00000 - 0x3FE6A09E; - k = @intCast(i32, iu >> 20) - 0x3FF; + k = @as(i32, @intCast(iu >> 20)) - 0x3FF; // correction to avoid underflow in c / u if (k < 54) { @@ -166,7 +166,7 @@ fn log1p_64(x: f64) f64 { // u into [sqrt(2)/2, sqrt(2)] iu = (iu & 0x000FFFFF) + 0x3FE6A09E; const iq = (@as(u64, iu) << 32) | (hu & 0xFFFFFFFF); - f = @bitCast(f64, iq) - 1; + f = @as(f64, @bitCast(iq)) - 1; } const hfsq = 0.5 * f * f; @@ -176,7 +176,7 @@ fn log1p_64(x: f64) f64 { const t1 = w * (Lg2 + w * (Lg4 + w * Lg6)); const t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7))); const R = t2 + t1; - const dk = @floatFromInt(f64, k); + const dk = @as(f64, @floatFromInt(k)); return s * (hfsq + R) + (dk * ln2_lo + c) - hfsq + f + dk * ln2_hi; } diff --git a/lib/std/math/modf.zig b/lib/std/math/modf.zig index d12c497729..b9d0083e3c 100644 --- a/lib/std/math/modf.zig +++ b/lib/std/math/modf.zig @@ -37,8 +37,8 @@ pub fn modf(x: anytype) modf_result(@TypeOf(x)) { fn modf32(x: f32) modf32_result { var result: modf32_result = undefined; - const u = @bitCast(u32, x); - const e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F; + const u = @as(u32, @bitCast(x)); + const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F; const us = u & 0x80000000; // TODO: Shouldn't need this. 
@@ -54,26 +54,26 @@ fn modf32(x: f32) modf32_result { if (e == 0x80 and u << 9 != 0) { // nan result.fpart = x; } else { - result.fpart = @bitCast(f32, us); + result.fpart = @as(f32, @bitCast(us)); } return result; } // no integral part if (e < 0) { - result.ipart = @bitCast(f32, us); + result.ipart = @as(f32, @bitCast(us)); result.fpart = x; return result; } - const mask = @as(u32, 0x007FFFFF) >> @intCast(u5, e); + const mask = @as(u32, 0x007FFFFF) >> @as(u5, @intCast(e)); if (u & mask == 0) { result.ipart = x; - result.fpart = @bitCast(f32, us); + result.fpart = @as(f32, @bitCast(us)); return result; } - const uf = @bitCast(f32, u & ~mask); + const uf = @as(f32, @bitCast(u & ~mask)); result.ipart = uf; result.fpart = x - uf; return result; @@ -82,8 +82,8 @@ fn modf32(x: f32) modf32_result { fn modf64(x: f64) modf64_result { var result: modf64_result = undefined; - const u = @bitCast(u64, x); - const e = @intCast(i32, (u >> 52) & 0x7FF) - 0x3FF; + const u = @as(u64, @bitCast(x)); + const e = @as(i32, @intCast((u >> 52) & 0x7FF)) - 0x3FF; const us = u & (1 << 63); if (math.isInf(x)) { @@ -98,26 +98,26 @@ fn modf64(x: f64) modf64_result { if (e == 0x400 and u << 12 != 0) { // nan result.fpart = x; } else { - result.fpart = @bitCast(f64, us); + result.fpart = @as(f64, @bitCast(us)); } return result; } // no integral part if (e < 0) { - result.ipart = @bitCast(f64, us); + result.ipart = @as(f64, @bitCast(us)); result.fpart = x; return result; } - const mask = @as(u64, maxInt(u64) >> 12) >> @intCast(u6, e); + const mask = @as(u64, maxInt(u64) >> 12) >> @as(u6, @intCast(e)); if (u & mask == 0) { result.ipart = x; - result.fpart = @bitCast(f64, us); + result.fpart = @as(f64, @bitCast(us)); return result; } - const uf = @bitCast(f64, u & ~mask); + const uf = @as(f64, @bitCast(u & ~mask)); result.ipart = uf; result.fpart = x - uf; return result; diff --git a/lib/std/math/pow.zig b/lib/std/math/pow.zig index 7643e143e3..36aef966cf 100644 --- a/lib/std/math/pow.zig +++ b/lib/std/math/pow.zig @@ -144,7 +144,7 @@ pub fn pow(comptime T: type, x: T, y: T) T { var xe = r2.exponent; var x1 = r2.significand; - var i = @intFromFloat(std.meta.Int(.signed, @typeInfo(T).Float.bits), yi); + var i = @as(std.meta.Int(.signed, @typeInfo(T).Float.bits), @intFromFloat(yi)); while (i != 0) : (i >>= 1) { const overflow_shift = math.floatExponentBits(T) + 1; if (xe < -(1 << overflow_shift) or (1 << overflow_shift) < xe) { @@ -179,7 +179,7 @@ pub fn pow(comptime T: type, x: T, y: T) T { fn isOddInteger(x: f64) bool { const r = math.modf(x); - return r.fpart == 0.0 and @intFromFloat(i64, r.ipart) & 1 == 1; + return r.fpart == 0.0 and @as(i64, @intFromFloat(r.ipart)) & 1 == 1; } test "math.pow" { diff --git a/lib/std/math/signbit.zig b/lib/std/math/signbit.zig index 9aab487d37..df061568b1 100644 --- a/lib/std/math/signbit.zig +++ b/lib/std/math/signbit.zig @@ -6,7 +6,7 @@ const expect = std.testing.expect; pub fn signbit(x: anytype) bool { const T = @TypeOf(x); const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); - return @bitCast(TBits, x) >> (@bitSizeOf(T) - 1) != 0; + return @as(TBits, @bitCast(x)) >> (@bitSizeOf(T) - 1) != 0; } test "math.signbit" { diff --git a/lib/std/math/sinh.zig b/lib/std/math/sinh.zig index 5ec47fa3b5..0082f61d3f 100644 --- a/lib/std/math/sinh.zig +++ b/lib/std/math/sinh.zig @@ -29,9 +29,9 @@ pub fn sinh(x: anytype) @TypeOf(x) { // = (exp(x) - 1 + (exp(x) - 1) / exp(x)) / 2 // = x + x^3 / 6 + o(x^5) fn sinh32(x: f32) f32 { - const u = @bitCast(u32, x); + const u = @as(u32, 
@bitCast(x)); const ux = u & 0x7FFFFFFF; - const ax = @bitCast(f32, ux); + const ax = @as(f32, @bitCast(ux)); if (x == 0.0 or math.isNan(x)) { return x; @@ -60,9 +60,9 @@ fn sinh32(x: f32) f32 { } fn sinh64(x: f64) f64 { - const u = @bitCast(u64, x); - const w = @intCast(u32, u >> 32) & (maxInt(u32) >> 1); - const ax = @bitCast(f64, u & (maxInt(u64) >> 1)); + const u = @as(u64, @bitCast(x)); + const w = @as(u32, @intCast(u >> 32)) & (maxInt(u32) >> 1); + const ax = @as(f64, @bitCast(u & (maxInt(u64) >> 1))); if (x == 0.0 or math.isNan(x)) { return x; diff --git a/lib/std/math/sqrt.zig b/lib/std/math/sqrt.zig index 926582034e..0dd5381cd9 100644 --- a/lib/std/math/sqrt.zig +++ b/lib/std/math/sqrt.zig @@ -57,7 +57,7 @@ fn sqrt_int(comptime T: type, value: T) Sqrt(T) { one >>= 2; } - return @intCast(Sqrt(T), res); + return @as(Sqrt(T), @intCast(res)); } } diff --git a/lib/std/math/tanh.zig b/lib/std/math/tanh.zig index dcde79a925..9c9a3e6801 100644 --- a/lib/std/math/tanh.zig +++ b/lib/std/math/tanh.zig @@ -29,9 +29,9 @@ pub fn tanh(x: anytype) @TypeOf(x) { // = (exp(2x) - 1) / (exp(2x) - 1 + 2) // = (1 - exp(-2x)) / (exp(-2x) - 1 + 2) fn tanh32(x: f32) f32 { - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); const ux = u & 0x7FFFFFFF; - const ax = @bitCast(f32, ux); + const ax = @as(f32, @bitCast(ux)); const sign = (u >> 31) != 0; var t: f32 = undefined; @@ -66,10 +66,10 @@ fn tanh32(x: f32) f32 { } fn tanh64(x: f64) f64 { - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); const ux = u & 0x7FFFFFFFFFFFFFFF; - const w = @intCast(u32, ux >> 32); - const ax = @bitCast(f64, ux); + const w = @as(u32, @intCast(ux >> 32)); + const ax = @as(f64, @bitCast(ux)); const sign = (u >> 63) != 0; var t: f64 = undefined; @@ -96,7 +96,7 @@ fn tanh64(x: f64) f64 { } // |x| is subnormal else { - math.doNotOptimizeAway(@floatCast(f32, ax)); + math.doNotOptimizeAway(@as(f32, @floatCast(ax))); t = ax; } diff --git a/lib/std/mem.zig b/lib/std/mem.zig index bbeecdda23..229bc0b63e 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -69,7 +69,7 @@ pub fn ValidationAllocator(comptime T: type) type { ret_addr: usize, ) ?[*]u8 { assert(n > 0); - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); const underlying = self.getUnderlyingAllocatorPtr(); const result = underlying.rawAlloc(n, log2_ptr_align, ret_addr) orelse return null; @@ -84,7 +84,7 @@ pub fn ValidationAllocator(comptime T: type) type { new_len: usize, ret_addr: usize, ) bool { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); assert(buf.len > 0); const underlying = self.getUnderlyingAllocatorPtr(); return underlying.rawResize(buf, log2_buf_align, new_len, ret_addr); @@ -96,7 +96,7 @@ pub fn ValidationAllocator(comptime T: type) type { log2_buf_align: u8, ret_addr: usize, ) void { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); assert(buf.len > 0); const underlying = self.getUnderlyingAllocatorPtr(); underlying.rawFree(buf, log2_buf_align, ret_addr); @@ -169,7 +169,7 @@ test "Allocator.resize" { var values = try testing.allocator.alloc(T, 100); defer testing.allocator.free(values); - for (values, 0..) |*v, i| v.* = @intCast(T, i); + for (values, 0..) |*v, i| v.* = @as(T, @intCast(i)); if (!testing.allocator.resize(values, values.len + 10)) return error.OutOfMemory; values = values.ptr[0 .. 
values.len + 10]; try testing.expect(values.len == 110); @@ -185,7 +185,7 @@ test "Allocator.resize" { var values = try testing.allocator.alloc(T, 100); defer testing.allocator.free(values); - for (values, 0..) |*v, i| v.* = @floatFromInt(T, i); + for (values, 0..) |*v, i| v.* = @as(T, @floatFromInt(i)); if (!testing.allocator.resize(values, values.len + 10)) return error.OutOfMemory; values = values.ptr[0 .. values.len + 10]; try testing.expect(values.len == 110); @@ -233,7 +233,7 @@ pub fn zeroes(comptime T: type) T { return @as(T, 0); }, .Enum, .EnumLiteral => { - return @enumFromInt(T, 0); + return @as(T, @enumFromInt(0)); }, .Void => { return {}; @@ -264,7 +264,7 @@ pub fn zeroes(comptime T: type) T { switch (ptr_info.size) { .Slice => { if (ptr_info.sentinel) |sentinel| { - if (ptr_info.child == u8 and @ptrCast(*const u8, sentinel).* == 0) { + if (ptr_info.child == u8 and @as(*const u8, @ptrCast(sentinel)).* == 0) { return ""; // A special case for the most common use-case: null-terminated strings. } @compileError("Can't set a sentinel slice to zero. This would require allocating memory."); @@ -282,7 +282,7 @@ pub fn zeroes(comptime T: type) T { }, .Array => |info| { if (info.sentinel) |sentinel_ptr| { - const sentinel = @ptrCast(*align(1) const info.child, sentinel_ptr).*; + const sentinel = @as(*align(1) const info.child, @ptrCast(sentinel_ptr)).*; return [_:sentinel]info.child{zeroes(info.child)} ** info.len; } return [_]info.child{zeroes(info.child)} ** info.len; @@ -456,7 +456,7 @@ pub fn zeroInit(comptime T: type, init: anytype) T { }, } } else if (field.default_value) |default_value_ptr| { - const default_value = @ptrCast(*align(1) const field.type, default_value_ptr).*; + const default_value = @as(*align(1) const field.type, @ptrCast(default_value_ptr)).*; @field(value, field.name) = default_value; } else { switch (@typeInfo(field.type)) { @@ -709,7 +709,7 @@ pub fn span(ptr: anytype) Span(@TypeOf(ptr)) { const l = len(ptr); const ptr_info = @typeInfo(Result).Pointer; if (ptr_info.sentinel) |s_ptr| { - const s = @ptrCast(*align(1) const ptr_info.child, s_ptr).*; + const s = @as(*align(1) const ptr_info.child, @ptrCast(s_ptr)).*; return ptr[0..l :s]; } else { return ptr[0..l]; @@ -740,7 +740,7 @@ fn SliceTo(comptime T: type, comptime end: meta.Elem(T)) type { // to find the value searched for, which is only the case if it matches // the sentinel of the type passed. if (array_info.sentinel) |sentinel_ptr| { - const sentinel = @ptrCast(*align(1) const array_info.child, sentinel_ptr).*; + const sentinel = @as(*align(1) const array_info.child, @ptrCast(sentinel_ptr)).*; if (end == sentinel) { new_ptr_info.sentinel = &end; } else { @@ -755,7 +755,7 @@ fn SliceTo(comptime T: type, comptime end: meta.Elem(T)) type { // to find the value searched for, which is only the case if it matches // the sentinel of the type passed. 
if (ptr_info.sentinel) |sentinel_ptr| { - const sentinel = @ptrCast(*align(1) const ptr_info.child, sentinel_ptr).*; + const sentinel = @as(*align(1) const ptr_info.child, @ptrCast(sentinel_ptr)).*; if (end == sentinel) { new_ptr_info.sentinel = &end; } else { @@ -793,7 +793,7 @@ pub fn sliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) SliceTo(@Typ const length = lenSliceTo(ptr, end); const ptr_info = @typeInfo(Result).Pointer; if (ptr_info.sentinel) |s_ptr| { - const s = @ptrCast(*align(1) const ptr_info.child, s_ptr).*; + const s = @as(*align(1) const ptr_info.child, @ptrCast(s_ptr)).*; return ptr[0..length :s]; } else { return ptr[0..length]; @@ -810,11 +810,11 @@ test "sliceTo" { try testing.expectEqualSlices(u16, array[0..2], sliceTo(&array, 3)); try testing.expectEqualSlices(u16, array[0..2], sliceTo(array[0..3], 3)); - const sentinel_ptr = @ptrCast([*:5]u16, &array); + const sentinel_ptr = @as([*:5]u16, @ptrCast(&array)); try testing.expectEqualSlices(u16, array[0..2], sliceTo(sentinel_ptr, 3)); try testing.expectEqualSlices(u16, array[0..4], sliceTo(sentinel_ptr, 99)); - const optional_sentinel_ptr = @ptrCast(?[*:5]u16, &array); + const optional_sentinel_ptr = @as(?[*:5]u16, @ptrCast(&array)); try testing.expectEqualSlices(u16, array[0..2], sliceTo(optional_sentinel_ptr, 3).?); try testing.expectEqualSlices(u16, array[0..4], sliceTo(optional_sentinel_ptr, 99).?); @@ -846,7 +846,7 @@ fn lenSliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) usize { .One => switch (@typeInfo(ptr_info.child)) { .Array => |array_info| { if (array_info.sentinel) |sentinel_ptr| { - const sentinel = @ptrCast(*align(1) const array_info.child, sentinel_ptr).*; + const sentinel = @as(*align(1) const array_info.child, @ptrCast(sentinel_ptr)).*; if (sentinel == end) { return indexOfSentinel(array_info.child, end, ptr); } @@ -856,7 +856,7 @@ fn lenSliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) usize { else => {}, }, .Many => if (ptr_info.sentinel) |sentinel_ptr| { - const sentinel = @ptrCast(*align(1) const ptr_info.child, sentinel_ptr).*; + const sentinel = @as(*align(1) const ptr_info.child, @ptrCast(sentinel_ptr)).*; // We may be looking for something other than the sentinel, // but iterating past the sentinel would be a bug so we need // to check for both. 
@@ -870,7 +870,7 @@ fn lenSliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) usize { }, .Slice => { if (ptr_info.sentinel) |sentinel_ptr| { - const sentinel = @ptrCast(*align(1) const ptr_info.child, sentinel_ptr).*; + const sentinel = @as(*align(1) const ptr_info.child, @ptrCast(sentinel_ptr)).*; if (sentinel == end) { return indexOfSentinel(ptr_info.child, sentinel, ptr); } @@ -893,7 +893,7 @@ test "lenSliceTo" { try testing.expectEqual(@as(usize, 2), lenSliceTo(&array, 3)); try testing.expectEqual(@as(usize, 2), lenSliceTo(array[0..3], 3)); - const sentinel_ptr = @ptrCast([*:5]u16, &array); + const sentinel_ptr = @as([*:5]u16, @ptrCast(&array)); try testing.expectEqual(@as(usize, 2), lenSliceTo(sentinel_ptr, 3)); try testing.expectEqual(@as(usize, 4), lenSliceTo(sentinel_ptr, 99)); @@ -925,7 +925,7 @@ pub fn len(value: anytype) usize { .Many => { const sentinel_ptr = info.sentinel orelse @compileError("invalid type given to std.mem.len: " ++ @typeName(@TypeOf(value))); - const sentinel = @ptrCast(*align(1) const info.child, sentinel_ptr).*; + const sentinel = @as(*align(1) const info.child, @ptrCast(sentinel_ptr)).*; return indexOfSentinel(info.child, sentinel, value); }, .C => { @@ -1331,7 +1331,7 @@ pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: Endian) .Little => { const ShiftType = math.Log2Int(ReturnType); for (bytes, 0..) |b, index| { - result = result | (@as(ReturnType, b) << @intCast(ShiftType, index * 8)); + result = result | (@as(ReturnType, b) << @as(ShiftType, @intCast(index * 8))); } }, } @@ -1359,8 +1359,8 @@ pub fn readVarPackedInt( const Log2N = std.math.Log2Int(T); const read_size = (bit_count + (bit_offset % 8) + 7) / 8; - const bit_shift = @intCast(u3, bit_offset % 8); - const pad = @intCast(Log2N, @bitSizeOf(T) - bit_count); + const bit_shift = @as(u3, @intCast(bit_offset % 8)); + const pad = @as(Log2N, @intCast(@bitSizeOf(T) - bit_count)); const lowest_byte = switch (endian) { .Big => bytes.len - (bit_offset / 8) - read_size, @@ -1372,17 +1372,17 @@ pub fn readVarPackedInt( // These are the same shifts/masks we perform below, but adds `@truncate`/`@intCast` // where needed since int is smaller than a byte. const value = if (read_size == 1) b: { - break :b @truncate(uN, read_bytes[0] >> bit_shift); + break :b @as(uN, @truncate(read_bytes[0] >> bit_shift)); } else b: { const i: u1 = @intFromBool(endian == .Big); - const head = @truncate(uN, read_bytes[i] >> bit_shift); - const tail_shift = @intCast(Log2N, @as(u4, 8) - bit_shift); - const tail = @truncate(uN, read_bytes[1 - i]); + const head = @as(uN, @truncate(read_bytes[i] >> bit_shift)); + const tail_shift = @as(Log2N, @intCast(@as(u4, 8) - bit_shift)); + const tail = @as(uN, @truncate(read_bytes[1 - i])); break :b (tail << tail_shift) | head; }; switch (signedness) { - .signed => return @intCast(T, (@bitCast(iN, value) << pad) >> pad), - .unsigned => return @intCast(T, (@bitCast(uN, value) << pad) >> pad), + .signed => return @as(T, @intCast((@as(iN, @bitCast(value)) << pad) >> pad)), + .unsigned => return @as(T, @intCast((@as(uN, @bitCast(value)) << pad) >> pad)), } } @@ -1398,13 +1398,13 @@ pub fn readVarPackedInt( .Little => { int = read_bytes[0] >> bit_shift; for (read_bytes[1..], 0..) 
|elem, i| { - int |= (@as(uN, elem) << @intCast(Log2N, (8 * (i + 1) - bit_shift))); + int |= (@as(uN, elem) << @as(Log2N, @intCast((8 * (i + 1) - bit_shift)))); } }, } switch (signedness) { - .signed => return @intCast(T, (@bitCast(iN, int) << pad) >> pad), - .unsigned => return @intCast(T, (@bitCast(uN, int) << pad) >> pad), + .signed => return @as(T, @intCast((@as(iN, @bitCast(int)) << pad) >> pad)), + .unsigned => return @as(T, @intCast((@as(uN, @bitCast(int)) << pad) >> pad)), } } @@ -1414,7 +1414,7 @@ pub fn readVarPackedInt( /// Assumes the endianness of memory is native. This means the function can /// simply pointer cast memory. pub fn readIntNative(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T { - return @ptrCast(*align(1) const T, bytes).*; + return @as(*align(1) const T, @ptrCast(bytes)).*; } /// Reads an integer from memory with bit count specified by T. @@ -1480,10 +1480,10 @@ fn readPackedIntLittle(comptime T: type, bytes: []const u8, bit_offset: usize) T const Log2N = std.math.Log2Int(T); const bit_count = @as(usize, @bitSizeOf(T)); - const bit_shift = @intCast(u3, bit_offset % 8); + const bit_shift = @as(u3, @intCast(bit_offset % 8)); const load_size = (bit_count + 7) / 8; - const load_tail_bits = @intCast(u3, (load_size * 8) - bit_count); + const load_tail_bits = @as(u3, @intCast((load_size * 8) - bit_count)); const LoadInt = std.meta.Int(.unsigned, load_size * 8); if (bit_count == 0) @@ -1492,13 +1492,13 @@ fn readPackedIntLittle(comptime T: type, bytes: []const u8, bit_offset: usize) T // Read by loading a LoadInt, and then follow it up with a 1-byte read // of the tail if bit_offset pushed us over a byte boundary. const read_bytes = bytes[bit_offset / 8 ..]; - const val = @truncate(uN, readIntLittle(LoadInt, read_bytes[0..load_size]) >> bit_shift); + const val = @as(uN, @truncate(readIntLittle(LoadInt, read_bytes[0..load_size]) >> bit_shift)); if (bit_shift > load_tail_bits) { - const tail_bits = @intCast(Log2N, bit_shift - load_tail_bits); + const tail_bits = @as(Log2N, @intCast(bit_shift - load_tail_bits)); const tail_byte = read_bytes[load_size]; - const tail_truncated = if (bit_count < 8) @truncate(uN, tail_byte) else @as(uN, tail_byte); - return @bitCast(T, val | (tail_truncated << (@truncate(Log2N, bit_count) -% tail_bits))); - } else return @bitCast(T, val); + const tail_truncated = if (bit_count < 8) @as(uN, @truncate(tail_byte)) else @as(uN, tail_byte); + return @as(T, @bitCast(val | (tail_truncated << (@as(Log2N, @truncate(bit_count)) -% tail_bits)))); + } else return @as(T, @bitCast(val)); } fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T { @@ -1506,11 +1506,11 @@ fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T { const Log2N = std.math.Log2Int(T); const bit_count = @as(usize, @bitSizeOf(T)); - const bit_shift = @intCast(u3, bit_offset % 8); + const bit_shift = @as(u3, @intCast(bit_offset % 8)); const byte_count = (@as(usize, bit_shift) + bit_count + 7) / 8; const load_size = (bit_count + 7) / 8; - const load_tail_bits = @intCast(u3, (load_size * 8) - bit_count); + const load_tail_bits = @as(u3, @intCast((load_size * 8) - bit_count)); const LoadInt = std.meta.Int(.unsigned, load_size * 8); if (bit_count == 0) @@ -1520,12 +1520,12 @@ fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T { // of the tail if bit_offset pushed us over a byte boundary. 
const end = bytes.len - (bit_offset / 8); const read_bytes = bytes[(end - byte_count)..end]; - const val = @truncate(uN, readIntBig(LoadInt, bytes[(end - load_size)..end][0..load_size]) >> bit_shift); + const val = @as(uN, @truncate(readIntBig(LoadInt, bytes[(end - load_size)..end][0..load_size]) >> bit_shift)); if (bit_shift > load_tail_bits) { - const tail_bits = @intCast(Log2N, bit_shift - load_tail_bits); - const tail_byte = if (bit_count < 8) @truncate(uN, read_bytes[0]) else @as(uN, read_bytes[0]); - return @bitCast(T, val | (tail_byte << (@truncate(Log2N, bit_count) -% tail_bits))); - } else return @bitCast(T, val); + const tail_bits = @as(Log2N, @intCast(bit_shift - load_tail_bits)); + const tail_byte = if (bit_count < 8) @as(uN, @truncate(read_bytes[0])) else @as(uN, read_bytes[0]); + return @as(T, @bitCast(val | (tail_byte << (@as(Log2N, @truncate(bit_count)) -% tail_bits)))); + } else return @as(T, @bitCast(val)); } pub const readPackedIntNative = switch (native_endian) { @@ -1605,7 +1605,7 @@ test "readIntBig and readIntLittle" { /// This function stores in native endian, which means it is implemented as a simple /// memory store. pub fn writeIntNative(comptime T: type, buf: *[(@typeInfo(T).Int.bits + 7) / 8]u8, value: T) void { - @ptrCast(*align(1) T, buf).* = value; + @as(*align(1) T, @ptrCast(buf)).* = value; } /// Writes an integer to memory, storing it in twos-complement. @@ -1642,10 +1642,10 @@ fn writePackedIntLittle(comptime T: type, bytes: []u8, bit_offset: usize, value: const Log2N = std.math.Log2Int(T); const bit_count = @as(usize, @bitSizeOf(T)); - const bit_shift = @intCast(u3, bit_offset % 8); + const bit_shift = @as(u3, @intCast(bit_offset % 8)); const store_size = (@bitSizeOf(T) + 7) / 8; - const store_tail_bits = @intCast(u3, (store_size * 8) - bit_count); + const store_tail_bits = @as(u3, @intCast((store_size * 8) - bit_count)); const StoreInt = std.meta.Int(.unsigned, store_size * 8); if (bit_count == 0) @@ -1656,11 +1656,11 @@ fn writePackedIntLittle(comptime T: type, bytes: []u8, bit_offset: usize, value: const write_bytes = bytes[bit_offset / 8 ..]; const head = write_bytes[0] & ((@as(u8, 1) << bit_shift) - 1); - var write_value = (@as(StoreInt, @bitCast(uN, value)) << bit_shift) | @intCast(StoreInt, head); + var write_value = (@as(StoreInt, @as(uN, @bitCast(value))) << bit_shift) | @as(StoreInt, @intCast(head)); if (bit_shift > store_tail_bits) { - const tail_len = @intCast(Log2N, bit_shift - store_tail_bits); - write_bytes[store_size] &= ~((@as(u8, 1) << @intCast(u3, tail_len)) - 1); - write_bytes[store_size] |= @intCast(u8, (@bitCast(uN, value) >> (@truncate(Log2N, bit_count) -% tail_len))); + const tail_len = @as(Log2N, @intCast(bit_shift - store_tail_bits)); + write_bytes[store_size] &= ~((@as(u8, 1) << @as(u3, @intCast(tail_len))) - 1); + write_bytes[store_size] |= @as(u8, @intCast((@as(uN, @bitCast(value)) >> (@as(Log2N, @truncate(bit_count)) -% tail_len)))); } else if (bit_shift < store_tail_bits) { const tail_len = store_tail_bits - bit_shift; const tail = write_bytes[store_size - 1] & (@as(u8, 0xfe) << (7 - tail_len)); @@ -1675,11 +1675,11 @@ fn writePackedIntBig(comptime T: type, bytes: []u8, bit_offset: usize, value: T) const Log2N = std.math.Log2Int(T); const bit_count = @as(usize, @bitSizeOf(T)); - const bit_shift = @intCast(u3, bit_offset % 8); + const bit_shift = @as(u3, @intCast(bit_offset % 8)); const byte_count = (bit_shift + bit_count + 7) / 8; const store_size = (@bitSizeOf(T) + 7) / 8; - const store_tail_bits = @intCast(u3, 
(store_size * 8) - bit_count); + const store_tail_bits = @as(u3, @intCast((store_size * 8) - bit_count)); const StoreInt = std.meta.Int(.unsigned, store_size * 8); if (bit_count == 0) @@ -1691,11 +1691,11 @@ fn writePackedIntBig(comptime T: type, bytes: []u8, bit_offset: usize, value: T) const write_bytes = bytes[(end - byte_count)..end]; const head = write_bytes[byte_count - 1] & ((@as(u8, 1) << bit_shift) - 1); - var write_value = (@as(StoreInt, @bitCast(uN, value)) << bit_shift) | @intCast(StoreInt, head); + var write_value = (@as(StoreInt, @as(uN, @bitCast(value))) << bit_shift) | @as(StoreInt, @intCast(head)); if (bit_shift > store_tail_bits) { - const tail_len = @intCast(Log2N, bit_shift - store_tail_bits); - write_bytes[0] &= ~((@as(u8, 1) << @intCast(u3, tail_len)) - 1); - write_bytes[0] |= @intCast(u8, (@bitCast(uN, value) >> (@truncate(Log2N, bit_count) -% tail_len))); + const tail_len = @as(Log2N, @intCast(bit_shift - store_tail_bits)); + write_bytes[0] &= ~((@as(u8, 1) << @as(u3, @intCast(tail_len))) - 1); + write_bytes[0] |= @as(u8, @intCast((@as(uN, @bitCast(value)) >> (@as(Log2N, @truncate(bit_count)) -% tail_len)))); } else if (bit_shift < store_tail_bits) { const tail_len = store_tail_bits - bit_shift; const tail = write_bytes[0] & (@as(u8, 0xfe) << (7 - tail_len)); @@ -1744,14 +1744,14 @@ pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void { return @memset(buffer, 0); } else if (@typeInfo(T).Int.bits == 8) { @memset(buffer, 0); - buffer[0] = @bitCast(u8, value); + buffer[0] = @as(u8, @bitCast(value)); return; } // TODO I want to call writeIntLittle here but comptime eval facilities aren't good enough const uint = std.meta.Int(.unsigned, @typeInfo(T).Int.bits); - var bits = @bitCast(uint, value); + var bits = @as(uint, @bitCast(value)); for (buffer) |*b| { - b.* = @truncate(u8, bits); + b.* = @as(u8, @truncate(bits)); bits >>= 8; } } @@ -1768,17 +1768,17 @@ pub fn writeIntSliceBig(comptime T: type, buffer: []u8, value: T) void { return @memset(buffer, 0); } else if (@typeInfo(T).Int.bits == 8) { @memset(buffer, 0); - buffer[buffer.len - 1] = @bitCast(u8, value); + buffer[buffer.len - 1] = @as(u8, @bitCast(value)); return; } // TODO I want to call writeIntBig here but comptime eval facilities aren't good enough const uint = std.meta.Int(.unsigned, @typeInfo(T).Int.bits); - var bits = @bitCast(uint, value); + var bits = @as(uint, @bitCast(value)); var index: usize = buffer.len; while (index != 0) { index -= 1; - buffer[index] = @truncate(u8, bits); + buffer[index] = @as(u8, @truncate(bits)); bits >>= 8; } } @@ -1822,7 +1822,7 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value const uN = std.meta.Int(.unsigned, @bitSizeOf(T)); const Log2N = std.math.Log2Int(T); - const bit_shift = @intCast(u3, bit_offset % 8); + const bit_shift = @as(u3, @intCast(bit_offset % 8)); const write_size = (bit_count + bit_shift + 7) / 8; const lowest_byte = switch (endian) { .Big => bytes.len - (bit_offset / 8) - write_size, @@ -1833,8 +1833,8 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value if (write_size == 1) { // Single byte writes are handled specially, since we need to mask bits // on both ends of the byte. 
- const mask = (@as(u8, 0xff) >> @intCast(u3, 8 - bit_count)); - const new_bits = @intCast(u8, @bitCast(uN, value) & mask) << bit_shift; + const mask = (@as(u8, 0xff) >> @as(u3, @intCast(8 - bit_count))); + const new_bits = @as(u8, @intCast(@as(uN, @bitCast(value)) & mask)) << bit_shift; write_bytes[0] = (write_bytes[0] & ~(mask << bit_shift)) | new_bits; return; } @@ -1843,31 +1843,31 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value // Iterate bytes forward for Little-endian, backward for Big-endian const delta: i2 = if (endian == .Big) -1 else 1; - const start = if (endian == .Big) @intCast(isize, write_bytes.len - 1) else 0; + const start = if (endian == .Big) @as(isize, @intCast(write_bytes.len - 1)) else 0; var i: isize = start; // isize for signed index arithmetic // Write first byte, using a mask to protects bits preceding bit_offset const head_mask = @as(u8, 0xff) >> bit_shift; - write_bytes[@intCast(usize, i)] &= ~(head_mask << bit_shift); - write_bytes[@intCast(usize, i)] |= @intCast(u8, @bitCast(uN, remaining) & head_mask) << bit_shift; - remaining >>= @intCast(Log2N, @as(u4, 8) - bit_shift); + write_bytes[@as(usize, @intCast(i))] &= ~(head_mask << bit_shift); + write_bytes[@as(usize, @intCast(i))] |= @as(u8, @intCast(@as(uN, @bitCast(remaining)) & head_mask)) << bit_shift; + remaining >>= @as(Log2N, @intCast(@as(u4, 8) - bit_shift)); i += delta; // Write bytes[1..bytes.len - 1] if (@bitSizeOf(T) > 8) { - const loop_end = start + delta * (@intCast(isize, write_size) - 1); + const loop_end = start + delta * (@as(isize, @intCast(write_size)) - 1); while (i != loop_end) : (i += delta) { - write_bytes[@intCast(usize, i)] = @truncate(u8, @bitCast(uN, remaining)); + write_bytes[@as(usize, @intCast(i))] = @as(u8, @truncate(@as(uN, @bitCast(remaining)))); remaining >>= 8; } } // Write last byte, using a mask to protect bits following bit_offset + bit_count - const following_bits = -%@truncate(u3, bit_shift + bit_count); + const following_bits = -%@as(u3, @truncate(bit_shift + bit_count)); const tail_mask = (@as(u8, 0xff) << following_bits) >> following_bits; - write_bytes[@intCast(usize, i)] &= ~tail_mask; - write_bytes[@intCast(usize, i)] |= @intCast(u8, @bitCast(uN, remaining) & tail_mask); + write_bytes[@as(usize, @intCast(i))] &= ~tail_mask; + write_bytes[@as(usize, @intCast(i))] |= @as(u8, @intCast(@as(uN, @bitCast(remaining)) & tail_mask)); } test "writeIntBig and writeIntLittle" { @@ -3799,15 +3799,14 @@ pub fn alignPointerOffset(ptr: anytype, align_to: usize) ?usize { /// type. pub fn alignPointer(ptr: anytype, align_to: usize) ?@TypeOf(ptr) { const adjust_off = alignPointerOffset(ptr, align_to) orelse return null; - const T = @TypeOf(ptr); // Avoid the use of ptrFromInt to avoid losing the pointer provenance info. - return @alignCast(@typeInfo(T).Pointer.alignment, ptr + adjust_off); + return @alignCast(ptr + adjust_off); } test "alignPointer" { const S = struct { fn checkAlign(comptime T: type, base: usize, align_to: usize, expected: usize) !void { - var ptr = @ptrFromInt(T, base); + var ptr = @as(T, @ptrFromInt(base)); var aligned = alignPointer(ptr, align_to); try testing.expectEqual(expected, @intFromPtr(aligned)); } @@ -3854,9 +3853,7 @@ fn AsBytesReturnType(comptime P: type) type { /// Given a pointer to a single item, returns a slice of the underlying bytes, preserving pointer attributes. 
pub fn asBytes(ptr: anytype) AsBytesReturnType(@TypeOf(ptr)) { - const P = @TypeOf(ptr); - const T = AsBytesReturnType(P); - return @ptrCast(T, @alignCast(meta.alignment(T), ptr)); + return @ptrCast(@alignCast(ptr)); } test "asBytes" { @@ -3902,7 +3899,7 @@ test "asBytes" { test "asBytes preserves pointer attributes" { const inArr: u32 align(16) = 0xDEADBEEF; - const inPtr = @ptrCast(*align(16) const volatile u32, &inArr); + const inPtr = @as(*align(16) const volatile u32, @ptrCast(&inArr)); const outSlice = asBytes(inPtr); const in = @typeInfo(@TypeOf(inPtr)).Pointer; @@ -3948,7 +3945,7 @@ fn BytesAsValueReturnType(comptime T: type, comptime B: type) type { /// Given a pointer to an array of bytes, returns a pointer to a value of the specified type /// backed by those bytes, preserving pointer attributes. pub fn bytesAsValue(comptime T: type, bytes: anytype) BytesAsValueReturnType(T, @TypeOf(bytes)) { - return @ptrCast(BytesAsValueReturnType(T, @TypeOf(bytes)), bytes); + return @as(BytesAsValueReturnType(T, @TypeOf(bytes)), @ptrCast(bytes)); } test "bytesAsValue" { @@ -3993,7 +3990,7 @@ test "bytesAsValue" { test "bytesAsValue preserves pointer attributes" { const inArr align(16) = [4]u8{ 0xDE, 0xAD, 0xBE, 0xEF }; - const inSlice = @ptrCast(*align(16) const volatile [4]u8, &inArr)[0..]; + const inSlice = @as(*align(16) const volatile [4]u8, @ptrCast(&inArr))[0..]; const outPtr = bytesAsValue(u32, inSlice); const in = @typeInfo(@TypeOf(inSlice)).Pointer; @@ -4043,7 +4040,7 @@ pub fn bytesAsSlice(comptime T: type, bytes: anytype) BytesAsSliceReturnType(T, const cast_target = CopyPtrAttrs(@TypeOf(bytes), .Many, T); - return @ptrCast(cast_target, bytes)[0..@divExact(bytes.len, @sizeOf(T))]; + return @as(cast_target, @ptrCast(bytes))[0..@divExact(bytes.len, @sizeOf(T))]; } test "bytesAsSlice" { @@ -4101,7 +4098,7 @@ test "bytesAsSlice with specified alignment" { test "bytesAsSlice preserves pointer attributes" { const inArr align(16) = [4]u8{ 0xDE, 0xAD, 0xBE, 0xEF }; - const inSlice = @ptrCast(*align(16) const volatile [4]u8, &inArr)[0..]; + const inSlice = @as(*align(16) const volatile [4]u8, @ptrCast(&inArr))[0..]; const outSlice = bytesAsSlice(u16, inSlice); const in = @typeInfo(@TypeOf(inSlice)).Pointer; @@ -4133,7 +4130,7 @@ pub fn sliceAsBytes(slice: anytype) SliceAsBytesReturnType(@TypeOf(slice)) { const cast_target = CopyPtrAttrs(Slice, .Many, u8); - return @ptrCast(cast_target, slice)[0 .. slice.len * @sizeOf(meta.Elem(Slice))]; + return @as(cast_target, @ptrCast(slice))[0 .. 
slice.len * @sizeOf(meta.Elem(Slice))]; } test "sliceAsBytes" { @@ -4197,7 +4194,7 @@ test "sliceAsBytes and bytesAsSlice back" { test "sliceAsBytes preserves pointer attributes" { const inArr align(16) = [2]u16{ 0xDEAD, 0xBEEF }; - const inSlice = @ptrCast(*align(16) const volatile [2]u16, &inArr)[0..]; + const inSlice = @as(*align(16) const volatile [2]u16, @ptrCast(&inArr))[0..]; const outSlice = sliceAsBytes(inSlice); const in = @typeInfo(@TypeOf(inSlice)).Pointer; @@ -4218,7 +4215,7 @@ pub fn alignForward(comptime T: type, addr: T, alignment: T) T { } pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize { - const alignment = @as(usize, 1) << @intCast(math.Log2Int(usize), log2_alignment); + const alignment = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_alignment)); return alignForward(usize, addr, alignment); } @@ -4282,7 +4279,7 @@ pub fn doNotOptimizeAway(val: anytype) void { /// .stage2_c doesn't support asm blocks yet, so use volatile stores instead var deopt_target: if (builtin.zig_backend == .stage2_c) u8 else void = undefined; fn doNotOptimizeAwayC(ptr: anytype) void { - const dest = @ptrCast(*volatile u8, &deopt_target); + const dest = @as(*volatile u8, @ptrCast(&deopt_target)); for (asBytes(ptr)) |b| { dest.* = b; } @@ -4433,7 +4430,7 @@ pub fn alignInBytes(bytes: []u8, comptime new_alignment: usize) ?[]align(new_ali error.Overflow => return null, }; const alignment_offset = begin_address_aligned - begin_address; - return @alignCast(new_alignment, bytes[alignment_offset .. alignment_offset + new_length]); + return @alignCast(bytes[alignment_offset .. alignment_offset + new_length]); } /// Returns the largest sub-slice within the given slice that conforms to the new alignment, @@ -4445,7 +4442,7 @@ pub fn alignInSlice(slice: anytype, comptime new_alignment: usize) ?AlignedSlice const Element = @TypeOf(slice[0]); const slice_length_bytes = aligned_bytes.len - (aligned_bytes.len % @sizeOf(Element)); const aligned_slice = bytesAsSlice(Element, aligned_bytes[0..slice_length_bytes]); - return @alignCast(new_alignment, aligned_slice); + return @alignCast(aligned_slice); } test "read/write(Var)PackedInt" { @@ -4490,8 +4487,8 @@ test "read/write(Var)PackedInt" { for ([_]PackedType{ ~@as(PackedType, 0), // all ones: -1 iN / maxInt uN @as(PackedType, 0), // all zeros: 0 iN / 0 uN - @bitCast(PackedType, @as(iPackedType, math.maxInt(iPackedType))), // maxInt iN - @bitCast(PackedType, @as(iPackedType, math.minInt(iPackedType))), // maxInt iN + @as(PackedType, @bitCast(@as(iPackedType, math.maxInt(iPackedType)))), // maxInt iN + @as(PackedType, @bitCast(@as(iPackedType, math.minInt(iPackedType)))), // maxInt iN random.int(PackedType), // random random.int(PackedType), // random }) |write_value| { @@ -4502,11 +4499,11 @@ test "read/write(Var)PackedInt" { // Read const read_value1 = readPackedInt(PackedType, asBytes(&value), offset, native_endian); - try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset)))); + try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset))))))); // Write writePackedInt(PackedType, asBytes(&value), offset, write_value, native_endian); - try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset)))); + try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset))))))); // Read again const read_value2 = readPackedInt(PackedType, asBytes(&value), 
offset, native_endian); @@ -4515,9 +4512,9 @@ test "read/write(Var)PackedInt" { // Verify bits outside of the target integer are unmodified const diff_bits = init_value ^ value; if (offset != offset_at_end) - try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0); + try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0); if (offset != 0) - try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0); + try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0); } { // Fixed-size Read/Write (Foreign-endian) @@ -4527,11 +4524,11 @@ test "read/write(Var)PackedInt" { // Read const read_value1 = readPackedInt(PackedType, asBytes(&value), offset, foreign_endian); - try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset)))); + try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset))))))); // Write writePackedInt(PackedType, asBytes(&value), offset, write_value, foreign_endian); - try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset)))); + try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset))))))); // Read again const read_value2 = readPackedInt(PackedType, asBytes(&value), offset, foreign_endian); @@ -4540,9 +4537,9 @@ test "read/write(Var)PackedInt" { // Verify bits outside of the target integer are unmodified const diff_bits = init_value ^ @byteSwap(value); if (offset != offset_at_end) - try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0); + try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0); if (offset != 0) - try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0); + try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0); } const signedness = @typeInfo(PackedType).Int.signedness; @@ -4559,11 +4556,11 @@ test "read/write(Var)PackedInt" { // Read const read_value1 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), native_endian, signedness); - try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset)))); + try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset))))))); // Write writeVarPackedInt(asBytes(&value), offset, @bitSizeOf(PackedType), @as(U, write_value), native_endian); - try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset)))); + try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset))))))); // Read again const read_value2 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), native_endian, signedness); @@ -4572,9 +4569,9 @@ test "read/write(Var)PackedInt" { // Verify bits outside of the target integer are unmodified const diff_bits = init_value ^ value; if (offset != offset_at_end) - try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0); + try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0); if (offset != 0) - try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0); + try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0); } { // Variable-size Read/Write 
(Foreign-endian) @@ -4587,11 +4584,11 @@ test "read/write(Var)PackedInt" { // Read const read_value1 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), foreign_endian, signedness); - try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset)))); + try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset))))))); // Write writeVarPackedInt(asBytes(&value), offset, @bitSizeOf(PackedType), @as(U, write_value), foreign_endian); - try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset)))); + try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset))))))); // Read again const read_value2 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), foreign_endian, signedness); @@ -4600,9 +4597,9 @@ test "read/write(Var)PackedInt" { // Verify bits outside of the target integer are unmodified const diff_bits = init_value ^ @byteSwap(value); if (offset != offset_at_end) - try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0); + try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0); if (offset != 0) - try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0); + try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0); } } } diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig index 301480f662..214a6443d2 100644 --- a/lib/std/mem/Allocator.zig +++ b/lib/std/mem/Allocator.zig @@ -101,7 +101,7 @@ pub inline fn rawFree(self: Allocator, buf: []u8, log2_buf_align: u8, ret_addr: /// Returns a pointer to undefined memory. /// Call `destroy` with the result to free the memory. pub fn create(self: Allocator, comptime T: type) Error!*T { - if (@sizeOf(T) == 0) return @ptrFromInt(*T, math.maxInt(usize)); + if (@sizeOf(T) == 0) return @as(*T, @ptrFromInt(math.maxInt(usize))); const slice = try self.allocAdvancedWithRetAddr(T, null, 1, @returnAddress()); return &slice[0]; } @@ -112,7 +112,7 @@ pub fn destroy(self: Allocator, ptr: anytype) void { const info = @typeInfo(@TypeOf(ptr)).Pointer; const T = info.child; if (@sizeOf(T) == 0) return; - const non_const_ptr = @ptrCast([*]u8, @constCast(ptr)); + const non_const_ptr = @as([*]u8, @ptrCast(@constCast(ptr))); self.rawFree(non_const_ptr[0..@sizeOf(T)], math.log2(info.alignment), @returnAddress()); } @@ -209,15 +209,15 @@ pub fn allocAdvancedWithRetAddr( if (n == 0) { const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), a); - return @ptrFromInt([*]align(a) T, ptr)[0..0]; + return @as([*]align(a) T, @ptrFromInt(ptr))[0..0]; } const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory; const byte_ptr = self.rawAlloc(byte_count, log2a(a), return_address) orelse return Error.OutOfMemory; // TODO: https://github.com/ziglang/zig/issues/4298 @memset(byte_ptr[0..byte_count], undefined); - const byte_slice = byte_ptr[0..byte_count]; - return mem.bytesAsSlice(T, @alignCast(a, byte_slice)); + const byte_slice: []align(a) u8 = @alignCast(byte_ptr[0..byte_count]); + return mem.bytesAsSlice(T, byte_slice); } /// Requests to modify the size of an allocation. 
It is guaranteed to not move @@ -268,7 +268,7 @@ pub fn reallocAdvanced( if (new_n == 0) { self.free(old_mem); const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), Slice.alignment); - return @ptrFromInt([*]align(Slice.alignment) T, ptr)[0..0]; + return @as([*]align(Slice.alignment) T, @ptrFromInt(ptr))[0..0]; } const old_byte_slice = mem.sliceAsBytes(old_mem); @@ -276,7 +276,8 @@ pub fn reallocAdvanced( // Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure if (mem.isAligned(@intFromPtr(old_byte_slice.ptr), Slice.alignment)) { if (self.rawResize(old_byte_slice, log2a(Slice.alignment), byte_count, return_address)) { - return mem.bytesAsSlice(T, @alignCast(Slice.alignment, old_byte_slice.ptr[0..byte_count])); + const new_bytes: []align(Slice.alignment) u8 = @alignCast(old_byte_slice.ptr[0..byte_count]); + return mem.bytesAsSlice(T, new_bytes); } } @@ -288,7 +289,8 @@ pub fn reallocAdvanced( @memset(old_byte_slice, undefined); self.rawFree(old_byte_slice, log2a(Slice.alignment), return_address); - return mem.bytesAsSlice(T, @alignCast(Slice.alignment, new_mem[0..byte_count])); + const new_bytes: []align(Slice.alignment) u8 = @alignCast(new_mem[0..byte_count]); + return mem.bytesAsSlice(T, new_bytes); } /// Free an array allocated with `alloc`. To free a single item, diff --git a/lib/std/meta.zig b/lib/std/meta.zig index fedbd1a40d..8fe0aee9fb 100644 --- a/lib/std/meta.zig +++ b/lib/std/meta.zig @@ -185,18 +185,18 @@ pub fn sentinel(comptime T: type) ?Elem(T) { switch (@typeInfo(T)) { .Array => |info| { const sentinel_ptr = info.sentinel orelse return null; - return @ptrCast(*const info.child, sentinel_ptr).*; + return @as(*const info.child, @ptrCast(sentinel_ptr)).*; }, .Pointer => |info| { switch (info.size) { .Many, .Slice => { const sentinel_ptr = info.sentinel orelse return null; - return @ptrCast(*align(1) const info.child, sentinel_ptr).*; + return @as(*align(1) const info.child, @ptrCast(sentinel_ptr)).*; }, .One => switch (@typeInfo(info.child)) { .Array => |array_info| { const sentinel_ptr = array_info.sentinel orelse return null; - return @ptrCast(*align(1) const array_info.child, sentinel_ptr).*; + return @as(*align(1) const array_info.child, @ptrCast(sentinel_ptr)).*; }, else => {}, }, @@ -241,7 +241,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type { .Array = .{ .len = array_info.len, .child = array_info.child, - .sentinel = @ptrCast(?*const anyopaque, &sentinel_val), + .sentinel = @as(?*const anyopaque, @ptrCast(&sentinel_val)), }, }), .is_allowzero = info.is_allowzero, @@ -259,7 +259,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type { .address_space = info.address_space, .child = info.child, .is_allowzero = info.is_allowzero, - .sentinel = @ptrCast(?*const anyopaque, &sentinel_val), + .sentinel = @as(?*const anyopaque, @ptrCast(&sentinel_val)), }, }), else => {}, @@ -277,7 +277,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type { .address_space = ptr_info.address_space, .child = ptr_info.child, .is_allowzero = ptr_info.is_allowzero, - .sentinel = @ptrCast(?*const anyopaque, &sentinel_val), + .sentinel = @as(?*const anyopaque, @ptrCast(&sentinel_val)), }, }), }, @@ -929,8 +929,8 @@ test "intToEnum with error return" { try testing.expect(intToEnum(E1, zero) catch unreachable == E1.A); try testing.expect(intToEnum(E2, one) catch unreachable == E2.B); try testing.expect(intToEnum(E3, zero) catch unreachable == E3.A); - try 
testing.expect(intToEnum(E3, 127) catch unreachable == @enumFromInt(E3, 127)); - try testing.expect(intToEnum(E3, -128) catch unreachable == @enumFromInt(E3, -128)); + try testing.expect(intToEnum(E3, 127) catch unreachable == @as(E3, @enumFromInt(127))); + try testing.expect(intToEnum(E3, -128) catch unreachable == @as(E3, @enumFromInt(-128))); try testing.expectError(error.InvalidEnumTag, intToEnum(E1, one)); try testing.expectError(error.InvalidEnumTag, intToEnum(E3, 128)); try testing.expectError(error.InvalidEnumTag, intToEnum(E3, -129)); @@ -943,7 +943,7 @@ pub fn intToEnum(comptime EnumTag: type, tag_int: anytype) IntToEnumError!EnumTa if (!enum_info.is_exhaustive) { if (std.math.cast(enum_info.tag_type, tag_int)) |tag| { - return @enumFromInt(EnumTag, tag); + return @as(EnumTag, @enumFromInt(tag)); } return error.InvalidEnumTag; } diff --git a/lib/std/meta/trailer_flags.zig b/lib/std/meta/trailer_flags.zig index cf37fc5adf..d028f88066 100644 --- a/lib/std/meta/trailer_flags.zig +++ b/lib/std/meta/trailer_flags.zig @@ -72,7 +72,7 @@ pub fn TrailerFlags(comptime Fields: type) type { pub fn setMany(self: Self, p: [*]align(@alignOf(Fields)) u8, fields: FieldValues) void { inline for (@typeInfo(Fields).Struct.fields, 0..) |field, i| { if (@field(fields, field.name)) |value| - self.set(p, @enumFromInt(FieldEnum, i), value); + self.set(p, @as(FieldEnum, @enumFromInt(i)), value); } } @@ -89,14 +89,14 @@ pub fn TrailerFlags(comptime Fields: type) type { if (@sizeOf(Field(field)) == 0) return undefined; const off = self.offset(field); - return @ptrCast(*Field(field), @alignCast(@alignOf(Field(field)), p + off)); + return @ptrCast(@alignCast(p + off)); } pub fn ptrConst(self: Self, p: [*]align(@alignOf(Fields)) const u8, comptime field: FieldEnum) *const Field(field) { if (@sizeOf(Field(field)) == 0) return undefined; const off = self.offset(field); - return @ptrCast(*const Field(field), @alignCast(@alignOf(Field(field)), p + off)); + return @ptrCast(@alignCast(p + off)); } pub fn offset(self: Self, comptime field: FieldEnum) usize { diff --git a/lib/std/meta/trait.zig b/lib/std/meta/trait.zig index 3d0e0bce5d..e00fac261c 100644 --- a/lib/std/meta/trait.zig +++ b/lib/std/meta/trait.zig @@ -237,7 +237,7 @@ pub fn isManyItemPtr(comptime T: type) bool { test "isManyItemPtr" { const array = [_]u8{0} ** 10; - const mip = @ptrCast([*]const u8, &array[0]); + const mip = @as([*]const u8, @ptrCast(&array[0])); try testing.expect(isManyItemPtr(@TypeOf(mip))); try testing.expect(!isManyItemPtr(@TypeOf(array))); try testing.expect(!isManyItemPtr(@TypeOf(array[0..1]))); diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig index 26ba6cc919..ffbff62da2 100644 --- a/lib/std/multi_array_list.zig +++ b/lib/std/multi_array_list.zig @@ -78,7 +78,7 @@ pub fn MultiArrayList(comptime T: type) type { const casted_ptr: [*]F = if (@sizeOf(F) == 0) undefined else - @ptrCast([*]F, @alignCast(@alignOf(F), byte_ptr)); + @ptrCast(@alignCast(byte_ptr)); return casted_ptr[0..self.len]; } @@ -89,14 +89,14 @@ pub fn MultiArrayList(comptime T: type) type { else => unreachable, }; inline for (fields, 0..) |field_info, i| { - self.items(@enumFromInt(Field, i))[index] = @field(e, field_info.name); + self.items(@as(Field, @enumFromInt(i)))[index] = @field(e, field_info.name); } } pub fn get(self: Slice, index: usize) T { var result: Elem = undefined; inline for (fields, 0..) 
|field_info, i| { - @field(result, field_info.name) = self.items(@enumFromInt(Field, i))[index]; + @field(result, field_info.name) = self.items(@as(Field, @enumFromInt(i)))[index]; } return switch (@typeInfo(T)) { .Struct => result, @@ -110,10 +110,9 @@ pub fn MultiArrayList(comptime T: type) type { return .{}; } const unaligned_ptr = self.ptrs[sizes.fields[0]]; - const aligned_ptr = @alignCast(@alignOf(Elem), unaligned_ptr); - const casted_ptr = @ptrCast([*]align(@alignOf(Elem)) u8, aligned_ptr); + const aligned_ptr: [*]align(@alignOf(Elem)) u8 = @alignCast(unaligned_ptr); return .{ - .bytes = casted_ptr, + .bytes = aligned_ptr, .len = self.len, .capacity = self.capacity, }; @@ -294,7 +293,7 @@ pub fn MultiArrayList(comptime T: type) type { }; const slices = self.slice(); inline for (fields, 0..) |field_info, field_index| { - const field_slice = slices.items(@enumFromInt(Field, field_index)); + const field_slice = slices.items(@as(Field, @enumFromInt(field_index))); var i: usize = self.len - 1; while (i > index) : (i -= 1) { field_slice[i] = field_slice[i - 1]; @@ -309,7 +308,7 @@ pub fn MultiArrayList(comptime T: type) type { pub fn swapRemove(self: *Self, index: usize) void { const slices = self.slice(); inline for (fields, 0..) |_, i| { - const field_slice = slices.items(@enumFromInt(Field, i)); + const field_slice = slices.items(@as(Field, @enumFromInt(i))); field_slice[index] = field_slice[self.len - 1]; field_slice[self.len - 1] = undefined; } @@ -321,7 +320,7 @@ pub fn MultiArrayList(comptime T: type) type { pub fn orderedRemove(self: *Self, index: usize) void { const slices = self.slice(); inline for (fields, 0..) |_, field_index| { - const field_slice = slices.items(@enumFromInt(Field, field_index)); + const field_slice = slices.items(@as(Field, @enumFromInt(field_index))); var i = index; while (i < self.len - 1) : (i += 1) { field_slice[i] = field_slice[i + 1]; @@ -358,7 +357,7 @@ pub fn MultiArrayList(comptime T: type) type { const self_slice = self.slice(); inline for (fields, 0..) |field_info, i| { if (@sizeOf(field_info.type) != 0) { - const field = @enumFromInt(Field, i); + const field = @as(Field, @enumFromInt(i)); const dest_slice = self_slice.items(field)[new_len..]; // We use memset here for more efficient codegen in safety-checked, // valgrind-enabled builds. Otherwise the valgrind client request @@ -379,7 +378,7 @@ pub fn MultiArrayList(comptime T: type) type { const other_slice = other.slice(); inline for (fields, 0..) |field_info, i| { if (@sizeOf(field_info.type) != 0) { - const field = @enumFromInt(Field, i); + const field = @as(Field, @enumFromInt(i)); @memcpy(other_slice.items(field), self_slice.items(field)); } } @@ -440,7 +439,7 @@ pub fn MultiArrayList(comptime T: type) type { const other_slice = other.slice(); inline for (fields, 0..) |field_info, i| { if (@sizeOf(field_info.type) != 0) { - const field = @enumFromInt(Field, i); + const field = @as(Field, @enumFromInt(i)); @memcpy(other_slice.items(field), self_slice.items(field)); } } @@ -459,7 +458,7 @@ pub fn MultiArrayList(comptime T: type) type { const result_slice = result.slice(); inline for (fields, 0..) |field_info, i| { if (@sizeOf(field_info.type) != 0) { - const field = @enumFromInt(Field, i); + const field = @as(Field, @enumFromInt(i)); @memcpy(result_slice.items(field), self_slice.items(field)); } } @@ -476,7 +475,7 @@ pub fn MultiArrayList(comptime T: type) type { pub fn swap(sc: @This(), a_index: usize, b_index: usize) void { inline for (fields, 0..) 
|field_info, i| { if (@sizeOf(field_info.type) != 0) { - const field = @enumFromInt(Field, i); + const field = @as(Field, @enumFromInt(i)); const ptr = sc.slice.items(field); mem.swap(field_info.type, &ptr[a_index], &ptr[b_index]); } @@ -592,9 +591,9 @@ test "basic usage" { var i: usize = 0; while (i < 6) : (i += 1) { try list.append(ally, .{ - .a = @intCast(u32, 4 + i), + .a = @as(u32, @intCast(4 + i)), .b = "whatever", - .c = @intCast(u8, 'd' + i), + .c = @as(u8, @intCast('d' + i)), }); } @@ -791,7 +790,7 @@ test "union" { // Add 6 more things to force a capacity increase. for (0..6) |i| { - try list.append(ally, .{ .a = @intCast(u32, 4 + i) }); + try list.append(ally, .{ .a = @as(u32, @intCast(4 + i)) }); } try testing.expectEqualSlices( diff --git a/lib/std/net.zig b/lib/std/net.zig index 0f8ecbf21e..af291f6414 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -137,8 +137,8 @@ pub const Address = extern union { /// on the address family. pub fn initPosix(addr: *align(4) const os.sockaddr) Address { switch (addr.family) { - os.AF.INET => return Address{ .in = Ip4Address{ .sa = @ptrCast(*const os.sockaddr.in, addr).* } }, - os.AF.INET6 => return Address{ .in6 = Ip6Address{ .sa = @ptrCast(*const os.sockaddr.in6, addr).* } }, + os.AF.INET => return Address{ .in = Ip4Address{ .sa = @as(*const os.sockaddr.in, @ptrCast(addr)).* } }, + os.AF.INET6 => return Address{ .in6 = Ip6Address{ .sa = @as(*const os.sockaddr.in6, @ptrCast(addr)).* } }, else => unreachable, } } @@ -165,8 +165,8 @@ pub const Address = extern union { } pub fn eql(a: Address, b: Address) bool { - const a_bytes = @ptrCast([*]const u8, &a.any)[0..a.getOsSockLen()]; - const b_bytes = @ptrCast([*]const u8, &b.any)[0..b.getOsSockLen()]; + const a_bytes = @as([*]const u8, @ptrCast(&a.any))[0..a.getOsSockLen()]; + const b_bytes = @as([*]const u8, @ptrCast(&b.any))[0..b.getOsSockLen()]; return mem.eql(u8, a_bytes, b_bytes); } @@ -187,7 +187,7 @@ pub const Address = extern union { // provide the full buffer size (e.g. getsockname, getpeername, recvfrom, accept). // // To access the path, std.mem.sliceTo(&address.un.path, 0) should be used. 
- return @intCast(os.socklen_t, @sizeOf(os.sockaddr.un)); + return @as(os.socklen_t, @intCast(@sizeOf(os.sockaddr.un))); }, else => unreachable, @@ -260,7 +260,7 @@ pub const Ip4Address = extern struct { return Ip4Address{ .sa = os.sockaddr.in{ .port = mem.nativeToBig(u16, port), - .addr = @ptrCast(*align(1) const u32, &addr).*, + .addr = @as(*align(1) const u32, @ptrCast(&addr)).*, }, }; } @@ -285,7 +285,7 @@ pub const Ip4Address = extern struct { ) !void { if (fmt.len != 0) std.fmt.invalidFmtError(fmt, self); _ = options; - const bytes = @ptrCast(*const [4]u8, &self.sa.addr); + const bytes = @as(*const [4]u8, @ptrCast(&self.sa.addr)); try std.fmt.format(out_stream, "{}.{}.{}.{}:{}", .{ bytes[0], bytes[1], @@ -354,9 +354,9 @@ pub const Ip6Address = extern struct { if (index == 14) { return error.InvalidEnd; } - ip_slice[index] = @truncate(u8, x >> 8); + ip_slice[index] = @as(u8, @truncate(x >> 8)); index += 1; - ip_slice[index] = @truncate(u8, x); + ip_slice[index] = @as(u8, @truncate(x)); index += 1; x = 0; @@ -408,13 +408,13 @@ pub const Ip6Address = extern struct { } if (index == 14) { - ip_slice[14] = @truncate(u8, x >> 8); - ip_slice[15] = @truncate(u8, x); + ip_slice[14] = @as(u8, @truncate(x >> 8)); + ip_slice[15] = @as(u8, @truncate(x)); return result; } else { - ip_slice[index] = @truncate(u8, x >> 8); + ip_slice[index] = @as(u8, @truncate(x >> 8)); index += 1; - ip_slice[index] = @truncate(u8, x); + ip_slice[index] = @as(u8, @truncate(x)); index += 1; @memcpy(result.sa.addr[16 - index ..][0..index], ip_slice[0..index]); return result; @@ -473,9 +473,9 @@ pub const Ip6Address = extern struct { if (index == 14) { return error.InvalidEnd; } - ip_slice[index] = @truncate(u8, x >> 8); + ip_slice[index] = @as(u8, @truncate(x >> 8)); index += 1; - ip_slice[index] = @truncate(u8, x); + ip_slice[index] = @as(u8, @truncate(x)); index += 1; x = 0; @@ -542,13 +542,13 @@ pub const Ip6Address = extern struct { result.sa.scope_id = resolved_scope_id; if (index == 14) { - ip_slice[14] = @truncate(u8, x >> 8); - ip_slice[15] = @truncate(u8, x); + ip_slice[14] = @as(u8, @truncate(x >> 8)); + ip_slice[15] = @as(u8, @truncate(x)); return result; } else { - ip_slice[index] = @truncate(u8, x >> 8); + ip_slice[index] = @as(u8, @truncate(x >> 8)); index += 1; - ip_slice[index] = @truncate(u8, x); + ip_slice[index] = @as(u8, @truncate(x)); index += 1; @memcpy(result.sa.addr[16 - index ..][0..index], ip_slice[0..index]); return result; @@ -597,7 +597,7 @@ pub const Ip6Address = extern struct { }); return; } - const big_endian_parts = @ptrCast(*align(1) const [8]u16, &self.sa.addr); + const big_endian_parts = @as(*align(1) const [8]u16, @ptrCast(&self.sa.addr)); const native_endian_parts = switch (native_endian) { .Big => big_endian_parts.*, .Little => blk: { @@ -668,7 +668,7 @@ fn if_nametoindex(name: []const u8) !u32 { // TODO investigate if this needs to be integrated with evented I/O. 
try os.ioctl_SIOCGIFINDEX(sockfd, &ifr); - return @bitCast(u32, ifr.ifru.ivalue); + return @as(u32, @bitCast(ifr.ifru.ivalue)); } if (comptime builtin.target.os.tag.isDarwin()) { @@ -682,7 +682,7 @@ fn if_nametoindex(name: []const u8) !u32 { const index = os.system.if_nametoindex(if_slice); if (index == 0) return error.InterfaceNotFound; - return @bitCast(u32, index); + return @as(u32, @bitCast(index)); } @compileError("std.net.if_nametoindex unimplemented for this OS"); @@ -804,8 +804,8 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get var first = true; while (true) { const rc = ws2_32.getaddrinfo(name_c.ptr, port_c.ptr, &hints, &res); - switch (@enumFromInt(os.windows.ws2_32.WinsockError, @intCast(u16, rc))) { - @enumFromInt(os.windows.ws2_32.WinsockError, 0) => break, + switch (@as(os.windows.ws2_32.WinsockError, @enumFromInt(@as(u16, @intCast(rc))))) { + @as(os.windows.ws2_32.WinsockError, @enumFromInt(0)) => break, .WSATRY_AGAIN => return error.TemporaryNameServerFailure, .WSANO_RECOVERY => return error.NameServerFailure, .WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported, @@ -841,7 +841,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get var i: usize = 0; while (it) |info| : (it = info.next) { const addr = info.addr orelse continue; - result.addrs[i] = Address.initPosix(@alignCast(4, addr)); + result.addrs[i] = Address.initPosix(@alignCast(addr)); if (info.canonname) |n| { if (result.canon_name == null) { @@ -874,7 +874,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get }; var res: ?*os.addrinfo = null; switch (sys.getaddrinfo(name_c.ptr, port_c.ptr, &hints, &res)) { - @enumFromInt(sys.EAI, 0) => {}, + @as(sys.EAI, @enumFromInt(0)) => {}, .ADDRFAMILY => return error.HostLacksNetworkAddresses, .AGAIN => return error.TemporaryNameServerFailure, .BADFLAGS => unreachable, // Invalid hints @@ -908,7 +908,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get var i: usize = 0; while (it) |info| : (it = info.next) { const addr = info.addr orelse continue; - result.addrs[i] = Address.initPosix(@alignCast(4, addr)); + result.addrs[i] = Address.initPosix(@alignCast(addr)); if (info.canonname) |n| { if (result.canon_name == null) { @@ -1020,7 +1020,7 @@ fn linuxLookupName( for (addrs.items, 0..) 
|*addr, i| { var key: i32 = 0; var sa6: os.sockaddr.in6 = undefined; - @memset(@ptrCast([*]u8, &sa6)[0..@sizeOf(os.sockaddr.in6)], 0); + @memset(@as([*]u8, @ptrCast(&sa6))[0..@sizeOf(os.sockaddr.in6)], 0); var da6 = os.sockaddr.in6{ .family = os.AF.INET6, .scope_id = addr.addr.in6.sa.scope_id, @@ -1029,7 +1029,7 @@ fn linuxLookupName( .addr = [1]u8{0} ** 16, }; var sa4: os.sockaddr.in = undefined; - @memset(@ptrCast([*]u8, &sa4)[0..@sizeOf(os.sockaddr.in)], 0); + @memset(@as([*]u8, @ptrCast(&sa4))[0..@sizeOf(os.sockaddr.in)], 0); var da4 = os.sockaddr.in{ .family = os.AF.INET, .port = 65535, @@ -1042,18 +1042,18 @@ fn linuxLookupName( var dalen: os.socklen_t = undefined; if (addr.addr.any.family == os.AF.INET6) { da6.addr = addr.addr.in6.sa.addr; - da = @ptrCast(*os.sockaddr, &da6); + da = @ptrCast(&da6); dalen = @sizeOf(os.sockaddr.in6); - sa = @ptrCast(*os.sockaddr, &sa6); + sa = @ptrCast(&sa6); salen = @sizeOf(os.sockaddr.in6); } else { sa6.addr[0..12].* = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff".*; da6.addr[0..12].* = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff".*; mem.writeIntNative(u32, da6.addr[12..], addr.addr.in.sa.addr); da4.addr = addr.addr.in.sa.addr; - da = @ptrCast(*os.sockaddr, &da4); + da = @ptrCast(&da4); dalen = @sizeOf(os.sockaddr.in); - sa = @ptrCast(*os.sockaddr, &sa4); + sa = @ptrCast(&sa4); salen = @sizeOf(os.sockaddr.in); } const dpolicy = policyOf(da6.addr); @@ -1070,7 +1070,7 @@ fn linuxLookupName( os.getsockname(fd, sa, &salen) catch break :syscalls; if (addr.addr.any.family == os.AF.INET) { // TODO sa6.addr[12..16] should return *[4]u8, making this cast unnecessary. - mem.writeIntNative(u32, @ptrCast(*[4]u8, &sa6.addr[12]), sa4.addr); + mem.writeIntNative(u32, @as(*[4]u8, @ptrCast(&sa6.addr[12])), sa4.addr); } if (dscope == @as(i32, scopeOf(sa6.addr))) key |= DAS_MATCHINGSCOPE; if (dlabel == labelOf(sa6.addr)) key |= DAS_MATCHINGLABEL; @@ -1079,7 +1079,7 @@ fn linuxLookupName( key |= dprec << DAS_PREC_SHIFT; key |= (15 - dscope) << DAS_SCOPE_SHIFT; key |= prefixlen << DAS_PREFIX_SHIFT; - key |= (MAXADDRS - @intCast(i32, i)) << DAS_ORDER_SHIFT; + key |= (MAXADDRS - @as(i32, @intCast(i))) << DAS_ORDER_SHIFT; addr.sortkey = key; } mem.sort(LookupAddr, addrs.items, {}, addrCmpLessThan); @@ -1171,7 +1171,7 @@ fn prefixMatch(s: [16]u8, d: [16]u8) u8 { // address. However the definition of the source prefix length is // not clear and thus this limiting is not yet implemented. 
var i: u8 = 0; - while (i < 128 and ((s[i / 8] ^ d[i / 8]) & (@as(u8, 128) >> @intCast(u3, i % 8))) == 0) : (i += 1) {} + while (i < 128 and ((s[i / 8] ^ d[i / 8]) & (@as(u8, 128) >> @as(u3, @intCast(i % 8)))) == 0) : (i += 1) {} return i; } @@ -1577,7 +1577,7 @@ fn resMSendRc( // Get local address and open/bind a socket var sa: Address = undefined; - @memset(@ptrCast([*]u8, &sa)[0..@sizeOf(Address)], 0); + @memset(@as([*]u8, @ptrCast(&sa))[0..@sizeOf(Address)], 0); sa.any.family = family; try os.bind(fd, &sa.any, sl); @@ -1588,13 +1588,13 @@ fn resMSendRc( }}; const retry_interval = timeout / attempts; var next: u32 = 0; - var t2: u64 = @bitCast(u64, std.time.milliTimestamp()); + var t2: u64 = @as(u64, @bitCast(std.time.milliTimestamp())); var t0 = t2; var t1 = t2 - retry_interval; var servfail_retry: usize = undefined; - outer: while (t2 - t0 < timeout) : (t2 = @bitCast(u64, std.time.milliTimestamp())) { + outer: while (t2 - t0 < timeout) : (t2 = @as(u64, @bitCast(std.time.milliTimestamp()))) { if (t2 - t1 >= retry_interval) { // Query all configured nameservers in parallel var i: usize = 0; diff --git a/lib/std/os.zig b/lib/std/os.zig index 872aeef611..2c49bd9f49 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -494,7 +494,7 @@ pub fn getrandom(buffer: []u8) GetRandomError!void { const res = if (use_c) blk: { const rc = std.c.getrandom(buf.ptr, buf.len, 0); break :blk .{ - .num_read = @bitCast(usize, rc), + .num_read = @as(usize, @bitCast(rc)), .err = std.c.getErrno(rc), }; } else blk: { @@ -608,7 +608,7 @@ pub fn abort() noreturn { sigprocmask(SIG.UNBLOCK, &sigabrtmask, null); // Beyond this point should be unreachable. - @ptrFromInt(*allowzero volatile u8, 0).* = 0; + @as(*allowzero volatile u8, @ptrFromInt(0)).* = 0; raise(SIG.KILL) catch {}; exit(127); // Pid 1 might not be signalled in some containers. } @@ -678,10 +678,10 @@ pub fn exit(status: u8) noreturn { // exit() is only available if exitBootServices() has not been called yet. // This call to exit should not fail, so we don't care about its return value. if (uefi.system_table.boot_services) |bs| { - _ = bs.exit(uefi.handle, @enumFromInt(uefi.Status, status), 0, null); + _ = bs.exit(uefi.handle, @as(uefi.Status, @enumFromInt(status)), 0, null); } // If we can't exit, reboot the system instead. 
- uefi.system_table.runtime_services.resetSystem(uefi.tables.ResetType.ResetCold, @enumFromInt(uefi.Status, status), 0, null); + uefi.system_table.runtime_services.resetSystem(uefi.tables.ResetType.ResetCold, @as(uefi.Status, @enumFromInt(status)), 0, null); } system.exit(status); } @@ -759,7 +759,7 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize { while (true) { const rc = system.read(fd, buf.ptr, adjusted_len); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, @@ -818,7 +818,7 @@ pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize { // TODO handle the case when iov_len is too large and get rid of this @intCast const rc = system.readv(fd, iov.ptr, iov_count); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, @@ -892,11 +892,11 @@ pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize { const pread_sym = if (lfs64_abi) system.pread64 else system.pread; - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = pread_sym(fd, buf.ptr, adjusted_len, ioffset); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, @@ -929,7 +929,7 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void { if (builtin.os.tag == .windows) { var io_status_block: windows.IO_STATUS_BLOCK = undefined; var eof_info = windows.FILE_END_OF_FILE_INFORMATION{ - .EndOfFile = @bitCast(windows.LARGE_INTEGER, length), + .EndOfFile = @as(windows.LARGE_INTEGER, @bitCast(length)), }; const rc = windows.ntdll.NtSetInformationFile( @@ -965,7 +965,7 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void { while (true) { const ftruncate_sym = if (lfs64_abi) system.ftruncate64 else system.ftruncate; - const ilen = @bitCast(i64, length); // the OS treats this as unsigned + const ilen = @as(i64, @bitCast(length)); // the OS treats this as unsigned switch (errno(ftruncate_sym(fd, ilen))) { .SUCCESS => return, .INTR => continue, @@ -1001,7 +1001,7 @@ pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize { if (have_pread_but_not_preadv) { // We could loop here; but proper usage of `preadv` must handle partial reads anyway. // So we simply read into the first vector only. 
- if (iov.len == 0) return @intCast(usize, 0); + if (iov.len == 0) return @as(usize, @intCast(0)); const first = iov[0]; return pread(fd, first.iov_base[0..first.iov_len], offset); } @@ -1030,11 +1030,11 @@ pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize { const preadv_sym = if (lfs64_abi) system.preadv64 else system.preadv; - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = preadv_sym(fd, iov.ptr, iov_count, ioffset); switch (errno(rc)) { - .SUCCESS => return @bitCast(usize, rc), + .SUCCESS => return @as(usize, @bitCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, @@ -1143,7 +1143,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize { while (true) { const rc = system.write(fd, bytes.ptr, adjusted_len); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => return error.InvalidArgument, .FAULT => unreachable, @@ -1212,11 +1212,11 @@ pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize { } } - const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @intCast(u31, iov.len); + const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @as(u31, @intCast(iov.len)); while (true) { const rc = system.writev(fd, iov.ptr, iov_count); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => return error.InvalidArgument, .FAULT => unreachable, @@ -1304,11 +1304,11 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize { const pwrite_sym = if (lfs64_abi) system.pwrite64 else system.pwrite; - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = pwrite_sym(fd, bytes.ptr, adjusted_len, ioffset); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => return error.InvalidArgument, .FAULT => unreachable, @@ -1390,12 +1390,12 @@ pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usiz const pwritev_sym = if (lfs64_abi) system.pwritev64 else system.pwritev; - const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @intCast(u31, iov.len); - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @as(u31, @intCast(iov.len)); + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = pwritev_sym(fd, iov.ptr, iov_count, ioffset); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => return error.InvalidArgument, .FAULT => unreachable, @@ -1504,7 +1504,7 @@ pub fn openZ(file_path: [*:0]const u8, flags: u32, perm: mode_t) OpenError!fd_t while (true) { const rc = open_sym(file_path, flags, perm); switch (errno(rc)) { - .SUCCESS => return @intCast(fd_t, rc), + .SUCCESS => return @as(fd_t, @intCast(rc)), .INTR => continue, .FAULT => unreachable, @@ -1653,11 +1653,11 @@ fn openOptionsFromFlagsWasi(fd: fd_t, oflag: u32) OpenError!WasiOpenOptions { rights &= fsb_cur.fs_rights_inheriting; return WasiOpenOptions{ - .oflags = @truncate(w.oflags_t, (oflag >> 12)) & 0xfff, + .oflags = @as(w.oflags_t, @truncate((oflag 
>> 12))) & 0xfff, .lookup_flags = if (oflag & O.NOFOLLOW == 0) w.LOOKUP_SYMLINK_FOLLOW else 0, .fs_rights_base = rights, .fs_rights_inheriting = fsb_cur.fs_rights_inheriting, - .fs_flags = @truncate(w.fdflags_t, oflag & 0xfff), + .fs_flags = @as(w.fdflags_t, @truncate(oflag & 0xfff)), }; } @@ -1717,7 +1717,7 @@ pub fn openatZ(dir_fd: fd_t, file_path: [*:0]const u8, flags: u32, mode: mode_t) while (true) { const rc = openat_sym(dir_fd, file_path, flags, mode); switch (errno(rc)) { - .SUCCESS => return @intCast(fd_t, rc), + .SUCCESS => return @as(fd_t, @intCast(rc)), .INTR => continue, .FAULT => unreachable, @@ -1765,7 +1765,7 @@ pub fn openatW(dir_fd: fd_t, file_path_w: []const u16, flags: u32, mode: mode_t) pub fn dup(old_fd: fd_t) !fd_t { const rc = system.dup(old_fd); return switch (errno(rc)) { - .SUCCESS => return @intCast(fd_t, rc), + .SUCCESS => return @as(fd_t, @intCast(rc)), .MFILE => error.ProcessFdQuotaExceeded, .BADF => unreachable, // invalid file descriptor else => |err| return unexpectedErrno(err), @@ -2024,7 +2024,7 @@ pub fn getcwd(out_buffer: []u8) GetCwdError![]u8 { const err = if (builtin.link_libc) blk: { const c_err = if (std.c.getcwd(out_buffer.ptr, out_buffer.len)) |_| 0 else std.c._errno().*; - break :blk @enumFromInt(E, c_err); + break :blk @as(E, @enumFromInt(c_err)); } else blk: { break :blk errno(system.getcwd(out_buffer.ptr, out_buffer.len)); }; @@ -2661,12 +2661,12 @@ pub fn renameatW( const struct_len = @sizeOf(windows.FILE_RENAME_INFORMATION) - 1 + new_path_w.len * 2; if (struct_len > struct_buf_len) return error.NameTooLong; - const rename_info = @ptrCast(*windows.FILE_RENAME_INFORMATION, &rename_info_buf); + const rename_info = @as(*windows.FILE_RENAME_INFORMATION, @ptrCast(&rename_info_buf)); rename_info.* = .{ .ReplaceIfExists = ReplaceIfExists, .RootDirectory = if (std.fs.path.isAbsoluteWindowsWTF16(new_path_w)) null else new_dir_fd, - .FileNameLength = @intCast(u32, new_path_w.len * 2), // already checked error.NameTooLong + .FileNameLength = @as(u32, @intCast(new_path_w.len * 2)), // already checked error.NameTooLong .FileName = undefined, }; @memcpy(@as([*]u16, &rename_info.FileName)[0..new_path_w.len], new_path_w); @@ -2677,7 +2677,7 @@ pub fn renameatW( src_fd, &io_status_block, rename_info, - @intCast(u32, struct_len), // already checked for error.NameTooLong + @as(u32, @intCast(struct_len)), // already checked for error.NameTooLong .FileRenameInformation, ); @@ -3049,7 +3049,7 @@ pub fn readlinkZ(file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8 } const rc = system.readlink(file_path, out_buffer.ptr, out_buffer.len); switch (errno(rc)) { - .SUCCESS => return out_buffer[0..@bitCast(usize, rc)], + .SUCCESS => return out_buffer[0..@as(usize, @bitCast(rc))], .ACCES => return error.AccessDenied, .FAULT => unreachable, .INVAL => return error.NotLink, @@ -3115,7 +3115,7 @@ pub fn readlinkatZ(dirfd: fd_t, file_path: [*:0]const u8, out_buffer: []u8) Read } const rc = system.readlinkat(dirfd, file_path, out_buffer.ptr, out_buffer.len); switch (errno(rc)) { - .SUCCESS => return out_buffer[0..@bitCast(usize, rc)], + .SUCCESS => return out_buffer[0..@as(usize, @bitCast(rc))], .ACCES => return error.AccessDenied, .FAULT => unreachable, .INVAL => return error.NotLink, @@ -3227,7 +3227,7 @@ pub fn isatty(handle: fd_t) bool { if (builtin.os.tag == .linux) { while (true) { var wsz: linux.winsize = undefined; - const fd = @bitCast(usize, @as(isize, handle)); + const fd = @as(usize, @bitCast(@as(isize, handle))); const rc = linux.syscall3(.ioctl, fd, 
linux.T.IOCGWINSZ, @intFromPtr(&wsz)); switch (linux.getErrno(rc)) { .SUCCESS => return true, @@ -3271,14 +3271,14 @@ pub fn isCygwinPty(handle: fd_t) bool { var name_info_bytes align(@alignOf(windows.FILE_NAME_INFO)) = [_]u8{0} ** (name_bytes_offset + num_name_bytes); var io_status_block: windows.IO_STATUS_BLOCK = undefined; - const rc = windows.ntdll.NtQueryInformationFile(handle, &io_status_block, &name_info_bytes, @intCast(u32, name_info_bytes.len), .FileNameInformation); + const rc = windows.ntdll.NtQueryInformationFile(handle, &io_status_block, &name_info_bytes, @as(u32, @intCast(name_info_bytes.len)), .FileNameInformation); switch (rc) { .SUCCESS => {}, .INVALID_PARAMETER => unreachable, else => return false, } - const name_info = @ptrCast(*const windows.FILE_NAME_INFO, &name_info_bytes[0]); + const name_info = @as(*const windows.FILE_NAME_INFO, @ptrCast(&name_info_bytes[0])); const name_bytes = name_info_bytes[name_bytes_offset .. name_bytes_offset + @as(usize, name_info.FileNameLength)]; const name_wide = mem.bytesAsSlice(u16, name_bytes); // Note: The name we get from NtQueryInformationFile will be prefixed with a '\', e.g. \msys-1888ae32e00d56aa-pty0-to-master @@ -3325,9 +3325,9 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t else 0; const rc = try windows.WSASocketW( - @bitCast(i32, domain), - @bitCast(i32, filtered_sock_type), - @bitCast(i32, protocol), + @as(i32, @bitCast(domain)), + @as(i32, @bitCast(filtered_sock_type)), + @as(i32, @bitCast(protocol)), null, 0, flags, @@ -3353,7 +3353,7 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t const rc = system.socket(domain, filtered_sock_type, protocol); switch (errno(rc)) { .SUCCESS => { - const fd = @intCast(fd_t, rc); + const fd = @as(fd_t, @intCast(rc)); if (!have_sock_flags) { try setSockFlags(fd, socket_type); } @@ -3679,7 +3679,7 @@ pub fn accept( } else { switch (errno(rc)) { .SUCCESS => { - break @intCast(socket_t, rc); + break @as(socket_t, @intCast(rc)); }, .INTR => continue, .AGAIN => return error.WouldBlock, @@ -3723,7 +3723,7 @@ pub const EpollCreateError = error{ pub fn epoll_create1(flags: u32) EpollCreateError!i32 { const rc = system.epoll_create1(flags); switch (errno(rc)) { - .SUCCESS => return @intCast(i32, rc), + .SUCCESS => return @as(i32, @intCast(rc)), else => |err| return unexpectedErrno(err), .INVAL => unreachable, @@ -3782,9 +3782,9 @@ pub fn epoll_ctl(epfd: i32, op: u32, fd: i32, event: ?*linux.epoll_event) EpollC pub fn epoll_wait(epfd: i32, events: []linux.epoll_event, timeout: i32) usize { while (true) { // TODO get rid of the @intCast - const rc = system.epoll_wait(epfd, events.ptr, @intCast(u32, events.len), timeout); + const rc = system.epoll_wait(epfd, events.ptr, @as(u32, @intCast(events.len)), timeout); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .BADF => unreachable, .FAULT => unreachable, @@ -3803,7 +3803,7 @@ pub const EventFdError = error{ pub fn eventfd(initval: u32, flags: u32) EventFdError!i32 { const rc = system.eventfd(initval, flags); switch (errno(rc)) { - .SUCCESS => return @intCast(i32, rc), + .SUCCESS => return @as(i32, @intCast(rc)), else => |err| return unexpectedErrno(err), .INVAL => unreachable, // invalid parameters @@ -3937,7 +3937,7 @@ pub const ConnectError = error{ /// return error.WouldBlock when EAGAIN or EINPROGRESS is received. 
pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) ConnectError!void { if (builtin.os.tag == .windows) { - const rc = windows.ws2_32.connect(sock, sock_addr, @intCast(i32, len)); + const rc = windows.ws2_32.connect(sock, sock_addr, @as(i32, @intCast(len))); if (rc == 0) return; switch (windows.ws2_32.WSAGetLastError()) { .WSAEADDRINUSE => return error.AddressInUse, @@ -3992,10 +3992,10 @@ pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) Conne pub fn getsockoptError(sockfd: fd_t) ConnectError!void { var err_code: i32 = undefined; var size: u32 = @sizeOf(u32); - const rc = system.getsockopt(sockfd, SOL.SOCKET, SO.ERROR, @ptrCast([*]u8, &err_code), &size); + const rc = system.getsockopt(sockfd, SOL.SOCKET, SO.ERROR, @as([*]u8, @ptrCast(&err_code)), &size); assert(size == 4); switch (errno(rc)) { - .SUCCESS => switch (@enumFromInt(E, err_code)) { + .SUCCESS => switch (@as(E, @enumFromInt(err_code))) { .SUCCESS => return, .ACCES => return error.PermissionDenied, .PERM => return error.PermissionDenied, @@ -4035,13 +4035,13 @@ pub const WaitPidResult = struct { pub fn waitpid(pid: pid_t, flags: u32) WaitPidResult { const Status = if (builtin.link_libc) c_int else u32; var status: Status = undefined; - const coerced_flags = if (builtin.link_libc) @intCast(c_int, flags) else flags; + const coerced_flags = if (builtin.link_libc) @as(c_int, @intCast(flags)) else flags; while (true) { const rc = system.waitpid(pid, &status, coerced_flags); switch (errno(rc)) { .SUCCESS => return .{ - .pid = @intCast(pid_t, rc), - .status = @bitCast(u32, status), + .pid = @as(pid_t, @intCast(rc)), + .status = @as(u32, @bitCast(status)), }, .INTR => continue, .CHILD => unreachable, // The process specified does not exist. It would be a race condition to handle this error. @@ -4054,13 +4054,13 @@ pub fn waitpid(pid: pid_t, flags: u32) WaitPidResult { pub fn wait4(pid: pid_t, flags: u32, ru: ?*rusage) WaitPidResult { const Status = if (builtin.link_libc) c_int else u32; var status: Status = undefined; - const coerced_flags = if (builtin.link_libc) @intCast(c_int, flags) else flags; + const coerced_flags = if (builtin.link_libc) @as(c_int, @intCast(flags)) else flags; while (true) { const rc = system.wait4(pid, &status, coerced_flags, ru); switch (errno(rc)) { .SUCCESS => return .{ - .pid = @intCast(pid_t, rc), - .status = @bitCast(u32, status), + .pid = @as(pid_t, @intCast(rc)), + .status = @as(u32, @bitCast(status)), }, .INTR => continue, .CHILD => unreachable, // The process specified does not exist. It would be a race condition to handle this error. @@ -4182,7 +4182,7 @@ pub const KQueueError = error{ pub fn kqueue() KQueueError!i32 { const rc = system.kqueue(); switch (errno(rc)) { - .SUCCESS => return @intCast(i32, rc), + .SUCCESS => return @as(i32, @intCast(rc)), .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, else => |err| return unexpectedErrno(err), @@ -4223,7 +4223,7 @@ pub fn kevent( timeout, ); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .ACCES => return error.AccessDenied, .FAULT => unreachable, .BADF => unreachable, // Always a race condition. 
@@ -4247,7 +4247,7 @@ pub const INotifyInitError = error{ pub fn inotify_init1(flags: u32) INotifyInitError!i32 { const rc = system.inotify_init1(flags); switch (errno(rc)) { - .SUCCESS => return @intCast(i32, rc), + .SUCCESS => return @as(i32, @intCast(rc)), .INVAL => unreachable, .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, @@ -4276,7 +4276,7 @@ pub fn inotify_add_watch(inotify_fd: i32, pathname: []const u8, mask: u32) INoti pub fn inotify_add_watchZ(inotify_fd: i32, pathname: [*:0]const u8, mask: u32) INotifyAddWatchError!i32 { const rc = system.inotify_add_watch(inotify_fd, pathname, mask); switch (errno(rc)) { - .SUCCESS => return @intCast(i32, rc), + .SUCCESS => return @as(i32, @intCast(rc)), .ACCES => return error.AccessDenied, .BADF => unreachable, .FAULT => unreachable, @@ -4319,7 +4319,7 @@ pub const MProtectError = error{ pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void { assert(mem.isAligned(memory.len, mem.page_size)); if (builtin.os.tag == .windows) { - const win_prot: windows.DWORD = switch (@truncate(u3, protection)) { + const win_prot: windows.DWORD = switch (@as(u3, @truncate(protection))) { 0b000 => windows.PAGE_NOACCESS, 0b001 => windows.PAGE_READONLY, 0b010 => unreachable, // +w -r not allowed @@ -4350,7 +4350,7 @@ pub const ForkError = error{SystemResources} || UnexpectedError; pub fn fork() ForkError!pid_t { const rc = system.fork(); switch (errno(rc)) { - .SUCCESS => return @intCast(pid_t, rc), + .SUCCESS => return @as(pid_t, @intCast(rc)), .AGAIN => return error.SystemResources, .NOMEM => return error.SystemResources, else => |err| return unexpectedErrno(err), @@ -4391,14 +4391,14 @@ pub fn mmap( ) MMapError![]align(mem.page_size) u8 { const mmap_sym = if (lfs64_abi) system.mmap64 else system.mmap; - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned const rc = mmap_sym(ptr, length, prot, flags, fd, ioffset); const err = if (builtin.link_libc) blk: { - if (rc != std.c.MAP.FAILED) return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, rc))[0..length]; - break :blk @enumFromInt(E, system._errno().*); + if (rc != std.c.MAP.FAILED) return @as([*]align(mem.page_size) u8, @ptrCast(@alignCast(rc)))[0..length]; + break :blk @as(E, @enumFromInt(system._errno().*)); } else blk: { const err = errno(rc); - if (err == .SUCCESS) return @ptrFromInt([*]align(mem.page_size) u8, rc)[0..length]; + if (err == .SUCCESS) return @as([*]align(mem.page_size) u8, @ptrFromInt(rc))[0..length]; break :blk err; }; switch (err) { @@ -4781,7 +4781,7 @@ pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void { } if (builtin.os.tag == .wasi and !builtin.link_libc) { var new_offset: wasi.filesize_t = undefined; - switch (wasi.fd_seek(fd, @bitCast(wasi.filedelta_t, offset), .SET, &new_offset)) { + switch (wasi.fd_seek(fd, @as(wasi.filedelta_t, @bitCast(offset)), .SET, &new_offset)) { .SUCCESS => return, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, @@ -4795,7 +4795,7 @@ pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void { const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek; - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned switch (errno(lseek_sym(fd, ioffset, SEEK.SET))) { .SUCCESS => return, .BADF => unreachable, // always a race condition @@ 
-4811,7 +4811,7 @@ pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void { pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void { if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) { var result: u64 = undefined; - switch (errno(system.llseek(fd, @bitCast(u64, offset), &result, SEEK.CUR))) { + switch (errno(system.llseek(fd, @as(u64, @bitCast(offset)), &result, SEEK.CUR))) { .SUCCESS => return, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, @@ -4839,7 +4839,7 @@ pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void { } const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek; - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned switch (errno(lseek_sym(fd, ioffset, SEEK.CUR))) { .SUCCESS => return, .BADF => unreachable, // always a race condition @@ -4855,7 +4855,7 @@ pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void { pub fn lseek_END(fd: fd_t, offset: i64) SeekError!void { if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) { var result: u64 = undefined; - switch (errno(system.llseek(fd, @bitCast(u64, offset), &result, SEEK.END))) { + switch (errno(system.llseek(fd, @as(u64, @bitCast(offset)), &result, SEEK.END))) { .SUCCESS => return, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, @@ -4883,7 +4883,7 @@ pub fn lseek_END(fd: fd_t, offset: i64) SeekError!void { } const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek; - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned switch (errno(lseek_sym(fd, ioffset, SEEK.END))) { .SUCCESS => return, .BADF => unreachable, // always a race condition @@ -4929,7 +4929,7 @@ pub fn lseek_CUR_get(fd: fd_t) SeekError!u64 { const rc = lseek_sym(fd, 0, SEEK.CUR); switch (errno(rc)) { - .SUCCESS => return @bitCast(u64, rc), + .SUCCESS => return @as(u64, @bitCast(rc)), .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, .OVERFLOW => return error.Unseekable, @@ -4952,7 +4952,7 @@ pub fn fcntl(fd: fd_t, cmd: i32, arg: usize) FcntlError!usize { while (true) { const rc = system.fcntl(fd, cmd, arg); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .AGAIN, .ACCES => return error.Locked, .BADF => unreachable, @@ -5122,7 +5122,7 @@ pub fn realpathZ(pathname: [*:0]const u8, out_buffer: *[MAX_PATH_BYTES]u8) RealP return getFdPath(fd, out_buffer); } - const result_path = std.c.realpath(pathname, out_buffer) orelse switch (@enumFromInt(E, std.c._errno().*)) { + const result_path = std.c.realpath(pathname, out_buffer) orelse switch (@as(E, @enumFromInt(std.c._errno().*))) { .SUCCESS => unreachable, .INVAL => unreachable, .BADF => unreachable, @@ -5269,7 +5269,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 { }; var i: usize = 0; while (i < len) { - const kf: *align(1) system.kinfo_file = @ptrCast(*align(1) system.kinfo_file, &buf[i]); + const kf: *align(1) system.kinfo_file = @as(*align(1) system.kinfo_file, @ptrCast(&buf[i])); if (kf.fd == fd) { len = mem.indexOfScalar(u8, &kf.path, 0) orelse MAX_PATH_BYTES; if (len == 0) return error.NameTooLong; @@ -5277,7 +5277,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 { @memcpy(result, 
kf.path[0..len]); return result; } - i += @intCast(usize, kf.structsize); + i += @as(usize, @intCast(kf.structsize)); } return error.InvalidHandle; } @@ -5357,22 +5357,22 @@ pub fn dl_iterate_phdr( if (builtin.link_libc) { switch (system.dl_iterate_phdr(struct { fn callbackC(info: *dl_phdr_info, size: usize, data: ?*anyopaque) callconv(.C) c_int { - const context_ptr = @ptrCast(*const Context, @alignCast(@alignOf(*const Context), data)); + const context_ptr: *const Context = @ptrCast(@alignCast(data)); callback(info, size, context_ptr.*) catch |err| return @intFromError(err); return 0; } - }.callbackC, @ptrFromInt(?*anyopaque, @intFromPtr(&context)))) { + }.callbackC, @as(?*anyopaque, @ptrFromInt(@intFromPtr(&context))))) { 0 => return, - else => |err| return @errSetCast(Error, @errorFromInt(@intCast(u16, err))), // TODO don't hardcode u16 + else => |err| return @as(Error, @errSetCast(@errorFromInt(@as(u16, @intCast(err))))), // TODO don't hardcode u16 } } const elf_base = std.process.getBaseAddress(); - const ehdr = @ptrFromInt(*elf.Ehdr, elf_base); + const ehdr = @as(*elf.Ehdr, @ptrFromInt(elf_base)); // Make sure the base address points to an ELF image. assert(mem.eql(u8, ehdr.e_ident[0..4], elf.MAGIC)); const n_phdr = ehdr.e_phnum; - const phdrs = (@ptrFromInt([*]elf.Phdr, elf_base + ehdr.e_phoff))[0..n_phdr]; + const phdrs = (@as([*]elf.Phdr, @ptrFromInt(elf_base + ehdr.e_phoff)))[0..n_phdr]; var it = dl.linkmap_iterator(phdrs) catch unreachable; @@ -5406,12 +5406,12 @@ pub fn dl_iterate_phdr( var dlpi_phnum: u16 = undefined; if (entry.l_addr != 0) { - const elf_header = @ptrFromInt(*elf.Ehdr, entry.l_addr); - dlpi_phdr = @ptrFromInt([*]elf.Phdr, entry.l_addr + elf_header.e_phoff); + const elf_header = @as(*elf.Ehdr, @ptrFromInt(entry.l_addr)); + dlpi_phdr = @as([*]elf.Phdr, @ptrFromInt(entry.l_addr + elf_header.e_phoff)); dlpi_phnum = elf_header.e_phnum; } else { // This is the running ELF image - dlpi_phdr = @ptrFromInt([*]elf.Phdr, elf_base + ehdr.e_phoff); + dlpi_phdr = @as([*]elf.Phdr, @ptrFromInt(elf_base + ehdr.e_phoff)); dlpi_phnum = ehdr.e_phnum; } @@ -5433,11 +5433,11 @@ pub const ClockGetTimeError = error{UnsupportedClock} || UnexpectedError; pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void { if (builtin.os.tag == .wasi and !builtin.link_libc) { var ts: timestamp_t = undefined; - switch (system.clock_time_get(@bitCast(u32, clk_id), 1, &ts)) { + switch (system.clock_time_get(@as(u32, @bitCast(clk_id)), 1, &ts)) { .SUCCESS => { tp.* = .{ - .tv_sec = @intCast(i64, ts / std.time.ns_per_s), - .tv_nsec = @intCast(isize, ts % std.time.ns_per_s), + .tv_sec = @as(i64, @intCast(ts / std.time.ns_per_s)), + .tv_nsec = @as(isize, @intCast(ts % std.time.ns_per_s)), }; }, .INVAL => return error.UnsupportedClock, @@ -5453,8 +5453,8 @@ pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void { const ft64 = (@as(u64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime; const ft_per_s = std.time.ns_per_s / 100; tp.* = .{ - .tv_sec = @intCast(i64, ft64 / ft_per_s) + std.time.epoch.windows, - .tv_nsec = @intCast(c_long, ft64 % ft_per_s) * 100, + .tv_sec = @as(i64, @intCast(ft64 / ft_per_s)) + std.time.epoch.windows, + .tv_nsec = @as(c_long, @intCast(ft64 % ft_per_s)) * 100, }; return; } else { @@ -5474,10 +5474,10 @@ pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void { pub fn clock_getres(clk_id: i32, res: *timespec) ClockGetTimeError!void { if (builtin.os.tag == .wasi and !builtin.link_libc) { var ts: timestamp_t = undefined; - switch 
(system.clock_res_get(@bitCast(u32, clk_id), &ts)) { + switch (system.clock_res_get(@as(u32, @bitCast(clk_id)), &ts)) { .SUCCESS => res.* = .{ - .tv_sec = @intCast(i64, ts / std.time.ns_per_s), - .tv_nsec = @intCast(isize, ts % std.time.ns_per_s), + .tv_sec = @as(i64, @intCast(ts / std.time.ns_per_s)), + .tv_nsec = @as(isize, @intCast(ts % std.time.ns_per_s)), }, .INVAL => return error.UnsupportedClock, else => |err| return unexpectedErrno(err), @@ -5747,7 +5747,7 @@ pub fn res_mkquery( // TODO determine the circumstances for this and whether or // not this should be an error. if (j - i - 1 > 62) unreachable; - q[i - 1] = @intCast(u8, j - i); + q[i - 1] = @as(u8, @intCast(j - i)); } q[i + 1] = ty; q[i + 3] = class; @@ -5756,10 +5756,10 @@ pub fn res_mkquery( var ts: timespec = undefined; clock_gettime(CLOCK.REALTIME, &ts) catch {}; const UInt = std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(ts.tv_nsec))); - const unsec = @bitCast(UInt, ts.tv_nsec); - const id = @truncate(u32, unsec + unsec / 65536); - q[0] = @truncate(u8, id / 256); - q[1] = @truncate(u8, id); + const unsec = @as(UInt, @bitCast(ts.tv_nsec)); + const id = @as(u32, @truncate(unsec + unsec / 65536)); + q[0] = @as(u8, @truncate(id / 256)); + q[1] = @as(u8, @truncate(id)); @memcpy(buf[0..n], q[0..n]); return n; @@ -5865,11 +5865,11 @@ pub fn sendmsg( else => |err| return windows.unexpectedWSAError(err), } } else { - return @intCast(usize, rc); + return @as(usize, @intCast(rc)); } } else { switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .ACCES => return error.AccessDenied, .AGAIN => return error.WouldBlock, @@ -5965,13 +5965,13 @@ pub fn sendto( .WSANOTINITIALISED => unreachable, // A successful WSAStartup call must occur before using this function. else => |err| return windows.unexpectedWSAError(err), }, - else => |rc| return @intCast(usize, rc), + else => |rc| return @as(usize, @intCast(rc)), } } while (true) { const rc = system.sendto(sockfd, buf.ptr, buf.len, flags, dest_addr, addrlen); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .ACCES => return error.AccessDenied, .AGAIN => return error.WouldBlock, @@ -6125,16 +6125,16 @@ pub fn sendfile( // Here we match BSD behavior, making a zero count value send as many bytes as possible. const adjusted_count_tmp = if (in_len == 0) max_count else @min(in_len, @as(size_t, max_count)); // TODO we should not need this cast; improve return type of @min - const adjusted_count = @intCast(usize, adjusted_count_tmp); + const adjusted_count = @as(usize, @intCast(adjusted_count_tmp)); const sendfile_sym = if (lfs64_abi) system.sendfile64 else system.sendfile; while (true) { - var offset: off_t = @bitCast(off_t, in_offset); + var offset: off_t = @as(off_t, @bitCast(in_offset)); const rc = sendfile_sym(out_fd, in_fd, &offset, adjusted_count); switch (errno(rc)) { .SUCCESS => { - const amt = @bitCast(usize, rc); + const amt = @as(usize, @bitCast(rc)); total_written += amt; if (in_len == 0 and amt == 0) { // We have detected EOF from `in_fd`. 
@@ -6209,9 +6209,9 @@ pub fn sendfile( while (true) { var sbytes: off_t = undefined; - const offset = @bitCast(off_t, in_offset); + const offset = @as(off_t, @bitCast(in_offset)); const err = errno(system.sendfile(in_fd, out_fd, offset, adjusted_count, hdtr, &sbytes, flags)); - const amt = @bitCast(usize, sbytes); + const amt = @as(usize, @bitCast(sbytes)); switch (err) { .SUCCESS => return amt, @@ -6286,13 +6286,13 @@ pub fn sendfile( const adjusted_count_temporary = @min(in_len, @as(u63, max_count)); // TODO we should not need this int cast; improve the return type of `@min` - const adjusted_count = @intCast(u63, adjusted_count_temporary); + const adjusted_count = @as(u63, @intCast(adjusted_count_temporary)); while (true) { var sbytes: off_t = adjusted_count; - const signed_offset = @bitCast(i64, in_offset); + const signed_offset = @as(i64, @bitCast(in_offset)); const err = errno(system.sendfile(in_fd, out_fd, signed_offset, &sbytes, hdtr, flags)); - const amt = @bitCast(usize, sbytes); + const amt = @as(usize, @bitCast(sbytes)); switch (err) { .SUCCESS => return amt, @@ -6342,7 +6342,7 @@ pub fn sendfile( // Here we match BSD behavior, making a zero count value send as many bytes as possible. const adjusted_count_tmp = if (in_len == 0) buf.len else @min(buf.len, in_len); // TODO we should not need this cast; improve return type of @min - const adjusted_count = @intCast(usize, adjusted_count_tmp); + const adjusted_count = @as(usize, @intCast(adjusted_count_tmp)); const amt_read = try pread(in_fd, buf[0..adjusted_count], in_offset); if (amt_read == 0) { if (in_len == 0) { @@ -6413,14 +6413,14 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 }).ok) and has_copy_file_range_syscall.load(.Monotonic))) { - var off_in_copy = @bitCast(i64, off_in); - var off_out_copy = @bitCast(i64, off_out); + var off_in_copy = @as(i64, @bitCast(off_in)); + var off_out_copy = @as(i64, @bitCast(off_out)); while (true) { const rc = system.copy_file_range(fd_in, &off_in_copy, fd_out, &off_out_copy, len, flags); if (builtin.os.tag == .freebsd) { switch (system.getErrno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .BADF => return error.FilesOpenedWithWrongFlags, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, @@ -6433,7 +6433,7 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len } } else { // assume linux switch (system.getErrno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .BADF => return error.FilesOpenedWithWrongFlags, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, @@ -6486,11 +6486,11 @@ pub fn poll(fds: []pollfd, timeout: i32) PollError!usize { else => |err| return windows.unexpectedWSAError(err), } } else { - return @intCast(usize, rc); + return @as(usize, @intCast(rc)); } } else { switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .FAULT => unreachable, .INTR => continue, .INVAL => unreachable, @@ -6520,7 +6520,7 @@ pub fn ppoll(fds: []pollfd, timeout: ?*const timespec, mask: ?*const sigset_t) P const fds_count = math.cast(nfds_t, fds.len) orelse return error.SystemResources; const rc = system.ppoll(fds.ptr, fds_count, ts_ptr, mask); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .FAULT => unreachable, .INTR => return 
error.SignalInterrupt, .INVAL => unreachable, @@ -6585,11 +6585,11 @@ pub fn recvfrom( else => |err| return windows.unexpectedWSAError(err), } } else { - return @intCast(usize, rc); + return @as(usize, @intCast(rc)); } } else { switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .BADF => unreachable, // always a race condition .FAULT => unreachable, .INVAL => unreachable, @@ -6681,7 +6681,7 @@ pub const SetSockOptError = error{ /// Set a socket's options. pub fn setsockopt(fd: socket_t, level: u32, optname: u32, opt: []const u8) SetSockOptError!void { if (builtin.os.tag == .windows) { - const rc = windows.ws2_32.setsockopt(fd, @intCast(i32, level), @intCast(i32, optname), opt.ptr, @intCast(i32, opt.len)); + const rc = windows.ws2_32.setsockopt(fd, @as(i32, @intCast(level)), @as(i32, @intCast(optname)), opt.ptr, @as(i32, @intCast(opt.len))); if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, @@ -6694,7 +6694,7 @@ pub fn setsockopt(fd: socket_t, level: u32, optname: u32, opt: []const u8) SetSo } return; } else { - switch (errno(system.setsockopt(fd, level, optname, opt.ptr, @intCast(socklen_t, opt.len)))) { + switch (errno(system.setsockopt(fd, level, optname, opt.ptr, @as(socklen_t, @intCast(opt.len))))) { .SUCCESS => {}, .BADF => unreachable, // always a race condition .NOTSOCK => unreachable, // always a race condition @@ -6731,7 +6731,7 @@ pub fn memfd_createZ(name: [*:0]const u8, flags: u32) MemFdCreateError!fd_t { const getErrno = if (use_c) std.c.getErrno else linux.getErrno; const rc = sys.memfd_create(name, flags); switch (getErrno(rc)) { - .SUCCESS => return @intCast(fd_t, rc), + .SUCCESS => return @as(fd_t, @intCast(rc)), .FAULT => unreachable, // name has invalid memory .INVAL => unreachable, // name/flags are faulty .NFILE => return error.SystemFdQuotaExceeded, @@ -6881,7 +6881,7 @@ pub fn ioctl_SIOCGIFINDEX(fd: fd_t, ifr: *ifreq) IoCtl_SIOCGIFINDEX_Error!void { pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t { const rc = system.signalfd(fd, mask, flags); switch (errno(rc)) { - .SUCCESS => return @intCast(fd_t, rc), + .SUCCESS => return @as(fd_t, @intCast(rc)), .BADF, .INVAL => unreachable, .NFILE => return error.SystemFdQuotaExceeded, .NOMEM => return error.SystemResources, @@ -6989,7 +6989,7 @@ pub fn prctl(option: PR, args: anytype) PrctlError!u31 { const rc = system.prctl(@intFromEnum(option), buf[0], buf[1], buf[2], buf[3]); switch (errno(rc)) { - .SUCCESS => return @intCast(u31, rc), + .SUCCESS => return @as(u31, @intCast(rc)), .ACCES => return error.AccessDenied, .BADF => return error.InvalidFileDescriptor, .FAULT => return error.InvalidAddress, @@ -7170,7 +7170,7 @@ pub fn perf_event_open( ) PerfEventOpenError!fd_t { const rc = system.perf_event_open(attr, pid, cpu, group_fd, flags); switch (errno(rc)) { - .SUCCESS => return @intCast(fd_t, rc), + .SUCCESS => return @as(fd_t, @intCast(rc)), .@"2BIG" => return error.TooBig, .ACCES => return error.PermissionDenied, .BADF => unreachable, // group_fd file descriptor is not valid. 
@@ -7205,7 +7205,7 @@ pub const TimerFdSetError = TimerFdGetError || error{Canceled}; pub fn timerfd_create(clokid: i32, flags: u32) TimerFdCreateError!fd_t { var rc = linux.timerfd_create(clokid, flags); return switch (errno(rc)) { - .SUCCESS => @intCast(fd_t, rc), + .SUCCESS => @as(fd_t, @intCast(rc)), .INVAL => unreachable, .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, @@ -7267,7 +7267,7 @@ pub fn ptrace(request: u32, pid: pid_t, addr: usize, signal: usize) PtraceError! .macos, .ios, .tvos, .watchos => switch (errno(darwin.ptrace( math.cast(i32, request) orelse return error.Overflow, pid, - @ptrFromInt(?[*]u8, addr), + @as(?[*]u8, @ptrFromInt(addr)), math.cast(i32, signal) orelse return error.Overflow, ))) { .SUCCESS => {}, diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index b7ec29383b..6362e9ece1 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -175,62 +175,62 @@ const require_aligned_register_pair = // Split a 64bit value into a {LSB,MSB} pair. // The LE/BE variants specify the endianness to assume. fn splitValueLE64(val: i64) [2]u32 { - const u = @bitCast(u64, val); + const u = @as(u64, @bitCast(val)); return [2]u32{ - @truncate(u32, u), - @truncate(u32, u >> 32), + @as(u32, @truncate(u)), + @as(u32, @truncate(u >> 32)), }; } fn splitValueBE64(val: i64) [2]u32 { - const u = @bitCast(u64, val); + const u = @as(u64, @bitCast(val)); return [2]u32{ - @truncate(u32, u >> 32), - @truncate(u32, u), + @as(u32, @truncate(u >> 32)), + @as(u32, @truncate(u)), }; } fn splitValue64(val: i64) [2]u32 { - const u = @bitCast(u64, val); + const u = @as(u64, @bitCast(val)); switch (native_endian) { .Little => return [2]u32{ - @truncate(u32, u), - @truncate(u32, u >> 32), + @as(u32, @truncate(u)), + @as(u32, @truncate(u >> 32)), }, .Big => return [2]u32{ - @truncate(u32, u >> 32), - @truncate(u32, u), + @as(u32, @truncate(u >> 32)), + @as(u32, @truncate(u)), }, } } /// Get the errno from a syscall return value, or 0 for no error. 
pub fn getErrno(r: usize) E { - const signed_r = @bitCast(isize, r); + const signed_r = @as(isize, @bitCast(r)); const int = if (signed_r > -4096 and signed_r < 0) -signed_r else 0; - return @enumFromInt(E, int); + return @as(E, @enumFromInt(int)); } pub fn dup(old: i32) usize { - return syscall1(.dup, @bitCast(usize, @as(isize, old))); + return syscall1(.dup, @as(usize, @bitCast(@as(isize, old)))); } pub fn dup2(old: i32, new: i32) usize { if (@hasField(SYS, "dup2")) { - return syscall2(.dup2, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new))); + return syscall2(.dup2, @as(usize, @bitCast(@as(isize, old))), @as(usize, @bitCast(@as(isize, new)))); } else { if (old == new) { if (std.debug.runtime_safety) { - const rc = syscall2(.fcntl, @bitCast(usize, @as(isize, old)), F.GETFD); - if (@bitCast(isize, rc) < 0) return rc; + const rc = syscall2(.fcntl, @as(usize, @bitCast(@as(isize, old))), F.GETFD); + if (@as(isize, @bitCast(rc)) < 0) return rc; } - return @intCast(usize, old); + return @as(usize, @intCast(old)); } else { - return syscall3(.dup3, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new)), 0); + return syscall3(.dup3, @as(usize, @bitCast(@as(isize, old))), @as(usize, @bitCast(@as(isize, new))), 0); } } } pub fn dup3(old: i32, new: i32, flags: u32) usize { - return syscall3(.dup3, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new)), flags); + return syscall3(.dup3, @as(usize, @bitCast(@as(isize, old))), @as(usize, @bitCast(@as(isize, new))), flags); } pub fn chdir(path: [*:0]const u8) usize { @@ -238,7 +238,7 @@ pub fn chdir(path: [*:0]const u8) usize { } pub fn fchdir(fd: fd_t) usize { - return syscall1(.fchdir, @bitCast(usize, @as(isize, fd))); + return syscall1(.fchdir, @as(usize, @bitCast(@as(isize, fd)))); } pub fn chroot(path: [*:0]const u8) usize { @@ -273,7 +273,7 @@ pub fn futimens(fd: i32, times: *const [2]timespec) usize { } pub fn utimensat(dirfd: i32, path: ?[*:0]const u8, times: *const [2]timespec, flags: u32) usize { - return syscall4(.utimensat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(times), flags); + return syscall4(.utimensat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(times), flags); } pub fn fallocate(fd: i32, mode: i32, offset: i64, length: i64) usize { @@ -282,8 +282,8 @@ pub fn fallocate(fd: i32, mode: i32, offset: i64, length: i64) usize { const length_halves = splitValue64(length); return syscall6( .fallocate, - @bitCast(usize, @as(isize, fd)), - @bitCast(usize, @as(isize, mode)), + @as(usize, @bitCast(@as(isize, fd))), + @as(usize, @bitCast(@as(isize, mode))), offset_halves[0], offset_halves[1], length_halves[0], @@ -292,20 +292,20 @@ pub fn fallocate(fd: i32, mode: i32, offset: i64, length: i64) usize { } else { return syscall4( .fallocate, - @bitCast(usize, @as(isize, fd)), - @bitCast(usize, @as(isize, mode)), - @bitCast(u64, offset), - @bitCast(u64, length), + @as(usize, @bitCast(@as(isize, fd))), + @as(usize, @bitCast(@as(isize, mode))), + @as(u64, @bitCast(offset)), + @as(u64, @bitCast(length)), ); } } pub fn futex_wait(uaddr: *const i32, futex_op: u32, val: i32, timeout: ?*const timespec) usize { - return syscall4(.futex, @intFromPtr(uaddr), futex_op, @bitCast(u32, val), @intFromPtr(timeout)); + return syscall4(.futex, @intFromPtr(uaddr), futex_op, @as(u32, @bitCast(val)), @intFromPtr(timeout)); } pub fn futex_wake(uaddr: *const i32, futex_op: u32, val: i32) usize { - return syscall3(.futex, @intFromPtr(uaddr), futex_op, @bitCast(u32, val)); + return 
syscall3(.futex, @intFromPtr(uaddr), futex_op, @as(u32, @bitCast(val))); } pub fn getcwd(buf: [*]u8, size: usize) usize { @@ -315,7 +315,7 @@ pub fn getcwd(buf: [*]u8, size: usize) usize { pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize { return syscall3( .getdents, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(dirp), @min(len, maxInt(c_int)), ); @@ -324,7 +324,7 @@ pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize { pub fn getdents64(fd: i32, dirp: [*]u8, len: usize) usize { return syscall3( .getdents64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(dirp), @min(len, maxInt(c_int)), ); @@ -335,35 +335,35 @@ pub fn inotify_init1(flags: u32) usize { } pub fn inotify_add_watch(fd: i32, pathname: [*:0]const u8, mask: u32) usize { - return syscall3(.inotify_add_watch, @bitCast(usize, @as(isize, fd)), @intFromPtr(pathname), mask); + return syscall3(.inotify_add_watch, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(pathname), mask); } pub fn inotify_rm_watch(fd: i32, wd: i32) usize { - return syscall2(.inotify_rm_watch, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, wd))); + return syscall2(.inotify_rm_watch, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, wd)))); } pub fn readlink(noalias path: [*:0]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize { if (@hasField(SYS, "readlink")) { return syscall3(.readlink, @intFromPtr(path), @intFromPtr(buf_ptr), buf_len); } else { - return syscall4(.readlinkat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len); + return syscall4(.readlinkat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len); } } pub fn readlinkat(dirfd: i32, noalias path: [*:0]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize { - return syscall4(.readlinkat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len); + return syscall4(.readlinkat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len); } pub fn mkdir(path: [*:0]const u8, mode: u32) usize { if (@hasField(SYS, "mkdir")) { return syscall2(.mkdir, @intFromPtr(path), mode); } else { - return syscall3(.mkdirat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), mode); + return syscall3(.mkdirat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), mode); } } pub fn mkdirat(dirfd: i32, path: [*:0]const u8, mode: u32) usize { - return syscall3(.mkdirat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), mode); + return syscall3(.mkdirat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), mode); } pub fn mknod(path: [*:0]const u8, mode: u32, dev: u32) usize { @@ -375,7 +375,7 @@ pub fn mknod(path: [*:0]const u8, mode: u32, dev: u32) usize { } pub fn mknodat(dirfd: i32, path: [*:0]const u8, mode: u32, dev: u32) usize { - return syscall4(.mknodat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), mode, dev); + return syscall4(.mknodat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), mode, dev); } pub fn mount(special: [*:0]const u8, dir: [*:0]const u8, fstype: ?[*:0]const u8, flags: u32, data: usize) usize { @@ -394,7 +394,7 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, of if (@hasField(SYS, "mmap2")) { // Make sure the offset is also specified in multiples of page size if ((offset & (MMAP2_UNIT - 1)) != 0) - return @bitCast(usize, -@as(isize, @intFromEnum(E.INVAL))); 
+ return @as(usize, @bitCast(-@as(isize, @intFromEnum(E.INVAL)))); return syscall6( .mmap2, @@ -402,8 +402,8 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, of length, prot, flags, - @bitCast(usize, @as(isize, fd)), - @truncate(usize, @bitCast(u64, offset) / MMAP2_UNIT), + @as(usize, @bitCast(@as(isize, fd))), + @as(usize, @truncate(@as(u64, @bitCast(offset)) / MMAP2_UNIT)), ); } else { return syscall6( @@ -412,8 +412,8 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, of length, prot, flags, - @bitCast(usize, @as(isize, fd)), - @bitCast(u64, offset), + @as(usize, @bitCast(@as(isize, fd))), + @as(u64, @bitCast(offset)), ); } } @@ -429,7 +429,7 @@ pub const MSF = struct { }; pub fn msync(address: [*]const u8, length: usize, flags: i32) usize { - return syscall3(.msync, @intFromPtr(address), length, @bitCast(u32, flags)); + return syscall3(.msync, @intFromPtr(address), length, @as(u32, @bitCast(flags))); } pub fn munmap(address: [*]const u8, length: usize) usize { @@ -438,7 +438,7 @@ pub fn munmap(address: [*]const u8, length: usize) usize { pub fn poll(fds: [*]pollfd, n: nfds_t, timeout: i32) usize { if (@hasField(SYS, "poll")) { - return syscall3(.poll, @intFromPtr(fds), n, @bitCast(u32, timeout)); + return syscall3(.poll, @intFromPtr(fds), n, @as(u32, @bitCast(timeout))); } else { return syscall5( .ppoll, @@ -462,69 +462,69 @@ pub fn ppoll(fds: [*]pollfd, n: nfds_t, timeout: ?*timespec, sigmask: ?*const si } pub fn read(fd: i32, buf: [*]u8, count: usize) usize { - return syscall3(.read, @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), count); + return syscall3(.read, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count); } pub fn preadv(fd: i32, iov: [*]const iovec, count: usize, offset: i64) usize { - const offset_u = @bitCast(u64, offset); + const offset_u = @as(u64, @bitCast(offset)); return syscall5( .preadv, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count, // Kernel expects the offset is split into largest natural word-size. 
// See following link for detail: // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=601cc11d054ae4b5e9b5babec3d8e4667a2cb9b5 - @truncate(usize, offset_u), - if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0, + @as(usize, @truncate(offset_u)), + if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0, ); } pub fn preadv2(fd: i32, iov: [*]const iovec, count: usize, offset: i64, flags: kernel_rwf) usize { - const offset_u = @bitCast(u64, offset); + const offset_u = @as(u64, @bitCast(offset)); return syscall6( .preadv2, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count, // See comments in preadv - @truncate(usize, offset_u), - if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0, + @as(usize, @truncate(offset_u)), + if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0, flags, ); } pub fn readv(fd: i32, iov: [*]const iovec, count: usize) usize { - return syscall3(.readv, @bitCast(usize, @as(isize, fd)), @intFromPtr(iov), count); + return syscall3(.readv, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count); } pub fn writev(fd: i32, iov: [*]const iovec_const, count: usize) usize { - return syscall3(.writev, @bitCast(usize, @as(isize, fd)), @intFromPtr(iov), count); + return syscall3(.writev, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count); } pub fn pwritev(fd: i32, iov: [*]const iovec_const, count: usize, offset: i64) usize { - const offset_u = @bitCast(u64, offset); + const offset_u = @as(u64, @bitCast(offset)); return syscall5( .pwritev, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count, // See comments in preadv - @truncate(usize, offset_u), - if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0, + @as(usize, @truncate(offset_u)), + if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0, ); } pub fn pwritev2(fd: i32, iov: [*]const iovec_const, count: usize, offset: i64, flags: kernel_rwf) usize { - const offset_u = @bitCast(u64, offset); + const offset_u = @as(u64, @bitCast(offset)); return syscall6( .pwritev2, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count, // See comments in preadv - @truncate(usize, offset_u), - if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0, + @as(usize, @truncate(offset_u)), + if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0, flags, ); } @@ -533,7 +533,7 @@ pub fn rmdir(path: [*:0]const u8) usize { if (@hasField(SYS, "rmdir")) { return syscall1(.rmdir, @intFromPtr(path)); } else { - return syscall3(.unlinkat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), AT.REMOVEDIR); + return syscall3(.unlinkat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), AT.REMOVEDIR); } } @@ -541,12 +541,12 @@ pub fn symlink(existing: [*:0]const u8, new: [*:0]const u8) usize { if (@hasField(SYS, "symlink")) { return syscall2(.symlink, @intFromPtr(existing), @intFromPtr(new)); } else { - return syscall3(.symlinkat, @intFromPtr(existing), @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(new)); + return syscall3(.symlinkat, @intFromPtr(existing), @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(new)); } } pub fn symlinkat(existing: [*:0]const u8, newfd: i32, newpath: [*:0]const u8) usize { - return syscall3(.symlinkat, @intFromPtr(existing), @bitCast(usize, @as(isize, newfd)), @intFromPtr(newpath)); + return syscall3(.symlinkat, @intFromPtr(existing), @as(usize, 
@bitCast(@as(isize, newfd))), @intFromPtr(newpath)); } pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize { @@ -555,7 +555,7 @@ pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize { if (require_aligned_register_pair) { return syscall6( .pread64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count, 0, @@ -565,7 +565,7 @@ pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize { } else { return syscall5( .pread64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count, offset_halves[0], @@ -580,10 +580,10 @@ pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize { .pread; return syscall4( syscall_number, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count, - @bitCast(u64, offset), + @as(u64, @bitCast(offset)), ); } } @@ -592,12 +592,12 @@ pub fn access(path: [*:0]const u8, mode: u32) usize { if (@hasField(SYS, "access")) { return syscall2(.access, @intFromPtr(path), mode); } else { - return syscall4(.faccessat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), mode, 0); + return syscall4(.faccessat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), mode, 0); } } pub fn faccessat(dirfd: i32, path: [*:0]const u8, mode: u32, flags: u32) usize { - return syscall4(.faccessat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), mode, flags); + return syscall4(.faccessat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), mode, flags); } pub fn pipe(fd: *[2]i32) usize { @@ -615,7 +615,7 @@ pub fn pipe2(fd: *[2]i32, flags: u32) usize { } pub fn write(fd: i32, buf: [*]const u8, count: usize) usize { - return syscall3(.write, @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), count); + return syscall3(.write, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count); } pub fn ftruncate(fd: i32, length: i64) usize { @@ -624,7 +624,7 @@ pub fn ftruncate(fd: i32, length: i64) usize { if (require_aligned_register_pair) { return syscall4( .ftruncate64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), 0, length_halves[0], length_halves[1], @@ -632,7 +632,7 @@ pub fn ftruncate(fd: i32, length: i64) usize { } else { return syscall3( .ftruncate64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), length_halves[0], length_halves[1], ); @@ -640,8 +640,8 @@ pub fn ftruncate(fd: i32, length: i64) usize { } else { return syscall2( .ftruncate, - @bitCast(usize, @as(isize, fd)), - @bitCast(usize, length), + @as(usize, @bitCast(@as(isize, fd))), + @as(usize, @bitCast(length)), ); } } @@ -653,7 +653,7 @@ pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: i64) usize { if (require_aligned_register_pair) { return syscall6( .pwrite64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count, 0, @@ -663,7 +663,7 @@ pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: i64) usize { } else { return syscall5( .pwrite64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count, offset_halves[0], @@ -678,10 +678,10 @@ pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: i64) usize { .pwrite; return syscall4( syscall_number, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count, - @bitCast(u64, offset), + @as(u64, @bitCast(offset)), ); } } @@ -690,9 +690,9 @@ pub fn rename(old: [*:0]const u8, new: 
[*:0]const u8) usize { if (@hasField(SYS, "rename")) { return syscall2(.rename, @intFromPtr(old), @intFromPtr(new)); } else if (@hasField(SYS, "renameat")) { - return syscall4(.renameat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(old), @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(new)); + return syscall4(.renameat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(old), @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(new)); } else { - return syscall5(.renameat2, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(old), @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(new), 0); + return syscall5(.renameat2, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(old), @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(new), 0); } } @@ -700,17 +700,17 @@ pub fn renameat(oldfd: i32, oldpath: [*]const u8, newfd: i32, newpath: [*]const if (@hasField(SYS, "renameat")) { return syscall4( .renameat, - @bitCast(usize, @as(isize, oldfd)), + @as(usize, @bitCast(@as(isize, oldfd))), @intFromPtr(oldpath), - @bitCast(usize, @as(isize, newfd)), + @as(usize, @bitCast(@as(isize, newfd))), @intFromPtr(newpath), ); } else { return syscall5( .renameat2, - @bitCast(usize, @as(isize, oldfd)), + @as(usize, @bitCast(@as(isize, oldfd))), @intFromPtr(oldpath), - @bitCast(usize, @as(isize, newfd)), + @as(usize, @bitCast(@as(isize, newfd))), @intFromPtr(newpath), 0, ); @@ -720,9 +720,9 @@ pub fn renameat(oldfd: i32, oldpath: [*]const u8, newfd: i32, newpath: [*]const pub fn renameat2(oldfd: i32, oldpath: [*:0]const u8, newfd: i32, newpath: [*:0]const u8, flags: u32) usize { return syscall5( .renameat2, - @bitCast(usize, @as(isize, oldfd)), + @as(usize, @bitCast(@as(isize, oldfd))), @intFromPtr(oldpath), - @bitCast(usize, @as(isize, newfd)), + @as(usize, @bitCast(@as(isize, newfd))), @intFromPtr(newpath), flags, ); @@ -734,7 +734,7 @@ pub fn open(path: [*:0]const u8, flags: u32, perm: mode_t) usize { } else { return syscall4( .openat, - @bitCast(usize, @as(isize, AT.FDCWD)), + @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), flags, perm, @@ -748,7 +748,7 @@ pub fn create(path: [*:0]const u8, perm: mode_t) usize { pub fn openat(dirfd: i32, path: [*:0]const u8, flags: u32, mode: mode_t) usize { // dirfd could be negative, for example AT.FDCWD is -100 - return syscall4(.openat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), flags, mode); + return syscall4(.openat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), flags, mode); } /// See also `clone` (from the arch-specific include) @@ -762,11 +762,11 @@ pub fn clone2(flags: u32, child_stack_ptr: usize) usize { } pub fn close(fd: i32) usize { - return syscall1(.close, @bitCast(usize, @as(isize, fd))); + return syscall1(.close, @as(usize, @bitCast(@as(isize, fd)))); } pub fn fchmod(fd: i32, mode: mode_t) usize { - return syscall2(.fchmod, @bitCast(usize, @as(isize, fd)), mode); + return syscall2(.fchmod, @as(usize, @bitCast(@as(isize, fd))), mode); } pub fn chmod(path: [*:0]const u8, mode: mode_t) usize { @@ -775,7 +775,7 @@ pub fn chmod(path: [*:0]const u8, mode: mode_t) usize { } else { return syscall4( .fchmodat, - @bitCast(usize, @as(isize, AT.FDCWD)), + @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), mode, 0, @@ -785,14 +785,14 @@ pub fn chmod(path: [*:0]const u8, mode: mode_t) usize { pub fn fchown(fd: i32, owner: uid_t, group: gid_t) usize { if (@hasField(SYS, "fchown32")) { - return syscall3(.fchown32, @bitCast(usize, @as(isize, fd)), owner, group); + return syscall3(.fchown32, 
@as(usize, @bitCast(@as(isize, fd))), owner, group); } else { - return syscall3(.fchown, @bitCast(usize, @as(isize, fd)), owner, group); + return syscall3(.fchown, @as(usize, @bitCast(@as(isize, fd))), owner, group); } } pub fn fchmodat(fd: i32, path: [*:0]const u8, mode: mode_t, flags: u32) usize { - return syscall4(.fchmodat, @bitCast(usize, @as(isize, fd)), @intFromPtr(path), mode, flags); + return syscall4(.fchmodat, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(path), mode, flags); } /// Can only be called on 32 bit systems. For 64 bit see `lseek`. @@ -801,9 +801,9 @@ pub fn llseek(fd: i32, offset: u64, result: ?*u64, whence: usize) usize { // endianness. return syscall5( ._llseek, - @bitCast(usize, @as(isize, fd)), - @truncate(usize, offset >> 32), - @truncate(usize, offset), + @as(usize, @bitCast(@as(isize, fd))), + @as(usize, @truncate(offset >> 32)), + @as(usize, @truncate(offset)), @intFromPtr(result), whence, ); @@ -811,16 +811,16 @@ pub fn llseek(fd: i32, offset: u64, result: ?*u64, whence: usize) usize { /// Can only be called on 64 bit systems. For 32 bit see `llseek`. pub fn lseek(fd: i32, offset: i64, whence: usize) usize { - return syscall3(.lseek, @bitCast(usize, @as(isize, fd)), @bitCast(usize, offset), whence); + return syscall3(.lseek, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(offset)), whence); } pub fn exit(status: i32) noreturn { - _ = syscall1(.exit, @bitCast(usize, @as(isize, status))); + _ = syscall1(.exit, @as(usize, @bitCast(@as(isize, status)))); unreachable; } pub fn exit_group(status: i32) noreturn { - _ = syscall1(.exit_group, @bitCast(usize, @as(isize, status))); + _ = syscall1(.exit_group, @as(usize, @bitCast(@as(isize, status)))); unreachable; } @@ -886,15 +886,15 @@ pub fn getrandom(buf: [*]u8, count: usize, flags: u32) usize { } pub fn kill(pid: pid_t, sig: i32) usize { - return syscall2(.kill, @bitCast(usize, @as(isize, pid)), @bitCast(usize, @as(isize, sig))); + return syscall2(.kill, @as(usize, @bitCast(@as(isize, pid))), @as(usize, @bitCast(@as(isize, sig)))); } pub fn tkill(tid: pid_t, sig: i32) usize { - return syscall2(.tkill, @bitCast(usize, @as(isize, tid)), @bitCast(usize, @as(isize, sig))); + return syscall2(.tkill, @as(usize, @bitCast(@as(isize, tid))), @as(usize, @bitCast(@as(isize, sig)))); } pub fn tgkill(tgid: pid_t, tid: pid_t, sig: i32) usize { - return syscall3(.tgkill, @bitCast(usize, @as(isize, tgid)), @bitCast(usize, @as(isize, tid)), @bitCast(usize, @as(isize, sig))); + return syscall3(.tgkill, @as(usize, @bitCast(@as(isize, tgid))), @as(usize, @bitCast(@as(isize, tid))), @as(usize, @bitCast(@as(isize, sig)))); } pub fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) usize { @@ -903,16 +903,16 @@ pub fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) usize { .link, @intFromPtr(oldpath), @intFromPtr(newpath), - @bitCast(usize, @as(isize, flags)), + @as(usize, @bitCast(@as(isize, flags))), ); } else { return syscall5( .linkat, - @bitCast(usize, @as(isize, AT.FDCWD)), + @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(oldpath), - @bitCast(usize, @as(isize, AT.FDCWD)), + @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(newpath), - @bitCast(usize, @as(isize, flags)), + @as(usize, @bitCast(@as(isize, flags))), ); } } @@ -920,11 +920,11 @@ pub fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) usize { pub fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: i32) usize { return syscall5( .linkat, - @bitCast(usize, 
@as(isize, oldfd)), + @as(usize, @bitCast(@as(isize, oldfd))), @intFromPtr(oldpath), - @bitCast(usize, @as(isize, newfd)), + @as(usize, @bitCast(@as(isize, newfd))), @intFromPtr(newpath), - @bitCast(usize, @as(isize, flags)), + @as(usize, @bitCast(@as(isize, flags))), ); } @@ -932,22 +932,22 @@ pub fn unlink(path: [*:0]const u8) usize { if (@hasField(SYS, "unlink")) { return syscall1(.unlink, @intFromPtr(path)); } else { - return syscall3(.unlinkat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), 0); + return syscall3(.unlinkat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), 0); } } pub fn unlinkat(dirfd: i32, path: [*:0]const u8, flags: u32) usize { - return syscall3(.unlinkat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), flags); + return syscall3(.unlinkat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), flags); } pub fn waitpid(pid: pid_t, status: *u32, flags: u32) usize { - return syscall4(.wait4, @bitCast(usize, @as(isize, pid)), @intFromPtr(status), flags, 0); + return syscall4(.wait4, @as(usize, @bitCast(@as(isize, pid))), @intFromPtr(status), flags, 0); } pub fn wait4(pid: pid_t, status: *u32, flags: u32, usage: ?*rusage) usize { return syscall4( .wait4, - @bitCast(usize, @as(isize, pid)), + @as(usize, @bitCast(@as(isize, pid))), @intFromPtr(status), flags, @intFromPtr(usage), @@ -955,18 +955,18 @@ pub fn wait4(pid: pid_t, status: *u32, flags: u32, usage: ?*rusage) usize { } pub fn waitid(id_type: P, id: i32, infop: *siginfo_t, flags: u32) usize { - return syscall5(.waitid, @intFromEnum(id_type), @bitCast(usize, @as(isize, id)), @intFromPtr(infop), flags, 0); + return syscall5(.waitid, @intFromEnum(id_type), @as(usize, @bitCast(@as(isize, id))), @intFromPtr(infop), flags, 0); } pub fn fcntl(fd: fd_t, cmd: i32, arg: usize) usize { - return syscall3(.fcntl, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, cmd)), arg); + return syscall3(.fcntl, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, cmd))), arg); } pub fn flock(fd: fd_t, operation: i32) usize { - return syscall2(.flock, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, operation))); + return syscall2(.flock, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, operation)))); } -var vdso_clock_gettime = @ptrCast(?*const anyopaque, &init_vdso_clock_gettime); +var vdso_clock_gettime = @as(?*const anyopaque, @ptrCast(&init_vdso_clock_gettime)); // We must follow the C calling convention when we call into the VDSO const vdso_clock_gettime_ty = *align(1) const fn (i32, *timespec) callconv(.C) usize; @@ -975,36 +975,36 @@ pub fn clock_gettime(clk_id: i32, tp: *timespec) usize { if (@hasDecl(VDSO, "CGT_SYM")) { const ptr = @atomicLoad(?*const anyopaque, &vdso_clock_gettime, .Unordered); if (ptr) |fn_ptr| { - const f = @ptrCast(vdso_clock_gettime_ty, fn_ptr); + const f = @as(vdso_clock_gettime_ty, @ptrCast(fn_ptr)); const rc = f(clk_id, tp); switch (rc) { - 0, @bitCast(usize, -@as(isize, @intFromEnum(E.INVAL))) => return rc, + 0, @as(usize, @bitCast(-@as(isize, @intFromEnum(E.INVAL)))) => return rc, else => {}, } } } - return syscall2(.clock_gettime, @bitCast(usize, @as(isize, clk_id)), @intFromPtr(tp)); + return syscall2(.clock_gettime, @as(usize, @bitCast(@as(isize, clk_id))), @intFromPtr(tp)); } fn init_vdso_clock_gettime(clk: i32, ts: *timespec) callconv(.C) usize { - const ptr = @ptrFromInt(?*const anyopaque, vdso.lookup(VDSO.CGT_VER, VDSO.CGT_SYM)); + const ptr = @as(?*const anyopaque, @ptrFromInt(vdso.lookup(VDSO.CGT_VER, 
VDSO.CGT_SYM))); // Note that we may not have a VDSO at all, update the stub address anyway // so that clock_gettime will fall back on the good old (and slow) syscall @atomicStore(?*const anyopaque, &vdso_clock_gettime, ptr, .Monotonic); // Call into the VDSO if available if (ptr) |fn_ptr| { - const f = @ptrCast(vdso_clock_gettime_ty, fn_ptr); + const f = @as(vdso_clock_gettime_ty, @ptrCast(fn_ptr)); return f(clk, ts); } - return @bitCast(usize, -@as(isize, @intFromEnum(E.NOSYS))); + return @as(usize, @bitCast(-@as(isize, @intFromEnum(E.NOSYS)))); } pub fn clock_getres(clk_id: i32, tp: *timespec) usize { - return syscall2(.clock_getres, @bitCast(usize, @as(isize, clk_id)), @intFromPtr(tp)); + return syscall2(.clock_getres, @as(usize, @bitCast(@as(isize, clk_id))), @intFromPtr(tp)); } pub fn clock_settime(clk_id: i32, tp: *const timespec) usize { - return syscall2(.clock_settime, @bitCast(usize, @as(isize, clk_id)), @intFromPtr(tp)); + return syscall2(.clock_settime, @as(usize, @bitCast(@as(isize, clk_id))), @intFromPtr(tp)); } pub fn gettimeofday(tv: *timeval, tz: *timezone) usize { @@ -1053,33 +1053,33 @@ pub fn setregid(rgid: gid_t, egid: gid_t) usize { pub fn getuid() uid_t { if (@hasField(SYS, "getuid32")) { - return @intCast(uid_t, syscall0(.getuid32)); + return @as(uid_t, @intCast(syscall0(.getuid32))); } else { - return @intCast(uid_t, syscall0(.getuid)); + return @as(uid_t, @intCast(syscall0(.getuid))); } } pub fn getgid() gid_t { if (@hasField(SYS, "getgid32")) { - return @intCast(gid_t, syscall0(.getgid32)); + return @as(gid_t, @intCast(syscall0(.getgid32))); } else { - return @intCast(gid_t, syscall0(.getgid)); + return @as(gid_t, @intCast(syscall0(.getgid))); } } pub fn geteuid() uid_t { if (@hasField(SYS, "geteuid32")) { - return @intCast(uid_t, syscall0(.geteuid32)); + return @as(uid_t, @intCast(syscall0(.geteuid32))); } else { - return @intCast(uid_t, syscall0(.geteuid)); + return @as(uid_t, @intCast(syscall0(.geteuid))); } } pub fn getegid() gid_t { if (@hasField(SYS, "getegid32")) { - return @intCast(gid_t, syscall0(.getegid32)); + return @as(gid_t, @intCast(syscall0(.getegid32))); } else { - return @intCast(gid_t, syscall0(.getegid)); + return @as(gid_t, @intCast(syscall0(.getegid))); } } @@ -1154,11 +1154,11 @@ pub fn setgroups(size: usize, list: [*]const gid_t) usize { } pub fn getpid() pid_t { - return @bitCast(pid_t, @truncate(u32, syscall0(.getpid))); + return @as(pid_t, @bitCast(@as(u32, @truncate(syscall0(.getpid))))); } pub fn gettid() pid_t { - return @bitCast(pid_t, @truncate(u32, syscall0(.gettid))); + return @as(pid_t, @bitCast(@as(u32, @truncate(syscall0(.gettid))))); } pub fn sigprocmask(flags: u32, noalias set: ?*const sigset_t, noalias oldset: ?*sigset_t) usize { @@ -1182,9 +1182,9 @@ pub fn sigaction(sig: u6, noalias act: ?*const Sigaction, noalias oact: ?*Sigact .handler = new.handler.handler, .flags = new.flags | SA.RESTORER, .mask = undefined, - .restorer = @ptrCast(k_sigaction_funcs.restorer, restorer_fn), + .restorer = @as(k_sigaction_funcs.restorer, @ptrCast(restorer_fn)), }; - @memcpy(@ptrCast([*]u8, &ksa.mask)[0..mask_size], @ptrCast([*]const u8, &new.mask)); + @memcpy(@as([*]u8, @ptrCast(&ksa.mask))[0..mask_size], @as([*]const u8, @ptrCast(&new.mask))); } const ksa_arg = if (act != null) @intFromPtr(&ksa) else 0; @@ -1199,8 +1199,8 @@ pub fn sigaction(sig: u6, noalias act: ?*const Sigaction, noalias oact: ?*Sigact if (oact) |old| { old.handler.handler = oldksa.handler; - old.flags = @truncate(c_uint, oldksa.flags); - @memcpy(@ptrCast([*]u8, 
&old.mask)[0..mask_size], @ptrCast([*]const u8, &oldksa.mask)); + old.flags = @as(c_uint, @truncate(oldksa.flags)); + @memcpy(@as([*]u8, @ptrCast(&old.mask))[0..mask_size], @as([*]const u8, @ptrCast(&oldksa.mask))); } return 0; @@ -1211,28 +1211,28 @@ const usize_bits = @typeInfo(usize).Int.bits; pub fn sigaddset(set: *sigset_t, sig: u6) void { const s = sig - 1; // shift in musl: s&8*sizeof *set->__bits-1 - const shift = @intCast(u5, s & (usize_bits - 1)); - const val = @intCast(u32, 1) << shift; - (set.*)[@intCast(usize, s) / usize_bits] |= val; + const shift = @as(u5, @intCast(s & (usize_bits - 1))); + const val = @as(u32, @intCast(1)) << shift; + (set.*)[@as(usize, @intCast(s)) / usize_bits] |= val; } pub fn sigismember(set: *const sigset_t, sig: u6) bool { const s = sig - 1; - return ((set.*)[@intCast(usize, s) / usize_bits] & (@intCast(usize, 1) << (s & (usize_bits - 1)))) != 0; + return ((set.*)[@as(usize, @intCast(s)) / usize_bits] & (@as(usize, @intCast(1)) << (s & (usize_bits - 1)))) != 0; } pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize { if (native_arch == .x86) { - return socketcall(SC.getsockname, &[3]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len) }); + return socketcall(SC.getsockname, &[3]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len) }); } - return syscall3(.getsockname, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len)); + return syscall3(.getsockname, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len)); } pub fn getpeername(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize { if (native_arch == .x86) { - return socketcall(SC.getpeername, &[3]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len) }); + return socketcall(SC.getpeername, &[3]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len) }); } - return syscall3(.getpeername, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len)); + return syscall3(.getpeername, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len)); } pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize { @@ -1244,20 +1244,20 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize { pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: [*]const u8, optlen: socklen_t) usize { if (native_arch == .x86) { - return socketcall(SC.setsockopt, &[5]usize{ @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intCast(usize, optlen) }); + return socketcall(SC.setsockopt, &[5]usize{ @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @as(usize, @intCast(optlen)) }); } - return syscall5(.setsockopt, @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intCast(usize, optlen)); + return syscall5(.setsockopt, @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @as(usize, @intCast(optlen))); } pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: [*]u8, noalias optlen: *socklen_t) usize { if (native_arch == .x86) { - return socketcall(SC.getsockopt, &[5]usize{ @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intFromPtr(optlen) }); + return socketcall(SC.getsockopt, &[5]usize{ @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @intFromPtr(optlen) }); } - return syscall5(.getsockopt, @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), 
@intFromPtr(optlen)); + return syscall5(.getsockopt, @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @intFromPtr(optlen)); } pub fn sendmsg(fd: i32, msg: *const msghdr_const, flags: u32) usize { - const fd_usize = @bitCast(usize, @as(isize, fd)); + const fd_usize = @as(usize, @bitCast(@as(isize, fd))); const msg_usize = @intFromPtr(msg); if (native_arch == .x86) { return socketcall(SC.sendmsg, &[3]usize{ fd_usize, msg_usize, flags }); @@ -1275,13 +1275,13 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize var next_unsent: usize = 0; for (msgvec[0..kvlen], 0..) |*msg, i| { var size: i32 = 0; - const msg_iovlen = @intCast(usize, msg.msg_hdr.msg_iovlen); // kernel side this is treated as unsigned + const msg_iovlen = @as(usize, @intCast(msg.msg_hdr.msg_iovlen)); // kernel side this is treated as unsigned for (msg.msg_hdr.msg_iov[0..msg_iovlen]) |iov| { - if (iov.iov_len > std.math.maxInt(i32) or @addWithOverflow(size, @intCast(i32, iov.iov_len))[1] != 0) { + if (iov.iov_len > std.math.maxInt(i32) or @addWithOverflow(size, @as(i32, @intCast(iov.iov_len)))[1] != 0) { // batch-send all messages up to the current message if (next_unsent < i) { const batch_size = i - next_unsent; - const r = syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @intFromPtr(&msgvec[next_unsent]), batch_size, flags); + const r = syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(&msgvec[next_unsent]), batch_size, flags); if (getErrno(r) != 0) return next_unsent; if (r < batch_size) return next_unsent + r; } @@ -1289,7 +1289,7 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize const r = sendmsg(fd, &msg.msg_hdr, flags); if (getErrno(r) != 0) return r; // Linux limits the total bytes sent by sendmsg to INT_MAX, so this cast is safe. - msg.msg_len = @intCast(u32, r); + msg.msg_len = @as(u32, @intCast(r)); next_unsent = i + 1; break; } @@ -1297,17 +1297,17 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize } if (next_unsent < kvlen or next_unsent == 0) { // want to make sure at least one syscall occurs (e.g. 
to trigger MSG.EOR) const batch_size = kvlen - next_unsent; - const r = syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @intFromPtr(&msgvec[next_unsent]), batch_size, flags); + const r = syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(&msgvec[next_unsent]), batch_size, flags); if (getErrno(r) != 0) return r; return next_unsent + r; } return kvlen; } - return syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @intFromPtr(msgvec), vlen, flags); + return syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(msgvec), vlen, flags); } pub fn connect(fd: i32, addr: *const anyopaque, len: socklen_t) usize { - const fd_usize = @bitCast(usize, @as(isize, fd)); + const fd_usize = @as(usize, @bitCast(@as(isize, fd))); const addr_usize = @intFromPtr(addr); if (native_arch == .x86) { return socketcall(SC.connect, &[3]usize{ fd_usize, addr_usize, len }); @@ -1317,7 +1317,7 @@ pub fn connect(fd: i32, addr: *const anyopaque, len: socklen_t) usize { } pub fn recvmsg(fd: i32, msg: *msghdr, flags: u32) usize { - const fd_usize = @bitCast(usize, @as(isize, fd)); + const fd_usize = @as(usize, @bitCast(@as(isize, fd))); const msg_usize = @intFromPtr(msg); if (native_arch == .x86) { return socketcall(SC.recvmsg, &[3]usize{ fd_usize, msg_usize, flags }); @@ -1334,7 +1334,7 @@ pub fn recvfrom( noalias addr: ?*sockaddr, noalias alen: ?*socklen_t, ) usize { - const fd_usize = @bitCast(usize, @as(isize, fd)); + const fd_usize = @as(usize, @bitCast(@as(isize, fd))); const buf_usize = @intFromPtr(buf); const addr_usize = @intFromPtr(addr); const alen_usize = @intFromPtr(alen); @@ -1347,46 +1347,46 @@ pub fn recvfrom( pub fn shutdown(fd: i32, how: i32) usize { if (native_arch == .x86) { - return socketcall(SC.shutdown, &[2]usize{ @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, how)) }); + return socketcall(SC.shutdown, &[2]usize{ @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, how))) }); } - return syscall2(.shutdown, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, how))); + return syscall2(.shutdown, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, how)))); } pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize { if (native_arch == .x86) { - return socketcall(SC.bind, &[3]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intCast(usize, len) }); + return socketcall(SC.bind, &[3]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @as(usize, @intCast(len)) }); } - return syscall3(.bind, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intCast(usize, len)); + return syscall3(.bind, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @as(usize, @intCast(len))); } pub fn listen(fd: i32, backlog: u32) usize { if (native_arch == .x86) { - return socketcall(SC.listen, &[2]usize{ @bitCast(usize, @as(isize, fd)), backlog }); + return socketcall(SC.listen, &[2]usize{ @as(usize, @bitCast(@as(isize, fd))), backlog }); } - return syscall2(.listen, @bitCast(usize, @as(isize, fd)), backlog); + return syscall2(.listen, @as(usize, @bitCast(@as(isize, fd))), backlog); } pub fn sendto(fd: i32, buf: [*]const u8, len: usize, flags: u32, addr: ?*const sockaddr, alen: socklen_t) usize { if (native_arch == .x86) { - return socketcall(SC.sendto, &[6]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), len, flags, @intFromPtr(addr), @intCast(usize, alen) }); + return socketcall(SC.sendto, &[6]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), len, flags, @intFromPtr(addr), 
@as(usize, @intCast(alen)) }); } - return syscall6(.sendto, @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), len, flags, @intFromPtr(addr), @intCast(usize, alen)); + return syscall6(.sendto, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), len, flags, @intFromPtr(addr), @as(usize, @intCast(alen))); } pub fn sendfile(outfd: i32, infd: i32, offset: ?*i64, count: usize) usize { if (@hasField(SYS, "sendfile64")) { return syscall4( .sendfile64, - @bitCast(usize, @as(isize, outfd)), - @bitCast(usize, @as(isize, infd)), + @as(usize, @bitCast(@as(isize, outfd))), + @as(usize, @bitCast(@as(isize, infd))), @intFromPtr(offset), count, ); } else { return syscall4( .sendfile, - @bitCast(usize, @as(isize, outfd)), - @bitCast(usize, @as(isize, infd)), + @as(usize, @bitCast(@as(isize, outfd))), + @as(usize, @bitCast(@as(isize, infd))), @intFromPtr(offset), count, ); @@ -1395,9 +1395,9 @@ pub fn sendfile(outfd: i32, infd: i32, offset: ?*i64, count: usize) usize { pub fn socketpair(domain: i32, socket_type: i32, protocol: i32, fd: *[2]i32) usize { if (native_arch == .x86) { - return socketcall(SC.socketpair, &[4]usize{ @intCast(usize, domain), @intCast(usize, socket_type), @intCast(usize, protocol), @intFromPtr(fd) }); + return socketcall(SC.socketpair, &[4]usize{ @as(usize, @intCast(domain)), @as(usize, @intCast(socket_type)), @as(usize, @intCast(protocol)), @intFromPtr(fd) }); } - return syscall4(.socketpair, @intCast(usize, domain), @intCast(usize, socket_type), @intCast(usize, protocol), @intFromPtr(fd)); + return syscall4(.socketpair, @as(usize, @intCast(domain)), @as(usize, @intCast(socket_type)), @as(usize, @intCast(protocol)), @intFromPtr(fd)); } pub fn accept(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t) usize { @@ -1409,16 +1409,16 @@ pub fn accept(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t) usize pub fn accept4(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t, flags: u32) usize { if (native_arch == .x86) { - return socketcall(SC.accept4, &[4]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len), flags }); + return socketcall(SC.accept4, &[4]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len), flags }); } - return syscall4(.accept4, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len), flags); + return syscall4(.accept4, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len), flags); } pub fn fstat(fd: i32, stat_buf: *Stat) usize { if (@hasField(SYS, "fstat64")) { - return syscall2(.fstat64, @bitCast(usize, @as(isize, fd)), @intFromPtr(stat_buf)); + return syscall2(.fstat64, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(stat_buf)); } else { - return syscall2(.fstat, @bitCast(usize, @as(isize, fd)), @intFromPtr(stat_buf)); + return syscall2(.fstat, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(stat_buf)); } } @@ -1440,9 +1440,9 @@ pub fn lstat(pathname: [*:0]const u8, statbuf: *Stat) usize { pub fn fstatat(dirfd: i32, path: [*:0]const u8, stat_buf: *Stat, flags: u32) usize { if (@hasField(SYS, "fstatat64")) { - return syscall4(.fstatat64, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(stat_buf), flags); + return syscall4(.fstatat64, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(stat_buf), flags); } else { - return syscall4(.fstatat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(stat_buf), flags); + return syscall4(.fstatat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), 
@intFromPtr(stat_buf), flags); } } @@ -1450,14 +1450,14 @@ pub fn statx(dirfd: i32, path: [*]const u8, flags: u32, mask: u32, statx_buf: *S if (@hasField(SYS, "statx")) { return syscall5( .statx, - @bitCast(usize, @as(isize, dirfd)), + @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), flags, mask, @intFromPtr(statx_buf), ); } - return @bitCast(usize, -@as(isize, @intFromEnum(E.NOSYS))); + return @as(usize, @bitCast(-@as(isize, @intFromEnum(E.NOSYS)))); } pub fn listxattr(path: [*:0]const u8, list: [*]u8, size: usize) usize { @@ -1513,9 +1513,9 @@ pub fn sched_yield() usize { } pub fn sched_getaffinity(pid: pid_t, size: usize, set: *cpu_set_t) usize { - const rc = syscall3(.sched_getaffinity, @bitCast(usize, @as(isize, pid)), size, @intFromPtr(set)); - if (@bitCast(isize, rc) < 0) return rc; - if (rc < size) @memset(@ptrCast([*]u8, set)[rc..size], 0); + const rc = syscall3(.sched_getaffinity, @as(usize, @bitCast(@as(isize, pid))), size, @intFromPtr(set)); + if (@as(isize, @bitCast(rc)) < 0) return rc; + if (rc < size) @memset(@as([*]u8, @ptrCast(set))[rc..size], 0); return 0; } @@ -1526,18 +1526,18 @@ pub fn getcpu(cpu: *u32, node: *u32) usize { pub fn sched_getcpu() usize { var cpu: u32 = undefined; const rc = syscall3(.getcpu, @intFromPtr(&cpu), 0, 0); - if (@bitCast(isize, rc) < 0) return rc; - return @intCast(usize, cpu); + if (@as(isize, @bitCast(rc)) < 0) return rc; + return @as(usize, @intCast(cpu)); } /// libc has no wrapper for this syscall pub fn mbind(addr: ?*anyopaque, len: u32, mode: i32, nodemask: *const u32, maxnode: u32, flags: u32) usize { - return syscall6(.mbind, @intFromPtr(addr), len, @bitCast(usize, @as(isize, mode)), @intFromPtr(nodemask), maxnode, flags); + return syscall6(.mbind, @intFromPtr(addr), len, @as(usize, @bitCast(@as(isize, mode))), @intFromPtr(nodemask), maxnode, flags); } pub fn sched_setaffinity(pid: pid_t, size: usize, set: *const cpu_set_t) usize { - const rc = syscall3(.sched_setaffinity, @bitCast(usize, @as(isize, pid)), size, @intFromPtr(set)); - if (@bitCast(isize, rc) < 0) return rc; + const rc = syscall3(.sched_setaffinity, @as(usize, @bitCast(@as(isize, pid))), size, @intFromPtr(set)); + if (@as(isize, @bitCast(rc)) < 0) return rc; return 0; } @@ -1550,7 +1550,7 @@ pub fn epoll_create1(flags: usize) usize { } pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: ?*epoll_event) usize { - return syscall4(.epoll_ctl, @bitCast(usize, @as(isize, epoll_fd)), @intCast(usize, op), @bitCast(usize, @as(isize, fd)), @intFromPtr(ev)); + return syscall4(.epoll_ctl, @as(usize, @bitCast(@as(isize, epoll_fd))), @as(usize, @intCast(op)), @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(ev)); } pub fn epoll_wait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout: i32) usize { @@ -1560,10 +1560,10 @@ pub fn epoll_wait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout pub fn epoll_pwait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout: i32, sigmask: ?*const sigset_t) usize { return syscall6( .epoll_pwait, - @bitCast(usize, @as(isize, epoll_fd)), + @as(usize, @bitCast(@as(isize, epoll_fd))), @intFromPtr(events), - @intCast(usize, maxevents), - @bitCast(usize, @as(isize, timeout)), + @as(usize, @intCast(maxevents)), + @as(usize, @bitCast(@as(isize, timeout))), @intFromPtr(sigmask), @sizeOf(sigset_t), ); @@ -1574,7 +1574,7 @@ pub fn eventfd(count: u32, flags: u32) usize { } pub fn timerfd_create(clockid: i32, flags: u32) usize { - return syscall2(.timerfd_create, @bitCast(usize, @as(isize, clockid)), flags); + return 
syscall2(.timerfd_create, @as(usize, @bitCast(@as(isize, clockid))), flags); } pub const itimerspec = extern struct { @@ -1583,11 +1583,11 @@ pub const itimerspec = extern struct { }; pub fn timerfd_gettime(fd: i32, curr_value: *itimerspec) usize { - return syscall2(.timerfd_gettime, @bitCast(usize, @as(isize, fd)), @intFromPtr(curr_value)); + return syscall2(.timerfd_gettime, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(curr_value)); } pub fn timerfd_settime(fd: i32, flags: u32, new_value: *const itimerspec, old_value: ?*itimerspec) usize { - return syscall4(.timerfd_settime, @bitCast(usize, @as(isize, fd)), flags, @intFromPtr(new_value), @intFromPtr(old_value)); + return syscall4(.timerfd_settime, @as(usize, @bitCast(@as(isize, fd))), flags, @intFromPtr(new_value), @intFromPtr(old_value)); } pub const sigevent = extern struct { @@ -1609,8 +1609,8 @@ pub const timer_t = ?*anyopaque; pub fn timer_create(clockid: i32, sevp: *sigevent, timerid: *timer_t) usize { var t: timer_t = undefined; - const rc = syscall3(.timer_create, @bitCast(usize, @as(isize, clockid)), @intFromPtr(sevp), @intFromPtr(&t)); - if (@bitCast(isize, rc) < 0) return rc; + const rc = syscall3(.timer_create, @as(usize, @bitCast(@as(isize, clockid))), @intFromPtr(sevp), @intFromPtr(&t)); + if (@as(isize, @bitCast(rc)) < 0) return rc; timerid.* = t; return rc; } @@ -1624,7 +1624,7 @@ pub fn timer_gettime(timerid: timer_t, curr_value: *itimerspec) usize { } pub fn timer_settime(timerid: timer_t, flags: i32, new_value: *const itimerspec, old_value: ?*itimerspec) usize { - return syscall4(.timer_settime, @intFromPtr(timerid), @bitCast(usize, @as(isize, flags)), @intFromPtr(new_value), @intFromPtr(old_value)); + return syscall4(.timer_settime, @intFromPtr(timerid), @as(usize, @bitCast(@as(isize, flags))), @intFromPtr(new_value), @intFromPtr(old_value)); } // Flags for the 'setitimer' system call @@ -1635,11 +1635,11 @@ pub const ITIMER = enum(i32) { }; pub fn getitimer(which: i32, curr_value: *itimerspec) usize { - return syscall2(.getitimer, @bitCast(usize, @as(isize, which)), @intFromPtr(curr_value)); + return syscall2(.getitimer, @as(usize, @bitCast(@as(isize, which))), @intFromPtr(curr_value)); } pub fn setitimer(which: i32, new_value: *const itimerspec, old_value: ?*itimerspec) usize { - return syscall3(.setitimer, @bitCast(usize, @as(isize, which)), @intFromPtr(new_value), @intFromPtr(old_value)); + return syscall3(.setitimer, @as(usize, @bitCast(@as(isize, which))), @intFromPtr(new_value), @intFromPtr(old_value)); } pub fn unshare(flags: usize) usize { @@ -1667,11 +1667,11 @@ pub fn io_uring_setup(entries: u32, p: *io_uring_params) usize { } pub fn io_uring_enter(fd: i32, to_submit: u32, min_complete: u32, flags: u32, sig: ?*sigset_t) usize { - return syscall6(.io_uring_enter, @bitCast(usize, @as(isize, fd)), to_submit, min_complete, flags, @intFromPtr(sig), NSIG / 8); + return syscall6(.io_uring_enter, @as(usize, @bitCast(@as(isize, fd))), to_submit, min_complete, flags, @intFromPtr(sig), NSIG / 8); } pub fn io_uring_register(fd: i32, opcode: IORING_REGISTER, arg: ?*const anyopaque, nr_args: u32) usize { - return syscall4(.io_uring_register, @bitCast(usize, @as(isize, fd)), @intFromEnum(opcode), @intFromPtr(arg), nr_args); + return syscall4(.io_uring_register, @as(usize, @bitCast(@as(isize, fd))), @intFromEnum(opcode), @intFromPtr(arg), nr_args); } pub fn memfd_create(name: [*:0]const u8, flags: u32) usize { @@ -1679,43 +1679,43 @@ pub fn memfd_create(name: [*:0]const u8, flags: u32) usize { } pub fn getrusage(who: 
i32, usage: *rusage) usize { - return syscall2(.getrusage, @bitCast(usize, @as(isize, who)), @intFromPtr(usage)); + return syscall2(.getrusage, @as(usize, @bitCast(@as(isize, who))), @intFromPtr(usage)); } pub fn tcgetattr(fd: fd_t, termios_p: *termios) usize { - return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.CGETS, @intFromPtr(termios_p)); + return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.CGETS, @intFromPtr(termios_p)); } pub fn tcsetattr(fd: fd_t, optional_action: TCSA, termios_p: *const termios) usize { - return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.CSETS + @intFromEnum(optional_action), @intFromPtr(termios_p)); + return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.CSETS + @intFromEnum(optional_action), @intFromPtr(termios_p)); } pub fn tcgetpgrp(fd: fd_t, pgrp: *pid_t) usize { - return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.IOCGPGRP, @intFromPtr(pgrp)); + return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.IOCGPGRP, @intFromPtr(pgrp)); } pub fn tcsetpgrp(fd: fd_t, pgrp: *const pid_t) usize { - return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.IOCSPGRP, @intFromPtr(pgrp)); + return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.IOCSPGRP, @intFromPtr(pgrp)); } pub fn tcdrain(fd: fd_t) usize { - return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.CSBRK, 1); + return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.CSBRK, 1); } pub fn ioctl(fd: fd_t, request: u32, arg: usize) usize { - return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), request, arg); + return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), request, arg); } pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) usize { - return syscall4(.signalfd4, @bitCast(usize, @as(isize, fd)), @intFromPtr(mask), NSIG / 8, flags); + return syscall4(.signalfd4, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(mask), NSIG / 8, flags); } pub fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64, len: usize, flags: u32) usize { return syscall6( .copy_file_range, - @bitCast(usize, @as(isize, fd_in)), + @as(usize, @bitCast(@as(isize, fd_in))), @intFromPtr(off_in), - @bitCast(usize, @as(isize, fd_out)), + @as(usize, @bitCast(@as(isize, fd_out))), @intFromPtr(off_out), len, flags, @@ -1731,19 +1731,19 @@ pub fn sync() void { } pub fn syncfs(fd: fd_t) usize { - return syscall1(.syncfs, @bitCast(usize, @as(isize, fd))); + return syscall1(.syncfs, @as(usize, @bitCast(@as(isize, fd)))); } pub fn fsync(fd: fd_t) usize { - return syscall1(.fsync, @bitCast(usize, @as(isize, fd))); + return syscall1(.fsync, @as(usize, @bitCast(@as(isize, fd)))); } pub fn fdatasync(fd: fd_t) usize { - return syscall1(.fdatasync, @bitCast(usize, @as(isize, fd))); + return syscall1(.fdatasync, @as(usize, @bitCast(@as(isize, fd)))); } pub fn prctl(option: i32, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize { - return syscall5(.prctl, @bitCast(usize, @as(isize, option)), arg2, arg3, arg4, arg5); + return syscall5(.prctl, @as(usize, @bitCast(@as(isize, option))), arg2, arg3, arg4, arg5); } pub fn getrlimit(resource: rlimit_resource, rlim: *rlimit) usize { @@ -1759,8 +1759,8 @@ pub fn setrlimit(resource: rlimit_resource, rlim: *const rlimit) usize { pub fn prlimit(pid: pid_t, resource: rlimit_resource, new_limit: ?*const rlimit, old_limit: ?*rlimit) usize { return syscall4( .prlimit64, - @bitCast(usize, @as(isize, pid)), - @bitCast(usize, @as(isize, @intFromEnum(resource))), + @as(usize, @bitCast(@as(isize, 
pid))), + @as(usize, @bitCast(@as(isize, @intFromEnum(resource)))), @intFromPtr(new_limit), @intFromPtr(old_limit), ); @@ -1775,14 +1775,14 @@ pub fn madvise(address: [*]u8, len: usize, advice: u32) usize { } pub fn pidfd_open(pid: pid_t, flags: u32) usize { - return syscall2(.pidfd_open, @bitCast(usize, @as(isize, pid)), flags); + return syscall2(.pidfd_open, @as(usize, @bitCast(@as(isize, pid))), flags); } pub fn pidfd_getfd(pidfd: fd_t, targetfd: fd_t, flags: u32) usize { return syscall3( .pidfd_getfd, - @bitCast(usize, @as(isize, pidfd)), - @bitCast(usize, @as(isize, targetfd)), + @as(usize, @bitCast(@as(isize, pidfd))), + @as(usize, @bitCast(@as(isize, targetfd))), flags, ); } @@ -1790,8 +1790,8 @@ pub fn pidfd_getfd(pidfd: fd_t, targetfd: fd_t, flags: u32) usize { pub fn pidfd_send_signal(pidfd: fd_t, sig: i32, info: ?*siginfo_t, flags: u32) usize { return syscall4( .pidfd_send_signal, - @bitCast(usize, @as(isize, pidfd)), - @bitCast(usize, @as(isize, sig)), + @as(usize, @bitCast(@as(isize, pidfd))), + @as(usize, @bitCast(@as(isize, sig))), @intFromPtr(info), flags, ); @@ -1800,7 +1800,7 @@ pub fn pidfd_send_signal(pidfd: fd_t, sig: i32, info: ?*siginfo_t, flags: u32) u pub fn process_vm_readv(pid: pid_t, local: []iovec, remote: []const iovec_const, flags: usize) usize { return syscall6( .process_vm_readv, - @bitCast(usize, @as(isize, pid)), + @as(usize, @bitCast(@as(isize, pid))), @intFromPtr(local.ptr), local.len, @intFromPtr(remote.ptr), @@ -1812,7 +1812,7 @@ pub fn process_vm_readv(pid: pid_t, local: []iovec, remote: []const iovec_const, pub fn process_vm_writev(pid: pid_t, local: []const iovec_const, remote: []const iovec_const, flags: usize) usize { return syscall6( .process_vm_writev, - @bitCast(usize, @as(isize, pid)), + @as(usize, @bitCast(@as(isize, pid))), @intFromPtr(local.ptr), local.len, @intFromPtr(remote.ptr), @@ -1830,7 +1830,7 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize { return syscall7( .fadvise64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), 0, offset_halves[0], offset_halves[1], @@ -1846,7 +1846,7 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize { return syscall6( .fadvise64_64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), advice, offset_halves[0], offset_halves[1], @@ -1862,7 +1862,7 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize { return syscall6( .fadvise64_64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), offset_halves[0], offset_halves[1], length_halves[0], @@ -1872,9 +1872,9 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize { } else { return syscall4( .fadvise64, - @bitCast(usize, @as(isize, fd)), - @bitCast(usize, offset), - @bitCast(usize, len), + @as(usize, @bitCast(@as(isize, fd))), + @as(usize, @bitCast(offset)), + @as(usize, @bitCast(len)), advice, ); } @@ -1890,9 +1890,9 @@ pub fn perf_event_open( return syscall5( .perf_event_open, @intFromPtr(attr), - @bitCast(usize, @as(isize, pid)), - @bitCast(usize, @as(isize, cpu)), - @bitCast(usize, @as(isize, group_fd)), + @as(usize, @bitCast(@as(isize, pid))), + @as(usize, @bitCast(@as(isize, cpu))), + @as(usize, @bitCast(@as(isize, group_fd))), flags, ); } @@ -1911,7 +1911,7 @@ pub fn ptrace( return syscall5( .ptrace, req, - @bitCast(usize, @as(isize, pid)), + @as(usize, @bitCast(@as(isize, pid))), addr, data, addr2, @@ -2057,7 +2057,7 @@ pub const W = struct { pub const NOWAIT = 0x1000000; pub fn EXITSTATUS(s: u32) u8 { - 
return @intCast(u8, (s & 0xff00) >> 8); + return @as(u8, @intCast((s & 0xff00) >> 8)); } pub fn TERMSIG(s: u32) u32 { return s & 0x7f; @@ -2069,7 +2069,7 @@ pub const W = struct { return TERMSIG(s) == 0; } pub fn IFSTOPPED(s: u32) bool { - return @truncate(u16, ((s & 0xffff) *% 0x10001) >> 8) > 0x7f00; + return @as(u16, @truncate(((s & 0xffff) *% 0x10001) >> 8)) > 0x7f00; } pub fn IFSIGNALED(s: u32) bool { return (s & 0xffff) -% 1 < 0xff; @@ -2154,9 +2154,9 @@ pub const SIG = if (is_mips) struct { pub const SYS = 31; pub const UNUSED = SIG.SYS; - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); } else if (is_sparc) struct { pub const BLOCK = 1; pub const UNBLOCK = 2; @@ -2198,9 +2198,9 @@ pub const SIG = if (is_mips) struct { pub const PWR = LOST; pub const IO = SIG.POLL; - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); } else struct { pub const BLOCK = 0; pub const UNBLOCK = 1; @@ -2241,9 +2241,9 @@ pub const SIG = if (is_mips) struct { pub const SYS = 31; pub const UNUSED = SIG.SYS; - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); }; pub const kernel_rwf = u32; @@ -3541,7 +3541,7 @@ pub const CAP = struct { } pub fn TO_MASK(cap: u8) u32 { - return @as(u32, 1) << @intCast(u5, cap & 31); + return @as(u32, 1) << @as(u5, @intCast(cap & 31)); } pub fn TO_INDEX(cap: u8) u8 { @@ -3598,7 +3598,7 @@ pub const cpu_count_t = std.meta.Int(.unsigned, std.math.log2(CPU_SETSIZE * 8)); fn cpu_mask(s: usize) cpu_count_t { var x = s & (CPU_SETSIZE * 8); - return @intCast(cpu_count_t, 1) << @intCast(u4, x); + return @as(cpu_count_t, @intCast(1)) << @as(u4, @intCast(x)); } pub fn CPU_COUNT(set: cpu_set_t) cpu_count_t { @@ -3999,7 +3999,7 @@ pub const io_uring_cqe = extern struct { pub fn err(self: io_uring_cqe) E { if (self.res > -4096 and self.res < 0) { - return @enumFromInt(E, -self.res); + return @as(E, @enumFromInt(-self.res)); } return .SUCCESS; } diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig index 87b92587f9..751e5dc95a 100644 --- a/lib/std/os/linux/bpf.zig +++ b/lib/std/os/linux/bpf.zig @@ -643,7 +643,7 @@ pub const Insn = packed struct { .dst = @intFromEnum(dst), .src = @intFromEnum(src), .off = 0, - .imm = @intCast(i32, @truncate(u32, imm)), + .imm = @as(i32, @intCast(@as(u32, @truncate(imm)))), }; } @@ -653,7 +653,7 @@ pub const Insn = packed struct { .dst = 0, .src = 0, .off = 0, - .imm = @intCast(i32, @truncate(u32, imm >> 32)), + .imm = @as(i32, @intCast(@as(u32, @truncate(imm >> 32)))), }; } @@ -666,11 +666,11 @@ pub const Insn = packed struct { } pub fn ld_map_fd1(dst: Reg, map_fd: fd_t) Insn { - 
return ld_imm_impl1(dst, @enumFromInt(Reg, PSEUDO_MAP_FD), @intCast(u64, map_fd)); + return ld_imm_impl1(dst, @as(Reg, @enumFromInt(PSEUDO_MAP_FD)), @as(u64, @intCast(map_fd))); } pub fn ld_map_fd2(map_fd: fd_t) Insn { - return ld_imm_impl2(@intCast(u64, map_fd)); + return ld_imm_impl2(@as(u64, @intCast(map_fd))); } pub fn st(comptime size: Size, dst: Reg, off: i16, imm: i32) Insn { @@ -786,17 +786,17 @@ test "opcodes" { // TODO: byteswap instructions try expect_opcode(0xd4, Insn.le(.half_word, .r1)); - try expectEqual(@intCast(i32, 16), Insn.le(.half_word, .r1).imm); + try expectEqual(@as(i32, @intCast(16)), Insn.le(.half_word, .r1).imm); try expect_opcode(0xd4, Insn.le(.word, .r1)); - try expectEqual(@intCast(i32, 32), Insn.le(.word, .r1).imm); + try expectEqual(@as(i32, @intCast(32)), Insn.le(.word, .r1).imm); try expect_opcode(0xd4, Insn.le(.double_word, .r1)); - try expectEqual(@intCast(i32, 64), Insn.le(.double_word, .r1).imm); + try expectEqual(@as(i32, @intCast(64)), Insn.le(.double_word, .r1).imm); try expect_opcode(0xdc, Insn.be(.half_word, .r1)); - try expectEqual(@intCast(i32, 16), Insn.be(.half_word, .r1).imm); + try expectEqual(@as(i32, @intCast(16)), Insn.be(.half_word, .r1).imm); try expect_opcode(0xdc, Insn.be(.word, .r1)); - try expectEqual(@intCast(i32, 32), Insn.be(.word, .r1).imm); + try expectEqual(@as(i32, @intCast(32)), Insn.be(.word, .r1).imm); try expect_opcode(0xdc, Insn.be(.double_word, .r1)); - try expectEqual(@intCast(i32, 64), Insn.be(.double_word, .r1).imm); + try expectEqual(@as(i32, @intCast(64)), Insn.be(.double_word, .r1).imm); // memory instructions try expect_opcode(0x18, Insn.ld_dw1(.r1, 0)); @@ -804,7 +804,7 @@ test "opcodes" { // loading a map fd try expect_opcode(0x18, Insn.ld_map_fd1(.r1, 0)); - try expectEqual(@intCast(u4, PSEUDO_MAP_FD), Insn.ld_map_fd1(.r1, 0).src); + try expectEqual(@as(u4, @intCast(PSEUDO_MAP_FD)), Insn.ld_map_fd1(.r1, 0).src); try expect_opcode(0x00, Insn.ld_map_fd2(0)); try expect_opcode(0x38, Insn.ld_abs(.double_word, .r1, .r2, 0)); @@ -1518,7 +1518,7 @@ pub fn map_create(map_type: MapType, key_size: u32, value_size: u32, max_entries const rc = linux.bpf(.map_create, &attr, @sizeOf(MapCreateAttr)); switch (errno(rc)) { - .SUCCESS => return @intCast(fd_t, rc), + .SUCCESS => return @as(fd_t, @intCast(rc)), .INVAL => return error.MapTypeOrAttrInvalid, .NOMEM => return error.SystemResources, .PERM => return error.AccessDenied, @@ -1668,20 +1668,20 @@ pub fn prog_load( attr.prog_load.prog_type = @intFromEnum(prog_type); attr.prog_load.insns = @intFromPtr(insns.ptr); - attr.prog_load.insn_cnt = @intCast(u32, insns.len); + attr.prog_load.insn_cnt = @as(u32, @intCast(insns.len)); attr.prog_load.license = @intFromPtr(license.ptr); attr.prog_load.kern_version = kern_version; attr.prog_load.prog_flags = flags; if (log) |l| { attr.prog_load.log_buf = @intFromPtr(l.buf.ptr); - attr.prog_load.log_size = @intCast(u32, l.buf.len); + attr.prog_load.log_size = @as(u32, @intCast(l.buf.len)); attr.prog_load.log_level = l.level; } const rc = linux.bpf(.prog_load, &attr, @sizeOf(ProgLoadAttr)); return switch (errno(rc)) { - .SUCCESS => @intCast(fd_t, rc), + .SUCCESS => @as(fd_t, @intCast(rc)), .ACCES => error.UnsafeProgram, .FAULT => unreachable, .INVAL => error.InvalidProgram, diff --git a/lib/std/os/linux/bpf/helpers.zig b/lib/std/os/linux/bpf/helpers.zig index b26e7eda29..027220088e 100644 --- a/lib/std/os/linux/bpf/helpers.zig +++ b/lib/std/os/linux/bpf/helpers.zig @@ -11,147 +11,147 @@ const SkFullSock = @compileError("TODO missing os 
bits: SkFullSock"); // // Note, these function signatures were created from documentation found in // '/usr/include/linux/bpf.h' -pub const map_lookup_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque, 1); -pub const map_update_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long, 2); -pub const map_delete_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long, 3); -pub const probe_read = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 4); -pub const ktime_get_ns = @ptrFromInt(*const fn () u64, 5); -pub const trace_printk = @ptrFromInt(*const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, 6); -pub const get_prandom_u32 = @ptrFromInt(*const fn () u32, 7); -pub const get_smp_processor_id = @ptrFromInt(*const fn () u32, 8); -pub const skb_store_bytes = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long, 9); -pub const l3_csum_replace = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, 10); -pub const l4_csum_replace = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, 11); -pub const tail_call = @ptrFromInt(*const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long, 12); -pub const clone_redirect = @ptrFromInt(*const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long, 13); -pub const get_current_pid_tgid = @ptrFromInt(*const fn () u64, 14); -pub const get_current_uid_gid = @ptrFromInt(*const fn () u64, 15); -pub const get_current_comm = @ptrFromInt(*const fn (buf: ?*anyopaque, size_of_buf: u32) c_long, 16); -pub const get_cgroup_classid = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 17); +pub const map_lookup_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque, @ptrFromInt(1)); +pub const map_update_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(2)); +pub const map_delete_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long, @ptrFromInt(3)); +pub const probe_read = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(4)); +pub const ktime_get_ns = @as(*const fn () u64, @ptrFromInt(5)); +pub const trace_printk = @as(*const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, @ptrFromInt(6)); +pub const get_prandom_u32 = @as(*const fn () u32, @ptrFromInt(7)); +pub const get_smp_processor_id = @as(*const fn () u32, @ptrFromInt(8)); +pub const skb_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long, @ptrFromInt(9)); +pub const l3_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, @ptrFromInt(10)); +pub const l4_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, @ptrFromInt(11)); +pub const tail_call = @as(*const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(12)); +pub const clone_redirect = @as(*const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long, @ptrFromInt(13)); +pub const get_current_pid_tgid = @as(*const fn () u64, @ptrFromInt(14)); +pub const get_current_uid_gid = @as(*const fn () 
u64, @ptrFromInt(15)); +pub const get_current_comm = @as(*const fn (buf: ?*anyopaque, size_of_buf: u32) c_long, @ptrFromInt(16)); +pub const get_cgroup_classid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(17)); // Note vlan_proto is big endian -pub const skb_vlan_push = @ptrFromInt(*const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, 18); -pub const skb_vlan_pop = @ptrFromInt(*const fn (skb: *kern.SkBuff) c_long, 19); -pub const skb_get_tunnel_key = @ptrFromInt(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, 20); -pub const skb_set_tunnel_key = @ptrFromInt(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, 21); -pub const perf_event_read = @ptrFromInt(*const fn (map: *const kern.MapDef, flags: u64) u64, 22); -pub const redirect = @ptrFromInt(*const fn (ifindex: u32, flags: u64) c_long, 23); -pub const get_route_realm = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 24); -pub const perf_event_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 25); -pub const skb_load_bytes = @ptrFromInt(*const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long, 26); -pub const get_stackid = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long, 27); +pub const skb_vlan_push = @as(*const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, @ptrFromInt(18)); +pub const skb_vlan_pop = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(19)); +pub const skb_get_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(20)); +pub const skb_set_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(21)); +pub const perf_event_read = @as(*const fn (map: *const kern.MapDef, flags: u64) u64, @ptrFromInt(22)); +pub const redirect = @as(*const fn (ifindex: u32, flags: u64) c_long, @ptrFromInt(23)); +pub const get_route_realm = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(24)); +pub const perf_event_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(25)); +pub const skb_load_bytes = @as(*const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long, @ptrFromInt(26)); +pub const get_stackid = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long, @ptrFromInt(27)); // from and to point to __be32 -pub const csum_diff = @ptrFromInt(*const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, 28); -pub const skb_get_tunnel_opt = @ptrFromInt(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, 29); -pub const skb_set_tunnel_opt = @ptrFromInt(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, 30); +pub const csum_diff = @as(*const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, @ptrFromInt(28)); +pub const skb_get_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(29)); +pub const skb_set_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(30)); // proto is __be16 -pub const skb_change_proto = @ptrFromInt(*const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, 31); -pub const skb_change_type = @ptrFromInt(*const fn (skb: *kern.SkBuff, skb_type: u32) c_long, 32); -pub const skb_under_cgroup = @ptrFromInt(*const fn 
(skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long, 33); -pub const get_hash_recalc = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 34); -pub const get_current_task = @ptrFromInt(*const fn () u64, 35); -pub const probe_write_user = @ptrFromInt(*const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long, 36); -pub const current_task_under_cgroup = @ptrFromInt(*const fn (map: *const kern.MapDef, index: u32) c_long, 37); -pub const skb_change_tail = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, 38); -pub const skb_pull_data = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32) c_long, 39); -pub const csum_update = @ptrFromInt(*const fn (skb: *kern.SkBuff, csum: u32) i64, 40); -pub const set_hash_invalid = @ptrFromInt(*const fn (skb: *kern.SkBuff) void, 41); -pub const get_numa_node_id = @ptrFromInt(*const fn () c_long, 42); -pub const skb_change_head = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, 43); -pub const xdp_adjust_head = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 44); -pub const probe_read_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 45); -pub const get_socket_cookie = @ptrFromInt(*const fn (ctx: ?*anyopaque) u64, 46); -pub const get_socket_uid = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 47); -pub const set_hash = @ptrFromInt(*const fn (skb: *kern.SkBuff, hash: u32) c_long, 48); -pub const setsockopt = @ptrFromInt(*const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, 49); -pub const skb_adjust_room = @ptrFromInt(*const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, 50); -pub const redirect_map = @ptrFromInt(*const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, 51); -pub const sk_redirect_map = @ptrFromInt(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, 52); -pub const sock_map_update = @ptrFromInt(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 53); -pub const xdp_adjust_meta = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 54); -pub const perf_event_read_value = @ptrFromInt(*const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, 55); -pub const perf_prog_read_value = @ptrFromInt(*const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, 56); -pub const getsockopt = @ptrFromInt(*const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, 57); -pub const override_return = @ptrFromInt(*const fn (regs: *PtRegs, rc: u64) c_long, 58); -pub const sock_ops_cb_flags_set = @ptrFromInt(*const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, 59); -pub const msg_redirect_map = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, 60); -pub const msg_apply_bytes = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, 61); -pub const msg_cork_bytes = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, 62); -pub const msg_pull_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, 63); -pub const bind = @ptrFromInt(*const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, 64); -pub const xdp_adjust_tail = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 65); -pub const skb_get_xfrm_state = 
@ptrFromInt(*const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, 66); -pub const get_stack = @ptrFromInt(*const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, 67); -pub const skb_load_bytes_relative = @ptrFromInt(*const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long, 68); -pub const fib_lookup = @ptrFromInt(*const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, 69); -pub const sock_hash_update = @ptrFromInt(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 70); -pub const msg_redirect_hash = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 71); -pub const sk_redirect_hash = @ptrFromInt(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 72); -pub const lwt_push_encap = @ptrFromInt(*const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long, 73); -pub const lwt_seg6_store_bytes = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long, 74); -pub const lwt_seg6_adjust_srh = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, 75); -pub const lwt_seg6_action = @ptrFromInt(*const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long, 76); -pub const rc_repeat = @ptrFromInt(*const fn (ctx: ?*anyopaque) c_long, 77); -pub const rc_keydown = @ptrFromInt(*const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long, 78); -pub const skb_cgroup_id = @ptrFromInt(*const fn (skb: *kern.SkBuff) u64, 79); -pub const get_current_cgroup_id = @ptrFromInt(*const fn () u64, 80); -pub const get_local_storage = @ptrFromInt(*const fn (map: ?*anyopaque, flags: u64) ?*anyopaque, 81); -pub const sk_select_reuseport = @ptrFromInt(*const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 82); -pub const skb_ancestor_cgroup_id = @ptrFromInt(*const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64, 83); -pub const sk_lookup_tcp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 84); -pub const sk_lookup_udp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 85); -pub const sk_release = @ptrFromInt(*const fn (sock: *kern.Sock) c_long, 86); -pub const map_push_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long, 87); -pub const map_pop_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, 88); -pub const map_peek_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, 89); -pub const msg_push_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, 90); -pub const msg_pop_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, 91); -pub const rc_pointer_rel = @ptrFromInt(*const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long, 92); -pub const spin_lock = @ptrFromInt(*const fn (lock: *kern.SpinLock) c_long, 93); -pub const spin_unlock = @ptrFromInt(*const fn (lock: *kern.SpinLock) c_long, 94); -pub const sk_fullsock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*SkFullSock, 95); -pub const tcp_sock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*kern.TcpSock, 96); 
-pub const skb_ecn_set_ce = @ptrFromInt(*const fn (skb: *kern.SkBuff) c_long, 97); -pub const get_listener_sock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*kern.Sock, 98); -pub const skc_lookup_tcp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 99); -pub const tcp_check_syncookie = @ptrFromInt(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, 100); -pub const sysctl_get_name = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, 101); -pub const sysctl_get_current_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, 102); -pub const sysctl_get_new_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, 103); -pub const sysctl_set_new_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, 104); -pub const strtol = @ptrFromInt(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, 105); -pub const strtoul = @ptrFromInt(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, 106); -pub const sk_storage_get = @ptrFromInt(*const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque, 107); -pub const sk_storage_delete = @ptrFromInt(*const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, 108); -pub const send_signal = @ptrFromInt(*const fn (sig: u32) c_long, 109); -pub const tcp_gen_syncookie = @ptrFromInt(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64, 110); -pub const skb_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 111); -pub const probe_read_user = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 112); -pub const probe_read_kernel = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 113); -pub const probe_read_user_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 114); -pub const probe_read_kernel_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 115); -pub const tcp_send_ack = @ptrFromInt(*const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long, 116); -pub const send_signal_thread = @ptrFromInt(*const fn (sig: u32) c_long, 117); -pub const jiffies64 = @ptrFromInt(*const fn () u64, 118); -pub const read_branch_records = @ptrFromInt(*const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long, 119); -pub const get_ns_current_pid_tgid = @ptrFromInt(*const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, 120); -pub const xdp_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 121); -pub const get_netns_cookie = @ptrFromInt(*const fn (ctx: ?*anyopaque) u64, 122); -pub const get_current_ancestor_cgroup_id = @ptrFromInt(*const fn (ancestor_level: c_int) u64, 123); -pub const sk_assign = @ptrFromInt(*const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long, 124); -pub const ktime_get_boot_ns = @ptrFromInt(*const fn () u64, 125); -pub const seq_printf = @ptrFromInt(*const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long, 126); -pub const seq_write = @ptrFromInt(*const fn (m: *kern.SeqFile, data: ?*const 
u8, len: u32) c_long, 127); -pub const sk_cgroup_id = @ptrFromInt(*const fn (sk: *kern.BpfSock) u64, 128); -pub const sk_ancestor_cgroup_id = @ptrFromInt(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, 129); -pub const ringbuf_output = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, 130); -pub const ringbuf_reserve = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, 131); -pub const ringbuf_submit = @ptrFromInt(*const fn (data: ?*anyopaque, flags: u64) void, 132); -pub const ringbuf_discard = @ptrFromInt(*const fn (data: ?*anyopaque, flags: u64) void, 133); -pub const ringbuf_query = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, flags: u64) u64, 134); -pub const csum_level = @ptrFromInt(*const fn (skb: *kern.SkBuff, level: u64) c_long, 135); -pub const skc_to_tcp6_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock, 136); -pub const skc_to_tcp_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpSock, 137); -pub const skc_to_tcp_timewait_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock, 138); -pub const skc_to_tcp_request_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock, 139); -pub const skc_to_udp6_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.Udp6Sock, 140); -pub const get_task_stack = @ptrFromInt(*const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, 141); +pub const skb_change_proto = @as(*const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, @ptrFromInt(31)); +pub const skb_change_type = @as(*const fn (skb: *kern.SkBuff, skb_type: u32) c_long, @ptrFromInt(32)); +pub const skb_under_cgroup = @as(*const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long, @ptrFromInt(33)); +pub const get_hash_recalc = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(34)); +pub const get_current_task = @as(*const fn () u64, @ptrFromInt(35)); +pub const probe_write_user = @as(*const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long, @ptrFromInt(36)); +pub const current_task_under_cgroup = @as(*const fn (map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(37)); +pub const skb_change_tail = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(38)); +pub const skb_pull_data = @as(*const fn (skb: *kern.SkBuff, len: u32) c_long, @ptrFromInt(39)); +pub const csum_update = @as(*const fn (skb: *kern.SkBuff, csum: u32) i64, @ptrFromInt(40)); +pub const set_hash_invalid = @as(*const fn (skb: *kern.SkBuff) void, @ptrFromInt(41)); +pub const get_numa_node_id = @as(*const fn () c_long, @ptrFromInt(42)); +pub const skb_change_head = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(43)); +pub const xdp_adjust_head = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(44)); +pub const probe_read_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(45)); +pub const get_socket_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(46)); +pub const get_socket_uid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(47)); +pub const set_hash = @as(*const fn (skb: *kern.SkBuff, hash: u32) c_long, @ptrFromInt(48)); +pub const setsockopt = @as(*const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(49)); +pub const skb_adjust_room = @as(*const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, @ptrFromInt(50)); 
+pub const redirect_map = @as(*const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(51)); +pub const sk_redirect_map = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(52)); +pub const sock_map_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(53)); +pub const xdp_adjust_meta = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(54)); +pub const perf_event_read_value = @as(*const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(55)); +pub const perf_prog_read_value = @as(*const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(56)); +pub const getsockopt = @as(*const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(57)); +pub const override_return = @as(*const fn (regs: *PtRegs, rc: u64) c_long, @ptrFromInt(58)); +pub const sock_ops_cb_flags_set = @as(*const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, @ptrFromInt(59)); +pub const msg_redirect_map = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(60)); +pub const msg_apply_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(61)); +pub const msg_cork_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(62)); +pub const msg_pull_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, @ptrFromInt(63)); +pub const bind = @as(*const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, @ptrFromInt(64)); +pub const xdp_adjust_tail = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(65)); +pub const skb_get_xfrm_state = @as(*const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, @ptrFromInt(66)); +pub const get_stack = @as(*const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(67)); +pub const skb_load_bytes_relative = @as(*const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long, @ptrFromInt(68)); +pub const fib_lookup = @as(*const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, @ptrFromInt(69)); +pub const sock_hash_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(70)); +pub const msg_redirect_hash = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(71)); +pub const sk_redirect_hash = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(72)); +pub const lwt_push_encap = @as(*const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long, @ptrFromInt(73)); +pub const lwt_seg6_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long, @ptrFromInt(74)); +pub const lwt_seg6_adjust_srh = @as(*const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, @ptrFromInt(75)); +pub const lwt_seg6_action = @as(*const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long, @ptrFromInt(76)); +pub const rc_repeat = @as(*const fn (ctx: ?*anyopaque) c_long, @ptrFromInt(77)); +pub const rc_keydown = @as(*const fn (ctx: ?*anyopaque, protocol: u32, 
scancode: u64, toggle: u32) c_long, @ptrFromInt(78)); +pub const skb_cgroup_id = @as(*const fn (skb: *kern.SkBuff) u64, @ptrFromInt(79)); +pub const get_current_cgroup_id = @as(*const fn () u64, @ptrFromInt(80)); +pub const get_local_storage = @as(*const fn (map: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(81)); +pub const sk_select_reuseport = @as(*const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(82)); +pub const skb_ancestor_cgroup_id = @as(*const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64, @ptrFromInt(83)); +pub const sk_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(84)); +pub const sk_lookup_udp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(85)); +pub const sk_release = @as(*const fn (sock: *kern.Sock) c_long, @ptrFromInt(86)); +pub const map_push_elem = @as(*const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(87)); +pub const map_pop_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(88)); +pub const map_peek_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(89)); +pub const msg_push_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(90)); +pub const msg_pop_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(91)); +pub const rc_pointer_rel = @as(*const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long, @ptrFromInt(92)); +pub const spin_lock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(93)); +pub const spin_unlock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(94)); +pub const sk_fullsock = @as(*const fn (sk: *kern.Sock) ?*SkFullSock, @ptrFromInt(95)); +pub const tcp_sock = @as(*const fn (sk: *kern.Sock) ?*kern.TcpSock, @ptrFromInt(96)); +pub const skb_ecn_set_ce = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(97)); +pub const get_listener_sock = @as(*const fn (sk: *kern.Sock) ?*kern.Sock, @ptrFromInt(98)); +pub const skc_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(99)); +pub const tcp_check_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, @ptrFromInt(100)); +pub const sysctl_get_name = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, @ptrFromInt(101)); +pub const sysctl_get_current_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(102)); +pub const sysctl_get_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(103)); +pub const sysctl_set_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, @ptrFromInt(104)); +pub const strtol = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, @ptrFromInt(105)); +pub const strtoul = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, @ptrFromInt(106)); +pub const sk_storage_get = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(107)); +pub const sk_storage_delete = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, @ptrFromInt(108)); 
+pub const send_signal = @as(*const fn (sig: u32) c_long, @ptrFromInt(109)); +pub const tcp_gen_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64, @ptrFromInt(110)); +pub const skb_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(111)); +pub const probe_read_user = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(112)); +pub const probe_read_kernel = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(113)); +pub const probe_read_user_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(114)); +pub const probe_read_kernel_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(115)); +pub const tcp_send_ack = @as(*const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long, @ptrFromInt(116)); +pub const send_signal_thread = @as(*const fn (sig: u32) c_long, @ptrFromInt(117)); +pub const jiffies64 = @as(*const fn () u64, @ptrFromInt(118)); +pub const read_branch_records = @as(*const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(119)); +pub const get_ns_current_pid_tgid = @as(*const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, @ptrFromInt(120)); +pub const xdp_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(121)); +pub const get_netns_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(122)); +pub const get_current_ancestor_cgroup_id = @as(*const fn (ancestor_level: c_int) u64, @ptrFromInt(123)); +pub const sk_assign = @as(*const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long, @ptrFromInt(124)); +pub const ktime_get_boot_ns = @as(*const fn () u64, @ptrFromInt(125)); +pub const seq_printf = @as(*const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long, @ptrFromInt(126)); +pub const seq_write = @as(*const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, @ptrFromInt(127)); +pub const sk_cgroup_id = @as(*const fn (sk: *kern.BpfSock) u64, @ptrFromInt(128)); +pub const sk_ancestor_cgroup_id = @as(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, @ptrFromInt(129)); +pub const ringbuf_output = @as(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, @ptrFromInt(130)); +pub const ringbuf_reserve = @as(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, @ptrFromInt(131)); +pub const ringbuf_submit = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(132)); +pub const ringbuf_discard = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(133)); +pub const ringbuf_query = @as(*const fn (ringbuf: ?*anyopaque, flags: u64) u64, @ptrFromInt(134)); +pub const csum_level = @as(*const fn (skb: *kern.SkBuff, level: u64) c_long, @ptrFromInt(135)); +pub const skc_to_tcp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock, @ptrFromInt(136)); +pub const skc_to_tcp_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpSock, @ptrFromInt(137)); +pub const skc_to_tcp_timewait_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock, @ptrFromInt(138)); +pub const skc_to_tcp_request_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock, @ptrFromInt(139)); +pub const skc_to_udp6_sock = @as(*const fn (sk: 
?*anyopaque) ?*kern.Udp6Sock, @ptrFromInt(140)); +pub const get_task_stack = @as(*const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(141)); diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig index 875138cf4f..df8cd20773 100644 --- a/lib/std/os/linux/io_uring.zig +++ b/lib/std/os/linux/io_uring.zig @@ -60,7 +60,7 @@ pub const IO_Uring = struct { .NOSYS => return error.SystemOutdated, else => |errno| return os.unexpectedErrno(errno), } - const fd = @intCast(os.fd_t, res); + const fd = @as(os.fd_t, @intCast(res)); assert(fd >= 0); errdefer os.close(fd); @@ -198,7 +198,7 @@ pub const IO_Uring = struct { .INTR => return error.SignalInterrupt, else => |errno| return os.unexpectedErrno(errno), } - return @intCast(u32, res); + return @as(u32, @intCast(res)); } /// Sync internal state with kernel ring state on the SQ side. @@ -937,8 +937,8 @@ pub const IO_Uring = struct { const res = linux.io_uring_register( self.fd, .REGISTER_FILES, - @ptrCast(*const anyopaque, fds.ptr), - @intCast(u32, fds.len), + @as(*const anyopaque, @ptrCast(fds.ptr)), + @as(u32, @intCast(fds.len)), ); try handle_registration_result(res); } @@ -968,8 +968,8 @@ pub const IO_Uring = struct { const res = linux.io_uring_register( self.fd, .REGISTER_FILES_UPDATE, - @ptrCast(*const anyopaque, &update), - @intCast(u32, fds.len), + @as(*const anyopaque, @ptrCast(&update)), + @as(u32, @intCast(fds.len)), ); try handle_registration_result(res); } @@ -982,7 +982,7 @@ pub const IO_Uring = struct { const res = linux.io_uring_register( self.fd, .REGISTER_EVENTFD, - @ptrCast(*const anyopaque, &fd), + @as(*const anyopaque, @ptrCast(&fd)), 1, ); try handle_registration_result(res); @@ -997,7 +997,7 @@ pub const IO_Uring = struct { const res = linux.io_uring_register( self.fd, .REGISTER_EVENTFD_ASYNC, - @ptrCast(*const anyopaque, &fd), + @as(*const anyopaque, @ptrCast(&fd)), 1, ); try handle_registration_result(res); @@ -1022,7 +1022,7 @@ pub const IO_Uring = struct { self.fd, .REGISTER_BUFFERS, buffers.ptr, - @intCast(u32, buffers.len), + @as(u32, @intCast(buffers.len)), ); try handle_registration_result(res); } @@ -1122,20 +1122,17 @@ pub const SubmissionQueue = struct { errdefer os.munmap(mmap_sqes); assert(mmap_sqes.len == size_sqes); - const array = @ptrCast([*]u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.array])); - const sqes = @ptrCast([*]linux.io_uring_sqe, @alignCast(@alignOf(linux.io_uring_sqe), &mmap_sqes[0])); + const array: [*]u32 = @ptrCast(@alignCast(&mmap[p.sq_off.array])); + const sqes: [*]linux.io_uring_sqe = @ptrCast(@alignCast(&mmap_sqes[0])); // We expect the kernel copies p.sq_entries to the u32 pointed to by p.sq_off.ring_entries, // see https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L7843-L7844. 
- assert( - p.sq_entries == - @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_entries])).*, - ); + assert(p.sq_entries == @as(*u32, @ptrCast(@alignCast(&mmap[p.sq_off.ring_entries]))).*); return SubmissionQueue{ - .head = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.head])), - .tail = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.tail])), - .mask = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_mask])).*, - .flags = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.flags])), - .dropped = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.dropped])), + .head = @ptrCast(@alignCast(&mmap[p.sq_off.head])), + .tail = @ptrCast(@alignCast(&mmap[p.sq_off.tail])), + .mask = @as(*u32, @ptrCast(@alignCast(&mmap[p.sq_off.ring_mask]))).*, + .flags = @ptrCast(@alignCast(&mmap[p.sq_off.flags])), + .dropped = @ptrCast(@alignCast(&mmap[p.sq_off.dropped])), .array = array[0..p.sq_entries], .sqes = sqes[0..p.sq_entries], .mmap = mmap, @@ -1160,17 +1157,13 @@ pub const CompletionQueue = struct { assert(fd >= 0); assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0); const mmap = sq.mmap; - const cqes = @ptrCast( - [*]linux.io_uring_cqe, - @alignCast(@alignOf(linux.io_uring_cqe), &mmap[p.cq_off.cqes]), - ); - assert(p.cq_entries == - @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_entries])).*); + const cqes: [*]linux.io_uring_cqe = @ptrCast(@alignCast(&mmap[p.cq_off.cqes])); + assert(p.cq_entries == @as(*u32, @ptrCast(@alignCast(&mmap[p.cq_off.ring_entries]))).*); return CompletionQueue{ - .head = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.head])), - .tail = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.tail])), - .mask = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_mask])).*, - .overflow = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.overflow])), + .head = @ptrCast(@alignCast(&mmap[p.cq_off.head])), + .tail = @ptrCast(@alignCast(&mmap[p.cq_off.tail])), + .mask = @as(*u32, @ptrCast(@alignCast(&mmap[p.cq_off.ring_mask]))).*, + .overflow = @ptrCast(@alignCast(&mmap[p.cq_off.overflow])), .cqes = cqes[0..p.cq_entries], }; } @@ -1233,7 +1226,7 @@ pub fn io_uring_prep_rw( .fd = fd, .off = offset, .addr = addr, - .len = @intCast(u32, len), + .len = @as(u32, @intCast(len)), .rw_flags = 0, .user_data = 0, .buf_index = 0, @@ -1319,7 +1312,7 @@ pub fn io_uring_prep_epoll_ctl( op: u32, ev: ?*linux.epoll_event, ) void { - io_uring_prep_rw(.EPOLL_CTL, sqe, epfd, @intFromPtr(ev), op, @intCast(u64, fd)); + io_uring_prep_rw(.EPOLL_CTL, sqe, epfd, @intFromPtr(ev), op, @as(u64, @intCast(fd))); } pub fn io_uring_prep_recv(sqe: *linux.io_uring_sqe, fd: os.fd_t, buffer: []u8, flags: u32) void { @@ -1459,7 +1452,7 @@ pub fn io_uring_prep_fallocate( .fd = fd, .off = offset, .addr = len, - .len = @intCast(u32, mode), + .len = @as(u32, @intCast(mode)), .rw_flags = 0, .user_data = 0, .buf_index = 0, @@ -1514,7 +1507,7 @@ pub fn io_uring_prep_renameat( 0, @intFromPtr(new_path), ); - sqe.len = @bitCast(u32, new_dir_fd); + sqe.len = @as(u32, @bitCast(new_dir_fd)); sqe.rw_flags = flags; } @@ -1569,7 +1562,7 @@ pub fn io_uring_prep_linkat( 0, @intFromPtr(new_path), ); - sqe.len = @bitCast(u32, new_dir_fd); + sqe.len = @as(u32, @bitCast(new_dir_fd)); sqe.rw_flags = flags; } @@ -1582,8 +1575,8 @@ pub fn io_uring_prep_provide_buffers( buffer_id: usize, ) void { const ptr = @intFromPtr(buffers); - io_uring_prep_rw(.PROVIDE_BUFFERS, sqe, @intCast(i32, num), ptr, buffer_len, buffer_id); - sqe.buf_index = @intCast(u16, 
group_id); + io_uring_prep_rw(.PROVIDE_BUFFERS, sqe, @as(i32, @intCast(num)), ptr, buffer_len, buffer_id); + sqe.buf_index = @as(u16, @intCast(group_id)); } pub fn io_uring_prep_remove_buffers( @@ -1591,8 +1584,8 @@ pub fn io_uring_prep_remove_buffers( num: usize, group_id: usize, ) void { - io_uring_prep_rw(.REMOVE_BUFFERS, sqe, @intCast(i32, num), 0, 0, 0); - sqe.buf_index = @intCast(u16, group_id); + io_uring_prep_rw(.REMOVE_BUFFERS, sqe, @as(i32, @intCast(num)), 0, 0, 0); + sqe.buf_index = @as(u16, @intCast(group_id)); } test "structs/offsets/entries" { @@ -1886,12 +1879,12 @@ test "write_fixed/read_fixed" { try testing.expectEqual(linux.io_uring_cqe{ .user_data = 0x45454545, - .res = @intCast(i32, buffers[0].iov_len), + .res = @as(i32, @intCast(buffers[0].iov_len)), .flags = 0, }, cqe_write); try testing.expectEqual(linux.io_uring_cqe{ .user_data = 0x12121212, - .res = @intCast(i32, buffers[1].iov_len), + .res = @as(i32, @intCast(buffers[1].iov_len)), .flags = 0, }, cqe_read); @@ -2145,7 +2138,7 @@ test "timeout (after a relative time)" { }, cqe); // Tests should not depend on timings: skip test if outside margin. - if (!std.math.approxEqAbs(f64, ms, @floatFromInt(f64, stopped - started), margin)) return error.SkipZigTest; + if (!std.math.approxEqAbs(f64, ms, @as(f64, @floatFromInt(stopped - started)), margin)) return error.SkipZigTest; } test "timeout (after a number of completions)" { @@ -2637,7 +2630,7 @@ test "renameat" { ); try testing.expectEqual(linux.IORING_OP.RENAMEAT, sqe.opcode); try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd); - try testing.expectEqual(@as(i32, tmp.dir.fd), @bitCast(i32, sqe.len)); + try testing.expectEqual(@as(i32, tmp.dir.fd), @as(i32, @bitCast(sqe.len))); try testing.expectEqual(@as(u32, 1), try ring.submit()); const cqe = try ring.copy_cqe(); @@ -2850,7 +2843,7 @@ test "linkat" { ); try testing.expectEqual(linux.IORING_OP.LINKAT, sqe.opcode); try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd); - try testing.expectEqual(@as(i32, tmp.dir.fd), @bitCast(i32, sqe.len)); + try testing.expectEqual(@as(i32, tmp.dir.fd), @as(i32, @bitCast(sqe.len))); try testing.expectEqual(@as(u32, 1), try ring.submit()); const cqe = try ring.copy_cqe(); @@ -2898,7 +2891,7 @@ test "provide_buffers: read" { // Provide 4 buffers { - const sqe = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id); + const sqe = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id); try testing.expectEqual(linux.IORING_OP.PROVIDE_BUFFERS, sqe.opcode); try testing.expectEqual(@as(i32, buffers.len), sqe.fd); try testing.expectEqual(@as(u32, buffers[0].len), sqe.len); @@ -2939,7 +2932,7 @@ test "provide_buffers: read" { try testing.expectEqual(@as(i32, buffer_len), cqe.res); try testing.expectEqual(@as(u64, 0xdededede), cqe.user_data); - try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]); + try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]); } // This read should fail @@ -2971,7 +2964,7 @@ test "provide_buffers: read" { const reprovided_buffer_id = 2; { - _ = try ring.provide_buffers(0xabababab, @ptrCast([*]u8, &buffers[reprovided_buffer_id]), buffer_len, 1, group_id, reprovided_buffer_id); + _ = try ring.provide_buffers(0xabababab, @as([*]u8, @ptrCast(&buffers[reprovided_buffer_id])), buffer_len, 1, group_id, reprovided_buffer_id); try 
testing.expectEqual(@as(u32, 1), try ring.submit()); const cqe = try ring.copy_cqe(); @@ -3003,7 +2996,7 @@ test "provide_buffers: read" { try testing.expectEqual(used_buffer_id, reprovided_buffer_id); try testing.expectEqual(@as(i32, buffer_len), cqe.res); try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data); - try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]); + try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]); } } @@ -3030,7 +3023,7 @@ test "remove_buffers" { // Provide 4 buffers { - _ = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id); + _ = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id); try testing.expectEqual(@as(u32, 1), try ring.submit()); const cqe = try ring.copy_cqe(); @@ -3076,7 +3069,7 @@ test "remove_buffers" { try testing.expect(used_buffer_id >= 0 and used_buffer_id < 4); try testing.expectEqual(@as(i32, buffer_len), cqe.res); try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data); - try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]); + try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]); } // Final read should _not_ work @@ -3119,7 +3112,7 @@ test "provide_buffers: accept/connect/send/recv" { // Provide 4 buffers { - const sqe = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id); + const sqe = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id); try testing.expectEqual(linux.IORING_OP.PROVIDE_BUFFERS, sqe.opcode); try testing.expectEqual(@as(i32, buffers.len), sqe.fd); try testing.expectEqual(@as(u32, buffer_len), sqe.len); @@ -3181,7 +3174,7 @@ test "provide_buffers: accept/connect/send/recv" { try testing.expectEqual(@as(i32, buffer_len), cqe.res); try testing.expectEqual(@as(u64, 0xdededede), cqe.user_data); - const buffer = buffers[used_buffer_id][0..@intCast(usize, cqe.res)]; + const buffer = buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]; try testing.expectEqualSlices(u8, &([_]u8{'z'} ** buffer_len), buffer); } @@ -3213,7 +3206,7 @@ test "provide_buffers: accept/connect/send/recv" { const reprovided_buffer_id = 2; { - _ = try ring.provide_buffers(0xabababab, @ptrCast([*]u8, &buffers[reprovided_buffer_id]), buffer_len, 1, group_id, reprovided_buffer_id); + _ = try ring.provide_buffers(0xabababab, @as([*]u8, @ptrCast(&buffers[reprovided_buffer_id])), buffer_len, 1, group_id, reprovided_buffer_id); try testing.expectEqual(@as(u32, 1), try ring.submit()); const cqe = try ring.copy_cqe(); @@ -3259,7 +3252,7 @@ test "provide_buffers: accept/connect/send/recv" { try testing.expectEqual(used_buffer_id, reprovided_buffer_id); try testing.expectEqual(@as(i32, buffer_len), cqe.res); try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data); - const buffer = buffers[used_buffer_id][0..@intCast(usize, cqe.res)]; + const buffer = buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]; try testing.expectEqualSlices(u8, &([_]u8{'w'} ** buffer_len), buffer); } } diff --git a/lib/std/os/linux/ioctl.zig b/lib/std/os/linux/ioctl.zig index 96ec96c306..7f5d36b72d 100644 --- a/lib/std/os/linux/ioctl.zig +++ b/lib/std/os/linux/ioctl.zig @@ -32,7 
+32,7 @@ fn io_impl(dir: Direction, io_type: u8, nr: u8, comptime T: type) u32 { .io_type = io_type, .nr = nr, }; - return @bitCast(u32, request); + return @as(u32, @bitCast(request)); } pub fn IO(io_type: u8, nr: u8) u32 { diff --git a/lib/std/os/linux/start_pie.zig b/lib/std/os/linux/start_pie.zig index c9b1cb1e92..cf557f9d66 100644 --- a/lib/std/os/linux/start_pie.zig +++ b/lib/std/os/linux/start_pie.zig @@ -103,17 +103,17 @@ pub fn relocate(phdrs: []elf.Phdr) void { // Apply the relocations. if (rel_addr != 0) { - const rel = std.mem.bytesAsSlice(elf.Rel, @ptrFromInt([*]u8, rel_addr)[0..rel_size]); + const rel = std.mem.bytesAsSlice(elf.Rel, @as([*]u8, @ptrFromInt(rel_addr))[0..rel_size]); for (rel) |r| { if (r.r_type() != R_RELATIVE) continue; - @ptrFromInt(*usize, base_addr + r.r_offset).* += base_addr; + @as(*usize, @ptrFromInt(base_addr + r.r_offset)).* += base_addr; } } if (rela_addr != 0) { - const rela = std.mem.bytesAsSlice(elf.Rela, @ptrFromInt([*]u8, rela_addr)[0..rela_size]); + const rela = std.mem.bytesAsSlice(elf.Rela, @as([*]u8, @ptrFromInt(rela_addr))[0..rela_size]); for (rela) |r| { if (r.r_type() != R_RELATIVE) continue; - @ptrFromInt(*usize, base_addr + r.r_offset).* += base_addr + @bitCast(usize, r.r_addend); + @as(*usize, @ptrFromInt(base_addr + r.r_offset)).* += base_addr + @as(usize, @bitCast(r.r_addend)); } } } diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig index e1ad36b2e5..170bde6334 100644 --- a/lib/std/os/linux/test.zig +++ b/lib/std/os/linux/test.zig @@ -50,7 +50,7 @@ test "timer" { .it_value = time_interval, }; - err = linux.getErrno(linux.timerfd_settime(@intCast(i32, timer_fd), 0, &new_time, null)); + err = linux.getErrno(linux.timerfd_settime(@as(i32, @intCast(timer_fd)), 0, &new_time, null)); try expect(err == .SUCCESS); var event = linux.epoll_event{ @@ -58,13 +58,13 @@ test "timer" { .data = linux.epoll_data{ .ptr = 0 }, }; - err = linux.getErrno(linux.epoll_ctl(@intCast(i32, epoll_fd), linux.EPOLL.CTL_ADD, @intCast(i32, timer_fd), &event)); + err = linux.getErrno(linux.epoll_ctl(@as(i32, @intCast(epoll_fd)), linux.EPOLL.CTL_ADD, @as(i32, @intCast(timer_fd)), &event)); try expect(err == .SUCCESS); const events_one: linux.epoll_event = undefined; var events = [_]linux.epoll_event{events_one} ** 8; - err = linux.getErrno(linux.epoll_wait(@intCast(i32, epoll_fd), &events, 8, -1)); + err = linux.getErrno(linux.epoll_wait(@as(i32, @intCast(epoll_fd)), &events, 8, -1)); try expect(err == .SUCCESS); } @@ -91,11 +91,11 @@ test "statx" { } try expect(stat_buf.mode == statx_buf.mode); - try expect(@bitCast(u32, stat_buf.uid) == statx_buf.uid); - try expect(@bitCast(u32, stat_buf.gid) == statx_buf.gid); - try expect(@bitCast(u64, @as(i64, stat_buf.size)) == statx_buf.size); - try expect(@bitCast(u64, @as(i64, stat_buf.blksize)) == statx_buf.blksize); - try expect(@bitCast(u64, @as(i64, stat_buf.blocks)) == statx_buf.blocks); + try expect(@as(u32, @bitCast(stat_buf.uid)) == statx_buf.uid); + try expect(@as(u32, @bitCast(stat_buf.gid)) == statx_buf.gid); + try expect(@as(u64, @bitCast(@as(i64, stat_buf.size))) == statx_buf.size); + try expect(@as(u64, @bitCast(@as(i64, stat_buf.blksize))) == statx_buf.blksize); + try expect(@as(u64, @bitCast(@as(i64, stat_buf.blocks))) == statx_buf.blocks); } test "user and group ids" { diff --git a/lib/std/os/linux/tls.zig b/lib/std/os/linux/tls.zig index b60a2ed388..94fa0d1a09 100644 --- a/lib/std/os/linux/tls.zig +++ b/lib/std/os/linux/tls.zig @@ -205,7 +205,7 @@ fn initTLS(phdrs: []elf.Phdr) void { // the 
data stored in the PT_TLS segment is p_filesz and may be less // than the former tls_align_factor = phdr.p_align; - tls_data = @ptrFromInt([*]u8, img_base + phdr.p_vaddr)[0..phdr.p_filesz]; + tls_data = @as([*]u8, @ptrFromInt(img_base + phdr.p_vaddr))[0..phdr.p_filesz]; tls_data_alloc_size = phdr.p_memsz; } else { tls_align_factor = @alignOf(usize); @@ -263,12 +263,12 @@ fn initTLS(phdrs: []elf.Phdr) void { .dtv_offset = dtv_offset, .data_offset = data_offset, .data_size = tls_data_alloc_size, - .gdt_entry_number = @bitCast(usize, @as(isize, -1)), + .gdt_entry_number = @as(usize, @bitCast(@as(isize, -1))), }; } inline fn alignPtrCast(comptime T: type, ptr: [*]u8) *T { - return @ptrCast(*T, @alignCast(@alignOf(T), ptr)); + return @ptrCast(@alignCast(ptr)); } /// Initializes all the fields of the static TLS area and returns the computed diff --git a/lib/std/os/linux/vdso.zig b/lib/std/os/linux/vdso.zig index c7dc7ae599..50e7ce1dfd 100644 --- a/lib/std/os/linux/vdso.zig +++ b/lib/std/os/linux/vdso.zig @@ -8,7 +8,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { const vdso_addr = std.os.system.getauxval(std.elf.AT_SYSINFO_EHDR); if (vdso_addr == 0) return 0; - const eh = @ptrFromInt(*elf.Ehdr, vdso_addr); + const eh = @as(*elf.Ehdr, @ptrFromInt(vdso_addr)); var ph_addr: usize = vdso_addr + eh.e_phoff; var maybe_dynv: ?[*]usize = null; @@ -19,14 +19,14 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { i += 1; ph_addr += eh.e_phentsize; }) { - const this_ph = @ptrFromInt(*elf.Phdr, ph_addr); + const this_ph = @as(*elf.Phdr, @ptrFromInt(ph_addr)); switch (this_ph.p_type) { // On WSL1 as well as older kernels, the VDSO ELF image is pre-linked in the upper half // of the memory space (e.g. p_vaddr = 0xffffffffff700000 on WSL1). // Wrapping operations are used on this line as well as subsequent calculations relative to base // (lines 47, 78) to ensure no overflow check is tripped. 
elf.PT_LOAD => base = vdso_addr +% this_ph.p_offset -% this_ph.p_vaddr, - elf.PT_DYNAMIC => maybe_dynv = @ptrFromInt([*]usize, vdso_addr + this_ph.p_offset), + elf.PT_DYNAMIC => maybe_dynv = @as([*]usize, @ptrFromInt(vdso_addr + this_ph.p_offset)), else => {}, } } @@ -45,11 +45,11 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { while (dynv[i] != 0) : (i += 2) { const p = base +% dynv[i + 1]; switch (dynv[i]) { - elf.DT_STRTAB => maybe_strings = @ptrFromInt([*]u8, p), - elf.DT_SYMTAB => maybe_syms = @ptrFromInt([*]elf.Sym, p), - elf.DT_HASH => maybe_hashtab = @ptrFromInt([*]linux.Elf_Symndx, p), - elf.DT_VERSYM => maybe_versym = @ptrFromInt([*]u16, p), - elf.DT_VERDEF => maybe_verdef = @ptrFromInt(*elf.Verdef, p), + elf.DT_STRTAB => maybe_strings = @as([*]u8, @ptrFromInt(p)), + elf.DT_SYMTAB => maybe_syms = @as([*]elf.Sym, @ptrFromInt(p)), + elf.DT_HASH => maybe_hashtab = @as([*]linux.Elf_Symndx, @ptrFromInt(p)), + elf.DT_VERSYM => maybe_versym = @as([*]u16, @ptrFromInt(p)), + elf.DT_VERDEF => maybe_verdef = @as(*elf.Verdef, @ptrFromInt(p)), else => {}, } } @@ -65,10 +65,10 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { var i: usize = 0; while (i < hashtab[1]) : (i += 1) { - if (0 == (@as(u32, 1) << @intCast(u5, syms[i].st_info & 0xf) & OK_TYPES)) continue; - if (0 == (@as(u32, 1) << @intCast(u5, syms[i].st_info >> 4) & OK_BINDS)) continue; + if (0 == (@as(u32, 1) << @as(u5, @intCast(syms[i].st_info & 0xf)) & OK_TYPES)) continue; + if (0 == (@as(u32, 1) << @as(u5, @intCast(syms[i].st_info >> 4)) & OK_BINDS)) continue; if (0 == syms[i].st_shndx) continue; - const sym_name = @ptrCast([*:0]u8, strings + syms[i].st_name); + const sym_name = @as([*:0]u8, @ptrCast(strings + syms[i].st_name)); if (!mem.eql(u8, name, mem.sliceTo(sym_name, 0))) continue; if (maybe_versym) |versym| { if (!checkver(maybe_verdef.?, versym[i], vername, strings)) @@ -82,15 +82,15 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [*]u8) bool { var def = def_arg; - const vsym = @bitCast(u32, vsym_arg) & 0x7fff; + const vsym = @as(u32, @bitCast(vsym_arg)) & 0x7fff; while (true) { if (0 == (def.vd_flags & elf.VER_FLG_BASE) and (def.vd_ndx & 0x7fff) == vsym) break; if (def.vd_next == 0) return false; - def = @ptrFromInt(*elf.Verdef, @intFromPtr(def) + def.vd_next); + def = @as(*elf.Verdef, @ptrFromInt(@intFromPtr(def) + def.vd_next)); } - const aux = @ptrFromInt(*elf.Verdaux, @intFromPtr(def) + def.vd_aux); - const vda_name = @ptrCast([*:0]u8, strings + aux.vda_name); + const aux = @as(*elf.Verdaux, @ptrFromInt(@intFromPtr(def) + def.vd_aux)); + const vda_name = @as([*:0]u8, @ptrCast(strings + aux.vda_name)); return mem.eql(u8, vername, mem.sliceTo(vda_name, 0)); } diff --git a/lib/std/os/plan9.zig b/lib/std/os/plan9.zig index b628bc2afc..3e1137c7ce 100644 --- a/lib/std/os/plan9.zig +++ b/lib/std/os/plan9.zig @@ -8,9 +8,9 @@ pub const syscall_bits = switch (builtin.cpu.arch) { pub const E = @import("plan9/errno.zig").E; /// Get the errno from a syscall return value, or 0 for no error. 
pub fn getErrno(r: usize) E { - const signed_r = @bitCast(isize, r); + const signed_r = @as(isize, @bitCast(r)); const int = if (signed_r > -4096 and signed_r < 0) -signed_r else 0; - return @enumFromInt(E, int); + return @as(E, @enumFromInt(int)); } pub const SIG = struct { /// hangup diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig index 888b2f5c1c..d5451f64ac 100644 --- a/lib/std/os/test.zig +++ b/lib/std/os/test.zig @@ -488,7 +488,7 @@ fn iter_fn(info: *dl_phdr_info, size: usize, counter: *usize) IterFnError!void { const reloc_addr = info.dlpi_addr + phdr.p_vaddr; // Find the ELF header - const elf_header = @ptrFromInt(*elf.Ehdr, reloc_addr - phdr.p_offset); + const elf_header = @as(*elf.Ehdr, @ptrFromInt(reloc_addr - phdr.p_offset)); // Validate the magic if (!mem.eql(u8, elf_header.e_ident[0..4], elf.MAGIC)) return error.BadElfMagic; // Consistency check @@ -751,7 +751,7 @@ test "getrlimit and setrlimit" { } inline for (std.meta.fields(os.rlimit_resource)) |field| { - const resource = @enumFromInt(os.rlimit_resource, field.value); + const resource = @as(os.rlimit_resource, @enumFromInt(field.value)); const limit = try os.getrlimit(resource); // On 32 bit MIPS musl includes a fix which changes limits greater than -1UL/2 to RLIM_INFINITY. diff --git a/lib/std/os/uefi.zig b/lib/std/os/uefi.zig index f51caaa86f..7c6eb08a93 100644 --- a/lib/std/os/uefi.zig +++ b/lib/std/os/uefi.zig @@ -143,7 +143,7 @@ pub const FileHandle = *opaque {}; test "GUID formatting" { var bytes = [_]u8{ 137, 60, 203, 50, 128, 128, 124, 66, 186, 19, 80, 73, 135, 59, 194, 135 }; - var guid = @bitCast(Guid, bytes); + var guid = @as(Guid, @bitCast(bytes)); var str = try std.fmt.allocPrint(std.testing.allocator, "{}", .{guid}); defer std.testing.allocator.free(str); diff --git a/lib/std/os/uefi/pool_allocator.zig b/lib/std/os/uefi/pool_allocator.zig index c24d9416f1..3f64a2f3f6 100644 --- a/lib/std/os/uefi/pool_allocator.zig +++ b/lib/std/os/uefi/pool_allocator.zig @@ -9,7 +9,7 @@ const Allocator = mem.Allocator; const UefiPoolAllocator = struct { fn getHeader(ptr: [*]u8) *[*]align(8) u8 { - return @ptrFromInt(*[*]align(8) u8, @intFromPtr(ptr) - @sizeOf(usize)); + return @as(*[*]align(8) u8, @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize))); } fn alloc( @@ -22,7 +22,7 @@ const UefiPoolAllocator = struct { assert(len > 0); - const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); + const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); const metadata_len = mem.alignForward(usize, @sizeOf(usize), ptr_align); @@ -135,5 +135,5 @@ fn uefi_free( ) void { _ = log2_old_ptr_align; _ = ret_addr; - _ = uefi.system_table.boot_services.?.freePool(@alignCast(8, buf.ptr)); + _ = uefi.system_table.boot_services.?.freePool(@alignCast(buf.ptr)); } diff --git a/lib/std/os/uefi/protocols/device_path_protocol.zig b/lib/std/os/uefi/protocols/device_path_protocol.zig index c64084e6ed..a083959521 100644 --- a/lib/std/os/uefi/protocols/device_path_protocol.zig +++ b/lib/std/os/uefi/protocols/device_path_protocol.zig @@ -23,10 +23,10 @@ pub const DevicePathProtocol = extern struct { /// Returns the next DevicePathProtocol node in the sequence, if any. 
pub fn next(self: *DevicePathProtocol) ?*DevicePathProtocol { - if (self.type == .End and @enumFromInt(EndDevicePath.Subtype, self.subtype) == .EndEntire) + if (self.type == .End and @as(EndDevicePath.Subtype, @enumFromInt(self.subtype)) == .EndEntire) return null; - return @ptrCast(*DevicePathProtocol, @ptrCast([*]u8, self) + self.length); + return @as(*DevicePathProtocol, @ptrCast(@as([*]u8, @ptrCast(self)) + self.length)); } /// Calculates the total length of the device path structure in bytes, including the end of device path node. @@ -48,30 +48,30 @@ pub const DevicePathProtocol = extern struct { // DevicePathProtocol for the extra node before the end var buf = try allocator.alloc(u8, path_size + 2 * (path.len + 1) + @sizeOf(DevicePathProtocol)); - @memcpy(buf[0..path_size.len], @ptrCast([*]const u8, self)[0..path_size]); + @memcpy(buf[0..path_size.len], @as([*]const u8, @ptrCast(self))[0..path_size]); // Pointer to the copy of the end node of the current chain, which is - 4 from the buffer // as the end node itself is 4 bytes (type: u8 + subtype: u8 + length: u16). - var new = @ptrCast(*MediaDevicePath.FilePathDevicePath, buf.ptr + path_size - 4); + var new = @as(*MediaDevicePath.FilePathDevicePath, @ptrCast(buf.ptr + path_size - 4)); new.type = .Media; new.subtype = .FilePath; - new.length = @sizeOf(MediaDevicePath.FilePathDevicePath) + 2 * (@intCast(u16, path.len) + 1); + new.length = @sizeOf(MediaDevicePath.FilePathDevicePath) + 2 * (@as(u16, @intCast(path.len)) + 1); // The same as new.getPath(), but not const as we're filling it in. - var ptr = @ptrCast([*:0]align(1) u16, @ptrCast([*]u8, new) + @sizeOf(MediaDevicePath.FilePathDevicePath)); + var ptr = @as([*:0]align(1) u16, @ptrCast(@as([*]u8, @ptrCast(new)) + @sizeOf(MediaDevicePath.FilePathDevicePath))); for (path, 0..) |s, i| ptr[i] = s; ptr[path.len] = 0; - var end = @ptrCast(*EndDevicePath.EndEntireDevicePath, @ptrCast(*DevicePathProtocol, new).next().?); + var end = @as(*EndDevicePath.EndEntireDevicePath, @ptrCast(@as(*DevicePathProtocol, @ptrCast(new)).next().?)); end.type = .End; end.subtype = .EndEntire; end.length = @sizeOf(EndDevicePath.EndEntireDevicePath); - return @ptrCast(*DevicePathProtocol, buf.ptr); + return @as(*DevicePathProtocol, @ptrCast(buf.ptr)); } pub fn getDevicePath(self: *const DevicePathProtocol) ?DevicePath { @@ -103,7 +103,7 @@ pub const DevicePathProtocol = extern struct { if (self.subtype == tag_val) { // e.g. expr = .{ .Pci = @ptrCast(...) } - return @unionInit(TUnion, subtype.name, @ptrCast(subtype.type, self)); + return @unionInit(TUnion, subtype.name, @as(subtype.type, @ptrCast(self))); } } @@ -332,7 +332,7 @@ pub const AcpiDevicePath = union(Subtype) { pub fn adrs(self: *const AdrDevicePath) []align(1) const u32 { // self.length is a minimum of 8 with one adr which is size 4. 
var entries = (self.length - 4) / @sizeOf(u32); - return @ptrCast([*]align(1) const u32, &self.adr)[0..entries]; + return @as([*]align(1) const u32, @ptrCast(&self.adr))[0..entries]; } }; @@ -550,7 +550,7 @@ pub const MessagingDevicePath = union(Subtype) { pub fn serial_number(self: *const UsbWwidDevicePath) []align(1) const u16 { var serial_len = (self.length - @sizeOf(UsbWwidDevicePath)) / @sizeOf(u16); - return @ptrCast([*]align(1) const u16, @ptrCast([*]const u8, self) + @sizeOf(UsbWwidDevicePath))[0..serial_len]; + return @as([*]align(1) const u16, @ptrCast(@as([*]const u8, @ptrCast(self)) + @sizeOf(UsbWwidDevicePath)))[0..serial_len]; } }; @@ -943,7 +943,7 @@ pub const MediaDevicePath = union(Subtype) { length: u16 align(1), pub fn getPath(self: *const FilePathDevicePath) [*:0]align(1) const u16 { - return @ptrCast([*:0]align(1) const u16, @ptrCast([*]const u8, self) + @sizeOf(FilePathDevicePath)); + return @as([*:0]align(1) const u16, @ptrCast(@as([*]const u8, @ptrCast(self)) + @sizeOf(FilePathDevicePath))); } }; @@ -1068,7 +1068,7 @@ pub const BiosBootSpecificationDevicePath = union(Subtype) { status_flag: u16 align(1), pub fn getDescription(self: *const BBS101DevicePath) [*:0]const u8 { - return @ptrCast([*:0]const u8, self) + @sizeOf(BBS101DevicePath); + return @as([*:0]const u8, @ptrCast(self)) + @sizeOf(BBS101DevicePath); } }; diff --git a/lib/std/os/uefi/protocols/file_protocol.zig b/lib/std/os/uefi/protocols/file_protocol.zig index 729d4020b4..53ec5f81e3 100644 --- a/lib/std/os/uefi/protocols/file_protocol.zig +++ b/lib/std/os/uefi/protocols/file_protocol.zig @@ -152,7 +152,7 @@ pub const FileInfo = extern struct { attribute: u64, pub fn getFileName(self: *const FileInfo) [*:0]const u16 { - return @ptrCast([*:0]const u16, @ptrCast([*]const u8, self) + @sizeOf(FileInfo)); + return @as([*:0]const u16, @ptrCast(@as([*]const u8, @ptrCast(self)) + @sizeOf(FileInfo))); } pub const efi_file_read_only: u64 = 0x0000000000000001; @@ -182,7 +182,7 @@ pub const FileSystemInfo = extern struct { _volume_label: u16, pub fn getVolumeLabel(self: *const FileSystemInfo) [*:0]const u16 { - return @ptrCast([*:0]const u16, &self._volume_label); + return @as([*:0]const u16, @ptrCast(&self._volume_label)); } pub const guid align(8) = Guid{ diff --git a/lib/std/os/uefi/protocols/hii.zig b/lib/std/os/uefi/protocols/hii.zig index 437fa29739..c7199d2950 100644 --- a/lib/std/os/uefi/protocols/hii.zig +++ b/lib/std/os/uefi/protocols/hii.zig @@ -39,7 +39,7 @@ pub const HIISimplifiedFontPackage = extern struct { number_of_wide_glyphs: u16, pub fn getNarrowGlyphs(self: *HIISimplifiedFontPackage) []NarrowGlyph { - return @ptrCast([*]NarrowGlyph, @ptrCast([*]u8, self) + @sizeOf(HIISimplifiedFontPackage))[0..self.number_of_narrow_glyphs]; + return @as([*]NarrowGlyph, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(HIISimplifiedFontPackage)))[0..self.number_of_narrow_glyphs]; } }; diff --git a/lib/std/os/uefi/protocols/managed_network_protocol.zig b/lib/std/os/uefi/protocols/managed_network_protocol.zig index aff9febd17..5ea63f5a65 100644 --- a/lib/std/os/uefi/protocols/managed_network_protocol.zig +++ b/lib/std/os/uefi/protocols/managed_network_protocol.zig @@ -118,7 +118,7 @@ pub const ManagedNetworkTransmitData = extern struct { fragment_count: u16, pub fn getFragments(self: *ManagedNetworkTransmitData) []ManagedNetworkFragmentData { - return @ptrCast([*]ManagedNetworkFragmentData, @ptrCast([*]u8, self) + @sizeOf(ManagedNetworkTransmitData))[0..self.fragment_count]; + return @as([*]ManagedNetworkFragmentData, 
@ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(ManagedNetworkTransmitData)))[0..self.fragment_count]; } }; diff --git a/lib/std/os/uefi/protocols/udp6_protocol.zig b/lib/std/os/uefi/protocols/udp6_protocol.zig index 96a1d4c318..f772d38d52 100644 --- a/lib/std/os/uefi/protocols/udp6_protocol.zig +++ b/lib/std/os/uefi/protocols/udp6_protocol.zig @@ -87,7 +87,7 @@ pub const Udp6ReceiveData = extern struct { fragment_count: u32, pub fn getFragments(self: *Udp6ReceiveData) []Udp6FragmentData { - return @ptrCast([*]Udp6FragmentData, @ptrCast([*]u8, self) + @sizeOf(Udp6ReceiveData))[0..self.fragment_count]; + return @as([*]Udp6FragmentData, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(Udp6ReceiveData)))[0..self.fragment_count]; } }; @@ -97,7 +97,7 @@ pub const Udp6TransmitData = extern struct { fragment_count: u32, pub fn getFragments(self: *Udp6TransmitData) []Udp6FragmentData { - return @ptrCast([*]Udp6FragmentData, @ptrCast([*]u8, self) + @sizeOf(Udp6TransmitData))[0..self.fragment_count]; + return @as([*]Udp6FragmentData, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(Udp6TransmitData)))[0..self.fragment_count]; } }; diff --git a/lib/std/os/uefi/tables/boot_services.zig b/lib/std/os/uefi/tables/boot_services.zig index bfd3865e95..7fc32decb9 100644 --- a/lib/std/os/uefi/tables/boot_services.zig +++ b/lib/std/os/uefi/tables/boot_services.zig @@ -165,7 +165,7 @@ pub const BootServices = extern struct { try self.openProtocol( handle, &protocol.guid, - @ptrCast(*?*anyopaque, &ptr), + @as(*?*anyopaque, @ptrCast(&ptr)), // Invoking handle (loaded image) uefi.handle, // Control handle (null as not a driver) diff --git a/lib/std/os/wasi.zig b/lib/std/os/wasi.zig index 711352e2fe..951d8ee26d 100644 --- a/lib/std/os/wasi.zig +++ b/lib/std/os/wasi.zig @@ -103,13 +103,13 @@ pub const timespec = extern struct { const tv_sec: timestamp_t = tm / 1_000_000_000; const tv_nsec = tm - tv_sec * 1_000_000_000; return timespec{ - .tv_sec = @intCast(time_t, tv_sec), - .tv_nsec = @intCast(isize, tv_nsec), + .tv_sec = @as(time_t, @intCast(tv_sec)), + .tv_nsec = @as(isize, @intCast(tv_nsec)), }; } pub fn toTimestamp(ts: timespec) timestamp_t { - const tm = @intCast(timestamp_t, ts.tv_sec * 1_000_000_000) + @intCast(timestamp_t, ts.tv_nsec); + const tm = @as(timestamp_t, @intCast(ts.tv_sec * 1_000_000_000)) + @as(timestamp_t, @intCast(ts.tv_nsec)); return tm; } }; diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig index 421815c04d..e12e8ac4d3 100644 --- a/lib/std/os/windows.zig +++ b/lib/std/os/windows.zig @@ -30,7 +30,7 @@ pub const gdi32 = @import("windows/gdi32.zig"); pub const winmm = @import("windows/winmm.zig"); pub const crypt32 = @import("windows/crypt32.zig"); -pub const self_process_handle = @ptrFromInt(HANDLE, maxInt(usize)); +pub const self_process_handle = @as(HANDLE, @ptrFromInt(maxInt(usize))); const Self = @This(); @@ -198,9 +198,9 @@ pub fn DeviceIoControl( var io: IO_STATUS_BLOCK = undefined; const in_ptr = if (in) |i| i.ptr else null; - const in_len = if (in) |i| @intCast(ULONG, i.len) else 0; + const in_len = if (in) |i| @as(ULONG, @intCast(i.len)) else 0; const out_ptr = if (out) |o| o.ptr else null; - const out_len = if (out) |o| @intCast(ULONG, o.len) else 0; + const out_len = if (out) |o| @as(ULONG, @intCast(o.len)) else 0; const rc = blk: { if (is_fsctl) { @@ -307,7 +307,7 @@ pub fn WaitForSingleObjectEx(handle: HANDLE, milliseconds: DWORD, alertable: boo pub fn WaitForMultipleObjectsEx(handles: []const HANDLE, waitAll: bool, milliseconds: DWORD, alertable: bool) !u32 { 
assert(handles.len < MAXIMUM_WAIT_OBJECTS); - const nCount: DWORD = @intCast(DWORD, handles.len); + const nCount: DWORD = @as(DWORD, @intCast(handles.len)); switch (kernel32.WaitForMultipleObjectsEx( nCount, handles.ptr, @@ -419,7 +419,7 @@ pub fn GetQueuedCompletionStatusEx( const success = kernel32.GetQueuedCompletionStatusEx( completion_port, completion_port_entries.ptr, - @intCast(ULONG, completion_port_entries.len), + @as(ULONG, @intCast(completion_port_entries.len)), &num_entries_removed, timeout_ms orelse INFINITE, @intFromBool(alertable), @@ -469,8 +469,8 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo .InternalHigh = 0, .DUMMYUNIONNAME = .{ .DUMMYSTRUCTNAME = .{ - .Offset = @truncate(u32, off), - .OffsetHigh = @truncate(u32, off >> 32), + .Offset = @as(u32, @truncate(off)), + .OffsetHigh = @as(u32, @truncate(off >> 32)), }, }, .hEvent = null, @@ -480,7 +480,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo loop.beginOneEvent(); suspend { // TODO handle buffer bigger than DWORD can hold - _ = kernel32.ReadFile(in_hFile, buffer.ptr, @intCast(DWORD, buffer.len), null, &resume_node.base.overlapped); + _ = kernel32.ReadFile(in_hFile, buffer.ptr, @as(DWORD, @intCast(buffer.len)), null, &resume_node.base.overlapped); } var bytes_transferred: DWORD = undefined; if (kernel32.GetOverlappedResult(in_hFile, &resume_node.base.overlapped, &bytes_transferred, FALSE) == 0) { @@ -496,7 +496,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo if (offset == null) { // TODO make setting the file position non-blocking const new_off = off + bytes_transferred; - try SetFilePointerEx_CURRENT(in_hFile, @bitCast(i64, new_off)); + try SetFilePointerEx_CURRENT(in_hFile, @as(i64, @bitCast(new_off))); } return @as(usize, bytes_transferred); } else { @@ -510,8 +510,8 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo .InternalHigh = 0, .DUMMYUNIONNAME = .{ .DUMMYSTRUCTNAME = .{ - .Offset = @truncate(u32, off), - .OffsetHigh = @truncate(u32, off >> 32), + .Offset = @as(u32, @truncate(off)), + .OffsetHigh = @as(u32, @truncate(off >> 32)), }, }, .hEvent = null, @@ -563,8 +563,8 @@ pub fn WriteFile( .InternalHigh = 0, .DUMMYUNIONNAME = .{ .DUMMYSTRUCTNAME = .{ - .Offset = @truncate(u32, off), - .OffsetHigh = @truncate(u32, off >> 32), + .Offset = @as(u32, @truncate(off)), + .OffsetHigh = @as(u32, @truncate(off >> 32)), }, }, .hEvent = null, @@ -591,7 +591,7 @@ pub fn WriteFile( if (offset == null) { // TODO make setting the file position non-blocking const new_off = off + bytes_transferred; - try SetFilePointerEx_CURRENT(handle, @bitCast(i64, new_off)); + try SetFilePointerEx_CURRENT(handle, @as(i64, @bitCast(new_off))); } return bytes_transferred; } else { @@ -603,8 +603,8 @@ pub fn WriteFile( .InternalHigh = 0, .DUMMYUNIONNAME = .{ .DUMMYSTRUCTNAME = .{ - .Offset = @truncate(u32, off), - .OffsetHigh = @truncate(u32, off >> 32), + .Offset = @as(u32, @truncate(off)), + .OffsetHigh = @as(u32, @truncate(off >> 32)), }, }, .hEvent = null, @@ -745,19 +745,19 @@ pub fn CreateSymbolicLink( const header_len = @sizeOf(ULONG) + @sizeOf(USHORT) * 2; const symlink_data = SYMLINK_DATA{ .ReparseTag = IO_REPARSE_TAG_SYMLINK, - .ReparseDataLength = @intCast(u16, buf_len - header_len), + .ReparseDataLength = @as(u16, @intCast(buf_len - header_len)), .Reserved = 0, - .SubstituteNameOffset = @intCast(u16, target_path.len * 2), - .SubstituteNameLength = @intCast(u16, target_path.len * 2), + 
.SubstituteNameOffset = @as(u16, @intCast(target_path.len * 2)), + .SubstituteNameLength = @as(u16, @intCast(target_path.len * 2)), .PrintNameOffset = 0, - .PrintNameLength = @intCast(u16, target_path.len * 2), + .PrintNameLength = @as(u16, @intCast(target_path.len * 2)), .Flags = if (dir) |_| SYMLINK_FLAG_RELATIVE else 0, }; @memcpy(buffer[0..@sizeOf(SYMLINK_DATA)], std.mem.asBytes(&symlink_data)); - @memcpy(buffer[@sizeOf(SYMLINK_DATA)..][0 .. target_path.len * 2], @ptrCast([*]const u8, target_path)); + @memcpy(buffer[@sizeOf(SYMLINK_DATA)..][0 .. target_path.len * 2], @as([*]const u8, @ptrCast(target_path))); const paths_start = @sizeOf(SYMLINK_DATA) + target_path.len * 2; - @memcpy(buffer[paths_start..][0 .. target_path.len * 2], @ptrCast([*]const u8, target_path)); + @memcpy(buffer[paths_start..][0 .. target_path.len * 2], @as([*]const u8, @ptrCast(target_path))); _ = try DeviceIoControl(symlink_handle, FSCTL_SET_REPARSE_POINT, buffer[0..buf_len], null); } @@ -827,10 +827,10 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u8) ReadLin else => |e| return e, }; - const reparse_struct = @ptrCast(*const REPARSE_DATA_BUFFER, @alignCast(@alignOf(REPARSE_DATA_BUFFER), &reparse_buf[0])); + const reparse_struct: *const REPARSE_DATA_BUFFER = @ptrCast(@alignCast(&reparse_buf[0])); switch (reparse_struct.ReparseTag) { IO_REPARSE_TAG_SYMLINK => { - const buf = @ptrCast(*const SYMBOLIC_LINK_REPARSE_BUFFER, @alignCast(@alignOf(SYMBOLIC_LINK_REPARSE_BUFFER), &reparse_struct.DataBuffer[0])); + const buf: *const SYMBOLIC_LINK_REPARSE_BUFFER = @ptrCast(@alignCast(&reparse_struct.DataBuffer[0])); const offset = buf.SubstituteNameOffset >> 1; const len = buf.SubstituteNameLength >> 1; const path_buf = @as([*]const u16, &buf.PathBuffer); @@ -838,7 +838,7 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u8) ReadLin return parseReadlinkPath(path_buf[offset..][0..len], is_relative, out_buffer); }, IO_REPARSE_TAG_MOUNT_POINT => { - const buf = @ptrCast(*const MOUNT_POINT_REPARSE_BUFFER, @alignCast(@alignOf(MOUNT_POINT_REPARSE_BUFFER), &reparse_struct.DataBuffer[0])); + const buf: *const MOUNT_POINT_REPARSE_BUFFER = @ptrCast(@alignCast(&reparse_struct.DataBuffer[0])); const offset = buf.SubstituteNameOffset >> 1; const len = buf.SubstituteNameLength >> 1; const path_buf = @as([*]const u16, &buf.PathBuffer); @@ -884,7 +884,7 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil else FILE_NON_DIRECTORY_FILE | FILE_OPEN_REPARSE_POINT; // would we ever want to delete the target instead? - const path_len_bytes = @intCast(u16, sub_path_w.len * 2); + const path_len_bytes = @as(u16, @intCast(sub_path_w.len * 2)); var nt_name = UNICODE_STRING{ .Length = path_len_bytes, .MaximumLength = path_len_bytes, @@ -1020,7 +1020,7 @@ pub fn SetFilePointerEx_BEGIN(handle: HANDLE, offset: u64) SetFilePointerError!v // "The starting point is zero or the beginning of the file. If [FILE_BEGIN] // is specified, then the liDistanceToMove parameter is interpreted as an unsigned value." 
// https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-setfilepointerex - const ipos = @bitCast(LARGE_INTEGER, offset); + const ipos = @as(LARGE_INTEGER, @bitCast(offset)); if (kernel32.SetFilePointerEx(handle, ipos, null, FILE_BEGIN) == 0) { switch (kernel32.GetLastError()) { .INVALID_PARAMETER => unreachable, @@ -1064,7 +1064,7 @@ pub fn SetFilePointerEx_CURRENT_get(handle: HANDLE) SetFilePointerError!u64 { } // Based on the docs for FILE_BEGIN, it seems that the returned signed integer // should be interpreted as an unsigned integer. - return @bitCast(u64, result); + return @as(u64, @bitCast(result)); } pub fn QueryObjectName( @@ -1073,7 +1073,7 @@ pub fn QueryObjectName( ) ![]u16 { const out_buffer_aligned = mem.alignInSlice(out_buffer, @alignOf(OBJECT_NAME_INFORMATION)) orelse return error.NameTooLong; - const info = @ptrCast(*OBJECT_NAME_INFORMATION, out_buffer_aligned); + const info = @as(*OBJECT_NAME_INFORMATION, @ptrCast(out_buffer_aligned)); //buffer size is specified in bytes const out_buffer_len = std.math.cast(ULONG, out_buffer_aligned.len * 2) orelse std.math.maxInt(ULONG); //last argument would return the length required for full_buffer, not exposed here @@ -1197,26 +1197,26 @@ pub fn GetFinalPathNameByHandle( }; defer CloseHandle(mgmt_handle); - var input_struct = @ptrCast(*MOUNTMGR_MOUNT_POINT, &input_buf[0]); + var input_struct = @as(*MOUNTMGR_MOUNT_POINT, @ptrCast(&input_buf[0])); input_struct.DeviceNameOffset = @sizeOf(MOUNTMGR_MOUNT_POINT); - input_struct.DeviceNameLength = @intCast(USHORT, volume_name_u16.len * 2); - @memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..][0 .. volume_name_u16.len * 2], @ptrCast([*]const u8, volume_name_u16.ptr)); + input_struct.DeviceNameLength = @as(USHORT, @intCast(volume_name_u16.len * 2)); + @memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..][0 .. volume_name_u16.len * 2], @as([*]const u8, @ptrCast(volume_name_u16.ptr))); DeviceIoControl(mgmt_handle, IOCTL_MOUNTMGR_QUERY_POINTS, &input_buf, &output_buf) catch |err| switch (err) { error.AccessDenied => unreachable, else => |e| return e, }; - const mount_points_struct = @ptrCast(*const MOUNTMGR_MOUNT_POINTS, &output_buf[0]); + const mount_points_struct = @as(*const MOUNTMGR_MOUNT_POINTS, @ptrCast(&output_buf[0])); - const mount_points = @ptrCast( + const mount_points = @as( [*]const MOUNTMGR_MOUNT_POINT, - &mount_points_struct.MountPoints[0], + @ptrCast(&mount_points_struct.MountPoints[0]), )[0..mount_points_struct.NumberOfMountPoints]; for (mount_points) |mount_point| { - const symlink = @ptrCast( + const symlink = @as( [*]const u16, - @alignCast(@alignOf(u16), &output_buf[mount_point.SymbolicLinkNameOffset]), + @ptrCast(@alignCast(&output_buf[mount_point.SymbolicLinkNameOffset])), )[0 .. mount_point.SymbolicLinkNameLength / 2]; // Look for `\DosDevices\` prefix. 
We don't really care if there are more than one symlinks @@ -1282,7 +1282,7 @@ pub fn GetFileSizeEx(hFile: HANDLE) GetFileSizeError!u64 { else => |err| return unexpectedError(err), } } - return @bitCast(u64, file_size); + return @as(u64, @bitCast(file_size)); } pub const GetFileAttributesError = error{ @@ -1313,7 +1313,7 @@ pub fn WSAStartup(majorVersion: u8, minorVersion: u8) !ws2_32.WSADATA { var wsadata: ws2_32.WSADATA = undefined; return switch (ws2_32.WSAStartup((@as(WORD, minorVersion) << 8) | majorVersion, &wsadata)) { 0 => wsadata, - else => |err_int| switch (@enumFromInt(ws2_32.WinsockError, @intCast(u16, err_int))) { + else => |err_int| switch (@as(ws2_32.WinsockError, @enumFromInt(@as(u16, @intCast(err_int))))) { .WSASYSNOTREADY => return error.SystemNotAvailable, .WSAVERNOTSUPPORTED => return error.VersionNotSupported, .WSAEINPROGRESS => return error.BlockingOperationInProgress, @@ -1408,7 +1408,7 @@ pub fn WSASocketW( } pub fn bind(s: ws2_32.SOCKET, name: *const ws2_32.sockaddr, namelen: ws2_32.socklen_t) i32 { - return ws2_32.bind(s, name, @intCast(i32, namelen)); + return ws2_32.bind(s, name, @as(i32, @intCast(namelen))); } pub fn listen(s: ws2_32.SOCKET, backlog: u31) i32 { @@ -1427,15 +1427,15 @@ pub fn closesocket(s: ws2_32.SOCKET) !void { pub fn accept(s: ws2_32.SOCKET, name: ?*ws2_32.sockaddr, namelen: ?*ws2_32.socklen_t) ws2_32.SOCKET { assert((name == null) == (namelen == null)); - return ws2_32.accept(s, name, @ptrCast(?*i32, namelen)); + return ws2_32.accept(s, name, @as(?*i32, @ptrCast(namelen))); } pub fn getsockname(s: ws2_32.SOCKET, name: *ws2_32.sockaddr, namelen: *ws2_32.socklen_t) i32 { - return ws2_32.getsockname(s, name, @ptrCast(*i32, namelen)); + return ws2_32.getsockname(s, name, @as(*i32, @ptrCast(namelen))); } pub fn getpeername(s: ws2_32.SOCKET, name: *ws2_32.sockaddr, namelen: *ws2_32.socklen_t) i32 { - return ws2_32.getpeername(s, name, @ptrCast(*i32, namelen)); + return ws2_32.getpeername(s, name, @as(*i32, @ptrCast(namelen))); } pub fn sendmsg( @@ -1447,28 +1447,28 @@ pub fn sendmsg( if (ws2_32.WSASendMsg(s, msg, flags, &bytes_send, null, null) == ws2_32.SOCKET_ERROR) { return ws2_32.SOCKET_ERROR; } else { - return @as(i32, @intCast(u31, bytes_send)); + return @as(i32, @as(u31, @intCast(bytes_send))); } } pub fn sendto(s: ws2_32.SOCKET, buf: [*]const u8, len: usize, flags: u32, to: ?*const ws2_32.sockaddr, to_len: ws2_32.socklen_t) i32 { - var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = @constCast(buf) }; + var buffer = ws2_32.WSABUF{ .len = @as(u31, @truncate(len)), .buf = @constCast(buf) }; var bytes_send: DWORD = undefined; - if (ws2_32.WSASendTo(s, @ptrCast([*]ws2_32.WSABUF, &buffer), 1, &bytes_send, flags, to, @intCast(i32, to_len), null, null) == ws2_32.SOCKET_ERROR) { + if (ws2_32.WSASendTo(s, @as([*]ws2_32.WSABUF, @ptrCast(&buffer)), 1, &bytes_send, flags, to, @as(i32, @intCast(to_len)), null, null) == ws2_32.SOCKET_ERROR) { return ws2_32.SOCKET_ERROR; } else { - return @as(i32, @intCast(u31, bytes_send)); + return @as(i32, @as(u31, @intCast(bytes_send))); } } pub fn recvfrom(s: ws2_32.SOCKET, buf: [*]u8, len: usize, flags: u32, from: ?*ws2_32.sockaddr, from_len: ?*ws2_32.socklen_t) i32 { - var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = buf }; + var buffer = ws2_32.WSABUF{ .len = @as(u31, @truncate(len)), .buf = buf }; var bytes_received: DWORD = undefined; var flags_inout = flags; - if (ws2_32.WSARecvFrom(s, @ptrCast([*]ws2_32.WSABUF, &buffer), 1, &bytes_received, &flags_inout, from, @ptrCast(?*i32, 
from_len), null, null) == ws2_32.SOCKET_ERROR) { + if (ws2_32.WSARecvFrom(s, @as([*]ws2_32.WSABUF, @ptrCast(&buffer)), 1, &bytes_received, &flags_inout, from, @as(?*i32, @ptrCast(from_len)), null, null) == ws2_32.SOCKET_ERROR) { return ws2_32.SOCKET_ERROR; } else { - return @as(i32, @intCast(u31, bytes_received)); + return @as(i32, @as(u31, @intCast(bytes_received))); } } @@ -1489,9 +1489,9 @@ pub fn WSAIoctl( s, dwIoControlCode, if (inBuffer) |i| i.ptr else null, - if (inBuffer) |i| @intCast(DWORD, i.len) else 0, + if (inBuffer) |i| @as(DWORD, @intCast(i.len)) else 0, outBuffer.ptr, - @intCast(DWORD, outBuffer.len), + @as(DWORD, @intCast(outBuffer.len)), &bytes, overlapped, completionRoutine, @@ -1741,7 +1741,7 @@ pub fn QueryPerformanceFrequency() u64 { var result: LARGE_INTEGER = undefined; assert(kernel32.QueryPerformanceFrequency(&result) != 0); // The kernel treats this integer as unsigned. - return @bitCast(u64, result); + return @as(u64, @bitCast(result)); } pub fn QueryPerformanceCounter() u64 { @@ -1750,7 +1750,7 @@ pub fn QueryPerformanceCounter() u64 { var result: LARGE_INTEGER = undefined; assert(kernel32.QueryPerformanceCounter(&result) != 0); // The kernel treats this integer as unsigned. - return @bitCast(u64, result); + return @as(u64, @bitCast(result)); } pub fn InitOnceExecuteOnce(InitOnce: *INIT_ONCE, InitFn: INIT_ONCE_FN, Parameter: ?*anyopaque, Context: ?*anyopaque) void { @@ -1852,7 +1852,7 @@ pub fn teb() *TEB { return switch (native_arch) { .x86 => blk: { if (builtin.zig_backend == .stage2_c) { - break :blk @ptrCast(*TEB, @alignCast(@alignOf(TEB), zig_x86_windows_teb())); + break :blk @ptrCast(@alignCast(zig_x86_windows_teb())); } else { break :blk asm volatile ( \\ movl %%fs:0x18, %[ptr] @@ -1862,7 +1862,7 @@ pub fn teb() *TEB { }, .x86_64 => blk: { if (builtin.zig_backend == .stage2_c) { - break :blk @ptrCast(*TEB, @alignCast(@alignOf(TEB), zig_x86_64_windows_teb())); + break :blk @ptrCast(@alignCast(zig_x86_64_windows_teb())); } else { break :blk asm volatile ( \\ movq %%gs:0x30, %[ptr] @@ -1894,7 +1894,7 @@ pub fn fromSysTime(hns: i64) i128 { pub fn toSysTime(ns: i128) i64 { const hns = @divFloor(ns, 100); - return @intCast(i64, hns) - std.time.epoch.windows * (std.time.ns_per_s / 100); + return @as(i64, @intCast(hns)) - std.time.epoch.windows * (std.time.ns_per_s / 100); } pub fn fileTimeToNanoSeconds(ft: FILETIME) i128 { @@ -1904,22 +1904,22 @@ pub fn fileTimeToNanoSeconds(ft: FILETIME) i128 { /// Converts a number of nanoseconds since the POSIX epoch to a Windows FILETIME. 
pub fn nanoSecondsToFileTime(ns: i128) FILETIME { - const adjusted = @bitCast(u64, toSysTime(ns)); + const adjusted = @as(u64, @bitCast(toSysTime(ns))); return FILETIME{ - .dwHighDateTime = @truncate(u32, adjusted >> 32), - .dwLowDateTime = @truncate(u32, adjusted), + .dwHighDateTime = @as(u32, @truncate(adjusted >> 32)), + .dwLowDateTime = @as(u32, @truncate(adjusted)), }; } /// Compares two WTF16 strings using RtlEqualUnicodeString pub fn eqlIgnoreCaseWTF16(a: []const u16, b: []const u16) bool { - const a_bytes = @intCast(u16, a.len * 2); + const a_bytes = @as(u16, @intCast(a.len * 2)); const a_string = UNICODE_STRING{ .Length = a_bytes, .MaximumLength = a_bytes, .Buffer = @constCast(a.ptr), }; - const b_bytes = @intCast(u16, b.len * 2); + const b_bytes = @as(u16, @intCast(b.len * 2)); const b_string = UNICODE_STRING{ .Length = b_bytes, .MaximumLength = b_bytes, @@ -2117,7 +2117,7 @@ pub fn wToPrefixedFileW(path: [:0]const u16) !PathSpace { .unc_absolute => nt_prefix.len + 2, else => nt_prefix.len, }; - const buf_len = @intCast(u32, path_space.data.len - path_buf_offset); + const buf_len = @as(u32, @intCast(path_space.data.len - path_buf_offset)); const path_byte_len = ntdll.RtlGetFullPathName_U( path.ptr, buf_len * 2, @@ -2263,7 +2263,7 @@ test getUnprefixedPathType { } fn getFullPathNameW(path: [*:0]const u16, out: []u16) !usize { - const result = kernel32.GetFullPathNameW(path, @intCast(u32, out.len), out.ptr, null); + const result = kernel32.GetFullPathNameW(path, @as(u32, @intCast(out.len)), out.ptr, null); if (result == 0) { switch (kernel32.GetLastError()) { else => |err| return unexpectedError(err), @@ -2284,9 +2284,9 @@ pub fn loadWinsockExtensionFunction(comptime T: type, sock: ws2_32.SOCKET, guid: const rc = ws2_32.WSAIoctl( sock, ws2_32.SIO_GET_EXTENSION_FUNCTION_POINTER, - @ptrCast(*const anyopaque, &guid), + @as(*const anyopaque, @ptrCast(&guid)), @sizeOf(GUID), - @ptrFromInt(?*anyopaque, @intFromPtr(&function)), + @as(?*anyopaque, @ptrFromInt(@intFromPtr(&function))), @sizeOf(T), &num_bytes, null, @@ -2332,7 +2332,7 @@ pub fn unexpectedError(err: Win32Error) std.os.UnexpectedError { } pub fn unexpectedWSAError(err: ws2_32.WinsockError) std.os.UnexpectedError { - return unexpectedError(@enumFromInt(Win32Error, @intFromEnum(err))); + return unexpectedError(@as(Win32Error, @enumFromInt(@intFromEnum(err)))); } /// Call this when you made a windows NtDll call @@ -2530,7 +2530,7 @@ pub fn CTL_CODE(deviceType: u16, function: u12, method: TransferType, access: u2 @intFromEnum(method); } -pub const INVALID_HANDLE_VALUE = @ptrFromInt(HANDLE, maxInt(usize)); +pub const INVALID_HANDLE_VALUE = @as(HANDLE, @ptrFromInt(maxInt(usize))); pub const INVALID_FILE_ATTRIBUTES = @as(DWORD, maxInt(DWORD)); @@ -3119,7 +3119,7 @@ pub const GUID = extern struct { bytes[i] = (try std.fmt.charToDigit(s[hex_offset], 16)) << 4 | try std.fmt.charToDigit(s[hex_offset + 1], 16); } - return @bitCast(GUID, bytes); + return @as(GUID, @bitCast(bytes)); } }; @@ -3150,16 +3150,16 @@ pub const KF_FLAG_SIMPLE_IDLIST = 256; pub const KF_FLAG_ALIAS_ONLY = -2147483648; pub const S_OK = 0; -pub const E_NOTIMPL = @bitCast(c_long, @as(c_ulong, 0x80004001)); -pub const E_NOINTERFACE = @bitCast(c_long, @as(c_ulong, 0x80004002)); -pub const E_POINTER = @bitCast(c_long, @as(c_ulong, 0x80004003)); -pub const E_ABORT = @bitCast(c_long, @as(c_ulong, 0x80004004)); -pub const E_FAIL = @bitCast(c_long, @as(c_ulong, 0x80004005)); -pub const E_UNEXPECTED = @bitCast(c_long, @as(c_ulong, 0x8000FFFF)); -pub const E_ACCESSDENIED = 
@bitCast(c_long, @as(c_ulong, 0x80070005)); -pub const E_HANDLE = @bitCast(c_long, @as(c_ulong, 0x80070006)); -pub const E_OUTOFMEMORY = @bitCast(c_long, @as(c_ulong, 0x8007000E)); -pub const E_INVALIDARG = @bitCast(c_long, @as(c_ulong, 0x80070057)); +pub const E_NOTIMPL = @as(c_long, @bitCast(@as(c_ulong, 0x80004001))); +pub const E_NOINTERFACE = @as(c_long, @bitCast(@as(c_ulong, 0x80004002))); +pub const E_POINTER = @as(c_long, @bitCast(@as(c_ulong, 0x80004003))); +pub const E_ABORT = @as(c_long, @bitCast(@as(c_ulong, 0x80004004))); +pub const E_FAIL = @as(c_long, @bitCast(@as(c_ulong, 0x80004005))); +pub const E_UNEXPECTED = @as(c_long, @bitCast(@as(c_ulong, 0x8000FFFF))); +pub const E_ACCESSDENIED = @as(c_long, @bitCast(@as(c_ulong, 0x80070005))); +pub const E_HANDLE = @as(c_long, @bitCast(@as(c_ulong, 0x80070006))); +pub const E_OUTOFMEMORY = @as(c_long, @bitCast(@as(c_ulong, 0x8007000E))); +pub const E_INVALIDARG = @as(c_long, @bitCast(@as(c_ulong, 0x80070057))); pub const FILE_FLAG_BACKUP_SEMANTICS = 0x02000000; pub const FILE_FLAG_DELETE_ON_CLOSE = 0x04000000; @@ -3221,7 +3221,7 @@ pub const LSTATUS = LONG; pub const HKEY = *opaque {}; -pub const HKEY_LOCAL_MACHINE: HKEY = @ptrFromInt(HKEY, 0x80000002); +pub const HKEY_LOCAL_MACHINE: HKEY = @as(HKEY, @ptrFromInt(0x80000002)); /// Combines the STANDARD_RIGHTS_REQUIRED, KEY_QUERY_VALUE, KEY_SET_VALUE, KEY_CREATE_SUB_KEY, /// KEY_ENUMERATE_SUB_KEYS, KEY_NOTIFY, and KEY_CREATE_LINK access rights. @@ -4685,7 +4685,7 @@ pub const KUSER_SHARED_DATA = extern struct { /// Read-only user-mode address for the shared data. /// https://www.geoffchappell.com/studies/windows/km/ntoskrnl/inc/api/ntexapi_x/kuser_shared_data/index.htm /// https://msrc-blog.microsoft.com/2022/04/05/randomizing-the-kuser_shared_data-structure-on-windows/ -pub const SharedUserData: *const KUSER_SHARED_DATA = @ptrFromInt(*const KUSER_SHARED_DATA, 0x7FFE0000); +pub const SharedUserData: *const KUSER_SHARED_DATA = @as(*const KUSER_SHARED_DATA, @ptrFromInt(0x7FFE0000)); pub fn IsProcessorFeaturePresent(feature: PF) bool { if (@intFromEnum(feature) >= PROCESSOR_FEATURE_MAX) return false; @@ -4886,7 +4886,7 @@ pub fn WriteProcessMemory(handle: HANDLE, addr: ?LPVOID, buffer: []const u8) Wri switch (ntdll.NtWriteVirtualMemory( handle, addr, - @ptrCast(*const anyopaque, buffer.ptr), + @as(*const anyopaque, @ptrCast(buffer.ptr)), buffer.len, &nwritten, )) { @@ -4919,6 +4919,6 @@ pub fn ProcessBaseAddress(handle: HANDLE) ProcessBaseAddressError!HMODULE { var peb_buf: [@sizeOf(PEB)]u8 align(@alignOf(PEB)) = undefined; const peb_out = try ReadProcessMemory(handle, info.PebBaseAddress, &peb_buf); - const ppeb = @ptrCast(*const PEB, @alignCast(@alignOf(PEB), peb_out.ptr)); + const ppeb: *const PEB = @ptrCast(@alignCast(peb_out.ptr)); return ppeb.ImageBaseAddress; } diff --git a/lib/std/os/windows/user32.zig b/lib/std/os/windows/user32.zig index 0d6fc2c670..8c492cee32 100644 --- a/lib/std/os/windows/user32.zig +++ b/lib/std/os/windows/user32.zig @@ -1275,7 +1275,7 @@ pub const WS_EX_LAYERED = 0x00080000; pub const WS_EX_OVERLAPPEDWINDOW = WS_EX_WINDOWEDGE | WS_EX_CLIENTEDGE; pub const WS_EX_PALETTEWINDOW = WS_EX_WINDOWEDGE | WS_EX_TOOLWINDOW | WS_EX_TOPMOST; -pub const CW_USEDEFAULT = @bitCast(i32, @as(u32, 0x80000000)); +pub const CW_USEDEFAULT = @as(i32, @bitCast(@as(u32, 0x80000000))); pub extern "user32" fn CreateWindowExA(dwExStyle: DWORD, lpClassName: [*:0]const u8, lpWindowName: [*:0]const u8, dwStyle: DWORD, X: i32, Y: i32, nWidth: i32, nHeight: i32, hWindParent: ?HWND, hMenu: 
?HMENU, hInstance: HINSTANCE, lpParam: ?LPVOID) callconv(WINAPI) ?HWND; pub fn createWindowExA(dwExStyle: u32, lpClassName: [*:0]const u8, lpWindowName: [*:0]const u8, dwStyle: u32, X: i32, Y: i32, nWidth: i32, nHeight: i32, hWindParent: ?HWND, hMenu: ?HMENU, hInstance: HINSTANCE, lpParam: ?*anyopaque) !HWND { diff --git a/lib/std/os/windows/ws2_32.zig b/lib/std/os/windows/ws2_32.zig index 821b903a34..240c8c849d 100644 --- a/lib/std/os/windows/ws2_32.zig +++ b/lib/std/os/windows/ws2_32.zig @@ -21,7 +21,7 @@ const LPARAM = windows.LPARAM; const FARPROC = windows.FARPROC; pub const SOCKET = *opaque {}; -pub const INVALID_SOCKET = @ptrFromInt(SOCKET, ~@as(usize, 0)); +pub const INVALID_SOCKET = @as(SOCKET, @ptrFromInt(~@as(usize, 0))); pub const GROUP = u32; pub const ADDRESS_FAMILY = u16; diff --git a/lib/std/packed_int_array.zig b/lib/std/packed_int_array.zig index 10d8af0575..cff9eb8cf1 100644 --- a/lib/std/packed_int_array.zig +++ b/lib/std/packed_int_array.zig @@ -73,25 +73,25 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type { const tail_keep_bits = container_bits - (int_bits + head_keep_bits); //read bytes as container - const value_ptr = @ptrCast(*align(1) const Container, &bytes[start_byte]); + const value_ptr = @as(*align(1) const Container, @ptrCast(&bytes[start_byte])); var value = value_ptr.*; if (endian != native_endian) value = @byteSwap(value); switch (endian) { .Big => { - value <<= @intCast(Shift, head_keep_bits); - value >>= @intCast(Shift, head_keep_bits); - value >>= @intCast(Shift, tail_keep_bits); + value <<= @as(Shift, @intCast(head_keep_bits)); + value >>= @as(Shift, @intCast(head_keep_bits)); + value >>= @as(Shift, @intCast(tail_keep_bits)); }, .Little => { - value <<= @intCast(Shift, tail_keep_bits); - value >>= @intCast(Shift, tail_keep_bits); - value >>= @intCast(Shift, head_keep_bits); + value <<= @as(Shift, @intCast(tail_keep_bits)); + value >>= @as(Shift, @intCast(tail_keep_bits)); + value >>= @as(Shift, @intCast(head_keep_bits)); }, } - return @bitCast(Int, @truncate(UnInt, value)); + return @as(Int, @bitCast(@as(UnInt, @truncate(value)))); } /// Sets the integer at `index` to `val` within the packed data beginning @@ -115,21 +115,21 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type { const head_keep_bits = bit_index - (start_byte * 8); const tail_keep_bits = container_bits - (int_bits + head_keep_bits); const keep_shift = switch (endian) { - .Big => @intCast(Shift, tail_keep_bits), - .Little => @intCast(Shift, head_keep_bits), + .Big => @as(Shift, @intCast(tail_keep_bits)), + .Little => @as(Shift, @intCast(head_keep_bits)), }; //position the bits where they need to be in the container - const value = @intCast(Container, @bitCast(UnInt, int)) << keep_shift; + const value = @as(Container, @intCast(@as(UnInt, @bitCast(int)))) << keep_shift; //read existing bytes - const target_ptr = @ptrCast(*align(1) Container, &bytes[start_byte]); + const target_ptr = @as(*align(1) Container, @ptrCast(&bytes[start_byte])); var target = target_ptr.*; if (endian != native_endian) target = @byteSwap(target); //zero the bits we want to replace in the existing bytes - const inv_mask = @intCast(Container, std.math.maxInt(UnInt)) << keep_shift; + const inv_mask = @as(Container, @intCast(std.math.maxInt(UnInt))) << keep_shift; const mask = ~inv_mask; target &= mask; @@ -156,7 +156,7 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type { if (length == 0) return PackedIntSliceEndian(Int, endian).init(new_bytes[0..0], 0); var 
new_slice = PackedIntSliceEndian(Int, endian).init(new_bytes, length); - new_slice.bit_offset = @intCast(u3, (bit_index - (start_byte * 8))); + new_slice.bit_offset = @as(u3, @intCast((bit_index - (start_byte * 8)))); return new_slice; } @@ -398,7 +398,7 @@ test "PackedIntArray init" { const PackedArray = PackedIntArray(u3, 8); var packed_array = PackedArray.init([_]u3{ 0, 1, 2, 3, 4, 5, 6, 7 }); var i = @as(usize, 0); - while (i < packed_array.len) : (i += 1) try testing.expectEqual(@intCast(u3, i), packed_array.get(i)); + while (i < packed_array.len) : (i += 1) try testing.expectEqual(@as(u3, @intCast(i)), packed_array.get(i)); } test "PackedIntArray initAllTo" { @@ -469,7 +469,7 @@ test "PackedIntSlice of PackedInt(Array/Slice)" { var i = @as(usize, 0); while (i < packed_array.len) : (i += 1) { - packed_array.set(i, @intCast(Int, i % limit)); + packed_array.set(i, @as(Int, @intCast(i % limit))); } //slice of array diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig index 25a6786ec6..4d71ce2103 100644 --- a/lib/std/pdb.zig +++ b/lib/std/pdb.zig @@ -573,7 +573,7 @@ pub const Pdb = struct { if (this_record_len % 4 != 0) { const round_to_next_4 = (this_record_len | 0x3) + 1; const march_forward_bytes = round_to_next_4 - this_record_len; - try stream.seekBy(@intCast(isize, march_forward_bytes)); + try stream.seekBy(@as(isize, @intCast(march_forward_bytes))); this_record_len += march_forward_bytes; } @@ -689,14 +689,14 @@ pub const Pdb = struct { var symbol_i: usize = 0; while (symbol_i != module.symbols.len) { - const prefix = @ptrCast(*align(1) RecordPrefix, &module.symbols[symbol_i]); + const prefix = @as(*align(1) RecordPrefix, @ptrCast(&module.symbols[symbol_i])); if (prefix.RecordLen < 2) return null; switch (prefix.RecordKind) { .S_LPROC32, .S_GPROC32 => { - const proc_sym = @ptrCast(*align(1) ProcSym, &module.symbols[symbol_i + @sizeOf(RecordPrefix)]); + const proc_sym = @as(*align(1) ProcSym, @ptrCast(&module.symbols[symbol_i + @sizeOf(RecordPrefix)])); if (address >= proc_sym.CodeOffset and address < proc_sym.CodeOffset + proc_sym.CodeSize) { - return mem.sliceTo(@ptrCast([*:0]u8, &proc_sym.Name[0]), 0); + return mem.sliceTo(@as([*:0]u8, @ptrCast(&proc_sym.Name[0])), 0); } }, else => {}, @@ -715,7 +715,7 @@ pub const Pdb = struct { var skip_len: usize = undefined; const checksum_offset = module.checksum_offset orelse return error.MissingDebugInfo; while (sect_offset != subsect_info.len) : (sect_offset += skip_len) { - const subsect_hdr = @ptrCast(*align(1) DebugSubsectionHeader, &subsect_info[sect_offset]); + const subsect_hdr = @as(*align(1) DebugSubsectionHeader, @ptrCast(&subsect_info[sect_offset])); skip_len = subsect_hdr.Length; sect_offset += @sizeOf(DebugSubsectionHeader); @@ -723,7 +723,7 @@ pub const Pdb = struct { .Lines => { var line_index = sect_offset; - const line_hdr = @ptrCast(*align(1) LineFragmentHeader, &subsect_info[line_index]); + const line_hdr = @as(*align(1) LineFragmentHeader, @ptrCast(&subsect_info[line_index])); if (line_hdr.RelocSegment == 0) return error.MissingDebugInfo; line_index += @sizeOf(LineFragmentHeader); @@ -737,7 +737,7 @@ pub const Pdb = struct { const subsection_end_index = sect_offset + subsect_hdr.Length; while (line_index < subsection_end_index) { - const block_hdr = @ptrCast(*align(1) LineBlockFragmentHeader, &subsect_info[line_index]); + const block_hdr = @as(*align(1) LineBlockFragmentHeader, @ptrCast(&subsect_info[line_index])); line_index += @sizeOf(LineBlockFragmentHeader); const start_line_index = line_index; @@ -749,7 +749,7 @@ pub 
const Pdb = struct { // This is done with a simple linear search. var line_i: u32 = 0; while (line_i < block_hdr.NumLines) : (line_i += 1) { - const line_num_entry = @ptrCast(*align(1) LineNumberEntry, &subsect_info[line_index]); + const line_num_entry = @as(*align(1) LineNumberEntry, @ptrCast(&subsect_info[line_index])); line_index += @sizeOf(LineNumberEntry); const vaddr_start = frag_vaddr_start + line_num_entry.Offset; @@ -761,7 +761,7 @@ pub const Pdb = struct { // line_i == 0 would mean that no matching LineNumberEntry was found. if (line_i > 0) { const subsect_index = checksum_offset + block_hdr.NameIndex; - const chksum_hdr = @ptrCast(*align(1) FileChecksumEntryHeader, &module.subsect_info[subsect_index]); + const chksum_hdr = @as(*align(1) FileChecksumEntryHeader, @ptrCast(&module.subsect_info[subsect_index])); const strtab_offset = @sizeOf(PDBStringTableHeader) + chksum_hdr.FileNameOffset; try self.string_table.?.seekTo(strtab_offset); const source_file_name = try self.string_table.?.reader().readUntilDelimiterAlloc(self.allocator, 0, 1024); @@ -771,13 +771,13 @@ pub const Pdb = struct { const column = if (has_column) blk: { const start_col_index = start_line_index + @sizeOf(LineNumberEntry) * block_hdr.NumLines; const col_index = start_col_index + @sizeOf(ColumnNumberEntry) * line_entry_idx; - const col_num_entry = @ptrCast(*align(1) ColumnNumberEntry, &subsect_info[col_index]); + const col_num_entry = @as(*align(1) ColumnNumberEntry, @ptrCast(&subsect_info[col_index])); break :blk col_num_entry.StartColumn; } else 0; const found_line_index = start_line_index + line_entry_idx * @sizeOf(LineNumberEntry); - const line_num_entry = @ptrCast(*align(1) LineNumberEntry, &subsect_info[found_line_index]); - const flags = @ptrCast(*LineNumberEntry.Flags, &line_num_entry.Flags); + const line_num_entry = @as(*align(1) LineNumberEntry, @ptrCast(&subsect_info[found_line_index])); + const flags = @as(*LineNumberEntry.Flags, @ptrCast(&line_num_entry.Flags)); return debug.LineInfo{ .file_name = source_file_name, @@ -836,7 +836,7 @@ pub const Pdb = struct { var sect_offset: usize = 0; var skip_len: usize = undefined; while (sect_offset != mod.subsect_info.len) : (sect_offset += skip_len) { - const subsect_hdr = @ptrCast(*align(1) DebugSubsectionHeader, &mod.subsect_info[sect_offset]); + const subsect_hdr = @as(*align(1) DebugSubsectionHeader, @ptrCast(&mod.subsect_info[sect_offset])); skip_len = subsect_hdr.Length; sect_offset += @sizeOf(DebugSubsectionHeader); @@ -1038,7 +1038,7 @@ const MsfStream = struct { } fn read(self: *MsfStream, buffer: []u8) !usize { - var block_id = @intCast(usize, self.pos / self.block_size); + var block_id = @as(usize, @intCast(self.pos / self.block_size)); if (block_id >= self.blocks.len) return 0; // End of Stream var block = self.blocks[block_id]; var offset = self.pos % self.block_size; @@ -1069,7 +1069,7 @@ const MsfStream = struct { } pub fn seekBy(self: *MsfStream, len: i64) !void { - self.pos = @intCast(u64, @intCast(i64, self.pos) + len); + self.pos = @as(u64, @intCast(@as(i64, @intCast(self.pos)) + len)); if (self.pos >= self.blocks.len * self.block_size) return error.EOF; } diff --git a/lib/std/process.zig b/lib/std/process.zig index 05066fa436..28d4bfcb25 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -68,7 +68,7 @@ pub const EnvMap = struct { pub const EnvNameHashContext = struct { fn upcase(c: u21) u21 { if (c <= std.math.maxInt(u16)) - return std.os.windows.ntdll.RtlUpcaseUnicodeChar(@intCast(u16, c)); + return 
std.os.windows.ntdll.RtlUpcaseUnicodeChar(@as(u16, @intCast(c))); return c; } @@ -80,9 +80,9 @@ pub const EnvMap = struct { while (it.nextCodepoint()) |cp| { const cp_upper = upcase(cp); h.update(&[_]u8{ - @intCast(u8, (cp_upper >> 16) & 0xff), - @intCast(u8, (cp_upper >> 8) & 0xff), - @intCast(u8, (cp_upper >> 0) & 0xff), + @as(u8, @intCast((cp_upper >> 16) & 0xff)), + @as(u8, @intCast((cp_upper >> 8) & 0xff)), + @as(u8, @intCast((cp_upper >> 0) & 0xff)), }); } return h.final(); @@ -872,8 +872,8 @@ pub fn argsFree(allocator: Allocator, args_alloc: []const [:0]u8) void { for (args_alloc) |arg| { total_bytes += @sizeOf([]u8) + arg.len + 1; } - const unaligned_allocated_buf = @ptrCast([*]const u8, args_alloc.ptr)[0..total_bytes]; - const aligned_allocated_buf = @alignCast(@alignOf([]u8), unaligned_allocated_buf); + const unaligned_allocated_buf = @as([*]const u8, @ptrCast(args_alloc.ptr))[0..total_bytes]; + const aligned_allocated_buf: []align(@alignOf([]u8)) const u8 = @alignCast(unaligned_allocated_buf); return allocator.free(aligned_allocated_buf); } @@ -1143,7 +1143,7 @@ pub fn execve( } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. // TODO type-safety for null-termination of `os.environ`. - break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr); + break :m @as([*:null]const ?[*:0]const u8, @ptrCast(os.environ.ptr)); } else { // TODO come up with a solution for this. @compileError("missing std lib enhancement: std.process.execv implementation has no way to collect the environment variables to forward to the child process"); @@ -1175,7 +1175,7 @@ pub fn totalSystemMemory() TotalSystemMemoryError!usize { error.NameTooLong, error.UnknownName => unreachable, else => return error.UnknownTotalSystemMemory, }; - return @intCast(usize, physmem); + return @as(usize, @intCast(physmem)); }, .openbsd => { const mib: [2]c_int = [_]c_int{ @@ -1192,7 +1192,7 @@ pub fn totalSystemMemory() TotalSystemMemoryError!usize { else => return error.UnknownTotalSystemMemory, }; assert(physmem >= 0); - return @bitCast(usize, physmem); + return @as(usize, @bitCast(physmem)); }, .windows => { var sbi: std.os.windows.SYSTEM_BASIC_INFORMATION = undefined; diff --git a/lib/std/rand.zig b/lib/std/rand.zig index f07562c911..84dc9d2daf 100644 --- a/lib/std/rand.zig +++ b/lib/std/rand.zig @@ -41,8 +41,7 @@ pub const Random = struct { assert(@typeInfo(@typeInfo(Ptr).Pointer.child) == .Struct); // Must point to a struct const gen = struct { fn fill(ptr: *anyopaque, buf: []u8) void { - const alignment = @typeInfo(Ptr).Pointer.alignment; - const self = @ptrCast(Ptr, @alignCast(alignment, ptr)); + const self: Ptr = @ptrCast(@alignCast(ptr)); fillFn(self, buf); } }; @@ -97,7 +96,7 @@ pub const Random = struct { r.uintLessThan(Index, values.len); const MinInt = MinArrayIndex(Index); - return values[@intCast(MinInt, index)]; + return values[@as(MinInt, @intCast(index))]; } /// Returns a random int `i` such that `minInt(T) <= i <= maxInt(T)`. @@ -114,8 +113,8 @@ pub const Random = struct { // TODO: endian portability is pointless if the underlying prng isn't endian portable. // TODO: document the endian portability of this library. 
const byte_aligned_result = mem.readIntSliceLittle(ByteAlignedT, &rand_bytes); - const unsigned_result = @truncate(UnsignedT, byte_aligned_result); - return @bitCast(T, unsigned_result); + const unsigned_result = @as(UnsignedT, @truncate(byte_aligned_result)); + return @as(T, @bitCast(unsigned_result)); } /// Constant-time implementation off `uintLessThan`. @@ -126,9 +125,9 @@ pub const Random = struct { comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation! assert(0 < less_than); if (bits <= 32) { - return @intCast(T, limitRangeBiased(u32, r.int(u32), less_than)); + return @as(T, @intCast(limitRangeBiased(u32, r.int(u32), less_than))); } else { - return @intCast(T, limitRangeBiased(u64, r.int(u64), less_than)); + return @as(T, @intCast(limitRangeBiased(u64, r.int(u64), less_than))); } } @@ -156,7 +155,7 @@ pub const Random = struct { // "Lemire's (with an extra tweak from me)" var x: Small = r.int(Small); var m: Large = @as(Large, x) * @as(Large, less_than); - var l: Small = @truncate(Small, m); + var l: Small = @as(Small, @truncate(m)); if (l < less_than) { var t: Small = -%less_than; @@ -169,10 +168,10 @@ pub const Random = struct { while (l < t) { x = r.int(Small); m = @as(Large, x) * @as(Large, less_than); - l = @truncate(Small, m); + l = @as(Small, @truncate(m)); } } - return @intCast(T, m >> small_bits); + return @as(T, @intCast(m >> small_bits)); } /// Constant-time implementation off `uintAtMost`. @@ -206,10 +205,10 @@ pub const Random = struct { if (info.signedness == .signed) { // Two's complement makes this math pretty easy. const UnsignedT = std.meta.Int(.unsigned, info.bits); - const lo = @bitCast(UnsignedT, at_least); - const hi = @bitCast(UnsignedT, less_than); + const lo = @as(UnsignedT, @bitCast(at_least)); + const hi = @as(UnsignedT, @bitCast(less_than)); const result = lo +% r.uintLessThanBiased(UnsignedT, hi -% lo); - return @bitCast(T, result); + return @as(T, @bitCast(result)); } else { // The signed implementation would work fine, but we can use stricter arithmetic operators here. return at_least + r.uintLessThanBiased(T, less_than - at_least); @@ -225,10 +224,10 @@ pub const Random = struct { if (info.signedness == .signed) { // Two's complement makes this math pretty easy. const UnsignedT = std.meta.Int(.unsigned, info.bits); - const lo = @bitCast(UnsignedT, at_least); - const hi = @bitCast(UnsignedT, less_than); + const lo = @as(UnsignedT, @bitCast(at_least)); + const hi = @as(UnsignedT, @bitCast(less_than)); const result = lo +% r.uintLessThan(UnsignedT, hi -% lo); - return @bitCast(T, result); + return @as(T, @bitCast(result)); } else { // The signed implementation would work fine, but we can use stricter arithmetic operators here. return at_least + r.uintLessThan(T, less_than - at_least); @@ -243,10 +242,10 @@ pub const Random = struct { if (info.signedness == .signed) { // Two's complement makes this math pretty easy. const UnsignedT = std.meta.Int(.unsigned, info.bits); - const lo = @bitCast(UnsignedT, at_least); - const hi = @bitCast(UnsignedT, at_most); + const lo = @as(UnsignedT, @bitCast(at_least)); + const hi = @as(UnsignedT, @bitCast(at_most)); const result = lo +% r.uintAtMostBiased(UnsignedT, hi -% lo); - return @bitCast(T, result); + return @as(T, @bitCast(result)); } else { // The signed implementation would work fine, but we can use stricter arithmetic operators here. 
return at_least + r.uintAtMostBiased(T, at_most - at_least); @@ -262,10 +261,10 @@ pub const Random = struct { if (info.signedness == .signed) { // Two's complement makes this math pretty easy. const UnsignedT = std.meta.Int(.unsigned, info.bits); - const lo = @bitCast(UnsignedT, at_least); - const hi = @bitCast(UnsignedT, at_most); + const lo = @as(UnsignedT, @bitCast(at_least)); + const hi = @as(UnsignedT, @bitCast(at_most)); const result = lo +% r.uintAtMost(UnsignedT, hi -% lo); - return @bitCast(T, result); + return @as(T, @bitCast(result)); } else { // The signed implementation would work fine, but we can use stricter arithmetic operators here. return at_least + r.uintAtMost(T, at_most - at_least); @@ -294,9 +293,9 @@ pub const Random = struct { rand_lz += @clz(r.int(u32) | 0x7FF); } } - const mantissa = @truncate(u23, rand); + const mantissa = @as(u23, @truncate(rand)); const exponent = @as(u32, 126 - rand_lz) << 23; - return @bitCast(f32, exponent | mantissa); + return @as(f32, @bitCast(exponent | mantissa)); }, f64 => { // Use 52 random bits for the mantissa, and the rest for the exponent. @@ -321,7 +320,7 @@ pub const Random = struct { } const mantissa = rand & 0xFFFFFFFFFFFFF; const exponent = (1022 - rand_lz) << 52; - return @bitCast(f64, exponent | mantissa); + return @as(f64, @bitCast(exponent | mantissa)); }, else => @compileError("unknown floating point type"), } @@ -333,7 +332,7 @@ pub const Random = struct { pub fn floatNorm(r: Random, comptime T: type) T { const value = ziggurat.next_f64(r, ziggurat.NormDist); switch (T) { - f32 => return @floatCast(f32, value), + f32 => return @as(f32, @floatCast(value)), f64 => return value, else => @compileError("unknown floating point type"), } @@ -345,7 +344,7 @@ pub const Random = struct { pub fn floatExp(r: Random, comptime T: type) T { const value = ziggurat.next_f64(r, ziggurat.ExpDist); switch (T) { - f32 => return @floatCast(f32, value), + f32 => return @as(f32, @floatCast(value)), f64 => return value, else => @compileError("unknown floating point type"), } @@ -379,10 +378,10 @@ pub const Random = struct { } // `i <= j < max <= maxInt(MinInt)` - const max = @intCast(MinInt, buf.len); + const max = @as(MinInt, @intCast(buf.len)); var i: MinInt = 0; while (i < max - 1) : (i += 1) { - const j = @intCast(MinInt, r.intRangeLessThan(Index, i, max)); + const j = @as(MinInt, @intCast(r.intRangeLessThan(Index, i, max))); mem.swap(T, &buf[i], &buf[j]); } } @@ -445,7 +444,7 @@ pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T { // http://www.pcg-random.org/posts/bounded-rands.html // "Integer Multiplication (Biased)" var m: T2 = @as(T2, random_int) * @as(T2, less_than); - return @intCast(T, m >> bits); + return @as(T, @intCast(m >> bits)); } // Generator to extend 64-bit seed values into longer sequences. 
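Every hunk in this part of the series applies the same rewrite: a two-argument cast builtin call such as @truncate(u8, n) becomes @as(u8, @truncate(n)), i.e. the builtin now takes only its operand and picks its result type up from the surrounding expression, with @as supplying that type where nothing else does. A minimal sketch of the resulting idiom, for illustration only (not part of the patch; the test name, variables, and values are invented):

    const std = @import("std");

    test "casts take their result type from context" {
        const n: u64 = 0x1234;
        // Old two-argument forms: @truncate(u8, n), @intCast(u32, b), @bitCast(i32, w).
        // New forms take only the operand; @as provides the result type.
        const b = @as(u8, @truncate(n));
        const w = @as(u32, @intCast(b));
        const s = @as(i32, @bitCast(w));
        try std.testing.expectEqual(@as(u8, 0x34), b);
        try std.testing.expectEqual(@as(i32, 0x34), s);
    }

Where the result type is already known from context (a typed constant, a struct field initializer, a function argument), the @as wrapper is not needed; the hunks here add it uniformly, which keeps the conversion mechanical.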
diff --git a/lib/std/rand/Isaac64.zig b/lib/std/rand/Isaac64.zig index 8c6205e1cd..785c551dfd 100644 --- a/lib/std/rand/Isaac64.zig +++ b/lib/std/rand/Isaac64.zig @@ -38,10 +38,10 @@ fn step(self: *Isaac64, mix: u64, base: usize, comptime m1: usize, comptime m2: const x = self.m[base + m1]; self.a = mix +% self.m[base + m2]; - const y = self.a +% self.b +% self.m[@intCast(usize, (x >> 3) % self.m.len)]; + const y = self.a +% self.b +% self.m[@as(usize, @intCast((x >> 3) % self.m.len))]; self.m[base + m1] = y; - self.b = x +% self.m[@intCast(usize, (y >> 11) % self.m.len)]; + self.b = x +% self.m[@as(usize, @intCast((y >> 11) % self.m.len))]; self.r[self.r.len - 1 - base - m1] = self.b; } @@ -159,7 +159,7 @@ pub fn fill(self: *Isaac64, buf: []u8) void { var n = self.next(); comptime var j: usize = 0; inline while (j < 8) : (j += 1) { - buf[i + j] = @truncate(u8, n); + buf[i + j] = @as(u8, @truncate(n)); n >>= 8; } } @@ -168,7 +168,7 @@ pub fn fill(self: *Isaac64, buf: []u8) void { if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { - buf[i] = @truncate(u8, n); + buf[i] = @as(u8, @truncate(n)); n >>= 8; } } diff --git a/lib/std/rand/Pcg.zig b/lib/std/rand/Pcg.zig index 951713cc40..ceeadeab5c 100644 --- a/lib/std/rand/Pcg.zig +++ b/lib/std/rand/Pcg.zig @@ -29,10 +29,10 @@ fn next(self: *Pcg) u32 { const l = self.s; self.s = l *% default_multiplier +% (self.i | 1); - const xor_s = @truncate(u32, ((l >> 18) ^ l) >> 27); - const rot = @intCast(u32, l >> 59); + const xor_s = @as(u32, @truncate(((l >> 18) ^ l) >> 27)); + const rot = @as(u32, @intCast(l >> 59)); - return (xor_s >> @intCast(u5, rot)) | (xor_s << @intCast(u5, (0 -% rot) & 31)); + return (xor_s >> @as(u5, @intCast(rot))) | (xor_s << @as(u5, @intCast((0 -% rot) & 31))); } fn seed(self: *Pcg, init_s: u64) void { @@ -58,7 +58,7 @@ pub fn fill(self: *Pcg, buf: []u8) void { var n = self.next(); comptime var j: usize = 0; inline while (j < 4) : (j += 1) { - buf[i + j] = @truncate(u8, n); + buf[i + j] = @as(u8, @truncate(n)); n >>= 8; } } @@ -67,7 +67,7 @@ pub fn fill(self: *Pcg, buf: []u8) void { if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { - buf[i] = @truncate(u8, n); + buf[i] = @as(u8, @truncate(n)); n >>= 8; } } diff --git a/lib/std/rand/RomuTrio.zig b/lib/std/rand/RomuTrio.zig index ff7b4deac1..4ce2b7af01 100644 --- a/lib/std/rand/RomuTrio.zig +++ b/lib/std/rand/RomuTrio.zig @@ -34,7 +34,7 @@ fn next(self: *RomuTrio) u64 { } pub fn seedWithBuf(self: *RomuTrio, buf: [24]u8) void { - const seed_buf = @bitCast([3]u64, buf); + const seed_buf = @as([3]u64, @bitCast(buf)); self.x_state = seed_buf[0]; self.y_state = seed_buf[1]; self.z_state = seed_buf[2]; @@ -58,7 +58,7 @@ pub fn fill(self: *RomuTrio, buf: []u8) void { var n = self.next(); comptime var j: usize = 0; inline while (j < 8) : (j += 1) { - buf[i + j] = @truncate(u8, n); + buf[i + j] = @as(u8, @truncate(n)); n >>= 8; } } @@ -67,7 +67,7 @@ pub fn fill(self: *RomuTrio, buf: []u8) void { if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { - buf[i] = @truncate(u8, n); + buf[i] = @as(u8, @truncate(n)); n >>= 8; } } @@ -122,7 +122,7 @@ test "RomuTrio fill" { } test "RomuTrio buf seeding test" { - const buf0 = @bitCast([24]u8, [3]u64{ 16294208416658607535, 13964609475759908645, 4703697494102998476 }); + const buf0 = @as([24]u8, @bitCast([3]u64{ 16294208416658607535, 13964609475759908645, 4703697494102998476 })); const resulting_state = .{ .x = 16294208416658607535, .y = 13964609475759908645, .z = 4703697494102998476 
}; var r = RomuTrio.init(0); r.seedWithBuf(buf0); diff --git a/lib/std/rand/Sfc64.zig b/lib/std/rand/Sfc64.zig index a5e6920df7..af439b115b 100644 --- a/lib/std/rand/Sfc64.zig +++ b/lib/std/rand/Sfc64.zig @@ -56,7 +56,7 @@ pub fn fill(self: *Sfc64, buf: []u8) void { var n = self.next(); comptime var j: usize = 0; inline while (j < 8) : (j += 1) { - buf[i + j] = @truncate(u8, n); + buf[i + j] = @as(u8, @truncate(n)); n >>= 8; } } @@ -65,7 +65,7 @@ pub fn fill(self: *Sfc64, buf: []u8) void { if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { - buf[i] = @truncate(u8, n); + buf[i] = @as(u8, @truncate(n)); n >>= 8; } } diff --git a/lib/std/rand/Xoroshiro128.zig b/lib/std/rand/Xoroshiro128.zig index 6ddd2eb89e..56c4980e6d 100644 --- a/lib/std/rand/Xoroshiro128.zig +++ b/lib/std/rand/Xoroshiro128.zig @@ -45,7 +45,7 @@ pub fn jump(self: *Xoroshiro128) void { inline for (table) |entry| { var b: usize = 0; while (b < 64) : (b += 1) { - if ((entry & (@as(u64, 1) << @intCast(u6, b))) != 0) { + if ((entry & (@as(u64, 1) << @as(u6, @intCast(b)))) != 0) { s0 ^= self.s[0]; s1 ^= self.s[1]; } @@ -74,7 +74,7 @@ pub fn fill(self: *Xoroshiro128, buf: []u8) void { var n = self.next(); comptime var j: usize = 0; inline while (j < 8) : (j += 1) { - buf[i + j] = @truncate(u8, n); + buf[i + j] = @as(u8, @truncate(n)); n >>= 8; } } @@ -83,7 +83,7 @@ pub fn fill(self: *Xoroshiro128, buf: []u8) void { if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { - buf[i] = @truncate(u8, n); + buf[i] = @as(u8, @truncate(n)); n >>= 8; } } diff --git a/lib/std/rand/Xoshiro256.zig b/lib/std/rand/Xoshiro256.zig index 35af701ea1..c72d9ee1a2 100644 --- a/lib/std/rand/Xoshiro256.zig +++ b/lib/std/rand/Xoshiro256.zig @@ -46,13 +46,13 @@ pub fn jump(self: *Xoshiro256) void { var table: u256 = 0x39abdc4529b1661ca9582618e03fc9aad5a61266f0c9392c180ec6d33cfd0aba; while (table != 0) : (table >>= 1) { - if (@truncate(u1, table) != 0) { - s ^= @bitCast(u256, self.s); + if (@as(u1, @truncate(table)) != 0) { + s ^= @as(u256, @bitCast(self.s)); } _ = self.next(); } - self.s = @bitCast([4]u64, s); + self.s = @as([4]u64, @bitCast(s)); } pub fn seed(self: *Xoshiro256, init_s: u64) void { @@ -74,7 +74,7 @@ pub fn fill(self: *Xoshiro256, buf: []u8) void { var n = self.next(); comptime var j: usize = 0; inline while (j < 8) : (j += 1) { - buf[i + j] = @truncate(u8, n); + buf[i + j] = @as(u8, @truncate(n)); n >>= 8; } } @@ -83,7 +83,7 @@ pub fn fill(self: *Xoshiro256, buf: []u8) void { if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { - buf[i] = @truncate(u8, n); + buf[i] = @as(u8, @truncate(n)); n >>= 8; } } diff --git a/lib/std/rand/benchmark.zig b/lib/std/rand/benchmark.zig index ea3de9c70d..530556517c 100644 --- a/lib/std/rand/benchmark.zig +++ b/lib/std/rand/benchmark.zig @@ -91,8 +91,8 @@ pub fn benchmark(comptime H: anytype, bytes: usize, comptime block_size: usize) } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s)); std.debug.assert(rng.random().int(u64) != 0); diff --git a/lib/std/rand/test.zig b/lib/std/rand/test.zig index 6cc6891c5a..551e47f8ff 100644 --- a/lib/std/rand/test.zig +++ b/lib/std/rand/test.zig @@ -332,13 +332,13 @@ test "Random float chi-square goodness of fit" { while (i < 
num_numbers) : (i += 1) { const rand_f32 = random.float(f32); const rand_f64 = random.float(f64); - var f32_put = try f32_hist.getOrPut(@intFromFloat(u32, rand_f32 * @floatFromInt(f32, num_buckets))); + var f32_put = try f32_hist.getOrPut(@as(u32, @intFromFloat(rand_f32 * @as(f32, @floatFromInt(num_buckets))))); if (f32_put.found_existing) { f32_put.value_ptr.* += 1; } else { f32_put.value_ptr.* = 1; } - var f64_put = try f64_hist.getOrPut(@intFromFloat(u32, rand_f64 * @floatFromInt(f64, num_buckets))); + var f64_put = try f64_hist.getOrPut(@as(u32, @intFromFloat(rand_f64 * @as(f64, @floatFromInt(num_buckets))))); if (f64_put.found_existing) { f64_put.value_ptr.* += 1; } else { @@ -352,8 +352,8 @@ test "Random float chi-square goodness of fit" { { var j: u32 = 0; while (j < num_buckets) : (j += 1) { - const count = @floatFromInt(f64, (if (f32_hist.get(j)) |v| v else 0)); - const expected = @floatFromInt(f64, num_numbers) / @floatFromInt(f64, num_buckets); + const count = @as(f64, @floatFromInt((if (f32_hist.get(j)) |v| v else 0))); + const expected = @as(f64, @floatFromInt(num_numbers)) / @as(f64, @floatFromInt(num_buckets)); const delta = count - expected; const variance = (delta * delta) / expected; f32_total_variance += variance; @@ -363,8 +363,8 @@ test "Random float chi-square goodness of fit" { { var j: u64 = 0; while (j < num_buckets) : (j += 1) { - const count = @floatFromInt(f64, (if (f64_hist.get(j)) |v| v else 0)); - const expected = @floatFromInt(f64, num_numbers) / @floatFromInt(f64, num_buckets); + const count = @as(f64, @floatFromInt((if (f64_hist.get(j)) |v| v else 0))); + const expected = @as(f64, @floatFromInt(num_numbers)) / @as(f64, @floatFromInt(num_buckets)); const delta = count - expected; const variance = (delta * delta) / expected; f64_total_variance += variance; @@ -421,13 +421,13 @@ fn testRange(r: Random, start: i8, end: i8) !void { try testRangeBias(r, start, end, false); } fn testRangeBias(r: Random, start: i8, end: i8, biased: bool) !void { - const count = @intCast(usize, @as(i32, end) - @as(i32, start)); + const count = @as(usize, @intCast(@as(i32, end) - @as(i32, start))); var values_buffer = [_]bool{false} ** 0x100; const values = values_buffer[0..count]; var i: usize = 0; while (i < count) { const value: i32 = if (biased) r.intRangeLessThanBiased(i8, start, end) else r.intRangeLessThan(i8, start, end); - const index = @intCast(usize, value - start); + const index = @as(usize, @intCast(value - start)); if (!values[index]) { i += 1; values[index] = true; diff --git a/lib/std/rand/ziggurat.zig b/lib/std/rand/ziggurat.zig index afe00a1348..09d695b88d 100644 --- a/lib/std/rand/ziggurat.zig +++ b/lib/std/rand/ziggurat.zig @@ -18,17 +18,17 @@ pub fn next_f64(random: Random, comptime tables: ZigTable) f64 { // We manually construct a float from parts as we can avoid an extra random lookup here by // using the unused exponent for the lookup table entry. 
const bits = random.int(u64); - const i = @as(usize, @truncate(u8, bits)); + const i = @as(usize, @as(u8, @truncate(bits))); const u = blk: { if (tables.is_symmetric) { // Generate a value in the range [2, 4) and scale into [-1, 1) const repr = ((0x3ff + 1) << 52) | (bits >> 12); - break :blk @bitCast(f64, repr) - 3.0; + break :blk @as(f64, @bitCast(repr)) - 3.0; } else { // Generate a value in the range [1, 2) and scale into (0, 1) const repr = (0x3ff << 52) | (bits >> 12); - break :blk @bitCast(f64, repr) - (1.0 - math.floatEps(f64) / 2.0); + break :blk @as(f64, @bitCast(repr)) - (1.0 - math.floatEps(f64) / 2.0); } }; diff --git a/lib/std/segmented_list.zig b/lib/std/segmented_list.zig index 172fe4e7c3..1c9cffa766 100644 --- a/lib/std/segmented_list.zig +++ b/lib/std/segmented_list.zig @@ -107,7 +107,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } pub fn deinit(self: *Self, allocator: Allocator) void { - self.freeShelves(allocator, @intCast(ShelfIndex, self.dynamic_segments.len), 0); + self.freeShelves(allocator, @as(ShelfIndex, @intCast(self.dynamic_segments.len)), 0); allocator.free(self.dynamic_segments); self.* = undefined; } @@ -171,7 +171,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type /// TODO update this and related methods to match the conventions set by ArrayList pub fn setCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void { if (prealloc_item_count != 0) { - if (new_capacity <= @as(usize, 1) << (prealloc_exp + @intCast(ShelfIndex, self.dynamic_segments.len))) { + if (new_capacity <= @as(usize, 1) << (prealloc_exp + @as(ShelfIndex, @intCast(self.dynamic_segments.len)))) { return self.shrinkCapacity(allocator, new_capacity); } } @@ -181,7 +181,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type /// Only grows capacity, or retains current capacity. pub fn growCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void { const new_cap_shelf_count = shelfCount(new_capacity); - const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len); + const old_shelf_count = @as(ShelfIndex, @intCast(self.dynamic_segments.len)); if (new_cap_shelf_count <= old_shelf_count) return; const new_dynamic_segments = try allocator.alloc([*]T, new_cap_shelf_count); @@ -206,7 +206,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type /// It may fail to reduce the capacity in which case the capacity will remain unchanged. 
pub fn shrinkCapacity(self: *Self, allocator: Allocator, new_capacity: usize) void { if (new_capacity <= prealloc_item_count) { - const len = @intCast(ShelfIndex, self.dynamic_segments.len); + const len = @as(ShelfIndex, @intCast(self.dynamic_segments.len)); self.freeShelves(allocator, len, 0); allocator.free(self.dynamic_segments); self.dynamic_segments = &[_][*]T{}; @@ -214,7 +214,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } const new_cap_shelf_count = shelfCount(new_capacity); - const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len); + const old_shelf_count = @as(ShelfIndex, @intCast(self.dynamic_segments.len)); assert(new_cap_shelf_count <= old_shelf_count); if (new_cap_shelf_count == old_shelf_count) return; @@ -424,7 +424,7 @@ fn testSegmentedList(comptime prealloc: usize) !void { { var i: usize = 0; while (i < 100) : (i += 1) { - try list.append(testing.allocator, @intCast(i32, i + 1)); + try list.append(testing.allocator, @as(i32, @intCast(i + 1))); try testing.expect(list.len == i + 1); } } @@ -432,7 +432,7 @@ fn testSegmentedList(comptime prealloc: usize) !void { { var i: usize = 0; while (i < 100) : (i += 1) { - try testing.expect(list.at(i).* == @intCast(i32, i + 1)); + try testing.expect(list.at(i).* == @as(i32, @intCast(i + 1))); } } @@ -492,7 +492,7 @@ fn testSegmentedList(comptime prealloc: usize) !void { var i: i32 = 0; while (i < 100) : (i += 1) { try list.append(testing.allocator, i + 1); - control[@intCast(usize, i)] = i + 1; + control[@as(usize, @intCast(i))] = i + 1; } @memset(dest[0..], 0); diff --git a/lib/std/simd.zig b/lib/std/simd.zig index 78d24a80bf..b3a50168ff 100644 --- a/lib/std/simd.zig +++ b/lib/std/simd.zig @@ -93,8 +93,8 @@ pub inline fn iota(comptime T: type, comptime len: usize) @Vector(len, T) { var out: [len]T = undefined; for (&out, 0..) 
|*element, i| { element.* = switch (@typeInfo(T)) { - .Int => @intCast(T, i), - .Float => @floatFromInt(T, i), + .Int => @as(T, @intCast(i)), + .Float => @as(T, @floatFromInt(i)), else => @compileError("Can't use type " ++ @typeName(T) ++ " in iota."), }; } @@ -107,7 +107,7 @@ pub inline fn iota(comptime T: type, comptime len: usize) @Vector(len, T) { pub fn repeat(comptime len: usize, vec: anytype) @Vector(len, std.meta.Child(@TypeOf(vec))) { const Child = std.meta.Child(@TypeOf(vec)); - return @shuffle(Child, vec, undefined, iota(i32, len) % @splat(len, @intCast(i32, vectorLength(@TypeOf(vec))))); + return @shuffle(Child, vec, undefined, iota(i32, len) % @splat(len, @as(i32, @intCast(vectorLength(@TypeOf(vec)))))); } /// Returns a vector containing all elements of the first vector at the lower indices followed by all elements of the second vector @@ -139,8 +139,8 @@ pub fn interlace(vecs: anytype) @Vector(vectorLength(@TypeOf(vecs[0])) * vecs.le const a_vec_count = (1 + vecs_arr.len) >> 1; const b_vec_count = vecs_arr.len >> 1; - const a = interlace(@ptrCast(*const [a_vec_count]VecType, vecs_arr[0..a_vec_count]).*); - const b = interlace(@ptrCast(*const [b_vec_count]VecType, vecs_arr[a_vec_count..]).*); + const a = interlace(@as(*const [a_vec_count]VecType, @ptrCast(vecs_arr[0..a_vec_count])).*); + const b = interlace(@as(*const [b_vec_count]VecType, @ptrCast(vecs_arr[a_vec_count..])).*); const a_len = vectorLength(@TypeOf(a)); const b_len = vectorLength(@TypeOf(b)); @@ -148,10 +148,10 @@ pub fn interlace(vecs: anytype) @Vector(vectorLength(@TypeOf(vecs[0])) * vecs.le const indices = comptime blk: { const count_up = iota(i32, len); - const cycle = @divFloor(count_up, @splat(len, @intCast(i32, vecs_arr.len))); + const cycle = @divFloor(count_up, @splat(len, @as(i32, @intCast(vecs_arr.len)))); const select_mask = repeat(len, join(@splat(a_vec_count, true), @splat(b_vec_count, false))); - const a_indices = count_up - cycle * @splat(len, @intCast(i32, b_vec_count)); - const b_indices = shiftElementsRight(count_up - cycle * @splat(len, @intCast(i32, a_vec_count)), a_vec_count, 0); + const a_indices = count_up - cycle * @splat(len, @as(i32, @intCast(b_vec_count))); + const b_indices = shiftElementsRight(count_up - cycle * @splat(len, @as(i32, @intCast(a_vec_count))), a_vec_count, 0); break :blk @select(i32, select_mask, a_indices, ~b_indices); }; @@ -174,7 +174,7 @@ pub fn deinterlace( comptime var i: usize = 0; // for-loops don't work for this, apparently. 
inline while (i < out.len) : (i += 1) { - const indices = comptime iota(i32, vec_len) * @splat(vec_len, @intCast(i32, vec_count)) + @splat(vec_len, @intCast(i32, i)); + const indices = comptime iota(i32, vec_len) * @splat(vec_len, @as(i32, @intCast(vec_count))) + @splat(vec_len, @as(i32, @intCast(i))); out[i] = @shuffle(Child, interlaced, undefined, indices); } @@ -189,9 +189,9 @@ pub fn extract( const Child = std.meta.Child(@TypeOf(vec)); const len = vectorLength(@TypeOf(vec)); - std.debug.assert(@intCast(comptime_int, first) + @intCast(comptime_int, count) <= len); + std.debug.assert(@as(comptime_int, @intCast(first)) + @as(comptime_int, @intCast(count)) <= len); - return @shuffle(Child, vec, undefined, iota(i32, count) + @splat(count, @intCast(i32, first))); + return @shuffle(Child, vec, undefined, iota(i32, count) + @splat(count, @as(i32, @intCast(first)))); } test "vector patterns" { @@ -263,7 +263,7 @@ pub fn reverseOrder(vec: anytype) @TypeOf(vec) { const Child = std.meta.Child(@TypeOf(vec)); const len = vectorLength(@TypeOf(vec)); - return @shuffle(Child, vec, undefined, @splat(len, @intCast(i32, len) - 1) - iota(i32, len)); + return @shuffle(Child, vec, undefined, @splat(len, @as(i32, @intCast(len)) - 1) - iota(i32, len)); } test "vector shifting" { diff --git a/lib/std/sort/pdq.zig b/lib/std/sort/pdq.zig index 23678a79c6..795dd29fc5 100644 --- a/lib/std/sort/pdq.zig +++ b/lib/std/sort/pdq.zig @@ -251,7 +251,7 @@ fn breakPatterns(a: usize, b: usize, context: anytype) void { const len = b - a; if (len < 8) return; - var rand = @intCast(u64, len); + var rand = @as(u64, @intCast(len)); const modulus = math.ceilPowerOfTwoAssert(u64, len); var i = a + (len / 4) * 2 - 1; @@ -261,7 +261,7 @@ fn breakPatterns(a: usize, b: usize, context: anytype) void { rand ^= rand >> 7; rand ^= rand << 17; - var other = @intCast(usize, rand & (modulus - 1)); + var other = @as(usize, @intCast(rand & (modulus - 1))); if (other >= len) other -= len; context.swap(i, a + other); } diff --git a/lib/std/start.zig b/lib/std/start.zig index 9c83bd881c..d81eb4f9e9 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -190,7 +190,7 @@ fn exit2(code: usize) noreturn { else => @compileError("TODO"), }, .windows => { - ExitProcess(@truncate(u32, code)); + ExitProcess(@as(u32, @truncate(code))); }, else => @compileError("TODO"), } @@ -387,23 +387,23 @@ fn wWinMainCRTStartup() callconv(std.os.windows.WINAPI) noreturn { std.debug.maybeEnableSegfaultHandler(); const result: std.os.windows.INT = initEventLoopAndCallWinMain(); - std.os.windows.kernel32.ExitProcess(@bitCast(std.os.windows.UINT, result)); + std.os.windows.kernel32.ExitProcess(@as(std.os.windows.UINT, @bitCast(result))); } fn posixCallMainAndExit() callconv(.C) noreturn { @setAlignStack(16); const argc = argc_argv_ptr[0]; - const argv = @ptrCast([*][*:0]u8, argc_argv_ptr + 1); + const argv = @as([*][*:0]u8, @ptrCast(argc_argv_ptr + 1)); - const envp_optional = @ptrCast([*:null]?[*:0]u8, @alignCast(@alignOf(usize), argv + argc + 1)); + const envp_optional: [*:null]?[*:0]u8 = @ptrCast(@alignCast(argv + argc + 1)); var envp_count: usize = 0; while (envp_optional[envp_count]) |_| : (envp_count += 1) {} - const envp = @ptrCast([*][*:0]u8, envp_optional)[0..envp_count]; + const envp = @as([*][*:0]u8, @ptrCast(envp_optional))[0..envp_count]; if (native_os == .linux) { // Find the beginning of the auxiliary vector - const auxv = @ptrCast([*]elf.Auxv, @alignCast(@alignOf(usize), envp.ptr + envp_count + 1)); + const auxv: [*]elf.Auxv = @ptrCast(@alignCast(envp.ptr 
+ envp_count + 1)); std.os.linux.elf_aux_maybe = auxv; var at_hwcap: usize = 0; @@ -419,7 +419,7 @@ fn posixCallMainAndExit() callconv(.C) noreturn { else => continue, } } - break :init @ptrFromInt([*]elf.Phdr, at_phdr)[0..at_phnum]; + break :init @as([*]elf.Phdr, @ptrFromInt(at_phdr))[0..at_phnum]; }; // Apply the initial relocations as early as possible in the startup @@ -495,20 +495,20 @@ fn callMainWithArgs(argc: usize, argv: [*][*:0]u8, envp: [][*:0]u8) u8 { fn main(c_argc: c_int, c_argv: [*][*:0]c_char, c_envp: [*:null]?[*:0]c_char) callconv(.C) c_int { var env_count: usize = 0; while (c_envp[env_count] != null) : (env_count += 1) {} - const envp = @ptrCast([*][*:0]u8, c_envp)[0..env_count]; + const envp = @as([*][*:0]u8, @ptrCast(c_envp))[0..env_count]; if (builtin.os.tag == .linux) { const at_phdr = std.c.getauxval(elf.AT_PHDR); const at_phnum = std.c.getauxval(elf.AT_PHNUM); - const phdrs = (@ptrFromInt([*]elf.Phdr, at_phdr))[0..at_phnum]; + const phdrs = (@as([*]elf.Phdr, @ptrFromInt(at_phdr)))[0..at_phnum]; expandStackSize(phdrs); } - return @call(.always_inline, callMainWithArgs, .{ @intCast(usize, c_argc), @ptrCast([*][*:0]u8, c_argv), envp }); + return @call(.always_inline, callMainWithArgs, .{ @as(usize, @intCast(c_argc)), @as([*][*:0]u8, @ptrCast(c_argv)), envp }); } fn mainWithoutEnv(c_argc: c_int, c_argv: [*][*:0]c_char) callconv(.C) c_int { - std.os.argv = @ptrCast([*][*:0]u8, c_argv)[0..@intCast(usize, c_argc)]; + std.os.argv = @as([*][*:0]u8, @ptrCast(c_argv))[0..@as(usize, @intCast(c_argc))]; return @call(.always_inline, callMain, .{}); } @@ -629,7 +629,7 @@ pub fn callMain() u8 { pub fn call_wWinMain() std.os.windows.INT { const MAIN_HINSTANCE = @typeInfo(@TypeOf(root.wWinMain)).Fn.params[0].type.?; - const hInstance = @ptrCast(MAIN_HINSTANCE, std.os.windows.kernel32.GetModuleHandleW(null).?); + const hInstance = @as(MAIN_HINSTANCE, @ptrCast(std.os.windows.kernel32.GetModuleHandleW(null).?)); const lpCmdLine = std.os.windows.kernel32.GetCommandLineW(); // There's no (documented) way to get the nCmdShow parameter, so we're diff --git a/lib/std/start_windows_tls.zig b/lib/std/start_windows_tls.zig index a1cd8387dc..48880b4811 100644 --- a/lib/std/start_windows_tls.zig +++ b/lib/std/start_windows_tls.zig @@ -42,7 +42,7 @@ export const _tls_used linksection(".rdata$T") = IMAGE_TLS_DIRECTORY{ .StartAddressOfRawData = &_tls_start, .EndAddressOfRawData = &_tls_end, .AddressOfIndex = &_tls_index, - .AddressOfCallBacks = @ptrCast(*anyopaque, &__xl_a), + .AddressOfCallBacks = @as(*anyopaque, @ptrCast(&__xl_a)), .SizeOfZeroFill = 0, .Characteristics = 0, }; diff --git a/lib/std/tar.zig b/lib/std/tar.zig index 688d093587..bc9a22fb7c 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -70,8 +70,8 @@ pub const Header = struct { } pub fn fileType(header: Header) FileType { - const result = @enumFromInt(FileType, header.bytes[156]); - return if (result == @enumFromInt(FileType, 0)) .normal else result; + const result = @as(FileType, @enumFromInt(header.bytes[156])); + return if (result == @as(FileType, @enumFromInt(0))) .normal else result; } fn str(header: Header, start: usize, end: usize) []const u8 { @@ -117,7 +117,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi start += 512; const file_size = try header.fileSize(); const rounded_file_size = std.mem.alignForward(u64, file_size, 512); - const pad_len = @intCast(usize, rounded_file_size - file_size); + const pad_len = @as(usize, @intCast(rounded_file_size - file_size)); const 
unstripped_file_name = try header.fullFileName(&file_name_buffer); switch (header.fileType()) { .directory => { @@ -146,14 +146,14 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi } // Ask for the rounded up file size + 512 for the next header. // TODO: https://github.com/ziglang/zig/issues/14039 - const ask = @intCast(usize, @min( + const ask = @as(usize, @intCast(@min( buffer.len - end, rounded_file_size + 512 - file_off -| (end - start), - )); + ))); end += try reader.readAtLeast(buffer[end..], ask); if (end - start < ask) return error.UnexpectedEndOfStream; // TODO: https://github.com/ziglang/zig/issues/14039 - const slice = buffer[start..@intCast(usize, @min(file_size - file_off + start, end))]; + const slice = buffer[start..@as(usize, @intCast(@min(file_size - file_off + start, end)))]; try file.writeAll(slice); file_off += slice.len; start += slice.len; @@ -167,7 +167,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi }, .global_extended_header, .extended_header => { if (start + rounded_file_size > end) return error.TarHeadersTooBig; - start = @intCast(usize, start + rounded_file_size); + start = @as(usize, @intCast(start + rounded_file_size)); }, .hard_link => return error.TarUnsupportedFileType, .symbolic_link => return error.TarUnsupportedFileType, diff --git a/lib/std/target.zig b/lib/std/target.zig index ec61292360..2a96e84001 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -711,14 +711,14 @@ pub const Target = struct { pub fn isEnabled(set: Set, arch_feature_index: Index) bool { const usize_index = arch_feature_index / @bitSizeOf(usize); - const bit_index = @intCast(ShiftInt, arch_feature_index % @bitSizeOf(usize)); + const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize))); return (set.ints[usize_index] & (@as(usize, 1) << bit_index)) != 0; } /// Adds the specified feature but not its dependencies. pub fn addFeature(set: *Set, arch_feature_index: Index) void { const usize_index = arch_feature_index / @bitSizeOf(usize); - const bit_index = @intCast(ShiftInt, arch_feature_index % @bitSizeOf(usize)); + const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize))); set.ints[usize_index] |= @as(usize, 1) << bit_index; } @@ -730,7 +730,7 @@ pub const Target = struct { /// Removes the specified feature but not its dependents. pub fn removeFeature(set: *Set, arch_feature_index: Index) void { const usize_index = arch_feature_index / @bitSizeOf(usize); - const bit_index = @intCast(ShiftInt, arch_feature_index % @bitSizeOf(usize)); + const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize))); set.ints[usize_index] &= ~(@as(usize, 1) << bit_index); } @@ -745,7 +745,7 @@ pub const Target = struct { var old = set.ints; while (true) { for (all_features_list, 0..) 
|feature, index_usize| { - const index = @intCast(Index, index_usize); + const index = @as(Index, @intCast(index_usize)); if (set.isEnabled(index)) { set.addFeatureSet(feature.dependencies); } @@ -757,7 +757,7 @@ pub const Target = struct { } pub fn asBytes(set: *const Set) *const [byte_count]u8 { - return @ptrCast(*const [byte_count]u8, &set.ints); + return @as(*const [byte_count]u8, @ptrCast(&set.ints)); } pub fn eql(set: Set, other_set: Set) bool { @@ -1526,7 +1526,7 @@ pub const Target = struct { pub fn set(self: *DynamicLinker, dl_or_null: ?[]const u8) void { if (dl_or_null) |dl| { @memcpy(self.buffer[0..dl.len], dl); - self.max_byte = @intCast(u8, dl.len - 1); + self.max_byte = @as(u8, @intCast(dl.len - 1)); } else { self.max_byte = null; } @@ -1537,12 +1537,12 @@ pub const Target = struct { var result: DynamicLinker = .{}; const S = struct { fn print(r: *DynamicLinker, comptime fmt: []const u8, args: anytype) DynamicLinker { - r.max_byte = @intCast(u8, (std.fmt.bufPrint(&r.buffer, fmt, args) catch unreachable).len - 1); + r.max_byte = @as(u8, @intCast((std.fmt.bufPrint(&r.buffer, fmt, args) catch unreachable).len - 1)); return r.*; } fn copy(r: *DynamicLinker, s: []const u8) DynamicLinker { @memcpy(r.buffer[0..s.len], s); - r.max_byte = @intCast(u8, s.len - 1); + r.max_byte = @as(u8, @intCast(s.len - 1)); return r.*; } }; @@ -1970,7 +1970,7 @@ pub const Target = struct { 16 => 2, 32 => 4, 64 => 8, - 80 => @intCast(u16, mem.alignForward(usize, 10, c_type_alignment(t, .longdouble))), + 80 => @as(u16, @intCast(mem.alignForward(usize, 10, c_type_alignment(t, .longdouble)))), 128 => 16, else => unreachable, }, diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig index 2cdb78cd1d..313af987ab 100644 --- a/lib/std/testing/failing_allocator.zig +++ b/lib/std/testing/failing_allocator.zig @@ -63,7 +63,7 @@ pub const FailingAllocator = struct { log2_ptr_align: u8, return_address: usize, ) ?[*]u8 { - const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx)); + const self: *FailingAllocator = @ptrCast(@alignCast(ctx)); if (self.index == self.fail_index) { if (!self.has_induced_failure) { @memset(&self.stack_addresses, 0); @@ -91,7 +91,7 @@ pub const FailingAllocator = struct { new_len: usize, ra: usize, ) bool { - const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx)); + const self: *FailingAllocator = @ptrCast(@alignCast(ctx)); if (!self.internal_allocator.rawResize(old_mem, log2_old_align, new_len, ra)) return false; if (new_len < old_mem.len) { @@ -108,7 +108,7 @@ pub const FailingAllocator = struct { log2_old_align: u8, ra: usize, ) void { - const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx)); + const self: *FailingAllocator = @ptrCast(@alignCast(ctx)); self.internal_allocator.rawFree(old_mem, log2_old_align, ra); self.deallocations += 1; self.freed_bytes += old_mem.len; diff --git a/lib/std/time.zig b/lib/std/time.zig index 3eb342fa85..a60a0ef959 100644 --- a/lib/std/time.zig +++ b/lib/std/time.zig @@ -70,7 +70,7 @@ pub fn timestamp() i64 { /// before the epoch. /// See `std.os.clock_gettime` for a POSIX timestamp. pub fn milliTimestamp() i64 { - return @intCast(i64, @divFloor(nanoTimestamp(), ns_per_ms)); + return @as(i64, @intCast(@divFloor(nanoTimestamp(), ns_per_ms))); } /// Get a calendar timestamp, in microseconds, relative to UTC 1970-01-01. @@ -79,7 +79,7 @@ pub fn milliTimestamp() i64 { /// before the epoch. 
/// See `std.os.clock_gettime` for a POSIX timestamp. pub fn microTimestamp() i64 { - return @intCast(i64, @divFloor(nanoTimestamp(), ns_per_us)); + return @as(i64, @intCast(@divFloor(nanoTimestamp(), ns_per_us))); } /// Get a calendar timestamp, in nanoseconds, relative to UTC 1970-01-01. @@ -96,7 +96,7 @@ pub fn nanoTimestamp() i128 { var ft: os.windows.FILETIME = undefined; os.windows.kernel32.GetSystemTimeAsFileTime(&ft); const ft64 = (@as(u64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime; - return @as(i128, @bitCast(i64, ft64) + epoch_adj) * 100; + return @as(i128, @as(i64, @bitCast(ft64)) + epoch_adj) * 100; } if (builtin.os.tag == .wasi and !builtin.link_libc) { @@ -239,9 +239,9 @@ pub const Instant = struct { } // Convert to ns using fixed point. - const scale = @as(u64, std.time.ns_per_s << 32) / @intCast(u32, qpf); + const scale = @as(u64, std.time.ns_per_s << 32) / @as(u32, @intCast(qpf)); const result = (@as(u96, qpc) * scale) >> 32; - return @truncate(u64, result); + return @as(u64, @truncate(result)); } // WASI timestamps are directly in nanoseconds @@ -250,9 +250,9 @@ pub const Instant = struct { } // Convert timespec diff to ns - const seconds = @intCast(u64, self.timestamp.tv_sec - earlier.timestamp.tv_sec); - const elapsed = (seconds * ns_per_s) + @intCast(u32, self.timestamp.tv_nsec); - return elapsed - @intCast(u32, earlier.timestamp.tv_nsec); + const seconds = @as(u64, @intCast(self.timestamp.tv_sec - earlier.timestamp.tv_sec)); + const elapsed = (seconds * ns_per_s) + @as(u32, @intCast(self.timestamp.tv_nsec)); + return elapsed - @as(u32, @intCast(earlier.timestamp.tv_nsec)); } }; diff --git a/lib/std/time/epoch.zig b/lib/std/time/epoch.zig index 279acc4298..f467721a49 100644 --- a/lib/std/time/epoch.zig +++ b/lib/std/time/epoch.zig @@ -122,9 +122,9 @@ pub const YearAndDay = struct { if (days_left < days_in_month) break; days_left -= days_in_month; - month = @enumFromInt(Month, @intFromEnum(month) + 1); + month = @as(Month, @enumFromInt(@intFromEnum(month) + 1)); } - return .{ .month = month, .day_index = @intCast(u5, days_left) }; + return .{ .month = month, .day_index = @as(u5, @intCast(days_left)) }; } }; @@ -146,7 +146,7 @@ pub const EpochDay = struct { year_day -= year_size; year += 1; } - return .{ .year = year, .day = @intCast(u9, year_day) }; + return .{ .year = year, .day = @as(u9, @intCast(year_day)) }; } }; @@ -156,11 +156,11 @@ pub const DaySeconds = struct { /// the number of hours past the start of the day (0 to 23) pub fn getHoursIntoDay(self: DaySeconds) u5 { - return @intCast(u5, @divTrunc(self.secs, 3600)); + return @as(u5, @intCast(@divTrunc(self.secs, 3600))); } /// the number of minutes past the hour (0 to 59) pub fn getMinutesIntoHour(self: DaySeconds) u6 { - return @intCast(u6, @divTrunc(@mod(self.secs, 3600), 60)); + return @as(u6, @intCast(@divTrunc(@mod(self.secs, 3600), 60))); } /// the number of seconds past the start of the minute (0 to 59) pub fn getSecondsIntoMinute(self: DaySeconds) u6 { @@ -175,7 +175,7 @@ pub const EpochSeconds = struct { /// Returns the number of days since the epoch as an EpochDay. /// Use EpochDay to get information about the day of this time. pub fn getEpochDay(self: EpochSeconds) EpochDay { - return EpochDay{ .day = @intCast(u47, @divTrunc(self.secs, secs_per_day)) }; + return EpochDay{ .day = @as(u47, @intCast(@divTrunc(self.secs, secs_per_day))) }; } /// Returns the number of seconds into the day as DaySeconds. 
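The standard-library hunks in this patch all apply the same mechanical rewrite: the old two-argument cast builtins (`@intCast(T, x)`, `@truncate(T, x)`, `@bitCast(T, x)`, and friends) named their destination type as the first argument, while the new single-argument forms take only the operand, with `@as(T, ...)` supplying the result type wherever one is still needed. A minimal sketch of the pattern, using a hypothetical helper that is not part of this patch:

    const std = @import("std");

    // Hypothetical helper illustrating the migration pattern applied above.
    fn lowByte(x: u64) u8 {
        // Before this change: `return @truncate(u8, x);`
        // After: the builtin takes only the operand; the result type is
        // provided via `@as` (or inferred from the surrounding result location).
        return @as(u8, @truncate(x));
    }

    test "lowByte" {
        try std.testing.expectEqual(@as(u8, 0x78), lowByte(0x12345678));
    }
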
diff --git a/lib/std/tz.zig b/lib/std/tz.zig index 0cb9cefa50..16288bd4ce 100644 --- a/lib/std/tz.zig +++ b/lib/std/tz.zig @@ -155,8 +155,8 @@ pub const Tz = struct { if (corr > std.math.maxInt(i16)) return error.Malformed; // Unreasonably large correction leapseconds[i] = .{ - .occurrence = @intCast(i48, occur), - .correction = @intCast(i16, corr), + .occurrence = @as(i48, @intCast(occur)), + .correction = @as(i16, @intCast(corr)), }; } diff --git a/lib/std/unicode.zig b/lib/std/unicode.zig index 1987d10b0d..12cb74bd92 100644 --- a/lib/std/unicode.zig +++ b/lib/std/unicode.zig @@ -45,22 +45,22 @@ pub fn utf8Encode(c: u21, out: []u8) !u3 { // - Increasing the initial shift by 6 each time // - Each time after the first shorten the shifted // value to a max of 0b111111 (63) - 1 => out[0] = @intCast(u8, c), // Can just do 0 + codepoint for initial range + 1 => out[0] = @as(u8, @intCast(c)), // Can just do 0 + codepoint for initial range 2 => { - out[0] = @intCast(u8, 0b11000000 | (c >> 6)); - out[1] = @intCast(u8, 0b10000000 | (c & 0b111111)); + out[0] = @as(u8, @intCast(0b11000000 | (c >> 6))); + out[1] = @as(u8, @intCast(0b10000000 | (c & 0b111111))); }, 3 => { if (0xd800 <= c and c <= 0xdfff) return error.Utf8CannotEncodeSurrogateHalf; - out[0] = @intCast(u8, 0b11100000 | (c >> 12)); - out[1] = @intCast(u8, 0b10000000 | ((c >> 6) & 0b111111)); - out[2] = @intCast(u8, 0b10000000 | (c & 0b111111)); + out[0] = @as(u8, @intCast(0b11100000 | (c >> 12))); + out[1] = @as(u8, @intCast(0b10000000 | ((c >> 6) & 0b111111))); + out[2] = @as(u8, @intCast(0b10000000 | (c & 0b111111))); }, 4 => { - out[0] = @intCast(u8, 0b11110000 | (c >> 18)); - out[1] = @intCast(u8, 0b10000000 | ((c >> 12) & 0b111111)); - out[2] = @intCast(u8, 0b10000000 | ((c >> 6) & 0b111111)); - out[3] = @intCast(u8, 0b10000000 | (c & 0b111111)); + out[0] = @as(u8, @intCast(0b11110000 | (c >> 18))); + out[1] = @as(u8, @intCast(0b10000000 | ((c >> 12) & 0b111111))); + out[2] = @as(u8, @intCast(0b10000000 | ((c >> 6) & 0b111111))); + out[3] = @as(u8, @intCast(0b10000000 | (c & 0b111111))); }, else => unreachable, } @@ -695,11 +695,11 @@ pub fn utf8ToUtf16LeWithNull(allocator: mem.Allocator, utf8: []const u8) ![:0]u1 var it = view.iterator(); while (it.nextCodepoint()) |codepoint| { if (codepoint < 0x10000) { - const short = @intCast(u16, codepoint); + const short = @as(u16, @intCast(codepoint)); try result.append(mem.nativeToLittle(u16, short)); } else { - const high = @intCast(u16, (codepoint - 0x10000) >> 10) + 0xD800; - const low = @intCast(u16, codepoint & 0x3FF) + 0xDC00; + const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800; + const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00; var out: [2]u16 = undefined; out[0] = mem.nativeToLittle(u16, high); out[1] = mem.nativeToLittle(u16, low); @@ -720,12 +720,12 @@ pub fn utf8ToUtf16Le(utf16le: []u16, utf8: []const u8) !usize { const next_src_i = src_i + n; const codepoint = utf8Decode(utf8[src_i..next_src_i]) catch return error.InvalidUtf8; if (codepoint < 0x10000) { - const short = @intCast(u16, codepoint); + const short = @as(u16, @intCast(codepoint)); utf16le[dest_i] = mem.nativeToLittle(u16, short); dest_i += 1; } else { - const high = @intCast(u16, (codepoint - 0x10000) >> 10) + 0xD800; - const low = @intCast(u16, codepoint & 0x3FF) + 0xDC00; + const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800; + const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00; utf16le[dest_i] = mem.nativeToLittle(u16, high); utf16le[dest_i + 1] = 
mem.nativeToLittle(u16, low); dest_i += 2; diff --git a/lib/std/unicode/throughput_test.zig b/lib/std/unicode/throughput_test.zig index b828b4e43f..084406dc78 100644 --- a/lib/std/unicode/throughput_test.zig +++ b/lib/std/unicode/throughput_test.zig @@ -32,8 +32,8 @@ fn benchmarkCodepointCount(buf: []const u8) !ResultCount { } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s)); return ResultCount{ .count = r, .throughput = throughput }; } diff --git a/lib/std/valgrind.zig b/lib/std/valgrind.zig index ae4fde0da1..61312e2338 100644 --- a/lib/std/valgrind.zig +++ b/lib/std/valgrind.zig @@ -94,7 +94,7 @@ pub fn IsTool(base: [2]u8, code: usize) bool { } fn doClientRequestExpr(default: usize, request: ClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize { - return doClientRequest(default, @intCast(usize, @intFromEnum(request)), a1, a2, a3, a4, a5); + return doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5); } fn doClientRequestStmt(request: ClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void { diff --git a/lib/std/valgrind/callgrind.zig b/lib/std/valgrind/callgrind.zig index f3d8c7ae3c..3ba74fb525 100644 --- a/lib/std/valgrind/callgrind.zig +++ b/lib/std/valgrind/callgrind.zig @@ -11,7 +11,7 @@ pub const CallgrindClientRequest = enum(usize) { }; fn doCallgrindClientRequestExpr(default: usize, request: CallgrindClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize { - return valgrind.doClientRequest(default, @intCast(usize, @intFromEnum(request)), a1, a2, a3, a4, a5); + return valgrind.doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5); } fn doCallgrindClientRequestStmt(request: CallgrindClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void { diff --git a/lib/std/valgrind/memcheck.zig b/lib/std/valgrind/memcheck.zig index dd6c79cd90..7f5e973c43 100644 --- a/lib/std/valgrind/memcheck.zig +++ b/lib/std/valgrind/memcheck.zig @@ -21,7 +21,7 @@ pub const MemCheckClientRequest = enum(usize) { }; fn doMemCheckClientRequestExpr(default: usize, request: MemCheckClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize { - return valgrind.doClientRequest(default, @intCast(usize, @intFromEnum(request)), a1, a2, a3, a4, a5); + return valgrind.doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5); } fn doMemCheckClientRequestStmt(request: MemCheckClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void { @@ -31,24 +31,24 @@ fn doMemCheckClientRequestStmt(request: MemCheckClientRequest, a1: usize, a2: us /// Mark memory at qzz.ptr as unaddressable for qzz.len bytes. /// This returns -1 when run on Valgrind and 0 otherwise. pub fn makeMemNoAccess(qzz: []u8) i1 { - return @intCast(i1, doMemCheckClientRequestExpr(0, // default return - .MakeMemNoAccess, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)); + return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return + .MakeMemNoAccess, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0))); } /// Similarly, mark memory at qzz.ptr as addressable but undefined /// for qzz.len bytes. /// This returns -1 when run on Valgrind and 0 otherwise. 
pub fn makeMemUndefined(qzz: []u8) i1 { - return @intCast(i1, doMemCheckClientRequestExpr(0, // default return - .MakeMemUndefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)); + return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return + .MakeMemUndefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0))); } /// Similarly, mark memory at qzz.ptr as addressable and defined /// for qzz.len bytes. pub fn makeMemDefined(qzz: []u8) i1 { // This returns -1 when run on Valgrind and 0 otherwise. - return @intCast(i1, doMemCheckClientRequestExpr(0, // default return - .MakeMemDefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)); + return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return + .MakeMemDefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0))); } /// Similar to makeMemDefined except that addressability is @@ -56,8 +56,8 @@ pub fn makeMemDefined(qzz: []u8) i1 { /// but those which are not addressable are left unchanged. /// This returns -1 when run on Valgrind and 0 otherwise. pub fn makeMemDefinedIfAddressable(qzz: []u8) i1 { - return @intCast(i1, doMemCheckClientRequestExpr(0, // default return - .MakeMemDefinedIfAddressable, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)); + return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return + .MakeMemDefinedIfAddressable, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0))); } /// Create a block-description handle. The description is an ascii @@ -195,7 +195,7 @@ test "countLeakBlocks" { /// impossible to segfault your system by using this call. pub fn getVbits(zza: []u8, zzvbits: []u8) u2 { std.debug.assert(zzvbits.len >= zza.len / 8); - return @intCast(u2, doMemCheckClientRequestExpr(0, .GetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0)); + return @as(u2, @intCast(doMemCheckClientRequestExpr(0, .GetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0))); } /// Set the validity data for addresses zza, copying it @@ -208,7 +208,7 @@ pub fn getVbits(zza: []u8, zzvbits: []u8) u2 { /// impossible to segfault your system by using this call. pub fn setVbits(zzvbits: []u8, zza: []u8) u2 { std.debug.assert(zzvbits.len >= zza.len / 8); - return @intCast(u2, doMemCheckClientRequestExpr(0, .SetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0)); + return @as(u2, @intCast(doMemCheckClientRequestExpr(0, .SetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0))); } /// Disable and re-enable reporting of addressing errors in the diff --git a/lib/std/zig.zig b/lib/std/zig.zig index fe6d2ec120..63b620f674 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -36,7 +36,7 @@ pub fn hashSrc(src: []const u8) SrcHash { } pub fn srcHashEql(a: SrcHash, b: SrcHash) bool { - return @bitCast(u128, a) == @bitCast(u128, b); + return @as(u128, @bitCast(a)) == @as(u128, @bitCast(b)); } pub fn hashName(parent_hash: SrcHash, sep: []const u8, name: []const u8) SrcHash { diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig index 86e4e48820..a82982e262 100644 --- a/lib/std/zig/Ast.zig +++ b/lib/std/zig/Ast.zig @@ -62,7 +62,7 @@ pub fn parse(gpa: Allocator, source: [:0]const u8, mode: Mode) Allocator.Error!A const token = tokenizer.next(); try tokens.append(gpa, .{ .tag = token.tag, - .start = @intCast(u32, token.loc.start), + .start = @as(u32, @intCast(token.loc.start)), }); if (token.tag == .eof) break; } @@ -123,7 +123,7 @@ pub fn renderToArrayList(tree: Ast, buffer: *std.ArrayList(u8)) RenderError!void /// should point after the token in the error message. 
pub fn errorOffset(tree: Ast, parse_error: Error) u32 { return if (parse_error.token_is_prev) - @intCast(u32, tree.tokenSlice(parse_error.token).len) + @as(u32, @intCast(tree.tokenSlice(parse_error.token).len)) else 0; } @@ -772,7 +772,7 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex { var n = node; var end_offset: TokenIndex = 0; while (true) switch (tags[n]) { - .root => return @intCast(TokenIndex, tree.tokens.len - 1), + .root => return @as(TokenIndex, @intCast(tree.tokens.len - 1)), .@"usingnamespace", .bool_not, @@ -1288,7 +1288,7 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex { n = extra.else_expr; }, .@"for" => { - const extra = @bitCast(Node.For, datas[n].rhs); + const extra = @as(Node.For, @bitCast(datas[n].rhs)); n = tree.extra_data[datas[n].lhs + extra.inputs + @intFromBool(extra.has_else)]; }, .@"suspend" => { @@ -1955,7 +1955,7 @@ pub fn forSimple(tree: Ast, node: Node.Index) full.For { pub fn forFull(tree: Ast, node: Node.Index) full.For { const data = tree.nodes.items(.data)[node]; - const extra = @bitCast(Node.For, data.rhs); + const extra = @as(Node.For, @bitCast(data.rhs)); const inputs = tree.extra_data[data.lhs..][0..extra.inputs]; const then_expr = tree.extra_data[data.lhs + extra.inputs]; const else_expr = if (extra.has_else) tree.extra_data[data.lhs + extra.inputs + 1] else 0; diff --git a/lib/std/zig/CrossTarget.zig b/lib/std/zig/CrossTarget.zig index 13219888b2..d42b02d931 100644 --- a/lib/std/zig/CrossTarget.zig +++ b/lib/std/zig/CrossTarget.zig @@ -317,7 +317,7 @@ pub fn parse(args: ParseOptions) !CrossTarget { } const feature_name = cpu_features[start..index]; for (all_features, 0..) |feature, feat_index_usize| { - const feat_index = @intCast(Target.Cpu.Feature.Set.Index, feat_index_usize); + const feat_index = @as(Target.Cpu.Feature.Set.Index, @intCast(feat_index_usize)); if (mem.eql(u8, feature_name, feature.name)) { set.addFeature(feat_index); break; diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig index fe3d97517f..201c06d4d7 100644 --- a/lib/std/zig/ErrorBundle.zig +++ b/lib/std/zig/ErrorBundle.zig @@ -94,7 +94,7 @@ pub fn getErrorMessageList(eb: ErrorBundle) ErrorMessageList { pub fn getMessages(eb: ErrorBundle) []const MessageIndex { const list = eb.getErrorMessageList(); - return @ptrCast([]const MessageIndex, eb.extra[list.start..][0..list.len]); + return @as([]const MessageIndex, @ptrCast(eb.extra[list.start..][0..list.len])); } pub fn getErrorMessage(eb: ErrorBundle, index: MessageIndex) ErrorMessage { @@ -109,7 +109,7 @@ pub fn getSourceLocation(eb: ErrorBundle, index: SourceLocationIndex) SourceLoca pub fn getNotes(eb: ErrorBundle, index: MessageIndex) []const MessageIndex { const notes_len = eb.getErrorMessage(index).notes_len; const start = @intFromEnum(index) + @typeInfo(ErrorMessage).Struct.fields.len; - return @ptrCast([]const MessageIndex, eb.extra[start..][0..notes_len]); + return @as([]const MessageIndex, @ptrCast(eb.extra[start..][0..notes_len])); } pub fn getCompileLogOutput(eb: ErrorBundle) [:0]const u8 { @@ -125,8 +125,8 @@ fn extraData(eb: ErrorBundle, comptime T: type, index: usize) struct { data: T, inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => eb.extra[i], - MessageIndex => @enumFromInt(MessageIndex, eb.extra[i]), - SourceLocationIndex => @enumFromInt(SourceLocationIndex, eb.extra[i]), + MessageIndex => @as(MessageIndex, @enumFromInt(eb.extra[i])), + SourceLocationIndex => @as(SourceLocationIndex, @enumFromInt(eb.extra[i])), else => 
@compileError("bad field type"), }; i += 1; @@ -202,7 +202,7 @@ fn renderErrorMessageToWriter( try counting_stderr.writeAll(": "); // This is the length of the part before the error message: // e.g. "file.zig:4:5: error: " - const prefix_len = @intCast(usize, counting_stderr.context.bytes_written); + const prefix_len = @as(usize, @intCast(counting_stderr.context.bytes_written)); try ttyconf.setColor(stderr, .reset); try ttyconf.setColor(stderr, .bold); if (err_msg.count == 1) { @@ -357,7 +357,7 @@ pub const Wip = struct { } const compile_log_str_index = if (compile_log_text.len == 0) 0 else str: { - const str = @intCast(u32, wip.string_bytes.items.len); + const str = @as(u32, @intCast(wip.string_bytes.items.len)); try wip.string_bytes.ensureUnusedCapacity(gpa, compile_log_text.len + 1); wip.string_bytes.appendSliceAssumeCapacity(compile_log_text); wip.string_bytes.appendAssumeCapacity(0); @@ -365,11 +365,11 @@ pub const Wip = struct { }; wip.setExtra(0, ErrorMessageList{ - .len = @intCast(u32, wip.root_list.items.len), - .start = @intCast(u32, wip.extra.items.len), + .len = @as(u32, @intCast(wip.root_list.items.len)), + .start = @as(u32, @intCast(wip.extra.items.len)), .compile_log_text = compile_log_str_index, }); - try wip.extra.appendSlice(gpa, @ptrCast([]const u32, wip.root_list.items)); + try wip.extra.appendSlice(gpa, @as([]const u32, @ptrCast(wip.root_list.items))); wip.root_list.clearAndFree(gpa); return .{ .string_bytes = try wip.string_bytes.toOwnedSlice(gpa), @@ -386,7 +386,7 @@ pub const Wip = struct { pub fn addString(wip: *Wip, s: []const u8) !u32 { const gpa = wip.gpa; - const index = @intCast(u32, wip.string_bytes.items.len); + const index = @as(u32, @intCast(wip.string_bytes.items.len)); try wip.string_bytes.ensureUnusedCapacity(gpa, s.len + 1); wip.string_bytes.appendSliceAssumeCapacity(s); wip.string_bytes.appendAssumeCapacity(0); @@ -395,7 +395,7 @@ pub const Wip = struct { pub fn printString(wip: *Wip, comptime fmt: []const u8, args: anytype) !u32 { const gpa = wip.gpa; - const index = @intCast(u32, wip.string_bytes.items.len); + const index = @as(u32, @intCast(wip.string_bytes.items.len)); try wip.string_bytes.writer(gpa).print(fmt, args); try wip.string_bytes.append(gpa, 0); return index; @@ -407,15 +407,15 @@ pub const Wip = struct { } pub fn addErrorMessage(wip: *Wip, em: ErrorMessage) !MessageIndex { - return @enumFromInt(MessageIndex, try addExtra(wip, em)); + return @as(MessageIndex, @enumFromInt(try addExtra(wip, em))); } pub fn addErrorMessageAssumeCapacity(wip: *Wip, em: ErrorMessage) MessageIndex { - return @enumFromInt(MessageIndex, addExtraAssumeCapacity(wip, em)); + return @as(MessageIndex, @enumFromInt(addExtraAssumeCapacity(wip, em))); } pub fn addSourceLocation(wip: *Wip, sl: SourceLocation) !SourceLocationIndex { - return @enumFromInt(SourceLocationIndex, try addExtra(wip, sl)); + return @as(SourceLocationIndex, @enumFromInt(try addExtra(wip, sl))); } pub fn addReferenceTrace(wip: *Wip, rt: ReferenceTrace) !void { @@ -431,7 +431,7 @@ pub const Wip = struct { const other_list = other.getMessages(); // The ensureUnusedCapacity call above guarantees this. 
- const notes_start = wip.reserveNotes(@intCast(u32, other_list.len)) catch unreachable; + const notes_start = wip.reserveNotes(@as(u32, @intCast(other_list.len))) catch unreachable; for (notes_start.., other_list) |note, message| { wip.extra.items[note] = @intFromEnum(wip.addOtherMessage(other, message) catch unreachable); } @@ -441,7 +441,7 @@ pub const Wip = struct { try wip.extra.ensureUnusedCapacity(wip.gpa, notes_len + notes_len * @typeInfo(ErrorBundle.ErrorMessage).Struct.fields.len); wip.extra.items.len += notes_len; - return @intCast(u32, wip.extra.items.len - notes_len); + return @as(u32, @intCast(wip.extra.items.len - notes_len)); } fn addOtherMessage(wip: *Wip, other: ErrorBundle, msg_index: MessageIndex) !MessageIndex { @@ -493,7 +493,7 @@ pub const Wip = struct { fn addExtraAssumeCapacity(wip: *Wip, extra: anytype) u32 { const fields = @typeInfo(@TypeOf(extra)).Struct.fields; - const result = @intCast(u32, wip.extra.items.len); + const result = @as(u32, @intCast(wip.extra.items.len)); wip.extra.items.len += fields.len; setExtra(wip, result, extra); return result; diff --git a/lib/std/zig/Parse.zig b/lib/std/zig/Parse.zig index f3eec86acc..14019571b1 100644 --- a/lib/std/zig/Parse.zig +++ b/lib/std/zig/Parse.zig @@ -36,20 +36,20 @@ const Members = struct { fn listToSpan(p: *Parse, list: []const Node.Index) !Node.SubRange { try p.extra_data.appendSlice(p.gpa, list); return Node.SubRange{ - .start = @intCast(Node.Index, p.extra_data.items.len - list.len), - .end = @intCast(Node.Index, p.extra_data.items.len), + .start = @as(Node.Index, @intCast(p.extra_data.items.len - list.len)), + .end = @as(Node.Index, @intCast(p.extra_data.items.len)), }; } fn addNode(p: *Parse, elem: Ast.Node) Allocator.Error!Node.Index { - const result = @intCast(Node.Index, p.nodes.len); + const result = @as(Node.Index, @intCast(p.nodes.len)); try p.nodes.append(p.gpa, elem); return result; } fn setNode(p: *Parse, i: usize, elem: Ast.Node) Node.Index { p.nodes.set(i, elem); - return @intCast(Node.Index, i); + return @as(Node.Index, @intCast(i)); } fn reserveNode(p: *Parse, tag: Ast.Node.Tag) !usize { @@ -72,7 +72,7 @@ fn unreserveNode(p: *Parse, node_index: usize) void { fn addExtra(p: *Parse, extra: anytype) Allocator.Error!Node.Index { const fields = std.meta.fields(@TypeOf(extra)); try p.extra_data.ensureUnusedCapacity(p.gpa, fields.len); - const result = @intCast(u32, p.extra_data.items.len); + const result = @as(u32, @intCast(p.extra_data.items.len)); inline for (fields) |field| { comptime assert(field.type == Node.Index); p.extra_data.appendAssumeCapacity(@field(extra, field.name)); @@ -1202,10 +1202,10 @@ fn parseForStatement(p: *Parse) !Node.Index { .main_token = for_token, .data = .{ .lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start, - .rhs = @bitCast(u32, Node.For{ - .inputs = @intCast(u31, inputs), + .rhs = @as(u32, @bitCast(Node.For{ + .inputs = @as(u31, @intCast(inputs)), .has_else = has_else, - }), + })), }, }); } @@ -1486,7 +1486,7 @@ fn parseExprPrecedence(p: *Parse, min_prec: i32) Error!Node.Index { while (true) { const tok_tag = p.token_tags[p.tok_i]; - const info = operTable[@intCast(usize, @intFromEnum(tok_tag))]; + const info = operTable[@as(usize, @intCast(@intFromEnum(tok_tag)))]; if (info.prec < min_prec) { break; } @@ -2087,10 +2087,10 @@ fn parseForExpr(p: *Parse) !Node.Index { .main_token = for_token, .data = .{ .lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start, - .rhs = @bitCast(u32, Node.For{ - .inputs = @intCast(u31, inputs), + .rhs = @as(u32, 
@bitCast(Node.For{ + .inputs = @as(u31, @intCast(inputs)), .has_else = has_else, - }), + })), }, }); } @@ -2862,10 +2862,10 @@ fn parseForTypeExpr(p: *Parse) !Node.Index { .main_token = for_token, .data = .{ .lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start, - .rhs = @bitCast(u32, Node.For{ - .inputs = @intCast(u31, inputs), + .rhs = @as(u32, @bitCast(Node.For{ + .inputs = @as(u31, @intCast(inputs)), .has_else = has_else, - }), + })), }, }); } diff --git a/lib/std/zig/Server.zig b/lib/std/zig/Server.zig index f4f979f012..468219f8f0 100644 --- a/lib/std/zig/Server.zig +++ b/lib/std/zig/Server.zig @@ -132,7 +132,7 @@ pub fn receiveMessage(s: *Server) !InMessage.Header { pub fn receiveBody_u32(s: *Server) !u32 { const fifo = &s.receive_fifo; const buf = fifo.readableSlice(0); - const result = @ptrCast(*align(1) const u32, buf[0..4]).*; + const result = @as(*align(1) const u32, @ptrCast(buf[0..4])).*; fifo.discard(4); return bswap(result); } @@ -140,7 +140,7 @@ pub fn receiveBody_u32(s: *Server) !u32 { pub fn serveStringMessage(s: *Server, tag: OutMessage.Tag, msg: []const u8) !void { return s.serveMessage(.{ .tag = tag, - .bytes_len = @intCast(u32, msg.len), + .bytes_len = @as(u32, @intCast(msg.len)), }, &.{msg}); } @@ -152,7 +152,7 @@ pub fn serveMessage( var iovecs: [10]std.os.iovec_const = undefined; const header_le = bswap(header); iovecs[0] = .{ - .iov_base = @ptrCast([*]const u8, &header_le), + .iov_base = @as([*]const u8, @ptrCast(&header_le)), .iov_len = @sizeOf(OutMessage.Header), }; for (bufs, iovecs[1 .. bufs.len + 1]) |buf, *iovec| { @@ -171,7 +171,7 @@ pub fn serveEmitBinPath( ) !void { try s.serveMessage(.{ .tag = .emit_bin_path, - .bytes_len = @intCast(u32, fs_path.len + @sizeOf(OutMessage.EmitBinPath)), + .bytes_len = @as(u32, @intCast(fs_path.len + @sizeOf(OutMessage.EmitBinPath))), }, &.{ std.mem.asBytes(&header), fs_path, @@ -185,7 +185,7 @@ pub fn serveTestResults( const msg_le = bswap(msg); try s.serveMessage(.{ .tag = .test_results, - .bytes_len = @intCast(u32, @sizeOf(OutMessage.TestResults)), + .bytes_len = @as(u32, @intCast(@sizeOf(OutMessage.TestResults))), }, &.{ std.mem.asBytes(&msg_le), }); @@ -193,14 +193,14 @@ pub fn serveTestResults( pub fn serveErrorBundle(s: *Server, error_bundle: std.zig.ErrorBundle) !void { const eb_hdr: OutMessage.ErrorBundle = .{ - .extra_len = @intCast(u32, error_bundle.extra.len), - .string_bytes_len = @intCast(u32, error_bundle.string_bytes.len), + .extra_len = @as(u32, @intCast(error_bundle.extra.len)), + .string_bytes_len = @as(u32, @intCast(error_bundle.string_bytes.len)), }; const bytes_len = @sizeOf(OutMessage.ErrorBundle) + 4 * error_bundle.extra.len + error_bundle.string_bytes.len; try s.serveMessage(.{ .tag = .error_bundle, - .bytes_len = @intCast(u32, bytes_len), + .bytes_len = @as(u32, @intCast(bytes_len)), }, &.{ std.mem.asBytes(&eb_hdr), // TODO: implement @ptrCast between slices changing the length @@ -218,8 +218,8 @@ pub const TestMetadata = struct { pub fn serveTestMetadata(s: *Server, test_metadata: TestMetadata) !void { const header: OutMessage.TestMetadata = .{ - .tests_len = bswap(@intCast(u32, test_metadata.names.len)), - .string_bytes_len = bswap(@intCast(u32, test_metadata.string_bytes.len)), + .tests_len = bswap(@as(u32, @intCast(test_metadata.names.len))), + .string_bytes_len = bswap(@as(u32, @intCast(test_metadata.string_bytes.len))), }; const bytes_len = @sizeOf(OutMessage.TestMetadata) + 3 * 4 * test_metadata.names.len + test_metadata.string_bytes.len; @@ -237,7 +237,7 @@ pub fn 
serveTestMetadata(s: *Server, test_metadata: TestMetadata) !void { return s.serveMessage(.{ .tag = .test_metadata, - .bytes_len = @intCast(u32, bytes_len), + .bytes_len = @as(u32, @intCast(bytes_len)), }, &.{ std.mem.asBytes(&header), // TODO: implement @ptrCast between slices changing the length @@ -253,7 +253,7 @@ fn bswap(x: anytype) @TypeOf(x) { const T = @TypeOf(x); switch (@typeInfo(T)) { - .Enum => return @enumFromInt(T, @byteSwap(@intFromEnum(x))), + .Enum => return @as(T, @enumFromInt(@byteSwap(@intFromEnum(x)))), .Int => return @byteSwap(x), .Struct => |info| switch (info.layout) { .Extern => { @@ -265,7 +265,7 @@ fn bswap(x: anytype) @TypeOf(x) { }, .Packed => { const I = info.backing_integer.?; - return @bitCast(T, @byteSwap(@bitCast(I, x))); + return @as(T, @bitCast(@byteSwap(@as(I, @bitCast(x))))); }, .Auto => @compileError("auto layout struct"), }, @@ -286,7 +286,7 @@ fn bswap_and_workaround_u32(bytes_ptr: *const [4]u8) u32 { /// workaround for https://github.com/ziglang/zig/issues/14904 fn bswap_and_workaround_tag(bytes_ptr: *const [4]u8) InMessage.Tag { const int = std.mem.readIntLittle(u32, bytes_ptr); - return @enumFromInt(InMessage.Tag, int); + return @as(InMessage.Tag, @enumFromInt(int)); } const OutMessage = std.zig.Server.Message; diff --git a/lib/std/zig/c_builtins.zig b/lib/std/zig/c_builtins.zig index de9ac95600..7f0414c96f 100644 --- a/lib/std/zig/c_builtins.zig +++ b/lib/std/zig/c_builtins.zig @@ -20,19 +20,19 @@ pub inline fn __builtin_signbitf(val: f32) c_int { pub inline fn __builtin_popcount(val: c_uint) c_int { // popcount of a c_uint will never exceed the capacity of a c_int @setRuntimeSafety(false); - return @bitCast(c_int, @as(c_uint, @popCount(val))); + return @as(c_int, @bitCast(@as(c_uint, @popCount(val)))); } pub inline fn __builtin_ctz(val: c_uint) c_int { // Returns the number of trailing 0-bits in val, starting at the least significant bit position. // In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint @setRuntimeSafety(false); - return @bitCast(c_int, @as(c_uint, @ctz(val))); + return @as(c_int, @bitCast(@as(c_uint, @ctz(val)))); } pub inline fn __builtin_clz(val: c_uint) c_int { // Returns the number of leading 0-bits in x, starting at the most significant bit position. // In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint @setRuntimeSafety(false); - return @bitCast(c_int, @as(c_uint, @clz(val))); + return @as(c_int, @bitCast(@as(c_uint, @clz(val)))); } pub inline fn __builtin_sqrt(val: f64) f64 { @@ -135,7 +135,7 @@ pub inline fn __builtin_object_size(ptr: ?*const anyopaque, ty: c_int) usize { // If it is not possible to determine which objects ptr points to at compile time, // __builtin_object_size should return (size_t) -1 for type 0 or 1 and (size_t) 0 // for type 2 or 3. 
- if (ty == 0 or ty == 1) return @bitCast(usize, -@as(isize, 1)); + if (ty == 0 or ty == 1) return @as(usize, @bitCast(-@as(isize, 1))); if (ty == 2 or ty == 3) return 0; unreachable; } @@ -151,8 +151,8 @@ pub inline fn __builtin___memset_chk( } pub inline fn __builtin_memset(dst: ?*anyopaque, val: c_int, len: usize) ?*anyopaque { - const dst_cast = @ptrCast([*c]u8, dst); - @memset(dst_cast[0..len], @bitCast(u8, @truncate(i8, val))); + const dst_cast = @as([*c]u8, @ptrCast(dst)); + @memset(dst_cast[0..len], @as(u8, @bitCast(@as(i8, @truncate(val))))); return dst; } @@ -172,8 +172,8 @@ pub inline fn __builtin_memcpy( len: usize, ) ?*anyopaque { if (len > 0) @memcpy( - @ptrCast([*]u8, dst.?)[0..len], - @ptrCast([*]const u8, src.?), + @as([*]u8, @ptrCast(dst.?))[0..len], + @as([*]const u8, @ptrCast(src.?)), ); return dst; } @@ -202,8 +202,8 @@ pub inline fn __builtin_expect(expr: c_long, c: c_long) c_long { /// If tagp is empty, the function returns a NaN whose significand is zero. pub inline fn __builtin_nanf(tagp: []const u8) f32 { const parsed = std.fmt.parseUnsigned(c_ulong, tagp, 0) catch 0; - const bits = @truncate(u23, parsed); // single-precision float trailing significand is 23 bits - return @bitCast(f32, @as(u32, bits) | std.math.qnan_u32); + const bits = @as(u23, @truncate(parsed)); // single-precision float trailing significand is 23 bits + return @as(f32, @bitCast(@as(u32, bits) | std.math.qnan_u32)); } pub inline fn __builtin_huge_valf() f32 { diff --git a/lib/std/zig/c_translation.zig b/lib/std/zig/c_translation.zig index dafef5e63b..2e7bb61df6 100644 --- a/lib/std/zig/c_translation.zig +++ b/lib/std/zig/c_translation.zig @@ -42,9 +42,9 @@ pub fn cast(comptime DestType: type, target: anytype) DestType { }, .Float => { switch (@typeInfo(SourceType)) { - .Int => return @floatFromInt(DestType, target), - .Float => return @floatCast(DestType, target), - .Bool => return @floatFromInt(DestType, @intFromBool(target)), + .Int => return @as(DestType, @floatFromInt(target)), + .Float => return @as(DestType, @floatCast(target)), + .Bool => return @as(DestType, @floatFromInt(@intFromBool(target))), else => {}, } }, @@ -65,36 +65,25 @@ fn castInt(comptime DestType: type, target: anytype) DestType { const source = @typeInfo(@TypeOf(target)).Int; if (dest.bits < source.bits) - return @bitCast(DestType, @truncate(std.meta.Int(source.signedness, dest.bits), target)) + return @as(DestType, @bitCast(@as(std.meta.Int(source.signedness, dest.bits), @truncate(target)))) else - return @bitCast(DestType, @as(std.meta.Int(source.signedness, dest.bits), target)); + return @as(DestType, @bitCast(@as(std.meta.Int(source.signedness, dest.bits), target))); } fn castPtr(comptime DestType: type, target: anytype) DestType { - const dest = ptrInfo(DestType); - const source = ptrInfo(@TypeOf(target)); - - if (source.is_const and !dest.is_const) - return @constCast(target) - else if (source.is_volatile and !dest.is_volatile) - return @volatileCast(target) - else if (@typeInfo(dest.child) == .Opaque) - // dest.alignment would error out - return @ptrCast(DestType, target) - else - return @ptrCast(DestType, @alignCast(dest.alignment, target)); + return @constCast(@volatileCast(@alignCast(@ptrCast(target)))); } fn castToPtr(comptime DestType: type, comptime SourceType: type, target: anytype) DestType { switch (@typeInfo(SourceType)) { .Int => { - return @ptrFromInt(DestType, castInt(usize, target)); + return @as(DestType, @ptrFromInt(castInt(usize, target))); }, .ComptimeInt => { if (target < 0) - return 
@ptrFromInt(DestType, @bitCast(usize, @intCast(isize, target))) + return @as(DestType, @ptrFromInt(@as(usize, @bitCast(@as(isize, @intCast(target)))))) else - return @ptrFromInt(DestType, @intCast(usize, target)); + return @as(DestType, @ptrFromInt(@as(usize, @intCast(target)))); }, .Pointer => { return castPtr(DestType, target); @@ -120,34 +109,34 @@ fn ptrInfo(comptime PtrType: type) std.builtin.Type.Pointer { test "cast" { var i = @as(i64, 10); - try testing.expect(cast(*u8, 16) == @ptrFromInt(*u8, 16)); + try testing.expect(cast(*u8, 16) == @as(*u8, @ptrFromInt(16))); try testing.expect(cast(*u64, &i).* == @as(u64, 10)); try testing.expect(cast(*i64, @as(?*align(1) i64, &i)) == &i); - try testing.expect(cast(?*u8, 2) == @ptrFromInt(*u8, 2)); + try testing.expect(cast(?*u8, 2) == @as(*u8, @ptrFromInt(2))); try testing.expect(cast(?*i64, @as(*align(1) i64, &i)) == &i); try testing.expect(cast(?*i64, @as(?*align(1) i64, &i)) == &i); - try testing.expectEqual(@as(u32, 4), cast(u32, @ptrFromInt(*u32, 4))); - try testing.expectEqual(@as(u32, 4), cast(u32, @ptrFromInt(?*u32, 4))); + try testing.expectEqual(@as(u32, 4), cast(u32, @as(*u32, @ptrFromInt(4)))); + try testing.expectEqual(@as(u32, 4), cast(u32, @as(?*u32, @ptrFromInt(4)))); try testing.expectEqual(@as(u32, 10), cast(u32, @as(u64, 10))); - try testing.expectEqual(@bitCast(i32, @as(u32, 0x8000_0000)), cast(i32, @as(u32, 0x8000_0000))); + try testing.expectEqual(@as(i32, @bitCast(@as(u32, 0x8000_0000))), cast(i32, @as(u32, 0x8000_0000))); - try testing.expectEqual(@ptrFromInt(*u8, 2), cast(*u8, @ptrFromInt(*const u8, 2))); - try testing.expectEqual(@ptrFromInt(*u8, 2), cast(*u8, @ptrFromInt(*volatile u8, 2))); + try testing.expectEqual(@as(*u8, @ptrFromInt(2)), cast(*u8, @as(*const u8, @ptrFromInt(2)))); + try testing.expectEqual(@as(*u8, @ptrFromInt(2)), cast(*u8, @as(*volatile u8, @ptrFromInt(2)))); - try testing.expectEqual(@ptrFromInt(?*anyopaque, 2), cast(?*anyopaque, @ptrFromInt(*u8, 2))); + try testing.expectEqual(@as(?*anyopaque, @ptrFromInt(2)), cast(?*anyopaque, @as(*u8, @ptrFromInt(2)))); var foo: c_int = -1; - try testing.expect(cast(*anyopaque, -1) == @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1)))); - try testing.expect(cast(*anyopaque, foo) == @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1)))); - try testing.expect(cast(?*anyopaque, -1) == @ptrFromInt(?*anyopaque, @bitCast(usize, @as(isize, -1)))); - try testing.expect(cast(?*anyopaque, foo) == @ptrFromInt(?*anyopaque, @bitCast(usize, @as(isize, -1)))); + try testing.expect(cast(*anyopaque, -1) == @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))))); + try testing.expect(cast(*anyopaque, foo) == @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))))); + try testing.expect(cast(?*anyopaque, -1) == @as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))))); + try testing.expect(cast(?*anyopaque, foo) == @as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))))); const FnPtr = ?*align(1) const fn (*anyopaque) void; - try testing.expect(cast(FnPtr, 0) == @ptrFromInt(FnPtr, @as(usize, 0))); - try testing.expect(cast(FnPtr, foo) == @ptrFromInt(FnPtr, @bitCast(usize, @as(isize, -1)))); + try testing.expect(cast(FnPtr, 0) == @as(FnPtr, @ptrFromInt(@as(usize, 0)))); + try testing.expect(cast(FnPtr, foo) == @as(FnPtr, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))))); } /// Given a value returns its size as C's sizeof operator would. 
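
[editorial aside, not part of the patch] The castPtr rewrite above collapses the old per-qualifier branching into a single chain, `@constCast(@volatileCast(@alignCast(@ptrCast(target))))`, because after this change the pointer-cast builtins take their result type from context and a chain of them is resolved as one operation. A minimal sketch of that idiom, using hypothetical names (`headerPtr`, `word`) and assuming the post-change single-argument builtin semantics:

    const std = @import("std");

    // Illustration only: the destination type is no longer passed to the
    // builtins; the @ptrCast(@alignCast(...)) chain infers it from the
    // surrounding context -- here, the function's return type.
    fn headerPtr(raw: *const anyopaque) *const u32 {
        // previously: @ptrCast(*const u32, @alignCast(@alignOf(u32), raw))
        return @ptrCast(@alignCast(raw));
    }

    test "chained pointer casts infer their result type" {
        const word: u32 = 0x01020304;
        const p: *const anyopaque = &word;
        try std.testing.expectEqual(word, headerPtr(p).*);
    }

The same inference is what lets `cast` in c_translation.zig stay correct without inspecting const/volatile/alignment of the destination by hand: the declared `DestType` flows into the whole chain.
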
@@ -192,7 +181,7 @@ pub fn sizeof(target: anytype) usize { const array_info = @typeInfo(ptr.child).Array; if ((array_info.child == u8 or array_info.child == u16) and array_info.sentinel != null and - @ptrCast(*align(1) const array_info.child, array_info.sentinel.?).* == 0) + @as(*align(1) const array_info.child, @ptrCast(array_info.sentinel.?)).* == 0) { // length of the string plus one for the null terminator. return (array_info.len + 1) * @sizeOf(array_info.child); @@ -325,10 +314,10 @@ test "promoteIntLiteral" { pub fn shuffleVectorIndex(comptime this_index: c_int, comptime source_vector_len: usize) i32 { if (this_index <= 0) return 0; - const positive_index = @intCast(usize, this_index); - if (positive_index < source_vector_len) return @intCast(i32, this_index); + const positive_index = @as(usize, @intCast(this_index)); + if (positive_index < source_vector_len) return @as(i32, @intCast(this_index)); const b_index = positive_index - source_vector_len; - return ~@intCast(i32, b_index); + return ~@as(i32, @intCast(b_index)); } test "shuffleVectorIndex" { diff --git a/lib/std/zig/number_literal.zig b/lib/std/zig/number_literal.zig index 66596b3b15..aba588a3ea 100644 --- a/lib/std/zig/number_literal.zig +++ b/lib/std/zig/number_literal.zig @@ -141,7 +141,7 @@ pub fn parseNumberLiteral(bytes: []const u8) Result { 'a'...'z' => c - 'a' + 10, else => return .{ .failure = .{ .invalid_character = i } }, }; - if (digit >= base) return .{ .failure = .{ .invalid_digit = .{ .i = i, .base = @enumFromInt(Base, base) } } }; + if (digit >= base) return .{ .failure = .{ .invalid_digit = .{ .i = i, .base = @as(Base, @enumFromInt(base)) } } }; if (exponent and digit >= 10) return .{ .failure = .{ .invalid_digit_exponent = i } }; underscore = false; special = 0; @@ -159,7 +159,7 @@ pub fn parseNumberLiteral(bytes: []const u8) Result { if (underscore) return .{ .failure = .{ .trailing_underscore = bytes.len - 1 } }; if (special != 0) return .{ .failure = .{ .trailing_special = bytes.len - 1 } }; - if (float) return .{ .float = @enumFromInt(FloatBase, base) }; - if (overflow) return .{ .big_int = @enumFromInt(Base, base) }; + if (float) return .{ .float = @as(FloatBase, @enumFromInt(base)) }; + if (overflow) return .{ .big_int = @as(Base, @enumFromInt(base)) }; return .{ .int = x }; } diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig index e41e9157e6..ca3e99b164 100644 --- a/lib/std/zig/parser_test.zig +++ b/lib/std/zig/parser_test.zig @@ -166,10 +166,10 @@ test "zig fmt: respect line breaks after var declarations" { \\ lookup_tables[1][p[6]] ^ \\ lookup_tables[2][p[5]] ^ \\ lookup_tables[3][p[4]] ^ - \\ lookup_tables[4][@truncate(u8, self.crc >> 24)] ^ - \\ lookup_tables[5][@truncate(u8, self.crc >> 16)] ^ - \\ lookup_tables[6][@truncate(u8, self.crc >> 8)] ^ - \\ lookup_tables[7][@truncate(u8, self.crc >> 0)]; + \\ lookup_tables[4][@as(u8, self.crc >> 24)] ^ + \\ lookup_tables[5][@as(u8, self.crc >> 16)] ^ + \\ lookup_tables[6][@as(u8, self.crc >> 8)] ^ + \\ lookup_tables[7][@as(u8, self.crc >> 0)]; \\ ); } @@ -1108,7 +1108,7 @@ test "zig fmt: async function" { \\ handleRequestFn: fn (*Server, *const std.net.Address, File) callconv(.Async) void, \\}; \\test "hi" { - \\ var ptr = @ptrCast(fn (i32) callconv(.Async) void, other); + \\ var ptr: fn (i32) callconv(.Async) void = @ptrCast(other); \\} \\ ); @@ -1825,10 +1825,10 @@ test "zig fmt: respect line breaks after infix operators" { \\ lookup_tables[1][p[6]] ^ \\ lookup_tables[2][p[5]] ^ \\ lookup_tables[3][p[4]] ^ - \\ 
lookup_tables[4][@truncate(u8, self.crc >> 24)] ^ - \\ lookup_tables[5][@truncate(u8, self.crc >> 16)] ^ - \\ lookup_tables[6][@truncate(u8, self.crc >> 8)] ^ - \\ lookup_tables[7][@truncate(u8, self.crc >> 0)]; + \\ lookup_tables[4][@as(u8, self.crc >> 24)] ^ + \\ lookup_tables[5][@as(u8, self.crc >> 16)] ^ + \\ lookup_tables[6][@as(u8, self.crc >> 8)] ^ + \\ lookup_tables[7][@as(u8, self.crc >> 0)]; \\} \\ ); @@ -4814,7 +4814,7 @@ test "zig fmt: use of comments and multiline string literals may force the param \\ \\ unknown-length pointers and C pointers cannot be hashed deeply. \\ \\ Consider providing your own hash function. \\ ); - \\ return @intCast(i1, doMemCheckClientRequestExpr(0, // default return + \\ return @intCast(doMemCheckClientRequestExpr(0, // default return \\ .MakeMemUndefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)); \\} \\ diff --git a/lib/std/zig/perf_test.zig b/lib/std/zig/perf_test.zig index df60978510..a53dee7fa8 100644 --- a/lib/std/zig/perf_test.zig +++ b/lib/std/zig/perf_test.zig @@ -18,9 +18,9 @@ pub fn main() !void { } const end = timer.read(); memory_used /= iterations; - const elapsed_s = @floatFromInt(f64, end - start) / std.time.ns_per_s; - const bytes_per_sec_float = @floatFromInt(f64, source.len * iterations) / elapsed_s; - const bytes_per_sec = @intFromFloat(u64, @floor(bytes_per_sec_float)); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / std.time.ns_per_s; + const bytes_per_sec_float = @as(f64, @floatFromInt(source.len * iterations)) / elapsed_s; + const bytes_per_sec = @as(u64, @intFromFloat(@floor(bytes_per_sec_float))); var stdout_file = std.io.getStdOut(); const stdout = stdout_file.writer(); diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 72f54b3f4f..2cf7bc9716 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -2719,7 +2719,7 @@ fn renderIdentifier(ais: *Ais, tree: Ast, token_index: Ast.TokenIndex, space: Sp while (contents_i < contents.len and buf_i < longest_keyword_or_primitive_len) { if (contents[contents_i] == '\\') { const res = std.zig.string_literal.parseEscapeSequence(contents, &contents_i).success; - buf[buf_i] = @intCast(u8, res); + buf[buf_i] = @as(u8, @intCast(res)); buf_i += 1; } else { buf[buf_i] = contents[contents_i]; @@ -2773,7 +2773,7 @@ fn renderIdentifierContents(writer: anytype, bytes: []const u8) !void { switch (res) { .success => |codepoint| { if (codepoint <= 0x7f) { - const buf = [1]u8{@intCast(u8, codepoint)}; + const buf = [1]u8{@as(u8, @intCast(codepoint))}; try std.fmt.format(writer, "{}", .{std.zig.fmtEscapes(&buf)}); } else { try writer.writeAll(escape_sequence); diff --git a/lib/std/zig/string_literal.zig b/lib/std/zig/string_literal.zig index 4859c379a0..53b1ab7ca8 100644 --- a/lib/std/zig/string_literal.zig +++ b/lib/std/zig/string_literal.zig @@ -142,7 +142,7 @@ pub fn parseEscapeSequence(slice: []const u8, offset: *usize) ParsedCharLiteral return .{ .failure = .{ .expected_rbrace = i } }; } offset.* = i; - return .{ .success = @intCast(u21, value) }; + return .{ .success = @as(u21, @intCast(value)) }; }, else => return .{ .failure = .{ .invalid_escape_character = offset.* - 1 } }, } @@ -253,7 +253,7 @@ pub fn parseWrite(writer: anytype, bytes: []const u8) error{OutOfMemory}!Result }; try writer.writeAll(buf[0..len]); } else { - try writer.writeByte(@intCast(u8, codepoint)); + try writer.writeByte(@as(u8, @intCast(codepoint))); } }, .failure => |err| return Result{ .failure = err }, diff --git a/lib/std/zig/system/NativeTargetInfo.zig 
b/lib/std/zig/system/NativeTargetInfo.zig index 29ef752d7a..f69f1e1b1e 100644 --- a/lib/std/zig/system/NativeTargetInfo.zig +++ b/lib/std/zig/system/NativeTargetInfo.zig @@ -479,8 +479,8 @@ fn glibcVerFromRPath(rpath: []const u8) !std.SemanticVersion { fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion { var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 align(@alignOf(elf.Elf64_Ehdr)) = undefined; _ = try preadMin(file, &hdr_buf, 0, hdr_buf.len); - const hdr32 = @ptrCast(*elf.Elf32_Ehdr, &hdr_buf); - const hdr64 = @ptrCast(*elf.Elf64_Ehdr, &hdr_buf); + const hdr32 = @as(*elf.Elf32_Ehdr, @ptrCast(&hdr_buf)); + const hdr64 = @as(*elf.Elf64_Ehdr, @ptrCast(&hdr_buf)); if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic; const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI_DATA]) { elf.ELFDATA2LSB => .Little, @@ -503,8 +503,8 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion { if (sh_buf.len < shentsize) return error.InvalidElfFile; _ = try preadMin(file, &sh_buf, str_section_off, shentsize); - const shstr32 = @ptrCast(*elf.Elf32_Shdr, @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf)); - const shstr64 = @ptrCast(*elf.Elf64_Shdr, @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf)); + const shstr32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf)); + const shstr64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf)); const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset); const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size); var strtab_buf: [4096:0]u8 = undefined; @@ -529,14 +529,8 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion { shoff += shentsize; sh_buf_i += shentsize; }) { - const sh32 = @ptrCast( - *elf.Elf32_Shdr, - @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf[sh_buf_i]), - ); - const sh64 = @ptrCast( - *elf.Elf64_Shdr, - @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf[sh_buf_i]), - ); + const sh32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i])); + const sh64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i])); const sh_name_off = elfInt(is_64, need_bswap, sh32.sh_name, sh64.sh_name); const sh_name = mem.sliceTo(shstrtab[sh_name_off..], 0); if (mem.eql(u8, sh_name, ".dynstr")) { @@ -558,7 +552,7 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion { var buf: [80000]u8 = undefined; if (buf.len < dynstr.size) return error.InvalidGnuLibCVersion; - const dynstr_size = @intCast(usize, dynstr.size); + const dynstr_size = @as(usize, @intCast(dynstr.size)); const dynstr_bytes = buf[0..dynstr_size]; _ = try preadMin(file, dynstr_bytes, dynstr.offset, dynstr_bytes.len); var it = mem.splitScalar(u8, dynstr_bytes, 0); @@ -621,8 +615,8 @@ pub fn abiAndDynamicLinkerFromFile( ) AbiAndDynamicLinkerFromFileError!NativeTargetInfo { var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 align(@alignOf(elf.Elf64_Ehdr)) = undefined; _ = try preadMin(file, &hdr_buf, 0, hdr_buf.len); - const hdr32 = @ptrCast(*elf.Elf32_Ehdr, &hdr_buf); - const hdr64 = @ptrCast(*elf.Elf64_Ehdr, &hdr_buf); + const hdr32 = @as(*elf.Elf32_Ehdr, @ptrCast(&hdr_buf)); + const hdr64 = @as(*elf.Elf64_Ehdr, @ptrCast(&hdr_buf)); if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic; const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI_DATA]) { elf.ELFDATA2LSB => .Little, @@ -668,21 +662,21 @@ pub fn abiAndDynamicLinkerFromFile( phoff += phentsize; ph_buf_i += phentsize; }) { - const ph32 = @ptrCast(*elf.Elf32_Phdr, @alignCast(@alignOf(elf.Elf32_Phdr), &ph_buf[ph_buf_i])); - const 
ph64 = @ptrCast(*elf.Elf64_Phdr, @alignCast(@alignOf(elf.Elf64_Phdr), &ph_buf[ph_buf_i])); + const ph32: *elf.Elf32_Phdr = @ptrCast(@alignCast(&ph_buf[ph_buf_i])); + const ph64: *elf.Elf64_Phdr = @ptrCast(@alignCast(&ph_buf[ph_buf_i])); const p_type = elfInt(is_64, need_bswap, ph32.p_type, ph64.p_type); switch (p_type) { elf.PT_INTERP => if (look_for_ld) { const p_offset = elfInt(is_64, need_bswap, ph32.p_offset, ph64.p_offset); const p_filesz = elfInt(is_64, need_bswap, ph32.p_filesz, ph64.p_filesz); if (p_filesz > result.dynamic_linker.buffer.len) return error.NameTooLong; - const filesz = @intCast(usize, p_filesz); + const filesz = @as(usize, @intCast(p_filesz)); _ = try preadMin(file, result.dynamic_linker.buffer[0..filesz], p_offset, filesz); // PT_INTERP includes a null byte in filesz. const len = filesz - 1; // dynamic_linker.max_byte is "max", not "len". // We know it will fit in u8 because we check against dynamic_linker.buffer.len above. - result.dynamic_linker.max_byte = @intCast(u8, len - 1); + result.dynamic_linker.max_byte = @as(u8, @intCast(len - 1)); // Use it to determine ABI. const full_ld_path = result.dynamic_linker.buffer[0..len]; @@ -720,14 +714,8 @@ pub fn abiAndDynamicLinkerFromFile( dyn_off += dyn_size; dyn_buf_i += dyn_size; }) { - const dyn32 = @ptrCast( - *elf.Elf32_Dyn, - @alignCast(@alignOf(elf.Elf32_Dyn), &dyn_buf[dyn_buf_i]), - ); - const dyn64 = @ptrCast( - *elf.Elf64_Dyn, - @alignCast(@alignOf(elf.Elf64_Dyn), &dyn_buf[dyn_buf_i]), - ); + const dyn32: *elf.Elf32_Dyn = @ptrCast(@alignCast(&dyn_buf[dyn_buf_i])); + const dyn64: *elf.Elf64_Dyn = @ptrCast(@alignCast(&dyn_buf[dyn_buf_i])); const tag = elfInt(is_64, need_bswap, dyn32.d_tag, dyn64.d_tag); const val = elfInt(is_64, need_bswap, dyn32.d_val, dyn64.d_val); if (tag == elf.DT_RUNPATH) { @@ -755,8 +743,8 @@ pub fn abiAndDynamicLinkerFromFile( if (sh_buf.len < shentsize) return error.InvalidElfFile; _ = try preadMin(file, &sh_buf, str_section_off, shentsize); - const shstr32 = @ptrCast(*elf.Elf32_Shdr, @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf)); - const shstr64 = @ptrCast(*elf.Elf64_Shdr, @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf)); + const shstr32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf)); + const shstr64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf)); const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset); const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size); var strtab_buf: [4096:0]u8 = undefined; @@ -782,14 +770,8 @@ pub fn abiAndDynamicLinkerFromFile( shoff += shentsize; sh_buf_i += shentsize; }) { - const sh32 = @ptrCast( - *elf.Elf32_Shdr, - @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf[sh_buf_i]), - ); - const sh64 = @ptrCast( - *elf.Elf64_Shdr, - @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf[sh_buf_i]), - ); + const sh32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i])); + const sh64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i])); const sh_name_off = elfInt(is_64, need_bswap, sh32.sh_name, sh64.sh_name); const sh_name = mem.sliceTo(shstrtab[sh_name_off..], 0); if (mem.eql(u8, sh_name, ".dynstr")) { diff --git a/lib/std/zig/system/arm.zig b/lib/std/zig/system/arm.zig index da05c8c90d..7d466fc984 100644 --- a/lib/std/zig/system/arm.zig +++ b/lib/std/zig/system/arm.zig @@ -141,7 +141,7 @@ pub const aarch64 = struct { } inline fn bitField(input: u64, offset: u6) u4 { - return @truncate(u4, input >> offset); + return @as(u4, @truncate(input >> offset)); } /// Input array should consist of readouts from 12 
system registers such that: @@ -176,23 +176,23 @@ pub const aarch64 = struct { /// Takes readout of MIDR_EL1 register as input. fn detectNativeCoreInfo(midr: u64) CoreInfo { var info = CoreInfo{ - .implementer = @truncate(u8, midr >> 24), - .part = @truncate(u12, midr >> 4), + .implementer = @as(u8, @truncate(midr >> 24)), + .part = @as(u12, @truncate(midr >> 4)), }; blk: { if (info.implementer == 0x41) { // ARM Ltd. - const special_bits = @truncate(u4, info.part >> 8); + const special_bits = @as(u4, @truncate(info.part >> 8)); if (special_bits == 0x0 or special_bits == 0x7) { // TODO Variant and arch encoded differently. break :blk; } } - info.variant |= @intCast(u8, @truncate(u4, midr >> 20)) << 4; - info.variant |= @truncate(u4, midr); - info.architecture = @truncate(u4, midr >> 16); + info.variant |= @as(u8, @intCast(@as(u4, @truncate(midr >> 20)))) << 4; + info.variant |= @as(u4, @truncate(midr)); + info.architecture = @as(u4, @truncate(midr >> 16)); } return info; diff --git a/lib/std/zig/system/windows.zig b/lib/std/zig/system/windows.zig index c5c6f052ec..9c5b614c39 100644 --- a/lib/std/zig/system/windows.zig +++ b/lib/std/zig/system/windows.zig @@ -26,8 +26,8 @@ pub fn detectRuntimeVersion() WindowsVersion { // `---` `` ``--> Sub-version (Starting from Windows 10 onwards) // \ `--> Service pack (Always zero in the constants defined) // `--> OS version (Major & minor) - const os_ver: u16 = @intCast(u16, version_info.dwMajorVersion & 0xff) << 8 | - @intCast(u16, version_info.dwMinorVersion & 0xff); + const os_ver: u16 = @as(u16, @intCast(version_info.dwMajorVersion & 0xff)) << 8 | + @as(u16, @intCast(version_info.dwMinorVersion & 0xff)); const sp_ver: u8 = 0; const sub_ver: u8 = if (os_ver >= 0x0A00) subver: { // There's no other way to obtain this info beside @@ -38,12 +38,12 @@ pub fn detectRuntimeVersion() WindowsVersion { if (version_info.dwBuildNumber >= build) last_idx = i; } - break :subver @truncate(u8, last_idx); + break :subver @as(u8, @truncate(last_idx)); } else 0; const version: u32 = @as(u32, os_ver) << 16 | @as(u16, sp_ver) << 8 | sub_ver; - return @enumFromInt(WindowsVersion, version); + return @as(WindowsVersion, @enumFromInt(version)); } // Technically, a registry value can be as long as 1MB. However, MS recommends storing @@ -100,11 +100,11 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void { REG.MULTI_SZ, => { comptime assert(@sizeOf(std.os.windows.UNICODE_STRING) % 2 == 0); - const unicode = @ptrCast(*std.os.windows.UNICODE_STRING, &tmp_bufs[i]); + const unicode = @as(*std.os.windows.UNICODE_STRING, @ptrCast(&tmp_bufs[i])); unicode.* = .{ .Length = 0, .MaximumLength = max_value_len - @sizeOf(std.os.windows.UNICODE_STRING), - .Buffer = @ptrCast([*]u16, tmp_bufs[i][@sizeOf(std.os.windows.UNICODE_STRING)..]), + .Buffer = @as([*]u16, @ptrCast(tmp_bufs[i][@sizeOf(std.os.windows.UNICODE_STRING)..])), }; break :blk unicode; }, @@ -159,7 +159,7 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void { REG.MULTI_SZ, => { var buf = @field(args, field.name).value_buf; - const entry = @ptrCast(*align(1) const std.os.windows.UNICODE_STRING, table[i + 1].EntryContext); + const entry = @as(*align(1) const std.os.windows.UNICODE_STRING, @ptrCast(table[i + 1].EntryContext)); const len = try std.unicode.utf16leToUtf8(buf, entry.Buffer[0 .. 
entry.Length / 2]); buf[len] = 0; }, @@ -168,7 +168,7 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void { REG.DWORD_BIG_ENDIAN, REG.QWORD, => { - const entry = @ptrCast([*]align(1) const u8, table[i + 1].EntryContext); + const entry = @as([*]align(1) const u8, @ptrCast(table[i + 1].EntryContext)); switch (@field(args, field.name).value_type) { REG.DWORD, REG.DWORD_BIG_ENDIAN => { @memcpy(@field(args, field.name).value_buf[0..4], entry[0..4]); @@ -254,18 +254,18 @@ pub fn detectNativeCpuAndFeatures() ?Target.Cpu { // CP 4039 -> ID_AA64MMFR1_EL1 // CP 403A -> ID_AA64MMFR2_EL1 getCpuInfoFromRegistry(i, .{ - .{ .key = "CP 4000", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[0]) }, - .{ .key = "CP 4020", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[1]) }, - .{ .key = "CP 4021", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[2]) }, - .{ .key = "CP 4028", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[3]) }, - .{ .key = "CP 4029", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[4]) }, - .{ .key = "CP 402C", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[5]) }, - .{ .key = "CP 402D", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[6]) }, - .{ .key = "CP 4030", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[7]) }, - .{ .key = "CP 4031", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[8]) }, - .{ .key = "CP 4038", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[9]) }, - .{ .key = "CP 4039", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[10]) }, - .{ .key = "CP 403A", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[11]) }, + .{ .key = "CP 4000", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[0])) }, + .{ .key = "CP 4020", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[1])) }, + .{ .key = "CP 4021", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[2])) }, + .{ .key = "CP 4028", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[3])) }, + .{ .key = "CP 4029", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[4])) }, + .{ .key = "CP 402C", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[5])) }, + .{ .key = "CP 402D", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[6])) }, + .{ .key = "CP 4030", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[7])) }, + .{ .key = "CP 4031", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[8])) }, + .{ .key = "CP 4038", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[9])) }, + .{ .key = "CP 4039", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[10])) }, + .{ .key = "CP 403A", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[11])) }, }) catch break :blk null; cores[i] = @import("arm.zig").aarch64.detectNativeCpuAndFeatures(current_arch, registers) orelse diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig index 0d6a6d4fd8..72f65afb3a 100644 --- a/lib/std/zig/tokenizer.zig +++ b/lib/std/zig/tokenizer.zig @@ -1290,7 +1290,7 @@ pub const Tokenizer = struct { // check utf8-encoded character. 
const length = std.unicode.utf8ByteSequenceLength(c0) catch return 1; if (self.index + length > self.buffer.len) { - return @intCast(u3, self.buffer.len - self.index); + return @as(u3, @intCast(self.buffer.len - self.index)); } const bytes = self.buffer[self.index .. self.index + length]; switch (length) { diff --git a/lib/test_runner.zig b/lib/test_runner.zig index 8bc79a96c8..842babcdeb 100644 --- a/lib/test_runner.zig +++ b/lib/test_runner.zig @@ -70,12 +70,12 @@ fn mainServer() !void { defer std.testing.allocator.free(expected_panic_msgs); for (test_fns, names, async_frame_sizes, expected_panic_msgs) |test_fn, *name, *async_frame_size, *expected_panic_msg| { - name.* = @intCast(u32, string_bytes.items.len); + name.* = @as(u32, @intCast(string_bytes.items.len)); try string_bytes.ensureUnusedCapacity(std.testing.allocator, test_fn.name.len + 1); string_bytes.appendSliceAssumeCapacity(test_fn.name); string_bytes.appendAssumeCapacity(0); - async_frame_size.* = @intCast(u32, test_fn.async_frame_size orelse 0); + async_frame_size.* = @as(u32, @intCast(test_fn.async_frame_size orelse 0)); expected_panic_msg.* = 0; } @@ -163,7 +163,7 @@ fn mainTerminal() void { std.heap.page_allocator.free(async_frame_buffer); async_frame_buffer = std.heap.page_allocator.alignedAlloc(u8, std.Target.stack_align, size) catch @panic("out of memory"); } - const casted_fn = @ptrCast(fn () callconv(.Async) anyerror!void, test_fn.func); + const casted_fn = @as(fn () callconv(.Async) anyerror!void, @ptrCast(test_fn.func)); break :blk await @asyncCall(async_frame_buffer, {}, casted_fn, .{}); }, .blocking => { diff --git a/src/Air.zig b/src/Air.zig index ec2baf0dab..f7762a5e86 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1106,7 +1106,7 @@ pub const VectorCmp = struct { op: u32, pub fn compareOperator(self: VectorCmp) std.math.CompareOperator { - return @enumFromInt(std.math.CompareOperator, @truncate(u3, self.op)); + return @as(std.math.CompareOperator, @enumFromInt(@as(u3, @truncate(self.op)))); } pub fn encodeOp(compare_operator: std.math.CompareOperator) u32 { @@ -1151,11 +1151,11 @@ pub const Cmpxchg = struct { flags: u32, pub fn successOrder(self: Cmpxchg) std.builtin.AtomicOrder { - return @enumFromInt(std.builtin.AtomicOrder, @truncate(u3, self.flags)); + return @as(std.builtin.AtomicOrder, @enumFromInt(@as(u3, @truncate(self.flags)))); } pub fn failureOrder(self: Cmpxchg) std.builtin.AtomicOrder { - return @enumFromInt(std.builtin.AtomicOrder, @truncate(u3, self.flags >> 3)); + return @as(std.builtin.AtomicOrder, @enumFromInt(@as(u3, @truncate(self.flags >> 3)))); } }; @@ -1166,11 +1166,11 @@ pub const AtomicRmw = struct { flags: u32, pub fn ordering(self: AtomicRmw) std.builtin.AtomicOrder { - return @enumFromInt(std.builtin.AtomicOrder, @truncate(u3, self.flags)); + return @as(std.builtin.AtomicOrder, @enumFromInt(@as(u3, @truncate(self.flags)))); } pub fn op(self: AtomicRmw) std.builtin.AtomicRmwOp { - return @enumFromInt(std.builtin.AtomicRmwOp, @truncate(u4, self.flags >> 3)); + return @as(std.builtin.AtomicRmwOp, @enumFromInt(@as(u4, @truncate(self.flags >> 3)))); } }; @@ -1451,7 +1451,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { const ref_int = @intFromEnum(ref); if (ref_int < ref_start_index) { - const ip_index = @enumFromInt(InternPool.Index, ref_int); + const ip_index = @as(InternPool.Index, @enumFromInt(ref_int)); return ip_index.toType(); } const inst_index = ref_int - ref_start_index; @@ -1472,9 +1472,9 
@@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => air.extra[i], - Inst.Ref => @enumFromInt(Inst.Ref, air.extra[i]), - i32 => @bitCast(i32, air.extra[i]), - InternPool.Index => @enumFromInt(InternPool.Index, air.extra[i]), + Inst.Ref => @as(Inst.Ref, @enumFromInt(air.extra[i])), + i32 => @as(i32, @bitCast(air.extra[i])), + InternPool.Index => @as(InternPool.Index, @enumFromInt(air.extra[i])), else => @compileError("bad field type: " ++ @typeName(field.type)), }; i += 1; @@ -1494,7 +1494,7 @@ pub fn deinit(air: *Air, gpa: std.mem.Allocator) void { pub const ref_start_index: u32 = InternPool.static_len; pub fn indexToRef(inst: Inst.Index) Inst.Ref { - return @enumFromInt(Inst.Ref, ref_start_index + inst); + return @as(Inst.Ref, @enumFromInt(ref_start_index + inst)); } pub fn refToIndex(inst: Inst.Ref) ?Inst.Index { @@ -1516,10 +1516,10 @@ pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index { pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const ref_int = @intFromEnum(inst); if (ref_int < ref_start_index) { - const ip_index = @enumFromInt(InternPool.Index, ref_int); + const ip_index = @as(InternPool.Index, @enumFromInt(ref_int)); return ip_index.toValue(); } - const inst_index = @intCast(Air.Inst.Index, ref_int - ref_start_index); + const inst_index = @as(Air.Inst.Index, @intCast(ref_int - ref_start_index)); const air_datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst_index]) { .interned => return air_datas[inst_index].interned.toValue(), @@ -1747,7 +1747,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { .work_group_id, => false, - .assembly => @truncate(u1, air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31) != 0, + .assembly => @as(u1, @truncate(air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31)) != 0, .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip), .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip), .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip), diff --git a/src/AstGen.zig b/src/AstGen.zig index df64d58549..c7ac569246 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -70,7 +70,7 @@ fn addExtra(astgen: *AstGen, extra: anytype) Allocator.Error!u32 { fn addExtraAssumeCapacity(astgen: *AstGen, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, astgen.extra.items.len); + const result = @as(u32, @intCast(astgen.extra.items.len)); astgen.extra.items.len += fields.len; setExtra(astgen, result, extra); return result; @@ -83,11 +83,11 @@ fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void { astgen.extra.items[i] = switch (field.type) { u32 => @field(extra, field.name), Zir.Inst.Ref => @intFromEnum(@field(extra, field.name)), - i32 => @bitCast(u32, @field(extra, field.name)), - Zir.Inst.Call.Flags => @bitCast(u32, @field(extra, field.name)), - Zir.Inst.BuiltinCall.Flags => @bitCast(u32, @field(extra, field.name)), - Zir.Inst.SwitchBlock.Bits => @bitCast(u32, @field(extra, field.name)), - Zir.Inst.FuncFancy.Bits => @bitCast(u32, @field(extra, field.name)), + i32 => @as(u32, @bitCast(@field(extra, field.name))), + Zir.Inst.Call.Flags => @as(u32, @bitCast(@field(extra, field.name))), + Zir.Inst.BuiltinCall.Flags => @as(u32, @bitCast(@field(extra, field.name))), + Zir.Inst.SwitchBlock.Bits => @as(u32, @bitCast(@field(extra, field.name))), + 
Zir.Inst.FuncFancy.Bits => @as(u32, @bitCast(@field(extra, field.name))), else => @compileError("bad field type"), }; i += 1; @@ -95,18 +95,18 @@ fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void { } fn reserveExtra(astgen: *AstGen, size: usize) Allocator.Error!u32 { - const result = @intCast(u32, astgen.extra.items.len); + const result = @as(u32, @intCast(astgen.extra.items.len)); try astgen.extra.resize(astgen.gpa, result + size); return result; } fn appendRefs(astgen: *AstGen, refs: []const Zir.Inst.Ref) !void { - const coerced = @ptrCast([]const u32, refs); + const coerced = @as([]const u32, @ptrCast(refs)); return astgen.extra.appendSlice(astgen.gpa, coerced); } fn appendRefsAssumeCapacity(astgen: *AstGen, refs: []const Zir.Inst.Ref) void { - const coerced = @ptrCast([]const u32, refs); + const coerced = @as([]const u32, @ptrCast(refs)); astgen.extra.appendSliceAssumeCapacity(coerced); } @@ -176,7 +176,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir { @typeInfo(Zir.Inst.CompileErrors.Item).Struct.fields.len); astgen.extra.items[err_index] = astgen.addExtraAssumeCapacity(Zir.Inst.CompileErrors{ - .items_len = @intCast(u32, astgen.compile_errors.items.len), + .items_len = @as(u32, @intCast(astgen.compile_errors.items.len)), }); for (astgen.compile_errors.items) |item| { @@ -192,7 +192,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir { astgen.imports.count() * @typeInfo(Zir.Inst.Imports.Item).Struct.fields.len); astgen.extra.items[imports_index] = astgen.addExtraAssumeCapacity(Zir.Inst.Imports{ - .imports_len = @intCast(u32, astgen.imports.count()), + .imports_len = @as(u32, @intCast(astgen.imports.count())), }); var it = astgen.imports.iterator(); @@ -1334,7 +1334,7 @@ fn fnProtoExpr( var param_gz = block_scope.makeSubBlock(scope); defer param_gz.unstack(); const param_type = try expr(¶m_gz, scope, coerced_type_ri, param_type_node); - const param_inst_expected = @intCast(u32, astgen.instructions.len + 1); + const param_inst_expected = @as(u32, @intCast(astgen.instructions.len + 1)); _ = try param_gz.addBreakWithSrcNode(.break_inline, param_inst_expected, param_type, param_type_node); const main_tokens = tree.nodes.items(.main_token); const name_token = param.name_token orelse main_tokens[param_type_node]; @@ -1468,7 +1468,7 @@ fn arrayInitExpr( const array_type_inst = try typeExpr(gz, scope, array_init.ast.type_expr); _ = try gz.addPlNode(.validate_array_init_ty, node, Zir.Inst.ArrayInit{ .ty = array_type_inst, - .init_count = @intCast(u32, array_init.ast.elements.len), + .init_count = @as(u32, @intCast(array_init.ast.elements.len)), }); break :inst .{ .array = array_type_inst, @@ -1533,7 +1533,7 @@ fn arrayInitExprRlNone( const astgen = gz.astgen; const payload_index = try addExtra(astgen, Zir.Inst.MultiOp{ - .operands_len = @intCast(u32, elements.len), + .operands_len = @as(u32, @intCast(elements.len)), }); var extra_index = try reserveExtra(astgen, elements.len); @@ -1558,7 +1558,7 @@ fn arrayInitExprInner( const len = elements.len + @intFromBool(array_ty_inst != .none); const payload_index = try addExtra(astgen, Zir.Inst.MultiOp{ - .operands_len = @intCast(u32, len), + .operands_len = @as(u32, @intCast(len)), }); var extra_index = try reserveExtra(astgen, len); if (array_ty_inst != .none) { @@ -1574,7 +1574,7 @@ fn arrayInitExprInner( .tag = .elem_type_index, .data = .{ .bin = .{ .lhs = array_ty_inst, - .rhs = @enumFromInt(Zir.Inst.Ref, i), + .rhs = @as(Zir.Inst.Ref, @enumFromInt(i)), } }, }); break :ri ResultInfo{ .rl = .{ 
.coerced_ty = ty_expr } }; @@ -1619,14 +1619,14 @@ fn arrayInitExprRlPtrInner( const astgen = gz.astgen; const payload_index = try addExtra(astgen, Zir.Inst.Block{ - .body_len = @intCast(u32, elements.len), + .body_len = @as(u32, @intCast(elements.len)), }); var extra_index = try reserveExtra(astgen, elements.len); for (elements, 0..) |elem_init, i| { const elem_ptr = try gz.addPlNode(.elem_ptr_imm, elem_init, Zir.Inst.ElemPtrImm{ .ptr = result_ptr, - .index = @intCast(u32, i), + .index = @as(u32, @intCast(i)), }); astgen.extra.items[extra_index] = refToIndex(elem_ptr).?; extra_index += 1; @@ -1776,7 +1776,7 @@ fn structInitExprRlNone( const tree = astgen.tree; const payload_index = try addExtra(astgen, Zir.Inst.StructInitAnon{ - .fields_len = @intCast(u32, struct_init.ast.fields.len), + .fields_len = @as(u32, @intCast(struct_init.ast.fields.len)), }); const field_size = @typeInfo(Zir.Inst.StructInitAnon.Item).Struct.fields.len; var extra_index: usize = try reserveExtra(astgen, struct_init.ast.fields.len * field_size); @@ -1834,7 +1834,7 @@ fn structInitExprRlPtrInner( const tree = astgen.tree; const payload_index = try addExtra(astgen, Zir.Inst.Block{ - .body_len = @intCast(u32, struct_init.ast.fields.len), + .body_len = @as(u32, @intCast(struct_init.ast.fields.len)), }); var extra_index = try reserveExtra(astgen, struct_init.ast.fields.len); @@ -1866,7 +1866,7 @@ fn structInitExprRlTy( const tree = astgen.tree; const payload_index = try addExtra(astgen, Zir.Inst.StructInit{ - .fields_len = @intCast(u32, struct_init.ast.fields.len), + .fields_len = @as(u32, @intCast(struct_init.ast.fields.len)), }); const field_size = @typeInfo(Zir.Inst.StructInit.Item).Struct.fields.len; var extra_index: usize = try reserveExtra(astgen, struct_init.ast.fields.len * field_size); @@ -2105,7 +2105,7 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn } const operand = try reachableExpr(parent_gz, parent_scope, block_gz.break_result_info, rhs, node); - const search_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const search_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); try genDefers(parent_gz, scope, parent_scope, .normal_only); @@ -2511,17 +2511,17 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As .call, .field_call => { const extra_index = gz.astgen.instructions.items(.data)[inst].pl_node.payload_index; const slot = &gz.astgen.extra.items[extra_index]; - var flags = @bitCast(Zir.Inst.Call.Flags, slot.*); + var flags = @as(Zir.Inst.Call.Flags, @bitCast(slot.*)); flags.ensure_result_used = true; - slot.* = @bitCast(u32, flags); + slot.* = @as(u32, @bitCast(flags)); break :b true; }, .builtin_call => { const extra_index = gz.astgen.instructions.items(.data)[inst].pl_node.payload_index; const slot = &gz.astgen.extra.items[extra_index]; - var flags = @bitCast(Zir.Inst.BuiltinCall.Flags, slot.*); + var flags = @as(Zir.Inst.BuiltinCall.Flags, @bitCast(slot.*)); flags.ensure_result_used = true; - slot.* = @bitCast(u32, flags); + slot.* = @as(u32, @bitCast(flags)); break :b true; }, @@ -2897,7 +2897,7 @@ fn genDefers( .index = defer_scope.index, .len = defer_scope.len, }); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = .defer_err_code, .data = .{ .defer_err_code = .{ @@ -2976,7 +2976,7 @@ fn deferStmt( const sub_scope = if (!have_err_code) &defer_gen.base else 
blk: { try gz.addDbgBlockBegin(); const ident_name = try gz.astgen.identAsString(payload_token); - remapped_err_code = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + remapped_err_code = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); try gz.astgen.instructions.append(gz.astgen.gpa, .{ .tag = .extended, .data = .{ .extended = .{ @@ -3016,7 +3016,7 @@ fn deferStmt( break :blk gz.astgen.countBodyLenAfterFixups(body) + refs; }; - const index = @intCast(u32, gz.astgen.extra.items.len); + const index = @as(u32, @intCast(gz.astgen.extra.items.len)); try gz.astgen.extra.ensureUnusedCapacity(gz.astgen.gpa, body_len); if (have_err_code) { if (gz.astgen.ref_table.fetchRemove(remapped_err_code)) |kv| { @@ -3554,7 +3554,7 @@ fn ptrType( gz.astgen.extra.appendAssumeCapacity(@intFromEnum(bit_end_ref)); } - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); const result = indexToRef(new_index); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = .ptr_type, .data = .{ .ptr_type = .{ @@ -3645,7 +3645,7 @@ const WipMembers = struct { const max_decl_size = 11; fn init(gpa: Allocator, payload: *ArrayListUnmanaged(u32), decl_count: u32, field_count: u32, comptime bits_per_field: u32, comptime max_field_size: u32) Allocator.Error!Self { - const payload_top = @intCast(u32, payload.items.len); + const payload_top = @as(u32, @intCast(payload.items.len)); const decls_start = payload_top + (decl_count + decls_per_u32 - 1) / decls_per_u32; const field_bits_start = decls_start + decl_count * max_decl_size; const fields_start = field_bits_start + if (bits_per_field > 0) blk: { @@ -3700,7 +3700,7 @@ const WipMembers = struct { fn appendToDeclSlice(self: *Self, data: []const u32) void { assert(self.decls_end + data.len <= self.field_bits_start); @memcpy(self.payload.items[self.decls_end..][0..data.len], data); - self.decls_end += @intCast(u32, data.len); + self.decls_end += @as(u32, @intCast(data.len)); } fn appendToField(self: *Self, data: u32) void { @@ -3713,14 +3713,14 @@ const WipMembers = struct { const empty_decl_slots = decls_per_u32 - (self.decl_index % decls_per_u32); if (self.decl_index > 0 and empty_decl_slots < decls_per_u32) { const index = self.payload_top + self.decl_index / decls_per_u32; - self.payload.items[index] >>= @intCast(u5, empty_decl_slots * bits_per_decl); + self.payload.items[index] >>= @as(u5, @intCast(empty_decl_slots * bits_per_decl)); } if (bits_per_field > 0) { const fields_per_u32 = 32 / bits_per_field; const empty_field_slots = fields_per_u32 - (self.field_index % fields_per_u32); if (self.field_index > 0 and empty_field_slots < fields_per_u32) { const index = self.field_bits_start + self.field_index / fields_per_u32; - self.payload.items[index] >>= @intCast(u5, empty_field_slots * bits_per_field); + self.payload.items[index] >>= @as(u5, @intCast(empty_field_slots * bits_per_field)); } } } @@ -3882,7 +3882,7 @@ fn fnDecl( var param_gz = decl_gz.makeSubBlock(scope); defer param_gz.unstack(); const param_type = try expr(¶m_gz, params_scope, coerced_type_ri, param_type_node); - const param_inst_expected = @intCast(u32, astgen.instructions.len + 1); + const param_inst_expected = @as(u32, @intCast(astgen.instructions.len + 1)); _ = try param_gz.addBreakWithSrcNode(.break_inline, param_inst_expected, param_type, param_type_node); const main_tokens = tree.nodes.items(.main_token); @@ -4097,7 +4097,7 @@ fn fnDecl( { const contents_hash = 
std.zig.hashSrc(tree.getNodeSource(decl_node)); - const casted = @bitCast([4]u32, contents_hash); + const casted = @as([4]u32, @bitCast(contents_hash)); wip_members.appendToDeclSlice(&casted); } { @@ -4248,7 +4248,7 @@ fn globalVarDecl( { const contents_hash = std.zig.hashSrc(tree.getNodeSource(node)); - const casted = @bitCast([4]u32, contents_hash); + const casted = @as([4]u32, @bitCast(contents_hash)); wip_members.appendToDeclSlice(&casted); } { @@ -4303,7 +4303,7 @@ fn comptimeDecl( { const contents_hash = std.zig.hashSrc(tree.getNodeSource(node)); - const casted = @bitCast([4]u32, contents_hash); + const casted = @as([4]u32, @bitCast(contents_hash)); wip_members.appendToDeclSlice(&casted); } { @@ -4355,7 +4355,7 @@ fn usingnamespaceDecl( { const contents_hash = std.zig.hashSrc(tree.getNodeSource(node)); - const casted = @bitCast([4]u32, contents_hash); + const casted = @as([4]u32, @bitCast(contents_hash)); wip_members.appendToDeclSlice(&casted); } { @@ -4542,7 +4542,7 @@ fn testDecl( { const contents_hash = std.zig.hashSrc(tree.getNodeSource(node)); - const casted = @bitCast([4]u32, contents_hash); + const casted = @as([4]u32, @bitCast(contents_hash)); wip_members.appendToDeclSlice(&casted); } { @@ -4642,7 +4642,7 @@ fn structDeclInner( }; const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members); - const field_count = @intCast(u32, container_decl.ast.members.len - decl_count); + const field_count = @as(u32, @intCast(container_decl.ast.members.len - decl_count)); const bits_per_field = 4; const max_field_size = 5; @@ -4750,7 +4750,7 @@ fn structDeclInner( const old_scratch_len = astgen.scratch.items.len; try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body)); appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); - wip_members.appendToField(@intCast(u32, astgen.scratch.items.len - old_scratch_len)); + wip_members.appendToField(@as(u32, @intCast(astgen.scratch.items.len - old_scratch_len))); block_scope.instructions.items.len = block_scope.instructions_top; } else { wip_members.appendToField(@intFromEnum(field_type)); @@ -4768,7 +4768,7 @@ fn structDeclInner( const old_scratch_len = astgen.scratch.items.len; try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body)); appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); - wip_members.appendToField(@intCast(u32, astgen.scratch.items.len - old_scratch_len)); + wip_members.appendToField(@as(u32, @intCast(astgen.scratch.items.len - old_scratch_len))); block_scope.instructions.items.len = block_scope.instructions_top; } @@ -4783,7 +4783,7 @@ fn structDeclInner( const old_scratch_len = astgen.scratch.items.len; try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body)); appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); - wip_members.appendToField(@intCast(u32, astgen.scratch.items.len - old_scratch_len)); + wip_members.appendToField(@as(u32, @intCast(astgen.scratch.items.len - old_scratch_len))); block_scope.instructions.items.len = block_scope.instructions_top; } else if (member.comptime_token) |comptime_token| { return astgen.failTok(comptime_token, "comptime field without default initialization value", .{}); @@ -4796,7 +4796,7 @@ fn structDeclInner( .fields_len = field_count, .decls_len = decl_count, .backing_int_ref = backing_int_ref, - .backing_int_body_len = @intCast(u32, backing_int_body_len), + .backing_int_body_len = @as(u32, @intCast(backing_int_body_len)), .known_non_opv = known_non_opv, 
.known_comptime_only = known_comptime_only, .is_tuple = is_tuple, @@ -4856,7 +4856,7 @@ fn unionDeclInner( defer block_scope.unstack(); const decl_count = try astgen.scanDecls(&namespace, members); - const field_count = @intCast(u32, members.len - decl_count); + const field_count = @as(u32, @intCast(members.len - decl_count)); if (layout != .Auto and (auto_enum_tok != null or arg_node != 0)) { const layout_str = if (layout == .Extern) "extern" else "packed"; @@ -5151,7 +5151,7 @@ fn containerDecl( const bits_per_field = 1; const max_field_size = 3; - var wip_members = try WipMembers.init(gpa, &astgen.scratch, @intCast(u32, counts.decls), @intCast(u32, counts.total_fields), bits_per_field, max_field_size); + var wip_members = try WipMembers.init(gpa, &astgen.scratch, @as(u32, @intCast(counts.decls)), @as(u32, @intCast(counts.total_fields)), bits_per_field, max_field_size); defer wip_members.deinit(); for (container_decl.ast.members) |member_node| { @@ -5209,8 +5209,8 @@ fn containerDecl( .nonexhaustive = nonexhaustive, .tag_type = arg_inst, .body_len = body_len, - .fields_len = @intCast(u32, counts.total_fields), - .decls_len = @intCast(u32, counts.decls), + .fields_len = @as(u32, @intCast(counts.total_fields)), + .decls_len = @as(u32, @intCast(counts.decls)), }); wip_members.finishBits(bits_per_field); @@ -5400,7 +5400,7 @@ fn errorSetDecl(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zi } setExtra(astgen, payload_index, Zir.Inst.ErrorSetDecl{ - .fields_len = @intCast(u32, fields_len), + .fields_len = @as(u32, @intCast(fields_len)), }); const result = try gz.addPlNodePayloadIndex(.error_set_decl, node, payload_index); return rvalue(gz, ri, result, node); @@ -6463,7 +6463,7 @@ fn forExpr( { var capture_token = for_full.payload_token; for (for_full.ast.inputs, 0..) |input, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); const capture_is_ref = token_tags[capture_token] == .asterisk; const ident_tok = capture_token + @intFromBool(capture_is_ref); const is_discard = mem.eql(u8, tree.tokenSlice(ident_tok), "_"); @@ -6521,7 +6521,7 @@ fn forExpr( // We use a dedicated ZIR instruction to assert the lengths to assist with // nicer error reporting as well as fewer ZIR bytes emitted. const len: Zir.Inst.Ref = len: { - const lens_len = @intCast(u32, lens.len); + const lens_len = @as(u32, @intCast(lens.len)); try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.MultiOp).Struct.fields.len + lens_len); const len = try parent_gz.addPlNode(.for_len, node, Zir.Inst.MultiOp{ .operands_len = lens_len, @@ -6591,7 +6591,7 @@ fn forExpr( var capture_token = for_full.payload_token; var capture_sub_scope: *Scope = &then_scope.base; for (for_full.ast.inputs, 0..) 
|input, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); const capture_is_ref = token_tags[capture_token] == .asterisk; const ident_tok = capture_token + @intFromBool(capture_is_ref); const capture_name = tree.tokenSlice(ident_tok); @@ -6891,7 +6891,7 @@ fn switchExpr( // If any prong has an inline tag capture, allocate a shared dummy instruction for it const tag_inst = if (any_has_tag_capture) tag_inst: { - const inst = @intCast(Zir.Inst.Index, astgen.instructions.len); + const inst = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); try astgen.instructions.append(astgen.gpa, .{ .tag = .extended, .data = .{ .extended = .{ @@ -6984,7 +6984,7 @@ fn switchExpr( break :blk &tag_scope.base; }; - const header_index = @intCast(u32, payloads.items.len); + const header_index = @as(u32, @intCast(payloads.items.len)); const body_len_index = if (is_multi_case) blk: { payloads.items[multi_case_table + multi_case_index] = header_index; multi_case_index += 1; @@ -7074,12 +7074,12 @@ fn switchExpr( }; const body_len = refs_len + astgen.countBodyLenAfterFixups(case_slice); try payloads.ensureUnusedCapacity(gpa, body_len); - payloads.items[body_len_index] = @bitCast(u32, Zir.Inst.SwitchBlock.ProngInfo{ - .body_len = @intCast(u28, body_len), + payloads.items[body_len_index] = @as(u32, @bitCast(Zir.Inst.SwitchBlock.ProngInfo{ + .body_len = @as(u28, @intCast(body_len)), .capture = capture, .is_inline = case.inline_token != null, .has_tag_capture = has_tag_capture, - }); + })); if (astgen.ref_table.fetchRemove(switch_block)) |kv| { appendPossiblyRefdBodyInst(astgen, payloads, kv.value); } @@ -7106,7 +7106,7 @@ fn switchExpr( .has_else = special_prong == .@"else", .has_under = special_prong == .under, .any_has_tag_capture = any_has_tag_capture, - .scalar_cases_len = @intCast(Zir.Inst.SwitchBlock.Bits.ScalarCasesLen, scalar_cases_len), + .scalar_cases_len = @as(Zir.Inst.SwitchBlock.Bits.ScalarCasesLen, @intCast(scalar_cases_len)), }, }); @@ -7140,7 +7140,7 @@ fn switchExpr( end_index += 3 + items_len + 2 * ranges_len; } - const body_len = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, payloads.items[body_len_index]).body_len; + const body_len = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(payloads.items[body_len_index])).body_len; end_index += body_len; switch (strat.tag) { @@ -7579,7 +7579,7 @@ fn tunnelThroughClosure( .src_tok = ns.?.declaring_gz.?.tokenIndexToRelative(token), } }, }); - gop.value_ptr.* = @intCast(Zir.Inst.Index, gz.astgen.instructions.len - 1); + gop.value_ptr.* = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len - 1)); } // Add an instruction to get the value from the closure into @@ -7680,7 +7680,7 @@ fn numberLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index, source_node: }; // If the value fits into a f64 without losing any precision, store it that way. @setFloatMode(.Strict); - const smaller_float = @floatCast(f64, float_number); + const smaller_float = @as(f64, @floatCast(float_number)); const bigger_again: f128 = smaller_float; if (bigger_again == float_number) { const result = try gz.addFloat(smaller_float); @@ -7688,12 +7688,12 @@ fn numberLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index, source_node: } // We need to use 128 bits. Break the float into 4 u32 values so we can // put it into the `extra` array. 
- const int_bits = @bitCast(u128, float_number); + const int_bits = @as(u128, @bitCast(float_number)); const result = try gz.addPlNode(.float128, node, Zir.Inst.Float128{ - .piece0 = @truncate(u32, int_bits), - .piece1 = @truncate(u32, int_bits >> 32), - .piece2 = @truncate(u32, int_bits >> 64), - .piece3 = @truncate(u32, int_bits >> 96), + .piece0 = @as(u32, @truncate(int_bits)), + .piece1 = @as(u32, @truncate(int_bits >> 32)), + .piece2 = @as(u32, @truncate(int_bits >> 64)), + .piece3 = @as(u32, @truncate(int_bits >> 96)), }); return rvalue(gz, ri, result, source_node); }, @@ -7719,22 +7719,22 @@ fn failWithNumberError(astgen: *AstGen, err: std.zig.number_literal.Error, token }); }, .digit_after_base => return astgen.failTok(token, "expected a digit after base prefix", .{}), - .upper_case_base => |i| return astgen.failOff(token, @intCast(u32, i), "base prefix must be lowercase", .{}), - .invalid_float_base => |i| return astgen.failOff(token, @intCast(u32, i), "invalid base for float literal", .{}), - .repeated_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "repeated digit separator", .{}), - .invalid_underscore_after_special => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit before digit separator", .{}), - .invalid_digit => |info| return astgen.failOff(token, @intCast(u32, info.i), "invalid digit '{c}' for {s} base", .{ bytes[info.i], @tagName(info.base) }), - .invalid_digit_exponent => |i| return astgen.failOff(token, @intCast(u32, i), "invalid digit '{c}' in exponent", .{bytes[i]}), - .duplicate_exponent => |i| return astgen.failOff(token, @intCast(u32, i), "duplicate exponent", .{}), - .exponent_after_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit before exponent", .{}), - .special_after_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit before '{c}'", .{bytes[i]}), - .trailing_special => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit after '{c}'", .{bytes[i - 1]}), - .trailing_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "trailing digit separator", .{}), + .upper_case_base => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "base prefix must be lowercase", .{}), + .invalid_float_base => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "invalid base for float literal", .{}), + .repeated_underscore => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "repeated digit separator", .{}), + .invalid_underscore_after_special => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "expected digit before digit separator", .{}), + .invalid_digit => |info| return astgen.failOff(token, @as(u32, @intCast(info.i)), "invalid digit '{c}' for {s} base", .{ bytes[info.i], @tagName(info.base) }), + .invalid_digit_exponent => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "invalid digit '{c}' in exponent", .{bytes[i]}), + .duplicate_exponent => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "duplicate exponent", .{}), + .exponent_after_underscore => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "expected digit before exponent", .{}), + .special_after_underscore => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "expected digit before '{c}'", .{bytes[i]}), + .trailing_special => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "expected digit after '{c}'", .{bytes[i - 1]}), + .trailing_underscore => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "trailing digit separator", .{}), 
         .duplicate_period => unreachable, // Validated by tokenizer
         .invalid_character => unreachable, // Validated by tokenizer
         .invalid_exponent_sign => |i| {
             assert(bytes.len >= 2 and bytes[0] == '0' and bytes[1] == 'x'); // Validated by tokenizer
-            return astgen.failOff(token, @intCast(u32, i), "sign '{c}' cannot follow digit '{c}' in hex base", .{ bytes[i], bytes[i - 1] });
+            return astgen.failOff(token, @as(u32, @intCast(i)), "sign '{c}' cannot follow digit '{c}' in hex base", .{ bytes[i], bytes[i - 1] });
         },
     }
 }
@@ -7801,7 +7801,7 @@ fn asmExpr(
             if (output_type_bits != 0) {
                 return astgen.failNode(output_node, "inline assembly allows up to one output value", .{});
             }
-            output_type_bits |= @as(u32, 1) << @intCast(u5, i);
+            output_type_bits |= @as(u32, 1) << @as(u5, @intCast(i));
             const out_type_node = node_datas[output_node].lhs;
             const out_type_inst = try typeExpr(gz, scope, out_type_node);
             outputs[i] = .{
@@ -8024,11 +8024,11 @@ fn ptrCast(
         node = node_datas[node].lhs;
     }
 
-    const flags_i = @bitCast(u5, flags);
+    const flags_i = @as(u5, @bitCast(flags));
     assert(flags_i != 0);
 
     const ptr_only: Zir.Inst.FullPtrCastFlags = .{ .ptr_cast = true };
-    if (flags_i == @bitCast(u5, ptr_only)) {
+    if (flags_i == @as(u5, @bitCast(ptr_only))) {
         // Special case: simpler representation
         return typeCast(gz, scope, ri, root_node, node, .ptr_cast, "@ptrCast");
     }
@@ -8037,7 +8037,7 @@ fn ptrCast(
         .const_cast = true,
         .volatile_cast = true,
     };
-    if ((flags_i & ~@bitCast(u5, no_result_ty_flags)) == 0) {
+    if ((flags_i & ~@as(u5, @bitCast(no_result_ty_flags))) == 0) {
         // Result type not needed
         const cursor = maybeAdvanceSourceCursorToMainToken(gz, root_node);
         const operand = try expr(gz, scope, .{ .rl = .none }, node);
@@ -8119,8 +8119,8 @@ fn typeOf(
     const body = typeof_scope.instructionsSlice();
     const body_len = astgen.countBodyLenAfterFixups(body);
     astgen.setExtra(payload_index, Zir.Inst.TypeOfPeer{
-        .body_len = @intCast(u32, body_len),
-        .body_index = @intCast(u32, astgen.extra.items.len),
+        .body_len = @as(u32, @intCast(body_len)),
+        .body_index = @as(u32, @intCast(astgen.extra.items.len)),
         .src_node = gz.nodeIndexToRelative(node),
     });
     try astgen.extra.ensureUnusedCapacity(gpa, body_len);
@@ -8464,7 +8464,7 @@ fn builtinCall(
                 .node = gz.nodeIndexToRelative(node),
                 .operand = operand,
             });
-            const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+            const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
             gz.astgen.instructions.appendAssumeCapacity(.{
                 .tag = .extended,
                 .data = .{ .extended = .{
@@ -9115,7 +9115,7 @@ fn callExpr(
     }
     assert(node != 0);
 
-    const call_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
+    const call_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
     const call_inst = Zir.indexToRef(call_index);
     try gz.astgen.instructions.append(astgen.gpa, undefined);
     try gz.instructions.append(astgen.gpa, call_index);
@@ -9139,7 +9139,7 @@ fn callExpr(
         try astgen.scratch.ensureUnusedCapacity(astgen.gpa, countBodyLenAfterFixups(astgen, body));
         appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
 
-        astgen.scratch.items[scratch_index] = @intCast(u32, astgen.scratch.items.len - scratch_top);
+        astgen.scratch.items[scratch_index] = @as(u32, @intCast(astgen.scratch.items.len - scratch_top));
         scratch_index += 1;
     }
 
@@ -9157,8 +9157,8 @@ fn callExpr(
             .callee = callee_obj,
             .flags = .{
                 .pop_error_return_trace = !propagate_error_trace,
-                .packed_modifier = @intCast(Zir.Inst.Call.Flags.PackedModifier, @intFromEnum(modifier)),
-                .args_len = @intCast(Zir.Inst.Call.Flags.PackedArgsLen, call.ast.params.len),
+                .packed_modifier = @as(Zir.Inst.Call.Flags.PackedModifier, @intCast(@intFromEnum(modifier))),
+                .args_len = @as(Zir.Inst.Call.Flags.PackedArgsLen, @intCast(call.ast.params.len)),
             },
         });
         if (call.ast.params.len != 0) {
@@ -9178,8 +9178,8 @@ fn callExpr(
             .field_name_start = callee_field.field_name_start,
             .flags = .{
                 .pop_error_return_trace = !propagate_error_trace,
-                .packed_modifier = @intCast(Zir.Inst.Call.Flags.PackedModifier, @intFromEnum(modifier)),
-                .args_len = @intCast(Zir.Inst.Call.Flags.PackedArgsLen, call.ast.params.len),
+                .packed_modifier = @as(Zir.Inst.Call.Flags.PackedModifier, @intCast(@intFromEnum(modifier))),
+                .args_len = @as(Zir.Inst.Call.Flags.PackedArgsLen, @intCast(call.ast.params.len)),
             },
         });
         if (call.ast.params.len != 0) {
@@ -10552,7 +10552,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .invalid_escape_character => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "invalid escape character: '{c}'",
                 .{raw_string[bad_index]},
             );
         },
@@ -10560,7 +10560,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .expected_hex_digit => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "expected hex digit, found '{c}'",
                 .{raw_string[bad_index]},
             );
         },
@@ -10568,7 +10568,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .empty_unicode_escape_sequence => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "empty unicode escape sequence",
                 .{},
             );
         },
@@ -10576,7 +10576,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .expected_hex_digit_or_rbrace => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "expected hex digit or '}}', found '{c}'",
                 .{raw_string[bad_index]},
            );
         },
@@ -10584,7 +10584,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .invalid_unicode_codepoint => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "unicode escape does not correspond to a valid codepoint",
                 .{},
             );
         },
@@ -10592,7 +10592,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .expected_lbrace => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "expected '{{', found '{c}",
                 .{raw_string[bad_index]},
             );
         },
@@ -10600,7 +10600,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .expected_rbrace => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "expected '}}', found '{c}",
                 .{raw_string[bad_index]},
             );
         },
@@ -10608,7 +10608,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .expected_single_quote => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "expected single quote ('), found '{c}",
                 .{raw_string[bad_index]},
             );
         },
@@ -10616,7 +10616,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .invalid_character => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "invalid byte in string or character literal: '{c}'",
                 .{raw_string[bad_index]},
             );
@@ -10651,14 +10651,14 @@ fn appendErrorNodeNotes(
 ) Allocator.Error!void {
     @setCold(true);
     const string_bytes = &astgen.string_bytes;
-    const msg = @intCast(u32, string_bytes.items.len);
+    const msg = @as(u32, @intCast(string_bytes.items.len));
     try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args);
     const notes_index: u32 = if (notes.len != 0) blk: {
         const notes_start = astgen.extra.items.len;
         try astgen.extra.ensureTotalCapacity(astgen.gpa, notes_start + 1 + notes.len);
-        astgen.extra.appendAssumeCapacity(@intCast(u32, notes.len));
+        astgen.extra.appendAssumeCapacity(@as(u32, @intCast(notes.len)));
         astgen.extra.appendSliceAssumeCapacity(notes);
-        break :blk @intCast(u32, notes_start);
+        break :blk @as(u32, @intCast(notes_start));
     } else 0;
     try astgen.compile_errors.append(astgen.gpa, .{
         .msg = msg,
@@ -10743,14 +10743,14 @@ fn appendErrorTokNotesOff(
     @setCold(true);
     const gpa = astgen.gpa;
     const string_bytes = &astgen.string_bytes;
-    const msg = @intCast(u32, string_bytes.items.len);
+    const msg = @as(u32, @intCast(string_bytes.items.len));
     try string_bytes.writer(gpa).print(format ++ "\x00", args);
     const notes_index: u32 = if (notes.len != 0) blk: {
         const notes_start = astgen.extra.items.len;
         try astgen.extra.ensureTotalCapacity(gpa, notes_start + 1 + notes.len);
-        astgen.extra.appendAssumeCapacity(@intCast(u32, notes.len));
+        astgen.extra.appendAssumeCapacity(@as(u32, @intCast(notes.len)));
         astgen.extra.appendSliceAssumeCapacity(notes);
-        break :blk @intCast(u32, notes_start);
+        break :blk @as(u32, @intCast(notes_start));
     } else 0;
     try astgen.compile_errors.append(gpa, .{
         .msg = msg,
@@ -10779,7 +10779,7 @@ fn errNoteTokOff(
 ) Allocator.Error!u32 {
     @setCold(true);
     const string_bytes = &astgen.string_bytes;
-    const msg = @intCast(u32, string_bytes.items.len);
+    const msg = @as(u32, @intCast(string_bytes.items.len));
     try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args);
     return astgen.addExtra(Zir.Inst.CompileErrors.Item{
         .msg = msg,
@@ -10798,7 +10798,7 @@ fn errNoteNode(
 ) Allocator.Error!u32 {
     @setCold(true);
     const string_bytes = &astgen.string_bytes;
-    const msg = @intCast(u32, string_bytes.items.len);
+    const msg = @as(u32, @intCast(string_bytes.items.len));
     try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args);
     return astgen.addExtra(Zir.Inst.CompileErrors.Item{
         .msg = msg,
@@ -10812,7 +10812,7 @@ fn errNoteNode(
 fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !u32 {
     const gpa = astgen.gpa;
     const string_bytes = &astgen.string_bytes;
-    const str_index = @intCast(u32, string_bytes.items.len);
+    const str_index = @as(u32, @intCast(string_bytes.items.len));
     try astgen.appendIdentStr(ident_token, string_bytes);
     const key: []const u8 = string_bytes.items[str_index..];
     const gop = try astgen.string_table.getOrPutContextAdapted(gpa, key, StringIndexAdapter{
@@ -10858,7 +10858,7 @@ fn docCommentAsStringFromFirst(
     const gpa = astgen.gpa;
     const string_bytes = &astgen.string_bytes;
 
-    const str_index = @intCast(u32, string_bytes.items.len);
+    const str_index = @as(u32, @intCast(string_bytes.items.len));
     const token_starts = astgen.tree.tokens.items(.start);
     const token_tags = astgen.tree.tokens.items(.tag);
 
@@ -10901,7 +10901,7 @@ const IndexSlice = struct { index: u32, len: u32 };
 fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice {
     const gpa = astgen.gpa;
     const string_bytes = 
&astgen.string_bytes; - const str_index = @intCast(u32, string_bytes.items.len); + const str_index = @as(u32, @intCast(string_bytes.items.len)); const token_bytes = astgen.tree.tokenSlice(str_lit_token); try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0); const key = string_bytes.items[str_index..]; @@ -10914,7 +10914,7 @@ fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice { string_bytes.shrinkRetainingCapacity(str_index); return IndexSlice{ .index = gop.key_ptr.*, - .len = @intCast(u32, key.len), + .len = @as(u32, @intCast(key.len)), }; } else { gop.key_ptr.* = str_index; @@ -10924,7 +10924,7 @@ fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice { try string_bytes.append(gpa, 0); return IndexSlice{ .index = str_index, - .len = @intCast(u32, key.len), + .len = @as(u32, @intCast(key.len)), }; } } @@ -10961,15 +10961,15 @@ fn strLitNodeAsString(astgen: *AstGen, node: Ast.Node.Index) !IndexSlice { const len = string_bytes.items.len - str_index; try string_bytes.append(gpa, 0); return IndexSlice{ - .index = @intCast(u32, str_index), - .len = @intCast(u32, len), + .index = @as(u32, @intCast(str_index)), + .len = @as(u32, @intCast(len)), }; } fn testNameString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !u32 { const gpa = astgen.gpa; const string_bytes = &astgen.string_bytes; - const str_index = @intCast(u32, string_bytes.items.len); + const str_index = @as(u32, @intCast(string_bytes.items.len)); const token_bytes = astgen.tree.tokenSlice(str_lit_token); try string_bytes.append(gpa, 0); // Indicates this is a test. try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0); @@ -11321,7 +11321,7 @@ const GenZir = struct { } fn nodeIndexToRelative(gz: GenZir, node_index: Ast.Node.Index) i32 { - return @bitCast(i32, node_index) - @bitCast(i32, gz.decl_node_index); + return @as(i32, @bitCast(node_index)) - @as(i32, @bitCast(gz.decl_node_index)); } fn tokenIndexToRelative(gz: GenZir, token: Ast.TokenIndex) u32 { @@ -11478,7 +11478,7 @@ const GenZir = struct { const astgen = gz.astgen; const gpa = astgen.gpa; const ret_ref = if (args.ret_ref == .void_type) .none else args.ret_ref; - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); try astgen.instructions.ensureUnusedCapacity(gpa, 1); @@ -11496,8 +11496,8 @@ const GenZir = struct { const block = node_datas[fn_decl].rhs; const rbrace_start = token_starts[tree.lastToken(block)]; astgen.advanceSourceCursor(rbrace_start); - const rbrace_line = @intCast(u32, astgen.source_line - gz.decl_line); - const rbrace_column = @intCast(u32, astgen.source_column); + const rbrace_line = @as(u32, @intCast(astgen.source_line - gz.decl_line)); + const rbrace_column = @as(u32, @intCast(astgen.source_column)); const columns = args.lbrace_column | (rbrace_column << 16); src_locs_buffer[0] = args.lbrace_line; @@ -11733,18 +11733,18 @@ const GenZir = struct { astgen.extra.appendAssumeCapacity(@intFromEnum(args.init)); } - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); astgen.instructions.appendAssumeCapacity(.{ .tag = .extended, .data = .{ .extended = .{ .opcode = .variable, - .small = @bitCast(u16, Zir.Inst.ExtendedVar.Small{ + .small = @as(u16, @bitCast(Zir.Inst.ExtendedVar.Small{ .has_lib_name = args.lib_name != 0, .has_align = args.align_inst != .none, .has_init = args.init != .none, .is_extern = 
args.is_extern, .is_threadlocal = args.is_threadlocal, - }), + })), .operand = payload_index, } }, }); @@ -11764,7 +11764,7 @@ const GenZir = struct { try gz.instructions.ensureUnusedCapacity(gpa, 1); try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = tag, .data = .{ .bool_br = .{ @@ -11790,12 +11790,12 @@ const GenZir = struct { try astgen.instructions.ensureUnusedCapacity(gpa, 1); try astgen.string_bytes.ensureUnusedCapacity(gpa, @sizeOf(std.math.big.Limb) * limbs.len); - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); astgen.instructions.appendAssumeCapacity(.{ .tag = .int_big, .data = .{ .str = .{ - .start = @intCast(u32, astgen.string_bytes.items.len), - .len = @intCast(u32, limbs.len), + .start = @as(u32, @intCast(astgen.string_bytes.items.len)), + .len = @as(u32, @intCast(limbs.len)), } }, }); gz.instructions.appendAssumeCapacity(new_index); @@ -11835,7 +11835,7 @@ const GenZir = struct { src_node: Ast.Node.Index, ) !Zir.Inst.Index { assert(operand != .none); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); try gz.astgen.instructions.append(gz.astgen.gpa, .{ .tag = tag, .data = .{ .un_node = .{ @@ -11858,7 +11858,7 @@ const GenZir = struct { try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); const payload_index = try gz.astgen.addExtra(extra); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = tag, .data = .{ .pl_node = .{ @@ -11910,12 +11910,12 @@ const GenZir = struct { const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Param{ .name = name, .doc_comment = doc_comment_index, - .body_len = @intCast(u32, body_len), + .body_len = @as(u32, @intCast(body_len)), }); gz.astgen.appendBodyWithFixups(param_body); param_gz.unstack(); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = tag, .data = .{ .pl_tok = .{ @@ -11943,7 +11943,7 @@ const GenZir = struct { try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); const payload_index = try gz.astgen.addExtra(extra); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = .extended, .data = .{ .extended = .{ @@ -11975,12 +11975,12 @@ const GenZir = struct { const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.NodeMultiOp{ .src_node = gz.nodeIndexToRelative(node), }); - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); astgen.instructions.appendAssumeCapacity(.{ .tag = .extended, .data = .{ .extended = .{ .opcode = opcode, - .small = @intCast(u16, operands.len), + .small = @as(u16, @intCast(operands.len)), .operand = payload_index, } }, }); @@ -12000,12 +12000,12 @@ const GenZir = struct { try gz.instructions.ensureUnusedCapacity(gpa, 1); try 
astgen.instructions.ensureUnusedCapacity(gpa, 1); - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); astgen.instructions.appendAssumeCapacity(.{ .tag = .extended, .data = .{ .extended = .{ .opcode = opcode, - .small = @intCast(u16, trailing_len), + .small = @as(u16, @intCast(trailing_len)), .operand = payload_index, } }, }); @@ -12038,7 +12038,7 @@ const GenZir = struct { abs_tok_index: Ast.TokenIndex, ) !Zir.Inst.Index { const astgen = gz.astgen; - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); assert(operand != .none); try astgen.instructions.append(astgen.gpa, .{ .tag = tag, @@ -12121,7 +12121,7 @@ const GenZir = struct { .operand_src_node = Zir.Inst.Break.no_src_node, }; const payload_index = try gz.astgen.addExtra(extra); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = tag, .data = .{ .@"break" = .{ @@ -12147,7 +12147,7 @@ const GenZir = struct { .operand_src_node = Zir.Inst.Break.no_src_node, }; const payload_index = try gz.astgen.addExtra(extra); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = tag, .data = .{ .@"break" = .{ @@ -12174,7 +12174,7 @@ const GenZir = struct { .operand_src_node = gz.nodeIndexToRelative(operand_src_node), }; const payload_index = try gz.astgen.addExtra(extra); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = tag, .data = .{ .@"break" = .{ @@ -12201,7 +12201,7 @@ const GenZir = struct { .operand_src_node = gz.nodeIndexToRelative(operand_src_node), }; const payload_index = try gz.astgen.addExtra(extra); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = tag, .data = .{ .@"break" = .{ @@ -12293,7 +12293,7 @@ const GenZir = struct { .data = .{ .extended = .{ .opcode = opcode, .small = undefined, - .operand = @bitCast(u32, gz.nodeIndexToRelative(src_node)), + .operand = @as(u32, @bitCast(gz.nodeIndexToRelative(src_node))), } }, }); } @@ -12336,7 +12336,7 @@ const GenZir = struct { const is_comptime: u4 = @intFromBool(args.is_comptime); const small: u16 = has_type | (has_align << 1) | (is_const << 2) | (is_comptime << 3); - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); astgen.instructions.appendAssumeCapacity(.{ .tag = .extended, .data = .{ .extended = .{ @@ -12390,12 +12390,12 @@ const GenZir = struct { // * 0b000000XX_XXX00000 - `inputs_len`. // * 0b0XXXXX00_00000000 - `clobbers_len`. 
// * 0bX0000000_00000000 - is volatile - const small: u16 = @intCast(u16, args.outputs.len) | - @intCast(u16, args.inputs.len << 5) | - @intCast(u16, args.clobbers.len << 10) | + const small: u16 = @as(u16, @intCast(args.outputs.len)) | + @as(u16, @intCast(args.inputs.len << 5)) | + @as(u16, @intCast(args.clobbers.len << 10)) | (@as(u16, @intFromBool(args.is_volatile)) << 15); - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); astgen.instructions.appendAssumeCapacity(.{ .tag = .extended, .data = .{ .extended = .{ @@ -12412,7 +12412,7 @@ const GenZir = struct { /// Does *not* append the block instruction to the scope. /// Leaves the `payload_index` field undefined. fn makeBlockInst(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index { - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); const gpa = gz.astgen.gpa; try gz.astgen.instructions.append(gpa, .{ .tag = tag, @@ -12429,7 +12429,7 @@ const GenZir = struct { fn addCondBr(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index { const gpa = gz.astgen.gpa; try gz.instructions.ensureUnusedCapacity(gpa, 1); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); try gz.astgen.instructions.append(gpa, .{ .tag = tag, .data = .{ .pl_node = .{ @@ -12456,11 +12456,11 @@ const GenZir = struct { const gpa = astgen.gpa; try astgen.extra.ensureUnusedCapacity(gpa, 6); - const payload_index = @intCast(u32, astgen.extra.items.len); + const payload_index = @as(u32, @intCast(astgen.extra.items.len)); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); - astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset)); + astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset))); } if (args.fields_len != 0) { astgen.extra.appendAssumeCapacity(args.fields_len); @@ -12478,7 +12478,7 @@ const GenZir = struct { .tag = .extended, .data = .{ .extended = .{ .opcode = .struct_decl, - .small = @bitCast(u16, Zir.Inst.StructDecl.Small{ + .small = @as(u16, @bitCast(Zir.Inst.StructDecl.Small{ .has_src_node = args.src_node != 0, .has_fields_len = args.fields_len != 0, .has_decls_len = args.decls_len != 0, @@ -12488,7 +12488,7 @@ const GenZir = struct { .is_tuple = args.is_tuple, .name_strategy = gz.anon_name_strategy, .layout = args.layout, - }), + })), .operand = payload_index, } }, }); @@ -12507,11 +12507,11 @@ const GenZir = struct { const gpa = astgen.gpa; try astgen.extra.ensureUnusedCapacity(gpa, 5); - const payload_index = @intCast(u32, astgen.extra.items.len); + const payload_index = @as(u32, @intCast(astgen.extra.items.len)); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); - astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset)); + astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset))); } if (args.tag_type != .none) { astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type)); @@ -12529,7 +12529,7 @@ const GenZir = struct { .tag = .extended, .data = .{ .extended = .{ .opcode = .union_decl, - .small = @bitCast(u16, Zir.Inst.UnionDecl.Small{ + .small = @as(u16, @bitCast(Zir.Inst.UnionDecl.Small{ .has_src_node = args.src_node != 0, .has_tag_type = args.tag_type != .none, .has_body_len = args.body_len != 0, @@ -12538,7 +12538,7 @@ const GenZir = 
struct { .name_strategy = gz.anon_name_strategy, .layout = args.layout, .auto_enum_tag = args.auto_enum_tag, - }), + })), .operand = payload_index, } }, }); @@ -12556,11 +12556,11 @@ const GenZir = struct { const gpa = astgen.gpa; try astgen.extra.ensureUnusedCapacity(gpa, 5); - const payload_index = @intCast(u32, astgen.extra.items.len); + const payload_index = @as(u32, @intCast(astgen.extra.items.len)); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); - astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset)); + astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset))); } if (args.tag_type != .none) { astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type)); @@ -12578,7 +12578,7 @@ const GenZir = struct { .tag = .extended, .data = .{ .extended = .{ .opcode = .enum_decl, - .small = @bitCast(u16, Zir.Inst.EnumDecl.Small{ + .small = @as(u16, @bitCast(Zir.Inst.EnumDecl.Small{ .has_src_node = args.src_node != 0, .has_tag_type = args.tag_type != .none, .has_body_len = args.body_len != 0, @@ -12586,7 +12586,7 @@ const GenZir = struct { .has_decls_len = args.decls_len != 0, .name_strategy = gz.anon_name_strategy, .nonexhaustive = args.nonexhaustive, - }), + })), .operand = payload_index, } }, }); @@ -12600,11 +12600,11 @@ const GenZir = struct { const gpa = astgen.gpa; try astgen.extra.ensureUnusedCapacity(gpa, 2); - const payload_index = @intCast(u32, astgen.extra.items.len); + const payload_index = @as(u32, @intCast(astgen.extra.items.len)); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); - astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset)); + astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset))); } if (args.decls_len != 0) { astgen.extra.appendAssumeCapacity(args.decls_len); @@ -12613,11 +12613,11 @@ const GenZir = struct { .tag = .extended, .data = .{ .extended = .{ .opcode = .opaque_decl, - .small = @bitCast(u16, Zir.Inst.OpaqueDecl.Small{ + .small = @as(u16, @bitCast(Zir.Inst.OpaqueDecl.Small{ .has_src_node = args.src_node != 0, .has_decls_len = args.decls_len != 0, .name_strategy = gz.anon_name_strategy, - }), + })), .operand = payload_index, } }, }); @@ -12632,7 +12632,7 @@ const GenZir = struct { try gz.instructions.ensureUnusedCapacity(gpa, 1); try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(inst); gz.instructions.appendAssumeCapacity(new_index); return new_index; @@ -12643,7 +12643,7 @@ const GenZir = struct { try gz.instructions.ensureUnusedCapacity(gpa, 1); try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.len += 1; gz.instructions.appendAssumeCapacity(new_index); return new_index; @@ -12695,7 +12695,7 @@ const GenZir = struct { return; } - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); try gz.astgen.instructions.append(gpa, .{ .tag = .dbg_block_end, .data = undefined }); try gz.instructions.append(gpa, new_index); } @@ -12704,7 +12704,7 @@ const GenZir = struct { /// This can only be for short-lived references; the memory becomes invalidated /// when another string is added. 
fn nullTerminatedString(astgen: AstGen, index: usize) [*:0]const u8 { - return @ptrCast([*:0]const u8, astgen.string_bytes.items.ptr) + index; + return @as([*:0]const u8, @ptrCast(astgen.string_bytes.items.ptr)) + index; } /// Local variables shadowing detection, including function parameters. @@ -12983,7 +12983,7 @@ fn isInferred(astgen: *AstGen, ref: Zir.Inst.Ref) bool { .extended => { const zir_data = astgen.instructions.items(.data); if (zir_data[inst].extended.opcode != .alloc) return false; - const small = @bitCast(Zir.Inst.AllocExtended.Small, zir_data[inst].extended.small); + const small = @as(Zir.Inst.AllocExtended.Small, @bitCast(zir_data[inst].extended.small)); return !small.has_type; }, @@ -13027,7 +13027,7 @@ fn countBodyLenAfterFixups(astgen: *AstGen, body: []const Zir.Inst.Index) u32 { check_inst = ref_inst; } } - return @intCast(u32, count); + return @as(u32, @intCast(count)); } fn emitDbgStmt(gz: *GenZir, lc: LineColumn) !void { @@ -13059,7 +13059,7 @@ fn lowerAstErrors(astgen: *AstGen) !void { if (token_tags[parse_err.token + @intFromBool(parse_err.token_is_prev)] == .invalid) { const tok = parse_err.token + @intFromBool(parse_err.token_is_prev); - const bad_off = @intCast(u32, tree.tokenSlice(parse_err.token + @intFromBool(parse_err.token_is_prev)).len); + const bad_off = @as(u32, @intCast(tree.tokenSlice(parse_err.token + @intFromBool(parse_err.token_is_prev)).len)); const byte_abs = token_starts[parse_err.token + @intFromBool(parse_err.token_is_prev)] + bad_off; try notes.append(gpa, try astgen.errNoteTokOff(tok, bad_off, "invalid byte: '{'}'", .{ std.zig.fmtEscapes(tree.source[byte_abs..][0..1]), diff --git a/src/Autodoc.zig b/src/Autodoc.zig index 33c57b1197..1b9988c0c3 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -110,7 +110,7 @@ pub fn generateZirData(self: *Autodoc) !void { comptime std.debug.assert(@intFromEnum(InternPool.Index.first_type) == 0); var i: u32 = 0; while (i <= @intFromEnum(InternPool.Index.last_type)) : (i += 1) { - const ip_index = @enumFromInt(InternPool.Index, i); + const ip_index = @as(InternPool.Index, @enumFromInt(i)); var tmpbuf = std.ArrayList(u8).init(self.arena); if (ip_index == .generic_poison_type) { // Not a real type, doesn't have a normal name @@ -1669,7 +1669,7 @@ fn walkInstruction( // present in json var sentinel: ?DocData.Expr = null; if (ptr.flags.has_sentinel) { - const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); const ref_result = try self.walkRef(file, parent_scope, parent_src, ref, false); sentinel = ref_result.expr; extra_index += 1; @@ -1677,21 +1677,21 @@ fn walkInstruction( var @"align": ?DocData.Expr = null; if (ptr.flags.has_align) { - const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); const ref_result = try self.walkRef(file, parent_scope, parent_src, ref, false); @"align" = ref_result.expr; extra_index += 1; } var address_space: ?DocData.Expr = null; if (ptr.flags.has_addrspace) { - const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); const ref_result = try self.walkRef(file, parent_scope, parent_src, ref, false); address_space = ref_result.expr; extra_index += 1; } var bit_start: ?DocData.Expr = null; if (ptr.flags.has_bit_range) { - const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const ref = @as(Zir.Inst.Ref, 
@enumFromInt(file.zir.extra[extra_index])); const ref_result = try self.walkRef(file, parent_scope, parent_src, ref, false); address_space = ref_result.expr; extra_index += 1; @@ -1699,7 +1699,7 @@ fn walkInstruction( var host_size: ?DocData.Expr = null; if (ptr.flags.has_bit_range) { - const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); const ref_result = try self.walkRef(file, parent_scope, parent_src, ref, false); host_size = ref_result.expr; } @@ -2549,11 +2549,11 @@ fn walkInstruction( .enclosing_type = type_slot_index, }; - const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small); + const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, file.zir.extra[extra_index]); + const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; @@ -2606,7 +2606,7 @@ fn walkInstruction( .variable => { const extra = file.zir.extraData(Zir.Inst.ExtendedVar, extended.operand); - const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small); + const small = @as(Zir.Inst.ExtendedVar.Small, @bitCast(extended.small)); var extra_index: usize = extra.end; if (small.has_lib_name) extra_index += 1; if (small.has_align) extra_index += 1; @@ -2619,7 +2619,7 @@ fn walkInstruction( }; if (small.has_init) { - const var_init_ref = @enumFromInt(Ref, file.zir.extra[extra_index]); + const var_init_ref = @as(Ref, @enumFromInt(file.zir.extra[extra_index])); const var_init = try self.walkRef(file, parent_scope, parent_src, var_init_ref, need_type); value.expr = var_init.expr; value.typeRef = var_init.typeRef; @@ -2636,11 +2636,11 @@ fn walkInstruction( .enclosing_type = type_slot_index, }; - const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); + const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, file.zir.extra[extra_index]); + const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; @@ -2655,7 +2655,7 @@ fn walkInstruction( const tag_type_ref: ?Ref = if (small.has_tag_type) blk: { const tag_type = file.zir.extra[extra_index]; extra_index += 1; - const tag_ref = @enumFromInt(Ref, tag_type); + const tag_ref = @as(Ref, @enumFromInt(tag_type)); break :blk tag_ref; } else null; @@ -2763,11 +2763,11 @@ fn walkInstruction( .enclosing_type = type_slot_index, }; - const small = @bitCast(Zir.Inst.EnumDecl.Small, extended.small); + const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, file.zir.extra[extra_index]); + const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; @@ -2780,7 +2780,7 @@ fn walkInstruction( const tag_type: ?DocData.Expr = if (small.has_tag_type) blk: { const tag_type = file.zir.extra[extra_index]; extra_index += 1; - const tag_ref = @enumFromInt(Ref, tag_type); + const tag_ref = @as(Ref, @enumFromInt(tag_type)); const wr = try self.walkRef(file, parent_scope, parent_src, tag_ref, false); break :blk wr.expr; } else null; @@ -2826,7 +2826,7 @@ fn walkInstruction( bit_bag_idx += 1; } - 
const has_value = @truncate(u1, cur_bit_bag) != 0; + const has_value = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const field_name_index = file.zir.extra[extra_index]; @@ -2838,7 +2838,7 @@ fn walkInstruction( const value_expr: ?DocData.Expr = if (has_value) blk: { const value_ref = file.zir.extra[extra_index]; extra_index += 1; - const value = try self.walkRef(file, &scope, src_info, @enumFromInt(Ref, value_ref), false); + const value = try self.walkRef(file, &scope, src_info, @as(Ref, @enumFromInt(value_ref)), false); break :blk value.expr; } else null; try field_values.append(self.arena, value_expr); @@ -2899,11 +2899,11 @@ fn walkInstruction( .enclosing_type = type_slot_index, }; - const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); + const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, file.zir.extra[extra_index]); + const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; @@ -2927,7 +2927,7 @@ fn walkInstruction( const backing_int_body_len = file.zir.extra[extra_index]; extra_index += 1; // backing_int_body_len if (backing_int_body_len == 0) { - const backing_int_ref = @enumFromInt(Ref, file.zir.extra[extra_index]); + const backing_int_ref = @as(Ref, @enumFromInt(file.zir.extra[extra_index])); const backing_int_res = try self.walkRef(file, &scope, src_info, backing_int_ref, true); backing_int = backing_int_res.expr; extra_index += 1; // backing_int_ref @@ -3154,7 +3154,7 @@ fn analyzeAllDecls( priv_decl_indexes: *std.ArrayListUnmanaged(usize), ) AutodocErrors!usize { const first_decl_indexes_slot = decl_indexes.items.len; - const original_it = file.zir.declIterator(@intCast(u32, parent_inst_index)); + const original_it = file.zir.declIterator(@as(u32, @intCast(parent_inst_index))); // First loop to discover decl names { @@ -3180,7 +3180,7 @@ fn analyzeAllDecls( const decl_name_index = file.zir.extra[d.sub_index + 5]; switch (decl_name_index) { 0 => { - const is_exported = @truncate(u1, d.flags >> 1); + const is_exported = @as(u1, @truncate(d.flags >> 1)); switch (is_exported) { 0 => continue, // comptime decl 1 => { @@ -3255,10 +3255,10 @@ fn analyzeDecl( d: Zir.DeclIterator.Item, ) AutodocErrors!void { const data = file.zir.instructions.items(.data); - const is_pub = @truncate(u1, d.flags >> 0) != 0; + const is_pub = @as(u1, @truncate(d.flags >> 0)) != 0; // const is_exported = @truncate(u1, d.flags >> 1) != 0; - const has_align = @truncate(u1, d.flags >> 2) != 0; - const has_section_or_addrspace = @truncate(u1, d.flags >> 3) != 0; + const has_align = @as(u1, @truncate(d.flags >> 2)) != 0; + const has_section_or_addrspace = @as(u1, @truncate(d.flags >> 3)) != 0; var extra_index = d.sub_index; // const hash_u32s = file.zir.extra[extra_index..][0..4]; @@ -3277,21 +3277,21 @@ fn analyzeDecl( extra_index += 1; const align_inst: Zir.Inst.Ref = if (!has_align) .none else inst: { - const inst = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const inst = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); extra_index += 1; break :inst inst; }; _ = align_inst; const section_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: { - const inst = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const inst = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); extra_index += 1; break :inst inst; }; _ = 
section_inst; const addrspace_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: { - const inst = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const inst = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); extra_index += 1; break :inst inst; }; @@ -3381,7 +3381,7 @@ fn analyzeUsingnamespaceDecl( ) AutodocErrors!void { const data = file.zir.instructions.items(.data); - const is_pub = @truncate(u1, d.flags) != 0; + const is_pub = @as(u1, @truncate(d.flags)) != 0; const value_index = file.zir.extra[d.sub_index + 6]; const doc_comment_index = file.zir.extra[d.sub_index + 7]; @@ -4028,7 +4028,7 @@ fn analyzeFancyFunction( ) AutodocErrors!DocData.WalkResult { const tags = file.zir.instructions.items(.tag); const data = file.zir.instructions.items(.data); - const fn_info = file.zir.getFnInfo(@intCast(u32, inst_index)); + const fn_info = file.zir.getFnInfo(@as(u32, @intCast(inst_index))); try self.ast_nodes.ensureUnusedCapacity(self.arena, fn_info.total_params_len); var param_type_refs = try std.ArrayListUnmanaged(DocData.Expr).initCapacity( @@ -4108,7 +4108,7 @@ fn analyzeFancyFunction( var align_index: ?usize = null; if (extra.data.bits.has_align_ref) { - const align_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const align_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); align_index = self.exprs.items.len; _ = try self.walkRef(file, scope, parent_src, align_ref, false); extra_index += 1; @@ -4125,7 +4125,7 @@ fn analyzeFancyFunction( var addrspace_index: ?usize = null; if (extra.data.bits.has_addrspace_ref) { - const addrspace_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const addrspace_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); addrspace_index = self.exprs.items.len; _ = try self.walkRef(file, scope, parent_src, addrspace_ref, false); extra_index += 1; @@ -4142,7 +4142,7 @@ fn analyzeFancyFunction( var section_index: ?usize = null; if (extra.data.bits.has_section_ref) { - const section_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const section_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); section_index = self.exprs.items.len; _ = try self.walkRef(file, scope, parent_src, section_ref, false); extra_index += 1; @@ -4159,7 +4159,7 @@ fn analyzeFancyFunction( var cc_index: ?usize = null; if (extra.data.bits.has_cc_ref and !extra.data.bits.has_cc_body) { - const cc_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const cc_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); const cc_expr = try self.walkRef(file, scope, parent_src, cc_ref, false); cc_index = self.exprs.items.len; @@ -4262,7 +4262,7 @@ fn analyzeFunction( ) AutodocErrors!DocData.WalkResult { const tags = file.zir.instructions.items(.tag); const data = file.zir.instructions.items(.data); - const fn_info = file.zir.getFnInfo(@intCast(u32, inst_index)); + const fn_info = file.zir.getFnInfo(@as(u32, @intCast(inst_index))); try self.ast_nodes.ensureUnusedCapacity(self.arena, fn_info.total_params_len); var param_type_refs = try std.ArrayListUnmanaged(DocData.Expr).initCapacity( @@ -4449,13 +4449,13 @@ fn collectUnionFieldInfo( cur_bit_bag = file.zir.extra[bit_bag_index]; bit_bag_index += 1; } - const has_type = @truncate(u1, cur_bit_bag) != 0; + const has_type = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_align = @truncate(u1, cur_bit_bag) != 0; + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 
1; - const has_tag = @truncate(u1, cur_bit_bag) != 0; + const has_tag = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const unused = @truncate(u1, cur_bit_bag) != 0; + const unused = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; _ = unused; @@ -4464,7 +4464,7 @@ fn collectUnionFieldInfo( const doc_comment_index = file.zir.extra[extra_index]; extra_index += 1; const field_type = if (has_type) - @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]) + @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])) else .void_type; if (has_type) extra_index += 1; @@ -4532,13 +4532,13 @@ fn collectStructFieldInfo( cur_bit_bag = file.zir.extra[bit_bag_index]; bit_bag_index += 1; } - const has_align = @truncate(u1, cur_bit_bag) != 0; + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_default = @truncate(u1, cur_bit_bag) != 0; + const has_default = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; // const is_comptime = @truncate(u1, cur_bit_bag) != 0; cur_bit_bag >>= 1; - const has_type_body = @truncate(u1, cur_bit_bag) != 0; + const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const field_name: ?u32 = if (!is_tuple) blk: { @@ -4558,7 +4558,7 @@ fn collectStructFieldInfo( if (has_type_body) { fields[field_i].type_body_len = file.zir.extra[extra_index]; } else { - fields[field_i].type_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + fields[field_i].type_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); } extra_index += 1; @@ -4855,9 +4855,9 @@ fn srcLocInfo( src_node: i32, parent_src: SrcLocInfo, ) !SrcLocInfo { - const sn = @intCast(u32, @intCast(i32, parent_src.src_node) + src_node); + const sn = @as(u32, @intCast(@as(i32, @intCast(parent_src.src_node)) + src_node)); const tree = try file.getTree(self.comp_module.gpa); - const node_idx = @bitCast(Ast.Node.Index, sn); + const node_idx = @as(Ast.Node.Index, @bitCast(sn)); const tokens = tree.nodes.items(.main_token); const tok_idx = tokens[node_idx]; @@ -4876,9 +4876,9 @@ fn declIsVar( src_node: i32, parent_src: SrcLocInfo, ) !bool { - const sn = @intCast(u32, @intCast(i32, parent_src.src_node) + src_node); + const sn = @as(u32, @intCast(@as(i32, @intCast(parent_src.src_node)) + src_node)); const tree = try file.getTree(self.comp_module.gpa); - const node_idx = @bitCast(Ast.Node.Index, sn); + const node_idx = @as(Ast.Node.Index, @bitCast(sn)); const tokens = tree.nodes.items(.main_token); const tags = tree.tokens.items(.tag); diff --git a/src/Compilation.zig b/src/Compilation.zig index 55b3ab95f7..d9273dcdd8 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1046,7 +1046,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { const llvm_cpu_features: ?[*:0]const u8 = if (build_options.have_llvm and use_llvm) blk: { var buf = std.ArrayList(u8).init(arena); for (options.target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| { - const index = @intCast(Target.Cpu.Feature.Set.Index, index_usize); + const index = @as(Target.Cpu.Feature.Set.Index, @intCast(index_usize)); const is_enabled = options.target.cpu.features.isEnabled(index); if (feature.llvm_name) |llvm_name| { @@ -2562,7 +2562,7 @@ pub fn totalErrorCount(self: *Compilation) u32 { } } - return @intCast(u32, total); + return @as(u32, @intCast(total)); } /// This function is temporally single-threaded. 
@@ -2596,7 +2596,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { } for (self.lld_errors.items) |lld_error| { - const notes_len = @intCast(u32, lld_error.context_lines.len); + const notes_len = @as(u32, @intCast(lld_error.context_lines.len)); try bundle.addRootErrorMessage(.{ .msg = try bundle.addString(lld_error.msg), @@ -2753,7 +2753,7 @@ pub const ErrorNoteHashContext = struct { std.hash.autoHash(&hasher, src.span_main); } - return @truncate(u32, hasher.final()); + return @as(u32, @truncate(hasher.final())); } pub fn eql( @@ -2830,8 +2830,8 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod .span_start = span.start, .span_main = span.main, .span_end = span.end, - .line = @intCast(u32, loc.line), - .column = @intCast(u32, loc.column), + .line = @as(u32, @intCast(loc.line)), + .column = @as(u32, @intCast(loc.column)), .source_line = 0, }), }); @@ -2842,13 +2842,13 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod .span_start = err_span.start, .span_main = err_span.main, .span_end = err_span.end, - .line = @intCast(u32, err_loc.line), - .column = @intCast(u32, err_loc.column), + .line = @as(u32, @intCast(err_loc.line)), + .column = @as(u32, @intCast(err_loc.column)), .source_line = if (module_err_msg.src_loc.lazy == .entire_file) 0 else try eb.addString(err_loc.source_line), - .reference_trace_len = @intCast(u32, ref_traces.items.len), + .reference_trace_len = @as(u32, @intCast(ref_traces.items.len)), }); for (ref_traces.items) |rt| { @@ -2874,8 +2874,8 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod .span_start = span.start, .span_main = span.main, .span_end = span.end, - .line = @intCast(u32, loc.line), - .column = @intCast(u32, loc.column), + .line = @as(u32, @intCast(loc.line)), + .column = @as(u32, @intCast(loc.column)), .source_line = if (err_loc.eql(loc)) 0 else try eb.addString(loc.source_line), }), }, .{ .eb = eb }); @@ -2884,7 +2884,7 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod } } - const notes_len = @intCast(u32, notes.entries.len); + const notes_len = @as(u32, @intCast(notes.entries.len)); try eb.addRootErrorMessage(.{ .msg = try eb.addString(module_err_msg.msg), @@ -2919,7 +2919,7 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { } const token_starts = file.tree.tokens.items(.start); const start = token_starts[item.data.token] + item.data.byte_offset; - const end = start + @intCast(u32, file.tree.tokenSlice(item.data.token).len) - item.data.byte_offset; + const end = start + @as(u32, @intCast(file.tree.tokenSlice(item.data.token).len)) - item.data.byte_offset; break :blk Module.SrcLoc.Span{ .start = start, .end = end, .main = start }; }; const err_loc = std.zig.findLineColumn(file.source, err_span.main); @@ -2935,8 +2935,8 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { .span_start = err_span.start, .span_main = err_span.main, .span_end = err_span.end, - .line = @intCast(u32, err_loc.line), - .column = @intCast(u32, err_loc.column), + .line = @as(u32, @intCast(err_loc.line)), + .column = @as(u32, @intCast(err_loc.column)), .source_line = try eb.addString(err_loc.source_line), }), .notes_len = item.data.notesLen(file.zir), @@ -2956,7 +2956,7 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { } const token_starts = file.tree.tokens.items(.start); const start = token_starts[note_item.data.token] + note_item.data.byte_offset; - const 
end = start + @intCast(u32, file.tree.tokenSlice(note_item.data.token).len) - item.data.byte_offset; + const end = start + @as(u32, @intCast(file.tree.tokenSlice(note_item.data.token).len)) - item.data.byte_offset; break :blk Module.SrcLoc.Span{ .start = start, .end = end, .main = start }; }; const loc = std.zig.findLineColumn(file.source, span.main); @@ -2970,8 +2970,8 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { .span_start = span.start, .span_main = span.main, .span_end = span.end, - .line = @intCast(u32, loc.line), - .column = @intCast(u32, loc.column), + .line = @as(u32, @intCast(loc.line)), + .column = @as(u32, @intCast(loc.column)), .source_line = if (loc.eql(err_loc)) 0 else @@ -4302,7 +4302,7 @@ pub fn addCCArgs( const all_features_list = target.cpu.arch.allFeaturesList(); try argv.ensureUnusedCapacity(all_features_list.len * 4); for (all_features_list, 0..) |feature, index_usize| { - const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize); + const index = @as(std.Target.Cpu.Feature.Set.Index, @intCast(index_usize)); const is_enabled = target.cpu.features.isEnabled(index); if (feature.llvm_name) |llvm_name| { @@ -5172,7 +5172,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca }); for (target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| { - const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize); + const index = @as(std.Target.Cpu.Feature.Set.Index, @intCast(index_usize)); const is_enabled = target.cpu.features.isEnabled(index); if (is_enabled) { try buffer.writer().print(" .{},\n", .{std.zig.fmtId(feature.name)}); diff --git a/src/InternPool.zig b/src/InternPool.zig index 33d4108e6d..1a89c239ef 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -80,7 +80,7 @@ const KeyAdapter = struct { pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool { _ = b_void; - return ctx.intern_pool.indexToKey(@enumFromInt(Index, b_map_index)).eql(a, ctx.intern_pool); + return ctx.intern_pool.indexToKey(@as(Index, @enumFromInt(b_map_index))).eql(a, ctx.intern_pool); } pub fn hash(ctx: @This(), a: Key) u32 { @@ -95,7 +95,7 @@ pub const OptionalMapIndex = enum(u32) { pub fn unwrap(oi: OptionalMapIndex) ?MapIndex { if (oi == .none) return null; - return @enumFromInt(MapIndex, @intFromEnum(oi)); + return @as(MapIndex, @enumFromInt(@intFromEnum(oi))); } }; @@ -104,7 +104,7 @@ pub const MapIndex = enum(u32) { _, pub fn toOptional(i: MapIndex) OptionalMapIndex { - return @enumFromInt(OptionalMapIndex, @intFromEnum(i)); + return @as(OptionalMapIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -114,7 +114,7 @@ pub const RuntimeIndex = enum(u32) { _, pub fn increment(ri: *RuntimeIndex) void { - ri.* = @enumFromInt(RuntimeIndex, @intFromEnum(ri.*) + 1); + ri.* = @as(RuntimeIndex, @enumFromInt(@intFromEnum(ri.*) + 1)); } }; @@ -130,11 +130,11 @@ pub const NullTerminatedString = enum(u32) { _, pub fn toString(self: NullTerminatedString) String { - return @enumFromInt(String, @intFromEnum(self)); + return @as(String, @enumFromInt(@intFromEnum(self))); } pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString { - return @enumFromInt(OptionalNullTerminatedString, @intFromEnum(self)); + return @as(OptionalNullTerminatedString, @enumFromInt(@intFromEnum(self))); } const Adapter = struct { @@ -196,7 +196,7 @@ pub const OptionalNullTerminatedString = enum(u32) { pub fn unwrap(oi: OptionalNullTerminatedString) ?NullTerminatedString { if (oi == .none) return null; - return 
@enumFromInt(NullTerminatedString, @intFromEnum(oi)); + return @as(NullTerminatedString, @enumFromInt(@intFromEnum(oi))); } }; @@ -282,7 +282,7 @@ pub const Key = union(enum) { const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)]; const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; const field_index = map.getIndexAdapted(name, adapter) orelse return null; - return @intCast(u32, field_index); + return @as(u32, @intCast(field_index)); } }; @@ -420,7 +420,7 @@ pub const Key = union(enum) { const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)]; const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; const field_index = map.getIndexAdapted(name, adapter) orelse return null; - return @intCast(u32, field_index); + return @as(u32, @intCast(field_index)); } /// Look up field index based on tag value. @@ -440,7 +440,7 @@ pub const Key = union(enum) { const map = &ip.maps.items[@intFromEnum(values_map)]; const adapter: Index.Adapter = .{ .indexes = self.values }; const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null; - return @intCast(u32, field_index); + return @as(u32, @intCast(field_index)); } // Auto-numbered enum. Convert `int_tag_val` to field index. const field_index = switch (ip.indexToKey(int_tag_val).int.storage) { @@ -511,12 +511,12 @@ pub const Key = union(enum) { pub fn paramIsComptime(self: @This(), i: u5) bool { assert(i < self.param_types.len); - return @truncate(u1, self.comptime_bits >> i) != 0; + return @as(u1, @truncate(self.comptime_bits >> i)) != 0; } pub fn paramIsNoalias(self: @This(), i: u5) bool { assert(i < self.param_types.len); - return @truncate(u1, self.noalias_bits >> i) != 0; + return @as(u1, @truncate(self.noalias_bits >> i)) != 0; } }; @@ -685,7 +685,7 @@ pub const Key = union(enum) { }; pub fn hash32(key: Key, ip: *const InternPool) u32 { - return @truncate(u32, key.hash64(ip)); + return @as(u32, @truncate(key.hash64(ip))); } pub fn hash64(key: Key, ip: *const InternPool) u64 { @@ -767,7 +767,7 @@ pub const Key = union(enum) { switch (float.storage) { inline else => |val| std.hash.autoHash( &hasher, - @bitCast(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))), val), + @as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))), @bitCast(val)), ), } return hasher.final(); @@ -812,18 +812,18 @@ pub const Key = union(enum) { if (child == .u8_type) { switch (aggregate.storage) { - .bytes => |bytes| for (bytes[0..@intCast(usize, len)]) |byte| { + .bytes => |bytes| for (bytes[0..@as(usize, @intCast(len))]) |byte| { std.hash.autoHash(&hasher, KeyTag.int); std.hash.autoHash(&hasher, byte); }, - .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| { + .elems => |elems| for (elems[0..@as(usize, @intCast(len))]) |elem| { const elem_key = ip.indexToKey(elem); std.hash.autoHash(&hasher, @as(KeyTag, elem_key)); switch (elem_key) { .undef => {}, .int => |int| std.hash.autoHash( &hasher, - @intCast(u8, int.storage.u64), + @as(u8, @intCast(int.storage.u64)), ), else => unreachable, } @@ -837,7 +837,7 @@ pub const Key = union(enum) { .undef => {}, .int => |int| std.hash.autoHash( &hasher, - @intCast(u8, int.storage.u64), + @as(u8, @intCast(int.storage.u64)), ), else => unreachable, } @@ -849,7 +849,7 @@ pub const Key = union(enum) { switch (aggregate.storage) { .bytes => unreachable, - .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| + .elems => |elems| for (elems[0..@as(usize, @intCast(len))]) |elem| std.hash.autoHash(&hasher, elem), .repeated_elem => |elem| { var 
remaining = len; @@ -1061,10 +1061,10 @@ pub const Key = union(enum) { // These are strange: we'll sometimes represent them as f128, even if the // underlying type is smaller. f80 is an exception: see float_c_longdouble_f80. const a_val = switch (a_info.storage) { - inline else => |val| @floatCast(f128, val), + inline else => |val| @as(f128, @floatCast(val)), }; const b_val = switch (b_info.storage) { - inline else => |val| @floatCast(f128, val), + inline else => |val| @as(f128, @floatCast(val)), }; return a_val == b_val; } @@ -1092,7 +1092,7 @@ pub const Key = union(enum) { const len = ip.aggregateTypeLen(a_info.ty); const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?; if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) { - for (0..@intCast(usize, len)) |elem_index| { + for (0..@as(usize, @intCast(len))) |elem_index| { const a_elem = switch (a_info.storage) { .bytes => |bytes| ip.getIfExists(.{ .int = .{ .ty = .u8_type, @@ -1119,16 +1119,16 @@ pub const Key = union(enum) { const b_bytes = b_info.storage.bytes; return std.mem.eql( u8, - a_bytes[0..@intCast(usize, len)], - b_bytes[0..@intCast(usize, len)], + a_bytes[0..@as(usize, @intCast(len))], + b_bytes[0..@as(usize, @intCast(len))], ); }, .elems => |a_elems| { const b_elems = b_info.storage.elems; return std.mem.eql( Index, - a_elems[0..@intCast(usize, len)], - b_elems[0..@intCast(usize, len)], + a_elems[0..@as(usize, @intCast(len))], + b_elems[0..@as(usize, @intCast(len))], ); }, .repeated_elem => |a_elem| { @@ -2291,7 +2291,7 @@ pub const Alignment = enum(u6) { pub fn fromByteUnits(n: u64) Alignment { if (n == 0) return .none; assert(std.math.isPowerOfTwo(n)); - return @enumFromInt(Alignment, @ctz(n)); + return @as(Alignment, @enumFromInt(@ctz(n))); } pub fn fromNonzeroByteUnits(n: u64) Alignment { @@ -2368,11 +2368,11 @@ pub const PackedU64 = packed struct(u64) { b: u32, pub fn get(x: PackedU64) u64 { - return @bitCast(u64, x); + return @as(u64, @bitCast(x)); } pub fn init(x: u64) PackedU64 { - return @bitCast(PackedU64, x); + return @as(PackedU64, @bitCast(x)); } }; @@ -2435,14 +2435,14 @@ pub const Float64 = struct { pub fn get(self: Float64) f64 { const int_bits = @as(u64, self.piece0) | (@as(u64, self.piece1) << 32); - return @bitCast(f64, int_bits); + return @as(f64, @bitCast(int_bits)); } fn pack(val: f64) Float64 { - const bits = @bitCast(u64, val); + const bits = @as(u64, @bitCast(val)); return .{ - .piece0 = @truncate(u32, bits), - .piece1 = @truncate(u32, bits >> 32), + .piece0 = @as(u32, @truncate(bits)), + .piece1 = @as(u32, @truncate(bits >> 32)), }; } }; @@ -2457,15 +2457,15 @@ pub const Float80 = struct { const int_bits = @as(u80, self.piece0) | (@as(u80, self.piece1) << 32) | (@as(u80, self.piece2) << 64); - return @bitCast(f80, int_bits); + return @as(f80, @bitCast(int_bits)); } fn pack(val: f80) Float80 { - const bits = @bitCast(u80, val); + const bits = @as(u80, @bitCast(val)); return .{ - .piece0 = @truncate(u32, bits), - .piece1 = @truncate(u32, bits >> 32), - .piece2 = @truncate(u16, bits >> 64), + .piece0 = @as(u32, @truncate(bits)), + .piece1 = @as(u32, @truncate(bits >> 32)), + .piece2 = @as(u16, @truncate(bits >> 64)), }; } }; @@ -2482,16 +2482,16 @@ pub const Float128 = struct { (@as(u128, self.piece1) << 32) | (@as(u128, self.piece2) << 64) | (@as(u128, self.piece3) << 96); - return @bitCast(f128, int_bits); + return @as(f128, @bitCast(int_bits)); } fn pack(val: f128) Float128 { - const bits = @bitCast(u128, val); + const bits = @as(u128, @bitCast(val)); return .{ - 
.piece0 = @truncate(u32, bits), - .piece1 = @truncate(u32, bits >> 32), - .piece2 = @truncate(u32, bits >> 64), - .piece3 = @truncate(u32, bits >> 96), + .piece0 = @as(u32, @truncate(bits)), + .piece1 = @as(u32, @truncate(bits >> 32)), + .piece2 = @as(u32, @truncate(bits >> 64)), + .piece3 = @as(u32, @truncate(bits >> 96)), }; } }; @@ -2575,13 +2575,13 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .type_int_signed => .{ .int_type = .{ .signedness = .signed, - .bits = @intCast(u16, data), + .bits = @as(u16, @intCast(data)), }, }, .type_int_unsigned => .{ .int_type = .{ .signedness = .unsigned, - .bits = @intCast(u16, data), + .bits = @as(u16, @intCast(data)), }, }, .type_array_big => { @@ -2600,8 +2600,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .sentinel = .none, } }; }, - .simple_type => .{ .simple_type = @enumFromInt(SimpleType, data) }, - .simple_value => .{ .simple_value = @enumFromInt(SimpleValue, data) }, + .simple_type => .{ .simple_type = @as(SimpleType, @enumFromInt(data)) }, + .simple_value => .{ .simple_value = @as(SimpleValue, @enumFromInt(data)) }, .type_vector => { const vector_info = ip.extraData(Vector, data); @@ -2620,8 +2620,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { return .{ .ptr_type = ptr_info }; }, - .type_optional => .{ .opt_type = @enumFromInt(Index, data) }, - .type_anyframe => .{ .anyframe_type = @enumFromInt(Index, data) }, + .type_optional => .{ .opt_type = @as(Index, @enumFromInt(data)) }, + .type_anyframe => .{ .anyframe_type = @as(Index, @enumFromInt(data)) }, .type_error_union => .{ .error_union_type = ip.extraData(Key.ErrorUnionType, data) }, .type_error_set => { @@ -2629,17 +2629,17 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { const names_len = error_set.data.names_len; const names = ip.extra.items[error_set.end..][0..names_len]; return .{ .error_set_type = .{ - .names = @ptrCast([]const NullTerminatedString, names), + .names = @as([]const NullTerminatedString, @ptrCast(names)), .names_map = error_set.data.names_map.toOptional(), } }; }, .type_inferred_error_set => .{ - .inferred_error_set_type = @enumFromInt(Module.Fn.InferredErrorSet.Index, data), + .inferred_error_set_type = @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(data)), }, .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) }, .type_struct => { - const struct_index = @enumFromInt(Module.Struct.OptionalIndex, data); + const struct_index = @as(Module.Struct.OptionalIndex, @enumFromInt(data)); const namespace = if (struct_index.unwrap()) |i| ip.structPtrConst(i).namespace.toOptional() else @@ -2651,7 +2651,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .type_struct_ns => .{ .struct_type = .{ .index = .none, - .namespace = @enumFromInt(Module.Namespace.Index, data).toOptional(), + .namespace = @as(Module.Namespace.Index, @enumFromInt(data)).toOptional(), } }, .type_struct_anon => { @@ -2661,9 +2661,9 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; const names = ip.extra.items[type_struct_anon.end + 2 * fields_len ..][0..fields_len]; return .{ .anon_struct_type = .{ - .types = @ptrCast([]const Index, types), - .values = @ptrCast([]const Index, values), - .names = @ptrCast([]const NullTerminatedString, names), + .types = @as([]const Index, @ptrCast(types)), + .values = @as([]const Index, @ptrCast(values)), + .names = @as([]const NullTerminatedString, @ptrCast(names)), } }; }, 
.type_tuple_anon => { @@ -2672,30 +2672,30 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { const types = ip.extra.items[type_struct_anon.end..][0..fields_len]; const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; return .{ .anon_struct_type = .{ - .types = @ptrCast([]const Index, types), - .values = @ptrCast([]const Index, values), + .types = @as([]const Index, @ptrCast(types)), + .values = @as([]const Index, @ptrCast(values)), .names = &.{}, } }; }, .type_union_untagged => .{ .union_type = .{ - .index = @enumFromInt(Module.Union.Index, data), + .index = @as(Module.Union.Index, @enumFromInt(data)), .runtime_tag = .none, } }, .type_union_tagged => .{ .union_type = .{ - .index = @enumFromInt(Module.Union.Index, data), + .index = @as(Module.Union.Index, @enumFromInt(data)), .runtime_tag = .tagged, } }, .type_union_safety => .{ .union_type = .{ - .index = @enumFromInt(Module.Union.Index, data), + .index = @as(Module.Union.Index, @enumFromInt(data)), .runtime_tag = .safety, } }, .type_enum_auto => { const enum_auto = ip.extraDataTrail(EnumAuto, data); - const names = @ptrCast( + const names = @as( []const NullTerminatedString, - ip.extra.items[enum_auto.end..][0..enum_auto.data.fields_len], + @ptrCast(ip.extra.items[enum_auto.end..][0..enum_auto.data.fields_len]), ); return .{ .enum_type = .{ .decl = enum_auto.data.decl, @@ -2712,10 +2712,10 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .type_enum_nonexhaustive => ip.indexToKeyEnum(data, .nonexhaustive), .type_function => .{ .func_type = ip.indexToKeyFuncType(data) }, - .undef => .{ .undef = @enumFromInt(Index, data) }, + .undef => .{ .undef = @as(Index, @enumFromInt(data)) }, .runtime_value => .{ .runtime_value = ip.extraData(Tag.TypeValue, data) }, .opt_null => .{ .opt = .{ - .ty = @enumFromInt(Index, data), + .ty = @as(Index, @enumFromInt(data)), .val = .none, } }, .opt_payload => { @@ -2877,7 +2877,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }, .int_i32 => .{ .int = .{ .ty = .i32_type, - .storage = .{ .i64 = @bitCast(i32, data) }, + .storage = .{ .i64 = @as(i32, @bitCast(data)) }, } }, .int_usize => .{ .int = .{ .ty = .usize_type, @@ -2889,7 +2889,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }, .int_comptime_int_i32 => .{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .i64 = @bitCast(i32, data) }, + .storage = .{ .i64 = @as(i32, @bitCast(data)) }, } }, .int_positive => ip.indexToKeyBigInt(data, true), .int_negative => ip.indexToKeyBigInt(data, false), @@ -2913,11 +2913,11 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .float_f16 => .{ .float = .{ .ty = .f16_type, - .storage = .{ .f16 = @bitCast(f16, @intCast(u16, data)) }, + .storage = .{ .f16 = @as(f16, @bitCast(@as(u16, @intCast(data)))) }, } }, .float_f32 => .{ .float = .{ .ty = .f32_type, - .storage = .{ .f32 = @bitCast(f32, data) }, + .storage = .{ .f32 = @as(f32, @bitCast(data)) }, } }, .float_f64 => .{ .float = .{ .ty = .f64_type, @@ -2959,13 +2959,13 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .extern_func => .{ .extern_func = ip.extraData(Tag.ExternFunc, data) }, .func => .{ .func = ip.extraData(Tag.Func, data) }, .only_possible_value => { - const ty = @enumFromInt(Index, data); + const ty = @as(Index, @enumFromInt(data)); const ty_item = ip.items.get(@intFromEnum(ty)); return switch (ty_item.tag) { .type_array_big => { - const sentinel = @ptrCast( + const sentinel = @as( *const [1]Index, - &ip.extra.items[ty_item.data + 
std.meta.fieldIndex(Array, "sentinel").?], + @ptrCast(&ip.extra.items[ty_item.data + std.meta.fieldIndex(Array, "sentinel").?]), ); return .{ .aggregate = .{ .ty = ty, @@ -2994,7 +2994,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; return .{ .aggregate = .{ .ty = ty, - .storage = .{ .elems = @ptrCast([]const Index, values) }, + .storage = .{ .elems = @as([]const Index, @ptrCast(values)) }, } }; }, @@ -3010,7 +3010,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .bytes => { const extra = ip.extraData(Bytes, data); - const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.ty)); + const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(extra.ty))); return .{ .aggregate = .{ .ty = extra.ty, .storage = .{ .bytes = ip.string_bytes.items[@intFromEnum(extra.bytes)..][0..len] }, @@ -3018,8 +3018,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .aggregate => { const extra = ip.extraDataTrail(Tag.Aggregate, data); - const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.data.ty)); - const fields = @ptrCast([]const Index, ip.extra.items[extra.end..][0..len]); + const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(extra.data.ty))); + const fields = @as([]const Index, @ptrCast(ip.extra.items[extra.end..][0..len])); return .{ .aggregate = .{ .ty = extra.data.ty, .storage = .{ .elems = fields }, @@ -3048,14 +3048,14 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .val = .{ .payload = extra.val }, } }; }, - .enum_literal => .{ .enum_literal = @enumFromInt(NullTerminatedString, data) }, + .enum_literal => .{ .enum_literal = @as(NullTerminatedString, @enumFromInt(data)) }, .enum_tag => .{ .enum_tag = ip.extraData(Tag.EnumTag, data) }, .memoized_call => { const extra = ip.extraDataTrail(MemoizedCall, data); return .{ .memoized_call = .{ .func = extra.data.func, - .arg_values = @ptrCast([]const Index, ip.extra.items[extra.end..][0..extra.data.args_len]), + .arg_values = @as([]const Index, @ptrCast(ip.extra.items[extra.end..][0..extra.data.args_len])), .result = extra.data.result, } }; }, @@ -3064,9 +3064,9 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType { const type_function = ip.extraDataTrail(TypeFunction, data); - const param_types = @ptrCast( + const param_types = @as( []Index, - ip.extra.items[type_function.end..][0..type_function.data.params_len], + @ptrCast(ip.extra.items[type_function.end..][0..type_function.data.params_len]), ); return .{ .param_types = param_types, @@ -3087,13 +3087,13 @@ fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType { fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { const enum_explicit = ip.extraDataTrail(EnumExplicit, data); - const names = @ptrCast( + const names = @as( []const NullTerminatedString, - ip.extra.items[enum_explicit.end..][0..enum_explicit.data.fields_len], + @ptrCast(ip.extra.items[enum_explicit.end..][0..enum_explicit.data.fields_len]), ); - const values = if (enum_explicit.data.values_map != .none) @ptrCast( + const values = if (enum_explicit.data.values_map != .none) @as( []const Index, - ip.extra.items[enum_explicit.end + names.len ..][0..enum_explicit.data.fields_len], + @ptrCast(ip.extra.items[enum_explicit.end + names.len ..][0..enum_explicit.data.fields_len]), ) else &[0]Index{}; return .{ .enum_type = 
.{ @@ -3122,7 +3122,7 @@ fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); - if (gop.found_existing) return @enumFromInt(Index, gop.index); + if (gop.found_existing) return @as(Index, @enumFromInt(gop.index)); try ip.items.ensureUnusedCapacity(gpa, 1); switch (key) { .int_type => |int_type| { @@ -3150,7 +3150,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .tag = .type_slice, .data = @intFromEnum(ptr_type_index), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } var ptr_type_adjusted = ptr_type; @@ -3174,7 +3174,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .child = array_type.child, }), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } } @@ -3223,7 +3223,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { assert(std.sort.isSorted(NullTerminatedString, error_set_type.names, {}, NullTerminatedString.indexLessThan)); const names_map = try ip.addMap(gpa); try addStringsToMap(ip, gpa, names_map, error_set_type.names); - const names_len = @intCast(u32, error_set_type.names.len); + const names_len = @as(u32, @intCast(error_set_type.names.len)); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(ErrorSet).Struct.fields.len + names_len); ip.items.appendAssumeCapacity(.{ .tag = .type_error_set, @@ -3232,7 +3232,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .names_map = names_map, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, error_set_type.names)); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(error_set_type.names))); }, .inferred_error_set_type => |ies_index| { ip.items.appendAssumeCapacity(.{ @@ -3284,7 +3284,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { assert(anon_struct_type.types.len == anon_struct_type.values.len); for (anon_struct_type.types) |elem| assert(elem != .none); - const fields_len = @intCast(u32, anon_struct_type.types.len); + const fields_len = @as(u32, @intCast(anon_struct_type.types.len)); if (anon_struct_type.names.len == 0) { try ip.extra.ensureUnusedCapacity( gpa, @@ -3296,9 +3296,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .fields_len = fields_len, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types)); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values)); - return @enumFromInt(Index, ip.items.len - 1); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.types))); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.values))); + return @as(Index, @enumFromInt(ip.items.len - 1)); } assert(anon_struct_type.names.len == anon_struct_type.types.len); @@ -3313,10 +3313,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .fields_len = fields_len, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types)); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values)); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.names)); - return @enumFromInt(Index, ip.items.len - 1); + 
ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.types))); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.values))); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.names))); + return @as(Index, @enumFromInt(ip.items.len - 1)); }, .union_type => |union_type| { @@ -3348,7 +3348,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const names_map = try ip.addMap(gpa); try addStringsToMap(ip, gpa, names_map, enum_type.names); - const fields_len = @intCast(u32, enum_type.names.len); + const fields_len = @as(u32, @intCast(enum_type.names.len)); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + fields_len); ip.items.appendAssumeCapacity(.{ @@ -3361,8 +3361,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .fields_len = fields_len, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names)); - return @enumFromInt(Index, ip.items.len - 1); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.names))); + return @as(Index, @enumFromInt(ip.items.len - 1)); }, .explicit => return finishGetEnum(ip, gpa, enum_type, .type_enum_explicit), .nonexhaustive => return finishGetEnum(ip, gpa, enum_type, .type_enum_nonexhaustive), @@ -3373,7 +3373,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { assert(func_type.return_type != .none); for (func_type.param_types) |param_type| assert(param_type != .none); - const params_len = @intCast(u32, func_type.param_types.len); + const params_len = @as(u32, @intCast(func_type.param_types.len)); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(TypeFunction).Struct.fields.len + params_len); @@ -3397,7 +3397,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, func_type.param_types)); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(func_type.param_types))); }, .variable => |variable| { @@ -3559,7 +3559,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, } - assert(ptr.ty == ip.indexToKey(@enumFromInt(Index, ip.items.len - 1)).ptr.ty); + assert(ptr.ty == ip.indexToKey(@as(Index, @enumFromInt(ip.items.len - 1))).ptr.ty); }, .opt => |opt| { @@ -3593,7 +3593,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .lazy_ty = lazy_ty, }), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); }, } switch (int.ty) { @@ -3608,7 +3608,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { inline .u64, .i64 => |x| { ip.items.appendAssumeCapacity(.{ .tag = .int_u8, - .data = @intCast(u8, x), + .data = @as(u8, @intCast(x)), }); break :b; }, @@ -3625,7 +3625,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { inline .u64, .i64 => |x| { ip.items.appendAssumeCapacity(.{ .tag = .int_u16, - .data = @intCast(u16, x), + .data = @as(u16, @intCast(x)), }); break :b; }, @@ -3642,7 +3642,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { inline .u64, .i64 => |x| { ip.items.appendAssumeCapacity(.{ .tag = .int_u32, - .data = @intCast(u32, x), + .data = @as(u32, @intCast(x)), }); break :b; }, @@ -3653,14 +3653,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const casted = big_int.to(i32) catch 
unreachable; ip.items.appendAssumeCapacity(.{ .tag = .int_i32, - .data = @bitCast(u32, casted), + .data = @as(u32, @bitCast(casted)), }); break :b; }, inline .u64, .i64 => |x| { ip.items.appendAssumeCapacity(.{ .tag = .int_i32, - .data = @bitCast(u32, @intCast(i32, x)), + .data = @as(u32, @bitCast(@as(i32, @intCast(x)))), }); break :b; }, @@ -3699,7 +3699,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { if (big_int.to(i32)) |casted| { ip.items.appendAssumeCapacity(.{ .tag = .int_comptime_int_i32, - .data = @bitCast(u32, casted), + .data = @as(u32, @bitCast(casted)), }); break :b; } else |_| {} @@ -3715,7 +3715,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { if (std.math.cast(i32, x)) |casted| { ip.items.appendAssumeCapacity(.{ .tag = .int_comptime_int_i32, - .data = @bitCast(u32, casted), + .data = @as(u32, @bitCast(casted)), }); break :b; } @@ -3734,7 +3734,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .value = casted, }), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } else |_| {} const tag: Tag = if (big_int.positive) .int_positive else .int_negative; @@ -3749,7 +3749,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .value = casted, }), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } var buf: [2]Limb = undefined; @@ -3816,11 +3816,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { switch (float.ty) { .f16_type => ip.items.appendAssumeCapacity(.{ .tag = .float_f16, - .data = @bitCast(u16, float.storage.f16), + .data = @as(u16, @bitCast(float.storage.f16)), }), .f32_type => ip.items.appendAssumeCapacity(.{ .tag = .float_f32, - .data = @bitCast(u32, float.storage.f32), + .data = @as(u32, @bitCast(float.storage.f32)), }), .f64_type => ip.items.appendAssumeCapacity(.{ .tag = .float_f64, @@ -3872,13 +3872,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { assert(child == .u8_type); if (bytes.len != len) { assert(bytes.len == len_including_sentinel); - assert(bytes[@intCast(usize, len)] == ip.indexToKey(sentinel).int.storage.u64); + assert(bytes[@as(usize, @intCast(len))] == ip.indexToKey(sentinel).int.storage.u64); } }, .elems => |elems| { if (elems.len != len) { assert(elems.len == len_including_sentinel); - assert(elems[@intCast(usize, len)] == sentinel); + assert(elems[@as(usize, @intCast(len))] == sentinel); } }, .repeated_elem => |elem| { @@ -3912,7 +3912,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .tag = .only_possible_value, .data = @intFromEnum(aggregate.ty), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } switch (ty_key) { @@ -3940,16 +3940,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .tag = .only_possible_value, .data = @intFromEnum(aggregate.ty), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); }, else => {}, } repeated: { switch (aggregate.storage) { - .bytes => |bytes| for (bytes[1..@intCast(usize, len)]) |byte| + .bytes => |bytes| for (bytes[1..@as(usize, @intCast(len))]) |byte| if (byte != bytes[0]) break :repeated, - .elems => |elems| for (elems[1..@intCast(usize, len)]) |elem| + .elems => |elems| for (elems[1..@as(usize, @intCast(len))]) |elem| if (elem != elems[0]) 
break :repeated, .repeated_elem => {}, } @@ -3979,12 +3979,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .elem_val = elem, }), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } if (child == .u8_type) bytes: { const string_bytes_index = ip.string_bytes.items.len; - try ip.string_bytes.ensureUnusedCapacity(gpa, @intCast(usize, len_including_sentinel + 1)); + try ip.string_bytes.ensureUnusedCapacity(gpa, @as(usize, @intCast(len_including_sentinel + 1))); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); switch (aggregate.storage) { .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes), @@ -3994,15 +3994,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { break :bytes; }, .int => |int| ip.string_bytes.appendAssumeCapacity( - @intCast(u8, int.storage.u64), + @as(u8, @intCast(int.storage.u64)), ), else => unreachable, }, .repeated_elem => |elem| switch (ip.indexToKey(elem)) { .undef => break :bytes, .int => |int| @memset( - ip.string_bytes.addManyAsSliceAssumeCapacity(@intCast(usize, len)), - @intCast(u8, int.storage.u64), + ip.string_bytes.addManyAsSliceAssumeCapacity(@as(usize, @intCast(len))), + @as(u8, @intCast(int.storage.u64)), ), else => unreachable, }, @@ -4010,12 +4010,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const has_internal_null = std.mem.indexOfScalar(u8, ip.string_bytes.items[string_bytes_index..], 0) != null; if (sentinel != .none) ip.string_bytes.appendAssumeCapacity( - @intCast(u8, ip.indexToKey(sentinel).int.storage.u64), + @as(u8, @intCast(ip.indexToKey(sentinel).int.storage.u64)), ); const string = if (has_internal_null) - @enumFromInt(String, string_bytes_index) + @as(String, @enumFromInt(string_bytes_index)) else - (try ip.getOrPutTrailingString(gpa, @intCast(usize, len_including_sentinel))).toString(); + (try ip.getOrPutTrailingString(gpa, @as(usize, @intCast(len_including_sentinel)))).toString(); ip.items.appendAssumeCapacity(.{ .tag = .bytes, .data = ip.addExtraAssumeCapacity(Bytes{ @@ -4023,12 +4023,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .bytes = string, }), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } try ip.extra.ensureUnusedCapacity( gpa, - @typeInfo(Tag.Aggregate).Struct.fields.len + @intCast(usize, len_including_sentinel), + @typeInfo(Tag.Aggregate).Struct.fields.len + @as(usize, @intCast(len_including_sentinel)), ); ip.items.appendAssumeCapacity(.{ .tag = .aggregate, @@ -4036,7 +4036,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .ty = aggregate.ty, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.storage.elems)); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(aggregate.storage.elems))); if (sentinel != .none) ip.extra.appendAssumeCapacity(@intFromEnum(sentinel)); }, @@ -4058,14 +4058,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .tag = .memoized_call, .data = ip.addExtraAssumeCapacity(MemoizedCall{ .func = memoized_call.func, - .args_len = @intCast(u32, memoized_call.arg_values.len), + .args_len = @as(u32, @intCast(memoized_call.arg_values.len)), .result = memoized_call.result, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, memoized_call.arg_values)); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, 
@ptrCast(memoized_call.arg_values))); }, } - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } /// Provides API for completing an enum type after calling `getIncompleteEnum`. @@ -4093,10 +4093,10 @@ pub const IncompleteEnumType = struct { const field_index = map.count(); const strings = ip.extra.items[self.names_start..][0..field_index]; const adapter: NullTerminatedString.Adapter = .{ - .strings = @ptrCast([]const NullTerminatedString, strings), + .strings = @as([]const NullTerminatedString, @ptrCast(strings)), }; const gop = try map.getOrPutAdapted(gpa, name, adapter); - if (gop.found_existing) return @intCast(u32, gop.index); + if (gop.found_existing) return @as(u32, @intCast(gop.index)); ip.extra.items[self.names_start + field_index] = @intFromEnum(name); return null; } @@ -4109,15 +4109,15 @@ pub const IncompleteEnumType = struct { gpa: Allocator, value: Index, ) Allocator.Error!?u32 { - assert(ip.typeOf(value) == @enumFromInt(Index, ip.extra.items[self.tag_ty_index])); + assert(ip.typeOf(value) == @as(Index, @enumFromInt(ip.extra.items[self.tag_ty_index]))); const map = &ip.maps.items[@intFromEnum(self.values_map.unwrap().?)]; const field_index = map.count(); const indexes = ip.extra.items[self.values_start..][0..field_index]; const adapter: Index.Adapter = .{ - .indexes = @ptrCast([]const Index, indexes), + .indexes = @as([]const Index, @ptrCast(indexes)), }; const gop = try map.getOrPutAdapted(gpa, value, adapter); - if (gop.found_existing) return @intCast(u32, gop.index); + if (gop.found_existing) return @as(u32, @intCast(gop.index)); ip.extra.items[self.values_start + field_index] = @intFromEnum(value); return null; } @@ -4177,7 +4177,7 @@ fn getIncompleteEnumAuto( }); ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), enum_type.fields_len); return .{ - .index = @enumFromInt(Index, ip.items.len - 1), + .index = @as(Index, @enumFromInt(ip.items.len - 1)), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .names_map = names_map, .names_start = extra_index + extra_fields_len, @@ -4228,7 +4228,7 @@ fn getIncompleteEnumExplicit( // This is both fields and values (if present). 
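The adapter in `addFieldName` views raw u32 entries of `extra` as `[]const NullTerminatedString` without copying; the same slice reinterpretation appears throughout this file. A standalone sketch of that trick under the new builtin syntax; the `Name` enum is an illustrative stand-in:

const std = @import("std");

const Name = enum(u32) { _ };

test "u32 storage can be viewed as a slice of u32-backed enums" {
    const raw = [_]u32{ 10, 20, 30 };
    const raw_slice: []const u32 = &raw;

    // Same bytes, different element type; each direction is a single @ptrCast.
    const names = @as([]const Name, @ptrCast(raw_slice));
    try std.testing.expectEqual(@as(Name, @enumFromInt(20)), names[1]);

    const back = @as([]const u32, @ptrCast(names));
    try std.testing.expectEqualSlices(u32, raw_slice, back);
}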
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), reserved_len); return .{ - .index = @enumFromInt(Index, ip.items.len - 1), + .index = @as(Index, @enumFromInt(ip.items.len - 1)), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?, .names_map = names_map, .names_start = extra_index + extra_fields_len, @@ -4251,7 +4251,7 @@ pub fn finishGetEnum( try addIndexesToMap(ip, gpa, values_map, enum_type.values); break :m values_map.toOptional(); }; - const fields_len = @intCast(u32, enum_type.names.len); + const fields_len = @as(u32, @intCast(enum_type.names.len)); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len + fields_len); ip.items.appendAssumeCapacity(.{ @@ -4265,15 +4265,15 @@ pub fn finishGetEnum( .values_map = values_map, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names)); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.values)); - return @enumFromInt(Index, ip.items.len - 1); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.names))); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.values))); + return @as(Index, @enumFromInt(ip.items.len - 1)); } pub fn getIfExists(ip: *const InternPool, key: Key) ?Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; const index = ip.map.getIndexAdapted(key, adapter) orelse return null; - return @enumFromInt(Index, index); + return @as(Index, @enumFromInt(index)); } pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { @@ -4311,7 +4311,7 @@ fn addIndexesToMap( fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex { const ptr = try ip.maps.addOne(gpa); ptr.* = .{}; - return @enumFromInt(MapIndex, ip.maps.items.len - 1); + return @as(MapIndex, @enumFromInt(ip.maps.items.len - 1)); } /// This operation only happens under compile error conditions. 
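`finishGetEnum`, `getIfExists`, and `addMap` all end the same way: append an item, then return its position reinterpreted as a typed index enum. A condensed sketch of that pattern; `Item`, `Index`, and the use of `std.ArrayList` are placeholders for the real capacity-reserving, MultiArrayList-based storage:

const std = @import("std");

const Index = enum(u32) { _ };
const Item = struct { tag: u8, data: u32 };

fn append(list: *std.ArrayList(Item), item: Item) !Index {
    try list.append(item);
    // The new element's position, reinterpreted as the typed index.
    return @as(Index, @enumFromInt(list.items.len - 1));
}

test "appending yields a typed index into the list" {
    var list = std.ArrayList(Item).init(std.testing.allocator);
    defer list.deinit();

    const idx = try append(&list, .{ .tag = 1, .data = 42 });
    try std.testing.expectEqual(@as(u32, 0), @intFromEnum(idx));
    try std.testing.expectEqual(@as(u32, 42), list.items[@intFromEnum(idx)].data);
}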
@@ -4320,7 +4320,7 @@ fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex { pub const remove = @compileError("InternPool.remove is not currently a supported operation; put a TODO there instead"); fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void { - const limbs_len = @intCast(u32, limbs.len); + const limbs_len = @as(u32, @intCast(limbs.len)); try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len); ip.items.appendAssumeCapacity(.{ .tag = tag, @@ -4339,7 +4339,7 @@ fn addExtra(ip: *InternPool, gpa: Allocator, extra: anytype) Allocator.Error!u32 } fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { - const result = @intCast(u32, ip.extra.items.len); + const result = @as(u32, @intCast(ip.extra.items.len)); inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { ip.extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), @@ -4354,12 +4354,12 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { String => @intFromEnum(@field(extra, field.name)), NullTerminatedString => @intFromEnum(@field(extra, field.name)), OptionalNullTerminatedString => @intFromEnum(@field(extra, field.name)), - i32 => @bitCast(u32, @field(extra, field.name)), - Tag.TypePointer.Flags => @bitCast(u32, @field(extra, field.name)), - TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)), - Tag.TypePointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), + i32 => @as(u32, @bitCast(@field(extra, field.name))), + Tag.TypePointer.Flags => @as(u32, @bitCast(@field(extra, field.name))), + TypeFunction.Flags => @as(u32, @bitCast(@field(extra, field.name))), + Tag.TypePointer.PackedOffset => @as(u32, @bitCast(@field(extra, field.name))), Tag.TypePointer.VectorIndex => @intFromEnum(@field(extra, field.name)), - Tag.Variable.Flags => @bitCast(u32, @field(extra, field.name)), + Tag.Variable.Flags => @as(u32, @bitCast(@field(extra, field.name))), else => @compileError("bad field type: " ++ @typeName(field.type)), }); } @@ -4380,7 +4380,7 @@ fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { @sizeOf(u64) => {}, else => @compileError("unsupported host"), } - const result = @intCast(u32, ip.limbs.items.len); + const result = @as(u32, @intCast(ip.limbs.items.len)); inline for (@typeInfo(@TypeOf(extra)).Struct.fields, 0..) 
|field, i| { const new: u32 = switch (field.type) { u32 => @field(extra, field.name), @@ -4411,23 +4411,23 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct const int32 = ip.extra.items[i + index]; @field(result, field.name) = switch (field.type) { u32 => int32, - Index => @enumFromInt(Index, int32), - Module.Decl.Index => @enumFromInt(Module.Decl.Index, int32), - Module.Namespace.Index => @enumFromInt(Module.Namespace.Index, int32), - Module.Namespace.OptionalIndex => @enumFromInt(Module.Namespace.OptionalIndex, int32), - Module.Fn.Index => @enumFromInt(Module.Fn.Index, int32), - MapIndex => @enumFromInt(MapIndex, int32), - OptionalMapIndex => @enumFromInt(OptionalMapIndex, int32), - RuntimeIndex => @enumFromInt(RuntimeIndex, int32), - String => @enumFromInt(String, int32), - NullTerminatedString => @enumFromInt(NullTerminatedString, int32), - OptionalNullTerminatedString => @enumFromInt(OptionalNullTerminatedString, int32), - i32 => @bitCast(i32, int32), - Tag.TypePointer.Flags => @bitCast(Tag.TypePointer.Flags, int32), - TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32), - Tag.TypePointer.PackedOffset => @bitCast(Tag.TypePointer.PackedOffset, int32), - Tag.TypePointer.VectorIndex => @enumFromInt(Tag.TypePointer.VectorIndex, int32), - Tag.Variable.Flags => @bitCast(Tag.Variable.Flags, int32), + Index => @as(Index, @enumFromInt(int32)), + Module.Decl.Index => @as(Module.Decl.Index, @enumFromInt(int32)), + Module.Namespace.Index => @as(Module.Namespace.Index, @enumFromInt(int32)), + Module.Namespace.OptionalIndex => @as(Module.Namespace.OptionalIndex, @enumFromInt(int32)), + Module.Fn.Index => @as(Module.Fn.Index, @enumFromInt(int32)), + MapIndex => @as(MapIndex, @enumFromInt(int32)), + OptionalMapIndex => @as(OptionalMapIndex, @enumFromInt(int32)), + RuntimeIndex => @as(RuntimeIndex, @enumFromInt(int32)), + String => @as(String, @enumFromInt(int32)), + NullTerminatedString => @as(NullTerminatedString, @enumFromInt(int32)), + OptionalNullTerminatedString => @as(OptionalNullTerminatedString, @enumFromInt(int32)), + i32 => @as(i32, @bitCast(int32)), + Tag.TypePointer.Flags => @as(Tag.TypePointer.Flags, @bitCast(int32)), + TypeFunction.Flags => @as(TypeFunction.Flags, @bitCast(int32)), + Tag.TypePointer.PackedOffset => @as(Tag.TypePointer.PackedOffset, @bitCast(int32)), + Tag.TypePointer.VectorIndex => @as(Tag.TypePointer.VectorIndex, @enumFromInt(int32)), + Tag.Variable.Flags => @as(Tag.Variable.Flags, @bitCast(int32)), else => @compileError("bad field type: " ++ @typeName(field.type)), }; } @@ -4452,13 +4452,13 @@ fn limbData(ip: *const InternPool, comptime T: type, index: usize) T { inline for (@typeInfo(T).Struct.fields, 0..) 
|field, i| { const host_int = ip.limbs.items[index + i / 2]; const int32 = if (i % 2 == 0) - @truncate(u32, host_int) + @as(u32, @truncate(host_int)) else - @truncate(u32, host_int >> 32); + @as(u32, @truncate(host_int >> 32)); @field(result, field.name) = switch (field.type) { u32 => int32, - Index => @enumFromInt(Index, int32), + Index => @as(Index, @enumFromInt(int32)), else => @compileError("bad field type: " ++ @typeName(field.type)), }; } @@ -4494,8 +4494,8 @@ fn limbsSliceToIndex(ip: *const InternPool, limbs: []const Limb) LimbsAsIndexes }; // TODO: https://github.com/ziglang/zig/issues/1738 return .{ - .start = @intCast(u32, @divExact(@intFromPtr(limbs.ptr) - @intFromPtr(host_slice.ptr), @sizeOf(Limb))), - .len = @intCast(u32, limbs.len), + .start = @as(u32, @intCast(@divExact(@intFromPtr(limbs.ptr) - @intFromPtr(host_slice.ptr), @sizeOf(Limb)))), + .len = @as(u32, @intCast(limbs.len)), }; } @@ -4557,7 +4557,7 @@ pub fn slicePtrType(ip: *const InternPool, i: Index) Index { } const item = ip.items.get(@intFromEnum(i)); switch (item.tag) { - .type_slice => return @enumFromInt(Index, item.data), + .type_slice => return @as(Index, @enumFromInt(item.data)), else => unreachable, // not a slice type } } @@ -4727,7 +4727,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .val = error_union.val, } }), .aggregate => |aggregate| { - const new_len = @intCast(usize, ip.aggregateTypeLen(new_ty)); + const new_len = @as(usize, @intCast(ip.aggregateTypeLen(new_ty))); direct: { const old_ty_child = switch (ip.indexToKey(old_ty)) { inline .array_type, .vector_type => |seq_type| seq_type.child, @@ -4862,7 +4862,7 @@ pub fn indexToStructType(ip: *const InternPool, val: Index) Module.Struct.Option const tags = ip.items.items(.tag); if (tags[@intFromEnum(val)] != .type_struct) return .none; const datas = ip.items.items(.data); - return @enumFromInt(Module.Struct.Index, datas[@intFromEnum(val)]).toOptional(); + return @as(Module.Struct.Index, @enumFromInt(datas[@intFromEnum(val)])).toOptional(); } pub fn indexToUnionType(ip: *const InternPool, val: Index) Module.Union.OptionalIndex { @@ -4873,7 +4873,7 @@ pub fn indexToUnionType(ip: *const InternPool, val: Index) Module.Union.Optional else => return .none, } const datas = ip.items.items(.data); - return @enumFromInt(Module.Union.Index, datas[@intFromEnum(val)]).toOptional(); + return @as(Module.Union.Index, @enumFromInt(datas[@intFromEnum(val)])).toOptional(); } pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType { @@ -4899,7 +4899,7 @@ pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.Fn. 
const tags = ip.items.items(.tag); if (tags[@intFromEnum(val)] != .type_inferred_error_set) return .none; const datas = ip.items.items(.data); - return @enumFromInt(Module.Fn.InferredErrorSet.Index, datas[@intFromEnum(val)]).toOptional(); + return @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(datas[@intFromEnum(val)])).toOptional(); } /// includes .comptime_int_type @@ -5057,7 +5057,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .type_enum_auto => @sizeOf(EnumAuto), .type_opaque => @sizeOf(Key.OpaqueType), .type_struct => b: { - const struct_index = @enumFromInt(Module.Struct.Index, data); + const struct_index = @as(Module.Struct.Index, @enumFromInt(data)); const struct_obj = ip.structPtrConst(struct_index); break :b @sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @@ -5124,13 +5124,13 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .bytes => b: { const info = ip.extraData(Bytes, data); - const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty)); + const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty))); break :b @sizeOf(Bytes) + len + @intFromBool(ip.string_bytes.items[@intFromEnum(info.bytes) + len - 1] != 0); }, .aggregate => b: { const info = ip.extraData(Tag.Aggregate, data); - const fields_len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty)); + const fields_len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty))); break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len); }, .repeated => @sizeOf(Repeated), @@ -5181,8 +5181,8 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void { for (tags, datas, 0..) |tag, data, i| { try w.print("${d} = {s}(", .{ i, @tagName(tag) }); switch (tag) { - .simple_type => try w.print("{s}", .{@tagName(@enumFromInt(SimpleType, data))}), - .simple_value => try w.print("{s}", .{@tagName(@enumFromInt(SimpleValue, data))}), + .simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(data)))}), + .simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(data)))}), .type_int_signed, .type_int_unsigned, @@ -5311,7 +5311,7 @@ pub fn createStruct( } const ptr = try ip.allocated_structs.addOne(gpa); ptr.* = initialization; - return @enumFromInt(Module.Struct.Index, ip.allocated_structs.len - 1); + return @as(Module.Struct.Index, @enumFromInt(ip.allocated_structs.len - 1)); } pub fn destroyStruct(ip: *InternPool, gpa: Allocator, index: Module.Struct.Index) void { @@ -5333,7 +5333,7 @@ pub fn createUnion( } const ptr = try ip.allocated_unions.addOne(gpa); ptr.* = initialization; - return @enumFromInt(Module.Union.Index, ip.allocated_unions.len - 1); + return @as(Module.Union.Index, @enumFromInt(ip.allocated_unions.len - 1)); } pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) void { @@ -5355,7 +5355,7 @@ pub fn createFunc( } const ptr = try ip.allocated_funcs.addOne(gpa); ptr.* = initialization; - return @enumFromInt(Module.Fn.Index, ip.allocated_funcs.len - 1); + return @as(Module.Fn.Index, @enumFromInt(ip.allocated_funcs.len - 1)); } pub fn destroyFunc(ip: *InternPool, gpa: Allocator, index: Module.Fn.Index) void { @@ -5377,7 +5377,7 @@ pub fn createInferredErrorSet( } const ptr = try ip.allocated_inferred_error_sets.addOne(gpa); ptr.* = initialization; - return @enumFromInt(Module.Fn.InferredErrorSet.Index, ip.allocated_inferred_error_sets.len - 1); + return @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(ip.allocated_inferred_error_sets.len - 
1)); } pub fn destroyInferredErrorSet(ip: *InternPool, gpa: Allocator, index: Module.Fn.InferredErrorSet.Index) void { @@ -5406,7 +5406,7 @@ pub fn getOrPutStringFmt( args: anytype, ) Allocator.Error!NullTerminatedString { // ensure that references to string_bytes in args do not get invalidated - const len = @intCast(usize, std.fmt.count(format, args) + 1); + const len = @as(usize, @intCast(std.fmt.count(format, args) + 1)); try ip.string_bytes.ensureUnusedCapacity(gpa, len); ip.string_bytes.writer(undefined).print(format, args) catch unreachable; ip.string_bytes.appendAssumeCapacity(0); @@ -5430,7 +5430,7 @@ pub fn getOrPutTrailingString( len: usize, ) Allocator.Error!NullTerminatedString { const string_bytes = &ip.string_bytes; - const str_index = @intCast(u32, string_bytes.items.len - len); + const str_index = @as(u32, @intCast(string_bytes.items.len - len)); if (len > 0 and string_bytes.getLast() == 0) { _ = string_bytes.pop(); } else { @@ -5444,11 +5444,11 @@ pub fn getOrPutTrailingString( }); if (gop.found_existing) { string_bytes.shrinkRetainingCapacity(str_index); - return @enumFromInt(NullTerminatedString, gop.key_ptr.*); + return @as(NullTerminatedString, @enumFromInt(gop.key_ptr.*)); } else { gop.key_ptr.* = str_index; string_bytes.appendAssumeCapacity(0); - return @enumFromInt(NullTerminatedString, str_index); + return @as(NullTerminatedString, @enumFromInt(str_index)); } } @@ -5456,7 +5456,7 @@ pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString { if (ip.string_table.getKeyAdapted(s, std.hash_map.StringIndexAdapter{ .bytes = &ip.string_bytes, })) |index| { - return @enumFromInt(NullTerminatedString, index).toOptional(); + return @as(NullTerminatedString, @enumFromInt(index)).toOptional(); } else { return .none; } @@ -5596,7 +5596,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .undef, .opt_null, .only_possible_value, - => @enumFromInt(Index, ip.items.items(.data)[@intFromEnum(index)]), + => @as(Index, @enumFromInt(ip.items.items(.data)[@intFromEnum(index)])), .simple_value => unreachable, // handled via Index above @@ -5628,7 +5628,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { => |t| { const extra_index = ip.items.items(.data)[@intFromEnum(index)]; const field_index = std.meta.fieldIndex(t.Payload(), "ty").?; - return @enumFromInt(Index, ip.extra.items[extra_index + field_index]); + return @as(Index, @enumFromInt(ip.extra.items[extra_index + field_index])); }, .int_u8 => .u8_type, @@ -5670,7 +5670,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { /// Assumes that the enum's field indexes equal its value tags. pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E { const int = ip.indexToKey(i).enum_tag.int; - return @enumFromInt(E, ip.indexToKey(int).int.storage.u64); + return @as(E, @enumFromInt(ip.indexToKey(int).int.storage.u64)); } pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 { @@ -5703,9 +5703,9 @@ pub fn funcReturnType(ip: *const InternPool, ty: Index) Index { else => unreachable, }; assert(child_item.tag == .type_function); - return @enumFromInt(Index, ip.extra.items[ + return @as(Index, @enumFromInt(ip.extra.items[ child_item.data + std.meta.fieldIndex(TypeFunction, "return_type").? 
- ]); + ])); } pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { @@ -5736,9 +5736,9 @@ pub fn getBackingDecl(ip: *const InternPool, val: Index) Module.Decl.OptionalInd switch (ip.items.items(.tag)[base]) { inline .ptr_decl, .ptr_mut_decl, - => |tag| return @enumFromInt(Module.Decl.OptionalIndex, ip.extra.items[ + => |tag| return @as(Module.Decl.OptionalIndex, @enumFromInt(ip.extra.items[ ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "decl").? - ]), + ])), inline .ptr_eu_payload, .ptr_opt_payload, .ptr_elem, diff --git a/src/Liveness.zig b/src/Liveness.zig index 1141b8620c..ab7c612758 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -178,14 +178,14 @@ pub fn analyze(gpa: Allocator, air: Air, intern_pool: *const InternPool) Allocat pub fn getTombBits(l: Liveness, inst: Air.Inst.Index) Bpi { const usize_index = (inst * bpi) / @bitSizeOf(usize); - return @truncate(Bpi, l.tomb_bits[usize_index] >> - @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi)); + return @as(Bpi, @truncate(l.tomb_bits[usize_index] >> + @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi)))); } pub fn isUnused(l: Liveness, inst: Air.Inst.Index) bool { const usize_index = (inst * bpi) / @bitSizeOf(usize); const mask = @as(usize, 1) << - @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); + @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1))); return (l.tomb_bits[usize_index] & mask) != 0; } @@ -193,7 +193,7 @@ pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); const mask = @as(usize, 1) << - @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand)); return (l.tomb_bits[usize_index] & mask) != 0; } @@ -201,7 +201,7 @@ pub fn clearOperandDeath(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); const mask = @as(usize, 1) << - @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand)); l.tomb_bits[usize_index] &= ~mask; } @@ -484,11 +484,11 @@ pub fn categorizeOperand( const inst_data = air_datas[inst].pl_op; const callee = inst_data.operand; const extra = air.extraData(Air.Call, inst_data.payload); - const args = @ptrCast([]const Air.Inst.Ref, air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(air.extra[extra.end..][0..extra.data.args_len])); if (args.len + 1 <= bpi - 1) { if (callee == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write); for (args, 0..) 
|arg, i| { - if (arg == operand_ref) return matchOperandSmallIndex(l, inst, @intCast(OperandInt, i + 1), .write); + if (arg == operand_ref) return matchOperandSmallIndex(l, inst, @as(OperandInt, @intCast(i + 1)), .write); } return .write; } @@ -535,12 +535,12 @@ pub fn categorizeOperand( .aggregate_init => { const ty_pl = air_datas[inst].ty_pl; const aggregate_ty = air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); - const elements = @ptrCast([]const Air.Inst.Ref, air.extra[ty_pl.payload..][0..len]); + const len = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip))); + const elements = @as([]const Air.Inst.Ref, @ptrCast(air.extra[ty_pl.payload..][0..len])); if (elements.len <= bpi - 1) { for (elements, 0..) |elem, i| { - if (elem == operand_ref) return matchOperandSmallIndex(l, inst, @intCast(OperandInt, i), .none); + if (elem == operand_ref) return matchOperandSmallIndex(l, inst, @as(OperandInt, @intCast(i)), .none); } return .none; } @@ -808,20 +808,20 @@ pub const BigTomb = struct { const small_tombs = bpi - 1; if (this_bit_index < small_tombs) { - const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0; + const dies = @as(u1, @truncate(bt.tomb_bits >> @as(Liveness.OperandInt, @intCast(this_bit_index)))) != 0; return dies; } const big_bit_index = this_bit_index - small_tombs; while (big_bit_index - bt.extra_offset * 31 >= 31) { - if (@truncate(u1, bt.extra[bt.extra_start + bt.extra_offset] >> 31) != 0) { + if (@as(u1, @truncate(bt.extra[bt.extra_start + bt.extra_offset] >> 31)) != 0) { bt.reached_end = true; return false; } bt.extra_offset += 1; } - const dies = @truncate(u1, bt.extra[bt.extra_start + bt.extra_offset] >> - @intCast(u5, big_bit_index - bt.extra_offset * 31)) != 0; + const dies = @as(u1, @truncate(bt.extra[bt.extra_start + bt.extra_offset] >> + @as(u5, @intCast(big_bit_index - bt.extra_offset * 31)))) != 0; return dies; } }; @@ -838,7 +838,7 @@ const Analysis = struct { fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { const usize_index = (inst * bpi) / @bitSizeOf(usize); a.tomb_bits[usize_index] |= @as(usize, tomb_bits) << - @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi); + @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi)); } fn addExtra(a: *Analysis, extra: anytype) Allocator.Error!u32 { @@ -849,7 +849,7 @@ const Analysis = struct { fn addExtraAssumeCapacity(a: *Analysis, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, a.extra.items.len); + const result = @as(u32, @intCast(a.extra.items.len)); inline for (fields) |field| { a.extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), @@ -1108,7 +1108,7 @@ fn analyzeInst( const inst_data = inst_datas[inst].pl_op; const callee = inst_data.operand; const extra = a.air.extraData(Air.Call, inst_data.payload); - const args = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[extra.end..][0..extra.data.args_len])); if (args.len + 1 <= bpi - 1) { var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1); buf[0] = callee; @@ -1146,8 +1146,8 @@ fn analyzeInst( .aggregate_init => { const ty_pl = inst_datas[inst].ty_pl; const aggregate_ty = a.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); - const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]); + const len = @as(usize, 
@intCast(aggregate_ty.arrayLenIp(ip))); + const elements = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[ty_pl.payload..][0..len])); if (elements.len <= bpi - 1) { var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1); @@ -1200,9 +1200,9 @@ fn analyzeInst( .assembly => { const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload); var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; const num_operands = simple: { @@ -1310,7 +1310,7 @@ fn analyzeOperands( // Don't compute any liveness for constants if (inst_tags[operand] == .interned) continue; - const mask = @as(Bpi, 1) << @intCast(OperandInt, i); + const mask = @as(Bpi, 1) << @as(OperandInt, @intCast(i)); if ((try data.live_set.fetchPut(gpa, operand, {})) == null) { log.debug("[{}] %{}: added %{} to live set (operand dies here)", .{ pass, inst, operand }); @@ -1320,7 +1320,7 @@ fn analyzeOperands( } a.tomb_bits[usize_index] |= @as(usize, tomb_bits) << - @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi); + @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi)); }, } } @@ -1472,7 +1472,7 @@ fn analyzeInstLoop( const num_breaks = data.breaks.count(); try a.extra.ensureUnusedCapacity(gpa, 1 + num_breaks); - const extra_index = @intCast(u32, a.extra.items.len); + const extra_index = @as(u32, @intCast(a.extra.items.len)); a.extra.appendAssumeCapacity(num_breaks); var it = data.breaks.keyIterator(); @@ -1523,7 +1523,7 @@ fn analyzeInstLoop( // This is necessarily not in the same control flow branch, because loops are noreturn data.live_set.clearRetainingCapacity(); - try data.live_set.ensureUnusedCapacity(gpa, @intCast(u32, loop_live.len)); + try data.live_set.ensureUnusedCapacity(gpa, @as(u32, @intCast(loop_live.len))); for (loop_live) |alive| { data.live_set.putAssumeCapacity(alive, {}); } @@ -1647,8 +1647,8 @@ fn analyzeInstCondBr( log.debug("[{}] %{}: new live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) }); // Write the mirrored deaths to `extra` - const then_death_count = @intCast(u32, then_mirrored_deaths.items.len); - const else_death_count = @intCast(u32, else_mirrored_deaths.items.len); + const then_death_count = @as(u32, @intCast(then_mirrored_deaths.items.len)); + const else_death_count = @as(u32, @intCast(else_mirrored_deaths.items.len)); try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(CondBr).len + then_death_count + else_death_count); const extra_index = a.addExtraAssumeCapacity(CondBr{ .then_death_count = then_death_count, @@ -1758,12 +1758,12 @@ fn analyzeInstSwitchBr( log.debug("[{}] %{}: new live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) }); } - const else_death_count = @intCast(u32, mirrored_deaths[ncases].items.len); + const else_death_count = @as(u32, @intCast(mirrored_deaths[ncases].items.len)); const extra_index = try a.addExtra(SwitchBr{ .else_death_count = else_death_count, }); for (mirrored_deaths[0..ncases]) |mirrored| { - const num = @intCast(u32, mirrored.items.len); + const num = @as(u32, @intCast(mirrored.items.len)); try a.extra.ensureUnusedCapacity(gpa, num + 1); a.extra.appendAssumeCapacity(num); 
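The Liveness hunks keep the existing packing of `bpi` tomb bits per instruction into `usize` words; only the cast spelling changes. A reduced sketch of the unpacking, with `bpi` fixed at 4 purely for illustration:

const std = @import("std");

const bpi = 4; // illustrative value for this sketch
const Bpi = std.meta.Int(.unsigned, bpi);

fn getTombBits(tomb_bits: []const usize, inst: usize) Bpi {
    const per_word = @bitSizeOf(usize) / bpi;
    const word = tomb_bits[inst / per_word];
    const shift = @as(std.math.Log2Int(usize), @intCast((inst % per_word) * bpi));
    return @as(Bpi, @truncate(word >> shift));
}

test "tomb bits unpack from packed usize words" {
    // Instruction 1 owns bits 4..7 of word 0; store 0b1010 there.
    const words = [_]usize{@as(usize, 0b1010) << 4};
    try std.testing.expectEqual(@as(Bpi, 0b1010), getTombBits(&words, 1));
}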
a.extra.appendSliceAssumeCapacity(mirrored.items); @@ -1798,7 +1798,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { inst: Air.Inst.Index, total_operands: usize, ) !Self { - const extra_operands = @intCast(u32, total_operands) -| (bpi - 1); + const extra_operands = @as(u32, @intCast(total_operands)) -| (bpi - 1); const max_extra_tombs = (extra_operands + 30) / 31; const extra_tombs: []u32 = switch (pass) { @@ -1818,7 +1818,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { .a = a, .data = data, .inst = inst, - .operands_remaining = @intCast(u32, total_operands), + .operands_remaining = @as(u32, @intCast(total_operands)), .extra_tombs = extra_tombs, .will_die_immediately = will_die_immediately, }; @@ -1847,7 +1847,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip)) return; const extra_byte = (big.operands_remaining - (bpi - 1)) / 31; - const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31); + const extra_bit = @as(u5, @intCast(big.operands_remaining - (bpi - 1) - extra_byte * 31)); const gpa = big.a.gpa; @@ -1881,7 +1881,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { // keep at least one. var num: usize = big.extra_tombs.len; while (num > 1) { - if (@truncate(u31, big.extra_tombs[num - 1]) != 0) { + if (@as(u31, @truncate(big.extra_tombs[num - 1])) != 0) { // Some operand dies here break; } @@ -1892,7 +1892,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { const extra_tombs = big.extra_tombs[0..num]; - const extra_index = @intCast(u32, big.a.extra.items.len); + const extra_index = @as(u32, @intCast(big.a.extra.items.len)); try big.a.extra.appendSlice(gpa, extra_tombs); try big.a.special.put(gpa, big.inst, extra_index); }, diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index 904e380073..128a2d69b7 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -325,8 +325,8 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .aggregate_init => { const ty_pl = data[inst].ty_pl; const aggregate_ty = self.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); - const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); + const len = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip))); + const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); var bt = self.liveness.iterateBigTomb(inst); for (elements) |element| { @@ -337,9 +337,9 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .call, .call_always_tail, .call_never_tail, .call_never_inline => { const pl_op = data[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast( + const args = @as( []const Air.Inst.Ref, - self.air.extra[extra.end..][0..extra.data.args_len], + @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]), ); var bt = self.liveness.iterateBigTomb(inst); @@ -353,14 +353,14 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); var extra_i = extra.end; - const outputs = @ptrCast( + const outputs = @as( []const Air.Inst.Ref, - self.air.extra[extra_i..][0..extra.data.outputs_len], + @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]), ); extra_i += outputs.len; - const inputs = @ptrCast( + const inputs = @as( []const Air.Inst.Ref, - 
self.air.extra[extra_i..][0..extra.data.inputs_len], + @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]), ); extra_i += inputs.len; @@ -521,9 +521,9 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast( + const items = @as( []const Air.Inst.Ref, - self.air.extra[case.end..][0..case.data.items_len], + @ptrCast(self.air.extra[case.end..][0..case.data.items_len]), ); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + items.len + case_body.len; @@ -576,7 +576,7 @@ fn verifyInstOperands( operands: [Liveness.bpi - 1]Air.Inst.Ref, ) Error!void { for (operands, 0..) |operand, operand_index| { - const dies = self.liveness.operandDies(inst, @intCast(Liveness.OperandInt, operand_index)); + const dies = self.liveness.operandDies(inst, @as(Liveness.OperandInt, @intCast(operand_index))); try self.verifyOperand(inst, operand, dies); } try self.verifyInst(inst); diff --git a/src/Manifest.zig b/src/Manifest.zig index 0549287e60..199663556d 100644 --- a/src/Manifest.zig +++ b/src/Manifest.zig @@ -102,7 +102,7 @@ pub fn hex64(x: u64) [16]u8 { var result: [16]u8 = undefined; var i: usize = 0; while (i < 8) : (i += 1) { - const byte = @truncate(u8, x >> @intCast(u6, 8 * i)); + const byte = @as(u8, @truncate(x >> @as(u6, @intCast(8 * i)))); result[i * 2 + 0] = hex_charset[byte >> 4]; result[i * 2 + 1] = hex_charset[byte & 15]; } @@ -284,7 +284,7 @@ const Parse = struct { @errorName(err), }); }; - if (@enumFromInt(MultihashFunction, their_multihash_func) != multihash_function) { + if (@as(MultihashFunction, @enumFromInt(their_multihash_func)) != multihash_function) { return fail(p, tok, "unsupported hash function: only sha2-256 is supported", .{}); } } @@ -345,7 +345,7 @@ const Parse = struct { .invalid_escape_character => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "invalid escape character: '{c}'", .{raw_string[bad_index]}, ); @@ -353,7 +353,7 @@ const Parse = struct { .expected_hex_digit => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "expected hex digit, found '{c}'", .{raw_string[bad_index]}, ); @@ -361,7 +361,7 @@ const Parse = struct { .empty_unicode_escape_sequence => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "empty unicode escape sequence", .{}, ); @@ -369,7 +369,7 @@ const Parse = struct { .expected_hex_digit_or_rbrace => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "expected hex digit or '}}', found '{c}'", .{raw_string[bad_index]}, ); @@ -377,7 +377,7 @@ const Parse = struct { .invalid_unicode_codepoint => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "unicode escape does not correspond to a valid codepoint", .{}, ); @@ -385,7 +385,7 @@ const Parse = struct { .expected_lbrace => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "expected '{{', found '{c}", .{raw_string[bad_index]}, ); @@ -393,7 +393,7 @@ const Parse = struct { .expected_rbrace => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, 
bad_index), + offset + @as(u32, @intCast(bad_index)), "expected '}}', found '{c}", .{raw_string[bad_index]}, ); @@ -401,7 +401,7 @@ const Parse = struct { .expected_single_quote => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "expected single quote ('), found '{c}", .{raw_string[bad_index]}, ); @@ -409,7 +409,7 @@ const Parse = struct { .invalid_character => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "invalid byte in string or character literal: '{c}'", .{raw_string[bad_index]}, ); diff --git a/src/Module.zig b/src/Module.zig index 70b9c9bdbb..f88f047578 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -554,7 +554,7 @@ pub const Decl = struct { _, pub fn toOptional(i: Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(i)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -563,12 +563,12 @@ pub const Decl = struct { _, pub fn init(oi: ?Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); } pub fn unwrap(oi: OptionalIndex) ?Index { if (oi == .none) return null; - return @enumFromInt(Index, @intFromEnum(oi)); + return @as(Index, @enumFromInt(@intFromEnum(oi))); } }; @@ -619,7 +619,7 @@ pub const Decl = struct { pub fn contentsHashZir(decl: Decl, zir: Zir) std.zig.SrcHash { assert(decl.zir_decl_index != 0); const hash_u32s = zir.extra[decl.zir_decl_index..][0..4]; - const contents_hash = @bitCast(std.zig.SrcHash, hash_u32s.*); + const contents_hash = @as(std.zig.SrcHash, @bitCast(hash_u32s.*)); return contents_hash; } @@ -633,7 +633,7 @@ pub const Decl = struct { if (!decl.has_align) return .none; assert(decl.zir_decl_index != 0); const zir = decl.getFileScope(mod).zir; - return @enumFromInt(Zir.Inst.Ref, zir.extra[decl.zir_decl_index + 8]); + return @as(Zir.Inst.Ref, @enumFromInt(zir.extra[decl.zir_decl_index + 8])); } pub fn zirLinksectionRef(decl: Decl, mod: *Module) Zir.Inst.Ref { @@ -641,7 +641,7 @@ pub const Decl = struct { assert(decl.zir_decl_index != 0); const zir = decl.getFileScope(mod).zir; const extra_index = decl.zir_decl_index + 8 + @intFromBool(decl.has_align); - return @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + return @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); } pub fn zirAddrspaceRef(decl: Decl, mod: *Module) Zir.Inst.Ref { @@ -649,7 +649,7 @@ pub const Decl = struct { assert(decl.zir_decl_index != 0); const zir = decl.getFileScope(mod).zir; const extra_index = decl.zir_decl_index + 8 + @intFromBool(decl.has_align) + 1; - return @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + return @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); } pub fn relativeToLine(decl: Decl, offset: u32) u32 { @@ -657,11 +657,11 @@ pub const Decl = struct { } pub fn relativeToNodeIndex(decl: Decl, offset: i32) Ast.Node.Index { - return @bitCast(Ast.Node.Index, offset + @bitCast(i32, decl.src_node)); + return @as(Ast.Node.Index, @bitCast(offset + @as(i32, @bitCast(decl.src_node)))); } pub fn nodeIndexToRelative(decl: Decl, node_index: Ast.Node.Index) i32 { - return @bitCast(i32, node_index) - @bitCast(i32, decl.src_node); + return @as(i32, @bitCast(node_index)) - @as(i32, @bitCast(decl.src_node)); } pub fn tokSrcLoc(decl: Decl, token_index: Ast.TokenIndex) LazySrcLoc { @@ -864,7 +864,7 @@ pub const Decl = struct { pub fn getAlignment(decl: 
Decl, mod: *Module) u32 { assert(decl.has_tv); - return @intCast(u32, decl.alignment.toByteUnitsOptional() orelse decl.ty.abiAlignment(mod)); + return @as(u32, @intCast(decl.alignment.toByteUnitsOptional() orelse decl.ty.abiAlignment(mod))); } pub fn intern(decl: *Decl, mod: *Module) Allocator.Error!void { @@ -922,7 +922,7 @@ pub const Struct = struct { _, pub fn toOptional(i: Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(i)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -931,12 +931,12 @@ pub const Struct = struct { _, pub fn init(oi: ?Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); } pub fn unwrap(oi: OptionalIndex) ?Index { if (oi == .none) return null; - return @enumFromInt(Index, @intFromEnum(oi)); + return @as(Index, @enumFromInt(@intFromEnum(oi))); } }; @@ -964,7 +964,7 @@ pub const Struct = struct { ) u32 { if (field.abi_align.toByteUnitsOptional()) |abi_align| { assert(layout != .Packed); - return @intCast(u32, abi_align); + return @as(u32, @intCast(abi_align)); } const target = mod.getTarget(); @@ -1042,7 +1042,7 @@ pub const Struct = struct { var bit_sum: u64 = 0; for (s.fields.values(), 0..) |field, i| { if (i == index) { - return @intCast(u16, bit_sum); + return @as(u16, @intCast(bit_sum)); } bit_sum += field.ty.bitSize(mod); } @@ -1123,7 +1123,7 @@ pub const Union = struct { _, pub fn toOptional(i: Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(i)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -1132,12 +1132,12 @@ pub const Union = struct { _, pub fn init(oi: ?Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); } pub fn unwrap(oi: OptionalIndex) ?Index { if (oi == .none) return null; - return @enumFromInt(Index, @intFromEnum(oi)); + return @as(Index, @enumFromInt(@intFromEnum(oi))); } }; @@ -1151,7 +1151,7 @@ pub const Union = struct { /// Keep implementation in sync with `Sema.unionFieldAlignment`. /// Prefer to call that function instead of this one during Sema. pub fn normalAlignment(field: Field, mod: *Module) u32 { - return @intCast(u32, field.abi_align.toByteUnitsOptional() orelse field.ty.abiAlignment(mod)); + return @as(u32, @intCast(field.abi_align.toByteUnitsOptional() orelse field.ty.abiAlignment(mod))); } }; @@ -1205,7 +1205,7 @@ pub const Union = struct { most_index = i; } } - return @intCast(u32, most_index); + return @as(u32, @intCast(most_index)); } /// Returns 0 if the union is represented with 0 bits at runtime. 
@@ -1267,11 +1267,11 @@ pub const Union = struct { const field_size = field.ty.abiSize(mod); if (field_size > payload_size) { payload_size = field_size; - biggest_field = @intCast(u32, i); + biggest_field = @as(u32, @intCast(i)); } if (field_align > payload_align) { - payload_align = @intCast(u32, field_align); - most_aligned_field = @intCast(u32, i); + payload_align = @as(u32, @intCast(field_align)); + most_aligned_field = @as(u32, @intCast(i)); most_aligned_field_size = field_size; } } @@ -1303,7 +1303,7 @@ pub const Union = struct { size += payload_size; const prev_size = size; size = std.mem.alignForward(u64, size, tag_align); - padding = @intCast(u32, size - prev_size); + padding = @as(u32, @intCast(size - prev_size)); } else { // {Payload, Tag} size += payload_size; @@ -1311,7 +1311,7 @@ pub const Union = struct { size += tag_size; const prev_size = size; size = std.mem.alignForward(u64, size, payload_align); - padding = @intCast(u32, size - prev_size); + padding = @as(u32, @intCast(size - prev_size)); } return .{ .abi_size = size, @@ -1409,7 +1409,7 @@ pub const Fn = struct { _, pub fn toOptional(i: Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(i)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -1418,12 +1418,12 @@ pub const Fn = struct { _, pub fn init(oi: ?Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); } pub fn unwrap(oi: OptionalIndex) ?Index { if (oi == .none) return null; - return @enumFromInt(Index, @intFromEnum(oi)); + return @as(Index, @enumFromInt(@intFromEnum(oi))); } }; @@ -1477,7 +1477,7 @@ pub const Fn = struct { _, pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex { - return @enumFromInt(InferredErrorSet.OptionalIndex, @intFromEnum(i)); + return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -1486,12 +1486,12 @@ pub const Fn = struct { _, pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex { - return @enumFromInt(InferredErrorSet.OptionalIndex, @intFromEnum(oi orelse return .none)); + return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); } pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index { if (oi == .none) return null; - return @enumFromInt(InferredErrorSet.Index, @intFromEnum(oi)); + return @as(InferredErrorSet.Index, @enumFromInt(@intFromEnum(oi))); } }; @@ -1613,7 +1613,7 @@ pub const Namespace = struct { _, pub fn toOptional(i: Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(i)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -1622,12 +1622,12 @@ pub const Namespace = struct { _, pub fn init(oi: ?Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); } pub fn unwrap(oi: OptionalIndex) ?Index { if (oi == .none) return null; - return @enumFromInt(Index, @intFromEnum(oi)); + return @as(Index, @enumFromInt(@intFromEnum(oi))); } }; @@ -1867,7 +1867,7 @@ pub const File = struct { if (stat.size > std.math.maxInt(u32)) return error.FileTooBig; - const source = try gpa.allocSentinel(u8, @intCast(usize, stat.size), 0); + const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); defer if (!file.source_loaded) gpa.free(source); const amt = try f.readAll(source); if (amt 
!= stat.size) @@ -2116,7 +2116,7 @@ pub const SrcLoc = struct { } pub fn declRelativeToNodeIndex(src_loc: SrcLoc, offset: i32) Ast.TokenIndex { - return @bitCast(Ast.Node.Index, offset + @bitCast(i32, src_loc.parent_decl_node)); + return @as(Ast.Node.Index, @bitCast(offset + @as(i32, @bitCast(src_loc.parent_decl_node)))); } pub const Span = struct { @@ -2135,7 +2135,7 @@ pub const SrcLoc = struct { .token_abs => |tok_index| { const tree = try src_loc.file_scope.getTree(gpa); const start = tree.tokens.items(.start)[tok_index]; - const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len)); return Span{ .start = start, .end = end, .main = start }; }, .node_abs => |node| { @@ -2146,14 +2146,14 @@ pub const SrcLoc = struct { const tree = try src_loc.file_scope.getTree(gpa); const tok_index = src_loc.declSrcToken(); const start = tree.tokens.items(.start)[tok_index] + byte_off; - const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len)); return Span{ .start = start, .end = end, .main = start }; }, .token_offset => |tok_off| { const tree = try src_loc.file_scope.getTree(gpa); const tok_index = src_loc.declSrcToken() + tok_off; const start = tree.tokens.items(.start)[tok_index]; - const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len)); return Span{ .start = start, .end = end, .main = start }; }, .node_offset => |traced_off| { @@ -2206,7 +2206,7 @@ pub const SrcLoc = struct { } const tok_index = full.ast.mut_token + 1; // the name token const start = tree.tokens.items(.start)[tok_index]; - const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len)); return Span{ .start = start, .end = end, .main = start }; }, .node_offset_var_decl_align => |node_off| { @@ -2292,7 +2292,7 @@ pub const SrcLoc = struct { else => tree.firstToken(node) - 2, }; const start = tree.tokens.items(.start)[tok_index]; - const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len)); return Span{ .start = start, .end = end, .main = start }; }, .node_offset_deref_ptr => |node_off| { @@ -2359,7 +2359,7 @@ pub const SrcLoc = struct { // that contains this input. const node_tags = tree.nodes.items(.tag); for (node_tags, 0..) 
|node_tag, node_usize| { - const node = @intCast(Ast.Node.Index, node_usize); + const node = @as(Ast.Node.Index, @intCast(node_usize)); switch (node_tag) { .for_simple, .@"for" => { const for_full = tree.fullFor(node).?; @@ -2479,7 +2479,7 @@ pub const SrcLoc = struct { }; const start = tree.tokens.items(.start)[start_tok]; const end_start = tree.tokens.items(.start)[end_tok]; - const end = end_start + @intCast(u32, tree.tokenSlice(end_tok).len); + const end = end_start + @as(u32, @intCast(tree.tokenSlice(end_tok).len)); return Span{ .start = start, .end = end, .main = start }; }, .node_offset_fn_type_align => |node_off| { @@ -2539,7 +2539,7 @@ pub const SrcLoc = struct { const tree = try src_loc.file_scope.getTree(gpa); const token_tags = tree.tokens.items(.tag); const main_token = tree.nodes.items(.main_token)[src_loc.parent_decl_node]; - const tok_index = @bitCast(Ast.TokenIndex, token_off + @bitCast(i32, main_token)); + const tok_index = @as(Ast.TokenIndex, @bitCast(token_off + @as(i32, @bitCast(main_token)))); var first_tok = tok_index; while (true) switch (token_tags[first_tok - 1]) { @@ -2568,7 +2568,7 @@ pub const SrcLoc = struct { const full = tree.fullFnProto(&buf, parent_node).?; const tok_index = full.lib_name.?; const start = tree.tokens.items(.start)[tok_index]; - const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len)); return Span{ .start = start, .end = end, .main = start }; }, @@ -2761,7 +2761,7 @@ pub const SrcLoc = struct { end_tok = main; } const start_off = token_starts[start_tok]; - const end_off = token_starts[end_tok] + @intCast(u32, tree.tokenSlice(end_tok).len); + const end_off = token_starts[end_tok] + @as(u32, @intCast(tree.tokenSlice(end_tok).len)); return Span{ .start = start_off, .end = end_off, .main = token_starts[main] }; } }; @@ -3577,7 +3577,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void { if (stat.size > std.math.maxInt(u32)) return error.FileTooBig; - const source = try gpa.allocSentinel(u8, @intCast(usize, stat.size), 0); + const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); defer if (!file.source_loaded) gpa.free(source); const amt = try source_file.readAll(source); if (amt != stat.size) @@ -3609,21 +3609,21 @@ pub fn astGenFile(mod: *Module, file: *File) !void { if (file.zir.instructions.len == 0) @as([*]const u8, undefined) else - @ptrCast([*]const u8, safety_buffer.ptr) + @as([*]const u8, @ptrCast(safety_buffer.ptr)) else - @ptrCast([*]const u8, file.zir.instructions.items(.data).ptr); + @as([*]const u8, @ptrCast(file.zir.instructions.items(.data).ptr)); if (data_has_safety_tag) { // The `Data` union has a safety tag but in the file format we store it without. for (file.zir.instructions.items(.data), 0..) 
|*data, i| { - const as_struct = @ptrCast(*const HackDataLayout, data); + const as_struct = @as(*const HackDataLayout, @ptrCast(data)); safety_buffer[i] = as_struct.data; } } const header: Zir.Header = .{ - .instructions_len = @intCast(u32, file.zir.instructions.len), - .string_bytes_len = @intCast(u32, file.zir.string_bytes.len), - .extra_len = @intCast(u32, file.zir.extra.len), + .instructions_len = @as(u32, @intCast(file.zir.instructions.len)), + .string_bytes_len = @as(u32, @intCast(file.zir.string_bytes.len)), + .extra_len = @as(u32, @intCast(file.zir.extra.len)), .stat_size = stat.size, .stat_inode = stat.inode, @@ -3631,11 +3631,11 @@ pub fn astGenFile(mod: *Module, file: *File) !void { }; var iovecs = [_]std.os.iovec_const{ .{ - .iov_base = @ptrCast([*]const u8, &header), + .iov_base = @as([*]const u8, @ptrCast(&header)), .iov_len = @sizeOf(Zir.Header), }, .{ - .iov_base = @ptrCast([*]const u8, file.zir.instructions.items(.tag).ptr), + .iov_base = @as([*]const u8, @ptrCast(file.zir.instructions.items(.tag).ptr)), .iov_len = file.zir.instructions.len, }, .{ @@ -3647,7 +3647,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void { .iov_len = file.zir.string_bytes.len, }, .{ - .iov_base = @ptrCast([*]const u8, file.zir.extra.ptr), + .iov_base = @as([*]const u8, @ptrCast(file.zir.extra.ptr)), .iov_len = file.zir.extra.len * 4, }, }; @@ -3722,13 +3722,13 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) defer if (data_has_safety_tag) gpa.free(safety_buffer); const data_ptr = if (data_has_safety_tag) - @ptrCast([*]u8, safety_buffer.ptr) + @as([*]u8, @ptrCast(safety_buffer.ptr)) else - @ptrCast([*]u8, zir.instructions.items(.data).ptr); + @as([*]u8, @ptrCast(zir.instructions.items(.data).ptr)); var iovecs = [_]std.os.iovec{ .{ - .iov_base = @ptrCast([*]u8, zir.instructions.items(.tag).ptr), + .iov_base = @as([*]u8, @ptrCast(zir.instructions.items(.tag).ptr)), .iov_len = header.instructions_len, }, .{ @@ -3740,7 +3740,7 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) .iov_len = header.string_bytes_len, }, .{ - .iov_base = @ptrCast([*]u8, zir.extra.ptr), + .iov_base = @as([*]u8, @ptrCast(zir.extra.ptr)), .iov_len = header.extra_len * 4, }, }; @@ -3753,7 +3753,7 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) const tags = zir.instructions.items(.tag); for (zir.instructions.items(.data), 0..) 
|*data, i| { const union_tag = Zir.Inst.Tag.data_tags[@intFromEnum(tags[i])]; - const as_struct = @ptrCast(*HackDataLayout, data); + const as_struct = @as(*HackDataLayout, @ptrCast(data)); as_struct.* = .{ .safety_tag = @intFromEnum(union_tag), .data = safety_buffer[i], @@ -4394,7 +4394,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { const struct_obj = mod.structPtr(struct_index); struct_obj.zir_index = main_struct_inst; const extended = file.zir.instructions.items(.data)[main_struct_inst].extended; - const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); + const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); struct_obj.is_tuple = small.is_tuple; var sema_arena = std.heap.ArenaAllocator.init(gpa); @@ -5051,13 +5051,13 @@ pub fn scanNamespace( cur_bit_bag = zir.extra[bit_bag_index]; bit_bag_index += 1; } - const flags = @truncate(u4, cur_bit_bag); + const flags = @as(u4, @truncate(cur_bit_bag)); cur_bit_bag >>= 4; const decl_sub_index = extra_index; extra_index += 8; // src_hash(4) + line(1) + name(1) + value(1) + doc_comment(1) - extra_index += @truncate(u1, flags >> 2); // Align - extra_index += @as(u2, @truncate(u1, flags >> 3)) * 2; // Link section or address space, consists of 2 Refs + extra_index += @as(u1, @truncate(flags >> 2)); // Align + extra_index += @as(u2, @as(u1, @truncate(flags >> 3))) * 2; // Link section or address space, consists of 2 Refs try scanDecl(&scan_decl_iter, decl_sub_index, flags); } @@ -5195,7 +5195,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err new_decl.is_exported = is_exported; new_decl.has_align = has_align; new_decl.has_linksection_or_addrspace = has_linksection_or_addrspace; - new_decl.zir_decl_index = @intCast(u32, decl_sub_index); + new_decl.zir_decl_index = @as(u32, @intCast(decl_sub_index)); new_decl.alive = true; // This Decl corresponds to an AST node and therefore always alive. return; } @@ -5229,7 +5229,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err decl.kind = kind; decl.has_align = has_align; decl.has_linksection_or_addrspace = has_linksection_or_addrspace; - decl.zir_decl_index = @intCast(u32, decl_sub_index); + decl.zir_decl_index = @as(u32, @intCast(decl_sub_index)); if (decl.getOwnedFunctionIndex(mod) != .none) { switch (comp.bin_file.tag) { .coff, .elf, .macho, .plan9 => { @@ -5481,7 +5481,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE // This could be a generic function instantiation, however, in which case we need to // map the comptime parameters to constant values and only emit arg AIR instructions // for the runtime ones. 
- const runtime_params_len = @intCast(u32, mod.typeToFunc(fn_ty).?.param_types.len); + const runtime_params_len = @as(u32, @intCast(mod.typeToFunc(fn_ty).?.param_types.len)); try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len); try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType` try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); @@ -5524,13 +5524,13 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE continue; } const air_ty = try sema.addType(param_ty); - const arg_index = @intCast(u32, sema.air_instructions.len); + const arg_index = @as(u32, @intCast(sema.air_instructions.len)); inner_block.instructions.appendAssumeCapacity(arg_index); sema.air_instructions.appendAssumeCapacity(.{ .tag = .arg, .data = .{ .arg = .{ .ty = air_ty, - .src_index = @intCast(u32, total_param_index), + .src_index = @as(u32, @intCast(total_param_index)), } }, }); sema.inst_map.putAssumeCapacityNoClobber(inst, Air.indexToRef(arg_index)); @@ -5593,7 +5593,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + inner_block.instructions.items.len); const main_block_index = sema.addExtraAssumeCapacity(Air.Block{ - .body_len = @intCast(u32, inner_block.instructions.items.len), + .body_len = @as(u32, @intCast(inner_block.instructions.items.len)), }); sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items); sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index; @@ -5671,7 +5671,7 @@ pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index } const ptr = try mod.allocated_namespaces.addOne(mod.gpa); ptr.* = initialization; - return @enumFromInt(Namespace.Index, mod.allocated_namespaces.len - 1); + return @as(Namespace.Index, @enumFromInt(mod.allocated_namespaces.len - 1)); } pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void { @@ -5729,7 +5729,7 @@ pub fn allocateNewDecl( } break :d .{ .new_decl = decl, - .decl_index = @enumFromInt(Decl.Index, mod.allocated_decls.len - 1), + .decl_index = @as(Decl.Index, @enumFromInt(mod.allocated_decls.len - 1)), }; }; @@ -5767,7 +5767,7 @@ pub fn getErrorValue( name: InternPool.NullTerminatedString, ) Allocator.Error!ErrorInt { const gop = try mod.global_error_set.getOrPut(mod.gpa, name); - return @intCast(ErrorInt, gop.index); + return @as(ErrorInt, @intCast(gop.index)); } pub fn getErrorValueFromSlice( @@ -6139,7 +6139,7 @@ pub fn paramSrc( if (i == param_i) { if (param.anytype_ellipsis3) |some| { const main_token = tree.nodes.items(.main_token)[decl.src_node]; - return .{ .token_offset_param = @bitCast(i32, some) - @bitCast(i32, main_token) }; + return .{ .token_offset_param = @as(i32, @bitCast(some)) - @as(i32, @bitCast(main_token)) }; } return .{ .node_offset_param = decl.nodeIndexToRelative(param.type_expr) }; } @@ -6892,11 +6892,11 @@ pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocato /// losing data if the representation wasn't correct. 
pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(mod.getTarget())) { - 16 => .{ .f16 = @floatCast(f16, x) }, - 32 => .{ .f32 = @floatCast(f32, x) }, - 64 => .{ .f64 = @floatCast(f64, x) }, - 80 => .{ .f80 = @floatCast(f80, x) }, - 128 => .{ .f128 = @floatCast(f128, x) }, + 16 => .{ .f16 = @as(f16, @floatCast(x)) }, + 32 => .{ .f32 = @as(f32, @floatCast(x)) }, + 64 => .{ .f64 = @as(f64, @floatCast(x)) }, + 80 => .{ .f80 = @as(f80, @floatCast(x)) }, + 128 => .{ .f128 = @as(f128, @floatCast(x)) }, else => unreachable, }; const i = try intern(mod, .{ .float = .{ @@ -6956,18 +6956,18 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { assert(sign); // Protect against overflow in the following negation. if (x == std.math.minInt(i64)) return 64; - return Type.smallestUnsignedBits(@intCast(u64, -(x + 1))) + 1; + return Type.smallestUnsignedBits(@as(u64, @intCast(-(x + 1)))) + 1; }, .u64 => |x| { return Type.smallestUnsignedBits(x) + @intFromBool(sign); }, .big_int => |big| { - if (big.positive) return @intCast(u16, big.bitCountAbs() + @intFromBool(sign)); + if (big.positive) return @as(u16, @intCast(big.bitCountAbs() + @intFromBool(sign))); // Zero is still a possibility, in which case unsigned is fine if (big.eqZero()) return 0; - return @intCast(u16, big.bitCountTwosComp()); + return @as(u16, @intCast(big.bitCountTwosComp())); }, .lazy_align => |lazy_ty| { return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod)) + @intFromBool(sign); diff --git a/src/Package.zig b/src/Package.zig index dd8f3c8a7e..2e1dd4e14f 100644 --- a/src/Package.zig +++ b/src/Package.zig @@ -390,10 +390,10 @@ const Report = struct { .src_loc = try eb.addSourceLocation(.{ .src_path = try eb.addString(file_path), .span_start = token_starts[msg.tok], - .span_end = @intCast(u32, token_starts[msg.tok] + ast.tokenSlice(msg.tok).len), + .span_end = @as(u32, @intCast(token_starts[msg.tok] + ast.tokenSlice(msg.tok).len)), .span_main = token_starts[msg.tok] + msg.off, - .line = @intCast(u32, start_loc.line), - .column = @intCast(u32, start_loc.column), + .line = @as(u32, @intCast(start_loc.line)), + .column = @as(u32, @intCast(start_loc.column)), .source_line = try eb.addString(ast.source[start_loc.line_start..start_loc.line_end]), }), .notes_len = notes_len, diff --git a/src/Sema.zig b/src/Sema.zig index e45cccd43b..95ebaca9fb 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -212,7 +212,7 @@ pub const InstMap = struct { while (true) { const extra_capacity = better_capacity / 2 + 16; better_capacity += extra_capacity; - better_start -|= @intCast(Zir.Inst.Index, extra_capacity / 2); + better_start -|= @as(Zir.Inst.Index, @intCast(extra_capacity / 2)); if (better_start <= start and end < better_capacity + better_start) break; } @@ -225,7 +225,7 @@ pub const InstMap = struct { allocator.free(map.items); map.items = new_items; - map.start = @intCast(Zir.Inst.Index, better_start); + map.start = @as(Zir.Inst.Index, @intCast(better_start)); } }; @@ -619,7 +619,7 @@ pub const Block = struct { const sema = block.sema; const ty_ref = try sema.addType(aggregate_ty); try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements.len); - const extra_index = @intCast(u32, sema.air_extra.items.len); + const extra_index = @as(u32, @intCast(sema.air_extra.items.len)); sema.appendRefsAssumeCapacity(elements); return block.addInst(.{ @@ -660,7 +660,7 @@ pub const Block = struct { try sema.air_instructions.ensureUnusedCapacity(gpa, 1); 
try block.instructions.ensureUnusedCapacity(gpa, 1); - const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); + const result_index = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); sema.air_instructions.appendAssumeCapacity(inst); block.instructions.appendAssumeCapacity(result_index); return result_index; @@ -678,7 +678,7 @@ pub const Block = struct { try sema.air_instructions.ensureUnusedCapacity(gpa, 1); - const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); + const result_index = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); sema.air_instructions.appendAssumeCapacity(inst); try block.instructions.insert(gpa, index, result_index); @@ -1763,7 +1763,7 @@ pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { const i = @intFromEnum(zir_ref); // First section of indexes correspond to a set number of constant values. // We intentionally map the same indexes to the same values between ZIR and AIR. - if (i < InternPool.static_len) return @enumFromInt(Air.Inst.Ref, i); + if (i < InternPool.static_len) return @as(Air.Inst.Ref, @enumFromInt(i)); // The last section of indexes refers to the map of ZIR => AIR. const inst = sema.inst_map.get(i - InternPool.static_len).?; if (inst == .generic_poison) return error.GenericPoison; @@ -2041,7 +2041,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( // First section of indexes correspond to a set number of constant values. const int = @intFromEnum(inst); if (int < InternPool.static_len) { - return @enumFromInt(InternPool.Index, int).toValue(); + return @as(InternPool.Index, @enumFromInt(int)).toValue(); } const i = int - InternPool.static_len; @@ -2430,7 +2430,7 @@ fn analyzeAsAlign( air_ref: Air.Inst.Ref, ) !Alignment { const alignment_big = try sema.analyzeAsInt(block, src, air_ref, align_ty, "alignment must be comptime-known"); - const alignment = @intCast(u32, alignment_big); // We coerce to u29 in the prev line. + const alignment = @as(u32, @intCast(alignment_big)); // We coerce to u29 in the prev line. 
try sema.validateAlign(block, src, alignment); return Alignment.fromNonzeroByteUnits(alignment); } @@ -2737,7 +2737,7 @@ pub fn analyzeStructDecl( const struct_obj = mod.structPtr(struct_index); const extended = sema.code.instructions.items(.data)[inst].extended; assert(extended.opcode == .struct_decl); - const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); + const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); struct_obj.known_non_opv = small.known_non_opv; if (small.known_comptime_only) { @@ -2774,9 +2774,9 @@ fn zirStructDecl( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; - const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); + const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); const src: LazySrcLoc = if (small.has_src_node) blk: { - const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); + const node_offset = @as(i32, @bitCast(sema.code.extra[extended.operand])); break :blk LazySrcLoc.nodeOffset(node_offset); } else sema.src; @@ -2937,18 +2937,18 @@ fn zirEnumDecl( const mod = sema.mod; const gpa = sema.gpa; - const small = @bitCast(Zir.Inst.EnumDecl.Small, extended.small); + const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src: LazySrcLoc = if (small.has_src_node) blk: { - const node_offset = @bitCast(i32, sema.code.extra[extra_index]); + const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index])); extra_index += 1; break :blk LazySrcLoc.nodeOffset(node_offset); } else sema.src; const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x }; const tag_type_ref = if (small.has_tag_type) blk: { - const tag_type_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; break :blk tag_type_ref; } else .none; @@ -3108,7 +3108,7 @@ fn zirEnumDecl( cur_bit_bag = sema.code.extra[bit_bag_index]; bit_bag_index += 1; } - const has_tag_value = @truncate(u1, cur_bit_bag) != 0; + const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const field_name_zir = sema.code.nullTerminatedString(sema.code.extra[extra_index]); @@ -3131,7 +3131,7 @@ fn zirEnumDecl( } const tag_overflow = if (has_tag_value) overflow: { - const tag_val_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const tag_val_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const tag_inst = try sema.resolveInst(tag_val_ref); last_tag_val = sema.resolveConstValue(block, .unneeded, tag_inst, "") catch |err| switch (err) { @@ -3213,11 +3213,11 @@ fn zirUnionDecl( const mod = sema.mod; const gpa = sema.gpa; - const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); + const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src: LazySrcLoc = if (small.has_src_node) blk: { - const node_offset = @bitCast(i32, sema.code.extra[extra_index]); + const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index])); extra_index += 1; break :blk LazySrcLoc.nodeOffset(node_offset); } else sema.src; @@ -3298,11 +3298,11 @@ fn zirOpaqueDecl( defer tracy.end(); const mod = sema.mod; - const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small); + const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src: LazySrcLoc = if 
(small.has_src_node) blk: { - const node_offset = @bitCast(i32, sema.code.extra[extra_index]); + const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index])); extra_index += 1; break :blk LazySrcLoc.nodeOffset(node_offset); } else sema.src; @@ -3369,7 +3369,7 @@ fn zirErrorSetDecl( var names: Module.Fn.InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len); - var extra_index = @intCast(u32, extra.end); + var extra_index = @as(u32, @intCast(extra.end)); const extra_index_end = extra_index + (extra.data.fields_len * 2); while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string const str_index = sema.code.extra[extra_index]; @@ -3569,18 +3569,18 @@ fn zirAllocExtended( const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = extra.data.src_node }; const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = extra.data.src_node }; - const small = @bitCast(Zir.Inst.AllocExtended.Small, extended.small); + const small = @as(Zir.Inst.AllocExtended.Small, @bitCast(extended.small)); var extra_index: usize = extra.end; const var_ty: Type = if (small.has_type) blk: { - const type_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const type_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; break :blk try sema.resolveType(block, ty_src, type_ref); } else undefined; const alignment = if (small.has_align) blk: { - const align_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const align_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const alignment = try sema.resolveAlign(block, align_src, align_ref); break :blk alignment; @@ -3598,7 +3598,7 @@ fn zirAllocExtended( .is_const = small.is_const, } }, }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1))); } } @@ -3730,7 +3730,7 @@ fn zirAllocInferredComptime( .is_const = is_const, } }, }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1))); } fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3795,7 +3795,7 @@ fn zirAllocInferred( .is_const = is_const, } }, }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1))); } const result_index = try block.addInstAsIndex(.{ @@ -4037,7 +4037,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com .data = .{ .ty_pl = .{ .ty = ty_inst, .payload = sema.addExtraAssumeCapacity(Air.Block{ - .body_len = @intCast(u32, replacement_block.instructions.items.len), + .body_len = @as(u32, @intCast(replacement_block.instructions.items.len)), }), } }, }); @@ -4121,7 +4121,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. // First pass to look for comptime values. for (args, 0..) |zir_arg, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); runtime_arg_lens[i] = .none; if (zir_arg == .none) continue; const object = try sema.resolveInst(zir_arg); @@ -4192,7 +4192,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
const msg = try sema.errMsg(block, src, "unbounded for loop", .{}); errdefer msg.destroy(gpa); for (args, 0..) |zir_arg, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); if (zir_arg == .none) continue; const object = try sema.resolveInst(zir_arg); const object_ty = sema.typeOf(object); @@ -4435,7 +4435,7 @@ fn validateUnionInit( } const tag_ty = union_ty.unionTagTypeHypothetical(mod); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); + const enum_field_index = @as(u32, @intCast(tag_ty.enumFieldIndex(field_name, mod).?)); const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); if (init_val) |val| { @@ -4547,9 +4547,9 @@ fn validateStructInit( const field_src = init_src; // TODO better source location const default_field_ptr = if (struct_ty.isTuple(mod)) - try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) + try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @as(u32, @intCast(i)), true) else - try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); + try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @as(u32, @intCast(i)), field_src, struct_ty, true); const init = try sema.addConstant(default_val); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } @@ -4729,9 +4729,9 @@ fn validateStructInit( const field_src = init_src; // TODO better source location const default_field_ptr = if (struct_ty.isTuple(mod)) - try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) + try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @as(u32, @intCast(i)), true) else - try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); + try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @as(u32, @intCast(i)), field_src, struct_ty, true); const init = try sema.addConstant(field_values[i].toValue()); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } @@ -5165,7 +5165,7 @@ fn storeToInferredAllocComptime( fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const quota = @intCast(u32, try sema.resolveInt(block, src, inst_data.operand, Type.u32, "eval branch quota must be comptime-known")); + const quota = @as(u32, @intCast(try sema.resolveInt(block, src, inst_data.operand, Type.u32, "eval branch quota must be comptime-known"))); sema.branch_quota = @max(sema.branch_quota, quota); } @@ -5388,7 +5388,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError // Reserve space for a Loop instruction so that generated Break instructions can // point to it, even if it doesn't end up getting used because the code ends up being // comptime evaluated. 
- const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); const loop_inst = block_inst + 1; try sema.air_instructions.ensureUnusedCapacity(gpa, 2); sema.air_instructions.appendAssumeCapacity(.{ @@ -5436,7 +5436,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + loop_block_len); sema.air_instructions.items(.data)[loop_inst].ty_pl.payload = sema.addExtraAssumeCapacity( - Air.Block{ .body_len = @intCast(u32, loop_block_len) }, + Air.Block{ .body_len = @as(u32, @intCast(loop_block_len)) }, ); sema.air_extra.appendSliceAssumeCapacity(loop_block.instructions.items); } @@ -5586,7 +5586,7 @@ fn zirBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index, force_compt // Reserve space for a Block instruction so that generated Break instructions can // point to it, even if it doesn't end up getting used because the code ends up being // comptime evaluated or is an unlabeled block. - const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = undefined, @@ -5733,7 +5733,7 @@ fn analyzeBlockBody( sema.air_instructions.items(.data)[merges.block_inst] = .{ .ty_pl = .{ .ty = ty_inst, .payload = sema.addExtraAssumeCapacity(Air.Block{ - .body_len = @intCast(u32, child_block.instructions.items.len), + .body_len = @as(u32, @intCast(child_block.instructions.items.len)), }), } }; sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); @@ -5761,11 +5761,11 @@ fn analyzeBlockBody( // Convert the br instruction to a block instruction that has the coercion // and then a new br inside that returns the coerced instruction. - const sub_block_len = @intCast(u32, coerce_block.instructions.items.len + 1); + const sub_block_len = @as(u32, @intCast(coerce_block.instructions.items.len + 1)); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + sub_block_len); try sema.air_instructions.ensureUnusedCapacity(gpa, 1); - const sub_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const sub_br_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); sema.air_instructions.items(.tag)[br] = .block; sema.air_instructions.items(.data)[br] = .{ .ty_pl = .{ @@ -6114,7 +6114,7 @@ fn addDbgVar( try sema.queueFullTypeResolution(operand_ty); // Add the name to the AIR. 
- const name_extra_index = @intCast(u32, sema.air_extra.items.len); + const name_extra_index = @as(u32, @intCast(sema.air_extra.items.len)); const elements_used = name.len / 4 + 1; try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements_used); const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice()); @@ -6314,7 +6314,7 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref .tag = .save_err_return_trace_index, .data = .{ .ty_pl = .{ .ty = try sema.addType(stack_trace_ty), - .payload = @intCast(u32, field_index), + .payload = @as(u32, @intCast(field_index)), } }, }); } @@ -6386,12 +6386,12 @@ fn popErrorReturnTrace( then_block.instructions.items.len + else_block.instructions.items.len + @typeInfo(Air.Block).Struct.fields.len + 1); // +1 for the sole .cond_br instruction in the .block - const cond_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const cond_br_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = is_non_error_inst, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ - .then_body_len = @intCast(u32, then_block.instructions.items.len), - .else_body_len = @intCast(u32, else_block.instructions.items.len), + .then_body_len = @as(u32, @intCast(then_block.instructions.items.len)), + .else_body_len = @as(u32, @intCast(else_block.instructions.items.len)), }), } } }); sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items); @@ -6422,7 +6422,7 @@ fn zirCall( const extra = sema.code.extraData(ExtraType, inst_data.payload_index); const args_len = extra.data.flags.args_len; - const modifier = @enumFromInt(std.builtin.CallModifier, extra.data.flags.packed_modifier); + const modifier = @as(std.builtin.CallModifier, @enumFromInt(extra.data.flags.packed_modifier)); const ensure_result_used = extra.data.flags.ensure_result_used; const pop_error_return_trace = extra.data.flags.pop_error_return_trace; @@ -6460,7 +6460,7 @@ fn zirCall( const args_body = sema.code.extra[extra.end..]; var input_is_error = false; - const block_index = @intCast(Air.Inst.Index, block.instructions.items.len); + const block_index = @as(Air.Inst.Index, @intCast(block.instructions.items.len)); const fn_params_len = mod.typeToFunc(func_ty).?.param_types.len; const parent_comptime = block.is_comptime; @@ -6477,7 +6477,7 @@ fn zirCall( // Generate args to comptime params in comptime block. defer block.is_comptime = parent_comptime; - if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@intCast(u5, arg_index))) { + if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@as(u5, @intCast(arg_index)))) { block.is_comptime = true; // TODO set comptime_reason } @@ -6533,7 +6533,7 @@ fn zirCall( .tag = .save_err_return_trace_index, .data = .{ .ty_pl = .{ .ty = try sema.addType(stack_trace_ty), - .payload = @intCast(u32, field_index), + .payload = @as(u32, @intCast(field_index)), } }, }); @@ -6809,7 +6809,7 @@ fn analyzeCall( // set to in the `Block`. // This block instruction will be used to capture the return value from the // inlined function. 
- const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = undefined, @@ -7077,7 +7077,7 @@ fn analyzeCall( if (i < fn_params_len) { const opts: CoerceOpts = .{ .param_src = .{ .func_inst = func, - .param_i = @intCast(u32, i), + .param_i = @as(u32, @intCast(i)), } }; const param_ty = mod.typeToFunc(func_ty).?.param_types[i].toType(); args[i] = sema.analyzeCallArg( @@ -7136,7 +7136,7 @@ fn analyzeCall( .data = .{ .pl_op = .{ .operand = func, .payload = sema.addExtraAssumeCapacity(Air.Call{ - .args_len = @intCast(u32, args.len), + .args_len = @as(u32, @intCast(args.len)), }), } }, }); @@ -7245,7 +7245,7 @@ fn analyzeInlineCallArg( } const casted_arg = sema.coerceExtra(arg_block, param_ty.toType(), uncasted_arg, arg_src, .{ .param_src = .{ .func_inst = func_inst, - .param_i = @intCast(u32, arg_i.*), + .param_i = @as(u32, @intCast(arg_i.*)), } }) catch |err| switch (err) { error.NotCoercible => unreachable, else => |e| return e, @@ -7419,14 +7419,14 @@ fn instantiateGenericCall( var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i))); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i))); }, .param_anytype_comptime => { is_anytype = true; @@ -7588,7 +7588,7 @@ fn instantiateGenericCall( // Make a runtime call to the new function, making sure to omit the comptime args. 
const comptime_args = callee.comptime_args.?; const func_ty = mod.declPtr(callee.owner_decl).ty; - const runtime_args_len = @intCast(u32, mod.typeToFunc(func_ty).?.param_types.len); + const runtime_args_len = @as(u32, @intCast(mod.typeToFunc(func_ty).?.param_types.len)); const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len); { var runtime_i: u32 = 0; @@ -7738,14 +7738,14 @@ fn resolveGenericInstantiationType( var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i))); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i))); }, .param_anytype_comptime => { is_anytype = true; @@ -7779,7 +7779,7 @@ fn resolveGenericInstantiationType( .tag = .arg, .data = .{ .arg = .{ .ty = try child_sema.addType(arg_ty), - .src_index = @intCast(u32, arg_i), + .src_index = @as(u32, @intCast(arg_i)), } }, }); child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg); @@ -7799,7 +7799,7 @@ fn resolveGenericInstantiationType( const new_func = new_func_val.getFunctionIndex(mod).unwrap().?; assert(new_func == new_module_func); - const monomorphed_args_index = @intCast(u32, mod.monomorphed_func_keys.items.len); + const monomorphed_args_index = @as(u32, @intCast(mod.monomorphed_func_keys.items.len)); const monomorphed_args = try mod.monomorphed_func_keys.addManyAsSlice(gpa, monomorphed_args_len); var monomorphed_arg_i: u32 = 0; try mod.monomorphed_funcs.ensureUnusedCapacityContext(gpa, monomorphed_args_len + 1, .{ .mod = mod }); @@ -7811,14 +7811,14 @@ fn resolveGenericInstantiationType( var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i))); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i))); }, .param_anytype_comptime => { is_anytype = true; @@ -7984,7 +7984,7 @@ fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const len = @intCast(u32, try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known")); + const len = @as(u32, @intCast(try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known"))); const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs); try sema.checkVectorElemType(block, elem_type_src, elem_type); const vector_type = try mod.vectorType(.{ @@ -8140,7 +8140,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD switch (names.len) { 0 => return sema.addConstant(try mod.intValue(Type.err_int, 0)), 1 => { - const int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(names[0]).?); + const int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(names[0]).?)); return sema.addIntUnsigned(Type.err_int, int); }, else => {}, @@ -8727,7 +8727,7 @@ fn zirFunc( const ret_ty: Type = switch (extra.data.ret_body_len) { 0 => Type.void, 1 => blk: { - const ret_ty_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const ret_ty_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; if (sema.resolveType(block, ret_ty_src, ret_ty_ref)) |ret_ty| { break :blk ret_ty; @@ -8964,7 +8964,7 @@ fn funcCommon( for (param_types, block.params.items, 0..) |*dest_param_ty, param, i| { const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; - break :blk @truncate(u1, noalias_bits >> index) != 0; + break :blk @as(u1, @truncate(noalias_bits >> index)) != 0; }; dest_param_ty.* = param.ty.toIntern(); sema.analyzeParameter( @@ -9199,8 +9199,8 @@ fn funcCommon( .hash = hash, .lbrace_line = src_locs.lbrace_line, .rbrace_line = src_locs.rbrace_line, - .lbrace_column = @truncate(u16, src_locs.columns), - .rbrace_column = @truncate(u16, src_locs.columns >> 16), + .lbrace_column = @as(u16, @truncate(src_locs.columns)), + .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)), .branch_quota = default_branch_quota, .is_noinline = is_noinline, }; @@ -9225,7 +9225,7 @@ fn analyzeParameter( const mod = sema.mod; const requires_comptime = try sema.typeRequiresComptime(param.ty); if (param.is_comptime or requires_comptime) { - comptime_bits.* |= @as(u32, 1) << @intCast(u5, i); // TODO: handle cast error + comptime_bits.* |= @as(u32, 1) << @as(u5, @intCast(i)); // TODO: handle cast error } const this_generic = param.ty.isGenericPoison(); is_generic.* = is_generic.* or this_generic; @@ -9411,7 +9411,7 @@ fn zirParam( sema.inst_map.putAssumeCapacityNoClobber(inst, result); } else { // Otherwise we need a dummy runtime instruction. 
- const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); + const result_index = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(sema.gpa, .{ .tag = .alloc, .data = .{ .ty = param_ty }, @@ -10287,7 +10287,7 @@ const SwitchProngAnalysis = struct { if (inline_case_capture != .none) { const item_val = sema.resolveConstValue(block, .unneeded, inline_case_capture, "") catch unreachable; if (operand_ty.zigTypeTag(mod) == .Union) { - const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(item_val, mod).?); + const field_index = @as(u32, @intCast(operand_ty.unionTagFieldIndex(item_val, mod).?)); const union_obj = mod.typeToUnion(operand_ty).?; const field_ty = union_obj.fields.values()[field_index].ty; if (capture_byref) { @@ -10346,13 +10346,13 @@ const SwitchProngAnalysis = struct { const union_obj = mod.typeToUnion(operand_ty).?; const first_item_val = sema.resolveConstValue(block, .unneeded, case_vals[0], "") catch unreachable; - const first_field_index = @intCast(u32, operand_ty.unionTagFieldIndex(first_item_val, mod).?); + const first_field_index = @as(u32, @intCast(operand_ty.unionTagFieldIndex(first_item_val, mod).?)); const first_field = union_obj.fields.values()[first_field_index]; const field_tys = try sema.arena.alloc(Type, case_vals.len); for (case_vals, field_tys) |item, *field_ty| { const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable; - const field_idx = @intCast(u32, operand_ty.unionTagFieldIndex(item_val, sema.mod).?); + const field_idx = @as(u32, @intCast(operand_ty.unionTagFieldIndex(item_val, sema.mod).?)); field_ty.* = union_obj.fields.values()[field_idx].ty; } @@ -10378,7 +10378,7 @@ const SwitchProngAnalysis = struct { const multi_idx = raw_capture_src.multi_capture; const src_decl_ptr = sema.mod.declPtr(block.src_decl); for (case_srcs, 0..) 
|*case_src, i| { - const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(u32, i) } }; + const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @as(u32, @intCast(i)) } }; case_src.* = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none); } const capture_src = raw_capture_src.resolve(mod, src_decl_ptr, switch_node_offset, .none); @@ -10426,7 +10426,7 @@ const SwitchProngAnalysis = struct { const multi_idx = raw_capture_src.multi_capture; const src_decl_ptr = sema.mod.declPtr(block.src_decl); const capture_src = raw_capture_src.resolve(mod, src_decl_ptr, switch_node_offset, .none); - const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(u32, i) } }; + const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @as(u32, @intCast(i)) } }; const case_src = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none); const msg = msg: { const msg = try sema.errMsg(block, capture_src, "capture group with incompatible types", .{}); @@ -10529,12 +10529,12 @@ const SwitchProngAnalysis = struct { var coerce_block = block.makeSubBlock(); defer coerce_block.instructions.deinit(sema.gpa); - const uncoerced = try coerce_block.addStructFieldVal(spa.operand, @intCast(u32, idx), field_tys[idx]); + const uncoerced = try coerce_block.addStructFieldVal(spa.operand, @as(u32, @intCast(idx)), field_tys[idx]); const coerced = sema.coerce(&coerce_block, capture_ty, uncoerced, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { const multi_idx = raw_capture_src.multi_capture; const src_decl_ptr = sema.mod.declPtr(block.src_decl); - const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(u32, idx) } }; + const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @as(u32, @intCast(idx)) } }; const case_src = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none); _ = try sema.coerce(&coerce_block, capture_ty, uncoerced, case_src); unreachable; @@ -10545,7 +10545,7 @@ const SwitchProngAnalysis = struct { try cases_extra.ensureUnusedCapacity(3 + coerce_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, coerce_block.instructions.items.len)); // body_len + cases_extra.appendAssumeCapacity(@as(u32, @intCast(coerce_block.instructions.items.len))); // body_len cases_extra.appendAssumeCapacity(@intFromEnum(case_vals[idx])); // item cases_extra.appendSliceAssumeCapacity(coerce_block.instructions.items); // body } @@ -10556,7 +10556,7 @@ const SwitchProngAnalysis = struct { defer coerce_block.instructions.deinit(sema.gpa); const first_imc = in_mem_coercible.findFirstSet().?; - const uncoerced = try coerce_block.addStructFieldVal(spa.operand, @intCast(u32, first_imc), field_tys[first_imc]); + const uncoerced = try coerce_block.addStructFieldVal(spa.operand, @as(u32, @intCast(first_imc)), field_tys[first_imc]); const coerced = try coerce_block.addBitCast(capture_ty, uncoerced); _ = try coerce_block.addBr(capture_block_inst, coerced); @@ -10569,14 +10569,14 @@ const SwitchProngAnalysis = struct { @typeInfo(Air.Block).Struct.fields.len + 1); - const switch_br_inst = @intCast(u32, sema.air_instructions.len); + const switch_br_inst = @as(u32, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(sema.gpa, .{ .tag = .switch_br, .data = .{ .pl_op = .{ .operand = spa.cond, .payload = 
sema.addExtraAssumeCapacity(Air.SwitchBr{ - .cases_len = @intCast(u32, prong_count), - .else_body_len = @intCast(u32, else_body_len), + .cases_len = @as(u32, @intCast(prong_count)), + .else_body_len = @as(u32, @intCast(else_body_len)), }), } }, }); @@ -10763,7 +10763,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r .has_tag_capture = false, }, .under, .@"else" => blk: { - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[header_extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[header_extra_index])); const extra_body_start = header_extra_index + 1; break :blk .{ .body = sema.code.extra[extra_body_start..][0..info.body_len], @@ -10833,9 +10833,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1 + info.body_len; case_vals.appendAssumeCapacity(try sema.validateSwitchItemEnum( @@ -10856,7 +10856,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + info.body_len; @@ -10870,7 +10870,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r item_ref, operand_ty, src_node_offset, - .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, + .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }, )); } @@ -10932,9 +10932,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1 + info.body_len; case_vals.appendAssumeCapacity(try sema.validateSwitchItemError( @@ -10954,7 +10954,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + info.body_len; @@ -10967,7 +10967,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r item_ref, operand_ty, src_node_offset, - .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, + .{ .multi = .{ .prong = multi_i, .item = @as(u32, 
@intCast(item_i)) } }, )); } @@ -11073,9 +11073,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1 + info.body_len; case_vals.appendAssumeCapacity(try sema.validateSwitchItemInt( @@ -11095,7 +11095,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len; @@ -11108,16 +11108,16 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r item_ref, operand_ty, src_node_offset, - .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, + .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }, )); } try case_vals.ensureUnusedCapacity(gpa, 2 * ranges_len); var range_i: u32 = 0; while (range_i < ranges_len) : (range_i += 1) { - const item_first = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const item_first = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; - const item_last = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const item_last = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const vals = try sema.validateSwitchRange( @@ -11168,9 +11168,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1 + info.body_len; case_vals.appendAssumeCapacity(try sema.validateSwitchItemBool( @@ -11190,7 +11190,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + info.body_len; @@ -11203,7 +11203,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r &false_count, item_ref, src_node_offset, - .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, + .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }, )); } @@ -11250,9 +11250,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r { var scalar_i: u32 = 0; while (scalar_i < 
scalar_cases_len) : (scalar_i += 1) { - const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; extra_index += info.body_len; @@ -11273,7 +11273,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + info.body_len; @@ -11286,7 +11286,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r item_ref, operand_ty, src_node_offset, - .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, + .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }, )); } @@ -11324,7 +11324,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r .tag_capture_inst = tag_capture_inst, }; - const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = undefined, @@ -11368,7 +11368,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r var scalar_i: usize = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; const body = sema.code.extra[extra_index..][0..info.body_len]; extra_index += info.body_len; @@ -11382,7 +11382,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r .normal, body, info.capture, - .{ .scalar_capture = @intCast(u32, scalar_i) }, + .{ .scalar_capture = @as(u32, @intCast(scalar_i)) }, &.{item}, if (info.is_inline) operand else .none, info.has_tag_capture, @@ -11399,7 +11399,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1 + items_len; const body = sema.code.extra[extra_index + 2 * ranges_len ..][0..info.body_len]; @@ -11416,7 +11416,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r .normal, body, info.capture, - .{ .multi_capture = @intCast(u32, multi_i) }, + .{ .multi_capture = @as(u32, @intCast(multi_i)) }, items, if (info.is_inline) operand else .none, info.has_tag_capture, @@ -11443,7 +11443,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r .normal, body, info.capture, - .{ .multi_capture = @intCast(u32, multi_i) }, + .{ .multi_capture = @as(u32, @intCast(multi_i)) }, undefined, // case_vals may be undefined for ranges if (info.is_inline) operand else .none, info.has_tag_capture, @@ -11528,7 
+11528,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r var scalar_i: usize = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; const body = sema.code.extra[extra_index..][0..info.body_len]; extra_index += info.body_len; @@ -11556,7 +11556,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r .normal, body, info.capture, - .{ .scalar_capture = @intCast(u32, scalar_i) }, + .{ .scalar_capture = @as(u32, @intCast(scalar_i)) }, &.{item}, if (info.is_inline) item else .none, info.has_tag_capture, @@ -11569,7 +11569,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(item)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } @@ -11589,7 +11589,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1 + items_len; const items = case_vals.items[case_val_idx..][0..items_len]; @@ -11654,7 +11654,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); @@ -11676,7 +11676,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { - const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }; + const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }; const decl = mod.declPtr(case_block.src_decl); try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none)); unreachable; @@ -11702,7 +11702,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(item)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } @@ -11750,8 +11750,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, 
operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len + case_block.instructions.items.len); - cases_extra.appendAssumeCapacity(@intCast(u32, items.len)); - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(items.len))); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); for (items) |item| { cases_extra.appendAssumeCapacity(@intFromEnum(item)); @@ -11846,8 +11846,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{ - .then_body_len = @intCast(u32, prev_then_body.len), - .else_body_len = @intCast(u32, cond_body.len), + .then_body_len = @as(u32, @intCast(prev_then_body.len)), + .else_body_len = @as(u32, @intCast(cond_body.len)), }); sema.air_extra.appendSliceAssumeCapacity(prev_then_body); sema.air_extra.appendSliceAssumeCapacity(cond_body); @@ -11872,7 +11872,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r if (f != null) continue; cases_len += 1; - const item_val = try mod.enumValueFieldIndex(operand_ty, @intCast(u32, i)); + const item_val = try mod.enumValueFieldIndex(operand_ty, @as(u32, @intCast(i))); const item_ref = try sema.addConstant(item_val); case_block.instructions.shrinkRetainingCapacity(0); @@ -11903,7 +11903,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } @@ -11944,7 +11944,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } @@ -11975,7 +11975,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } @@ -12003,7 +12003,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_true)); 
cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } @@ -12029,7 +12029,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_false)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } @@ -12098,8 +12098,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{ - .then_body_len = @intCast(u32, prev_then_body.len), - .else_body_len = @intCast(u32, case_block.instructions.items.len), + .then_body_len = @as(u32, @intCast(prev_then_body.len)), + .else_body_len = @as(u32, @intCast(case_block.instructions.items.len)), }); sema.air_extra.appendSliceAssumeCapacity(prev_then_body); sema.air_extra.appendSliceAssumeCapacity(case_block.instructions.items); @@ -12113,8 +12113,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r _ = try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{ .operand = operand, .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{ - .cases_len = @intCast(u32, cases_len), - .else_body_len = @intCast(u32, final_else_body.len), + .cases_len = @as(u32, @intCast(cases_len)), + .else_body_len = @as(u32, @intCast(final_else_body.len)), }), } } }); sema.air_extra.appendSliceAssumeCapacity(cases_extra.items); @@ -13527,7 +13527,7 @@ fn analyzeTupleMul( var i: u32 = 0; while (i < tuple_len) : (i += 1) { const operand_src = lhs_src; // TODO better source location - element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, operand, @intCast(u32, i), operand_ty); + element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, operand, @as(u32, @intCast(i)), operand_ty); } i = 1; while (i < factor) : (i += 1) { @@ -15593,10 +15593,10 @@ fn analyzePtrArithmetic( // The resulting pointer is aligned to the lcd between the offset (an // arbitrary number) and the alignment factor (always a power of two, // non zero). 
- const new_align = @enumFromInt(Alignment, @min( + const new_align = @as(Alignment, @enumFromInt(@min( @ctz(addend), @intFromEnum(ptr_info.flags.alignment), - )); + ))); assert(new_align != .none); break :t try mod.ptrType(.{ @@ -15675,14 +15675,14 @@ fn zirAsm( const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand); const src = LazySrcLoc.nodeOffset(extra.data.src_node); const ret_ty_src: LazySrcLoc = .{ .node_offset_asm_ret_ty = extra.data.src_node }; - const outputs_len = @truncate(u5, extended.small); - const inputs_len = @truncate(u5, extended.small >> 5); - const clobbers_len = @truncate(u5, extended.small >> 10); - const is_volatile = @truncate(u1, extended.small >> 15) != 0; + const outputs_len = @as(u5, @truncate(extended.small)); + const inputs_len = @as(u5, @truncate(extended.small >> 5)); + const clobbers_len = @as(u5, @truncate(extended.small >> 10)); + const is_volatile = @as(u1, @truncate(extended.small >> 15)) != 0; const is_global_assembly = sema.func_index == .none; const asm_source: []const u8 = if (tmpl_is_expr) blk: { - const tmpl = @enumFromInt(Zir.Inst.Ref, extra.data.asm_source); + const tmpl = @as(Zir.Inst.Ref, @enumFromInt(extra.data.asm_source)); const s: []const u8 = try sema.resolveConstString(block, src, tmpl, "assembly code must be comptime-known"); break :blk s; } else sema.code.nullTerminatedString(extra.data.asm_source); @@ -15721,7 +15721,7 @@ fn zirAsm( const output = sema.code.extraData(Zir.Inst.Asm.Output, extra_i); extra_i = output.end; - const is_type = @truncate(u1, output_type_bits) != 0; + const is_type = @as(u1, @truncate(output_type_bits)) != 0; output_type_bits >>= 1; if (is_type) { @@ -15783,10 +15783,10 @@ fn zirAsm( .data = .{ .ty_pl = .{ .ty = expr_ty, .payload = sema.addExtraAssumeCapacity(Air.Asm{ - .source_len = @intCast(u32, asm_source.len), + .source_len = @as(u32, @intCast(asm_source.len)), .outputs_len = outputs_len, - .inputs_len = @intCast(u32, args.len), - .flags = (@as(u32, @intFromBool(is_volatile)) << 31) | @intCast(u32, clobbers.len), + .inputs_len = @as(u32, @intCast(args.len)), + .flags = (@as(u32, @intFromBool(is_volatile)) << 31) | @as(u32, @intCast(clobbers.len)), }), } }, }); @@ -16192,7 +16192,7 @@ fn zirThis( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const this_decl_index = mod.namespaceDeclIndex(block.namespace); - const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand)); + const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand))); return sema.analyzeDeclVal(block, src, this_decl_index); } @@ -16329,7 +16329,7 @@ fn zirFrameAddress( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand)); + const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand))); try sema.requireRuntimeBlock(block, src, null); return try block.addNoOp(.frame_addr); } @@ -16482,7 +16482,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; - break :blk @truncate(u1, info.noalias_bits >> index) != 0; + break :blk @as(u1, @truncate(info.noalias_bits >> index)) != 0; }; const param_fields = .{ @@ -16925,7 +16925,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .u64 = @intCast(u64, i) }, + .storage = .{ .u64 = @as(u64, @intCast(i)) }, } }); // TODO: write something like 
getCoercedInts to avoid needing to dupe const name = try sema.arena.dupe(u8, ip.stringToSlice(enum_type.names[i])); @@ -17739,7 +17739,7 @@ fn zirBoolBr( return sema.resolveBody(parent_block, body, inst); } - const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = .{ .ty_pl = .{ @@ -17801,8 +17801,8 @@ fn finishCondBr( @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len + 1); const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{ - .then_body_len = @intCast(u32, then_block.instructions.items.len), - .else_body_len = @intCast(u32, else_block.instructions.items.len), + .then_body_len = @as(u32, @intCast(then_block.instructions.items.len)), + .else_body_len = @as(u32, @intCast(else_block.instructions.items.len)), }); sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items); sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items); @@ -17813,7 +17813,7 @@ fn finishCondBr( } } }); sema.air_instructions.items(.data)[block_inst].ty_pl.payload = sema.addExtraAssumeCapacity( - Air.Block{ .body_len = @intCast(u32, child_block.instructions.items.len) }, + Air.Block{ .body_len = @as(u32, @intCast(child_block.instructions.items.len)) }, ); sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); @@ -17976,8 +17976,8 @@ fn zirCondbr( .data = .{ .pl_op = .{ .operand = cond, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ - .then_body_len = @intCast(u32, true_instructions.len), - .else_body_len = @intCast(u32, sub_block.instructions.items.len), + .then_body_len = @as(u32, @intCast(true_instructions.len)), + .else_body_len = @as(u32, @intCast(sub_block.instructions.items.len)), }), } }, }); @@ -18024,7 +18024,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError! 
.data = .{ .pl_op = .{ .operand = err_union, .payload = sema.addExtraAssumeCapacity(Air.Try{ - .body_len = @intCast(u32, sub_block.instructions.items.len), + .body_len = @as(u32, @intCast(sub_block.instructions.items.len)), }), } }, }); @@ -18084,7 +18084,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr .ty = res_ty_ref, .payload = sema.addExtraAssumeCapacity(Air.TryPtr{ .ptr = operand, - .body_len = @intCast(u32, sub_block.instructions.items.len), + .body_len = @as(u32, @intCast(sub_block.instructions.items.len)), }), } }, }); @@ -18100,7 +18100,7 @@ fn addRuntimeBreak(sema: *Sema, child_block: *Block, break_data: BreakData) !voi const labeled_block = if (!gop.found_existing) blk: { try sema.post_hoc_blocks.ensureUnusedCapacity(sema.gpa, 1); - const new_block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const new_block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); gop.value_ptr.* = Air.indexToRef(new_block_inst); try sema.air_instructions.append(sema.gpa, .{ .tag = .block, @@ -18296,8 +18296,8 @@ fn retWithErrTracing( @typeInfo(Air.Block).Struct.fields.len + 1); const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{ - .then_body_len = @intCast(u32, then_block.instructions.items.len), - .else_body_len = @intCast(u32, else_block.instructions.items.len), + .then_body_len = @as(u32, @intCast(then_block.instructions.items.len)), + .else_body_len = @as(u32, @intCast(else_block.instructions.items.len)), }); sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items); sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items); @@ -18486,7 +18486,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air var extra_i = extra.end; const sentinel = if (inst_data.flags.has_sentinel) blk: { - const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; const coerced = try sema.coerce(block, elem_ty, try sema.resolveInst(ref), sentinel_src); const val = try sema.resolveConstValue(block, sentinel_src, coerced, "pointer sentinel value must be comptime-known"); @@ -18494,7 +18494,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } else .none; const abi_align: Alignment = if (inst_data.flags.has_align) blk: { - const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; const coerced = try sema.coerce(block, Type.u32, try sema.resolveInst(ref), align_src); const val = try sema.resolveConstValue(block, align_src, coerced, "pointer alignment must be comptime-known"); @@ -18507,29 +18507,29 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }, else => {}, } - const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?); + const abi_align = @as(u32, @intCast((try val.getUnsignedIntAdvanced(mod, sema)).?)); try sema.validateAlign(block, align_src, abi_align); break :blk Alignment.fromByteUnits(abi_align); } else .none; const address_space: std.builtin.AddressSpace = if (inst_data.flags.has_addrspace) blk: { - const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; break :blk try sema.analyzeAddressSpace(block, addrspace_src, ref, .pointer); } else if (elem_ty.zigTypeTag(mod) == .Fn and target.cpu.arch == .avr) .flash 
else .generic; const bit_offset = if (inst_data.flags.has_bit_range) blk: { - const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; const bit_offset = try sema.resolveInt(block, bitoffset_src, ref, Type.u16, "pointer bit-offset must be comptime-known"); - break :blk @intCast(u16, bit_offset); + break :blk @as(u16, @intCast(bit_offset)); } else 0; const host_size: u16 = if (inst_data.flags.has_bit_range) blk: { - const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; const host_size = try sema.resolveInt(block, hostsize_src, ref, Type.u16, "pointer host size must be comptime-known"); - break :blk @intCast(u16, host_size); + break :blk @as(u16, @intCast(host_size)); } else 0; if (host_size != 0 and bit_offset >= host_size * 8) { @@ -18669,7 +18669,7 @@ fn unionInit( if (try sema.resolveMaybeUndefVal(init)) |init_val| { const tag_ty = union_ty.unionTagTypeHypothetical(mod); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); + const enum_field_index = @as(u32, @intCast(tag_ty.enumFieldIndex(field_name, mod).?)); const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); return sema.addConstant((try mod.intern(.{ .un = .{ .ty = union_ty.toIntern(), @@ -18771,7 +18771,7 @@ fn zirStructInit( const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start)); const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src); const tag_ty = resolved_ty.unionTagTypeHypothetical(mod); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); + const enum_field_index = @as(u32, @intCast(tag_ty.enumFieldIndex(field_name, mod).?)); const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); const init_inst = try sema.resolveInst(item.data.init); @@ -18915,7 +18915,7 @@ fn finishStructInit( }); const alloc = try block.addTy(.alloc, alloc_ty); for (field_inits, 0..) |field_init, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); const field_src = dest_src; const field_ptr = try sema.structFieldPtrByIndex(block, dest_src, alloc, i, field_src, struct_ty, true); try sema.storePtr(block, dest_src, field_ptr, field_init); @@ -18958,7 +18958,7 @@ fn zirStructInitAnon( var runtime_index: ?usize = null; var extra_index = extra.end; for (types, 0..) |*field_ty, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index); extra_index = item.end; @@ -19037,7 +19037,7 @@ fn zirStructInitAnon( const alloc = try block.addTy(.alloc, alloc_ty); var extra_index = extra.end; for (types, 0..) |field_ty, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index); extra_index = item.end; @@ -19109,7 +19109,7 @@ fn zirArrayInit( const opt_runtime_index: ?u32 = for (resolved_args, 0..) |arg, i| { const comptime_known = try sema.isComptimeKnown(arg); - if (!comptime_known) break @intCast(u32, i); + if (!comptime_known) break @as(u32, @intCast(i)); } else null; const runtime_index = opt_runtime_index orelse { @@ -19244,7 +19244,7 @@ fn zirArrayInitAnon( }); const alloc = try block.addTy(.alloc, alloc_ty); for (operands, 0..) 
|operand, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); const field_ptr_ty = try mod.ptrType(.{ .child = types[i], .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, @@ -19395,7 +19395,7 @@ fn zirFrame( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand)); + const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand))); return sema.failWithUseOfAsync(block, src); } @@ -19588,7 +19588,7 @@ fn zirReify( const mod = sema.mod; const gpa = sema.gpa; const ip = &mod.intern_pool; - const name_strategy = @enumFromInt(Zir.Inst.NameStrategy, extended.small); + const name_strategy = @as(Zir.Inst.NameStrategy, @enumFromInt(extended.small)); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const type_info_ty = try sema.getBuiltinType("Type"); @@ -19600,7 +19600,7 @@ fn zirReify( const target = mod.getTarget(); if (try union_val.val.toValue().anyUndef(mod)) return sema.failWithUseOfUndef(block, src); const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag.toValue(), mod).?; - switch (@enumFromInt(std.builtin.TypeId, tag_index)) { + switch (@as(std.builtin.TypeId, @enumFromInt(tag_index))) { .Type => return Air.Inst.Ref.type_type, .Void => return Air.Inst.Ref.void_type, .Bool => return Air.Inst.Ref.bool_type, @@ -19623,7 +19623,7 @@ fn zirReify( ); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); - const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); + const bits = @as(u16, @intCast(bits_val.toUnsignedInt(mod))); const ty = try mod.intType(signedness, bits); return sema.addType(ty); }, @@ -19636,7 +19636,7 @@ fn zirReify( try ip.getOrPutString(gpa, "child"), ).?); - const len = @intCast(u32, len_val.toUnsignedInt(mod)); + const len = @as(u32, @intCast(len_val.toUnsignedInt(mod))); const child_ty = child_val.toType(); try sema.checkVectorElemType(block, src, child_ty); @@ -19653,7 +19653,7 @@ fn zirReify( try ip.getOrPutString(gpa, "bits"), ).?); - const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); + const bits = @as(u16, @intCast(bits_val.toUnsignedInt(mod))); const ty = switch (bits) { 16 => Type.f16, 32 => Type.f32, @@ -19925,7 +19925,7 @@ fn zirReify( } // Define our empty enum decl - const fields_len = @intCast(u32, try sema.usizeCast(block, src, fields_val.sliceLen(mod))); + const fields_len = @as(u32, @intCast(try sema.usizeCast(block, src, fields_val.sliceLen(mod)))); const incomplete_enum = try ip.getIncompleteEnum(gpa, .{ .decl = new_decl_index, .namespace = .none, @@ -20288,7 +20288,7 @@ fn zirReify( if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const alignment = @intCast(u29, alignment_val.toUnsignedInt(mod)); + const alignment = @as(u29, @intCast(alignment_val.toUnsignedInt(mod))); if (alignment == target_util.defaultFunctionAlignment(target)) { break :alignment .none; } else { @@ -20565,7 +20565,7 @@ fn reifyStruct( try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum); struct_obj.backing_int_ty = backing_int_ty; } else { - struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum)); + struct_obj.backing_int_ty = try mod.intType(.unsigned, @as(u16, @intCast(fields_bit_sum))); } struct_obj.status = .have_layout; @@ -20636,7 +20636,7 @@ fn zirCVaEnd(sema: *Sema, block: *Block, 
extended: Zir.Inst.Extended.InstData) C } fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { - const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand)); + const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand))); const va_list_ty = try sema.getBuiltinType("VaList"); try sema.requireRuntimeBlock(block, src, null); @@ -20903,7 +20903,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat } fn zirPtrCastFull(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { - const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); + const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small)))); const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -21310,7 +21310,7 @@ fn ptrCastFull( fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const mod = sema.mod; - const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); + const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small)))); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -22271,7 +22271,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const len_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const scalar_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const len = @intCast(u32, try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector splat destination length must be comptime-known")); + const len = @as(u32, @intCast(try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector splat destination length must be comptime-known"))); const scalar = try sema.resolveInst(extra.rhs); const scalar_ty = sema.typeOf(scalar); try sema.checkVectorElemType(block, scalar_src, scalar_ty); @@ -22376,12 +22376,12 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}), }; mask_ty = try mod.vectorType(.{ - .len = @intCast(u32, mask_len), + .len = @as(u32, @intCast(mask_len)), .child = .i32_type, }); mask = try sema.coerce(block, mask_ty, mask, mask_src); const mask_val = try sema.resolveConstMaybeUndefVal(block, mask_src, mask, "shuffle mask must be comptime-known"); - return sema.analyzeShuffle(block, inst_data.src_node, elem_ty, a, b, mask_val, @intCast(u32, mask_len)); + return sema.analyzeShuffle(block, inst_data.src_node, elem_ty, a, b, mask_val, @as(u32, @intCast(mask_len))); } fn analyzeShuffle( @@ -22425,8 +22425,8 @@ fn analyzeShuffle( if (maybe_a_len == null and maybe_b_len == null) { return sema.addConstUndef(res_ty); } - const a_len = @intCast(u32, maybe_a_len orelse maybe_b_len.?); - const b_len = @intCast(u32, maybe_b_len orelse a_len); + const a_len = @as(u32, @intCast(maybe_a_len orelse maybe_b_len.?)); + const b_len = @as(u32, @intCast(maybe_b_len orelse a_len)); const a_ty = try mod.vectorType(.{ .len = a_len, @@ -22445,17 
+22445,17 @@ fn analyzeShuffle( .{ b_len, b_src, b_ty }, }; - for (0..@intCast(usize, mask_len)) |i| { + for (0..@as(usize, @intCast(mask_len))) |i| { const elem = try mask.elemValue(sema.mod, i); if (elem.isUndef(mod)) continue; const int = elem.toSignedInt(mod); var unsigned: u32 = undefined; var chosen: u32 = undefined; if (int >= 0) { - unsigned = @intCast(u32, int); + unsigned = @as(u32, @intCast(int)); chosen = 0; } else { - unsigned = @intCast(u32, ~int); + unsigned = @as(u32, @intCast(~int)); chosen = 1; } if (unsigned >= operand_info[chosen][0]) { @@ -22488,7 +22488,7 @@ fn analyzeShuffle( continue; } const int = mask_elem_val.toSignedInt(mod); - const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int); + const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int)); values[i] = try (try (if (int >= 0) a_val else b_val).elemValue(mod, unsigned)).intern(elem_ty, mod); } return sema.addConstant((try mod.intern(.{ .aggregate = .{ @@ -22509,23 +22509,23 @@ fn analyzeShuffle( const max_len = try sema.usizeCast(block, max_src, @max(a_len, b_len)); const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len); - for (@intCast(usize, 0)..@intCast(usize, min_len)) |i| { + for (@as(usize, @intCast(0))..@as(usize, @intCast(min_len))) |i| { expand_mask_values[i] = (try mod.intValue(Type.comptime_int, i)).toIntern(); } - for (@intCast(usize, min_len)..@intCast(usize, max_len)) |i| { + for (@as(usize, @intCast(min_len))..@as(usize, @intCast(max_len))) |i| { expand_mask_values[i] = (try mod.intValue(Type.comptime_int, -1)).toIntern(); } const expand_mask = try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = @intCast(u32, max_len), .child = .comptime_int_type })).toIntern(), + .ty = (try mod.vectorType(.{ .len = @as(u32, @intCast(max_len)), .child = .comptime_int_type })).toIntern(), .storage = .{ .elems = expand_mask_values }, } }); if (a_len < b_len) { const undef = try sema.addConstUndef(a_ty); - a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask.toValue(), @intCast(u32, max_len)); + a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask.toValue(), @as(u32, @intCast(max_len))); } else { const undef = try sema.addConstUndef(b_ty); - b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask.toValue(), @intCast(u32, max_len)); + b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask.toValue(), @as(u32, @intCast(max_len))); } } @@ -22562,7 +22562,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C .Vector, .Array => pred_ty.arrayLen(mod), else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(mod)}), }; - const vec_len = @intCast(u32, try sema.usizeCast(block, pred_src, vec_len_u64)); + const vec_len = @as(u32, @intCast(try sema.usizeCast(block, pred_src, vec_len_u64))); const bool_vec_ty = try mod.vectorType(.{ .len = vec_len, @@ -22930,7 +22930,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod)); for (resolved_args, 0..) 
|*resolved, i| { - resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty); + resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @as(u32, @intCast(i)), args_ty); } const callee_ty = sema.typeOf(func); @@ -23048,7 +23048,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr .ty = try sema.addType(result_ptr), .payload = try block.sema.addExtra(Air.FieldParentPtr{ .field_ptr = casted_field_ptr, - .field_index = @intCast(u32, field_index), + .field_index = @as(u32, @intCast(field_index)), }), } }, }); @@ -23684,7 +23684,7 @@ fn zirVarExtended( const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 }; const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 }; - const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small); + const small = @as(Zir.Inst.ExtendedVar.Small, @bitCast(extended.small)); var extra_index: usize = extra.end; @@ -23699,7 +23699,7 @@ fn zirVarExtended( assert(!small.has_align); const uncasted_init: Air.Inst.Ref = if (small.has_init) blk: { - const init_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const init_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; break :blk try sema.resolveInst(init_ref); } else .none; @@ -23776,7 +23776,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.isGenericPoison()) { break :blk null; } - const alignment = @intCast(u32, val.toUnsignedInt(mod)); + const alignment = @as(u32, @intCast(val.toUnsignedInt(mod))); try sema.validateAlign(block, align_src, alignment); if (alignment == target_util.defaultFunctionAlignment(target)) { break :blk .none; @@ -23784,7 +23784,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A break :blk Alignment.fromNonzeroByteUnits(alignment); } } else if (extra.data.bits.has_align_ref) blk: { - const align_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const align_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const align_tv = sema.resolveInstConst(block, align_src, align_ref, "alignment must be comptime-known") catch |err| switch (err) { error.GenericPoison => { @@ -23792,7 +23792,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - const alignment = @intCast(u32, align_tv.val.toUnsignedInt(mod)); + const alignment = @as(u32, @intCast(align_tv.val.toUnsignedInt(mod))); try sema.validateAlign(block, align_src, alignment); if (alignment == target_util.defaultFunctionAlignment(target)) { break :blk .none; @@ -23814,7 +23814,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } break :blk mod.toEnum(std.builtin.AddressSpace, val); } else if (extra.data.bits.has_addrspace_ref) blk: { - const addrspace_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const addrspace_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const addrspace_tv = sema.resolveInstConst(block, addrspace_src, addrspace_ref, "addrespace must be comptime-known") catch |err| switch (err) { error.GenericPoison => { @@ -23838,7 +23838,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } break :blk FuncLinkSection{ .explicit = try val.toIpString(ty, mod) }; } else if (extra.data.bits.has_section_ref) blk: { - const 
section_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const section_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const section_name = sema.resolveConstStringIntern(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) { error.GenericPoison => { @@ -23862,7 +23862,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } break :blk mod.toEnum(std.builtin.CallingConvention, val); } else if (extra.data.bits.has_cc_ref) blk: { - const cc_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const cc_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const cc_tv = sema.resolveInstConst(block, cc_src, cc_ref, "calling convention must be comptime-known") catch |err| switch (err) { error.GenericPoison => { @@ -23886,7 +23886,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const ty = val.toType(); break :blk ty; } else if (extra.data.bits.has_ret_ty_ref) blk: { - const ret_ty_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const ret_ty_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const ret_ty_tv = sema.resolveInstConst(block, ret_src, ret_ty_ref, "return type must be comptime-known") catch |err| switch (err) { error.GenericPoison => { @@ -23995,7 +23995,7 @@ fn zirWasmMemorySize( return sema.fail(block, builtin_src, "builtin @wasmMemorySize is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)}); } - const index = @intCast(u32, try sema.resolveInt(block, index_src, extra.operand, Type.u32, "wasm memory size index must be comptime-known")); + const index = @as(u32, @intCast(try sema.resolveInt(block, index_src, extra.operand, Type.u32, "wasm memory size index must be comptime-known"))); try sema.requireRuntimeBlock(block, builtin_src, null); return block.addInst(.{ .tag = .wasm_memory_size, @@ -24020,7 +24020,7 @@ fn zirWasmMemoryGrow( return sema.fail(block, builtin_src, "builtin @wasmMemoryGrow is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)}); } - const index = @intCast(u32, try sema.resolveInt(block, index_src, extra.lhs, Type.u32, "wasm memory size index must be comptime-known")); + const index = @as(u32, @intCast(try sema.resolveInt(block, index_src, extra.lhs, Type.u32, "wasm memory size index must be comptime-known"))); const delta = try sema.coerce(block, Type.u32, try sema.resolveInst(extra.rhs), delta_src); try sema.requireRuntimeBlock(block, builtin_src, null); @@ -24060,7 +24060,7 @@ fn resolvePrefetchOptions( return std.builtin.PrefetchOptions{ .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val), - .locality = @intCast(u2, locality_val.toUnsignedInt(mod)), + .locality = @as(u2, @intCast(locality_val.toUnsignedInt(mod))), .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val), }; } @@ -24259,7 +24259,7 @@ fn zirWorkItem( }, } - const dimension = @intCast(u32, try sema.resolveInt(block, dimension_src, extra.operand, Type.u32, "dimension must be comptime-known")); + const dimension = @as(u32, @intCast(try sema.resolveInt(block, dimension_src, extra.operand, Type.u32, "dimension must be comptime-known"))); try sema.requireRuntimeBlock(block, builtin_src, null); return block.addInst(.{ @@ -24814,7 +24814,7 @@ fn addSafetyCheckExtra( fail_block.instructions.items.len); try 
sema.air_instructions.ensureUnusedCapacity(gpa, 3); - const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); const cond_br_inst = block_inst + 1; const br_inst = cond_br_inst + 1; sema.air_instructions.appendAssumeCapacity(.{ @@ -24834,7 +24834,7 @@ fn addSafetyCheckExtra( .operand = ok, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = 1, - .else_body_len = @intCast(u32, fail_block.instructions.items.len), + .else_body_len = @as(u32, @intCast(fail_block.instructions.items.len)), }), } }, }); @@ -25210,7 +25210,7 @@ fn fieldVal( const union_ty = try sema.resolveTypeFields(child_type); if (union_ty.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| { - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); return sema.addConstant( try mod.enumValueFieldIndex(enum_ty, field_index), ); @@ -25226,7 +25226,7 @@ fn fieldVal( } const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); const enum_val = try mod.enumValueFieldIndex(child_type, field_index); return sema.addConstant(enum_val); }, @@ -25438,7 +25438,7 @@ fn fieldPtr( const union_ty = try sema.resolveTypeFields(child_type); if (union_ty.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| { - const field_index_u32 = @intCast(u32, field_index); + const field_index_u32 = @as(u32, @intCast(field_index)); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( @@ -25459,7 +25459,7 @@ fn fieldPtr( const field_index = child_type.enumFieldIndex(field_name, mod) orelse { return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }; - const field_index_u32 = @intCast(u32, field_index); + const field_index_u32 = @as(u32, @intCast(field_index)); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( @@ -25544,7 +25544,7 @@ fn fieldCallBind( if (mod.typeToStruct(struct_ty)) |struct_obj| { const field_index_usize = struct_obj.fields.getIndex(field_name) orelse break :find_field; - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); const field = struct_obj.fields.values()[field_index]; return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr); @@ -25559,7 +25559,7 @@ fn fieldCallBind( } else { const max = struct_ty.structFieldCount(mod); for (0..max) |i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); if (field_name == struct_ty.structFieldName(i, mod)) { return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i, mod), i, object_ptr); } @@ -25570,7 +25570,7 @@ fn fieldCallBind( const union_ty = try sema.resolveTypeFields(concrete_ty); const fields = union_ty.unionFields(mod); const field_index_usize = fields.getIndex(field_name) orelse break :find_field; - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); const field = fields.values()[field_index]; return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, 
object_ptr); @@ -25792,7 +25792,7 @@ fn structFieldPtr( const field_index_big = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); - const field_index = @intCast(u32, field_index_big); + const field_index = @as(u32, @intCast(field_index_big)); return sema.structFieldPtrByIndex(block, src, struct_ptr, field_index, field_name_src, struct_ty, initializing); } @@ -25838,7 +25838,7 @@ fn structFieldPtrByIndex( if (i == field_index) { ptr_ty_data.packed_offset.bit_offset = running_bits; } - running_bits += @intCast(u16, f.ty.bitSize(mod)); + running_bits += @as(u16, @intCast(f.ty.bitSize(mod))); } ptr_ty_data.packed_offset.host_size = (running_bits + 7) / 8; @@ -25868,7 +25868,7 @@ fn structFieldPtrByIndex( const elem_size_bits = ptr_ty_data.child.toType().bitSize(mod); if (elem_size_bytes * 8 == elem_size_bits) { const byte_offset = ptr_ty_data.packed_offset.bit_offset / 8; - const new_align = @enumFromInt(Alignment, @ctz(byte_offset | parent_align)); + const new_align = @as(Alignment, @enumFromInt(@ctz(byte_offset | parent_align))); assert(new_align != .none); ptr_ty_data.flags.alignment = new_align; ptr_ty_data.packed_offset = .{ .host_size = 0, .bit_offset = 0 }; @@ -25923,7 +25923,7 @@ fn structFieldVal( const field_index_usize = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); const field = struct_obj.fields.values()[field_index]; if (field.is_comptime) { @@ -26058,7 +26058,7 @@ fn unionFieldPtr( .address_space = union_ptr_ty.ptrAddressSpace(mod), }, }); - const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?); + const enum_field_index = @as(u32, @intCast(union_obj.tag_ty.enumFieldIndex(field_name, mod).?)); if (initializing and field.ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { @@ -26146,7 +26146,7 @@ fn unionFieldVal( const union_obj = mod.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field = union_obj.fields.values()[field_index]; - const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?); + const enum_field_index = @as(u32, @intCast(union_obj.tag_ty.enumFieldIndex(field_name, mod).?)); if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| { if (union_val.isUndef(mod)) return sema.addConstUndef(field.ty); @@ -26226,7 +26226,7 @@ fn elemPtr( .Struct => { // Tuple field access. 
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known"); - const index = @intCast(u32, index_val.toUnsignedInt(mod)); + const index = @as(u32, @intCast(index_val.toUnsignedInt(mod))); return sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init); }, else => { @@ -26261,7 +26261,7 @@ fn elemPtrOneLayerOnly( const runtime_src = rs: { const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index = @intCast(usize, index_val.toUnsignedInt(mod)); + const index = @as(usize, @intCast(index_val.toUnsignedInt(mod))); const result_ty = try sema.elemPtrType(indexable_ty, index); const elem_ptr = try ptr_val.elemPtr(result_ty, index, mod); return sema.addConstant(elem_ptr); @@ -26280,7 +26280,7 @@ fn elemPtrOneLayerOnly( .Struct => { assert(child_ty.isTuple(mod)); const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known"); - const index = @intCast(u32, index_val.toUnsignedInt(mod)); + const index = @as(u32, @intCast(index_val.toUnsignedInt(mod))); return sema.tupleFieldPtr(block, indexable_src, indexable, elem_index_src, index, false); }, else => unreachable, // Guaranteed by checkIndexable @@ -26318,7 +26318,7 @@ fn elemVal( const runtime_src = rs: { const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index = @intCast(usize, index_val.toUnsignedInt(mod)); + const index = @as(usize, @intCast(index_val.toUnsignedInt(mod))); const elem_ty = indexable_ty.elemType2(mod); const many_ptr_ty = try mod.manyConstPtrType(elem_ty); const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty); @@ -26355,7 +26355,7 @@ fn elemVal( .Struct => { // Tuple field access. 
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known"); - const index = @intCast(u32, index_val.toUnsignedInt(mod)); + const index = @as(u32, @intCast(index_val.toUnsignedInt(mod))); return sema.tupleField(block, indexable_src, indexable, elem_index_src, index); }, else => unreachable, @@ -26516,7 +26516,7 @@ fn elemValArray( const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(mod)); + const index = @as(usize, @intCast(index_val.toUnsignedInt(mod))); if (array_sent) |s| { if (index == array_len) { return sema.addConstant(s); @@ -26532,7 +26532,7 @@ fn elemValArray( return sema.addConstUndef(elem_ty); } if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(mod)); + const index = @as(usize, @intCast(index_val.toUnsignedInt(mod))); const elem_val = try array_val.elemValue(mod, index); return sema.addConstant(elem_val); } @@ -26644,7 +26644,7 @@ fn elemValSlice( return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); } if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(mod)); + const index = @as(usize, @intCast(index_val.toUnsignedInt(mod))); if (index >= slice_len_s) { const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); @@ -27287,7 +27287,7 @@ fn coerceExtra( return sema.failWithOwnedErrorMsg(msg); }; return sema.addConstant( - try mod.enumValueFieldIndex(dest_ty, @intCast(u32, field_index)), + try mod.enumValueFieldIndex(dest_ty, @as(u32, @intCast(field_index))), ); }, .Union => blk: { @@ -27692,8 +27692,8 @@ const InMemoryCoercionResult = union(enum) { var index: u6 = 0; var actual_noalias = false; while (true) : (index += 1) { - const actual = @truncate(u1, param.actual >> index); - const wanted = @truncate(u1, param.wanted >> index); + const actual = @as(u1, @truncate(param.actual >> index)); + const wanted = @as(u1, @truncate(param.wanted >> index)); if (actual != wanted) { actual_noalias = actual == 1; break; @@ -28218,7 +28218,7 @@ fn coerceInMemoryAllowedFns( const dest_param_ty = dest_info.param_types[param_i].toType(); const src_param_ty = src_info.param_types[param_i].toType(); - const param_i_small = @intCast(u5, param_i); + const param_i_small = @as(u5, @intCast(param_i)); if (dest_info.paramIsComptime(param_i_small) != src_info.paramIsComptime(param_i_small)) { return InMemoryCoercionResult{ .fn_param_comptime = .{ .index = param_i, @@ -28832,7 +28832,7 @@ fn beginComptimePtrMutation( // bytes.len may be one greater than dest_len because of the case when // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. assert(bytes.len >= dest_len); - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); + const elems = try arena.alloc(Value, @as(usize, @intCast(dest_len))); for (elems, 0..) 
|*elem, i| { elem.* = try mod.intValue(elem_ty, bytes[i]); } @@ -28844,7 +28844,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &elems[@intCast(usize, elem_ptr.index)], + &elems[@as(usize, @intCast(elem_ptr.index))], ptr_elem_ty, parent.mut_decl, ); @@ -28872,7 +28872,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &elems[@intCast(usize, elem_ptr.index)], + &elems[@as(usize, @intCast(elem_ptr.index))], ptr_elem_ty, parent.mut_decl, ); @@ -28883,7 +28883,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &val_ptr.castTag(.aggregate).?.data[@intCast(usize, elem_ptr.index)], + &val_ptr.castTag(.aggregate).?.data[@as(usize, @intCast(elem_ptr.index))], ptr_elem_ty, parent.mut_decl, ), @@ -28909,7 +28909,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &elems[@intCast(usize, elem_ptr.index)], + &elems[@as(usize, @intCast(elem_ptr.index))], ptr_elem_ty, parent.mut_decl, ); @@ -28964,7 +28964,7 @@ fn beginComptimePtrMutation( }, .field => |field_ptr| { const base_child_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); - const field_index = @intCast(u32, field_ptr.index); + const field_index = @as(u32, @intCast(field_ptr.index)); var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.base.toValue(), base_child_ty); switch (parent.pointee) { @@ -29401,12 +29401,12 @@ fn beginComptimePtrLoad( } deref.pointee = TypedValue{ .ty = elem_ty, - .val = try array_tv.val.elemValue(mod, @intCast(usize, elem_ptr.index)), + .val = try array_tv.val.elemValue(mod, @as(usize, @intCast(elem_ptr.index))), }; break :blk deref; }, .field => |field_ptr| blk: { - const field_index = @intCast(u32, field_ptr.index); + const field_index = @as(u32, @intCast(field_ptr.index)); const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty); @@ -29990,7 +29990,7 @@ fn coerceTupleToArray( var runtime_src: ?LazySrcLoc = null; for (element_vals, element_refs, 0..) 
|*val, *ref, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); if (i_usize == inst_len) { const sentinel_val = dest_ty.sentinel(mod).?; val.* = sentinel_val.toIntern(); @@ -30101,7 +30101,7 @@ fn coerceTupleToStruct( else => unreachable, }; for (0..field_count) |field_index_usize| { - const field_i = @intCast(u32, field_index_usize); + const field_i = @as(u32, @intCast(field_index_usize)); const field_src = inst_src; // TODO better source location // https://github.com/ziglang/zig/issues/15709 const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) { @@ -30217,7 +30217,7 @@ fn coerceTupleToTuple( var runtime_src: ?LazySrcLoc = null; for (0..dest_field_count) |field_index_usize| { - const field_i = @intCast(u32, field_index_usize); + const field_i = @as(u32, @intCast(field_index_usize)); const field_src = inst_src; // TODO better source location // https://github.com/ziglang/zig/issues/15709 const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) { @@ -31532,7 +31532,7 @@ fn compareIntsOnlyPossibleResult( const ty = try mod.intType( if (is_negative) .signed else .unsigned, - @intCast(u16, req_bits), + @as(u16, @intCast(req_bits)), ); const pop_count = lhs_val.popCount(ty, mod); @@ -32294,7 +32294,7 @@ fn resolvePeerTypesInner( }; return .{ .success = try mod.vectorType(.{ - .len = @intCast(u32, len.?), + .len = @as(u32, @intCast(len.?)), .child = child_ty.toIntern(), }) }; }, @@ -33402,7 +33402,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { for (struct_obj.fields.values(), 0..) |field, i| { optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty)) - @intCast(u32, i) + @as(u32, @intCast(i)) else Module.Struct.omitted_field; } @@ -33443,7 +33443,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; assert(extended.opcode == .struct_decl); - const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); + const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); if (small.has_backing_int) { var extra_index: usize = extended.operand; @@ -33497,7 +33497,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi const backing_int_src: LazySrcLoc = .{ .node_offset_container_tag = 0 }; const backing_int_ty = blk: { if (backing_int_body_len == 0) { - const backing_int_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + const backing_int_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); break :blk try sema.resolveType(&block, backing_int_src, backing_int_ref); } else { const body = zir.extra[extra_index..][0..backing_int_body_len]; @@ -33543,7 +33543,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi }; return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum}); } - struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum)); + struct_obj.backing_int_ty = try mod.intType(.unsigned, @as(u16, @intCast(fields_bit_sum))); } } @@ -34178,7 +34178,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; 
assert(extended.opcode == .struct_decl); - const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); + const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src = LazySrcLoc.nodeOffset(0); @@ -34288,13 +34288,13 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void cur_bit_bag = zir.extra[bit_bag_index]; bit_bag_index += 1; } - const has_align = @truncate(u1, cur_bit_bag) != 0; + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_init = @truncate(u1, cur_bit_bag) != 0; + const has_init = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const is_comptime = @truncate(u1, cur_bit_bag) != 0; + const is_comptime = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_type_body = @truncate(u1, cur_bit_bag) != 0; + const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; var field_name_zir: ?[:0]const u8 = null; @@ -34309,7 +34309,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void if (has_type_body) { fields[field_i].type_body_len = zir.extra[extra_index]; } else { - fields[field_i].type_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + fields[field_i].type_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); } extra_index += 1; @@ -34529,14 +34529,14 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const zir = mod.namespacePtr(union_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[union_obj.zir_index].extended; assert(extended.opcode == .union_decl); - const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); + const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src = LazySrcLoc.nodeOffset(0); extra_index += @intFromBool(small.has_src_node); const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: { - const ty_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + const ty_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; break :blk ty_ref; } else .none; @@ -34684,13 +34684,13 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { cur_bit_bag = zir.extra[bit_bag_index]; bit_bag_index += 1; } - const has_type = @truncate(u1, cur_bit_bag) != 0; + const has_type = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_align = @truncate(u1, cur_bit_bag) != 0; + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_tag = @truncate(u1, cur_bit_bag) != 0; + const has_tag = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const unused = @truncate(u1, cur_bit_bag) != 0; + const unused = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; _ = unused; @@ -34701,19 +34701,19 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { extra_index += 1; const field_type_ref: Zir.Inst.Ref = if (has_type) blk: { - const field_type_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + const field_type_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; break :blk field_type_ref; } else .none; const align_ref: Zir.Inst.Ref = if (has_align) blk: { - const align_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + const align_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; break :blk align_ref; } else .none; const 
tag_ref: Air.Inst.Ref = if (has_tag) blk: { - const tag_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + const tag_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; break :blk try sema.resolveInst(tag_ref); } else .none; @@ -35427,12 +35427,12 @@ pub fn getTmpAir(sema: Sema) Air { pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { if (@intFromEnum(ty.toIntern()) < Air.ref_start_index) - return @enumFromInt(Air.Inst.Ref, @intFromEnum(ty.toIntern())); + return @as(Air.Inst.Ref, @enumFromInt(@intFromEnum(ty.toIntern()))); try sema.air_instructions.append(sema.gpa, .{ .tag = .interned, .data = .{ .interned = ty.toIntern() }, }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1))); } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { @@ -35446,12 +35446,12 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { pub fn addConstant(sema: *Sema, val: Value) SemaError!Air.Inst.Ref { if (@intFromEnum(val.toIntern()) < Air.ref_start_index) - return @enumFromInt(Air.Inst.Ref, @intFromEnum(val.toIntern())); + return @as(Air.Inst.Ref, @enumFromInt(@intFromEnum(val.toIntern()))); try sema.air_instructions.append(sema.gpa, .{ .tag = .interned, .data = .{ .interned = val.toIntern() }, }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1))); } pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 { @@ -35462,12 +35462,12 @@ pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 { pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, sema.air_extra.items.len); + const result = @as(u32, @intCast(sema.air_extra.items.len)); inline for (fields) |field| { sema.air_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), Air.Inst.Ref => @intFromEnum(@field(extra, field.name)), - i32 => @bitCast(u32, @field(extra, field.name)), + i32 => @as(u32, @bitCast(@field(extra, field.name))), InternPool.Index => @intFromEnum(@field(extra, field.name)), else => @compileError("bad field type: " ++ @typeName(field.type)), }); @@ -35476,7 +35476,7 @@ pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { } fn appendRefsAssumeCapacity(sema: *Sema, refs: []const Air.Inst.Ref) void { - const coerced = @ptrCast([]const u32, refs); + const coerced = @as([]const u32, @ptrCast(refs)); sema.air_extra.appendSliceAssumeCapacity(coerced); } @@ -35916,10 +35916,10 @@ fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!u32 { /// Not valid to call for packed unions. /// Keep implementation in sync with `Module.Union.Field.normalAlignment`. fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 { - return @intCast(u32, if (field.ty.isNoReturn(sema.mod)) + return @as(u32, @intCast(if (field.ty.isNoReturn(sema.mod)) 0 else - field.abi_align.toByteUnitsOptional() orelse try sema.typeAbiAlignment(field.ty)); + field.abi_align.toByteUnitsOptional() orelse try sema.typeAbiAlignment(field.ty))); } /// Synchronize logic with `Type.isFnOrHasRuntimeBits`. 
@@ -35951,7 +35951,7 @@ fn unionFieldIndex( const union_obj = mod.typeToUnion(union_ty).?; const field_index_usize = union_obj.fields.getIndex(field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name); - return @intCast(u32, field_index_usize); + return @as(u32, @intCast(field_index_usize)); } fn structFieldIndex( @@ -35969,7 +35969,7 @@ fn structFieldIndex( const struct_obj = mod.typeToStruct(struct_ty).?; const field_index_usize = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); - return @intCast(u32, field_index_usize); + return @as(u32, @intCast(field_index_usize)); } } @@ -35983,12 +35983,12 @@ fn anonStructFieldIndex( const mod = sema.mod; switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |anon_struct_type| for (anon_struct_type.names, 0..) |name, i| { - if (name == field_name) return @intCast(u32, i); + if (name == field_name) return @as(u32, @intCast(i)); }, .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { for (struct_obj.fields.keys(), 0..) |name, i| { if (name == field_name) { - return @intCast(u32, i); + return @as(u32, @intCast(i)); } } }, @@ -36586,9 +36586,9 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { if (!is_packed) break :blk .{}; break :blk .{ - .host_size = @intCast(u16, parent_ty.arrayLen(mod)), - .alignment = @intCast(u32, parent_ty.abiAlignment(mod)), - .vector_index = if (offset) |some| @enumFromInt(VI, some) else .runtime, + .host_size = @as(u16, @intCast(parent_ty.arrayLen(mod))), + .alignment = @as(u32, @intCast(parent_ty.abiAlignment(mod))), + .vector_index = if (offset) |some| @as(VI, @enumFromInt(some)) else .runtime, }; } else .{}; @@ -36607,10 +36607,10 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { // The resulting pointer is aligned to the lcd between the offset (an // arbitrary number) and the alignment factor (always a power of two, // non zero). - const new_align = @enumFromInt(Alignment, @min( + const new_align = @as(Alignment, @enumFromInt(@min( @ctz(addend), @intFromEnum(ptr_info.flags.alignment), - )); + ))); assert(new_align != .none); break :a new_align; }; diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 1e8ab0fd87..5abcd7b280 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -250,7 +250,7 @@ pub fn print( }, .empty_enum_value => return writer.writeAll("(empty enum value)"), .float => |float| switch (float.storage) { - inline else => |x| return writer.print("{d}", .{@floatCast(f64, x)}), + inline else => |x| return writer.print("{d}", .{@as(f64, @floatCast(x))}), }, .ptr => |ptr| { if (ptr.addr == .int) { @@ -273,7 +273,7 @@ pub fn print( for (buf[0..max_len], 0..) 
|*c, i| { const elem = try val.elemValue(mod, i); if (elem.isUndef(mod)) break :str; - c.* = @intCast(u8, elem.toUnsignedInt(mod)); + c.* = @as(u8, @intCast(elem.toUnsignedInt(mod))); } const truncated = if (len > max_string_len) " (truncated)" else ""; return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); @@ -352,11 +352,11 @@ pub fn print( if (container_ty.isTuple(mod)) { try writer.print("[{d}]", .{field.index}); } - const field_name = container_ty.structFieldName(@intCast(usize, field.index), mod); + const field_name = container_ty.structFieldName(@as(usize, @intCast(field.index)), mod); try writer.print(".{i}", .{field_name.fmt(ip)}); }, .Union => { - const field_name = container_ty.unionFields(mod).keys()[@intCast(usize, field.index)]; + const field_name = container_ty.unionFields(mod).keys()[@as(usize, @intCast(field.index))]; try writer.print(".{i}", .{field_name.fmt(ip)}); }, .Pointer => { diff --git a/src/Zir.zig b/src/Zir.zig index 45ee755d6b..a51290aceb 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -74,12 +74,12 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, en inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => code.extra[i], - Inst.Ref => @enumFromInt(Inst.Ref, code.extra[i]), - i32 => @bitCast(i32, code.extra[i]), - Inst.Call.Flags => @bitCast(Inst.Call.Flags, code.extra[i]), - Inst.BuiltinCall.Flags => @bitCast(Inst.BuiltinCall.Flags, code.extra[i]), - Inst.SwitchBlock.Bits => @bitCast(Inst.SwitchBlock.Bits, code.extra[i]), - Inst.FuncFancy.Bits => @bitCast(Inst.FuncFancy.Bits, code.extra[i]), + Inst.Ref => @as(Inst.Ref, @enumFromInt(code.extra[i])), + i32 => @as(i32, @bitCast(code.extra[i])), + Inst.Call.Flags => @as(Inst.Call.Flags, @bitCast(code.extra[i])), + Inst.BuiltinCall.Flags => @as(Inst.BuiltinCall.Flags, @bitCast(code.extra[i])), + Inst.SwitchBlock.Bits => @as(Inst.SwitchBlock.Bits, @bitCast(code.extra[i])), + Inst.FuncFancy.Bits => @as(Inst.FuncFancy.Bits, @bitCast(code.extra[i])), else => @compileError("bad field type"), }; i += 1; @@ -101,7 +101,7 @@ pub fn nullTerminatedString(code: Zir, index: usize) [:0]const u8 { pub fn refSlice(code: Zir, start: usize, len: usize) []Inst.Ref { const raw_slice = code.extra[start..][0..len]; - return @ptrCast([]Inst.Ref, raw_slice); + return @as([]Inst.Ref, @ptrCast(raw_slice)); } pub fn hasCompileErrors(code: Zir) bool { @@ -2992,7 +2992,7 @@ pub const Inst = struct { (@as(u128, self.piece1) << 32) | (@as(u128, self.piece2) << 64) | (@as(u128, self.piece3) << 96); - return @bitCast(f128, int_bits); + return @as(f128, @bitCast(int_bits)); } }; @@ -3228,15 +3228,15 @@ pub const DeclIterator = struct { } it.decl_i += 1; - const flags = @truncate(u4, it.cur_bit_bag); + const flags = @as(u4, @truncate(it.cur_bit_bag)); it.cur_bit_bag >>= 4; - const sub_index = @intCast(u32, it.extra_index); + const sub_index = @as(u32, @intCast(it.extra_index)); it.extra_index += 5; // src_hash(4) + line(1) const name = it.zir.nullTerminatedString(it.zir.extra[it.extra_index]); it.extra_index += 3; // name(1) + value(1) + doc_comment(1) - it.extra_index += @truncate(u1, flags >> 2); - it.extra_index += @truncate(u1, flags >> 3); + it.extra_index += @as(u1, @truncate(flags >> 2)); + it.extra_index += @as(u1, @truncate(flags >> 3)); return Item{ .sub_index = sub_index, @@ -3258,7 +3258,7 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator { const extended = datas[decl_inst].extended; switch (extended.opcode) { .struct_decl => { - const 
small = @bitCast(Inst.StructDecl.Small, extended.small); + const small = @as(Inst.StructDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_fields_len); @@ -3281,7 +3281,7 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator { return declIteratorInner(zir, extra_index, decls_len); }, .enum_decl => { - const small = @bitCast(Inst.EnumDecl.Small, extended.small); + const small = @as(Inst.EnumDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_tag_type); @@ -3296,7 +3296,7 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator { return declIteratorInner(zir, extra_index, decls_len); }, .union_decl => { - const small = @bitCast(Inst.UnionDecl.Small, extended.small); + const small = @as(Inst.UnionDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_tag_type); @@ -3311,7 +3311,7 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator { return declIteratorInner(zir, extra_index, decls_len); }, .opaque_decl => { - const small = @bitCast(Inst.OpaqueDecl.Small, extended.small); + const small = @as(Inst.OpaqueDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; extra_index += @intFromBool(small.has_src_node); const decls_len = if (small.has_decls_len) decls_len: { @@ -3507,7 +3507,7 @@ fn findDeclsSwitch( const special_prong = extra.data.bits.specialProng(); if (special_prong != .none) { - const body_len = @truncate(u31, zir.extra[extra_index]); + const body_len = @as(u31, @truncate(zir.extra[extra_index])); extra_index += 1; const body = zir.extra[extra_index..][0..body_len]; extra_index += body.len; @@ -3520,7 +3520,7 @@ fn findDeclsSwitch( var scalar_i: usize = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { extra_index += 1; - const body_len = @truncate(u31, zir.extra[extra_index]); + const body_len = @as(u31, @truncate(zir.extra[extra_index])); extra_index += 1; const body = zir.extra[extra_index..][0..body_len]; extra_index += body_len; @@ -3535,7 +3535,7 @@ fn findDeclsSwitch( extra_index += 1; const ranges_len = zir.extra[extra_index]; extra_index += 1; - const body_len = @truncate(u31, zir.extra[extra_index]); + const body_len = @as(u31, @truncate(zir.extra[extra_index])); extra_index += 1; const items = zir.refSlice(extra_index, items_len); extra_index += items_len; @@ -3617,7 +3617,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo { ret_ty_ref = .void_type; }, 1 => { - ret_ty_ref = @enumFromInt(Inst.Ref, zir.extra[extra_index]); + ret_ty_ref = @as(Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; }, else => { @@ -3671,7 +3671,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo { ret_ty_body = zir.extra[extra_index..][0..body_len]; extra_index += ret_ty_body.len; } else if (extra.data.bits.has_ret_ty_ref) { - ret_ty_ref = @enumFromInt(Inst.Ref, zir.extra[extra_index]); + ret_ty_ref = @as(Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; } @@ -3715,7 +3715,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo { pub const ref_start_index: u32 = InternPool.static_len; pub fn indexToRef(inst: Inst.Index) Inst.Ref { - return @enumFromInt(Inst.Ref, ref_start_index + inst); + return @as(Inst.Ref, @enumFromInt(ref_start_index + inst)); } 
pub fn refToIndex(inst: Inst.Ref) ?Inst.Index { diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 5080a0451a..1d09fcd1cd 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -187,8 +187,8 @@ const DbgInfoReloc = struct { .stack_argument_offset, => |offset| blk: { const adjusted_offset = switch (reloc.mcv) { - .stack_offset => -@intCast(i32, offset), - .stack_argument_offset => @intCast(i32, function.saved_regs_stack_space + offset), + .stack_offset => -@as(i32, @intCast(offset)), + .stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)), else => unreachable, }; break :blk .{ .stack = .{ @@ -224,8 +224,8 @@ const DbgInfoReloc = struct { const adjusted_offset = switch (reloc.mcv) { .ptr_stack_offset, .stack_offset, - => -@intCast(i32, offset), - .stack_argument_offset => @intCast(i32, function.saved_regs_stack_space + offset), + => -@as(i32, @intCast(offset)), + .stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)), else => unreachable, }; break :blk .{ @@ -440,7 +440,7 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { try self.mir_instructions.ensureUnusedCapacity(gpa, 1); - const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len); + const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len)); self.mir_instructions.appendAssumeCapacity(inst); return result_index; } @@ -460,11 +460,11 @@ pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 { pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, self.mir_extra.items.len); + const result = @as(u32, @intCast(self.mir_extra.items.len)); inline for (fields) |field| { self.mir_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), - i32 => @bitCast(u32, @field(extra, field.name)), + i32 => @as(u32, @bitCast(@field(extra, field.name))), else => @compileError("bad field type"), }); } @@ -524,7 +524,7 @@ fn gen(self: *Self) !void { const ty = self.typeOfIndex(inst); - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const abi_align = ty.abiAlignment(mod); const stack_offset = try self.allocMem(abi_size, abi_align, inst); try self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); @@ -547,7 +547,7 @@ fn gen(self: *Self) !void { self.saved_regs_stack_space = 16; inline for (callee_preserved_regs) |reg| { if (self.register_manager.isRegAllocated(reg)) { - saved_regs |= @as(u32, 1) << @intCast(u5, reg.id()); + saved_regs |= @as(u32, 1) << @as(u5, @intCast(reg.id())); self.saved_regs_stack_space += 8; } } @@ -597,14 +597,14 @@ fn gen(self: *Self) !void { for (self.exitlude_jump_relocs.items) |jmp_reloc| { self.mir_instructions.set(jmp_reloc, .{ .tag = .b, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len)) }, }); } // add sp, sp, #stack_size _ = try self.addInst(.{ .tag = .add_immediate, - .data = .{ .rr_imm12_sh = .{ .rd = .sp, .rn = .sp, .imm12 = @intCast(u12, stack_size) } }, + .data = .{ .rr_imm12_sh = .{ .rd = .sp, .rn = .sp, .imm12 = @as(u12, @intCast(stack_size)) } }, }); // @@ -948,15 +948,15 @@ fn finishAirBookkeeping(self: *Self) void { fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void { var tomb_bits = self.liveness.getTombBits(inst); 
for (operands) |op| { - const dies = @truncate(u1, tomb_bits) != 0; + const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; const op_int = @intFromEnum(op); if (op_int < Air.ref_start_index) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); + const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index)); self.processDeath(op_index); } - const is_used = @truncate(u1, tomb_bits) == 0; + const is_used = @as(u1, @truncate(tomb_bits)) == 0; if (is_used) { log.debug("%{d} => {}", .{ inst, result }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -1232,7 +1232,7 @@ fn truncRegister( .rd = dest_reg, .rn = operand_reg, .lsb = 0, - .width = @intCast(u6, int_bits), + .width = @as(u6, @intCast(int_bits)), } }, }); }, @@ -1877,7 +1877,7 @@ fn binOpImmediate( => .{ .rr_imm12_sh = .{ .rd = dest_reg, .rn = lhs_reg, - .imm12 = @intCast(u12, rhs_immediate), + .imm12 = @as(u12, @intCast(rhs_immediate)), } }, .lsl_immediate, .asr_immediate, @@ -1885,7 +1885,7 @@ fn binOpImmediate( => .{ .rr_shift = .{ .rd = dest_reg, .rn = lhs_reg, - .shift = @intCast(u6, rhs_immediate), + .shift = @as(u6, @intCast(rhs_immediate)), } }, else => unreachable, }; @@ -2526,9 +2526,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod))); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), @@ -2654,9 +2654,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod))); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), @@ -2777,7 +2777,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } }, }); - const shift: u6 = @intCast(u6, @as(u7, 64) - @intCast(u7, int_info.bits)); + const shift: u6 = @as(u6, @intCast(@as(u7, 64) - @as(u7, @intCast(int_info.bits)))); if (shift > 0) { // lsl dest_high, dest, #shift _ = try self.addInst(.{ @@ -2837,7 +2837,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .data = .{ .rr_shift = .{ .rd = dest_high_reg, .rn = dest_reg, - .shift = @intCast(u6, int_info.bits), + .shift = @as(u6, @intCast(int_info.bits)), } }, }); @@ -2878,9 +2878,9 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, 
@intCast(tuple_ty.structFieldOffset(1, mod))); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}), @@ -2917,7 +2917,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .data = .{ .rr_shift = .{ .rd = dest_reg, .rn = lhs_reg, - .shift = @intCast(u6, imm), + .shift = @as(u6, @intCast(imm)), } }, }); @@ -2932,7 +2932,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .data = .{ .rr_shift = .{ .rd = reconstructed_reg, .rn = dest_reg, - .shift = @intCast(u6, imm), + .shift = @as(u6, @intCast(imm)), } }, }); } else { @@ -3072,7 +3072,7 @@ fn errUnionErr( return try error_union_bind.resolveToMcv(self); } - const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod)); + const err_offset = @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod))); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -3094,7 +3094,7 @@ fn errUnionErr( ); const err_bit_offset = err_offset * 8; - const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8; + const err_bit_size = @as(u32, @intCast(err_ty.abiSize(mod))) * 8; _ = try self.addInst(.{ .tag = .ubfx, // errors are unsigned integers @@ -3103,8 +3103,8 @@ fn errUnionErr( // Set both registers to the X variant to get the full width .rd = dest_reg.toX(), .rn = operand_reg.toX(), - .lsb = @intCast(u6, err_bit_offset), - .width = @intCast(u7, err_bit_size), + .lsb = @as(u6, @intCast(err_bit_offset)), + .width = @as(u7, @intCast(err_bit_size)), }, }, }); @@ -3152,7 +3152,7 @@ fn errUnionPayload( return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); + const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -3174,7 +3174,7 @@ fn errUnionPayload( ); const payload_bit_offset = payload_offset * 8; - const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8; + const payload_bit_size = @as(u32, @intCast(payload_ty.abiSize(mod))) * 8; _ = try self.addInst(.{ .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, @@ -3183,8 +3183,8 @@ fn errUnionPayload( // Set both registers to the X variant to get the full width .rd = dest_reg.toX(), .rn = operand_reg.toX(), - .lsb = @intCast(u5, payload_bit_offset), - .width = @intCast(u6, payload_bit_size), + .lsb = @as(u5, @intCast(payload_bit_offset)), + .width = @as(u6, @intCast(payload_bit_size)), }, }, }); @@ -3283,9 +3283,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue{ .register = reg }; } - const optional_abi_size = @intCast(u32, optional_ty.abiSize(mod)); + const optional_abi_size = @as(u32, @intCast(optional_ty.abiSize(mod))); const optional_abi_align = optional_ty.abiAlignment(mod); - const offset = @intCast(u32, payload_ty.abiSize(mod)); + const offset = @as(u32, @intCast(payload_ty.abiSize(mod))); const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst); try self.genSetStack(payload_ty, stack_offset, operand); @@ -3308,13 +3308,13 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod))); const abi_align = 
error_union_ty.abiAlignment(mod); const stack_offset = try self.allocMem(abi_size, abi_align, inst); const payload_off = errUnionPayloadOffset(payload_ty, mod); const err_off = errUnionErrorOffset(payload_ty, mod); - try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand); - try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 }); + try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), operand); + try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), .{ .immediate = 0 }); break :result MCValue{ .stack_offset = stack_offset }; }; @@ -3332,13 +3332,13 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod))); const abi_align = error_union_ty.abiAlignment(mod); const stack_offset = try self.allocMem(abi_size, abi_align, inst); const payload_off = errUnionPayloadOffset(payload_ty, mod); const err_off = errUnionErrorOffset(payload_ty, mod); - try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand); - try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef); + try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), operand); + try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), .undef); break :result MCValue{ .stack_offset = stack_offset }; }; @@ -3454,7 +3454,7 @@ fn ptrElemVal( ) !MCValue { const mod = self.bin_file.options.module.?; const elem_ty = ptr_ty.childType(mod); - const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); // TODO optimize for elem_sizes of 1, 2, 4, 8 switch (elem_size) { @@ -3716,7 +3716,7 @@ fn genInlineMemcpy( _ = try self.addInst(.{ .tag = .b_cond, .data = .{ .inst_cond = .{ - .inst = @intCast(u32, self.mir_instructions.len + 5), + .inst = @as(u32, @intCast(self.mir_instructions.len + 5)), .cond = .ge, } }, }); @@ -3754,7 +3754,7 @@ fn genInlineMemcpy( // b loop _ = try self.addInst(.{ .tag = .b, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 5) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 5)) }, }); // end: @@ -3824,7 +3824,7 @@ fn genInlineMemsetCode( _ = try self.addInst(.{ .tag = .b_cond, .data = .{ .inst_cond = .{ - .inst = @intCast(u32, self.mir_instructions.len + 4), + .inst = @as(u32, @intCast(self.mir_instructions.len + 4)), .cond = .ge, } }, }); @@ -3852,7 +3852,7 @@ fn genInlineMemsetCode( // b loop _ = try self.addInst(.{ .tag = .b, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 4) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 4)) }, }); // end: @@ -4002,7 +4002,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type } }, }); }, - .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = @intCast(u32, addr) }), + .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }), .linker_load => |load_struct| { const tag: Mir.Inst.Tag = switch (load_struct.type) { .got => .load_memory_ptr_got, @@ -4092,7 +4092,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde const mcv = try self.resolveInst(operand); const ptr_ty = 
self.typeOf(operand); const struct_ty = ptr_ty.childType(mod); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -4117,7 +4117,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); const struct_field_ty = struct_ty.structFieldType(index, mod); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); switch (mcv) { .dead, .unreach => unreachable, @@ -4169,7 +4169,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const field_ptr = try self.resolveInst(extra.field_ptr); const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(extra.field_index, mod))); switch (field_ptr) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off + struct_field_offset }; @@ -4243,7 +4243,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const pl_op = self.air.instructions.items(.data)[inst].pl_op; const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); const ty = self.typeOf(callee); const mod = self.bin_file.options.module.?; @@ -4269,8 +4269,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier if (info.return_value == .stack_offset) { log.debug("airCall: return by reference", .{}); const ret_ty = fn_ty.fnReturnType(mod); - const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); - const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); + const ret_abi_size = @as(u32, @intCast(ret_ty.abiSize(mod))); + const ret_abi_align = @as(u32, @intCast(ret_ty.abiAlignment(mod))); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); const ret_ptr_reg = self.registerAlias(.x0, Type.usize); @@ -4314,7 +4314,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); - const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); + const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file))); try self.genSetReg(Type.usize, .x30, .{ .memory = got_addr }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl); @@ -4473,7 +4473,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. 
const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const abi_size = @intCast(u32, ret_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ret_ty.abiSize(mod))); const abi_align = ret_ty.abiAlignment(mod); const offset = try self.allocMem(abi_size, abi_align, null); @@ -4554,7 +4554,7 @@ fn cmp( .tag = .cmp_immediate, .data = .{ .r_imm12_sh = .{ .rn = lhs_reg, - .imm12 = @intCast(u12, rhs_immediate.?), + .imm12 = @as(u12, @intCast(rhs_immediate.?)), } }, }); } else { @@ -4696,7 +4696,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.operandDies(inst, 0)) { const op_int = @intFromEnum(pl_op.operand); if (op_int >= Air.ref_start_index) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); + const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index)); self.processDeath(op_index); } } @@ -4833,7 +4833,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :blk .{ .ty = operand_ty, .bind = operand_bind }; - const offset = @intCast(u32, payload_ty.abiSize(mod)); + const offset = @as(u32, @intCast(payload_ty.abiSize(mod))); const operand_mcv = try operand_bind.resolveToMcv(self); const new_mcv: MCValue = switch (operand_mcv) { .register => |source_reg| new: { @@ -4841,7 +4841,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue { const raw_reg = try self.register_manager.allocReg(null, gp); const dest_reg = raw_reg.toX(); - const shift = @intCast(u6, offset * 8); + const shift = @as(u6, @intCast(offset * 8)); if (shift == 0) { try self.genSetReg(payload_ty, dest_reg, operand_mcv); } else { @@ -5026,7 +5026,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; - const start_index = @intCast(u32, self.mir_instructions.len); + const start_index = @as(u32, @intCast(self.mir_instructions.len)); try self.genBody(body); try self.jump(start_index); @@ -5091,7 +5091,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { var case_i: u32 = 0; while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len])); assert(items.len > 0); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + items.len + case_body.len; @@ -5209,9 +5209,9 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { const tag = self.mir_instructions.items(.tag)[inst]; switch (tag) { - .cbz => self.mir_instructions.items(.data)[inst].r_inst.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), - .b_cond => self.mir_instructions.items(.data)[inst].inst_cond.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), - .b => self.mir_instructions.items(.data)[inst].inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), + .cbz => self.mir_instructions.items(.data)[inst].r_inst.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)), + .b_cond => self.mir_instructions.items(.data)[inst].inst_cond.inst = @as(Mir.Inst.Index, 
@intCast(self.mir_instructions.len)), + .b => self.mir_instructions.items(.data)[inst].inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)), else => unreachable, } } @@ -5262,12 +5262,12 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void { fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); - const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; + const clobbers_len = @as(u31, @truncate(extra.data.flags)); var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; const dead = !is_volatile and self.liveness.isUnused(inst); @@ -5401,7 +5401,7 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -5460,7 +5460,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); const overflow_bit_ty = ty.structFieldType(1, mod); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod))); const raw_cond_reg = try self.register_manager.allocReg(null, gp); const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty); @@ -5589,7 +5589,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = .ldr_ptr_stack, .data = .{ .load_store_stack = .{ .rt = reg, - .offset = @intCast(u32, off), + .offset = @as(u32, @intCast(off)), } }, }); }, @@ -5605,13 +5605,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .immediate => |x| { _ = try self.addInst(.{ .tag = .movz, - .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x) } }, + .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x)) } }, }); if (x & 0x0000_0000_ffff_0000 != 0) { _ = try self.addInst(.{ .tag = .movk, - .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 16), .hw = 1 } }, + .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 16)), .hw = 1 } }, }); } @@ -5619,13 +5619,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void if (x & 0x0000_ffff_0000_0000 != 0) { _ = try self.addInst(.{ .tag = .movk, - .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 32), .hw = 2 } }, + .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 32)), .hw = 2 } }, }); } if (x & 0xffff_0000_0000_0000 != 0) { _ = try self.addInst(.{ .tag = .movk, - .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 48), .hw = 3 } }, + .data 
= .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 48)), .hw = 3 } }, }); } } @@ -5696,7 +5696,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = tag, .data = .{ .load_store_stack = .{ .rt = reg, - .offset = @intCast(u32, off), + .offset = @as(u32, @intCast(off)), } }, }); }, @@ -5720,7 +5720,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = tag, .data = .{ .load_store_stack = .{ .rt = reg, - .offset = @intCast(u32, off), + .offset = @as(u32, @intCast(off)), } }, }); }, @@ -5733,7 +5733,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); switch (mcv) { .dead => unreachable, .none, .unreach => return, @@ -5840,7 +5840,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I } }, }); }, - .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }), + .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }), .linker_load => |load_struct| { const tag: Mir.Inst.Tag = switch (load_struct.type) { .got => .load_memory_ptr_got, @@ -5937,7 +5937,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); const array_ty = ptr_ty.childType(mod); - const array_len = @intCast(u32, array_ty.arrayLen(mod)); + const array_len = @as(u32, @intCast(array_ty.arrayLen(mod))); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); @@ -6058,7 +6058,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); + const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); const result: MCValue = res: { if (self.liveness.isUnused(inst)) break :res MCValue.dead; return self.fail("TODO implement airAggregateInit for {}", .{self.target.cpu.arch}); @@ -6105,7 +6105,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = result: { const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; const error_union_ty = self.typeOf(pl_op.operand); - const error_union_size = @intCast(u32, error_union_ty.abiSize(mod)); + const error_union_size = @as(u32, @intCast(error_union_ty.abiSize(mod))); const error_union_align = error_union_ty.abiAlignment(mod); // The error union will die in the body. However, we need the @@ -6247,7 +6247,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod))); if (ret_ty_size == 0) { assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; @@ -6259,7 +6259,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } for (fn_info.param_types, 0..) 
|ty, i| { - const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); if (param_size == 0) { result.args[i] = .{ .none = {} }; continue; @@ -6305,7 +6305,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod))); if (ret_ty_size == 0) { assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; @@ -6325,7 +6325,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { for (fn_info.param_types, 0..) |ty, i| { if (ty.toType().abiSize(mod) > 0) { - const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); const param_alignment = ty.toType().abiAlignment(mod); stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment); diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig index 238a63c921..8cf2386138 100644 --- a/src/arch/aarch64/Emit.zig +++ b/src/arch/aarch64/Emit.zig @@ -81,7 +81,7 @@ pub fn emitMir( // Emit machine code for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); switch (tag) { .add_immediate => try emit.mirAddSubtractImmediate(inst), .adds_immediate => try emit.mirAddSubtractImmediate(inst), @@ -324,7 +324,7 @@ fn lowerBranches(emit: *Emit) !void { // TODO optimization opportunity: do this in codegen while // generating MIR for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); if (isBranch(tag)) { const target_inst = emit.branchTarget(inst); @@ -369,7 +369,7 @@ fn lowerBranches(emit: *Emit) !void { var current_code_offset: usize = 0; for (mir_tags, 0..) 
|tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); // If this instruction contained in the code offset // mapping (when it is a target of a branch or if it is a @@ -384,7 +384,7 @@ fn lowerBranches(emit: *Emit) !void { const target_inst = emit.branchTarget(inst); if (target_inst < inst) { const target_offset = emit.code_offset_mapping.get(target_inst).?; - const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset); + const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(current_code_offset)); const branch_type = emit.branch_types.getPtr(inst).?; const optimal_branch_type = try emit.optimalBranchType(tag, offset); if (branch_type.* != optimal_branch_type) { @@ -403,7 +403,7 @@ fn lowerBranches(emit: *Emit) !void { for (origin_list.items) |forward_branch_inst| { const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst]; const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?; - const offset = @intCast(i64, current_code_offset) - @intCast(i64, forward_branch_inst_offset); + const offset = @as(i64, @intCast(current_code_offset)) - @as(i64, @intCast(forward_branch_inst_offset)); const branch_type = emit.branch_types.getPtr(forward_branch_inst).?; const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset); if (branch_type.* != optimal_branch_type) { @@ -434,7 +434,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { } fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { - const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line); + const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line)); const delta_pc: usize = self.code.items.len - self.prev_di_pc; switch (self.debug_output) { .dwarf => |dw| { @@ -451,13 +451,13 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { // increasing the line number try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line); // increasing the pc - const d_pc_p9 = @intCast(i64, delta_pc) - quant; + const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant; if (d_pc_p9 > 0) { // minus one because if its the last one, we want to leave space to change the line which is one quanta - try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant); + try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant); if (dbg_out.pcop_change_index.*) |pci| dbg_out.dbg_line.items[pci] += 1; - dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1); + dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1)); } else if (d_pc_p9 == 0) { // we don't need to do anything, because adding the quant does it for us } else unreachable; @@ -548,13 +548,13 @@ fn mirConditionalBranchImmediate(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const inst_cond = emit.mir.instructions.items(.data)[inst].inst_cond; - const offset = @intCast(i64, emit.code_offset_mapping.get(inst_cond.inst).?) 
- @intCast(i64, emit.code.items.len); + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(inst_cond.inst).?)) - @as(i64, @intCast(emit.code.items.len)); const branch_type = emit.branch_types.get(inst).?; log.debug("mirConditionalBranchImmediate: {} offset={}", .{ inst, offset }); switch (branch_type) { .b_cond => switch (tag) { - .b_cond => try emit.writeInstruction(Instruction.bCond(inst_cond.cond, @intCast(i21, offset))), + .b_cond => try emit.writeInstruction(Instruction.bCond(inst_cond.cond, @as(i21, @intCast(offset)))), else => unreachable, }, else => unreachable, @@ -572,14 +572,14 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void { emit.mir.instructions.items(.tag)[target_inst], }); - const offset = @intCast(i64, emit.code_offset_mapping.get(target_inst).?) - @intCast(i64, emit.code.items.len); + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(target_inst).?)) - @as(i64, @intCast(emit.code.items.len)); const branch_type = emit.branch_types.get(inst).?; log.debug("mirBranch: {} offset={}", .{ inst, offset }); switch (branch_type) { .unconditional_branch_immediate => switch (tag) { - .b => try emit.writeInstruction(Instruction.b(@intCast(i28, offset))), - .bl => try emit.writeInstruction(Instruction.bl(@intCast(i28, offset))), + .b => try emit.writeInstruction(Instruction.b(@as(i28, @intCast(offset)))), + .bl => try emit.writeInstruction(Instruction.bl(@as(i28, @intCast(offset)))), else => unreachable, }, else => unreachable, @@ -590,13 +590,13 @@ fn mirCompareAndBranch(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const r_inst = emit.mir.instructions.items(.data)[inst].r_inst; - const offset = @intCast(i64, emit.code_offset_mapping.get(r_inst.inst).?) - @intCast(i64, emit.code.items.len); + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(r_inst.inst).?)) - @as(i64, @intCast(emit.code.items.len)); const branch_type = emit.branch_types.get(inst).?; log.debug("mirCompareAndBranch: {} offset={}", .{ inst, offset }); switch (branch_type) { .cbz => switch (tag) { - .cbz => try emit.writeInstruction(Instruction.cbz(r_inst.rt, @intCast(i21, offset))), + .cbz => try emit.writeInstruction(Instruction.cbz(r_inst.rt, @as(i21, @intCast(offset)))), else => unreachable, }, else => unreachable, @@ -662,7 +662,7 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void { const relocation = emit.mir.instructions.items(.data)[inst].relocation; const offset = blk: { - const offset = @intCast(u32, emit.code.items.len); + const offset = @as(u32, @intCast(emit.code.items.len)); // bl try emit.writeInstruction(Instruction.bl(0)); break :blk offset; @@ -837,11 +837,11 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const payload = emit.mir.instructions.items(.data)[inst].payload; const data = emit.mir.extraData(Mir.LoadMemoryPie, payload).data; - const reg = @enumFromInt(Register, data.register); + const reg = @as(Register, @enumFromInt(data.register)); // PC-relative displacement to the entry in memory. 
// adrp - const offset = @intCast(u32, emit.code.items.len); + const offset = @as(u32, @intCast(emit.code.items.len)); try emit.writeInstruction(Instruction.adrp(reg.toX(), 0)); switch (tag) { @@ -1220,7 +1220,7 @@ fn mirNop(emit: *Emit) !void { } fn regListIsSet(reg_list: u32, reg: Register) bool { - return reg_list & @as(u32, 1) << @intCast(u5, reg.id()) != 0; + return reg_list & @as(u32, 1) << @as(u5, @intCast(reg.id())) != 0; } fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void { @@ -1245,7 +1245,7 @@ fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void { var count: u6 = 0; var other_reg: ?Register = null; while (i > 0) : (i -= 1) { - const reg = @enumFromInt(Register, i - 1); + const reg = @as(Register, @enumFromInt(i - 1)); if (regListIsSet(reg_list, reg)) { if (count == 0 and odd_number_of_regs) { try emit.writeInstruction(Instruction.ldr( @@ -1274,7 +1274,7 @@ fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void { var count: u6 = 0; var other_reg: ?Register = null; while (i < 32) : (i += 1) { - const reg = @enumFromInt(Register, i); + const reg = @as(Register, @enumFromInt(i)); if (regListIsSet(reg_list, reg)) { if (count == number_of_regs - 1 and odd_number_of_regs) { try emit.writeInstruction(Instruction.str( diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig index cc478c874a..6c0a1ec5b4 100644 --- a/src/arch/aarch64/Mir.zig +++ b/src/arch/aarch64/Mir.zig @@ -507,7 +507,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => mir.extra[i], - i32 => @bitCast(i32, mir.extra[i]), + i32 => @as(i32, @bitCast(mir.extra[i])), else => @compileError("bad field type"), }; i += 1; diff --git a/src/arch/aarch64/bits.zig b/src/arch/aarch64/bits.zig index 3446d69950..6e4508fb0e 100644 --- a/src/arch/aarch64/bits.zig +++ b/src/arch/aarch64/bits.zig @@ -80,34 +80,34 @@ pub const Register = enum(u8) { pub fn id(self: Register) u6 { return switch (@intFromEnum(self)) { - @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.x0)), - @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.w0)), + @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.x0))), + @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.w0))), @intFromEnum(Register.sp) => 32, @intFromEnum(Register.wsp) => 32, - @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.q0) + 33), - @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.d0) + 33), - @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.s0) + 33), - @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.h0) + 33), - @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.b0) + 33), + @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.q0) + 33)), + @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.d0) + 33)), + 
@intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.s0) + 33)), + @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.h0) + 33)), + @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.b0) + 33)), else => unreachable, }; } pub fn enc(self: Register) u5 { return switch (@intFromEnum(self)) { - @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.x0)), - @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.w0)), + @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.x0))), + @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.w0))), @intFromEnum(Register.sp) => 31, @intFromEnum(Register.wsp) => 31, - @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.q0)), - @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.d0)), - @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.s0)), - @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.h0)), - @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.b0)), + @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.q0))), + @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.d0))), + @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.s0))), + @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.h0))), + @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.b0))), else => unreachable, }; } @@ -133,13 +133,13 @@ pub const Register = enum(u8) { /// Convert from a general-purpose register to its 64 bit alias. pub fn toX(self: Register) Register { return switch (@intFromEnum(self)) { - @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @enumFromInt( + @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.x0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.x0)), ), - @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @enumFromInt( + @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.x0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.x0)), ), else => unreachable, }; @@ -148,13 +148,13 @@ pub const Register = enum(u8) { /// Convert from a general-purpose register to its 32 bit alias. 
pub fn toW(self: Register) Register { return switch (@intFromEnum(self)) { - @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @enumFromInt( + @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.w0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.w0)), ), - @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @enumFromInt( + @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.w0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.w0)), ), else => unreachable, }; @@ -163,25 +163,25 @@ pub const Register = enum(u8) { /// Convert from a floating-point register to its 128 bit alias. pub fn toQ(self: Register) Register { return switch (@intFromEnum(self)) { - @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt( + @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.q0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.q0)), ), - @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt( + @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.q0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.q0)), ), - @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt( + @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.q0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.q0)), ), - @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt( + @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.q0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.q0)), ), - @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt( + @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.q0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.q0)), ), else => unreachable, }; @@ -190,25 +190,25 @@ pub const Register = enum(u8) { /// Convert from a floating-point register to its 64 bit alias. 
pub fn toD(self: Register) Register { return switch (@intFromEnum(self)) { - @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt( + @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.d0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.d0)), ), - @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt( + @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.d0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.d0)), ), - @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt( + @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.d0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.d0)), ), - @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt( + @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.d0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.d0)), ), - @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt( + @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.d0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.d0)), ), else => unreachable, }; @@ -217,25 +217,25 @@ pub const Register = enum(u8) { /// Convert from a floating-point register to its 32 bit alias. 
pub fn toS(self: Register) Register { return switch (@intFromEnum(self)) { - @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt( + @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.s0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.s0)), ), - @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt( + @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.s0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.s0)), ), - @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt( + @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.s0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.s0)), ), - @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt( + @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.s0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.s0)), ), - @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt( + @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.s0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.s0)), ), else => unreachable, }; @@ -244,25 +244,25 @@ pub const Register = enum(u8) { /// Convert from a floating-point register to its 16 bit alias. 
pub fn toH(self: Register) Register { return switch (@intFromEnum(self)) { - @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt( + @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.h0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.h0)), ), - @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt( + @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.h0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.h0)), ), - @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt( + @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.h0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.h0)), ), - @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt( + @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.h0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.h0)), ), - @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt( + @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.h0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.h0)), ), else => unreachable, }; @@ -271,25 +271,25 @@ pub const Register = enum(u8) { /// Convert from a floating-point register to its 8 bit alias. 
pub fn toB(self: Register) Register { return switch (@intFromEnum(self)) { - @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt( + @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.b0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.b0)), ), - @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt( + @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.b0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.b0)), ), - @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt( + @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.b0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.b0)), ), - @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt( + @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.b0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.b0)), ), - @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt( + @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.b0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.b0)), ), else => unreachable, }; @@ -612,27 +612,27 @@ pub const Instruction = union(enum) { pub fn toU32(self: Instruction) u32 { return switch (self) { - .move_wide_immediate => |v| @bitCast(u32, v), - .pc_relative_address => |v| @bitCast(u32, v), - .load_store_register => |v| @bitCast(u32, v), - .load_store_register_pair => |v| @bitCast(u32, v), - .load_literal => |v| @bitCast(u32, v), - .exception_generation => |v| @bitCast(u32, v), - .unconditional_branch_register => |v| @bitCast(u32, v), - .unconditional_branch_immediate => |v| @bitCast(u32, v), - .no_operation => |v| @bitCast(u32, v), - .logical_shifted_register => |v| @bitCast(u32, v), - .add_subtract_immediate => |v| @bitCast(u32, v), - .logical_immediate => |v| @bitCast(u32, v), - .bitfield => |v| @bitCast(u32, v), - .add_subtract_shifted_register => |v| @bitCast(u32, v), - .add_subtract_extended_register => |v| @bitCast(u32, v), + .move_wide_immediate => |v| @as(u32, @bitCast(v)), + .pc_relative_address => |v| @as(u32, @bitCast(v)), + .load_store_register => |v| @as(u32, @bitCast(v)), + .load_store_register_pair => |v| @as(u32, @bitCast(v)), + .load_literal => |v| @as(u32, @bitCast(v)), + .exception_generation => |v| @as(u32, @bitCast(v)), + .unconditional_branch_register => |v| @as(u32, @bitCast(v)), + .unconditional_branch_immediate => |v| @as(u32, @bitCast(v)), + .no_operation => |v| @as(u32, @bitCast(v)), + .logical_shifted_register => |v| @as(u32, @bitCast(v)), + .add_subtract_immediate => |v| @as(u32, @bitCast(v)), + .logical_immediate => |v| @as(u32, @bitCast(v)), + .bitfield => |v| @as(u32, @bitCast(v)), + .add_subtract_shifted_register => |v| @as(u32, @bitCast(v)), + .add_subtract_extended_register => |v| @as(u32, @bitCast(v)), // TODO once packed structs work, this can be refactored .conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) 
| (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25), .compare_and_branch => |v| @as(u32, v.rt) | (@as(u32, v.imm19) << 5) | (@as(u32, v.op) << 24) | (@as(u32, v.fixed) << 25) | (@as(u32, v.sf) << 31), .conditional_select => |v| @as(u32, v.rd) | @as(u32, v.rn) << 5 | @as(u32, v.op2) << 10 | @as(u32, v.cond) << 12 | @as(u32, v.rm) << 16 | @as(u32, v.fixed) << 21 | @as(u32, v.s) << 29 | @as(u32, v.op) << 30 | @as(u32, v.sf) << 31, - .data_processing_3_source => |v| @bitCast(u32, v), - .data_processing_2_source => |v| @bitCast(u32, v), + .data_processing_3_source => |v| @as(u32, @bitCast(v)), + .data_processing_2_source => |v| @as(u32, @bitCast(v)), }; } @@ -650,7 +650,7 @@ pub const Instruction = union(enum) { .move_wide_immediate = .{ .rd = rd.enc(), .imm16 = imm16, - .hw = @intCast(u2, shift / 16), + .hw = @as(u2, @intCast(shift / 16)), .opc = opc, .sf = switch (rd.size()) { 32 => 0, @@ -663,12 +663,12 @@ pub const Instruction = union(enum) { fn pcRelativeAddress(rd: Register, imm21: i21, op: u1) Instruction { assert(rd.size() == 64); - const imm21_u = @bitCast(u21, imm21); + const imm21_u = @as(u21, @bitCast(imm21)); return Instruction{ .pc_relative_address = .{ .rd = rd.enc(), - .immlo = @truncate(u2, imm21_u), - .immhi = @truncate(u19, imm21_u >> 2), + .immlo = @as(u2, @truncate(imm21_u)), + .immhi = @as(u19, @truncate(imm21_u >> 2)), .op = op, }, }; @@ -704,15 +704,15 @@ pub const Instruction = union(enum) { pub fn toU12(self: LoadStoreOffset) u12 { return switch (self) { .immediate => |imm_type| switch (imm_type) { - .post_index => |v| (@intCast(u12, @bitCast(u9, v)) << 2) + 1, - .pre_index => |v| (@intCast(u12, @bitCast(u9, v)) << 2) + 3, + .post_index => |v| (@as(u12, @intCast(@as(u9, @bitCast(v)))) << 2) + 1, + .pre_index => |v| (@as(u12, @intCast(@as(u9, @bitCast(v)))) << 2) + 3, .unsigned => |v| v, }, .register => |r| switch (r.shift) { - .uxtw => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 16 + 2050, - .lsl => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 24 + 2050, - .sxtw => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 48 + 2050, - .sxtx => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 56 + 2050, + .uxtw => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 16 + 2050, + .lsl => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 24 + 2050, + .sxtw => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 48 + 2050, + .sxtx => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 56 + 2050, }, }; } @@ -894,7 +894,7 @@ pub const Instruction = union(enum) { switch (rt1.size()) { 32 => { assert(-256 <= offset and offset <= 252); - const imm7 = @truncate(u7, @bitCast(u9, offset >> 2)); + const imm7 = @as(u7, @truncate(@as(u9, @bitCast(offset >> 2)))); return Instruction{ .load_store_register_pair = .{ .rt1 = rt1.enc(), @@ -909,7 +909,7 @@ pub const Instruction = union(enum) { }, 64 => { assert(-512 <= offset and offset <= 504); - const imm7 = @truncate(u7, @bitCast(u9, offset >> 3)); + const imm7 = @as(u7, @truncate(@as(u9, @bitCast(offset >> 3)))); return Instruction{ .load_store_register_pair = .{ .rt1 = rt1.enc(), @@ -982,7 +982,7 @@ pub const Instruction = union(enum) { ) Instruction { return Instruction{ .unconditional_branch_immediate = .{ - .imm26 = @bitCast(u26, @intCast(i26, offset >> 2)), + .imm26 = @as(u26, @bitCast(@as(i26, @intCast(offset >> 2)))), .op = op, }, }; @@ -1188,7 +1188,7 @@ pub const Instruction = union(enum) { .conditional_branch = .{ 
.cond = @intFromEnum(cond), .o0 = o0, - .imm19 = @bitCast(u19, @intCast(i19, offset >> 2)), + .imm19 = @as(u19, @bitCast(@as(i19, @intCast(offset >> 2)))), .o1 = o1, }, }; @@ -1204,7 +1204,7 @@ pub const Instruction = union(enum) { return Instruction{ .compare_and_branch = .{ .rt = rt.enc(), - .imm19 = @bitCast(u19, @intCast(i19, offset >> 2)), + .imm19 = @as(u19, @bitCast(@as(i19, @intCast(offset >> 2)))), .op = op, .sf = switch (rt.size()) { 32 => 0b0, @@ -1609,12 +1609,12 @@ pub const Instruction = union(enum) { } pub fn asrImmediate(rd: Register, rn: Register, shift: u6) Instruction { - const imms = @intCast(u6, rd.size() - 1); + const imms = @as(u6, @intCast(rd.size() - 1)); return sbfm(rd, rn, shift, imms); } pub fn sbfx(rd: Register, rn: Register, lsb: u6, width: u7) Instruction { - return sbfm(rd, rn, lsb, @intCast(u6, lsb + width - 1)); + return sbfm(rd, rn, lsb, @as(u6, @intCast(lsb + width - 1))); } pub fn sxtb(rd: Register, rn: Register) Instruction { @@ -1631,17 +1631,17 @@ pub const Instruction = union(enum) { } pub fn lslImmediate(rd: Register, rn: Register, shift: u6) Instruction { - const size = @intCast(u6, rd.size() - 1); + const size = @as(u6, @intCast(rd.size() - 1)); return ubfm(rd, rn, size - shift + 1, size - shift); } pub fn lsrImmediate(rd: Register, rn: Register, shift: u6) Instruction { - const imms = @intCast(u6, rd.size() - 1); + const imms = @as(u6, @intCast(rd.size() - 1)); return ubfm(rd, rn, shift, imms); } pub fn ubfx(rd: Register, rn: Register, lsb: u6, width: u7) Instruction { - return ubfm(rd, rn, lsb, @intCast(u6, lsb + width - 1)); + return ubfm(rd, rn, lsb, @as(u6, @intCast(lsb + width - 1))); } pub fn uxtb(rd: Register, rn: Register) Instruction { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 7ece4ba2e3..885a07ec6e 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -266,8 +266,8 @@ const DbgInfoReloc = struct { .stack_argument_offset, => blk: { const adjusted_stack_offset = switch (reloc.mcv) { - .stack_offset => |offset| -@intCast(i32, offset), - .stack_argument_offset => |offset| @intCast(i32, function.saved_regs_stack_space + offset), + .stack_offset => |offset| -@as(i32, @intCast(offset)), + .stack_argument_offset => |offset| @as(i32, @intCast(function.saved_regs_stack_space + offset)), else => unreachable, }; break :blk .{ .stack = .{ @@ -303,8 +303,8 @@ const DbgInfoReloc = struct { const adjusted_offset = switch (reloc.mcv) { .ptr_stack_offset, .stack_offset, - => -@intCast(i32, offset), - .stack_argument_offset => @intCast(i32, function.saved_regs_stack_space + offset), + => -@as(i32, @intCast(offset)), + .stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)), else => unreachable, }; break :blk .{ .stack = .{ @@ -446,7 +446,7 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { try self.mir_instructions.ensureUnusedCapacity(gpa, 1); - const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len); + const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len)); self.mir_instructions.appendAssumeCapacity(inst); return result_index; } @@ -466,11 +466,11 @@ pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 { pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, self.mir_extra.items.len); + const result = @as(u32, @intCast(self.mir_extra.items.len)); inline for (fields) |field| { 
self.mir_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), - i32 => @bitCast(u32, @field(extra, field.name)), + i32 => @as(u32, @bitCast(@field(extra, field.name))), else => @compileError("bad field type"), }); } @@ -522,7 +522,7 @@ fn gen(self: *Self) !void { const ty = self.typeOfIndex(inst); - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const abi_align = ty.abiAlignment(mod); const stack_offset = try self.allocMem(abi_size, abi_align, inst); try self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); @@ -588,7 +588,7 @@ fn gen(self: *Self) !void { for (self.exitlude_jump_relocs.items) |jmp_reloc| { self.mir_instructions.set(jmp_reloc, .{ .tag = .b, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len)) }, }); } @@ -934,15 +934,15 @@ fn finishAirBookkeeping(self: *Self) void { fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void { var tomb_bits = self.liveness.getTombBits(inst); for (operands) |op| { - const dies = @truncate(u1, tomb_bits) != 0; + const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; const op_int = @intFromEnum(op); if (op_int < Air.ref_start_index) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); + const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index)); self.processDeath(op_index); } - const is_used = @truncate(u1, tomb_bits) == 0; + const is_used = @as(u1, @truncate(tomb_bits)) == 0; if (is_used) { log.debug("%{d} => {}", .{ inst, result }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -1201,7 +1201,7 @@ fn truncRegister( .rd = dest_reg, .rn = operand_reg, .lsb = 0, - .width = @intCast(u6, int_bits), + .width = @as(u6, @intCast(int_bits)), } }, }); } @@ -1591,9 +1591,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod))); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), @@ -1704,9 +1704,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod))); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), @@ -1866,9 +1866,9 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); const tuple_align = tuple_ty.abiAlignment(mod); - 
const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod))); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}), @@ -1915,7 +1915,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .data = .{ .rr_shift = .{ .rd = dest_reg, .rm = lhs_reg, - .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_mcv.immediate)), + .shift_amount = Instruction.ShiftAmount.imm(@as(u5, @intCast(rhs_mcv.immediate))), } }, }); @@ -1927,7 +1927,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .data = .{ .rr_shift = .{ .rd = reconstructed_reg, .rm = dest_reg, - .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_mcv.immediate)), + .shift_amount = Instruction.ShiftAmount.imm(@as(u5, @intCast(rhs_mcv.immediate))), } }, }); } else { @@ -2020,7 +2020,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const optional_ty = self.typeOfIndex(inst); - const abi_size = @intCast(u32, optional_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(optional_ty.abiSize(mod))); // Optional with a zero-bit payload type is just a boolean true if (abi_size == 1) { @@ -2049,7 +2049,7 @@ fn errUnionErr( return try error_union_bind.resolveToMcv(self); } - const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod)); + const err_offset = @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod))); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -2071,15 +2071,15 @@ fn errUnionErr( ); const err_bit_offset = err_offset * 8; - const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8; + const err_bit_size = @as(u32, @intCast(err_ty.abiSize(mod))) * 8; _ = try self.addInst(.{ .tag = .ubfx, // errors are unsigned integers .data = .{ .rr_lsb_width = .{ .rd = dest_reg, .rn = operand_reg, - .lsb = @intCast(u5, err_bit_offset), - .width = @intCast(u6, err_bit_size), + .lsb = @as(u5, @intCast(err_bit_offset)), + .width = @as(u6, @intCast(err_bit_size)), } }, }); @@ -2126,7 +2126,7 @@ fn errUnionPayload( return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); + const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -2148,15 +2148,15 @@ fn errUnionPayload( ); const payload_bit_offset = payload_offset * 8; - const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8; + const payload_bit_size = @as(u32, @intCast(payload_ty.abiSize(mod))) * 8; _ = try self.addInst(.{ .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, .data = .{ .rr_lsb_width = .{ .rd = dest_reg, .rn = operand_reg, - .lsb = @intCast(u5, payload_bit_offset), - .width = @intCast(u6, payload_bit_size), + .lsb = @as(u5, @intCast(payload_bit_offset)), + .width = @as(u6, @intCast(payload_bit_size)), } }, }); @@ -2235,13 +2235,13 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_size = @as(u32, 
@intCast(error_union_ty.abiSize(mod))); const abi_align = error_union_ty.abiAlignment(mod); - const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); + const stack_offset = @as(u32, @intCast(try self.allocMem(abi_size, abi_align, inst))); const payload_off = errUnionPayloadOffset(payload_ty, mod); const err_off = errUnionErrorOffset(payload_ty, mod); - try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand); - try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 }); + try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), operand); + try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), .{ .immediate = 0 }); break :result MCValue{ .stack_offset = stack_offset }; }; @@ -2259,13 +2259,13 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod))); const abi_align = error_union_ty.abiAlignment(mod); - const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); + const stack_offset = @as(u32, @intCast(try self.allocMem(abi_size, abi_align, inst))); const payload_off = errUnionPayloadOffset(payload_ty, mod); const err_off = errUnionErrorOffset(payload_ty, mod); - try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand); - try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef); + try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), operand); + try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), .undef); break :result MCValue{ .stack_offset = stack_offset }; }; @@ -2369,7 +2369,7 @@ fn ptrElemVal( ) !MCValue { const mod = self.bin_file.options.module.?; const elem_ty = ptr_ty.childType(mod); - const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); switch (elem_size) { 1, 4 => { @@ -2480,7 +2480,7 @@ fn arrayElemVal( => { const ptr_to_mcv = switch (mcv) { .stack_offset => |off| MCValue{ .ptr_stack_offset = off }, - .memory => |addr| MCValue{ .immediate = @intCast(u32, addr) }, + .memory => |addr| MCValue{ .immediate = @as(u32, @intCast(addr)) }, .stack_argument_offset => |off| blk: { const reg = try self.register_manager.allocReg(null, gp); @@ -2654,7 +2654,7 @@ fn reuseOperand( fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { const mod = self.bin_file.options.module.?; const elem_ty = ptr_ty.childType(mod); - const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); switch (ptr) { .none => unreachable, @@ -2759,7 +2759,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { const mod = self.bin_file.options.module.?; - const elem_size = @intCast(u32, value_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(value_ty.abiSize(mod))); switch (ptr) { .none => unreachable, @@ -2814,7 +2814,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type // sub src_reg, fp, #off try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off }); }, - .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ 
.immediate = @intCast(u32, addr) }), + .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }), .stack_argument_offset => |off| { _ = try self.addInst(.{ .tag = .ldr_ptr_stack_argument, @@ -2882,7 +2882,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde const mcv = try self.resolveInst(operand); const ptr_ty = self.typeOf(operand); const struct_ty = ptr_ty.childType(mod); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -2906,7 +2906,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); const struct_field_ty = struct_ty.structFieldType(index, mod); switch (mcv) { @@ -2970,15 +2970,15 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { ); const field_bit_offset = struct_field_offset * 8; - const field_bit_size = @intCast(u32, struct_field_ty.abiSize(mod)) * 8; + const field_bit_size = @as(u32, @intCast(struct_field_ty.abiSize(mod))) * 8; _ = try self.addInst(.{ .tag = if (struct_field_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, .data = .{ .rr_lsb_width = .{ .rd = dest_reg, .rn = operand_reg, - .lsb = @intCast(u5, field_bit_offset), - .width = @intCast(u6, field_bit_size), + .lsb = @as(u5, @intCast(field_bit_offset)), + .width = @as(u6, @intCast(field_bit_size)), } }, }); @@ -3003,7 +3003,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement @fieldParentPtr codegen for unions", .{}); } - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(extra.field_index, mod))); switch (field_ptr) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off + struct_field_offset }; @@ -3364,7 +3364,7 @@ fn binOpImmediate( => .{ .rr_shift = .{ .rd = dest_reg, .rm = lhs_reg, - .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_immediate)), + .shift_amount = Instruction.ShiftAmount.imm(@as(u5, @intCast(rhs_immediate))), } }, else => unreachable, }; @@ -3895,7 +3895,7 @@ fn ptrArithmetic( .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type else => ptr_ty.childType(mod), }; - const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); const base_tag: Air.Inst.Tag = switch (tag) { .ptr_add => .add, @@ -4022,7 +4022,7 @@ fn genInlineMemcpy( _ = try self.addInst(.{ .tag = .b, .cond = .ge, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len + 5) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len + 5)) }, }); // ldrb tmp, [src, count] @@ -4058,7 +4058,7 @@ fn genInlineMemcpy( // b loop _ = try self.addInst(.{ .tag = .b, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 5) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 5)) }, }); // end: @@ -4126,7 +4126,7 @@ fn genInlineMemsetCode( _ 
= try self.addInst(.{ .tag = .b, .cond = .ge, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len + 4) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len + 4)) }, }); // strb val, [src, count] @@ -4152,7 +4152,7 @@ fn genInlineMemsetCode( // b loop _ = try self.addInst(.{ .tag = .b, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 4) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 4)) }, }); // end: @@ -4216,7 +4216,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const pl_op = self.air.instructions.items(.data)[inst].pl_op; const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); const ty = self.typeOf(callee); const mod = self.bin_file.options.module.?; @@ -4248,8 +4248,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: { log.debug("airCall: return by reference", .{}); const ret_ty = fn_ty.fnReturnType(mod); - const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); - const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); + const ret_abi_size = @as(u32, @intCast(ret_ty.abiSize(mod))); + const ret_abi_align = @as(u32, @intCast(ret_ty.abiAlignment(mod))); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); const ptr_ty = try mod.singleMutPtrType(ret_ty); @@ -4294,7 +4294,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); - const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); + const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file))); try self.genSetReg(Type.usize, .lr, .{ .memory = got_addr }); } else if (self.bin_file.cast(link.File.MachO)) |_| { unreachable; // unsupported architecture for MachO @@ -4425,7 +4425,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. 
const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const abi_size = @intCast(u32, ret_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ret_ty.abiSize(mod))); const abi_align = ret_ty.abiAlignment(mod); const offset = try self.allocMem(abi_size, abi_align, null); @@ -4651,7 +4651,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.operandDies(inst, 0)) { const op_int = @intFromEnum(pl_op.operand); if (op_int >= Air.ref_start_index) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); + const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index)); self.processDeath(op_index); } } @@ -4956,7 +4956,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; - const start_index = @intCast(Mir.Inst.Index, self.mir_instructions.len); + const start_index = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)); try self.genBody(body); try self.jump(start_index); @@ -5021,7 +5021,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { var case_i: u32 = 0; while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len])); assert(items.len > 0); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + items.len + case_body.len; @@ -5139,7 +5139,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { const tag = self.mir_instructions.items(.tag)[inst]; switch (tag) { - .b => self.mir_instructions.items(.data)[inst].inst = @intCast(Air.Inst.Index, self.mir_instructions.len), + .b => self.mir_instructions.items(.data)[inst].inst = @as(Air.Inst.Index, @intCast(self.mir_instructions.len)), else => unreachable, } } @@ -5188,12 +5188,12 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void { fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); - const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; + const clobbers_len = @as(u31, @truncate(extra.data.flags)); var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; const dead = !is_volatile and self.liveness.isUnused(inst); @@ -5323,7 +5323,7 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, 
ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -5376,7 +5376,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro }, 2 => { const offset = if (stack_offset <= math.maxInt(u8)) blk: { - break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset)); + break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(stack_offset))); } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset })); _ = try self.addInst(.{ @@ -5404,7 +5404,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg }); const overflow_bit_ty = ty.structFieldType(1, mod); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod))); const cond_reg = try self.register_manager.allocReg(null, gp); // C flag: movcs reg, #1 @@ -5457,7 +5457,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro // sub src_reg, fp, #off try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off }); }, - .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }), + .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }), .stack_argument_offset => |off| { _ = try self.addInst(.{ .tag = .ldr_ptr_stack_argument, @@ -5554,7 +5554,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = .movw, .data = .{ .r_imm16 = .{ .rd = reg, - .imm16 = @intCast(u16, x), + .imm16 = @as(u16, @intCast(x)), } }, }); } else { @@ -5562,7 +5562,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = .mov, .data = .{ .r_op_mov = .{ .rd = reg, - .op = Instruction.Operand.imm(@truncate(u8, x), 0), + .op = Instruction.Operand.imm(@as(u8, @truncate(x)), 0), } }, }); _ = try self.addInst(.{ @@ -5570,7 +5570,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .data = .{ .rr_op = .{ .rd = reg, .rn = reg, - .op = Instruction.Operand.imm(@truncate(u8, x >> 8), 12), + .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 8)), 12), } }, }); } @@ -5585,14 +5585,14 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = .movw, .data = .{ .r_imm16 = .{ .rd = reg, - .imm16 = @truncate(u16, x), + .imm16 = @as(u16, @truncate(x)), } }, }); _ = try self.addInst(.{ .tag = .movt, .data = .{ .r_imm16 = .{ .rd = reg, - .imm16 = @truncate(u16, x >> 16), + .imm16 = @as(u16, @truncate(x >> 16)), } }, }); } else { @@ -5605,7 +5605,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = .mov, .data = .{ .r_op_mov = .{ .rd = reg, - .op = Instruction.Operand.imm(@truncate(u8, x), 0), + .op = Instruction.Operand.imm(@as(u8, @truncate(x)), 0), } }, }); _ = try self.addInst(.{ @@ -5613,7 +5613,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .data = .{ .rr_op = .{ .rd = reg, .rn = reg, - .op = Instruction.Operand.imm(@truncate(u8, x >> 8), 12), + .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 8)), 12), } }, }); _ = try self.addInst(.{ @@ -5621,7 +5621,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .data = .{ .rr_op = .{ .rd = reg, .rn = 
reg, - .op = Instruction.Operand.imm(@truncate(u8, x >> 16), 8), + .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 16)), 8), } }, }); _ = try self.addInst(.{ @@ -5629,7 +5629,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .data = .{ .rr_op = .{ .rd = reg, .rn = reg, - .op = Instruction.Operand.imm(@truncate(u8, x >> 24), 4), + .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 24)), 4), } }, }); } @@ -5654,12 +5654,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .memory => |addr| { // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. - try self.genSetReg(ty, reg, .{ .immediate = @intCast(u32, addr) }); + try self.genSetReg(ty, reg, .{ .immediate = @as(u32, @intCast(addr)) }); try self.genLdrRegister(reg, reg, ty); }, .stack_offset => |off| { // TODO: maybe addressing from sp instead of fp - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb, @@ -5677,7 +5677,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void if (extra_offset) { const offset = if (off <= math.maxInt(u8)) blk: { - break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, off)); + break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(off))); } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off })); _ = try self.addInst(.{ @@ -5693,7 +5693,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void }); } else { const offset = if (off <= math.maxInt(u12)) blk: { - break :blk Instruction.Offset.imm(@intCast(u12, off)); + break :blk Instruction.Offset.imm(@as(u12, @intCast(off))); } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off }), .none); _ = try self.addInst(.{ @@ -5732,7 +5732,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); switch (mcv) { .dead => unreachable, .none, .unreach => return, @@ -5771,7 +5771,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I }, 2 => { const offset = if (stack_offset <= math.maxInt(u8)) blk: { - break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset)); + break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(stack_offset))); } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset })); _ = try self.addInst(.{ @@ -5814,7 +5814,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I // sub src_reg, fp, #off try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off }); }, - .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }), + .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }), .stack_argument_offset => |off| { _ = try self.addInst(.{ .tag = .ldr_ptr_stack_argument, @@ -5893,7 +5893,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = 
self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); const array_ty = ptr_ty.childType(mod); - const array_len = @intCast(u32, array_ty.arrayLen(mod)); + const array_len = @as(u32, @intCast(array_ty.arrayLen(mod))); const stack_offset = try self.allocMem(8, 8, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); @@ -6010,7 +6010,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); + const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); const result: MCValue = res: { if (self.liveness.isUnused(inst)) break :res MCValue.dead; return self.fail("TODO implement airAggregateInit for arm", .{}); @@ -6058,7 +6058,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; const error_union_ty = self.typeOf(pl_op.operand); const mod = self.bin_file.options.module.?; - const error_union_size = @intCast(u32, error_union_ty.abiSize(mod)); + const error_union_size = @as(u32, @intCast(error_union_ty.abiSize(mod))); const error_union_align = error_union_ty.abiAlignment(mod); // The error union will die in the body. However, we need the @@ -6141,7 +6141,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { .none => .none, .undef => .undef, .load_got, .load_direct, .load_tlv => unreachable, // TODO - .immediate => |imm| .{ .immediate = @truncate(u32, imm) }, + .immediate => |imm| .{ .immediate = @as(u32, @truncate(imm)) }, .memory => |addr| .{ .memory = addr }, }, .fail => |msg| { @@ -6198,7 +6198,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod))); // TODO handle cases where multiple registers are used if (ret_ty_size <= 4) { result.return_value = .{ .register = c_abi_int_return_regs[0] }; @@ -6216,7 +6216,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (ty.toType().abiAlignment(mod) == 8) ncrn = std.mem.alignForward(usize, ncrn, 2); - const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { if (param_size <= 4) { result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; @@ -6245,7 +6245,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod))); if (ret_ty_size == 0) { assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; @@ -6264,7 +6264,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { for (fn_info.param_types, 0..) 
|ty, i| { if (ty.toType().abiSize(mod) > 0) { - const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); const param_alignment = ty.toType().abiAlignment(mod); stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment); diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index 17415318de..54062d00a7 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -78,7 +78,7 @@ pub fn emitMir( // Emit machine code for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); switch (tag) { .add => try emit.mirDataProcessing(inst), .adds => try emit.mirDataProcessing(inst), @@ -241,7 +241,7 @@ fn lowerBranches(emit: *Emit) !void { // TODO optimization opportunity: do this in codegen while // generating MIR for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); if (isBranch(tag)) { const target_inst = emit.branchTarget(inst); @@ -286,7 +286,7 @@ fn lowerBranches(emit: *Emit) !void { var current_code_offset: usize = 0; for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); // If this instruction contained in the code offset // mapping (when it is a target of a branch or if it is a @@ -301,7 +301,7 @@ fn lowerBranches(emit: *Emit) !void { const target_inst = emit.branchTarget(inst); if (target_inst < inst) { const target_offset = emit.code_offset_mapping.get(target_inst).?; - const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset + 8); + const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(current_code_offset + 8)); const branch_type = emit.branch_types.getPtr(inst).?; const optimal_branch_type = try emit.optimalBranchType(tag, offset); if (branch_type.* != optimal_branch_type) { @@ -320,7 +320,7 @@ fn lowerBranches(emit: *Emit) !void { for (origin_list.items) |forward_branch_inst| { const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst]; const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?; - const offset = @intCast(i64, current_code_offset) - @intCast(i64, forward_branch_inst_offset + 8); + const offset = @as(i64, @intCast(current_code_offset)) - @as(i64, @intCast(forward_branch_inst_offset + 8)); const branch_type = emit.branch_types.getPtr(forward_branch_inst).?; const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset); if (branch_type.* != optimal_branch_type) { @@ -351,7 +351,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { } fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { - const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line); + const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line)); const delta_pc: usize = self.code.items.len - self.prev_di_pc; switch (self.debug_output) { .dwarf => |dw| { @@ -368,13 +368,13 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { // increasing the line number try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line); // increasing the pc - const d_pc_p9 = @intCast(i64, delta_pc) - quant; + const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant; if (d_pc_p9 > 0) { // minus one because if its the last one, we want to leave space to change the line which is one quanta - try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant); + 
try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant); if (dbg_out.pcop_change_index.*) |pci| dbg_out.dbg_line.items[pci] += 1; - dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1); + dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1)); } else if (d_pc_p9 == 0) { // we don't need to do anything, because adding the quant does it for us } else unreachable; @@ -448,13 +448,13 @@ fn mirSubStackPointer(emit: *Emit, inst: Mir.Inst.Index) !void { const scratch: Register = .r4; if (Target.arm.featureSetHas(emit.target.cpu.features, .has_v7)) { - try emit.writeInstruction(Instruction.movw(cond, scratch, @truncate(u16, imm32))); - try emit.writeInstruction(Instruction.movt(cond, scratch, @truncate(u16, imm32 >> 16))); + try emit.writeInstruction(Instruction.movw(cond, scratch, @as(u16, @truncate(imm32)))); + try emit.writeInstruction(Instruction.movt(cond, scratch, @as(u16, @truncate(imm32 >> 16)))); } else { - try emit.writeInstruction(Instruction.mov(cond, scratch, Instruction.Operand.imm(@truncate(u8, imm32), 0))); - try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 8), 12))); - try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 16), 8))); - try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 24), 4))); + try emit.writeInstruction(Instruction.mov(cond, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32)), 0))); + try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 8)), 12))); + try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 16)), 8))); + try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 24)), 4))); } break :blk Instruction.Operand.reg(scratch, Instruction.Operand.Shift.none); @@ -484,12 +484,12 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void { const cond = emit.mir.instructions.items(.cond)[inst]; const target_inst = emit.mir.instructions.items(.data)[inst].inst; - const offset = @intCast(i64, emit.code_offset_mapping.get(target_inst).?) 
- @intCast(i64, emit.code.items.len + 8); + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(target_inst).?)) - @as(i64, @intCast(emit.code.items.len + 8)); const branch_type = emit.branch_types.get(inst).?; switch (branch_type) { .b => switch (tag) { - .b => try emit.writeInstruction(Instruction.b(cond, @intCast(i26, offset))), + .b => try emit.writeInstruction(Instruction.b(cond, @as(i26, @intCast(offset)))), else => unreachable, }, } @@ -585,7 +585,7 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void { .ldrb_stack_argument, => { const offset = if (raw_offset <= math.maxInt(u12)) blk: { - break :blk Instruction.Offset.imm(@intCast(u12, raw_offset)); + break :blk Instruction.Offset.imm(@as(u12, @intCast(raw_offset))); } else return emit.fail("TODO mirLoadStack larger offsets", .{}); switch (tag) { @@ -599,7 +599,7 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void { .ldrsh_stack_argument, => { const offset = if (raw_offset <= math.maxInt(u8)) blk: { - break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, raw_offset)); + break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(raw_offset))); } else return emit.fail("TODO mirLoadStack larger offsets", .{}); switch (tag) { diff --git a/src/arch/arm/Mir.zig b/src/arch/arm/Mir.zig index 736d0574bb..e890aaf29c 100644 --- a/src/arch/arm/Mir.zig +++ b/src/arch/arm/Mir.zig @@ -287,7 +287,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => mir.extra[i], - i32 => @bitCast(i32, mir.extra[i]), + i32 => @as(i32, @bitCast(mir.extra[i])), else => @compileError("bad field type"), }; i += 1; diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index a4a4fe472b..2e1e26d220 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -13,7 +13,7 @@ pub const Class = union(enum) { i64_array: u8, fn arrSize(total_size: u64, arr_size: u64) Class { - const count = @intCast(u8, std.mem.alignForward(u64, total_size, arr_size) / arr_size); + const count = @as(u8, @intCast(std.mem.alignForward(u64, total_size, arr_size) / arr_size)); if (arr_size == 32) { return .{ .i32_array = count }; } else { diff --git a/src/arch/arm/bits.zig b/src/arch/arm/bits.zig index 1de40a7059..6c33f3e82a 100644 --- a/src/arch/arm/bits.zig +++ b/src/arch/arm/bits.zig @@ -159,7 +159,7 @@ pub const Register = enum(u5) { /// Returns the unique 4-bit ID of this register which is used in /// the machine code pub fn id(self: Register) u4 { - return @truncate(u4, @intFromEnum(self)); + return @as(u4, @truncate(@intFromEnum(self))); } pub fn dwarfLocOp(self: Register) u8 { @@ -399,8 +399,8 @@ pub const Instruction = union(enum) { pub fn toU8(self: Shift) u8 { return switch (self) { - .register => |v| @bitCast(u8, v), - .immediate => |v| @bitCast(u8, v), + .register => |v| @as(u8, @bitCast(v)), + .immediate => |v| @as(u8, @bitCast(v)), }; } @@ -425,8 +425,8 @@ pub const Instruction = union(enum) { pub fn toU12(self: Operand) u12 { return switch (self) { - .register => |v| @bitCast(u12, v), - .immediate => |v| @bitCast(u12, v), + .register => |v| @as(u12, @bitCast(v)), + .immediate => |v| @as(u12, @bitCast(v)), }; } @@ -463,8 +463,8 @@ pub const Instruction = union(enum) { if (x & mask == x) { break Operand{ .immediate = .{ - .imm = @intCast(u8, std.math.rotl(u32, x, 2 * i)), - .rotate = @intCast(u4, i), + .imm = @as(u8, @intCast(std.math.rotl(u32, x, 2 * i))), + .rotate = @as(u4, @intCast(i)), }, }; } @@ -522,7 
+522,7 @@ pub const Instruction = union(enum) { pub fn toU12(self: Offset) u12 { return switch (self) { - .register => |v| @bitCast(u12, v), + .register => |v| @as(u12, @bitCast(v)), .immediate => |v| v, }; } @@ -604,20 +604,20 @@ pub const Instruction = union(enum) { pub fn toU32(self: Instruction) u32 { return switch (self) { - .data_processing => |v| @bitCast(u32, v), - .multiply => |v| @bitCast(u32, v), - .multiply_long => |v| @bitCast(u32, v), - .signed_multiply_halfwords => |v| @bitCast(u32, v), - .integer_saturating_arithmetic => |v| @bitCast(u32, v), - .bit_field_extract => |v| @bitCast(u32, v), - .single_data_transfer => |v| @bitCast(u32, v), - .extra_load_store => |v| @bitCast(u32, v), - .block_data_transfer => |v| @bitCast(u32, v), - .branch => |v| @bitCast(u32, v), - .branch_exchange => |v| @bitCast(u32, v), - .supervisor_call => |v| @bitCast(u32, v), + .data_processing => |v| @as(u32, @bitCast(v)), + .multiply => |v| @as(u32, @bitCast(v)), + .multiply_long => |v| @as(u32, @bitCast(v)), + .signed_multiply_halfwords => |v| @as(u32, @bitCast(v)), + .integer_saturating_arithmetic => |v| @as(u32, @bitCast(v)), + .bit_field_extract => |v| @as(u32, @bitCast(v)), + .single_data_transfer => |v| @as(u32, @bitCast(v)), + .extra_load_store => |v| @as(u32, @bitCast(v)), + .block_data_transfer => |v| @as(u32, @bitCast(v)), + .branch => |v| @as(u32, @bitCast(v)), + .branch_exchange => |v| @as(u32, @bitCast(v)), + .supervisor_call => |v| @as(u32, @bitCast(v)), .undefined_instruction => |v| v.imm32, - .breakpoint => |v| @intCast(u32, v.imm4) | (@intCast(u32, v.fixed_1) << 4) | (@intCast(u32, v.imm12) << 8) | (@intCast(u32, v.fixed_2_and_cond) << 20), + .breakpoint => |v| @as(u32, @intCast(v.imm4)) | (@as(u32, @intCast(v.fixed_1)) << 4) | (@as(u32, @intCast(v.imm12)) << 8) | (@as(u32, @intCast(v.fixed_2_and_cond)) << 20), }; } @@ -656,9 +656,9 @@ pub const Instruction = union(enum) { .i = 1, .opcode = if (top) 0b1010 else 0b1000, .s = 0, - .rn = @truncate(u4, imm >> 12), + .rn = @as(u4, @truncate(imm >> 12)), .rd = rd.id(), - .op2 = @truncate(u12, imm), + .op2 = @as(u12, @truncate(imm)), }, }; } @@ -760,7 +760,7 @@ pub const Instruction = union(enum) { .rn = rn.id(), .lsb = lsb, .rd = rd.id(), - .widthm1 = @intCast(u5, width - 1), + .widthm1 = @as(u5, @intCast(width - 1)), .unsigned = unsigned, .cond = @intFromEnum(cond), }, @@ -810,11 +810,11 @@ pub const Instruction = union(enum) { offset: ExtraLoadStoreOffset, ) Instruction { const imm4l: u4 = switch (offset) { - .immediate => |imm| @truncate(u4, imm), + .immediate => |imm| @as(u4, @truncate(imm)), .register => |reg| reg, }; const imm4h: u4 = switch (offset) { - .immediate => |imm| @truncate(u4, imm >> 4), + .immediate => |imm| @as(u4, @truncate(imm >> 4)), .register => 0b0000, }; @@ -853,7 +853,7 @@ pub const Instruction = union(enum) { ) Instruction { return Instruction{ .block_data_transfer = .{ - .register_list = @bitCast(u16, reg_list), + .register_list = @as(u16, @bitCast(reg_list)), .rn = rn.id(), .load_store = load_store, .write_back = @intFromBool(write_back), @@ -870,7 +870,7 @@ pub const Instruction = union(enum) { .branch = .{ .cond = @intFromEnum(cond), .link = link, - .offset = @bitCast(u24, @intCast(i24, offset >> 2)), + .offset = @as(u24, @bitCast(@as(i24, @intCast(offset >> 2)))), }, }; } @@ -904,8 +904,8 @@ pub const Instruction = union(enum) { fn breakpoint(imm: u16) Instruction { return Instruction{ .breakpoint = .{ - .imm12 = @truncate(u12, imm >> 4), - .imm4 = @truncate(u4, imm), + .imm12 = @as(u12, @truncate(imm >> 
4)), + .imm4 = @as(u4, @truncate(imm)), }, }; } @@ -1319,7 +1319,7 @@ pub const Instruction = union(enum) { const reg = @as(Register, arg); register_list |= @as(u16, 1) << reg.id(); } - return ldm(cond, .sp, true, @bitCast(RegisterList, register_list)); + return ldm(cond, .sp, true, @as(RegisterList, @bitCast(register_list))); } } @@ -1343,7 +1343,7 @@ pub const Instruction = union(enum) { const reg = @as(Register, arg); register_list |= @as(u16, 1) << reg.id(); } - return stmdb(cond, .sp, true, @bitCast(RegisterList, register_list)); + return stmdb(cond, .sp, true, @as(RegisterList, @bitCast(register_list))); } } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index cba1de92c1..d6bb9f8200 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -323,7 +323,7 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { try self.mir_instructions.ensureUnusedCapacity(gpa, 1); - const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len); + const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len)); self.mir_instructions.appendAssumeCapacity(inst); return result_index; } @@ -336,11 +336,11 @@ pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 { pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, self.mir_extra.items.len); + const result = @as(u32, @intCast(self.mir_extra.items.len)); inline for (fields) |field| { self.mir_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), - i32 => @bitCast(u32, @field(extra, field.name)), + i32 => @as(u32, @bitCast(@field(extra, field.name))), else => @compileError("bad field type"), }); } @@ -752,15 +752,15 @@ fn finishAirBookkeeping(self: *Self) void { fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void { var tomb_bits = self.liveness.getTombBits(inst); for (operands) |op| { - const dies = @truncate(u1, tomb_bits) != 0; + const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; const op_int = @intFromEnum(op); if (op_int < Air.ref_start_index) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); + const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index)); self.processDeath(op_index); } - const is_used = @truncate(u1, tomb_bits) == 0; + const is_used = @as(u1, @truncate(tomb_bits)) == 0; if (is_used) { log.debug("%{d} => {}", .{ inst, result }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -1709,7 +1709,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const fn_ty = self.typeOf(pl_op.operand); const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); var info = try self.resolveCallingConventionValues(fn_ty); defer info.deinit(self); @@ -1747,7 +1747,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); - const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); + const got_addr = 
@as(u32, @intCast(atom.getOffsetTableAddress(elf_file))); try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); _ = try self.addInst(.{ .tag = .jalr, @@ -2139,12 +2139,12 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void { fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); - const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; + const clobbers_len = @as(u31, @truncate(extra.data.flags)); var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; const dead = !is_volatile and self.liveness.isUnused(inst); @@ -2289,20 +2289,20 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); }, .immediate => |unsigned_x| { - const x = @bitCast(i64, unsigned_x); + const x = @as(i64, @bitCast(unsigned_x)); if (math.minInt(i12) <= x and x <= math.maxInt(i12)) { _ = try self.addInst(.{ .tag = .addi, .data = .{ .i_type = .{ .rd = reg, .rs1 = .zero, - .imm12 = @intCast(i12, x), + .imm12 = @as(i12, @intCast(x)), } }, }); } else if (math.minInt(i32) <= x and x <= math.maxInt(i32)) { - const lo12 = @truncate(i12, x); + const lo12 = @as(i12, @truncate(x)); const carry: i32 = if (lo12 < 0) 1 else 0; - const hi20 = @truncate(i20, (x >> 12) +% carry); + const hi20 = @as(i20, @truncate((x >> 12) +% carry)); // TODO: add test case for 32-bit immediate _ = try self.addInst(.{ @@ -2501,7 +2501,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); + const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); const result: MCValue = res: { if (self.liveness.isUnused(inst)) break :res MCValue.dead; return self.fail("TODO implement airAggregateInit for riscv64", .{}); @@ -2653,7 +2653,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 }; for (fn_info.param_types, 0..) 
|ty, i| { - const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -2690,7 +2690,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else switch (cc) { .Naked => unreachable, .Unspecified, .C => { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod))); if (ret_ty_size <= 8) { result.return_value = .{ .register = .a0 }; } else if (ret_ty_size <= 16) { diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 3b330cbd3f..20f2c40ba4 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -39,7 +39,7 @@ pub fn emitMir( // Emit machine code for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); switch (tag) { .add => try emit.mirRType(inst), .sub => try emit.mirRType(inst), @@ -85,7 +85,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { } fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { - const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line); + const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line)); const delta_pc: usize = self.code.items.len - self.prev_di_pc; switch (self.debug_output) { .dwarf => |dw| { @@ -102,13 +102,13 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { // increasing the line number try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line); // increasing the pc - const d_pc_p9 = @intCast(i64, delta_pc) - quant; + const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant; if (d_pc_p9 > 0) { // minus one because if its the last one, we want to leave space to change the line which is one quanta - try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant); + try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant); if (dbg_out.pcop_change_index.*) |pci| dbg_out.dbg_line.items[pci] += 1; - dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1); + dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1)); } else if (d_pc_p9 == 0) { // we don't need to do anything, because adding the quant does it for us } else unreachable; diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 8905b24c3c..da62a68941 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -135,7 +135,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => mir.extra[i], - i32 => @bitCast(i32, mir.extra[i]), + i32 => @as(i32, @bitCast(mir.extra[i])), else => @compileError("bad field type"), }; i += 1; diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig index 5db3bf4f05..2239bd49f8 100644 --- a/src/arch/riscv64/bits.zig +++ b/src/arch/riscv64/bits.zig @@ -56,12 +56,12 @@ pub const Instruction = union(enum) { // TODO: once packed structs work we can remove this monstrosity. 
pub fn toU32(self: Instruction) u32 { return switch (self) { - .R => |v| @bitCast(u32, v), - .I => |v| @bitCast(u32, v), - .S => |v| @bitCast(u32, v), - .B => |v| @intCast(u32, v.opcode) + (@intCast(u32, v.imm11) << 7) + (@intCast(u32, v.imm1_4) << 8) + (@intCast(u32, v.funct3) << 12) + (@intCast(u32, v.rs1) << 15) + (@intCast(u32, v.rs2) << 20) + (@intCast(u32, v.imm5_10) << 25) + (@intCast(u32, v.imm12) << 31), - .U => |v| @bitCast(u32, v), - .J => |v| @bitCast(u32, v), + .R => |v| @as(u32, @bitCast(v)), + .I => |v| @as(u32, @bitCast(v)), + .S => |v| @as(u32, @bitCast(v)), + .B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31), + .U => |v| @as(u32, @bitCast(v)), + .J => |v| @as(u32, @bitCast(v)), }; } @@ -80,7 +80,7 @@ pub const Instruction = union(enum) { // RISC-V is all signed all the time -- convert immediates to unsigned for processing fn iType(op: u7, fn3: u3, rd: Register, r1: Register, imm: i12) Instruction { - const umm = @bitCast(u12, imm); + const umm = @as(u12, @bitCast(imm)); return Instruction{ .I = .{ @@ -94,7 +94,7 @@ pub const Instruction = union(enum) { } fn sType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i12) Instruction { - const umm = @bitCast(u12, imm); + const umm = @as(u12, @bitCast(imm)); return Instruction{ .S = .{ @@ -102,8 +102,8 @@ pub const Instruction = union(enum) { .funct3 = fn3, .rs1 = r1.id(), .rs2 = r2.id(), - .imm0_4 = @truncate(u5, umm), - .imm5_11 = @truncate(u7, umm >> 5), + .imm0_4 = @as(u5, @truncate(umm)), + .imm5_11 = @as(u7, @truncate(umm >> 5)), }, }; } @@ -111,7 +111,7 @@ pub const Instruction = union(enum) { // Use significance value rather than bit value, same for J-type // -- less burden on callsite, bonus semantic checking fn bType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i13) Instruction { - const umm = @bitCast(u13, imm); + const umm = @as(u13, @bitCast(imm)); assert(umm % 2 == 0); // misaligned branch target return Instruction{ @@ -120,17 +120,17 @@ pub const Instruction = union(enum) { .funct3 = fn3, .rs1 = r1.id(), .rs2 = r2.id(), - .imm1_4 = @truncate(u4, umm >> 1), - .imm5_10 = @truncate(u6, umm >> 5), - .imm11 = @truncate(u1, umm >> 11), - .imm12 = @truncate(u1, umm >> 12), + .imm1_4 = @as(u4, @truncate(umm >> 1)), + .imm5_10 = @as(u6, @truncate(umm >> 5)), + .imm11 = @as(u1, @truncate(umm >> 11)), + .imm12 = @as(u1, @truncate(umm >> 12)), }, }; } // We have to extract the 20 bits anyway -- let's not make it more painful fn uType(op: u7, rd: Register, imm: i20) Instruction { - const umm = @bitCast(u20, imm); + const umm = @as(u20, @bitCast(imm)); return Instruction{ .U = .{ @@ -142,17 +142,17 @@ pub const Instruction = union(enum) { } fn jType(op: u7, rd: Register, imm: i21) Instruction { - const umm = @bitCast(u21, imm); + const umm = @as(u21, @bitCast(imm)); assert(umm % 2 == 0); // misaligned jump target return Instruction{ .J = .{ .opcode = op, .rd = rd.id(), - .imm1_10 = @truncate(u10, umm >> 1), - .imm11 = @truncate(u1, umm >> 11), - .imm12_19 = @truncate(u8, umm >> 12), - .imm20 = @truncate(u1, umm >> 20), + .imm1_10 = @as(u10, @truncate(umm >> 1)), + .imm11 = @as(u1, @truncate(umm >> 11)), + .imm12_19 = @as(u8, @truncate(umm >> 12)), + .imm20 = @as(u1, @truncate(umm >> 20)), }, }; } @@ -258,7 +258,7 @@ pub const Instruction = union(enum) { } pub fn sltiu(rd: 
Register, r1: Register, imm: u12) Instruction { - return iType(0b0010011, 0b011, rd, r1, @bitCast(i12, imm)); + return iType(0b0010011, 0b011, rd, r1, @as(i12, @bitCast(imm))); } // Arithmetic/Logical, Register-Immediate (32-bit) @@ -407,7 +407,7 @@ pub const Register = enum(u6) { /// Returns the unique 4-bit ID of this register which is used in /// the machine code pub fn id(self: Register) u5 { - return @truncate(u5, @intFromEnum(self)); + return @as(u5, @truncate(@intFromEnum(self))); } pub fn dwarfLocOp(reg: Register) u8 { diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index f210f8e144..9975cda5cb 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -415,7 +415,7 @@ fn gen(self: *Self) !void { .branch_predict_int = .{ .ccr = .xcc, .cond = .al, - .inst = @intCast(u32, self.mir_instructions.len), + .inst = @as(u32, @intCast(self.mir_instructions.len)), }, }, }); @@ -840,7 +840,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); + const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); const result: MCValue = res: { if (self.liveness.isUnused(inst)) break :res MCValue.dead; return self.fail("TODO implement airAggregateInit for {}", .{self.target.cpu.arch}); @@ -876,7 +876,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); const array_ty = ptr_ty.childType(mod); - const array_len = @intCast(u32, array_ty.arrayLen(mod)); + const array_len = @as(u32, @intCast(array_ty.arrayLen(mod))); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); @@ -893,11 +893,11 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = (extra.data.flags & 0x80000000) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const clobbers_len = @as(u31, @truncate(extra.data.flags)); var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i .. extra_i + extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i .. extra_i + extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i .. extra_i + extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i .. 
extra_i + extra.data.inputs_len])); extra_i += inputs.len; const dead = !is_volatile and self.liveness.isUnused(inst); @@ -1237,13 +1237,13 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { switch (operand) { .immediate => |imm| { const swapped = switch (int_info.bits) { - 16 => @byteSwap(@intCast(u16, imm)), - 24 => @byteSwap(@intCast(u24, imm)), - 32 => @byteSwap(@intCast(u32, imm)), - 40 => @byteSwap(@intCast(u40, imm)), - 48 => @byteSwap(@intCast(u48, imm)), - 56 => @byteSwap(@intCast(u56, imm)), - 64 => @byteSwap(@intCast(u64, imm)), + 16 => @byteSwap(@as(u16, @intCast(imm))), + 24 => @byteSwap(@as(u24, @intCast(imm))), + 32 => @byteSwap(@as(u32, @intCast(imm))), + 40 => @byteSwap(@as(u40, @intCast(imm))), + 48 => @byteSwap(@as(u48, @intCast(imm))), + 56 => @byteSwap(@as(u56, @intCast(imm))), + 64 => @byteSwap(@as(u64, @intCast(imm))), else => return self.fail("TODO synthesize SPARCv9 byteswap for other integer sizes", .{}), }; break :result .{ .immediate = swapped }; @@ -1295,7 +1295,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const pl_op = self.air.instructions.items(.data)[inst].pl_op; const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end .. extra.end + extra.data.args_len])); const ty = self.typeOf(callee); const mod = self.bin_file.options.module.?; const fn_ty = switch (ty.zigTypeTag(mod)) { @@ -1348,7 +1348,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); - break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file)); + break :blk @as(u32, @intCast(atom.getOffsetTableAddress(elf_file))); } else unreachable; try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr }); @@ -1515,7 +1515,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.operandDies(inst, 0)) { const op_int = @intFromEnum(pl_op.operand); if (op_int >= Air.ref_start_index) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); + const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index)); self.processDeath(op_index); } } @@ -1851,7 +1851,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end .. 
loop.end + loop.data.body_len]; - const start = @intCast(u32, self.mir_instructions.len); + const start = @as(u32, @intCast(self.mir_instructions.len)); try self.genBody(body); try self.jump(start); @@ -2574,7 +2574,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); switch (mcv) { .dead, .unreach => unreachable, @@ -2772,7 +2772,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { const gpa = self.gpa; try self.mir_instructions.ensureUnusedCapacity(gpa, 1); - const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len); + const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len)); self.mir_instructions.appendAssumeCapacity(inst); return result_index; } @@ -3207,7 +3207,7 @@ fn binOpImmediate( .is_imm = true, .rd = dest_reg, .rs1 = lhs_reg, - .rs2_or_imm = .{ .imm = @intCast(u12, rhs.immediate) }, + .rs2_or_imm = .{ .imm = @as(u12, @intCast(rhs.immediate)) }, }, }, .sll, @@ -3218,7 +3218,7 @@ fn binOpImmediate( .is_imm = true, .rd = dest_reg, .rs1 = lhs_reg, - .rs2_or_imm = .{ .imm = @intCast(u5, rhs.immediate) }, + .rs2_or_imm = .{ .imm = @as(u5, @intCast(rhs.immediate)) }, }, }, .sllx, @@ -3229,14 +3229,14 @@ fn binOpImmediate( .is_imm = true, .rd = dest_reg, .rs1 = lhs_reg, - .rs2_or_imm = .{ .imm = @intCast(u6, rhs.immediate) }, + .rs2_or_imm = .{ .imm = @as(u6, @intCast(rhs.immediate)) }, }, }, .cmp => .{ .arithmetic_2op = .{ .is_imm = true, .rs1 = lhs_reg, - .rs2_or_imm = .{ .imm = @intCast(u12, rhs.immediate) }, + .rs2_or_imm = .{ .imm = @as(u12, @intCast(rhs.immediate)) }, }, }, else => unreachable, @@ -3535,7 +3535,7 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); + const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))); switch (error_union_mcv) { .register => return self.fail("TODO errUnionPayload for registers", .{}), .stack_offset => |off| { @@ -3565,15 +3565,15 @@ fn finishAirBookkeeping(self: *Self) void { fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void { var tomb_bits = self.liveness.getTombBits(inst); for (operands) |op| { - const dies = @truncate(u1, tomb_bits) != 0; + const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; const op_int = @intFromEnum(op); if (op_int < Air.ref_start_index) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); + const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index)); self.processDeath(op_index); } - const is_used = @truncate(u1, tomb_bits) == 0; + const is_used = @as(u1, @truncate(tomb_bits)) == 0; if (is_used) { log.debug("%{d} => {}", .{ inst, result }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -3663,7 +3663,7 @@ fn genInlineMemcpy( .data = .{ .branch_predict_reg = .{ .cond = .ne_zero, .rs1 = len, - .inst = @intCast(u32, self.mir_instructions.len - 2), + .inst = @as(u32, @intCast(self.mir_instructions.len - 2)), } }, }); @@ -3838,7 +3838,7 @@ fn genSetReg(self: 
*Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .arithmetic_2op = .{ .is_imm = true, .rs1 = reg, - .rs2_or_imm = .{ .imm = @truncate(u12, x) }, + .rs2_or_imm = .{ .imm = @as(u12, @truncate(x)) }, }, }, }); @@ -3848,7 +3848,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .data = .{ .sethi = .{ .rd = reg, - .imm = @truncate(u22, x >> 10), + .imm = @as(u22, @truncate(x >> 10)), }, }, }); @@ -3860,12 +3860,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .is_imm = true, .rd = reg, .rs1 = reg, - .rs2_or_imm = .{ .imm = @truncate(u10, x) }, + .rs2_or_imm = .{ .imm = @as(u10, @truncate(x)) }, }, }, }); } else if (x <= math.maxInt(u44)) { - try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 12) }); + try self.genSetReg(ty, reg, .{ .immediate = @as(u32, @truncate(x >> 12)) }); _ = try self.addInst(.{ .tag = .sllx, @@ -3886,7 +3886,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .is_imm = true, .rd = reg, .rs1 = reg, - .rs2_or_imm = .{ .imm = @truncate(u12, x) }, + .rs2_or_imm = .{ .imm = @as(u12, @truncate(x)) }, }, }, }); @@ -3894,8 +3894,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // Need to allocate a temporary register to load 64-bit immediates. const tmp_reg = try self.register_manager.allocReg(null, gp); - try self.genSetReg(ty, tmp_reg, .{ .immediate = @truncate(u32, x) }); - try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 32) }); + try self.genSetReg(ty, tmp_reg, .{ .immediate = @as(u32, @truncate(x)) }); + try self.genSetReg(ty, reg, .{ .immediate = @as(u32, @truncate(x >> 32)) }); _ = try self.addInst(.{ .tag = .sllx, @@ -3994,7 +3994,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); const overflow_bit_ty = ty.structFieldType(1, mod); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod))); const cond_reg = try self.register_manager.allocReg(null, gp); // TODO handle floating point CCRs @@ -4412,8 +4412,8 @@ fn parseRegName(name: []const u8) ?Register { fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { const tag = self.mir_instructions.items(.tag)[inst]; switch (tag) { - .bpcc => self.mir_instructions.items(.data)[inst].branch_predict_int.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), - .bpr => self.mir_instructions.items(.data)[inst].branch_predict_reg.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), + .bpcc => self.mir_instructions.items(.data)[inst].branch_predict_int.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)), + .bpr => self.mir_instructions.items(.data)[inst].branch_predict_reg.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)), else => unreachable, } } @@ -4490,7 +4490,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) }; for (fn_info.param_types, 0..) 
|ty, i| { - const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -4522,7 +4522,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) } else if (!ret_ty.hasRuntimeBits(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod))); // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller. if (ret_ty_size <= 8) { result.return_value = switch (role) { @@ -4721,7 +4721,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde const mcv = try self.resolveInst(operand); const ptr_ty = self.typeOf(operand); const struct_ty = ptr_ty.childType(mod); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -4816,7 +4816,7 @@ fn truncRegister( .is_imm = true, .rd = dest_reg, .rs1 = operand_reg, - .rs2_or_imm = .{ .imm = @intCast(u6, 64 - int_bits) }, + .rs2_or_imm = .{ .imm = @as(u6, @intCast(64 - int_bits)) }, }, }, }); @@ -4830,7 +4830,7 @@ fn truncRegister( .is_imm = true, .rd = dest_reg, .rs1 = dest_reg, - .rs2_or_imm = .{ .imm = @intCast(u6, int_bits) }, + .rs2_or_imm = .{ .imm = @as(u6, @intCast(int_bits)) }, }, }, }); diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig index 7d16105348..2c39c70269 100644 --- a/src/arch/sparc64/Emit.zig +++ b/src/arch/sparc64/Emit.zig @@ -70,7 +70,7 @@ pub fn emitMir( // Emit machine code for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); switch (tag) { .dbg_line => try emit.mirDbgLine(inst), .dbg_prologue_end => try emit.mirDebugPrologueEnd(), @@ -294,7 +294,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void { .bpcc => switch (tag) { .bpcc => { const branch_predict_int = emit.mir.instructions.items(.data)[inst].branch_predict_int; - const offset = @intCast(i64, emit.code_offset_mapping.get(branch_predict_int.inst).?) - @intCast(i64, emit.code.items.len); + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_int.inst).?)) - @as(i64, @intCast(emit.code.items.len)); log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset }); try emit.writeInstruction( @@ -303,7 +303,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void { branch_predict_int.annul, branch_predict_int.pt, branch_predict_int.ccr, - @intCast(i21, offset), + @as(i21, @intCast(offset)), ), ); }, @@ -312,7 +312,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void { .bpr => switch (tag) { .bpr => { const branch_predict_reg = emit.mir.instructions.items(.data)[inst].branch_predict_reg; - const offset = @intCast(i64, emit.code_offset_mapping.get(branch_predict_reg.inst).?) 
- @intCast(i64, emit.code.items.len); + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_reg.inst).?)) - @as(i64, @intCast(emit.code.items.len)); log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset }); try emit.writeInstruction( @@ -321,7 +321,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void { branch_predict_reg.annul, branch_predict_reg.pt, branch_predict_reg.rs1, - @intCast(i18, offset), + @as(i18, @intCast(offset)), ), ); }, @@ -437,9 +437,9 @@ fn mirShift(emit: *Emit, inst: Mir.Inst.Index) !void { if (data.is_imm) { const imm = data.rs2_or_imm.imm; switch (tag) { - .sll => try emit.writeInstruction(Instruction.sll(u5, rs1, @truncate(u5, imm), rd)), - .srl => try emit.writeInstruction(Instruction.srl(u5, rs1, @truncate(u5, imm), rd)), - .sra => try emit.writeInstruction(Instruction.sra(u5, rs1, @truncate(u5, imm), rd)), + .sll => try emit.writeInstruction(Instruction.sll(u5, rs1, @as(u5, @truncate(imm)), rd)), + .srl => try emit.writeInstruction(Instruction.srl(u5, rs1, @as(u5, @truncate(imm)), rd)), + .sra => try emit.writeInstruction(Instruction.sra(u5, rs1, @as(u5, @truncate(imm)), rd)), .sllx => try emit.writeInstruction(Instruction.sllx(u6, rs1, imm, rd)), .srlx => try emit.writeInstruction(Instruction.srlx(u6, rs1, imm, rd)), .srax => try emit.writeInstruction(Instruction.srax(u6, rs1, imm, rd)), @@ -495,7 +495,7 @@ fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index { } fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void { - const delta_line = @intCast(i32, line) - @intCast(i32, emit.prev_di_line); + const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line)); const delta_pc: usize = emit.code.items.len - emit.prev_di_pc; switch (emit.debug_output) { .dwarf => |dbg_out| { @@ -547,7 +547,7 @@ fn lowerBranches(emit: *Emit) !void { // TODO optimization opportunity: do this in codegen while // generating MIR for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); if (isBranch(tag)) { const target_inst = emit.branchTarget(inst); @@ -592,7 +592,7 @@ fn lowerBranches(emit: *Emit) !void { var current_code_offset: usize = 0; for (mir_tags, 0..) 
|tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); // If this instruction contained in the code offset // mapping (when it is a target of a branch or if it is a @@ -607,7 +607,7 @@ fn lowerBranches(emit: *Emit) !void { const target_inst = emit.branchTarget(inst); if (target_inst < inst) { const target_offset = emit.code_offset_mapping.get(target_inst).?; - const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset); + const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(current_code_offset)); const branch_type = emit.branch_types.getPtr(inst).?; const optimal_branch_type = try emit.optimalBranchType(tag, offset); if (branch_type.* != optimal_branch_type) { @@ -626,7 +626,7 @@ fn lowerBranches(emit: *Emit) !void { for (origin_list.items) |forward_branch_inst| { const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst]; const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?; - const offset = @intCast(i64, current_code_offset) - @intCast(i64, forward_branch_inst_offset); + const offset = @as(i64, @intCast(current_code_offset)) - @as(i64, @intCast(forward_branch_inst_offset)); const branch_type = emit.branch_types.getPtr(forward_branch_inst).?; const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset); if (branch_type.* != optimal_branch_type) { diff --git a/src/arch/sparc64/Mir.zig b/src/arch/sparc64/Mir.zig index f9a4056705..31ea4e23c8 100644 --- a/src/arch/sparc64/Mir.zig +++ b/src/arch/sparc64/Mir.zig @@ -379,7 +379,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => mir.extra[i], - i32 => @bitCast(i32, mir.extra[i]), + i32 => @as(i32, @bitCast(mir.extra[i])), else => @compileError("bad field type"), }; i += 1; diff --git a/src/arch/sparc64/bits.zig b/src/arch/sparc64/bits.zig index 81656b422b..04da91ca74 100644 --- a/src/arch/sparc64/bits.zig +++ b/src/arch/sparc64/bits.zig @@ -16,7 +16,7 @@ pub const Register = enum(u6) { // zig fmt: on pub fn id(self: Register) u5 { - return @truncate(u5, @intFromEnum(self)); + return @as(u5, @truncate(@intFromEnum(self))); } pub fn enc(self: Register) u5 { @@ -96,9 +96,9 @@ pub const FloatingPointRegister = enum(u7) { pub fn id(self: FloatingPointRegister) u6 { return switch (self.size()) { - 32 => @truncate(u6, @intFromEnum(self)), - 64 => @truncate(u6, (@intFromEnum(self) - 32) * 2), - 128 => @truncate(u6, (@intFromEnum(self) - 64) * 4), + 32 => @as(u6, @truncate(@intFromEnum(self))), + 64 => @as(u6, @truncate((@intFromEnum(self) - 32) * 2)), + 128 => @as(u6, @truncate((@intFromEnum(self) - 64) * 4)), else => unreachable, }; } @@ -109,7 +109,7 @@ pub const FloatingPointRegister = enum(u7) { // (See section 5.1.4.1 of SPARCv9 ISA specification) const reg_id = self.id(); - return @truncate(u5, reg_id | (reg_id >> 5)); + return @as(u5, @truncate(reg_id | (reg_id >> 5))); } /// Returns the bit-width of the register. @@ -752,13 +752,13 @@ pub const Instruction = union(enum) { // See section 6.2 of the SPARCv9 ISA manual. fn format1(disp: i32) Instruction { - const udisp = @bitCast(u32, disp); + const udisp = @as(u32, @bitCast(disp)); // In SPARC, branch target needs to be aligned to 4 bytes. assert(udisp % 4 == 0); // Discard the last two bits since those are implicitly zero. 
- const udisp_truncated = @truncate(u30, udisp >> 2); + const udisp_truncated = @as(u30, @truncate(udisp >> 2)); return Instruction{ .format_1 = .{ .disp30 = udisp_truncated, @@ -777,13 +777,13 @@ pub const Instruction = union(enum) { } fn format2b(op2: u3, cond: Condition, annul: bool, disp: i24) Instruction { - const udisp = @bitCast(u24, disp); + const udisp = @as(u24, @bitCast(disp)); // In SPARC, branch target needs to be aligned to 4 bytes. assert(udisp % 4 == 0); // Discard the last two bits since those are implicitly zero. - const udisp_truncated = @truncate(u22, udisp >> 2); + const udisp_truncated = @as(u22, @truncate(udisp >> 2)); return Instruction{ .format_2b = .{ .a = @intFromBool(annul), @@ -795,16 +795,16 @@ pub const Instruction = union(enum) { } fn format2c(op2: u3, cond: Condition, annul: bool, pt: bool, ccr: CCR, disp: i21) Instruction { - const udisp = @bitCast(u21, disp); + const udisp = @as(u21, @bitCast(disp)); // In SPARC, branch target needs to be aligned to 4 bytes. assert(udisp % 4 == 0); // Discard the last two bits since those are implicitly zero. - const udisp_truncated = @truncate(u19, udisp >> 2); + const udisp_truncated = @as(u19, @truncate(udisp >> 2)); - const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1); - const ccr_cc0 = @truncate(u1, @intFromEnum(ccr)); + const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1)); + const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr))); return Instruction{ .format_2c = .{ .a = @intFromBool(annul), @@ -819,16 +819,16 @@ pub const Instruction = union(enum) { } fn format2d(op2: u3, rcond: RCondition, annul: bool, pt: bool, rs1: Register, disp: i18) Instruction { - const udisp = @bitCast(u18, disp); + const udisp = @as(u18, @bitCast(disp)); // In SPARC, branch target needs to be aligned to 4 bytes. assert(udisp % 4 == 0); // Discard the last two bits since those are implicitly zero, // and split it into low and high parts. 
- const udisp_truncated = @truncate(u16, udisp >> 2); - const udisp_hi = @truncate(u2, (udisp_truncated & 0b1100_0000_0000_0000) >> 14); - const udisp_lo = @truncate(u14, udisp_truncated & 0b0011_1111_1111_1111); + const udisp_truncated = @as(u16, @truncate(udisp >> 2)); + const udisp_hi = @as(u2, @truncate((udisp_truncated & 0b1100_0000_0000_0000) >> 14)); + const udisp_lo = @as(u14, @truncate(udisp_truncated & 0b0011_1111_1111_1111)); return Instruction{ .format_2d = .{ .a = @intFromBool(annul), @@ -860,7 +860,7 @@ pub const Instruction = union(enum) { .rd = rd.enc(), .op3 = op3, .rs1 = rs1.enc(), - .simm13 = @bitCast(u13, imm), + .simm13 = @as(u13, @bitCast(imm)), }, }; } @@ -880,7 +880,7 @@ pub const Instruction = union(enum) { .op = op, .op3 = op3, .rs1 = rs1.enc(), - .simm13 = @bitCast(u13, imm), + .simm13 = @as(u13, @bitCast(imm)), }, }; } @@ -904,7 +904,7 @@ pub const Instruction = union(enum) { .op3 = op3, .rs1 = rs1.enc(), .rcond = @intFromEnum(rcond), - .simm10 = @bitCast(u10, imm), + .simm10 = @as(u10, @bitCast(imm)), }, }; } @@ -922,8 +922,8 @@ pub const Instruction = union(enum) { fn format3h(cmask: MemCompletionConstraint, mmask: MemOrderingConstraint) Instruction { return Instruction{ .format_3h = .{ - .cmask = @bitCast(u3, cmask), - .mmask = @bitCast(u4, mmask), + .cmask = @as(u3, @bitCast(cmask)), + .mmask = @as(u4, @bitCast(mmask)), }, }; } @@ -995,8 +995,8 @@ pub const Instruction = union(enum) { }; } fn format3o(op: u2, op3: u6, opf: u9, ccr: CCR, rs1: Register, rs2: Register) Instruction { - const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1); - const ccr_cc0 = @truncate(u1, @intFromEnum(ccr)); + const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1)); + const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr))); return Instruction{ .format_3o = .{ .op = op, @@ -1051,8 +1051,8 @@ pub const Instruction = union(enum) { } fn format4a(op3: u6, ccr: CCR, rs1: Register, rs2: Register, rd: Register) Instruction { - const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1); - const ccr_cc0 = @truncate(u1, @intFromEnum(ccr)); + const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1)); + const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr))); return Instruction{ .format_4a = .{ .rd = rd.enc(), @@ -1066,8 +1066,8 @@ pub const Instruction = union(enum) { } fn format4b(op3: u6, ccr: CCR, rs1: Register, imm: i11, rd: Register) Instruction { - const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1); - const ccr_cc0 = @truncate(u1, @intFromEnum(ccr)); + const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1)); + const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr))); return Instruction{ .format_4b = .{ .rd = rd.enc(), @@ -1075,15 +1075,15 @@ pub const Instruction = union(enum) { .rs1 = rs1.enc(), .cc1 = ccr_cc1, .cc0 = ccr_cc0, - .simm11 = @bitCast(u11, imm), + .simm11 = @as(u11, @bitCast(imm)), }, }; } fn format4c(op3: u6, cond: Condition, ccr: CCR, rs2: Register, rd: Register) Instruction { - const ccr_cc2 = @truncate(u1, @intFromEnum(ccr) >> 2); - const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1); - const ccr_cc0 = @truncate(u1, @intFromEnum(ccr)); + const ccr_cc2 = @as(u1, @truncate(@intFromEnum(ccr) >> 2)); + const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1)); + const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr))); return Instruction{ .format_4c = .{ .rd = rd.enc(), @@ -1098,9 +1098,9 @@ pub const Instruction = union(enum) { } fn format4d(op3: u6, cond: Condition, ccr: CCR, imm: i11, rd: Register) Instruction { - const ccr_cc2 = @truncate(u1, @intFromEnum(ccr) >> 2); - 
const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1); - const ccr_cc0 = @truncate(u1, @intFromEnum(ccr)); + const ccr_cc2 = @as(u1, @truncate(@intFromEnum(ccr) >> 2)); + const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1)); + const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr))); return Instruction{ .format_4d = .{ .rd = rd.enc(), @@ -1109,14 +1109,14 @@ pub const Instruction = union(enum) { .cond = cond.enc(), .cc1 = ccr_cc1, .cc0 = ccr_cc0, - .simm11 = @bitCast(u11, imm), + .simm11 = @as(u11, @bitCast(imm)), }, }; } fn format4e(op3: u6, ccr: CCR, rs1: Register, rd: Register, sw_trap: u7) Instruction { - const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1); - const ccr_cc0 = @truncate(u1, @intFromEnum(ccr)); + const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1)); + const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr))); return Instruction{ .format_4e = .{ .rd = rd.enc(), @@ -1468,8 +1468,8 @@ pub const Instruction = union(enum) { pub fn trap(comptime s2: type, cond: ICondition, ccr: CCR, rs1: Register, rs2: s2) Instruction { // Tcc instructions abuse the rd field to store the conditionals. return switch (s2) { - Register => format4a(0b11_1010, ccr, rs1, rs2, @enumFromInt(Register, @intFromEnum(cond))), - u7 => format4e(0b11_1010, ccr, rs1, @enumFromInt(Register, @intFromEnum(cond)), rs2), + Register => format4a(0b11_1010, ccr, rs1, rs2, @as(Register, @enumFromInt(@intFromEnum(cond)))), + u7 => format4e(0b11_1010, ccr, rs1, @as(Register, @enumFromInt(@intFromEnum(cond))), rs2), else => unreachable, }; } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index f9e5eed626..3a50fc9824 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -120,7 +120,7 @@ const WValue = union(enum) { if (local_value < reserved + 2) return; // reserved locals may never be re-used. Also accounts for 2 stack locals. 
const index = local_value - reserved; - const valtype = @enumFromInt(wasm.Valtype, gen.locals.items[index]); + const valtype = @as(wasm.Valtype, @enumFromInt(gen.locals.items[index])); switch (valtype) { .i32 => gen.free_locals_i32.append(gen.gpa, local_value) catch return, // It's ok to fail any of those, a new local can be allocated instead .i64 => gen.free_locals_i64.append(gen.gpa, local_value) catch return, @@ -817,7 +817,7 @@ fn finishAir(func: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []c assert(operands.len <= Liveness.bpi - 1); var tomb_bits = func.liveness.getTombBits(inst); for (operands) |operand| { - const dies = @truncate(u1, tomb_bits) != 0; + const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; processDeath(func, operand); @@ -910,7 +910,7 @@ fn addTag(func: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void { } fn addExtended(func: *CodeGen, opcode: wasm.MiscOpcode) error{OutOfMemory}!void { - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); try func.mir_extra.append(func.gpa, @intFromEnum(opcode)); try func.addInst(.{ .tag = .misc_prefix, .data = .{ .payload = extra_index } }); } @@ -934,11 +934,11 @@ fn addImm64(func: *CodeGen, imm: u64) error{OutOfMemory}!void { /// Accepts the index into the list of 128bit-immediates fn addImm128(func: *CodeGen, index: u32) error{OutOfMemory}!void { const simd_values = func.simd_immediates.items[index]; - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); // tag + 128bit value try func.mir_extra.ensureUnusedCapacity(func.gpa, 5); func.mir_extra.appendAssumeCapacity(std.wasm.simdOpcode(.v128_const)); - func.mir_extra.appendSliceAssumeCapacity(@alignCast(4, mem.bytesAsSlice(u32, &simd_values))); + func.mir_extra.appendSliceAssumeCapacity(@alignCast(mem.bytesAsSlice(u32, &simd_values))); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); } @@ -979,7 +979,7 @@ fn addExtra(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 { /// Returns the index into `mir_extra` fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, func.mir_extra.items.len); + const result = @as(u32, @intCast(func.mir_extra.items.len)); inline for (fields) |field| { func.mir_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), @@ -1020,7 +1020,7 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype { }, .Union => switch (ty.containerLayout(mod)) { .Packed => { - const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory"); + const int_ty = mod.intType(.unsigned, @as(u16, @intCast(ty.bitSize(mod)))) catch @panic("out of memory"); return typeToValtype(int_ty, mod); }, else => wasm.Valtype.i32, @@ -1050,7 +1050,7 @@ fn emitWValue(func: *CodeGen, value: WValue) InnerError!void { .dead => unreachable, // reference to free'd `WValue` (missing reuseOperand?) 
.none, .stack => {}, // no-op .local => |idx| try func.addLabel(.local_get, idx.value), - .imm32 => |val| try func.addImm32(@bitCast(i32, val)), + .imm32 => |val| try func.addImm32(@as(i32, @bitCast(val))), .imm64 => |val| try func.addImm64(val), .imm128 => |val| try func.addImm128(val), .float32 => |val| try func.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }), @@ -1264,7 +1264,7 @@ fn genFunc(func: *CodeGen) InnerError!void { // In case we have a return value, but the last instruction is a noreturn (such as a while loop) // we emit an unreachable instruction to tell the stack validator that part will never be reached. if (func_type.returns.len != 0 and func.air.instructions.len > 0) { - const inst = @intCast(u32, func.air.instructions.len - 1); + const inst = @as(u32, @intCast(func.air.instructions.len - 1)); const last_inst_ty = func.typeOfIndex(inst); if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn(mod)) { try func.addTag(.@"unreachable"); @@ -1287,11 +1287,11 @@ fn genFunc(func: *CodeGen) InnerError!void { try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } }); // get the total stack size const aligned_stack = std.mem.alignForward(u32, func.stack_size, func.stack_alignment); - try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } }); + try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(aligned_stack)) } }); // substract it from the current stack pointer try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } }); // Get negative stack aligment - try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, func.stack_alignment) * -1 } }); + try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment)) * -1 } }); // Bitwise-and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } }); // store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets @@ -1432,7 +1432,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: if (value != .imm32 and value != .imm64) { const opcode = buildOpcode(.{ .op = .load, - .width = @intCast(u8, abi_size), + .width = @as(u8, @intCast(abi_size)), .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, .valtype1 = typeToValtype(scalar_type, mod), }); @@ -1468,7 +1468,7 @@ fn lowerToStack(func: *CodeGen, value: WValue) !void { if (offset.value > 0) { switch (func.arch()) { .wasm32 => { - try func.addImm32(@bitCast(i32, offset.value)); + try func.addImm32(@as(i32, @bitCast(offset.value))); try func.addTag(.i32_add); }, .wasm64 => { @@ -1815,7 +1815,7 @@ fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: en if (offset + ptr_value.offset() > 0) { switch (func.arch()) { .wasm32 => { - try func.addImm32(@bitCast(i32, @intCast(u32, offset + ptr_value.offset()))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(offset + ptr_value.offset()))))); try func.addTag(.i32_add); }, .wasm64 => { @@ -2111,7 +2111,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(operand); const opcode = buildOpcode(.{ .op = .load, - .width = @intCast(u8, scalar_type.abiSize(mod) * 8), + .width = @as(u8, @intCast(scalar_type.abiSize(mod) * 8)), .signedness = if (scalar_type.isSignedInt(mod)) .signed else 
.unsigned, .valtype1 = typeToValtype(scalar_type, mod), }); @@ -2180,7 +2180,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif if (modifier == .always_tail) return func.fail("TODO implement tail calls for wasm", .{}); const pl_op = func.air.instructions.items(.data)[inst].pl_op; const extra = func.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[extra.end..][0..extra.data.args_len])); const ty = func.typeOf(pl_op.operand); const mod = func.bin_file.base.options.module.?; @@ -2319,15 +2319,15 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{}); } - var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(mod))) - 1); - mask <<= @intCast(u6, ptr_info.packed_offset.bit_offset); + var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(mod)))) - 1)); + mask <<= @as(u6, @intCast(ptr_info.packed_offset.bit_offset)); mask ^= ~@as(u64, 0); const shift_val = if (ptr_info.packed_offset.host_size <= 4) WValue{ .imm32 = ptr_info.packed_offset.bit_offset } else WValue{ .imm64 = ptr_info.packed_offset.bit_offset }; const mask_val = if (ptr_info.packed_offset.host_size <= 4) - WValue{ .imm32 = @truncate(u32, mask) } + WValue{ .imm32 = @as(u32, @truncate(mask)) } else WValue{ .imm64 = mask }; @@ -2357,7 +2357,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE return func.store(lhs, rhs, Type.anyerror, 0); } - const len = @intCast(u32, abi_size); + const len = @as(u32, @intCast(abi_size)); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .Optional => { @@ -2372,23 +2372,23 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE return func.store(lhs, rhs, Type.anyerror, 0); } - const len = @intCast(u32, abi_size); + const len = @as(u32, @intCast(abi_size)); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .Struct, .Array, .Union => if (isByRef(ty, mod)) { - const len = @intCast(u32, abi_size); + const len = @as(u32, @intCast(abi_size)); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .Vector => switch (determineSimdStoreStrategy(ty, mod)) { .unrolled => { - const len = @intCast(u32, abi_size); + const len = @as(u32, @intCast(abi_size)); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .direct => { try func.emitWValue(lhs); try func.lowerToStack(rhs); // TODO: Add helper functions for simd opcodes - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); // stores as := opcode, offset, alignment (opcode::memarg) try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_store), @@ -2423,7 +2423,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset()); return; } else if (abi_size > 16) { - try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(mod)) }); + try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(mod))) }); }, else => if (abi_size > 8) { return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{ @@ -2440,7 +2440,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE const valtype = typeToValtype(ty, mod); const opcode = buildOpcode(.{ 
.valtype1 = valtype, - .width = @intCast(u8, abi_size * 8), + .width = @as(u8, @intCast(abi_size * 8)), .op = .store, }); @@ -2501,7 +2501,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu if (ty.zigTypeTag(mod) == .Vector) { // TODO: Add helper functions for simd opcodes - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); // stores as := opcode, offset, alignment (opcode::memarg) try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_load), @@ -2512,7 +2512,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu return WValue{ .stack = {} }; } - const abi_size = @intCast(u8, ty.abiSize(mod)); + const abi_size = @as(u8, @intCast(ty.abiSize(mod))); const opcode = buildOpcode(.{ .valtype1 = typeToValtype(ty, mod), .width = abi_size * 8, @@ -2589,10 +2589,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. const result = switch (op) { .shr, .shl => res: { - const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse { + const lhs_wasm_bits = toWasmBits(@as(u16, @intCast(lhs_ty.bitSize(mod)))) orelse { return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; - const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?; + const rhs_wasm_bits = toWasmBits(@as(u16, @intCast(rhs_ty.bitSize(mod)))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try tmp.toLocal(func, lhs_ty); @@ -2868,10 +2868,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. 
const result = switch (op) { .shr, .shl => res: { - const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse { + const lhs_wasm_bits = toWasmBits(@as(u16, @intCast(lhs_ty.bitSize(mod)))) orelse { return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; - const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?; + const rhs_wasm_bits = toWasmBits(@as(u16, @intCast(rhs_ty.bitSize(mod)))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try tmp.toLocal(func, lhs_ty); @@ -2902,7 +2902,7 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { const mod = func.bin_file.base.options.module.?; assert(ty.abiSize(mod) <= 16); - const bitsize = @intCast(u16, ty.bitSize(mod)); + const bitsize = @as(u16, @intCast(ty.bitSize(mod))); const wasm_bits = toWasmBits(bitsize) orelse { return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize}); }; @@ -2916,7 +2916,7 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { const result_ptr = try func.allocStack(ty); try func.emitWValue(result_ptr); try func.store(.{ .stack = {} }, lsb, Type.u64, 8 + result_ptr.offset()); - const result = (@as(u64, 1) << @intCast(u6, 64 - (wasm_bits - bitsize))) - 1; + const result = (@as(u64, 1) << @as(u6, @intCast(64 - (wasm_bits - bitsize)))) - 1; try func.emitWValue(result_ptr); _ = try func.load(operand, Type.u64, 0); try func.addImm64(result); @@ -2925,10 +2925,10 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { return result_ptr; } - const result = (@as(u64, 1) << @intCast(u6, bitsize)) - 1; + const result = (@as(u64, 1) << @as(u6, @intCast(bitsize))) - 1; try func.emitWValue(operand); if (bitsize <= 32) { - try func.addImm32(@bitCast(i32, @intCast(u32, result))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(result))))); try func.addTag(.i32_and); } else if (bitsize <= 64) { try func.addImm64(result); @@ -2957,15 +2957,15 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue const index = elem.index; const elem_type = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod); const elem_offset = index * elem_type.abiSize(mod); - return func.lowerParentPtr(elem.base.toValue(), @intCast(u32, elem_offset + offset)); + return func.lowerParentPtr(elem.base.toValue(), @as(u32, @intCast(elem_offset + offset))); }, .field => |field| { const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); const field_offset = switch (parent_ty.zigTypeTag(mod)) { .Struct => switch (parent_ty.containerLayout(mod)) { - .Packed => parent_ty.packedStructFieldByteOffset(@intCast(usize, field.index), mod), - else => parent_ty.structFieldOffset(@intCast(usize, field.index), mod), + .Packed => parent_ty.packedStructFieldByteOffset(@as(usize, @intCast(field.index)), mod), + else => parent_ty.structFieldOffset(@as(usize, @intCast(field.index)), mod), }, .Union => switch (parent_ty.containerLayout(mod)) { .Packed => 0, @@ -2975,7 +2975,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue if (layout.payload_align > layout.tag_align) break :blk 0; // tag is stored first so calculate offset from where payload starts - break :blk @intCast(u32, std.mem.alignForward(u64, layout.tag_size, layout.tag_align)); + break :blk 
@as(u32, @intCast(std.mem.alignForward(u64, layout.tag_size, layout.tag_align))); }, }, .Pointer => switch (parent_ty.ptrSize(mod)) { @@ -2988,7 +2988,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue }, else => unreachable, }; - return func.lowerParentPtr(field.base.toValue(), @intCast(u32, offset + field_offset)); + return func.lowerParentPtr(field.base.toValue(), @as(u32, @intCast(offset + field_offset))); }, } } @@ -3045,11 +3045,11 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo( comptime assert(@typeInfo(T).Int.signedness == .signed); assert(bits <= 64); const WantedT = std.meta.Int(.unsigned, @typeInfo(T).Int.bits); - if (value >= 0) return @bitCast(WantedT, value); - const max_value = @intCast(u64, (@as(u65, 1) << bits) - 1); - const flipped = @intCast(T, (~-@as(i65, value)) + 1); - const result = @bitCast(WantedT, flipped) & max_value; - return @intCast(WantedT, result); + if (value >= 0) return @as(WantedT, @bitCast(value)); + const max_value = @as(u64, @intCast((@as(u65, 1) << bits) - 1)); + const flipped = @as(T, @intCast((~-@as(i65, value)) + 1)); + const result = @as(WantedT, @bitCast(flipped)) & max_value; + return @as(WantedT, @intCast(result)); } fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { @@ -3150,18 +3150,18 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { const int_info = ty.intInfo(mod); switch (int_info.signedness) { .signed => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @intCast(u32, toTwosComplement( + 0...32 => return WValue{ .imm32 = @as(u32, @intCast(toTwosComplement( val.toSignedInt(mod), - @intCast(u6, int_info.bits), - )) }, + @as(u6, @intCast(int_info.bits)), + ))) }, 33...64 => return WValue{ .imm64 = toTwosComplement( val.toSignedInt(mod), - @intCast(u7, int_info.bits), + @as(u7, @intCast(int_info.bits)), ) }, else => unreachable, }, .unsigned => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, + 0...32 => return WValue{ .imm32 = @as(u32, @intCast(val.toUnsignedInt(mod))) }, 33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) }, else => unreachable, }, @@ -3198,7 +3198,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType()); }, .float => |float| switch (float.storage) { - .f16 => |f16_val| return WValue{ .imm32 = @bitCast(u16, f16_val) }, + .f16 => |f16_val| return WValue{ .imm32 = @as(u16, @bitCast(f16_val)) }, .f32 => |f32_val| return WValue{ .float32 = f32_val }, .f64 => |f64_val| return WValue{ .float64 = f64_val }, else => unreachable, @@ -3254,7 +3254,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { /// Stores the value as a 128bit-immediate value by storing it inside /// the list and returning the index into this list as `WValue`. 
fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue { - const index = @intCast(u32, func.simd_immediates.items.len); + const index = @as(u32, @intCast(func.simd_immediates.items.len)); try func.simd_immediates.append(func.gpa, value); return WValue{ .imm128 = index }; } @@ -3270,8 +3270,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { }, .Float => switch (ty.floatBits(func.target)) { 16 => return WValue{ .imm32 = 0xaaaaaaaa }, - 32 => return WValue{ .float32 = @bitCast(f32, @as(u32, 0xaaaaaaaa)) }, - 64 => return WValue{ .float64 = @bitCast(f64, @as(u64, 0xaaaaaaaaaaaaaaaa)) }, + 32 => return WValue{ .float32 = @as(f32, @bitCast(@as(u32, 0xaaaaaaaa))) }, + 64 => return WValue{ .float64 = @as(f64, @bitCast(@as(u64, 0xaaaaaaaaaaaaaaaa))) }, else => unreachable, }, .Pointer => switch (func.arch()) { @@ -3312,13 +3312,13 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod), .int => |int| intStorageAsI32(int.storage, mod), .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod), - .err => |err| @bitCast(i32, @intCast(Module.ErrorInt, mod.global_error_set.getIndex(err.name).?)), + .err => |err| @as(i32, @bitCast(@as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(err.name).?)))), else => unreachable, }, } return switch (ty.zigTypeTag(mod)) { - .ErrorSet => @bitCast(i32, val.getErrorInt(mod)), + .ErrorSet => @as(i32, @bitCast(val.getErrorInt(mod))), else => unreachable, // Programmer called this function for an illegal type }; } @@ -3329,11 +3329,11 @@ fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32 fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 { return switch (storage) { - .i64 => |x| @intCast(i32, x), - .u64 => |x| @bitCast(i32, @intCast(u32, x)), + .i64 => |x| @as(i32, @intCast(x)), + .u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))), .big_int => unreachable, - .lazy_align => |ty| @bitCast(i32, ty.toType().abiAlignment(mod)), - .lazy_size => |ty| @bitCast(i32, @intCast(u32, ty.toType().abiSize(mod))), + .lazy_align => |ty| @as(i32, @bitCast(ty.toType().abiAlignment(mod))), + .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(ty.toType().abiSize(mod))))), }; } @@ -3421,7 +3421,7 @@ fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.branches.ensureUnusedCapacity(func.gpa, 2); { func.branches.appendAssumeCapacity(.{}); - try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.else_deaths.len)); + try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @as(u32, @intCast(liveness_condbr.else_deaths.len))); defer { var else_stack = func.branches.pop(); else_stack.deinit(func.gpa); @@ -3433,7 +3433,7 @@ fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // Outer block that matches the condition { func.branches.appendAssumeCapacity(.{}); - try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.then_deaths.len)); + try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @as(u32, @intCast(liveness_condbr.then_deaths.len))); defer { var then_stack = func.branches.pop(); then_stack.deinit(func.gpa); @@ -3715,7 +3715,7 @@ fn structFieldPtr( } switch (struct_ptr) { .stack_offset => |stack_offset| { - return WValue{ .stack_offset = .{ .value = stack_offset.value + @intCast(u32, offset), .references = 1 } }; + return WValue{ .stack_offset = .{ .value = stack_offset.value + @as(u32, 
@intCast(offset)), .references = 1 } }; }, else => return func.buildPointerOffset(struct_ptr, offset, .new), } @@ -3755,7 +3755,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.binOp(operand, const_wvalue, backing_ty, .shr); if (field_ty.zigTypeTag(mod) == .Float) { - const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); + const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod)))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); @@ -3764,7 +3764,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // we can simply reuse the operand. break :result func.reuseOperand(struct_field.struct_operand, operand); } else if (field_ty.isPtrAtRuntime(mod)) { - const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); + const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod)))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); break :result try truncated.toLocal(func, field_ty); } @@ -3783,14 +3783,14 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } - const union_int_type = try mod.intType(.unsigned, @intCast(u16, struct_ty.bitSize(mod))); + const union_int_type = try mod.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(mod)))); if (field_ty.zigTypeTag(mod) == .Float) { - const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); + const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod)))); const truncated = try func.trunc(operand, int_type, union_int_type); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); } else if (field_ty.isPtrAtRuntime(mod)) { - const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); + const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod)))); const truncated = try func.trunc(operand, int_type, union_int_type); break :result try truncated.toLocal(func, field_ty); } @@ -3847,7 +3847,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var highest_maybe: ?i32 = null; while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = func.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, func.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[case.end..][0..case.data.items_len])); const case_body = func.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + items.len + case_body.len; const values = try func.gpa.alloc(CaseValue, items.len); @@ -3904,7 +3904,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } // Account for default branch so always add '1' - const depth = @intCast(u32, highest - lowest + @intFromBool(has_else_body)) + 1; + const depth = @as(u32, @intCast(highest - lowest + @intFromBool(has_else_body))) + 1; const jump_table: Mir.JumpTable = .{ .length = depth }; const table_extra_index = try func.addExtra(jump_table); try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } }); @@ -3915,7 +3915,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const idx = blk: { for 
(case_list.items, 0..) |case, idx| { for (case.values) |case_value| { - if (case_value.integer == value) break :blk @intCast(u32, idx); + if (case_value.integer == value) break :blk @as(u32, @intCast(idx)); } } // error sets are almost always sparse so we use the default case @@ -4018,7 +4018,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro try func.emitWValue(operand); if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { try func.addMemArg(.i32_load16_u, .{ - .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, mod)), + .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))), .alignment = Type.anyerror.abiAlignment(mod), }); } @@ -4051,7 +4051,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo break :result WValue{ .none = {} }; } - const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); + const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))); if (op_is_ptr or isByRef(payload_ty, mod)) { break :result try func.buildPointerOffset(operand, pl_offset, .new); } @@ -4080,7 +4080,7 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) break :result func.reuseOperand(ty_op.operand, operand); } - const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, mod))); + const error_val = try func.load(operand, Type.anyerror, @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod)))); break :result try error_val.toLocal(func, Type.anyerror); }; func.finishAir(inst, result, &.{ty_op.operand}); @@ -4100,13 +4100,13 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void } const err_union = try func.allocStack(err_ty); - const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); + const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod))), .new); try func.store(payload_ptr, operand, pl_ty, 0); // ensure we also write '0' to the error part, so any present stack value gets overwritten by it. 
try func.emitWValue(err_union); try func.addImm32(0); - const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod)); + const err_val_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))); try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 }); break :result err_union; }; @@ -4128,11 +4128,11 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const err_union = try func.allocStack(err_ty); // store error value - try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, mod))); + try func.store(err_union, operand, Type.anyerror, @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)))); // write 'undefined' to the payload - const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); - const len = @intCast(u32, err_ty.errorUnionPayload(mod).abiSize(mod)); + const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod))), .new); + const len = @as(u32, @intCast(err_ty.errorUnionPayload(mod).abiSize(mod))); try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa }); break :result err_union; @@ -4154,8 +4154,8 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return func.fail("todo Wasm intcast for bitsize > 128", .{}); } - const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(mod))).?; - const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; + const op_bits = toWasmBits(@as(u16, @intCast(operand_ty.bitSize(mod)))).?; + const wanted_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?; const result = if (op_bits == wanted_bits) func.reuseOperand(ty_op.operand, operand) else @@ -4170,8 +4170,8 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// NOTE: May leave the result on the top of the stack. fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue { const mod = func.bin_file.base.options.module.?; - const given_bitsize = @intCast(u16, given.bitSize(mod)); - const wanted_bitsize = @intCast(u16, wanted.bitSize(mod)); + const given_bitsize = @as(u16, @intCast(given.bitSize(mod))); + const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(mod))); assert(given_bitsize <= 128); assert(wanted_bitsize <= 128); @@ -4396,7 +4396,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // calculate index into slice try func.emitWValue(index); - try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size))))); try func.addTag(.i32_mul); try func.addTag(.i32_add); @@ -4426,7 +4426,7 @@ fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // calculate index into slice try func.emitWValue(index); - try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size))))); try func.addTag(.i32_mul); try func.addTag(.i32_add); @@ -4466,13 +4466,13 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// NOTE: Resulting value is left on the stack. 
fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue { const mod = func.bin_file.base.options.module.?; - const given_bits = @intCast(u16, given_ty.bitSize(mod)); + const given_bits = @as(u16, @intCast(given_ty.bitSize(mod))); if (toWasmBits(given_bits) == null) { return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits}); } var result = try func.intcast(operand, given_ty, wanted_ty); - const wanted_bits = @intCast(u16, wanted_ty.bitSize(mod)); + const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(mod))); const wasm_bits = toWasmBits(wanted_bits).?; if (wasm_bits != wanted_bits) { result = try func.wrapOperand(result, wanted_ty); @@ -4505,7 +4505,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } // store the length of the array in the slice - const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen(mod)) }; + const len = WValue{ .imm32 = @as(u32, @intCast(array_ty.arrayLen(mod))) }; try func.store(slice_local, len, Type.usize, func.ptrSize()); func.finishAir(inst, slice_local, &.{ty_op.operand}); @@ -4545,7 +4545,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // calculate index into slice try func.emitWValue(index); - try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size))))); try func.addTag(.i32_mul); try func.addTag(.i32_add); @@ -4584,7 +4584,7 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // calculate index into ptr try func.emitWValue(index); - try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size))))); try func.addTag(.i32_mul); try func.addTag(.i32_add); @@ -4612,7 +4612,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { try func.lowerToStack(ptr); try func.emitWValue(offset); - try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(mod)))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(pointee_ty.abiSize(mod)))))); try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode)); try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode)); @@ -4635,7 +4635,7 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void const value = try func.resolveInst(bin_op.rhs); const len = switch (ptr_ty.ptrSize(mod)) { .Slice => try func.sliceLen(ptr), - .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType(mod).arrayLen(mod)) }), + .One => @as(WValue, .{ .imm32 = @as(u32, @intCast(ptr_ty.childType(mod).arrayLen(mod))) }), .C, .Many => unreachable, }; @@ -4656,7 +4656,7 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void /// we implement it manually. fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void { const mod = func.bin_file.base.options.module.?; - const abi_size = @intCast(u32, elem_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(elem_ty.abiSize(mod))); // When bulk_memory is enabled, we lower it to wasm's memset instruction. // If not, we lower it ourselves. 
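The hunks in this file all apply the same mechanical rewrite: each two-argument cast builtin becomes a single-argument builtin, with the old destination type carried by an enclosing @as. A minimal standalone sketch of that pattern follows; demoCasts, len and raw are made-up names for illustration, not identifiers from the compiler source.

const std = @import("std");

// Illustration only: `len` and `raw` stand in for whatever values the real
// call sites convert; nothing here comes from the compiler itself.
fn demoCasts(len: usize, raw: u32) void {
    // Old form (destination type as the first argument):
    //     const n = @intCast(u32, len);
    //     const s = @bitCast(i32, raw);
    //     const lo = @truncate(u8, raw);
    // New form: the builtin takes only its operand, and the enclosing @as
    // supplies the type that used to be the first argument.
    const n = @as(u32, @intCast(len));
    const s = @as(i32, @bitCast(raw));
    const lo = @as(u8, @truncate(raw));
    std.debug.print("{} {} {}\n", .{ n, s, lo });
}

Because @as names exactly the type the old first argument named, the translation is purely syntactic, which is why it can be applied uniformly across these files.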
@@ -4756,7 +4756,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { if (isByRef(array_ty, mod)) { try func.lowerToStack(array); try func.emitWValue(index); - try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size))))); try func.addTag(.i32_mul); try func.addTag(.i32_add); } else { @@ -4772,11 +4772,11 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, }; - var operands = [_]u32{ std.wasm.simdOpcode(opcode), @intCast(u8, lane) }; + var operands = [_]u32{ std.wasm.simdOpcode(opcode), @as(u8, @intCast(lane)) }; try func.emitWValue(array); - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); try func.mir_extra.appendSlice(func.gpa, &operands); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); @@ -4789,7 +4789,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // Is a non-unrolled vector (v128) try func.lowerToStack(stack_vec); try func.emitWValue(index); - try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size))))); try func.addTag(.i32_mul); try func.addTag(.i32_add); }, @@ -4886,7 +4886,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try func.allocLocal(ty); try func.emitWValue(operand); // TODO: Add helper functions for simd opcodes - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); // stores as := opcode, offset, alignment (opcode::memarg) try func.mir_extra.appendSlice(func.gpa, &[_]u32{ opcode, @@ -4907,7 +4907,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }; const result = try func.allocLocal(ty); try func.emitWValue(operand); - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); try func.mir_extra.append(func.gpa, opcode); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); try func.addLabel(.local_set, result.local.value); @@ -4917,13 +4917,13 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } const elem_size = elem_ty.bitSize(mod); - const vector_len = @intCast(usize, ty.vectorLen(mod)); + const vector_len = @as(usize, @intCast(ty.vectorLen(mod))); if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) { return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size}); } const result = try func.allocStack(ty); - const elem_byte_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(mod))); var index: usize = 0; var offset: u32 = 0; while (index < vector_len) : (index += 1) { @@ -4966,11 +4966,11 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(result); const loaded = if (value >= 0) - try func.load(a, child_ty, @intCast(u32, @intCast(i64, elem_size) * value)) + try func.load(a, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * value))) else - try func.load(b, child_ty, @intCast(u32, @intCast(i64, elem_size) * ~value)); + try func.load(b, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * ~value))); - try func.store(.stack, loaded, child_ty, result.stack_offset.value + @intCast(u32, elem_size) * 
@intCast(u32, index)); + try func.store(.stack, loaded, child_ty, result.stack_offset.value + @as(u32, @intCast(elem_size)) * @as(u32, @intCast(index))); } return func.finishAir(inst, result, &.{ extra.a, extra.b }); @@ -4980,22 +4980,22 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } ++ [1]u32{undefined} ** 4; var lanes = std.mem.asBytes(operands[1..]); - for (0..@intCast(usize, mask_len)) |index| { + for (0..@as(usize, @intCast(mask_len))) |index| { const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod); const base_index = if (mask_elem >= 0) - @intCast(u8, @intCast(i64, elem_size) * mask_elem) + @as(u8, @intCast(@as(i64, @intCast(elem_size)) * mask_elem)) else - 16 + @intCast(u8, @intCast(i64, elem_size) * ~mask_elem); + 16 + @as(u8, @intCast(@as(i64, @intCast(elem_size)) * ~mask_elem)); - for (0..@intCast(usize, elem_size)) |byte_offset| { - lanes[index * @intCast(usize, elem_size) + byte_offset] = base_index + @intCast(u8, byte_offset); + for (0..@as(usize, @intCast(elem_size))) |byte_offset| { + lanes[index * @as(usize, @intCast(elem_size)) + byte_offset] = base_index + @as(u8, @intCast(byte_offset)); } } try func.emitWValue(a); try func.emitWValue(b); - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); try func.mir_extra.appendSlice(func.gpa, &operands); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); @@ -5015,15 +5015,15 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const result_ty = func.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen(mod)); - const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]); + const len = @as(usize, @intCast(result_ty.arrayLen(mod))); + const elements = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[ty_pl.payload..][0..len])); const result: WValue = result_value: { switch (result_ty.zigTypeTag(mod)) { .Array => { const result = try func.allocStack(result_ty); const elem_ty = result_ty.childType(mod); - const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); const sentinel = if (result_ty.sentinel(mod)) |sent| blk: { break :blk try func.lowerConstant(sent, elem_ty); } else null; @@ -5087,7 +5087,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { WValue{ .imm64 = current_bit }; const value = try func.resolveInst(elem); - const value_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const value_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); const int_ty = try mod.intType(.unsigned, value_bit_size); // load our current result on stack so we can perform all transformations @@ -5113,7 +5113,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue; const elem_ty = result_ty.structFieldType(elem_index, mod); - const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); const value = try func.resolveInst(elem); try func.store(offset, value, elem_ty, 0); @@ -5174,7 +5174,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new); try func.store(payload_ptr, payload, 
field.ty, 0); } else { - try func.store(result_ptr, payload, field.ty, @intCast(u32, layout.tag_size)); + try func.store(result_ptr, payload, field.ty, @as(u32, @intCast(layout.tag_size))); } if (layout.tag_size > 0) { @@ -5187,21 +5187,21 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { result_ptr, tag_int, union_obj.tag_ty, - @intCast(u32, layout.payload_size), + @as(u32, @intCast(layout.payload_size)), ); } } break :result result_ptr; } else { const operand = try func.resolveInst(extra.init); - const union_int_type = try mod.intType(.unsigned, @intCast(u16, union_ty.bitSize(mod))); + const union_int_type = try mod.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(mod)))); if (field.ty.zigTypeTag(mod) == .Float) { - const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod))); + const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field.ty.bitSize(mod)))); const bitcasted = try func.bitcast(field.ty, int_type, operand); const casted = try func.trunc(bitcasted, int_type, union_int_type); break :result try casted.toLocal(func, field.ty); } else if (field.ty.isPtrAtRuntime(mod)) { - const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod))); + const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field.ty.bitSize(mod)))); const casted = try func.intcast(operand, int_type, union_int_type); break :result try casted.toLocal(func, field.ty); } @@ -5334,7 +5334,7 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // when the tag alignment is smaller than the payload, the field will be stored // after the payload. const offset = if (layout.tag_align < layout.payload_align) blk: { - break :blk @intCast(u32, layout.payload_size); + break :blk @as(u32, @intCast(layout.payload_size)); } else @as(u32, 0); try func.store(union_ptr, new_tag, tag_ty, offset); func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); @@ -5353,7 +5353,7 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // when the tag alignment is smaller than the payload, the field will be stored // after the payload. 
const offset = if (layout.tag_align < layout.payload_align) blk: { - break :blk @intCast(u32, layout.payload_size); + break :blk @as(u32, @intCast(layout.payload_size)); } else @as(u32, 0); const tag = try func.load(operand, tag_ty, offset); const result = try tag.toLocal(func, tag_ty); @@ -5458,7 +5458,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi operand, .{ .imm32 = 0 }, Type.anyerror, - @intCast(u32, errUnionErrorOffset(payload_ty, mod)), + @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod))), ); const result = result: { @@ -5466,7 +5466,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi break :result func.reuseOperand(ty_op.operand, operand); } - break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, mod)), .new); + break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))), .new); }; func.finishAir(inst, result, &.{ty_op.operand}); } @@ -5483,7 +5483,7 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = if (field_offset != 0) result: { const base = try func.buildPointerOffset(field_ptr, 0, .new); try func.addLabel(.local_get, base.local.value); - try func.addImm32(@bitCast(i32, @intCast(u32, field_offset))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(field_offset))))); try func.addTag(.i32_sub); try func.addLabel(.local_set, base.local.value); break :result base; @@ -5514,14 +5514,14 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const slice_len = try func.sliceLen(dst); if (ptr_elem_ty.abiSize(mod) != 1) { try func.emitWValue(slice_len); - try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(mod)) }); + try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(mod))) }); try func.addTag(.i32_mul); try func.addLabel(.local_set, slice_len.local.value); } break :blk slice_len; }, .One => @as(WValue, .{ - .imm32 = @intCast(u32, ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod)), + .imm32 = @as(u32, @intCast(ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod))), }), .C, .Many => unreachable, }; @@ -5611,7 +5611,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(operand); switch (func.arch()) { .wasm32 => { - try func.addImm32(@bitCast(i32, @intCast(u32, abi_size))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(abi_size))))); try func.addTag(.i32_mul); try func.addTag(.i32_add); }, @@ -5708,7 +5708,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); - const offset = @intCast(u32, lhs_ty.abiSize(mod)); + const offset = @as(u32, @intCast(lhs_ty.abiSize(mod))); try func.store(result_ptr, overflow_local, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); @@ -5830,7 +5830,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); - const offset = @intCast(u32, lhs_ty.abiSize(mod)); + const offset = @as(u32, @intCast(lhs_ty.abiSize(mod))); try func.store(result_ptr, overflow_local, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); @@ -6005,7 +6005,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: 
Air.Inst.Index) InnerError!void { const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, bin_op_local, lhs_ty, 0); - const offset = @intCast(u32, lhs_ty.abiSize(mod)); + const offset = @as(u32, @intCast(lhs_ty.abiSize(mod))); try func.store(result_ptr, overflow_bit, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); @@ -6149,7 +6149,7 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { switch (wasm_bits) { 32 => { if (wasm_bits != int_info.bits) { - const val: u32 = @as(u32, 1) << @intCast(u5, int_info.bits); + const val: u32 = @as(u32, 1) << @as(u5, @intCast(int_info.bits)); // leave value on the stack _ = try func.binOp(operand, .{ .imm32 = val }, ty, .@"or"); } else try func.emitWValue(operand); @@ -6157,7 +6157,7 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }, 64 => { if (wasm_bits != int_info.bits) { - const val: u64 = @as(u64, 1) << @intCast(u6, int_info.bits); + const val: u64 = @as(u64, 1) << @as(u6, @intCast(int_info.bits)); // leave value on the stack _ = try func.binOp(operand, .{ .imm64 = val }, ty, .@"or"); } else try func.emitWValue(operand); @@ -6172,7 +6172,7 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.i64_ctz); _ = try func.load(operand, Type.u64, 8); if (wasm_bits != int_info.bits) { - try func.addImm64(@as(u64, 1) << @intCast(u6, int_info.bits - 64)); + try func.addImm64(@as(u64, 1) << @as(u6, @intCast(int_info.bits - 64))); try func.addTag(.i64_or); } try func.addTag(.i64_ctz); @@ -6275,7 +6275,7 @@ fn lowerTry( // check if the error tag is set for the error union. try func.emitWValue(err_union); if (pl_has_bits) { - const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod)); + const err_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))); try func.addMemArg(.i32_load16_u, .{ .offset = err_union.offset() + err_offset, .alignment = Type.anyerror.abiAlignment(mod), @@ -6300,7 +6300,7 @@ fn lowerTry( return WValue{ .none = {} }; } - const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, mod)); + const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod))); if (isByRef(pl_ty, mod)) { return buildPointerOffset(func, err_union, pl_offset, .new); } @@ -6590,9 +6590,9 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { var bin_result = try (try func.binOp(lhs, rhs, ty, op)).toLocal(func, ty); defer bin_result.free(func); if (wasm_bits != int_info.bits and op == .add) { - const val: u64 = @intCast(u64, (@as(u65, 1) << @intCast(u7, int_info.bits)) - 1); + const val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits))) - 1)); const imm_val = switch (wasm_bits) { - 32 => WValue{ .imm32 = @intCast(u32, val) }, + 32 => WValue{ .imm32 = @as(u32, @intCast(val)) }, 64 => WValue{ .imm64 = val }, else => unreachable, }; @@ -6603,7 +6603,7 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { } else { switch (wasm_bits) { 32 => try func.addImm32(if (op == .add) @as(i32, -1) else 0), - 64 => try func.addImm64(if (op == .add) @bitCast(u64, @as(i64, -1)) else 0), + 64 => try func.addImm64(if (op == .add) @as(u64, @bitCast(@as(i64, -1))) else 0), else => unreachable, } try func.emitWValue(bin_result); @@ -6629,16 +6629,16 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, break :rhs try (try func.signAbsValue(rhs_operand, ty)).toLocal(func, ty); } else rhs_operand; - const max_val: u64 = @intCast(u64, 
(@as(u65, 1) << @intCast(u7, int_info.bits - 1)) - 1); - const min_val: i64 = (-@intCast(i64, @intCast(u63, max_val))) - 1; + const max_val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits - 1))) - 1)); + const min_val: i64 = (-@as(i64, @intCast(@as(u63, @intCast(max_val))))) - 1; const max_wvalue = switch (wasm_bits) { - 32 => WValue{ .imm32 = @truncate(u32, max_val) }, + 32 => WValue{ .imm32 = @as(u32, @truncate(max_val)) }, 64 => WValue{ .imm64 = max_val }, else => unreachable, }; const min_wvalue = switch (wasm_bits) { - 32 => WValue{ .imm32 = @bitCast(u32, @truncate(i32, min_val)) }, - 64 => WValue{ .imm64 = @bitCast(u64, min_val) }, + 32 => WValue{ .imm32 = @as(u32, @bitCast(@as(i32, @truncate(min_val)))) }, + 64 => WValue{ .imm64 = @as(u64, @bitCast(min_val)) }, else => unreachable, }; @@ -6715,11 +6715,11 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }, 64 => blk: { if (!is_signed) { - try func.addImm64(@bitCast(u64, @as(i64, -1))); + try func.addImm64(@as(u64, @bitCast(@as(i64, -1)))); break :blk; } - try func.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64)))); - try func.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64)))); + try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.minInt(i64))))); + try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.maxInt(i64))))); _ = try func.cmp(lhs, .{ .imm64 = 0 }, ty, .lt); try func.addTag(.select); }, @@ -6759,12 +6759,12 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }, 64 => blk: { if (!is_signed) { - try func.addImm64(@bitCast(u64, @as(i64, -1))); + try func.addImm64(@as(u64, @bitCast(@as(i64, -1)))); break :blk; } - try func.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64)))); - try func.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64)))); + try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.minInt(i64))))); + try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.maxInt(i64))))); _ = try func.cmp(shl_res, .{ .imm64 = 0 }, ty, .lt); try func.addTag(.select); }, @@ -6894,7 +6894,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse. // generate an if-else chain for each tag value as well as constant. for (enum_ty.enumFields(mod), 0..) |tag_name_ip, field_index_usize| { - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); // for each tag name, create an unnamed const, // and then get a pointer to its value. 
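The pointer-flavoured builtins in these hunks follow the same scheme: @alignCast no longer takes an explicit alignment and @ptrCast no longer takes a destination type; both now read them from the surrounding expression, such as an @as or a callee's parameter type. A small sketch under that reading; bytesAsU32s is a hypothetical helper, not compiler code.

const std = @import("std");

// Hypothetical helper for illustration only. `bytes` is assumed to be
// 4-byte aligned and a multiple of 4 bytes long.
fn bytesAsU32s(bytes: []const u8) []const u32 {
    // Old form (alignment passed explicitly):
    //     return @alignCast(4, std.mem.bytesAsSlice(u32, bytes));
    // New form: only the operand is passed; the alignment to assert comes
    // from the slice type named in the enclosing @as.
    const words = @as([]const u32, @alignCast(std.mem.bytesAsSlice(u32, bytes)));
    return words;
}

The appendSliceAssumeCapacity change earlier in this file is the same shape, just with the expected slice type implied by the callee's parameter instead of a written @as.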
@@ -6953,7 +6953,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { try writer.writeByte(std.wasm.opcode(.i32_const)); try relocs.append(.{ .relocation_type = .R_WASM_MEMORY_ADDR_LEB, - .offset = @intCast(u32, body_list.items.len), + .offset = @as(u32, @intCast(body_list.items.len)), .index = tag_sym_index, }); try writer.writeAll(&[_]u8{0} ** 5); // will be relocated @@ -6965,7 +6965,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // store length try writer.writeByte(std.wasm.opcode(.i32_const)); - try leb.writeULEB128(writer, @intCast(u32, tag_name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(tag_name.len))); try writer.writeByte(std.wasm.opcode(.i32_store)); try leb.writeULEB128(writer, encoded_alignment); try leb.writeULEB128(writer, @as(u32, 4)); @@ -6974,7 +6974,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { try writer.writeByte(std.wasm.opcode(.i64_const)); try relocs.append(.{ .relocation_type = .R_WASM_MEMORY_ADDR_LEB64, - .offset = @intCast(u32, body_list.items.len), + .offset = @as(u32, @intCast(body_list.items.len)), .index = tag_sym_index, }); try writer.writeAll(&[_]u8{0} ** 10); // will be relocated @@ -6986,7 +6986,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // store length try writer.writeByte(std.wasm.opcode(.i64_const)); - try leb.writeULEB128(writer, @intCast(u64, tag_name.len)); + try leb.writeULEB128(writer, @as(u64, @intCast(tag_name.len))); try writer.writeByte(std.wasm.opcode(.i64_store)); try leb.writeULEB128(writer, encoded_alignment); try leb.writeULEB128(writer, @as(u32, 8)); @@ -7026,7 +7026,7 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var lowest: ?u32 = null; var highest: ?u32 = null; for (names) |name| { - const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); + const err_int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?)); if (lowest) |*l| { if (err_int < l.*) { l.* = err_int; @@ -7054,11 +7054,11 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // lower operand to determine jump table target try func.emitWValue(operand); - try func.addImm32(@intCast(i32, lowest.?)); + try func.addImm32(@as(i32, @intCast(lowest.?))); try func.addTag(.i32_sub); // Account for default branch so always add '1' - const depth = @intCast(u32, highest.? - lowest.? + 1); + const depth = @as(u32, @intCast(highest.? - lowest.? 
+ 1)); const jump_table: Mir.JumpTable = .{ .length = depth }; const table_extra_index = try func.addExtra(jump_table); try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } }); @@ -7155,7 +7155,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.i32_and); const and_result = try WValue.toLocal(.stack, func, Type.bool); const result_ptr = try func.allocStack(result_ty); - try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(mod))); + try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(mod)))); try func.store(result_ptr, ptr_val, ty, 0); break :val result_ptr; } else val: { @@ -7221,13 +7221,13 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(ptr); try func.emitWValue(value); if (op == .Nand) { - const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; + const wasm_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?; const and_res = try func.binOp(value, operand, ty, .@"and"); if (wasm_bits == 32) try func.addImm32(-1) else if (wasm_bits == 64) - try func.addImm64(@bitCast(u64, @as(i64, -1))) + try func.addImm64(@as(u64, @bitCast(@as(i64, -1)))) else return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{}); _ = try func.binOp(and_res, .stack, ty, .xor); @@ -7352,14 +7352,14 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.store(.stack, .stack, ty, ptr.offset()); }, .Nand => { - const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; + const wasm_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?; try func.emitWValue(ptr); const and_res = try func.binOp(result, operand, ty, .@"and"); if (wasm_bits == 32) try func.addImm32(-1) else if (wasm_bits == 64) - try func.addImm64(@bitCast(u64, @as(i64, -1))) + try func.addImm64(@as(u64, @bitCast(@as(i64, -1)))) else return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{}); _ = try func.binOp(and_res, .stack, ty, .xor); diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig index 3314f4d993..3b1911b895 100644 --- a/src/arch/wasm/Emit.zig +++ b/src/arch/wasm/Emit.zig @@ -45,7 +45,7 @@ pub fn emitMir(emit: *Emit) InnerError!void { try emit.emitLocals(); for (mir_tags, 0..) 
|tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); switch (tag) { // block instructions .block => try emit.emitBlock(tag, inst), @@ -247,7 +247,7 @@ pub fn emitMir(emit: *Emit) InnerError!void { } fn offset(self: Emit) u32 { - return @intCast(u32, self.code.items.len); + return @as(u32, @intCast(self.code.items.len)); } fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { @@ -260,7 +260,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { fn emitLocals(emit: *Emit) !void { const writer = emit.code.writer(); - try leb128.writeULEB128(writer, @intCast(u32, emit.locals.len)); + try leb128.writeULEB128(writer, @as(u32, @intCast(emit.locals.len))); // emit the actual locals amount for (emit.locals) |local| { try leb128.writeULEB128(writer, @as(u32, 1)); @@ -324,13 +324,13 @@ fn emitImm64(emit: *Emit, inst: Mir.Inst.Index) !void { const extra_index = emit.mir.instructions.items(.data)[inst].payload; const value = emit.mir.extraData(Mir.Imm64, extra_index); try emit.code.append(std.wasm.opcode(.i64_const)); - try leb128.writeILEB128(emit.code.writer(), @bitCast(i64, value.data.toU64())); + try leb128.writeILEB128(emit.code.writer(), @as(i64, @bitCast(value.data.toU64()))); } fn emitFloat32(emit: *Emit, inst: Mir.Inst.Index) !void { const value: f32 = emit.mir.instructions.items(.data)[inst].float32; try emit.code.append(std.wasm.opcode(.f32_const)); - try emit.code.writer().writeIntLittle(u32, @bitCast(u32, value)); + try emit.code.writer().writeIntLittle(u32, @as(u32, @bitCast(value))); } fn emitFloat64(emit: *Emit, inst: Mir.Inst.Index) !void { @@ -425,7 +425,7 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void { .offset = mem_offset, .index = mem.pointer, .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64, - .addend = @intCast(i32, mem.offset), + .addend = @as(i32, @intCast(mem.offset)), }); } } @@ -436,7 +436,7 @@ fn emitExtended(emit: *Emit, inst: Mir.Inst.Index) !void { const writer = emit.code.writer(); try emit.code.append(std.wasm.opcode(.misc_prefix)); try leb128.writeULEB128(writer, opcode); - switch (@enumFromInt(std.wasm.MiscOpcode, opcode)) { + switch (@as(std.wasm.MiscOpcode, @enumFromInt(opcode))) { // bulk-memory opcodes .data_drop => { const segment = emit.mir.extra[extra_index + 1]; @@ -475,7 +475,7 @@ fn emitSimd(emit: *Emit, inst: Mir.Inst.Index) !void { const writer = emit.code.writer(); try emit.code.append(std.wasm.opcode(.simd_prefix)); try leb128.writeULEB128(writer, opcode); - switch (@enumFromInt(std.wasm.SimdOpcode, opcode)) { + switch (@as(std.wasm.SimdOpcode, @enumFromInt(opcode))) { .v128_store, .v128_load, .v128_load8_splat, @@ -507,7 +507,7 @@ fn emitSimd(emit: *Emit, inst: Mir.Inst.Index) !void { .f64x2_extract_lane, .f64x2_replace_lane, => { - try writer.writeByte(@intCast(u8, emit.mir.extra[extra_index + 1])); + try writer.writeByte(@as(u8, @intCast(emit.mir.extra[extra_index + 1]))); }, .i8x16_splat, .i16x8_splat, @@ -526,7 +526,7 @@ fn emitAtomic(emit: *Emit, inst: Mir.Inst.Index) !void { const writer = emit.code.writer(); try emit.code.append(std.wasm.opcode(.atomics_prefix)); try leb128.writeULEB128(writer, opcode); - switch (@enumFromInt(std.wasm.AtomicsOpcode, opcode)) { + switch (@as(std.wasm.AtomicsOpcode, @enumFromInt(opcode))) { .i32_atomic_load, .i64_atomic_load, .i32_atomic_load8_u, @@ -623,7 +623,7 @@ fn emitDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void { fn dbgAdvancePCAndLine(emit: *Emit, line: 
u32, column: u32) !void { if (emit.dbg_output != .dwarf) return; - const delta_line = @intCast(i32, line) - @intCast(i32, emit.prev_di_line); + const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line)); const delta_pc = emit.offset() - emit.prev_di_offset; // TODO: This must emit a relocation to calculate the offset relative // to the code section start. diff --git a/src/arch/wasm/Mir.zig b/src/arch/wasm/Mir.zig index 6e93f0fb88..2d4f624b22 100644 --- a/src/arch/wasm/Mir.zig +++ b/src/arch/wasm/Mir.zig @@ -544,12 +544,12 @@ pub const Inst = struct { /// From a given wasm opcode, returns a MIR tag. pub fn fromOpcode(opcode: std.wasm.Opcode) Tag { - return @enumFromInt(Tag, @intFromEnum(opcode)); // Given `Opcode` is not present as a tag for MIR yet + return @as(Tag, @enumFromInt(@intFromEnum(opcode))); // Given `Opcode` is not present as a tag for MIR yet } /// Returns a wasm opcode from a given MIR tag. pub fn toOpcode(self: Tag) std.wasm.Opcode { - return @enumFromInt(std.wasm.Opcode, @intFromEnum(self)); + return @as(std.wasm.Opcode, @enumFromInt(@intFromEnum(self))); } }; @@ -621,8 +621,8 @@ pub const Imm64 = struct { pub fn fromU64(imm: u64) Imm64 { return .{ - .msb = @truncate(u32, imm >> 32), - .lsb = @truncate(u32, imm), + .msb = @as(u32, @truncate(imm >> 32)), + .lsb = @as(u32, @truncate(imm)), }; } @@ -639,15 +639,15 @@ pub const Float64 = struct { lsb: u32, pub fn fromFloat64(float: f64) Float64 { - const tmp = @bitCast(u64, float); + const tmp = @as(u64, @bitCast(float)); return .{ - .msb = @truncate(u32, tmp >> 32), - .lsb = @truncate(u32, tmp), + .msb = @as(u32, @truncate(tmp >> 32)), + .lsb = @as(u32, @truncate(tmp)), }; } pub fn toF64(self: Float64) f64 { - @bitCast(f64, self.toU64()); + return @as(f64, @bitCast(self.toU64())); } pub fn toU64(self: Float64) u64 { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index edf84089b1..4993e3fe45 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -329,7 +329,7 @@ pub const MCValue = union(enum) { .load_frame, .reserved_frame, => unreachable, // not offsettable - .immediate => |imm| .{ .immediate = @bitCast(u64, @bitCast(i64, imm) +% off) }, + .immediate => |imm| .{ .immediate = @as(u64, @bitCast(@as(i64, @bitCast(imm)) +% off)) }, .register => |reg| .{ .register_offset = .{ .reg = reg, .off = off } }, .register_offset => |reg_off| .{ .register_offset = .{ .reg = reg_off.reg, .off = reg_off.off + off }, @@ -360,7 +360,7 @@ pub const MCValue = union(enum) { .lea_frame, .reserved_frame, => unreachable, - .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr| + .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr| Memory.sib(ptr_size, .{ .base = .{ .reg = .ds }, .disp = small_addr }) else Memory.moffs(.ds, addr), @@ -606,7 +606,7 @@ const FrameAlloc = struct { fn init(alloc_abi: struct { size: u64, alignment: u32 }) FrameAlloc { assert(math.isPowerOfTwo(alloc_abi.alignment)); return .{ - .abi_size = @intCast(u31, alloc_abi.size), + .abi_size = @as(u31, @intCast(alloc_abi.size)), .abi_align = math.log2_int(u32, alloc_abi.alignment), .ref_count = 0, }; @@ -694,7 +694,7 @@ pub fn generate( FrameAlloc.init(.{ .size = 0, .alignment = if (mod.align_stack_fns.get(module_fn_index)) |set_align_stack| - @intCast(u32, set_align_stack.alignment.toByteUnitsOptional().?)
+ @as(u32, @intCast(set_align_stack.alignment.toByteUnitsOptional().?)) else 1, }), @@ -979,7 +979,7 @@ fn fmtTracking(self: *Self) std.fmt.Formatter(formatTracking) { fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { const gpa = self.gpa; try self.mir_instructions.ensureUnusedCapacity(gpa, 1); - const result_index = @intCast(Mir.Inst.Index, self.mir_instructions.len); + const result_index = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)); self.mir_instructions.appendAssumeCapacity(inst); if (inst.tag != .pseudo or switch (inst.ops) { else => true, @@ -1000,11 +1000,11 @@ fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 { fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, self.mir_extra.items.len); + const result = @as(u32, @intCast(self.mir_extra.items.len)); inline for (fields) |field| { self.mir_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), - i32 => @bitCast(u32, @field(extra, field.name)), + i32 => @as(u32, @bitCast(@field(extra, field.name))), else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)), }); } @@ -1214,8 +1214,8 @@ fn asmImmediate(self: *Self, tag: Mir.Inst.FixedTag, imm: Immediate) !void { .data = .{ .i = .{ .fixes = tag[0], .i = switch (imm) { - .signed => |s| @bitCast(u32, s), - .unsigned => |u| @intCast(u32, u), + .signed => |s| @as(u32, @bitCast(s)), + .unsigned => |u| @as(u32, @intCast(u)), }, } }, }); @@ -1246,8 +1246,8 @@ fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.FixedTag, reg: Register, imm: .fixes = tag[0], .r1 = reg, .i = switch (imm) { - .signed => |s| @bitCast(u32, s), - .unsigned => |u| @intCast(u32, u), + .signed => |s| @as(u32, @bitCast(s)), + .unsigned => |u| @as(u32, @intCast(u)), }, } }, .ri64 => .{ .rx = .{ @@ -1316,7 +1316,7 @@ fn asmRegisterRegisterRegisterImmediate( .r1 = reg1, .r2 = reg2, .r3 = reg3, - .i = @intCast(u8, imm.unsigned), + .i = @as(u8, @intCast(imm.unsigned)), } }, }); } @@ -1339,8 +1339,8 @@ fn asmRegisterRegisterImmediate( .r1 = reg1, .r2 = reg2, .i = switch (imm) { - .signed => |s| @bitCast(u32, s), - .unsigned => |u| @intCast(u32, u), + .signed => |s| @as(u32, @bitCast(s)), + .unsigned => |u| @as(u32, @intCast(u)), }, } }, }); @@ -1429,7 +1429,7 @@ fn asmRegisterMemoryImmediate( .data = .{ .rix = .{ .fixes = tag[0], .r1 = reg, - .i = @intCast(u8, imm.unsigned), + .i = @as(u8, @intCast(imm.unsigned)), .payload = switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), .rip => try self.addExtra(Mir.MemoryRip.encode(m)), @@ -1458,7 +1458,7 @@ fn asmRegisterRegisterMemoryImmediate( .fixes = tag[0], .r1 = reg1, .r2 = reg2, - .i = @intCast(u8, imm.unsigned), + .i = @as(u8, @intCast(imm.unsigned)), .payload = switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), .rip => try self.addExtra(Mir.MemoryRip.encode(m)), @@ -1490,8 +1490,8 @@ fn asmMemoryRegister(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, reg: Regist fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, imm: Immediate) !void { const payload = try self.addExtra(Mir.Imm32{ .imm = switch (imm) { - .signed => |s| @bitCast(u32, s), - .unsigned => |u| @intCast(u32, u), + .signed => |s| @as(u32, @bitCast(s)), + .unsigned => |u| @as(u32, @intCast(u)), } }); assert(payload + 1 == switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), @@ -1562,7 +1562,7 @@ fn asmMemoryRegisterImmediate( .data = .{ .rix = .{ .fixes = tag[0], .r1 = 
reg, - .i = @intCast(u8, imm.unsigned), + .i = @as(u8, @intCast(imm.unsigned)), .payload = switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), .rip => try self.addExtra(Mir.MemoryRip.encode(m)), @@ -1617,7 +1617,7 @@ fn gen(self: *Self) InnerError!void { // Eliding the reloc will cause a miscompilation in this case. for (self.exitlude_jump_relocs.items) |jmp_reloc| { self.mir_instructions.items(.data)[jmp_reloc].inst.inst = - @intCast(u32, self.mir_instructions.len); + @as(u32, @intCast(self.mir_instructions.len)); } try self.asmPseudo(.pseudo_dbg_epilogue_begin_none); @@ -1739,7 +1739,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { if (builtin.mode == .Debug) { - const mir_inst = @intCast(Mir.Inst.Index, self.mir_instructions.len); + const mir_inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)); try self.mir_to_air_map.put(self.gpa, mir_inst, inst); } @@ -2032,7 +2032,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { var data_off: i32 = 0; for (exitlude_jump_relocs, 0..) |*exitlude_jump_reloc, index_usize| { - const index = @intCast(u32, index_usize); + const index = @as(u32, @intCast(index_usize)); const tag_name = mod.intern_pool.stringToSlice(enum_ty.enumFields(mod)[index_usize]); const tag_val = try mod.enumValueFieldIndex(enum_ty, index); const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val }); @@ -2050,7 +2050,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { exitlude_jump_reloc.* = try self.asmJmpReloc(undefined); try self.performReloc(skip_reloc); - data_off += @intCast(i32, tag_name.len + 1); + data_off += @as(i32, @intCast(tag_name.len + 1)); } try self.airTrap(); @@ -2126,7 +2126,7 @@ fn finishAirResult(self: *Self, inst: Air.Inst.Index, result: MCValue) void { fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void { var tomb_bits = self.liveness.getTombBits(inst); for (operands) |op| { - const dies = @truncate(u1, tomb_bits) != 0; + const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; self.processDeath(Air.refToIndexAllowNone(op) orelse continue); @@ -2167,7 +2167,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout { const frame_offset = self.frame_locs.items(.disp); for (stack_frame_order, FrameIndex.named_count..) 
|*frame_order, frame_index| - frame_order.* = @enumFromInt(FrameIndex, frame_index); + frame_order.* = @as(FrameIndex, @enumFromInt(frame_index)); { const SortContext = struct { frame_align: @TypeOf(frame_align), @@ -2195,7 +2195,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout { } } - var rbp_offset = @intCast(i32, save_reg_list.count() * 8); + var rbp_offset = @as(i32, @intCast(save_reg_list.count() * 8)); self.setFrameLoc(.base_ptr, .rbp, &rbp_offset, false); self.setFrameLoc(.ret_addr, .rbp, &rbp_offset, false); self.setFrameLoc(.args_frame, .rbp, &rbp_offset, false); @@ -2210,22 +2210,22 @@ fn computeFrameLayout(self: *Self) !FrameLayout { rsp_offset = mem.alignForward(i32, rsp_offset, @as(i32, 1) << needed_align); rsp_offset -= stack_frame_align_offset; frame_size[@intFromEnum(FrameIndex.call_frame)] = - @intCast(u31, rsp_offset - frame_offset[@intFromEnum(FrameIndex.stack_frame)]); + @as(u31, @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.stack_frame)])); return .{ .stack_mask = @as(u32, math.maxInt(u32)) << (if (need_align_stack) needed_align else 0), - .stack_adjust = @intCast(u32, rsp_offset - frame_offset[@intFromEnum(FrameIndex.call_frame)]), + .stack_adjust = @as(u32, @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.call_frame)])), .save_reg_list = save_reg_list, }; } fn getFrameAddrAlignment(self: *Self, frame_addr: FrameAddr) u32 { const alloc_align = @as(u32, 1) << self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_align; - return @min(alloc_align, @bitCast(u32, frame_addr.off) & (alloc_align - 1)); + return @min(alloc_align, @as(u32, @bitCast(frame_addr.off)) & (alloc_align - 1)); } fn getFrameAddrSize(self: *Self, frame_addr: FrameAddr) u32 { - return self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_size - @intCast(u31, frame_addr.off); + return self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_size - @as(u31, @intCast(frame_addr.off)); } fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { @@ -2245,7 +2245,7 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { _ = self.free_frame_indices.swapRemoveAt(free_i); return frame_index; } - const frame_index = @enumFromInt(FrameIndex, self.frame_allocs.len); + const frame_index = @as(FrameIndex, @enumFromInt(self.frame_allocs.len)); try self.frame_allocs.append(self.gpa, alloc); return frame_index; } @@ -2321,7 +2321,7 @@ const State = struct { fn initRetroactiveState(self: *Self) State { var state: State = undefined; - state.inst_tracking_len = @intCast(u32, self.inst_tracking.count()); + state.inst_tracking_len = @as(u32, @intCast(self.inst_tracking.count())); state.scope_generation = self.scope_generation; return state; } @@ -2393,7 +2393,7 @@ fn restoreState(self: *Self, state: State, deaths: []const Air.Inst.Index, compt } { const reg = RegisterManager.regAtTrackedIndex( - @intCast(RegisterManager.RegisterBitSet.ShiftInt, index), + @as(RegisterManager.RegisterBitSet.ShiftInt, @intCast(index)), ); self.register_manager.freeReg(reg); self.register_manager.getRegAssumeFree(reg, target_maybe_inst); @@ -2628,7 +2628,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const dst_ty = self.typeOfIndex(inst); const dst_int_info = dst_ty.intInfo(mod); - const abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty; const extend = switch (src_int_info.signedness) { @@ -2706,9 +2706,9 @@ fn airTrunc(self: *Self, inst: 
Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dst_ty = self.typeOfIndex(inst); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); const src_ty = self.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); + const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod))); const result = result: { const src_mcv = try self.resolveInst(ty_op.operand); @@ -2753,13 +2753,13 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { }); const elem_ty = src_ty.childType(mod); - const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits)); + const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - dst_info.bits))); const splat_ty = try mod.vectorType(.{ - .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)), + .len = @as(u32, @intCast(@divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits))), .child = elem_ty.ip_index, }); - const splat_abi_size = @intCast(u32, splat_ty.abiSize(mod)); + const splat_abi_size = @as(u32, @intCast(splat_ty.abiSize(mod))); const splat_val = try mod.intern(.{ .aggregate = .{ .ty = splat_ty.ip_index, @@ -2834,7 +2834,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, ptr_ty.abiSize(mod)), + @as(i32, @intCast(ptr_ty.abiSize(mod))), len_ty, len, ); @@ -2875,7 +2875,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { const src_val = air_data[inst].interned.toValue(); var space: Value.BigIntSpace = undefined; const src_int = src_val.toBigInt(&space, mod); - return @intCast(u16, src_int.bitCountTwosComp()) + + return @as(u16, @intCast(src_int.bitCountTwosComp())) + @intFromBool(src_int.positive and dst_info.signedness == .signed); }, .intcast => { @@ -2964,7 +2964,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { try self.genSetReg(limit_reg, ty, dst_mcv); try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{ - .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1, + .immediate = (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1, }); if (reg_extra_bits > 0) { const shifted_rhs_reg = try self.copyToTmpRegister(ty, rhs_mcv); @@ -2983,7 +2983,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .o; } else cc: { try self.genSetReg(limit_reg, ty, .{ - .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(mod)), + .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - ty.bitSize(mod))), }); try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv); @@ -2994,7 +2994,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); + const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), @@ -3043,7 +3043,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { try self.genSetReg(limit_reg, ty, dst_mcv); try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{ - .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 
1, + .immediate = (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1, }); if (reg_extra_bits > 0) { const shifted_rhs_reg = try self.copyToTmpRegister(ty, rhs_mcv); @@ -3066,7 +3066,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); + const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), @@ -3114,18 +3114,18 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv); try self.genShiftBinOpMir(.{ ._, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{ - .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1, + .immediate = (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1, }); break :cc .o; } else cc: { try self.genSetReg(limit_reg, ty, .{ - .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits), + .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - reg_bits)), }); break :cc .c; }; const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv); - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); + const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_mcv.register, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), @@ -3172,13 +3172,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, mod)), + @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))), Type.u1, .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, mod)), + @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))), ty, partial_mcv, ); @@ -3245,13 +3245,13 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, mod)), + @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))), tuple_ty.structFieldType(1, mod), .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, mod)), + @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))), tuple_ty.structFieldType(0, mod), partial_mcv, ); @@ -3319,7 +3319,7 @@ fn genSetFrameTruncatedOverflowCompare( ); } - const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, mod)); + const payload_off = @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))); if (hi_limb_off > 0) try self.genSetMem(.{ .frame = frame_index }, payload_off, rest_ty, src_mcv); try self.genSetMem( .{ .frame = frame_index }, @@ -3329,7 +3329,7 @@ fn genSetFrameTruncatedOverflowCompare( ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, mod)), + @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))), tuple_ty.structFieldType(1, mod), if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne }, ); @@ -3386,13 +3386,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { if (dst_info.bits >= lhs_active_bits + rhs_active_bits) { try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, 
mod)), + @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))), tuple_ty.structFieldType(0, mod), partial_mcv, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, mod)), + @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))), tuple_ty.structFieldType(1, mod), .{ .immediate = 0 }, // cc being set is impossible ); @@ -3416,7 +3416,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { /// Quotient is saved in .rax and remainder in .rdx. fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); if (abi_size > 8) { return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{}); } @@ -3456,7 +3456,7 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue /// Clobbers .rax and .rdx registers. fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const int_info = ty.intInfo(mod); const dividend: Register = switch (lhs) { .register => |reg| reg, @@ -3595,7 +3595,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); const pl_ty = dst_ty.childType(mod); - const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod)); + const pl_abi_size = @as(i32, @intCast(pl_ty.abiSize(mod))); try self.genSetMem(.{ .reg = dst_mcv.getReg().? }, pl_abi_size, Type.bool, .{ .immediate = 1 }); break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv; }; @@ -3628,7 +3628,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand); if (err_off > 0) { - const shift = @intCast(u6, err_off * 8); + const shift = @as(u6, @intCast(err_off * 8)); try self.genShiftBinOpMir( .{ ._r, .sh }, err_union_ty, @@ -3642,7 +3642,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { }, .load_frame => |frame_addr| break :result .{ .load_frame = .{ .index = frame_addr.index, - .off = frame_addr.off + @intCast(i32, err_off), + .off = frame_addr.off + @as(i32, @intCast(err_off)), } }, else => return self.fail("TODO implement unwrap_err_err for {}", .{operand}), } @@ -3674,7 +3674,7 @@ fn genUnwrapErrorUnionPayloadMir( switch (err_union) { .load_frame => |frame_addr| break :result .{ .load_frame = .{ .index = frame_addr.index, - .off = frame_addr.off + @intCast(i32, payload_off), + .off = frame_addr.off + @as(i32, @intCast(payload_off)), } }, .register => |reg| { // TODO reuse operand @@ -3686,7 +3686,7 @@ fn genUnwrapErrorUnionPayloadMir( else .{ .register = try self.copyToTmpRegister(err_union_ty, err_union) }; if (payload_off > 0) { - const shift = @intCast(u6, payload_off * 8); + const shift = @as(u6, @intCast(payload_off * 8)); try self.genShiftBinOpMir( .{ ._r, .sh }, err_union_ty, @@ -3727,8 +3727,8 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(mod); const err_ty = eu_ty.errorUnionSet(mod); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); - const err_abi_size = @intCast(u32, err_ty.abiSize(mod)); + const err_off = @as(i32, 
@intCast(errUnionErrorOffset(pl_ty, mod))); + const err_abi_size = @as(u32, @intCast(err_ty.abiSize(mod))); try self.asmRegisterMemory( .{ ._, .mov }, registerAlias(dst_reg, err_abi_size), @@ -3766,8 +3766,8 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(mod); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod))); + const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -3793,8 +3793,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(mod); const err_ty = eu_ty.errorUnionSet(mod); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); - const err_abi_size = @intCast(u32, err_ty.abiSize(mod)); + const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod))); + const err_abi_size = @as(u32, @intCast(err_ty.abiSize(mod))); try self.asmMemoryImmediate( .{ ._, .mov }, Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{ @@ -3814,8 +3814,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod))); + const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -3864,14 +3864,14 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { try self.genCopy(pl_ty, opt_mcv, pl_mcv); if (!same_repr) { - const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod)); + const pl_abi_size = @as(i32, @intCast(pl_ty.abiSize(mod))); switch (opt_mcv) { else => unreachable, .register => |opt_reg| try self.asmRegisterImmediate( .{ ._s, .bt }, opt_reg, - Immediate.u(@intCast(u6, pl_abi_size * 8)), + Immediate.u(@as(u6, @intCast(pl_abi_size * 8))), ), .load_frame => |frame_addr| try self.asmMemoryImmediate( @@ -3903,8 +3903,8 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .{ .immediate = 0 }; const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod)); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); + const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod))); + const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod))); try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand); try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }); break :result .{ .load_frame = .{ .index = frame_index } }; @@ -3925,8 +3925,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand); const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod)); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); + const pl_off = @as(i32, 
@intCast(errUnionPayloadOffset(pl_ty, mod))); + const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod))); try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef); const operand = try self.resolveInst(ty_op.operand); try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand); @@ -3988,7 +3988,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -4165,7 +4165,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { // additional `mov` is needed at the end to get the actual value const elem_ty = ptr_ty.elemType2(mod); - const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_abi_size = @as(u32, @intCast(elem_ty.abiSize(mod))); const index_ty = self.typeOf(bin_op.rhs); const index_mcv = try self.resolveInst(bin_op.rhs); const index_lock = switch (index_mcv) { @@ -4305,7 +4305,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { .load_frame => |frame_addr| { if (tag_abi_size <= 8) { const off: i32 = if (layout.tag_align < layout.payload_align) - @intCast(i32, layout.payload_size) + @as(i32, @intCast(layout.payload_size)) else 0; break :blk try self.copyToRegisterWithInstTracking(inst, tag_ty, .{ @@ -4317,13 +4317,13 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { }, .register => { const shift: u6 = if (layout.tag_align < layout.payload_align) - @intCast(u6, layout.payload_size * 8) + @as(u6, @intCast(layout.payload_size * 8)) else 0; const result = try self.copyToRegisterWithInstTracking(inst, union_ty, operand); try self.genShiftBinOpMir(.{ ._r, .sh }, Type.usize, result, .{ .immediate = shift }); break :blk MCValue{ - .register = registerAlias(result.register, @intCast(u32, layout.tag_size)), + .register = registerAlias(result.register, @as(u32, @intCast(layout.tag_size))), }; }, else => return self.fail("TODO implement get_union_tag for {}", .{operand}), @@ -4420,7 +4420,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .bsr }, Type.u16, dst_mcv, .{ .register = wide_reg }); } else try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); + const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(imm_reg, cmov_abi_size), @@ -4430,7 +4430,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .xor }, dst_ty, dst_mcv, .{ .immediate = src_bits - 1 }); } else { const imm_reg = try self.copyToTmpRegister(dst_ty, .{ - .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - self.regBitSize(dst_ty)), + .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - self.regBitSize(dst_ty))), }); const imm_lock = self.register_manager.lockRegAssumeUnused(imm_reg); defer self.register_manager.unlockReg(imm_lock); @@ -4447,7 +4447,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { .{ .register = wide_reg }, ); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); + const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2); try self.asmCmovccRegisterRegister( registerAlias(imm_reg, 
cmov_abi_size), registerAlias(dst_reg, cmov_abi_size), @@ -4501,8 +4501,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { .{ ._, .@"or" }, wide_ty, tmp_mcv, - .{ .immediate = (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - extra_bits)) << - @intCast(u6, src_bits) }, + .{ .immediate = (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - extra_bits))) << + @as(u6, @intCast(src_bits)) }, ); break :masked tmp_mcv; } else mat_src_mcv; @@ -4519,7 +4519,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { .{ ._, .@"or" }, Type.u64, dst_mcv, - .{ .immediate = @as(u64, math.maxInt(u64)) << @intCast(u6, src_bits - 64) }, + .{ .immediate = @as(u64, math.maxInt(u64)) << @as(u6, @intCast(src_bits - 64)) }, ); break :masked dst_mcv; } else mat_src_mcv.address().offset(8).deref(); @@ -4547,7 +4547,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .bsf }, Type.u16, dst_mcv, .{ .register = wide_reg }); } else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); + const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(width_reg, cmov_abi_size), @@ -4563,7 +4563,7 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { const src_ty = self.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); + const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod))); const src_mcv = try self.resolveInst(ty_op.operand); if (self.hasFeature(.popcnt)) { @@ -4588,7 +4588,7 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; } - const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - src_abi_size * 8); + const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - src_abi_size * 8)); const imm_0_1 = Immediate.u(mask / 0b1_1); const imm_00_11 = Immediate.u(mask / 0b01_01); const imm_0000_1111 = Immediate.u(mask / 0b0001_0001); @@ -4754,7 +4754,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); + const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod))); const src_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false); @@ -4774,7 +4774,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { else undefined; - const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - src_abi_size * 8); + const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - src_abi_size * 8)); const imm_0000_1111 = Immediate.u(mask / 0b0001_0001); const imm_00_11 = Immediate.u(mask / 0b01_01); const imm_0_1 = Immediate.u(mask / 0b1_1); @@ -5017,7 +5017,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 })) |tag| tag else return self.fail("TODO implement genRound for {}", .{ ty.fmt(self.bin_file.options.module.?), }); - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const dst_alias = registerAlias(dst_reg, abi_size); switch (mir_tag[0]) { .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( @@ -5057,7 +5057,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { const mod = 
self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.typeOf(un_op); - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const src_mcv = try self.resolveInst(un_op); const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) @@ -5123,7 +5123,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { .{ .v_ps, .cvtph2 }, wide_reg, src_mcv.mem(Memory.PtrSize.fromSize( - @intCast(u32, @divExact(wide_reg.bitSize(), 16)), + @as(u32, @intCast(@divExact(wide_reg.bitSize(), 16))), )), ) else try self.asmRegisterRegister( .{ .v_ps, .cvtph2 }, @@ -5255,10 +5255,10 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn const ptr_info = ptr_ty.ptrInfo(mod); const val_ty = ptr_info.child.toType(); - const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); + const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod))); const limb_abi_size: u32 = @min(val_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; - const val_byte_off = @intCast(i32, ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size); + const val_byte_off = @as(i32, @intCast(ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size)); const val_bit_off = ptr_info.packed_offset.bit_offset % limb_abi_bits; const val_extra_bits = self.regExtraBits(val_ty); @@ -5404,7 +5404,7 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In const limb_abi_bits = limb_abi_size * 8; const src_bit_size = src_ty.bitSize(mod); - const src_byte_off = @intCast(i32, ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size); + const src_byte_off = @as(i32, @intCast(ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size)); const src_bit_off = ptr_info.packed_offset.bit_offset % limb_abi_bits; const ptr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv); @@ -5421,13 +5421,13 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In .disp = src_byte_off + limb_i * limb_abi_bits, }); - const part_mask = (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - part_bit_size)) << - @intCast(u6, part_bit_off); + const part_mask = (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - part_bit_size))) << + @as(u6, @intCast(part_bit_off)); const part_mask_not = part_mask ^ - (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_abi_bits)); + (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - limb_abi_bits))); if (limb_abi_size <= 4) { try self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, Immediate.u(part_mask_not)); - } else if (math.cast(i32, @bitCast(i64, part_mask_not))) |small| { + } else if (math.cast(i32, @as(i64, @bitCast(part_mask_not)))) |small| { try self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, Immediate.s(small)); } else { const part_mask_reg = try self.register_manager.allocReg(null, gp); @@ -5542,14 +5542,14 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32 const ptr_field_ty = self.typeOfIndex(inst); const ptr_container_ty = self.typeOf(operand); const container_ty = ptr_container_ty.childType(mod); - const field_offset = @intCast(i32, switch (container_ty.containerLayout(mod)) { + const field_offset = @as(i32, @intCast(switch (container_ty.containerLayout(mod)) { .Auto, .Extern => container_ty.structFieldOffset(index, mod), .Packed => if (container_ty.zigTypeTag(mod) == .Struct and ptr_field_ty.ptrInfo(mod).packed_offset.host_size == 0) 
container_ty.packedStructFieldByteOffset(index, mod) else 0, - }); + })); const src_mcv = try self.resolveInst(operand); const dst_mcv = if (switch (src_mcv) { @@ -5577,7 +5577,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const src_mcv = try self.resolveInst(operand); const field_off = switch (container_ty.containerLayout(mod)) { - .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8), + .Auto, .Extern => @as(u32, @intCast(container_ty.structFieldOffset(index, mod) * 8)), .Packed => if (mod.typeToStruct(container_ty)) |struct_obj| struct_obj.packedFieldBitOffset(mod, index) else @@ -5588,7 +5588,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { .load_frame => |frame_addr| { if (field_off % 8 == 0) { const off_mcv = - src_mcv.address().offset(@intCast(i32, @divExact(field_off, 8))).deref(); + src_mcv.address().offset(@as(i32, @intCast(@divExact(field_off, 8)))).deref(); if (self.reuseOperand(inst, operand, 0, src_mcv)) break :result off_mcv; const dst_mcv = try self.allocRegOrMem(inst, true); @@ -5596,10 +5596,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; } - const field_abi_size = @intCast(u32, field_ty.abiSize(mod)); + const field_abi_size = @as(u32, @intCast(field_ty.abiSize(mod))); const limb_abi_size: u32 = @min(field_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; - const field_byte_off = @intCast(i32, field_off / limb_abi_bits * limb_abi_size); + const field_byte_off = @as(i32, @intCast(field_off / limb_abi_bits * limb_abi_size)); const field_bit_off = field_off % limb_abi_bits; if (field_abi_size > 8) { @@ -5643,7 +5643,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { tmp_reg, Memory.sib(Memory.PtrSize.fromSize(field_abi_size), .{ .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off + field_byte_off + @intCast(i32, limb_abi_size), + .disp = frame_addr.off + field_byte_off + @as(i32, @intCast(limb_abi_size)), }), ); try self.asmRegisterRegisterImmediate( @@ -5724,7 +5724,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { const inst_ty = self.typeOfIndex(inst); const parent_ty = inst_ty.childType(mod); - const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod)); + const field_offset = @as(i32, @intCast(parent_ty.structFieldOffset(extra.field_index, mod))); const src_mcv = try self.resolveInst(extra.field_ptr); const dst_mcv = if (src_mcv.isRegisterOffset() and @@ -5773,14 +5773,14 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: switch (tag) { .not => { - const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(mod), 8)); + const limb_abi_size = @as(u16, @intCast(@min(src_ty.abiSize(mod), 8))); const int_info = if (src_ty.ip_index == .bool_type) std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 } else src_ty.intInfo(mod); var byte_off: i32 = 0; while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) { - const limb_bits = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8)); + const limb_bits = @as(u16, @intCast(@min(int_info.bits - byte_off * 8, limb_abi_size * 8))); const limb_ty = try mod.intType(int_info.signedness, limb_bits); const limb_mcv = switch (byte_off) { 0 => dst_mcv, @@ -5788,7 +5788,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: }; if (int_info.signedness == .unsigned and self.regExtraBits(limb_ty) > 0) { - const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 
- limb_bits); + const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - limb_bits)); try self.genBinOpMir(.{ ._, .xor }, limb_ty, limb_mcv, .{ .immediate = mask }); } else try self.genUnOpMir(.{ ._, .not }, limb_ty, limb_mcv); } @@ -5801,7 +5801,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); if (abi_size > 8) return self.fail("TODO implement {} for {}", .{ mir_tag, dst_ty.fmt(self.bin_file.options.module.?), @@ -5863,7 +5863,7 @@ fn genShiftBinOpMir( break :rhs .{ .register = .rcx }; }; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); if (abi_size <= 8) { switch (lhs_mcv) { .register => |lhs_reg| switch (rhs_mcv) { @@ -5886,7 +5886,7 @@ fn genShiftBinOpMir( const lhs_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (lhs_mcv) { .memory => |addr| .{ .base = .{ .reg = .ds }, - .disp = math.cast(i32, @bitCast(i64, addr)) orelse + .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{ @tagName(lhs_mcv), @tagName(rhs_mcv), @@ -6151,8 +6151,8 @@ fn genMulDivBinOp( if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) { return self.fail("TODO implement genMulDivBinOp for {}", .{dst_ty.fmtDebug()}); } - const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); - const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); + const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); + const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod))); if (switch (tag) { else => unreachable, .mul, .mulwrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2, @@ -6326,7 +6326,7 @@ fn genBinOp( const mod = self.bin_file.options.module.?; const lhs_ty = self.typeOf(lhs_air); const rhs_ty = self.typeOf(rhs_air); - const abi_size = @intCast(u32, lhs_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(lhs_ty.abiSize(mod))); const maybe_mask_reg = switch (air_tag) { else => null, @@ -6481,7 +6481,7 @@ fn genBinOp( .lea_tlv, .lea_frame, => true, - .memory => |addr| math.cast(i32, @bitCast(i64, addr)) == null, + .memory => |addr| math.cast(i32, @as(i64, @bitCast(addr))) == null, else => false, }) .{ .register = try self.copyToTmpRegister(rhs_ty, src_mcv) } else src_mcv; const mat_mcv_lock = switch (mat_src_mcv) { @@ -6506,7 +6506,7 @@ fn genBinOp( }, }; - const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(mod)), 2); + const cmov_abi_size = @max(@as(u32, @intCast(lhs_ty.abiSize(mod))), 2); const tmp_reg = switch (dst_mcv) { .register => |reg| reg, else => try self.copyToTmpRegister(lhs_ty, dst_mcv), @@ -6541,7 +6541,7 @@ fn genBinOp( Memory.sib(Memory.PtrSize.fromSize(cmov_abi_size), switch (mat_src_mcv) { .memory => |addr| .{ .base = .{ .reg = .ds }, - .disp = @intCast(i32, @bitCast(i64, addr)), + .disp = @as(i32, @intCast(@as(i64, @bitCast(addr)))), }, .indirect => |reg_off| .{ .base = .{ .reg = reg_off.reg }, @@ -7429,7 +7429,7 @@ fn genBinOpMir( src_mcv: MCValue, ) !void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); switch (dst_mcv) { .none, .unreach, @@ -7465,28 +7465,28 @@ fn genBinOpMir( 8 => try self.asmRegisterImmediate( mir_tag, 
dst_alias, - if (math.cast(i8, @bitCast(i64, imm))) |small| + if (math.cast(i8, @as(i64, @bitCast(imm)))) |small| Immediate.s(small) else - Immediate.u(@intCast(u8, imm)), + Immediate.u(@as(u8, @intCast(imm))), ), 16 => try self.asmRegisterImmediate( mir_tag, dst_alias, - if (math.cast(i16, @bitCast(i64, imm))) |small| + if (math.cast(i16, @as(i64, @bitCast(imm)))) |small| Immediate.s(small) else - Immediate.u(@intCast(u16, imm)), + Immediate.u(@as(u16, @intCast(imm))), ), 32 => try self.asmRegisterImmediate( mir_tag, dst_alias, - if (math.cast(i32, @bitCast(i64, imm))) |small| + if (math.cast(i32, @as(i64, @bitCast(imm)))) |small| Immediate.s(small) else - Immediate.u(@intCast(u32, imm)), + Immediate.u(@as(u32, @intCast(imm))), ), - 64 => if (math.cast(i32, @bitCast(i64, imm))) |small| + 64 => if (math.cast(i32, @as(i64, @bitCast(imm)))) |small| try self.asmRegisterImmediate(mir_tag, dst_alias, Immediate.s(small)) else try self.asmRegisterRegister(mir_tag, dst_alias, registerAlias( @@ -7602,8 +7602,8 @@ fn genBinOpMir( => null, .memory, .load_got, .load_direct, .load_tlv => src: { switch (src_mcv) { - .memory => |addr| if (math.cast(i32, @bitCast(i64, addr)) != null and - math.cast(i32, @bitCast(i64, addr) + abi_size - limb_abi_size) != null) + .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr))) != null and + math.cast(i32, @as(i64, @bitCast(addr)) + abi_size - limb_abi_size) != null) break :src null, .load_got, .load_direct, .load_tlv => {}, else => unreachable, @@ -7680,7 +7680,7 @@ fn genBinOpMir( const imm = switch (off) { 0 => src_imm, else => switch (ty_signedness) { - .signed => @bitCast(u64, @bitCast(i64, src_imm) >> 63), + .signed => @as(u64, @bitCast(@as(i64, @bitCast(src_imm)) >> 63)), .unsigned => 0, }, }; @@ -7688,28 +7688,28 @@ fn genBinOpMir( 8 => try self.asmMemoryImmediate( mir_limb_tag, dst_limb_mem, - if (math.cast(i8, @bitCast(i64, imm))) |small| + if (math.cast(i8, @as(i64, @bitCast(imm)))) |small| Immediate.s(small) else - Immediate.u(@intCast(u8, imm)), + Immediate.u(@as(u8, @intCast(imm))), ), 16 => try self.asmMemoryImmediate( mir_limb_tag, dst_limb_mem, - if (math.cast(i16, @bitCast(i64, imm))) |small| + if (math.cast(i16, @as(i64, @bitCast(imm)))) |small| Immediate.s(small) else - Immediate.u(@intCast(u16, imm)), + Immediate.u(@as(u16, @intCast(imm))), ), 32 => try self.asmMemoryImmediate( mir_limb_tag, dst_limb_mem, - if (math.cast(i32, @bitCast(i64, imm))) |small| + if (math.cast(i32, @as(i64, @bitCast(imm)))) |small| Immediate.s(small) else - Immediate.u(@intCast(u32, imm)), + Immediate.u(@as(u32, @intCast(imm))), ), - 64 => if (math.cast(i32, @bitCast(i64, imm))) |small| + 64 => if (math.cast(i32, @as(i64, @bitCast(imm)))) |small| try self.asmMemoryImmediate( mir_limb_tag, dst_limb_mem, @@ -7753,7 +7753,7 @@ fn genBinOpMir( 0 => src_mcv, else => .{ .immediate = 0 }, }, - .memory => |addr| .{ .memory = @bitCast(u64, @bitCast(i64, addr) + off) }, + .memory => |addr| .{ .memory = @as(u64, @bitCast(@as(i64, @bitCast(addr)) + off)) }, .indirect => |reg_off| .{ .indirect = .{ .reg = reg_off.reg, .off = reg_off.off + off, @@ -7780,7 +7780,7 @@ fn genBinOpMir( /// Does not support byte-size operands. 
fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); switch (dst_mcv) { .none, .unreach, @@ -7847,7 +7847,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (src_mcv) { .memory => |addr| .{ .base = .{ .reg = .ds }, - .disp = math.cast(i32, @bitCast(i64, addr)) orelse + .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse return self.asmRegisterRegister( .{ .i_, .mul }, dst_alias, @@ -8014,7 +8014,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const pl_op = self.air.instructions.items(.data)[inst].pl_op; const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); const ty = self.typeOf(callee); const fn_ty = switch (ty.zigTypeTag(mod)) { @@ -8107,7 +8107,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const got_addr = atom.getOffsetTableAddress(elf_file); try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{ .base = .{ .reg = .ds }, - .disp = @intCast(i32, got_addr), + .disp = @as(i32, @intCast(got_addr)), })); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom = try coff_file.getOrCreateAtomForDecl(owner_decl); @@ -8124,7 +8124,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom = p9.getAtom(atom_index); try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{ .base = .{ .reg = .ds }, - .disp = @intCast(i32, atom.getOffsetTableAddress(p9)), + .disp = @as(i32, @intCast(atom.getOffsetTableAddress(p9))), })); } else unreachable; } else if (func_value.getExternFunc(mod)) |extern_func| { @@ -8244,7 +8244,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const result = MCValue{ .eflags = switch (ty.zigTypeTag(mod)) { else => result: { - const abi_size = @intCast(u16, ty.abiSize(mod)); + const abi_size = @as(u16, @intCast(ty.abiSize(mod))); const may_flip: enum { may_flip, must_flip, @@ -8441,7 +8441,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { self.eflags_inst = inst; const op_ty = self.typeOf(un_op); - const op_abi_size = @intCast(u32, op_ty.abiSize(mod)); + const op_abi_size = @as(u32, @intCast(op_ty.abiSize(mod))); const op_mcv = try self.resolveInst(un_op); const dst_reg = switch (op_mcv) { .register => |reg| reg, @@ -8650,7 +8650,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty } else - .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; + .{ .off = @as(i32, @intCast(pl_ty.abiSize(mod))), .ty = Type.bool }; switch (opt_mcv) { .none, @@ -8670,18 +8670,18 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC .register => |opt_reg| { if (some_info.off == 0) { - const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); + const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod))); const alias_reg = 
registerAlias(opt_reg, some_abi_size); assert(some_abi_size * 8 == alias_reg.bitSize()); try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg); return .{ .eflags = .z }; } assert(some_info.ty.ip_index == .bool_type); - const opt_abi_size = @intCast(u32, opt_ty.abiSize(mod)); + const opt_abi_size = @as(u32, @intCast(opt_ty.abiSize(mod))); try self.asmRegisterImmediate( .{ ._, .bt }, registerAlias(opt_reg, opt_abi_size), - Immediate.u(@intCast(u6, some_info.off * 8)), + Immediate.u(@as(u6, @intCast(some_info.off * 8))), ); return .{ .eflags = .nc }; }, @@ -8696,7 +8696,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC defer self.register_manager.unlockReg(addr_reg_lock); try self.genSetReg(addr_reg, Type.usize, opt_mcv.address()); - const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); + const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod))); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ @@ -8709,7 +8709,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC }, .indirect, .load_frame => { - const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); + const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod))); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), switch (opt_mcv) { @@ -8741,7 +8741,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty } else - .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; + .{ .off = @as(i32, @intCast(pl_ty.abiSize(mod))), .ty = Type.bool }; const ptr_reg = switch (ptr_mcv) { .register => |reg| reg, @@ -8750,7 +8750,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const ptr_lock = self.register_manager.lockReg(ptr_reg); defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); + const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod))); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ @@ -8783,7 +8783,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! const tmp_reg = try self.copyToTmpRegister(ty, operand); if (err_off > 0) { - const shift = @intCast(u6, err_off * 8); + const shift = @as(u6, @intCast(err_off * 8)); try self.genShiftBinOpMir( .{ ._r, .sh }, ty, @@ -8805,7 +8805,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! 
Type.anyerror, .{ .load_frame = .{ .index = frame_addr.index, - .off = frame_addr.off + @intCast(i32, err_off), + .off = frame_addr.off + @as(i32, @intCast(err_off)), } }, .{ .immediate = 0 }, ), @@ -8943,7 +8943,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; - const jmp_target = @intCast(u32, self.mir_instructions.len); + const jmp_target = @as(u32, @intCast(self.mir_instructions.len)); self.scope_generation += 1; const state = try self.saveState(); @@ -9015,9 +9015,9 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast( + const items = @as( []const Air.Inst.Ref, - self.air.extra[case.end..][0..case.data.items_len], + @ptrCast(self.air.extra[case.end..][0..case.data.items_len]), ); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + items.len + case_body.len; @@ -9066,7 +9066,7 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { } fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void { - const next_inst = @intCast(u32, self.mir_instructions.len); + const next_inst = @as(u32, @intCast(self.mir_instructions.len)); switch (self.mir_instructions.items(.tag)[reloc]) { .j, .jmp => {}, .pseudo => switch (self.mir_instructions.items(.ops)[reloc]) { @@ -9141,11 +9141,11 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); - const clobbers_len = @truncate(u31, extra.data.flags); + const clobbers_len = @as(u31, @truncate(extra.data.flags)); var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; var result: MCValue = .none; @@ -9281,7 +9281,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { if (std.fmt.parseInt(i32, op_str["$".len..], 0)) |s| { if (mnem_size) |size| { const max = @as(u64, math.maxInt(u64)) >> - @intCast(u6, 64 - (size.bitSize() - 1)); + @as(u6, @intCast(64 - (size.bitSize() - 1))); if ((if (s < 0) ~s else s) > max) return self.fail("Invalid immediate size: '{s}'", .{op_str}); } @@ -9289,7 +9289,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } else |_| if (std.fmt.parseInt(u64, op_str["$".len..], 0)) |u| { if (mnem_size) |size| { const max = @as(u64, math.maxInt(u64)) >> - @intCast(u6, 64 - size.bitSize()); + @as(u6, @intCast(64 - size.bitSize())); if (u > max) return self.fail("Invalid immediate size: '{s}'", .{op_str}); } @@ -9618,7 +9618,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError .indirect => |reg_off| try self.genSetMem(.{ .reg = reg_off.reg }, reg_off.off, ty, src_mcv), .memory, .load_direct, .load_got, .load_tlv => { switch (dst_mcv) { - .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) 
|small_addr| + .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr| return self.genSetMem(.{ .reg = .ds }, small_addr, ty, src_mcv), .load_direct, .load_got, .load_tlv => {}, else => unreachable, @@ -9641,7 +9641,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); if (abi_size * 8 > dst_reg.bitSize()) return self.fail("genSetReg called with a value larger than dst_reg", .{}); switch (src_mcv) { @@ -9662,11 +9662,11 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr } else if (abi_size > 4 and math.cast(u32, imm) != null) { // 32-bit moves zero-extend to 64-bit. try self.asmRegisterImmediate(.{ ._, .mov }, dst_reg.to32(), Immediate.u(imm)); - } else if (abi_size <= 4 and @bitCast(i64, imm) < 0) { + } else if (abi_size <= 4 and @as(i64, @bitCast(imm)) < 0) { try self.asmRegisterImmediate( .{ ._, .mov }, registerAlias(dst_reg, abi_size), - Immediate.s(@intCast(i32, @bitCast(i64, imm))), + Immediate.s(@as(i32, @intCast(@as(i64, @bitCast(imm))))), ); } else { try self.asmRegisterImmediate( @@ -9806,7 +9806,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }, .memory, .load_direct, .load_got, .load_tlv => { switch (src_mcv) { - .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr| { + .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr| { const dst_alias = registerAlias(dst_reg, abi_size); const src_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{ .reg = .ds }, @@ -9814,7 +9814,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }); switch (try self.moveStrategy(ty, mem.isAlignedGeneric( u32, - @bitCast(u32, small_addr), + @as(u32, @bitCast(small_addr)), ty.abiAlignment(mod), ))) { .move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem), @@ -9928,9 +9928,9 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const dst_ptr_mcv: MCValue = switch (base) { - .none => .{ .immediate = @bitCast(u64, @as(i64, disp)) }, + .none => .{ .immediate = @as(u64, @bitCast(@as(i64, disp))) }, .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } }, .frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } }, }; @@ -9941,9 +9941,9 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .immediate => |imm| switch (abi_size) { 1, 2, 4 => { const immediate = if (ty.isSignedInt(mod)) - Immediate.s(@truncate(i32, @bitCast(i64, imm))) + Immediate.s(@as(i32, @truncate(@as(i64, @bitCast(imm))))) else - Immediate.u(@intCast(u32, imm)); + Immediate.u(@as(u32, @intCast(imm))); try self.asmMemoryImmediate( .{ ._, .mov }, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }), @@ -9951,7 +9951,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal ); }, 3, 5...7 => unreachable, - else => if (math.cast(i32, @bitCast(i64, imm))) |small| { + else => if 
(math.cast(i32, @as(i64, @bitCast(imm)))) |small| { try self.asmMemoryImmediate( .{ ._, .mov }, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }), @@ -9963,14 +9963,14 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .{ ._, .mov }, Memory.sib(.dword, .{ .base = base, .disp = disp + offset }), if (ty.isSignedInt(mod)) - Immediate.s(@truncate( + Immediate.s(@as( i32, - @bitCast(i64, imm) >> (math.cast(u6, offset * 8) orelse 63), + @truncate(@as(i64, @bitCast(imm)) >> (math.cast(u6, offset * 8) orelse 63)), )) else - Immediate.u(@truncate( + Immediate.u(@as( u32, - if (math.cast(u6, offset * 8)) |shift| imm >> shift else 0, + @truncate(if (math.cast(u6, offset * 8)) |shift| imm >> shift else 0), )), ); }, @@ -9985,13 +9985,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal switch (try self.moveStrategy(ty, switch (base) { .none => mem.isAlignedGeneric( u32, - @bitCast(u32, disp), + @as(u32, @bitCast(disp)), ty.abiAlignment(mod), ), .reg => |reg| switch (reg) { .es, .cs, .ss, .ds => mem.isAlignedGeneric( u32, - @bitCast(u32, disp), + @as(u32, @bitCast(disp)), ty.abiAlignment(mod), ), else => false, @@ -10012,13 +10012,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .register_overflow => |ro| { try self.genSetMem( base, - disp + @intCast(i32, ty.structFieldOffset(0, mod)), + disp + @as(i32, @intCast(ty.structFieldOffset(0, mod))), ty.structFieldType(0, mod), .{ .register = ro.reg }, ); try self.genSetMem( base, - disp + @intCast(i32, ty.structFieldOffset(1, mod)), + disp + @as(i32, @intCast(ty.structFieldOffset(1, mod))), ty.structFieldType(1, mod), .{ .eflags = ro.eflags }, ); @@ -10077,7 +10077,7 @@ fn genLazySymbolRef( _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = atom.getOffsetTableAddress(elf_file); const got_mem = - Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }); + Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @as(i32, @intCast(got_addr)) }); switch (tag) { .lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem), .call => try self.asmMemory(.{ ._, .call }, got_mem), @@ -10099,7 +10099,7 @@ fn genLazySymbolRef( _ = atom.getOrCreateOffsetTableEntry(p9_file); const got_addr = atom.getOffsetTableAddress(p9_file); const got_mem = - Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }); + Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @as(i32, @intCast(got_addr)) }); switch (tag) { .lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem), .call => try self.asmMemory(.{ ._, .call }, got_mem), @@ -10195,8 +10195,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; if (dst_signedness == src_signedness) break :result dst_mcv; - const abi_size = @intCast(u16, dst_ty.abiSize(mod)); - const bit_size = @intCast(u16, dst_ty.bitSize(mod)); + const abi_size = @as(u16, @intCast(dst_ty.abiSize(mod))); + const bit_size = @as(u16, @intCast(dst_ty.bitSize(mod))); if (abi_size * 8 <= bit_size) break :result dst_mcv; const dst_limbs_len = math.divCeil(i32, bit_size, 64) catch unreachable; @@ -10237,7 +10237,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, ptr_ty.abiSize(mod)), + @as(i32, 
@intCast(ptr_ty.abiSize(mod))), Type.usize, .{ .immediate = array_len }, ); @@ -10251,7 +10251,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.typeOf(ty_op.operand); - const src_bits = @intCast(u32, src_ty.bitSize(mod)); + const src_bits = @as(u32, @intCast(src_ty.bitSize(mod))); const src_signedness = if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; const dst_ty = self.typeOfIndex(inst); @@ -10306,7 +10306,7 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void { const src_ty = self.typeOf(ty_op.operand); const dst_ty = self.typeOfIndex(inst); - const dst_bits = @intCast(u32, dst_ty.bitSize(mod)); + const dst_bits = @as(u32, @intCast(dst_ty.bitSize(mod))); const dst_signedness = if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; @@ -10359,7 +10359,7 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.typeOf(extra.ptr); const val_ty = self.typeOf(extra.expected_value); - const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); + const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod))); try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx }); const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx }); @@ -10461,7 +10461,7 @@ fn atomicOp( }; defer if (val_lock) |lock| self.register_manager.unlockReg(lock); - const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); + const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod))); const ptr_size = Memory.PtrSize.fromSize(val_abi_size); const ptr_mem = switch (ptr_mcv) { .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size), @@ -10539,7 +10539,7 @@ fn atomicOp( defer self.register_manager.unlockReg(tmp_lock); try self.asmRegisterMemory(.{ ._, .mov }, registerAlias(.rax, val_abi_size), ptr_mem); - const loop = @intCast(u32, self.mir_instructions.len); + const loop = @as(u32, @intCast(self.mir_instructions.len)); if (rmw_op != std.builtin.AtomicRmwOp.Xchg) { try self.genSetReg(tmp_reg, val_ty, .{ .register = .rax }); } @@ -10613,7 +10613,7 @@ fn atomicOp( .scale_index = ptr_mem.scaleIndex(), .disp = ptr_mem.sib.disp + 8, })); - const loop = @intCast(u32, self.mir_instructions.len); + const loop = @as(u32, @intCast(self.mir_instructions.len)); const val_mem_mcv: MCValue = switch (val_mcv) { .memory, .indirect, .load_frame => val_mcv, else => .{ .indirect = .{ @@ -10769,7 +10769,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { }; defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock); - const elem_abi_size = @intCast(u31, elem_ty.abiSize(mod)); + const elem_abi_size = @as(u31, @intCast(elem_ty.abiSize(mod))); if (elem_abi_size == 1) { const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { @@ -11249,9 +11249,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const result_ty = self.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen(mod)); + const len = @as(usize, @intCast(result_ty.arrayLen(mod))); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); + const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); const result: MCValue = result: { switch (result_ty.zigTypeTag(mod)) { .Struct => { @@ 
-11268,17 +11268,17 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i, mod); - const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod)); + const elem_bit_size = @as(u32, @intCast(elem_ty.bitSize(mod))); if (elem_bit_size > 64) { return self.fail( "TODO airAggregateInit implement packed structs with large fields", .{}, ); } - const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_abi_size = @as(u32, @intCast(elem_ty.abiSize(mod))); const elem_abi_bits = elem_abi_size * 8; const elem_off = struct_obj.packedFieldBitOffset(mod, elem_i); - const elem_byte_off = @intCast(i32, elem_off / elem_abi_bits * elem_abi_size); + const elem_byte_off = @as(i32, @intCast(elem_off / elem_abi_bits * elem_abi_size)); const elem_bit_off = elem_off % elem_abi_bits; const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { @@ -11330,7 +11330,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { elem_ty, .{ .load_frame = .{ .index = frame_index, - .off = elem_byte_off + @intCast(i32, elem_abi_size), + .off = elem_byte_off + @as(i32, @intCast(elem_abi_size)), } }, .{ .register = reg }, ); @@ -11340,7 +11340,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i, mod); - const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod)); + const elem_off = @as(i32, @intCast(result_ty.structFieldOffset(elem_i, mod))); const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, @@ -11354,7 +11354,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const frame_index = try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); const elem_ty = result_ty.childType(mod); - const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); for (elements, 0..) 
|elem, elem_i| { const elem_mcv = try self.resolveInst(elem); @@ -11362,12 +11362,12 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, else => elem_mcv, }; - const elem_off = @intCast(i32, elem_size * elem_i); + const elem_off = @as(i32, @intCast(elem_size * elem_i)); try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, mat_elem_mcv); } if (result_ty.sentinel(mod)) |sentinel| try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, elem_size * elements.len), + @as(i32, @intCast(elem_size * elements.len)), elem_ty, try self.genTypedValue(.{ .ty = elem_ty, .val = sentinel }), ); @@ -11416,7 +11416,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const tag_int_val = try tag_val.intFromEnum(tag_ty, mod); const tag_int = tag_int_val.toUnsignedInt(mod); const tag_off = if (layout.tag_align < layout.payload_align) - @intCast(i32, layout.payload_size) + @as(i32, @intCast(layout.payload_size)) else 0; try self.genCopy(tag_ty, dst_mcv.address().offset(tag_off).deref(), .{ .immediate = tag_int }); @@ -11424,7 +11424,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const pl_off = if (layout.tag_align < layout.payload_align) 0 else - @intCast(i32, layout.tag_size); + @as(i32, @intCast(layout.tag_size)); try self.genCopy(src_ty, dst_mcv.address().offset(pl_off).deref(), src_mcv); break :result dst_mcv; @@ -11454,7 +11454,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { var order = [1]u2{0} ** 3; var unused = std.StaticBitSet(3).initFull(); for (ops, &mcvs, &locks, 0..) |op, *mcv, *lock, op_i| { - const op_index = @intCast(u2, op_i); + const op_index = @as(u2, @intCast(op_i)); mcv.* = try self.resolveInst(op); if (unused.isSet(0) and mcv.isRegister() and self.reuseOperand(inst, op, op_index, mcv.*)) { order[op_index] = 1; @@ -11470,7 +11470,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } for (&order, &mcvs, &locks) |*mop_index, *mcv, *lock| { if (mop_index.* != 0) continue; - mop_index.* = 1 + @intCast(u2, unused.toggleFirstSet().?); + mop_index.* = 1 + @as(u2, @intCast(unused.toggleFirstSet().?)); if (mop_index.* > 1 and mcv.isRegister()) continue; const reg = try self.copyToTmpRegister(ty, mcv.*); mcv.* = .{ .register = reg }; @@ -11570,7 +11570,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { var mops: [3]MCValue = undefined; for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const mop1_reg = registerAlias(mops[0].getReg().?, abi_size); const mop2_reg = registerAlias(mops[1].getReg().?, abi_size); if (mops[2].isRegister()) try self.asmRegisterRegisterRegister( @@ -11723,7 +11723,7 @@ fn resolveCallingConventionValues( switch (self.target.os.tag) { .windows => { // Align the stack to 16bytes before allocating shadow stack space (if any). 
- result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(mod)); + result.stack_byte_count += @as(u31, @intCast(4 * Type.usize.abiSize(mod))); }, else => {}, } @@ -11746,7 +11746,7 @@ fn resolveCallingConventionValues( result.return_value = switch (classes[0]) { .integer => InstTracking.init(.{ .register = registerAlias( ret_reg, - @intCast(u32, ret_ty.abiSize(mod)), + @as(u32, @intCast(ret_ty.abiSize(mod))), ) }), .float, .sse => InstTracking.init(.{ .register = .xmm0 }), .memory => ret: { @@ -11782,17 +11782,17 @@ fn resolveCallingConventionValues( }, .float, .sse => switch (self.target.os.tag) { .windows => if (param_reg_i < 4) { - arg.* = .{ .register = @enumFromInt( + arg.* = .{ .register = @as( Register, - @intFromEnum(Register.xmm0) + param_reg_i, + @enumFromInt(@intFromEnum(Register.xmm0) + param_reg_i), ) }; param_reg_i += 1; continue; }, else => if (param_sse_reg_i < 8) { - arg.* = .{ .register = @enumFromInt( + arg.* = .{ .register = @as( Register, - @intFromEnum(Register.xmm0) + param_sse_reg_i, + @enumFromInt(@intFromEnum(Register.xmm0) + param_sse_reg_i), ) }; param_sse_reg_i += 1; continue; @@ -11804,8 +11804,8 @@ fn resolveCallingConventionValues( }), } - const param_size = @intCast(u31, ty.abiSize(mod)); - const param_align = @intCast(u31, ty.abiAlignment(mod)); + const param_size = @as(u31, @intCast(ty.abiSize(mod))); + const param_align = @as(u31, @intCast(ty.abiAlignment(mod))); result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11825,7 +11825,7 @@ fn resolveCallingConventionValues( result.return_value = InstTracking.init(.none); } else { const ret_reg = abi.getCAbiIntReturnRegs(self.target.*)[0]; - const ret_ty_size = @intCast(u31, ret_ty.abiSize(mod)); + const ret_ty_size = @as(u31, @intCast(ret_ty.abiSize(mod))); if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) { const aliased_reg = registerAlias(ret_reg, ret_ty_size); result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none }; @@ -11844,8 +11844,8 @@ fn resolveCallingConventionValues( arg.* = .none; continue; } - const param_size = @intCast(u31, ty.abiSize(mod)); - const param_align = @intCast(u31, ty.abiAlignment(mod)); + const param_size = @as(u31, @intCast(ty.abiSize(mod))); + const param_align = @as(u31, @intCast(ty.abiAlignment(mod))); result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11932,12 +11932,12 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { const mod = self.bin_file.options.module.?; const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(u16, ty.bitSize(mod)), + .bits = @as(u16, @intCast(ty.bitSize(mod))), }; const max_reg_bit_width = Register.rax.bitSize(); switch (int_info.signedness) { .signed => { - const shift = @intCast(u6, max_reg_bit_width - int_info.bits); + const shift = @as(u6, @intCast(max_reg_bit_width - int_info.bits)); try self.genShiftBinOpMir( .{ ._l, .sa }, Type.isize, @@ -11952,7 +11952,7 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { ); }, .unsigned => { - const shift = @intCast(u6, max_reg_bit_width - int_info.bits); + const shift = @as(u6, @intCast(max_reg_bit_width - int_info.bits)); const mask = (~@as(u64, 0)) >> shift; if (int_info.bits <= 32) { try self.genBinOpMir( diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig index 78ff918715..9c9aadbd13 100644 --- a/src/arch/x86_64/Emit.zig +++ 
b/src/arch/x86_64/Emit.zig @@ -19,18 +19,18 @@ pub const Error = Lower.Error || error{ pub fn emitMir(emit: *Emit) Error!void { for (0..emit.lower.mir.instructions.len) |mir_i| { - const mir_index = @intCast(Mir.Inst.Index, mir_i); + const mir_index = @as(Mir.Inst.Index, @intCast(mir_i)); try emit.code_offset_mapping.putNoClobber( emit.lower.allocator, mir_index, - @intCast(u32, emit.code.items.len), + @as(u32, @intCast(emit.code.items.len)), ); const lowered = try emit.lower.lowerMir(mir_index); var lowered_relocs = lowered.relocs; for (lowered.insts, 0..) |lowered_inst, lowered_index| { - const start_offset = @intCast(u32, emit.code.items.len); + const start_offset = @as(u32, @intCast(emit.code.items.len)); try lowered_inst.encode(emit.code.writer(), .{}); - const end_offset = @intCast(u32, emit.code.items.len); + const end_offset = @as(u32, @intCast(emit.code.items.len)); while (lowered_relocs.len > 0 and lowered_relocs[0].lowered_inst_index == lowered_index) : ({ lowered_relocs = lowered_relocs[1..]; @@ -39,7 +39,7 @@ pub fn emitMir(emit: *Emit) Error!void { .source = start_offset, .target = target, .offset = end_offset - 4, - .length = @intCast(u5, end_offset - start_offset), + .length = @as(u5, @intCast(end_offset - start_offset)), }), .linker_extern_fn => |symbol| if (emit.bin_file.cast(link.File.MachO)) |macho_file| { // Add relocation to the decl. @@ -89,7 +89,7 @@ pub fn emitMir(emit: *Emit) Error!void { else => unreachable, }, .target = .{ .sym_index = symbol.sym_index, .file = null }, - .offset = @intCast(u32, end_offset - 4), + .offset = @as(u32, @intCast(end_offset - 4)), .addend = 0, .pcrel = true, .length = 2, @@ -113,7 +113,7 @@ pub fn emitMir(emit: *Emit) Error!void { .linker_import => coff_file.getGlobalByIndex(symbol.sym_index), else => unreachable, }, - .offset = @intCast(u32, end_offset - 4), + .offset = @as(u32, @intCast(end_offset - 4)), .addend = 0, .pcrel = true, .length = 2, @@ -122,7 +122,7 @@ pub fn emitMir(emit: *Emit) Error!void { const atom_index = symbol.atom_index; try p9_file.addReloc(atom_index, .{ // TODO we may need to add a .type field to the relocs if they are .linker_got instead of just .linker_direct .target = symbol.sym_index, // we set sym_index to just be the atom index - .offset = @intCast(u32, end_offset - 4), + .offset = @as(u32, @intCast(end_offset - 4)), .addend = 0, .pcrel = true, }); @@ -209,13 +209,13 @@ fn fixupRelocs(emit: *Emit) Error!void { for (emit.relocs.items) |reloc| { const target = emit.code_offset_mapping.get(reloc.target) orelse return emit.fail("JMP/CALL relocation target not found!", .{}); - const disp = @intCast(i32, @intCast(i64, target) - @intCast(i64, reloc.source + reloc.length)); + const disp = @as(i32, @intCast(@as(i64, @intCast(target)) - @as(i64, @intCast(reloc.source + reloc.length)))); mem.writeIntLittle(i32, emit.code.items[reloc.offset..][0..4], disp); } } fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void { - const delta_line = @intCast(i32, line) - @intCast(i32, emit.prev_di_line); + const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line)); const delta_pc: usize = emit.code.items.len - emit.prev_di_pc; log.debug(" (advance pc={d} and line={d})", .{ delta_line, delta_pc }); switch (emit.debug_output) { @@ -233,22 +233,22 @@ fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void { // increasing the line number try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line); // increasing the pc - const d_pc_p9 = @intCast(i64, delta_pc) 
- quant; + const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant; if (d_pc_p9 > 0) { // minus one because if its the last one, we want to leave space to change the line which is one quanta var diff = @divExact(d_pc_p9, quant) - quant; while (diff > 0) { if (diff < 64) { - try dbg_out.dbg_line.append(@intCast(u8, diff + 128)); + try dbg_out.dbg_line.append(@as(u8, @intCast(diff + 128))); diff = 0; } else { - try dbg_out.dbg_line.append(@intCast(u8, 64 + 128)); + try dbg_out.dbg_line.append(@as(u8, @intCast(64 + 128))); diff -= 64; } } if (dbg_out.pcop_change_index.*) |pci| dbg_out.dbg_line.items[pci] += 1; - dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1); + dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1)); } else if (d_pc_p9 == 0) { // we don't need to do anything, because adding the quant does it for us } else unreachable; diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index a3963ca149..ca260f5ec4 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -85,7 +85,7 @@ pub fn findByOpcode(opc: []const u8, prefixes: struct { rex: Rex, }, modrm_ext: ?u3) ?Encoding { for (mnemonic_to_encodings_map, 0..) |encs, mnemonic_int| for (encs) |data| { - const enc = Encoding{ .mnemonic = @enumFromInt(Mnemonic, mnemonic_int), .data = data }; + const enc = Encoding{ .mnemonic = @as(Mnemonic, @enumFromInt(mnemonic_int)), .data = data }; if (modrm_ext) |ext| if (ext != data.modrm_ext) continue; if (!std.mem.eql(u8, opc, enc.opcode())) continue; if (prefixes.rex.w) { @@ -763,7 +763,7 @@ fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Op var cwriter = std.io.countingWriter(std.io.null_writer); inst.encode(cwriter.writer(), .{ .allow_frame_loc = true }) catch unreachable; // Not allowed to fail here unless OOM. 
- return @intCast(usize, cwriter.bytes_written); + return @as(usize, @intCast(cwriter.bytes_written)); } const mnemonic_to_encodings_map = init: { diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index d77ddf3050..53aa182957 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -188,7 +188,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .pseudo_probe_align_ri_s => { try lower.emit(.none, .@"test", &.{ .{ .reg = inst.data.ri.r1 }, - .{ .imm = Immediate.s(@bitCast(i32, inst.data.ri.i)) }, + .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.ri.i))) }, }); try lower.emit(.none, .jz, &.{ .{ .imm = lower.reloc(.{ .inst = index + 1 }) }, @@ -213,7 +213,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }, .pseudo_probe_adjust_unrolled_ri_s => { var offset = page_size; - while (offset < @bitCast(i32, inst.data.ri.i)) : (offset += page_size) { + while (offset < @as(i32, @bitCast(inst.data.ri.i))) : (offset += page_size) { try lower.emit(.none, .@"test", &.{ .{ .mem = Memory.sib(.dword, .{ .base = .{ .reg = inst.data.ri.r1 }, @@ -224,14 +224,14 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { } try lower.emit(.none, .sub, &.{ .{ .reg = inst.data.ri.r1 }, - .{ .imm = Immediate.s(@bitCast(i32, inst.data.ri.i)) }, + .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.ri.i))) }, }); assert(lower.result_insts_len <= pseudo_probe_adjust_unrolled_max_insts); }, .pseudo_probe_adjust_setup_rri_s => { try lower.emit(.none, .mov, &.{ .{ .reg = inst.data.rri.r2.to32() }, - .{ .imm = Immediate.s(@bitCast(i32, inst.data.rri.i)) }, + .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.rri.i))) }, }); try lower.emit(.none, .sub, &.{ .{ .reg = inst.data.rri.r1 }, @@ -289,7 +289,7 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate { .i_s, .mi_sib_s, .mi_rip_s, - => Immediate.s(@bitCast(i32, i)), + => Immediate.s(@as(i32, @bitCast(i))), .rrri, .rri_u, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 36eacf4db9..7753104b96 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -989,7 +989,7 @@ pub const RegisterList = struct { fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt { for (registers, 0..) |cpreg, i| { - if (reg.id() == cpreg.id()) return @intCast(u32, i); + if (reg.id() == cpreg.id()) return @as(u32, @intCast(i)); } unreachable; // register not in input register list! 
} @@ -1009,7 +1009,7 @@ pub const RegisterList = struct { } pub fn count(self: Self) u32 { - return @intCast(u32, self.bitset.count()); + return @as(u32, @intCast(self.bitset.count())); } }; @@ -1023,15 +1023,15 @@ pub const Imm64 = struct { pub fn encode(v: u64) Imm64 { return .{ - .msb = @truncate(u32, v >> 32), - .lsb = @truncate(u32, v), + .msb = @as(u32, @truncate(v >> 32)), + .lsb = @as(u32, @truncate(v)), }; } pub fn decode(imm: Imm64) u64 { var res: u64 = 0; - res |= (@intCast(u64, imm.msb) << 32); - res |= @intCast(u64, imm.lsb); + res |= (@as(u64, @intCast(imm.msb)) << 32); + res |= @as(u64, @intCast(imm.lsb)); return res; } }; @@ -1070,18 +1070,18 @@ pub const MemorySib = struct { } pub fn decode(msib: MemorySib) Memory { - const scale = @truncate(u4, msib.scale_index); + const scale = @as(u4, @truncate(msib.scale_index)); assert(scale == 0 or std.math.isPowerOfTwo(scale)); return .{ .sib = .{ - .ptr_size = @enumFromInt(Memory.PtrSize, msib.ptr_size), - .base = switch (@enumFromInt(Memory.Base.Tag, msib.base_tag)) { + .ptr_size = @as(Memory.PtrSize, @enumFromInt(msib.ptr_size)), + .base = switch (@as(Memory.Base.Tag, @enumFromInt(msib.base_tag))) { .none => .none, - .reg => .{ .reg = @enumFromInt(Register, msib.base) }, - .frame => .{ .frame = @enumFromInt(bits.FrameIndex, msib.base) }, + .reg => .{ .reg = @as(Register, @enumFromInt(msib.base)) }, + .frame => .{ .frame = @as(bits.FrameIndex, @enumFromInt(msib.base)) }, }, .scale_index = .{ .scale = scale, - .index = if (scale > 0) @enumFromInt(Register, msib.scale_index >> 4) else undefined, + .index = if (scale > 0) @as(Register, @enumFromInt(msib.scale_index >> 4)) else undefined, }, .disp = msib.disp, } }; @@ -1103,7 +1103,7 @@ pub const MemoryRip = struct { pub fn decode(mrip: MemoryRip) Memory { return .{ .rip = .{ - .ptr_size = @enumFromInt(Memory.PtrSize, mrip.ptr_size), + .ptr_size = @as(Memory.PtrSize, @enumFromInt(mrip.ptr_size)), .disp = mrip.disp, } }; } @@ -1120,14 +1120,14 @@ pub const MemoryMoffs = struct { pub fn encode(seg: Register, offset: u64) MemoryMoffs { return .{ .seg = @intFromEnum(seg), - .msb = @truncate(u32, offset >> 32), - .lsb = @truncate(u32, offset >> 0), + .msb = @as(u32, @truncate(offset >> 32)), + .lsb = @as(u32, @truncate(offset >> 0)), }; } pub fn decode(moffs: MemoryMoffs) Memory { return .{ .moffs = .{ - .seg = @enumFromInt(Register, moffs.seg), + .seg = @as(Register, @enumFromInt(moffs.seg)), .offset = @as(u64, moffs.msb) << 32 | @as(u64, moffs.lsb) << 0, } }; } @@ -1147,7 +1147,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: u32) struct { data: T, end: inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => mir.extra[i], - i32 => @bitCast(i32, mir.extra[i]), + i32 => @as(i32, @bitCast(mir.extra[i])), else => @compileError("bad field type"), }; i += 1; diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index b4e175f33d..f1ce3ebeb8 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -278,7 +278,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class { // "Otherwise class SSE is used." result[result_i] = .sse; } - byte_i += @intCast(usize, field_size); + byte_i += @as(usize, @intCast(field_size)); if (byte_i == 8) { byte_i = 0; result_i += 1; @@ -293,7 +293,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class { result_i += field_class.len; // If there are any bytes leftover, we have to try to combine // the next field with them. 
- byte_i = @intCast(usize, field_size % 8); + byte_i = @as(usize, @intCast(field_size % 8)); if (byte_i != 0) result_i -= 1; } } diff --git a/src/arch/x86_64/bits.zig b/src/arch/x86_64/bits.zig index e232a2db05..04b21b9e21 100644 --- a/src/arch/x86_64/bits.zig +++ b/src/arch/x86_64/bits.zig @@ -232,7 +232,7 @@ pub const Register = enum(u7) { else => unreachable, // zig fmt: on }; - return @intCast(u6, @intFromEnum(reg) - base); + return @as(u6, @intCast(@intFromEnum(reg) - base)); } pub fn bitSize(reg: Register) u64 { @@ -291,11 +291,11 @@ pub const Register = enum(u7) { else => unreachable, // zig fmt: on }; - return @truncate(u4, @intFromEnum(reg) - base); + return @as(u4, @truncate(@intFromEnum(reg) - base)); } pub fn lowEnc(reg: Register) u3 { - return @truncate(u3, reg.enc()); + return @as(u3, @truncate(reg.enc())); } pub fn toBitSize(reg: Register, bit_size: u64) Register { @@ -325,19 +325,19 @@ pub const Register = enum(u7) { } pub fn to64(reg: Register) Register { - return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax)); + return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax))); } pub fn to32(reg: Register) Register { - return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax)); + return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax))); } pub fn to16(reg: Register) Register { - return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ax)); + return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ax))); } pub fn to8(reg: Register) Register { - return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al)); + return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al))); } fn sseBase(reg: Register) u7 { @@ -350,11 +350,11 @@ pub const Register = enum(u7) { } pub fn to256(reg: Register) Register { - return @enumFromInt(Register, @intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.ymm0)); + return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.ymm0))); } pub fn to128(reg: Register) Register { - return @enumFromInt(Register, @intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.xmm0)); + return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.xmm0))); } /// DWARF register encoding @@ -363,7 +363,7 @@ pub const Register = enum(u7) { .general_purpose => if (reg.isExtended()) reg.enc() else - @truncate(u3, @as(u24, 0o54673120) >> @as(u5, reg.enc()) * 3), + @as(u3, @truncate(@as(u24, 0o54673120) >> @as(u5, reg.enc()) * 3)), .sse => 17 + @as(u6, reg.enc()), .x87 => 33 + @as(u6, reg.enc()), .mmx => 41 + @as(u6, reg.enc()), @@ -610,15 +610,15 @@ pub const Immediate = union(enum) { pub fn asUnsigned(imm: Immediate, bit_size: u64) u64 { return switch (imm) { .signed => |x| switch (bit_size) { - 1, 8 => @bitCast(u8, @intCast(i8, x)), - 16 => @bitCast(u16, @intCast(i16, x)), - 32, 64 => @bitCast(u32, x), + 1, 8 => @as(u8, @bitCast(@as(i8, @intCast(x)))), + 16 => @as(u16, @bitCast(@as(i16, @intCast(x)))), + 32, 64 => @as(u32, @bitCast(x)), else => unreachable, }, .unsigned => |x| switch (bit_size) { - 1, 8 => @intCast(u8, x), - 16 => @intCast(u16, x), - 32 => @intCast(u32, x), + 1, 8 => @as(u8, @intCast(x)), + 16 => @as(u16, @intCast(x)), + 32 => @as(u32, @intCast(x)), 64 => x, else => unreachable, }, diff --git 
a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig index d953a9410d..bc4c59dc86 100644 --- a/src/arch/x86_64/encoder.zig +++ b/src/arch/x86_64/encoder.zig @@ -471,7 +471,7 @@ pub const Instruction = struct { } else { try encoder.sib_baseDisp8(dst); } - try encoder.disp8(@truncate(i8, sib.disp)); + try encoder.disp8(@as(i8, @truncate(sib.disp))); } else { try encoder.modRm_SIBDisp32(src); if (mem.scaleIndex()) |si| { @@ -487,7 +487,7 @@ pub const Instruction = struct { try encoder.modRm_indirectDisp0(src, dst); } else if (math.cast(i8, sib.disp)) |_| { try encoder.modRm_indirectDisp8(src, dst); - try encoder.disp8(@truncate(i8, sib.disp)); + try encoder.disp8(@as(i8, @truncate(sib.disp))); } else { try encoder.modRm_indirectDisp32(src, dst); try encoder.disp32(sib.disp); @@ -509,9 +509,9 @@ pub const Instruction = struct { fn encodeImm(imm: Immediate, kind: Encoding.Op, encoder: anytype) !void { const raw = imm.asUnsigned(kind.immBitSize()); switch (kind.immBitSize()) { - 8 => try encoder.imm8(@intCast(u8, raw)), - 16 => try encoder.imm16(@intCast(u16, raw)), - 32 => try encoder.imm32(@intCast(u32, raw)), + 8 => try encoder.imm8(@as(u8, @intCast(raw))), + 16 => try encoder.imm16(@as(u16, @intCast(raw))), + 32 => try encoder.imm32(@as(u32, @intCast(raw))), 64 => try encoder.imm64(raw), else => unreachable, } @@ -581,7 +581,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type { /// Encodes legacy prefixes pub fn legacyPrefixes(self: Self, prefixes: LegacyPrefixes) !void { - if (@bitCast(u16, prefixes) != 0) { + if (@as(u16, @bitCast(prefixes)) != 0) { // Hopefully this path isn't taken very often, so we'll do it the slow way for now // LOCK @@ -891,7 +891,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type { /// /// It is sign-extended to 64 bits by the cpu. 
pub fn disp8(self: Self, disp: i8) !void { - try self.writer.writeByte(@bitCast(u8, disp)); + try self.writer.writeByte(@as(u8, @bitCast(disp))); } /// Encode an 32 bit displacement diff --git a/src/clang.zig b/src/clang.zig index d6a655a704..75c7e00c68 100644 --- a/src/clang.zig +++ b/src/clang.zig @@ -117,7 +117,7 @@ pub const APFloatBaseSemantics = enum(c_int) { pub const APInt = opaque { pub fn getLimitedValue(self: *const APInt, comptime T: type) T { - return @truncate(T, ZigClangAPInt_getLimitedValue(self, std.math.maxInt(T))); + return @as(T, @truncate(ZigClangAPInt_getLimitedValue(self, std.math.maxInt(T)))); } extern fn ZigClangAPInt_getLimitedValue(*const APInt, limit: u64) u64; }; diff --git a/src/codegen.zig b/src/codegen.zig index 3bd7dca2c6..9e5ae11a63 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -108,7 +108,7 @@ fn writeFloat(comptime F: type, f: F, target: Target, endian: std.builtin.Endian _ = target; const bits = @typeInfo(F).Float.bits; const Int = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = bits } }); - const int = @bitCast(Int, f); + const int = @as(Int, @bitCast(f)); mem.writeInt(Int, code[0..@divExact(bits, 8)], int, endian); } @@ -143,18 +143,18 @@ pub fn generateLazySymbol( if (lazy_sym.ty.isAnyError(mod)) { alignment.* = 4; const err_names = mod.global_error_set.keys(); - mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, err_names.len), endian); + mem.writeInt(u32, try code.addManyAsArray(4), @as(u32, @intCast(err_names.len)), endian); var offset = code.items.len; try code.resize((1 + err_names.len + 1) * 4); for (err_names) |err_name_nts| { const err_name = mod.intern_pool.stringToSlice(err_name_nts); - mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); + mem.writeInt(u32, code.items[offset..][0..4], @as(u32, @intCast(code.items.len)), endian); offset += 4; try code.ensureUnusedCapacity(err_name.len + 1); code.appendSliceAssumeCapacity(err_name); code.appendAssumeCapacity(0); } - mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); + mem.writeInt(u32, code.items[offset..][0..4], @as(u32, @intCast(code.items.len)), endian); return Result.ok; } else if (lazy_sym.ty.zigTypeTag(mod) == .Enum) { alignment.* = 1; @@ -253,12 +253,12 @@ pub fn generateSymbol( }, .err => |err| { const int = try mod.getErrorValue(err.name); - try code.writer().writeInt(u16, @intCast(u16, int), endian); + try code.writer().writeInt(u16, @as(u16, @intCast(int)), endian); }, .error_union => |error_union| { const payload_ty = typed_value.ty.errorUnionPayload(mod); const err_val = switch (error_union.val) { - .err_name => |err_name| @intCast(u16, try mod.getErrorValue(err_name)), + .err_name => |err_name| @as(u16, @intCast(try mod.getErrorValue(err_name))), .payload => @as(u16, 0), }; @@ -397,7 +397,7 @@ pub fn generateSymbol( .ty = array_type.child.toType(), .val = switch (aggregate.storage) { .bytes => unreachable, - .elems => |elems| elems[@intCast(usize, index)], + .elems => |elems| elems[@as(usize, @intCast(index))], .repeated_elem => |elem| elem, }.toValue(), }, code, debug_output, reloc_info)) { @@ -417,7 +417,7 @@ pub fn generateSymbol( .ty = vector_type.child.toType(), .val = switch (aggregate.storage) { .bytes => unreachable, - .elems => |elems| elems[@intCast(usize, index)], + .elems => |elems| elems[@as(usize, @intCast(index))], .repeated_elem => |elem| elem, }.toValue(), }, code, debug_output, reloc_info)) { @@ -509,7 +509,7 @@ pub fn generateSymbol( } else { 
field_val.toValue().writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; } - bits += @intCast(u16, field_ty.bitSize(mod)); + bits += @as(u16, @intCast(field_ty.bitSize(mod))); } } else { const struct_begin = code.items.len; @@ -642,10 +642,10 @@ fn lowerParentPtr( eu_payload, code, debug_output, - reloc_info.offset(@intCast(u32, errUnionPayloadOffset( + reloc_info.offset(@as(u32, @intCast(errUnionPayloadOffset( mod.intern_pool.typeOf(eu_payload).toType(), mod, - ))), + )))), ), .opt_payload => |opt_payload| try lowerParentPtr( bin_file, @@ -661,8 +661,8 @@ fn lowerParentPtr( elem.base, code, debug_output, - reloc_info.offset(@intCast(u32, elem.index * - mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod))), + reloc_info.offset(@as(u32, @intCast(elem.index * + mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod)))), ), .field => |field| { const base_type = mod.intern_pool.indexToKey(mod.intern_pool.typeOf(field.base)).ptr_type.child; @@ -684,10 +684,10 @@ fn lowerParentPtr( .struct_type, .anon_struct_type, .union_type, - => @intCast(u32, base_type.toType().structFieldOffset( - @intCast(u32, field.index), + => @as(u32, @intCast(base_type.toType().structFieldOffset( + @as(u32, @intCast(field.index)), mod, - )), + ))), else => unreachable, }), ); @@ -735,8 +735,8 @@ fn lowerDeclRef( }); const endian = target.cpu.arch.endian(); switch (ptr_width) { - 16 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(u16, vaddr), endian), - 32 => mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, vaddr), endian), + 16 => mem.writeInt(u16, try code.addManyAsArray(2), @as(u16, @intCast(vaddr)), endian), + 32 => mem.writeInt(u32, try code.addManyAsArray(4), @as(u32, @intCast(vaddr)), endian), 64 => mem.writeInt(u64, try code.addManyAsArray(8), vaddr, endian), else => unreachable, } @@ -945,7 +945,7 @@ pub fn genTypedValue( const info = typed_value.ty.intInfo(mod); if (info.bits <= ptr_bits) { const unsigned = switch (info.signedness) { - .signed => @bitCast(u64, typed_value.val.toSignedInt(mod)), + .signed => @as(u64, @bitCast(typed_value.val.toSignedInt(mod))), .unsigned => typed_value.val.toUnsignedInt(mod), }; return GenResult.mcv(.{ .immediate = unsigned }); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 8afaae7cfa..317d77602f 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -326,7 +326,7 @@ pub const Function = struct { .cty_idx = try f.typeToIndex(ty, .complete), .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)), }); - return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) }; + return .{ .new_local = @as(LocalIndex, @intCast(f.locals.items.len - 1)) }; } fn allocLocal(f: *Function, inst: Air.Inst.Index, ty: Type) !CValue { @@ -644,7 +644,7 @@ pub const DeclGen = struct { // Ensure complete type definition is visible before accessing fields. 
_ = try dg.typeToIndex(base_ty, .complete); const field_ty = switch (mod.intern_pool.indexToKey(base_ty.toIntern())) { - .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@intCast(usize, field.index), mod), + .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@as(usize, @intCast(field.index)), mod), .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .One, .Many, .C => unreachable, .Slice => switch (field.index) { @@ -662,7 +662,7 @@ pub const DeclGen = struct { try dg.renderCType(writer, ptr_cty); try writer.writeByte(')'); } - switch (fieldLocation(base_ty, ptr_ty, @intCast(u32, field.index), mod)) { + switch (fieldLocation(base_ty, ptr_ty, @as(u32, @intCast(field.index)), mod)) { .begin => try dg.renderParentPtr(writer, field.base, location), .field => |name| { try writer.writeAll("&("); @@ -740,11 +740,11 @@ pub const DeclGen = struct { try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); switch (bits) { - 16 => try writer.print("{x}", .{@bitCast(f16, undefPattern(i16))}), - 32 => try writer.print("{x}", .{@bitCast(f32, undefPattern(i32))}), - 64 => try writer.print("{x}", .{@bitCast(f64, undefPattern(i64))}), - 80 => try writer.print("{x}", .{@bitCast(f80, undefPattern(i80))}), - 128 => try writer.print("{x}", .{@bitCast(f128, undefPattern(i128))}), + 16 => try writer.print("{x}", .{@as(f16, @bitCast(undefPattern(i16)))}), + 32 => try writer.print("{x}", .{@as(f32, @bitCast(undefPattern(i32)))}), + 64 => try writer.print("{x}", .{@as(f64, @bitCast(undefPattern(i64)))}), + 80 => try writer.print("{x}", .{@as(f80, @bitCast(undefPattern(i80)))}), + 128 => try writer.print("{x}", .{@as(f128, @bitCast(undefPattern(i128)))}), else => unreachable, } try writer.writeAll(", "); @@ -1041,11 +1041,11 @@ pub const DeclGen = struct { }; switch (bits) { - 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))), - 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))), - 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))), - 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))), - 128 => repr_val_big.set(@bitCast(u128, f128_val)), + 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, mod)))), + 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, mod)))), + 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, mod)))), + 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, mod)))), + 128 => repr_val_big.set(@as(u128, @bitCast(f128_val))), else => unreachable, } @@ -1103,11 +1103,11 @@ pub const DeclGen = struct { if (std.math.isNan(f128_val)) switch (bits) { // We only actually need to pass the significand, but it will get // properly masked anyway, so just pass the whole value. 
- 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}), - 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}), - 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}), - 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}), - 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}), + 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, mod)))}), + 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, mod)))}), + 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, mod)))}), + 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, mod)))}), + 128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}), else => unreachable, }; try writer.writeAll(", "); @@ -1225,11 +1225,11 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { const elem_val = try val.elemValue(mod, index); - const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @as(u8, @intCast(elem_val.toUnsignedInt(mod))); try literal.writeChar(elem_val_u8); } if (ai.sentinel) |s| { - const s_u8 = @intCast(u8, s.toUnsignedInt(mod)); + const s_u8 = @as(u8, @intCast(s.toUnsignedInt(mod))); if (s_u8 != 0) try literal.writeChar(s_u8); } try literal.end(); @@ -1239,7 +1239,7 @@ pub const DeclGen = struct { while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); const elem_val = try val.elemValue(mod, index); - const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @as(u8, @intCast(elem_val.toUnsignedInt(mod))); try writer.print("'\\x{x}'", .{elem_val_u8}); } if (ai.sentinel) |s| { @@ -1840,7 +1840,7 @@ pub const DeclGen = struct { decl.ty, .{ .decl = decl_index }, CQualifiers.init(.{ .@"const" = variable.is_const }), - @intCast(u32, decl.alignment.toByteUnits(0)), + @as(u32, @intCast(decl.alignment.toByteUnits(0))), .complete, ); try fwd_decl_writer.writeAll(";\n"); @@ -1907,7 +1907,7 @@ pub const DeclGen = struct { const mod = dg.module; const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(u16, ty.bitSize(mod)), + .bits = @as(u16, @intCast(ty.bitSize(mod))), }; if (is_big) try writer.print(", {}", .{int_info.signedness == .signed}); @@ -2481,7 +2481,7 @@ fn genExports(o: *Object) !void { if (mod.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| { for (exports.items[1..], 1..) |@"export", i| { try fwd_decl_writer.writeAll("zig_export("); - try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @intCast(u32, i) }); + try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @as(u32, @intCast(i)) }); try fwd_decl_writer.print(", {s}, {s});\n", .{ fmtStringLiteral(ip.stringToSlice(exports.items[0].opts.name), null), fmtStringLiteral(ip.stringToSlice(@"export".opts.name), null), @@ -2510,7 +2510,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete); try w.writeAll(") {\n switch (tag) {\n"); for (enum_ty.enumFields(mod), 0..) 
|name_ip, index_usize| { - const index = @intCast(u32, index_usize); + const index = @as(u32, @intCast(index_usize)); const name = mod.intern_pool.stringToSlice(name_ip); const tag_val = try mod.enumValueFieldIndex(enum_ty, index); @@ -2783,7 +2783,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con // Remember how many locals there were before entering the body so that we can free any that // were newly introduced. Any new locals must necessarily be logically free after the then // branch is complete. - const pre_locals_len = @intCast(LocalIndex, f.locals.items.len); + const pre_locals_len = @as(LocalIndex, @intCast(f.locals.items.len)); for (leading_deaths) |death| { try die(f, inst, Air.indexToRef(death)); @@ -2804,7 +2804,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con // them, unless they were used to store allocs. for (pre_locals_len..f.locals.items.len) |local_i| { - const local_index = @intCast(LocalIndex, local_i); + const local_index = @as(LocalIndex, @intCast(local_i)); if (f.allocs.contains(local_index)) { continue; } @@ -3364,7 +3364,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset); - const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod))); + const field_ty = try mod.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(mod)))); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); @@ -3667,7 +3667,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { var mask = try BigInt.Managed.initCapacity(stack.get(), BigInt.calcTwosCompLimbCount(host_bits)); defer mask.deinit(); - try mask.setTwosCompIntLimit(.max, .unsigned, @intCast(usize, src_bits)); + try mask.setTwosCompIntLimit(.max, .unsigned, @as(usize, @intCast(src_bits))); try mask.shiftLeft(&mask, ptr_info.packed_offset.bit_offset); try mask.bitNotWrap(&mask, .unsigned, host_bits); @@ -4096,7 +4096,7 @@ fn airCall( const pl_op = f.air.instructions.items(.data)[inst].pl_op; const extra = f.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra.end..][0..extra.data.args_len])); const resolved_args = try gpa.alloc(CValue, args.len); defer gpa.free(resolved_args); @@ -4537,7 +4537,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca wrap_cty = elem_cty.toSignedness(dest_info.signedness); need_bitcasts = wrap_cty.?.tag() == .zig_i128; bits -= 1; - bits %= @intCast(u16, f.byteSize(elem_cty) * 8); + bits %= @as(u16, @intCast(f.byteSize(elem_cty) * 8)); bits += 1; } try writer.writeAll(" = "); @@ -4711,7 +4711,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { var extra_index: usize = switch_br.end; for (0..switch_br.data.cases_len) |case_i| { const case = f.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, f.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[case.end..][0..case.data.items_len])); const case_body = f.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + case.data.items_len + case_body.len; @@ -4771,13 +4771,13 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const 
mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Asm, ty_pl.payload); - const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; + const clobbers_len = @as(u31, @truncate(extra.data.flags)); const gpa = f.object.dg.gpa; var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; const result = result: { @@ -4794,7 +4794,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { break :local local; } else .none; - const locals_begin = @intCast(LocalIndex, f.locals.items.len); + const locals_begin = @as(LocalIndex, @intCast(f.locals.items.len)); const constraints_extra_begin = extra_i; for (outputs) |output| { const extra_bytes = mem.sliceAsBytes(f.air.extra[extra_i..]); @@ -5402,7 +5402,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { inst_ty.intInfo(mod).signedness else .unsigned; - const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod))); + const field_int_ty = try mod.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(mod)))); const temp_local = try f.allocLocal(inst, field_int_ty); try f.writeCValue(writer, temp_local, .Other); @@ -6033,7 +6033,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value }); const repr_ty = if (ty.isRuntimeFloat()) - mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable + mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable else ty; @@ -6136,7 +6136,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { const operand_mat = try Materialize.start(f, inst, writer, ty, operand); try reap(f, inst, &.{ pl_op.operand, extra.operand }); - const repr_bits = @intCast(u16, ty.abiSize(mod) * 8); + const repr_bits = @as(u16, @intCast(ty.abiSize(mod) * 8)); const is_float = ty.isRuntimeFloat(); const is_128 = repr_bits == 128; const repr_ty = if (is_float) mod.intType(.unsigned, repr_bits) catch unreachable else ty; @@ -6186,7 +6186,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { const ty = ptr_ty.childType(mod); const repr_ty = if (ty.isRuntimeFloat()) - mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable + mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable else ty; @@ -6226,7 +6226,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const repr_ty = if (ty.isRuntimeFloat()) - mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable + mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable else ty; @@ -6574,7 +6574,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("] = "); const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod); - const src_val = try 
mod.intValue(Type.usize, @intCast(u64, mask_elem ^ mask_elem >> 63)); + const src_val = try mod.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63))); try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other); try writer.writeByte('['); @@ -6745,8 +6745,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const ip = &mod.intern_pool; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const inst_ty = f.typeOfIndex(inst); - const len = @intCast(usize, inst_ty.arrayLen(mod)); - const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]); + const len = @as(usize, @intCast(inst_ty.arrayLen(mod))); + const elements = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[ty_pl.payload..][0..len])); const gpa = f.object.dg.gpa; const resolved_elements = try gpa.alloc(CValue, elements.len); defer gpa.free(resolved_elements); @@ -7387,7 +7387,7 @@ fn fmtStringLiteral(str: []const u8, sentinel: ?u8) std.fmt.Formatter(formatStri fn undefPattern(comptime IntType: type) IntType { const int_info = @typeInfo(IntType).Int; const UnsignedType = std.meta.Int(.unsigned, int_info.bits); - return @bitCast(IntType, @as(UnsignedType, (1 << (int_info.bits | 1)) / 3)); + return @as(IntType, @bitCast(@as(UnsignedType, (1 << (int_info.bits | 1)) / 3))); } const FormatIntLiteralContext = struct { @@ -7438,7 +7438,7 @@ fn formatIntLiteral( } else data.val.toBigInt(&int_buf, mod); assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits)); - const c_bits = @intCast(usize, data.cty.byteSize(data.dg.ctypes.set, target) * 8); + const c_bits = @as(usize, @intCast(data.cty.byteSize(data.dg.ctypes.set, target) * 8)); var one_limbs: [BigInt.calcLimbLen(1)]BigIntLimb = undefined; const one = BigInt.Mutable.init(&one_limbs, 1).toConst(); @@ -7471,7 +7471,7 @@ fn formatIntLiteral( const array_data = data.cty.castTag(.array).?.data; break :info .{ .cty = data.dg.indexToCType(array_data.elem_type), - .count = @intCast(usize, array_data.len), + .count = @as(usize, @intCast(array_data.len)), .endian = target.cpu.arch.endian(), .homogeneous = true, }; @@ -7527,7 +7527,7 @@ fn formatIntLiteral( var c_limb_int_info = std.builtin.Type.Int{ .signedness = undefined, - .bits = @intCast(u16, @divExact(c_bits, c_limb_info.count)), + .bits = @as(u16, @intCast(@divExact(c_bits, c_limb_info.count))), }; var c_limb_cty: CType = undefined; @@ -7727,7 +7727,7 @@ fn lowerFnRetTy(ret_ty: Type, mod: *Module) !Type { fn lowersToArray(ty: Type, mod: *Module) bool { return switch (ty.zigTypeTag(mod)) { .Array, .Vector => return true, - else => return ty.isAbiInt(mod) and toCIntBits(@intCast(u32, ty.bitSize(mod))) == null, + else => return ty.isAbiInt(mod) and toCIntBits(@as(u32, @intCast(ty.bitSize(mod)))) == null, }; } @@ -7735,7 +7735,7 @@ fn reap(f: *Function, inst: Air.Inst.Index, operands: []const Air.Inst.Ref) !voi assert(operands.len <= Liveness.bpi - 1); var tomb_bits = f.liveness.getTombBits(inst); for (operands) |operand| { - const dies = @truncate(u1, tomb_bits) != 0; + const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; try die(f, inst, operand); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index c8ce0be380..efff2e557c 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -138,7 +138,7 @@ pub const CType = extern union { pub fn toIndex(self: Tag) Index { assert(!self.hasPayload()); - return @intCast(Index, @intFromEnum(self)); + return @as(Index, @intCast(@intFromEnum(self))); } pub fn 
Type(comptime self: Tag) type { @@ -330,7 +330,7 @@ pub const CType = extern union { store: *const Set, pub fn hash(self: @This(), cty: CType) Map.Hash { - return @truncate(Map.Hash, cty.hash(self.store.*)); + return @as(Map.Hash, @truncate(cty.hash(self.store.*))); } pub fn eql(_: @This(), lhs: CType, rhs: CType, _: usize) bool { return lhs.eql(rhs); @@ -340,7 +340,7 @@ pub const CType = extern union { map: Map = .{}, pub fn indexToCType(self: Set, index: Index) CType { - if (index < Tag.no_payload_count) return initTag(@enumFromInt(Tag, index)); + if (index < Tag.no_payload_count) return initTag(@as(Tag, @enumFromInt(index))); return self.map.keys()[index - Tag.no_payload_count]; } @@ -362,7 +362,7 @@ pub const CType = extern union { return if (self.map.getIndexAdapted( ty, TypeAdapter32{ .kind = kind, .lookup = lookup, .convert = &convert }, - )) |idx| @intCast(Index, Tag.no_payload_count + idx) else null; + )) |idx| @as(Index, @intCast(Tag.no_payload_count + idx)) else null; } }; @@ -376,7 +376,7 @@ pub const CType = extern union { pub fn cTypeToIndex(self: *Promoted, cty: CType) Allocator.Error!Index { const t = cty.tag(); - if (@intFromEnum(t) < Tag.no_payload_count) return @intCast(Index, @intFromEnum(t)); + if (@intFromEnum(t) < Tag.no_payload_count) return @as(Index, @intCast(@intFromEnum(t))); const gop = try self.set.map.getOrPutContext(self.gpa(), cty, .{ .store = &self.set }); if (!gop.found_existing) gop.key_ptr.* = cty; @@ -386,7 +386,7 @@ pub const CType = extern union { assert(cty.eql(key.*)); assert(cty.hash(self.set) == key.hash(self.set)); } - return @intCast(Index, Tag.no_payload_count + gop.index); + return @as(Index, @intCast(Tag.no_payload_count + gop.index)); } pub fn typeToIndex( @@ -424,7 +424,7 @@ pub const CType = extern union { assert(adapter.eql(ty, cty.*)); assert(adapter.hash(ty) == cty.hash(self.set)); } - return @intCast(Index, Tag.no_payload_count + gop.index); + return @as(Index, @intCast(Tag.no_payload_count + gop.index)); } }; @@ -1388,7 +1388,7 @@ pub const CType = extern union { .len = @divExact(abi_size, abi_align), .elem_type = tagFromIntInfo(.{ .signedness = .unsigned, - .bits = @intCast(u16, abi_align * 8), + .bits = @as(u16, @intCast(abi_align * 8)), }).toIndex(), } } }; self.value = .{ .cty = initPayload(&self.storage.seq) }; @@ -1492,7 +1492,7 @@ pub const CType = extern union { if (mod.typeToStruct(ty)) |struct_obj| { try self.initType(struct_obj.backing_int_ty, kind, lookup); } else { - const bits = @intCast(u16, ty.bitSize(mod)); + const bits = @as(u16, @intCast(ty.bitSize(mod))); const int_ty = try mod.intType(.unsigned, bits); try self.initType(int_ty, kind, lookup); } @@ -2299,7 +2299,7 @@ pub const CType = extern union { } pub fn hash(self: @This(), ty: Type) u32 { - return @truncate(u32, self.to64().hash(ty)); + return @as(u32, @truncate(self.to64().hash(ty))); } }; }; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 75b8a19e7d..dd8abb1205 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -592,7 +592,7 @@ pub const Object = struct { llvm_errors[0] = llvm_slice_ty.getUndef(); for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name_nts| { const name = mod.intern_pool.stringToSlice(name_nts); - const str_init = o.context.constString(name.ptr, @intCast(c_uint, name.len), .False); + const str_init = o.context.constString(name.ptr, @as(c_uint, @intCast(name.len)), .False); const str_global = o.llvm_module.addGlobal(str_init.typeOf(), ""); str_global.setInitializer(str_init); 
str_global.setLinkage(.Private); @@ -607,7 +607,7 @@ pub const Object = struct { llvm_error.* = llvm_slice_ty.constNamedStruct(&slice_fields, slice_fields.len); } - const error_name_table_init = llvm_slice_ty.constArray(llvm_errors.ptr, @intCast(c_uint, error_name_list.len)); + const error_name_table_init = llvm_slice_ty.constArray(llvm_errors.ptr, @as(c_uint, @intCast(error_name_list.len))); const error_name_table_global = o.llvm_module.addGlobal(error_name_table_init.typeOf(), ""); error_name_table_global.setInitializer(error_name_table_init); @@ -1027,7 +1027,7 @@ pub const Object = struct { llvm_arg_i += 1; const param_llvm_ty = try o.lowerType(param_ty); - const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); + const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); const int_llvm_ty = o.context.intType(abi_size * 8); const alignment = @max( param_ty.abiAlignment(mod), @@ -1053,7 +1053,7 @@ pub const Object = struct { const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, it.zig_index - 1)) |i| { - if (@truncate(u1, fn_info.noalias_bits >> i) != 0) { + if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) { o.addArgAttr(llvm_func, llvm_arg_i, "noalias"); } } @@ -1083,9 +1083,9 @@ pub const Object = struct { const param_llvm_ty = try o.lowerType(param_ty); const param_alignment = param_ty.abiAlignment(mod); const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target); - const llvm_ty = o.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False); + const llvm_ty = o.context.structType(field_types.ptr, @as(c_uint, @intCast(field_types.len)), .False); for (field_types, 0..) |_, field_i_usize| { - const field_i = @intCast(c_uint, field_i_usize); + const field_i = @as(c_uint, @intCast(field_i_usize)); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; const field_ptr = builder.buildStructGEP(llvm_ty, arg_ptr, field_i, ""); @@ -1289,11 +1289,11 @@ pub const Object = struct { if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); if (self.di_map.get(decl)) |di_node| { if (try decl.isFunction(mod)) { - const di_func = @ptrCast(*llvm.DISubprogram, di_node); + const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node)); const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len); di_func.replaceLinkageName(linkage_name); } else { - const di_global = @ptrCast(*llvm.DIGlobalVariable, di_node); + const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node)); const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len); di_global.replaceLinkageName(linkage_name); } @@ -1315,11 +1315,11 @@ pub const Object = struct { if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport); if (self.di_map.get(decl)) |di_node| { if (try decl.isFunction(mod)) { - const di_func = @ptrCast(*llvm.DISubprogram, di_node); + const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node)); const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len); di_func.replaceLinkageName(linkage_name); } else { - const di_global = @ptrCast(*llvm.DIGlobalVariable, di_node); + const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node)); const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len); di_global.replaceLinkageName(linkage_name); } @@ -1390,7 +1390,7 @@ pub const Object = struct { const gop = try o.di_map.getOrPut(gpa, file); errdefer assert(o.di_map.remove(file)); if (gop.found_existing) { - return 
@ptrCast(*llvm.DIFile, gop.value_ptr.*); + return @as(*llvm.DIFile, @ptrCast(gop.value_ptr.*)); } const dir_path_z = d: { var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined; @@ -1514,7 +1514,7 @@ pub const Object = struct { if (@sizeOf(usize) == @sizeOf(u64)) { enumerators[i] = dib.createEnumerator2( field_name_z, - @intCast(c_uint, bigint.limbs.len), + @as(c_uint, @intCast(bigint.limbs.len)), bigint.limbs.ptr, int_info.bits, int_info.signedness == .unsigned, @@ -1538,7 +1538,7 @@ pub const Object = struct { ty.abiSize(mod) * 8, ty.abiAlignment(mod) * 8, enumerators.ptr, - @intCast(c_int, enumerators.len), + @as(c_int, @intCast(enumerators.len)), try o.lowerDebugType(int_ty, .full), "", ); @@ -1713,7 +1713,7 @@ pub const Object = struct { ty.abiSize(mod) * 8, ty.abiAlignment(mod) * 8, try o.lowerDebugType(ty.childType(mod), .full), - @intCast(i64, ty.arrayLen(mod)), + @as(i64, @intCast(ty.arrayLen(mod))), ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(array_di_ty)); @@ -2018,7 +2018,7 @@ pub const Object = struct { 0, // flags null, // derived from di_fields.items.ptr, - @intCast(c_int, di_fields.items.len), + @as(c_int, @intCast(di_fields.items.len)), 0, // run time lang null, // vtable holder "", // unique id @@ -2105,7 +2105,7 @@ pub const Object = struct { 0, // flags null, // derived from di_fields.items.ptr, - @intCast(c_int, di_fields.items.len), + @as(c_int, @intCast(di_fields.items.len)), 0, // run time lang null, // vtable holder "", // unique id @@ -2217,7 +2217,7 @@ pub const Object = struct { ty.abiAlignment(mod) * 8, // align in bits 0, // flags di_fields.items.ptr, - @intCast(c_int, di_fields.items.len), + @as(c_int, @intCast(di_fields.items.len)), 0, // run time lang "", // unique id ); @@ -2330,7 +2330,7 @@ pub const Object = struct { const fn_di_ty = dib.createSubroutineType( param_di_types.items.ptr, - @intCast(c_int, param_di_types.items.len), + @as(c_int, @intCast(param_di_types.items.len)), 0, ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. @@ -2487,7 +2487,7 @@ pub const Object = struct { } if (fn_info.alignment.toByteUnitsOptional()) |a| { - llvm_fn.setAlignment(@intCast(c_uint, a)); + llvm_fn.setAlignment(@as(c_uint, @intCast(a))); } // Function attributes that are independent of analysis results of the function body. 
@@ -2710,7 +2710,7 @@ pub const Object = struct { if (std.debug.runtime_safety) assert((try elem_ty.onePossibleValue(mod)) == null); const elem_llvm_ty = try o.lowerType(elem_ty); const total_len = t.arrayLen(mod) + @intFromBool(t.sentinel(mod) != null); - return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); + return elem_llvm_ty.arrayType(@as(c_uint, @intCast(total_len))); }, .Vector => { const elem_type = try o.lowerType(t.childType(mod)); @@ -2732,7 +2732,7 @@ pub const Object = struct { }; const offset = child_ty.abiSize(mod) + 1; const abi_size = t.abiSize(mod); - const padding = @intCast(c_uint, abi_size - offset); + const padding = @as(c_uint, @intCast(abi_size - offset)); if (padding == 0) { return o.context.structType(&fields_buf, 2, .False); } @@ -2761,7 +2761,7 @@ pub const Object = struct { std.mem.alignForward(u64, error_size, payload_align) + payload_size; const abi_size = std.mem.alignForward(u64, payload_end, error_align); - const padding = @intCast(c_uint, abi_size - payload_end); + const padding = @as(c_uint, @intCast(abi_size - payload_end)); if (padding == 0) { return o.context.structType(&fields_buf, 2, .False); } @@ -2774,7 +2774,7 @@ pub const Object = struct { std.mem.alignForward(u64, payload_size, error_align) + error_size; const abi_size = std.mem.alignForward(u64, error_end, payload_align); - const padding = @intCast(c_uint, abi_size - error_end); + const padding = @as(c_uint, @intCast(abi_size - error_end)); if (padding == 0) { return o.context.structType(&fields_buf, 2, .False); } @@ -2811,7 +2811,7 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); try llvm_field_types.append(gpa, llvm_array_ty); } const field_llvm_ty = try o.lowerType(field_ty.toType()); @@ -2824,14 +2824,14 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); try llvm_field_types.append(gpa, llvm_array_ty); } } llvm_struct_ty.structSetBody( llvm_field_types.items.ptr, - @intCast(c_uint, llvm_field_types.items.len), + @as(c_uint, @intCast(llvm_field_types.items.len)), .False, ); @@ -2880,7 +2880,7 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); try llvm_field_types.append(gpa, llvm_array_ty); } const field_llvm_ty = try o.lowerType(field.ty); @@ -2893,14 +2893,14 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); try llvm_field_types.append(gpa, llvm_array_ty); } } llvm_struct_ty.structSetBody( llvm_field_types.items.ptr, - @intCast(c_uint, llvm_field_types.items.len), + @as(c_uint, @intCast(llvm_field_types.items.len)), llvm.Bool.fromBool(any_underaligned_fields), ); @@ -2914,7 +2914,7 @@ pub 
const Object = struct { const union_obj = mod.typeToUnion(t).?; if (union_obj.layout == .Packed) { - const bitsize = @intCast(c_uint, t.bitSize(mod)); + const bitsize = @as(c_uint, @intCast(t.bitSize(mod))); const int_llvm_ty = o.context.intType(bitsize); gop.value_ptr.* = int_llvm_ty; return int_llvm_ty; @@ -2939,9 +2939,9 @@ pub const Object = struct { break :t llvm_aligned_field_ty; } const padding_len = if (layout.tag_size == 0) - @intCast(c_uint, layout.abi_size - layout.most_aligned_field_size) + @as(c_uint, @intCast(layout.abi_size - layout.most_aligned_field_size)) else - @intCast(c_uint, layout.payload_size - layout.most_aligned_field_size); + @as(c_uint, @intCast(layout.payload_size - layout.most_aligned_field_size)); const fields: [2]*llvm.Type = .{ llvm_aligned_field_ty, o.context.intType(8).arrayType(padding_len), @@ -3020,7 +3020,7 @@ pub const Object = struct { }, .abi_sized_int => { const param_ty = fn_info.param_types[it.zig_index - 1].toType(); - const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); + const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); try llvm_params.append(o.context.intType(abi_size * 8)); }, .slice => { @@ -3045,7 +3045,7 @@ pub const Object = struct { .float_array => |count| { const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?); - const field_count = @intCast(c_uint, count); + const field_count = @as(c_uint, @intCast(count)); const arr_ty = float_ty.arrayType(field_count); try llvm_params.append(arr_ty); }, @@ -3059,7 +3059,7 @@ pub const Object = struct { return llvm.functionType( llvm_ret_ty, llvm_params.items.ptr, - @intCast(c_uint, llvm_params.items.len), + @as(c_uint, @intCast(llvm_params.items.len)), llvm.Bool.fromBool(fn_info.is_var_args), ); } @@ -3219,7 +3219,7 @@ pub const Object = struct { } if (@sizeOf(usize) == @sizeOf(u64)) { break :v llvm_type.constIntOfArbitraryPrecision( - @intCast(c_uint, bigint.limbs.len), + @as(c_uint, @intCast(bigint.limbs.len)), bigint.limbs.ptr, ); } @@ -3234,19 +3234,19 @@ pub const Object = struct { const llvm_ty = try o.lowerType(tv.ty); switch (tv.ty.floatBits(target)) { 16 => { - const repr = @bitCast(u16, tv.val.toFloat(f16, mod)); + const repr = @as(u16, @bitCast(tv.val.toFloat(f16, mod))); const llvm_i16 = o.context.intType(16); const int = llvm_i16.constInt(repr, .False); return int.constBitCast(llvm_ty); }, 32 => { - const repr = @bitCast(u32, tv.val.toFloat(f32, mod)); + const repr = @as(u32, @bitCast(tv.val.toFloat(f32, mod))); const llvm_i32 = o.context.intType(32); const int = llvm_i32.constInt(repr, .False); return int.constBitCast(llvm_ty); }, 64 => { - const repr = @bitCast(u64, tv.val.toFloat(f64, mod)); + const repr = @as(u64, @bitCast(tv.val.toFloat(f64, mod))); const llvm_i64 = o.context.intType(64); const int = llvm_i64.constInt(repr, .False); return int.constBitCast(llvm_ty); @@ -3265,7 +3265,7 @@ pub const Object = struct { } }, 128 => { - var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod)); + var buf: [2]u64 = @as([2]u64, @bitCast(tv.val.toFloat(f128, mod))); // LLVM seems to require that the lower half of the f128 be placed first // in the buffer. 
if (native_endian == .Big) { @@ -3343,7 +3343,7 @@ pub const Object = struct { .array_type => switch (aggregate.storage) { .bytes => |bytes| return o.context.constString( bytes.ptr, - @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), + @as(c_uint, @intCast(tv.ty.arrayLenIncludingSentinel(mod))), .True, // Don't null terminate. Bytes has the sentinel, if any. ), .elems => |elem_vals| { @@ -3358,21 +3358,21 @@ pub const Object = struct { if (need_unnamed) { return o.context.constStruct( llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + @as(c_uint, @intCast(llvm_elems.len)), .True, ); } else { const llvm_elem_ty = try o.lowerType(elem_ty); return llvm_elem_ty.constArray( llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + @as(c_uint, @intCast(llvm_elems.len)), ); } }, .repeated_elem => |val| { const elem_ty = tv.ty.childType(mod); const sentinel = tv.ty.sentinel(mod); - const len = @intCast(usize, tv.ty.arrayLen(mod)); + const len = @as(usize, @intCast(tv.ty.arrayLen(mod))); const len_including_sent = len + @intFromBool(sentinel != null); const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); defer gpa.free(llvm_elems); @@ -3393,14 +3393,14 @@ pub const Object = struct { if (need_unnamed) { return o.context.constStruct( llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + @as(c_uint, @intCast(llvm_elems.len)), .True, ); } else { const llvm_elem_ty = try o.lowerType(elem_ty); return llvm_elem_ty.constArray( llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + @as(c_uint, @intCast(llvm_elems.len)), ); } }, @@ -3425,7 +3425,7 @@ pub const Object = struct { } return llvm.constVector( llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + @as(c_uint, @intCast(llvm_elems.len)), ); }, .anon_struct_type => |tuple| { @@ -3450,7 +3450,7 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); // TODO make this and all other padding elsewhere in debug // builds be 0xaa not undef. 
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); @@ -3472,7 +3472,7 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); } } @@ -3480,14 +3480,14 @@ pub const Object = struct { if (need_unnamed) { return o.context.constStruct( llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), + @as(c_uint, @intCast(llvm_fields.items.len)), .False, ); } else { const llvm_struct_ty = try o.lowerType(tv.ty); return llvm_struct_ty.constNamedStruct( llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), + @as(c_uint, @intCast(llvm_fields.items.len)), ); } }, @@ -3498,7 +3498,7 @@ pub const Object = struct { if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = o.context.intType(@intCast(c_uint, big_bits)); + const int_llvm_ty = o.context.intType(@as(c_uint, @intCast(big_bits))); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); var running_int: *llvm.Value = int_llvm_ty.constNull(); @@ -3510,7 +3510,7 @@ pub const Object = struct { .ty = field.ty, .val = try tv.val.fieldValue(mod, i), }); - const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); const small_int_ty = o.context.intType(ty_bit_size); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) non_int_val.constPtrToInt(small_int_ty) @@ -3547,7 +3547,7 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); // TODO make this and all other padding elsewhere in debug // builds be 0xaa not undef. 
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); @@ -3569,7 +3569,7 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); } } @@ -3577,13 +3577,13 @@ pub const Object = struct { if (need_unnamed) { return o.context.constStruct( llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), + @as(c_uint, @intCast(llvm_fields.items.len)), .False, ); } else { return llvm_struct_ty.constNamedStruct( llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), + @as(c_uint, @intCast(llvm_fields.items.len)), ); } }, @@ -3616,7 +3616,7 @@ pub const Object = struct { if (!field_ty.hasRuntimeBits(mod)) return llvm_union_ty.constNull(); const non_int_val = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val }); - const ty_bit_size = @intCast(u16, field_ty.bitSize(mod)); + const ty_bit_size = @as(u16, @intCast(field_ty.bitSize(mod))); const small_int_ty = o.context.intType(ty_bit_size); const small_int_val = if (field_ty.isPtrAtRuntime(mod)) non_int_val.constPtrToInt(small_int_ty) @@ -3632,7 +3632,7 @@ pub const Object = struct { var need_unnamed: bool = layout.most_aligned_field != field_index; const payload = p: { if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const padding_len = @intCast(c_uint, layout.payload_size); + const padding_len = @as(c_uint, @intCast(layout.payload_size)); break :p o.context.intType(8).arrayType(padding_len).getUndef(); } const field = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val }); @@ -3641,7 +3641,7 @@ pub const Object = struct { if (field_size == layout.payload_size) { break :p field; } - const padding_len = @intCast(c_uint, layout.payload_size - field_size); + const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size)); const fields: [2]*llvm.Value = .{ field, o.context.intType(8).arrayType(padding_len).getUndef(), }; @@ -3706,7 +3706,7 @@ pub const Object = struct { } if (@sizeOf(usize) == @sizeOf(u64)) { break :v llvm_type.constIntOfArbitraryPrecision( - @intCast(c_uint, bigint.limbs.len), + @as(c_uint, @intCast(bigint.limbs.len)), bigint.limbs.ptr, ); } @@ -3799,7 +3799,7 @@ pub const Object = struct { const parent_llvm_ptr = try o.lowerParentPtr(field_ptr.base.toValue(), byte_aligned); const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); - const field_index = @intCast(u32, field_ptr.index); + const field_index = @as(u32, @intCast(field_ptr.index)); const llvm_u32 = o.context.intType(32); switch (parent_ty.zigTypeTag(mod)) { .Union => { @@ -3834,7 +3834,7 @@ pub const Object = struct { var b: usize = 0; for (parent_ty.structFields(mod).values()[0..field_index]) |field| { if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - b += @intCast(usize, field.ty.bitSize(mod)); + b += @as(usize, @intCast(field.ty.bitSize(mod))); } break :b b; }; @@ -3992,9 +3992,9 @@ pub const Object = struct { ) void { const llvm_attr = o.context.createStringAttribute( name.ptr, - @intCast(c_uint, name.len), + @as(c_uint, @intCast(name.len)), value.ptr, - @intCast(c_uint, value.len), + @as(c_uint, @intCast(value.len)), ); val.addAttributeAtIndex(index, llvm_attr); } @@ -4026,14 +4026,14 @@ pub const Object = struct { .Enum => 
ty.intTagType(mod), .Float => { if (!is_rmw_xchg) return null; - return o.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8)); + return o.context.intType(@as(c_uint, @intCast(ty.abiSize(mod) * 8))); }, .Bool => return o.context.intType(8), else => return null, }; const bit_count = int_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) { - return o.context.intType(@intCast(c_uint, int_ty.abiSize(mod) * 8)); + return o.context.intType(@as(c_uint, @intCast(int_ty.abiSize(mod) * 8))); } else { return null; } @@ -4051,7 +4051,7 @@ pub const Object = struct { if (param_ty.isPtrAtRuntime(mod)) { const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, param_index)) |i| { - if (@truncate(u1, fn_info.noalias_bits >> i) != 0) { + if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) { o.addArgAttr(llvm_fn, llvm_arg_i, "noalias"); } } @@ -4550,7 +4550,7 @@ pub const FuncGen = struct { fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !?*llvm.Value { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); const o = self.dg.object; const mod = o.module; const callee_ty = self.typeOf(pl_op.operand); @@ -4638,7 +4638,7 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); + const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); const int_llvm_ty = self.context.intType(abi_size * 8); if (isByRef(param_ty, mod)) { @@ -4683,10 +4683,10 @@ pub const FuncGen = struct { break :p p; }; - const llvm_ty = self.context.structType(llvm_types.ptr, @intCast(c_uint, llvm_types.len), .False); + const llvm_ty = self.context.structType(llvm_types.ptr, @as(c_uint, @intCast(llvm_types.len)), .False); try llvm_args.ensureUnusedCapacity(it.llvm_types_len); for (llvm_types, 0..) 
|field_ty, i_usize| { - const i = @intCast(c_uint, i_usize); + const i = @as(c_uint, @intCast(i_usize)); const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, ""); const load_inst = self.builder.buildLoad(field_ty, field_ptr, ""); load_inst.setAlignment(target.ptrBitWidth() / 8); @@ -4742,7 +4742,7 @@ pub const FuncGen = struct { try o.lowerType(zig_fn_ty), llvm_fn, llvm_args.items.ptr, - @intCast(c_uint, llvm_args.items.len), + @as(c_uint, @intCast(llvm_args.items.len)), toLlvmCallConv(fn_info.cc, target), attr, "", @@ -4788,7 +4788,7 @@ pub const FuncGen = struct { const llvm_arg_i = it.llvm_index - 2; if (math.cast(u5, it.zig_index - 1)) |i| { - if (@truncate(u1, fn_info.noalias_bits >> i) != 0) { + if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) { o.addArgAttr(call, llvm_arg_i, "noalias"); } } @@ -5213,7 +5213,7 @@ pub const FuncGen = struct { phi_node.addIncoming( breaks.items(.val).ptr, breaks.items(.bb).ptr, - @intCast(c_uint, breaks.len), + @as(c_uint, @intCast(breaks.len)), ); return phi_node; } @@ -5379,7 +5379,7 @@ pub const FuncGen = struct { while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len])); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + case.data.items_len + case_body.len; @@ -5479,7 +5479,7 @@ pub const FuncGen = struct { } } - const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(mod)); + const operand_bits = @as(u16, @intCast(operand_scalar_ty.bitSize(mod))); const rt_int_bits = compilerRtIntBits(operand_bits); const rt_int_ty = self.context.intType(rt_int_bits); var extended = e: { @@ -5540,7 +5540,7 @@ pub const FuncGen = struct { } } - const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(mod))); + const rt_int_bits = compilerRtIntBits(@as(u16, @intCast(dest_scalar_ty.bitSize(mod)))); const ret_ty = self.context.intType(rt_int_bits); const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: { // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard @@ -5806,12 +5806,12 @@ pub const FuncGen = struct { const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); const elem_llvm_ty = try o.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const elem_bits = @intCast(c_uint, field_ty.bitSize(mod)); + const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const elem_bits = @intCast(c_uint, field_ty.bitSize(mod)); + const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); @@ -5828,12 +5828,12 @@ pub const FuncGen = struct { const containing_int = struct_llvm_val; const elem_llvm_ty = try o.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { 
- const elem_bits = @intCast(c_uint, field_ty.bitSize(mod)); + const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const elem_bits = @intCast(c_uint, field_ty.bitSize(mod)); + const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, ""); return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); @@ -5924,8 +5924,8 @@ pub const FuncGen = struct { fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) ?*llvm.Value { const di_scope = self.di_scope orelse return null; const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; - self.prev_dbg_line = @intCast(c_uint, self.base_line + dbg_stmt.line + 1); - self.prev_dbg_column = @intCast(c_uint, dbg_stmt.column + 1); + self.prev_dbg_line = @as(c_uint, @intCast(self.base_line + dbg_stmt.line + 1)); + self.prev_dbg_column = @as(c_uint, @intCast(dbg_stmt.column + 1)); const inlined_at = if (self.dbg_inlined.items.len > 0) self.dbg_inlined.items[self.dbg_inlined.items.len - 1].loc else @@ -5949,7 +5949,7 @@ pub const FuncGen = struct { const cur_debug_location = self.builder.getCurrentDebugLocation2(); try self.dbg_inlined.append(self.gpa, .{ - .loc = @ptrCast(*llvm.DILocation, cur_debug_location), + .loc = @as(*llvm.DILocation, @ptrCast(cur_debug_location)), .scope = self.di_scope.?, .base_line = self.base_line, }); @@ -6107,13 +6107,13 @@ pub const FuncGen = struct { const o = self.dg.object; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); - const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; + const clobbers_len = @as(u31, @truncate(extra.data.flags)); var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; var llvm_constraints: std.ArrayListUnmanaged(u8) = .{}; @@ -6390,7 +6390,7 @@ pub const FuncGen = struct { 1 => llvm_ret_types[0], else => self.context.structType( llvm_ret_types.ptr, - @intCast(c_uint, return_count), + @as(c_uint, @intCast(return_count)), .False, ), }; @@ -6398,7 +6398,7 @@ pub const FuncGen = struct { const llvm_fn_ty = llvm.functionType( ret_llvm_ty, llvm_param_types.ptr, - @intCast(c_uint, param_count), + @as(c_uint, @intCast(param_count)), .False, ); const asm_fn = llvm.getInlineAsm( @@ -6416,7 +6416,7 @@ pub const FuncGen = struct { llvm_fn_ty, asm_fn, llvm_param_values.ptr, - @intCast(c_uint, param_count), + @as(c_uint, @intCast(param_count)), .C, .Auto, "", @@ -6433,7 +6433,7 @@ pub const FuncGen = struct { if (llvm_ret_indirect[i]) continue; const output_value = if (return_count > 1) b: { - break :b self.builder.buildExtractValue(call, @intCast(c_uint, llvm_ret_i), 
""); + break :b self.builder.buildExtractValue(call, @as(c_uint, @intCast(llvm_ret_i)), ""); } else call; if (output != .none) { @@ -7315,7 +7315,7 @@ pub const FuncGen = struct { result_vector: *llvm.Value, vector_len: usize, ) !*llvm.Value { - const args_len = @intCast(c_uint, args_vectors.len); + const args_len = @as(c_uint, @intCast(args_vectors.len)); const llvm_i32 = self.context.intType(32); assert(args_len <= 3); @@ -7345,7 +7345,7 @@ pub const FuncGen = struct { const alias = o.llvm_module.getNamedGlobalAlias(fn_name.ptr, fn_name.len); break :b if (alias) |a| a.getAliasee() else null; } orelse b: { - const params_len = @intCast(c_uint, param_types.len); + const params_len = @as(c_uint, @intCast(param_types.len)); const fn_type = llvm.functionType(return_type, param_types.ptr, params_len, .False); const f = o.llvm_module.addFunction(fn_name, fn_type); break :b f; @@ -8319,8 +8319,8 @@ pub const FuncGen = struct { return null; const ordering = toLlvmAtomicOrdering(atomic_load.order); const opt_abi_llvm_ty = o.getAtomicAbiType(elem_ty, false); - const ptr_alignment = @intCast(u32, ptr_info.flags.alignment.toByteUnitsOptional() orelse - ptr_info.child.toType().abiAlignment(mod)); + const ptr_alignment = @as(u32, @intCast(ptr_info.flags.alignment.toByteUnitsOptional() orelse + ptr_info.child.toType().abiAlignment(mod))); const ptr_volatile = llvm.Bool.fromBool(ptr_info.flags.is_volatile); const elem_llvm_ty = try o.lowerType(elem_ty); @@ -8696,10 +8696,10 @@ pub const FuncGen = struct { const valid_block = self.context.appendBasicBlock(self.llvm_func, "Valid"); const invalid_block = self.context.appendBasicBlock(self.llvm_func, "Invalid"); const end_block = self.context.appendBasicBlock(self.llvm_func, "End"); - const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len)); + const switch_instr = self.builder.buildSwitch(operand, invalid_block, @as(c_uint, @intCast(names.len))); for (names) |name| { - const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); + const err_int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?)); const this_tag_int_value = try o.lowerValue(.{ .ty = Type.err_int, .val = try mod.intValue(Type.err_int, err_int), @@ -8779,10 +8779,10 @@ pub const FuncGen = struct { const named_block = self.context.appendBasicBlock(fn_val, "Named"); const unnamed_block = self.context.appendBasicBlock(fn_val, "Unnamed"); const tag_int_value = fn_val.getParam(0); - const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, enum_type.names.len)); + const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @as(c_uint, @intCast(enum_type.names.len))); for (enum_type.names, 0..) |_, field_index_usize| { - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); const this_tag_int_value = int: { break :int try o.lowerValue(.{ .ty = enum_ty, @@ -8855,16 +8855,16 @@ pub const FuncGen = struct { const bad_value_block = self.context.appendBasicBlock(fn_val, "BadValue"); const tag_int_value = fn_val.getParam(0); - const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, enum_type.names.len)); + const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @as(c_uint, @intCast(enum_type.names.len))); const array_ptr_indices = [_]*llvm.Value{ usize_llvm_ty.constNull(), usize_llvm_ty.constNull(), }; for (enum_type.names, 0..) 
|name_ip, field_index_usize| { - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); const name = mod.intern_pool.stringToSlice(name_ip); - const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False); + const str_init = self.context.constString(name.ptr, @as(c_uint, @intCast(name.len)), .False); const str_init_llvm_ty = str_init.typeOf(); const str_global = o.llvm_module.addGlobal(str_init_llvm_ty, ""); str_global.setInitializer(str_init); @@ -8986,7 +8986,7 @@ pub const FuncGen = struct { val.* = llvm_i32.getUndef(); } else { const int = elem.toSignedInt(mod); - const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len); + const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int + a_len)); val.* = llvm_i32.constInt(unsigned, .False); } } @@ -9150,8 +9150,8 @@ pub const FuncGen = struct { const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const result_ty = self.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen(mod)); - const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); + const len = @as(usize, @intCast(result_ty.arrayLen(mod))); + const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); const llvm_result_ty = try o.lowerType(result_ty); switch (result_ty.zigTypeTag(mod)) { @@ -9171,7 +9171,7 @@ pub const FuncGen = struct { const struct_obj = mod.typeToStruct(result_ty).?; assert(struct_obj.haveLayout()); const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); + const int_llvm_ty = self.context.intType(@as(c_uint, @intCast(big_bits))); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); var running_int: *llvm.Value = int_llvm_ty.constNull(); @@ -9181,7 +9181,7 @@ pub const FuncGen = struct { if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const non_int_val = try self.resolveInst(elem); - const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); const small_int_ty = self.context.intType(ty_bit_size); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") @@ -9251,7 +9251,7 @@ pub const FuncGen = struct { for (elements, 0..) 
|elem, i| { const indices: [2]*llvm.Value = .{ llvm_usize.constNull(), - llvm_usize.constInt(@intCast(c_uint, i), .False), + llvm_usize.constInt(@as(c_uint, @intCast(i)), .False), }; const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); const llvm_elem = try self.resolveInst(elem); @@ -9260,7 +9260,7 @@ pub const FuncGen = struct { if (array_info.sentinel) |sent_val| { const indices: [2]*llvm.Value = .{ llvm_usize.constNull(), - llvm_usize.constInt(@intCast(c_uint, array_info.len), .False), + llvm_usize.constInt(@as(c_uint, @intCast(array_info.len)), .False), }; const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); const llvm_elem = try self.resolveValue(.{ @@ -9289,10 +9289,10 @@ pub const FuncGen = struct { if (union_obj.layout == .Packed) { const big_bits = union_ty.bitSize(mod); - const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); + const int_llvm_ty = self.context.intType(@as(c_uint, @intCast(big_bits))); const field = union_obj.fields.values()[extra.field_index]; const non_int_val = try self.resolveInst(extra.init); - const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); const small_int_ty = self.context.intType(ty_bit_size); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") @@ -9332,13 +9332,13 @@ pub const FuncGen = struct { const llvm_union_ty = t: { const payload = p: { if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) { - const padding_len = @intCast(c_uint, layout.payload_size); + const padding_len = @as(c_uint, @intCast(layout.payload_size)); break :p self.context.intType(8).arrayType(padding_len); } if (field_size == layout.payload_size) { break :p field_llvm_ty; } - const padding_len = @intCast(c_uint, layout.payload_size - field_size); + const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size)); const fields: [2]*llvm.Type = .{ field_llvm_ty, self.context.intType(8).arrayType(padding_len), }; @@ -9766,8 +9766,8 @@ pub const FuncGen = struct { const elem_ty = info.child.toType(); if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - const ptr_alignment = @intCast(u32, info.flags.alignment.toByteUnitsOptional() orelse - elem_ty.abiAlignment(mod)); + const ptr_alignment = @as(u32, @intCast(info.flags.alignment.toByteUnitsOptional() orelse + elem_ty.abiAlignment(mod))); const ptr_volatile = llvm.Bool.fromBool(info.flags.is_volatile); assert(info.flags.vector_index != .runtime); @@ -9799,7 +9799,7 @@ pub const FuncGen = struct { containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod)); + const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod))); const shift_amt = containing_int.typeOf().constInt(info.packed_offset.bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); const elem_llvm_ty = try o.lowerType(elem_ty); @@ -9872,7 +9872,7 @@ pub const FuncGen = struct { assert(ordering == .NotAtomic); containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod)); + const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod))); const containing_int_ty = containing_int.typeOf(); const shift_amt = 
containing_int_ty.constInt(info.packed_offset.bit_offset, .False); // Convert to equally-sized integer type in order to perform the bit @@ -9945,7 +9945,7 @@ pub const FuncGen = struct { if (!target_util.hasValgrindSupport(target)) return default_value; const usize_llvm_ty = fg.context.intType(target.ptrBitWidth()); - const usize_alignment = @intCast(c_uint, Type.usize.abiSize(mod)); + const usize_alignment = @as(c_uint, @intCast(Type.usize.abiSize(mod))); const array_llvm_ty = usize_llvm_ty.arrayType(6); const array_ptr = fg.valgrind_client_request_array orelse a: { @@ -9957,7 +9957,7 @@ pub const FuncGen = struct { const zero = usize_llvm_ty.constInt(0, .False); for (array_elements, 0..) |elem, i| { const indexes = [_]*llvm.Value{ - zero, usize_llvm_ty.constInt(@intCast(c_uint, i), .False), + zero, usize_llvm_ty.constInt(@as(c_uint, @intCast(i)), .False), }; const elem_ptr = fg.builder.buildInBoundsGEP(array_llvm_ty, array_ptr, &indexes, indexes.len, ""); const store_inst = fg.builder.buildStore(elem, elem_ptr); @@ -10530,7 +10530,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { assert(classes[0] == .direct and classes[1] == .none); const scalar_type = wasm_c_abi.scalarType(return_type, mod); const abi_size = scalar_type.abiSize(mod); - return o.context.intType(@intCast(c_uint, abi_size * 8)); + return o.context.intType(@as(c_uint, @intCast(abi_size * 8))); }, .aarch64, .aarch64_be => { switch (aarch64_c_abi.classifyType(return_type, mod)) { @@ -10539,7 +10539,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { .byval => return o.lowerType(return_type), .integer => { const bit_size = return_type.bitSize(mod); - return o.context.intType(@intCast(c_uint, bit_size)); + return o.context.intType(@as(c_uint, @intCast(bit_size))); }, .double_integer => return o.context.intType(64).arrayType(2), } @@ -10560,7 +10560,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { .memory => return o.context.voidType(), .integer => { const bit_size = return_type.bitSize(mod); - return o.context.intType(@intCast(c_uint, bit_size)); + return o.context.intType(@as(c_uint, @intCast(bit_size))); }, .double_integer => { var llvm_types_buffer: [2]*llvm.Type = .{ @@ -10598,7 +10598,7 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { return o.lowerType(return_type); } else { const abi_size = return_type.abiSize(mod); - return o.context.intType(@intCast(c_uint, abi_size * 8)); + return o.context.intType(@as(c_uint, @intCast(abi_size * 8))); } }, .win_i128 => return o.context.intType(64).vectorType(2), @@ -10656,7 +10656,7 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type } if (classes[0] == .integer and classes[1] == .none) { const abi_size = return_type.abiSize(mod); - return o.context.intType(@intCast(c_uint, abi_size * 8)); + return o.context.intType(@as(c_uint, @intCast(abi_size * 8))); } return o.context.structType(&llvm_types_buffer, llvm_types_index, .False); } @@ -11145,28 +11145,28 @@ const AnnotatedDITypePtr = enum(usize) { fn initFwd(di_type: *llvm.DIType) AnnotatedDITypePtr { const addr = @intFromPtr(di_type); - assert(@truncate(u1, addr) == 0); - return @enumFromInt(AnnotatedDITypePtr, addr | 1); + assert(@as(u1, @truncate(addr)) == 0); + return @as(AnnotatedDITypePtr, @enumFromInt(addr | 1)); } fn initFull(di_type: *llvm.DIType) AnnotatedDITypePtr { const addr = @intFromPtr(di_type); - return @enumFromInt(AnnotatedDITypePtr, addr); + return 
@as(AnnotatedDITypePtr, @enumFromInt(addr)); } fn init(di_type: *llvm.DIType, resolve: Object.DebugResolveStatus) AnnotatedDITypePtr { const addr = @intFromPtr(di_type); const bit = @intFromBool(resolve == .fwd); - return @enumFromInt(AnnotatedDITypePtr, addr | bit); + return @as(AnnotatedDITypePtr, @enumFromInt(addr | bit)); } fn toDIType(self: AnnotatedDITypePtr) *llvm.DIType { const fixed_addr = @intFromEnum(self) & ~@as(usize, 1); - return @ptrFromInt(*llvm.DIType, fixed_addr); + return @as(*llvm.DIType, @ptrFromInt(fixed_addr)); } fn isFwdOnly(self: AnnotatedDITypePtr) bool { - return @truncate(u1, @intFromEnum(self)) != 0; + return @as(u1, @truncate(@intFromEnum(self))) != 0; } }; diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index a8249a870f..b093588e80 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -8,7 +8,7 @@ pub const Bool = enum(c_int) { _, pub fn fromBool(b: bool) Bool { - return @enumFromInt(Bool, @intFromBool(b)); + return @as(Bool, @enumFromInt(@intFromBool(b))); } pub fn toBool(b: Bool) bool { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index d81ca9a015..220909476f 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -466,7 +466,7 @@ pub const DeclGen = struct { unused.* = undef; } - const word = @bitCast(Word, self.partial_word.buffer); + const word = @as(Word, @bitCast(self.partial_word.buffer)); const result_id = try self.dg.spv.constInt(self.u32_ty_ref, word); try self.members.append(self.u32_ty_ref); try self.initializers.append(result_id); @@ -482,7 +482,7 @@ pub const DeclGen = struct { } fn addUndef(self: *@This(), amt: u64) !void { - for (0..@intCast(usize, amt)) |_| { + for (0..@as(usize, @intCast(amt))) |_| { try self.addByte(undef); } } @@ -539,13 +539,13 @@ pub const DeclGen = struct { const mod = self.dg.module; const int_info = ty.intInfo(mod); const int_bits = switch (int_info.signedness) { - .signed => @bitCast(u64, val.toSignedInt(mod)), + .signed => @as(u64, @bitCast(val.toSignedInt(mod))), .unsigned => val.toUnsignedInt(mod), }; // TODO: Swap endianess if the compiler is big endian. 
const len = ty.abiSize(mod); - try self.addBytes(std.mem.asBytes(&int_bits)[0..@intCast(usize, len)]); + try self.addBytes(std.mem.asBytes(&int_bits)[0..@as(usize, @intCast(len))]); } fn addFloat(self: *@This(), ty: Type, val: Value) !void { @@ -557,15 +557,15 @@ pub const DeclGen = struct { switch (ty.floatBits(target)) { 16 => { const float_bits = val.toFloat(f16, mod); - try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]); + try self.addBytes(std.mem.asBytes(&float_bits)[0..@as(usize, @intCast(len))]); }, 32 => { const float_bits = val.toFloat(f32, mod); - try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]); + try self.addBytes(std.mem.asBytes(&float_bits)[0..@as(usize, @intCast(len))]); }, 64 => { const float_bits = val.toFloat(f64, mod); - try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]); + try self.addBytes(std.mem.asBytes(&float_bits)[0..@as(usize, @intCast(len))]); }, else => unreachable, } @@ -664,7 +664,7 @@ pub const DeclGen = struct { .int => try self.addInt(ty, val), .err => |err| { const int = try mod.getErrorValue(err.name); - try self.addConstInt(u16, @intCast(u16, int)); + try self.addConstInt(u16, @as(u16, @intCast(int))); }, .error_union => |error_union| { const payload_ty = ty.errorUnionPayload(mod); @@ -755,10 +755,10 @@ pub const DeclGen = struct { switch (aggregate.storage) { .bytes => |bytes| try self.addBytes(bytes), .elems, .repeated_elem => { - for (0..@intCast(usize, array_type.len)) |i| { + for (0..@as(usize, @intCast(array_type.len))) |i| { try self.lower(elem_ty, switch (aggregate.storage) { .bytes => unreachable, - .elems => |elem_vals| elem_vals[@intCast(usize, i)].toValue(), + .elems => |elem_vals| elem_vals[@as(usize, @intCast(i))].toValue(), .repeated_elem => |elem_val| elem_val.toValue(), }); } @@ -1132,7 +1132,7 @@ pub const DeclGen = struct { const payload_padding_len = layout.payload_size - active_field_size; if (payload_padding_len != 0) { - const payload_padding_ty_ref = try self.spv.arrayType(@intCast(u32, payload_padding_len), u8_ty_ref); + const payload_padding_ty_ref = try self.spv.arrayType(@as(u32, @intCast(payload_padding_len)), u8_ty_ref); member_types.appendAssumeCapacity(payload_padding_ty_ref); member_names.appendAssumeCapacity(try self.spv.resolveString("payload_padding")); } @@ -1259,7 +1259,7 @@ pub const DeclGen = struct { return try self.spv.resolve(.{ .vector_type = .{ .component_type = try self.resolveType(ty.childType(mod), repr), - .component_count = @intCast(u32, ty.vectorLen(mod)), + .component_count = @as(u32, @intCast(ty.vectorLen(mod))), } }); }, .Struct => { @@ -1588,7 +1588,7 @@ pub const DeclGen = struct { init_val, actual_storage_class, final_storage_class == .Generic, - @intCast(u32, decl.alignment.toByteUnits(0)), + @as(u32, @intCast(decl.alignment.toByteUnits(0))), ); } } @@ -1856,7 +1856,7 @@ pub const DeclGen = struct { } fn maskStrangeInt(self: *DeclGen, ty_ref: CacheRef, value_id: IdRef, bits: u16) !IdRef { - const mask_value = if (bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @intCast(u6, bits)) - 1; + const mask_value = if (bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @as(u6, @intCast(bits))) - 1; const result_id = self.spv.allocId(); const mask_id = try self.spv.constInt(ty_ref, mask_value); try self.func.body.emit(self.spv.gpa, .OpBitwiseAnd, .{ @@ -2063,7 +2063,7 @@ pub const DeclGen = struct { self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF); } else { const int = elem.toSignedInt(mod); - const unsigned = if 
(int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len); + const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int + a_len)); self.func.body.writeOperand(spec.LiteralInteger, unsigned); } } @@ -2689,7 +2689,7 @@ pub const DeclGen = struct { // are not allowed to be created from a phi node, and throw an error for those. const result_type_id = try self.resolveTypeId(ty); - try self.func.body.emitRaw(self.spv.gpa, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent... + try self.func.body.emitRaw(self.spv.gpa, .OpPhi, 2 + @as(u16, @intCast(incoming_blocks.items.len * 2))); // result type + result + variable/parent... self.func.body.writeOperand(spec.IdResultType, result_type_id); self.func.body.writeOperand(spec.IdRef, result_id); @@ -3105,7 +3105,7 @@ pub const DeclGen = struct { while (case_i < num_cases) : (case_i += 1) { // SPIR-V needs a literal here, which' width depends on the case condition. const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len])); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + case.data.items_len + case_body.len; @@ -3116,7 +3116,7 @@ pub const DeclGen = struct { return self.todo("switch on runtime value???", .{}); }; const int_val = switch (cond_ty.zigTypeTag(mod)) { - .Int => if (cond_ty.isSignedInt(mod)) @bitCast(u64, value.toSignedInt(mod)) else value.toUnsignedInt(mod), + .Int => if (cond_ty.isSignedInt(mod)) @as(u64, @bitCast(value.toSignedInt(mod))) else value.toUnsignedInt(mod), .Enum => blk: { // TODO: figure out of cond_ty is correct (something with enum literals) break :blk (try value.intFromEnum(cond_ty, mod)).toUnsignedInt(mod); // TODO: composite integer constants @@ -3124,7 +3124,7 @@ pub const DeclGen = struct { else => unreachable, }; const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) { - 1 => .{ .uint32 = @intCast(u32, int_val) }, + 1 => .{ .uint32 = @as(u32, @intCast(int_val)) }, 2 => .{ .uint64 = int_val }, else => unreachable, }; @@ -3139,7 +3139,7 @@ pub const DeclGen = struct { var case_i: u32 = 0; while (case_i < num_cases) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len])); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + case.data.items_len + case_body.len; @@ -3167,15 +3167,15 @@ pub const DeclGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); - const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; + const clobbers_len = @as(u31, @truncate(extra.data.flags)); if (!is_volatile and self.liveness.isUnused(inst)) return null; var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); 
extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; if (outputs.len > 1) { @@ -3297,7 +3297,7 @@ pub const DeclGen = struct { const mod = self.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); const callee_ty = self.typeOf(pl_op.operand); const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, diff --git a/src/codegen/spirv/Assembler.zig b/src/codegen/spirv/Assembler.zig index 73a842ebe9..8f466668ea 100644 --- a/src/codegen/spirv/Assembler.zig +++ b/src/codegen/spirv/Assembler.zig @@ -293,7 +293,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue { return self.fail(0, "{} is not a valid bit count for floats (expected 16, 32 or 64)", .{bits}); }, } - break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @intCast(u16, bits) } }); + break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @as(u16, @intCast(bits)) } }); }, .OpTypeVector => try self.spv.resolve(.{ .vector_type = .{ .component_type = try self.resolveTypeRef(operands[1].ref_id), @@ -306,7 +306,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue { }, .OpTypePointer => try self.spv.ptrType( try self.resolveTypeRef(operands[2].ref_id), - @enumFromInt(spec.StorageClass, operands[1].value), + @as(spec.StorageClass, @enumFromInt(operands[1].value)), ), .OpTypeFunction => blk: { const param_operands = operands[2..]; @@ -340,7 +340,7 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue { else => switch (self.inst.opcode) { .OpEntryPoint => unreachable, .OpExecutionMode, .OpExecutionModeId => &self.spv.sections.execution_modes, - .OpVariable => switch (@enumFromInt(spec.StorageClass, operands[2].value)) { + .OpVariable => switch (@as(spec.StorageClass, @enumFromInt(operands[2].value))) { .Function => &self.func.prologue, else => { // This is currently disabled because global variables are required to be @@ -391,7 +391,7 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue { } const actual_word_count = section.instructions.items.len - first_word; - section.instructions.items[first_word] |= @as(u32, @intCast(u16, actual_word_count)) << 16 | @intFromEnum(self.inst.opcode); + section.instructions.items[first_word] |= @as(u32, @as(u16, @intCast(actual_word_count))) << 16 | @intFromEnum(self.inst.opcode); if (maybe_result_id) |result| { return AsmValue{ .value = result }; @@ -458,7 +458,7 @@ fn parseInstruction(self: *Assembler) !void { if (!entry.found_existing) { entry.value_ptr.* = .just_declared; } - break :blk @intCast(AsmValue.Ref, entry.index); + break :blk @as(AsmValue.Ref, @intCast(entry.index)); } else null; const opcode_tok = self.currentToken(); @@ -613,7 +613,7 @@ fn parseRefId(self: *Assembler) !void { entry.value_ptr.* = .unresolved_forward_reference; } - const index = @intCast(AsmValue.Ref, entry.index); + const index = @as(AsmValue.Ref, @intCast(entry.index)); try self.inst.operands.append(self.gpa, .{ .ref_id = index }); } @@ -645,7 +645,7 @@ fn parseString(self: *Assembler) !void { else text[1..]; - const string_offset = @intCast(u32, self.inst.string_bytes.items.len); + const string_offset = 
@as(u32, @intCast(self.inst.string_bytes.items.len)); try self.inst.string_bytes.ensureUnusedCapacity(self.gpa, literal.len + 1); self.inst.string_bytes.appendSliceAssumeCapacity(literal); self.inst.string_bytes.appendAssumeCapacity(0); @@ -693,18 +693,18 @@ fn parseContextDependentInt(self: *Assembler, signedness: std.builtin.Signedness const int = std.fmt.parseInt(i128, text, 0) catch break :invalid; const min = switch (signedness) { .unsigned => 0, - .signed => -(@as(i128, 1) << (@intCast(u7, width) - 1)), + .signed => -(@as(i128, 1) << (@as(u7, @intCast(width)) - 1)), }; - const max = (@as(i128, 1) << (@intCast(u7, width) - @intFromBool(signedness == .signed))) - 1; + const max = (@as(i128, 1) << (@as(u7, @intCast(width)) - @intFromBool(signedness == .signed))) - 1; if (int < min or int > max) { break :invalid; } // Note, we store the sign-extended version here. if (width <= @bitSizeOf(spec.Word)) { - try self.inst.operands.append(self.gpa, .{ .literal32 = @truncate(u32, @bitCast(u128, int)) }); + try self.inst.operands.append(self.gpa, .{ .literal32 = @as(u32, @truncate(@as(u128, @bitCast(int)))) }); } else { - try self.inst.operands.append(self.gpa, .{ .literal64 = @truncate(u64, @bitCast(u128, int)) }); + try self.inst.operands.append(self.gpa, .{ .literal64 = @as(u64, @truncate(@as(u128, @bitCast(int)))) }); } return; } @@ -725,7 +725,7 @@ fn parseContextDependentFloat(self: *Assembler, comptime width: u16) !void { return self.fail(tok.start, "'{s}' is not a valid {}-bit float literal", .{ text, width }); }; - const float_bits = @bitCast(Int, value); + const float_bits = @as(Int, @bitCast(value)); if (width <= @bitSizeOf(spec.Word)) { try self.inst.operands.append(self.gpa, .{ .literal32 = float_bits }); } else { diff --git a/src/codegen/spirv/Cache.zig b/src/codegen/spirv/Cache.zig index 7d7fc0fb0d..7a3b6f61f5 100644 --- a/src/codegen/spirv/Cache.zig +++ b/src/codegen/spirv/Cache.zig @@ -158,16 +158,16 @@ const Tag = enum { high: u32, fn encode(value: f64) Float64 { - const bits = @bitCast(u64, value); + const bits = @as(u64, @bitCast(value)); return .{ - .low = @truncate(u32, bits), - .high = @truncate(u32, bits >> 32), + .low = @as(u32, @truncate(bits)), + .high = @as(u32, @truncate(bits >> 32)), }; } fn decode(self: Float64) f64 { const bits = @as(u64, self.low) | (@as(u64, self.high) << 32); - return @bitCast(f64, bits); + return @as(f64, @bitCast(bits)); } }; @@ -189,8 +189,8 @@ const Tag = enum { fn encode(ty: Ref, value: u64) Int64 { return .{ .ty = ty, - .low = @truncate(u32, value), - .high = @truncate(u32, value >> 32), + .low = @as(u32, @truncate(value)), + .high = @as(u32, @truncate(value >> 32)), }; } @@ -207,13 +207,13 @@ const Tag = enum { fn encode(ty: Ref, value: i64) Int64 { return .{ .ty = ty, - .low = @truncate(u32, @bitCast(u64, value)), - .high = @truncate(u32, @bitCast(u64, value) >> 32), + .low = @as(u32, @truncate(@as(u64, @bitCast(value)))), + .high = @as(u32, @truncate(@as(u64, @bitCast(value)) >> 32)), }; } fn decode(self: Int64) i64 { - return @bitCast(i64, @as(u64, self.low) | (@as(u64, self.high) << 32)); + return @as(i64, @bitCast(@as(u64, self.low) | (@as(u64, self.high) << 32))); } }; }; @@ -305,21 +305,21 @@ pub const Key = union(enum) { /// Turns this value into the corresponding 32-bit literal, 2s complement signed. 
fn toBits32(self: Int) u32 { return switch (self.value) { - .uint64 => |val| @intCast(u32, val), - .int64 => |val| if (val < 0) @bitCast(u32, @intCast(i32, val)) else @intCast(u32, val), + .uint64 => |val| @as(u32, @intCast(val)), + .int64 => |val| if (val < 0) @as(u32, @bitCast(@as(i32, @intCast(val)))) else @as(u32, @intCast(val)), }; } fn toBits64(self: Int) u64 { return switch (self.value) { .uint64 => |val| val, - .int64 => |val| @bitCast(u64, val), + .int64 => |val| @as(u64, @bitCast(val)), }; } fn to(self: Int, comptime T: type) T { return switch (self.value) { - inline else => |val| @intCast(T, val), + inline else => |val| @as(T, @intCast(val)), }; } }; @@ -357,9 +357,9 @@ pub const Key = union(enum) { .float => |float| { std.hash.autoHash(&hasher, float.ty); switch (float.value) { - .float16 => |value| std.hash.autoHash(&hasher, @bitCast(u16, value)), - .float32 => |value| std.hash.autoHash(&hasher, @bitCast(u32, value)), - .float64 => |value| std.hash.autoHash(&hasher, @bitCast(u64, value)), + .float16 => |value| std.hash.autoHash(&hasher, @as(u16, @bitCast(value))), + .float32 => |value| std.hash.autoHash(&hasher, @as(u32, @bitCast(value))), + .float64 => |value| std.hash.autoHash(&hasher, @as(u64, @bitCast(value))), } }, .function_type => |func| { @@ -379,7 +379,7 @@ pub const Key = union(enum) { }, inline else => |key| std.hash.autoHash(&hasher, key), } - return @truncate(u32, hasher.final()); + return @as(u32, @truncate(hasher.final())); } fn eql(a: Key, b: Key) bool { @@ -411,7 +411,7 @@ pub const Key = union(enum) { pub fn eql(ctx: @This(), a: Key, b_void: void, b_index: usize) bool { _ = b_void; - return ctx.self.lookup(@enumFromInt(Ref, b_index)).eql(a); + return ctx.self.lookup(@as(Ref, @enumFromInt(b_index))).eql(a); } pub fn hash(ctx: @This(), a: Key) u32 { @@ -445,7 +445,7 @@ pub fn materialize(self: *const Self, spv: *Module) !Section { var section = Section{}; errdefer section.deinit(spv.gpa); for (self.items.items(.result_id), 0..) |result_id, index| { - try self.emit(spv, result_id, @enumFromInt(Ref, index), &section); + try self.emit(spv, result_id, @as(Ref, @enumFromInt(index)), &section); } return section; } @@ -534,7 +534,7 @@ fn emit( } for (struct_type.memberNames(), 0..) |member_name, i| { if (self.getString(member_name)) |name| { - try spv.memberDebugName(result_id, @intCast(u32, i), "{s}", .{name}); + try spv.memberDebugName(result_id, @as(u32, @intCast(i)), "{s}", .{name}); } } // TODO: Decorations?
@@ -557,7 +557,7 @@ fn emit( .float => |float| { const ty_id = self.resultId(float.ty); const lit: Lit = switch (float.value) { - .float16 => |value| .{ .uint32 = @bitCast(u16, value) }, + .float16 => |value| .{ .uint32 = @as(u16, @bitCast(value)) }, .float32 => |value| .{ .float32 = value }, .float64 => |value| .{ .float64 = value }, }; @@ -603,7 +603,7 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref { const adapter: Key.Adapter = .{ .self = self }; const entry = try self.map.getOrPutAdapted(spv.gpa, key, adapter); if (entry.found_existing) { - return @enumFromInt(Ref, entry.index); + return @as(Ref, @enumFromInt(entry.index)); } const result_id = spv.allocId(); const item: Item = switch (key) { @@ -640,10 +640,10 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref { }, .function_type => |function| blk: { const extra = try self.addExtra(spv, Tag.FunctionType{ - .param_len = @intCast(u32, function.parameters.len), + .param_len = @as(u32, @intCast(function.parameters.len)), .return_type = function.return_type, }); - try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, function.parameters)); + try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(function.parameters))); break :blk .{ .tag = .type_function, .result_id = result_id, @@ -678,12 +678,12 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref { .struct_type => |struct_type| blk: { const extra = try self.addExtra(spv, Tag.SimpleStructType{ .name = struct_type.name, - .members_len = @intCast(u32, struct_type.member_types.len), + .members_len = @as(u32, @intCast(struct_type.member_types.len)), }); - try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, struct_type.member_types)); + try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(struct_type.member_types))); if (struct_type.member_names) |member_names| { - try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, member_names)); + try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(member_names))); break :blk Item{ .tag = .type_struct_simple_with_member_names, .result_id = result_id, @@ -721,7 +721,7 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref { .result_id = result_id, .data = try self.addExtra(spv, Tag.UInt32{ .ty = int.ty, - .value = @intCast(u32, val), + .value = @as(u32, @intCast(val)), }), }; } else if (val >= std.math.minInt(i32) and val <= std.math.maxInt(i32)) { @@ -730,20 +730,20 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref { .result_id = result_id, .data = try self.addExtra(spv, Tag.Int32{ .ty = int.ty, - .value = @intCast(i32, val), + .value = @as(i32, @intCast(val)), }), }; } else if (val < 0) { break :blk .{ .tag = .int_large, .result_id = result_id, - .data = try self.addExtra(spv, Tag.Int64.encode(int.ty, @intCast(i64, val))), + .data = try self.addExtra(spv, Tag.Int64.encode(int.ty, @as(i64, @intCast(val)))), }; } else { break :blk .{ .tag = .uint_large, .result_id = result_id, - .data = try self.addExtra(spv, Tag.UInt64.encode(int.ty, @intCast(u64, val))), + .data = try self.addExtra(spv, Tag.UInt64.encode(int.ty, @as(u64, @intCast(val)))), }; } }, @@ -753,12 +753,12 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref { 16 => .{ .tag = .float16, .result_id = result_id, - .data = @bitCast(u16, float.value.float16), + .data = @as(u16, @bitCast(float.value.float16)), }, 32 => .{ .tag = .float32, .result_id = result_id, - .data = @bitCast(u32, float.value.float32), + .data = @as(u32, @bitCast(float.value.float32)), }, 64 => .{ .tag = .float64, @@ -788,7 +788,7 @@ pub fn 
resolve(self: *Self, spv: *Module, key: Key) !Ref { }; try self.items.append(spv.gpa, item); - return @enumFromInt(Ref, entry.index); + return @as(Ref, @enumFromInt(entry.index)); } /// Turn a Ref back into a Key. @@ -797,20 +797,20 @@ pub fn lookup(self: *const Self, ref: Ref) Key { const item = self.items.get(@intFromEnum(ref)); const data = item.data; return switch (item.tag) { - .type_simple => switch (@enumFromInt(Tag.SimpleType, data)) { + .type_simple => switch (@as(Tag.SimpleType, @enumFromInt(data))) { .void => .void_type, .bool => .bool_type, }, .type_int_signed => .{ .int_type = .{ .signedness = .signed, - .bits = @intCast(u16, data), + .bits = @as(u16, @intCast(data)), } }, .type_int_unsigned => .{ .int_type = .{ .signedness = .unsigned, - .bits = @intCast(u16, data), + .bits = @as(u16, @intCast(data)), } }, .type_float => .{ .float_type = .{ - .bits = @intCast(u16, data), + .bits = @as(u16, @intCast(data)), } }, .type_vector => .{ .vector_type = self.extraData(Tag.VectorType, data) }, .type_array => .{ .array_type = self.extraData(Tag.ArrayType, data) }, @@ -819,26 +819,26 @@ pub fn lookup(self: *const Self, ref: Ref) Key { return .{ .function_type = .{ .return_type = payload.data.return_type, - .parameters = @ptrCast([]const Ref, self.extra.items[payload.trail..][0..payload.data.param_len]), + .parameters = @as([]const Ref, @ptrCast(self.extra.items[payload.trail..][0..payload.data.param_len])), }, }; }, .type_ptr_generic => .{ .ptr_type = .{ .storage_class = .Generic, - .child_type = @enumFromInt(Ref, data), + .child_type = @as(Ref, @enumFromInt(data)), }, }, .type_ptr_crosswgp => .{ .ptr_type = .{ .storage_class = .CrossWorkgroup, - .child_type = @enumFromInt(Ref, data), + .child_type = @as(Ref, @enumFromInt(data)), }, }, .type_ptr_function => .{ .ptr_type = .{ .storage_class = .Function, - .child_type = @enumFromInt(Ref, data), + .child_type = @as(Ref, @enumFromInt(data)), }, }, .type_ptr_simple => { @@ -852,7 +852,7 @@ pub fn lookup(self: *const Self, ref: Ref) Key { }, .type_struct_simple => { const payload = self.extraDataTrail(Tag.SimpleStructType, data); - const member_types = @ptrCast([]const Ref, self.extra.items[payload.trail..][0..payload.data.members_len]); + const member_types = @as([]const Ref, @ptrCast(self.extra.items[payload.trail..][0..payload.data.members_len])); return .{ .struct_type = .{ .name = payload.data.name, @@ -864,8 +864,8 @@ pub fn lookup(self: *const Self, ref: Ref) Key { .type_struct_simple_with_member_names => { const payload = self.extraDataTrail(Tag.SimpleStructType, data); const trailing = self.extra.items[payload.trail..]; - const member_types = @ptrCast([]const Ref, trailing[0..payload.data.members_len]); - const member_names = @ptrCast([]const String, trailing[payload.data.members_len..][0..payload.data.members_len]); + const member_types = @as([]const Ref, @ptrCast(trailing[0..payload.data.members_len])); + const member_names = @as([]const String, @ptrCast(trailing[payload.data.members_len..][0..payload.data.members_len])); return .{ .struct_type = .{ .name = payload.data.name, @@ -876,11 +876,11 @@ pub fn lookup(self: *const Self, ref: Ref) Key { }, .float16 => .{ .float = .{ .ty = self.get(.{ .float_type = .{ .bits = 16 } }), - .value = .{ .float16 = @bitCast(f16, @intCast(u16, data)) }, + .value = .{ .float16 = @as(f16, @bitCast(@as(u16, @intCast(data)))) }, } }, .float32 => .{ .float = .{ .ty = self.get(.{ .float_type = .{ .bits = 32 } }), - .value = .{ .float32 = @bitCast(f32, data) }, + .value = .{ .float32 = @as(f32, 
@bitCast(data)) }, } }, .float64 => .{ .float = .{ .ty = self.get(.{ .float_type = .{ .bits = 64 } }), @@ -923,17 +923,17 @@ pub fn lookup(self: *const Self, ref: Ref) Key { } }; }, .undef => .{ .undef = .{ - .ty = @enumFromInt(Ref, data), + .ty = @as(Ref, @enumFromInt(data)), } }, .null => .{ .null = .{ - .ty = @enumFromInt(Ref, data), + .ty = @as(Ref, @enumFromInt(data)), } }, .bool_true => .{ .bool = .{ - .ty = @enumFromInt(Ref, data), + .ty = @as(Ref, @enumFromInt(data)), .value = true, } }, .bool_false => .{ .bool = .{ - .ty = @enumFromInt(Ref, data), + .ty = @as(Ref, @enumFromInt(data)), .value = false, } }, }; @@ -949,7 +949,7 @@ pub fn resultId(self: Self, ref: Ref) IdResult { fn get(self: *const Self, key: Key) Ref { const adapter: Key.Adapter = .{ .self = self }; const index = self.map.getIndexAdapted(key, adapter).?; - return @enumFromInt(Ref, index); + return @as(Ref, @enumFromInt(index)); } fn addExtra(self: *Self, spv: *Module, extra: anytype) !u32 { @@ -959,12 +959,12 @@ fn addExtra(self: *Self, spv: *Module, extra: anytype) !u32 { } fn addExtraAssumeCapacity(self: *Self, extra: anytype) !u32 { - const payload_offset = @intCast(u32, self.extra.items.len); + const payload_offset = @as(u32, @intCast(self.extra.items.len)); inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { const field_val = @field(extra, field.name); const word = switch (field.type) { u32 => field_val, - i32 => @bitCast(u32, field_val), + i32 => @as(u32, @bitCast(field_val)), Ref => @intFromEnum(field_val), StorageClass => @intFromEnum(field_val), String => @intFromEnum(field_val), @@ -986,16 +986,16 @@ fn extraDataTrail(self: Self, comptime T: type, offset: u32) struct { data: T, t const word = self.extra.items[offset + i]; @field(result, field.name) = switch (field.type) { u32 => word, - i32 => @bitCast(i32, word), - Ref => @enumFromInt(Ref, word), - StorageClass => @enumFromInt(StorageClass, word), - String => @enumFromInt(String, word), + i32 => @as(i32, @bitCast(word)), + Ref => @as(Ref, @enumFromInt(word)), + StorageClass => @as(StorageClass, @enumFromInt(word)), + String => @as(String, @enumFromInt(word)), else => @compileError("Invalid type: " ++ @typeName(field.type)), }; } return .{ .data = result, - .trail = offset + @intCast(u32, fields.len), + .trail = offset + @as(u32, @intCast(fields.len)), }; } @@ -1017,7 +1017,7 @@ pub const String = enum(u32) { _ = ctx; var hasher = std.hash.Wyhash.init(0); hasher.update(a); - return @truncate(u32, hasher.final()); + return @as(u32, @truncate(hasher.final())); } }; }; @@ -1032,10 +1032,10 @@ pub fn addString(self: *Self, spv: *Module, str: []const u8) !String { try self.string_bytes.ensureUnusedCapacity(spv.gpa, 1 + str.len); self.string_bytes.appendSliceAssumeCapacity(str); self.string_bytes.appendAssumeCapacity(0); - entry.value_ptr.* = @intCast(u32, offset); + entry.value_ptr.* = @as(u32, @intCast(offset)); } - return @enumFromInt(String, entry.index); + return @as(String, @enumFromInt(entry.index)); } pub fn getString(self: *const Self, ref: String) ?[]const u8 { diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig index 9d8cca9445..e61ac754ee 100644 --- a/src/codegen/spirv/Module.zig +++ b/src/codegen/spirv/Module.zig @@ -451,8 +451,8 @@ pub fn constInt(self: *Module, ty_ref: CacheRef, value: anytype) !IdRef { return try self.resolveId(.{ .int = .{ .ty = ty_ref, .value = switch (ty.signedness) { - .signed => Value{ .int64 = @intCast(i64, value) }, - .unsigned => Value{ .uint64 = @intCast(u64, value) }, + .signed => Value{ 
.int64 = @as(i64, @intCast(value)) }, + .unsigned => Value{ .uint64 = @as(u64, @intCast(value)) }, }, } }); } @@ -516,7 +516,7 @@ pub fn allocDecl(self: *Module, kind: DeclKind) !Decl.Index { .begin_dep = undefined, .end_dep = undefined, }); - const index = @enumFromInt(Decl.Index, @intCast(u32, self.decls.items.len - 1)); + const index = @as(Decl.Index, @enumFromInt(@as(u32, @intCast(self.decls.items.len - 1)))); switch (kind) { .func => {}, // If the decl represents a global, also allocate a global node. @@ -540,9 +540,9 @@ pub fn globalPtr(self: *Module, index: Decl.Index) ?*Global { /// Declare ALL dependencies for a decl. pub fn declareDeclDeps(self: *Module, decl_index: Decl.Index, deps: []const Decl.Index) !void { - const begin_dep = @intCast(u32, self.decl_deps.items.len); + const begin_dep = @as(u32, @intCast(self.decl_deps.items.len)); try self.decl_deps.appendSlice(self.gpa, deps); - const end_dep = @intCast(u32, self.decl_deps.items.len); + const end_dep = @as(u32, @intCast(self.decl_deps.items.len)); const decl = self.declPtr(decl_index); decl.begin_dep = begin_dep; @@ -550,13 +550,13 @@ pub fn declareDeclDeps(self: *Module, decl_index: Decl.Index, deps: []const Decl } pub fn beginGlobal(self: *Module) u32 { - return @intCast(u32, self.globals.section.instructions.items.len); + return @as(u32, @intCast(self.globals.section.instructions.items.len)); } pub fn endGlobal(self: *Module, global_index: Decl.Index, begin_inst: u32) void { const global = self.globalPtr(global_index).?; global.begin_inst = begin_inst; - global.end_inst = @intCast(u32, self.globals.section.instructions.items.len); + global.end_inst = @as(u32, @intCast(self.globals.section.instructions.items.len)); } pub fn declareEntryPoint(self: *Module, decl_index: Decl.Index, name: []const u8) !void { diff --git a/src/codegen/spirv/Section.zig b/src/codegen/spirv/Section.zig index b35dc489e4..ae88dc7c8a 100644 --- a/src/codegen/spirv/Section.zig +++ b/src/codegen/spirv/Section.zig @@ -50,7 +50,7 @@ pub fn emitRaw( ) !void { const word_count = 1 + operand_words; try section.instructions.ensureUnusedCapacity(allocator, word_count); - section.writeWord((@intCast(Word, word_count << 16)) | @intFromEnum(opcode)); + section.writeWord((@as(Word, @intCast(word_count << 16))) | @intFromEnum(opcode)); } pub fn emit( @@ -61,7 +61,7 @@ pub fn emit( ) !void { const word_count = instructionSize(opcode, operands); try section.instructions.ensureUnusedCapacity(allocator, word_count); - section.writeWord(@intCast(Word, word_count << 16) | @intFromEnum(opcode)); + section.writeWord(@as(Word, @intCast(word_count << 16)) | @intFromEnum(opcode)); section.writeOperands(opcode.Operands(), operands); } @@ -94,8 +94,8 @@ pub fn writeWords(section: *Section, words: []const Word) void { pub fn writeDoubleWord(section: *Section, dword: DoubleWord) void { section.writeWords(&.{ - @truncate(Word, dword), - @truncate(Word, dword >> @bitSizeOf(Word)), + @as(Word, @truncate(dword)), + @as(Word, @truncate(dword >> @bitSizeOf(Word))), }); } @@ -145,7 +145,7 @@ pub fn writeOperand(section: *Section, comptime Operand: type, operand: Operand) }, .Struct => |info| { if (info.layout == .Packed) { - section.writeWord(@bitCast(Word, operand)); + section.writeWord(@as(Word, @bitCast(operand))); } else { section.writeExtendedMask(Operand, operand); } @@ -166,7 +166,7 @@ fn writeString(section: *Section, str: []const u8) void { var j: usize = 0; while (j < @sizeOf(Word) and i + j < str.len) : (j += 1) { - word |= @as(Word, str[i + j]) << @intCast(Log2Word, j * 
@bitSizeOf(u8)); + word |= @as(Word, str[i + j]) << @as(Log2Word, @intCast(j * @bitSizeOf(u8))); } section.instructions.appendAssumeCapacity(word); @@ -175,12 +175,12 @@ fn writeString(section: *Section, str: []const u8) void { fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDependentNumber) void { switch (operand) { - .int32 => |int| section.writeWord(@bitCast(Word, int)), - .uint32 => |int| section.writeWord(@bitCast(Word, int)), - .int64 => |int| section.writeDoubleWord(@bitCast(DoubleWord, int)), - .uint64 => |int| section.writeDoubleWord(@bitCast(DoubleWord, int)), - .float32 => |float| section.writeWord(@bitCast(Word, float)), - .float64 => |float| section.writeDoubleWord(@bitCast(DoubleWord, float)), + .int32 => |int| section.writeWord(@as(Word, @bitCast(int))), + .uint32 => |int| section.writeWord(@as(Word, @bitCast(int))), + .int64 => |int| section.writeDoubleWord(@as(DoubleWord, @bitCast(int))), + .uint64 => |int| section.writeDoubleWord(@as(DoubleWord, @bitCast(int))), + .float32 => |float| section.writeWord(@as(Word, @bitCast(float))), + .float64 => |float| section.writeDoubleWord(@as(DoubleWord, @bitCast(float))), } } @@ -189,10 +189,10 @@ fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand inline for (@typeInfo(Operand).Struct.fields, 0..) |field, bit| { switch (@typeInfo(field.type)) { .Optional => if (@field(operand, field.name) != null) { - mask |= 1 << @intCast(u5, bit); + mask |= 1 << @as(u5, @intCast(bit)); }, .Bool => if (@field(operand, field.name)) { - mask |= 1 << @intCast(u5, bit); + mask |= 1 << @as(u5, @intCast(bit)); }, else => unreachable, } @@ -392,7 +392,7 @@ test "SPIR-V Section emit() - extended mask" { (@as(Word, 5) << 16) | @intFromEnum(Opcode.OpLoopMerge), 10, 20, - @bitCast(Word, spec.LoopControl{ .Unroll = true, .DependencyLength = true }), + @as(Word, @bitCast(spec.LoopControl{ .Unroll = true, .DependencyLength = true })), 2, }, section.instructions.items); } diff --git a/src/crash_report.zig b/src/crash_report.zig index cb468c101f..fc41528321 100644 --- a/src/crash_report.zig +++ b/src/crash_report.zig @@ -204,49 +204,49 @@ fn handleSegfaultPosix(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const any const stack_ctx: StackContext = switch (builtin.cpu.arch) { .x86 => ctx: { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); - const ip = @intCast(usize, ctx.mcontext.gregs[os.REG.EIP]); - const bp = @intCast(usize, ctx.mcontext.gregs[os.REG.EBP]); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); + const ip = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EIP])); + const bp = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EBP])); break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } }; }, .x86_64 => ctx: { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); const ip = switch (builtin.os.tag) { - .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]), - .freebsd => @intCast(usize, ctx.mcontext.rip), - .openbsd => @intCast(usize, ctx.sc_rip), - .macos => @intCast(usize, ctx.mcontext.ss.rip), + .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RIP])), + .freebsd => @as(usize, @intCast(ctx.mcontext.rip)), + .openbsd => @as(usize, @intCast(ctx.sc_rip)), + .macos => @as(usize, @intCast(ctx.mcontext.ss.rip)), else => unreachable, }; const bp = switch (builtin.os.tag) { - .linux, 
.netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RBP]), - .openbsd => @intCast(usize, ctx.sc_rbp), - .freebsd => @intCast(usize, ctx.mcontext.rbp), - .macos => @intCast(usize, ctx.mcontext.ss.rbp), + .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RBP])), + .openbsd => @as(usize, @intCast(ctx.sc_rbp)), + .freebsd => @as(usize, @intCast(ctx.mcontext.rbp)), + .macos => @as(usize, @intCast(ctx.mcontext.ss.rbp)), else => unreachable, }; break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } }; }, .arm => ctx: { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); - const ip = @intCast(usize, ctx.mcontext.arm_pc); - const bp = @intCast(usize, ctx.mcontext.arm_fp); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); + const ip = @as(usize, @intCast(ctx.mcontext.arm_pc)); + const bp = @as(usize, @intCast(ctx.mcontext.arm_fp)); break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } }; }, .aarch64 => ctx: { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); const ip = switch (native_os) { - .macos => @intCast(usize, ctx.mcontext.ss.pc), - .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.PC]), - .freebsd => @intCast(usize, ctx.mcontext.gpregs.elr), - else => @intCast(usize, ctx.mcontext.pc), + .macos => @as(usize, @intCast(ctx.mcontext.ss.pc)), + .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.PC])), + .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.elr)), + else => @as(usize, @intCast(ctx.mcontext.pc)), }; // x29 is the ABI-designated frame pointer const bp = switch (native_os) { - .macos => @intCast(usize, ctx.mcontext.ss.fp), - .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.FP]), - .freebsd => @intCast(usize, ctx.mcontext.gpregs.x[os.REG.FP]), - else => @intCast(usize, ctx.mcontext.regs[29]), + .macos => @as(usize, @intCast(ctx.mcontext.ss.fp)), + .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.FP])), + .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.x[os.REG.FP])), + else => @as(usize, @intCast(ctx.mcontext.regs[29])), }; break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } }; }, diff --git a/src/glibc.zig b/src/glibc.zig index bb38c2c987..cf12e8ea46 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -779,13 +779,13 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: *std.Progress.Node) !vo // Test whether the inclusion applies to our current library and target. const ok_lib_and_target = (lib_index == lib_i) and - ((targets & (@as(u32, 1) << @intCast(u5, target_targ_index))) != 0); + ((targets & (@as(u32, 1) << @as(u5, @intCast(target_targ_index)))) != 0); while (true) { const byte = metadata.inclusions[inc_i]; inc_i += 1; const last = (byte & 0b1000_0000) != 0; - const ver_i = @truncate(u7, byte); + const ver_i = @as(u7, @truncate(byte)); if (ok_lib_and_target and ver_i <= target_ver_index) { versions_buffer[versions_len] = ver_i; versions_len += 1; @@ -913,13 +913,13 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: *std.Progress.Node) !vo // Test whether the inclusion applies to our current library and target. 
const ok_lib_and_target = (lib_index == lib_i) and - ((targets & (@as(u32, 1) << @intCast(u5, target_targ_index))) != 0); + ((targets & (@as(u32, 1) << @as(u5, @intCast(target_targ_index)))) != 0); while (true) { const byte = metadata.inclusions[inc_i]; inc_i += 1; const last = (byte & 0b1000_0000) != 0; - const ver_i = @truncate(u7, byte); + const ver_i = @as(u7, @truncate(byte)); if (ok_lib_and_target and ver_i <= target_ver_index) { versions_buffer[versions_len] = ver_i; versions_len += 1; diff --git a/src/link/C.zig b/src/link/C.zig index 9a42daa061..e3f8653852 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -292,7 +292,7 @@ pub fn flushModule(self: *C, _: *Compilation, prog_node: *std.Progress.Node) !vo { var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; defer export_names.deinit(gpa); - try export_names.ensureTotalCapacity(gpa, @intCast(u32, module.decl_exports.entries.len)); + try export_names.ensureTotalCapacity(gpa, @as(u32, @intCast(module.decl_exports.entries.len))); for (module.decl_exports.values()) |exports| for (exports.items) |@"export"| try export_names.put(gpa, @"export".opts.name, {}); @@ -426,7 +426,7 @@ fn flushCTypes( return ctx.ctypes_map[idx - codegen.CType.Tag.no_payload_count]; } }; - const decl_idx = @intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + decl_i); + const decl_idx = @as(codegen.CType.Index, @intCast(codegen.CType.Tag.no_payload_count + decl_i)); const ctx = Context{ .arena = global_ctypes.arena.allocator(), .ctypes_map = f.ctypes_map.items, @@ -437,7 +437,7 @@ fn flushCTypes( .store = &global_ctypes.set, }); const global_idx = - @intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + gop.index); + @as(codegen.CType.Index, @intCast(codegen.CType.Tag.no_payload_count + gop.index)); f.ctypes_map.appendAssumeCapacity(global_idx); if (!gop.found_existing) { errdefer _ = global_ctypes.set.map.pop(); @@ -538,7 +538,7 @@ fn flushLazyFn(self: *C, db: *DeclBlock, lazy_fn: codegen.LazyFnMap.Entry) Flush fn flushLazyFns(self: *C, f: *Flush, lazy_fns: codegen.LazyFnMap) FlushDeclError!void { const gpa = self.base.allocator; - try f.lazy_fns.ensureUnusedCapacity(gpa, @intCast(Flush.LazyFns.Size, lazy_fns.count())); + try f.lazy_fns.ensureUnusedCapacity(gpa, @as(Flush.LazyFns.Size, @intCast(lazy_fns.count()))); var it = lazy_fns.iterator(); while (it.next()) |entry| { diff --git a/src/link/Coff.zig b/src/link/Coff.zig index e3fcc941eb..a724d4023a 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -358,7 +358,7 @@ fn populateMissingMetadata(self: *Coff) !void { }); if (self.text_section_index == null) { - const file_size = @intCast(u32, self.base.options.program_code_size_hint); + const file_size = @as(u32, @intCast(self.base.options.program_code_size_hint)); self.text_section_index = try self.allocateSection(".text", file_size, .{ .CNT_CODE = 1, .MEM_EXECUTE = 1, @@ -367,7 +367,7 @@ fn populateMissingMetadata(self: *Coff) !void { } if (self.got_section_index == null) { - const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.size(); + const file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * self.ptr_width.size(); self.got_section_index = try self.allocateSection(".got", file_size, .{ .CNT_INITIALIZED_DATA = 1, .MEM_READ = 1, @@ -392,7 +392,7 @@ fn populateMissingMetadata(self: *Coff) !void { } if (self.idata_section_index == null) { - const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.size(); + const 
file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * self.ptr_width.size(); self.idata_section_index = try self.allocateSection(".idata", file_size, .{ .CNT_INITIALIZED_DATA = 1, .MEM_READ = 1, @@ -400,7 +400,7 @@ fn populateMissingMetadata(self: *Coff) !void { } if (self.reloc_section_index == null) { - const file_size = @intCast(u32, self.base.options.symbol_count_hint) * @sizeOf(coff.BaseRelocation); + const file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * @sizeOf(coff.BaseRelocation); self.reloc_section_index = try self.allocateSection(".reloc", file_size, .{ .CNT_INITIALIZED_DATA = 1, .MEM_DISCARDABLE = 1, @@ -409,7 +409,7 @@ fn populateMissingMetadata(self: *Coff) !void { } if (self.strtab_offset == null) { - const file_size = @intCast(u32, self.strtab.len()); + const file_size = @as(u32, @intCast(self.strtab.len())); self.strtab_offset = self.findFreeSpace(file_size, @alignOf(u32)); // 4bytes aligned seems like a good idea here log.debug("found strtab free space 0x{x} to 0x{x}", .{ self.strtab_offset.?, self.strtab_offset.? + file_size }); } @@ -430,7 +430,7 @@ fn populateMissingMetadata(self: *Coff) !void { } fn allocateSection(self: *Coff, name: []const u8, size: u32, flags: coff.SectionHeaderFlags) !u16 { - const index = @intCast(u16, self.sections.slice().len); + const index = @as(u16, @intCast(self.sections.slice().len)); const off = self.findFreeSpace(size, default_file_alignment); // Memory is always allocated in sequence // TODO: investigate if we can allocate .text last; this way it would never need to grow in memory! @@ -652,7 +652,7 @@ pub fn allocateSymbol(self: *Coff) !u32 { break :blk index; } else { log.debug(" (allocating symbol index {d})", .{self.locals.items.len}); - const index = @intCast(u32, self.locals.items.len); + const index = @as(u32, @intCast(self.locals.items.len)); _ = self.locals.addOneAssumeCapacity(); break :blk index; } @@ -680,7 +680,7 @@ fn allocateGlobal(self: *Coff) !u32 { break :blk index; } else { log.debug(" (allocating global index {d})", .{self.globals.items.len}); - const index = @intCast(u32, self.globals.items.len); + const index = @as(u32, @intCast(self.globals.items.len)); _ = self.globals.addOneAssumeCapacity(); break :blk index; } @@ -704,7 +704,7 @@ fn addGotEntry(self: *Coff, target: SymbolWithLoc) !void { pub fn createAtom(self: *Coff) !Atom.Index { const gpa = self.base.allocator; - const atom_index = @intCast(Atom.Index, self.atoms.items.len); + const atom_index = @as(Atom.Index, @intCast(self.atoms.items.len)); const atom = try self.atoms.addOne(gpa); const sym_index = try self.allocateSymbol(); try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index); @@ -776,7 +776,7 @@ fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []u8) !void { self.resolveRelocs(atom_index, relocs.items, mem_code, slide); const vaddr = sym.value + slide; - const pvaddr = @ptrFromInt(*anyopaque, vaddr); + const pvaddr = @as(*anyopaque, @ptrFromInt(vaddr)); log.debug("writing to memory at address {x}", .{vaddr}); @@ -830,7 +830,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void { const sect_id = self.got_section_index.?; if (self.got_table_count_dirty) { - const needed_size = @intCast(u32, self.got_table.entries.items.len * self.ptr_width.size()); + const needed_size = @as(u32, @intCast(self.got_table.entries.items.len * self.ptr_width.size())); try self.growSection(sect_id, needed_size); self.got_table_count_dirty = false; } @@ -847,7 +847,7 @@ fn writeOffsetTableEntry(self: *Coff, 
index: usize) !void { switch (self.ptr_width) { .p32 => { var buf: [4]u8 = undefined; - mem.writeIntLittle(u32, &buf, @intCast(u32, entry_value + self.getImageBase())); + mem.writeIntLittle(u32, &buf, @as(u32, @intCast(entry_value + self.getImageBase()))); try self.base.file.?.pwriteAll(&buf, file_offset); }, .p64 => { @@ -862,7 +862,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void { const gpa = self.base.allocator; const slide = @intFromPtr(self.hot_state.loaded_base_address.?); const actual_vmaddr = vmaddr + slide; - const pvaddr = @ptrFromInt(*anyopaque, actual_vmaddr); + const pvaddr = @as(*anyopaque, @ptrFromInt(actual_vmaddr)); log.debug("writing GOT entry to memory at address {x}", .{actual_vmaddr}); if (build_options.enable_logging) { switch (self.ptr_width) { @@ -880,7 +880,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void { switch (self.ptr_width) { .p32 => { var buf: [4]u8 = undefined; - mem.writeIntLittle(u32, &buf, @intCast(u32, entry_value + slide)); + mem.writeIntLittle(u32, &buf, @as(u32, @intCast(entry_value + slide))); writeMem(handle, pvaddr, &buf) catch |err| { log.warn("writing to protected memory failed with error: {s}", .{@errorName(err)}); }; @@ -1107,7 +1107,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In const atom = self.getAtom(atom_index); const sym = atom.getSymbolPtr(self); try self.setSymbolName(sym, sym_name); - sym.section_number = @enumFromInt(coff.SectionNumber, self.rdata_section_index.? + 1); + sym.section_number = @as(coff.SectionNumber, @enumFromInt(self.rdata_section_index.? + 1)); } const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .none, .{ @@ -1125,7 +1125,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In const required_alignment = tv.ty.abiAlignment(mod); const atom = self.getAtomPtr(atom_index); - atom.size = @intCast(u32, code.len); + atom.size = @as(u32, @intCast(code.len)); atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment); errdefer self.freeAtom(atom_index); @@ -1241,10 +1241,10 @@ fn updateLazySymbolAtom( }, }; - const code_len = @intCast(u32, code.len); + const code_len = @as(u32, @intCast(code.len)); const symbol = atom.getSymbolPtr(self); try self.setSymbolName(symbol, name); - symbol.section_number = @enumFromInt(coff.SectionNumber, section_index + 1); + symbol.section_number = @as(coff.SectionNumber, @enumFromInt(section_index + 1)); symbol.type = .{ .complex_type = .NULL, .base_type = .NULL }; const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment); @@ -1336,12 +1336,12 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple const atom = self.getAtom(atom_index); const sym_index = atom.getSymbolIndex().?; const sect_index = decl_metadata.section; - const code_len = @intCast(u32, code.len); + const code_len = @as(u32, @intCast(code.len)); if (atom.size != 0) { const sym = atom.getSymbolPtr(self); try self.setSymbolName(sym, decl_name); - sym.section_number = @enumFromInt(coff.SectionNumber, sect_index + 1); + sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1)); sym.type = .{ .complex_type = complex_type, .base_type = .NULL }; const capacity = atom.capacity(self); @@ -1365,7 +1365,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple } else { const sym = atom.getSymbolPtr(self); try self.setSymbolName(sym, decl_name); - sym.section_number = 
@enumFromInt(coff.SectionNumber, sect_index + 1); + sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1)); sym.type = .{ .complex_type = complex_type, .base_type = .NULL }; const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment); @@ -1502,7 +1502,7 @@ pub fn updateDeclExports( const sym = self.getSymbolPtr(sym_loc); try self.setSymbolName(sym, mod.intern_pool.stringToSlice(exp.opts.name)); sym.value = decl_sym.value; - sym.section_number = @enumFromInt(coff.SectionNumber, self.text_section_index.? + 1); + sym.section_number = @as(coff.SectionNumber, @enumFromInt(self.text_section_index.? + 1)); sym.type = .{ .complex_type = .FUNCTION, .base_type = .NULL }; switch (exp.opts.linkage) { @@ -1728,12 +1728,12 @@ pub fn getDeclVAddr(self: *Coff, decl_index: Module.Decl.Index, reloc_info: link try Atom.addRelocation(self, atom_index, .{ .type = .direct, .target = target, - .offset = @intCast(u32, reloc_info.offset), + .offset = @as(u32, @intCast(reloc_info.offset)), .addend = reloc_info.addend, .pcrel = false, .length = 3, }); - try Atom.addBaseRelocation(self, atom_index, @intCast(u32, reloc_info.offset)); + try Atom.addBaseRelocation(self, atom_index, @as(u32, @intCast(reloc_info.offset))); return 0; } @@ -1804,7 +1804,7 @@ fn writeBaseRelocations(self: *Coff) !void { gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa); } try gop.value_ptr.append(.{ - .offset = @intCast(u12, rva - page), + .offset = @as(u12, @intCast(rva - page)), .type = .DIR64, }); } @@ -1818,14 +1818,14 @@ fn writeBaseRelocations(self: *Coff) !void { const sym = self.getSymbol(entry); if (sym.section_number == .UNDEFINED) continue; - const rva = @intCast(u32, header.virtual_address + index * self.ptr_width.size()); + const rva = @as(u32, @intCast(header.virtual_address + index * self.ptr_width.size())); const page = mem.alignBackward(u32, rva, self.page_size); const gop = try page_table.getOrPut(page); if (!gop.found_existing) { gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa); } try gop.value_ptr.append(.{ - .offset = @intCast(u12, rva - page), + .offset = @as(u12, @intCast(rva - page)), .type = .DIR64, }); } @@ -1860,9 +1860,9 @@ fn writeBaseRelocations(self: *Coff) !void { }); } - const block_size = @intCast( + const block_size = @as( u32, - entries.items.len * @sizeOf(coff.BaseRelocation) + @sizeOf(coff.BaseRelocationDirectoryEntry), + @intCast(entries.items.len * @sizeOf(coff.BaseRelocation) + @sizeOf(coff.BaseRelocationDirectoryEntry)), ); try buffer.ensureUnusedCapacity(block_size); buffer.appendSliceAssumeCapacity(mem.asBytes(&coff.BaseRelocationDirectoryEntry{ @@ -1873,7 +1873,7 @@ fn writeBaseRelocations(self: *Coff) !void { } const header = &self.sections.items(.header)[self.reloc_section_index.?]; - const needed_size = @intCast(u32, buffer.items.len); + const needed_size = @as(u32, @intCast(buffer.items.len)); try self.growSection(self.reloc_section_index.?, needed_size); try self.base.file.?.pwriteAll(buffer.items, header.pointer_to_raw_data); @@ -1904,12 +1904,12 @@ fn writeImportTables(self: *Coff) !void { const itable = self.import_tables.values()[i]; iat_size += itable.size() + 8; dir_table_size += @sizeOf(coff.ImportDirectoryEntry); - lookup_table_size += @intCast(u32, itable.entries.items.len + 1) * @sizeOf(coff.ImportLookupEntry64.ByName); + lookup_table_size += @as(u32, @intCast(itable.entries.items.len + 1)) * @sizeOf(coff.ImportLookupEntry64.ByName); for (itable.entries.items) |entry| { const sym_name = self.getSymbolName(entry); - 
names_table_size += 2 + mem.alignForward(u32, @intCast(u32, sym_name.len + 1), 2); + names_table_size += 2 + mem.alignForward(u32, @as(u32, @intCast(sym_name.len + 1)), 2); } - dll_names_size += @intCast(u32, lib_name.len + ext.len + 1); + dll_names_size += @as(u32, @intCast(lib_name.len + ext.len + 1)); } const needed_size = iat_size + dir_table_size + lookup_table_size + names_table_size + dll_names_size; @@ -1948,7 +1948,7 @@ fn writeImportTables(self: *Coff) !void { const import_name = self.getSymbolName(entry); // IAT and lookup table entry - const lookup = coff.ImportLookupEntry64.ByName{ .name_table_rva = @intCast(u31, header.virtual_address + names_table_offset) }; + const lookup = coff.ImportLookupEntry64.ByName{ .name_table_rva = @as(u31, @intCast(header.virtual_address + names_table_offset)) }; @memcpy( buffer.items[iat_offset..][0..@sizeOf(coff.ImportLookupEntry64.ByName)], mem.asBytes(&lookup), @@ -1964,7 +1964,7 @@ fn writeImportTables(self: *Coff) !void { mem.writeIntLittle(u16, buffer.items[names_table_offset..][0..2], 0); // Hint set to 0 until we learn how to parse DLLs names_table_offset += 2; @memcpy(buffer.items[names_table_offset..][0..import_name.len], import_name); - names_table_offset += @intCast(u32, import_name.len); + names_table_offset += @as(u32, @intCast(import_name.len)); buffer.items[names_table_offset] = 0; names_table_offset += 1; if (!mem.isAlignedGeneric(usize, names_table_offset, @sizeOf(u16))) { @@ -1986,9 +1986,9 @@ fn writeImportTables(self: *Coff) !void { // DLL name @memcpy(buffer.items[dll_names_offset..][0..lib_name.len], lib_name); - dll_names_offset += @intCast(u32, lib_name.len); + dll_names_offset += @as(u32, @intCast(lib_name.len)); @memcpy(buffer.items[dll_names_offset..][0..ext.len], ext); - dll_names_offset += @intCast(u32, ext.len); + dll_names_offset += @as(u32, @intCast(ext.len)); buffer.items[dll_names_offset] = 0; dll_names_offset += 1; } @@ -2027,11 +2027,11 @@ fn writeStrtab(self: *Coff) !void { if (self.strtab_offset == null) return; const allocated_size = self.allocatedSize(self.strtab_offset.?); - const needed_size = @intCast(u32, self.strtab.len()); + const needed_size = @as(u32, @intCast(self.strtab.len())); if (needed_size > allocated_size) { self.strtab_offset = null; - self.strtab_offset = @intCast(u32, self.findFreeSpace(needed_size, @alignOf(u32))); + self.strtab_offset = @as(u32, @intCast(self.findFreeSpace(needed_size, @alignOf(u32)))); } log.debug("writing strtab from 0x{x} to 0x{x}", .{ self.strtab_offset.?, self.strtab_offset.? + needed_size }); @@ -2042,7 +2042,7 @@ fn writeStrtab(self: *Coff) !void { buffer.appendSliceAssumeCapacity(self.strtab.items()); // Here, we do a trick in that we do not commit the size of the strtab to strtab buffer, instead // we write the length of the strtab to a temporary buffer that goes to file. 
- mem.writeIntLittle(u32, buffer.items[0..4], @intCast(u32, self.strtab.len())); + mem.writeIntLittle(u32, buffer.items[0..4], @as(u32, @intCast(self.strtab.len()))); try self.base.file.?.pwriteAll(buffer.items, self.strtab_offset.?); } @@ -2081,11 +2081,11 @@ fn writeHeader(self: *Coff) !void { } const timestamp = std.time.timestamp(); - const size_of_optional_header = @intCast(u16, self.getOptionalHeaderSize() + self.getDataDirectoryHeadersSize()); + const size_of_optional_header = @as(u16, @intCast(self.getOptionalHeaderSize() + self.getDataDirectoryHeadersSize())); var coff_header = coff.CoffHeader{ .machine = coff.MachineType.fromTargetCpuArch(self.base.options.target.cpu.arch), - .number_of_sections = @intCast(u16, self.sections.slice().len), // TODO what if we prune a section - .time_date_stamp = @truncate(u32, @bitCast(u64, timestamp)), + .number_of_sections = @as(u16, @intCast(self.sections.slice().len)), // TODO what if we prune a section + .time_date_stamp = @as(u32, @truncate(@as(u64, @bitCast(timestamp)))), .pointer_to_symbol_table = self.strtab_offset orelse 0, .number_of_symbols = 0, .size_of_optional_header = size_of_optional_header, @@ -2135,7 +2135,7 @@ fn writeHeader(self: *Coff) !void { .address_of_entry_point = self.entry_addr orelse 0, .base_of_code = base_of_code, .base_of_data = base_of_data, - .image_base = @intCast(u32, image_base), + .image_base = @as(u32, @intCast(image_base)), .section_alignment = self.page_size, .file_alignment = default_file_alignment, .major_operating_system_version = 6, @@ -2155,7 +2155,7 @@ fn writeHeader(self: *Coff) !void { .size_of_heap_reserve = default_size_of_heap_reserve, .size_of_heap_commit = default_size_of_heap_commit, .loader_flags = 0, - .number_of_rva_and_sizes = @intCast(u32, self.data_directories.len), + .number_of_rva_and_sizes = @as(u32, @intCast(self.data_directories.len)), }; writer.writeAll(mem.asBytes(&opt_header)) catch unreachable; }, @@ -2189,7 +2189,7 @@ fn writeHeader(self: *Coff) !void { .size_of_heap_reserve = default_size_of_heap_reserve, .size_of_heap_commit = default_size_of_heap_commit, .loader_flags = 0, - .number_of_rva_and_sizes = @intCast(u32, self.data_directories.len), + .number_of_rva_and_sizes = @as(u32, @intCast(self.data_directories.len)), }; writer.writeAll(mem.asBytes(&opt_header)) catch unreachable; }, @@ -2210,7 +2210,7 @@ fn detectAllocCollision(self: *Coff, start: u32, size: u32) ?u32 { const end = start + padToIdeal(size); if (self.strtab_offset) |off| { - const tight_size = @intCast(u32, self.strtab.len()); + const tight_size = @as(u32, @intCast(self.strtab.len())); const increased_size = padToIdeal(tight_size); const test_end = off + increased_size; if (end > off and start < test_end) { @@ -2265,28 +2265,28 @@ fn allocatedVirtualSize(self: *Coff, start: u32) u32 { inline fn getSizeOfHeaders(self: Coff) u32 { const msdos_hdr_size = msdos_stub.len + 4; - return @intCast(u32, msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize() + - self.getDataDirectoryHeadersSize() + self.getSectionHeadersSize()); + return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize() + + self.getDataDirectoryHeadersSize() + self.getSectionHeadersSize())); } inline fn getOptionalHeaderSize(self: Coff) u32 { return switch (self.ptr_width) { - .p32 => @intCast(u32, @sizeOf(coff.OptionalHeaderPE32)), - .p64 => @intCast(u32, @sizeOf(coff.OptionalHeaderPE64)), + .p32 => @as(u32, @intCast(@sizeOf(coff.OptionalHeaderPE32))), + .p64 => @as(u32, 
@intCast(@sizeOf(coff.OptionalHeaderPE64))), }; } inline fn getDataDirectoryHeadersSize(self: Coff) u32 { - return @intCast(u32, self.data_directories.len * @sizeOf(coff.ImageDataDirectory)); + return @as(u32, @intCast(self.data_directories.len * @sizeOf(coff.ImageDataDirectory))); } inline fn getSectionHeadersSize(self: Coff) u32 { - return @intCast(u32, self.sections.slice().len * @sizeOf(coff.SectionHeader)); + return @as(u32, @intCast(self.sections.slice().len * @sizeOf(coff.SectionHeader))); } inline fn getDataDirectoryHeadersOffset(self: Coff) u32 { const msdos_hdr_size = msdos_stub.len + 4; - return @intCast(u32, msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize()); + return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize())); } inline fn getSectionHeadersOffset(self: Coff) u32 { @@ -2473,7 +2473,7 @@ fn logSymtab(self: *Coff) void { }; log.debug(" %{d}: {?s} @{x} in {s}({d}), {s}", .{ sym_id, - self.getSymbolName(.{ .sym_index = @intCast(u32, sym_id), .file = null }), + self.getSymbolName(.{ .sym_index = @as(u32, @intCast(sym_id)), .file = null }), sym.value, where, def_index, diff --git a/src/link/Coff/ImportTable.zig b/src/link/Coff/ImportTable.zig index c3ba77e855..c25851fe72 100644 --- a/src/link/Coff/ImportTable.zig +++ b/src/link/Coff/ImportTable.zig @@ -38,7 +38,7 @@ pub fn deinit(itab: *ImportTable, allocator: Allocator) void { /// Size of the import table does not include the sentinel. pub fn size(itab: ImportTable) u32 { - return @intCast(u32, itab.entries.items.len) * @sizeOf(u64); + return @as(u32, @intCast(itab.entries.items.len)) * @sizeOf(u64); } pub fn addImport(itab: *ImportTable, allocator: Allocator, target: SymbolWithLoc) !ImportIndex { @@ -49,7 +49,7 @@ pub fn addImport(itab: *ImportTable, allocator: Allocator, target: SymbolWithLoc break :blk index; } else { log.debug(" (allocating import entry at index {d})", .{itab.entries.items.len}); - const index = @intCast(u32, itab.entries.items.len); + const index = @as(u32, @intCast(itab.entries.items.len)); _ = itab.entries.addOneAssumeCapacity(); break :blk index; } @@ -73,7 +73,7 @@ fn getBaseAddress(ctx: Context) u32 { var addr = header.virtual_address; for (ctx.coff_file.import_tables.values(), 0..) 
|other_itab, i| { if (ctx.index == i) break; - addr += @intCast(u32, other_itab.entries.items.len * @sizeOf(u64)) + 8; + addr += @as(u32, @intCast(other_itab.entries.items.len * @sizeOf(u64))) + 8; } return addr; } diff --git a/src/link/Coff/Relocation.zig b/src/link/Coff/Relocation.zig index 10d4eed92b..ded7483667 100644 --- a/src/link/Coff/Relocation.zig +++ b/src/link/Coff/Relocation.zig @@ -126,23 +126,23 @@ fn resolveAarch64(self: Relocation, ctx: Context) void { var buffer = ctx.code[self.offset..]; switch (self.type) { .got_page, .import_page, .page => { - const source_page = @intCast(i32, ctx.source_vaddr >> 12); - const target_page = @intCast(i32, ctx.target_vaddr >> 12); - const pages = @bitCast(u21, @intCast(i21, target_page - source_page)); + const source_page = @as(i32, @intCast(ctx.source_vaddr >> 12)); + const target_page = @as(i32, @intCast(ctx.target_vaddr >> 12)); + const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page)))); var inst = aarch64.Instruction{ .pc_relative_address = mem.bytesToValue(meta.TagPayload( aarch64.Instruction, aarch64.Instruction.pc_relative_address, ), buffer[0..4]), }; - inst.pc_relative_address.immhi = @truncate(u19, pages >> 2); - inst.pc_relative_address.immlo = @truncate(u2, pages); + inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2)); + inst.pc_relative_address.immlo = @as(u2, @truncate(pages)); mem.writeIntLittle(u32, buffer[0..4], inst.toU32()); }, .got_pageoff, .import_pageoff, .pageoff => { assert(!self.pcrel); - const narrowed = @truncate(u12, @intCast(u64, ctx.target_vaddr)); + const narrowed = @as(u12, @truncate(@as(u64, @intCast(ctx.target_vaddr)))); if (isArithmeticOp(buffer[0..4])) { var inst = aarch64.Instruction{ .add_subtract_immediate = mem.bytesToValue(meta.TagPayload( @@ -182,7 +182,7 @@ fn resolveAarch64(self: Relocation, ctx: Context) void { 2 => mem.writeIntLittle( u32, buffer[0..4], - @truncate(u32, ctx.target_vaddr + ctx.image_base), + @as(u32, @truncate(ctx.target_vaddr + ctx.image_base)), ), 3 => mem.writeIntLittle(u64, buffer[0..8], ctx.target_vaddr + ctx.image_base), else => unreachable, @@ -206,17 +206,17 @@ fn resolveX86(self: Relocation, ctx: Context) void { .got, .import => { assert(self.pcrel); - const disp = @intCast(i32, ctx.target_vaddr) - @intCast(i32, ctx.source_vaddr) - 4; + const disp = @as(i32, @intCast(ctx.target_vaddr)) - @as(i32, @intCast(ctx.source_vaddr)) - 4; mem.writeIntLittle(i32, buffer[0..4], disp); }, .direct => { if (self.pcrel) { - const disp = @intCast(i32, ctx.target_vaddr) - @intCast(i32, ctx.source_vaddr) - 4; + const disp = @as(i32, @intCast(ctx.target_vaddr)) - @as(i32, @intCast(ctx.source_vaddr)) - 4; mem.writeIntLittle(i32, buffer[0..4], disp); } else switch (ctx.ptr_width) { - .p32 => mem.writeIntLittle(u32, buffer[0..4], @intCast(u32, ctx.target_vaddr + ctx.image_base)), + .p32 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @intCast(ctx.target_vaddr + ctx.image_base))), .p64 => switch (self.length) { - 2 => mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, ctx.target_vaddr + ctx.image_base)), + 2 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @truncate(ctx.target_vaddr + ctx.image_base))), 3 => mem.writeIntLittle(u64, buffer[0..8], ctx.target_vaddr + ctx.image_base), else => unreachable, }, @@ -226,6 +226,6 @@ fn resolveX86(self: Relocation, ctx: Context) void { } inline fn isArithmeticOp(inst: *const [4]u8) bool { - const group_decode = @truncate(u5, inst[3]); + const group_decode = @as(u5, @truncate(inst[3])); return ((group_decode >> 2) 
== 4); } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 14be46b621..499855b330 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -138,7 +138,7 @@ pub const DeclState = struct { /// which we use as our target of the relocation. fn addTypeRelocGlobal(self: *DeclState, atom_index: Atom.Index, ty: Type, offset: u32) !void { const resolv = self.abbrev_resolver.get(ty.toIntern()) orelse blk: { - const sym_index = @intCast(u32, self.abbrev_table.items.len); + const sym_index = @as(u32, @intCast(self.abbrev_table.items.len)); try self.abbrev_table.append(self.gpa, .{ .atom_index = atom_index, .type = ty, @@ -225,7 +225,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, Type.bool, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, Type.bool, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata try dbg_info_buffer.ensureUnusedCapacity(6); dbg_info_buffer.appendAssumeCapacity(0); @@ -237,7 +237,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, payload_ty, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata const offset = abi_size - payload_ty.abiSize(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), offset); @@ -249,7 +249,7 @@ pub const DeclState = struct { if (ty.isSlice(mod)) { // Slices are structs: struct { .ptr = *, .len = N } const ptr_bits = target.ptrBitWidth(); - const ptr_bytes = @intCast(u8, @divExact(ptr_bits, 8)); + const ptr_bytes = @as(u8, @intCast(@divExact(ptr_bits, 8))); // DW.AT.structure_type try dbg_info_buffer.ensureUnusedCapacity(2); dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevKind.struct_type)); @@ -267,7 +267,7 @@ pub const DeclState = struct { var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); const ptr_ty = ty.slicePtrFieldType(mod); - try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, ptr_ty, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata try dbg_info_buffer.ensureUnusedCapacity(6); dbg_info_buffer.appendAssumeCapacity(0); @@ -279,7 +279,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, Type.usize, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata try dbg_info_buffer.ensureUnusedCapacity(2); dbg_info_buffer.appendAssumeCapacity(ptr_bytes); @@ -291,7 +291,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @as(u32, @intCast(index))); } }, .Array => { @@ -302,13 +302,13 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @as(u32, @intCast(index))); 
// DW.AT.subrange_type try dbg_info_buffer.append(@intFromEnum(AbbrevKind.array_dim)); // DW.AT.type, DW.FORM.ref4 index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, Type.usize, @as(u32, @intCast(index))); // DW.AT.count, DW.FORM.udata const len = ty.arrayLenIncludingSentinel(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), len); @@ -334,7 +334,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata const field_off = ty.structFieldOffset(field_index, mod); try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); @@ -367,7 +367,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, field.ty, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata const field_off = ty.structFieldOffset(field_index, mod); try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); @@ -404,7 +404,7 @@ pub const DeclState = struct { // TODO do not assume a 64bit enum value - could be bigger. // See https://github.com/ziglang/zig/issues/645 const field_int_val = try value.toValue().intFromEnum(ty, mod); - break :value @bitCast(u64, field_int_val.toSignedInt(mod)); + break :value @as(u64, @bitCast(field_int_val.toSignedInt(mod))); }; mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian); } @@ -439,7 +439,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const inner_union_index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(inner_union_index + 4); - try self.addTypeRelocLocal(atom_index, @intCast(u32, inner_union_index), 5); + try self.addTypeRelocLocal(atom_index, @as(u32, @intCast(inner_union_index)), 5); // DW.AT.data_member_location, DW.FORM.udata try leb128.writeULEB128(dbg_info_buffer.writer(), payload_offset); } @@ -468,7 +468,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, field.ty, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata try dbg_info_buffer.append(0); } @@ -485,7 +485,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, union_obj.tag_ty, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, union_obj.tag_ty, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata try leb128.writeULEB128(dbg_info_buffer.writer(), tag_offset); @@ -521,7 +521,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, payload_ty, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata 
try leb128.writeULEB128(dbg_info_buffer.writer(), payload_off);
}
@@ -536,7 +536,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, error_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, error_ty, @as(u32, @intCast(index)));
// DW.AT.data_member_location, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), error_off);
}
@@ -640,7 +640,7 @@ pub const DeclState = struct {
try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4
- try self.addTypeRelocGlobal(atom_index, ty, @intCast(u32, index)); // DW.AT.type, DW.FORM.ref4
+ try self.addTypeRelocGlobal(atom_index, ty, @as(u32, @intCast(index))); // DW.AT.type, DW.FORM.ref4
dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
}
@@ -723,20 +723,20 @@ pub const DeclState = struct {
.memory,
.linker_load,
=> {
- const ptr_width = @intCast(u8, @divExact(target.ptrBitWidth(), 8));
+ const ptr_width = @as(u8, @intCast(@divExact(target.ptrBitWidth(), 8)));
try dbg_info.ensureUnusedCapacity(2 + ptr_width);
dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
1 + ptr_width + @intFromBool(is_ptr),
DW.OP.addr, // literal address
});
- const offset = @intCast(u32, dbg_info.items.len);
+ const offset = @as(u32, @intCast(dbg_info.items.len));
const addr = switch (loc) {
.memory => |x| x,
else => 0,
};
switch (ptr_width) {
0...4 => {
- try dbg_info.writer().writeInt(u32, @intCast(u32, addr), endian);
+ try dbg_info.writer().writeInt(u32, @as(u32, @intCast(addr)), endian);
},
5...8 => {
try dbg_info.writer().writeInt(u64, addr, endian);
},
@@ -765,19 +765,19 @@ pub const DeclState = struct {
if (child_ty.isSignedInt(mod)) DW.OP.consts else DW.OP.constu,
});
if (child_ty.isSignedInt(mod)) {
- try leb128.writeILEB128(dbg_info.writer(), @bitCast(i64, x));
+ try leb128.writeILEB128(dbg_info.writer(), @as(i64, @bitCast(x)));
} else {
try leb128.writeULEB128(dbg_info.writer(), x);
}
try dbg_info.append(DW.OP.stack_value);
- dbg_info.items[fixup] += @intCast(u8, dbg_info.items.len - fixup - 2);
+ dbg_info.items[fixup] += @as(u8, @intCast(dbg_info.items.len - fixup - 2));
},
.undef => {
// DW.AT.location, DW.FORM.exprloc
// uleb128(exprloc_len)
// DW.OP.implicit_value uleb128(len_of_bytes) bytes
- const abi_size = @intCast(u32, child_ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(child_ty.abiSize(mod)));
var implicit_value_len = std.ArrayList(u8).init(self.gpa);
defer implicit_value_len.deinit();
try leb128.writeULEB128(implicit_value_len.writer(), abi_size);
@@ -807,7 +807,7 @@ pub const DeclState = struct {
try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4
- try self.addTypeRelocGlobal(atom_index, child_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, child_ty, @as(u32, @intCast(index)));
dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
}
@@ -963,7 +963,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
func.lbrace_line,
func.rbrace_line,
});
- const line = @intCast(u28, decl.src_line + func.lbrace_line);
+ const line = @as(u28, @intCast(decl.src_line + func.lbrace_line));
const ptr_width_bytes = self.ptrWidthBytes();
dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{ @@ -1013,7 +1013,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) dbg_info_buffer.items.len += 4; // DW.AT.high_pc, DW.FORM.data4 // if (fn_ret_has_bits) { - try decl_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @intCast(u32, dbg_info_buffer.items.len)); + try decl_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @as(u32, @intCast(dbg_info_buffer.items.len))); dbg_info_buffer.items.len += 4; // DW.AT.type, DW.FORM.ref4 } @@ -1055,11 +1055,11 @@ pub fn commitDeclState( .p32 => { { const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, sym_addr), target_endian); + mem.writeInt(u32, ptr, @as(u32, @intCast(sym_addr)), target_endian); } { const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, sym_addr), target_endian); + mem.writeInt(u32, ptr, @as(u32, @intCast(sym_addr)), target_endian); } }, .p64 => { @@ -1079,7 +1079,7 @@ pub fn commitDeclState( sym_size, }); const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, sym_size), target_endian); + mem.writeInt(u32, ptr, @as(u32, @intCast(sym_size)), target_endian); } try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS.extended_op, 1, DW.LNE.end_sequence }); @@ -1091,7 +1091,7 @@ pub fn commitDeclState( // probably need to edit that logic too. const src_fn_index = self.src_fn_decls.get(decl_index).?; const src_fn = self.getAtomPtr(.src_fn, src_fn_index); - src_fn.len = @intCast(u32, dbg_line_buffer.items.len); + src_fn.len = @as(u32, @intCast(dbg_line_buffer.items.len)); if (self.src_fn_last_index) |last_index| blk: { if (src_fn_index == last_index) break :blk; @@ -1254,12 +1254,12 @@ pub fn commitDeclState( }; if (deferred) continue; - symbol.offset = @intCast(u32, dbg_info_buffer.items.len); + symbol.offset = @as(u32, @intCast(dbg_info_buffer.items.len)); try decl_state.addDbgInfoType(mod, di_atom_index, ty); } } - try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len)); + try self.updateDeclDebugInfoAllocation(di_atom_index, @as(u32, @intCast(dbg_info_buffer.items.len))); while (decl_state.abbrev_relocs.popOrNull()) |reloc| { if (reloc.target) |target| { @@ -1402,7 +1402,7 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32) self.di_atom_first_index = atom_index; self.di_atom_last_index = atom_index; - atom.off = @intCast(u32, padToIdeal(self.dbgInfoHeaderBytes())); + atom.off = @as(u32, @intCast(padToIdeal(self.dbgInfoHeaderBytes()))); } } @@ -1513,7 +1513,7 @@ pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: Module.Decl. func.lbrace_line, func.rbrace_line, }); - const line = @intCast(u28, decl.src_line + func.lbrace_line); + const line = @as(u28, @intCast(decl.src_line + func.lbrace_line)); var data: [4]u8 = undefined; leb128.writeUnsignedFixed(4, &data, line); @@ -1791,10 +1791,10 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u const dbg_info_end = self.getDebugInfoEnd().? 
+ 1; const init_len = dbg_info_end - after_init_len; if (self.bin_file.tag == .macho) { - mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, init_len)); + mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(init_len))); } else switch (self.ptr_width) { .p32 => { - mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, init_len), target_endian); + mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(init_len)), target_endian); }, .p64 => { di_buf.appendNTimesAssumeCapacity(0xff, 4); @@ -1804,11 +1804,11 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), 4, target_endian); // DWARF version const abbrev_offset = self.abbrev_table_offset.?; if (self.bin_file.tag == .macho) { - mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, abbrev_offset)); + mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(abbrev_offset))); di_buf.appendAssumeCapacity(8); // address size } else switch (self.ptr_width) { .p32 => { - mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, abbrev_offset), target_endian); + mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(abbrev_offset)), target_endian); di_buf.appendAssumeCapacity(4); // address size }, .p64 => { @@ -1828,9 +1828,9 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), 0); // DW.AT.stmt_list, DW.FORM.sec_offset mem.writeIntLittle(u64, di_buf.addManyAsArrayAssumeCapacity(8), low_pc); mem.writeIntLittle(u64, di_buf.addManyAsArrayAssumeCapacity(8), high_pc); - mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, name_strp)); - mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, comp_dir_strp)); - mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, producer_strp)); + mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(name_strp))); + mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(comp_dir_strp))); + mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(producer_strp))); } else { self.writeAddrAssumeCapacity(&di_buf, 0); // DW.AT.stmt_list, DW.FORM.sec_offset self.writeAddrAssumeCapacity(&di_buf, low_pc); @@ -1885,7 +1885,7 @@ fn resolveCompilationDir(module: *Module, buffer: *[std.fs.MAX_PATH_BYTES]u8) [] fn writeAddrAssumeCapacity(self: *Dwarf, buf: *std.ArrayList(u8), addr: u64) void { const target_endian = self.target.cpu.arch.endian(); switch (self.ptr_width) { - .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, addr), target_endian), + .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(addr)), target_endian), .p64 => mem.writeInt(u64, buf.addManyAsArrayAssumeCapacity(8), addr, target_endian), } } @@ -2152,10 +2152,10 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void { // Go back and populate the initial length. 
const init_len = di_buf.items.len - after_init_len; if (self.bin_file.tag == .macho) { - mem.writeIntLittle(u32, di_buf.items[init_len_index..][0..4], @intCast(u32, init_len)); + mem.writeIntLittle(u32, di_buf.items[init_len_index..][0..4], @as(u32, @intCast(init_len))); } else switch (self.ptr_width) { .p32 => { - mem.writeInt(u32, di_buf.items[init_len_index..][0..4], @intCast(u32, init_len), target_endian); + mem.writeInt(u32, di_buf.items[init_len_index..][0..4], @as(u32, @intCast(init_len)), target_endian); }, .p64 => { // initial length - length of the .debug_aranges contribution for this compilation unit, @@ -2165,7 +2165,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void { }, } - const needed_size = @intCast(u32, di_buf.items.len); + const needed_size = @as(u32, @intCast(di_buf.items.len)); switch (self.bin_file.tag) { .elf => { const elf_file = self.bin_file.cast(File.Elf).?; @@ -2293,7 +2293,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void { di_buf.appendSliceAssumeCapacity(file); di_buf.appendSliceAssumeCapacity(&[_]u8{ 0, // null byte for the relative path name - @intCast(u8, dir_index), // directory_index + @as(u8, @intCast(dir_index)), // directory_index 0, // mtime (TODO supply this) 0, // file size bytes (TODO supply this) }); @@ -2304,11 +2304,11 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void { switch (self.bin_file.tag) { .macho => { - mem.writeIntLittle(u32, di_buf.items[before_header_len..][0..4], @intCast(u32, header_len)); + mem.writeIntLittle(u32, di_buf.items[before_header_len..][0..4], @as(u32, @intCast(header_len))); }, else => switch (self.ptr_width) { .p32 => { - mem.writeInt(u32, di_buf.items[before_header_len..][0..4], @intCast(u32, header_len), target_endian); + mem.writeInt(u32, di_buf.items[before_header_len..][0..4], @as(u32, @intCast(header_len)), target_endian); }, .p64 => { mem.writeInt(u64, di_buf.items[before_header_len..][0..8], header_len, target_endian); @@ -2348,7 +2348,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void { .macho => { const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?; const sect_index = d_sym.debug_line_section_index.?; - const needed_size = @intCast(u32, d_sym.getSection(sect_index).size + delta); + const needed_size = @as(u32, @intCast(d_sym.getSection(sect_index).size + delta)); try d_sym.growSection(sect_index, needed_size, true); const file_pos = d_sym.getSection(sect_index).offset + first_fn.off; @@ -2384,11 +2384,11 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void { const init_len = self.getDebugLineProgramEnd().? - before_init_len - init_len_size; switch (self.bin_file.tag) { .macho => { - mem.writeIntLittle(u32, di_buf.items[before_init_len..][0..4], @intCast(u32, init_len)); + mem.writeIntLittle(u32, di_buf.items[before_init_len..][0..4], @as(u32, @intCast(init_len))); }, else => switch (self.ptr_width) { .p32 => { - mem.writeInt(u32, di_buf.items[before_init_len..][0..4], @intCast(u32, init_len), target_endian); + mem.writeInt(u32, di_buf.items[before_init_len..][0..4], @as(u32, @intCast(init_len)), target_endian); }, .p64 => { mem.writeInt(u64, di_buf.items[before_init_len + 4 ..][0..8], init_len, target_endian); @@ -2477,7 +2477,7 @@ fn dbgLineNeededHeaderBytes(self: Dwarf, dirs: []const []const u8, files: []cons } size += 1; // file names sentinel - return @intCast(u32, size); + return @as(u32, @intCast(size)); } /// The reloc offset for the line offset of a function from the previous function's line. 
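All of the hunks in this part of the series apply the same mechanical rewrite: the cast builtins no longer take the destination type as a first argument, so call sites whose surrounding expression does not already supply a result type wrap the cast in @as. A minimal sketch of the before/after pattern, assuming the post-change builtin semantics shown in these hunks; the names and values below are illustrative and are not taken from the patch:

const std = @import("std");

pub fn main() void {
    const items_len: usize = 42; // placeholder value
    const signed: i64 = -1; // placeholder value

    // Before: destination type passed as the first argument.
    //   const off = @intCast(u32, items_len);
    //   const bits = @bitCast(u64, signed);

    // After: the builtin takes only the operand and gets its result type
    // from context, so a bare cast is wrapped in @as to pin the same
    // destination type as before.
    const off = @as(u32, @intCast(items_len));
    const bits = @as(u64, @bitCast(signed));

    std.debug.print("off={d} bits=0x{x}\n", .{ off, bits });
}

Keeping the destination type explicit via @as means each rewritten call site casts to exactly the type it did before, so the migration stays purely syntactic.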
@@ -2516,7 +2516,7 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void { const di_atom_index = try self.createAtom(.di_atom); log.debug("updateDeclDebugInfoAllocation in flushModule", .{}); - try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len)); + try self.updateDeclDebugInfoAllocation(di_atom_index, @as(u32, @intCast(dbg_info_buffer.items.len))); log.debug("writeDeclDebugInfo in flushModule", .{}); try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items); @@ -2581,7 +2581,7 @@ fn addDIFile(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !u28 { else => unreachable, } } - return @intCast(u28, gop.index + 1); + return @as(u28, @intCast(gop.index + 1)); } fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct { @@ -2614,7 +2614,7 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct { const dir_index: u28 = blk: { const dirs_gop = dirs.getOrPutAssumeCapacity(dir_path); - break :blk @intCast(u28, dirs_gop.index + 1); + break :blk @as(u28, @intCast(dirs_gop.index + 1)); }; files_dir_indexes.appendAssumeCapacity(dir_index); @@ -2679,12 +2679,12 @@ fn createAtom(self: *Dwarf, comptime kind: Kind) !Atom.Index { const index = blk: { switch (kind) { .src_fn => { - const index = @intCast(Atom.Index, self.src_fns.items.len); + const index = @as(Atom.Index, @intCast(self.src_fns.items.len)); _ = try self.src_fns.addOne(self.allocator); break :blk index; }, .di_atom => { - const index = @intCast(Atom.Index, self.di_atoms.items.len); + const index = @as(Atom.Index, @intCast(self.di_atoms.items.len)); _ = try self.di_atoms.addOne(self.allocator); break :blk index; }, diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 283bd9ccca..8d08b73d6a 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -455,7 +455,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { const ptr_size: u8 = self.ptrWidthBytes(); if (self.phdr_table_index == null) { - self.phdr_table_index = @intCast(u16, self.program_headers.items.len); + self.phdr_table_index = @as(u16, @intCast(self.program_headers.items.len)); const p_align: u16 = switch (self.ptr_width) { .p32 => @alignOf(elf.Elf32_Phdr), .p64 => @alignOf(elf.Elf64_Phdr), @@ -474,7 +474,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_table_load_index == null) { - self.phdr_table_load_index = @intCast(u16, self.program_headers.items.len); + self.phdr_table_load_index = @as(u16, @intCast(self.program_headers.items.len)); // TODO Same as for GOT const phdr_addr: u64 = if (self.base.options.target.ptrBitWidth() >= 32) 0x1000000 else 0x1000; const p_align = self.page_size; @@ -492,7 +492,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_load_re_index == null) { - self.phdr_load_re_index = @intCast(u16, self.program_headers.items.len); + self.phdr_load_re_index = @as(u16, @intCast(self.program_headers.items.len)); const file_size = self.base.options.program_code_size_hint; const p_align = self.page_size; const off = self.findFreeSpace(file_size, p_align); @@ -513,7 +513,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_got_index == null) { - self.phdr_got_index = @intCast(u16, self.program_headers.items.len); + self.phdr_got_index = @as(u16, @intCast(self.program_headers.items.len)); const file_size = @as(u64, ptr_size) * self.base.options.symbol_count_hint; // We really only need ptr alignment but since we are using PROGBITS, linux requires // page align. 
@@ -538,7 +538,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_load_ro_index == null) { - self.phdr_load_ro_index = @intCast(u16, self.program_headers.items.len); + self.phdr_load_ro_index = @as(u16, @intCast(self.program_headers.items.len)); // TODO Find a hint about how much data need to be in rodata ? const file_size = 1024; // Same reason as for GOT @@ -561,7 +561,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_load_rw_index == null) { - self.phdr_load_rw_index = @intCast(u16, self.program_headers.items.len); + self.phdr_load_rw_index = @as(u16, @intCast(self.program_headers.items.len)); // TODO Find a hint about how much data need to be in data ? const file_size = 1024; // Same reason as for GOT @@ -584,7 +584,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.shstrtab_index == null) { - self.shstrtab_index = @intCast(u16, self.sections.slice().len); + self.shstrtab_index = @as(u16, @intCast(self.sections.slice().len)); assert(self.shstrtab.buffer.items.len == 0); try self.shstrtab.buffer.append(gpa, 0); // need a 0 at position 0 const off = self.findFreeSpace(self.shstrtab.buffer.items.len, 1); @@ -609,7 +609,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.text_section_index == null) { - self.text_section_index = @intCast(u16, self.sections.slice().len); + self.text_section_index = @as(u16, @intCast(self.sections.slice().len)); const phdr = &self.program_headers.items[self.phdr_load_re_index.?]; try self.sections.append(gpa, .{ @@ -631,7 +631,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.got_section_index == null) { - self.got_section_index = @intCast(u16, self.sections.slice().len); + self.got_section_index = @as(u16, @intCast(self.sections.slice().len)); const phdr = &self.program_headers.items[self.phdr_got_index.?]; try self.sections.append(gpa, .{ @@ -653,7 +653,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.rodata_section_index == null) { - self.rodata_section_index = @intCast(u16, self.sections.slice().len); + self.rodata_section_index = @as(u16, @intCast(self.sections.slice().len)); const phdr = &self.program_headers.items[self.phdr_load_ro_index.?]; try self.sections.append(gpa, .{ @@ -675,7 +675,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.data_section_index == null) { - self.data_section_index = @intCast(u16, self.sections.slice().len); + self.data_section_index = @as(u16, @intCast(self.sections.slice().len)); const phdr = &self.program_headers.items[self.phdr_load_rw_index.?]; try self.sections.append(gpa, .{ @@ -697,7 +697,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.symtab_section_index == null) { - self.symtab_section_index = @intCast(u16, self.sections.slice().len); + self.symtab_section_index = @as(u16, @intCast(self.sections.slice().len)); const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym); const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym); const file_size = self.base.options.symbol_count_hint * each_size; @@ -714,7 +714,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { .sh_size = file_size, // The section header index of the associated string table. 
.sh_link = self.shstrtab_index.?, - .sh_info = @intCast(u32, self.local_symbols.items.len), + .sh_info = @as(u32, @intCast(self.local_symbols.items.len)), .sh_addralign = min_align, .sh_entsize = each_size, }, @@ -726,7 +726,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { if (self.dwarf) |*dw| { if (self.debug_str_section_index == null) { - self.debug_str_section_index = @intCast(u16, self.sections.slice().len); + self.debug_str_section_index = @as(u16, @intCast(self.sections.slice().len)); assert(dw.strtab.buffer.items.len == 0); try dw.strtab.buffer.append(gpa, 0); try self.sections.append(gpa, .{ @@ -749,7 +749,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.debug_info_section_index == null) { - self.debug_info_section_index = @intCast(u16, self.sections.slice().len); + self.debug_info_section_index = @as(u16, @intCast(self.sections.slice().len)); const file_size_hint = 200; const p_align = 1; @@ -778,7 +778,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.debug_abbrev_section_index == null) { - self.debug_abbrev_section_index = @intCast(u16, self.sections.slice().len); + self.debug_abbrev_section_index = @as(u16, @intCast(self.sections.slice().len)); const file_size_hint = 128; const p_align = 1; @@ -807,7 +807,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.debug_aranges_section_index == null) { - self.debug_aranges_section_index = @intCast(u16, self.sections.slice().len); + self.debug_aranges_section_index = @as(u16, @intCast(self.sections.slice().len)); const file_size_hint = 160; const p_align = 16; @@ -836,7 +836,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.debug_line_section_index == null) { - self.debug_line_section_index = @intCast(u16, self.sections.slice().len); + self.debug_line_section_index = @as(u16, @intCast(self.sections.slice().len)); const file_size_hint = 250; const p_align = 1; @@ -1100,7 +1100,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node }); switch (self.ptr_width) { - .p32 => try self.base.file.?.pwriteAll(mem.asBytes(&@intCast(u32, target_vaddr)), file_offset), + .p32 => try self.base.file.?.pwriteAll(mem.asBytes(&@as(u32, @intCast(target_vaddr))), file_offset), .p64 => try self.base.file.?.pwriteAll(mem.asBytes(&target_vaddr), file_offset), } @@ -1170,7 +1170,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node if (needed_size > allocated_size) { phdr_table.p_offset = 0; // free the space - phdr_table.p_offset = self.findFreeSpace(needed_size, @intCast(u32, phdr_table.p_align)); + phdr_table.p_offset = self.findFreeSpace(needed_size, @as(u32, @intCast(phdr_table.p_align))); } phdr_table_load.p_offset = mem.alignBackward(u64, phdr_table.p_offset, phdr_table_load.p_align); @@ -2004,7 +2004,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v fn writeDwarfAddrAssumeCapacity(self: *Elf, buf: *std.ArrayList(u8), addr: u64) void { const target_endian = self.base.options.target.cpu.arch.endian(); switch (self.ptr_width) { - .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, addr), target_endian), + .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(addr)), target_endian), .p64 => mem.writeInt(u64, buf.addManyAsArrayAssumeCapacity(8), addr, target_endian), } } @@ -2064,15 +2064,15 @@ fn writeElfHeader(self: *Elf) !void { const phdr_table_offset = self.program_headers.items[self.phdr_table_index.?].p_offset; switch 
(self.ptr_width) { .p32 => { - mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, e_entry), endian); + mem.writeInt(u32, hdr_buf[index..][0..4], @as(u32, @intCast(e_entry)), endian); index += 4; // e_phoff - mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, phdr_table_offset), endian); + mem.writeInt(u32, hdr_buf[index..][0..4], @as(u32, @intCast(phdr_table_offset)), endian); index += 4; // e_shoff - mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, self.shdr_table_offset.?), endian); + mem.writeInt(u32, hdr_buf[index..][0..4], @as(u32, @intCast(self.shdr_table_offset.?)), endian); index += 4; }, .p64 => { @@ -2108,7 +2108,7 @@ fn writeElfHeader(self: *Elf) !void { mem.writeInt(u16, hdr_buf[index..][0..2], e_phentsize, endian); index += 2; - const e_phnum = @intCast(u16, self.program_headers.items.len); + const e_phnum = @as(u16, @intCast(self.program_headers.items.len)); mem.writeInt(u16, hdr_buf[index..][0..2], e_phnum, endian); index += 2; @@ -2119,7 +2119,7 @@ fn writeElfHeader(self: *Elf) !void { mem.writeInt(u16, hdr_buf[index..][0..2], e_shentsize, endian); index += 2; - const e_shnum = @intCast(u16, self.sections.slice().len); + const e_shnum = @as(u16, @intCast(self.sections.slice().len)); mem.writeInt(u16, hdr_buf[index..][0..2], e_shnum, endian); index += 2; @@ -2223,7 +2223,7 @@ fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: pub fn createAtom(self: *Elf) !Atom.Index { const gpa = self.base.allocator; - const atom_index = @intCast(Atom.Index, self.atoms.items.len); + const atom_index = @as(Atom.Index, @intCast(self.atoms.items.len)); const atom = try self.atoms.addOne(gpa); const local_sym_index = try self.allocateLocalSymbol(); try self.atom_by_index_table.putNoClobber(gpa, local_sym_index, atom_index); @@ -2367,7 +2367,7 @@ pub fn allocateLocalSymbol(self: *Elf) !u32 { break :blk index; } else { log.debug(" (allocating symbol index {d})", .{self.local_symbols.items.len}); - const index = @intCast(u32, self.local_symbols.items.len); + const index = @as(u32, @intCast(self.local_symbols.items.len)); _ = self.local_symbols.addOneAssumeCapacity(); break :blk index; } @@ -2557,7 +2557,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s .iov_len = code.len, }}; var remote_vec: [1]std.os.iovec_const = .{.{ - .iov_base = @ptrFromInt([*]u8, @intCast(usize, local_sym.st_value)), + .iov_base = @as([*]u8, @ptrFromInt(@as(usize, @intCast(local_sym.st_value)))), .iov_len = code.len, }}; const rc = std.os.linux.process_vm_writev(pid, &code_vec, &remote_vec, 0); @@ -2910,7 +2910,7 @@ pub fn updateDeclExports( continue; }, }; - const stt_bits: u8 = @truncate(u4, decl_sym.st_info); + const stt_bits: u8 = @as(u4, @truncate(decl_sym.st_info)); if (decl_metadata.getExport(self, exp_name)) |i| { const sym = &self.global_symbols.items[i]; sym.* = .{ @@ -2926,7 +2926,7 @@ pub fn updateDeclExports( _ = self.global_symbols.addOneAssumeCapacity(); break :blk self.global_symbols.items.len - 1; }; - try decl_metadata.exports.append(gpa, @intCast(u32, i)); + try decl_metadata.exports.append(gpa, @as(u32, @intCast(i))); self.global_symbols.items[i] = .{ .st_name = try self.shstrtab.insert(gpa, exp_name), .st_info = (stb_bits << 4) | stt_bits, @@ -3030,12 +3030,12 @@ fn writeOffsetTableEntry(self: *Elf, index: @TypeOf(self.got_table).Index) !void switch (entry_size) { 2 => { var buf: [2]u8 = undefined; - mem.writeInt(u16, &buf, @intCast(u16, got_value), endian); + mem.writeInt(u16, &buf, @as(u16, @intCast(got_value)), endian); 
try self.base.file.?.pwriteAll(&buf, off); }, 4 => { var buf: [4]u8 = undefined; - mem.writeInt(u32, &buf, @intCast(u32, got_value), endian); + mem.writeInt(u32, &buf, @as(u32, @intCast(got_value)), endian); try self.base.file.?.pwriteAll(&buf, off); }, 8 => { @@ -3051,7 +3051,7 @@ fn writeOffsetTableEntry(self: *Elf, index: @TypeOf(self.got_table).Index) !void .iov_len = buf.len, }}; var remote_vec: [1]std.os.iovec_const = .{.{ - .iov_base = @ptrFromInt([*]u8, @intCast(usize, vaddr)), + .iov_base = @as([*]u8, @ptrFromInt(@as(usize, @intCast(vaddr)))), .iov_len = buf.len, }}; const rc = std.os.linux.process_vm_writev(pid, &local_vec, &remote_vec, 0); @@ -3086,7 +3086,7 @@ fn writeSymbol(self: *Elf, index: usize) !void { }; const needed_size = (self.local_symbols.items.len + self.global_symbols.items.len) * sym_size; try self.growNonAllocSection(self.symtab_section_index.?, needed_size, sym_align, true); - syms_sect.sh_info = @intCast(u32, self.local_symbols.items.len); + syms_sect.sh_info = @as(u32, @intCast(self.local_symbols.items.len)); } const foreign_endian = self.base.options.target.cpu.arch.endian() != builtin.cpu.arch.endian(); const off = switch (self.ptr_width) { @@ -3101,8 +3101,8 @@ fn writeSymbol(self: *Elf, index: usize) !void { var sym = [1]elf.Elf32_Sym{ .{ .st_name = local.st_name, - .st_value = @intCast(u32, local.st_value), - .st_size = @intCast(u32, local.st_size), + .st_value = @as(u32, @intCast(local.st_value)), + .st_size = @as(u32, @intCast(local.st_size)), .st_info = local.st_info, .st_other = local.st_other, .st_shndx = local.st_shndx, @@ -3148,8 +3148,8 @@ fn writeAllGlobalSymbols(self: *Elf) !void { const global = self.global_symbols.items[i]; sym.* = .{ .st_name = global.st_name, - .st_value = @intCast(u32, global.st_value), - .st_size = @intCast(u32, global.st_size), + .st_value = @as(u32, @intCast(global.st_value)), + .st_size = @as(u32, @intCast(global.st_size)), .st_info = global.st_info, .st_other = global.st_other, .st_shndx = global.st_shndx, @@ -3194,19 +3194,19 @@ fn ptrWidthBytes(self: Elf) u8 { /// Does not necessarily match `ptrWidthBytes` for example can be 2 bytes /// in a 32-bit ELF file. 
fn archPtrWidthBytes(self: Elf) u8 { - return @intCast(u8, self.base.options.target.ptrBitWidth() / 8); + return @as(u8, @intCast(self.base.options.target.ptrBitWidth() / 8)); } fn progHeaderTo32(phdr: elf.Elf64_Phdr) elf.Elf32_Phdr { return .{ .p_type = phdr.p_type, .p_flags = phdr.p_flags, - .p_offset = @intCast(u32, phdr.p_offset), - .p_vaddr = @intCast(u32, phdr.p_vaddr), - .p_paddr = @intCast(u32, phdr.p_paddr), - .p_filesz = @intCast(u32, phdr.p_filesz), - .p_memsz = @intCast(u32, phdr.p_memsz), - .p_align = @intCast(u32, phdr.p_align), + .p_offset = @as(u32, @intCast(phdr.p_offset)), + .p_vaddr = @as(u32, @intCast(phdr.p_vaddr)), + .p_paddr = @as(u32, @intCast(phdr.p_paddr)), + .p_filesz = @as(u32, @intCast(phdr.p_filesz)), + .p_memsz = @as(u32, @intCast(phdr.p_memsz)), + .p_align = @as(u32, @intCast(phdr.p_align)), }; } @@ -3214,14 +3214,14 @@ fn sectHeaderTo32(shdr: elf.Elf64_Shdr) elf.Elf32_Shdr { return .{ .sh_name = shdr.sh_name, .sh_type = shdr.sh_type, - .sh_flags = @intCast(u32, shdr.sh_flags), - .sh_addr = @intCast(u32, shdr.sh_addr), - .sh_offset = @intCast(u32, shdr.sh_offset), - .sh_size = @intCast(u32, shdr.sh_size), + .sh_flags = @as(u32, @intCast(shdr.sh_flags)), + .sh_addr = @as(u32, @intCast(shdr.sh_addr)), + .sh_offset = @as(u32, @intCast(shdr.sh_offset)), + .sh_size = @as(u32, @intCast(shdr.sh_size)), .sh_link = shdr.sh_link, .sh_info = shdr.sh_info, - .sh_addralign = @intCast(u32, shdr.sh_addralign), - .sh_entsize = @intCast(u32, shdr.sh_entsize), + .sh_addralign = @as(u32, @intCast(shdr.sh_addralign)), + .sh_entsize = @as(u32, @intCast(shdr.sh_entsize)), }; } diff --git a/src/link/MachO.zig b/src/link/MachO.zig index c91d18b0f7..80195a454d 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -741,7 +741,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No }; const sym = self.getSymbol(global); try lc_writer.writeStruct(macho.entry_point_command{ - .entryoff = @intCast(u32, sym.n_value - seg.vmaddr), + .entryoff = @as(u32, @intCast(sym.n_value - seg.vmaddr)), .stacksize = self.base.options.stack_size_override orelse 0, }); }, @@ -757,7 +757,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No }); try load_commands.writeBuildVersionLC(&self.base.options, lc_writer); - const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @intCast(u32, lc_buffer.items.len); + const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @as(u32, @intCast(lc_buffer.items.len)); try lc_writer.writeStruct(self.uuid_cmd); try load_commands.writeLoadDylibLCs(self.dylibs.items, self.referenced_dylibs.keys(), lc_writer); @@ -768,7 +768,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No const ncmds = load_commands.calcNumOfLCs(lc_buffer.items); try self.base.file.?.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64)); - try self.writeHeader(ncmds, @intCast(u32, lc_buffer.items.len)); + try self.writeHeader(ncmds, @as(u32, @intCast(lc_buffer.items.len))); try self.writeUuid(comp, uuid_cmd_offset, requires_codesig); if (codesig) |*csig| { @@ -992,7 +992,7 @@ pub fn parseDylib( const contents = try file.readToEndAllocOptions(gpa, file_size, file_size, @alignOf(u64), null); defer gpa.free(contents); - const dylib_id = @intCast(u16, self.dylibs.items.len); + const dylib_id = @as(u16, @intCast(self.dylibs.items.len)); var dylib = Dylib{ .weak = opts.weak }; dylib.parseFromBinary( @@ -1412,7 +1412,7 @@ pub fn allocateSpecialSymbols(self: *MachO) !void { pub fn createAtom(self: *MachO) 
!Atom.Index { const gpa = self.base.allocator; - const atom_index = @intCast(Atom.Index, self.atoms.items.len); + const atom_index = @as(Atom.Index, @intCast(self.atoms.items.len)); const atom = try self.atoms.addOne(gpa); const sym_index = try self.allocateSymbol(); try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index); @@ -1588,14 +1588,14 @@ fn resolveSymbolsInDylibs(self: *MachO, actions: *std.ArrayList(ResolveAction)) for (self.dylibs.items, 0..) |dylib, id| { if (!dylib.symbols.contains(sym_name)) continue; - const dylib_id = @intCast(u16, id); + const dylib_id = @as(u16, @intCast(id)); if (!self.referenced_dylibs.contains(dylib_id)) { try self.referenced_dylibs.putNoClobber(gpa, dylib_id, {}); } const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable; sym.n_type |= macho.N_EXT; - sym.n_desc = @intCast(u16, ordinal + 1) * macho.N_SYMBOL_RESOLVER; + sym.n_desc = @as(u16, @intCast(ordinal + 1)) * macho.N_SYMBOL_RESOLVER; if (dylib.weak) { sym.n_desc |= macho.N_WEAK_REF; @@ -1789,7 +1789,7 @@ fn allocateSymbol(self: *MachO) !u32 { break :blk index; } else { log.debug(" (allocating symbol index {d})", .{self.locals.items.len}); - const index = @intCast(u32, self.locals.items.len); + const index = @as(u32, @intCast(self.locals.items.len)); _ = self.locals.addOneAssumeCapacity(); break :blk index; } @@ -1815,7 +1815,7 @@ fn allocateGlobal(self: *MachO) !u32 { break :blk index; } else { log.debug(" (allocating symbol index {d})", .{self.globals.items.len}); - const index = @intCast(u32, self.globals.items.len); + const index = @as(u32, @intCast(self.globals.items.len)); _ = self.globals.addOneAssumeCapacity(); break :blk index; } @@ -2563,12 +2563,12 @@ pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: Fil try Atom.addRelocation(self, atom_index, .{ .type = .unsigned, .target = .{ .sym_index = sym_index, .file = null }, - .offset = @intCast(u32, reloc_info.offset), + .offset = @as(u32, @intCast(reloc_info.offset)), .addend = reloc_info.addend, .pcrel = false, .length = 3, }); - try Atom.addRebase(self, atom_index, @intCast(u32, reloc_info.offset)); + try Atom.addRebase(self, atom_index, @as(u32, @intCast(reloc_info.offset))); return 0; } @@ -2582,7 +2582,7 @@ fn populateMissingMetadata(self: *MachO) !void { if (self.pagezero_segment_cmd_index == null) { if (pagezero_vmsize > 0) { - self.pagezero_segment_cmd_index = @intCast(u8, self.segments.items.len); + self.pagezero_segment_cmd_index = @as(u8, @intCast(self.segments.items.len)); try self.segments.append(gpa, .{ .segname = makeStaticString("__PAGEZERO"), .vmsize = pagezero_vmsize, @@ -2593,7 +2593,7 @@ fn populateMissingMetadata(self: *MachO) !void { if (self.header_segment_cmd_index == null) { // The first __TEXT segment is immovable and covers MachO header and load commands. 
- self.header_segment_cmd_index = @intCast(u8, self.segments.items.len); + self.header_segment_cmd_index = @as(u8, @intCast(self.segments.items.len)); const ideal_size = @max(self.base.options.headerpad_size orelse 0, default_headerpad_size); const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size); @@ -2719,7 +2719,7 @@ fn populateMissingMetadata(self: *MachO) !void { } if (self.linkedit_segment_cmd_index == null) { - self.linkedit_segment_cmd_index = @intCast(u8, self.segments.items.len); + self.linkedit_segment_cmd_index = @as(u8, @intCast(self.segments.items.len)); try self.segments.append(gpa, .{ .segname = makeStaticString("__LINKEDIT"), @@ -2752,8 +2752,8 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts const gpa = self.base.allocator; // In incremental context, we create one section per segment pairing. This way, // we can move the segment in raw file as we please. - const segment_id = @intCast(u8, self.segments.items.len); - const section_id = @intCast(u8, self.sections.slice().len); + const segment_id = @as(u8, @intCast(self.segments.items.len)); + const section_id = @as(u8, @intCast(self.sections.slice().len)); const vmaddr = blk: { const prev_segment = self.segments.items[segment_id - 1]; break :blk mem.alignForward(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size); @@ -2788,7 +2788,7 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts .sectname = makeStaticString(sectname), .segname = makeStaticString(segname), .addr = mem.alignForward(u64, vmaddr, opts.alignment), - .offset = mem.alignForward(u32, @intCast(u32, off), opts.alignment), + .offset = mem.alignForward(u32, @as(u32, @intCast(off)), opts.alignment), .size = opts.size, .@"align" = math.log2(opts.alignment), .flags = opts.flags, @@ -2832,7 +2832,7 @@ fn growSection(self: *MachO, sect_id: u8, needed_size: u64) !void { current_size, ); if (amt != current_size) return error.InputOutput; - header.offset = @intCast(u32, new_offset); + header.offset = @as(u32, @intCast(new_offset)); segment.fileoff = new_offset; } @@ -2862,7 +2862,7 @@ fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void { // TODO: enforce order by increasing VM addresses in self.sections container. for (self.sections.items(.header)[sect_id + 1 ..], 0..) |*next_header, next_sect_id| { - const index = @intCast(u8, sect_id + 1 + next_sect_id); + const index = @as(u8, @intCast(sect_id + 1 + next_sect_id)); const next_segment = self.getSegmentPtr(index); next_header.addr += diff; next_segment.vmaddr += diff; @@ -2972,7 +2972,7 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm self.segment_table_dirty = true; } - const align_pow = @intCast(u32, math.log2(alignment)); + const align_pow = @as(u32, @intCast(math.log2(alignment))); if (header.@"align" < align_pow) { header.@"align" = align_pow; } @@ -3015,7 +3015,7 @@ pub fn getGlobalSymbol(self: *MachO, name: []const u8, lib_name: ?[]const u8) !u fn writeSegmentHeaders(self: *MachO, writer: anytype) !void { for (self.segments.items, 0..) |seg, i| { - const indexes = self.getSectionIndexes(@intCast(u8, i)); + const indexes = self.getSectionIndexes(@as(u8, @intCast(i))); try writer.writeStruct(seg); for (self.sections.items(.header)[indexes.start..indexes.end]) |header| { try writer.writeStruct(header); @@ -3029,7 +3029,7 @@ fn writeLinkeditSegmentData(self: *MachO) !void { seg.vmsize = 0; for (self.segments.items, 0..) 
|segment, id| { - if (self.linkedit_segment_cmd_index.? == @intCast(u8, id)) continue; + if (self.linkedit_segment_cmd_index.? == @as(u8, @intCast(id))) continue; if (seg.vmaddr < segment.vmaddr + segment.vmsize) { seg.vmaddr = mem.alignForward(u64, segment.vmaddr + segment.vmsize, self.page_size); } @@ -3115,7 +3115,7 @@ fn collectBindDataFromTableSection(self: *MachO, sect_id: u8, bind: anytype, tab log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{ base_offset + offset, self.getSymbolName(entry), - @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER), + @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER), }); if (bind_sym.weakRef()) { log.debug(" | marking as weak ref ", .{}); @@ -3150,7 +3150,7 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void { const bind_sym = self.getSymbol(binding.target); const bind_sym_name = self.getSymbolName(binding.target); const dylib_ordinal = @divTrunc( - @bitCast(i16, bind_sym.n_desc), + @as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER, ); log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{ @@ -3285,14 +3285,14 @@ fn writeDyldInfoData(self: *MachO) !void { try self.base.file.?.pwriteAll(buffer, rebase_off); try self.populateLazyBindOffsetsInStubHelper(lazy_bind); - self.dyld_info_cmd.rebase_off = @intCast(u32, rebase_off); - self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size_aligned); - self.dyld_info_cmd.bind_off = @intCast(u32, bind_off); - self.dyld_info_cmd.bind_size = @intCast(u32, bind_size_aligned); - self.dyld_info_cmd.lazy_bind_off = @intCast(u32, lazy_bind_off); - self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size_aligned); - self.dyld_info_cmd.export_off = @intCast(u32, export_off); - self.dyld_info_cmd.export_size = @intCast(u32, export_size_aligned); + self.dyld_info_cmd.rebase_off = @as(u32, @intCast(rebase_off)); + self.dyld_info_cmd.rebase_size = @as(u32, @intCast(rebase_size_aligned)); + self.dyld_info_cmd.bind_off = @as(u32, @intCast(bind_off)); + self.dyld_info_cmd.bind_size = @as(u32, @intCast(bind_size_aligned)); + self.dyld_info_cmd.lazy_bind_off = @as(u32, @intCast(lazy_bind_off)); + self.dyld_info_cmd.lazy_bind_size = @as(u32, @intCast(lazy_bind_size_aligned)); + self.dyld_info_cmd.export_off = @as(u32, @intCast(export_off)); + self.dyld_info_cmd.export_size = @as(u32, @intCast(export_size_aligned)); } fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void { @@ -3337,7 +3337,7 @@ fn writeSymtab(self: *MachO) !SymtabCtx { for (self.locals.items, 0..) 
|sym, sym_id| { if (sym.n_strx == 0) continue; // no name, skip - const sym_loc = SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null }; + const sym_loc = SymbolWithLoc{ .sym_index = @as(u32, @intCast(sym_id)), .file = null }; if (self.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip if (self.getGlobal(self.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip try locals.append(sym); @@ -3363,16 +3363,16 @@ fn writeSymtab(self: *MachO) !SymtabCtx { const sym = self.getSymbol(global); if (sym.n_strx == 0) continue; // no name, skip if (!sym.undf()) continue; // not an import, skip - const new_index = @intCast(u32, imports.items.len); + const new_index = @as(u32, @intCast(imports.items.len)); var out_sym = sym; out_sym.n_strx = try self.strtab.insert(gpa, self.getSymbolName(global)); try imports.append(out_sym); try imports_table.putNoClobber(global, new_index); } - const nlocals = @intCast(u32, locals.items.len); - const nexports = @intCast(u32, exports.items.len); - const nimports = @intCast(u32, imports.items.len); + const nlocals = @as(u32, @intCast(locals.items.len)); + const nexports = @as(u32, @intCast(exports.items.len)); + const nimports = @as(u32, @intCast(imports.items.len)); const nsyms = nlocals + nexports + nimports; const seg = self.getLinkeditSegmentPtr(); @@ -3392,7 +3392,7 @@ fn writeSymtab(self: *MachO) !SymtabCtx { log.debug("writing symtab from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); try self.base.file.?.pwriteAll(buffer.items, offset); - self.symtab_cmd.symoff = @intCast(u32, offset); + self.symtab_cmd.symoff = @as(u32, @intCast(offset)); self.symtab_cmd.nsyms = nsyms; return SymtabCtx{ @@ -3421,8 +3421,8 @@ fn writeStrtab(self: *MachO) !void { try self.base.file.?.pwriteAll(buffer, offset); - self.symtab_cmd.stroff = @intCast(u32, offset); - self.symtab_cmd.strsize = @intCast(u32, needed_size_aligned); + self.symtab_cmd.stroff = @as(u32, @intCast(offset)); + self.symtab_cmd.strsize = @as(u32, @intCast(needed_size_aligned)); } const SymtabCtx = struct { @@ -3434,8 +3434,8 @@ const SymtabCtx = struct { fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void { const gpa = self.base.allocator; - const nstubs = @intCast(u32, self.stub_table.lookup.count()); - const ngot_entries = @intCast(u32, self.got_table.lookup.count()); + const nstubs = @as(u32, @intCast(self.stub_table.lookup.count())); + const ngot_entries = @as(u32, @intCast(self.got_table.lookup.count())); const nindirectsyms = nstubs * 2 + ngot_entries; const iextdefsym = ctx.nlocalsym; const iundefsym = iextdefsym + ctx.nextdefsym; @@ -3503,7 +3503,7 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void { self.dysymtab_cmd.nextdefsym = ctx.nextdefsym; self.dysymtab_cmd.iundefsym = iundefsym; self.dysymtab_cmd.nundefsym = ctx.nundefsym; - self.dysymtab_cmd.indirectsymoff = @intCast(u32, offset); + self.dysymtab_cmd.indirectsymoff = @as(u32, @intCast(offset)); self.dysymtab_cmd.nindirectsyms = nindirectsyms; } @@ -3530,8 +3530,8 @@ fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void { // except for code signature data. 
try self.base.file.?.pwriteAll(&[_]u8{0}, offset + needed_size - 1); - self.codesig_cmd.dataoff = @intCast(u32, offset); - self.codesig_cmd.datasize = @intCast(u32, needed_size); + self.codesig_cmd.dataoff = @as(u32, @intCast(offset)); + self.codesig_cmd.datasize = @as(u32, @intCast(needed_size)); } fn writeCodeSignature(self: *MachO, comp: *const Compilation, code_sig: *CodeSignature) !void { @@ -3711,7 +3711,7 @@ pub fn makeStaticString(bytes: []const u8) [16]u8 { fn getSegmentByName(self: MachO, segname: []const u8) ?u8 { for (self.segments.items, 0..) |seg, i| { - if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i); + if (mem.eql(u8, segname, seg.segName())) return @as(u8, @intCast(i)); } else return null; } @@ -3734,15 +3734,15 @@ pub fn getSectionByName(self: MachO, segname: []const u8, sectname: []const u8) // TODO investigate caching with a hashmap for (self.sections.items(.header), 0..) |header, i| { if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname)) - return @intCast(u8, i); + return @as(u8, @intCast(i)); } else return null; } pub fn getSectionIndexes(self: MachO, segment_index: u8) struct { start: u8, end: u8 } { var start: u8 = 0; const nsects = for (self.segments.items, 0..) |seg, i| { - if (i == segment_index) break @intCast(u8, seg.nsects); - start += @intCast(u8, seg.nsects); + if (i == segment_index) break @as(u8, @intCast(seg.nsects)); + start += @as(u8, @intCast(seg.nsects)); } else 0; return .{ .start = start, .end = start + nsects }; } diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig index d222394ad5..5276bf041e 100644 --- a/src/link/MachO/Archive.zig +++ b/src/link/MachO/Archive.zig @@ -169,7 +169,7 @@ fn parseTableOfContents(self: *Archive, allocator: Allocator, reader: anytype) ! 
}; const object_offset = try symtab_reader.readIntLittle(u32); - const sym_name = mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + n_strx), 0); + const sym_name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + n_strx)), 0); const owned_name = try allocator.dupe(u8, sym_name); const res = try self.toc.getOrPut(allocator, owned_name); defer if (res.found_existing) allocator.free(owned_name); diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index fcb4c16063..f527ca3581 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -72,7 +72,7 @@ const CodeDirectory = struct { .hashSize = hash_size, .hashType = macho.CS_HASHTYPE_SHA256, .platform = 0, - .pageSize = @truncate(u8, std.math.log2(page_size)), + .pageSize = @as(u8, @truncate(std.math.log2(page_size))), .spare2 = 0, .scatterOffset = 0, .teamOffset = 0, @@ -110,7 +110,7 @@ const CodeDirectory = struct { fn size(self: CodeDirectory) u32 { const code_slots = self.inner.nCodeSlots * hash_size; const special_slots = self.inner.nSpecialSlots * hash_size; - return @sizeOf(macho.CodeDirectory) + @intCast(u32, self.ident.len + 1 + special_slots + code_slots); + return @sizeOf(macho.CodeDirectory) + @as(u32, @intCast(self.ident.len + 1 + special_slots + code_slots)); } fn write(self: CodeDirectory, writer: anytype) !void { @@ -139,9 +139,9 @@ const CodeDirectory = struct { try writer.writeAll(self.ident); try writer.writeByte(0); - var i: isize = @intCast(isize, self.inner.nSpecialSlots); + var i: isize = @as(isize, @intCast(self.inner.nSpecialSlots)); while (i > 0) : (i -= 1) { - try writer.writeAll(&self.special_slots[@intCast(usize, i - 1)]); + try writer.writeAll(&self.special_slots[@as(usize, @intCast(i - 1))]); } for (self.code_slots.items) |slot| { @@ -186,7 +186,7 @@ const Entitlements = struct { } fn size(self: Entitlements) u32 { - return @intCast(u32, self.inner.len) + 2 * @sizeOf(u32); + return @as(u32, @intCast(self.inner.len)) + 2 * @sizeOf(u32); } fn write(self: Entitlements, writer: anytype) !void { @@ -281,7 +281,7 @@ pub fn writeAdhocSignature( self.code_directory.inner.execSegFlags = if (opts.output_mode == .Exe) macho.CS_EXECSEG_MAIN_BINARY else 0; self.code_directory.inner.codeLimit = opts.file_size; - const total_pages = @intCast(u32, mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size); + const total_pages = @as(u32, @intCast(mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size)); try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages); self.code_directory.code_slots.items.len = total_pages; @@ -331,7 +331,7 @@ pub fn writeAdhocSignature( } self.code_directory.inner.hashOffset = - @sizeOf(macho.CodeDirectory) + @intCast(u32, self.code_directory.ident.len + 1 + self.code_directory.inner.nSpecialSlots * hash_size); + @sizeOf(macho.CodeDirectory) + @as(u32, @intCast(self.code_directory.ident.len + 1 + self.code_directory.inner.nSpecialSlots * hash_size)); self.code_directory.inner.length = self.code_directory.size(); header.length += self.code_directory.size(); @@ -339,7 +339,7 @@ pub fn writeAdhocSignature( try writer.writeIntBig(u32, header.length); try writer.writeIntBig(u32, header.count); - var offset: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) * @intCast(u32, blobs.items.len); + var offset: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) * @as(u32, @intCast(blobs.items.len)); for (blobs.items) |blob| { try writer.writeIntBig(u32, blob.slotType()); try 
writer.writeIntBig(u32, offset); @@ -383,7 +383,7 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 { ssize += @sizeOf(macho.BlobIndex) + sig.size(); } ssize += n_special_slots * hash_size; - return @intCast(u32, mem.alignForward(u64, ssize, @sizeOf(u64))); + return @as(u32, @intCast(mem.alignForward(u64, ssize, @sizeOf(u64)))); } pub fn clear(self: *CodeSignature, allocator: Allocator) void { diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig index fdb8c9c816..ade26de920 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -64,9 +64,9 @@ pub const Reloc = struct { /// has been called to get a viable debug symbols output. pub fn populateMissingMetadata(self: *DebugSymbols) !void { if (self.dwarf_segment_cmd_index == null) { - self.dwarf_segment_cmd_index = @intCast(u8, self.segments.items.len); + self.dwarf_segment_cmd_index = @as(u8, @intCast(self.segments.items.len)); - const off = @intCast(u64, self.page_size); + const off = @as(u64, @intCast(self.page_size)); const ideal_size: u16 = 200 + 128 + 160 + 250; const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size); @@ -86,7 +86,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void { try self.dwarf.strtab.buffer.append(self.allocator, 0); self.debug_str_section_index = try self.allocateSection( "__debug_str", - @intCast(u32, self.dwarf.strtab.buffer.items.len), + @as(u32, @intCast(self.dwarf.strtab.buffer.items.len)), 0, ); self.debug_string_table_dirty = true; @@ -113,7 +113,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void { } if (self.linkedit_segment_cmd_index == null) { - self.linkedit_segment_cmd_index = @intCast(u8, self.segments.items.len); + self.linkedit_segment_cmd_index = @as(u8, @intCast(self.segments.items.len)); try self.segments.append(self.allocator, .{ .segname = makeStaticString("__LINKEDIT"), .maxprot = macho.PROT.READ, @@ -128,7 +128,7 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme var sect = macho.section_64{ .sectname = makeStaticString(sectname), .segname = segment.segname, - .size = @intCast(u32, size), + .size = @as(u32, @intCast(size)), .@"align" = alignment, }; const alignment_pow_2 = try math.powi(u32, 2, alignment); @@ -141,9 +141,9 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme off + size, }); - sect.offset = @intCast(u32, off); + sect.offset = @as(u32, @intCast(off)); - const index = @intCast(u8, self.sections.items.len); + const index = @as(u8, @intCast(self.sections.items.len)); try self.sections.append(self.allocator, sect); segment.cmdsize += @sizeOf(macho.section_64); segment.nsects += 1; @@ -176,7 +176,7 @@ pub fn growSection(self: *DebugSymbols, sect_index: u8, needed_size: u32, requir if (amt != existing_size) return error.InputOutput; } - sect.offset = @intCast(u32, new_offset); + sect.offset = @as(u32, @intCast(new_offset)); } sect.size = needed_size; @@ -286,7 +286,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void { { const sect_index = self.debug_str_section_index.?; if (self.debug_string_table_dirty or self.dwarf.strtab.buffer.items.len != self.getSection(sect_index).size) { - const needed_size = @intCast(u32, self.dwarf.strtab.buffer.items.len); + const needed_size = @as(u32, @intCast(self.dwarf.strtab.buffer.items.len)); try self.growSection(sect_index, needed_size, false); try self.file.pwriteAll(self.dwarf.strtab.buffer.items, self.getSection(sect_index).offset); 
self.debug_string_table_dirty = false; @@ -307,7 +307,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void { const ncmds = load_commands.calcNumOfLCs(lc_buffer.items); try self.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64)); - try self.writeHeader(macho_file, ncmds, @intCast(u32, lc_buffer.items.len)); + try self.writeHeader(macho_file, ncmds, @as(u32, @intCast(lc_buffer.items.len))); assert(!self.debug_abbrev_section_dirty); assert(!self.debug_aranges_section_dirty); @@ -378,7 +378,7 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype) // Write segment/section headers from the binary file first. const end = macho_file.linkedit_segment_cmd_index.?; for (macho_file.segments.items[0..end], 0..) |seg, i| { - const indexes = macho_file.getSectionIndexes(@intCast(u8, i)); + const indexes = macho_file.getSectionIndexes(@as(u8, @intCast(i))); var out_seg = seg; out_seg.fileoff = 0; out_seg.filesize = 0; @@ -407,7 +407,7 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype) } // Next, commit DSYM's __LINKEDIT and __DWARF segments headers. for (self.segments.items, 0..) |seg, i| { - const indexes = self.getSectionIndexes(@intCast(u8, i)); + const indexes = self.getSectionIndexes(@as(u8, @intCast(i))); try writer.writeStruct(seg); for (self.sections.items[indexes.start..indexes.end]) |header| { try writer.writeStruct(header); @@ -473,7 +473,7 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void { for (macho_file.locals.items, 0..) |sym, sym_id| { if (sym.n_strx == 0) continue; // no name, skip - const sym_loc = MachO.SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null }; + const sym_loc = MachO.SymbolWithLoc{ .sym_index = @as(u32, @intCast(sym_id)), .file = null }; if (macho_file.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip if (macho_file.getGlobal(macho_file.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip var out_sym = sym; @@ -501,10 +501,10 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void { const needed_size = nsyms * @sizeOf(macho.nlist_64); seg.filesize = offset + needed_size - seg.fileoff; - self.symtab_cmd.symoff = @intCast(u32, offset); - self.symtab_cmd.nsyms = @intCast(u32, nsyms); + self.symtab_cmd.symoff = @as(u32, @intCast(offset)); + self.symtab_cmd.nsyms = @as(u32, @intCast(nsyms)); - const locals_off = @intCast(u32, offset); + const locals_off = @as(u32, @intCast(offset)); const locals_size = nlocals * @sizeOf(macho.nlist_64); const exports_off = locals_off + locals_size; const exports_size = nexports * @sizeOf(macho.nlist_64); @@ -521,13 +521,13 @@ fn writeStrtab(self: *DebugSymbols) !void { defer tracy.end(); const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; - const symtab_size = @intCast(u32, self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64)); + const symtab_size = @as(u32, @intCast(self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64))); const offset = mem.alignForward(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64)); const needed_size = mem.alignForward(u64, self.strtab.buffer.items.len, @alignOf(u64)); seg.filesize = offset + needed_size - seg.fileoff; - self.symtab_cmd.stroff = @intCast(u32, offset); - self.symtab_cmd.strsize = @intCast(u32, needed_size); + self.symtab_cmd.stroff = @as(u32, @intCast(offset)); + self.symtab_cmd.strsize = @as(u32, @intCast(needed_size)); log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); @@ 
-542,8 +542,8 @@ fn writeStrtab(self: *DebugSymbols) !void { pub fn getSectionIndexes(self: *DebugSymbols, segment_index: u8) struct { start: u8, end: u8 } { var start: u8 = 0; const nsects = for (self.segments.items, 0..) |seg, i| { - if (i == segment_index) break @intCast(u8, seg.nsects); - start += @intCast(u8, seg.nsects); + if (i == segment_index) break @as(u8, @intCast(seg.nsects)); + start += @as(u8, @intCast(seg.nsects)); } else 0; return .{ .start = start, .end = start + nsects }; } diff --git a/src/link/MachO/DwarfInfo.zig b/src/link/MachO/DwarfInfo.zig index 3218435734..07d98e8e94 100644 --- a/src/link/MachO/DwarfInfo.zig +++ b/src/link/MachO/DwarfInfo.zig @@ -70,7 +70,7 @@ pub fn genSubprogramLookupByName( low_pc = addr; } if (try attr.getConstant(self)) |constant| { - low_pc = @intCast(u64, constant); + low_pc = @as(u64, @intCast(constant)); } }, dwarf.AT.high_pc => { @@ -78,7 +78,7 @@ pub fn genSubprogramLookupByName( high_pc = addr; } if (try attr.getConstant(self)) |constant| { - high_pc = @intCast(u64, constant); + high_pc = @as(u64, @intCast(constant)); } }, else => {}, @@ -261,7 +261,7 @@ pub const Attribute = struct { switch (self.form) { dwarf.FORM.string => { - return mem.sliceTo(@ptrCast([*:0]const u8, debug_info.ptr), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(debug_info.ptr)), 0); }, dwarf.FORM.strp => { const off = if (cuh.is_64bit) @@ -499,5 +499,5 @@ fn findAbbrevEntrySize(self: DwarfInfo, da_off: usize, da_len: usize, di_off: us fn getString(self: DwarfInfo, off: u64) []const u8 { assert(off < self.debug_str.len); - return mem.sliceTo(@ptrCast([*:0]const u8, self.debug_str.ptr + @intCast(usize, off)), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.debug_str.ptr + @as(usize, @intCast(off)))), 0); } diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig index 971706dae6..ee8f34f756 100644 --- a/src/link/MachO/Dylib.zig +++ b/src/link/MachO/Dylib.zig @@ -75,7 +75,7 @@ pub const Id = struct { .int => |int| { var out: u32 = 0; const major = math.cast(u16, int) orelse return error.Overflow; - out += @intCast(u32, major) << 16; + out += @as(u32, @intCast(major)) << 16; return out; }, .float => |float| { @@ -106,9 +106,9 @@ pub const Id = struct { out += try fmt.parseInt(u8, values[2], 10); } if (count > 1) { - out += @intCast(u32, try fmt.parseInt(u8, values[1], 10)) << 8; + out += @as(u32, @intCast(try fmt.parseInt(u8, values[1], 10))) << 8; } - out += @intCast(u32, try fmt.parseInt(u16, values[0], 10)) << 16; + out += @as(u32, @intCast(try fmt.parseInt(u16, values[0], 10))) << 16; return out; } @@ -164,11 +164,11 @@ pub fn parseFromBinary( switch (cmd.cmd()) { .SYMTAB => { const symtab_cmd = cmd.cast(macho.symtab_command).?; - const symtab = @ptrCast( + const symtab = @as( [*]const macho.nlist_64, // Alignment is guaranteed as a dylib is a final linked image and has to have sections // properly aligned in order to be correctly loaded by the loader. 
- @alignCast(@alignOf(macho.nlist_64), &data[symtab_cmd.symoff]), + @ptrCast(@alignCast(&data[symtab_cmd.symoff])), )[0..symtab_cmd.nsyms]; const strtab = data[symtab_cmd.stroff..][0..symtab_cmd.strsize]; @@ -176,7 +176,7 @@ pub fn parseFromBinary( const add_to_symtab = sym.ext() and (sym.sect() or sym.indr()); if (!add_to_symtab) continue; - const sym_name = mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + sym.n_strx), 0); + const sym_name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + sym.n_strx)), 0); try self.symbols.putNoClobber(allocator, try allocator.dupe(u8, sym_name), false); } }, diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig index 105a806075..29fe2988b6 100644 --- a/src/link/MachO/Object.zig +++ b/src/link/MachO/Object.zig @@ -164,7 +164,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch) else => {}, } else return; - self.in_symtab = @ptrCast([*]align(1) const macho.nlist_64, self.contents.ptr + symtab.symoff)[0..symtab.nsyms]; + self.in_symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(self.contents.ptr + symtab.symoff))[0..symtab.nsyms]; self.in_strtab = self.contents[symtab.stroff..][0..symtab.strsize]; self.symtab = try allocator.alloc(macho.nlist_64, self.in_symtab.?.len + nsects); @@ -202,7 +202,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch) defer sorted_all_syms.deinit(); for (self.in_symtab.?, 0..) |_, index| { - sorted_all_syms.appendAssumeCapacity(.{ .index = @intCast(u32, index) }); + sorted_all_syms.appendAssumeCapacity(.{ .index = @as(u32, @intCast(index)) }); } // We sort by type: defined < undefined, and @@ -225,18 +225,18 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch) } } if (sym.sect() and section_index_lookup == null) { - section_index_lookup = .{ .start = @intCast(u32, i), .len = 1 }; + section_index_lookup = .{ .start = @as(u32, @intCast(i)), .len = 1 }; } prev_sect_id = sym.n_sect; self.symtab[i] = sym; self.source_symtab_lookup[i] = sym_id.index; - self.reverse_symtab_lookup[sym_id.index] = @intCast(u32, i); - self.source_address_lookup[i] = if (sym.undf()) -1 else @intCast(i64, sym.n_value); + self.reverse_symtab_lookup[sym_id.index] = @as(u32, @intCast(i)); + self.source_address_lookup[i] = if (sym.undf()) -1 else @as(i64, @intCast(sym.n_value)); - const sym_name_len = mem.sliceTo(@ptrCast([*:0]const u8, self.in_strtab.?.ptr + sym.n_strx), 0).len + 1; - self.strtab_lookup[i] = @intCast(u32, sym_name_len); + const sym_name_len = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.in_strtab.?.ptr + sym.n_strx)), 0).len + 1; + self.strtab_lookup[i] = @as(u32, @intCast(sym_name_len)); } // If there were no undefined symbols, make sure we populate the @@ -267,7 +267,7 @@ const SymbolAtIndex = struct { fn getSymbolName(self: SymbolAtIndex, ctx: Context) []const u8 { const off = self.getSymbol(ctx).n_strx; - return mem.sliceTo(@ptrCast([*:0]const u8, ctx.in_strtab.?.ptr + off), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(ctx.in_strtab.?.ptr + off)), 0); } fn getSymbolSeniority(self: SymbolAtIndex, ctx: Context) u2 { @@ -338,7 +338,7 @@ fn filterSymbolsBySection(symbols: []macho.nlist_64, n_sect: u8) struct { .n_sect = n_sect, }); - return .{ .index = @intCast(u32, index), .len = @intCast(u32, len) }; + return .{ .index = @as(u32, @intCast(index)), .len = @as(u32, @intCast(len)) }; } fn filterSymbolsByAddress(symbols: []macho.nlist_64, start_addr: u64, end_addr: u64) struct { @@ -360,7 +360,7 @@ fn 
filterSymbolsByAddress(symbols: []macho.nlist_64, start_addr: u64, end_addr: .addr = end_addr, }); - return .{ .index = @intCast(u32, index), .len = @intCast(u32, len) }; + return .{ .index = @as(u32, @intCast(index)), .len = @as(u32, @intCast(len)) }; } const SortedSection = struct { @@ -400,7 +400,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void { }; if (sect.size == 0) continue; - const sect_id = @intCast(u8, id); + const sect_id = @as(u8, @intCast(id)); const sym = self.getSectionAliasSymbolPtr(sect_id); sym.* = .{ .n_strx = 0, @@ -417,7 +417,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void { const out_sect_id = (try zld.getOutputSection(sect)) orelse continue; if (sect.size == 0) continue; - const sect_id = @intCast(u8, id); + const sect_id = @as(u8, @intCast(id)); const sym_index = self.getSectionAliasSymbolIndex(sect_id); const atom_index = try self.createAtomFromSubsection( zld, @@ -459,7 +459,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void { defer gpa.free(sorted_sections); for (sections, 0..) |sect, id| { - sorted_sections[id] = .{ .header = sect, .id = @intCast(u8, id) }; + sorted_sections[id] = .{ .header = sect, .id = @as(u8, @intCast(id)) }; } mem.sort(SortedSection, sorted_sections, {}, sectionLessThanByAddress); @@ -651,7 +651,7 @@ fn filterRelocs( const start = @import("zld.zig").bsearch(macho.relocation_info, relocs, Predicate{ .addr = end_addr }); const len = @import("zld.zig").lsearch(macho.relocation_info, relocs[start..], LPredicate{ .addr = start_addr }); - return .{ .start = @intCast(u32, start), .len = @intCast(u32, len) }; + return .{ .start = @as(u32, @intCast(start)), .len = @as(u32, @intCast(len)) }; } /// Parse all relocs for the input section, and sort in descending order. @@ -659,7 +659,7 @@ fn filterRelocs( /// section in a sorted manner which is simply not true. fn parseRelocs(self: *Object, gpa: Allocator, sect_id: u8) !void { const section = self.getSourceSection(sect_id); - const start = @intCast(u32, self.relocations.items.len); + const start = @as(u32, @intCast(self.relocations.items.len)); if (self.getSourceRelocs(section)) |relocs| { try self.relocations.ensureUnusedCapacity(gpa, relocs.len); self.relocations.appendUnalignedSliceAssumeCapacity(relocs); @@ -677,8 +677,8 @@ fn cacheRelocs(self: *Object, zld: *Zld, atom_index: AtomIndex) !void { // If there was no matching symbol present in the source symtab, this means // we are dealing with either an entire section, or part of it, but also // starting at the beginning. 
- const nbase = @intCast(u32, self.in_symtab.?.len); - const sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(self.in_symtab.?.len)); + const sect_id = @as(u8, @intCast(atom.sym_index - nbase)); break :blk sect_id; }; const source_sect = self.getSourceSection(source_sect_id); @@ -745,7 +745,7 @@ fn parseEhFrameSection(self: *Object, zld: *Zld, object_id: u32) !void { .object_id = object_id, .rel = rel, .code = it.data[offset..], - .base_offset = @intCast(i32, offset), + .base_offset = @as(i32, @intCast(offset)), }); break :blk target; }, @@ -798,7 +798,7 @@ fn parseUnwindInfo(self: *Object, zld: *Zld, object_id: u32) !void { _ = try zld.initSection("__TEXT", "__unwind_info", .{}); } - try self.unwind_records_lookup.ensureTotalCapacity(gpa, @intCast(u32, self.exec_atoms.items.len)); + try self.unwind_records_lookup.ensureTotalCapacity(gpa, @as(u32, @intCast(self.exec_atoms.items.len))); const unwind_records = self.getUnwindRecords(); @@ -834,14 +834,14 @@ fn parseUnwindInfo(self: *Object, zld: *Zld, object_id: u32) !void { .object_id = object_id, .rel = rel, .code = mem.asBytes(&record), - .base_offset = @intCast(i32, offset), + .base_offset = @as(i32, @intCast(offset)), }); log.debug("unwind record {d} tracks {s}", .{ record_id, zld.getSymbolName(target) }); if (target.getFile() != object_id) { self.unwind_relocs_lookup[record_id].dead = true; } else { const atom_index = self.getAtomIndexForSymbol(target.sym_index).?; - self.unwind_records_lookup.putAssumeCapacityNoClobber(atom_index, @intCast(u32, record_id)); + self.unwind_records_lookup.putAssumeCapacityNoClobber(atom_index, @as(u32, @intCast(record_id))); } } } @@ -869,7 +869,7 @@ pub fn getSourceSectionIndexByName(self: Object, segname: []const u8, sectname: const sections = self.getSourceSections(); for (sections, 0..) 
|sect, i| { if (mem.eql(u8, segname, sect.segName()) and mem.eql(u8, sectname, sect.sectName())) - return @intCast(u8, i); + return @as(u8, @intCast(i)); } else return null; } @@ -898,7 +898,7 @@ pub fn parseDataInCode(self: *Object, gpa: Allocator) !void { } } else return; const ndice = @divExact(cmd.datasize, @sizeOf(macho.data_in_code_entry)); - const dice = @ptrCast([*]align(1) const macho.data_in_code_entry, self.contents.ptr + cmd.dataoff)[0..ndice]; + const dice = @as([*]align(1) const macho.data_in_code_entry, @ptrCast(self.contents.ptr + cmd.dataoff))[0..ndice]; try self.data_in_code.ensureTotalCapacityPrecise(gpa, dice.len); self.data_in_code.appendUnalignedSliceAssumeCapacity(dice); mem.sort(macho.data_in_code_entry, self.data_in_code.items, {}, diceLessThan); @@ -945,12 +945,12 @@ pub fn parseDwarfInfo(self: Object) DwarfInfo { } pub fn getSectionContents(self: Object, sect: macho.section_64) []const u8 { - const size = @intCast(usize, sect.size); + const size = @as(usize, @intCast(sect.size)); return self.contents[sect.offset..][0..size]; } pub fn getSectionAliasSymbolIndex(self: Object, sect_id: u8) u32 { - const start = @intCast(u32, self.in_symtab.?.len); + const start = @as(u32, @intCast(self.in_symtab.?.len)); return start + sect_id; } @@ -964,7 +964,7 @@ pub fn getSectionAliasSymbolPtr(self: *Object, sect_id: u8) *macho.nlist_64 { fn getSourceRelocs(self: Object, sect: macho.section_64) ?[]align(1) const macho.relocation_info { if (sect.nreloc == 0) return null; - return @ptrCast([*]align(1) const macho.relocation_info, self.contents.ptr + sect.reloff)[0..sect.nreloc]; + return @as([*]align(1) const macho.relocation_info, @ptrCast(self.contents.ptr + sect.reloff))[0..sect.nreloc]; } pub fn getRelocs(self: Object, sect_id: u8) []const macho.relocation_info { @@ -1005,25 +1005,25 @@ pub fn getSymbolByAddress(self: Object, addr: u64, sect_hint: ?u8) u32 { const target_sym_index = @import("zld.zig").lsearch( i64, self.source_address_lookup[lookup.start..][0..lookup.len], - Predicate{ .addr = @intCast(i64, addr) }, + Predicate{ .addr = @as(i64, @intCast(addr)) }, ); if (target_sym_index > 0) { - return @intCast(u32, lookup.start + target_sym_index - 1); + return @as(u32, @intCast(lookup.start + target_sym_index - 1)); } } return self.getSectionAliasSymbolIndex(sect_id); } const target_sym_index = @import("zld.zig").lsearch(i64, self.source_address_lookup, Predicate{ - .addr = @intCast(i64, addr), + .addr = @as(i64, @intCast(addr)), }); assert(target_sym_index > 0); - return @intCast(u32, target_sym_index - 1); + return @as(u32, @intCast(target_sym_index - 1)); } pub fn getGlobal(self: Object, sym_index: u32) ?u32 { if (self.globals_lookup[sym_index] == -1) return null; - return @intCast(u32, self.globals_lookup[sym_index]); + return @as(u32, @intCast(self.globals_lookup[sym_index])); } pub fn getAtomIndexForSymbol(self: Object, sym_index: u32) ?AtomIndex { @@ -1041,7 +1041,7 @@ pub fn getUnwindRecords(self: Object) []align(1) const macho.compact_unwind_entr const sect = self.getSourceSection(sect_id); const data = self.getSectionContents(sect); const num_entries = @divExact(data.len, @sizeOf(macho.compact_unwind_entry)); - return @ptrCast([*]align(1) const macho.compact_unwind_entry, data)[0..num_entries]; + return @as([*]align(1) const macho.compact_unwind_entry, @ptrCast(data))[0..num_entries]; } pub fn hasEhFrameRecords(self: Object) bool { diff --git a/src/link/MachO/Relocation.zig b/src/link/MachO/Relocation.zig index 2685cc26e2..b7bbf59cfc 100644 --- 
a/src/link/MachO/Relocation.zig +++ b/src/link/MachO/Relocation.zig @@ -94,9 +94,9 @@ pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, cod .tlv_initializer => blk: { assert(self.addend == 0); // Addend here makes no sense. const header = macho_file.sections.items(.header)[macho_file.thread_data_section_index.?]; - break :blk @intCast(i64, target_base_addr - header.addr); + break :blk @as(i64, @intCast(target_base_addr - header.addr)); }, - else => @intCast(i64, target_base_addr) + self.addend, + else => @as(i64, @intCast(target_base_addr)) + self.addend, }; log.debug(" ({x}: [() => 0x{x} ({s})) ({s})", .{ @@ -119,7 +119,7 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: [] .branch => { const displacement = math.cast( i28, - @intCast(i64, target_addr) - @intCast(i64, source_addr), + @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr)), ) orelse unreachable; // TODO codegen should never allow for jump larger than i28 displacement var inst = aarch64.Instruction{ .unconditional_branch_immediate = mem.bytesToValue(meta.TagPayload( @@ -127,25 +127,25 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: [] aarch64.Instruction.unconditional_branch_immediate, ), buffer[0..4]), }; - inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2)); + inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(displacement >> 2)))); mem.writeIntLittle(u32, buffer[0..4], inst.toU32()); }, .page, .got_page => { - const source_page = @intCast(i32, source_addr >> 12); - const target_page = @intCast(i32, target_addr >> 12); - const pages = @bitCast(u21, @intCast(i21, target_page - source_page)); + const source_page = @as(i32, @intCast(source_addr >> 12)); + const target_page = @as(i32, @intCast(target_addr >> 12)); + const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page)))); var inst = aarch64.Instruction{ .pc_relative_address = mem.bytesToValue(meta.TagPayload( aarch64.Instruction, aarch64.Instruction.pc_relative_address, ), buffer[0..4]), }; - inst.pc_relative_address.immhi = @truncate(u19, pages >> 2); - inst.pc_relative_address.immlo = @truncate(u2, pages); + inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2)); + inst.pc_relative_address.immlo = @as(u2, @truncate(pages)); mem.writeIntLittle(u32, buffer[0..4], inst.toU32()); }, .pageoff, .got_pageoff => { - const narrowed = @truncate(u12, @intCast(u64, target_addr)); + const narrowed = @as(u12, @truncate(@as(u64, @intCast(target_addr)))); if (isArithmeticOp(buffer[0..4])) { var inst = aarch64.Instruction{ .add_subtract_immediate = mem.bytesToValue(meta.TagPayload( @@ -180,8 +180,8 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: [] } }, .tlv_initializer, .unsigned => switch (self.length) { - 2 => mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, @bitCast(u64, target_addr))), - 3 => mem.writeIntLittle(u64, buffer[0..8], @bitCast(u64, target_addr)), + 2 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @truncate(@as(u64, @bitCast(target_addr))))), + 3 => mem.writeIntLittle(u64, buffer[0..8], @as(u64, @bitCast(target_addr))), else => unreachable, }, .got, .signed, .tlv => unreachable, // Invalid target architecture. 
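The relocation hunks above compose several casts in one expression; the rewrite keeps the nesting but moves every destination type out to an enclosing @as. A minimal sketch of the before/after shape (the value and variable names here are made up for illustration, not taken from the linker sources):

const std = @import("std");

pub fn main() void {
    const displacement: i28 = -8;
    // Old two-argument form:
    //   const imm26 = @truncate(u26, @bitCast(u28, displacement >> 2));
    // New single-argument form; the result types flow in from @as:
    const imm26 = @as(u26, @truncate(@as(u28, @bitCast(displacement >> 2))));
    std.debug.print("imm26 = 0x{x}\n", .{imm26});
}
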
@@ -191,16 +191,16 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: [] fn resolveX8664(self: Relocation, source_addr: u64, target_addr: i64, code: []u8) void { switch (self.type) { .branch, .got, .tlv, .signed => { - const displacement = @intCast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr) - 4); - mem.writeIntLittle(u32, code[self.offset..][0..4], @bitCast(u32, displacement)); + const displacement = @as(i32, @intCast(@as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr)) - 4)); + mem.writeIntLittle(u32, code[self.offset..][0..4], @as(u32, @bitCast(displacement))); }, .tlv_initializer, .unsigned => { switch (self.length) { 2 => { - mem.writeIntLittle(u32, code[self.offset..][0..4], @truncate(u32, @bitCast(u64, target_addr))); + mem.writeIntLittle(u32, code[self.offset..][0..4], @as(u32, @truncate(@as(u64, @bitCast(target_addr))))); }, 3 => { - mem.writeIntLittle(u64, code[self.offset..][0..8], @bitCast(u64, target_addr)); + mem.writeIntLittle(u64, code[self.offset..][0..8], @as(u64, @bitCast(target_addr))); }, else => unreachable, } @@ -210,24 +210,24 @@ fn resolveX8664(self: Relocation, source_addr: u64, target_addr: i64, code: []u8 } pub inline fn isArithmeticOp(inst: *const [4]u8) bool { - const group_decode = @truncate(u5, inst[3]); + const group_decode = @as(u5, @truncate(inst[3])); return ((group_decode >> 2) == 4); } pub fn calcPcRelativeDisplacementX86(source_addr: u64, target_addr: u64, correction: u3) error{Overflow}!i32 { - const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr + 4 + correction); + const disp = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr + 4 + correction)); return math.cast(i32, disp) orelse error.Overflow; } pub fn calcPcRelativeDisplacementArm64(source_addr: u64, target_addr: u64) error{Overflow}!i28 { - const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr); + const disp = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr)); return math.cast(i28, disp) orelse error.Overflow; } pub fn calcNumberOfPages(source_addr: u64, target_addr: u64) i21 { - const source_page = @intCast(i32, source_addr >> 12); - const target_page = @intCast(i32, target_addr >> 12); - const pages = @intCast(i21, target_page - source_page); + const source_page = @as(i32, @intCast(source_addr >> 12)); + const target_page = @as(i32, @intCast(target_addr >> 12)); + const pages = @as(i21, @intCast(target_page - source_page)); return pages; } @@ -241,7 +241,7 @@ pub const PageOffsetInstKind = enum { }; pub fn calcPageOffset(target_addr: u64, kind: PageOffsetInstKind) !u12 { - const narrowed = @truncate(u12, target_addr); + const narrowed = @as(u12, @truncate(target_addr)); return switch (kind) { .arithmetic, .load_store_8 => narrowed, .load_store_16 => try math.divExact(u12, narrowed, 2), diff --git a/src/link/MachO/Trie.zig b/src/link/MachO/Trie.zig index 34200db7dc..cabe611b64 100644 --- a/src/link/MachO/Trie.zig +++ b/src/link/MachO/Trie.zig @@ -220,7 +220,7 @@ pub const Node = struct { try writer.writeByte(0); } // Write number of edges (max legal number of edges is 256). - try writer.writeByte(@intCast(u8, self.edges.items.len)); + try writer.writeByte(@as(u8, @intCast(self.edges.items.len))); for (self.edges.items) |edge| { // Write edge label and offset to next node in trie. 
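The pointer-cast chains change in the same way: @ptrCast and @alignCast no longer take a destination type or alignment, so chains like the symtab casts in the Dylib.zig and Object.zig hunks above lean on an enclosing @as (or the destination of an assignment) for the result type. A rough, self-contained sketch of that shape, using a hypothetical Record type and byte buffer rather than the real Mach-O structures:

const std = @import("std");

const Record = extern struct { a: u32, b: u32 };

pub fn main() void {
    var raw: [8]u8 align(@alignOf(Record)) = .{ 1, 0, 0, 0, 0, 0, 0, 0 };
    raw[4] = 2; // fill in the low byte of the second field at runtime
    // Old form:
    //   const records = @ptrCast([*]const Record, @alignCast(@alignOf(Record), &raw))[0..1];
    // New form: both casts take a single operand; @as supplies the result type,
    // and @alignCast picks its target alignment up from that type:
    const records = @as([*]const Record, @ptrCast(@alignCast(&raw)))[0..1];
    // On a little-endian target this prints a=1 b=2.
    std.debug.print("a={d} b={d}\n", .{ records[0].a, records[0].b });
}
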
diff --git a/src/link/MachO/UnwindInfo.zig b/src/link/MachO/UnwindInfo.zig index 3c9a438f92..cfef053d1b 100644 --- a/src/link/MachO/UnwindInfo.zig +++ b/src/link/MachO/UnwindInfo.zig @@ -87,7 +87,7 @@ const Page = struct { const record_id = page.page_encodings[index]; const record = info.records.items[record_id]; if (record.compactUnwindEncoding == enc) { - return @intCast(u8, index); + return @as(u8, @intCast(index)); } } return null; @@ -150,14 +150,14 @@ const Page = struct { for (info.records.items[page.start..][0..page.count]) |record| { try writer.writeStruct(macho.unwind_info_regular_second_level_entry{ - .functionOffset = @intCast(u32, record.rangeStart), + .functionOffset = @as(u32, @intCast(record.rangeStart)), .encoding = record.compactUnwindEncoding, }); } }, .compressed => { const entry_offset = @sizeOf(macho.unwind_info_compressed_second_level_page_header) + - @intCast(u16, page.page_encodings_count) * @sizeOf(u32); + @as(u16, @intCast(page.page_encodings_count)) * @sizeOf(u32); try writer.writeStruct(macho.unwind_info_compressed_second_level_page_header{ .entryPageOffset = entry_offset, .entryCount = page.count, @@ -183,8 +183,8 @@ const Page = struct { break :blk ncommon + page.getPageEncoding(info, record.compactUnwindEncoding).?; }; const compressed = macho.UnwindInfoCompressedEntry{ - .funcOffset = @intCast(u24, record.rangeStart - first_entry.rangeStart), - .encodingIndex = @intCast(u8, enc_index), + .funcOffset = @as(u24, @intCast(record.rangeStart - first_entry.rangeStart)), + .encodingIndex = @as(u8, @intCast(enc_index)), }; try writer.writeStruct(compressed); } @@ -214,15 +214,15 @@ pub fn scanRelocs(zld: *Zld) !void { if (!UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) { if (getPersonalityFunctionReloc( zld, - @intCast(u32, object_id), + @as(u32, @intCast(object_id)), record_id, )) |rel| { // Personality function; add GOT pointer. 
const target = Atom.parseRelocTarget(zld, .{ - .object_id = @intCast(u32, object_id), + .object_id = @as(u32, @intCast(object_id)), .rel = rel, .code = mem.asBytes(&record), - .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)), + .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))), }); try Atom.addGotEntry(zld, target); } @@ -258,18 +258,18 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { var record = unwind_records[record_id]; if (UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) { - try info.collectPersonalityFromDwarf(zld, @intCast(u32, object_id), atom_index, &record); + try info.collectPersonalityFromDwarf(zld, @as(u32, @intCast(object_id)), atom_index, &record); } else { if (getPersonalityFunctionReloc( zld, - @intCast(u32, object_id), + @as(u32, @intCast(object_id)), record_id, )) |rel| { const target = Atom.parseRelocTarget(zld, .{ - .object_id = @intCast(u32, object_id), + .object_id = @as(u32, @intCast(object_id)), .rel = rel, .code = mem.asBytes(&record), - .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)), + .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))), }); const personality_index = info.getPersonalityFunction(target) orelse inner: { const personality_index = info.personalities_count; @@ -282,14 +282,14 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { UnwindEncoding.setPersonalityIndex(&record.compactUnwindEncoding, personality_index + 1); } - if (getLsdaReloc(zld, @intCast(u32, object_id), record_id)) |rel| { + if (getLsdaReloc(zld, @as(u32, @intCast(object_id)), record_id)) |rel| { const target = Atom.parseRelocTarget(zld, .{ - .object_id = @intCast(u32, object_id), + .object_id = @as(u32, @intCast(object_id)), .rel = rel, .code = mem.asBytes(&record), - .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)), + .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))), }); - record.lsda = @bitCast(u64, target); + record.lsda = @as(u64, @bitCast(target)); } } break :blk record; @@ -302,7 +302,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { if (object.eh_frame_records_lookup.get(atom_index)) |fde_offset| { if (object.eh_frame_relocs_lookup.get(fde_offset).?.dead) continue; var record = nullRecord(); - try info.collectPersonalityFromDwarf(zld, @intCast(u32, object_id), atom_index, &record); + try info.collectPersonalityFromDwarf(zld, @as(u32, @intCast(object_id)), atom_index, &record); switch (cpu_arch) { .aarch64 => UnwindEncoding.setMode(&record.compactUnwindEncoding, macho.UNWIND_ARM64_MODE.DWARF), .x86_64 => UnwindEncoding.setMode(&record.compactUnwindEncoding, macho.UNWIND_X86_64_MODE.DWARF), @@ -320,7 +320,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { const sym = zld.getSymbol(sym_loc); assert(sym.n_desc != N_DEAD); record.rangeStart = sym.n_value; - record.rangeLength = @intCast(u32, atom.size); + record.rangeLength = @as(u32, @intCast(atom.size)); records.appendAssumeCapacity(record); atom_indexes.appendAssumeCapacity(atom_index); @@ -329,7 +329,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { // Fold records try info.records.ensureTotalCapacity(info.gpa, records.items.len); - try info.records_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, atom_indexes.items.len)); + try info.records_lookup.ensureTotalCapacity(info.gpa, @as(u32, @intCast(atom_indexes.items.len))); var maybe_prev: ?macho.compact_unwind_entry = null; for (records.items, 0..) 
|record, i| { @@ -341,15 +341,15 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { (prev.personalityFunction != record.personalityFunction) or record.lsda > 0) { - const record_id = @intCast(RecordIndex, info.records.items.len); + const record_id = @as(RecordIndex, @intCast(info.records.items.len)); info.records.appendAssumeCapacity(record); maybe_prev = record; break :blk record_id; } else { - break :blk @intCast(RecordIndex, info.records.items.len - 1); + break :blk @as(RecordIndex, @intCast(info.records.items.len - 1)); } } else { - const record_id = @intCast(RecordIndex, info.records.items.len); + const record_id = @as(RecordIndex, @intCast(info.records.items.len)); info.records.appendAssumeCapacity(record); maybe_prev = record; break :blk record_id; @@ -459,14 +459,14 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { } } - page.count = @intCast(u16, i - page.start); + page.count = @as(u16, @intCast(i - page.start)); if (i < info.records.items.len and page.count < max_regular_second_level_entries) { page.kind = .regular; - page.count = @intCast(u16, @min( + page.count = @as(u16, @intCast(@min( max_regular_second_level_entries, info.records.items.len - page.start, - )); + ))); i = page.start + page.count; } else { page.kind = .compressed; @@ -479,11 +479,11 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { } // Save indices of records requiring LSDA relocation - try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, info.records.items.len)); + try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @as(u32, @intCast(info.records.items.len))); for (info.records.items, 0..) |rec, i| { - info.lsdas_lookup.putAssumeCapacityNoClobber(@intCast(RecordIndex, i), @intCast(u32, info.lsdas.items.len)); + info.lsdas_lookup.putAssumeCapacityNoClobber(@as(RecordIndex, @intCast(i)), @as(u32, @intCast(info.lsdas.items.len))); if (rec.lsda == 0) continue; - try info.lsdas.append(info.gpa, @intCast(RecordIndex, i)); + try info.lsdas.append(info.gpa, @as(RecordIndex, @intCast(i))); } } @@ -506,7 +506,7 @@ fn collectPersonalityFromDwarf( if (cie.getPersonalityPointerReloc( zld, - @intCast(u32, object_id), + @as(u32, @intCast(object_id)), cie_offset, )) |target| { const personality_index = info.getPersonalityFunction(target) orelse inner: { @@ -532,8 +532,8 @@ fn calcRequiredSize(info: UnwindInfo) usize { var total_size: usize = 0; total_size += @sizeOf(macho.unwind_info_section_header); total_size += - @intCast(usize, info.common_encodings_count) * @sizeOf(macho.compact_unwind_encoding_t); - total_size += @intCast(usize, info.personalities_count) * @sizeOf(u32); + @as(usize, @intCast(info.common_encodings_count)) * @sizeOf(macho.compact_unwind_encoding_t); + total_size += @as(usize, @intCast(info.personalities_count)) * @sizeOf(u32); total_size += (info.pages.items.len + 1) * @sizeOf(macho.unwind_info_section_header_index_entry); total_size += info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry); total_size += info.pages.items.len * second_level_page_bytes; @@ -557,7 +557,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void { const atom_index = zld.getGotAtomIndexForSymbol(target).?; const atom = zld.getAtom(atom_index); const sym = zld.getSymbol(atom.getSymbolWithLoc()); - personalities[i] = @intCast(u32, sym.n_value - seg.vmaddr); + personalities[i] = @as(u32, @intCast(sym.n_value - seg.vmaddr)); log.debug(" {d}: 0x{x} ({s})", .{ i, personalities[i], zld.getSymbolName(target) }); } @@ -570,7 +570,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void 
{ } if (rec.compactUnwindEncoding > 0 and !UnwindEncoding.isDwarf(rec.compactUnwindEncoding, cpu_arch)) { - const lsda_target = @bitCast(SymbolWithLoc, rec.lsda); + const lsda_target = @as(SymbolWithLoc, @bitCast(rec.lsda)); if (lsda_target.getFile()) |_| { const sym = zld.getSymbol(lsda_target); rec.lsda = sym.n_value - seg.vmaddr; @@ -601,7 +601,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void { const personalities_offset: u32 = common_encodings_offset + common_encodings_count * @sizeOf(u32); const personalities_count: u32 = info.personalities_count; const indexes_offset: u32 = personalities_offset + personalities_count * @sizeOf(u32); - const indexes_count: u32 = @intCast(u32, info.pages.items.len + 1); + const indexes_count: u32 = @as(u32, @intCast(info.pages.items.len + 1)); try writer.writeStruct(macho.unwind_info_section_header{ .commonEncodingsArraySectionOffset = common_encodings_offset, @@ -615,34 +615,34 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void { try writer.writeAll(mem.sliceAsBytes(info.common_encodings[0..info.common_encodings_count])); try writer.writeAll(mem.sliceAsBytes(personalities[0..info.personalities_count])); - const pages_base_offset = @intCast(u32, size - (info.pages.items.len * second_level_page_bytes)); - const lsda_base_offset = @intCast(u32, pages_base_offset - - (info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry))); + const pages_base_offset = @as(u32, @intCast(size - (info.pages.items.len * second_level_page_bytes))); + const lsda_base_offset = @as(u32, @intCast(pages_base_offset - + (info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry)))); for (info.pages.items, 0..) |page, i| { assert(page.count > 0); const first_entry = info.records.items[page.start]; try writer.writeStruct(macho.unwind_info_section_header_index_entry{ - .functionOffset = @intCast(u32, first_entry.rangeStart), - .secondLevelPagesSectionOffset = @intCast(u32, pages_base_offset + i * second_level_page_bytes), + .functionOffset = @as(u32, @intCast(first_entry.rangeStart)), + .secondLevelPagesSectionOffset = @as(u32, @intCast(pages_base_offset + i * second_level_page_bytes)), .lsdaIndexArraySectionOffset = lsda_base_offset + info.lsdas_lookup.get(page.start).? 
* @sizeOf(macho.unwind_info_section_header_lsda_index_entry), }); } const last_entry = info.records.items[info.records.items.len - 1]; - const sentinel_address = @intCast(u32, last_entry.rangeStart + last_entry.rangeLength); + const sentinel_address = @as(u32, @intCast(last_entry.rangeStart + last_entry.rangeLength)); try writer.writeStruct(macho.unwind_info_section_header_index_entry{ .functionOffset = sentinel_address, .secondLevelPagesSectionOffset = 0, .lsdaIndexArraySectionOffset = lsda_base_offset + - @intCast(u32, info.lsdas.items.len) * @sizeOf(macho.unwind_info_section_header_lsda_index_entry), + @as(u32, @intCast(info.lsdas.items.len)) * @sizeOf(macho.unwind_info_section_header_lsda_index_entry), }); for (info.lsdas.items) |record_id| { const record = info.records.items[record_id]; try writer.writeStruct(macho.unwind_info_section_header_lsda_index_entry{ - .functionOffset = @intCast(u32, record.rangeStart), - .lsdaOffset = @intCast(u32, record.lsda), + .functionOffset = @as(u32, @intCast(record.rangeStart)), + .lsdaOffset = @as(u32, @intCast(record.lsda)), }); } @@ -674,7 +674,7 @@ fn getRelocs(zld: *Zld, object_id: u32, record_id: usize) []const macho.relocati } fn isPersonalityFunction(record_id: usize, rel: macho.relocation_info) bool { - const base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)); + const base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))); const rel_offset = rel.r_address - base_offset; return rel_offset == 16; } @@ -703,7 +703,7 @@ fn getPersonalityFunction(info: UnwindInfo, global_index: SymbolWithLoc) ?u2 { } fn isLsda(record_id: usize, rel: macho.relocation_info) bool { - const base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)); + const base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))); const rel_offset = rel.r_address - base_offset; return rel_offset == 24; } @@ -754,45 +754,45 @@ fn getCommonEncoding(info: UnwindInfo, enc: macho.compact_unwind_encoding_t) ?u7 pub const UnwindEncoding = struct { pub fn getMode(enc: macho.compact_unwind_encoding_t) u4 { comptime assert(macho.UNWIND_ARM64_MODE_MASK == macho.UNWIND_X86_64_MODE_MASK); - return @truncate(u4, (enc & macho.UNWIND_ARM64_MODE_MASK) >> 24); + return @as(u4, @truncate((enc & macho.UNWIND_ARM64_MODE_MASK) >> 24)); } pub fn isDwarf(enc: macho.compact_unwind_encoding_t, cpu_arch: std.Target.Cpu.Arch) bool { const mode = getMode(enc); return switch (cpu_arch) { - .aarch64 => @enumFromInt(macho.UNWIND_ARM64_MODE, mode) == .DWARF, - .x86_64 => @enumFromInt(macho.UNWIND_X86_64_MODE, mode) == .DWARF, + .aarch64 => @as(macho.UNWIND_ARM64_MODE, @enumFromInt(mode)) == .DWARF, + .x86_64 => @as(macho.UNWIND_X86_64_MODE, @enumFromInt(mode)) == .DWARF, else => unreachable, }; } pub fn setMode(enc: *macho.compact_unwind_encoding_t, mode: anytype) void { - enc.* |= @intCast(u32, @intFromEnum(mode)) << 24; + enc.* |= @as(u32, @intCast(@intFromEnum(mode))) << 24; } pub fn hasLsda(enc: macho.compact_unwind_encoding_t) bool { - const has_lsda = @truncate(u1, (enc & macho.UNWIND_HAS_LSDA) >> 31); + const has_lsda = @as(u1, @truncate((enc & macho.UNWIND_HAS_LSDA) >> 31)); return has_lsda == 1; } pub fn setHasLsda(enc: *macho.compact_unwind_encoding_t, has_lsda: bool) void { - const mask = @intCast(u32, @intFromBool(has_lsda)) << 31; + const mask = @as(u32, @intCast(@intFromBool(has_lsda))) << 31; enc.* |= mask; } pub fn getPersonalityIndex(enc: macho.compact_unwind_encoding_t) u2 { - const index = @truncate(u2, 
(enc & macho.UNWIND_PERSONALITY_MASK) >> 28); + const index = @as(u2, @truncate((enc & macho.UNWIND_PERSONALITY_MASK) >> 28)); return index; } pub fn setPersonalityIndex(enc: *macho.compact_unwind_encoding_t, index: u2) void { - const mask = @intCast(u32, index) << 28; + const mask = @as(u32, @intCast(index)) << 28; enc.* |= mask; } pub fn getDwarfSectionOffset(enc: macho.compact_unwind_encoding_t, cpu_arch: std.Target.Cpu.Arch) u24 { assert(isDwarf(enc, cpu_arch)); - const offset = @truncate(u24, enc); + const offset = @as(u24, @truncate(enc)); return offset; } diff --git a/src/link/MachO/ZldAtom.zig b/src/link/MachO/ZldAtom.zig index 55a6325a5a..613f0fc86c 100644 --- a/src/link/MachO/ZldAtom.zig +++ b/src/link/MachO/ZldAtom.zig @@ -117,8 +117,8 @@ pub fn getSectionAlias(zld: *Zld, atom_index: AtomIndex) ?SymbolWithLoc { assert(atom.getFile() != null); const object = zld.objects.items[atom.getFile().?]; - const nbase = @intCast(u32, object.in_symtab.?.len); - const ntotal = @intCast(u32, object.symtab.len); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const ntotal = @as(u32, @intCast(object.symtab.len)); var sym_index: u32 = nbase; while (sym_index < ntotal) : (sym_index += 1) { if (object.getAtomIndexForSymbol(sym_index)) |other_atom_index| { @@ -144,8 +144,8 @@ pub fn calcInnerSymbolOffset(zld: *Zld, atom_index: AtomIndex, sym_index: u32) u const base_addr = if (object.getSourceSymbol(atom.sym_index)) |sym| sym.n_value else blk: { - const nbase = @intCast(u32, object.in_symtab.?.len); - const sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const sect_id = @as(u8, @intCast(atom.sym_index - nbase)); const source_sect = object.getSourceSection(sect_id); break :blk source_sect.addr; }; @@ -177,15 +177,15 @@ pub fn getRelocContext(zld: *Zld, atom_index: AtomIndex) RelocContext { if (object.getSourceSymbol(atom.sym_index)) |source_sym| { const source_sect = object.getSourceSection(source_sym.n_sect - 1); return .{ - .base_addr = @intCast(i64, source_sect.addr), - .base_offset = @intCast(i32, source_sym.n_value - source_sect.addr), + .base_addr = @as(i64, @intCast(source_sect.addr)), + .base_offset = @as(i32, @intCast(source_sym.n_value - source_sect.addr)), }; } - const nbase = @intCast(u32, object.in_symtab.?.len); - const sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const sect_id = @as(u8, @intCast(atom.sym_index - nbase)); const source_sect = object.getSourceSection(sect_id); return .{ - .base_addr = @intCast(i64, source_sect.addr), + .base_addr = @as(i64, @intCast(source_sect.addr)), .base_offset = 0, }; } @@ -204,8 +204,8 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct { log.debug("parsing reloc target in object({d}) '{s}' ", .{ ctx.object_id, object.name }); const sym_index = if (ctx.rel.r_extern == 0) sym_index: { - const sect_id = @intCast(u8, ctx.rel.r_symbolnum - 1); - const rel_offset = @intCast(u32, ctx.rel.r_address - ctx.base_offset); + const sect_id = @as(u8, @intCast(ctx.rel.r_symbolnum - 1)); + const rel_offset = @as(u32, @intCast(ctx.rel.r_address - ctx.base_offset)); const address_in_section = if (ctx.rel.r_pcrel == 0) blk: { break :blk if (ctx.rel.r_length == 3) @@ -214,7 +214,7 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct { mem.readIntLittle(u32, ctx.code[rel_offset..][0..4]); } else blk: { assert(zld.options.target.cpu.arch == .x86_64); - const correction: u3 = switch (@enumFromInt(macho.reloc_type_x86_64, ctx.rel.r_type)) 
{ + const correction: u3 = switch (@as(macho.reloc_type_x86_64, @enumFromInt(ctx.rel.r_type))) { .X86_64_RELOC_SIGNED => 0, .X86_64_RELOC_SIGNED_1 => 1, .X86_64_RELOC_SIGNED_2 => 2, @@ -222,8 +222,8 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct { else => unreachable, }; const addend = mem.readIntLittle(i32, ctx.code[rel_offset..][0..4]); - const target_address = @intCast(i64, ctx.base_addr) + ctx.rel.r_address + 4 + correction + addend; - break :blk @intCast(u64, target_address); + const target_address = @as(i64, @intCast(ctx.base_addr)) + ctx.rel.r_address + 4 + correction + addend; + break :blk @as(u64, @intCast(target_address)); }; // Find containing atom @@ -272,7 +272,7 @@ pub fn getRelocTargetAtomIndex(zld: *Zld, target: SymbolWithLoc, is_via_got: boo fn scanAtomRelocsArm64(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) const macho.relocation_info) !void { for (relocs) |rel| { - const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type); + const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)); switch (rel_type) { .ARM64_RELOC_ADDEND, .ARM64_RELOC_SUBTRACTOR => continue, @@ -318,7 +318,7 @@ fn scanAtomRelocsArm64(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) cons fn scanAtomRelocsX86(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) const macho.relocation_info) !void { for (relocs) |rel| { - const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type); + const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type)); switch (rel_type) { .X86_64_RELOC_SUBTRACTOR => continue, @@ -364,7 +364,7 @@ fn addTlvPtrEntry(zld: *Zld, target: SymbolWithLoc) !void { const gpa = zld.gpa; const atom_index = try zld.createTlvPtrAtom(); - const tlv_ptr_index = @intCast(u32, zld.tlv_ptr_entries.items.len); + const tlv_ptr_index = @as(u32, @intCast(zld.tlv_ptr_entries.items.len)); try zld.tlv_ptr_entries.append(gpa, .{ .target = target, .atom_index = atom_index, @@ -376,7 +376,7 @@ pub fn addGotEntry(zld: *Zld, target: SymbolWithLoc) !void { if (zld.got_table.contains(target)) return; const gpa = zld.gpa; const atom_index = try zld.createGotAtom(); - const got_index = @intCast(u32, zld.got_entries.items.len); + const got_index = @as(u32, @intCast(zld.got_entries.items.len)); try zld.got_entries.append(gpa, .{ .target = target, .atom_index = atom_index, @@ -393,7 +393,7 @@ pub fn addStub(zld: *Zld, target: SymbolWithLoc) !void { _ = try zld.createStubHelperAtom(); _ = try zld.createLazyPointerAtom(); const atom_index = try zld.createStubAtom(); - const stubs_index = @intCast(u32, zld.stubs.items.len); + const stubs_index = @as(u32, @intCast(zld.stubs.items.len)); try zld.stubs.append(gpa, .{ .target = target, .atom_index = atom_index, @@ -489,7 +489,7 @@ fn resolveRelocsArm64( var subtractor: ?SymbolWithLoc = null; for (atom_relocs) |rel| { - const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type); + const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)); switch (rel_type) { .ARM64_RELOC_ADDEND => { @@ -529,7 +529,7 @@ fn resolveRelocsArm64( .base_addr = context.base_addr, .base_offset = context.base_offset, }); - const rel_offset = @intCast(u32, rel.r_address - context.base_offset); + const rel_offset = @as(u32, @intCast(rel.r_address - context.base_offset)); log.debug(" RELA({s}) @ {x} => %{d} ('{s}') in object({?})", .{ @tagName(rel_type), @@ -590,7 +590,7 @@ fn resolveRelocsArm64( aarch64.Instruction.unconditional_branch_immediate, ), code), }; - inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, 
displacement >> 2)); + inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(displacement >> 2)))); mem.writeIntLittle(u32, code, inst.toU32()); }, @@ -598,11 +598,11 @@ fn resolveRelocsArm64( .ARM64_RELOC_GOT_LOAD_PAGE21, .ARM64_RELOC_TLVP_LOAD_PAGE21, => { - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0)); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0))); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); - const pages = @bitCast(u21, Relocation.calcNumberOfPages(source_addr, adjusted_target_addr)); + const pages = @as(u21, @bitCast(Relocation.calcNumberOfPages(source_addr, adjusted_target_addr))); const code = atom_code[rel_offset..][0..4]; var inst = aarch64.Instruction{ .pc_relative_address = mem.bytesToValue(meta.TagPayload( @@ -610,14 +610,14 @@ fn resolveRelocsArm64( aarch64.Instruction.pc_relative_address, ), code), }; - inst.pc_relative_address.immhi = @truncate(u19, pages >> 2); - inst.pc_relative_address.immlo = @truncate(u2, pages); + inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2)); + inst.pc_relative_address.immlo = @as(u2, @truncate(pages)); mem.writeIntLittle(u32, code, inst.toU32()); addend = null; }, .ARM64_RELOC_PAGEOFF12 => { - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0)); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0))); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); @@ -656,7 +656,7 @@ fn resolveRelocsArm64( .ARM64_RELOC_GOT_LOAD_PAGEOFF12 => { const code = atom_code[rel_offset..][0..4]; - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0)); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0))); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); @@ -674,7 +674,7 @@ fn resolveRelocsArm64( .ARM64_RELOC_TLVP_LOAD_PAGEOFF12 => { const code = atom_code[rel_offset..][0..4]; - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0)); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0))); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); @@ -725,7 +725,7 @@ fn resolveRelocsArm64( .sh = 0, .s = 0, .op = 0, - .sf = @truncate(u1, reg_info.size), + .sf = @as(u1, @truncate(reg_info.size)), }, }; mem.writeIntLittle(u32, code, inst.toU32()); @@ -734,9 +734,9 @@ fn resolveRelocsArm64( .ARM64_RELOC_POINTER_TO_GOT => { log.debug(" | target_addr = 0x{x}", .{target_addr}); - const result = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse + const result = math.cast(i32, @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr))) orelse return error.Overflow; - mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @bitCast(u32, result)); + mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @as(u32, @bitCast(result))); }, .ARM64_RELOC_UNSIGNED => { @@ -747,7 +747,7 @@ fn resolveRelocsArm64( if (rel.r_extern == 0) { const base_addr = if (target.sym_index >= object.source_address_lookup.len) - @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr) + @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr)) else object.source_address_lookup[target.sym_index]; ptr_addend -= base_addr; @@ -756,17 +756,17 @@ fn resolveRelocsArm64( const result = 
blk: { if (subtractor) |sub| { const sym = zld.getSymbol(sub); - break :blk @intCast(i64, target_addr) - @intCast(i64, sym.n_value) + ptr_addend; + break :blk @as(i64, @intCast(target_addr)) - @as(i64, @intCast(sym.n_value)) + ptr_addend; } else { - break :blk @intCast(i64, target_addr) + ptr_addend; + break :blk @as(i64, @intCast(target_addr)) + ptr_addend; } }; log.debug(" | target_addr = 0x{x}", .{result}); if (rel.r_length == 3) { - mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @bitCast(u64, result)); + mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @as(u64, @bitCast(result))); } else { - mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @truncate(u32, @bitCast(u64, result))); + mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @as(u32, @truncate(@as(u64, @bitCast(result))))); } subtractor = null; @@ -791,7 +791,7 @@ fn resolveRelocsX86( var subtractor: ?SymbolWithLoc = null; for (atom_relocs) |rel| { - const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type); + const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type)); switch (rel_type) { .X86_64_RELOC_SUBTRACTOR => { @@ -823,7 +823,7 @@ fn resolveRelocsX86( .base_addr = context.base_addr, .base_offset = context.base_offset, }); - const rel_offset = @intCast(u32, rel.r_address - context.base_offset); + const rel_offset = @as(u32, @intCast(rel.r_address - context.base_offset)); log.debug(" RELA({s}) @ {x} => %{d} ('{s}') in object({?})", .{ @tagName(rel_type), @@ -851,7 +851,7 @@ fn resolveRelocsX86( switch (rel_type) { .X86_64_RELOC_BRANCH => { const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]); - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend)); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0); mem.writeIntLittle(i32, atom_code[rel_offset..][0..4], disp); @@ -861,7 +861,7 @@ fn resolveRelocsX86( .X86_64_RELOC_GOT_LOAD, => { const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]); - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend)); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0); mem.writeIntLittle(i32, atom_code[rel_offset..][0..4], disp); @@ -869,7 +869,7 @@ fn resolveRelocsX86( .X86_64_RELOC_TLV => { const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]); - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend)); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0); @@ -897,14 +897,14 @@ fn resolveRelocsX86( if (rel.r_extern == 0) { const base_addr = if (target.sym_index >= object.source_address_lookup.len) - @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr) + @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr)) else object.source_address_lookup[target.sym_index]; - addend += @intCast(i32, @intCast(i64, context.base_addr) + rel.r_address + 4 - - @intCast(i64, base_addr)); + addend += @as(i32, 
@intCast(@as(i64, @intCast(context.base_addr)) + rel.r_address + 4 - + @as(i64, @intCast(base_addr)))); } - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend)); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); @@ -920,7 +920,7 @@ fn resolveRelocsX86( if (rel.r_extern == 0) { const base_addr = if (target.sym_index >= object.source_address_lookup.len) - @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr) + @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr)) else object.source_address_lookup[target.sym_index]; addend -= base_addr; @@ -929,17 +929,17 @@ fn resolveRelocsX86( const result = blk: { if (subtractor) |sub| { const sym = zld.getSymbol(sub); - break :blk @intCast(i64, target_addr) - @intCast(i64, sym.n_value) + addend; + break :blk @as(i64, @intCast(target_addr)) - @as(i64, @intCast(sym.n_value)) + addend; } else { - break :blk @intCast(i64, target_addr) + addend; + break :blk @as(i64, @intCast(target_addr)) + addend; } }; log.debug(" | target_addr = 0x{x}", .{result}); if (rel.r_length == 3) { - mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @bitCast(u64, result)); + mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @as(u64, @bitCast(result))); } else { - mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @truncate(u32, @bitCast(u64, result))); + mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @as(u32, @truncate(@as(u64, @bitCast(result))))); } subtractor = null; @@ -958,19 +958,19 @@ pub fn getAtomCode(zld: *Zld, atom_index: AtomIndex) []const u8 { // If there was no matching symbol present in the source symtab, this means // we are dealing with either an entire section, or part of it, but also // starting at the beginning. - const nbase = @intCast(u32, object.in_symtab.?.len); - const sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const sect_id = @as(u8, @intCast(atom.sym_index - nbase)); const source_sect = object.getSourceSection(sect_id); assert(!source_sect.isZerofill()); const code = object.getSectionContents(source_sect); - const code_len = @intCast(usize, atom.size); + const code_len = @as(usize, @intCast(atom.size)); return code[0..code_len]; }; const source_sect = object.getSourceSection(source_sym.n_sect - 1); assert(!source_sect.isZerofill()); const code = object.getSectionContents(source_sect); - const offset = @intCast(usize, source_sym.n_value - source_sect.addr); - const code_len = @intCast(usize, atom.size); + const offset = @as(usize, @intCast(source_sym.n_value - source_sect.addr)); + const code_len = @as(usize, @intCast(atom.size)); return code[offset..][0..code_len]; } @@ -986,8 +986,8 @@ pub fn getAtomRelocs(zld: *Zld, atom_index: AtomIndex) []const macho.relocation_ // If there was no matching symbol present in the source symtab, this means // we are dealing with either an entire section, or part of it, but also // starting at the beginning. 
- const nbase = @intCast(u32, object.in_symtab.?.len); - const sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const sect_id = @as(u8, @intCast(atom.sym_index - nbase)); break :blk sect_id; }; const source_sect = object.getSourceSection(source_sect_id); @@ -998,14 +998,14 @@ pub fn getAtomRelocs(zld: *Zld, atom_index: AtomIndex) []const macho.relocation_ pub fn relocRequiresGot(zld: *Zld, rel: macho.relocation_info) bool { switch (zld.options.target.cpu.arch) { - .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) { + .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) { .ARM64_RELOC_GOT_LOAD_PAGE21, .ARM64_RELOC_GOT_LOAD_PAGEOFF12, .ARM64_RELOC_POINTER_TO_GOT, => return true, else => return false, }, - .x86_64 => switch (@enumFromInt(macho.reloc_type_x86_64, rel.r_type)) { + .x86_64 => switch (@as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type))) { .X86_64_RELOC_GOT, .X86_64_RELOC_GOT_LOAD, => return true, diff --git a/src/link/MachO/dead_strip.zig b/src/link/MachO/dead_strip.zig index b2c569447d..890b40ed85 100644 --- a/src/link/MachO/dead_strip.zig +++ b/src/link/MachO/dead_strip.zig @@ -27,10 +27,10 @@ pub fn gcAtoms(zld: *Zld, resolver: *const SymbolResolver) !void { defer arena.deinit(); var roots = AtomTable.init(arena.allocator()); - try roots.ensureUnusedCapacity(@intCast(u32, zld.globals.items.len)); + try roots.ensureUnusedCapacity(@as(u32, @intCast(zld.globals.items.len))); var alive = AtomTable.init(arena.allocator()); - try alive.ensureTotalCapacity(@intCast(u32, zld.atoms.items.len)); + try alive.ensureTotalCapacity(@as(u32, @intCast(zld.atoms.items.len))); try collectRoots(zld, &roots, resolver); try mark(zld, roots, &alive); @@ -99,8 +99,8 @@ fn collectRoots(zld: *Zld, roots: *AtomTable, resolver: *const SymbolResolver) ! 
const sect_id = if (object.getSourceSymbol(atom.sym_index)) |source_sym| source_sym.n_sect - 1 else sect_id: { - const nbase = @intCast(u32, object.in_symtab.?.len); - const sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const sect_id = @as(u8, @intCast(atom.sym_index - nbase)); break :sect_id sect_id; }; const source_sect = object.getSourceSection(sect_id); @@ -148,7 +148,7 @@ fn markLive(zld: *Zld, atom_index: AtomIndex, alive: *AtomTable) void { for (relocs) |rel| { const target = switch (cpu_arch) { - .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) { + .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) { .ARM64_RELOC_ADDEND => continue, else => Atom.parseRelocTarget(zld, .{ .object_id = atom.getFile().?, @@ -208,7 +208,7 @@ fn refersLive(zld: *Zld, atom_index: AtomIndex, alive: AtomTable) bool { for (relocs) |rel| { const target = switch (cpu_arch) { - .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) { + .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) { .ARM64_RELOC_ADDEND => continue, else => Atom.parseRelocTarget(zld, .{ .object_id = atom.getFile().?, @@ -264,8 +264,8 @@ fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable) !void { const sect_id = if (object.getSourceSymbol(atom.sym_index)) |source_sym| source_sym.n_sect - 1 else blk: { - const nbase = @intCast(u32, object.in_symtab.?.len); - const sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const sect_id = @as(u8, @intCast(atom.sym_index - nbase)); break :blk sect_id; }; const source_sect = object.getSourceSection(sect_id); @@ -283,7 +283,7 @@ fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable) !void { for (zld.objects.items, 0..) |_, object_id| { // Traverse unwind and eh_frame records noting if the source symbol has been marked, and if so, // marking all references as live. 
- try markUnwindRecords(zld, @intCast(u32, object_id), alive); + try markUnwindRecords(zld, @as(u32, @intCast(object_id)), alive); } } @@ -329,7 +329,7 @@ fn markUnwindRecords(zld: *Zld, object_id: u32, alive: *AtomTable) !void { .object_id = object_id, .rel = rel, .code = mem.asBytes(&record), - .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)), + .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))), }); const target_sym = zld.getSymbol(target); if (!target_sym.undf()) { @@ -344,7 +344,7 @@ fn markUnwindRecords(zld: *Zld, object_id: u32, alive: *AtomTable) !void { .object_id = object_id, .rel = rel, .code = mem.asBytes(&record), - .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)), + .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))), }); const target_object = zld.objects.items[target.getFile().?]; const target_atom_index = target_object.getAtomIndexForSymbol(target.sym_index).?; @@ -377,7 +377,7 @@ fn markEhFrameRecord(zld: *Zld, object_id: u32, atom_index: AtomIndex, alive: *A .object_id = object_id, .rel = rel, .code = fde.data, - .base_offset = @intCast(i32, fde_offset) + 4, + .base_offset = @as(i32, @intCast(fde_offset)) + 4, }); const target_sym = zld.getSymbol(target); if (!target_sym.undf()) blk: { diff --git a/src/link/MachO/dyld_info/Rebase.zig b/src/link/MachO/dyld_info/Rebase.zig index 5b386a8136..0f3e96b02f 100644 --- a/src/link/MachO/dyld_info/Rebase.zig +++ b/src/link/MachO/dyld_info/Rebase.zig @@ -31,7 +31,7 @@ pub fn deinit(rebase: *Rebase, gpa: Allocator) void { } pub fn size(rebase: Rebase) u64 { - return @intCast(u64, rebase.buffer.items.len); + return @as(u64, @intCast(rebase.buffer.items.len)); } pub fn finalize(rebase: *Rebase, gpa: Allocator) !void { @@ -145,12 +145,12 @@ fn finalizeSegment(entries: []const Entry, writer: anytype) !void { fn setTypePointer(writer: anytype) !void { log.debug(">>> set type: {d}", .{macho.REBASE_TYPE_POINTER}); - try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.REBASE_TYPE_POINTER)); + try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @as(u4, @truncate(macho.REBASE_TYPE_POINTER))); } fn setSegmentOffset(segment_id: u8, offset: u64, writer: anytype) !void { log.debug(">>> set segment: {d} and offset: {x}", .{ segment_id, offset }); - try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, segment_id)); + try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @as(u4, @truncate(segment_id))); try std.leb.writeULEB128(writer, offset); } @@ -163,7 +163,7 @@ fn rebaseAddAddr(addr: u64, writer: anytype) !void { fn rebaseTimes(count: usize, writer: anytype) !void { log.debug(">>> rebase with count: {d}", .{count}); if (count <= 0xf) { - try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @truncate(u4, count)); + try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @as(u4, @truncate(count))); } else { try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_ULEB_TIMES); try std.leb.writeULEB128(writer, count); @@ -182,7 +182,7 @@ fn addAddr(addr: u64, writer: anytype) !void { if (std.mem.isAlignedGeneric(u64, addr, @sizeOf(u64))) { const imm = @divExact(addr, @sizeOf(u64)); if (imm <= 0xf) { - try writer.writeByte(macho.REBASE_OPCODE_ADD_ADDR_IMM_SCALED | @truncate(u4, imm)); + try writer.writeByte(macho.REBASE_OPCODE_ADD_ADDR_IMM_SCALED | @as(u4, @truncate(imm))); return; } } diff --git a/src/link/MachO/dyld_info/bind.zig 
b/src/link/MachO/dyld_info/bind.zig index 14ce1587aa..f804c6466d 100644 --- a/src/link/MachO/dyld_info/bind.zig +++ b/src/link/MachO/dyld_info/bind.zig @@ -39,7 +39,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type { } pub fn size(self: Self) u64 { - return @intCast(u64, self.buffer.items.len); + return @as(u64, @intCast(self.buffer.items.len)); } pub fn finalize(self: *Self, gpa: Allocator, ctx: Ctx) !void { @@ -95,7 +95,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type { const sym = ctx.getSymbol(current.target); const name = ctx.getSymbolName(current.target); const flags: u8 = if (sym.weakRef()) macho.BIND_SYMBOL_FLAGS_WEAK_IMPORT else 0; - const ordinal = @divTrunc(@bitCast(i16, sym.n_desc), macho.N_SYMBOL_RESOLVER); + const ordinal = @divTrunc(@as(i16, @bitCast(sym.n_desc)), macho.N_SYMBOL_RESOLVER); try setSymbol(name, flags, writer); try setTypePointer(writer); @@ -112,7 +112,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type { switch (state) { .start => { if (current.offset < offset) { - try addAddr(@bitCast(u64, @intCast(i64, current.offset) - @intCast(i64, offset)), writer); + try addAddr(@as(u64, @bitCast(@as(i64, @intCast(current.offset)) - @as(i64, @intCast(offset)))), writer); offset = offset - (offset - current.offset); } else if (current.offset > offset) { const delta = current.offset - offset; @@ -130,7 +130,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type { } else if (current.offset > offset) { const delta = current.offset - offset; state = .bind_times_skip; - skip = @intCast(u64, delta); + skip = @as(u64, @intCast(delta)); offset += skip; } else unreachable; i -= 1; @@ -194,7 +194,7 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type { } pub fn size(self: Self) u64 { - return @intCast(u64, self.buffer.items.len); + return @as(u64, @intCast(self.buffer.items.len)); } pub fn finalize(self: *Self, gpa: Allocator, ctx: Ctx) !void { @@ -208,12 +208,12 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type { var addend: i64 = 0; for (self.entries.items) |entry| { - self.offsets.appendAssumeCapacity(@intCast(u32, cwriter.bytes_written)); + self.offsets.appendAssumeCapacity(@as(u32, @intCast(cwriter.bytes_written))); const sym = ctx.getSymbol(entry.target); const name = ctx.getSymbolName(entry.target); const flags: u8 = if (sym.weakRef()) macho.BIND_SYMBOL_FLAGS_WEAK_IMPORT else 0; - const ordinal = @divTrunc(@bitCast(i16, sym.n_desc), macho.N_SYMBOL_RESOLVER); + const ordinal = @divTrunc(@as(i16, @bitCast(sym.n_desc)), macho.N_SYMBOL_RESOLVER); try setSegmentOffset(entry.segment_id, entry.offset, writer); try setSymbol(name, flags, writer); @@ -238,20 +238,20 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type { fn setSegmentOffset(segment_id: u8, offset: u64, writer: anytype) !void { log.debug(">>> set segment: {d} and offset: {x}", .{ segment_id, offset }); - try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, segment_id)); + try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @as(u4, @truncate(segment_id))); try std.leb.writeULEB128(writer, offset); } fn setSymbol(name: []const u8, flags: u8, writer: anytype) !void { log.debug(">>> set symbol: {s} with flags: {x}", .{ name, flags }); - try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | @truncate(u4, flags)); + try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | @as(u4, @truncate(flags))); try writer.writeAll(name); try writer.writeByte(0); 
} fn setTypePointer(writer: anytype) !void { log.debug(">>> set type: {d}", .{macho.BIND_TYPE_POINTER}); - try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.BIND_TYPE_POINTER)); + try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @as(u4, @truncate(macho.BIND_TYPE_POINTER))); } fn setDylibOrdinal(ordinal: i16, writer: anytype) !void { @@ -264,13 +264,13 @@ fn setDylibOrdinal(ordinal: i16, writer: anytype) !void { else => unreachable, // Invalid dylib special binding } log.debug(">>> set dylib special: {d}", .{ordinal}); - const cast = @bitCast(u16, ordinal); - try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, cast)); + const cast = @as(u16, @bitCast(ordinal)); + try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @as(u4, @truncate(cast))); } else { - const cast = @bitCast(u16, ordinal); + const cast = @as(u16, @bitCast(ordinal)); log.debug(">>> set dylib ordinal: {d}", .{ordinal}); if (cast <= 0xf) { - try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @truncate(u4, cast)); + try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @as(u4, @truncate(cast))); } else { try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB); try std.leb.writeULEB128(writer, cast); @@ -295,7 +295,7 @@ fn doBindAddAddr(addr: u64, writer: anytype) !void { const imm = @divExact(addr, @sizeOf(u64)); if (imm <= 0xf) { try writer.writeByte( - macho.BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | @truncate(u4, imm), + macho.BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | @as(u4, @truncate(imm)), ); return; } @@ -341,7 +341,7 @@ const TestContext = struct { fn addSymbol(ctx: *TestContext, gpa: Allocator, name: []const u8, ordinal: i16, flags: u16) !void { const n_strx = try ctx.addString(gpa, name); - var n_desc = @bitCast(u16, ordinal * macho.N_SYMBOL_RESOLVER); + var n_desc = @as(u16, @bitCast(ordinal * macho.N_SYMBOL_RESOLVER)); n_desc |= flags; try ctx.symbols.append(gpa, .{ .n_value = 0, @@ -353,7 +353,7 @@ const TestContext = struct { } fn addString(ctx: *TestContext, gpa: Allocator, name: []const u8) !u32 { - const n_strx = @intCast(u32, ctx.strtab.items.len); + const n_strx = @as(u32, @intCast(ctx.strtab.items.len)); try ctx.strtab.appendSlice(gpa, name); try ctx.strtab.append(gpa, 0); return n_strx; @@ -366,7 +366,7 @@ const TestContext = struct { fn getSymbolName(ctx: TestContext, target: Target) []const u8 { const sym = ctx.getSymbol(target); assert(sym.n_strx < ctx.strtab.items.len); - return std.mem.sliceTo(@ptrCast([*:0]const u8, ctx.strtab.items.ptr + sym.n_strx), 0); + return std.mem.sliceTo(@as([*:0]const u8, @ptrCast(ctx.strtab.items.ptr + sym.n_strx)), 0); } }; diff --git a/src/link/MachO/eh_frame.zig b/src/link/MachO/eh_frame.zig index 1672e37229..eb4419cd7b 100644 --- a/src/link/MachO/eh_frame.zig +++ b/src/link/MachO/eh_frame.zig @@ -36,7 +36,7 @@ pub fn scanRelocs(zld: *Zld) !void { try cies.putNoClobber(cie_offset, {}); it.seekTo(cie_offset); const cie = (try it.next()).?; - try cie.scanRelocs(zld, @intCast(u32, object_id), cie_offset); + try cie.scanRelocs(zld, @as(u32, @intCast(object_id)), cie_offset); } } } @@ -110,7 +110,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void { var eh_frame_offset: u32 = 0; for (zld.objects.items, 0..) 
|*object, object_id| { - try eh_records.ensureUnusedCapacity(2 * @intCast(u32, object.exec_atoms.items.len)); + try eh_records.ensureUnusedCapacity(2 * @as(u32, @intCast(object.exec_atoms.items.len))); var cies = std.AutoHashMap(u32, u32).init(gpa); defer cies.deinit(); @@ -139,7 +139,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void { eh_it.seekTo(cie_offset); const source_cie_record = (try eh_it.next()).?; var cie_record = try source_cie_record.toOwned(gpa); - try cie_record.relocate(zld, @intCast(u32, object_id), .{ + try cie_record.relocate(zld, @as(u32, @intCast(object_id)), .{ .source_offset = cie_offset, .out_offset = eh_frame_offset, .sect_addr = sect.addr, @@ -151,7 +151,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void { var fde_record = try source_fde_record.toOwned(gpa); fde_record.setCiePointer(eh_frame_offset + 4 - gop.value_ptr.*); - try fde_record.relocate(zld, @intCast(u32, object_id), .{ + try fde_record.relocate(zld, @as(u32, @intCast(object_id)), .{ .source_offset = fde_record_offset, .out_offset = eh_frame_offset, .sect_addr = sect.addr, @@ -194,7 +194,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void { UnwindInfo.UnwindEncoding.setDwarfSectionOffset( &record.compactUnwindEncoding, cpu_arch, - @intCast(u24, eh_frame_offset), + @as(u24, @intCast(eh_frame_offset)), ); const cie_record = eh_records.get( @@ -268,7 +268,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { }) u64 { assert(rec.tag == .fde); const addend = mem.readIntLittle(i64, rec.data[4..][0..8]); - return @intCast(u64, @intCast(i64, ctx.base_addr + ctx.base_offset + 8) + addend); + return @as(u64, @intCast(@as(i64, @intCast(ctx.base_addr + ctx.base_offset + 8)) + addend)); } pub fn setTargetSymbolAddress(rec: *Record, value: u64, ctx: struct { @@ -276,7 +276,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { base_offset: u64, }) !void { assert(rec.tag == .fde); - const addend = @intCast(i64, value) - @intCast(i64, ctx.base_addr + ctx.base_offset + 8); + const addend = @as(i64, @intCast(value)) - @as(i64, @intCast(ctx.base_addr + ctx.base_offset + 8)); mem.writeIntLittle(i64, rec.data[4..][0..8], addend); } @@ -291,7 +291,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { for (relocs) |rel| { switch (cpu_arch) { .aarch64 => { - const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type); + const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)); switch (rel_type) { .ARM64_RELOC_SUBTRACTOR, .ARM64_RELOC_UNSIGNED, @@ -301,7 +301,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { } }, .x86_64 => { - const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type); + const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type)); switch (rel_type) { .X86_64_RELOC_GOT => {}, else => unreachable, @@ -313,7 +313,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { .object_id = object_id, .rel = rel, .code = rec.data, - .base_offset = @intCast(i32, source_offset) + 4, + .base_offset = @as(i32, @intCast(source_offset)) + 4, }); return target; } @@ -335,40 +335,40 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { .object_id = object_id, .rel = rel, .code = rec.data, - .base_offset = @intCast(i32, ctx.source_offset) + 4, + .base_offset = @as(i32, @intCast(ctx.source_offset)) + 4, }); - const rel_offset = @intCast(u32, rel.r_address - @intCast(i32, ctx.source_offset) - 4); + const rel_offset = @as(u32, @intCast(rel.r_address - @as(i32, @intCast(ctx.source_offset)) - 4)); const source_addr = ctx.sect_addr + 
rel_offset + ctx.out_offset + 4; switch (cpu_arch) { .aarch64 => { - const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type); + const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)); switch (rel_type) { .ARM64_RELOC_SUBTRACTOR => { // Address of the __eh_frame in the source object file }, .ARM64_RELOC_POINTER_TO_GOT => { const target_addr = try Atom.getRelocTargetAddress(zld, target, true, false); - const result = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse + const result = math.cast(i32, @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr))) orelse return error.Overflow; mem.writeIntLittle(i32, rec.data[rel_offset..][0..4], result); }, .ARM64_RELOC_UNSIGNED => { assert(rel.r_extern == 1); const target_addr = try Atom.getRelocTargetAddress(zld, target, false, false); - const result = @intCast(i64, target_addr) - @intCast(i64, source_addr); - mem.writeIntLittle(i64, rec.data[rel_offset..][0..8], @intCast(i64, result)); + const result = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr)); + mem.writeIntLittle(i64, rec.data[rel_offset..][0..8], @as(i64, @intCast(result))); }, else => unreachable, } }, .x86_64 => { - const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type); + const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type)); switch (rel_type) { .X86_64_RELOC_GOT => { const target_addr = try Atom.getRelocTargetAddress(zld, target, true, false); const addend = mem.readIntLittle(i32, rec.data[rel_offset..][0..4]); - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend)); const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0); mem.writeIntLittle(i32, rec.data[rel_offset..][0..4], disp); }, @@ -392,7 +392,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { pub fn getAugmentationString(rec: Record) []const u8 { assert(rec.tag == .cie); - return mem.sliceTo(@ptrCast([*:0]const u8, rec.data.ptr + 5), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(rec.data.ptr + 5)), 0); } pub fn getPersonalityPointer(rec: Record, ctx: struct { @@ -418,7 +418,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { 'P' => { const enc = try reader.readByte(); const offset = ctx.base_offset + 13 + aug_str.len + creader.bytes_read; - const ptr = try getEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), reader); + const ptr = try getEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), reader); return ptr; }, 'L' => { @@ -441,7 +441,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { const reader = stream.reader(); _ = try reader.readByte(); const offset = ctx.base_offset + 25; - const ptr = try getEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), reader); + const ptr = try getEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), reader); return ptr; } @@ -454,7 +454,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { var stream = std.io.fixedBufferStream(rec.data[21..]); const writer = stream.writer(); const offset = ctx.base_offset + 25; - try setEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), value, writer); + try setEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), value, writer); } fn getLsdaEncoding(rec: Record) !?u8 { @@ -494,11 +494,11 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { if (enc == EH_PE.omit) return null; var ptr: i64 = 
switch (enc & 0x0F) { - EH_PE.absptr => @bitCast(i64, try reader.readIntLittle(u64)), - EH_PE.udata2 => @bitCast(i16, try reader.readIntLittle(u16)), - EH_PE.udata4 => @bitCast(i32, try reader.readIntLittle(u32)), - EH_PE.udata8 => @bitCast(i64, try reader.readIntLittle(u64)), - EH_PE.uleb128 => @bitCast(i64, try leb.readULEB128(u64, reader)), + EH_PE.absptr => @as(i64, @bitCast(try reader.readIntLittle(u64))), + EH_PE.udata2 => @as(i16, @bitCast(try reader.readIntLittle(u16))), + EH_PE.udata4 => @as(i32, @bitCast(try reader.readIntLittle(u32))), + EH_PE.udata8 => @as(i64, @bitCast(try reader.readIntLittle(u64))), + EH_PE.uleb128 => @as(i64, @bitCast(try leb.readULEB128(u64, reader))), EH_PE.sdata2 => try reader.readIntLittle(i16), EH_PE.sdata4 => try reader.readIntLittle(i32), EH_PE.sdata8 => try reader.readIntLittle(i64), @@ -517,13 +517,13 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { else => return null, } - return @bitCast(u64, ptr); + return @as(u64, @bitCast(ptr)); } fn setEncodedPointer(enc: u8, pcrel_offset: i64, value: u64, writer: anytype) !void { if (enc == EH_PE.omit) return; - var actual = @intCast(i64, value); + var actual = @as(i64, @intCast(value)); switch (enc & 0x70) { EH_PE.absptr => {}, @@ -537,13 +537,13 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { } switch (enc & 0x0F) { - EH_PE.absptr => try writer.writeIntLittle(u64, @bitCast(u64, actual)), - EH_PE.udata2 => try writer.writeIntLittle(u16, @bitCast(u16, @intCast(i16, actual))), - EH_PE.udata4 => try writer.writeIntLittle(u32, @bitCast(u32, @intCast(i32, actual))), - EH_PE.udata8 => try writer.writeIntLittle(u64, @bitCast(u64, actual)), - EH_PE.uleb128 => try leb.writeULEB128(writer, @bitCast(u64, actual)), - EH_PE.sdata2 => try writer.writeIntLittle(i16, @intCast(i16, actual)), - EH_PE.sdata4 => try writer.writeIntLittle(i32, @intCast(i32, actual)), + EH_PE.absptr => try writer.writeIntLittle(u64, @as(u64, @bitCast(actual))), + EH_PE.udata2 => try writer.writeIntLittle(u16, @as(u16, @bitCast(@as(i16, @intCast(actual))))), + EH_PE.udata4 => try writer.writeIntLittle(u32, @as(u32, @bitCast(@as(i32, @intCast(actual))))), + EH_PE.udata8 => try writer.writeIntLittle(u64, @as(u64, @bitCast(actual))), + EH_PE.uleb128 => try leb.writeULEB128(writer, @as(u64, @bitCast(actual))), + EH_PE.sdata2 => try writer.writeIntLittle(i16, @as(i16, @intCast(actual))), + EH_PE.sdata4 => try writer.writeIntLittle(i32, @as(i32, @intCast(actual))), EH_PE.sdata8 => try writer.writeIntLittle(i64, actual), EH_PE.sleb128 => try leb.writeILEB128(writer, actual), else => unreachable, diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig index eb582e2222..10f446f191 100644 --- a/src/link/MachO/load_commands.zig +++ b/src/link/MachO/load_commands.zig @@ -114,7 +114,7 @@ fn calcLCsSize(gpa: Allocator, options: *const link.Options, ctx: CalcLCsSizeCtx } } - return @intCast(u32, sizeofcmds); + return @as(u32, @intCast(sizeofcmds)); } pub fn calcMinHeaderPad(gpa: Allocator, options: *const link.Options, ctx: CalcLCsSizeCtx) !u64 { @@ -140,7 +140,7 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 { var pos: usize = 0; while (true) { if (pos >= lc_buffer.len) break; - const cmd = @ptrCast(*align(1) const macho.load_command, lc_buffer.ptr + pos).*; + const cmd = @as(*align(1) const macho.load_command, @ptrCast(lc_buffer.ptr + pos)).*; ncmds += 1; pos += cmd.cmdsize; } @@ -149,11 +149,11 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 { pub fn writeDylinkerLC(lc_writer: anytype) !void { const name_len = 
mem.sliceTo(default_dyld_path, 0).len; - const cmdsize = @intCast(u32, mem.alignForward( + const cmdsize = @as(u32, @intCast(mem.alignForward( u64, @sizeOf(macho.dylinker_command) + name_len, @sizeOf(u64), - )); + ))); try lc_writer.writeStruct(macho.dylinker_command{ .cmd = .LOAD_DYLINKER, .cmdsize = cmdsize, @@ -176,11 +176,11 @@ const WriteDylibLCCtx = struct { fn writeDylibLC(ctx: WriteDylibLCCtx, lc_writer: anytype) !void { const name_len = ctx.name.len + 1; - const cmdsize = @intCast(u32, mem.alignForward( + const cmdsize = @as(u32, @intCast(mem.alignForward( u64, @sizeOf(macho.dylib_command) + name_len, @sizeOf(u64), - )); + ))); try lc_writer.writeStruct(macho.dylib_command{ .cmd = ctx.cmd, .cmdsize = cmdsize, @@ -217,8 +217,8 @@ pub fn writeDylibIdLC(gpa: Allocator, options: *const link.Options, lc_writer: a try writeDylibLC(.{ .cmd = .ID_DYLIB, .name = install_name, - .current_version = @intCast(u32, curr.major << 16 | curr.minor << 8 | curr.patch), - .compatibility_version = @intCast(u32, compat.major << 16 | compat.minor << 8 | compat.patch), + .current_version = @as(u32, @intCast(curr.major << 16 | curr.minor << 8 | curr.patch)), + .compatibility_version = @as(u32, @intCast(compat.major << 16 | compat.minor << 8 | compat.patch)), }, lc_writer); } @@ -253,11 +253,11 @@ pub fn writeRpathLCs(gpa: Allocator, options: *const link.Options, lc_writer: an while (try it.next()) |rpath| { const rpath_len = rpath.len + 1; - const cmdsize = @intCast(u32, mem.alignForward( + const cmdsize = @as(u32, @intCast(mem.alignForward( u64, @sizeOf(macho.rpath_command) + rpath_len, @sizeOf(u64), - )); + ))); try lc_writer.writeStruct(macho.rpath_command{ .cmdsize = cmdsize, .path = @sizeOf(macho.rpath_command), @@ -275,12 +275,12 @@ pub fn writeBuildVersionLC(options: *const link.Options, lc_writer: anytype) !vo const cmdsize = @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version); const platform_version = blk: { const ver = options.target.os.version_range.semver.min; - const platform_version = @intCast(u32, ver.major << 16 | ver.minor << 8); + const platform_version = @as(u32, @intCast(ver.major << 16 | ver.minor << 8)); break :blk platform_version; }; const sdk_version = if (options.native_darwin_sdk) |sdk| blk: { const ver = sdk.version; - const sdk_version = @intCast(u32, ver.major << 16 | ver.minor << 8); + const sdk_version = @as(u32, @intCast(ver.major << 16 | ver.minor << 8)); break :blk sdk_version; } else platform_version; const is_simulator_abi = options.target.abi == .simulator; diff --git a/src/link/MachO/thunks.zig b/src/link/MachO/thunks.zig index f3289e544b..82d0451225 100644 --- a/src/link/MachO/thunks.zig +++ b/src/link/MachO/thunks.zig @@ -131,7 +131,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void { log.debug("GROUP END at {d}", .{group_end}); // Insert thunk at group_end - const thunk_index = @intCast(u32, zld.thunks.items.len); + const thunk_index = @as(u32, @intCast(zld.thunks.items.len)); try zld.thunks.append(gpa, .{ .start_index = undefined, .len = 0 }); // Scan relocs in the group and create trampolines for any unreachable callsite. 
@@ -174,7 +174,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void { } } - header.size = @intCast(u32, offset); + header.size = @as(u32, @intCast(offset)); } fn allocateThunk( @@ -223,7 +223,7 @@ fn scanRelocs( const base_offset = if (object.getSourceSymbol(atom.sym_index)) |source_sym| blk: { const source_sect = object.getSourceSection(source_sym.n_sect - 1); - break :blk @intCast(i32, source_sym.n_value - source_sect.addr); + break :blk @as(i32, @intCast(source_sym.n_value - source_sect.addr)); } else 0; const code = Atom.getAtomCode(zld, atom_index); @@ -289,7 +289,7 @@ fn scanRelocs( } inline fn relocNeedsThunk(rel: macho.relocation_info) bool { - const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type); + const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)); return rel_type == .ARM64_RELOC_BRANCH26; } @@ -315,7 +315,7 @@ fn isReachable( if (!allocated.contains(target_atom_index)) return false; - const source_addr = source_sym.n_value + @intCast(u32, rel.r_address - base_offset); + const source_addr = source_sym.n_value + @as(u32, @intCast(rel.r_address - base_offset)); const is_via_got = Atom.relocRequiresGot(zld, rel); const target_addr = Atom.getRelocTargetAddress(zld, target, is_via_got, false) catch unreachable; _ = Relocation.calcPcRelativeDisplacementArm64(source_addr, target_addr) catch @@ -349,7 +349,7 @@ fn getThunkIndex(zld: *Zld, atom_index: AtomIndex) ?ThunkIndex { const end_addr = start_addr + thunk.getSize(); if (start_addr <= sym.n_value and sym.n_value < end_addr) { - return @intCast(u32, i); + return @as(u32, @intCast(i)); } } return null; diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig index 13c1ea73fa..3e828984a9 100644 --- a/src/link/MachO/zld.zig +++ b/src/link/MachO/zld.zig @@ -103,7 +103,7 @@ pub const Zld = struct { const cpu_arch = self.options.target.cpu.arch; const mtime: u64 = mtime: { const stat = file.stat() catch break :mtime 0; - break :mtime @intCast(u64, @divFloor(stat.mtime, 1_000_000_000)); + break :mtime @as(u64, @intCast(@divFloor(stat.mtime, 1_000_000_000))); }; const file_stat = try file.stat(); const file_size = math.cast(usize, file_stat.size) orelse return error.Overflow; @@ -220,7 +220,7 @@ pub const Zld = struct { const contents = try file.readToEndAllocOptions(gpa, file_size, file_size, @alignOf(u64), null); defer gpa.free(contents); - const dylib_id = @intCast(u16, self.dylibs.items.len); + const dylib_id = @as(u16, @intCast(self.dylibs.items.len)); var dylib = Dylib{ .weak = opts.weak }; dylib.parseFromBinary( @@ -535,7 +535,7 @@ pub const Zld = struct { pub fn createEmptyAtom(self: *Zld, sym_index: u32, size: u64, alignment: u32) !AtomIndex { const gpa = self.gpa; - const index = @intCast(AtomIndex, self.atoms.items.len); + const index = @as(AtomIndex, @intCast(self.atoms.items.len)); const atom = try self.atoms.addOne(gpa); atom.* = Atom.empty; atom.sym_index = sym_index; @@ -596,7 +596,7 @@ pub const Zld = struct { const global_index = self.dyld_stub_binder_index orelse return; const target = self.globals.items[global_index]; const atom_index = try self.createGotAtom(); - const got_index = @intCast(u32, self.got_entries.items.len); + const got_index = @as(u32, @intCast(self.got_entries.items.len)); try self.got_entries.append(gpa, .{ .target = target, .atom_index = atom_index, @@ -874,7 +874,7 @@ pub const Zld = struct { } for (self.objects.items, 0..) 
|_, object_id| { - try self.resolveSymbolsInObject(@intCast(u32, object_id), resolver); + try self.resolveSymbolsInObject(@as(u32, @intCast(object_id)), resolver); } try self.resolveSymbolsInArchives(resolver); @@ -1024,7 +1024,7 @@ pub const Zld = struct { }; assert(offsets.items.len > 0); - const object_id = @intCast(u16, self.objects.items.len); + const object_id = @as(u16, @intCast(self.objects.items.len)); const object = archive.parseObject(gpa, cpu_arch, offsets.items[0]) catch |e| switch (e) { error.MismatchedCpuArchitecture => { log.err("CPU architecture mismatch found in {s}", .{archive.name}); @@ -1055,14 +1055,14 @@ pub const Zld = struct { for (self.dylibs.items, 0..) |dylib, id| { if (!dylib.symbols.contains(sym_name)) continue; - const dylib_id = @intCast(u16, id); + const dylib_id = @as(u16, @intCast(id)); if (!self.referenced_dylibs.contains(dylib_id)) { try self.referenced_dylibs.putNoClobber(self.gpa, dylib_id, {}); } const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable; sym.n_type |= macho.N_EXT; - sym.n_desc = @intCast(u16, ordinal + 1) * macho.N_SYMBOL_RESOLVER; + sym.n_desc = @as(u16, @intCast(ordinal + 1)) * macho.N_SYMBOL_RESOLVER; if (dylib.weak) { sym.n_desc |= macho.N_WEAK_REF; @@ -1099,9 +1099,9 @@ pub const Zld = struct { _ = resolver.unresolved.swapRemove(global_index); continue; } else if (allow_undef) { - const n_desc = @bitCast( + const n_desc = @as( u16, - macho.BIND_SPECIAL_DYLIB_FLAT_LOOKUP * @intCast(i16, macho.N_SYMBOL_RESOLVER), + @bitCast(macho.BIND_SPECIAL_DYLIB_FLAT_LOOKUP * @as(i16, @intCast(macho.N_SYMBOL_RESOLVER))), ); sym.n_type = macho.N_EXT; sym.n_desc = n_desc; @@ -1238,7 +1238,7 @@ pub const Zld = struct { const segname = header.segName(); const segment_id = self.getSegmentByName(segname) orelse blk: { log.debug("creating segment '{s}'", .{segname}); - const segment_id = @intCast(u8, self.segments.items.len); + const segment_id = @as(u8, @intCast(self.segments.items.len)); const protection = getSegmentMemoryProtection(segname); try self.segments.append(self.gpa, .{ .cmdsize = @sizeOf(macho.segment_command_64), @@ -1269,7 +1269,7 @@ pub const Zld = struct { pub fn allocateSymbol(self: *Zld) !u32 { try self.locals.ensureUnusedCapacity(self.gpa, 1); log.debug(" (allocating symbol index {d})", .{self.locals.items.len}); - const index = @intCast(u32, self.locals.items.len); + const index = @as(u32, @intCast(self.locals.items.len)); _ = self.locals.addOneAssumeCapacity(); self.locals.items[index] = .{ .n_strx = 0, @@ -1282,7 +1282,7 @@ pub const Zld = struct { } fn addGlobal(self: *Zld, sym_loc: SymbolWithLoc) !u32 { - const global_index = @intCast(u32, self.globals.items.len); + const global_index = @as(u32, @intCast(self.globals.items.len)); try self.globals.append(self.gpa, sym_loc); return global_index; } @@ -1489,7 +1489,7 @@ pub const Zld = struct { if (mem.eql(u8, header.sectName(), "__stub_helper")) continue; // Create jump/branch range extenders if needed. - try thunks.createThunks(self, @intCast(u8, sect_id)); + try thunks.createThunks(self, @as(u8, @intCast(sect_id))); } } } @@ -1502,7 +1502,7 @@ pub const Zld = struct { .dylibs = self.dylibs.items, .referenced_dylibs = self.referenced_dylibs.keys(), }) else 0; - try self.allocateSegment(@intCast(u8, segment_index), base_size); + try self.allocateSegment(@as(u8, @intCast(segment_index)), base_size); } } @@ -1536,12 +1536,12 @@ pub const Zld = struct { for (slice.items(.header)[indexes.start..indexes.end], 0..) 
|*header, sect_id| { const alignment = try math.powi(u32, 2, header.@"align"); const start_aligned = mem.alignForward(u64, start, alignment); - const n_sect = @intCast(u8, indexes.start + sect_id + 1); + const n_sect = @as(u8, @intCast(indexes.start + sect_id + 1)); header.offset = if (header.isZerofill()) 0 else - @intCast(u32, segment.fileoff + start_aligned); + @as(u32, @intCast(segment.fileoff + start_aligned)); header.addr = segment.vmaddr + start_aligned; var atom_index = slice.items(.first_atom_index)[indexes.start + sect_id]; @@ -1617,7 +1617,7 @@ pub const Zld = struct { ) !u8 { const gpa = self.gpa; log.debug("creating section '{s},{s}'", .{ segname, sectname }); - const index = @intCast(u8, self.sections.slice().len); + const index = @as(u8, @intCast(self.sections.slice().len)); try self.sections.append(gpa, .{ .segment_index = undefined, // Segments will be created automatically later down the pipeline .header = .{ @@ -1673,12 +1673,12 @@ pub const Zld = struct { }, } }; - return (@intCast(u8, segment_precedence) << 4) + section_precedence; + return (@as(u8, @intCast(segment_precedence)) << 4) + section_precedence; } fn writeSegmentHeaders(self: *Zld, writer: anytype) !void { for (self.segments.items, 0..) |seg, i| { - const indexes = self.getSectionIndexes(@intCast(u8, i)); + const indexes = self.getSectionIndexes(@as(u8, @intCast(i))); var out_seg = seg; out_seg.cmdsize = @sizeOf(macho.segment_command_64); out_seg.nsects = 0; @@ -1790,7 +1790,7 @@ pub const Zld = struct { } const segment_index = slice.items(.segment_index)[sect_id]; - const segment = self.getSegment(@intCast(u8, sect_id)); + const segment = self.getSegment(@as(u8, @intCast(sect_id))); if (segment.maxprot & macho.PROT.WRITE == 0) continue; log.debug("{s},{s}", .{ header.segName(), header.sectName() }); @@ -1820,12 +1820,12 @@ pub const Zld = struct { for (relocs) |rel| { switch (cpu_arch) { .aarch64 => { - const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type); + const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)); if (rel_type != .ARM64_RELOC_UNSIGNED) continue; if (rel.r_length != 3) continue; }, .x86_64 => { - const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type); + const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type)); if (rel_type != .X86_64_RELOC_UNSIGNED) continue; if (rel.r_length != 3) continue; }, @@ -1841,9 +1841,9 @@ pub const Zld = struct { const target_sym = self.getSymbol(target); if (target_sym.undf()) continue; - const base_offset = @intCast(i32, sym.n_value - segment.vmaddr); + const base_offset = @as(i32, @intCast(sym.n_value - segment.vmaddr)); const rel_offset = rel.r_address - ctx.base_offset; - const offset = @intCast(u64, base_offset + rel_offset); + const offset = @as(u64, @intCast(base_offset + rel_offset)); log.debug(" | rebase at {x}", .{offset}); try rebase.entries.append(self.gpa, .{ @@ -1882,7 +1882,7 @@ pub const Zld = struct { const sym = entry.getAtomSymbol(self); const base_offset = sym.n_value - seg.vmaddr; - const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER); + const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER); log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{ base_offset, bind_sym_name, @@ -1929,7 +1929,7 @@ pub const Zld = struct { } const segment_index = slice.items(.segment_index)[sect_id]; - const segment = self.getSegment(@intCast(u8, sect_id)); + const segment = self.getSegment(@as(u8, @intCast(sect_id))); if 
(segment.maxprot & macho.PROT.WRITE == 0) continue; const cpu_arch = self.options.target.cpu.arch; @@ -1959,12 +1959,12 @@ pub const Zld = struct { for (relocs) |rel| { switch (cpu_arch) { .aarch64 => { - const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type); + const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)); if (rel_type != .ARM64_RELOC_UNSIGNED) continue; if (rel.r_length != 3) continue; }, .x86_64 => { - const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type); + const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type)); if (rel_type != .X86_64_RELOC_UNSIGNED) continue; if (rel.r_length != 3) continue; }, @@ -1983,11 +1983,11 @@ pub const Zld = struct { if (!bind_sym.undf()) continue; const base_offset = sym.n_value - segment.vmaddr; - const rel_offset = @intCast(u32, rel.r_address - ctx.base_offset); - const offset = @intCast(u64, base_offset + rel_offset); + const rel_offset = @as(u32, @intCast(rel.r_address - ctx.base_offset)); + const offset = @as(u64, @intCast(base_offset + rel_offset)); const addend = mem.readIntLittle(i64, code[rel_offset..][0..8]); - const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER); + const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER); log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{ base_offset, bind_sym_name, @@ -2039,7 +2039,7 @@ pub const Zld = struct { const stub_entry = self.stubs.items[count]; const bind_sym = stub_entry.getTargetSymbol(self); const bind_sym_name = stub_entry.getTargetSymbolName(self); - const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER); + const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER); log.debug(" | lazy bind at {x}, import('{s}') in dylib({d})", .{ base_offset, bind_sym_name, @@ -2165,14 +2165,14 @@ pub const Zld = struct { try self.file.pwriteAll(buffer, rebase_off); try self.populateLazyBindOffsetsInStubHelper(lazy_bind); - self.dyld_info_cmd.rebase_off = @intCast(u32, rebase_off); - self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size_aligned); - self.dyld_info_cmd.bind_off = @intCast(u32, bind_off); - self.dyld_info_cmd.bind_size = @intCast(u32, bind_size_aligned); - self.dyld_info_cmd.lazy_bind_off = @intCast(u32, lazy_bind_off); - self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size_aligned); - self.dyld_info_cmd.export_off = @intCast(u32, export_off); - self.dyld_info_cmd.export_size = @intCast(u32, export_size_aligned); + self.dyld_info_cmd.rebase_off = @as(u32, @intCast(rebase_off)); + self.dyld_info_cmd.rebase_size = @as(u32, @intCast(rebase_size_aligned)); + self.dyld_info_cmd.bind_off = @as(u32, @intCast(bind_off)); + self.dyld_info_cmd.bind_size = @as(u32, @intCast(bind_size_aligned)); + self.dyld_info_cmd.lazy_bind_off = @as(u32, @intCast(lazy_bind_off)); + self.dyld_info_cmd.lazy_bind_size = @as(u32, @intCast(lazy_bind_size_aligned)); + self.dyld_info_cmd.export_off = @as(u32, @intCast(export_off)); + self.dyld_info_cmd.export_size = @as(u32, @intCast(export_size_aligned)); } fn populateLazyBindOffsetsInStubHelper(self: *Zld, lazy_bind: LazyBind) !void { @@ -2246,7 +2246,7 @@ pub const Zld = struct { var last_off: u32 = 0; for (addresses.items) |addr| { - const offset = @intCast(u32, addr - text_seg.vmaddr); + const offset = @as(u32, @intCast(addr - text_seg.vmaddr)); const diff = offset - last_off; if (diff == 0) continue; @@ -2258,7 +2258,7 @@ pub const Zld = struct { 
var buffer = std.ArrayList(u8).init(gpa); defer buffer.deinit(); - const max_size = @intCast(usize, offsets.items.len * @sizeOf(u64)); + const max_size = @as(usize, @intCast(offsets.items.len * @sizeOf(u64))); try buffer.ensureTotalCapacity(max_size); for (offsets.items) |offset| { @@ -2281,8 +2281,8 @@ pub const Zld = struct { try self.file.pwriteAll(buffer.items, offset); - self.function_starts_cmd.dataoff = @intCast(u32, offset); - self.function_starts_cmd.datasize = @intCast(u32, needed_size_aligned); + self.function_starts_cmd.dataoff = @as(u32, @intCast(offset)); + self.function_starts_cmd.datasize = @as(u32, @intCast(needed_size_aligned)); } fn filterDataInCode( @@ -2324,8 +2324,8 @@ pub const Zld = struct { const source_addr = if (object.getSourceSymbol(atom.sym_index)) |source_sym| source_sym.n_value else blk: { - const nbase = @intCast(u32, object.in_symtab.?.len); - const source_sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const source_sect_id = @as(u8, @intCast(atom.sym_index - nbase)); break :blk object.getSourceSection(source_sect_id).addr; }; const filtered_dice = filterDataInCode(dice, source_addr, source_addr + atom.size); @@ -2363,8 +2363,8 @@ pub const Zld = struct { try self.file.pwriteAll(buffer, offset); - self.data_in_code_cmd.dataoff = @intCast(u32, offset); - self.data_in_code_cmd.datasize = @intCast(u32, needed_size_aligned); + self.data_in_code_cmd.dataoff = @as(u32, @intCast(offset)); + self.data_in_code_cmd.datasize = @as(u32, @intCast(needed_size_aligned)); } fn writeSymtabs(self: *Zld) !void { @@ -2428,7 +2428,7 @@ pub const Zld = struct { if (!sym.undf()) continue; // not an import, skip if (sym.n_desc == N_DEAD) continue; - const new_index = @intCast(u32, imports.items.len); + const new_index = @as(u32, @intCast(imports.items.len)); var out_sym = sym; out_sym.n_strx = try self.strtab.insert(gpa, self.getSymbolName(global)); try imports.append(out_sym); @@ -2443,9 +2443,9 @@ pub const Zld = struct { } } - const nlocals = @intCast(u32, locals.items.len); - const nexports = @intCast(u32, exports.items.len); - const nimports = @intCast(u32, imports.items.len); + const nlocals = @as(u32, @intCast(locals.items.len)); + const nexports = @as(u32, @intCast(exports.items.len)); + const nimports = @as(u32, @intCast(imports.items.len)); const nsyms = nlocals + nexports + nimports; const seg = self.getLinkeditSegmentPtr(); @@ -2465,7 +2465,7 @@ pub const Zld = struct { log.debug("writing symtab from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); try self.file.pwriteAll(buffer.items, offset); - self.symtab_cmd.symoff = @intCast(u32, offset); + self.symtab_cmd.symoff = @as(u32, @intCast(offset)); self.symtab_cmd.nsyms = nsyms; return SymtabCtx{ @@ -2493,8 +2493,8 @@ pub const Zld = struct { try self.file.pwriteAll(buffer, offset); - self.symtab_cmd.stroff = @intCast(u32, offset); - self.symtab_cmd.strsize = @intCast(u32, needed_size_aligned); + self.symtab_cmd.stroff = @as(u32, @intCast(offset)); + self.symtab_cmd.strsize = @as(u32, @intCast(needed_size_aligned)); } const SymtabCtx = struct { @@ -2506,8 +2506,8 @@ pub const Zld = struct { fn writeDysymtab(self: *Zld, ctx: SymtabCtx) !void { const gpa = self.gpa; - const nstubs = @intCast(u32, self.stubs.items.len); - const ngot_entries = @intCast(u32, self.got_entries.items.len); + const nstubs = @as(u32, @intCast(self.stubs.items.len)); + const ngot_entries = @as(u32, @intCast(self.got_entries.items.len)); const nindirectsyms = nstubs * 2 + 
ngot_entries; const iextdefsym = ctx.nlocalsym; const iundefsym = iextdefsym + ctx.nextdefsym; @@ -2572,7 +2572,7 @@ pub const Zld = struct { self.dysymtab_cmd.nextdefsym = ctx.nextdefsym; self.dysymtab_cmd.iundefsym = iundefsym; self.dysymtab_cmd.nundefsym = ctx.nundefsym; - self.dysymtab_cmd.indirectsymoff = @intCast(u32, offset); + self.dysymtab_cmd.indirectsymoff = @as(u32, @intCast(offset)); self.dysymtab_cmd.nindirectsyms = nindirectsyms; } @@ -2599,8 +2599,8 @@ pub const Zld = struct { // except for code signature data. try self.file.pwriteAll(&[_]u8{0}, offset + needed_size - 1); - self.codesig_cmd.dataoff = @intCast(u32, offset); - self.codesig_cmd.datasize = @intCast(u32, needed_size); + self.codesig_cmd.dataoff = @as(u32, @intCast(offset)); + self.codesig_cmd.datasize = @as(u32, @intCast(needed_size)); } fn writeCodeSignature(self: *Zld, comp: *const Compilation, code_sig: *CodeSignature) !void { @@ -2689,7 +2689,7 @@ pub const Zld = struct { fn getSegmentByName(self: Zld, segname: []const u8) ?u8 { for (self.segments.items, 0..) |seg, i| { - if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i); + if (mem.eql(u8, segname, seg.segName())) return @as(u8, @intCast(i)); } else return null; } @@ -2714,15 +2714,15 @@ pub const Zld = struct { // TODO investigate caching with a hashmap for (self.sections.items(.header), 0..) |header, i| { if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname)) - return @intCast(u8, i); + return @as(u8, @intCast(i)); } else return null; } pub fn getSectionIndexes(self: Zld, segment_index: u8) struct { start: u8, end: u8 } { var start: u8 = 0; const nsects = for (self.segments.items, 0..) |seg, i| { - if (i == segment_index) break @intCast(u8, seg.nsects); - start += @intCast(u8, seg.nsects); + if (i == segment_index) break @as(u8, @intCast(seg.nsects)); + start += @as(u8, @intCast(seg.nsects)); } else 0; return .{ .start = start, .end = start + nsects }; } @@ -2879,7 +2879,7 @@ pub const Zld = struct { var name_lookup: ?DwarfInfo.SubprogramLookupByName = if (object.header.flags & macho.MH_SUBSECTIONS_VIA_SYMBOLS == 0) blk: { var name_lookup = DwarfInfo.SubprogramLookupByName.init(gpa); errdefer name_lookup.deinit(); - try name_lookup.ensureUnusedCapacity(@intCast(u32, object.atoms.items.len)); + try name_lookup.ensureUnusedCapacity(@as(u32, @intCast(object.atoms.items.len))); try debug_info.genSubprogramLookupByName(compile_unit, lookup, &name_lookup); break :blk name_lookup; } else null; @@ -3069,7 +3069,7 @@ pub const Zld = struct { @memset(&buf, '_'); scoped_log.debug(" %{d}: {s} @{x} in sect({d}), {s}", .{ sym_id, - object.getSymbolName(@intCast(u32, sym_id)), + object.getSymbolName(@as(u32, @intCast(sym_id))), sym.n_value, sym.n_sect, logSymAttributes(sym, &buf), @@ -3252,7 +3252,7 @@ pub const Zld = struct { } }; -pub const N_DEAD: u16 = @bitCast(u16, @as(i16, -1)); +pub const N_DEAD: u16 = @as(u16, @bitCast(@as(i16, -1))); const Section = struct { header: macho.section_64, @@ -3791,7 +3791,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr } for (zld.objects.items, 0..) 
|*object, object_id| { - try object.splitIntoAtoms(&zld, @intCast(u32, object_id)); + try object.splitIntoAtoms(&zld, @as(u32, @intCast(object_id))); } if (gc_sections) { @@ -3929,7 +3929,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr } else sym.n_value; try lc_writer.writeStruct(macho.entry_point_command{ - .entryoff = @intCast(u32, addr - seg.vmaddr), + .entryoff = @as(u32, @intCast(addr - seg.vmaddr)), .stacksize = options.stack_size_override orelse 0, }); } else { @@ -3943,7 +3943,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr }); try load_commands.writeBuildVersionLC(zld.options, lc_writer); - const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @intCast(u32, lc_buffer.items.len); + const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @as(u32, @intCast(lc_buffer.items.len)); try lc_writer.writeStruct(zld.uuid_cmd); try load_commands.writeLoadDylibLCs(zld.dylibs.items, zld.referenced_dylibs.keys(), lc_writer); @@ -3954,7 +3954,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr const ncmds = load_commands.calcNumOfLCs(lc_buffer.items); try zld.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64)); - try zld.writeHeader(ncmds, @intCast(u32, lc_buffer.items.len)); + try zld.writeHeader(ncmds, @as(u32, @intCast(lc_buffer.items.len))); try zld.writeUuid(comp, uuid_cmd_offset, requires_codesig); if (codesig) |*csig| { diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index b36e16452e..ad5292aa88 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -295,7 +295,7 @@ fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void { .sym_index = blk: { try self.syms.append(gpa, undefined); try self.syms.append(gpa, undefined); - break :blk @intCast(u32, self.syms.items.len - 1); + break :blk @as(u32, @intCast(self.syms.items.len - 1)); }, }; try fn_map_res.value_ptr.functions.put(gpa, decl_index, out); @@ -485,7 +485,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ .none = {} }, .{ - .parent_atom_index = @intCast(Atom.Index, atom_idx), + .parent_atom_index = @as(Atom.Index, @intCast(atom_idx)), }); const code = switch (res) { .ok => code_buffer.items, @@ -562,10 +562,10 @@ pub fn flush(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.Node) li pub fn changeLine(l: *std.ArrayList(u8), delta_line: i32) !void { if (delta_line > 0 and delta_line < 65) { - const toappend = @intCast(u8, delta_line); + const toappend = @as(u8, @intCast(delta_line)); try l.append(toappend); } else if (delta_line < 0 and delta_line > -65) { - const toadd: u8 = @intCast(u8, -delta_line + 64); + const toadd: u8 = @as(u8, @intCast(-delta_line + 64)); try l.append(toadd); } else if (delta_line != 0) { try l.append(0); @@ -675,7 +675,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No const out = entry.value_ptr.*; { // connect the previous decl to the next - const delta_line = @intCast(i32, out.start_line) - @intCast(i32, linecount); + const delta_line = @as(i32, @intCast(out.start_line)) - @as(i32, @intCast(linecount)); try changeLine(&linecountinfo, delta_line); // TODO change the pc too (maybe?) 
@@ -692,7 +692,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No atom.offset = off; log.debug("write text decl {*} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ decl, decl.name.fmt(&mod.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off }); if (!self.sixtyfour_bit) { - mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian()); } else { mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } @@ -721,7 +721,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No text_i += code.len; text_atom.offset = off; if (!self.sixtyfour_bit) { - mem.writeInt(u32, got_table[text_atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, got_table[text_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian()); } else { mem.writeInt(u64, got_table[text_atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } @@ -749,7 +749,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No data_i += code.len; atom.offset = off; if (!self.sixtyfour_bit) { - mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian()); } else { mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } @@ -772,7 +772,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No data_i += code.len; atom.offset = off; if (!self.sixtyfour_bit) { - mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian()); } else { mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } @@ -792,7 +792,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No data_i += code.len; data_atom.offset = off; if (!self.sixtyfour_bit) { - mem.writeInt(u32, got_table[data_atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, got_table[data_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian()); } else { mem.writeInt(u64, got_table[data_atom.got_index.? 
* 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } @@ -815,13 +815,13 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No // generate the header self.hdr = .{ .magic = self.magic, - .text = @intCast(u32, text_i), - .data = @intCast(u32, data_i), - .syms = @intCast(u32, syms.len), + .text = @as(u32, @intCast(text_i)), + .data = @as(u32, @intCast(data_i)), + .syms = @as(u32, @intCast(syms.len)), .bss = 0, .spsz = 0, - .pcsz = @intCast(u32, linecountinfo.items.len), - .entry = @intCast(u32, self.entry_val.?), + .pcsz = @as(u32, @intCast(linecountinfo.items.len)), + .entry = @as(u32, @intCast(self.entry_val.?)), }; @memcpy(hdr_slice, self.hdr.toU8s()[0..hdr_size]); // write the fat header for 64 bit entry points @@ -847,13 +847,13 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No const code = source_atom.code.getCode(self); if (reloc.pcrel) { - const disp = @intCast(i32, target_offset) - @intCast(i32, source_atom.offset.?) - 4 - @intCast(i32, offset); - mem.writeInt(i32, code[@intCast(usize, offset)..][0..4], @intCast(i32, disp), self.base.options.target.cpu.arch.endian()); + const disp = @as(i32, @intCast(target_offset)) - @as(i32, @intCast(source_atom.offset.?)) - 4 - @as(i32, @intCast(offset)); + mem.writeInt(i32, code[@as(usize, @intCast(offset))..][0..4], @as(i32, @intCast(disp)), self.base.options.target.cpu.arch.endian()); } else { if (!self.sixtyfour_bit) { - mem.writeInt(u32, code[@intCast(usize, offset)..][0..4], @intCast(u32, target_offset + addend), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, code[@as(usize, @intCast(offset))..][0..4], @as(u32, @intCast(target_offset + addend)), self.base.options.target.cpu.arch.endian()); } else { - mem.writeInt(u64, code[@intCast(usize, offset)..][0..8], target_offset + addend, self.base.options.target.cpu.arch.endian()); + mem.writeInt(u64, code[@as(usize, @intCast(offset))..][0..8], target_offset + addend, self.base.options.target.cpu.arch.endian()); } } log.debug("relocating the address of '{s}' + {d} into '{s}' + {d} (({s}[{d}] = 0x{x} + 0x{x})", .{ target_symbol.name, addend, source_atom_symbol.name, offset, source_atom_symbol.name, offset, target_offset, addend }); @@ -960,7 +960,7 @@ fn freeUnnamedConsts(self: *Plan9, decl_index: Module.Decl.Index) void { fn createAtom(self: *Plan9) !Atom.Index { const gpa = self.base.allocator; - const index = @intCast(Atom.Index, self.atoms.items.len); + const index = @as(Atom.Index, @intCast(self.atoms.items.len)); const atom = try self.atoms.addOne(gpa); atom.* = .{ .type = .t, @@ -1060,7 +1060,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind &required_alignment, &code_buffer, .none, - .{ .parent_atom_index = @intCast(Atom.Index, atom_index) }, + .{ .parent_atom_index = @as(Atom.Index, @intCast(atom_index)) }, ); const code = switch (res) { .ok => code_buffer.items, @@ -1188,7 +1188,7 @@ pub fn writeSym(self: *Plan9, w: anytype, sym: aout.Sym) !void { // log.debug("write sym{{name: {s}, value: {x}}}", .{ sym.name, sym.value }); if (sym.type == .bad) return; // we don't want to write free'd symbols if (!self.sixtyfour_bit) { - try w.writeIntBig(u32, @intCast(u32, sym.value)); + try w.writeIntBig(u32, @as(u32, @intCast(sym.value))); } else { try w.writeIntBig(u64, sym.value); } diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 647450a603..97a05a6e4a 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -317,7 +317,7 @@ pub const StringTable = struct { } 
try table.string_data.ensureUnusedCapacity(allocator, string.len + 1); - const offset = @intCast(u32, table.string_data.items.len); + const offset = @as(u32, @intCast(table.string_data.items.len)); log.debug("writing new string '{s}' at offset 0x{x}", .{ string, offset }); @@ -333,7 +333,7 @@ pub const StringTable = struct { /// Asserts offset does not exceed bounds. pub fn get(table: StringTable, off: u32) []const u8 { assert(off < table.string_data.items.len); - return mem.sliceTo(@ptrCast([*:0]const u8, table.string_data.items.ptr + off), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(table.string_data.items.ptr + off)), 0); } /// Returns the offset of a given string when it exists. @@ -396,7 +396,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option // For object files we will import the stack pointer symbol if (options.output_mode == .Obj) { symbol.setUndefined(true); - symbol.index = @intCast(u32, wasm_bin.imported_globals_count); + symbol.index = @as(u32, @intCast(wasm_bin.imported_globals_count)); wasm_bin.imported_globals_count += 1; try wasm_bin.imports.putNoClobber( allocator, @@ -408,7 +408,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option }, ); } else { - symbol.index = @intCast(u32, wasm_bin.imported_globals_count + wasm_bin.wasm_globals.items.len); + symbol.index = @as(u32, @intCast(wasm_bin.imported_globals_count + wasm_bin.wasm_globals.items.len)); symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); const global = try wasm_bin.wasm_globals.addOne(allocator); global.* = .{ @@ -431,7 +431,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option }; if (options.output_mode == .Obj or options.import_table) { symbol.setUndefined(true); - symbol.index = @intCast(u32, wasm_bin.imported_tables_count); + symbol.index = @as(u32, @intCast(wasm_bin.imported_tables_count)); wasm_bin.imported_tables_count += 1; try wasm_bin.imports.put(allocator, loc, .{ .module_name = try wasm_bin.string_table.put(allocator, wasm_bin.host_name), @@ -439,7 +439,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option .kind = .{ .table = table }, }); } else { - symbol.index = @intCast(u32, wasm_bin.imported_tables_count + wasm_bin.tables.items.len); + symbol.index = @as(u32, @intCast(wasm_bin.imported_tables_count + wasm_bin.tables.items.len)); try wasm_bin.tables.append(allocator, table); if (options.export_table) { symbol.setFlag(.WASM_SYM_EXPORTED); @@ -519,7 +519,7 @@ fn createSyntheticSymbol(wasm: *Wasm, name: []const u8, tag: Symbol.Tag) !Symbol } fn createSyntheticSymbolOffset(wasm: *Wasm, name_offset: u32, tag: Symbol.Tag) !SymbolLoc { - const sym_index = @intCast(u32, wasm.symbols.items.len); + const sym_index = @as(u32, @intCast(wasm.symbols.items.len)); const loc: SymbolLoc = .{ .index = sym_index, .file = null }; try wasm.symbols.append(wasm.base.allocator, .{ .name = name_offset, @@ -588,7 +588,7 @@ pub fn getOrCreateAtomForDecl(wasm: *Wasm, decl_index: Module.Decl.Index) !Atom. 
/// Creates a new empty `Atom` and returns its `Atom.Index` fn createAtom(wasm: *Wasm) !Atom.Index { - const index = @intCast(Atom.Index, wasm.managed_atoms.items.len); + const index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len)); const atom = try wasm.managed_atoms.addOne(wasm.base.allocator); atom.* = Atom.empty; atom.sym_index = try wasm.allocateSymbol(); @@ -669,7 +669,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void { log.debug("Resolving symbols in object: '{s}'", .{object.name}); for (object.symtable, 0..) |symbol, i| { - const sym_index = @intCast(u32, i); + const sym_index = @as(u32, @intCast(i)); const location: SymbolLoc = .{ .file = object_index, .index = sym_index, @@ -830,7 +830,7 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void { // Symbol is found in unparsed object file within current archive. // Parse object and and resolve symbols again before we check remaining // undefined symbols. - const object_file_index = @intCast(u16, wasm.objects.items.len); + const object_file_index = @as(u16, @intCast(wasm.objects.items.len)); var object = try archive.parseObject(wasm.base.allocator, offset.items[0]); try wasm.objects.append(wasm.base.allocator, object); try wasm.resolveSymbolsInObject(object_file_index); @@ -1046,7 +1046,7 @@ fn setupTLSRelocationsFunction(wasm: *Wasm) !void { try writer.writeByte(std.wasm.opcode(.i32_add)); try writer.writeByte(std.wasm.opcode(.global_set)); - try leb.writeULEB128(writer, wasm.imported_globals_count + @intCast(u32, wasm.wasm_globals.items.len + got_index)); + try leb.writeULEB128(writer, wasm.imported_globals_count + @as(u32, @intCast(wasm.wasm_globals.items.len + got_index))); } try writer.writeByte(std.wasm.opcode(.end)); @@ -1091,7 +1091,7 @@ fn validateFeatures( // linked object file so we can test them. for (wasm.objects.items, 0..) |object, object_index| { for (object.features) |feature| { - const value = @intCast(u16, object_index) << 1 | @as(u1, 1); + const value = @as(u16, @intCast(object_index)) << 1 | @as(u1, 1); switch (feature.prefix) { .used => { used[@intFromEnum(feature.tag)] = value; @@ -1117,12 +1117,12 @@ fn validateFeatures( // and insert it into the 'allowed' set. When features are not inferred, // we validate that a used feature is allowed. for (used, 0..) |used_set, used_index| { - const is_enabled = @truncate(u1, used_set) != 0; + const is_enabled = @as(u1, @truncate(used_set)) != 0; if (infer) { allowed[used_index] = is_enabled; emit_features_count.* += @intFromBool(is_enabled); } else if (is_enabled and !allowed[used_index]) { - log.err("feature '{}' not allowed, but used by linked object", .{@enumFromInt(types.Feature.Tag, used_index)}); + log.err("feature '{}' not allowed, but used by linked object", .{@as(types.Feature.Tag, @enumFromInt(used_index))}); log.err(" defined in '{s}'", .{wasm.objects.items[used_set >> 1].name}); valid_feature_set = false; } @@ -1134,7 +1134,7 @@ fn validateFeatures( if (wasm.base.options.shared_memory) { const disallowed_feature = disallowed[@intFromEnum(types.Feature.Tag.shared_mem)]; - if (@truncate(u1, disallowed_feature) != 0) { + if (@as(u1, @truncate(disallowed_feature)) != 0) { log.err( "shared-memory is disallowed by '{s}' because it wasn't compiled with 'atomics' and 'bulk-memory' features enabled", .{wasm.objects.items[disallowed_feature >> 1].name}, @@ -1163,7 +1163,7 @@ fn validateFeatures( if (feature.prefix == .disallowed) continue; // already defined in 'disallowed' set. 
// from here a feature is always used const disallowed_feature = disallowed[@intFromEnum(feature.tag)]; - if (@truncate(u1, disallowed_feature) != 0) { + if (@as(u1, @truncate(disallowed_feature)) != 0) { log.err("feature '{}' is disallowed, but used by linked object", .{feature.tag}); log.err(" disallowed by '{s}'", .{wasm.objects.items[disallowed_feature >> 1].name}); log.err(" used in '{s}'", .{object.name}); @@ -1175,9 +1175,9 @@ fn validateFeatures( // validate the linked object file has each required feature for (required, 0..) |required_feature, feature_index| { - const is_required = @truncate(u1, required_feature) != 0; + const is_required = @as(u1, @truncate(required_feature)) != 0; if (is_required and !object_used_features[feature_index]) { - log.err("feature '{}' is required but not used in linked object", .{@enumFromInt(types.Feature.Tag, feature_index)}); + log.err("feature '{}' is required but not used in linked object", .{@as(types.Feature.Tag, @enumFromInt(feature_index))}); log.err(" required by '{s}'", .{wasm.objects.items[required_feature >> 1].name}); log.err(" missing in '{s}'", .{object.name}); valid_feature_set = false; @@ -1333,7 +1333,7 @@ pub fn allocateSymbol(wasm: *Wasm) !u32 { wasm.symbols.items[index] = symbol; return index; } - const index = @intCast(u32, wasm.symbols.items.len); + const index = @as(u32, @intCast(wasm.symbols.items.len)); wasm.symbols.appendAssumeCapacity(symbol); return index; } @@ -1485,7 +1485,7 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8 try atom.code.appendSlice(wasm.base.allocator, code); try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {}); - atom.size = @intCast(u32, code.len); + atom.size = @as(u32, @intCast(code.len)); if (code.len == 0) return; atom.alignment = decl.getAlignment(mod); } @@ -1589,7 +1589,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In }; const atom = wasm.getAtomPtr(atom_index); - atom.size = @intCast(u32, code.len); + atom.size = @as(u32, @intCast(code.len)); try atom.code.appendSlice(wasm.base.allocator, code); return atom.sym_index; } @@ -1617,7 +1617,7 @@ pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8, lib_name: ?[]const u8) !u3 symbol.setUndefined(true); const sym_index = if (wasm.symbols_free_list.popOrNull()) |index| index else blk: { - var index = @intCast(u32, wasm.symbols.items.len); + var index = @as(u32, @intCast(wasm.symbols.items.len)); try wasm.symbols.ensureUnusedCapacity(wasm.base.allocator, 1); wasm.symbols.items.len += 1; break :blk index; @@ -1654,15 +1654,15 @@ pub fn getDeclVAddr( try wasm.addTableFunction(target_symbol_index); try atom.relocs.append(wasm.base.allocator, .{ .index = target_symbol_index, - .offset = @intCast(u32, reloc_info.offset), + .offset = @as(u32, @intCast(reloc_info.offset)), .relocation_type = if (is_wasm32) .R_WASM_TABLE_INDEX_I32 else .R_WASM_TABLE_INDEX_I64, }); } else { try atom.relocs.append(wasm.base.allocator, .{ .index = target_symbol_index, - .offset = @intCast(u32, reloc_info.offset), + .offset = @as(u32, @intCast(reloc_info.offset)), .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_I32 else .R_WASM_MEMORY_ADDR_I64, - .addend = @intCast(i32, reloc_info.addend), + .addend = @as(i32, @intCast(reloc_info.addend)), }); } // we do not know the final address at this point, @@ -1840,7 +1840,7 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void { /// Appends a new entry to the indirect function table pub fn addTableFunction(wasm: *Wasm, 
symbol_index: u32) !void { - const index = @intCast(u32, wasm.function_table.count()); + const index = @as(u32, @intCast(wasm.function_table.count())); try wasm.function_table.put(wasm.base.allocator, .{ .file = null, .index = symbol_index }, index); } @@ -1971,7 +1971,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void { const symbol = (SymbolLoc{ .file = null, .index = atom.sym_index }).getSymbol(wasm); const final_index: u32 = switch (kind) { .function => result: { - const index = @intCast(u32, wasm.functions.count() + wasm.imported_functions_count); + const index = @as(u32, @intCast(wasm.functions.count() + wasm.imported_functions_count)); const type_index = wasm.atom_types.get(atom_index).?; try wasm.functions.putNoClobber( wasm.base.allocator, @@ -1982,7 +1982,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void { symbol.index = index; if (wasm.code_section_index == null) { - wasm.code_section_index = @intCast(u32, wasm.segments.items.len); + wasm.code_section_index = @as(u32, @intCast(wasm.segments.items.len)); try wasm.segments.append(wasm.base.allocator, .{ .alignment = atom.alignment, .size = atom.size, @@ -2020,12 +2020,12 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void { const index = gop.value_ptr.*; wasm.segments.items[index].size += atom.size; - symbol.index = @intCast(u32, wasm.segment_info.getIndex(index).?); + symbol.index = @as(u32, @intCast(wasm.segment_info.getIndex(index).?)); // segment info already exists, so free its memory wasm.base.allocator.free(segment_name); break :result index; } else { - const index = @intCast(u32, wasm.segments.items.len); + const index = @as(u32, @intCast(wasm.segments.items.len)); var flags: u32 = 0; if (wasm.base.options.shared_memory) { flags |= @intFromEnum(Segment.Flag.WASM_DATA_SEGMENT_IS_PASSIVE); @@ -2038,7 +2038,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void { }); gop.value_ptr.* = index; - const info_index = @intCast(u32, wasm.segment_info.count()); + const info_index = @as(u32, @intCast(wasm.segment_info.count())); try wasm.segment_info.put(wasm.base.allocator, index, segment_info); symbol.index = info_index; break :result index; @@ -2074,13 +2074,13 @@ fn allocateDebugAtoms(wasm: *Wasm) !void { const allocAtom = struct { fn f(bin: *Wasm, maybe_index: *?u32, atom_index: Atom.Index) !void { const index = maybe_index.* orelse idx: { - const index = @intCast(u32, bin.segments.items.len); + const index = @as(u32, @intCast(bin.segments.items.len)); try bin.appendDummySegment(); maybe_index.* = index; break :idx index; }; const atom = bin.getAtomPtr(atom_index); - atom.size = @intCast(u32, atom.code.items.len); + atom.size = @as(u32, @intCast(atom.code.items.len)); bin.symbols.items[atom.sym_index].index = index; try bin.appendAtomAtIndex(index, atom_index); } @@ -2215,7 +2215,7 @@ fn setupInitFunctions(wasm: *Wasm) !void { log.debug("appended init func '{s}'\n", .{object.string_table.get(symbol.name)}); wasm.init_funcs.appendAssumeCapacity(.{ .index = init_func.symbol_index, - .file = @intCast(u16, file_index), + .file = @as(u16, @intCast(file_index)), .priority = init_func.priority, }); } @@ -2248,7 +2248,7 @@ fn setupErrorsLen(wasm: *Wasm) !void { atom.deinit(wasm); break :blk index; } else new_atom: { - const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len); + const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len)); try wasm.symbol_atom.put(wasm.base.allocator, loc, atom_index); try 
wasm.managed_atoms.append(wasm.base.allocator, undefined); break :new_atom atom_index; @@ -2257,7 +2257,7 @@ fn setupErrorsLen(wasm: *Wasm) !void { atom.* = Atom.empty; atom.sym_index = loc.index; atom.size = 2; - try atom.code.writer(wasm.base.allocator).writeIntLittle(u16, @intCast(u16, errors_len)); + try atom.code.writer(wasm.base.allocator).writeIntLittle(u16, @as(u16, @intCast(errors_len))); try wasm.parseAtom(atom_index, .{ .data = .read_only }); } @@ -2325,7 +2325,7 @@ fn createSyntheticFunction( const symbol = loc.getSymbol(wasm); const ty_index = try wasm.putOrGetFuncType(func_ty); // create function with above type - const func_index = wasm.imported_functions_count + @intCast(u32, wasm.functions.count()); + const func_index = wasm.imported_functions_count + @as(u32, @intCast(wasm.functions.count())); try wasm.functions.putNoClobber( wasm.base.allocator, .{ .file = null, .index = func_index }, @@ -2334,10 +2334,10 @@ fn createSyntheticFunction( symbol.index = func_index; // create the atom that will be output into the final binary - const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len); + const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len)); const atom = try wasm.managed_atoms.addOne(wasm.base.allocator); atom.* = .{ - .size = @intCast(u32, function_body.items.len), + .size = @as(u32, @intCast(function_body.items.len)), .offset = 0, .sym_index = loc.index, .file = null, @@ -2369,10 +2369,10 @@ pub fn createFunction( ) !u32 { const loc = try wasm.createSyntheticSymbol(symbol_name, .function); - const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len); + const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len)); const atom = try wasm.managed_atoms.addOne(wasm.base.allocator); atom.* = .{ - .size = @intCast(u32, function_body.items.len), + .size = @as(u32, @intCast(function_body.items.len)), .offset = 0, .sym_index = loc.index, .file = null, @@ -2386,7 +2386,7 @@ pub fn createFunction( symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); // ensure function does not get exported const section_index = wasm.code_section_index orelse idx: { - const index = @intCast(u32, wasm.segments.items.len); + const index = @as(u32, @intCast(wasm.segments.items.len)); try wasm.appendDummySegment(); break :idx index; }; @@ -2438,7 +2438,7 @@ fn initializeTLSFunction(wasm: *Wasm) !void { try writer.writeByte(std.wasm.opcode(.misc_prefix)); try leb.writeULEB128(writer, std.wasm.miscOpcode(.memory_init)); // segment immediate - try leb.writeULEB128(writer, @intCast(u32, data_index)); + try leb.writeULEB128(writer, @as(u32, @intCast(data_index))); // memory index immediate (always 0) try leb.writeULEB128(writer, @as(u32, 0)); } @@ -2567,16 +2567,16 @@ fn mergeSections(wasm: *Wasm) !void { if (!gop.found_existing) { gop.value_ptr.* = object.functions[index]; } - symbol.index = @intCast(u32, gop.index) + wasm.imported_functions_count; + symbol.index = @as(u32, @intCast(gop.index)) + wasm.imported_functions_count; }, .global => { const original_global = object.globals[index]; - symbol.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count; + symbol.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count; try wasm.wasm_globals.append(wasm.base.allocator, original_global); }, .table => { const original_table = object.tables[index]; - symbol.index = @intCast(u32, wasm.tables.items.len) + wasm.imported_tables_count; + symbol.index = @as(u32, @intCast(wasm.tables.items.len)) + 
wasm.imported_tables_count; try wasm.tables.append(wasm.base.allocator, original_table); }, else => unreachable, @@ -2596,7 +2596,7 @@ fn mergeTypes(wasm: *Wasm) !void { // type inserted. If we do this for the same function multiple times, // it will be overwritten with the incorrect type. var dirty = std.AutoHashMap(u32, void).init(wasm.base.allocator); - try dirty.ensureUnusedCapacity(@intCast(u32, wasm.functions.count())); + try dirty.ensureUnusedCapacity(@as(u32, @intCast(wasm.functions.count()))); defer dirty.deinit(); for (wasm.resolved_symbols.keys()) |sym_loc| { @@ -2660,10 +2660,10 @@ fn setupExports(wasm: *Wasm) !void { break :blk try wasm.string_table.put(wasm.base.allocator, sym_name); }; const exp: types.Export = if (symbol.tag == .data) exp: { - const global_index = @intCast(u32, wasm.imported_globals_count + wasm.wasm_globals.items.len); + const global_index = @as(u32, @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len)); try wasm.wasm_globals.append(wasm.base.allocator, .{ .global_type = .{ .valtype = .i32, .mutable = false }, - .init = .{ .i32_const = @intCast(i32, symbol.virtual_address) }, + .init = .{ .i32_const = @as(i32, @intCast(symbol.virtual_address)) }, }); break :exp .{ .name = export_name, @@ -2734,10 +2734,10 @@ fn setupMemory(wasm: *Wasm) !void { memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment); memory_ptr += stack_size; // We always put the stack pointer global at index 0 - wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr)); + wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr)))); } - var offset: u32 = @intCast(u32, memory_ptr); + var offset: u32 = @as(u32, @intCast(memory_ptr)); var data_seg_it = wasm.data_segments.iterator(); while (data_seg_it.next()) |entry| { const segment = &wasm.segments.items[entry.value_ptr.*]; @@ -2747,26 +2747,26 @@ fn setupMemory(wasm: *Wasm) !void { if (mem.eql(u8, entry.key_ptr.*, ".tdata")) { if (wasm.findGlobalSymbol("__tls_size")) |loc| { const sym = loc.getSymbol(wasm); - sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count; + sym.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count; try wasm.wasm_globals.append(wasm.base.allocator, .{ .global_type = .{ .valtype = .i32, .mutable = false }, - .init = .{ .i32_const = @intCast(i32, segment.size) }, + .init = .{ .i32_const = @as(i32, @intCast(segment.size)) }, }); } if (wasm.findGlobalSymbol("__tls_align")) |loc| { const sym = loc.getSymbol(wasm); - sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count; + sym.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count; try wasm.wasm_globals.append(wasm.base.allocator, .{ .global_type = .{ .valtype = .i32, .mutable = false }, - .init = .{ .i32_const = @intCast(i32, segment.alignment) }, + .init = .{ .i32_const = @as(i32, @intCast(segment.alignment)) }, }); } if (wasm.findGlobalSymbol("__tls_base")) |loc| { const sym = loc.getSymbol(wasm); - sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count; + sym.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count; try wasm.wasm_globals.append(wasm.base.allocator, .{ .global_type = .{ .valtype = .i32, .mutable = wasm.base.options.shared_memory }, - .init = .{ .i32_const = if (wasm.base.options.shared_memory) @as(u32, 0) else @intCast(i32, memory_ptr) }, + .init = .{ .i32_const = if 
(wasm.base.options.shared_memory) @as(u32, 0) else @as(i32, @intCast(memory_ptr)) }, }); } } @@ -2782,21 +2782,21 @@ fn setupMemory(wasm: *Wasm) !void { memory_ptr = mem.alignForward(u64, memory_ptr, 4); const loc = try wasm.createSyntheticSymbol("__wasm_init_memory_flag", .data); const sym = loc.getSymbol(wasm); - sym.virtual_address = @intCast(u32, memory_ptr); + sym.virtual_address = @as(u32, @intCast(memory_ptr)); memory_ptr += 4; } if (!place_stack_first and !is_obj) { memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment); memory_ptr += stack_size; - wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr)); + wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr)))); } // One of the linked object files has a reference to the __heap_base symbol. // We must set its virtual address so it can be used in relocations. if (wasm.findGlobalSymbol("__heap_base")) |loc| { const symbol = loc.getSymbol(wasm); - symbol.virtual_address = @intCast(u32, mem.alignForward(u64, memory_ptr, heap_alignment)); + symbol.virtual_address = @as(u32, @intCast(mem.alignForward(u64, memory_ptr, heap_alignment))); } // Setup the max amount of pages @@ -2821,12 +2821,12 @@ fn setupMemory(wasm: *Wasm) !void { memory_ptr = mem.alignForward(u64, memory_ptr, std.wasm.page_size); // In case we do not import memory, but define it ourselves, // set the minimum amount of pages on the memory section. - wasm.memories.limits.min = @intCast(u32, memory_ptr / page_size); + wasm.memories.limits.min = @as(u32, @intCast(memory_ptr / page_size)); log.debug("Total memory pages: {d}", .{wasm.memories.limits.min}); if (wasm.findGlobalSymbol("__heap_end")) |loc| { const symbol = loc.getSymbol(wasm); - symbol.virtual_address = @intCast(u32, memory_ptr); + symbol.virtual_address = @as(u32, @intCast(memory_ptr)); } if (wasm.base.options.max_memory) |max_memory| { @@ -2842,7 +2842,7 @@ fn setupMemory(wasm: *Wasm) !void { log.err("Maximum memory exceeds maxmium amount {d}", .{max_memory_allowed}); return error.MemoryTooBig; } - wasm.memories.limits.max = @intCast(u32, max_memory / page_size); + wasm.memories.limits.max = @as(u32, @intCast(max_memory / page_size)); wasm.memories.limits.setFlag(.WASM_LIMITS_FLAG_HAS_MAX); if (wasm.base.options.shared_memory) { wasm.memories.limits.setFlag(.WASM_LIMITS_FLAG_IS_SHARED); @@ -2857,7 +2857,7 @@ fn setupMemory(wasm: *Wasm) !void { pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, relocatable_index: u32) !?u32 { const object: Object = wasm.objects.items[object_index]; const relocatable_data = object.relocatable_data[relocatable_index]; - const index = @intCast(u32, wasm.segments.items.len); + const index = @as(u32, @intCast(wasm.segments.items.len)); switch (relocatable_data.type) { .data => { @@ -3023,10 +3023,10 @@ fn populateErrorNameTable(wasm: *Wasm) !void { const mod = wasm.base.options.module.?; for (mod.global_error_set.keys()) |error_name_nts| { const error_name = mod.intern_pool.stringToSlice(error_name_nts); - const len = @intCast(u32, error_name.len + 1); // names are 0-termianted + const len = @as(u32, @intCast(error_name.len + 1)); // names are 0-termianted const slice_ty = Type.slice_const_u8_sentinel_0; - const offset = @intCast(u32, atom.code.items.len); + const offset = @as(u32, @intCast(atom.code.items.len)); // first we create the data for the slice of the name try atom.code.appendNTimes(wasm.base.allocator, 0, 4); // ptr to name, will be relocated try 
atom.code.writer(wasm.base.allocator).writeIntLittle(u32, len - 1); @@ -3035,9 +3035,9 @@ fn populateErrorNameTable(wasm: *Wasm) !void { .index = names_atom.sym_index, .relocation_type = .R_WASM_MEMORY_ADDR_I32, .offset = offset, - .addend = @intCast(i32, addend), + .addend = @as(i32, @intCast(addend)), }); - atom.size += @intCast(u32, slice_ty.abiSize(mod)); + atom.size += @as(u32, @intCast(slice_ty.abiSize(mod))); addend += len; // as we updated the error name table, we now store the actual name within the names atom @@ -3063,7 +3063,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void { /// This initializes the index, appends a new segment, /// and finally, creates a managed `Atom`. pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !Atom.Index { - const new_index = @intCast(u32, wasm.segments.items.len); + const new_index = @as(u32, @intCast(wasm.segments.items.len)); index.* = new_index; try wasm.appendDummySegment(); @@ -3294,7 +3294,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l try wasm.parseInputFiles(positionals.items); for (wasm.objects.items, 0..) |_, object_index| { - try wasm.resolveSymbolsInObject(@intCast(u16, object_index)); + try wasm.resolveSymbolsInObject(@as(u16, @intCast(object_index))); } var emit_features_count: u32 = 0; @@ -3309,7 +3309,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l try wasm.setupImports(); for (wasm.objects.items, 0..) |*object, object_index| { - try object.parseIntoAtoms(gpa, @intCast(u16, object_index), wasm); + try object.parseIntoAtoms(gpa, @as(u16, @intCast(object_index)), wasm); } try wasm.allocateAtoms(); @@ -3382,7 +3382,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod try wasm.parseInputFiles(positionals.items); for (wasm.objects.items, 0..) |_, object_index| { - try wasm.resolveSymbolsInObject(@intCast(u16, object_index)); + try wasm.resolveSymbolsInObject(@as(u16, @intCast(object_index))); } var emit_features_count: u32 = 0; @@ -3446,7 +3446,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod } for (wasm.objects.items, 0..) |*object, object_index| { - try object.parseIntoAtoms(wasm.base.allocator, @intCast(u16, object_index), wasm); + try object.parseIntoAtoms(wasm.base.allocator, @as(u16, @intCast(object_index)), wasm); } try wasm.allocateAtoms(); @@ -3497,11 +3497,11 @@ fn writeToFile( log.debug("Writing type section. 
Count: ({d})", .{wasm.func_types.items.len}); for (wasm.func_types.items) |func_type| { try leb.writeULEB128(binary_writer, std.wasm.function_type); - try leb.writeULEB128(binary_writer, @intCast(u32, func_type.params.len)); + try leb.writeULEB128(binary_writer, @as(u32, @intCast(func_type.params.len))); for (func_type.params) |param_ty| { try leb.writeULEB128(binary_writer, std.wasm.valtype(param_ty)); } - try leb.writeULEB128(binary_writer, @intCast(u32, func_type.returns.len)); + try leb.writeULEB128(binary_writer, @as(u32, @intCast(func_type.returns.len))); for (func_type.returns) |ret_ty| { try leb.writeULEB128(binary_writer, std.wasm.valtype(ret_ty)); } @@ -3511,8 +3511,8 @@ fn writeToFile( binary_bytes.items, header_offset, .type, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, wasm.func_types.items.len), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(wasm.func_types.items.len)), ); section_count += 1; } @@ -3543,8 +3543,8 @@ fn writeToFile( binary_bytes.items, header_offset, .import, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, wasm.imports.count() + @intFromBool(import_memory)), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(wasm.imports.count() + @intFromBool(import_memory))), ); section_count += 1; } @@ -3560,8 +3560,8 @@ fn writeToFile( binary_bytes.items, header_offset, .function, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, wasm.functions.count()), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(wasm.functions.count())), ); section_count += 1; } @@ -3579,8 +3579,8 @@ fn writeToFile( binary_bytes.items, header_offset, .table, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, wasm.tables.items.len), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(wasm.tables.items.len)), ); section_count += 1; } @@ -3594,7 +3594,7 @@ fn writeToFile( binary_bytes.items, header_offset, .memory, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), @as(u32, 1), // wasm currently only supports 1 linear memory segment ); section_count += 1; @@ -3614,8 +3614,8 @@ fn writeToFile( binary_bytes.items, header_offset, .global, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, wasm.wasm_globals.items.len), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(wasm.wasm_globals.items.len)), ); section_count += 1; } @@ -3626,14 +3626,14 @@ fn writeToFile( for (wasm.exports.items) |exp| { const name = wasm.string_table.get(exp.name); - try leb.writeULEB128(binary_writer, @intCast(u32, name.len)); + try leb.writeULEB128(binary_writer, @as(u32, @intCast(name.len))); try binary_writer.writeAll(name); try leb.writeULEB128(binary_writer, @intFromEnum(exp.kind)); try leb.writeULEB128(binary_writer, exp.index); } if (!import_memory) { - try leb.writeULEB128(binary_writer, @intCast(u32, "memory".len)); + try leb.writeULEB128(binary_writer, @as(u32, @intCast("memory".len))); try binary_writer.writeAll("memory"); try binary_writer.writeByte(std.wasm.externalKind(.memory)); try leb.writeULEB128(binary_writer, @as(u32, 0)); @@ -3643,8 +3643,8 @@ fn writeToFile( binary_bytes.items, header_offset, 
.@"export", - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, wasm.exports.items.len) + @intFromBool(!import_memory), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(wasm.exports.items.len)) + @intFromBool(!import_memory), ); section_count += 1; } @@ -3665,7 +3665,7 @@ fn writeToFile( if (flags == 0x02) { try leb.writeULEB128(binary_writer, @as(u8, 0)); // represents funcref } - try leb.writeULEB128(binary_writer, @intCast(u32, wasm.function_table.count())); + try leb.writeULEB128(binary_writer, @as(u32, @intCast(wasm.function_table.count()))); var symbol_it = wasm.function_table.keyIterator(); while (symbol_it.next()) |symbol_loc_ptr| { try leb.writeULEB128(binary_writer, symbol_loc_ptr.*.getSymbol(wasm).index); @@ -3675,7 +3675,7 @@ fn writeToFile( binary_bytes.items, header_offset, .element, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), @as(u32, 1), ); section_count += 1; @@ -3689,8 +3689,8 @@ fn writeToFile( binary_bytes.items, header_offset, .data_count, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, data_segments_count), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(data_segments_count)), ); } @@ -3731,13 +3731,13 @@ fn writeToFile( try binary_writer.writeAll(sorted_atom.code.items); } - code_section_size = @intCast(u32, binary_bytes.items.len - header_offset - header_size); + code_section_size = @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)); try writeVecSectionHeader( binary_bytes.items, header_offset, .code, code_section_size, - @intCast(u32, wasm.functions.count()), + @as(u32, @intCast(wasm.functions.count())), ); code_section_index = section_count; section_count += 1; @@ -3765,7 +3765,7 @@ fn writeToFile( } // when a segment is passive, it's initialized during runtime. 
if (!segment.isPassive()) { - try emitInit(binary_writer, .{ .i32_const = @bitCast(i32, segment.offset) }); + try emitInit(binary_writer, .{ .i32_const = @as(i32, @bitCast(segment.offset)) }); } // offset into data section try leb.writeULEB128(binary_writer, segment.size); @@ -3808,8 +3808,8 @@ fn writeToFile( binary_bytes.items, header_offset, .data, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, segment_count), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(segment_count)), ); data_section_index = section_count; section_count += 1; @@ -3927,7 +3927,7 @@ fn emitDebugSection(binary_bytes: *std.ArrayList(u8), data: []const u8, name: [] if (data.len == 0) return; const header_offset = try reserveCustomSectionHeader(binary_bytes); const writer = binary_bytes.writer(); - try leb.writeULEB128(writer, @intCast(u32, name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(name.len))); try writer.writeAll(name); const start = binary_bytes.items.len - header_offset; @@ -3937,7 +3937,7 @@ fn emitDebugSection(binary_bytes: *std.ArrayList(u8), data: []const u8, name: [] try writeCustomSectionHeader( binary_bytes.items, header_offset, - @intCast(u32, binary_bytes.items.len - header_offset - 6), + @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), ); } @@ -3946,7 +3946,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void { const writer = binary_bytes.writer(); const producers = "producers"; - try leb.writeULEB128(writer, @intCast(u32, producers.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(producers.len))); try writer.writeAll(producers); try leb.writeULEB128(writer, @as(u32, 2)); // 2 fields: Language + processed-by @@ -3958,7 +3958,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void { // language field { const language = "language"; - try leb.writeULEB128(writer, @intCast(u32, language.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(language.len))); try writer.writeAll(language); // field_value_count (TODO: Parse object files for producer sections to detect their language) @@ -3969,7 +3969,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void { try leb.writeULEB128(writer, @as(u32, 3)); // len of "Zig" try writer.writeAll("Zig"); - try leb.writeULEB128(writer, @intCast(u32, version.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(version.len))); try writer.writeAll(version); } } @@ -3977,7 +3977,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void { // processed-by field { const processed_by = "processed-by"; - try leb.writeULEB128(writer, @intCast(u32, processed_by.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(processed_by.len))); try writer.writeAll(processed_by); // field_value_count (TODO: Parse object files for producer sections to detect other used tools) @@ -3988,7 +3988,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void { try leb.writeULEB128(writer, @as(u32, 3)); // len of "Zig" try writer.writeAll("Zig"); - try leb.writeULEB128(writer, @intCast(u32, version.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(version.len))); try writer.writeAll(version); } } @@ -3996,7 +3996,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void { try writeCustomSectionHeader( binary_bytes.items, header_offset, - @intCast(u32, binary_bytes.items.len - header_offset - 6), + @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), ); } @@ -4005,17 +4005,17 @@ fn 
emitBuildIdSection(binary_bytes: *std.ArrayList(u8), build_id: []const u8) !v const writer = binary_bytes.writer(); const hdr_build_id = "build_id"; - try leb.writeULEB128(writer, @intCast(u32, hdr_build_id.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(hdr_build_id.len))); try writer.writeAll(hdr_build_id); try leb.writeULEB128(writer, @as(u32, 1)); - try leb.writeULEB128(writer, @intCast(u32, build_id.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(build_id.len))); try writer.writeAll(build_id); try writeCustomSectionHeader( binary_bytes.items, header_offset, - @intCast(u32, binary_bytes.items.len - header_offset - 6), + @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), ); } @@ -4024,17 +4024,17 @@ fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []con const writer = binary_bytes.writer(); const target_features = "target_features"; - try leb.writeULEB128(writer, @intCast(u32, target_features.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(target_features.len))); try writer.writeAll(target_features); try leb.writeULEB128(writer, features_count); for (enabled_features, 0..) |enabled, feature_index| { if (enabled) { - const feature: types.Feature = .{ .prefix = .used, .tag = @enumFromInt(types.Feature.Tag, feature_index) }; + const feature: types.Feature = .{ .prefix = .used, .tag = @as(types.Feature.Tag, @enumFromInt(feature_index)) }; try leb.writeULEB128(writer, @intFromEnum(feature.prefix)); var buf: [100]u8 = undefined; const string = try std.fmt.bufPrint(&buf, "{}", .{feature.tag}); - try leb.writeULEB128(writer, @intCast(u32, string.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(string.len))); try writer.writeAll(string); } } @@ -4042,7 +4042,7 @@ fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []con try writeCustomSectionHeader( binary_bytes.items, header_offset, - @intCast(u32, binary_bytes.items.len - header_offset - 6), + @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), ); } @@ -4092,7 +4092,7 @@ fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem const header_offset = try reserveCustomSectionHeader(binary_bytes); const writer = binary_bytes.writer(); - try leb.writeULEB128(writer, @intCast(u32, "name".len)); + try leb.writeULEB128(writer, @as(u32, @intCast("name".len))); try writer.writeAll("name"); try wasm.emitNameSubsection(.function, funcs.values(), writer); @@ -4102,7 +4102,7 @@ fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem try writeCustomSectionHeader( binary_bytes.items, header_offset, - @intCast(u32, binary_bytes.items.len - header_offset - 6), + @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), ); } @@ -4112,17 +4112,17 @@ fn emitNameSubsection(wasm: *Wasm, section_id: std.wasm.NameSubsection, names: a defer section_list.deinit(); const sub_writer = section_list.writer(); - try leb.writeULEB128(sub_writer, @intCast(u32, names.len)); + try leb.writeULEB128(sub_writer, @as(u32, @intCast(names.len))); for (names) |name| { log.debug("Emit symbol '{s}' type({s})", .{ name.name, @tagName(section_id) }); try leb.writeULEB128(sub_writer, name.index); - try leb.writeULEB128(sub_writer, @intCast(u32, name.name.len)); + try leb.writeULEB128(sub_writer, @as(u32, @intCast(name.name.len))); try sub_writer.writeAll(name.name); } // From now, write to the actual writer try leb.writeULEB128(writer, @intFromEnum(section_id)); - try leb.writeULEB128(writer, @intCast(u32, section_list.items.len)); + 
try leb.writeULEB128(writer, @as(u32, @intCast(section_list.items.len))); try writer.writeAll(section_list.items); } @@ -4146,11 +4146,11 @@ fn emitInit(writer: anytype, init_expr: std.wasm.InitExpression) !void { }, .f32_const => |val| { try writer.writeByte(std.wasm.opcode(.f32_const)); - try writer.writeIntLittle(u32, @bitCast(u32, val)); + try writer.writeIntLittle(u32, @as(u32, @bitCast(val))); }, .f64_const => |val| { try writer.writeByte(std.wasm.opcode(.f64_const)); - try writer.writeIntLittle(u64, @bitCast(u64, val)); + try writer.writeIntLittle(u64, @as(u64, @bitCast(val))); }, .global_get => |val| { try writer.writeByte(std.wasm.opcode(.global_get)); @@ -4162,11 +4162,11 @@ fn emitInit(writer: anytype, init_expr: std.wasm.InitExpression) !void { fn emitImport(wasm: *Wasm, writer: anytype, import: types.Import) !void { const module_name = wasm.string_table.get(import.module_name); - try leb.writeULEB128(writer, @intCast(u32, module_name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(module_name.len))); try writer.writeAll(module_name); const name = wasm.string_table.get(import.name); - try leb.writeULEB128(writer, @intCast(u32, name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(name.len))); try writer.writeAll(name); try writer.writeByte(@intFromEnum(import.kind)); @@ -4594,7 +4594,7 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) ! fn reserveVecSectionHeader(bytes: *std.ArrayList(u8)) !u32 { // section id + fixed leb contents size + fixed leb vector length const header_size = 1 + 5 + 5; - const offset = @intCast(u32, bytes.items.len); + const offset = @as(u32, @intCast(bytes.items.len)); try bytes.appendSlice(&[_]u8{0} ** header_size); return offset; } @@ -4602,7 +4602,7 @@ fn reserveVecSectionHeader(bytes: *std.ArrayList(u8)) !u32 { fn reserveCustomSectionHeader(bytes: *std.ArrayList(u8)) !u32 { // unlike regular section, we don't emit the count const header_size = 1 + 5; - const offset = @intCast(u32, bytes.items.len); + const offset = @as(u32, @intCast(bytes.items.len)); try bytes.appendSlice(&[_]u8{0} ** header_size); return offset; } @@ -4638,7 +4638,7 @@ fn emitLinkSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table: try wasm.emitSymbolTable(binary_bytes, symbol_table); try wasm.emitSegmentInfo(binary_bytes); - const size = @intCast(u32, binary_bytes.items.len - offset - 6); + const size = @as(u32, @intCast(binary_bytes.items.len - offset - 6)); try writeCustomSectionHeader(binary_bytes.items, offset, size); } @@ -4661,7 +4661,7 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table: const sym_name = if (wasm.export_names.get(sym_loc)) |exp_name| wasm.string_table.get(exp_name) else sym_loc.getName(wasm); switch (symbol.tag) { .data => { - try leb.writeULEB128(writer, @intCast(u32, sym_name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(sym_name.len))); try writer.writeAll(sym_name); if (symbol.isDefined()) { @@ -4678,7 +4678,7 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table: else => { try leb.writeULEB128(writer, symbol.index); if (symbol.isDefined()) { - try leb.writeULEB128(writer, @intCast(u32, sym_name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(sym_name.len))); try writer.writeAll(sym_name); } }, @@ -4686,7 +4686,7 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table: } var buf: [10]u8 = undefined; - leb.writeUnsignedFixed(5, buf[0..5], @intCast(u32, binary_bytes.items.len - table_offset + 
5)); + leb.writeUnsignedFixed(5, buf[0..5], @as(u32, @intCast(binary_bytes.items.len - table_offset + 5))); leb.writeUnsignedFixed(5, buf[5..], symbol_count); try binary_bytes.insertSlice(table_offset, &buf); } @@ -4696,28 +4696,28 @@ fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.ArrayList(u8)) !void { try leb.writeULEB128(writer, @intFromEnum(types.SubsectionType.WASM_SEGMENT_INFO)); const segment_offset = binary_bytes.items.len; - try leb.writeULEB128(writer, @intCast(u32, wasm.segment_info.count())); + try leb.writeULEB128(writer, @as(u32, @intCast(wasm.segment_info.count()))); for (wasm.segment_info.values()) |segment_info| { log.debug("Emit segment: {s} align({d}) flags({b})", .{ segment_info.name, @ctz(segment_info.alignment), segment_info.flags, }); - try leb.writeULEB128(writer, @intCast(u32, segment_info.name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(segment_info.name.len))); try writer.writeAll(segment_info.name); try leb.writeULEB128(writer, @ctz(segment_info.alignment)); try leb.writeULEB128(writer, segment_info.flags); } var buf: [5]u8 = undefined; - leb.writeUnsignedFixed(5, &buf, @intCast(u32, binary_bytes.items.len - segment_offset)); + leb.writeUnsignedFixed(5, &buf, @as(u32, @intCast(binary_bytes.items.len - segment_offset))); try binary_bytes.insertSlice(segment_offset, &buf); } pub fn getULEB128Size(uint_value: anytype) u32 { const T = @TypeOf(uint_value); const U = if (@typeInfo(T).Int.bits < 8) u8 else T; - var value = @intCast(U, uint_value); + var value = @as(U, @intCast(uint_value)); var size: u32 = 0; while (value != 0) : (size += 1) { @@ -4739,7 +4739,7 @@ fn emitCodeRelocations( // write custom section information const name = "reloc.CODE"; - try leb.writeULEB128(writer, @intCast(u32, name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(name.len))); try writer.writeAll(name); try leb.writeULEB128(writer, section_index); const reloc_start = binary_bytes.items.len; @@ -4769,7 +4769,7 @@ fn emitCodeRelocations( var buf: [5]u8 = undefined; leb.writeUnsignedFixed(5, &buf, count); try binary_bytes.insertSlice(reloc_start, &buf); - const size = @intCast(u32, binary_bytes.items.len - header_offset - 6); + const size = @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)); try writeCustomSectionHeader(binary_bytes.items, header_offset, size); } @@ -4785,7 +4785,7 @@ fn emitDataRelocations( // write custom section information const name = "reloc.DATA"; - try leb.writeULEB128(writer, @intCast(u32, name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(name.len))); try writer.writeAll(name); try leb.writeULEB128(writer, section_index); const reloc_start = binary_bytes.items.len; @@ -4821,7 +4821,7 @@ fn emitDataRelocations( var buf: [5]u8 = undefined; leb.writeUnsignedFixed(5, &buf, count); try binary_bytes.insertSlice(reloc_start, &buf); - const size = @intCast(u32, binary_bytes.items.len - header_offset - 6); + const size = @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)); try writeCustomSectionHeader(binary_bytes.items, header_offset, size); } @@ -4852,7 +4852,7 @@ pub fn putOrGetFuncType(wasm: *Wasm, func_type: std.wasm.Type) !u32 { } // functype does not exist. 
- const index = @intCast(u32, wasm.func_types.items.len); + const index = @as(u32, @intCast(wasm.func_types.items.len)); const params = try wasm.base.allocator.dupe(std.wasm.Valtype, func_type.params); errdefer wasm.base.allocator.free(params); const returns = try wasm.base.allocator.dupe(std.wasm.Valtype, func_type.returns); diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig index f8092c6db1..64e9ebaaa1 100644 --- a/src/link/Wasm/Atom.zig +++ b/src/link/Wasm/Atom.zig @@ -114,7 +114,7 @@ pub fn resolveRelocs(atom: *Atom, wasm_bin: *const Wasm) void { .R_WASM_GLOBAL_INDEX_I32, .R_WASM_MEMORY_ADDR_I32, .R_WASM_SECTION_OFFSET_I32, - => std.mem.writeIntLittle(u32, atom.code.items[reloc.offset..][0..4], @intCast(u32, value)), + => std.mem.writeIntLittle(u32, atom.code.items[reloc.offset..][0..4], @as(u32, @intCast(value))), .R_WASM_TABLE_INDEX_I64, .R_WASM_MEMORY_ADDR_I64, => std.mem.writeIntLittle(u64, atom.code.items[reloc.offset..][0..8], value), @@ -127,7 +127,7 @@ pub fn resolveRelocs(atom: *Atom, wasm_bin: *const Wasm) void { .R_WASM_TABLE_NUMBER_LEB, .R_WASM_TYPE_INDEX_LEB, .R_WASM_MEMORY_ADDR_TLS_SLEB, - => leb.writeUnsignedFixed(5, atom.code.items[reloc.offset..][0..5], @intCast(u32, value)), + => leb.writeUnsignedFixed(5, atom.code.items[reloc.offset..][0..5], @as(u32, @intCast(value))), .R_WASM_MEMORY_ADDR_LEB64, .R_WASM_MEMORY_ADDR_SLEB64, .R_WASM_TABLE_INDEX_SLEB64, @@ -173,24 +173,24 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa if (symbol.isUndefined()) { return 0; } - const va = @intCast(i64, symbol.virtual_address); - return @intCast(u32, va + relocation.addend); + const va = @as(i64, @intCast(symbol.virtual_address)); + return @as(u32, @intCast(va + relocation.addend)); }, .R_WASM_EVENT_INDEX_LEB => return symbol.index, .R_WASM_SECTION_OFFSET_I32 => { const target_atom_index = wasm_bin.symbol_atom.get(target_loc).?; const target_atom = wasm_bin.getAtom(target_atom_index); - const rel_value = @intCast(i32, target_atom.offset) + relocation.addend; - return @intCast(u32, rel_value); + const rel_value = @as(i32, @intCast(target_atom.offset)) + relocation.addend; + return @as(u32, @intCast(rel_value)); }, .R_WASM_FUNCTION_OFFSET_I32 => { const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse { - return @bitCast(u32, @as(i32, -1)); + return @as(u32, @bitCast(@as(i32, -1))); }; const target_atom = wasm_bin.getAtom(target_atom_index); const offset: u32 = 11 + Wasm.getULEB128Size(target_atom.size); // Header (11 bytes fixed-size) + body size (leb-encoded) - const rel_value = @intCast(i32, target_atom.offset + offset) + relocation.addend; - return @intCast(u32, rel_value); + const rel_value = @as(i32, @intCast(target_atom.offset + offset)) + relocation.addend; + return @as(u32, @intCast(rel_value)); }, .R_WASM_MEMORY_ADDR_TLS_SLEB, .R_WASM_MEMORY_ADDR_TLS_SLEB64, diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig index db96381938..8e4df417ae 100644 --- a/src/link/Wasm/Object.zig +++ b/src/link/Wasm/Object.zig @@ -93,7 +93,7 @@ const RelocatableData = struct { const data_alignment = object.segment_info[relocatable_data.index].alignment; if (data_alignment == 0) return 1; // Decode from power of 2 to natural alignment - return @as(u32, 1) << @intCast(u5, data_alignment); + return @as(u32, 1) << @as(u5, @intCast(data_alignment)); } /// Returns the symbol kind that corresponds to the relocatable section @@ -130,7 +130,7 @@ pub fn create(gpa: Allocator, file: std.fs.File, name: []const u8, maybe_max_siz const size = 
maybe_max_size orelse size: { errdefer gpa.free(object.name); const stat = try file.stat(); - break :size @intCast(usize, stat.size); + break :size @as(usize, @intCast(stat.size)); }; const file_contents = try gpa.alloc(u8, size); @@ -365,7 +365,7 @@ fn Parser(comptime ReaderType: type) type { const len = try readLeb(u32, parser.reader.reader()); var limited_reader = std.io.limitedReader(parser.reader.reader(), len); const reader = limited_reader.reader(); - switch (@enumFromInt(std.wasm.Section, byte)) { + switch (@as(std.wasm.Section, @enumFromInt(byte))) { .custom => { const name_len = try readLeb(u32, reader); const name = try gpa.alloc(u8, name_len); @@ -375,13 +375,13 @@ fn Parser(comptime ReaderType: type) type { if (std.mem.eql(u8, name, "linking")) { is_object_file.* = true; parser.object.relocatable_data = relocatable_data.items; // at this point no new relocatable sections will appear so we're free to store them. - try parser.parseMetadata(gpa, @intCast(usize, reader.context.bytes_left)); + try parser.parseMetadata(gpa, @as(usize, @intCast(reader.context.bytes_left))); } else if (std.mem.startsWith(u8, name, "reloc")) { try parser.parseRelocations(gpa); } else if (std.mem.eql(u8, name, "target_features")) { try parser.parseFeatures(gpa); } else if (std.mem.startsWith(u8, name, ".debug")) { - const debug_size = @intCast(u32, reader.context.bytes_left); + const debug_size = @as(u32, @intCast(reader.context.bytes_left)); const debug_content = try gpa.alloc(u8, debug_size); errdefer gpa.free(debug_content); try reader.readNoEof(debug_content); @@ -514,7 +514,7 @@ fn Parser(comptime ReaderType: type) type { const count = try readLeb(u32, reader); while (index < count) : (index += 1) { const code_len = try readLeb(u32, reader); - const offset = @intCast(u32, start - reader.context.bytes_left); + const offset = @as(u32, @intCast(start - reader.context.bytes_left)); const data = try gpa.alloc(u8, code_len); errdefer gpa.free(data); try reader.readNoEof(data); @@ -538,7 +538,7 @@ fn Parser(comptime ReaderType: type) type { _ = flags; // TODO: Do we need to check flags to detect passive/active memory? _ = data_offset; const data_len = try readLeb(u32, reader); - const offset = @intCast(u32, start - reader.context.bytes_left); + const offset = @as(u32, @intCast(start - reader.context.bytes_left)); const data = try gpa.alloc(u8, data_len); errdefer gpa.free(data); try reader.readNoEof(data); @@ -645,7 +645,7 @@ fn Parser(comptime ReaderType: type) type { /// such as access to the `import` section to find the name of a symbol. 
fn parseSubsection(parser: *ObjectParser, gpa: Allocator, reader: anytype) !void { const sub_type = try leb.readULEB128(u8, reader); - log.debug("Found subsection: {s}", .{@tagName(@enumFromInt(types.SubsectionType, sub_type))}); + log.debug("Found subsection: {s}", .{@tagName(@as(types.SubsectionType, @enumFromInt(sub_type)))}); const payload_len = try leb.readULEB128(u32, reader); if (payload_len == 0) return; @@ -655,7 +655,7 @@ fn Parser(comptime ReaderType: type) type { // every subsection contains a 'count' field const count = try leb.readULEB128(u32, limited_reader); - switch (@enumFromInt(types.SubsectionType, sub_type)) { + switch (@as(types.SubsectionType, @enumFromInt(sub_type))) { .WASM_SEGMENT_INFO => { const segments = try gpa.alloc(types.Segment, count); errdefer gpa.free(segments); @@ -714,7 +714,7 @@ fn Parser(comptime ReaderType: type) type { errdefer gpa.free(symbols); for (symbols) |*symbol| { symbol.* = .{ - .kind = @enumFromInt(types.ComdatSym.Type, try leb.readULEB128(u8, reader)), + .kind = @as(types.ComdatSym.Type, @enumFromInt(try leb.readULEB128(u8, reader))), .index = try leb.readULEB128(u32, reader), }; } @@ -758,7 +758,7 @@ fn Parser(comptime ReaderType: type) type { /// requires access to `Object` to find the name of a symbol when it's /// an import and flag `WASM_SYM_EXPLICIT_NAME` is not set. fn parseSymbol(parser: *ObjectParser, gpa: Allocator, reader: anytype) !Symbol { - const tag = @enumFromInt(Symbol.Tag, try leb.readULEB128(u8, reader)); + const tag = @as(Symbol.Tag, @enumFromInt(try leb.readULEB128(u8, reader))); const flags = try leb.readULEB128(u32, reader); var symbol: Symbol = .{ .flags = flags, @@ -846,7 +846,7 @@ fn readLeb(comptime T: type, reader: anytype) !T { /// Asserts `T` is an enum fn readEnum(comptime T: type, reader: anytype) !T { switch (@typeInfo(T)) { - .Enum => |enum_type| return @enumFromInt(T, try readLeb(enum_type.tag_type, reader)), + .Enum => |enum_type| return @as(T, @enumFromInt(try readLeb(enum_type.tag_type, reader))), else => @compileError("T must be an enum. Instead was given type " ++ @typeName(T)), } } @@ -867,7 +867,7 @@ fn readLimits(reader: anytype) !std.wasm.Limits { fn readInit(reader: anytype) !std.wasm.InitExpression { const opcode = try reader.readByte(); - const init_expr: std.wasm.InitExpression = switch (@enumFromInt(std.wasm.Opcode, opcode)) { + const init_expr: std.wasm.InitExpression = switch (@as(std.wasm.Opcode, @enumFromInt(opcode))) { .i32_const => .{ .i32_const = try readLeb(i32, reader) }, .global_get => .{ .global_get = try readLeb(u32, reader) }, else => @panic("TODO: initexpression for other opcodes"), @@ -899,7 +899,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b switch (symbol.tag) { .function, .data, .section => if (!symbol.isUndefined()) { const gop = try symbol_for_segment.getOrPut(.{ .kind = symbol.tag, .index = symbol.index }); - const sym_idx = @intCast(u32, symbol_index); + const sym_idx = @as(u32, @intCast(symbol_index)); if (!gop.found_existing) { gop.value_ptr.* = std.ArrayList(u32).init(gpa); } @@ -910,11 +910,11 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b } for (object.relocatable_data, 0..) 
|relocatable_data, index| { - const final_index = (try wasm_bin.getMatchingSegment(object_index, @intCast(u32, index))) orelse { + const final_index = (try wasm_bin.getMatchingSegment(object_index, @as(u32, @intCast(index)))) orelse { continue; // found unknown section, so skip parsing into atom as we do not know how to handle it. }; - const atom_index = @intCast(Atom.Index, wasm_bin.managed_atoms.items.len); + const atom_index = @as(Atom.Index, @intCast(wasm_bin.managed_atoms.items.len)); const atom = try wasm_bin.managed_atoms.addOne(gpa); atom.* = Atom.empty; atom.file = object_index; diff --git a/src/link/Wasm/types.zig b/src/link/Wasm/types.zig index 9bf54f25c3..cce5cdef49 100644 --- a/src/link/Wasm/types.zig +++ b/src/link/Wasm/types.zig @@ -205,7 +205,7 @@ pub const Feature = struct { /// From a given cpu feature, returns its linker feature pub fn fromCpuFeature(feature: std.Target.wasm.Feature) Tag { - return @enumFromInt(Tag, @intFromEnum(feature)); + return @as(Tag, @enumFromInt(@intFromEnum(feature))); } pub fn format(tag: Tag, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void { diff --git a/src/link/strtab.zig b/src/link/strtab.zig index abb58defef..0d71c9bf83 100644 --- a/src/link/strtab.zig +++ b/src/link/strtab.zig @@ -45,7 +45,7 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type { const off = entry.key_ptr.*; const save = entry.value_ptr.*; if (!save) continue; - const new_off = @intCast(u32, buffer.items.len); + const new_off = @as(u32, @intCast(buffer.items.len)); buffer.appendSliceAssumeCapacity(self.getAssumeExists(off)); idx_map.putAssumeCapacityNoClobber(off, new_off); } @@ -73,7 +73,7 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type { } try self.buffer.ensureUnusedCapacity(gpa, string.len + 1); - const new_off = @intCast(u32, self.buffer.items.len); + const new_off = @as(u32, @intCast(self.buffer.items.len)); log.debug("writing new string '{s}' at offset 0x{x}", .{ string, new_off }); @@ -103,7 +103,7 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type { pub fn get(self: Self, off: u32) ?[]const u8 { log.debug("getting string at 0x{x}", .{off}); if (off >= self.buffer.items.len) return null; - return mem.sliceTo(@ptrCast([*:0]const u8, self.buffer.items.ptr + off), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.buffer.items.ptr + off)), 0); } pub fn getAssumeExists(self: Self, off: u32) []const u8 { diff --git a/src/link/table_section.zig b/src/link/table_section.zig index 891f3b1a50..2c70b03f42 100644 --- a/src/link/table_section.zig +++ b/src/link/table_section.zig @@ -18,7 +18,7 @@ pub fn TableSection(comptime Entry: type) type { break :blk index; } else { log.debug(" (allocating entry at index {d})", .{self.entries.items.len}); - const index = @intCast(u32, self.entries.items.len); + const index = @as(u32, @intCast(self.entries.items.len)); _ = self.entries.addOneAssumeCapacity(); break :blk index; } diff --git a/src/link/tapi/Tokenizer.zig b/src/link/tapi/Tokenizer.zig index df46bb7d83..eb1ffc0e81 100644 --- a/src/link/tapi/Tokenizer.zig +++ b/src/link/tapi/Tokenizer.zig @@ -67,11 +67,11 @@ pub const TokenIterator = struct { } pub fn seekBy(self: *TokenIterator, offset: isize) void { - const new_pos = @bitCast(isize, self.pos) + offset; + const new_pos = @as(isize, @bitCast(self.pos)) + offset; if (new_pos < 0) { self.pos = 0; } else { - self.pos = @intCast(usize, new_pos); + self.pos = @as(usize, @intCast(new_pos)); } } }; diff --git a/src/main.zig b/src/main.zig index 
22d2d075d1..02e1ef6f00 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3523,7 +3523,7 @@ fn progressThread(progress: *std.Progress, server: *const Server, reset: *std.Th server.serveMessage(.{ .tag = .progress, - .bytes_len = @intCast(u32, progress_string.len), + .bytes_len = @as(u32, @intCast(progress_string.len)), }, &.{ progress_string, }) catch |err| { @@ -5020,8 +5020,8 @@ pub fn clangMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory}! // Convert the args to the null-terminated format Clang expects. const argv = try argsCopyZ(arena, args); - const exit_code = ZigClang_main(@intCast(c_int, argv.len), argv.ptr); - return @bitCast(u8, @truncate(i8, exit_code)); + const exit_code = ZigClang_main(@as(c_int, @intCast(argv.len)), argv.ptr); + return @as(u8, @bitCast(@as(i8, @truncate(exit_code)))); } pub fn llvmArMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory}!u8 { @@ -5035,8 +5035,8 @@ pub fn llvmArMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory} // Convert the args to the format llvm-ar expects. // We intentionally shave off the zig binary at args[0]. const argv = try argsCopyZ(arena, args[1..]); - const exit_code = ZigLlvmAr_main(@intCast(c_int, argv.len), argv.ptr); - return @bitCast(u8, @truncate(i8, exit_code)); + const exit_code = ZigLlvmAr_main(@as(c_int, @intCast(argv.len)), argv.ptr); + return @as(u8, @bitCast(@as(i8, @truncate(exit_code)))); } /// The first argument determines which backend is invoked. The options are: @@ -5072,7 +5072,7 @@ pub fn lldMain( // "If an error occurs, false will be returned." const ok = rc: { const llvm = @import("codegen/llvm/bindings.zig"); - const argc = @intCast(c_int, argv.len); + const argc = @as(c_int, @intCast(argv.len)); if (mem.eql(u8, args[1], "ld.lld")) { break :rc llvm.LinkELF(argc, argv.ptr, can_exit_early, false); } else if (mem.eql(u8, args[1], "lld-link")) { @@ -5507,7 +5507,7 @@ pub fn cmdAstCheck( if (stat.size > max_src_size) return error.FileTooBig; - const source = try arena.allocSentinel(u8, @intCast(usize, stat.size), 0); + const source = try arena.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); const amt = try f.readAll(source); if (amt != stat.size) return error.UnexpectedEndOfFile; @@ -5703,7 +5703,7 @@ pub fn cmdChangelist( file.pkg = try Package.create(gpa, null, file.sub_file_path); defer file.pkg.destroy(gpa); - const source = try arena.allocSentinel(u8, @intCast(usize, stat.size), 0); + const source = try arena.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); const amt = try f.readAll(source); if (amt != stat.size) return error.UnexpectedEndOfFile; @@ -5739,7 +5739,7 @@ pub fn cmdChangelist( if (new_stat.size > max_src_size) return error.FileTooBig; - const new_source = try arena.allocSentinel(u8, @intCast(usize, new_stat.size), 0); + const new_source = try arena.allocSentinel(u8, @as(usize, @intCast(new_stat.size)), 0); const new_amt = try new_f.readAll(new_source); if (new_amt != new_stat.size) return error.UnexpectedEndOfFile; diff --git a/src/objcopy.zig b/src/objcopy.zig index f89af1737c..887396ee18 100644 --- a/src/objcopy.zig +++ b/src/objcopy.zig @@ -345,7 +345,7 @@ const BinaryElfOutput = struct { const shstrtab_shdr = (try section_headers.next()).?; - const buffer = try allocator.alloc(u8, @intCast(usize, shstrtab_shdr.sh_size)); + const buffer = try allocator.alloc(u8, @as(usize, @intCast(shstrtab_shdr.sh_size))); errdefer allocator.free(buffer); const num_read = try elf_file.preadAll(buffer, shstrtab_shdr.sh_offset); @@ -363,11 +363,11 @@ 
const BinaryElfOutput = struct { newSection.binaryOffset = 0; newSection.elfOffset = section.sh_offset; - newSection.fileSize = @intCast(usize, section.sh_size); + newSection.fileSize = @as(usize, @intCast(section.sh_size)); newSection.segment = null; newSection.name = if (self.shstrtab) |shstrtab| - std.mem.span(@ptrCast([*:0]const u8, &shstrtab[section.sh_name])) + std.mem.span(@as([*:0]const u8, @ptrCast(&shstrtab[section.sh_name]))) else null; @@ -382,7 +382,7 @@ const BinaryElfOutput = struct { newSegment.physicalAddress = if (phdr.p_paddr != 0) phdr.p_paddr else phdr.p_vaddr; newSegment.virtualAddress = phdr.p_vaddr; - newSegment.fileSize = @intCast(usize, phdr.p_filesz); + newSegment.fileSize = @as(usize, @intCast(phdr.p_filesz)); newSegment.elfOffset = phdr.p_offset; newSegment.binaryOffset = 0; newSegment.firstSection = null; @@ -478,8 +478,8 @@ const HexWriter = struct { const MAX_PAYLOAD_LEN: u8 = 16; fn addressParts(address: u16) [2]u8 { - const msb = @truncate(u8, address >> 8); - const lsb = @truncate(u8, address); + const msb = @as(u8, @truncate(address >> 8)); + const lsb = @as(u8, @truncate(address)); return [2]u8{ msb, lsb }; } @@ -508,14 +508,14 @@ const HexWriter = struct { fn Data(address: u32, data: []const u8) Record { return Record{ - .address = @intCast(u16, address % 0x10000), + .address = @as(u16, @intCast(address % 0x10000)), .payload = .{ .Data = data }, }; } fn Address(address: u32) Record { assert(address > 0xFFFF); - const segment = @intCast(u16, address / 0x10000); + const segment = @as(u16, @intCast(address / 0x10000)); if (address > 0xFFFFF) { return Record{ .address = 0, @@ -540,7 +540,7 @@ const HexWriter = struct { fn checksum(self: Record) u8 { const payload_bytes = self.getPayloadBytes(); - var sum: u8 = @intCast(u8, payload_bytes.len); + var sum: u8 = @as(u8, @intCast(payload_bytes.len)); const parts = addressParts(self.address); sum +%= parts[0]; sum +%= parts[1]; @@ -560,7 +560,7 @@ const HexWriter = struct { assert(payload_bytes.len <= MAX_PAYLOAD_LEN); const line = try std.fmt.bufPrint(&outbuf, ":{0X:0>2}{1X:0>4}{2X:0>2}{3s}{4X:0>2}" ++ linesep, .{ - @intCast(u8, payload_bytes.len), + @as(u8, @intCast(payload_bytes.len)), self.address, @intFromEnum(self.payload), std.fmt.fmtSliceHexUpper(payload_bytes), @@ -574,10 +574,10 @@ const HexWriter = struct { var buf: [MAX_PAYLOAD_LEN]u8 = undefined; var bytes_read: usize = 0; while (bytes_read < segment.fileSize) { - const row_address = @intCast(u32, segment.physicalAddress + bytes_read); + const row_address = @as(u32, @intCast(segment.physicalAddress + bytes_read)); const remaining = segment.fileSize - bytes_read; - const to_read = @intCast(usize, @min(remaining, MAX_PAYLOAD_LEN)); + const to_read = @as(usize, @intCast(@min(remaining, MAX_PAYLOAD_LEN))); const did_read = try elf_file.preadAll(buf[0..to_read], segment.elfOffset + bytes_read); if (did_read < to_read) return error.UnexpectedEOF; @@ -593,7 +593,7 @@ const HexWriter = struct { try Record.Address(address).write(self.out_file); } try record.write(self.out_file); - self.prev_addr = @intCast(u32, record.address + data.len); + self.prev_addr = @as(u32, @intCast(record.address + data.len)); } fn writeEOF(self: HexWriter) File.WriteError!void { @@ -814,7 +814,7 @@ fn ElfFile(comptime is_64: bool) type { const need_strings = (idx == header.shstrndx); if (need_data or need_strings) { - const buffer = try allocator.alignedAlloc(u8, section_memory_align, @intCast(usize, section.section.sh_size)); + const buffer = try allocator.alignedAlloc(u8, 
section_memory_align, @as(usize, @intCast(section.section.sh_size))); const bytes_read = try in_file.preadAll(buffer, section.section.sh_offset); if (bytes_read != section.section.sh_size) return error.TRUNCATED_ELF; section.payload = buffer; @@ -831,7 +831,7 @@ fn ElfFile(comptime is_64: bool) type { } else null; if (section.section.sh_name != 0 and header.shstrndx != elf.SHN_UNDEF) - section.name = std.mem.span(@ptrCast([*:0]const u8, &sections[header.shstrndx].payload.?[section.section.sh_name])); + section.name = std.mem.span(@as([*:0]const u8, @ptrCast(&sections[header.shstrndx].payload.?[section.section.sh_name]))); const category_from_program: SectionCategory = if (section.segment != null) .exe else .debug; section.category = switch (section.section.sh_type) { @@ -935,7 +935,7 @@ fn ElfFile(comptime is_64: bool) type { const update = &sections_update[self.raw_elf_header.e_shstrndx]; const name: []const u8 = ".gnu_debuglink"; - const new_offset = @intCast(u32, strtab.payload.?.len); + const new_offset = @as(u32, @intCast(strtab.payload.?.len)); const buf = try allocator.alignedAlloc(u8, section_memory_align, new_offset + name.len + 1); @memcpy(buf[0..new_offset], strtab.payload.?); @memcpy(buf[new_offset..][0..name.len], name); @@ -965,7 +965,7 @@ fn ElfFile(comptime is_64: bool) type { update.payload = payload; update.section = section.section; update.section.?.sh_addralign = @alignOf(Elf_Chdr); - update.section.?.sh_size = @intCast(Elf_OffSize, payload.len); + update.section.?.sh_size = @as(Elf_OffSize, @intCast(payload.len)); update.section.?.sh_flags |= elf.SHF_COMPRESSED; } } @@ -991,7 +991,7 @@ fn ElfFile(comptime is_64: bool) type { const data = std.mem.sliceAsBytes(self.program_segments); assert(data.len == @as(usize, updated_elf_header.e_phentsize) * updated_elf_header.e_phnum); cmdbuf.appendAssumeCapacity(.{ .write_data = .{ .data = data, .out_offset = updated_elf_header.e_phoff } }); - eof_offset = updated_elf_header.e_phoff + @intCast(Elf_OffSize, data.len); + eof_offset = updated_elf_header.e_phoff + @as(Elf_OffSize, @intCast(data.len)); } // update sections and queue payload writes @@ -1032,7 +1032,7 @@ fn ElfFile(comptime is_64: bool) type { dest.sh_info = sections_update[src.sh_info].remap_idx; if (payload) |data| - dest.sh_size = @intCast(Elf_OffSize, data.len); + dest.sh_size = @as(Elf_OffSize, @intCast(data.len)); const addralign = if (src.sh_addralign == 0 or dest.sh_type == elf.SHT_NOBITS) 1 else src.sh_addralign; dest.sh_offset = std.mem.alignForward(Elf_OffSize, eof_offset, addralign); @@ -1056,7 +1056,7 @@ fn ElfFile(comptime is_64: bool) type { const data = try allocator.alignedAlloc(u8, section_memory_align, src_data.len); @memcpy(data, src_data); - const defs = @ptrCast([*]Elf_Verdef, data)[0 .. @intCast(usize, src.sh_size) / @sizeOf(Elf_Verdef)]; + const defs = @as([*]Elf_Verdef, @ptrCast(data))[0 .. @as(usize, @intCast(src.sh_size)) / @sizeOf(Elf_Verdef)]; for (defs) |*def| { if (def.vd_ndx != elf.SHN_UNDEF) def.vd_ndx = sections_update[src.sh_info].remap_idx; @@ -1068,7 +1068,7 @@ fn ElfFile(comptime is_64: bool) type { const data = try allocator.alignedAlloc(u8, section_memory_align, src_data.len); @memcpy(data, src_data); - const syms = @ptrCast([*]Elf_Sym, data)[0 .. @intCast(usize, src.sh_size) / @sizeOf(Elf_Sym)]; + const syms = @as([*]Elf_Sym, @ptrCast(data))[0 .. 
@as(usize, @intCast(src.sh_size)) / @sizeOf(Elf_Sym)]; for (syms) |*sym| { if (sym.st_shndx != elf.SHN_UNDEF and sym.st_shndx < elf.SHN_LORESERVE) sym.st_shndx = sections_update[sym.st_shndx].remap_idx; @@ -1110,7 +1110,7 @@ fn ElfFile(comptime is_64: bool) type { .sh_flags = 0, .sh_addr = 0, .sh_offset = eof_offset, - .sh_size = @intCast(Elf_OffSize, payload.len), + .sh_size = @as(Elf_OffSize, @intCast(payload.len)), .sh_link = elf.SHN_UNDEF, .sh_info = elf.SHN_UNDEF, .sh_addralign = 4, @@ -1119,7 +1119,7 @@ fn ElfFile(comptime is_64: bool) type { dest_section_idx += 1; cmdbuf.appendAssumeCapacity(.{ .write_data = .{ .data = payload, .out_offset = eof_offset } }); - eof_offset += @intCast(Elf_OffSize, payload.len); + eof_offset += @as(Elf_OffSize, @intCast(payload.len)); } assert(dest_section_idx == new_shnum); @@ -1232,7 +1232,7 @@ const ElfFileHelper = struct { fused_cmd = null; } if (data.out_offset > offset) { - consolidated.appendAssumeCapacity(.{ .write_data = .{ .data = zeroes[0..@intCast(usize, data.out_offset - offset)], .out_offset = offset } }); + consolidated.appendAssumeCapacity(.{ .write_data = .{ .data = zeroes[0..@as(usize, @intCast(data.out_offset - offset))], .out_offset = offset } }); } consolidated.appendAssumeCapacity(cmd); offset = data.out_offset + data.data.len; @@ -1249,7 +1249,7 @@ const ElfFileHelper = struct { } else { consolidated.appendAssumeCapacity(prev); if (range.out_offset > offset) { - consolidated.appendAssumeCapacity(.{ .write_data = .{ .data = zeroes[0..@intCast(usize, range.out_offset - offset)], .out_offset = offset } }); + consolidated.appendAssumeCapacity(.{ .write_data = .{ .data = zeroes[0..@as(usize, @intCast(range.out_offset - offset))], .out_offset = offset } }); } fused_cmd = cmd; } @@ -1286,7 +1286,7 @@ const ElfFileHelper = struct { var section_reader = std.io.limitedReader(in_file.reader(), size); // allocate as large as decompressed data. if the compression doesn't fit, keep the data uncompressed. - const compressed_data = try allocator.alignedAlloc(u8, 8, @intCast(usize, size)); + const compressed_data = try allocator.alignedAlloc(u8, 8, @as(usize, @intCast(size))); var compressed_stream = std.io.fixedBufferStream(compressed_data); try compressed_stream.writer().writeAll(prefix); @@ -1317,7 +1317,7 @@ const ElfFileHelper = struct { }; } - const compressed_len = @intCast(usize, compressed_stream.getPos() catch unreachable); + const compressed_len = @as(usize, @intCast(compressed_stream.getPos() catch unreachable)); const data = allocator.realloc(compressed_data, compressed_len) catch compressed_data; return data[0..compressed_len]; } diff --git a/src/print_air.zig b/src/print_air.zig index d73ec30891..4ae83271a1 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -91,7 +91,7 @@ const Writer = struct { fn writeAllConstants(w: *Writer, s: anytype) @TypeOf(s).Error!void { for (w.air.instructions.items(.tag), 0..) 
|tag, i| { if (tag != .interned) continue; - const inst = @intCast(Air.Inst.Index, i); + const inst = @as(Air.Inst.Index, @intCast(i)); try w.writeInst(s, inst); try s.writeByte('\n'); } @@ -424,8 +424,8 @@ const Writer = struct { const mod = w.module; const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const vector_ty = w.air.getRefType(ty_pl.ty); - const len = @intCast(usize, vector_ty.arrayLen(mod)); - const elements = @ptrCast([]const Air.Inst.Ref, w.air.extra[ty_pl.payload..][0..len]); + const len = @as(usize, @intCast(vector_ty.arrayLen(mod))); + const elements = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[ty_pl.payload..][0..len])); try w.writeType(s, vector_ty); try s.writeAll(", ["); @@ -607,8 +607,8 @@ const Writer = struct { fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const extra = w.air.extraData(Air.Asm, ty_pl.payload); - const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; + const clobbers_len = @as(u31, @truncate(extra.data.flags)); var extra_i: usize = extra.end; var op_index: usize = 0; @@ -619,9 +619,9 @@ const Writer = struct { try s.writeAll(", volatile"); } - const outputs = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; for (outputs) |output| { @@ -699,7 +699,7 @@ const Writer = struct { fn writeCall(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const pl_op = w.air.instructions.items(.data)[inst].pl_op; const extra = w.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[extra.end..][0..extra.data.args_len])); try w.writeOperand(s, inst, 0, pl_op.operand); try s.writeAll(", ["); for (args, 0..) 
|arg, i| { @@ -855,7 +855,7 @@ const Writer = struct { while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = w.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, w.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[case.end..][0..case.data.items_len])); const case_body = w.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + case.data.items_len + case_body.len; @@ -934,13 +934,13 @@ const Writer = struct { const small_tomb_bits = Liveness.bpi - 1; const dies = if (w.liveness) |liveness| blk: { if (op_index < small_tomb_bits) - break :blk liveness.operandDies(inst, @intCast(Liveness.OperandInt, op_index)); + break :blk liveness.operandDies(inst, @as(Liveness.OperandInt, @intCast(op_index))); var extra_index = liveness.special.get(inst).?; var tomb_op_index: usize = small_tomb_bits; while (true) { const bits = liveness.extra[extra_index]; if (op_index < tomb_op_index + 31) { - break :blk @truncate(u1, bits >> @intCast(u5, op_index - tomb_op_index)) != 0; + break :blk @as(u1, @truncate(bits >> @as(u5, @intCast(op_index - tomb_op_index)))) != 0; } if ((bits >> 31) != 0) break :blk false; extra_index += 1; diff --git a/src/print_targets.zig b/src/print_targets.zig index ea4e30ae58..62e1d3b158 100644 --- a/src/print_targets.zig +++ b/src/print_targets.zig @@ -100,7 +100,7 @@ pub fn cmdTargets( try jws.objectField(model.name); try jws.beginArray(); for (arch.allFeaturesList(), 0..) |feature, i_usize| { - const index = @intCast(Target.Cpu.Feature.Set.Index, i_usize); + const index = @as(Target.Cpu.Feature.Set.Index, @intCast(i_usize)); if (model.features.isEnabled(index)) { try jws.arrayElem(); try jws.emitString(feature.name); @@ -147,7 +147,7 @@ pub fn cmdTargets( try jws.objectField("features"); try jws.beginArray(); for (native_target.cpu.arch.allFeaturesList(), 0..) 
|feature, i_usize| { - const index = @intCast(Target.Cpu.Feature.Set.Index, i_usize); + const index = @as(Target.Cpu.Feature.Set.Index, @intCast(i_usize)); if (cpu.features.isEnabled(index)) { try jws.arrayElem(); try jws.emitString(feature.name); diff --git a/src/print_zir.zig b/src/print_zir.zig index 472461cd04..42a9abf401 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -131,7 +131,7 @@ const Writer = struct { recurse_blocks: bool, fn relativeToNodeIndex(self: *Writer, offset: i32) Ast.Node.Index { - return @bitCast(Ast.Node.Index, offset + @bitCast(i32, self.parent_decl_node)); + return @as(Ast.Node.Index, @bitCast(offset + @as(i32, @bitCast(self.parent_decl_node)))); } fn writeInstToStream( @@ -542,7 +542,7 @@ const Writer = struct { } fn writeExtNode(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { - const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand)); + const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand))); try stream.writeAll(")) "); try self.writeSrc(stream, src); } @@ -631,25 +631,25 @@ const Writer = struct { var extra_index = extra.end; if (inst_data.flags.has_sentinel) { try stream.writeAll(", "); - try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index])); + try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]))); extra_index += 1; } if (inst_data.flags.has_align) { try stream.writeAll(", align("); - try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index])); + try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]))); extra_index += 1; if (inst_data.flags.has_bit_range) { const bit_start = extra_index + @intFromBool(inst_data.flags.has_addrspace); try stream.writeAll(":"); - try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[bit_start])); + try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[bit_start]))); try stream.writeAll(":"); - try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[bit_start + 1])); + try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[bit_start + 1]))); } try stream.writeAll(")"); } if (inst_data.flags.has_addrspace) { try stream.writeAll(", addrspace("); - try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index])); + try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]))); try stream.writeAll(")"); } try stream.writeAll(") "); @@ -691,7 +691,7 @@ const Writer = struct { const src = inst_data.src(); const number = extra.get(); // TODO improve std.format to be able to print f128 values - try stream.print("{d}) ", .{@floatCast(f64, number)}); + try stream.print("{d}) ", .{@as(f64, @floatCast(number))}); try self.writeSrc(stream, src); } @@ -964,7 +964,7 @@ const Writer = struct { } fn writePtrCastFull(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { - const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); + const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small)))); const extra = self.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); if (flags.ptr_cast) try stream.writeAll("ptr_cast, "); @@ -980,7 +980,7 @@ const Writer = struct { } fn writePtrCastNoDest(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { - const flags = 
@bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); + const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small)))); const extra = self.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); if (flags.const_cast) try stream.writeAll("const_cast, "); @@ -1103,14 +1103,14 @@ const Writer = struct { ) !void { const extra = self.code.extraData(Zir.Inst.Asm, extended.operand); const src = LazySrcLoc.nodeOffset(extra.data.src_node); - const outputs_len = @truncate(u5, extended.small); - const inputs_len = @truncate(u5, extended.small >> 5); - const clobbers_len = @truncate(u5, extended.small >> 10); - const is_volatile = @truncate(u1, extended.small >> 15) != 0; + const outputs_len = @as(u5, @truncate(extended.small)); + const inputs_len = @as(u5, @truncate(extended.small >> 5)); + const clobbers_len = @as(u5, @truncate(extended.small >> 10)); + const is_volatile = @as(u1, @truncate(extended.small >> 15)) != 0; try self.writeFlag(stream, "volatile, ", is_volatile); if (tmpl_is_expr) { - try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, extra.data.asm_source)); + try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(extra.data.asm_source))); try stream.writeAll(", "); } else { const asm_source = self.code.nullTerminatedString(extra.data.asm_source); @@ -1126,7 +1126,7 @@ const Writer = struct { const output = self.code.extraData(Zir.Inst.Asm.Output, extra_i); extra_i = output.end; - const is_type = @truncate(u1, output_type_bits) != 0; + const is_type = @as(u1, @truncate(output_type_bits)) != 0; output_type_bits >>= 1; const name = self.code.nullTerminatedString(output.data.name); @@ -1205,7 +1205,7 @@ const Writer = struct { if (extra.data.flags.ensure_result_used) { try stream.writeAll("nodiscard "); } - try stream.print(".{s}, ", .{@tagName(@enumFromInt(std.builtin.CallModifier, extra.data.flags.packed_modifier))}); + try stream.print(".{s}, ", .{@tagName(@as(std.builtin.CallModifier, @enumFromInt(extra.data.flags.packed_modifier)))}); switch (kind) { .direct => try self.writeInstRef(stream, extra.data.callee), .field => { @@ -1280,12 +1280,12 @@ const Writer = struct { } fn writeStructDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { - const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); + const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, self.code.extra[extra_index]); + const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; @@ -1313,7 +1313,7 @@ const Writer = struct { extra_index += 1; try stream.writeAll("Packed("); if (backing_int_body_len == 0) { - const backing_int_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const backing_int_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; try self.writeInstRef(stream, backing_int_ref); } else { @@ -1369,13 +1369,13 @@ const Writer = struct { cur_bit_bag = self.code.extra[bit_bag_index]; bit_bag_index += 1; } - const has_align = @truncate(u1, cur_bit_bag) != 0; + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_default = @truncate(u1, cur_bit_bag) != 0; + const has_default = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const is_comptime = @truncate(u1, cur_bit_bag) != 0; + const 
is_comptime = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_type_body = @truncate(u1, cur_bit_bag) != 0; + const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; var field_name: u32 = 0; @@ -1395,7 +1395,7 @@ const Writer = struct { if (has_type_body) { fields[field_i].type_len = self.code.extra[extra_index]; } else { - fields[field_i].type = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + fields[field_i].type = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); } extra_index += 1; @@ -1469,18 +1469,18 @@ const Writer = struct { } fn writeUnionDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { - const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); + const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, self.code.extra[extra_index]); + const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; const tag_type_ref = if (small.has_tag_type) blk: { - const tag_type_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :blk tag_type_ref; } else .none; @@ -1557,13 +1557,13 @@ const Writer = struct { cur_bit_bag = self.code.extra[bit_bag_index]; bit_bag_index += 1; } - const has_type = @truncate(u1, cur_bit_bag) != 0; + const has_type = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_align = @truncate(u1, cur_bit_bag) != 0; + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_value = @truncate(u1, cur_bit_bag) != 0; + const has_value = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const unused = @truncate(u1, cur_bit_bag) != 0; + const unused = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; _ = unused; @@ -1578,14 +1578,14 @@ const Writer = struct { try stream.print("{}", .{std.zig.fmtId(field_name)}); if (has_type) { - const field_type = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const field_type = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; try stream.writeAll(": "); try self.writeInstRef(stream, field_type); } if (has_align) { - const align_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const align_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; try stream.writeAll(" align("); @@ -1593,7 +1593,7 @@ const Writer = struct { try stream.writeAll(")"); } if (has_value) { - const default_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const default_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; try stream.writeAll(" = "); @@ -1621,13 +1621,13 @@ const Writer = struct { cur_bit_bag = self.code.extra[bit_bag_index]; bit_bag_index += 1; } - const is_pub = @truncate(u1, cur_bit_bag) != 0; + const is_pub = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const is_exported = @truncate(u1, cur_bit_bag) != 0; + const is_exported = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_align = @truncate(u1, cur_bit_bag) != 0; + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_section_or_addrspace = @truncate(u1, cur_bit_bag) != 0; + const has_section_or_addrspace = 
@as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const sub_index = extra_index; @@ -1644,23 +1644,23 @@ const Writer = struct { extra_index += 1; const align_inst: Zir.Inst.Ref = if (!has_align) .none else inst: { - const inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :inst inst; }; const section_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: { - const inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :inst inst; }; const addrspace_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: { - const inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :inst inst; }; const pub_str = if (is_pub) "pub " else ""; - const hash_bytes = @bitCast([16]u8, hash_u32s.*); + const hash_bytes = @as([16]u8, @bitCast(hash_u32s.*)); if (decl_name_index == 0) { try stream.writeByteNTimes(' ', self.indent); const name = if (is_exported) "usingnamespace" else "comptime"; @@ -1728,17 +1728,17 @@ const Writer = struct { } fn writeEnumDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { - const small = @bitCast(Zir.Inst.EnumDecl.Small, extended.small); + const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, self.code.extra[extra_index]); + const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; const tag_type_ref = if (small.has_tag_type) blk: { - const tag_type_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :blk tag_type_ref; } else .none; @@ -1808,7 +1808,7 @@ const Writer = struct { cur_bit_bag = self.code.extra[bit_bag_index]; bit_bag_index += 1; } - const has_tag_value = @truncate(u1, cur_bit_bag) != 0; + const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const field_name = self.code.nullTerminatedString(self.code.extra[extra_index]); @@ -1823,7 +1823,7 @@ const Writer = struct { try stream.print("{}", .{std.zig.fmtId(field_name)}); if (has_tag_value) { - const tag_value_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const tag_value_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; try stream.writeAll(" = "); @@ -1844,11 +1844,11 @@ const Writer = struct { stream: anytype, extended: Zir.Inst.Extended.InstData, ) !void { - const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small); + const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, self.code.extra[extra_index]); + const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; @@ -1892,7 +1892,7 @@ const Writer = struct { try stream.writeAll("{\n"); self.indent += 2; - var extra_index = @intCast(u32, extra.end); + var extra_index = @as(u32, @intCast(extra.end)); const extra_index_end = extra_index + (extra.data.fields_len 
* 2); while (extra_index < extra_index_end) : (extra_index += 2) { const str_index = self.code.extra[extra_index]; @@ -1945,7 +1945,7 @@ const Writer = struct { else => break :else_prong, }; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, self.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index])); const capture_text = switch (info.capture) { .none => "", .by_val => "by_val ", @@ -1966,9 +1966,9 @@ const Writer = struct { const scalar_cases_len = extra.data.bits.scalar_cases_len; var scalar_i: usize = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - const item_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const item_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, self.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index])); extra_index += 1; const body = self.code.extra[extra_index..][0..info.body_len]; extra_index += info.body_len; @@ -1993,7 +1993,7 @@ const Writer = struct { extra_index += 1; const ranges_len = self.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, self.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index])); extra_index += 1; const items = self.code.refSlice(extra_index, items_len); extra_index += items_len; @@ -2014,9 +2014,9 @@ const Writer = struct { var range_i: usize = 0; while (range_i < ranges_len) : (range_i += 1) { - const item_first = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const item_first = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; - const item_last = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const item_last = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; if (range_i != 0 or items.len != 0) { @@ -2117,7 +2117,7 @@ const Writer = struct { ret_ty_ref = .void_type; }, 1 => { - ret_ty_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + ret_ty_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; }, else => { @@ -2188,7 +2188,7 @@ const Writer = struct { align_body = self.code.extra[extra_index..][0..body_len]; extra_index += align_body.len; } else if (extra.data.bits.has_align_ref) { - align_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + align_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; } if (extra.data.bits.has_addrspace_body) { @@ -2197,7 +2197,7 @@ const Writer = struct { addrspace_body = self.code.extra[extra_index..][0..body_len]; extra_index += addrspace_body.len; } else if (extra.data.bits.has_addrspace_ref) { - addrspace_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + addrspace_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; } if (extra.data.bits.has_section_body) { @@ -2206,7 +2206,7 @@ const Writer = struct { section_body = self.code.extra[extra_index..][0..body_len]; extra_index += section_body.len; } else if (extra.data.bits.has_section_ref) { - section_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + section_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; } if (extra.data.bits.has_cc_body) { @@ -2215,7 +2215,7 @@ const Writer = struct { cc_body = 
self.code.extra[extra_index..][0..body_len]; extra_index += cc_body.len; } else if (extra.data.bits.has_cc_ref) { - cc_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + cc_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; } if (extra.data.bits.has_ret_ty_body) { @@ -2224,7 +2224,7 @@ const Writer = struct { ret_ty_body = self.code.extra[extra_index..][0..body_len]; extra_index += ret_ty_body.len; } else if (extra.data.bits.has_ret_ty_ref) { - ret_ty_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + ret_ty_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; } @@ -2266,7 +2266,7 @@ const Writer = struct { fn writeVarExtended(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { const extra = self.code.extraData(Zir.Inst.ExtendedVar, extended.operand); - const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small); + const small = @as(Zir.Inst.ExtendedVar.Small, @bitCast(extended.small)); try self.writeInstRef(stream, extra.data.var_type); @@ -2277,12 +2277,12 @@ const Writer = struct { try stream.print(", lib_name=\"{}\"", .{std.zig.fmtEscapes(lib_name)}); } const align_inst: Zir.Inst.Ref = if (!small.has_align) .none else blk: { - const align_inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const align_inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :blk align_inst; }; const init_inst: Zir.Inst.Ref = if (!small.has_init) .none else blk: { - const init_inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const init_inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :blk init_inst; }; @@ -2295,17 +2295,17 @@ const Writer = struct { fn writeAllocExtended(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { const extra = self.code.extraData(Zir.Inst.AllocExtended, extended.operand); - const small = @bitCast(Zir.Inst.AllocExtended.Small, extended.small); + const small = @as(Zir.Inst.AllocExtended.Small, @bitCast(extended.small)); const src = LazySrcLoc.nodeOffset(extra.data.src_node); var extra_index: usize = extra.end; const type_inst: Zir.Inst.Ref = if (!small.has_type) .none else blk: { - const type_inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const type_inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :blk type_inst; }; const align_inst: Zir.Inst.Ref = if (!small.has_align) .none else blk: { - const align_inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const align_inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :blk align_inst; }; @@ -2473,8 +2473,8 @@ const Writer = struct { try stream.writeAll(") "); if (body.len != 0) { try stream.print("(lbrace={d}:{d},rbrace={d}:{d}) ", .{ - src_locs.lbrace_line + 1, @truncate(u16, src_locs.columns) + 1, - src_locs.rbrace_line + 1, @truncate(u16, src_locs.columns >> 16) + 1, + src_locs.lbrace_line + 1, @as(u16, @truncate(src_locs.columns)) + 1, + src_locs.rbrace_line + 1, @as(u16, @truncate(src_locs.columns >> 16)) + 1, }); } try self.writeSrc(stream, src); @@ -2507,7 +2507,7 @@ const Writer = struct { fn writeInstRef(self: *Writer, stream: anytype, ref: Zir.Inst.Ref) !void { const i = @intFromEnum(ref); - if (i < InternPool.static_len) return stream.print("@{}", .{@enumFromInt(InternPool.Index, i)}); + if (i < InternPool.static_len) return 
stream.print("@{}", .{@as(InternPool.Index, @enumFromInt(i))}); return self.writeInstIndex(stream, i - InternPool.static_len); } diff --git a/src/register_manager.zig b/src/register_manager.zig index f9e2daeab1..322f623eec 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -427,13 +427,13 @@ const MockRegister3 = enum(u3) { pub fn id(reg: MockRegister3) u3 { return switch (@intFromEnum(reg)) { - 0...3 => @as(u3, @truncate(u2, @intFromEnum(reg))), + 0...3 => @as(u3, @as(u2, @truncate(@intFromEnum(reg)))), 4...7 => @intFromEnum(reg), }; } pub fn enc(reg: MockRegister3) u2 { - return @truncate(u2, @intFromEnum(reg)); + return @as(u2, @truncate(@intFromEnum(reg))); } const gp_regs = [_]MockRegister3{ .r0, .r1, .r2, .r3 }; diff --git a/src/tracy.zig b/src/tracy.zig index 580e29805a..10f2410091 100644 --- a/src/tracy.zig +++ b/src/tracy.zig @@ -132,7 +132,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type { } fn allocFn(ptr: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ptr)); + const self: *Self = @ptrCast(@alignCast(ptr)); const result = self.parent_allocator.rawAlloc(len, ptr_align, ret_addr); if (result) |data| { if (len != 0) { @@ -149,7 +149,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type { } fn resizeFn(ptr: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ptr)); + const self: *Self = @ptrCast(@alignCast(ptr)); if (self.parent_allocator.rawResize(buf, buf_align, new_len, ret_addr)) { if (name) |n| { freeNamed(buf.ptr, n); @@ -168,7 +168,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type { } fn freeFn(ptr: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ptr)); + const self: *Self = @ptrCast(@alignCast(ptr)); self.parent_allocator.rawFree(buf, buf_align, ret_addr); // this condition is to handle free being called on an empty slice that was never even allocated // example case: `std.process.getSelfExeSharedLibPaths` can return `&[_][:0]u8{}` diff --git a/src/translate_c.zig b/src/translate_c.zig index 4078bd0f34..6f208b3492 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -467,7 +467,7 @@ fn prepopulateGlobalNameTable(ast_unit: *clang.ASTUnit, c: *Context) !void { const entity = it.deref(); switch (entity.getKind()) { .MacroDefinitionKind => { - const macro = @ptrCast(*clang.MacroDefinitionRecord, entity); + const macro = @as(*clang.MacroDefinitionRecord, @ptrCast(entity)); const raw_name = macro.getName_getNameStart(); const name = try c.str(raw_name); @@ -481,13 +481,13 @@ fn prepopulateGlobalNameTable(ast_unit: *clang.ASTUnit, c: *Context) !void { } fn declVisitorNamesOnlyC(context: ?*anyopaque, decl: *const clang.Decl) callconv(.C) bool { - const c = @ptrCast(*Context, @alignCast(@alignOf(Context), context)); + const c: *Context = @ptrCast(@alignCast(context)); declVisitorNamesOnly(c, decl) catch return false; return true; } fn declVisitorC(context: ?*anyopaque, decl: *const clang.Decl) callconv(.C) bool { - const c = @ptrCast(*Context, @alignCast(@alignOf(Context), context)); + const c: *Context = @ptrCast(@alignCast(context)); declVisitor(c, decl) catch return false; return true; } @@ -499,37 +499,37 @@ fn declVisitorNamesOnly(c: *Context, decl: *const clang.Decl) Error!void { // Check for typedefs with unnamed enum/record child types. 
if (decl.getKind() == .Typedef) { - const typedef_decl = @ptrCast(*const clang.TypedefNameDecl, decl); + const typedef_decl = @as(*const clang.TypedefNameDecl, @ptrCast(decl)); var child_ty = typedef_decl.getUnderlyingType().getTypePtr(); const addr: usize = while (true) switch (child_ty.getTypeClass()) { .Enum => { - const enum_ty = @ptrCast(*const clang.EnumType, child_ty); + const enum_ty = @as(*const clang.EnumType, @ptrCast(child_ty)); const enum_decl = enum_ty.getDecl(); // check if this decl is unnamed - if (@ptrCast(*const clang.NamedDecl, enum_decl).getName_bytes_begin()[0] != 0) return; + if (@as(*const clang.NamedDecl, @ptrCast(enum_decl)).getName_bytes_begin()[0] != 0) return; break @intFromPtr(enum_decl.getCanonicalDecl()); }, .Record => { - const record_ty = @ptrCast(*const clang.RecordType, child_ty); + const record_ty = @as(*const clang.RecordType, @ptrCast(child_ty)); const record_decl = record_ty.getDecl(); // check if this decl is unnamed - if (@ptrCast(*const clang.NamedDecl, record_decl).getName_bytes_begin()[0] != 0) return; + if (@as(*const clang.NamedDecl, @ptrCast(record_decl)).getName_bytes_begin()[0] != 0) return; break @intFromPtr(record_decl.getCanonicalDecl()); }, .Elaborated => { - const elaborated_ty = @ptrCast(*const clang.ElaboratedType, child_ty); + const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(child_ty)); child_ty = elaborated_ty.getNamedType().getTypePtr(); }, .Decayed => { - const decayed_ty = @ptrCast(*const clang.DecayedType, child_ty); + const decayed_ty = @as(*const clang.DecayedType, @ptrCast(child_ty)); child_ty = decayed_ty.getDecayedType().getTypePtr(); }, .Attributed => { - const attributed_ty = @ptrCast(*const clang.AttributedType, child_ty); + const attributed_ty = @as(*const clang.AttributedType, @ptrCast(child_ty)); child_ty = attributed_ty.getEquivalentType().getTypePtr(); }, .MacroQualified => { - const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, child_ty); + const macroqualified_ty = @as(*const clang.MacroQualifiedType, @ptrCast(child_ty)); child_ty = macroqualified_ty.getModifiedType().getTypePtr(); }, else => return, @@ -552,25 +552,25 @@ fn declVisitorNamesOnly(c: *Context, decl: *const clang.Decl) Error!void { fn declVisitor(c: *Context, decl: *const clang.Decl) Error!void { switch (decl.getKind()) { .Function => { - return visitFnDecl(c, @ptrCast(*const clang.FunctionDecl, decl)); + return visitFnDecl(c, @as(*const clang.FunctionDecl, @ptrCast(decl))); }, .Typedef => { - try transTypeDef(c, &c.global_scope.base, @ptrCast(*const clang.TypedefNameDecl, decl)); + try transTypeDef(c, &c.global_scope.base, @as(*const clang.TypedefNameDecl, @ptrCast(decl))); }, .Enum => { - try transEnumDecl(c, &c.global_scope.base, @ptrCast(*const clang.EnumDecl, decl)); + try transEnumDecl(c, &c.global_scope.base, @as(*const clang.EnumDecl, @ptrCast(decl))); }, .Record => { - try transRecordDecl(c, &c.global_scope.base, @ptrCast(*const clang.RecordDecl, decl)); + try transRecordDecl(c, &c.global_scope.base, @as(*const clang.RecordDecl, @ptrCast(decl))); }, .Var => { - return visitVarDecl(c, @ptrCast(*const clang.VarDecl, decl), null); + return visitVarDecl(c, @as(*const clang.VarDecl, @ptrCast(decl)), null); }, .Empty => { // Do nothing }, .FileScopeAsm => { - try transFileScopeAsm(c, &c.global_scope.base, @ptrCast(*const clang.FileScopeAsmDecl, decl)); + try transFileScopeAsm(c, &c.global_scope.base, @as(*const clang.FileScopeAsmDecl, @ptrCast(decl))); }, else => { const decl_name = try 
c.str(decl.getDeclKindName()); @@ -595,7 +595,7 @@ fn transFileScopeAsm(c: *Context, scope: *Scope, file_scope_asm: *const clang.Fi } fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { - const fn_name = try c.str(@ptrCast(*const clang.NamedDecl, fn_decl).getName_bytes_begin()); + const fn_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(fn_decl)).getName_bytes_begin()); if (c.global_scope.sym_table.contains(fn_name)) return; // Avoid processing this decl twice @@ -630,22 +630,22 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { switch (fn_type.getTypeClass()) { .Attributed => { - const attr_type = @ptrCast(*const clang.AttributedType, fn_type); + const attr_type = @as(*const clang.AttributedType, @ptrCast(fn_type)); fn_qt = attr_type.getEquivalentType(); }, .Paren => { - const paren_type = @ptrCast(*const clang.ParenType, fn_type); + const paren_type = @as(*const clang.ParenType, @ptrCast(fn_type)); fn_qt = paren_type.getInnerType(); }, else => break fn_type, } }; - const fn_ty = @ptrCast(*const clang.FunctionType, fn_type); + const fn_ty = @as(*const clang.FunctionType, @ptrCast(fn_type)); const return_qt = fn_ty.getReturnType(); const proto_node = switch (fn_type.getTypeClass()) { .FunctionProto => blk: { - const fn_proto_type = @ptrCast(*const clang.FunctionProtoType, fn_type); + const fn_proto_type = @as(*const clang.FunctionProtoType, @ptrCast(fn_type)); if (has_body and fn_proto_type.isVariadic()) { decl_ctx.has_body = false; decl_ctx.storage_class = .Extern; @@ -661,7 +661,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { }; }, .FunctionNoProto => blk: { - const fn_no_proto_type = @ptrCast(*const clang.FunctionType, fn_type); + const fn_no_proto_type = @as(*const clang.FunctionType, @ptrCast(fn_type)); break :blk transFnNoProto(c, fn_no_proto_type, fn_decl_loc, decl_ctx, true) catch |err| switch (err) { error.UnsupportedType => { return failDecl(c, fn_decl_loc, fn_name, "unable to resolve prototype of function", .{}); @@ -714,7 +714,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { param_id += 1; } - const casted_body = @ptrCast(*const clang.CompoundStmt, body_stmt); + const casted_body = @as(*const clang.CompoundStmt, @ptrCast(body_stmt)); transCompoundStmtInline(c, casted_body, &block_scope) catch |err| switch (err) { error.OutOfMemory => |e| return e, error.UnsupportedTranslation, @@ -788,7 +788,7 @@ fn stringLiteralToCharStar(c: *Context, str: Node) Error!Node { /// if mangled_name is not null, this var decl was declared in a block scope. 
fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]const u8) Error!void { - const var_name = mangled_name orelse try c.str(@ptrCast(*const clang.NamedDecl, var_decl).getName_bytes_begin()); + const var_name = mangled_name orelse try c.str(@as(*const clang.NamedDecl, @ptrCast(var_decl)).getName_bytes_begin()); if (c.global_scope.sym_table.contains(var_name)) return; // Avoid processing this decl twice @@ -830,7 +830,7 @@ fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]co if (has_init) trans_init: { if (decl_init) |expr| { const node_or_error = if (expr.getStmtClass() == .StringLiteralClass) - transStringLiteralInitializer(c, @ptrCast(*const clang.StringLiteral, expr), type_node) + transStringLiteralInitializer(c, @as(*const clang.StringLiteral, @ptrCast(expr)), type_node) else transExprCoercing(c, scope, expr, .used); init_node = node_or_error catch |err| switch (err) { @@ -918,7 +918,7 @@ fn transTypeDef(c: *Context, scope: *Scope, typedef_decl: *const clang.TypedefNa const toplevel = scope.id == .root; const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined; - var name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, typedef_decl).getName_bytes_begin()); + var name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(typedef_decl)).getName_bytes_begin()); try c.typedefs.put(c.gpa, name, {}); if (builtin_typedef_map.get(name)) |builtin| { @@ -981,7 +981,7 @@ fn buildFlexibleArrayFn( .is_noalias = false, }; - const array_type = @ptrCast(*const clang.ArrayType, field_qt.getTypePtr()); + const array_type = @as(*const clang.ArrayType, @ptrCast(field_qt.getTypePtr())); const element_qt = array_type.getElementType(); const element_type = try transQualType(c, scope, element_qt, field_decl.getLocation()); @@ -1077,7 +1077,7 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD var is_union = false; var container_kind_name: []const u8 = undefined; - var bare_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, record_decl).getName_bytes_begin()); + var bare_name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(record_decl)).getName_bytes_begin()); if (record_decl.isUnion()) { container_kind_name = "union"; @@ -1138,7 +1138,7 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD } var is_anon = false; - var field_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin()); + var field_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(field_decl)).getName_bytes_begin()); if (field_decl.isAnonymousStructOrUnion() or field_name.len == 0) { // Context.getMangle() is not used here because doing so causes unpredictable field names for anonymous fields. 
field_name = try std.fmt.allocPrint(c.arena, "unnamed_{d}", .{unnamed_field_count}); @@ -1167,7 +1167,7 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD }; const alignment = if (has_flexible_array and field_decl.getFieldIndex() == 0) - @intCast(c_uint, record_alignment) + @as(c_uint, @intCast(record_alignment)) else ClangAlignment.forField(c, field_decl, record_def).zigAlignment(); @@ -1224,7 +1224,7 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const clang.EnumDecl) E const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined; var is_unnamed = false; - var bare_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, enum_decl).getName_bytes_begin()); + var bare_name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(enum_decl)).getName_bytes_begin()); var name = bare_name; if (c.unnamed_typedefs.get(@intFromPtr(enum_decl.getCanonicalDecl()))) |typedef_name| { bare_name = typedef_name; @@ -1244,13 +1244,13 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const clang.EnumDecl) E const end_it = enum_def.enumerator_end(); while (it.neq(end_it)) : (it = it.next()) { const enum_const = it.deref(); - var enum_val_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, enum_const).getName_bytes_begin()); + var enum_val_name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(enum_const)).getName_bytes_begin()); if (!toplevel) { enum_val_name = try bs.makeMangledName(c, enum_val_name); } - const enum_const_qt = @ptrCast(*const clang.ValueDecl, enum_const).getType(); - const enum_const_loc = @ptrCast(*const clang.Decl, enum_const).getLocation(); + const enum_const_qt = @as(*const clang.ValueDecl, @ptrCast(enum_const)).getType(); + const enum_const_loc = @as(*const clang.Decl, @ptrCast(enum_const)).getLocation(); const enum_const_type_node: ?Node = transQualType(c, scope, enum_const_qt, enum_const_loc) catch |err| switch (err) { error.UnsupportedType => null, else => |e| return e, @@ -1325,77 +1325,77 @@ fn transStmt( ) TransError!Node { const sc = stmt.getStmtClass(); switch (sc) { - .BinaryOperatorClass => return transBinaryOperator(c, scope, @ptrCast(*const clang.BinaryOperator, stmt), result_used), - .CompoundStmtClass => return transCompoundStmt(c, scope, @ptrCast(*const clang.CompoundStmt, stmt)), - .CStyleCastExprClass => return transCStyleCastExprClass(c, scope, @ptrCast(*const clang.CStyleCastExpr, stmt), result_used), - .DeclStmtClass => return transDeclStmt(c, scope, @ptrCast(*const clang.DeclStmt, stmt)), - .DeclRefExprClass => return transDeclRefExpr(c, scope, @ptrCast(*const clang.DeclRefExpr, stmt)), - .ImplicitCastExprClass => return transImplicitCastExpr(c, scope, @ptrCast(*const clang.ImplicitCastExpr, stmt), result_used), - .IntegerLiteralClass => return transIntegerLiteral(c, scope, @ptrCast(*const clang.IntegerLiteral, stmt), result_used, .with_as), - .ReturnStmtClass => return transReturnStmt(c, scope, @ptrCast(*const clang.ReturnStmt, stmt)), - .StringLiteralClass => return transStringLiteral(c, scope, @ptrCast(*const clang.StringLiteral, stmt), result_used), + .BinaryOperatorClass => return transBinaryOperator(c, scope, @as(*const clang.BinaryOperator, @ptrCast(stmt)), result_used), + .CompoundStmtClass => return transCompoundStmt(c, scope, @as(*const clang.CompoundStmt, @ptrCast(stmt))), + .CStyleCastExprClass => return transCStyleCastExprClass(c, scope, @as(*const clang.CStyleCastExpr, @ptrCast(stmt)), result_used), + .DeclStmtClass => return transDeclStmt(c, 
scope, @as(*const clang.DeclStmt, @ptrCast(stmt))), + .DeclRefExprClass => return transDeclRefExpr(c, scope, @as(*const clang.DeclRefExpr, @ptrCast(stmt))), + .ImplicitCastExprClass => return transImplicitCastExpr(c, scope, @as(*const clang.ImplicitCastExpr, @ptrCast(stmt)), result_used), + .IntegerLiteralClass => return transIntegerLiteral(c, scope, @as(*const clang.IntegerLiteral, @ptrCast(stmt)), result_used, .with_as), + .ReturnStmtClass => return transReturnStmt(c, scope, @as(*const clang.ReturnStmt, @ptrCast(stmt))), + .StringLiteralClass => return transStringLiteral(c, scope, @as(*const clang.StringLiteral, @ptrCast(stmt)), result_used), .ParenExprClass => { - const expr = try transExpr(c, scope, @ptrCast(*const clang.ParenExpr, stmt).getSubExpr(), .used); + const expr = try transExpr(c, scope, @as(*const clang.ParenExpr, @ptrCast(stmt)).getSubExpr(), .used); return maybeSuppressResult(c, result_used, expr); }, - .InitListExprClass => return transInitListExpr(c, scope, @ptrCast(*const clang.InitListExpr, stmt), result_used), - .ImplicitValueInitExprClass => return transImplicitValueInitExpr(c, scope, @ptrCast(*const clang.Expr, stmt)), - .IfStmtClass => return transIfStmt(c, scope, @ptrCast(*const clang.IfStmt, stmt)), - .WhileStmtClass => return transWhileLoop(c, scope, @ptrCast(*const clang.WhileStmt, stmt)), - .DoStmtClass => return transDoWhileLoop(c, scope, @ptrCast(*const clang.DoStmt, stmt)), + .InitListExprClass => return transInitListExpr(c, scope, @as(*const clang.InitListExpr, @ptrCast(stmt)), result_used), + .ImplicitValueInitExprClass => return transImplicitValueInitExpr(c, scope, @as(*const clang.Expr, @ptrCast(stmt))), + .IfStmtClass => return transIfStmt(c, scope, @as(*const clang.IfStmt, @ptrCast(stmt))), + .WhileStmtClass => return transWhileLoop(c, scope, @as(*const clang.WhileStmt, @ptrCast(stmt))), + .DoStmtClass => return transDoWhileLoop(c, scope, @as(*const clang.DoStmt, @ptrCast(stmt))), .NullStmtClass => { return Tag.empty_block.init(); }, .ContinueStmtClass => return Tag.@"continue".init(), .BreakStmtClass => return Tag.@"break".init(), - .ForStmtClass => return transForLoop(c, scope, @ptrCast(*const clang.ForStmt, stmt)), - .FloatingLiteralClass => return transFloatingLiteral(c, @ptrCast(*const clang.FloatingLiteral, stmt), result_used), + .ForStmtClass => return transForLoop(c, scope, @as(*const clang.ForStmt, @ptrCast(stmt))), + .FloatingLiteralClass => return transFloatingLiteral(c, @as(*const clang.FloatingLiteral, @ptrCast(stmt)), result_used), .ConditionalOperatorClass => { - return transConditionalOperator(c, scope, @ptrCast(*const clang.ConditionalOperator, stmt), result_used); + return transConditionalOperator(c, scope, @as(*const clang.ConditionalOperator, @ptrCast(stmt)), result_used); }, .BinaryConditionalOperatorClass => { - return transBinaryConditionalOperator(c, scope, @ptrCast(*const clang.BinaryConditionalOperator, stmt), result_used); + return transBinaryConditionalOperator(c, scope, @as(*const clang.BinaryConditionalOperator, @ptrCast(stmt)), result_used); }, - .SwitchStmtClass => return transSwitch(c, scope, @ptrCast(*const clang.SwitchStmt, stmt)), + .SwitchStmtClass => return transSwitch(c, scope, @as(*const clang.SwitchStmt, @ptrCast(stmt))), .CaseStmtClass, .DefaultStmtClass => { return fail(c, error.UnsupportedTranslation, stmt.getBeginLoc(), "TODO complex switch", .{}); }, - .ConstantExprClass => return transConstantExpr(c, scope, @ptrCast(*const clang.Expr, stmt), result_used), - .PredefinedExprClass => return 
transPredefinedExpr(c, scope, @ptrCast(*const clang.PredefinedExpr, stmt), result_used), - .CharacterLiteralClass => return transCharLiteral(c, scope, @ptrCast(*const clang.CharacterLiteral, stmt), result_used, .with_as), - .StmtExprClass => return transStmtExpr(c, scope, @ptrCast(*const clang.StmtExpr, stmt), result_used), - .MemberExprClass => return transMemberExpr(c, scope, @ptrCast(*const clang.MemberExpr, stmt), result_used), - .ArraySubscriptExprClass => return transArrayAccess(c, scope, @ptrCast(*const clang.ArraySubscriptExpr, stmt), result_used), - .CallExprClass => return transCallExpr(c, scope, @ptrCast(*const clang.CallExpr, stmt), result_used), - .UnaryExprOrTypeTraitExprClass => return transUnaryExprOrTypeTraitExpr(c, scope, @ptrCast(*const clang.UnaryExprOrTypeTraitExpr, stmt), result_used), - .UnaryOperatorClass => return transUnaryOperator(c, scope, @ptrCast(*const clang.UnaryOperator, stmt), result_used), - .CompoundAssignOperatorClass => return transCompoundAssignOperator(c, scope, @ptrCast(*const clang.CompoundAssignOperator, stmt), result_used), + .ConstantExprClass => return transConstantExpr(c, scope, @as(*const clang.Expr, @ptrCast(stmt)), result_used), + .PredefinedExprClass => return transPredefinedExpr(c, scope, @as(*const clang.PredefinedExpr, @ptrCast(stmt)), result_used), + .CharacterLiteralClass => return transCharLiteral(c, scope, @as(*const clang.CharacterLiteral, @ptrCast(stmt)), result_used, .with_as), + .StmtExprClass => return transStmtExpr(c, scope, @as(*const clang.StmtExpr, @ptrCast(stmt)), result_used), + .MemberExprClass => return transMemberExpr(c, scope, @as(*const clang.MemberExpr, @ptrCast(stmt)), result_used), + .ArraySubscriptExprClass => return transArrayAccess(c, scope, @as(*const clang.ArraySubscriptExpr, @ptrCast(stmt)), result_used), + .CallExprClass => return transCallExpr(c, scope, @as(*const clang.CallExpr, @ptrCast(stmt)), result_used), + .UnaryExprOrTypeTraitExprClass => return transUnaryExprOrTypeTraitExpr(c, scope, @as(*const clang.UnaryExprOrTypeTraitExpr, @ptrCast(stmt)), result_used), + .UnaryOperatorClass => return transUnaryOperator(c, scope, @as(*const clang.UnaryOperator, @ptrCast(stmt)), result_used), + .CompoundAssignOperatorClass => return transCompoundAssignOperator(c, scope, @as(*const clang.CompoundAssignOperator, @ptrCast(stmt)), result_used), .OpaqueValueExprClass => { - const source_expr = @ptrCast(*const clang.OpaqueValueExpr, stmt).getSourceExpr().?; + const source_expr = @as(*const clang.OpaqueValueExpr, @ptrCast(stmt)).getSourceExpr().?; const expr = try transExpr(c, scope, source_expr, .used); return maybeSuppressResult(c, result_used, expr); }, - .OffsetOfExprClass => return transOffsetOfExpr(c, @ptrCast(*const clang.OffsetOfExpr, stmt), result_used), + .OffsetOfExprClass => return transOffsetOfExpr(c, @as(*const clang.OffsetOfExpr, @ptrCast(stmt)), result_used), .CompoundLiteralExprClass => { - const compound_literal = @ptrCast(*const clang.CompoundLiteralExpr, stmt); + const compound_literal = @as(*const clang.CompoundLiteralExpr, @ptrCast(stmt)); return transExpr(c, scope, compound_literal.getInitializer(), result_used); }, .GenericSelectionExprClass => { - const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, stmt); + const gen_sel = @as(*const clang.GenericSelectionExpr, @ptrCast(stmt)); return transExpr(c, scope, gen_sel.getResultExpr(), result_used); }, .ConvertVectorExprClass => { - const conv_vec = @ptrCast(*const clang.ConvertVectorExpr, stmt); + const conv_vec = @as(*const 
clang.ConvertVectorExpr, @ptrCast(stmt)); const conv_vec_node = try transConvertVectorExpr(c, scope, conv_vec); return maybeSuppressResult(c, result_used, conv_vec_node); }, .ShuffleVectorExprClass => { - const shuffle_vec_expr = @ptrCast(*const clang.ShuffleVectorExpr, stmt); + const shuffle_vec_expr = @as(*const clang.ShuffleVectorExpr, @ptrCast(stmt)); const shuffle_vec_node = try transShuffleVectorExpr(c, scope, shuffle_vec_expr); return maybeSuppressResult(c, result_used, shuffle_vec_node); }, .ChooseExprClass => { - const choose_expr = @ptrCast(*const clang.ChooseExpr, stmt); + const choose_expr = @as(*const clang.ChooseExpr, @ptrCast(stmt)); return transExpr(c, scope, choose_expr.getChosenSubExpr(), result_used); }, // When adding new cases here, see comment for maybeBlockify() @@ -1421,21 +1421,21 @@ fn transConvertVectorExpr( scope: *Scope, expr: *const clang.ConvertVectorExpr, ) TransError!Node { - const base_stmt = @ptrCast(*const clang.Stmt, expr); + const base_stmt = @as(*const clang.Stmt, @ptrCast(expr)); var block_scope = try Scope.Block.init(c, scope, true); defer block_scope.deinit(); const src_expr = expr.getSrcExpr(); const src_type = qualTypeCanon(src_expr.getType()); - const src_vector_ty = @ptrCast(*const clang.VectorType, src_type); + const src_vector_ty = @as(*const clang.VectorType, @ptrCast(src_type)); const src_element_qt = src_vector_ty.getElementType(); const src_expr_node = try transExpr(c, &block_scope.base, src_expr, .used); const dst_qt = expr.getTypeSourceInfo_getType(); const dst_type_node = try transQualType(c, &block_scope.base, dst_qt, base_stmt.getBeginLoc()); - const dst_vector_ty = @ptrCast(*const clang.VectorType, qualTypeCanon(dst_qt)); + const dst_vector_ty = @as(*const clang.VectorType, @ptrCast(qualTypeCanon(dst_qt))); const num_elements = dst_vector_ty.getNumElements(); const dst_element_qt = dst_vector_ty.getElementType(); @@ -1490,7 +1490,7 @@ fn makeShuffleMask(c: *Context, scope: *Scope, expr: *const clang.ShuffleVectorE const init_list = try c.arena.alloc(Node, mask_len); for (init_list, 0..) 
|*init, i| { - const index_expr = try transExprCoercing(c, scope, expr.getExpr(@intCast(c_uint, i + 2)), .used); + const index_expr = try transExprCoercing(c, scope, expr.getExpr(@as(c_uint, @intCast(i + 2))), .used); const converted_index = try Tag.helpers_shuffle_vector_index.create(c.arena, .{ .lhs = index_expr, .rhs = vector_len }); init.* = converted_index; } @@ -1514,7 +1514,7 @@ fn transShuffleVectorExpr( scope: *Scope, expr: *const clang.ShuffleVectorExpr, ) TransError!Node { - const base_expr = @ptrCast(*const clang.Expr, expr); + const base_expr = @as(*const clang.Expr, @ptrCast(expr)); const num_subexprs = expr.getNumSubExprs(); if (num_subexprs < 3) return fail(c, error.UnsupportedTranslation, base_expr.getBeginLoc(), "ShuffleVector needs at least 1 index", .{}); @@ -1545,7 +1545,7 @@ fn transSimpleOffsetOfExpr(c: *Context, expr: *const clang.OffsetOfExpr) TransEr if (c.decl_table.get(@intFromPtr(record_decl.getCanonicalDecl()))) |type_name| { const type_node = try Tag.type.create(c.arena, type_name); - var raw_field_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin()); + var raw_field_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(field_decl)).getName_bytes_begin()); const quoted_field_name = try std.fmt.allocPrint(c.arena, "\"{s}\"", .{raw_field_name}); const field_name_node = try Tag.string_literal.create(c.arena, quoted_field_name); @@ -1829,7 +1829,7 @@ fn transCStyleCastExprClass( stmt: *const clang.CStyleCastExpr, result_used: ResultUsed, ) TransError!Node { - const cast_expr = @ptrCast(*const clang.CastExpr, stmt); + const cast_expr = @as(*const clang.CastExpr, @ptrCast(stmt)); const sub_expr = stmt.getSubExpr(); const dst_type = stmt.getType(); const src_type = sub_expr.getType(); @@ -1838,7 +1838,7 @@ fn transCStyleCastExprClass( const cast_node = if (cast_expr.getCastKind() == .ToUnion) blk: { const field_decl = cast_expr.getTargetFieldForToUnionCast(dst_type, src_type).?; // C syntax error if target field is null - const field_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin()); + const field_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(field_decl)).getName_bytes_begin()); const union_ty = try transQualType(c, scope, dst_type, loc); @@ -1923,12 +1923,12 @@ fn transDeclStmtOne( ) TransError!void { switch (decl.getKind()) { .Var => { - const var_decl = @ptrCast(*const clang.VarDecl, decl); + const var_decl = @as(*const clang.VarDecl, @ptrCast(decl)); const decl_init = var_decl.getInit(); const loc = decl.getLocation(); const qual_type = var_decl.getTypeSourceInfo_getType(); - const name = try c.str(@ptrCast(*const clang.NamedDecl, var_decl).getName_bytes_begin()); + const name = try c.str(@as(*const clang.NamedDecl, @ptrCast(var_decl)).getName_bytes_begin()); const mangled_name = try block_scope.makeMangledName(c, name); if (var_decl.getStorageClass() == .Extern) { @@ -1945,7 +1945,7 @@ fn transDeclStmtOne( var init_node = if (decl_init) |expr| if (expr.getStmtClass() == .StringLiteralClass) - try transStringLiteralInitializer(c, @ptrCast(*const clang.StringLiteral, expr), type_node) + try transStringLiteralInitializer(c, @as(*const clang.StringLiteral, @ptrCast(expr)), type_node) else try transExprCoercing(c, scope, expr, .used) else if (is_static_local) @@ -1980,7 +1980,7 @@ fn transDeclStmtOne( const cleanup_attr = var_decl.getCleanupAttribute(); if (cleanup_attr) |fn_decl| { - const cleanup_fn_name = try c.str(@ptrCast(*const clang.NamedDecl, fn_decl).getName_bytes_begin()); + const 
cleanup_fn_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(fn_decl)).getName_bytes_begin()); const fn_id = try Tag.identifier.create(c.arena, cleanup_fn_name); const varname = try Tag.identifier.create(c.arena, mangled_name); @@ -1995,16 +1995,16 @@ fn transDeclStmtOne( } }, .Typedef => { - try transTypeDef(c, scope, @ptrCast(*const clang.TypedefNameDecl, decl)); + try transTypeDef(c, scope, @as(*const clang.TypedefNameDecl, @ptrCast(decl))); }, .Record => { - try transRecordDecl(c, scope, @ptrCast(*const clang.RecordDecl, decl)); + try transRecordDecl(c, scope, @as(*const clang.RecordDecl, @ptrCast(decl))); }, .Enum => { - try transEnumDecl(c, scope, @ptrCast(*const clang.EnumDecl, decl)); + try transEnumDecl(c, scope, @as(*const clang.EnumDecl, @ptrCast(decl))); }, .Function => { - try visitFnDecl(c, @ptrCast(*const clang.FunctionDecl, decl)); + try visitFnDecl(c, @as(*const clang.FunctionDecl, @ptrCast(decl))); }, else => { const decl_name = try c.str(decl.getDeclKindName()); @@ -2030,15 +2030,15 @@ fn transDeclRefExpr( expr: *const clang.DeclRefExpr, ) TransError!Node { const value_decl = expr.getDecl(); - const name = try c.str(@ptrCast(*const clang.NamedDecl, value_decl).getName_bytes_begin()); + const name = try c.str(@as(*const clang.NamedDecl, @ptrCast(value_decl)).getName_bytes_begin()); const mangled_name = scope.getAlias(name); - var ref_expr = if (cIsFunctionDeclRef(@ptrCast(*const clang.Expr, expr))) + var ref_expr = if (cIsFunctionDeclRef(@as(*const clang.Expr, @ptrCast(expr)))) try Tag.fn_identifier.create(c.arena, mangled_name) else try Tag.identifier.create(c.arena, mangled_name); - if (@ptrCast(*const clang.Decl, value_decl).getKind() == .Var) { - const var_decl = @ptrCast(*const clang.VarDecl, value_decl); + if (@as(*const clang.Decl, @ptrCast(value_decl)).getKind() == .Var) { + const var_decl = @as(*const clang.VarDecl, @ptrCast(value_decl)); if (var_decl.isStaticLocal()) { ref_expr = try Tag.field_access.create(c.arena, .{ .lhs = ref_expr, @@ -2057,7 +2057,7 @@ fn transImplicitCastExpr( result_used: ResultUsed, ) TransError!Node { const sub_expr = expr.getSubExpr(); - const dest_type = getExprQualType(c, @ptrCast(*const clang.Expr, expr)); + const dest_type = getExprQualType(c, @as(*const clang.Expr, @ptrCast(expr))); const src_type = getExprQualType(c, sub_expr); switch (expr.getCastKind()) { .BitCast, .FloatingCast, .FloatingToIntegral, .IntegralToFloating, .IntegralCast, .PointerToIntegral, .IntegralToPointer => { @@ -2111,7 +2111,7 @@ fn transImplicitCastExpr( else => |kind| return fail( c, error.UnsupportedTranslation, - @ptrCast(*const clang.Stmt, expr).getBeginLoc(), + @as(*const clang.Stmt, @ptrCast(expr)).getBeginLoc(), "unsupported CastKind {s}", .{@tagName(kind)}, ), @@ -2141,9 +2141,9 @@ fn transBoolExpr( expr: *const clang.Expr, used: ResultUsed, ) TransError!Node { - if (@ptrCast(*const clang.Stmt, expr).getStmtClass() == .IntegerLiteralClass) { + if (@as(*const clang.Stmt, @ptrCast(expr)).getStmtClass() == .IntegerLiteralClass) { var signum: c_int = undefined; - if (!(@ptrCast(*const clang.IntegerLiteral, expr).getSignum(&signum, c.clang_context))) { + if (!(@as(*const clang.IntegerLiteral, @ptrCast(expr)).getSignum(&signum, c.clang_context))) { return fail(c, error.UnsupportedTranslation, expr.getBeginLoc(), "invalid integer literal", .{}); } const is_zero = signum == 0; @@ -2168,20 +2168,20 @@ fn exprIsBooleanType(expr: *const clang.Expr) bool { fn exprIsNarrowStringLiteral(expr: *const clang.Expr) bool { switch (expr.getStmtClass()) { 
.StringLiteralClass => { - const string_lit = @ptrCast(*const clang.StringLiteral, expr); + const string_lit = @as(*const clang.StringLiteral, @ptrCast(expr)); return string_lit.getCharByteWidth() == 1; }, .PredefinedExprClass => return true, .UnaryOperatorClass => { - const op_expr = @ptrCast(*const clang.UnaryOperator, expr).getSubExpr(); + const op_expr = @as(*const clang.UnaryOperator, @ptrCast(expr)).getSubExpr(); return exprIsNarrowStringLiteral(op_expr); }, .ParenExprClass => { - const op_expr = @ptrCast(*const clang.ParenExpr, expr).getSubExpr(); + const op_expr = @as(*const clang.ParenExpr, @ptrCast(expr)).getSubExpr(); return exprIsNarrowStringLiteral(op_expr); }, .GenericSelectionExprClass => { - const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, expr); + const gen_sel = @as(*const clang.GenericSelectionExpr, @ptrCast(expr)); return exprIsNarrowStringLiteral(gen_sel.getResultExpr()); }, else => return false, @@ -2190,11 +2190,11 @@ fn exprIsNarrowStringLiteral(expr: *const clang.Expr) bool { fn exprIsFlexibleArrayRef(c: *Context, expr: *const clang.Expr) bool { if (expr.getStmtClass() == .MemberExprClass) { - const member_expr = @ptrCast(*const clang.MemberExpr, expr); + const member_expr = @as(*const clang.MemberExpr, @ptrCast(expr)); const member_decl = member_expr.getMemberDecl(); - const decl_kind = @ptrCast(*const clang.Decl, member_decl).getKind(); + const decl_kind = @as(*const clang.Decl, @ptrCast(member_decl)).getKind(); if (decl_kind == .Field) { - const field_decl = @ptrCast(*const clang.FieldDecl, member_decl); + const field_decl = @as(*const clang.FieldDecl, @ptrCast(member_decl)); return isFlexibleArrayFieldDecl(c, field_decl); } } @@ -2229,7 +2229,7 @@ fn finishBoolExpr( ) TransError!Node { switch (ty.getTypeClass()) { .Builtin => { - const builtin_ty = @ptrCast(*const clang.BuiltinType, ty); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty)); switch (builtin_ty.getKind()) { .Bool => return node, @@ -2273,7 +2273,7 @@ fn finishBoolExpr( return Tag.not_equal.create(c.arena, .{ .lhs = node, .rhs = Tag.null_literal.init() }); }, .Typedef => { - const typedef_ty = @ptrCast(*const clang.TypedefType, ty); + const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty)); const typedef_decl = typedef_ty.getDecl(); const underlying_type = typedef_decl.getUnderlyingType(); return finishBoolExpr(c, scope, loc, underlying_type.getTypePtr(), node, used); @@ -2283,7 +2283,7 @@ fn finishBoolExpr( return Tag.not_equal.create(c.arena, .{ .lhs = node, .rhs = Tag.zero_literal.init() }); }, .Elaborated => { - const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty); + const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty)); const named_type = elaborated_ty.getNamedType(); return finishBoolExpr(c, scope, loc, named_type.getTypePtr(), node, used); }, @@ -2325,7 +2325,7 @@ fn transIntegerLiteral( // But the first step is to be correct, and the next step is to make the output more elegant. 
// @as(T, x) - const expr_base = @ptrCast(*const clang.Expr, expr); + const expr_base = @as(*const clang.Expr, @ptrCast(expr)); const ty_node = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()); const rhs = try transCreateNodeAPInt(c, eval_result.Val.getInt()); const as = try Tag.as.create(c.arena, .{ .lhs = ty_node, .rhs = rhs }); @@ -2374,7 +2374,7 @@ fn transStringLiteral( const str_type = @tagName(stmt.getKind()); const name = try std.fmt.allocPrint(c.arena, "zig.{s}_string_{d}", .{ str_type, c.getMangle() }); - const expr_base = @ptrCast(*const clang.Expr, stmt); + const expr_base = @as(*const clang.Expr, @ptrCast(stmt)); const array_type = try transQualTypeInitialized(c, scope, expr_base.getType(), expr_base, expr_base.getBeginLoc()); const lit_array = try transStringLiteralInitializer(c, stmt, array_type); const decl = try Tag.var_simple.create(c.arena, .{ .name = name, .init = lit_array }); @@ -2451,11 +2451,11 @@ fn transStringLiteralInitializer( /// both operands resolve to addresses. The C standard requires that both operands /// point to elements of the same array object, but we do not verify that here. fn cIsPointerDiffExpr(stmt: *const clang.BinaryOperator) bool { - const lhs = @ptrCast(*const clang.Stmt, stmt.getLHS()); - const rhs = @ptrCast(*const clang.Stmt, stmt.getRHS()); + const lhs = @as(*const clang.Stmt, @ptrCast(stmt.getLHS())); + const rhs = @as(*const clang.Stmt, @ptrCast(stmt.getRHS())); return stmt.getOpcode() == .Sub and - qualTypeIsPtr(@ptrCast(*const clang.Expr, lhs).getType()) and - qualTypeIsPtr(@ptrCast(*const clang.Expr, rhs).getType()); + qualTypeIsPtr(@as(*const clang.Expr, @ptrCast(lhs)).getType()) and + qualTypeIsPtr(@as(*const clang.Expr, @ptrCast(rhs)).getType()); } fn cIsEnum(qt: clang.QualType) bool { @@ -2472,7 +2472,7 @@ fn cIsVector(qt: clang.QualType) bool { fn cIntTypeForEnum(enum_qt: clang.QualType) clang.QualType { assert(cIsEnum(enum_qt)); const ty = enum_qt.getCanonicalType().getTypePtr(); - const enum_ty = @ptrCast(*const clang.EnumType, ty); + const enum_ty = @as(*const clang.EnumType, @ptrCast(ty)); const enum_decl = enum_ty.getDecl(); return enum_decl.getIntegerType(); } @@ -2588,29 +2588,29 @@ fn transCCast( } fn transExpr(c: *Context, scope: *Scope, expr: *const clang.Expr, used: ResultUsed) TransError!Node { - return transStmt(c, scope, @ptrCast(*const clang.Stmt, expr), used); + return transStmt(c, scope, @as(*const clang.Stmt, @ptrCast(expr)), used); } /// Same as `transExpr` but with the knowledge that the operand will be type coerced, and therefore /// an `@as` would be redundant. This is used to prevent redundant `@as` in integer literals. 
 fn transExprCoercing(c: *Context, scope: *Scope, expr: *const clang.Expr, used: ResultUsed) TransError!Node {
-    switch (@ptrCast(*const clang.Stmt, expr).getStmtClass()) {
+    switch (@as(*const clang.Stmt, @ptrCast(expr)).getStmtClass()) {
         .IntegerLiteralClass => {
-            return transIntegerLiteral(c, scope, @ptrCast(*const clang.IntegerLiteral, expr), .used, .no_as);
+            return transIntegerLiteral(c, scope, @as(*const clang.IntegerLiteral, @ptrCast(expr)), .used, .no_as);
         },
         .CharacterLiteralClass => {
-            return transCharLiteral(c, scope, @ptrCast(*const clang.CharacterLiteral, expr), .used, .no_as);
+            return transCharLiteral(c, scope, @as(*const clang.CharacterLiteral, @ptrCast(expr)), .used, .no_as);
         },
         .UnaryOperatorClass => {
-            const un_expr = @ptrCast(*const clang.UnaryOperator, expr);
+            const un_expr = @as(*const clang.UnaryOperator, @ptrCast(expr));
             if (un_expr.getOpcode() == .Extension) {
                 return transExprCoercing(c, scope, un_expr.getSubExpr(), used);
             }
         },
         .ImplicitCastExprClass => {
-            const cast_expr = @ptrCast(*const clang.ImplicitCastExpr, expr);
+            const cast_expr = @as(*const clang.ImplicitCastExpr, @ptrCast(expr));
             const sub_expr = cast_expr.getSubExpr();
-            switch (@ptrCast(*const clang.Stmt, sub_expr).getStmtClass()) {
+            switch (@as(*const clang.Stmt, @ptrCast(sub_expr)).getStmtClass()) {
                 .IntegerLiteralClass, .CharacterLiteralClass => switch (cast_expr.getCastKind()) {
                     .IntegralToFloating => return transExprCoercing(c, scope, sub_expr, used),
                     .IntegralCast => {
@@ -2634,15 +2634,15 @@ fn literalFitsInType(c: *Context, expr: *const clang.Expr, qt: clang.QualType) b
     const is_signed = cIsSignedInteger(qt);
     const width_max_int = (@as(u64, 1) << math.lossyCast(u6, width - @intFromBool(is_signed))) - 1;
-    switch (@ptrCast(*const clang.Stmt, expr).getStmtClass()) {
+    switch (@as(*const clang.Stmt, @ptrCast(expr)).getStmtClass()) {
         .CharacterLiteralClass => {
-            const char_lit = @ptrCast(*const clang.CharacterLiteral, expr);
+            const char_lit = @as(*const clang.CharacterLiteral, @ptrCast(expr));
             const val = char_lit.getValue();
             // If the val is less than the max int then it fits.
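            // A worked example of the `width_max_int` bound computed above, assuming
            // typical C widths (32-bit signed int, 8-bit unsigned char):
            //   signed, width 32:   (1 << (32 - 1)) - 1 = 2147483647
            //   unsigned, width 8:  (1 << 8) - 1        = 255
            // so a character literal with value 200 fits an 8-bit unsigned type,
            // while 300 does not.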
return val <= width_max_int; }, .IntegerLiteralClass => { - const int_lit = @ptrCast(*const clang.IntegerLiteral, expr); + const int_lit = @as(*const clang.IntegerLiteral, @ptrCast(expr)); var eval_result: clang.ExprEvalResult = undefined; if (!int_lit.EvaluateAsInt(&eval_result, c.clang_context)) { return false; @@ -2695,7 +2695,7 @@ fn transInitListExprRecord( // Generate the field assignment expression: // .field_name = expr - var raw_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin()); + var raw_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(field_decl)).getName_bytes_begin()); if (field_decl.isAnonymousStructOrUnion()) { const name = c.decl_table.get(@intFromPtr(field_decl.getCanonicalDecl())).?; raw_name = try c.arena.dupe(u8, name); @@ -2736,8 +2736,8 @@ fn transInitListExprArray( const child_qt = arr_type.getElementType(); const child_type = try transQualType(c, scope, child_qt, loc); const init_count = expr.getNumInits(); - assert(@ptrCast(*const clang.Type, arr_type).isConstantArrayType()); - const const_arr_ty = @ptrCast(*const clang.ConstantArrayType, arr_type); + assert(@as(*const clang.Type, @ptrCast(arr_type)).isConstantArrayType()); + const const_arr_ty = @as(*const clang.ConstantArrayType, @ptrCast(arr_type)); const size_ap_int = const_arr_ty.getSize(); const all_count = size_ap_int.getLimitedValue(usize); const leftover_count = all_count - init_count; @@ -2757,7 +2757,7 @@ fn transInitListExprArray( const init_list = try c.arena.alloc(Node, init_count); for (init_list, 0..) |*init, i| { - const elem_expr = expr.getInit(@intCast(c_uint, i)); + const elem_expr = expr.getInit(@as(c_uint, @intCast(i))); init.* = try transExprCoercing(c, scope, elem_expr, .used); } const init_node = try Tag.array_init.create(c.arena, .{ @@ -2791,8 +2791,8 @@ fn transInitListExprVector( loc: clang.SourceLocation, expr: *const clang.InitListExpr, ) TransError!Node { - const qt = getExprQualType(c, @ptrCast(*const clang.Expr, expr)); - const vector_ty = @ptrCast(*const clang.VectorType, qualTypeCanon(qt)); + const qt = getExprQualType(c, @as(*const clang.Expr, @ptrCast(expr))); + const vector_ty = @as(*const clang.VectorType, @ptrCast(qualTypeCanon(qt))); const init_count = expr.getNumInits(); const num_elements = vector_ty.getNumElements(); @@ -2822,7 +2822,7 @@ fn transInitListExprVector( var i: usize = 0; while (i < init_count) : (i += 1) { const mangled_name = try block_scope.makeMangledName(c, "tmp"); - const init_expr = expr.getInit(@intCast(c_uint, i)); + const init_expr = expr.getInit(@as(c_uint, @intCast(i))); const tmp_decl_node = try Tag.var_simple.create(c.arena, .{ .name = mangled_name, .init = try transExpr(c, &block_scope.base, init_expr, .used), @@ -2860,9 +2860,9 @@ fn transInitListExpr( expr: *const clang.InitListExpr, used: ResultUsed, ) TransError!Node { - const qt = getExprQualType(c, @ptrCast(*const clang.Expr, expr)); + const qt = getExprQualType(c, @as(*const clang.Expr, @ptrCast(expr))); var qual_type = qt.getTypePtr(); - const source_loc = @ptrCast(*const clang.Expr, expr).getBeginLoc(); + const source_loc = @as(*const clang.Expr, @ptrCast(expr)).getBeginLoc(); if (qualTypeWasDemotedToOpaque(c, qt)) { return fail(c, error.UnsupportedTranslation, source_loc, "cannot initialize opaque type", .{}); @@ -2900,7 +2900,7 @@ fn transZeroInitExpr( ) TransError!Node { switch (ty.getTypeClass()) { .Builtin => { - const builtin_ty = @ptrCast(*const clang.BuiltinType, ty); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty)); switch 
(builtin_ty.getKind()) {
                 .Bool => return Tag.false_literal.init(),
                 .Char_U,
@@ -2929,7 +2929,7 @@ fn transZeroInitExpr(
         },
         .Pointer => return Tag.null_literal.init(),
         .Typedef => {
-            const typedef_ty = @ptrCast(*const clang.TypedefType, ty);
+            const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty));
             const typedef_decl = typedef_ty.getDecl();
             return transZeroInitExpr(
                 c,
@@ -2998,7 +2998,7 @@ fn transIfStmt(
         },
     };
     defer cond_scope.deinit();
-    const cond_expr = @ptrCast(*const clang.Expr, stmt.getCond());
+    const cond_expr = @as(*const clang.Expr, @ptrCast(stmt.getCond()));
     const cond = try transBoolExpr(c, &cond_scope.base, cond_expr, .used);
     const then_stmt = stmt.getThen();
@@ -3034,7 +3034,7 @@ fn transWhileLoop(
         },
     };
     defer cond_scope.deinit();
-    const cond_expr = @ptrCast(*const clang.Expr, stmt.getCond());
+    const cond_expr = @as(*const clang.Expr, @ptrCast(stmt.getCond()));
     const cond = try transBoolExpr(c, &cond_scope.base, cond_expr, .used);
     var loop_scope = Scope{
@@ -3063,7 +3063,7 @@ fn transDoWhileLoop(
         },
     };
     defer cond_scope.deinit();
-    const cond = try transBoolExpr(c, &cond_scope.base, @ptrCast(*const clang.Expr, stmt.getCond()), .used);
+    const cond = try transBoolExpr(c, &cond_scope.base, @as(*const clang.Expr, @ptrCast(stmt.getCond())), .used);
     const if_not_break = switch (cond.tag()) {
         .true_literal => {
             const body_node = try maybeBlockify(c, scope, stmt.getBody());
@@ -3184,7 +3184,7 @@ fn transSwitch(
     const body = stmt.getBody();
     assert(body.getStmtClass() == .CompoundStmtClass);
-    const compound_stmt = @ptrCast(*const clang.CompoundStmt, body);
+    const compound_stmt = @as(*const clang.CompoundStmt, @ptrCast(body));
     var it = compound_stmt.body_begin();
     const end_it = compound_stmt.body_end();
     // Iterate over switch body and collect all cases.
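The hunks above all apply the same mechanical rewrite to pointer casts on clang nodes: the destination type is dropped from the builtin call and supplied through a wrapping `@as` instead. A minimal sketch of the pattern, using a hypothetical `node` value rather than any identifier from this change:

    // old two-argument form: destination type is the first argument
    const as_stmt = @ptrCast(*const clang.Stmt, node);
    // new one-argument form: @as pins the result type, @ptrCast takes only the operand
    const as_stmt = @as(*const clang.Stmt, @ptrCast(node));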
@@ -3211,12 +3211,12 @@ fn transSwitch( }, .DefaultStmtClass => { has_default = true; - const default_stmt = @ptrCast(*const clang.DefaultStmt, it[0]); + const default_stmt = @as(*const clang.DefaultStmt, @ptrCast(it[0])); var sub = default_stmt.getSubStmt(); while (true) switch (sub.getStmtClass()) { - .CaseStmtClass => sub = @ptrCast(*const clang.CaseStmt, sub).getSubStmt(), - .DefaultStmtClass => sub = @ptrCast(*const clang.DefaultStmt, sub).getSubStmt(), + .CaseStmtClass => sub = @as(*const clang.CaseStmt, @ptrCast(sub)).getSubStmt(), + .DefaultStmtClass => sub = @as(*const clang.DefaultStmt, @ptrCast(sub)).getSubStmt(), else => break, }; @@ -3255,11 +3255,11 @@ fn transCaseStmt(c: *Context, scope: *Scope, stmt: *const clang.Stmt, items: *st .DefaultStmtClass => { seen_default = true; items.items.len = 0; - const default_stmt = @ptrCast(*const clang.DefaultStmt, sub); + const default_stmt = @as(*const clang.DefaultStmt, @ptrCast(sub)); sub = default_stmt.getSubStmt(); }, .CaseStmtClass => { - const case_stmt = @ptrCast(*const clang.CaseStmt, sub); + const case_stmt = @as(*const clang.CaseStmt, @ptrCast(sub)); if (seen_default) { items.items.len = 0; @@ -3326,10 +3326,10 @@ fn transSwitchProngStmtInline( return; }, .CaseStmtClass => { - var sub = @ptrCast(*const clang.CaseStmt, it[0]).getSubStmt(); + var sub = @as(*const clang.CaseStmt, @ptrCast(it[0])).getSubStmt(); while (true) switch (sub.getStmtClass()) { - .CaseStmtClass => sub = @ptrCast(*const clang.CaseStmt, sub).getSubStmt(), - .DefaultStmtClass => sub = @ptrCast(*const clang.DefaultStmt, sub).getSubStmt(), + .CaseStmtClass => sub = @as(*const clang.CaseStmt, @ptrCast(sub)).getSubStmt(), + .DefaultStmtClass => sub = @as(*const clang.DefaultStmt, @ptrCast(sub)).getSubStmt(), else => break, }; const result = try transStmt(c, &block.base, sub, .unused); @@ -3340,10 +3340,10 @@ fn transSwitchProngStmtInline( } }, .DefaultStmtClass => { - var sub = @ptrCast(*const clang.DefaultStmt, it[0]).getSubStmt(); + var sub = @as(*const clang.DefaultStmt, @ptrCast(it[0])).getSubStmt(); while (true) switch (sub.getStmtClass()) { - .CaseStmtClass => sub = @ptrCast(*const clang.CaseStmt, sub).getSubStmt(), - .DefaultStmtClass => sub = @ptrCast(*const clang.DefaultStmt, sub).getSubStmt(), + .CaseStmtClass => sub = @as(*const clang.CaseStmt, @ptrCast(sub)).getSubStmt(), + .DefaultStmtClass => sub = @as(*const clang.DefaultStmt, @ptrCast(sub)).getSubStmt(), else => break, }; const result = try transStmt(c, &block.base, sub, .unused); @@ -3354,7 +3354,7 @@ fn transSwitchProngStmtInline( } }, .CompoundStmtClass => { - const result = try transCompoundStmt(c, &block.base, @ptrCast(*const clang.CompoundStmt, it[0])); + const result = try transCompoundStmt(c, &block.base, @as(*const clang.CompoundStmt, @ptrCast(it[0]))); try block.statements.append(result); if (result.isNoreturn(true)) { return; @@ -3381,7 +3381,7 @@ fn transConstantExpr(c: *Context, scope: *Scope, expr: *const clang.Expr, used: .Int => { // See comment in `transIntegerLiteral` for why this code is here. 
// @as(T, x) - const expr_base = @ptrCast(*const clang.Expr, expr); + const expr_base = @as(*const clang.Expr, @ptrCast(expr)); const as_node = try Tag.as.create(c.arena, .{ .lhs = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()), .rhs = try transCreateNodeAPInt(c, result.Val.getInt()), @@ -3400,7 +3400,7 @@ fn transPredefinedExpr(c: *Context, scope: *Scope, expr: *const clang.Predefined fn transCreateCharLitNode(c: *Context, narrow: bool, val: u32) TransError!Node { return Tag.char_literal.create(c.arena, if (narrow) - try std.fmt.allocPrint(c.arena, "'{'}'", .{std.zig.fmtEscapes(&.{@intCast(u8, val)})}) + try std.fmt.allocPrint(c.arena, "'{'}'", .{std.zig.fmtEscapes(&.{@as(u8, @intCast(val))})}) else try std.fmt.allocPrint(c.arena, "'\\u{{{x}}}'", .{val})); } @@ -3427,7 +3427,7 @@ fn transCharLiteral( } // See comment in `transIntegerLiteral` for why this code is here. // @as(T, x) - const expr_base = @ptrCast(*const clang.Expr, stmt); + const expr_base = @as(*const clang.Expr, @ptrCast(stmt)); const as_node = try Tag.as.create(c.arena, .{ .lhs = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()), .rhs = int_lit_node, @@ -3469,22 +3469,22 @@ fn transMemberExpr(c: *Context, scope: *Scope, stmt: *const clang.MemberExpr, re const member_decl = stmt.getMemberDecl(); const name = blk: { - const decl_kind = @ptrCast(*const clang.Decl, member_decl).getKind(); + const decl_kind = @as(*const clang.Decl, @ptrCast(member_decl)).getKind(); // If we're referring to a anonymous struct/enum find the bogus name // we've assigned to it during the RecordDecl translation if (decl_kind == .Field) { - const field_decl = @ptrCast(*const clang.FieldDecl, member_decl); + const field_decl = @as(*const clang.FieldDecl, @ptrCast(member_decl)); if (field_decl.isAnonymousStructOrUnion()) { const name = c.decl_table.get(@intFromPtr(field_decl.getCanonicalDecl())).?; break :blk try c.arena.dupe(u8, name); } } - const decl = @ptrCast(*const clang.NamedDecl, member_decl); + const decl = @as(*const clang.NamedDecl, @ptrCast(member_decl)); break :blk try c.str(decl.getName_bytes_begin()); }; var node = try Tag.field_access.create(c.arena, .{ .lhs = container_node, .field_name = name }); - if (exprIsFlexibleArrayRef(c, @ptrCast(*const clang.Expr, stmt))) { + if (exprIsFlexibleArrayRef(c, @as(*const clang.Expr, @ptrCast(stmt)))) { node = try Tag.call.create(c.arena, .{ .lhs = node, .args = &.{} }); } return maybeSuppressResult(c, result_used, node); @@ -3582,8 +3582,8 @@ fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscrip // Unwrap the base statement if it's an array decayed to a bare pointer type // so that we index the array itself var unwrapped_base = base_stmt; - if (@ptrCast(*const clang.Stmt, base_stmt).getStmtClass() == .ImplicitCastExprClass) { - const implicit_cast = @ptrCast(*const clang.ImplicitCastExpr, base_stmt); + if (@as(*const clang.Stmt, @ptrCast(base_stmt)).getStmtClass() == .ImplicitCastExprClass) { + const implicit_cast = @as(*const clang.ImplicitCastExpr, @ptrCast(base_stmt)); if (implicit_cast.getCastKind() == .ArrayToPointerDecay) { unwrapped_base = implicit_cast.getSubExpr(); @@ -3620,17 +3620,17 @@ fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscrip fn cIsFunctionDeclRef(expr: *const clang.Expr) bool { switch (expr.getStmtClass()) { .ParenExprClass => { - const op_expr = @ptrCast(*const clang.ParenExpr, expr).getSubExpr(); + const op_expr = @as(*const clang.ParenExpr, 
@ptrCast(expr)).getSubExpr(); return cIsFunctionDeclRef(op_expr); }, .DeclRefExprClass => { - const decl_ref = @ptrCast(*const clang.DeclRefExpr, expr); + const decl_ref = @as(*const clang.DeclRefExpr, @ptrCast(expr)); const value_decl = decl_ref.getDecl(); const qt = value_decl.getType(); return qualTypeChildIsFnProto(qt); }, .ImplicitCastExprClass => { - const implicit_cast = @ptrCast(*const clang.ImplicitCastExpr, expr); + const implicit_cast = @as(*const clang.ImplicitCastExpr, @ptrCast(expr)); const cast_kind = implicit_cast.getCastKind(); if (cast_kind == .BuiltinFnToFnPtr) return true; if (cast_kind == .FunctionToPointerDecay) { @@ -3639,12 +3639,12 @@ fn cIsFunctionDeclRef(expr: *const clang.Expr) bool { return false; }, .UnaryOperatorClass => { - const un_op = @ptrCast(*const clang.UnaryOperator, expr); + const un_op = @as(*const clang.UnaryOperator, @ptrCast(expr)); const opcode = un_op.getOpcode(); return (opcode == .AddrOf or opcode == .Deref) and cIsFunctionDeclRef(un_op.getSubExpr()); }, .GenericSelectionExprClass => { - const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, expr); + const gen_sel = @as(*const clang.GenericSelectionExpr, @ptrCast(expr)); return cIsFunctionDeclRef(gen_sel.getResultExpr()); }, else => return false, @@ -3679,11 +3679,11 @@ fn transCallExpr(c: *Context, scope: *Scope, stmt: *const clang.CallExpr, result .Proto => |fn_proto| { const param_count = fn_proto.getNumParams(); if (i < param_count) { - const param_qt = fn_proto.getParamType(@intCast(c_uint, i)); + const param_qt = fn_proto.getParamType(@as(c_uint, @intCast(i))); if (isBoolRes(arg) and cIsNativeInt(param_qt)) { arg = try Tag.int_from_bool.create(c.arena, arg); } else if (arg.tag() == .string_literal and qualTypeIsCharStar(param_qt)) { - const loc = @ptrCast(*const clang.Stmt, stmt).getBeginLoc(); + const loc = @as(*const clang.Stmt, @ptrCast(stmt)).getBeginLoc(); const dst_type_node = try transQualType(c, scope, param_qt, loc); arg = try removeCVQualifiers(c, dst_type_node, arg); } @@ -3729,10 +3729,10 @@ fn qualTypeGetFnProto(qt: clang.QualType, is_ptr: *bool) ?ClangFunctionType { ty = child_qt.getTypePtr(); } if (ty.getTypeClass() == .FunctionProto) { - return ClangFunctionType{ .Proto = @ptrCast(*const clang.FunctionProtoType, ty) }; + return ClangFunctionType{ .Proto = @as(*const clang.FunctionProtoType, @ptrCast(ty)) }; } if (ty.getTypeClass() == .FunctionNoProto) { - return ClangFunctionType{ .NoProto = @ptrCast(*const clang.FunctionType, ty) }; + return ClangFunctionType{ .NoProto = @as(*const clang.FunctionType, @ptrCast(ty)) }; } return null; } @@ -4141,9 +4141,9 @@ fn transFloatingLiteral(c: *Context, expr: *const clang.FloatingLiteral, used: R fn transBinaryConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.BinaryConditionalOperator, used: ResultUsed) TransError!Node { // GNU extension of the ternary operator where the middle expression is // omitted, the condition itself is returned if it evaluates to true - const qt = @ptrCast(*const clang.Expr, stmt).getType(); + const qt = @as(*const clang.Expr, @ptrCast(stmt)).getType(); const res_is_bool = qualTypeIsBoolean(qt); - const casted_stmt = @ptrCast(*const clang.AbstractConditionalOperator, stmt); + const casted_stmt = @as(*const clang.AbstractConditionalOperator, @ptrCast(stmt)); const cond_expr = casted_stmt.getCond(); const false_expr = casted_stmt.getFalseExpr(); @@ -4203,9 +4203,9 @@ fn transConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.Condi }; defer cond_scope.deinit(); - const qt 
= @ptrCast(*const clang.Expr, stmt).getType(); + const qt = @as(*const clang.Expr, @ptrCast(stmt)).getType(); const res_is_bool = qualTypeIsBoolean(qt); - const casted_stmt = @ptrCast(*const clang.AbstractConditionalOperator, stmt); + const casted_stmt = @as(*const clang.AbstractConditionalOperator, @ptrCast(stmt)); const cond_expr = casted_stmt.getCond(); const true_expr = casted_stmt.getTrueExpr(); const false_expr = casted_stmt.getFalseExpr(); @@ -4246,7 +4246,7 @@ fn addTopLevelDecl(c: *Context, name: []const u8, decl_node: Node) !void { fn transQualTypeInitializedStringLiteral(c: *Context, elem_ty: Node, string_lit: *const clang.StringLiteral) TypeError!Node { const string_lit_size = string_lit.getLength(); - const array_size = @intCast(usize, string_lit_size); + const array_size = @as(usize, @intCast(string_lit_size)); // incomplete array initialized with empty string, will be translated as [1]T{0} // see https://github.com/ziglang/zig/issues/8256 @@ -4266,16 +4266,16 @@ fn transQualTypeInitialized( ) TypeError!Node { const ty = qt.getTypePtr(); if (ty.getTypeClass() == .IncompleteArray) { - const incomplete_array_ty = @ptrCast(*const clang.IncompleteArrayType, ty); + const incomplete_array_ty = @as(*const clang.IncompleteArrayType, @ptrCast(ty)); const elem_ty = try transType(c, scope, incomplete_array_ty.getElementType().getTypePtr(), source_loc); switch (decl_init.getStmtClass()) { .StringLiteralClass => { - const string_lit = @ptrCast(*const clang.StringLiteral, decl_init); + const string_lit = @as(*const clang.StringLiteral, @ptrCast(decl_init)); return transQualTypeInitializedStringLiteral(c, elem_ty, string_lit); }, .InitListExprClass => { - const init_expr = @ptrCast(*const clang.InitListExpr, decl_init); + const init_expr = @as(*const clang.InitListExpr, @ptrCast(decl_init)); const size = init_expr.getNumInits(); if (init_expr.isStringLiteralInit()) { @@ -4306,7 +4306,7 @@ fn transQualTypeIntWidthOf(c: *Context, ty: clang.QualType, is_signed: bool) Typ /// Asserts the type is an integer. 
fn transTypeIntWidthOf(c: *Context, ty: *const clang.Type, is_signed: bool) TypeError!Node { assert(ty.getTypeClass() == .Builtin); - const builtin_ty = @ptrCast(*const clang.BuiltinType, ty); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty)); return Tag.type.create(c.arena, switch (builtin_ty.getKind()) { .Char_U, .Char_S, .UChar, .SChar, .Char8 => if (is_signed) "i8" else "u8", .UShort, .Short => if (is_signed) "c_short" else "c_ushort", @@ -4324,7 +4324,7 @@ fn isCBuiltinType(qt: clang.QualType, kind: clang.BuiltinTypeKind) bool { const c_type = qualTypeCanon(qt); if (c_type.getTypeClass() != .Builtin) return false; - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return builtin_ty.getKind() == kind; } @@ -4341,7 +4341,7 @@ fn qualTypeIntBitWidth(c: *Context, qt: clang.QualType) !u32 { switch (ty.getTypeClass()) { .Builtin => { - const builtin_ty = @ptrCast(*const clang.BuiltinType, ty); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty)); switch (builtin_ty.getKind()) { .Char_U, @@ -4358,9 +4358,9 @@ fn qualTypeIntBitWidth(c: *Context, qt: clang.QualType) !u32 { unreachable; }, .Typedef => { - const typedef_ty = @ptrCast(*const clang.TypedefType, ty); + const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty)); const typedef_decl = typedef_ty.getDecl(); - const type_name = try c.str(@ptrCast(*const clang.NamedDecl, typedef_decl).getName_bytes_begin()); + const type_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(typedef_decl)).getName_bytes_begin()); if (mem.eql(u8, type_name, "uint8_t") or mem.eql(u8, type_name, "int8_t")) { return 8; @@ -4396,12 +4396,12 @@ fn getExprQualType(c: *Context, expr: *const clang.Expr) clang.QualType { blk: { // If this is a C `char *`, turn it into a `const char *` if (expr.getStmtClass() != .ImplicitCastExprClass) break :blk; - const cast_expr = @ptrCast(*const clang.ImplicitCastExpr, expr); + const cast_expr = @as(*const clang.ImplicitCastExpr, @ptrCast(expr)); if (cast_expr.getCastKind() != .ArrayToPointerDecay) break :blk; const sub_expr = cast_expr.getSubExpr(); if (sub_expr.getStmtClass() != .StringLiteralClass) break :blk; const array_qt = sub_expr.getType(); - const array_type = @ptrCast(*const clang.ArrayType, array_qt.getTypePtr()); + const array_type = @as(*const clang.ArrayType, @ptrCast(array_qt.getTypePtr())); var pointee_qt = array_type.getElementType(); pointee_qt.addConst(); return c.clang_context.getPointerType(pointee_qt); @@ -4412,11 +4412,11 @@ fn getExprQualType(c: *Context, expr: *const clang.Expr) clang.QualType { fn typeIsOpaque(c: *Context, ty: *const clang.Type, loc: clang.SourceLocation) bool { switch (ty.getTypeClass()) { .Builtin => { - const builtin_ty = @ptrCast(*const clang.BuiltinType, ty); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty)); return builtin_ty.getKind() == .Void; }, .Record => { - const record_ty = @ptrCast(*const clang.RecordType, ty); + const record_ty = @as(*const clang.RecordType, @ptrCast(ty)); const record_decl = record_ty.getDecl(); const record_def = record_decl.getDefinition() orelse return true; @@ -4432,12 +4432,12 @@ fn typeIsOpaque(c: *Context, ty: *const clang.Type, loc: clang.SourceLocation) b return false; }, .Elaborated => { - const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty); + const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty)); const qt = elaborated_ty.getNamedType(); return typeIsOpaque(c, qt.getTypePtr(), loc); }, 
.Typedef => { - const typedef_ty = @ptrCast(*const clang.TypedefType, ty); + const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty)); const typedef_decl = typedef_ty.getDecl(); const underlying_type = typedef_decl.getUnderlyingType(); return typeIsOpaque(c, underlying_type.getTypePtr(), loc); @@ -4459,7 +4459,7 @@ fn qualTypeIsCharStar(qt: clang.QualType) bool { fn cIsUnqualifiedChar(qt: clang.QualType) bool { const c_type = qualTypeCanon(qt); if (c_type.getTypeClass() != .Builtin) return false; - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return switch (builtin_ty.getKind()) { .Char_S, .Char_U => true, else => false, @@ -4473,7 +4473,7 @@ fn cIsInteger(qt: clang.QualType) bool { fn cIsUnsignedInteger(qt: clang.QualType) bool { const c_type = qualTypeCanon(qt); if (c_type.getTypeClass() != .Builtin) return false; - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return switch (builtin_ty.getKind()) { .Char_U, .UChar, @@ -4492,7 +4492,7 @@ fn cIsUnsignedInteger(qt: clang.QualType) bool { fn cIntTypeToIndex(qt: clang.QualType) u8 { const c_type = qualTypeCanon(qt); assert(c_type.getTypeClass() == .Builtin); - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return switch (builtin_ty.getKind()) { .Bool, .Char_U, .Char_S, .UChar, .SChar, .Char8 => 1, .WChar_U, .WChar_S => 2, @@ -4513,9 +4513,9 @@ fn cIntTypeCmp(a: clang.QualType, b: clang.QualType) math.Order { /// Checks if expr is an integer literal >= 0 fn cIsNonNegativeIntLiteral(c: *Context, expr: *const clang.Expr) bool { - if (@ptrCast(*const clang.Stmt, expr).getStmtClass() == .IntegerLiteralClass) { + if (@as(*const clang.Stmt, @ptrCast(expr)).getStmtClass() == .IntegerLiteralClass) { var signum: c_int = undefined; - if (!(@ptrCast(*const clang.IntegerLiteral, expr).getSignum(&signum, c.clang_context))) { + if (!(@as(*const clang.IntegerLiteral, @ptrCast(expr)).getSignum(&signum, c.clang_context))) { return false; } return signum >= 0; @@ -4526,7 +4526,7 @@ fn cIsNonNegativeIntLiteral(c: *Context, expr: *const clang.Expr) bool { fn cIsSignedInteger(qt: clang.QualType) bool { const c_type = qualTypeCanon(qt); if (c_type.getTypeClass() != .Builtin) return false; - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return switch (builtin_ty.getKind()) { .SChar, .Short, @@ -4543,14 +4543,14 @@ fn cIsSignedInteger(qt: clang.QualType) bool { fn cIsNativeInt(qt: clang.QualType) bool { const c_type = qualTypeCanon(qt); if (c_type.getTypeClass() != .Builtin) return false; - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return builtin_ty.getKind() == .Int; } fn cIsFloating(qt: clang.QualType) bool { const c_type = qualTypeCanon(qt); if (c_type.getTypeClass() != .Builtin) return false; - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return switch (builtin_ty.getKind()) { .Float, .Double, @@ -4564,7 +4564,7 @@ fn cIsFloating(qt: clang.QualType) bool { fn cIsLongLongInteger(qt: clang.QualType) bool { const c_type = qualTypeCanon(qt); if (c_type.getTypeClass() != .Builtin) return false; - const builtin_ty = @ptrCast(*const 
clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return switch (builtin_ty.getKind()) { .LongLong, .ULongLong, .Int128, .UInt128 => true, else => false, @@ -4681,8 +4681,8 @@ fn transCreateNodeAPInt(c: *Context, int: *const clang.APSInt) !Node { limb_i += 2; data_i += 1; }) { - limbs[limb_i] = @truncate(u32, data[data_i]); - limbs[limb_i + 1] = @truncate(u32, data[data_i] >> 32); + limbs[limb_i] = @as(u32, @truncate(data[data_i])); + limbs[limb_i + 1] = @as(u32, @truncate(data[data_i] >> 32)); } }, else => @compileError("unimplemented"), @@ -4772,7 +4772,7 @@ fn transCreateNodeShiftOp( fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clang.SourceLocation) TypeError!Node { switch (ty.getTypeClass()) { .Builtin => { - const builtin_ty = @ptrCast(*const clang.BuiltinType, ty); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty)); return Tag.type.create(c.arena, switch (builtin_ty.getKind()) { .Void => "anyopaque", .Bool => "bool", @@ -4797,17 +4797,17 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan }); }, .FunctionProto => { - const fn_proto_ty = @ptrCast(*const clang.FunctionProtoType, ty); + const fn_proto_ty = @as(*const clang.FunctionProtoType, @ptrCast(ty)); const fn_proto = try transFnProto(c, null, fn_proto_ty, source_loc, null, false); return Node.initPayload(&fn_proto.base); }, .FunctionNoProto => { - const fn_no_proto_ty = @ptrCast(*const clang.FunctionType, ty); + const fn_no_proto_ty = @as(*const clang.FunctionType, @ptrCast(ty)); const fn_proto = try transFnNoProto(c, fn_no_proto_ty, source_loc, null, false); return Node.initPayload(&fn_proto.base); }, .Paren => { - const paren_ty = @ptrCast(*const clang.ParenType, ty); + const paren_ty = @as(*const clang.ParenType, @ptrCast(ty)); return transQualType(c, scope, paren_ty.getInnerType(), source_loc); }, .Pointer => { @@ -4832,7 +4832,7 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan return Tag.c_pointer.create(c.arena, ptr_info); }, .ConstantArray => { - const const_arr_ty = @ptrCast(*const clang.ConstantArrayType, ty); + const const_arr_ty = @as(*const clang.ConstantArrayType, @ptrCast(ty)); const size_ap_int = const_arr_ty.getSize(); const size = size_ap_int.getLimitedValue(usize); @@ -4841,7 +4841,7 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan return Tag.array_type.create(c.arena, .{ .len = size, .elem_type = elem_type }); }, .IncompleteArray => { - const incomplete_array_ty = @ptrCast(*const clang.IncompleteArrayType, ty); + const incomplete_array_ty = @as(*const clang.IncompleteArrayType, @ptrCast(ty)); const child_qt = incomplete_array_ty.getElementType(); const is_const = child_qt.isConstQualified(); @@ -4851,11 +4851,11 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan return Tag.c_pointer.create(c.arena, .{ .is_const = is_const, .is_volatile = is_volatile, .elem_type = elem_type }); }, .Typedef => { - const typedef_ty = @ptrCast(*const clang.TypedefType, ty); + const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty)); const typedef_decl = typedef_ty.getDecl(); var trans_scope = scope; - if (@ptrCast(*const clang.Decl, typedef_decl).castToNamedDecl()) |named_decl| { + if (@as(*const clang.Decl, @ptrCast(typedef_decl)).castToNamedDecl()) |named_decl| { const decl_name = try c.str(named_decl.getName_bytes_begin()); if (c.global_names.get(decl_name)) |_| trans_scope = &c.global_scope.base; if 
(builtin_typedef_map.get(decl_name)) |builtin| return Tag.type.create(c.arena, builtin); @@ -4865,11 +4865,11 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan return Tag.identifier.create(c.arena, name); }, .Record => { - const record_ty = @ptrCast(*const clang.RecordType, ty); + const record_ty = @as(*const clang.RecordType, @ptrCast(ty)); const record_decl = record_ty.getDecl(); var trans_scope = scope; - if (@ptrCast(*const clang.Decl, record_decl).castToNamedDecl()) |named_decl| { + if (@as(*const clang.Decl, @ptrCast(record_decl)).castToNamedDecl()) |named_decl| { const decl_name = try c.str(named_decl.getName_bytes_begin()); if (c.global_names.get(decl_name)) |_| trans_scope = &c.global_scope.base; } @@ -4878,11 +4878,11 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan return Tag.identifier.create(c.arena, name); }, .Enum => { - const enum_ty = @ptrCast(*const clang.EnumType, ty); + const enum_ty = @as(*const clang.EnumType, @ptrCast(ty)); const enum_decl = enum_ty.getDecl(); var trans_scope = scope; - if (@ptrCast(*const clang.Decl, enum_decl).castToNamedDecl()) |named_decl| { + if (@as(*const clang.Decl, @ptrCast(enum_decl)).castToNamedDecl()) |named_decl| { const decl_name = try c.str(named_decl.getName_bytes_begin()); if (c.global_names.get(decl_name)) |_| trans_scope = &c.global_scope.base; } @@ -4891,27 +4891,27 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan return Tag.identifier.create(c.arena, name); }, .Elaborated => { - const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty); + const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty)); return transQualType(c, scope, elaborated_ty.getNamedType(), source_loc); }, .Decayed => { - const decayed_ty = @ptrCast(*const clang.DecayedType, ty); + const decayed_ty = @as(*const clang.DecayedType, @ptrCast(ty)); return transQualType(c, scope, decayed_ty.getDecayedType(), source_loc); }, .Attributed => { - const attributed_ty = @ptrCast(*const clang.AttributedType, ty); + const attributed_ty = @as(*const clang.AttributedType, @ptrCast(ty)); return transQualType(c, scope, attributed_ty.getEquivalentType(), source_loc); }, .MacroQualified => { - const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, ty); + const macroqualified_ty = @as(*const clang.MacroQualifiedType, @ptrCast(ty)); return transQualType(c, scope, macroqualified_ty.getModifiedType(), source_loc); }, .TypeOf => { - const typeof_ty = @ptrCast(*const clang.TypeOfType, ty); + const typeof_ty = @as(*const clang.TypeOfType, @ptrCast(ty)); return transQualType(c, scope, typeof_ty.getUnmodifiedType(), source_loc); }, .TypeOfExpr => { - const typeofexpr_ty = @ptrCast(*const clang.TypeOfExprType, ty); + const typeofexpr_ty = @as(*const clang.TypeOfExprType, @ptrCast(ty)); const underlying_expr = transExpr(c, scope, typeofexpr_ty.getUnderlyingExpr(), .used) catch |err| switch (err) { error.UnsupportedTranslation => { return fail(c, error.UnsupportedType, source_loc, "unsupported underlying expression for TypeOfExpr", .{}); @@ -4921,7 +4921,7 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan return Tag.typeof.create(c.arena, underlying_expr); }, .Vector => { - const vector_ty = @ptrCast(*const clang.VectorType, ty); + const vector_ty = @as(*const clang.VectorType, @ptrCast(ty)); const num_elements = vector_ty.getNumElements(); const element_qt = vector_ty.getElementType(); return Tag.vector.create(c.arena, .{ @@ 
-4944,14 +4944,14 @@ fn qualTypeWasDemotedToOpaque(c: *Context, qt: clang.QualType) bool { const ty = qt.getTypePtr(); switch (qt.getTypeClass()) { .Typedef => { - const typedef_ty = @ptrCast(*const clang.TypedefType, ty); + const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty)); const typedef_decl = typedef_ty.getDecl(); const underlying_type = typedef_decl.getUnderlyingType(); return qualTypeWasDemotedToOpaque(c, underlying_type); }, .Record => { - const record_ty = @ptrCast(*const clang.RecordType, ty); + const record_ty = @as(*const clang.RecordType, @ptrCast(ty)); const record_decl = record_ty.getDecl(); const canonical = @intFromPtr(record_decl.getCanonicalDecl()); @@ -4967,26 +4967,26 @@ fn qualTypeWasDemotedToOpaque(c: *Context, qt: clang.QualType) bool { return false; }, .Enum => { - const enum_ty = @ptrCast(*const clang.EnumType, ty); + const enum_ty = @as(*const clang.EnumType, @ptrCast(ty)); const enum_decl = enum_ty.getDecl(); const canonical = @intFromPtr(enum_decl.getCanonicalDecl()); return c.opaque_demotes.contains(canonical); }, .Elaborated => { - const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty); + const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty)); return qualTypeWasDemotedToOpaque(c, elaborated_ty.getNamedType()); }, .Decayed => { - const decayed_ty = @ptrCast(*const clang.DecayedType, ty); + const decayed_ty = @as(*const clang.DecayedType, @ptrCast(ty)); return qualTypeWasDemotedToOpaque(c, decayed_ty.getDecayedType()); }, .Attributed => { - const attributed_ty = @ptrCast(*const clang.AttributedType, ty); + const attributed_ty = @as(*const clang.AttributedType, @ptrCast(ty)); return qualTypeWasDemotedToOpaque(c, attributed_ty.getEquivalentType()); }, .MacroQualified => { - const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, ty); + const macroqualified_ty = @as(*const clang.MacroQualifiedType, @ptrCast(ty)); return qualTypeWasDemotedToOpaque(c, macroqualified_ty.getModifiedType()); }, else => return false, @@ -4997,28 +4997,28 @@ fn isAnyopaque(qt: clang.QualType) bool { const ty = qt.getTypePtr(); switch (ty.getTypeClass()) { .Builtin => { - const builtin_ty = @ptrCast(*const clang.BuiltinType, ty); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty)); return builtin_ty.getKind() == .Void; }, .Typedef => { - const typedef_ty = @ptrCast(*const clang.TypedefType, ty); + const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty)); const typedef_decl = typedef_ty.getDecl(); return isAnyopaque(typedef_decl.getUnderlyingType()); }, .Elaborated => { - const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty); + const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty)); return isAnyopaque(elaborated_ty.getNamedType().getCanonicalType()); }, .Decayed => { - const decayed_ty = @ptrCast(*const clang.DecayedType, ty); + const decayed_ty = @as(*const clang.DecayedType, @ptrCast(ty)); return isAnyopaque(decayed_ty.getDecayedType().getCanonicalType()); }, .Attributed => { - const attributed_ty = @ptrCast(*const clang.AttributedType, ty); + const attributed_ty = @as(*const clang.AttributedType, @ptrCast(ty)); return isAnyopaque(attributed_ty.getEquivalentType().getCanonicalType()); }, .MacroQualified => { - const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, ty); + const macroqualified_ty = @as(*const clang.MacroQualifiedType, @ptrCast(ty)); return isAnyopaque(macroqualified_ty.getModifiedType().getCanonicalType()); }, else => return false, @@ -5066,7 +5066,7 @@ fn transFnProto( 
fn_decl_context: ?FnDeclContext, is_pub: bool, ) !*ast.Payload.Func { - const fn_ty = @ptrCast(*const clang.FunctionType, fn_proto_ty); + const fn_ty = @as(*const clang.FunctionType, @ptrCast(fn_proto_ty)); const cc = try transCC(c, fn_ty, source_loc); const is_var_args = fn_proto_ty.isVariadic(); return finishTransFnProto(c, fn_decl, fn_proto_ty, fn_ty, source_loc, fn_decl_context, is_var_args, cc, is_pub); @@ -5108,14 +5108,14 @@ fn finishTransFnProto( var i: usize = 0; while (i < param_count) : (i += 1) { - const param_qt = fn_proto_ty.?.getParamType(@intCast(c_uint, i)); + const param_qt = fn_proto_ty.?.getParamType(@as(c_uint, @intCast(i))); const is_noalias = param_qt.isRestrictQualified(); const param_name: ?[]const u8 = if (fn_decl) |decl| blk: { - const param = decl.getParamDecl(@intCast(c_uint, i)); - const param_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, param).getName_bytes_begin()); + const param = decl.getParamDecl(@as(c_uint, @intCast(i))); + const param_name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(param)).getName_bytes_begin()); if (param_name.len < 1) break :blk null; @@ -5576,7 +5576,7 @@ fn transPreprocessorEntities(c: *Context, unit: *clang.ASTUnit) Error!void { tok_list.items.len = 0; switch (entity.getKind()) { .MacroDefinitionKind => { - const macro = @ptrCast(*clang.MacroDefinitionRecord, entity); + const macro = @as(*clang.MacroDefinitionRecord, @ptrCast(entity)); const raw_name = macro.getName_getNameStart(); const begin_loc = macro.getSourceRange_getBegin(); @@ -6046,7 +6046,7 @@ fn escapeUnprintables(ctx: *Context, m: *MacroCtx) ![]const u8 { if (std.unicode.utf8ValidateSlice(zigified)) return zigified; const formatter = std.fmt.fmtSliceEscapeLower(zigified); - const encoded_size = @intCast(usize, std.fmt.count("{s}", .{formatter})); + const encoded_size = @as(usize, @intCast(std.fmt.count("{s}", .{formatter}))); var output = try ctx.arena.alloc(u8, encoded_size); return std.fmt.bufPrint(output, "{s}", .{formatter}) catch |err| switch (err) { error.NoSpaceLeft => unreachable, diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig index a24bff0176..50a7a79f87 100644 --- a/src/translate_c/ast.zig +++ b/src/translate_c/ast.zig @@ -393,7 +393,7 @@ pub const Node = extern union { pub fn tag(self: Node) Tag { if (self.tag_if_small_enough < Tag.no_payload_count) { - return @enumFromInt(Tag, @intCast(std.meta.Tag(Tag), self.tag_if_small_enough)); + return @as(Tag, @enumFromInt(@as(std.meta.Tag(Tag), @intCast(self.tag_if_small_enough)))); } else { return self.ptr_otherwise.tag; } @@ -778,7 +778,7 @@ pub fn render(gpa: Allocator, nodes: []const Node) !std.zig.Ast { try ctx.tokens.append(gpa, .{ .tag = .eof, - .start = @intCast(u32, ctx.buf.items.len), + .start = @as(u32, @intCast(ctx.buf.items.len)), }); return std.zig.Ast{ @@ -808,10 +808,10 @@ const Context = struct { try c.tokens.append(c.gpa, .{ .tag = tag, - .start = @intCast(u32, start_index), + .start = @as(u32, @intCast(start_index)), }); - return @intCast(u32, c.tokens.len - 1); + return @as(u32, @intCast(c.tokens.len - 1)); } fn addToken(c: *Context, tag: TokenTag, bytes: []const u8) Allocator.Error!TokenIndex { @@ -827,13 +827,13 @@ const Context = struct { fn listToSpan(c: *Context, list: []const NodeIndex) Allocator.Error!NodeSubRange { try c.extra_data.appendSlice(c.gpa, list); return NodeSubRange{ - .start = @intCast(NodeIndex, c.extra_data.items.len - list.len), - .end = @intCast(NodeIndex, c.extra_data.items.len), + .start = @as(NodeIndex, 
@intCast(c.extra_data.items.len - list.len)),
+            .end = @as(NodeIndex, @intCast(c.extra_data.items.len)),
         };
     }
     fn addNode(c: *Context, elem: std.zig.Ast.Node) Allocator.Error!NodeIndex {
-        const result = @intCast(NodeIndex, c.nodes.len);
+        const result = @as(NodeIndex, @intCast(c.nodes.len));
         try c.nodes.append(c.gpa, elem);
         return result;
     }
@@ -841,7 +841,7 @@ const Context = struct {
     fn addExtra(c: *Context, extra: anytype) Allocator.Error!NodeIndex {
         const fields = std.meta.fields(@TypeOf(extra));
         try c.extra_data.ensureUnusedCapacity(c.gpa, fields.len);
-        const result = @intCast(u32, c.extra_data.items.len);
+        const result = @as(u32, @intCast(c.extra_data.items.len));
         inline for (fields) |field| {
             comptime std.debug.assert(field.type == NodeIndex);
             c.extra_data.appendAssumeCapacity(@field(extra, field.name));
diff --git a/src/type.zig b/src/type.zig
index 280c292314..e4ae2d2c35 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -807,7 +807,7 @@ pub const Type = struct {
         return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
             .ptr_type => |ptr_type| {
                 if (ptr_type.flags.alignment.toByteUnitsOptional()) |a| {
-                    return @intCast(u32, a);
+                    return @as(u32, @intCast(a));
                 } else if (opt_sema) |sema| {
                     const res = try ptr_type.child.toType().abiAlignmentAdvanced(mod, .{ .sema = sema });
                     return res.scalar;
@@ -886,7 +886,7 @@ pub const Type = struct {
             },
             .vector_type => |vector_type| {
                 const bits_u64 = try bitSizeAdvanced(vector_type.child.toType(), mod, opt_sema);
-                const bits = @intCast(u32, bits_u64);
+                const bits = @as(u32, @intCast(bits_u64));
                 const bytes = ((bits * vector_type.len) + 7) / 8;
                 const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
                 return AbiAlignmentAdvanced{ .scalar = alignment };
@@ -901,7 +901,7 @@ pub const Type = struct {
             // represents machine code; not a pointer
             .func_type => |func_type| return AbiAlignmentAdvanced{ .scalar = if (func_type.alignment.toByteUnitsOptional()) |a|
-                @intCast(u32, a)
+                @as(u32, @intCast(a))
             else
                 target_util.defaultFunctionAlignment(target),
             },
@@ -1015,7 +1015,7 @@ pub const Type = struct {
                     else => |e| return e,
                 })) continue;
-                const field_align = @intCast(u32, field.abi_align.toByteUnitsOptional() orelse
+                const field_align = @as(u32, @intCast(field.abi_align.toByteUnitsOptional() orelse
                     switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
                         .scalar => |a| a,
                         .val => switch (strat) {
@@ -1026,7 +1026,7 @@ pub const Type = struct {
                             .storage = .{ .lazy_align = ty.toIntern() },
                         } })).toValue() },
                     },
-                });
+                }));
                 big_align = @max(big_align, field_align);
                 // This logic is duplicated in Module.Struct.Field.alignment.
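The integer casts in the surrounding src/type.zig hunks follow the same shape as the pointer casts earlier in the patch: `@intCast` loses its destination-type argument and the enclosing `@as` carries the result type instead. A small sketch, with `byte_units` standing in for a hypothetical `u64` alignment value:

    // old two-argument form
    const field_alignment = @intCast(u32, byte_units);
    // new form used throughout this patch
    const field_alignment = @as(u32, @intCast(byte_units));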
@@ -1221,7 +1221,7 @@ pub const Type = struct { else => |e| return e, })) continue; - const field_align = @intCast(u32, field.abi_align.toByteUnitsOptional() orelse + const field_align = @as(u32, @intCast(field.abi_align.toByteUnitsOptional() orelse switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |a| a, .val => switch (strat) { @@ -1232,7 +1232,7 @@ pub const Type = struct { .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }, }, - }); + })); max_align = @max(max_align, field_align); } return AbiAlignmentAdvanced{ .scalar = max_align }; @@ -1307,7 +1307,7 @@ pub const Type = struct { } })).toValue() }, }; const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema); - const elem_bits = @intCast(u32, elem_bits_u64); + const elem_bits = @as(u32, @intCast(elem_bits_u64)); const total_bits = elem_bits * vector_type.len; const total_bytes = (total_bits + 7) / 8; const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { @@ -1573,12 +1573,12 @@ pub const Type = struct { fn intAbiSize(bits: u16, target: Target) u64 { const alignment = intAbiAlignment(bits, target); - return std.mem.alignForward(u64, @intCast(u16, (@as(u17, bits) + 7) / 8), alignment); + return std.mem.alignForward(u64, @as(u16, @intCast((@as(u17, bits) + 7) / 8)), alignment); } fn intAbiAlignment(bits: u16, target: Target) u32 { return @min( - std.math.ceilPowerOfTwoPromote(u16, @intCast(u16, (@as(u17, bits) + 7) / 8)), + std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))), target.maxIntAlignment(), ); } @@ -2166,7 +2166,7 @@ pub const Type = struct { pub fn vectorLen(ty: Type, mod: *const Module) u32 { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .vector_type => |vector_type| vector_type.len, - .anon_struct_type => |tuple| @intCast(u32, tuple.types.len), + .anon_struct_type => |tuple| @as(u32, @intCast(tuple.types.len)), else => unreachable, }; } @@ -3124,7 +3124,7 @@ pub const Type = struct { for (struct_obj.fields.values(), 0..) |f, i| { if (!f.ty.hasRuntimeBits(mod)) continue; - const field_bits = @intCast(u16, f.ty.bitSize(mod)); + const field_bits = @as(u16, @intCast(f.ty.bitSize(mod))); if (i == field_index) { bit_offset = running_bits; elem_size_bits = field_bits; @@ -3385,8 +3385,8 @@ pub const Type = struct { pub fn smallestUnsignedBits(max: u64) u16 { if (max == 0) return 0; const base = std.math.log2(max); - const upper = (@as(u64, 1) << @intCast(u6, base)) - 1; - return @intCast(u16, base + @intFromBool(upper < max)); + const upper = (@as(u64, 1) << @as(u6, @intCast(base))) - 1; + return @as(u16, @intCast(base + @intFromBool(upper < max))); } /// This is only used for comptime asserts. 
Bump this number when you make a change diff --git a/src/value.zig b/src/value.zig index 542dfb73ec..1c22717152 100644 --- a/src/value.zig +++ b/src/value.zig @@ -112,7 +112,7 @@ pub const Value = struct { return self.castTag(T.base_tag); } inline for (@typeInfo(Tag).Enum.fields) |field| { - const t = @enumFromInt(Tag, field.value); + const t = @as(Tag, @enumFromInt(field.value)); if (self.legacy.ptr_otherwise.tag == t) { if (T == t.Type()) { return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise); @@ -203,8 +203,8 @@ pub const Value = struct { .bytes => |bytes| try ip.getOrPutString(mod.gpa, bytes), .elems => try arrayToIpString(val, ty.arrayLen(mod), mod), .repeated_elem => |elem| { - const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); - const len = @intCast(usize, ty.arrayLen(mod)); + const byte = @as(u8, @intCast(elem.toValue().toUnsignedInt(mod))); + const len = @as(usize, @intCast(ty.arrayLen(mod))); try ip.string_bytes.appendNTimes(mod.gpa, byte, len); return ip.getOrPutTrailingString(mod.gpa, len); }, @@ -226,8 +226,8 @@ pub const Value = struct { .bytes => |bytes| try allocator.dupe(u8, bytes), .elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), .repeated_elem => |elem| { - const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); - const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); + const byte = @as(u8, @intCast(elem.toValue().toUnsignedInt(mod))); + const result = try allocator.alloc(u8, @as(usize, @intCast(ty.arrayLen(mod)))); @memset(result, byte); return result; }, @@ -237,10 +237,10 @@ pub const Value = struct { } fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 { - const result = try allocator.alloc(u8, @intCast(usize, len)); + const result = try allocator.alloc(u8, @as(usize, @intCast(len))); for (result, 0..) |*elem, i| { const elem_val = try val.elemValue(mod, i); - elem.* = @intCast(u8, elem_val.toUnsignedInt(mod)); + elem.* = @as(u8, @intCast(elem_val.toUnsignedInt(mod))); } return result; } @@ -248,7 +248,7 @@ pub const Value = struct { fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTerminatedString { const gpa = mod.gpa; const ip = &mod.intern_pool; - const len = @intCast(usize, len_u64); + const len = @as(usize, @intCast(len_u64)); try ip.string_bytes.ensureUnusedCapacity(gpa, len); for (0..len) |i| { // I don't think elemValue has the possibility to affect ip.string_bytes. 
Let's @@ -256,7 +256,7 @@ pub const Value = struct { const prev = ip.string_bytes.items.len; const elem_val = try val.elemValue(mod, i); assert(ip.string_bytes.items.len == prev); - const byte = @intCast(u8, elem_val.toUnsignedInt(mod)); + const byte = @as(u8, @intCast(elem_val.toUnsignedInt(mod))); ip.string_bytes.appendAssumeCapacity(byte); } return ip.getOrPutTrailingString(gpa, len); @@ -303,7 +303,7 @@ pub const Value = struct { } }); }, .aggregate => { - const len = @intCast(usize, ty.arrayLen(mod)); + const len = @as(usize, @intCast(ty.arrayLen(mod))); const old_elems = val.castTag(.aggregate).?.data[0..len]; const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); defer mod.gpa.free(new_elems); @@ -534,7 +534,7 @@ pub const Value = struct { const base_addr = (try field.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; const struct_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty); - return base_addr + struct_ty.structFieldOffset(@intCast(usize, field.index), mod); + return base_addr + struct_ty.structFieldOffset(@as(usize, @intCast(field.index)), mod); }, else => null, }, @@ -561,9 +561,9 @@ pub const Value = struct { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(i64) catch unreachable, .i64 => |x| x, - .u64 => |x| @intCast(i64, x), - .lazy_align => |ty| @intCast(i64, ty.toType().abiAlignment(mod)), - .lazy_size => |ty| @intCast(i64, ty.toType().abiSize(mod)), + .u64 => |x| @as(i64, @intCast(x)), + .lazy_align => |ty| @as(i64, @intCast(ty.toType().abiAlignment(mod))), + .lazy_size => |ty| @as(i64, @intCast(ty.toType().abiSize(mod))), }, else => unreachable, }, @@ -604,7 +604,7 @@ pub const Value = struct { const target = mod.getTarget(); const endian = target.cpu.arch.endian(); if (val.isUndef(mod)) { - const size = @intCast(usize, ty.abiSize(mod)); + const size = @as(usize, @intCast(ty.abiSize(mod))); @memset(buffer[0..size], 0xaa); return; } @@ -623,17 +623,17 @@ pub const Value = struct { bigint.writeTwosComplement(buffer[0..byte_count], endian); }, .Float => switch (ty.floatBits(target)) { - 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16, mod)), endian), - 32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(u32, val.toFloat(f32, mod)), endian), - 64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(u64, val.toFloat(f64, mod)), endian), - 80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(u80, val.toFloat(f80, mod)), endian), - 128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(u128, val.toFloat(f128, mod)), endian), + 16 => std.mem.writeInt(u16, buffer[0..2], @as(u16, @bitCast(val.toFloat(f16, mod))), endian), + 32 => std.mem.writeInt(u32, buffer[0..4], @as(u32, @bitCast(val.toFloat(f32, mod))), endian), + 64 => std.mem.writeInt(u64, buffer[0..8], @as(u64, @bitCast(val.toFloat(f64, mod))), endian), + 80 => std.mem.writeInt(u80, buffer[0..10], @as(u80, @bitCast(val.toFloat(f80, mod))), endian), + 128 => std.mem.writeInt(u128, buffer[0..16], @as(u128, @bitCast(val.toFloat(f128, mod))), endian), else => unreachable, }, .Array => { const len = ty.arrayLen(mod); const elem_ty = ty.childType(mod); - const elem_size = @intCast(usize, elem_ty.abiSize(mod)); + const elem_size = @as(usize, @intCast(elem_ty.abiSize(mod))); var elem_i: usize = 0; var buf_off: usize = 0; while (elem_i < len) : (elem_i += 1) { @@ -645,13 +645,13 @@ pub const Value = struct { .Vector => { // We use byte_count instead of abi_size here, so that any 
padding bytes // follow the data bytes, on both big- and little-endian systems. - const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; + const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); }, .Struct => switch (ty.containerLayout(mod)) { .Auto => return error.IllDefinedMemoryLayout, .Extern => for (ty.structFields(mod).values(), 0..) |field, i| { - const off = @intCast(usize, ty.structFieldOffset(i, mod)); + const off = @as(usize, @intCast(ty.structFieldOffset(i, mod))); const field_val = switch (val.ip_index) { .none => switch (val.tag()) { .bytes => { @@ -674,7 +674,7 @@ pub const Value = struct { try writeToMemory(field_val, field.ty, mod, buffer[off..]); }, .Packed => { - const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; + const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); }, }, @@ -686,14 +686,14 @@ pub const Value = struct { .error_union => |error_union| error_union.val.err_name, else => unreachable, }; - const int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); - std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian); + const int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?)); + std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @as(Int, @intCast(int)), endian); }, .Union => switch (ty.containerLayout(mod)) { .Auto => return error.IllDefinedMemoryLayout, .Extern => return error.Unimplemented, .Packed => { - const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; + const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); }, }, @@ -730,7 +730,7 @@ pub const Value = struct { const target = mod.getTarget(); const endian = target.cpu.arch.endian(); if (val.isUndef(mod)) { - const bit_size = @intCast(usize, ty.bitSize(mod)); + const bit_size = @as(usize, @intCast(ty.bitSize(mod))); std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian); return; } @@ -742,9 +742,9 @@ pub const Value = struct { .Big => buffer.len - bit_offset / 8 - 1, }; if (val.toBool()) { - buffer[byte_index] |= (@as(u8, 1) << @intCast(u3, bit_offset % 8)); + buffer[byte_index] |= (@as(u8, 1) << @as(u3, @intCast(bit_offset % 8))); } else { - buffer[byte_index] &= ~(@as(u8, 1) << @intCast(u3, bit_offset % 8)); + buffer[byte_index] &= ~(@as(u8, 1) << @as(u3, @intCast(bit_offset % 8))); } }, .Int, .Enum => { @@ -759,17 +759,17 @@ pub const Value = struct { } }, .Float => switch (ty.floatBits(target)) { - 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(u16, val.toFloat(f16, mod)), endian), - 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(u32, val.toFloat(f32, mod)), endian), - 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(u64, val.toFloat(f64, mod)), endian), - 80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(u80, val.toFloat(f80, mod)), endian), - 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(u128, val.toFloat(f128, mod)), endian), + 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @as(u16, @bitCast(val.toFloat(f16, mod))), endian), + 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @as(u32, @bitCast(val.toFloat(f32, mod))), endian), + 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @as(u64, @bitCast(val.toFloat(f64, mod))), endian), + 80 => std.mem.writePackedInt(u80, buffer, 
bit_offset, @as(u80, @bitCast(val.toFloat(f80, mod))), endian), + 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @as(u128, @bitCast(val.toFloat(f128, mod))), endian), else => unreachable, }, .Vector => { const elem_ty = ty.childType(mod); - const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); - const len = @intCast(usize, ty.arrayLen(mod)); + const elem_bit_size = @as(u16, @intCast(elem_ty.bitSize(mod))); + const len = @as(usize, @intCast(ty.arrayLen(mod))); var bits: u16 = 0; var elem_i: usize = 0; @@ -789,7 +789,7 @@ pub const Value = struct { const fields = ty.structFields(mod).values(); const storage = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage; for (fields, 0..) |field, i| { - const field_bits = @intCast(u16, field.ty.bitSize(mod)); + const field_bits = @as(u16, @intCast(field.ty.bitSize(mod))); const field_val = switch (storage) { .bytes => unreachable, .elems => |elems| elems[i], @@ -865,12 +865,12 @@ pub const Value = struct { if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64 .signed => { const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian); - const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits); + const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits)); return mod.getCoerced(try mod.intValue(int_ty, result), ty); }, .unsigned => { const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian); - const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits); + const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits)); return mod.getCoerced(try mod.intValue(int_ty, result), ty); }, } else { // Slow path, we have to construct a big-int @@ -886,22 +886,22 @@ pub const Value = struct { .Float => return (try mod.intern(.{ .float = .{ .ty = ty.toIntern(), .storage = switch (ty.floatBits(target)) { - 16 => .{ .f16 = @bitCast(f16, std.mem.readInt(u16, buffer[0..2], endian)) }, - 32 => .{ .f32 = @bitCast(f32, std.mem.readInt(u32, buffer[0..4], endian)) }, - 64 => .{ .f64 = @bitCast(f64, std.mem.readInt(u64, buffer[0..8], endian)) }, - 80 => .{ .f80 = @bitCast(f80, std.mem.readInt(u80, buffer[0..10], endian)) }, - 128 => .{ .f128 = @bitCast(f128, std.mem.readInt(u128, buffer[0..16], endian)) }, + 16 => .{ .f16 = @as(f16, @bitCast(std.mem.readInt(u16, buffer[0..2], endian))) }, + 32 => .{ .f32 = @as(f32, @bitCast(std.mem.readInt(u32, buffer[0..4], endian))) }, + 64 => .{ .f64 = @as(f64, @bitCast(std.mem.readInt(u64, buffer[0..8], endian))) }, + 80 => .{ .f80 = @as(f80, @bitCast(std.mem.readInt(u80, buffer[0..10], endian))) }, + 128 => .{ .f128 = @as(f128, @bitCast(std.mem.readInt(u128, buffer[0..16], endian))) }, else => unreachable, }, } })).toValue(), .Array => { const elem_ty = ty.childType(mod); const elem_size = elem_ty.abiSize(mod); - const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod))); + const elems = try arena.alloc(InternPool.Index, @as(usize, @intCast(ty.arrayLen(mod)))); var offset: usize = 0; for (elems) |*elem| { elem.* = try (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).intern(elem_ty, mod); - offset += @intCast(usize, elem_size); + offset += @as(usize, @intCast(elem_size)); } return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), @@ -911,7 +911,7 @@ pub const Value = struct { .Vector => { // We use byte_count instead of abi_size here, so that any padding bytes // follow the data bytes, on both big- and little-endian systems. 
- const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; + const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); }, .Struct => switch (ty.containerLayout(mod)) { @@ -920,8 +920,8 @@ pub const Value = struct { const fields = ty.structFields(mod).values(); const field_vals = try arena.alloc(InternPool.Index, fields.len); for (field_vals, fields, 0..) |*field_val, field, i| { - const off = @intCast(usize, ty.structFieldOffset(i, mod)); - const sz = @intCast(usize, field.ty.abiSize(mod)); + const off = @as(usize, @intCast(ty.structFieldOffset(i, mod))); + const sz = @as(usize, @intCast(field.ty.abiSize(mod))); field_val.* = try (try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena)).intern(field.ty, mod); } return (try mod.intern(.{ .aggregate = .{ @@ -930,7 +930,7 @@ pub const Value = struct { } })).toValue(); }, .Packed => { - const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; + const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); }, }, @@ -938,7 +938,7 @@ pub const Value = struct { // TODO revisit this when we have the concept of the error tag type const Int = u16; const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian); - const name = mod.global_error_set.keys()[@intCast(usize, int)]; + const name = mod.global_error_set.keys()[@as(usize, @intCast(int))]; return (try mod.intern(.{ .err = .{ .ty = ty.toIntern(), .name = name, @@ -977,7 +977,7 @@ pub const Value = struct { .Big => buffer[buffer.len - bit_offset / 8 - 1], .Little => buffer[bit_offset / 8], }; - if (((byte >> @intCast(u3, bit_offset % 8)) & 1) == 0) { + if (((byte >> @as(u3, @intCast(bit_offset % 8))) & 1) == 0) { return Value.false; } else { return Value.true; @@ -1009,7 +1009,7 @@ pub const Value = struct { } // Slow path, we have to construct a big-int - const abi_size = @intCast(usize, ty.abiSize(mod)); + const abi_size = @as(usize, @intCast(ty.abiSize(mod))); const Limb = std.math.big.Limb; const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb); const limbs_buffer = try arena.alloc(Limb, limb_count); @@ -1021,20 +1021,20 @@ pub const Value = struct { .Float => return (try mod.intern(.{ .float = .{ .ty = ty.toIntern(), .storage = switch (ty.floatBits(target)) { - 16 => .{ .f16 = @bitCast(f16, std.mem.readPackedInt(u16, buffer, bit_offset, endian)) }, - 32 => .{ .f32 = @bitCast(f32, std.mem.readPackedInt(u32, buffer, bit_offset, endian)) }, - 64 => .{ .f64 = @bitCast(f64, std.mem.readPackedInt(u64, buffer, bit_offset, endian)) }, - 80 => .{ .f80 = @bitCast(f80, std.mem.readPackedInt(u80, buffer, bit_offset, endian)) }, - 128 => .{ .f128 = @bitCast(f128, std.mem.readPackedInt(u128, buffer, bit_offset, endian)) }, + 16 => .{ .f16 = @as(f16, @bitCast(std.mem.readPackedInt(u16, buffer, bit_offset, endian))) }, + 32 => .{ .f32 = @as(f32, @bitCast(std.mem.readPackedInt(u32, buffer, bit_offset, endian))) }, + 64 => .{ .f64 = @as(f64, @bitCast(std.mem.readPackedInt(u64, buffer, bit_offset, endian))) }, + 80 => .{ .f80 = @as(f80, @bitCast(std.mem.readPackedInt(u80, buffer, bit_offset, endian))) }, + 128 => .{ .f128 = @as(f128, @bitCast(std.mem.readPackedInt(u128, buffer, bit_offset, endian))) }, else => unreachable, }, } })).toValue(), .Vector => { const elem_ty = ty.childType(mod); - const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod))); + const elems = try 
arena.alloc(InternPool.Index, @as(usize, @intCast(ty.arrayLen(mod)))); var bits: u16 = 0; - const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); + const elem_bit_size = @as(u16, @intCast(elem_ty.bitSize(mod))); for (elems, 0..) |_, i| { // On big-endian systems, LLVM reverses the element order of vectors by default const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i; @@ -1054,7 +1054,7 @@ pub const Value = struct { const fields = ty.structFields(mod).values(); const field_vals = try arena.alloc(InternPool.Index, fields.len); for (fields, 0..) |field, i| { - const field_bits = @intCast(u16, field.ty.bitSize(mod)); + const field_bits = @as(u16, @intCast(field.ty.bitSize(mod))); field_vals[i] = try (try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena)).intern(field.ty, mod); bits += field_bits; } @@ -1081,18 +1081,18 @@ pub const Value = struct { pub fn toFloat(val: Value, comptime T: type, mod: *Module) T { return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { - .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)), + .big_int => |big_int| @as(T, @floatCast(bigIntToFloat(big_int.limbs, big_int.positive))), inline .u64, .i64 => |x| { if (T == f80) { @panic("TODO we can't lower this properly on non-x86 llvm backend yet"); } - return @floatFromInt(T, x); + return @as(T, @floatFromInt(x)); }, - .lazy_align => |ty| @floatFromInt(T, ty.toType().abiAlignment(mod)), - .lazy_size => |ty| @floatFromInt(T, ty.toType().abiSize(mod)), + .lazy_align => |ty| @as(T, @floatFromInt(ty.toType().abiAlignment(mod))), + .lazy_size => |ty| @as(T, @floatFromInt(ty.toType().abiSize(mod))), }, .float => |float| switch (float.storage) { - inline else => |x| @floatCast(T, x), + inline else => |x| @as(T, @floatCast(x)), }, else => unreachable, }; @@ -1107,7 +1107,7 @@ pub const Value = struct { var i: usize = limbs.len; while (i != 0) { i -= 1; - const limb: f128 = @floatFromInt(f128, limbs[i]); + const limb: f128 = @as(f128, @floatFromInt(limbs[i])); result = @mulAdd(f128, base, result, limb); } if (positive) { @@ -1132,7 +1132,7 @@ pub const Value = struct { pub fn popCount(val: Value, ty: Type, mod: *Module) u64 { var bigint_buf: BigIntSpace = undefined; const bigint = val.toBigInt(&bigint_buf, mod); - return @intCast(u64, bigint.popCount(ty.intInfo(mod).bits)); + return @as(u64, @intCast(bigint.popCount(ty.intInfo(mod).bits))); } pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { @@ -1505,10 +1505,10 @@ pub const Value = struct { .int, .eu_payload => unreachable, .opt_payload => |base| base.toValue().elemValue(mod, index), .comptime_field => |field_val| field_val.toValue().elemValue(mod, index), - .elem => |elem| elem.base.toValue().elemValue(mod, index + @intCast(usize, elem.index)), + .elem => |elem| elem.base.toValue().elemValue(mod, index + @as(usize, @intCast(elem.index))), .field => |field| if (field.base.toValue().pointerDecl(mod)) |decl_index| { const base_decl = mod.declPtr(decl_index); - const field_val = try base_decl.val.fieldValue(mod, @intCast(usize, field.index)); + const field_val = try base_decl.val.fieldValue(mod, @as(usize, @intCast(field.index))); return field_val.elemValue(mod, index); } else unreachable, }, @@ -1604,18 +1604,18 @@ pub const Value = struct { .comptime_field => |comptime_field| comptime_field.toValue() .sliceArray(mod, arena, start, end), .elem => |elem| elem.base.toValue() - .sliceArray(mod, arena, start + @intCast(usize, elem.index), end 
+ @intCast(usize, elem.index)), + .sliceArray(mod, arena, start + @as(usize, @intCast(elem.index)), end + @as(usize, @intCast(elem.index))), else => unreachable, }, .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ .ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) { .array_type => |array_type| try mod.arrayType(.{ - .len = @intCast(u32, end - start), + .len = @as(u32, @intCast(end - start)), .child = array_type.child, .sentinel = if (end == array_type.len) array_type.sentinel else .none, }), .vector_type => |vector_type| try mod.vectorType(.{ - .len = @intCast(u32, end - start), + .len = @as(u32, @intCast(end - start)), .child = vector_type.child, }), else => unreachable, @@ -1734,7 +1734,7 @@ pub const Value = struct { .simple_value => |v| v == .undefined, .ptr => |ptr| switch (ptr.len) { .none => false, - else => for (0..@intCast(usize, ptr.len.toValue().toUnsignedInt(mod))) |index| { + else => for (0..@as(usize, @intCast(ptr.len.toValue().toUnsignedInt(mod)))) |index| { if (try (try val.elemValue(mod, index)).anyUndef(mod)) break true; } else false, }, @@ -1783,7 +1783,7 @@ pub const Value = struct { pub fn getErrorInt(val: Value, mod: *const Module) Module.ErrorInt { return if (getErrorName(val, mod).unwrap()) |err_name| - @intCast(Module.ErrorInt, mod.global_error_set.getIndex(err_name).?) + @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(err_name).?)) else 0; } @@ -1868,11 +1868,11 @@ pub const Value = struct { fn floatFromIntInner(x: anytype, dest_ty: Type, mod: *Module) !Value { const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (dest_ty.floatBits(target)) { - 16 => .{ .f16 = @floatFromInt(f16, x) }, - 32 => .{ .f32 = @floatFromInt(f32, x) }, - 64 => .{ .f64 = @floatFromInt(f64, x) }, - 80 => .{ .f80 = @floatFromInt(f80, x) }, - 128 => .{ .f128 = @floatFromInt(f128, x) }, + 16 => .{ .f16 = @as(f16, @floatFromInt(x)) }, + 32 => .{ .f32 = @as(f32, @floatFromInt(x)) }, + 64 => .{ .f64 = @as(f64, @floatFromInt(x)) }, + 80 => .{ .f80 = @as(f80, @floatFromInt(x)) }, + 128 => .{ .f128 = @as(f128, @floatFromInt(x)) }, else => unreachable, }; return (try mod.intern(.{ .float = .{ @@ -1887,7 +1887,7 @@ pub const Value = struct { } const w_value = @fabs(scalar); - return @divFloor(@intFromFloat(std.math.big.Limb, std.math.log2(w_value)), @typeInfo(std.math.big.Limb).Int.bits) + 1; + return @divFloor(@as(std.math.big.Limb, @intFromFloat(std.math.log2(w_value))), @typeInfo(std.math.big.Limb).Int.bits) + 1; } pub const OverflowArithmeticResult = struct { @@ -2738,14 +2738,14 @@ pub const Value = struct { for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); const bits_elem = try bits.elemValue(mod, i); - scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod)).intern(scalar_ty, mod); + scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @as(u16, @intCast(bits_elem.toUnsignedInt(mod))), mod)).intern(scalar_ty, mod); } return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, } })).toValue(); } - return intTruncScalar(val, ty, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod); + return intTruncScalar(val, ty, allocator, signedness, @as(u16, @intCast(bits.toUnsignedInt(mod))), mod); } pub fn intTruncScalar( @@ -2793,7 +2793,7 @@ pub const Value = struct { // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift = @intCast(usize, rhs.toUnsignedInt(mod)); + const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, @@ -2855,7 +2855,7 @@ pub const Value = struct { const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift = @intCast(usize, rhs.toUnsignedInt(mod)); + const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, @@ -2912,7 +2912,7 @@ pub const Value = struct { var lhs_space: Value.BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift = @intCast(usize, rhs.toUnsignedInt(mod)); + const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits) + 1, @@ -2984,7 +2984,7 @@ pub const Value = struct { // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift = @intCast(usize, rhs.toUnsignedInt(mod)); + const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8)); if (result_limbs == 0) { diff --git a/test/behavior/align.zig b/test/behavior/align.zig index d3e4d81250..c8eb71a433 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -24,7 +24,7 @@ test "slicing array of length 1 can not assume runtime index is always zero" { const slice = @as(*align(4) [1]u8, &foo)[runtime_index..]; try expect(@TypeOf(slice) == []u8); try expect(slice.len == 0); - try expect(@truncate(u2, @intFromPtr(slice.ptr) - 1) == 0); + try expect(@as(u2, @truncate(@intFromPtr(slice.ptr) - 1)) == 0); } test "default alignment allows unspecified in type syntax" { @@ -47,7 +47,7 @@ test "@alignCast pointers" { try expect(x == 2); } fn expectsOnly1(x: *align(1) u32) void { - expects4(@alignCast(4, x)); + expects4(@alignCast(x)); } fn expects4(x: *align(4) u32) void { x.* += 1; @@ -213,12 +213,6 @@ test "alignment and size of structs with 128-bit fields" { } } -test "@ptrCast preserves alignment of bigger source" { - var x: u32 align(16) = 1234; - const ptr = @ptrCast(*u8, &x); - try expect(@TypeOf(ptr) == *align(16) u8); -} - test "alignstack" { try expect(fnWithAlignedStack() == 1234); } @@ -249,7 +243,7 @@ test "specifying alignment allows pointer cast" { } fn testBytesAlign(b: u8) !void { var bytes align(4) = [_]u8{ b, b, b, b }; - const ptr = @ptrCast(*u32, &bytes[0]); + const ptr = @as(*u32, @ptrCast(&bytes[0])); try expect(ptr.* == 0x33333333); } @@ -265,7 +259,7 @@ test "@alignCast slices" { try expect(slice[0] == 2); } fn sliceExpectsOnly1(slice: []align(1) u32) void { - sliceExpects4(@alignCast(4, slice)); + sliceExpects4(@alignCast(slice)); } fn sliceExpects4(slice: []align(4) u32) void { slice[0] += 1; @@ -302,8 +296,8 @@ test "page aligned array on stack" { try expect(@intFromPtr(&array[0]) & 0xFFF == 0); try expect(array[3] == 4); - try expect(@truncate(u4, @intFromPtr(&number1)) == 0); - try expect(@truncate(u4, @intFromPtr(&number2)) == 0); + try expect(@as(u4, @truncate(@intFromPtr(&number1))) == 0); + try expect(@as(u4, @truncate(@intFromPtr(&number2))) == 0); try 
expect(number1 == 42); try expect(number2 == 43); } @@ -366,7 +360,7 @@ test "@alignCast functions" { try expect(fnExpectsOnly1(simple4) == 0x19); } fn fnExpectsOnly1(ptr: *const fn () align(1) i32) i32 { - return fnExpects4(@alignCast(4, ptr)); + return fnExpects4(@alignCast(ptr)); } fn fnExpects4(ptr: *const fn () align(4) i32) i32 { return ptr(); @@ -461,9 +455,11 @@ fn testIndex2(ptr: [*]align(4) u8, index: usize, comptime T: type) !void { test "alignment of function with c calling convention" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + const a = @alignOf(@TypeOf(nothing)); + var runtime_nothing = &nothing; - const casted1 = @ptrCast(*const u8, runtime_nothing); - const casted2 = @ptrCast(*const fn () callconv(.C) void, casted1); + const casted1: *align(a) const u8 = @ptrCast(runtime_nothing); + const casted2: *const fn () callconv(.C) void = @ptrCast(casted1); casted2(); } @@ -588,7 +584,7 @@ test "@alignCast null" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var ptr: ?*anyopaque = null; - const aligned: ?*anyopaque = @alignCast(@alignOf(?*anyopaque), ptr); + const aligned: ?*anyopaque = @alignCast(ptr); try expect(aligned == null); } diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 9ef4a55b39..bc8176aa9c 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -170,7 +170,7 @@ test "array with sentinels" { { var zero_sized: [0:0xde]u8 = [_:0xde]u8{}; try expect(zero_sized[0] == 0xde); - var reinterpreted = @ptrCast(*[1]u8, &zero_sized); + var reinterpreted = @as(*[1]u8, @ptrCast(&zero_sized)); try expect(reinterpreted[0] == 0xde); } var arr: [3:0x55]u8 = undefined; @@ -694,7 +694,7 @@ test "array init of container level array variable" { test "runtime initialized sentinel-terminated array literal" { var c: u16 = 300; const f = &[_:0x9999]u16{c}; - const g = @ptrCast(*const [4]u8, f); + const g = @as(*const [4]u8, @ptrCast(f)); try std.testing.expect(g[2] == 0x99); try std.testing.expect(g[3] == 0x99); } diff --git a/test/behavior/async_fn.zig b/test/behavior/async_fn.zig index dcbe78b091..7eaa5c78d0 100644 --- a/test/behavior/async_fn.zig +++ b/test/behavior/async_fn.zig @@ -136,12 +136,12 @@ test "@frameSize" { const S = struct { fn doTheTest() !void { { - var ptr = @ptrCast(fn (i32) callconv(.Async) void, other); + var ptr = @as(fn (i32) callconv(.Async) void, @ptrCast(other)); const size = @frameSize(ptr); try expect(size == @sizeOf(@Frame(other))); } { - var ptr = @ptrCast(fn () callconv(.Async) void, first); + var ptr = @as(fn () callconv(.Async) void, @ptrCast(first)); const size = @frameSize(ptr); try expect(size == @sizeOf(@Frame(first))); } @@ -1184,7 +1184,7 @@ test "using @TypeOf on a generic function call" { global_frame = @frame(); } const F = @TypeOf(async amain(x - 1)); - const frame = @ptrFromInt(*F, @intFromPtr(&buf)); + const frame = @as(*F, @ptrFromInt(@intFromPtr(&buf))); return await @asyncCall(frame, {}, amain, .{x - 1}); } }; @@ -1212,7 +1212,7 @@ test "recursive call of await @asyncCall with struct return type" { global_frame = @frame(); } const F = @TypeOf(async amain(x - 1)); - const frame = @ptrFromInt(*F, @intFromPtr(&buf)); + const frame = @as(*F, @ptrFromInt(@intFromPtr(&buf))); return await @asyncCall(frame, {}, amain, .{x - 1}); } @@ -1833,7 +1833,7 @@ test "avoid forcing frame alignment resolution implicit cast to *anyopaque" { } }; var frame = async S.foo(); - resume @ptrCast(anyframe->bool, @alignCast(@alignOf(@Frame(S.foo)), S.x)); + resume @as(anyframe->bool, 
@ptrCast(@alignCast(S.x))); try expect(nosuspend await frame); } diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig index 4394e62f6f..5264ef75cf 100644 --- a/test/behavior/atomics.zig +++ b/test/behavior/atomics.zig @@ -326,7 +326,7 @@ fn testAtomicRmwInt128(comptime signedness: std.builtin.Signedness) !void { const uint = std.meta.Int(.unsigned, 128); const int = std.meta.Int(signedness, 128); - const initial: int = @bitCast(int, @as(uint, 0xaaaaaaaa_bbbbbbbb_cccccccc_dddddddd)); + const initial: int = @as(int, @bitCast(@as(uint, 0xaaaaaaaa_bbbbbbbb_cccccccc_dddddddd))); const replacement: int = 0x00000000_00000005_00000000_00000003; var x: int align(16) = initial; diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index f98cf8f237..87cbb3e242 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -20,7 +20,7 @@ test "truncate" { try comptime expect(testTruncate(0x10fd) == 0xfd); } fn testTruncate(x: u32) u8 { - return @truncate(u8, x); + return @as(u8, @truncate(x)); } test "truncate to non-power-of-two integers" { @@ -56,7 +56,7 @@ test "truncate to non-power-of-two integers from 128-bit" { } fn testTrunc(comptime Big: type, comptime Little: type, big: Big, little: Little) !void { - try expect(@truncate(Little, big) == little); + try expect(@as(Little, @truncate(big)) == little); } const g1: i32 = 1233 + 1; @@ -229,9 +229,9 @@ test "opaque types" { const global_a: i32 = 1234; const global_b: *const i32 = &global_a; -const global_c: *const f32 = @ptrCast(*const f32, global_b); +const global_c: *const f32 = @as(*const f32, @ptrCast(global_b)); test "compile time global reinterpret" { - const d = @ptrCast(*const i32, global_c); + const d = @as(*const i32, @ptrCast(global_c)); try expect(d.* == 1234); } @@ -362,7 +362,7 @@ test "variable is allowed to be a pointer to an opaque type" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var x: i32 = 1234; - _ = hereIsAnOpaqueType(@ptrCast(*OpaqueA, &x)); + _ = hereIsAnOpaqueType(@as(*OpaqueA, @ptrCast(&x))); } fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA { var a = ptr; @@ -442,7 +442,7 @@ test "array 3D const double ptr with offset" { } fn testArray2DConstDoublePtr(ptr: *const f32) !void { - const ptr2 = @ptrCast([*]const f32, ptr); + const ptr2 = @as([*]const f32, @ptrCast(ptr)); try expect(ptr2[0] == 1.0); try expect(ptr2[1] == 2.0); } @@ -574,9 +574,9 @@ test "constant equal function pointers" { fn emptyFn() void {} -const addr1 = @ptrCast(*const u8, &emptyFn); +const addr1 = @as(*const u8, @ptrCast(&emptyFn)); test "comptime cast fn to ptr" { - const addr2 = @ptrCast(*const u8, &emptyFn); + const addr2 = @as(*const u8, @ptrCast(&emptyFn)); try comptime expect(addr1 == addr2); } @@ -667,7 +667,7 @@ test "string escapes" { test "explicit cast optional pointers" { const a: ?*i32 = undefined; - const b: ?*f32 = @ptrCast(?*f32, a); + const b: ?*f32 = @as(?*f32, @ptrCast(a)); _ = b; } @@ -752,7 +752,7 @@ test "auto created variables have correct alignment" { const S = struct { fn foo(str: [*]const u8) u32 { - for (@ptrCast([*]align(1) const u32, str)[0..1]) |v| { + for (@as([*]align(1) const u32, @ptrCast(str))[0..1]) |v| { return v; } return 0; @@ -772,7 +772,7 @@ test "extern variable with non-pointer opaque type" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; @export(var_to_export, .{ .name = "opaque_extern_var" }); - try expect(@ptrCast(*align(1) u32, &opaque_extern_var).* == 42); + try expect(@as(*align(1) u32, @ptrCast(&opaque_extern_var)).* == 42); } 
extern var opaque_extern_var: opaque {}; var var_to_export: u32 = 42; diff --git a/test/behavior/bit_shifting.zig b/test/behavior/bit_shifting.zig index 03eb4433e1..8b605385d2 100644 --- a/test/behavior/bit_shifting.zig +++ b/test/behavior/bit_shifting.zig @@ -28,7 +28,7 @@ fn ShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, compt // TODO: https://github.com/ziglang/zig/issues/1544 // This cast could be implicit if we teach the compiler that // u32 >> 30 -> u2 - return @intCast(ShardKey, shard_key); + return @as(ShardKey, @intCast(shard_key)); } pub fn put(self: *Self, node: *Node) void { @@ -85,14 +85,14 @@ fn testShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, c var table = Table.create(); var node_buffer: [node_count]Table.Node = undefined; for (&node_buffer, 0..) |*node, i| { - const key = @intCast(Key, i); + const key = @as(Key, @intCast(i)); try expect(table.get(key) == null); node.init(key, {}); table.put(node); } for (&node_buffer, 0..) |*node, i| { - try expect(table.get(@intCast(Key, i)) == node); + try expect(table.get(@as(Key, @intCast(i))) == node); } } diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index f71a05cada..0c137a2baa 100644 --- a/test/behavior/bitcast.zig +++ b/test/behavior/bitcast.zig @@ -71,11 +71,11 @@ fn testBitCast(comptime N: usize) !void { } fn conv_iN(comptime N: usize, x: std.meta.Int(.signed, N)) std.meta.Int(.unsigned, N) { - return @bitCast(std.meta.Int(.unsigned, N), x); + return @as(std.meta.Int(.unsigned, N), @bitCast(x)); } fn conv_uN(comptime N: usize, x: std.meta.Int(.unsigned, N)) std.meta.Int(.signed, N) { - return @bitCast(std.meta.Int(.signed, N), x); + return @as(std.meta.Int(.signed, N), @bitCast(x)); } test "bitcast uX to bytes" { @@ -114,14 +114,14 @@ fn testBitCastuXToBytes(comptime N: usize) !void { while (byte_i < (byte_count - 1)) : (byte_i += 1) { try expect(bytes[byte_i] == 0xff); } - try expect(((bytes[byte_i] ^ 0xff) << -%@truncate(u3, N)) == 0); + try expect(((bytes[byte_i] ^ 0xff) << -%@as(u3, @truncate(N))) == 0); }, .Big => { var byte_i = byte_count - 1; while (byte_i > 0) : (byte_i -= 1) { try expect(bytes[byte_i] == 0xff); } - try expect(((bytes[byte_i] ^ 0xff) << -%@truncate(u3, N)) == 0); + try expect(((bytes[byte_i] ^ 0xff) << -%@as(u3, @truncate(N))) == 0); }, } } @@ -130,12 +130,12 @@ fn testBitCastuXToBytes(comptime N: usize) !void { test "nested bitcast" { const S = struct { fn moo(x: isize) !void { - try expect(@intCast(isize, 42) == x); + try expect(@as(isize, @intCast(42)) == x); } fn foo(x: isize) !void { try @This().moo( - @bitCast(isize, if (x != 0) @bitCast(usize, x) else @bitCast(usize, x)), + @as(isize, @bitCast(if (x != 0) @as(usize, @bitCast(x)) else @as(usize, @bitCast(x)))), ); } }; @@ -146,7 +146,7 @@ test "nested bitcast" { // issue #3010: compiler segfault test "bitcast literal [4]u8 param to u32" { - const ip = @bitCast(u32, [_]u8{ 255, 255, 255, 255 }); + const ip = @as(u32, @bitCast([_]u8{ 255, 255, 255, 255 })); try expect(ip == maxInt(u32)); } @@ -154,7 +154,7 @@ test "bitcast generates a temporary value" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var y = @as(u16, 0x55AA); - const x = @bitCast(u16, @bitCast([2]u8, y)); + const x = @as(u16, @bitCast(@as([2]u8, @bitCast(y)))); try expect(y == x); } @@ -175,7 +175,7 @@ test "@bitCast packed structs at runtime and comptime" { const S = struct { fn doTheTest() !void { var full = Full{ .number = 0x1234 }; - var two_halves = @bitCast(Divided, full); + var 
two_halves = @as(Divided, @bitCast(full)); try expect(two_halves.half1 == 0x34); try expect(two_halves.quarter3 == 0x2); try expect(two_halves.quarter4 == 0x1); @@ -200,7 +200,7 @@ test "@bitCast extern structs at runtime and comptime" { const S = struct { fn doTheTest() !void { var full = Full{ .number = 0x1234 }; - var two_halves = @bitCast(TwoHalves, full); + var two_halves = @as(TwoHalves, @bitCast(full)); switch (native_endian) { .Big => { try expect(two_halves.half1 == 0x12); @@ -230,8 +230,8 @@ test "bitcast packed struct to integer and back" { const S = struct { fn doTheTest() !void { var move = LevelUpMove{ .move_id = 1, .level = 2 }; - var v = @bitCast(u16, move); - var back_to_a_move = @bitCast(LevelUpMove, v); + var v = @as(u16, @bitCast(move)); + var back_to_a_move = @as(LevelUpMove, @bitCast(v)); try expect(back_to_a_move.move_id == 1); try expect(back_to_a_move.level == 2); } @@ -250,7 +250,7 @@ test "implicit cast to error union by returning" { try expect((func(-1) catch unreachable) == maxInt(u64)); } pub fn func(sz: i64) anyerror!u64 { - return @bitCast(u64, sz); + return @as(u64, @bitCast(sz)); } }; try S.entry(); @@ -261,7 +261,7 @@ test "bitcast packed struct literal to byte" { const Foo = packed struct { value: u8, }; - const casted = @bitCast(u8, Foo{ .value = 0xF }); + const casted = @as(u8, @bitCast(Foo{ .value = 0xF })); try expect(casted == 0xf); } @@ -269,7 +269,7 @@ test "comptime bitcast used in expression has the correct type" { const Foo = packed struct { value: u8, }; - try expect(@bitCast(u8, Foo{ .value = 0xF }) == 0xf); + try expect(@as(u8, @bitCast(Foo{ .value = 0xF })) == 0xf); } test "bitcast passed as tuple element" { @@ -279,7 +279,7 @@ test "bitcast passed as tuple element" { try expect(args[0] == 12.34); } }; - try S.foo(.{@bitCast(f32, @as(u32, 0x414570A4))}); + try S.foo(.{@as(f32, @bitCast(@as(u32, 0x414570A4)))}); } test "triple level result location with bitcast sandwich passed as tuple element" { @@ -289,7 +289,7 @@ test "triple level result location with bitcast sandwich passed as tuple element try expect(args[0] > 12.33 and args[0] < 12.35); } }; - try S.foo(.{@as(f64, @bitCast(f32, @as(u32, 0x414570A4)))}); + try S.foo(.{@as(f64, @as(f32, @bitCast(@as(u32, 0x414570A4))))}); } test "@bitCast packed struct of floats" { @@ -318,7 +318,7 @@ test "@bitCast packed struct of floats" { const S = struct { fn doTheTest() !void { var foo = Foo{}; - var v = @bitCast(Foo2, foo); + var v = @as(Foo2, @bitCast(foo)); try expect(v.a == foo.a); try expect(v.b == foo.b); try expect(v.c == foo.c); @@ -360,12 +360,12 @@ test "comptime @bitCast packed struct to int and back" { // S -> Int var s: S = .{}; - try expectEqual(@bitCast(Int, s), comptime @bitCast(Int, S{})); + try expectEqual(@as(Int, @bitCast(s)), comptime @as(Int, @bitCast(S{}))); // Int -> S var i: Int = 0; - const rt_cast = @bitCast(S, i); - const ct_cast = comptime @bitCast(S, @as(Int, 0)); + const rt_cast = @as(S, @bitCast(i)); + const ct_cast = comptime @as(S, @bitCast(@as(Int, 0))); inline for (@typeInfo(S).Struct.fields) |field| { try expectEqual(@field(rt_cast, field.name), @field(ct_cast, field.name)); } @@ -381,10 +381,10 @@ test "comptime bitcast with fields following f80" { const FloatT = extern struct { f: f80, x: u128 align(16) }; const x: FloatT = .{ .f = 0.5, .x = 123 }; - var x_as_uint: u256 = comptime @bitCast(u256, x); + var x_as_uint: u256 = comptime @as(u256, @bitCast(x)); - try expect(x.f == @bitCast(FloatT, x_as_uint).f); - try expect(x.x == @bitCast(FloatT, x_as_uint).x); + 
try expect(x.f == @as(FloatT, @bitCast(x_as_uint)).f); + try expect(x.x == @as(FloatT, @bitCast(x_as_uint)).x); } test "bitcast vector to integer and back" { @@ -398,20 +398,20 @@ test "bitcast vector to integer and back" { const arr: [16]bool = [_]bool{ true, false } ++ [_]bool{true} ** 14; var x = @splat(16, true); x[1] = false; - try expect(@bitCast(u16, x) == comptime @bitCast(u16, @as(@Vector(16, bool), arr))); + try expect(@as(u16, @bitCast(x)) == comptime @as(u16, @bitCast(@as(@Vector(16, bool), arr)))); } fn bitCastWrapper16(x: f16) u16 { - return @bitCast(u16, x); + return @as(u16, @bitCast(x)); } fn bitCastWrapper32(x: f32) u32 { - return @bitCast(u32, x); + return @as(u32, @bitCast(x)); } fn bitCastWrapper64(x: f64) u64 { - return @bitCast(u64, x); + return @as(u64, @bitCast(x)); } fn bitCastWrapper128(x: f128) u128 { - return @bitCast(u128, x); + return @as(u128, @bitCast(x)); } test "bitcast nan float does modify signaling bit" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO @@ -425,37 +425,37 @@ test "bitcast nan float does modify signaling bit" { // 16 bit const snan_f16_const = math.nan_f16; - try expectEqual(math.nan_u16, @bitCast(u16, snan_f16_const)); + try expectEqual(math.nan_u16, @as(u16, @bitCast(snan_f16_const))); try expectEqual(math.nan_u16, bitCastWrapper16(snan_f16_const)); var snan_f16_var = math.nan_f16; - try expectEqual(math.nan_u16, @bitCast(u16, snan_f16_var)); + try expectEqual(math.nan_u16, @as(u16, @bitCast(snan_f16_var))); try expectEqual(math.nan_u16, bitCastWrapper16(snan_f16_var)); // 32 bit const snan_f32_const = math.nan_f32; - try expectEqual(math.nan_u32, @bitCast(u32, snan_f32_const)); + try expectEqual(math.nan_u32, @as(u32, @bitCast(snan_f32_const))); try expectEqual(math.nan_u32, bitCastWrapper32(snan_f32_const)); var snan_f32_var = math.nan_f32; - try expectEqual(math.nan_u32, @bitCast(u32, snan_f32_var)); + try expectEqual(math.nan_u32, @as(u32, @bitCast(snan_f32_var))); try expectEqual(math.nan_u32, bitCastWrapper32(snan_f32_var)); // 64 bit const snan_f64_const = math.nan_f64; - try expectEqual(math.nan_u64, @bitCast(u64, snan_f64_const)); + try expectEqual(math.nan_u64, @as(u64, @bitCast(snan_f64_const))); try expectEqual(math.nan_u64, bitCastWrapper64(snan_f64_const)); var snan_f64_var = math.nan_f64; - try expectEqual(math.nan_u64, @bitCast(u64, snan_f64_var)); + try expectEqual(math.nan_u64, @as(u64, @bitCast(snan_f64_var))); try expectEqual(math.nan_u64, bitCastWrapper64(snan_f64_var)); // 128 bit const snan_f128_const = math.nan_f128; - try expectEqual(math.nan_u128, @bitCast(u128, snan_f128_const)); + try expectEqual(math.nan_u128, @as(u128, @bitCast(snan_f128_const))); try expectEqual(math.nan_u128, bitCastWrapper128(snan_f128_const)); var snan_f128_var = math.nan_f128; - try expectEqual(math.nan_u128, @bitCast(u128, snan_f128_var)); + try expectEqual(math.nan_u128, @as(u128, @bitCast(snan_f128_var))); try expectEqual(math.nan_u128, bitCastWrapper128(snan_f128_var)); } diff --git a/test/behavior/bitreverse.zig b/test/behavior/bitreverse.zig index e19a560a9d..722edef25e 100644 --- a/test/behavior/bitreverse.zig +++ b/test/behavior/bitreverse.zig @@ -62,20 +62,20 @@ fn testBitReverse() !void { // using comptime_ints, signed, positive try expect(@bitReverse(@as(u8, 0)) == 0); - try expect(@bitReverse(@bitCast(i8, @as(u8, 0x92))) == @bitCast(i8, @as(u8, 0x49))); - try expect(@bitReverse(@bitCast(i16, @as(u16, 0x1234))) == @bitCast(i16, @as(u16, 0x2c48))); - try expect(@bitReverse(@bitCast(i24, @as(u24, 
0x123456))) == @bitCast(i24, @as(u24, 0x6a2c48))); - try expect(@bitReverse(@bitCast(i24, @as(u24, 0x12345f))) == @bitCast(i24, @as(u24, 0xfa2c48))); - try expect(@bitReverse(@bitCast(i24, @as(u24, 0xf23456))) == @bitCast(i24, @as(u24, 0x6a2c4f))); - try expect(@bitReverse(@bitCast(i32, @as(u32, 0x12345678))) == @bitCast(i32, @as(u32, 0x1e6a2c48))); - try expect(@bitReverse(@bitCast(i32, @as(u32, 0xf2345678))) == @bitCast(i32, @as(u32, 0x1e6a2c4f))); - try expect(@bitReverse(@bitCast(i32, @as(u32, 0x1234567f))) == @bitCast(i32, @as(u32, 0xfe6a2c48))); - try expect(@bitReverse(@bitCast(i40, @as(u40, 0x123456789a))) == @bitCast(i40, @as(u40, 0x591e6a2c48))); - try expect(@bitReverse(@bitCast(i48, @as(u48, 0x123456789abc))) == @bitCast(i48, @as(u48, 0x3d591e6a2c48))); - try expect(@bitReverse(@bitCast(i56, @as(u56, 0x123456789abcde))) == @bitCast(i56, @as(u56, 0x7b3d591e6a2c48))); - try expect(@bitReverse(@bitCast(i64, @as(u64, 0x123456789abcdef1))) == @bitCast(i64, @as(u64, 0x8f7b3d591e6a2c48))); - try expect(@bitReverse(@bitCast(i96, @as(u96, 0x123456789abcdef111213141))) == @bitCast(i96, @as(u96, 0x828c84888f7b3d591e6a2c48))); - try expect(@bitReverse(@bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181))) == @bitCast(i128, @as(u128, 0x818e868a828c84888f7b3d591e6a2c48))); + try expect(@bitReverse(@as(i8, @bitCast(@as(u8, 0x92)))) == @as(i8, @bitCast(@as(u8, 0x49)))); + try expect(@bitReverse(@as(i16, @bitCast(@as(u16, 0x1234)))) == @as(i16, @bitCast(@as(u16, 0x2c48)))); + try expect(@bitReverse(@as(i24, @bitCast(@as(u24, 0x123456)))) == @as(i24, @bitCast(@as(u24, 0x6a2c48)))); + try expect(@bitReverse(@as(i24, @bitCast(@as(u24, 0x12345f)))) == @as(i24, @bitCast(@as(u24, 0xfa2c48)))); + try expect(@bitReverse(@as(i24, @bitCast(@as(u24, 0xf23456)))) == @as(i24, @bitCast(@as(u24, 0x6a2c4f)))); + try expect(@bitReverse(@as(i32, @bitCast(@as(u32, 0x12345678)))) == @as(i32, @bitCast(@as(u32, 0x1e6a2c48)))); + try expect(@bitReverse(@as(i32, @bitCast(@as(u32, 0xf2345678)))) == @as(i32, @bitCast(@as(u32, 0x1e6a2c4f)))); + try expect(@bitReverse(@as(i32, @bitCast(@as(u32, 0x1234567f)))) == @as(i32, @bitCast(@as(u32, 0xfe6a2c48)))); + try expect(@bitReverse(@as(i40, @bitCast(@as(u40, 0x123456789a)))) == @as(i40, @bitCast(@as(u40, 0x591e6a2c48)))); + try expect(@bitReverse(@as(i48, @bitCast(@as(u48, 0x123456789abc)))) == @as(i48, @bitCast(@as(u48, 0x3d591e6a2c48)))); + try expect(@bitReverse(@as(i56, @bitCast(@as(u56, 0x123456789abcde)))) == @as(i56, @bitCast(@as(u56, 0x7b3d591e6a2c48)))); + try expect(@bitReverse(@as(i64, @bitCast(@as(u64, 0x123456789abcdef1)))) == @as(i64, @bitCast(@as(u64, 0x8f7b3d591e6a2c48)))); + try expect(@bitReverse(@as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141)))) == @as(i96, @bitCast(@as(u96, 0x828c84888f7b3d591e6a2c48)))); + try expect(@bitReverse(@as(i128, @bitCast(@as(u128, 0x123456789abcdef11121314151617181)))) == @as(i128, @bitCast(@as(u128, 0x818e868a828c84888f7b3d591e6a2c48)))); // using signed, negative. Compare to runtime ints returned from llvm. 
var neg8: i8 = -18; diff --git a/test/behavior/bool.zig b/test/behavior/bool.zig index 50a098c111..5d09e5f8a0 100644 --- a/test/behavior/bool.zig +++ b/test/behavior/bool.zig @@ -15,8 +15,8 @@ test "cast bool to int" { const f = false; try expectEqual(@as(u32, 1), @intFromBool(t)); try expectEqual(@as(u32, 0), @intFromBool(f)); - try expectEqual(-1, @bitCast(i1, @intFromBool(t))); - try expectEqual(0, @bitCast(i1, @intFromBool(f))); + try expectEqual(-1, @as(i1, @bitCast(@intFromBool(t)))); + try expectEqual(0, @as(i1, @bitCast(@intFromBool(f)))); try expectEqual(u1, @TypeOf(@intFromBool(t))); try expectEqual(u1, @TypeOf(@intFromBool(f))); try nonConstCastIntFromBool(t, f); @@ -25,8 +25,8 @@ test "cast bool to int" { fn nonConstCastIntFromBool(t: bool, f: bool) !void { try expectEqual(@as(u32, 1), @intFromBool(t)); try expectEqual(@as(u32, 0), @intFromBool(f)); - try expectEqual(@as(i1, -1), @bitCast(i1, @intFromBool(t))); - try expectEqual(@as(i1, 0), @bitCast(i1, @intFromBool(f))); + try expectEqual(@as(i1, -1), @as(i1, @bitCast(@intFromBool(t)))); + try expectEqual(@as(i1, 0), @as(i1, @bitCast(@intFromBool(f)))); try expectEqual(u1, @TypeOf(@intFromBool(t))); try expectEqual(u1, @TypeOf(@intFromBool(f))); } diff --git a/test/behavior/bugs/11995.zig b/test/behavior/bugs/11995.zig index 0ee8e56214..fe554bc4bf 100644 --- a/test/behavior/bugs/11995.zig +++ b/test/behavior/bugs/11995.zig @@ -25,7 +25,7 @@ test { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var string: [5]u8 = "hello".*; - const arg_data = wuffs_base__slice_u8{ .ptr = @ptrCast([*c]u8, &string), .len = string.len }; + const arg_data = wuffs_base__slice_u8{ .ptr = @as([*c]u8, @ptrCast(&string)), .len = string.len }; var arg_meta = wuffs_base__io_buffer_meta{ .wi = 1, .ri = 2, .pos = 3, .closed = true }; wuffs_base__make_io_buffer(arg_data, &arg_meta); try std.testing.expectEqualStrings("wello", arg_data.ptr[0..arg_data.len]); diff --git a/test/behavior/bugs/12051.zig b/test/behavior/bugs/12051.zig index 5509ab97cd..342e851b77 100644 --- a/test/behavior/bugs/12051.zig +++ b/test/behavior/bugs/12051.zig @@ -30,8 +30,8 @@ const Y = struct { return .{ .a = 0, .b = false, - .c = @bitCast(Z, @as(u32, 0)), - .d = @bitCast(Z, @as(u32, 0)), + .c = @as(Z, @bitCast(@as(u32, 0))), + .d = @as(Z, @bitCast(@as(u32, 0))), }; } }; diff --git a/test/behavior/bugs/12119.zig b/test/behavior/bugs/12119.zig index bb5167a3da..6cfb015eb0 100644 --- a/test/behavior/bugs/12119.zig +++ b/test/behavior/bugs/12119.zig @@ -12,6 +12,6 @@ test { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const zerox32: u8x32 = [_]u8{0} ** 32; - const bigsum: u32x8 = @bitCast(u32x8, zerox32); + const bigsum: u32x8 = @as(u32x8, @bitCast(zerox32)); try std.testing.expectEqual(0, @reduce(.Add, bigsum)); } diff --git a/test/behavior/bugs/12450.zig b/test/behavior/bugs/12450.zig index db91529051..5ab6565f3c 100644 --- a/test/behavior/bugs/12450.zig +++ b/test/behavior/bugs/12450.zig @@ -16,7 +16,7 @@ test { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - var f1: *align(16) Foo = @alignCast(16, @ptrCast(*align(1) Foo, &buffer[0])); + var f1: *align(16) Foo = @alignCast(@as(*align(1) Foo, @ptrCast(&buffer[0]))); try expect(@typeInfo(@TypeOf(f1)).Pointer.alignment == 16); try expect(@intFromPtr(f1) == @intFromPtr(&f1.a)); try expect(@typeInfo(@TypeOf(&f1.a)).Pointer.alignment == 16); diff --git a/test/behavior/bugs/12723.zig 
b/test/behavior/bugs/12723.zig index abecf89025..955cc11c11 100644 --- a/test/behavior/bugs/12723.zig +++ b/test/behavior/bugs/12723.zig @@ -3,6 +3,6 @@ const expect = @import("std").testing.expect; test "Non-exhaustive enum backed by comptime_int" { const E = enum(comptime_int) { a, b, c, _ }; comptime var e: E = .a; - e = @enumFromInt(E, 378089457309184723749); + e = @as(E, @enumFromInt(378089457309184723749)); try expect(@intFromEnum(e) == 378089457309184723749); } diff --git a/test/behavior/bugs/13664.zig b/test/behavior/bugs/13664.zig index 34f6e9110b..b0ea3f70af 100644 --- a/test/behavior/bugs/13664.zig +++ b/test/behavior/bugs/13664.zig @@ -21,7 +21,7 @@ test { const timestamp: i64 = value(); const id = ID{ .fields = Fields{ - .timestamp = @intCast(u50, timestamp), + .timestamp = @as(u50, @intCast(timestamp)), .random_bits = 420, } }; try std.testing.expect((ID{ .value = id.value }).fields.timestamp == timestamp); diff --git a/test/behavior/bugs/421.zig b/test/behavior/bugs/421.zig index 1ed4a66738..f92bfb9899 100644 --- a/test/behavior/bugs/421.zig +++ b/test/behavior/bugs/421.zig @@ -16,6 +16,6 @@ fn testBitCastArray() !void { } fn extractOne64(a: u128) u64 { - const x = @bitCast([2]u64, a); + const x = @as([2]u64, @bitCast(a)); return x[1]; } diff --git a/test/behavior/bugs/6781.zig b/test/behavior/bugs/6781.zig index 2f5d7a3807..aac0c31a11 100644 --- a/test/behavior/bugs/6781.zig +++ b/test/behavior/bugs/6781.zig @@ -23,7 +23,7 @@ pub const JournalHeader = packed struct { var target: [32]u8 = undefined; std.crypto.hash.Blake3.hash(entry[checksum_offset + checksum_size ..], target[0..], .{}); - return @bitCast(u128, target[0..checksum_size].*); + return @as(u128, @bitCast(target[0..checksum_size].*)); } pub fn calculate_hash_chain_root(self: *const JournalHeader) u128 { @@ -42,16 +42,16 @@ pub const JournalHeader = packed struct { assert(prev_hash_chain_root_offset + prev_hash_chain_root_size == checksum_offset); - const header = @bitCast([@sizeOf(JournalHeader)]u8, self.*); + const header = @as([@sizeOf(JournalHeader)]u8, @bitCast(self.*)); const source = header[prev_hash_chain_root_offset .. 
checksum_offset + checksum_size]; assert(source.len == prev_hash_chain_root_size + checksum_size); var target: [32]u8 = undefined; std.crypto.hash.Blake3.hash(source, target[0..], .{}); if (segfault) { - return @bitCast(u128, target[0..hash_chain_root_size].*); + return @as(u128, @bitCast(target[0..hash_chain_root_size].*)); } else { var array = target[0..hash_chain_root_size].*; - return @bitCast(u128, array); + return @as(u128, @bitCast(array)); } } diff --git a/test/behavior/bugs/718.zig b/test/behavior/bugs/718.zig index b0f0d1ec52..0dad101e4b 100644 --- a/test/behavior/bugs/718.zig +++ b/test/behavior/bugs/718.zig @@ -15,7 +15,7 @@ test "zero keys with @memset" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - @memset(@ptrCast([*]u8, &keys)[0..@sizeOf(@TypeOf(keys))], 0); + @memset(@as([*]u8, @ptrCast(&keys))[0..@sizeOf(@TypeOf(keys))], 0); try expect(!keys.up); try expect(!keys.down); try expect(!keys.left); diff --git a/test/behavior/bugs/726.zig b/test/behavior/bugs/726.zig index 0cd8abc1cf..37e8d31cc9 100644 --- a/test/behavior/bugs/726.zig +++ b/test/behavior/bugs/726.zig @@ -8,7 +8,7 @@ test "@ptrCast from const to nullable" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const c: u8 = 4; - var x: ?*const u8 = @ptrCast(?*const u8, &c); + var x: ?*const u8 = @as(?*const u8, @ptrCast(&c)); try expect(x.?.* == 4); } @@ -21,6 +21,6 @@ test "@ptrCast from var in empty struct to nullable" { const container = struct { var c: u8 = 4; }; - var x: ?*const u8 = @ptrCast(?*const u8, &container.c); + var x: ?*const u8 = @as(?*const u8, @ptrCast(&container.c)); try expect(x.?.* == 4); } diff --git a/test/behavior/builtin_functions_returning_void_or_noreturn.zig b/test/behavior/builtin_functions_returning_void_or_noreturn.zig index ae369c4e9d..1eb2ef3049 100644 --- a/test/behavior/builtin_functions_returning_void_or_noreturn.zig +++ b/test/behavior/builtin_functions_returning_void_or_noreturn.zig @@ -17,8 +17,8 @@ test { try testing.expectEqual(void, @TypeOf(@breakpoint())); try testing.expectEqual({}, @export(x, .{ .name = "x" })); try testing.expectEqual({}, @fence(.Acquire)); - try testing.expectEqual({}, @memcpy(@ptrFromInt([*]u8, 1)[0..0], @ptrFromInt([*]u8, 1)[0..0])); - try testing.expectEqual({}, @memset(@ptrFromInt([*]u8, 1)[0..0], undefined)); + try testing.expectEqual({}, @memcpy(@as([*]u8, @ptrFromInt(1))[0..0], @as([*]u8, @ptrFromInt(1))[0..0])); + try testing.expectEqual({}, @memset(@as([*]u8, @ptrFromInt(1))[0..0], undefined)); try testing.expectEqual(noreturn, @TypeOf(if (true) @panic("") else {})); try testing.expectEqual({}, @prefetch(&val, .{})); try testing.expectEqual({}, @setAlignStack(16)); diff --git a/test/behavior/byteswap.zig b/test/behavior/byteswap.zig index 8d28285d27..ce33834ffa 100644 --- a/test/behavior/byteswap.zig +++ b/test/behavior/byteswap.zig @@ -16,13 +16,13 @@ test "@byteSwap integers" { try t(u8, 0x12, 0x12); try t(u16, 0x1234, 0x3412); try t(u24, 0x123456, 0x563412); - try t(i24, @bitCast(i24, @as(u24, 0xf23456)), 0x5634f2); - try t(i24, 0x1234f6, @bitCast(i24, @as(u24, 0xf63412))); + try t(i24, @as(i24, @bitCast(@as(u24, 0xf23456))), 0x5634f2); + try t(i24, 0x1234f6, @as(i24, @bitCast(@as(u24, 0xf63412)))); try t(u32, 0x12345678, 0x78563412); - try t(i32, @bitCast(i32, @as(u32, 0xf2345678)), 0x785634f2); - try t(i32, 0x123456f8, @bitCast(i32, @as(u32, 0xf8563412))); + try t(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), 
0x785634f2); + try t(i32, 0x123456f8, @as(i32, @bitCast(@as(u32, 0xf8563412)))); try t(u40, 0x123456789a, 0x9a78563412); - try t(i48, 0x123456789abc, @bitCast(i48, @as(u48, 0xbc9a78563412))); + try t(i48, 0x123456789abc, @as(i48, @bitCast(@as(u48, 0xbc9a78563412)))); try t(u56, 0x123456789abcde, 0xdebc9a78563412); try t(u64, 0x123456789abcdef1, 0xf1debc9a78563412); try t(u88, 0x123456789abcdef1112131, 0x312111f1debc9a78563412); @@ -31,19 +31,19 @@ test "@byteSwap integers" { try t(u0, @as(u0, 0), 0); try t(i8, @as(i8, -50), -50); - try t(i16, @bitCast(i16, @as(u16, 0x1234)), @bitCast(i16, @as(u16, 0x3412))); - try t(i24, @bitCast(i24, @as(u24, 0x123456)), @bitCast(i24, @as(u24, 0x563412))); - try t(i32, @bitCast(i32, @as(u32, 0x12345678)), @bitCast(i32, @as(u32, 0x78563412))); - try t(u40, @bitCast(i40, @as(u40, 0x123456789a)), @as(u40, 0x9a78563412)); - try t(i48, @bitCast(i48, @as(u48, 0x123456789abc)), @bitCast(i48, @as(u48, 0xbc9a78563412))); - try t(i56, @bitCast(i56, @as(u56, 0x123456789abcde)), @bitCast(i56, @as(u56, 0xdebc9a78563412))); - try t(i64, @bitCast(i64, @as(u64, 0x123456789abcdef1)), @bitCast(i64, @as(u64, 0xf1debc9a78563412))); - try t(i88, @bitCast(i88, @as(u88, 0x123456789abcdef1112131)), @bitCast(i88, @as(u88, 0x312111f1debc9a78563412))); - try t(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141)), @bitCast(i96, @as(u96, 0x41312111f1debc9a78563412))); + try t(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x3412)))); + try t(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x563412)))); + try t(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x78563412)))); + try t(u40, @as(i40, @bitCast(@as(u40, 0x123456789a))), @as(u40, 0x9a78563412)); + try t(i48, @as(i48, @bitCast(@as(u48, 0x123456789abc))), @as(i48, @bitCast(@as(u48, 0xbc9a78563412)))); + try t(i56, @as(i56, @bitCast(@as(u56, 0x123456789abcde))), @as(i56, @bitCast(@as(u56, 0xdebc9a78563412)))); + try t(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412)))); + try t(i88, @as(i88, @bitCast(@as(u88, 0x123456789abcdef1112131))), @as(i88, @bitCast(@as(u88, 0x312111f1debc9a78563412)))); + try t(i96, @as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141))), @as(i96, @bitCast(@as(u96, 0x41312111f1debc9a78563412)))); try t( i128, - @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181)), - @bitCast(i128, @as(u128, 0x8171615141312111f1debc9a78563412)), + @as(i128, @bitCast(@as(u128, 0x123456789abcdef11121314151617181))), + @as(i128, @bitCast(@as(u128, 0x8171615141312111f1debc9a78563412))), ); } fn t(comptime I: type, input: I, expected_output: I) !void { diff --git a/test/behavior/call.zig b/test/behavior/call.zig index 627df37e9b..633f5e9c3f 100644 --- a/test/behavior/call.zig +++ b/test/behavior/call.zig @@ -368,7 +368,7 @@ test "Enum constructed by @Type passed as generic argument" { } }; inline for (@typeInfo(S.E).Enum.fields, 0..) 
|_, i| { - try S.foo(@enumFromInt(S.E, i), i); + try S.foo(@as(S.E, @enumFromInt(i)), i); } } diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index d51d864ea1..e6aa53bd41 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -10,13 +10,13 @@ const native_endian = builtin.target.cpu.arch.endian(); test "int to ptr cast" { const x = @as(usize, 13); - const y = @ptrFromInt(*u8, x); + const y = @as(*u8, @ptrFromInt(x)); const z = @intFromPtr(y); try expect(z == 13); } test "integer literal to pointer cast" { - const vga_mem = @ptrFromInt(*u16, 0xB8000); + const vga_mem = @as(*u16, @ptrFromInt(0xB8000)); try expect(@intFromPtr(vga_mem) == 0xB8000); } @@ -52,7 +52,7 @@ fn testResolveUndefWithInt(b: bool, x: i32) !void { } test "@intCast to comptime_int" { - try expect(@intCast(comptime_int, 0) == 0); + try expect(@as(comptime_int, @intCast(0)) == 0); } test "implicit cast comptime numbers to any type when the value fits" { @@ -68,29 +68,29 @@ test "implicit cast comptime_int to comptime_float" { test "comptime_int @floatFromInt" { { - const result = @floatFromInt(f16, 1234); + const result = @as(f16, @floatFromInt(1234)); try expect(@TypeOf(result) == f16); try expect(result == 1234.0); } { - const result = @floatFromInt(f32, 1234); + const result = @as(f32, @floatFromInt(1234)); try expect(@TypeOf(result) == f32); try expect(result == 1234.0); } { - const result = @floatFromInt(f64, 1234); + const result = @as(f64, @floatFromInt(1234)); try expect(@TypeOf(result) == f64); try expect(result == 1234.0); } { - const result = @floatFromInt(f128, 1234); + const result = @as(f128, @floatFromInt(1234)); try expect(@TypeOf(result) == f128); try expect(result == 1234.0); } // big comptime_int (> 64 bits) to f128 conversion { - const result = @floatFromInt(f128, 0x1_0000_0000_0000_0000); + const result = @as(f128, @floatFromInt(0x1_0000_0000_0000_0000)); try expect(@TypeOf(result) == f128); try expect(result == 0x1_0000_0000_0000_0000.0); } @@ -107,8 +107,8 @@ test "@floatFromInt" { } fn testIntToFloat(k: i32) !void { - const f = @floatFromInt(f32, k); - const i = @intFromFloat(i32, f); + const f = @as(f32, @floatFromInt(k)); + const i = @as(i32, @intFromFloat(f)); try expect(i == k); } }; @@ -131,8 +131,8 @@ test "@floatFromInt(f80)" { fn testIntToFloat(comptime Int: type, k: Int) !void { @setRuntimeSafety(false); // TODO - const f = @floatFromInt(f80, k); - const i = @intFromFloat(Int, f); + const f = @as(f80, @floatFromInt(k)); + const i = @as(Int, @intFromFloat(f)); try expect(i == k); } }; @@ -165,7 +165,7 @@ test "@intFromFloat" { fn testIntFromFloats() !void { const x = @as(i32, 1e4); try expect(x == 10000); - const y = @intFromFloat(i32, @as(f32, 1e4)); + const y = @as(i32, @intFromFloat(@as(f32, 1e4))); try expect(y == 10000); try expectIntFromFloat(f32, 255.1, u8, 255); try expectIntFromFloat(f32, 127.2, i8, 127); @@ -173,7 +173,7 @@ fn testIntFromFloats() !void { } fn expectIntFromFloat(comptime F: type, f: F, comptime I: type, i: I) !void { - try expect(@intFromFloat(I, f) == i); + try expect(@as(I, @intFromFloat(f)) == i); } test "implicitly cast indirect pointer to maybe-indirect pointer" { @@ -208,29 +208,29 @@ test "implicitly cast indirect pointer to maybe-indirect pointer" { } test "@intCast comptime_int" { - const result = @intCast(i32, 1234); + const result = @as(i32, @intCast(1234)); try expect(@TypeOf(result) == i32); try expect(result == 1234); } test "@floatCast comptime_int and comptime_float" { { - const result = @floatCast(f16, 1234); + const result = 
@as(f16, @floatCast(1234)); try expect(@TypeOf(result) == f16); try expect(result == 1234.0); } { - const result = @floatCast(f16, 1234.0); + const result = @as(f16, @floatCast(1234.0)); try expect(@TypeOf(result) == f16); try expect(result == 1234.0); } { - const result = @floatCast(f32, 1234); + const result = @as(f32, @floatCast(1234)); try expect(@TypeOf(result) == f32); try expect(result == 1234.0); } { - const result = @floatCast(f32, 1234.0); + const result = @as(f32, @floatCast(1234.0)); try expect(@TypeOf(result) == f32); try expect(result == 1234.0); } @@ -276,21 +276,21 @@ test "*usize to *void" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var i = @as(usize, 0); - var v = @ptrCast(*void, &i); + var v = @as(*void, @ptrCast(&i)); v.* = {}; } test "@enumFromInt passed a comptime_int to an enum with one item" { const E = enum { A }; - const x = @enumFromInt(E, 0); + const x = @as(E, @enumFromInt(0)); try expect(x == E.A); } test "@intCast to u0 and use the result" { const S = struct { fn doTheTest(zero: u1, one: u1, bigzero: i32) !void { - try expect((one << @intCast(u0, bigzero)) == 1); - try expect((zero << @intCast(u0, bigzero)) == 0); + try expect((one << @as(u0, @intCast(bigzero))) == 1); + try expect((zero << @as(u0, @intCast(bigzero))) == 0); } }; try S.doTheTest(0, 1, 0); @@ -605,7 +605,7 @@ test "cast *[1][*]const u8 to [*]const ?[*]const u8" { const window_name = [1][*]const u8{"window name"}; const x: [*]const ?[*]const u8 = &window_name; - try expect(mem.eql(u8, std.mem.sliceTo(@ptrCast([*:0]const u8, x[0].?), 0), "window name")); + try expect(mem.eql(u8, std.mem.sliceTo(@as([*:0]const u8, @ptrCast(x[0].?)), 0), "window name")); } test "vector casts" { @@ -625,9 +625,9 @@ test "vector casts" { var up3 = @as(@Vector(2, u64), up0); // Downcast (safety-checked) var down0 = up3; - var down1 = @intCast(@Vector(2, u32), down0); - var down2 = @intCast(@Vector(2, u16), down0); - var down3 = @intCast(@Vector(2, u8), down0); + var down1 = @as(@Vector(2, u32), @intCast(down0)); + var down2 = @as(@Vector(2, u16), @intCast(down0)); + var down3 = @as(@Vector(2, u8), @intCast(down0)); try expect(mem.eql(u16, &@as([2]u16, up1), &[2]u16{ 0x55, 0xaa })); try expect(mem.eql(u32, &@as([2]u32, up2), &[2]u32{ 0x55, 0xaa })); @@ -660,12 +660,12 @@ test "@floatCast cast down" { { var double: f64 = 0.001534; - var single = @floatCast(f32, double); + var single = @as(f32, @floatCast(double)); try expect(single == 0.001534); } { const double: f64 = 0.001534; - const single = @floatCast(f32, double); + const single = @as(f32, @floatCast(double)); try expect(single == 0.001534); } } @@ -1041,7 +1041,7 @@ test "cast between C pointer with different but compatible types" { } fn doTheTest() !void { var x = [_]u16{ 4, 2, 1, 3 }; - try expect(foo(@ptrCast([*]u16, &x)) == 4); + try expect(foo(@as([*]u16, @ptrCast(&x))) == 4); } }; try S.doTheTest(); @@ -1093,10 +1093,10 @@ test "peer type resolve array pointer and unknown pointer" { test "comptime float casts" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - const a = @floatFromInt(comptime_float, 1); + const a = @as(comptime_float, @floatFromInt(1)); try expect(a == 1); try expect(@TypeOf(a) == comptime_float); - const b = @intFromFloat(comptime_int, 2); + const b = @as(comptime_int, @intFromFloat(2)); try expect(b == 2); try expect(@TypeOf(b) == comptime_int); @@ -1111,7 +1111,7 @@ test "pointer reinterpret const float to int" { // The hex representation is 0x3fe3333333333303. 
const float: f64 = 5.99999999999994648725e-01; const float_ptr = &float; - const int_ptr = @ptrCast(*const i32, float_ptr); + const int_ptr = @as(*const i32, @ptrCast(float_ptr)); const int_val = int_ptr.*; if (native_endian == .Little) try expect(int_val == 0x33333303) @@ -1134,7 +1134,7 @@ test "implicit cast from [*]T to ?*anyopaque" { fn incrementVoidPtrArray(array: ?*anyopaque, len: usize) void { var n: usize = 0; while (n < len) : (n += 1) { - @ptrCast([*]u8, array.?)[n] += 1; + @as([*]u8, @ptrCast(array.?))[n] += 1; } } @@ -1146,7 +1146,7 @@ test "compile time int to ptr of function" { // On some architectures function pointers must be aligned. const hardcoded_fn_addr = maxInt(usize) & ~@as(usize, 0xf); -pub const FUNCTION_CONSTANT = @ptrFromInt(PFN_void, hardcoded_fn_addr); +pub const FUNCTION_CONSTANT = @as(PFN_void, @ptrFromInt(hardcoded_fn_addr)); pub const PFN_void = *const fn (*anyopaque) callconv(.C) void; fn foobar(func: PFN_void) !void { @@ -1161,10 +1161,10 @@ test "implicit ptr to *anyopaque" { var a: u32 = 1; var ptr: *align(@alignOf(u32)) anyopaque = &a; - var b: *u32 = @ptrCast(*u32, ptr); + var b: *u32 = @as(*u32, @ptrCast(ptr)); try expect(b.* == 1); var ptr2: ?*align(@alignOf(u32)) anyopaque = &a; - var c: *u32 = @ptrCast(*u32, ptr2.?); + var c: *u32 = @as(*u32, @ptrCast(ptr2.?)); try expect(c.* == 1); } @@ -1235,11 +1235,11 @@ fn testCast128() !void { } fn cast128Int(x: f128) u128 { - return @bitCast(u128, x); + return @as(u128, @bitCast(x)); } fn cast128Float(x: u128) f128 { - return @bitCast(f128, x); + return @as(f128, @bitCast(x)); } test "implicit cast from *[N]T to ?[*]T" { @@ -1270,7 +1270,7 @@ test "implicit cast from *T to ?*anyopaque" { } fn incrementVoidPtrValue(value: ?*anyopaque) void { - @ptrCast(*u8, value.?).* += 1; + @as(*u8, @ptrCast(value.?)).* += 1; } test "implicit cast *[0]T to E![]const u8" { @@ -1284,11 +1284,11 @@ test "implicit cast *[0]T to E![]const u8" { var global_array: [4]u8 = undefined; test "cast from array reference to fn: comptime fn ptr" { - const f = @ptrCast(*align(1) const fn () callconv(.C) void, &global_array); + const f = @as(*align(1) const fn () callconv(.C) void, @ptrCast(&global_array)); try expect(@intFromPtr(f) == @intFromPtr(&global_array)); } test "cast from array reference to fn: runtime fn ptr" { - var f = @ptrCast(*align(1) const fn () callconv(.C) void, &global_array); + var f = @as(*align(1) const fn () callconv(.C) void, @ptrCast(&global_array)); try expect(@intFromPtr(f) == @intFromPtr(&global_array)); } @@ -1337,7 +1337,7 @@ test "assignment to optional pointer result loc" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var foo: struct { ptr: ?*anyopaque } = .{ .ptr = &global_struct }; - try expect(foo.ptr.? == @ptrCast(*anyopaque, &global_struct)); + try expect(foo.ptr.? 
== @as(*anyopaque, @ptrCast(&global_struct))); } test "cast between *[N]void and []void" { @@ -1393,9 +1393,9 @@ test "cast f128 to narrower types" { const S = struct { fn doTheTest() !void { var x: f128 = 1234.0; - try expect(@as(f16, 1234.0) == @floatCast(f16, x)); - try expect(@as(f32, 1234.0) == @floatCast(f32, x)); - try expect(@as(f64, 1234.0) == @floatCast(f64, x)); + try expect(@as(f16, 1234.0) == @as(f16, @floatCast(x))); + try expect(@as(f32, 1234.0) == @as(f32, @floatCast(x))); + try expect(@as(f64, 1234.0) == @as(f64, @floatCast(x))); } }; try S.doTheTest(); @@ -1500,8 +1500,8 @@ test "coerce between pointers of compatible differently-named floats" { } test "peer type resolution of const and non-const pointer to array" { - const a = @ptrFromInt(*[1024]u8, 42); - const b = @ptrFromInt(*const [1024]u8, 42); + const a = @as(*[1024]u8, @ptrFromInt(42)); + const b = @as(*const [1024]u8, @ptrFromInt(42)); try std.testing.expect(@TypeOf(a, b) == *const [1024]u8); try std.testing.expect(a == b); } @@ -1512,7 +1512,7 @@ test "intFromFloat to zero-bit int" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const a: f32 = 0.0; - try comptime std.testing.expect(@intFromFloat(u0, a) == 0); + try comptime std.testing.expect(@as(u0, @intFromFloat(a)) == 0); } test "peer type resolution of function pointer and function body" { @@ -1547,10 +1547,10 @@ test "bitcast packed struct with u0" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO const S = packed struct(u2) { a: u0, b: u2 }; - const s = @bitCast(S, @as(u2, 2)); + const s = @as(S, @bitCast(@as(u2, 2))); try expect(s.a == 0); try expect(s.b == 2); - const i = @bitCast(u2, s); + const i = @as(u2, @bitCast(s)); try expect(i == 2); } @@ -1560,7 +1560,7 @@ test "optional pointer coerced to optional allowzero pointer" { var p: ?*u32 = undefined; var q: ?*allowzero u32 = undefined; - p = @ptrFromInt(*u32, 4); + p = @as(*u32, @ptrFromInt(4)); q = p; try expect(@intFromPtr(q.?) 
== 4); } @@ -1583,7 +1583,7 @@ test "peer type resolution forms error union" { 0 => unreachable, 42 => error.AccessDenied, else => unreachable, - } else @intCast(u32, foo); + } else @as(u32, @intCast(foo)); try expect(try result == 123); } @@ -1623,8 +1623,8 @@ test "peer type resolution: const sentinel slice and mutable non-sentinel slice" const S = struct { fn doTheTest(comptime T: type, comptime s: T) !void { - var a: [:s]const T = @ptrFromInt(*const [2:s]T, 0x1000); - var b: []T = @ptrFromInt(*[3]T, 0x2000); + var a: [:s]const T = @as(*const [2:s]T, @ptrFromInt(0x1000)); + var b: []T = @as(*[3]T, @ptrFromInt(0x2000)); comptime assert(@TypeOf(a, b) == []const T); comptime assert(@TypeOf(b, a) == []const T); @@ -1634,8 +1634,8 @@ test "peer type resolution: const sentinel slice and mutable non-sentinel slice" const R = @TypeOf(r1); - try expectEqual(@as(R, @ptrFromInt(*const [2:s]T, 0x1000)), r1); - try expectEqual(@as(R, @ptrFromInt(*const [3]T, 0x2000)), r2); + try expectEqual(@as(R, @as(*const [2:s]T, @ptrFromInt(0x1000))), r1); + try expectEqual(@as(R, @as(*const [3]T, @ptrFromInt(0x2000))), r2); } }; @@ -1815,7 +1815,7 @@ test "peer type resolution: three-way resolution combines error set and optional const E = error{Foo}; var a: E = error.Foo; - var b: *const [5:0]u8 = @ptrFromInt(*const [5:0]u8, 0x1000); + var b: *const [5:0]u8 = @as(*const [5:0]u8, @ptrFromInt(0x1000)); var c: ?[*:0]u8 = null; comptime assert(@TypeOf(a, b, c) == E!?[*:0]const u8); comptime assert(@TypeOf(a, c, b) == E!?[*:0]const u8); @@ -1844,7 +1844,7 @@ test "peer type resolution: three-way resolution combines error set and optional const T = @TypeOf(r1); try expectEqual(@as(T, error.Foo), r1); - try expectEqual(@as(T, @ptrFromInt([*:0]u8, 0x1000)), r2); + try expectEqual(@as(T, @as([*:0]u8, @ptrFromInt(0x1000))), r2); try expectEqual(@as(T, null), r3); } @@ -2114,7 +2114,7 @@ test "peer type resolution: many compatible pointers" { 4 => "foo-4", else => unreachable, }; - try expectEqualSlices(u8, expected, std.mem.span(@ptrCast([*:0]const u8, r))); + try expectEqualSlices(u8, expected, std.mem.span(@as([*:0]const u8, @ptrCast(r)))); } } diff --git a/test/behavior/cast_int.zig b/test/behavior/cast_int.zig index 041ee193e8..6d4f530409 100644 --- a/test/behavior/cast_int.zig +++ b/test/behavior/cast_int.zig @@ -11,6 +11,6 @@ test "@intCast i32 to u7" { var x: u128 = maxInt(u128); var y: i32 = 120; - var z = x >> @intCast(u7, y); + var z = x >> @as(u7, @intCast(y)); try expect(z == 0xff); } diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig index d327afb783..b0c5e9c91e 100644 --- a/test/behavior/comptime_memory.zig +++ b/test/behavior/comptime_memory.zig @@ -6,7 +6,7 @@ const ptr_size = @sizeOf(usize); test "type pun signed and unsigned as single pointer" { comptime { var x: u32 = 0; - const y = @ptrCast(*i32, &x); + const y = @as(*i32, @ptrCast(&x)); y.* = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); } @@ -15,7 +15,7 @@ test "type pun signed and unsigned as single pointer" { test "type pun signed and unsigned as many pointer" { comptime { var x: u32 = 0; - const y = @ptrCast([*]i32, &x); + const y = @as([*]i32, @ptrCast(&x)); y[0] = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); } @@ -24,7 +24,7 @@ test "type pun signed and unsigned as many pointer" { test "type pun signed and unsigned as array pointer" { comptime { var x: u32 = 0; - const y = @ptrCast(*[1]i32, &x); + const y = @as(*[1]i32, @ptrCast(&x)); y[0] = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); 
} @@ -38,7 +38,7 @@ test "type pun signed and unsigned as offset many pointer" { comptime { var x: u32 = 0; - var y = @ptrCast([*]i32, &x); + var y = @as([*]i32, @ptrCast(&x)); y -= 10; y[10] = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); @@ -53,7 +53,7 @@ test "type pun signed and unsigned as array pointer with pointer arithemtic" { comptime { var x: u32 = 0; - const y = @ptrCast([*]i32, &x) - 10; + const y = @as([*]i32, @ptrCast(&x)) - 10; const z: *[15]i32 = y[0..15]; z[10] = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); @@ -64,9 +64,9 @@ test "type pun value and struct" { comptime { const StructOfU32 = extern struct { x: u32 }; var inst: StructOfU32 = .{ .x = 0 }; - @ptrCast(*i32, &inst.x).* = -1; + @as(*i32, @ptrCast(&inst.x)).* = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), inst.x); - @ptrCast(*i32, &inst).* = -2; + @as(*i32, @ptrCast(&inst)).* = -2; try testing.expectEqual(@as(u32, 0xFFFFFFFE), inst.x); } } @@ -81,8 +81,8 @@ test "type pun endianness" { comptime { const StructOfBytes = extern struct { x: [4]u8 }; var inst: StructOfBytes = .{ .x = [4]u8{ 0, 0, 0, 0 } }; - const structPtr = @ptrCast(*align(1) u32, &inst); - const arrayPtr = @ptrCast(*align(1) u32, &inst.x); + const structPtr = @as(*align(1) u32, @ptrCast(&inst)); + const arrayPtr = @as(*align(1) u32, @ptrCast(&inst.x)); inst.x[0] = 0xFE; inst.x[2] = 0xBE; try testing.expectEqual(bigToNativeEndian(u32, 0xFE00BE00), structPtr.*); @@ -124,8 +124,8 @@ fn shuffle(ptr: usize, comptime From: type, comptime To: type) usize { @compileError("Mismatched sizes! " ++ @typeName(From) ++ " and " ++ @typeName(To) ++ " must have the same size!"); const array_len = @divExact(ptr_size, @sizeOf(From)); var result: usize = 0; - const pSource = @ptrCast(*align(1) const [array_len]From, &ptr); - const pResult = @ptrCast(*align(1) [array_len]To, &result); + const pSource = @as(*align(1) const [array_len]From, @ptrCast(&ptr)); + const pResult = @as(*align(1) [array_len]To, @ptrCast(&result)); var i: usize = 0; while (i < array_len) : (i += 1) { inline for (@typeInfo(To).Struct.fields) |f| { @@ -136,8 +136,8 @@ fn shuffle(ptr: usize, comptime From: type, comptime To: type) usize { } fn doTypePunBitsTest(as_bits: *Bits) !void { - const as_u32 = @ptrCast(*align(1) u32, as_bits); - const as_bytes = @ptrCast(*[4]u8, as_bits); + const as_u32 = @as(*align(1) u32, @ptrCast(as_bits)); + const as_bytes = @as(*[4]u8, @ptrCast(as_bits)); as_u32.* = bigToNativeEndian(u32, 0xB0A7DEED); try testing.expectEqual(@as(u1, 0x00), as_bits.p0); try testing.expectEqual(@as(u4, 0x08), as_bits.p1); @@ -176,7 +176,7 @@ test "type pun bits" { comptime { var v: u32 = undefined; - try doTypePunBitsTest(@ptrCast(*Bits, &v)); + try doTypePunBitsTest(@as(*Bits, @ptrCast(&v))); } } @@ -194,7 +194,7 @@ test "basic pointer preservation" { comptime { const lazy_address = @intFromPtr(&imports.global_u32); try testing.expectEqual(@intFromPtr(&imports.global_u32), lazy_address); - try testing.expectEqual(&imports.global_u32, @ptrFromInt(*u32, lazy_address)); + try testing.expectEqual(&imports.global_u32, @as(*u32, @ptrFromInt(lazy_address))); } } @@ -207,8 +207,8 @@ test "byte copy preserves linker value" { const ct_value = comptime blk: { const lazy = &imports.global_u32; var result: *u32 = undefined; - const pSource = @ptrCast(*const [ptr_size]u8, &lazy); - const pResult = @ptrCast(*[ptr_size]u8, &result); + const pSource = @as(*const [ptr_size]u8, @ptrCast(&lazy)); + const pResult = @as(*[ptr_size]u8, @ptrCast(&result)); var i: usize = 0; while (i < 
ptr_size) : (i += 1) { pResult[i] = pSource[i]; @@ -230,8 +230,8 @@ test "unordered byte copy preserves linker value" { const ct_value = comptime blk: { const lazy = &imports.global_u32; var result: *u32 = undefined; - const pSource = @ptrCast(*const [ptr_size]u8, &lazy); - const pResult = @ptrCast(*[ptr_size]u8, &result); + const pSource = @as(*const [ptr_size]u8, @ptrCast(&lazy)); + const pResult = @as(*[ptr_size]u8, @ptrCast(&result)); if (ptr_size > 8) @compileError("This array needs to be expanded for platform with very big pointers"); const shuffled_indices = [_]usize{ 4, 5, 2, 6, 1, 3, 0, 7 }; for (shuffled_indices) |i| { @@ -274,12 +274,12 @@ test "dance on linker values" { arr[0] = @intFromPtr(&imports.global_u32); arr[1] = @intFromPtr(&imports.global_u32); - const weird_ptr = @ptrCast([*]Bits, @ptrCast([*]u8, &arr) + @sizeOf(usize) - 3); + const weird_ptr = @as([*]Bits, @ptrCast(@as([*]u8, @ptrCast(&arr)) + @sizeOf(usize) - 3)); try doTypePunBitsTest(&weird_ptr[0]); if (ptr_size > @sizeOf(Bits)) try doTypePunBitsTest(&weird_ptr[1]); - var arr_bytes = @ptrCast(*[2][ptr_size]u8, &arr); + var arr_bytes = @as(*[2][ptr_size]u8, @ptrCast(&arr)); var rebuilt_bytes: [ptr_size]u8 = undefined; var i: usize = 0; @@ -290,7 +290,7 @@ test "dance on linker values" { rebuilt_bytes[i] = arr_bytes[1][i]; } - try testing.expectEqual(&imports.global_u32, @ptrFromInt(*u32, @bitCast(usize, rebuilt_bytes))); + try testing.expectEqual(&imports.global_u32, @as(*u32, @ptrFromInt(@as(usize, @bitCast(rebuilt_bytes))))); } } @@ -316,7 +316,7 @@ test "offset array ptr by element size" { try testing.expectEqual(@intFromPtr(&arr[2]), address + 2 * @sizeOf(VirtualStruct)); try testing.expectEqual(@intFromPtr(&arr[3]), address + @sizeOf(VirtualStruct) * 3); - const secondElement = @ptrFromInt(*VirtualStruct, @intFromPtr(&arr[0]) + 2 * @sizeOf(VirtualStruct)); + const secondElement = @as(*VirtualStruct, @ptrFromInt(@intFromPtr(&arr[0]) + 2 * @sizeOf(VirtualStruct))); try testing.expectEqual(bigToNativeEndian(u32, 0x02060a0e), secondElement.x); } } @@ -334,15 +334,15 @@ test "offset instance by field size" { var ptr = @intFromPtr(&inst); ptr -= 4; ptr += @offsetOf(VirtualStruct, "x"); - try testing.expectEqual(@as(u32, 0), @ptrFromInt([*]u32, ptr)[1]); + try testing.expectEqual(@as(u32, 0), @as([*]u32, @ptrFromInt(ptr))[1]); ptr -= @offsetOf(VirtualStruct, "x"); ptr += @offsetOf(VirtualStruct, "y"); - try testing.expectEqual(@as(u32, 1), @ptrFromInt([*]u32, ptr)[1]); + try testing.expectEqual(@as(u32, 1), @as([*]u32, @ptrFromInt(ptr))[1]); ptr = ptr - @offsetOf(VirtualStruct, "y") + @offsetOf(VirtualStruct, "z"); - try testing.expectEqual(@as(u32, 2), @ptrFromInt([*]u32, ptr)[1]); + try testing.expectEqual(@as(u32, 2), @as([*]u32, @ptrFromInt(ptr))[1]); ptr = @intFromPtr(&inst.z) - 4 - @offsetOf(VirtualStruct, "z"); ptr += @offsetOf(VirtualStruct, "w"); - try testing.expectEqual(@as(u32, 3), @ptrFromInt(*u32, ptr + 4).*); + try testing.expectEqual(@as(u32, 3), @as(*u32, @ptrFromInt(ptr + 4)).*); } } @@ -363,13 +363,13 @@ test "offset field ptr by enclosing array element size" { var i: usize = 0; while (i < 4) : (i += 1) { - var ptr: [*]u8 = @ptrCast([*]u8, &arr[0]); + var ptr: [*]u8 = @as([*]u8, @ptrCast(&arr[0])); ptr += i; ptr += @offsetOf(VirtualStruct, "x"); var j: usize = 0; while (j < 4) : (j += 1) { const base = ptr + j * @sizeOf(VirtualStruct); - try testing.expectEqual(@intCast(u8, i * 4 + j), base[0]); + try testing.expectEqual(@as(u8, @intCast(i * 4 + j)), base[0]); } } } @@ -393,7 +393,7 @@ test 
"accessing reinterpreted memory of parent object" { .c = 2.6, }; const ptr = &x.b[0]; - const b = @ptrCast([*c]const u8, ptr)[5]; + const b = @as([*c]const u8, @ptrCast(ptr))[5]; try testing.expect(b == expected); } } @@ -407,11 +407,11 @@ test "bitcast packed union to integer" { comptime { const a = U{ .x = 1 }; const b = U{ .y = 2 }; - const cast_a = @bitCast(u2, a); - const cast_b = @bitCast(u2, b); + const cast_a = @as(u2, @bitCast(a)); + const cast_b = @as(u2, @bitCast(b)); // truncated because the upper bit is garbage memory that we don't care about - try testing.expectEqual(@as(u1, 1), @truncate(u1, cast_a)); + try testing.expectEqual(@as(u1, 1), @as(u1, @truncate(cast_a))); try testing.expectEqual(@as(u2, 2), cast_b); } } @@ -435,6 +435,6 @@ test "dereference undefined pointer to zero-bit type" { test "type pun extern struct" { const S = extern struct { f: u8 }; comptime var s = S{ .f = 123 }; - @ptrCast(*u8, &s).* = 72; + @as(*u8, @ptrCast(&s)).* = 72; try testing.expectEqual(@as(u8, 72), s.f); } diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig index 1076f5e3ea..ffb254f765 100644 --- a/test/behavior/enum.zig +++ b/test/behavior/enum.zig @@ -20,7 +20,7 @@ test "enum to int" { } fn testIntToEnumEval(x: i32) !void { - try expect(@enumFromInt(IntToEnumNumber, x) == IntToEnumNumber.Three); + try expect(@as(IntToEnumNumber, @enumFromInt(x)) == IntToEnumNumber.Three); } const IntToEnumNumber = enum { Zero, One, Two, Three, Four }; @@ -629,7 +629,7 @@ test "non-exhaustive enum" { .b => true, _ => false, }); - e = @enumFromInt(E, 12); + e = @as(E, @enumFromInt(12)); try expect(switch (e) { .a => false, .b => false, @@ -648,9 +648,9 @@ test "non-exhaustive enum" { }); try expect(@typeInfo(E).Enum.fields.len == 2); - e = @enumFromInt(E, 12); + e = @as(E, @enumFromInt(12)); try expect(@intFromEnum(e) == 12); - e = @enumFromInt(E, y); + e = @as(E, @enumFromInt(y)); try expect(@intFromEnum(e) == 52); try expect(@typeInfo(E).Enum.is_exhaustive == false); } @@ -666,7 +666,7 @@ test "empty non-exhaustive enum" { const E = enum(u8) { _ }; fn doTheTest(y: u8) !void { - var e = @enumFromInt(E, y); + var e = @as(E, @enumFromInt(y)); try expect(switch (e) { _ => true, }); @@ -693,7 +693,7 @@ test "single field non-exhaustive enum" { .a => true, _ => false, }); - e = @enumFromInt(E, 12); + e = @as(E, @enumFromInt(12)); try expect(switch (e) { .a => false, _ => true, @@ -709,7 +709,7 @@ test "single field non-exhaustive enum" { else => false, }); - try expect(@intFromEnum(@enumFromInt(E, y)) == y); + try expect(@intFromEnum(@as(E, @enumFromInt(y))) == y); try expect(@typeInfo(E).Enum.fields.len == 1); try expect(@typeInfo(E).Enum.is_exhaustive == false); } @@ -741,8 +741,8 @@ const MultipleChoice2 = enum(u32) { }; test "cast integer literal to enum" { - try expect(@enumFromInt(MultipleChoice2, 0) == MultipleChoice2.Unspecified1); - try expect(@enumFromInt(MultipleChoice2, 40) == MultipleChoice2.B); + try expect(@as(MultipleChoice2, @enumFromInt(0)) == MultipleChoice2.Unspecified1); + try expect(@as(MultipleChoice2, @enumFromInt(40)) == MultipleChoice2.B); } test "enum with specified and unspecified tag values" { @@ -1155,7 +1155,7 @@ test "size of enum with only one tag which has explicit integer tag type" { var s1: S1 = undefined; s1.e = .nope; try expect(s1.e == .nope); - const ptr = @ptrCast(*u8, &s1); + const ptr = @as(*u8, @ptrCast(&s1)); try expect(ptr.* == 10); var s0: S0 = undefined; @@ -1183,7 +1183,7 @@ test "Non-exhaustive enum with nonstandard int size behaves correctly" { test 
"runtime int to enum with one possible value" { const E = enum { one }; var runtime: usize = 0; - if (@enumFromInt(E, runtime) != .one) { + if (@as(E, @enumFromInt(runtime)) != .one) { @compileError("test failed"); } } @@ -1194,7 +1194,7 @@ test "enum tag from a local variable" { return enum(Inner) { _ }; } }; - const i = @enumFromInt(S.Int(u32), 0); + const i = @as(S.Int(u32), @enumFromInt(0)); try std.testing.expect(@intFromEnum(i) == 0); } @@ -1203,12 +1203,12 @@ test "auto-numbered enum with signed tag type" { try std.testing.expectEqual(@as(i32, 0), @intFromEnum(E.a)); try std.testing.expectEqual(@as(i32, 1), @intFromEnum(E.b)); - try std.testing.expectEqual(E.a, @enumFromInt(E, 0)); - try std.testing.expectEqual(E.b, @enumFromInt(E, 1)); - try std.testing.expectEqual(E.a, @enumFromInt(E, @as(i32, 0))); - try std.testing.expectEqual(E.b, @enumFromInt(E, @as(i32, 1))); - try std.testing.expectEqual(E.a, @enumFromInt(E, @as(u32, 0))); - try std.testing.expectEqual(E.b, @enumFromInt(E, @as(u32, 1))); + try std.testing.expectEqual(E.a, @as(E, @enumFromInt(0))); + try std.testing.expectEqual(E.b, @as(E, @enumFromInt(1))); + try std.testing.expectEqual(E.a, @as(E, @enumFromInt(@as(i32, 0)))); + try std.testing.expectEqual(E.b, @as(E, @enumFromInt(@as(i32, 1)))); + try std.testing.expectEqual(E.a, @as(E, @enumFromInt(@as(u32, 0)))); + try std.testing.expectEqual(E.b, @as(E, @enumFromInt(@as(u32, 1)))); try std.testing.expectEqualStrings("a", @tagName(E.a)); try std.testing.expectEqualStrings("b", @tagName(E.b)); } diff --git a/test/behavior/error.zig b/test/behavior/error.zig index 14b0eca030..06062ac66c 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -234,9 +234,9 @@ const Set1 = error{ A, B }; const Set2 = error{ A, C }; fn testExplicitErrorSetCast(set1: Set1) !void { - var x = @errSetCast(Set2, set1); + var x = @as(Set2, @errSetCast(set1)); try expect(@TypeOf(x) == Set2); - var y = @errSetCast(Set1, x); + var y = @as(Set1, @errSetCast(x)); try expect(@TypeOf(y) == Set1); try expect(y == error.A); } diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 85dc5e29b5..f2b91e66ac 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -9,7 +9,7 @@ test "compile time recursion" { try expect(some_data.len == 21); } -var some_data: [@intCast(usize, fibonacci(7))]u8 = undefined; +var some_data: [@as(usize, @intCast(fibonacci(7)))]u8 = undefined; fn fibonacci(x: i32) i32 { if (x <= 1) return 1; return fibonacci(x - 1) + fibonacci(x - 2); @@ -123,7 +123,7 @@ fn fnWithSetRuntimeSafety() i32 { test "compile-time downcast when the bits fit" { comptime { const spartan_count: u16 = 255; - const byte = @intCast(u8, spartan_count); + const byte = @as(u8, @intCast(spartan_count)); try expect(byte == 255); } } @@ -149,7 +149,7 @@ test "a type constructed in a global expression" { l.array[0] = 10; l.array[1] = 11; l.array[2] = 12; - const ptr = @ptrCast([*]u8, &l.array); + const ptr = @as([*]u8, @ptrCast(&l.array)); try expect(ptr[0] == 10); try expect(ptr[1] == 11); try expect(ptr[2] == 12); @@ -332,7 +332,7 @@ fn generateTable(comptime T: type) [1010]T { var res: [1010]T = undefined; var i: usize = 0; while (i < 1010) : (i += 1) { - res[i] = @intCast(T, i); + res[i] = @as(T, @intCast(i)); } return res; } @@ -460,7 +460,7 @@ test "binary math operator in partially inlined function" { var b: [16]u8 = undefined; for (&b, 0..) 
|*r, i| - r.* = @intCast(u8, i + 1); + r.* = @as(u8, @intCast(i + 1)); copyWithPartialInline(s[0..], b[0..]); try expect(s[0] == 0x1020304); @@ -942,7 +942,7 @@ test "comptime pointer load through elem_ptr" { .x = i, }; } - var ptr = @ptrCast([*]S, &array); + var ptr = @as([*]S, @ptrCast(&array)); var x = ptr[0].x; assert(x == 0); ptr += 1; @@ -1281,9 +1281,9 @@ test "comptime write through extern struct reinterpreted as array" { c: u8, }; var s: S = undefined; - @ptrCast(*[3]u8, &s)[0] = 1; - @ptrCast(*[3]u8, &s)[1] = 2; - @ptrCast(*[3]u8, &s)[2] = 3; + @as(*[3]u8, @ptrCast(&s))[0] = 1; + @as(*[3]u8, @ptrCast(&s))[1] = 2; + @as(*[3]u8, @ptrCast(&s))[2] = 3; assert(s.a == 1); assert(s.b == 2); assert(s.c == 3); @@ -1371,7 +1371,7 @@ test "lazy value is resolved as slice operand" { var a: [512]u64 = undefined; const ptr1 = a[0..@sizeOf(A)]; - const ptr2 = @ptrCast([*]u8, &a)[0..@sizeOf(A)]; + const ptr2 = @as([*]u8, @ptrCast(&a))[0..@sizeOf(A)]; try expect(@intFromPtr(ptr1) == @intFromPtr(ptr2)); try expect(ptr1.len == ptr2.len); } diff --git a/test/behavior/export.zig b/test/behavior/export.zig index 4928e86725..4751ccafe5 100644 --- a/test/behavior/export.zig +++ b/test/behavior/export.zig @@ -7,7 +7,7 @@ const builtin = @import("builtin"); // can't really run this test but we can make sure it has no compile error // and generates code -const vram = @ptrFromInt([*]volatile u8, 0x20000000)[0..0x8000]; +const vram = @as([*]volatile u8, @ptrFromInt(0x20000000))[0..0x8000]; export fn writeToVRam() void { vram[0] = 'X'; } diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index 56f3885a4a..e21645ae8f 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -94,7 +94,7 @@ test "negative f128 intFromFloat at compile-time" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const a: f128 = -2; - var b = @intFromFloat(i64, a); + var b = @as(i64, @intFromFloat(a)); try expect(@as(i64, -2) == b); } @@ -387,11 +387,11 @@ fn testLog() !void { } { var a: f32 = e; - try expect(@log(a) == 1 or @log(a) == @bitCast(f32, @as(u32, 0x3f7fffff))); + try expect(@log(a) == 1 or @log(a) == @as(f32, @bitCast(@as(u32, 0x3f7fffff)))); } { var a: f64 = e; - try expect(@log(a) == 1 or @log(a) == @bitCast(f64, @as(u64, 0x3ff0000000000000))); + try expect(@log(a) == 1 or @log(a) == @as(f64, @bitCast(@as(u64, 0x3ff0000000000000)))); } inline for ([_]type{ f16, f32, f64 }) |ty| { const eps = epsForType(ty); diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index 6c7e127964..e7b7e63e33 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -326,7 +326,7 @@ test "function pointers" { &fn4, }; for (fns, 0..) 
|f, i| { - try expect(f() == @intCast(u32, i) + 5); + try expect(f() == @as(u32, @intCast(i)) + 5); } } fn fn1() u32 { @@ -512,8 +512,8 @@ test "using @ptrCast on function pointers" { fn run() !void { const a = A{ .data = "abcd".* }; - const casted_fn = @ptrCast(*const fn (*const anyopaque, usize) *const u8, &at); - const casted_impl = @ptrCast(*const anyopaque, &a); + const casted_fn = @as(*const fn (*const anyopaque, usize) *const u8, @ptrCast(&at)); + const casted_impl = @as(*const anyopaque, @ptrCast(&a)); const ptr = casted_fn(casted_impl, 2); try expect(ptr.* == 'c'); } @@ -575,7 +575,7 @@ test "lazy values passed to anytype parameter" { try B.foo(.{ .x = @sizeOf(B) }); const C = struct {}; - try expect(@truncate(u32, @sizeOf(C)) == 0); + try expect(@as(u32, @truncate(@sizeOf(C))) == 0); const D = struct {}; try expect(@sizeOf(D) << 1 == 0); diff --git a/test/behavior/fn_in_struct_in_comptime.zig b/test/behavior/fn_in_struct_in_comptime.zig index b31b377c04..0acadbc5ea 100644 --- a/test/behavior/fn_in_struct_in_comptime.zig +++ b/test/behavior/fn_in_struct_in_comptime.zig @@ -14,5 +14,5 @@ fn get_foo() fn (*u8) usize { test "define a function in an anonymous struct in comptime" { const foo = get_foo(); - try expect(foo(@ptrFromInt(*u8, 12345)) == 12345); + try expect(foo(@as(*u8, @ptrFromInt(12345))) == 12345); } diff --git a/test/behavior/for.zig b/test/behavior/for.zig index 12b82c44a4..f751d35d96 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -84,7 +84,7 @@ test "basic for loop" { } for (array, 0..) |item, index| { _ = item; - buffer[buf_index] = @intCast(u8, index); + buffer[buf_index] = @as(u8, @intCast(index)); buf_index += 1; } const array_ptr = &array; @@ -94,7 +94,7 @@ test "basic for loop" { } for (array_ptr, 0..) |item, index| { _ = item; - buffer[buf_index] = @intCast(u8, index); + buffer[buf_index] = @as(u8, @intCast(index)); buf_index += 1; } const unknown_size: []const u8 = &array; @@ -103,7 +103,7 @@ test "basic for loop" { buf_index += 1; } for (unknown_size, 0..) |_, index| { - buffer[buf_index] = @intCast(u8, index); + buffer[buf_index] = @as(u8, @intCast(index)); buf_index += 1; } @@ -208,7 +208,7 @@ test "for on slice with allowzero ptr" { const S = struct { fn doTheTest(slice: []const u8) !void { - var ptr = @ptrCast([*]allowzero const u8, slice.ptr)[0..slice.len]; + var ptr = @as([*]allowzero const u8, @ptrCast(slice.ptr))[0..slice.len]; for (ptr, 0..) |x, i| try expect(x == i + 1); for (ptr, 0..) 
|*x, i| try expect(x.* == i + 1); } @@ -393,7 +393,7 @@ test "raw pointer and counter" { const ptr: [*]u8 = &buf; for (ptr, 0..4) |*a, b| { - a.* = @intCast(u8, 'A' + b); + a.* = @as(u8, @intCast('A' + b)); } try expect(buf[0] == 'A'); diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index f0c8516f67..7d4a841a62 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -97,7 +97,7 @@ test "type constructed by comptime function call" { l.array[0] = 10; l.array[1] = 11; l.array[2] = 12; - const ptr = @ptrCast([*]u8, &l.array); + const ptr = @as([*]u8, @ptrCast(&l.array)); try expect(ptr[0] == 10); try expect(ptr[1] == 11); try expect(ptr[2] == 12); @@ -171,7 +171,7 @@ fn getByte(ptr: ?*const u8) u8 { return ptr.?.*; } fn getFirstByte(comptime T: type, mem: []const T) u8 { - return getByte(@ptrCast(*const u8, &mem[0])); + return getByte(@as(*const u8, @ptrCast(&mem[0]))); } test "generic fn keeps non-generic parameter types" { @@ -428,7 +428,7 @@ test "null sentinel pointer passed as generic argument" { try std.testing.expect(@intFromPtr(a) == 8); } }; - try S.doTheTest((@ptrFromInt([*:null]const [*c]const u8, 8))); + try S.doTheTest((@as([*:null]const [*c]const u8, @ptrFromInt(8)))); } test "generic function passed as comptime argument" { diff --git a/test/behavior/int128.zig b/test/behavior/int128.zig index 6fd2c192a2..42f0b00922 100644 --- a/test/behavior/int128.zig +++ b/test/behavior/int128.zig @@ -38,7 +38,7 @@ test "undefined 128 bit int" { var undef: u128 = undefined; var undef_signed: i128 = undefined; - try expect(undef == 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa and @bitCast(u128, undef_signed) == undef); + try expect(undef == 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa and @as(u128, @bitCast(undef_signed)) == undef); } test "int128" { @@ -49,7 +49,7 @@ test "int128" { var buff: i128 = -1; try expect(buff < 0 and (buff + 1) == 0); - try expect(@intCast(i8, buff) == @as(i8, -1)); + try expect(@as(i8, @intCast(buff)) == @as(i8, -1)); buff = minInt(i128); try expect(buff < 0); @@ -73,16 +73,16 @@ test "truncate int128" { { var buff: u128 = maxInt(u128); - try expect(@truncate(u64, buff) == maxInt(u64)); - try expect(@truncate(u90, buff) == maxInt(u90)); - try expect(@truncate(u128, buff) == maxInt(u128)); + try expect(@as(u64, @truncate(buff)) == maxInt(u64)); + try expect(@as(u90, @truncate(buff)) == maxInt(u90)); + try expect(@as(u128, @truncate(buff)) == maxInt(u128)); } { var buff: i128 = maxInt(i128); - try expect(@truncate(i64, buff) == -1); - try expect(@truncate(i90, buff) == -1); - try expect(@truncate(i128, buff) == maxInt(i128)); + try expect(@as(i64, @truncate(buff)) == -1); + try expect(@as(i90, @truncate(buff)) == -1); + try expect(@as(i128, @truncate(buff)) == maxInt(i128)); } } diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 42c328c7d4..3b5d4876fd 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -391,11 +391,11 @@ test "binary not 128-bit" { break :x ~@as(u128, 0x55555555_55555555_55555555_55555555) == 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa; }); try expect(comptime x: { - break :x ~@as(i128, 0x55555555_55555555_55555555_55555555) == @bitCast(i128, @as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa)); + break :x ~@as(i128, 0x55555555_55555555_55555555_55555555) == @as(i128, @bitCast(@as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa))); }); try testBinaryNot128(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa); - try testBinaryNot128(i128, @bitCast(i128, @as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa))); + try 
testBinaryNot128(i128, @as(i128, @bitCast(@as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa)))); } fn testBinaryNot128(comptime Type: type, x: Type) !void { @@ -1156,29 +1156,29 @@ test "quad hex float literal parsing accurate" { // implied 1 is dropped, with an exponent of 0 (0x3fff) after biasing. const expected: u128 = 0x3fff1111222233334444555566667777; - try expect(@bitCast(u128, a) == expected); + try expect(@as(u128, @bitCast(a)) == expected); // non-normalized const b: f128 = 0x11.111222233334444555566667777p-4; - try expect(@bitCast(u128, b) == expected); + try expect(@as(u128, @bitCast(b)) == expected); const S = struct { fn doTheTest() !void { { var f: f128 = 0x1.2eab345678439abcdefea56782346p+5; - try expect(@bitCast(u128, f) == 0x40042eab345678439abcdefea5678234); + try expect(@as(u128, @bitCast(f)) == 0x40042eab345678439abcdefea5678234); } { var f: f128 = 0x1.edcb34a235253948765432134674fp-1; - try expect(@bitCast(u128, f) == 0x3ffeedcb34a235253948765432134675); // round-to-even + try expect(@as(u128, @bitCast(f)) == 0x3ffeedcb34a235253948765432134675); // round-to-even } { var f: f128 = 0x1.353e45674d89abacc3a2ebf3ff4ffp-50; - try expect(@bitCast(u128, f) == 0x3fcd353e45674d89abacc3a2ebf3ff50); + try expect(@as(u128, @bitCast(f)) == 0x3fcd353e45674d89abacc3a2ebf3ff50); } { var f: f128 = 0x1.ed8764648369535adf4be3214567fp-9; - try expect(@bitCast(u128, f) == 0x3ff6ed8764648369535adf4be3214568); + try expect(@as(u128, @bitCast(f)) == 0x3ff6ed8764648369535adf4be3214568); } const exp2ft = [_]f64{ 0x1.6a09e667f3bcdp-1, @@ -1233,7 +1233,7 @@ test "quad hex float literal parsing accurate" { }; for (exp2ft, 0..) |x, i| { - try expect(@bitCast(u64, x) == answers[i]); + try expect(@as(u64, @bitCast(x)) == answers[i]); } } }; @@ -1586,7 +1586,7 @@ test "signed zeros are represented properly" { fn testOne(comptime T: type) !void { const ST = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); var as_fp_val = -@as(T, 0.0); - var as_uint_val = @bitCast(ST, as_fp_val); + var as_uint_val = @as(ST, @bitCast(as_fp_val)); // Ensure the sign bit is set. 
try expect(as_uint_val >> (@typeInfo(T).Float.bits - 1) == 1); } diff --git a/test/behavior/memcpy.zig b/test/behavior/memcpy.zig index 3a87b66fb1..f1776dfe57 100644 --- a/test/behavior/memcpy.zig +++ b/test/behavior/memcpy.zig @@ -59,7 +59,7 @@ fn testMemcpyDestManyPtr() !void { var str = "hello".*; var buf: [5]u8 = undefined; var len: usize = 5; - @memcpy(@ptrCast([*]u8, &buf), @ptrCast([*]const u8, &str)[0..len]); + @memcpy(@as([*]u8, @ptrCast(&buf)), @as([*]const u8, @ptrCast(&str))[0..len]); try expect(buf[0] == 'h'); try expect(buf[1] == 'e'); try expect(buf[2] == 'l'); diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index 037fee74ee..12cc027ef4 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -166,7 +166,7 @@ test "correct sizeOf and offsets in packed structs" { try expectEqual(4, @sizeOf(PStruct)); if (native_endian == .Little) { - const s1 = @bitCast(PStruct, @as(u32, 0x12345678)); + const s1 = @as(PStruct, @bitCast(@as(u32, 0x12345678))); try expectEqual(false, s1.bool_a); try expectEqual(false, s1.bool_b); try expectEqual(false, s1.bool_c); @@ -180,7 +180,7 @@ test "correct sizeOf and offsets in packed structs" { try expectEqual(@as(u10, 0b1101000101), s1.u10_a); try expectEqual(@as(u10, 0b0001001000), s1.u10_b); - const s2 = @bitCast(packed struct { x: u1, y: u7, z: u24 }, @as(u32, 0xd5c71ff4)); + const s2 = @as(packed struct { x: u1, y: u7, z: u24 }, @bitCast(@as(u32, 0xd5c71ff4))); try expectEqual(@as(u1, 0), s2.x); try expectEqual(@as(u7, 0b1111010), s2.y); try expectEqual(@as(u24, 0xd5c71f), s2.z); @@ -207,7 +207,7 @@ test "nested packed structs" { try expectEqual(24, @bitOffsetOf(S3, "y")); if (native_endian == .Little) { - const s3 = @bitCast(S3Padded, @as(u64, 0xe952d5c71ff4)).s3; + const s3 = @as(S3Padded, @bitCast(@as(u64, 0xe952d5c71ff4))).s3; try expectEqual(@as(u8, 0xf4), s3.x.a); try expectEqual(@as(u8, 0x1f), s3.x.b); try expectEqual(@as(u8, 0xc7), s3.x.c); @@ -600,7 +600,7 @@ test "packed struct initialized in bitcast" { const T = packed struct { val: u8 }; var val: u8 = 123; - const t = @bitCast(u8, T{ .val = val }); + const t = @as(u8, @bitCast(T{ .val = val })); try expect(t == val); } @@ -627,7 +627,7 @@ test "pointer to container level packed struct field" { }, var arr = [_]u32{0} ** 2; }; - @ptrCast(*S, &S.arr[0]).other_bits.enable_3 = true; + @as(*S, @ptrCast(&S.arr[0])).other_bits.enable_3 = true; try expect(S.arr[0] == 0x10000000); } diff --git a/test/behavior/packed_struct_explicit_backing_int.zig b/test/behavior/packed_struct_explicit_backing_int.zig index 62dd178fd5..9e476572ab 100644 --- a/test/behavior/packed_struct_explicit_backing_int.zig +++ b/test/behavior/packed_struct_explicit_backing_int.zig @@ -25,7 +25,7 @@ test "packed struct explicit backing integer" { try expectEqual(24, @bitOffsetOf(S3, "y")); if (native_endian == .Little) { - const s3 = @bitCast(S3Padded, @as(u64, 0xe952d5c71ff4)).s3; + const s3 = @as(S3Padded, @bitCast(@as(u64, 0xe952d5c71ff4))).s3; try expectEqual(@as(u8, 0xf4), s3.x.a); try expectEqual(@as(u8, 0x1f), s3.x.b); try expectEqual(@as(u8, 0xc7), s3.x.c); diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index 4e04fe580c..d007e7b480 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -184,8 +184,8 @@ test "implicit cast error unions with non-optional to optional pointer" { } test "compare equality of optional and non-optional pointer" { - const a = @ptrFromInt(*const usize, 0x12345678); - const b = 
@ptrFromInt(?*usize, 0x12345678); + const a = @as(*const usize, @ptrFromInt(0x12345678)); + const b = @as(?*usize, @ptrFromInt(0x12345678)); try expect(a == b); try expect(b == a); } @@ -197,7 +197,7 @@ test "allowzero pointer and slice" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - var ptr = @ptrFromInt([*]allowzero i32, 0); + var ptr = @as([*]allowzero i32, @ptrFromInt(0)); var opt_ptr: ?[*]allowzero i32 = ptr; try expect(opt_ptr != null); try expect(@intFromPtr(ptr) == 0); @@ -286,9 +286,9 @@ test "null terminated pointer" { const S = struct { fn doTheTest() !void { var array_with_zero = [_:0]u8{ 'h', 'e', 'l', 'l', 'o' }; - var zero_ptr: [*:0]const u8 = @ptrCast([*:0]const u8, &array_with_zero); + var zero_ptr: [*:0]const u8 = @as([*:0]const u8, @ptrCast(&array_with_zero)); var no_zero_ptr: [*]const u8 = zero_ptr; - var zero_ptr_again = @ptrCast([*:0]const u8, no_zero_ptr); + var zero_ptr_again = @as([*:0]const u8, @ptrCast(no_zero_ptr)); try expect(std.mem.eql(u8, std.mem.sliceTo(zero_ptr_again, 0), "hello")); } }; @@ -367,7 +367,7 @@ test "pointer sentinel with +inf" { } test "pointer to array at fixed address" { - const array = @ptrFromInt(*volatile [2]u32, 0x10); + const array = @as(*volatile [2]u32, @ptrFromInt(0x10)); // Silly check just to reference `array` try expect(@intFromPtr(&array[0]) == 0x10); try expect(@intFromPtr(&array[1]) == 0x14); @@ -406,13 +406,13 @@ test "pointer arithmetic affects the alignment" { test "@intFromPtr on null optional at comptime" { { - const pointer = @ptrFromInt(?*u8, 0x000); + const pointer = @as(?*u8, @ptrFromInt(0x000)); const x = @intFromPtr(pointer); _ = x; try comptime expect(0 == @intFromPtr(pointer)); } { - const pointer = @ptrFromInt(?*u8, 0xf00); + const pointer = @as(?*u8, @ptrFromInt(0xf00)); try comptime expect(0xf00 == @intFromPtr(pointer)); } } @@ -463,8 +463,8 @@ test "element pointer arithmetic to slice" { }; const elem_ptr = &cases[0]; // *[2]i32 - const many = @ptrCast([*][2]i32, elem_ptr); - const many_elem = @ptrCast(*[2]i32, &many[1]); + const many = @as([*][2]i32, @ptrCast(elem_ptr)); + const many_elem = @as(*[2]i32, @ptrCast(&many[1])); const items: []i32 = many_elem; try testing.expect(items.len == 2); try testing.expect(items[1] == 3); @@ -512,7 +512,7 @@ test "ptrCast comptime known slice to C pointer" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const s: [:0]const u8 = "foo"; - var p = @ptrCast([*c]const u8, s); + var p = @as([*c]const u8, @ptrCast(s)); try std.testing.expectEqualStrings(s, std.mem.sliceTo(p, 0)); } @@ -550,7 +550,7 @@ test "pointer to array has explicit alignment" { const Base = extern struct { a: u8 }; const Base2 = extern struct { a: u8 }; fn func(ptr: *[4]Base) *align(1) [4]Base2 { - return @alignCast(1, @ptrCast(*[4]Base2, ptr)); + return @alignCast(@as(*[4]Base2, @ptrCast(ptr))); } }; var bases = [_]S.Base{.{ .a = 2 }} ** 4; diff --git a/test/behavior/popcount.zig b/test/behavior/popcount.zig index 51146b14c8..da152d4dc5 100644 --- a/test/behavior/popcount.zig +++ b/test/behavior/popcount.zig @@ -63,7 +63,7 @@ fn testPopCountIntegers() !void { try expect(@popCount(x) == 2); } comptime { - try expect(@popCount(@bitCast(u8, @as(i8, -120))) == 2); + try expect(@popCount(@as(u8, @bitCast(@as(i8, -120)))) == 2); } } diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig index aadae132d9..3a2ec9db19 100644 --- a/test/behavior/ptrcast.zig +++ 
b/test/behavior/ptrcast.zig @@ -16,7 +16,7 @@ fn testReinterpretBytesAsInteger() !void { .Little => 0xab785634, .Big => 0x345678ab, }; - try expect(@ptrCast(*align(1) const u32, bytes[1..5]).* == expected); + try expect(@as(*align(1) const u32, @ptrCast(bytes[1..5])).* == expected); } test "reinterpret an array over multiple elements, with no well-defined layout" { @@ -32,7 +32,7 @@ test "reinterpret an array over multiple elements, with no well-defined layout" fn testReinterpretWithOffsetAndNoWellDefinedLayout() !void { const bytes: ?[5]?u8 = [5]?u8{ 0x12, 0x34, 0x56, 0x78, 0x9a }; const ptr = &bytes.?[1]; - const copy: [4]?u8 = @ptrCast(*const [4]?u8, ptr).*; + const copy: [4]?u8 = @as(*const [4]?u8, @ptrCast(ptr)).*; _ = copy; //try expect(@ptrCast(*align(1)?u8, bytes[1..5]).* == ); } @@ -51,7 +51,7 @@ fn testReinterpretStructWrappedBytesAsInteger() !void { .Little => 0xab785634, .Big => 0x345678ab, }; - try expect(@ptrCast(*align(1) const u32, obj.bytes[1..5]).* == expected); + try expect(@as(*align(1) const u32, @ptrCast(obj.bytes[1..5])).* == expected); } test "reinterpret bytes of an array into an extern struct" { @@ -71,7 +71,7 @@ fn testReinterpretBytesAsExternStruct() !void { c: u8, }; - var ptr = @ptrCast(*const S, &bytes); + var ptr = @as(*const S, @ptrCast(&bytes)); var val = ptr.c; try expect(val == 5); } @@ -95,7 +95,7 @@ fn testReinterpretExternStructAsExternStruct() !void { a: u32 align(2), c: u8, }; - var ptr = @ptrCast(*const S2, &bytes); + var ptr = @as(*const S2, @ptrCast(&bytes)); var val = ptr.c; try expect(val == 5); } @@ -121,7 +121,7 @@ fn testReinterpretOverAlignedExternStructAsExternStruct() !void { a2: u16, c: u8, }; - var ptr = @ptrCast(*const S2, &bytes); + var ptr = @as(*const S2, @ptrCast(&bytes)); var val = ptr.c; try expect(val == 5); } @@ -138,13 +138,13 @@ test "lower reinterpreted comptime field ptr (with under-aligned fields)" { a: u32 align(2), c: u8, }; - comptime var ptr = @ptrCast(*const S, &bytes); + comptime var ptr = @as(*const S, @ptrCast(&bytes)); var val = &ptr.c; try expect(val.* == 5); // Test lowering an elem ptr comptime var src_value = S{ .a = 15, .c = 5 }; - comptime var ptr2 = @ptrCast(*[@sizeOf(S)]u8, &src_value); + comptime var ptr2 = @as(*[@sizeOf(S)]u8, @ptrCast(&src_value)); var val2 = &ptr2[4]; try expect(val2.* == 5); } @@ -161,13 +161,13 @@ test "lower reinterpreted comptime field ptr" { a: u32, c: u8, }; - comptime var ptr = @ptrCast(*const S, &bytes); + comptime var ptr = @as(*const S, @ptrCast(&bytes)); var val = &ptr.c; try expect(val.* == 5); // Test lowering an elem ptr comptime var src_value = S{ .a = 15, .c = 5 }; - comptime var ptr2 = @ptrCast(*[@sizeOf(S)]u8, &src_value); + comptime var ptr2 = @as(*[@sizeOf(S)]u8, @ptrCast(&src_value)); var val2 = &ptr2[4]; try expect(val2.* == 5); } @@ -190,27 +190,17 @@ const Bytes = struct { pub fn init(v: u32) Bytes { var res: Bytes = undefined; - @ptrCast(*align(1) u32, &res.bytes).* = v; + @as(*align(1) u32, @ptrCast(&res.bytes)).* = v; return res; } }; -test "comptime ptrcast keeps larger alignment" { - if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - - comptime { - const a: u32 = 1234; - const p = @ptrCast([*]const u8, &a); - try expect(@TypeOf(p) == [*]align(@alignOf(u32)) const u8); - } -} - test "ptrcast of const integer has the correct object size" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - const is_value = ~@intCast(isize, std.math.minInt(isize)); - const is_bytes = @ptrCast([*]const u8, 
&is_value)[0..@sizeOf(isize)]; + const is_value = ~@as(isize, @intCast(std.math.minInt(isize))); + const is_bytes = @as([*]const u8, @ptrCast(&is_value))[0..@sizeOf(isize)]; if (@sizeOf(isize) == 8) { switch (native_endian) { .Little => { @@ -248,7 +238,7 @@ test "implicit optional pointer to optional anyopaque pointer" { var buf: [4]u8 = "aoeu".*; var x: ?[*]u8 = &buf; var y: ?*anyopaque = x; - var z = @ptrCast(*[4]u8, y); + var z = @as(*[4]u8, @ptrCast(y)); try expect(std.mem.eql(u8, z, "aoeu")); } @@ -260,7 +250,7 @@ test "@ptrCast slice to slice" { const S = struct { fn foo(slice: []u32) []i32 { - return @ptrCast([]i32, slice); + return @as([]i32, @ptrCast(slice)); } }; var buf: [4]u32 = .{ 0, 0, 0, 0 }; @@ -277,7 +267,7 @@ test "comptime @ptrCast a subset of an array, then write through it" { comptime { var buff: [16]u8 align(4) = undefined; - const len_bytes = @ptrCast(*u32, &buff); + const len_bytes = @as(*u32, @ptrCast(&buff)); len_bytes.* = 16; std.mem.copy(u8, buff[4..], "abcdef"); } @@ -286,7 +276,7 @@ test "comptime @ptrCast a subset of an array, then write through it" { test "@ptrCast undefined value at comptime" { const S = struct { fn transmute(comptime T: type, comptime U: type, value: T) U { - return @ptrCast(*const U, &value).*; + return @as(*const U, @ptrCast(&value)).*; } }; comptime { diff --git a/test/behavior/ptrfromint.zig b/test/behavior/ptrfromint.zig index c07a6df834..72244aa7d1 100644 --- a/test/behavior/ptrfromint.zig +++ b/test/behavior/ptrfromint.zig @@ -9,7 +9,7 @@ test "casting integer address to function pointer" { fn addressToFunction() void { var addr: usize = 0xdeadbee0; - _ = @ptrFromInt(*const fn () void, addr); + _ = @as(*const fn () void, @ptrFromInt(addr)); } test "mutate through ptr initialized with constant ptrFromInt value" { @@ -21,7 +21,7 @@ test "mutate through ptr initialized with constant ptrFromInt value" { } fn forceCompilerAnalyzeBranchHardCodedPtrDereference(x: bool) void { - const hardCodedP = @ptrFromInt(*volatile u8, 0xdeadbeef); + const hardCodedP = @as(*volatile u8, @ptrFromInt(0xdeadbeef)); if (x) { hardCodedP.* = hardCodedP.* | 10; } else { @@ -34,7 +34,7 @@ test "@ptrFromInt creates null pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - const ptr = @ptrFromInt(?*u32, 0); + const ptr = @as(?*u32, @ptrFromInt(0)); try expectEqual(@as(?*u32, null), ptr); } @@ -43,6 +43,6 @@ test "@ptrFromInt creates allowzero zero pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - const ptr = @ptrFromInt(*allowzero u32, 0); + const ptr = @as(*allowzero u32, @ptrFromInt(0)); try expectEqual(@as(usize, 0), @intFromPtr(ptr)); } diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig index 3657e77e50..a161be66eb 100644 --- a/test/behavior/sizeof_and_typeof.zig +++ b/test/behavior/sizeof_and_typeof.zig @@ -231,7 +231,7 @@ test "@sizeOf comparison against zero" { test "hardcoded address in typeof expression" { const S = struct { - fn func() @TypeOf(@ptrFromInt(*[]u8, 0x10).*[0]) { + fn func() @TypeOf(@as(*[]u8, @ptrFromInt(0x10)).*[0]) { return 0; } }; @@ -252,7 +252,7 @@ test "array access of generic param in typeof expression" { test "lazy size cast to float" { { const S = struct { a: u8 }; - try expect(@floatFromInt(f32, @sizeOf(S)) == 1.0); + try expect(@as(f32, @floatFromInt(@sizeOf(S))) == 1.0); } { 
const S = struct { a: u8 }; diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index fcbae214ac..4316aca34f 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -129,7 +129,7 @@ test "generic malloc free" { } var some_mem: [100]u8 = undefined; fn memAlloc(comptime T: type, n: usize) anyerror![]T { - return @ptrCast([*]T, &some_mem[0])[0..n]; + return @as([*]T, @ptrCast(&some_mem[0]))[0..n]; } fn memFree(comptime T: type, memory: []T) void { _ = memory; @@ -138,7 +138,7 @@ fn memFree(comptime T: type, memory: []T) void { test "slice of hardcoded address to pointer" { const S = struct { fn doTheTest() !void { - const pointer = @ptrFromInt([*]u8, 0x04)[0..2]; + const pointer = @as([*]u8, @ptrFromInt(0x04))[0..2]; try comptime expect(@TypeOf(pointer) == *[2]u8); const slice: []const u8 = pointer; try expect(@intFromPtr(slice.ptr) == 4); @@ -152,7 +152,7 @@ test "slice of hardcoded address to pointer" { test "comptime slice of pointer preserves comptime var" { comptime { var buff: [10]u8 = undefined; - var a = @ptrCast([*]u8, &buff); + var a = @as([*]u8, @ptrCast(&buff)); a[0..1][0] = 1; try expect(buff[0..][0..][0] == 1); } @@ -161,7 +161,7 @@ test "comptime slice of pointer preserves comptime var" { test "comptime pointer cast array and then slice" { const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 }; - const ptrA: [*]const u8 = @ptrCast([*]const u8, &array); + const ptrA: [*]const u8 = @as([*]const u8, @ptrCast(&array)); const sliceA: []const u8 = ptrA[0..2]; const ptrB: [*]const u8 = &array; @@ -188,7 +188,7 @@ test "slicing pointer by length" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 }; - const ptr: [*]const u8 = @ptrCast([*]const u8, &array); + const ptr: [*]const u8 = @as([*]const u8, @ptrCast(&array)); const slice = ptr[1..][0..5]; try expect(slice.len == 5); var i: usize = 0; @@ -197,7 +197,7 @@ test "slicing pointer by length" { } } -const x = @ptrFromInt([*]i32, 0x1000)[0..0x500]; +const x = @as([*]i32, @ptrFromInt(0x1000))[0..0x500]; const y = x[0x100..]; test "compile time slice of pointer to hard coded address" { try expect(@intFromPtr(x) == 0x1000); @@ -262,7 +262,7 @@ test "C pointer slice access" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var buf: [10]u32 = [1]u32{42} ** 10; - const c_ptr = @ptrCast([*c]const u32, &buf); + const c_ptr = @as([*c]const u32, @ptrCast(&buf)); var runtime_zero: usize = 0; try comptime expectEqual([]const u32, @TypeOf(c_ptr[runtime_zero..1])); @@ -352,7 +352,7 @@ test "@ptrCast slice to pointer" { fn doTheTest() !void { var array align(@alignOf(u16)) = [5]u8{ 0xff, 0xff, 0xff, 0xff, 0xff }; var slice: []align(@alignOf(u16)) u8 = &array; - var ptr = @ptrCast(*u16, slice); + var ptr = @as(*u16, @ptrCast(slice)); try expect(ptr.* == 65535); } }; @@ -837,13 +837,13 @@ test "empty slice ptr is non null" { { const empty_slice: []u8 = &[_]u8{}; const p: [*]u8 = empty_slice.ptr + 0; - const t = @ptrCast([*]i8, p); + const t = @as([*]i8, @ptrCast(p)); try expect(@intFromPtr(t) == @intFromPtr(empty_slice.ptr)); } { const empty_slice: []u8 = &.{}; const p: [*]u8 = empty_slice.ptr + 0; - const t = @ptrCast([*]i8, p); + const t = @as([*]i8, @ptrCast(p)); try expect(@intFromPtr(t) == @intFromPtr(empty_slice.ptr)); } } diff --git a/test/behavior/slice_sentinel_comptime.zig b/test/behavior/slice_sentinel_comptime.zig index 368860547e..31b7e2349e 100644 --- a/test/behavior/slice_sentinel_comptime.zig +++ 
b/test/behavior/slice_sentinel_comptime.zig @@ -25,7 +25,7 @@ test "comptime slice-sentinel in bounds (unterminated)" { // vector_ConstPtrSpecialRef comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @as([*]u8, @ptrCast(&buf)); const slice = target[0..3 :'d']; _ = slice; } @@ -41,7 +41,7 @@ test "comptime slice-sentinel in bounds (unterminated)" { // cvector_ConstPtrSpecialRef comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @as([*c]u8, @ptrCast(&buf)); const slice = target[0..3 :'d']; _ = slice; } @@ -82,7 +82,7 @@ test "comptime slice-sentinel in bounds (end,unterminated)" { // vector_ConstPtrSpecialRef comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{0xff} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @as([*]u8, @ptrCast(&buf)); const slice = target[0..13 :0xff]; _ = slice; } @@ -98,7 +98,7 @@ test "comptime slice-sentinel in bounds (end,unterminated)" { // cvector_ConstPtrSpecialRef comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{0xff} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @as([*c]u8, @ptrCast(&buf)); const slice = target[0..13 :0xff]; _ = slice; } @@ -139,7 +139,7 @@ test "comptime slice-sentinel in bounds (terminated)" { // vector_ConstPtrSpecialRef comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @as([*]u8, @ptrCast(&buf)); const slice = target[0..3 :'d']; _ = slice; } @@ -155,7 +155,7 @@ test "comptime slice-sentinel in bounds (terminated)" { // cvector_ConstPtrSpecialRef comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @as([*c]u8, @ptrCast(&buf)); const slice = target[0..3 :'d']; _ = slice; } @@ -196,7 +196,7 @@ test "comptime slice-sentinel in bounds (on target sentinel)" { // vector_ConstPtrSpecialRef comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @as([*]u8, @ptrCast(&buf)); const slice = target[0..14 :0]; _ = slice; } @@ -212,7 +212,7 @@ test "comptime slice-sentinel in bounds (on target sentinel)" { // cvector_ConstPtrSpecialRef comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @as([*c]u8, @ptrCast(&buf)); const slice = target[0..14 :0]; _ = slice; } diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 6ced42998e..95b2718efd 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -92,7 +92,7 @@ test "structs" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var foo: StructFoo = undefined; - @memset(@ptrCast([*]u8, &foo)[0..@sizeOf(StructFoo)], 0); + @memset(@as([*]u8, @ptrCast(&foo))[0..@sizeOf(StructFoo)], 0); foo.a += 1; foo.b = foo.a == 1; try testFoo(foo); @@ -479,14 +479,14 @@ test "runtime struct initialization of bitfield" { .y = x1, }; const s2 = Nibbles{ - .x = @intCast(u4, x2), - .y = @intCast(u4, x2), + .x = @as(u4, @intCast(x2)), + .y = @as(u4, @intCast(x2)), }; try expect(s1.x == x1); try expect(s1.y == x1); - try expect(s2.x == @intCast(u4, x2)); - try expect(s2.y == @intCast(u4, x2)); + try expect(s2.x == @as(u4, @intCast(x2))); + try expect(s2.y == 
@as(u4, @intCast(x2))); } var x1 = @as(u4, 1); @@ -515,8 +515,8 @@ test "packed struct fields are ordered from LSB to MSB" { var all: u64 = 0x7765443322221111; var bytes: [8]u8 align(@alignOf(Bitfields)) = undefined; - @memcpy(bytes[0..8], @ptrCast([*]u8, &all)); - var bitfields = @ptrCast(*Bitfields, &bytes).*; + @memcpy(bytes[0..8], @as([*]u8, @ptrCast(&all))); + var bitfields = @as(*Bitfields, @ptrCast(&bytes)).*; try expect(bitfields.f1 == 0x1111); try expect(bitfields.f2 == 0x2222); @@ -1281,7 +1281,7 @@ test "packed struct aggregate init" { const S = struct { fn foo(a: i2, b: i6) u8 { - return @bitCast(u8, P{ .a = a, .b = b }); + return @as(u8, @bitCast(P{ .a = a, .b = b })); } const P = packed struct { @@ -1289,7 +1289,7 @@ test "packed struct aggregate init" { b: i6, }; }; - const result = @bitCast(u8, S.foo(1, 2)); + const result = @as(u8, @bitCast(S.foo(1, 2))); try expect(result == 9); } @@ -1365,7 +1365,7 @@ test "under-aligned struct field" { }; var runtime: usize = 1234; const ptr = &S{ .events = 0, .data = .{ .u64 = runtime } }; - const array = @ptrCast(*const [12]u8, ptr); + const array = @as(*const [12]u8, @ptrCast(ptr)); const result = std.mem.readIntNative(u64, array[4..12]); try expect(result == 1234); } diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index bcbfc81ed4..0ae7c510ef 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -590,9 +590,9 @@ test "switch on pointer type" { field: u32, }; - const P1 = @ptrFromInt(*X, 0x400); - const P2 = @ptrFromInt(*X, 0x800); - const P3 = @ptrFromInt(*X, 0xC00); + const P1 = @as(*X, @ptrFromInt(0x400)); + const P2 = @as(*X, @ptrFromInt(0x800)); + const P3 = @as(*X, @ptrFromInt(0xC00)); fn doTheTest(arg: *X) i32 { switch (arg) { @@ -682,9 +682,9 @@ test "enum value without tag name used as switch item" { b = 2, _, }; - var e: E = @enumFromInt(E, 0); + var e: E = @as(E, @enumFromInt(0)); switch (e) { - @enumFromInt(E, 0) => {}, + @as(E, @enumFromInt(0)) => {}, .a => return error.TestFailed, .b => return error.TestFailed, _ => return error.TestFailed, diff --git a/test/behavior/translate_c_macros.zig b/test/behavior/translate_c_macros.zig index a69396c203..68e91bfa58 100644 --- a/test/behavior/translate_c_macros.zig +++ b/test/behavior/translate_c_macros.zig @@ -60,7 +60,7 @@ test "cast negative integer to pointer" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - try expectEqual(@ptrFromInt(?*anyopaque, @bitCast(usize, @as(isize, -1))), h.MAP_FAILED); + try expectEqual(@as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))), h.MAP_FAILED); } test "casting to union with a macro" { @@ -89,7 +89,7 @@ test "casting or calling a value with a paren-surrounded macro" { const l: c_long = 42; const casted = h.CAST_OR_CALL_WITH_PARENS(c_int, l); - try expect(casted == @intCast(c_int, l)); + try expect(casted == @as(c_int, @intCast(l))); const Helper = struct { fn foo(n: c_int) !void { diff --git a/test/behavior/truncate.zig b/test/behavior/truncate.zig index 3ea979009e..4fc095b66c 100644 --- a/test/behavior/truncate.zig +++ b/test/behavior/truncate.zig @@ -4,58 +4,58 @@ const expect = std.testing.expect; test "truncate u0 to larger integer allowed and has comptime-known result" { var x: u0 = 0; - const y = @truncate(u8, x); + const y = @as(u8, @truncate(x)); try comptime expect(y == 0); } test "truncate.u0.literal" { - var z = @truncate(u0, 0); + var z = @as(u0, @truncate(0)); try expect(z == 
0); } test "truncate.u0.const" { const c0: usize = 0; - var z = @truncate(u0, c0); + var z = @as(u0, @truncate(c0)); try expect(z == 0); } test "truncate.u0.var" { var d: u8 = 2; - var z = @truncate(u0, d); + var z = @as(u0, @truncate(d)); try expect(z == 0); } test "truncate i0 to larger integer allowed and has comptime-known result" { var x: i0 = 0; - const y = @truncate(i8, x); + const y = @as(i8, @truncate(x)); try comptime expect(y == 0); } test "truncate.i0.literal" { - var z = @truncate(i0, 0); + var z = @as(i0, @truncate(0)); try expect(z == 0); } test "truncate.i0.const" { const c0: isize = 0; - var z = @truncate(i0, c0); + var z = @as(i0, @truncate(c0)); try expect(z == 0); } test "truncate.i0.var" { var d: i8 = 2; - var z = @truncate(i0, d); + var z = @as(i0, @truncate(d)); try expect(z == 0); } test "truncate on comptime integer" { - var x = @truncate(u16, 9999); + var x = @as(u16, @truncate(9999)); try expect(x == 9999); - var y = @truncate(u16, -21555); + var y = @as(u16, @truncate(-21555)); try expect(y == 0xabcd); - var z = @truncate(i16, -65537); + var z = @as(i16, @truncate(-65537)); try expect(z == -1); - var w = @truncate(u1, 1 << 100); + var w = @as(u1, @truncate(1 << 100)); try expect(w == 0); } @@ -69,7 +69,7 @@ test "truncate on vectors" { const S = struct { fn doTheTest() !void { var v1: @Vector(4, u16) = .{ 0xaabb, 0xccdd, 0xeeff, 0x1122 }; - var v2 = @truncate(u8, v1); + var v2: @Vector(4, u8) = @truncate(v1); try expect(std.mem.eql(u8, &@as([4]u8, v2), &[4]u8{ 0xbb, 0xdd, 0xff, 0x22 })); } }; diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig index ee414365c3..e9d3fcd0aa 100644 --- a/test/behavior/tuple.zig +++ b/test/behavior/tuple.zig @@ -403,7 +403,7 @@ test "nested runtime conditionals in tuple initializer" { var data: u8 = 0; const x = .{ - if (data != 0) "" else switch (@truncate(u1, data)) { + if (data != 0) "" else switch (@as(u1, @truncate(data))) { 0 => "up", 1 => "down", }, diff --git a/test/behavior/tuple_declarations.zig b/test/behavior/tuple_declarations.zig index c053447ccc..84b04d3e53 100644 --- a/test/behavior/tuple_declarations.zig +++ b/test/behavior/tuple_declarations.zig @@ -21,7 +21,7 @@ test "tuple declaration type info" { try expectEqualStrings(info.fields[0].name, "0"); try expect(info.fields[0].type == u32); - try expect(@ptrCast(*const u32, @alignCast(@alignOf(u32), info.fields[0].default_value)).* == 1); + try expect(@as(*const u32, @ptrCast(@alignCast(info.fields[0].default_value))).* == 1); try expect(info.fields[0].is_comptime); try expect(info.fields[0].alignment == 2); diff --git a/test/behavior/type.zig b/test/behavior/type.zig index 9420b5d2fd..a2ede838b2 100644 --- a/test/behavior/type.zig +++ b/test/behavior/type.zig @@ -289,7 +289,7 @@ test "Type.Struct" { try testing.expectEqual(@as(?*const anyopaque, null), infoB.fields[0].default_value); try testing.expectEqualSlices(u8, "y", infoB.fields[1].name); try testing.expectEqual(u32, infoB.fields[1].type); - try testing.expectEqual(@as(u32, 5), @ptrCast(*align(1) const u32, infoB.fields[1].default_value.?).*); + try testing.expectEqual(@as(u32, 5), @as(*align(1) const u32, @ptrCast(infoB.fields[1].default_value.?)).*); try testing.expectEqual(@as(usize, 0), infoB.decls.len); try testing.expectEqual(@as(bool, false), infoB.is_tuple); @@ -298,10 +298,10 @@ test "Type.Struct" { try testing.expectEqual(Type.ContainerLayout.Packed, infoC.layout); try testing.expectEqualSlices(u8, "x", infoC.fields[0].name); try testing.expectEqual(u8, infoC.fields[0].type); - try 
testing.expectEqual(@as(u8, 3), @ptrCast(*const u8, infoC.fields[0].default_value.?).*); + try testing.expectEqual(@as(u8, 3), @as(*const u8, @ptrCast(infoC.fields[0].default_value.?)).*); try testing.expectEqualSlices(u8, "y", infoC.fields[1].name); try testing.expectEqual(u32, infoC.fields[1].type); - try testing.expectEqual(@as(u32, 5), @ptrCast(*align(1) const u32, infoC.fields[1].default_value.?).*); + try testing.expectEqual(@as(u32, 5), @as(*align(1) const u32, @ptrCast(infoC.fields[1].default_value.?)).*); try testing.expectEqual(@as(usize, 0), infoC.decls.len); try testing.expectEqual(@as(bool, false), infoC.is_tuple); @@ -311,10 +311,10 @@ test "Type.Struct" { try testing.expectEqual(Type.ContainerLayout.Auto, infoD.layout); try testing.expectEqualSlices(u8, "x", infoD.fields[0].name); try testing.expectEqual(comptime_int, infoD.fields[0].type); - try testing.expectEqual(@as(comptime_int, 3), @ptrCast(*const comptime_int, infoD.fields[0].default_value.?).*); + try testing.expectEqual(@as(comptime_int, 3), @as(*const comptime_int, @ptrCast(infoD.fields[0].default_value.?)).*); try testing.expectEqualSlices(u8, "y", infoD.fields[1].name); try testing.expectEqual(comptime_int, infoD.fields[1].type); - try testing.expectEqual(@as(comptime_int, 5), @ptrCast(*const comptime_int, infoD.fields[1].default_value.?).*); + try testing.expectEqual(@as(comptime_int, 5), @as(*const comptime_int, @ptrCast(infoD.fields[1].default_value.?)).*); try testing.expectEqual(@as(usize, 0), infoD.decls.len); try testing.expectEqual(@as(bool, false), infoD.is_tuple); @@ -324,10 +324,10 @@ test "Type.Struct" { try testing.expectEqual(Type.ContainerLayout.Auto, infoE.layout); try testing.expectEqualSlices(u8, "0", infoE.fields[0].name); try testing.expectEqual(comptime_int, infoE.fields[0].type); - try testing.expectEqual(@as(comptime_int, 1), @ptrCast(*const comptime_int, infoE.fields[0].default_value.?).*); + try testing.expectEqual(@as(comptime_int, 1), @as(*const comptime_int, @ptrCast(infoE.fields[0].default_value.?)).*); try testing.expectEqualSlices(u8, "1", infoE.fields[1].name); try testing.expectEqual(comptime_int, infoE.fields[1].type); - try testing.expectEqual(@as(comptime_int, 2), @ptrCast(*const comptime_int, infoE.fields[1].default_value.?).*); + try testing.expectEqual(@as(comptime_int, 2), @as(*const comptime_int, @ptrCast(infoE.fields[1].default_value.?)).*); try testing.expectEqual(@as(usize, 0), infoE.decls.len); try testing.expectEqual(@as(bool, true), infoE.is_tuple); @@ -379,7 +379,7 @@ test "Type.Enum" { try testing.expectEqual(false, @typeInfo(Bar).Enum.is_exhaustive); try testing.expectEqual(@as(u32, 1), @intFromEnum(Bar.a)); try testing.expectEqual(@as(u32, 5), @intFromEnum(Bar.b)); - try testing.expectEqual(@as(u32, 6), @intFromEnum(@enumFromInt(Bar, 6))); + try testing.expectEqual(@as(u32, 6), @intFromEnum(@as(Bar, @enumFromInt(6)))); } test "Type.Union" { diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig index 87ae96768a..0d026c0078 100644 --- a/test/behavior/type_info.zig +++ b/test/behavior/type_info.zig @@ -113,7 +113,7 @@ fn testNullTerminatedPtr() !void { try expect(ptr_info.Pointer.size == .Many); try expect(ptr_info.Pointer.is_const == false); try expect(ptr_info.Pointer.is_volatile == false); - try expect(@ptrCast(*const u8, ptr_info.Pointer.sentinel.?).* == 0); + try expect(@as(*const u8, @ptrCast(ptr_info.Pointer.sentinel.?)).* == 0); try expect(@typeInfo([:0]u8).Pointer.sentinel != null); } @@ -151,7 +151,7 @@ fn testArray() !void { const info = 
@typeInfo([10:0]u8); try expect(info.Array.len == 10); try expect(info.Array.child == u8); - try expect(@ptrCast(*const u8, info.Array.sentinel.?).* == @as(u8, 0)); + try expect(@as(*const u8, @ptrCast(info.Array.sentinel.?)).* == @as(u8, 0)); try expect(@sizeOf([10:0]u8) == info.Array.len + 1); } } @@ -295,8 +295,8 @@ fn testStruct() !void { try expect(unpacked_struct_info.Struct.is_tuple == false); try expect(unpacked_struct_info.Struct.backing_integer == null); try expect(unpacked_struct_info.Struct.fields[0].alignment == @alignOf(u32)); - try expect(@ptrCast(*align(1) const u32, unpacked_struct_info.Struct.fields[0].default_value.?).* == 4); - try expect(mem.eql(u8, "foobar", @ptrCast(*align(1) const *const [6:0]u8, unpacked_struct_info.Struct.fields[1].default_value.?).*)); + try expect(@as(*align(1) const u32, @ptrCast(unpacked_struct_info.Struct.fields[0].default_value.?)).* == 4); + try expect(mem.eql(u8, "foobar", @as(*align(1) const *const [6:0]u8, @ptrCast(unpacked_struct_info.Struct.fields[1].default_value.?)).*)); } const TestStruct = struct { @@ -319,7 +319,7 @@ fn testPackedStruct() !void { try expect(struct_info.Struct.fields[0].alignment == 0); try expect(struct_info.Struct.fields[2].type == f32); try expect(struct_info.Struct.fields[2].default_value == null); - try expect(@ptrCast(*align(1) const u32, struct_info.Struct.fields[3].default_value.?).* == 4); + try expect(@as(*align(1) const u32, @ptrCast(struct_info.Struct.fields[3].default_value.?)).* == 4); try expect(struct_info.Struct.fields[3].alignment == 0); try expect(struct_info.Struct.decls.len == 2); try expect(struct_info.Struct.decls[0].is_pub); @@ -504,7 +504,7 @@ test "type info for async frames" { switch (@typeInfo(@Frame(add))) { .Frame => |frame| { - try expect(@ptrCast(@TypeOf(add), frame.function) == add); + try expect(@as(@TypeOf(add), @ptrCast(frame.function)) == add); }, else => unreachable, } @@ -564,7 +564,7 @@ test "typeInfo resolves usingnamespace declarations" { test "value from struct @typeInfo default_value can be loaded at comptime" { comptime { const a = @typeInfo(@TypeOf(.{ .foo = @as(u8, 1) })).Struct.fields[0].default_value; - try expect(@ptrCast(*const u8, a).* == 1); + try expect(@as(*const u8, @ptrCast(a)).* == 1); } } @@ -607,6 +607,6 @@ test "@typeInfo decls ignore dependency loops" { test "type info of tuple of string literal default value" { const struct_field = @typeInfo(@TypeOf(.{"hi"})).Struct.fields[0]; - const value = @ptrCast(*align(1) const *const [2:0]u8, struct_field.default_value.?).*; + const value = @as(*align(1) const *const [2:0]u8, @ptrCast(struct_field.default_value.?)).*; comptime std.debug.assert(value[0] == 'h'); } diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 47864a83c9..2aab98ea72 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -1244,7 +1244,7 @@ test "@intCast to u0" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var zeros = @Vector(2, u32){ 0, 0 }; - const casted = @intCast(@Vector(2, u0), zeros); + const casted = @as(@Vector(2, u0), @intCast(zeros)); _ = casted[0]; } diff --git a/test/c_abi/main.zig b/test/c_abi/main.zig index f06d455060..d421f0aace 100644 --- a/test/c_abi/main.zig +++ b/test/c_abi/main.zig @@ -143,7 +143,7 @@ export fn zig_longdouble(x: c_longdouble) void { extern fn c_ptr(*anyopaque) void; test "C ABI pointer" { - c_ptr(@ptrFromInt(*anyopaque, 0xdeadbeef)); + c_ptr(@as(*anyopaque, @ptrFromInt(0xdeadbeef))); } export fn zig_ptr(x: *anyopaque) void { @@ -1058,14 +1058,14 
@@ test "C function that takes byval struct called via function pointer" { var fn_ptr = &c_func_ptr_byval; fn_ptr( - @ptrFromInt(*anyopaque, 1), - @ptrFromInt(*anyopaque, 2), + @as(*anyopaque, @ptrFromInt(1)), + @as(*anyopaque, @ptrFromInt(2)), ByVal{ .origin = .{ .x = 9, .y = 10, .z = 11 }, .size = .{ .width = 12, .height = 13, .depth = 14 }, }, @as(c_ulong, 3), - @ptrFromInt(*anyopaque, 4), + @as(*anyopaque, @ptrFromInt(4)), @as(c_ulong, 5), ); } @@ -1098,7 +1098,7 @@ test "f80 bare" { if (!has_f80) return error.SkipZigTest; const a = c_f80(12.34); - try expect(@floatCast(f64, a) == 56.78); + try expect(@as(f64, @floatCast(a)) == 56.78); } const f80_struct = extern struct { @@ -1111,7 +1111,7 @@ test "f80 struct" { if (builtin.mode != .Debug) return error.SkipZigTest; const a = c_f80_struct(.{ .a = 12.34 }); - try expect(@floatCast(f64, a.a) == 56.78); + try expect(@as(f64, @floatCast(a.a)) == 56.78); } const f80_extra_struct = extern struct { @@ -1124,7 +1124,7 @@ test "f80 extra struct" { if (builtin.target.cpu.arch == .x86) return error.SkipZigTest; const a = c_f80_extra_struct(.{ .a = 12.34, .b = 42 }); - try expect(@floatCast(f64, a.a) == 56.78); + try expect(@as(f64, @floatCast(a.a)) == 56.78); try expect(a.b == 24); } @@ -1133,7 +1133,7 @@ test "f128 bare" { if (!has_f128) return error.SkipZigTest; const a = c_f128(12.34); - try expect(@floatCast(f64, a) == 56.78); + try expect(@as(f64, @floatCast(a)) == 56.78); } const f128_struct = extern struct { @@ -1144,7 +1144,7 @@ test "f128 struct" { if (!has_f128) return error.SkipZigTest; const a = c_f128_struct(.{ .a = 12.34 }); - try expect(@floatCast(f64, a.a) == 56.78); + try expect(@as(f64, @floatCast(a.a)) == 56.78); } // The stdcall attribute on C functions is ignored when compiled on non-x86 diff --git a/test/cases/compile_errors/alignCast_expects_pointer_or_slice.zig b/test/cases/compile_errors/alignCast_expects_pointer_or_slice.zig index 1b8e3767b2..25345aced0 100644 --- a/test/cases/compile_errors/alignCast_expects_pointer_or_slice.zig +++ b/test/cases/compile_errors/alignCast_expects_pointer_or_slice.zig @@ -1,9 +1,10 @@ export fn entry() void { - @alignCast(4, @as(u32, 3)); + const x: *align(8) u32 = @alignCast(@as(u32, 3)); + _ = x; } // error // backend=stage2 // target=native // -// :2:19: error: expected pointer type, found 'u32' +// :2:41: error: expected pointer type, found 'u32' diff --git a/test/cases/compile_errors/bad_alignCast_at_comptime.zig b/test/cases/compile_errors/bad_alignCast_at_comptime.zig index 885700ecac..c870521822 100644 --- a/test/cases/compile_errors/bad_alignCast_at_comptime.zig +++ b/test/cases/compile_errors/bad_alignCast_at_comptime.zig @@ -1,6 +1,6 @@ comptime { - const ptr = @ptrFromInt(*align(1) i32, 0x1); - const aligned = @alignCast(4, ptr); + const ptr: *align(1) i32 = @ptrFromInt(0x1); + const aligned: *align(4) i32 = @alignCast(ptr); _ = aligned; } @@ -8,4 +8,4 @@ comptime { // backend=stage2 // target=native // -// :3:35: error: pointer address 0x1 is not aligned to 4 bytes +// :3:47: error: pointer address 0x1 is not aligned to 4 bytes diff --git a/test/cases/compile_errors/bitCast_same_size_but_bit_count_mismatch.zig b/test/cases/compile_errors/bitCast_same_size_but_bit_count_mismatch.zig index 2f7bd9c9bc..e366e0cb03 100644 --- a/test/cases/compile_errors/bitCast_same_size_but_bit_count_mismatch.zig +++ b/test/cases/compile_errors/bitCast_same_size_but_bit_count_mismatch.zig @@ -1,5 +1,5 @@ export fn entry(byte: u8) void { - var oops = @bitCast(u7, byte); + var oops: u7 = 
@bitCast(byte); _ = oops; } @@ -7,4 +7,4 @@ export fn entry(byte: u8) void { // backend=stage2 // target=native // -// :2:16: error: @bitCast size mismatch: destination type 'u7' has 7 bits but source type 'u8' has 8 bits +// :2:20: error: @bitCast size mismatch: destination type 'u7' has 7 bits but source type 'u8' has 8 bits diff --git a/test/cases/compile_errors/bitCast_to_enum_type.zig b/test/cases/compile_errors/bitCast_to_enum_type.zig index b3bc72c21b..7f3711b7f1 100644 --- a/test/cases/compile_errors/bitCast_to_enum_type.zig +++ b/test/cases/compile_errors/bitCast_to_enum_type.zig @@ -1,6 +1,6 @@ export fn entry() void { const E = enum(u32) { a, b }; - const y = @bitCast(E, @as(u32, 3)); + const y: E = @bitCast(@as(u32, 3)); _ = y; } @@ -8,5 +8,5 @@ export fn entry() void { // backend=stage2 // target=native // -// :3:24: error: cannot @bitCast to 'tmp.entry.E' -// :3:24: note: use @enumFromInt to cast from 'u32' +// :3:18: error: cannot @bitCast to 'tmp.entry.E' +// :3:18: note: use @enumFromInt to cast from 'u32' diff --git a/test/cases/compile_errors/bitCast_with_different_sizes_inside_an_expression.zig b/test/cases/compile_errors/bitCast_with_different_sizes_inside_an_expression.zig index bf87ba8bc5..f73dfeb38a 100644 --- a/test/cases/compile_errors/bitCast_with_different_sizes_inside_an_expression.zig +++ b/test/cases/compile_errors/bitCast_with_different_sizes_inside_an_expression.zig @@ -1,5 +1,5 @@ export fn entry() void { - var foo = (@bitCast(u8, @as(f32, 1.0)) == 0xf); + var foo = (@as(u8, @bitCast(@as(f32, 1.0))) == 0xf); _ = foo; } @@ -7,4 +7,4 @@ export fn entry() void { // backend=stage2 // target=native // -// :2:16: error: @bitCast size mismatch: destination type 'u8' has 8 bits but source type 'f32' has 32 bits +// :2:24: error: @bitCast size mismatch: destination type 'u8' has 8 bits but source type 'f32' has 32 bits diff --git a/test/cases/compile_errors/cast_negative_value_to_unsigned_integer.zig b/test/cases/compile_errors/cast_negative_value_to_unsigned_integer.zig index ebd9012015..57206b267f 100644 --- a/test/cases/compile_errors/cast_negative_value_to_unsigned_integer.zig +++ b/test/cases/compile_errors/cast_negative_value_to_unsigned_integer.zig @@ -1,6 +1,6 @@ comptime { const value: i32 = -1; - const unsigned = @intCast(u32, value); + const unsigned: u32 = @intCast(value); _ = unsigned; } export fn entry1() void { diff --git a/test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig b/test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig index 73de52fc97..4f79da9fb1 100644 --- a/test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig +++ b/test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig @@ -1,5 +1,5 @@ export fn entry() void { - @compileLog(@as(*align(1) const anyopaque, @ptrCast(*const anyopaque, &entry))); + @compileLog(@as(*const anyopaque, @ptrCast(&entry))); } // error diff --git a/test/cases/compile_errors/compile_time_null_ptr_cast.zig b/test/cases/compile_errors/compile_time_null_ptr_cast.zig index 25805e9f35..7d25931aaa 100644 --- a/test/cases/compile_errors/compile_time_null_ptr_cast.zig +++ b/test/cases/compile_errors/compile_time_null_ptr_cast.zig @@ -1,6 +1,6 @@ comptime { var opt_ptr: ?*i32 = null; - const ptr = @ptrCast(*i32, opt_ptr); + const ptr: *i32 = @ptrCast(opt_ptr); _ = ptr; } diff --git a/test/cases/compile_errors/compile_time_undef_ptr_cast.zig b/test/cases/compile_errors/compile_time_undef_ptr_cast.zig index 14edd293de..d93e8bc73d 100644 --- 
a/test/cases/compile_errors/compile_time_undef_ptr_cast.zig +++ b/test/cases/compile_errors/compile_time_undef_ptr_cast.zig @@ -1,6 +1,6 @@ comptime { var undef_ptr: *i32 = undefined; - const ptr = @ptrCast(*i32, undef_ptr); + const ptr: *i32 = @ptrCast(undef_ptr); _ = ptr; } diff --git a/test/cases/compile_errors/comptime_call_of_function_pointer.zig b/test/cases/compile_errors/comptime_call_of_function_pointer.zig index d6598aab39..574f55e9f3 100644 --- a/test/cases/compile_errors/comptime_call_of_function_pointer.zig +++ b/test/cases/compile_errors/comptime_call_of_function_pointer.zig @@ -1,5 +1,5 @@ export fn entry() void { - const fn_ptr = @ptrFromInt(*align(1) fn () void, 0xffd2); + const fn_ptr: *align(1) fn () void = @ptrFromInt(0xffd2); comptime fn_ptr(); } diff --git a/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_terminated.zig b/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_terminated.zig index ffa21af10a..83c48e8acd 100644 --- a/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_terminated.zig +++ b/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_terminated.zig @@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void { export fn foo_vector_ConstPtrSpecialRef() void { comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @ptrCast(&buf); const slice = target[0..3 :0]; _ = slice; } @@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void { export fn foo_cvector_ConstPtrSpecialRef() void { comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @ptrCast(&buf); const slice = target[0..3 :0]; _ = slice; } diff --git a/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_unterminated.zig b/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_unterminated.zig index c5bb2d9643..c111b026a5 100644 --- a/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_unterminated.zig +++ b/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_unterminated.zig @@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void { export fn foo_vector_ConstPtrSpecialRef() void { comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @ptrCast(&buf); const slice = target[0..3 :0]; _ = slice; } @@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void { export fn foo_cvector_ConstPtrSpecialRef() void { comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @ptrCast(&buf); const slice = target[0..3 :0]; _ = slice; } diff --git a/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_target-sentinel.zig b/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_target-sentinel.zig index aa52fb9756..24aa36949b 100644 --- a/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_target-sentinel.zig +++ b/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_target-sentinel.zig @@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void { export fn 
foo_vector_ConstPtrSpecialRef() void { comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @ptrCast(&buf); const slice = target[0..14 :255]; _ = slice; } @@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void { export fn foo_cvector_ConstPtrSpecialRef() void { comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @ptrCast(&buf); const slice = target[0..14 :255]; _ = slice; } diff --git a/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_terminated.zig b/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_terminated.zig index 86bd4ce8bb..249d59414a 100644 --- a/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_terminated.zig +++ b/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_terminated.zig @@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void { export fn foo_vector_ConstPtrSpecialRef() void { comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @ptrCast(&buf); const slice = target[0..15 :0]; _ = slice; } @@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void { export fn foo_cvector_ConstPtrSpecialRef() void { comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @ptrCast(&buf); const slice = target[0..15 :0]; _ = slice; } diff --git a/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_unterminated.zig b/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_unterminated.zig index e1b8a5bc2d..a6e599ca38 100644 --- a/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_unterminated.zig +++ b/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_unterminated.zig @@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void { export fn foo_vector_ConstPtrSpecialRef() void { comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @ptrCast(&buf); const slice = target[0..14 :0]; _ = slice; } @@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void { export fn foo_cvector_ConstPtrSpecialRef() void { comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @ptrCast(&buf); const slice = target[0..14 :0]; _ = slice; } diff --git a/test/cases/compile_errors/enumFromInt_on_non-exhaustive_enums_checks_int_in_range.zig b/test/cases/compile_errors/enumFromInt_on_non-exhaustive_enums_checks_int_in_range.zig index dfef66b628..112017d29d 100644 --- a/test/cases/compile_errors/enumFromInt_on_non-exhaustive_enums_checks_int_in_range.zig +++ b/test/cases/compile_errors/enumFromInt_on_non-exhaustive_enums_checks_int_in_range.zig @@ -1,11 +1,11 @@ pub export fn entry() void { const E = enum(u3) { a, b, c, _ }; - @compileLog(@enumFromInt(E, 100)); + @compileLog(@as(E, @enumFromInt(100))); } // error // target=native // backend=stage2 // -// :3:17: error: int value '100' out of range of non-exhaustive enum 'tmp.entry.E' +// :3:24: error: int value '100' out of range of non-exhaustive enum 'tmp.entry.E' // :2:15: note: enum declared here diff --git 
a/test/cases/compile_errors/enum_in_field_count_range_but_not_matching_tag.zig b/test/cases/compile_errors/enum_in_field_count_range_but_not_matching_tag.zig index 0cf9fcce01..3e1190cc32 100644 --- a/test/cases/compile_errors/enum_in_field_count_range_but_not_matching_tag.zig +++ b/test/cases/compile_errors/enum_in_field_count_range_but_not_matching_tag.zig @@ -3,7 +3,7 @@ const Foo = enum(u32) { B = 11, }; export fn entry() void { - var x = @enumFromInt(Foo, 0); + var x: Foo = @enumFromInt(0); _ = x; } @@ -11,5 +11,5 @@ export fn entry() void { // backend=stage2 // target=native // -// :6:13: error: enum 'tmp.Foo' has no tag with value '0' +// :6:18: error: enum 'tmp.Foo' has no tag with value '0' // :1:13: note: enum declared here diff --git a/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig b/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig index a3af883198..cfb01c3ddc 100644 --- a/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig +++ b/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig @@ -2,7 +2,7 @@ const Set1 = error{ A, B }; const Set2 = error{ A, C }; comptime { var x = Set1.B; - var y = @errSetCast(Set2, x); + var y: Set2 = @errSetCast(x); _ = y; } @@ -10,4 +10,4 @@ comptime { // backend=stage2 // target=native // -// :5:13: error: 'error.B' not a member of error set 'error{C,A}' +// :5:19: error: 'error.B' not a member of error set 'error{C,A}' diff --git a/test/cases/compile_errors/explicitly_casting_non_tag_type_to_enum.zig b/test/cases/compile_errors/explicitly_casting_non_tag_type_to_enum.zig index 6ae39489a0..bb920138e1 100644 --- a/test/cases/compile_errors/explicitly_casting_non_tag_type_to_enum.zig +++ b/test/cases/compile_errors/explicitly_casting_non_tag_type_to_enum.zig @@ -7,7 +7,7 @@ const Small = enum(u2) { export fn entry() void { var y = @as(f32, 3); - var x = @enumFromInt(Small, y); + var x: Small = @enumFromInt(y); _ = x; } diff --git a/test/cases/compile_errors/fieldParentPtr-comptime_field_ptr_not_based_on_struct.zig b/test/cases/compile_errors/fieldParentPtr-comptime_field_ptr_not_based_on_struct.zig index 9fc8038d7a..2147fb8aed 100644 --- a/test/cases/compile_errors/fieldParentPtr-comptime_field_ptr_not_based_on_struct.zig +++ b/test/cases/compile_errors/fieldParentPtr-comptime_field_ptr_not_based_on_struct.zig @@ -8,7 +8,7 @@ const foo = Foo{ }; comptime { - const field_ptr = @ptrFromInt(*i32, 0x1234); + const field_ptr: *i32 = @ptrFromInt(0x1234); const another_foo_ptr = @fieldParentPtr(Foo, "b", field_ptr); _ = another_foo_ptr; } diff --git a/test/cases/compile_errors/field_access_of_opaque_type.zig b/test/cases/compile_errors/field_access_of_opaque_type.zig index f9ec483305..7f975c4b0a 100644 --- a/test/cases/compile_errors/field_access_of_opaque_type.zig +++ b/test/cases/compile_errors/field_access_of_opaque_type.zig @@ -2,7 +2,7 @@ const MyType = opaque {}; export fn entry() bool { var x: i32 = 1; - return bar(@ptrCast(*MyType, &x)); + return bar(@ptrCast(&x)); } fn bar(x: *MyType) bool { diff --git a/test/cases/compile_errors/incorrect_type_to_memset_memcpy.zig b/test/cases/compile_errors/incorrect_type_to_memset_memcpy.zig index 44405b3c20..55af9c1185 100644 --- a/test/cases/compile_errors/incorrect_type_to_memset_memcpy.zig +++ b/test/cases/compile_errors/incorrect_type_to_memset_memcpy.zig @@ -2,7 +2,7 @@ pub export fn entry() void { var buf: [5]u8 = .{ 1, 2, 3, 4, 5 }; var slice: 
[]u8 = &buf; const a: u32 = 1234; - @memcpy(slice.ptr, @ptrCast([*]const u8, &a)); + @memcpy(slice.ptr, @as([*]const u8, @ptrCast(&a))); } pub export fn entry1() void { var buf: [5]u8 = .{ 1, 2, 3, 4, 5 }; @@ -39,7 +39,7 @@ pub export fn memset_array() void { // // :5:5: error: unknown @memcpy length // :5:18: note: destination type '[*]u8' provides no length -// :5:24: note: source type '[*]align(4) const u8' provides no length +// :5:24: note: source type '[*]const u8' provides no length // :10:13: error: type '*u8' is not an indexable pointer // :10:13: note: operand must be a slice, a many pointer or a pointer to an array // :15:13: error: type '*u8' is not an indexable pointer diff --git a/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig b/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig index 8d7e14acae..22bd90b068 100644 --- a/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig +++ b/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig @@ -1,6 +1,6 @@ export fn entry() u32 { var bytes: [4]u8 = [_]u8{ 0x01, 0x02, 0x03, 0x04 }; - const ptr = @ptrCast(*u32, &bytes[0]); + const ptr: *u32 = @ptrCast(&bytes[0]); return ptr.*; } @@ -8,7 +8,7 @@ export fn entry() u32 { // backend=stage2 // target=native // -// :3:17: error: cast increases pointer alignment +// :3:23: error: cast increases pointer alignment // :3:32: note: '*u8' has alignment '1' -// :3:26: note: '*u32' has alignment '4' -// :3:17: note: consider using '@alignCast' +// :3:23: note: '*u32' has alignment '4' +// :3:23: note: use @alignCast to assert pointer alignment diff --git a/test/cases/compile_errors/int-float_conversion_to_comptime_int-float.zig b/test/cases/compile_errors/int-float_conversion_to_comptime_int-float.zig index ecf8f61fc5..7724632069 100644 --- a/test/cases/compile_errors/int-float_conversion_to_comptime_int-float.zig +++ b/test/cases/compile_errors/int-float_conversion_to_comptime_int-float.zig @@ -1,17 +1,17 @@ export fn foo() void { var a: f32 = 2; - _ = @intFromFloat(comptime_int, a); + _ = @as(comptime_int, @intFromFloat(a)); } export fn bar() void { var a: u32 = 2; - _ = @floatFromInt(comptime_float, a); + _ = @as(comptime_float, @floatFromInt(a)); } // error // backend=stage2 // target=native // -// :3:37: error: unable to resolve comptime value -// :3:37: note: value being casted to 'comptime_int' must be comptime-known -// :7:39: error: unable to resolve comptime value -// :7:39: note: value being casted to 'comptime_float' must be comptime-known +// :3:41: error: unable to resolve comptime value +// :3:41: note: value being casted to 'comptime_int' must be comptime-known +// :7:43: error: unable to resolve comptime value +// :7:43: note: value being casted to 'comptime_float' must be comptime-known diff --git a/test/cases/compile_errors/intFromFloat_comptime_safety.zig b/test/cases/compile_errors/intFromFloat_comptime_safety.zig index 275f67006f..e3bfc3eb96 100644 --- a/test/cases/compile_errors/intFromFloat_comptime_safety.zig +++ b/test/cases/compile_errors/intFromFloat_comptime_safety.zig @@ -1,17 +1,17 @@ comptime { - _ = @intFromFloat(i8, @as(f32, -129.1)); + _ = @as(i8, @intFromFloat(@as(f32, -129.1))); } comptime { - _ = @intFromFloat(u8, @as(f32, -1.1)); + _ = @as(u8, @intFromFloat(@as(f32, -1.1))); } comptime { - _ = @intFromFloat(u8, @as(f32, 256.1)); + _ = @as(u8, @intFromFloat(@as(f32, 256.1))); } // error // backend=stage2 // target=native // -// :2:27: error: float value '-129.10000610351562' cannot be stored in 
integer type 'i8' -// :5:27: error: float value '-1.100000023841858' cannot be stored in integer type 'u8' -// :8:27: error: float value '256.1000061035156' cannot be stored in integer type 'u8' +// :2:31: error: float value '-129.10000610351562' cannot be stored in integer type 'i8' +// :5:31: error: float value '-1.100000023841858' cannot be stored in integer type 'u8' +// :8:31: error: float value '256.1000061035156' cannot be stored in integer type 'u8' diff --git a/test/cases/compile_errors/intFromPtr_0_to_non_optional_pointer.zig b/test/cases/compile_errors/intFromPtr_0_to_non_optional_pointer.zig index 4a2ea05eaa..e443b3daa9 100644 --- a/test/cases/compile_errors/intFromPtr_0_to_non_optional_pointer.zig +++ b/test/cases/compile_errors/intFromPtr_0_to_non_optional_pointer.zig @@ -1,5 +1,5 @@ export fn entry() void { - var b = @ptrFromInt(*i32, 0); + var b: *i32 = @ptrFromInt(0); _ = b; } diff --git a/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig b/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig index 6a1f2db531..32f4657ed5 100644 --- a/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig +++ b/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig @@ -8,7 +8,7 @@ const Set2 = error{ }; comptime { var x = @intFromError(Set1.B); - var y = @errSetCast(Set2, @errorFromInt(x)); + var y: Set2 = @errSetCast(@errorFromInt(x)); _ = y; } @@ -16,4 +16,4 @@ comptime { // backend=llvm // target=native // -// :11:13: error: 'error.B' not a member of error set 'error{C,A}' +// :11:19: error: 'error.B' not a member of error set 'error{C,A}' diff --git a/test/cases/compile_errors/integer_cast_truncates_bits.zig b/test/cases/compile_errors/integer_cast_truncates_bits.zig index 82eb6b61cf..a230dd3e5b 100644 --- a/test/cases/compile_errors/integer_cast_truncates_bits.zig +++ b/test/cases/compile_errors/integer_cast_truncates_bits.zig @@ -1,6 +1,6 @@ export fn entry1() void { const spartan_count: u16 = 300; - const byte = @intCast(u8, spartan_count); + const byte: u8 = @intCast(spartan_count); _ = byte; } export fn entry2() void { diff --git a/test/cases/compile_errors/integer_underflow_error.zig b/test/cases/compile_errors/integer_underflow_error.zig index 275b593ecc..49f46ee558 100644 --- a/test/cases/compile_errors/integer_underflow_error.zig +++ b/test/cases/compile_errors/integer_underflow_error.zig @@ -1,9 +1,9 @@ export fn entry() void { - _ = @ptrFromInt(*anyopaque, ~@as(usize, @import("std").math.maxInt(usize)) - 1); + _ = @as(*anyopaque, @ptrFromInt(~@as(usize, @import("std").math.maxInt(usize)) - 1)); } // error // backend=stage2 // target=native // -// :2:80: error: overflow of integer type 'usize' with value '-1' +// :2:84: error: overflow of integer type 'usize' with value '-1' diff --git a/test/cases/compile_errors/invalid_float_casts.zig b/test/cases/compile_errors/invalid_float_casts.zig index 507ced1e57..789eb10976 100644 --- a/test/cases/compile_errors/invalid_float_casts.zig +++ b/test/cases/compile_errors/invalid_float_casts.zig @@ -1,25 +1,25 @@ export fn foo() void { var a: f32 = 2; - _ = @floatCast(comptime_float, a); + _ = @as(comptime_float, @floatCast(a)); } export fn bar() void { var a: f32 = 2; - _ = @intFromFloat(f32, a); + _ = @as(f32, @intFromFloat(a)); } export fn baz() void { var a: f32 = 2; - _ = @floatFromInt(f32, a); + _ = @as(f32, @floatFromInt(a)); } export fn qux() void { var a: u32 = 2; - _ = @floatCast(f32, a); + _ = @as(f32, @floatCast(a)); } // error // backend=stage2 // target=native // -// 
:3:36: error: unable to cast runtime value to 'comptime_float' -// :7:23: error: expected integer type, found 'f32' -// :11:28: error: expected integer type, found 'f32' -// :15:25: error: expected float type, found 'u32' +// :3:40: error: unable to cast runtime value to 'comptime_float' +// :7:18: error: expected integer type, found 'f32' +// :11:32: error: expected integer type, found 'f32' +// :15:29: error: expected float type, found 'u32' diff --git a/test/cases/compile_errors/invalid_int_casts.zig b/test/cases/compile_errors/invalid_int_casts.zig index 262a096bd9..1e52c52609 100644 --- a/test/cases/compile_errors/invalid_int_casts.zig +++ b/test/cases/compile_errors/invalid_int_casts.zig @@ -1,25 +1,25 @@ export fn foo() void { var a: u32 = 2; - _ = @intCast(comptime_int, a); + _ = @as(comptime_int, @intCast(a)); } export fn bar() void { var a: u32 = 2; - _ = @floatFromInt(u32, a); + _ = @as(u32, @floatFromInt(a)); } export fn baz() void { var a: u32 = 2; - _ = @intFromFloat(u32, a); + _ = @as(u32, @intFromFloat(a)); } export fn qux() void { var a: f32 = 2; - _ = @intCast(u32, a); + _ = @as(u32, @intCast(a)); } // error // backend=stage2 // target=native // -// :3:32: error: unable to cast runtime value to 'comptime_int' -// :7:23: error: expected float type, found 'u32' -// :11:28: error: expected float type, found 'u32' -// :15:23: error: expected integer or vector, found 'f32' +// :3:36: error: unable to cast runtime value to 'comptime_int' +// :7:18: error: expected float type, found 'u32' +// :11:32: error: expected float type, found 'u32' +// :15:27: error: expected integer or vector, found 'f32' diff --git a/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig b/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig index 5457a61d3f..d7a93edfcd 100644 --- a/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig +++ b/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig @@ -8,12 +8,12 @@ const U = union(E) { b, }; export fn foo() void { - var e = @enumFromInt(E, 15); + var e: E = @enumFromInt(15); var u: U = e; _ = u; } export fn bar() void { - const e = @enumFromInt(E, 15); + const e: E = @enumFromInt(15); var u: U = e; _ = u; } @@ -24,5 +24,5 @@ export fn bar() void { // // :12:16: error: runtime coercion to union 'tmp.U' from non-exhaustive enum // :1:11: note: enum declared here -// :17:16: error: union 'tmp.U' has no tag with value '@enumFromInt(tmp.E, 15)' +// :17:16: error: union 'tmp.U' has no tag with value '@enumFromInt(15)' // :6:11: note: union declared here diff --git a/test/cases/compile_errors/issue_3818_bitcast_from_parray-slice_to_u16.zig b/test/cases/compile_errors/issue_3818_bitcast_from_parray-slice_to_u16.zig index 7a4c0eb7e8..c6566bb46a 100644 --- a/test/cases/compile_errors/issue_3818_bitcast_from_parray-slice_to_u16.zig +++ b/test/cases/compile_errors/issue_3818_bitcast_from_parray-slice_to_u16.zig @@ -1,11 +1,11 @@ export fn foo1() void { var bytes = [_]u8{ 1, 2 }; - const word: u16 = @bitCast(u16, bytes[0..]); + const word: u16 = @bitCast(bytes[0..]); _ = word; } export fn foo2() void { var bytes: []const u8 = &[_]u8{ 1, 2 }; - const word: u16 = @bitCast(u16, bytes); + const word: u16 = @bitCast(bytes); _ = word; } @@ -13,7 +13,7 @@ export fn foo2() void { // backend=stage2 // target=native // -// :3:42: error: cannot @bitCast from '*[2]u8' -// :3:42: note: use @intFromPtr to cast to 'u16' -// :8:37: error: cannot @bitCast from '[]const u8' -// :8:37: note: use @intFromPtr to cast to 'u16' +// :3:37: 
error: cannot @bitCast from '*[2]u8' +// :3:37: note: use @intFromPtr to cast to 'u16' +// :8:32: error: cannot @bitCast from '[]const u8' +// :8:32: note: use @intFromPtr to cast to 'u16' diff --git a/test/cases/compile_errors/load_too_many_bytes_from_comptime_reinterpreted_pointer.zig b/test/cases/compile_errors/load_too_many_bytes_from_comptime_reinterpreted_pointer.zig index baeb3e8c82..e4952e6951 100644 --- a/test/cases/compile_errors/load_too_many_bytes_from_comptime_reinterpreted_pointer.zig +++ b/test/cases/compile_errors/load_too_many_bytes_from_comptime_reinterpreted_pointer.zig @@ -1,7 +1,7 @@ export fn entry() void { const float: f32 align(@alignOf(i64)) = 5.99999999999994648725e-01; const float_ptr = &float; - const int_ptr = @ptrCast(*const i64, float_ptr); + const int_ptr: *const i64 = @ptrCast(float_ptr); const int_val = int_ptr.*; _ = int_val; } diff --git a/test/cases/compile_errors/missing_builtin_arg_in_initializer.zig b/test/cases/compile_errors/missing_builtin_arg_in_initializer.zig index 0bada117b2..cdbebf5457 100644 --- a/test/cases/compile_errors/missing_builtin_arg_in_initializer.zig +++ b/test/cases/compile_errors/missing_builtin_arg_in_initializer.zig @@ -1,8 +1,11 @@ comptime { - const v = @as(); + const a = @as(); } comptime { - const u = @bitCast(u32); + const b = @bitCast(); +} +comptime { + const c = @as(u32); } // error @@ -10,4 +13,5 @@ comptime { // target=native // // :2:15: error: expected 2 arguments, found 0 -// :5:15: error: expected 2 arguments, found 1 +// :5:15: error: expected 1 argument, found 0 +// :8:15: error: expected 2 arguments, found 1 diff --git a/test/cases/compile_errors/non_float_passed_to_intFromFloat.zig b/test/cases/compile_errors/non_float_passed_to_intFromFloat.zig index fac51c59c8..ee0b5e733e 100644 --- a/test/cases/compile_errors/non_float_passed_to_intFromFloat.zig +++ b/test/cases/compile_errors/non_float_passed_to_intFromFloat.zig @@ -1,5 +1,5 @@ export fn entry() void { - const x = @intFromFloat(i32, @as(i32, 54)); + const x: i32 = @intFromFloat(@as(i32, 54)); _ = x; } diff --git a/test/cases/compile_errors/non_int_passed_to_floatFromInt.zig b/test/cases/compile_errors/non_int_passed_to_floatFromInt.zig index 63e6753a53..c60842e980 100644 --- a/test/cases/compile_errors/non_int_passed_to_floatFromInt.zig +++ b/test/cases/compile_errors/non_int_passed_to_floatFromInt.zig @@ -1,5 +1,5 @@ export fn entry() void { - const x = @floatFromInt(f32, 1.1); + const x: f32 = @floatFromInt(1.1); _ = x; } diff --git a/test/cases/compile_errors/out_of_int_range_comptime_float_passed_to_intFromFloat.zig b/test/cases/compile_errors/out_of_int_range_comptime_float_passed_to_intFromFloat.zig index 574ffc5a20..d9cfd4b2de 100644 --- a/test/cases/compile_errors/out_of_int_range_comptime_float_passed_to_intFromFloat.zig +++ b/test/cases/compile_errors/out_of_int_range_comptime_float_passed_to_intFromFloat.zig @@ -1,5 +1,5 @@ export fn entry() void { - const x = @intFromFloat(i8, 200); + const x: i8 = @intFromFloat(200); _ = x; } diff --git a/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig b/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig index f27f5f4f93..a704ea456b 100644 --- a/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig +++ b/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig @@ -1,6 +1,6 @@ export fn entry() void { const x: i32 = 1234; - const y = @ptrCast(*i32, &x); + const y: *i32 = @ptrCast(&x); _ = y; } @@ -8,5 +8,5 @@ export fn entry() void { // backend=stage2 // target=native 
// -// :3:15: error: cast discards const qualifier -// :3:15: note: consider using '@constCast' +// :3:21: error: cast discards const qualifier +// :3:21: note: use @constCast to discard const qualifier diff --git a/test/cases/compile_errors/ptrFromInt_non_ptr_type.zig b/test/cases/compile_errors/ptrFromInt_non_ptr_type.zig index f472789aff..c75ceb444b 100644 --- a/test/cases/compile_errors/ptrFromInt_non_ptr_type.zig +++ b/test/cases/compile_errors/ptrFromInt_non_ptr_type.zig @@ -1,15 +1,15 @@ pub export fn entry() void { - _ = @ptrFromInt(i32, 10); + _ = @as(i32, @ptrFromInt(10)); } pub export fn entry2() void { - _ = @ptrFromInt([]u8, 20); + _ = @as([]u8, @ptrFromInt(20)); } // error // backend=stage2 // target=native // -// :2:21: error: expected pointer type, found 'i32' -// :6:21: error: integer cannot be converted to slice type '[]u8' -// :6:21: note: slice length cannot be inferred from address +// :2:18: error: expected pointer type, found 'i32' +// :6:19: error: integer cannot be converted to slice type '[]u8' +// :6:19: note: slice length cannot be inferred from address diff --git a/test/cases/compile_errors/ptrFromInt_with_misaligned_address.zig b/test/cases/compile_errors/ptrFromInt_with_misaligned_address.zig index c45e998d82..dfcbf6849c 100644 --- a/test/cases/compile_errors/ptrFromInt_with_misaligned_address.zig +++ b/test/cases/compile_errors/ptrFromInt_with_misaligned_address.zig @@ -1,5 +1,5 @@ pub export fn entry() void { - var y = @ptrFromInt([*]align(4) u8, 5); + var y: [*]align(4) u8 = @ptrFromInt(5); _ = y; } diff --git a/test/cases/compile_errors/ptrcast_to_non-pointer.zig b/test/cases/compile_errors/ptrcast_to_non-pointer.zig index 66a11a602b..ec93dc12c2 100644 --- a/test/cases/compile_errors/ptrcast_to_non-pointer.zig +++ b/test/cases/compile_errors/ptrcast_to_non-pointer.zig @@ -1,9 +1,9 @@ export fn entry(a: *i32) usize { - return @ptrCast(usize, a); + return @ptrCast(a); } // error // backend=llvm // target=native // -// :2:21: error: expected pointer type, found 'usize' +// :2:12: error: expected pointer type, found 'usize' diff --git a/test/cases/compile_errors/reading_past_end_of_pointer_casted_array.zig b/test/cases/compile_errors/reading_past_end_of_pointer_casted_array.zig index d3d9b03ff5..b06b541984 100644 --- a/test/cases/compile_errors/reading_past_end_of_pointer_casted_array.zig +++ b/test/cases/compile_errors/reading_past_end_of_pointer_casted_array.zig @@ -1,7 +1,7 @@ comptime { const array: [4]u8 = "aoeu".*; const sub_array = array[1..]; - const int_ptr = @ptrCast(*const u24, @alignCast(@alignOf(u24), sub_array)); + const int_ptr: *const u24 = @ptrCast(@alignCast(sub_array)); const deref = int_ptr.*; _ = deref; } diff --git a/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_non-integer_tag_type.zig b/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_non-integer_tag_type.zig index 9b140a0923..b26ec70296 100644 --- a/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_non-integer_tag_type.zig +++ b/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_non-integer_tag_type.zig @@ -7,7 +7,7 @@ const Tag = @Type(.{ }, }); export fn entry() void { - _ = @enumFromInt(Tag, 0); + _ = @as(Tag, @enumFromInt(0)); } // error diff --git a/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig b/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig index b2cd8e1214..5d5294ba30 100644 --- 
a/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig +++ b/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig @@ -7,7 +7,7 @@ const Tag = @Type(.{ }, }); export fn entry() void { - _ = @enumFromInt(Tag, 0); + _ = @as(Tag, @enumFromInt(0)); } // error diff --git a/test/cases/compile_errors/slice_cannot_have_its_bytes_reinterpreted.zig b/test/cases/compile_errors/slice_cannot_have_its_bytes_reinterpreted.zig index 5fab9c90a9..85fb0065d1 100644 --- a/test/cases/compile_errors/slice_cannot_have_its_bytes_reinterpreted.zig +++ b/test/cases/compile_errors/slice_cannot_have_its_bytes_reinterpreted.zig @@ -1,6 +1,6 @@ export fn foo() void { const bytes align(@alignOf([]const u8)) = [1]u8{0xfa} ** 16; - var value = @ptrCast(*const []const u8, &bytes).*; + var value = @as(*const []const u8, @ptrCast(&bytes)).*; _ = value; } @@ -8,4 +8,4 @@ export fn foo() void { // backend=stage2 // target=native // -// :3:52: error: comptime dereference requires '[]const u8' to have a well-defined layout, but it does not. +// :3:57: error: comptime dereference requires '[]const u8' to have a well-defined layout, but it does not. diff --git a/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig b/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig index df454a38d0..2b45fb6076 100644 --- a/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig +++ b/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig @@ -1,6 +1,6 @@ test "enum" { const E = enum(u8) { A, B, _ }; - _ = @tagName(@enumFromInt(E, 5)); + _ = @tagName(@as(E, @enumFromInt(5))); } // error @@ -8,5 +8,5 @@ test "enum" { // target=native // is_test=1 // -// :3:9: error: no field with value '@enumFromInt(tmp.test.enum.E, 5)' in enum 'test.enum.E' +// :3:9: error: no field with value '@enumFromInt(5)' in enum 'test.enum.E' // :2:15: note: declared here diff --git a/test/cases/compile_errors/truncate_sign_mismatch.zig b/test/cases/compile_errors/truncate_sign_mismatch.zig index a05660e28c..b34dfa8e07 100644 --- a/test/cases/compile_errors/truncate_sign_mismatch.zig +++ b/test/cases/compile_errors/truncate_sign_mismatch.zig @@ -1,25 +1,25 @@ export fn entry1() i8 { var x: u32 = 10; - return @truncate(i8, x); + return @truncate(x); } export fn entry2() u8 { var x: i32 = -10; - return @truncate(u8, x); + return @truncate(x); } export fn entry3() i8 { comptime var x: u32 = 10; - return @truncate(i8, x); + return @truncate(x); } export fn entry4() u8 { comptime var x: i32 = -10; - return @truncate(u8, x); + return @truncate(x); } // error // backend=stage2 // target=native // -// :3:26: error: expected signed integer type, found 'u32' -// :7:26: error: expected unsigned integer type, found 'i32' -// :11:26: error: expected signed integer type, found 'u32' -// :15:26: error: expected unsigned integer type, found 'i32' +// :3:22: error: expected signed integer type, found 'u32' +// :7:22: error: expected unsigned integer type, found 'i32' +// :11:22: error: expected signed integer type, found 'u32' +// :15:22: error: expected unsigned integer type, found 'i32' diff --git a/test/cases/compile_errors/wrong_pointer_coerced_to_pointer_to_opaque_{}.zig b/test/cases/compile_errors/wrong_pointer_coerced_to_pointer_to_opaque_{}.zig index a050eb6a4c..a7c8f0eb72 100644 --- a/test/cases/compile_errors/wrong_pointer_coerced_to_pointer_to_opaque_{}.zig +++ 
b/test/cases/compile_errors/wrong_pointer_coerced_to_pointer_to_opaque_{}.zig @@ -2,7 +2,7 @@ const Derp = opaque {}; extern fn bar(d: *Derp) void; export fn foo() void { var x = @as(u8, 1); - bar(@ptrCast(*anyopaque, &x)); + bar(@as(*anyopaque, @ptrCast(&x))); } // error diff --git a/test/cases/enum_values.0.zig b/test/cases/enum_values.0.zig index 2c44a095dd..71c3e3521a 100644 --- a/test/cases/enum_values.0.zig +++ b/test/cases/enum_values.0.zig @@ -7,7 +7,7 @@ pub fn main() void { number1; number2; } - const number3 = @enumFromInt(Number, 2); + const number3: Number = @enumFromInt(2); if (@intFromEnum(number3) != 2) { unreachable; } diff --git a/test/cases/enum_values.1.zig b/test/cases/enum_values.1.zig index 1b5a9836db..934106dd79 100644 --- a/test/cases/enum_values.1.zig +++ b/test/cases/enum_values.1.zig @@ -3,7 +3,7 @@ const Number = enum { One, Two, Three }; pub fn main() void { var number1 = Number.One; var number2: Number = .Two; - const number3 = @enumFromInt(Number, 2); + const number3: Number = @enumFromInt(2); assert(number1 != number2); assert(number2 != number3); assert(@intFromEnum(number1) == 0); diff --git a/test/cases/error_in_nested_declaration.zig b/test/cases/error_in_nested_declaration.zig index 710b821e65..20afacfb68 100644 --- a/test/cases/error_in_nested_declaration.zig +++ b/test/cases/error_in_nested_declaration.zig @@ -3,7 +3,7 @@ const S = struct { c: i32, a: struct { pub fn str(_: @This(), extra: []u32) []i32 { - return @bitCast([]i32, extra); + return @bitCast(extra); } }, }; @@ -27,5 +27,5 @@ pub export fn entry2() void { // target=native // // :17:12: error: C pointers cannot point to opaque types -// :6:29: error: cannot @bitCast to '[]i32' -// :6:29: note: use @ptrCast to cast from '[]u32' +// :6:20: error: cannot @bitCast to '[]i32' +// :6:20: note: use @ptrCast to cast from '[]u32' diff --git a/test/cases/int_to_ptr.0.zig b/test/cases/int_to_ptr.0.zig index ba14c03804..09efb8b1a5 100644 --- a/test/cases/int_to_ptr.0.zig +++ b/test/cases/int_to_ptr.0.zig @@ -1,8 +1,8 @@ pub fn main() void { - _ = @ptrFromInt(*u8, 0); + _ = @as(*u8, @ptrFromInt(0)); } // error // output_mode=Exe // -// :2:24: error: pointer type '*u8' does not allow address zero +// :2:18: error: pointer type '*u8' does not allow address zero diff --git a/test/cases/int_to_ptr.1.zig b/test/cases/int_to_ptr.1.zig index e75ae81f6f..d5aed471e1 100644 --- a/test/cases/int_to_ptr.1.zig +++ b/test/cases/int_to_ptr.1.zig @@ -1,7 +1,7 @@ pub fn main() void { - _ = @ptrFromInt(*u32, 2); + _ = @as(*u32, @ptrFromInt(2)); } // error // -// :2:25: error: pointer type '*u32' requires aligned address +// :2:19: error: pointer type '*u32' requires aligned address diff --git a/test/cases/llvm/f_segment_address_space_reading_and_writing.zig b/test/cases/llvm/f_segment_address_space_reading_and_writing.zig index 507362a937..ddcd41bf16 100644 --- a/test/cases/llvm/f_segment_address_space_reading_and_writing.zig +++ b/test/cases/llvm/f_segment_address_space_reading_and_writing.zig @@ -34,7 +34,7 @@ pub fn main() void { setFs(@intFromPtr(&test_value)); assert(getFs() == @intFromPtr(&test_value)); - var test_ptr = @ptrFromInt(*allowzero addrspace(.fs) u64, 0); + var test_ptr: *allowzero addrspace(.fs) u64 = @ptrFromInt(0); assert(test_ptr.* == 12345); test_ptr.* = 98765; assert(test_value == 98765); diff --git a/test/cases/llvm/large_slices.zig b/test/cases/llvm/large_slices.zig index f90e588ab0..8e9431df8c 100644 --- a/test/cases/llvm/large_slices.zig +++ b/test/cases/llvm/large_slices.zig @@ -1,5 +1,5 @@ 
pub fn main() void { - const large_slice = @ptrFromInt([*]const u8, 1)[0..(0xffffffffffffffff >> 3)]; + const large_slice = @as([*]const u8, @ptrFromInt(1))[0..(0xffffffffffffffff >> 3)]; _ = large_slice; } diff --git a/test/cases/safety/@alignCast misaligned.zig b/test/cases/safety/@alignCast misaligned.zig index 538e0ecdf6..ade27c2747 100644 --- a/test/cases/safety/@alignCast misaligned.zig +++ b/test/cases/safety/@alignCast misaligned.zig @@ -16,7 +16,8 @@ pub fn main() !void { } fn foo(bytes: []u8) u32 { const slice4 = bytes[1..5]; - const int_slice = std.mem.bytesAsSlice(u32, @alignCast(4, slice4)); + const aligned: *align(4) [4]u8 = @alignCast(slice4); + const int_slice = std.mem.bytesAsSlice(u32, aligned); return int_slice[0]; } // run diff --git a/test/cases/safety/@enumFromInt - no matching tag value.zig b/test/cases/safety/@enumFromInt - no matching tag value.zig index 57f8954f93..5051869cc0 100644 --- a/test/cases/safety/@enumFromInt - no matching tag value.zig +++ b/test/cases/safety/@enumFromInt - no matching tag value.zig @@ -17,7 +17,7 @@ pub fn main() !void { return error.TestFailed; } fn bar(a: u2) Foo { - return @enumFromInt(Foo, a); + return @enumFromInt(a); } fn baz(_: Foo) void {} diff --git a/test/cases/safety/@errSetCast error not present in destination.zig b/test/cases/safety/@errSetCast error not present in destination.zig index 372bd80aa5..84aeb7610e 100644 --- a/test/cases/safety/@errSetCast error not present in destination.zig +++ b/test/cases/safety/@errSetCast error not present in destination.zig @@ -14,7 +14,7 @@ pub fn main() !void { return error.TestFailed; } fn foo(set1: Set1) Set2 { - return @errSetCast(Set2, set1); + return @errSetCast(set1); } // run // backend=llvm diff --git a/test/cases/safety/@intCast to u0.zig b/test/cases/safety/@intCast to u0.zig index f3f969548b..8c9f76e2aa 100644 --- a/test/cases/safety/@intCast to u0.zig +++ b/test/cases/safety/@intCast to u0.zig @@ -14,7 +14,7 @@ pub fn main() !void { } fn bar(one: u1, not_zero: i32) void { - var x = one << @intCast(u0, not_zero); + var x = one << @as(u0, @intCast(not_zero)); _ = x; } // run diff --git a/test/cases/safety/@intFromFloat cannot fit - negative out of range.zig b/test/cases/safety/@intFromFloat cannot fit - negative out of range.zig index 9a8853c0e9..a5a8d831b3 100644 --- a/test/cases/safety/@intFromFloat cannot fit - negative out of range.zig +++ b/test/cases/safety/@intFromFloat cannot fit - negative out of range.zig @@ -12,7 +12,7 @@ pub fn main() !void { return error.TestFailed; } fn bar(a: f32) i8 { - return @intFromFloat(i8, a); + return @intFromFloat(a); } fn baz(_: i8) void {} // run diff --git a/test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig b/test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig index caf7bbf0d6..1bf1a66765 100644 --- a/test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig +++ b/test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig @@ -12,7 +12,7 @@ pub fn main() !void { return error.TestFailed; } fn bar(a: f32) u8 { - return @intFromFloat(u8, a); + return @intFromFloat(a); } fn baz(_: u8) void {} // run diff --git a/test/cases/safety/@intFromFloat cannot fit - positive out of range.zig b/test/cases/safety/@intFromFloat cannot fit - positive out of range.zig index d335238b65..15a9fa7ad1 100644 --- a/test/cases/safety/@intFromFloat cannot fit - positive out of range.zig +++ b/test/cases/safety/@intFromFloat cannot fit - positive out of range.zig @@ -12,7 +12,7 @@ pub fn main() !void { 
return error.TestFailed; } fn bar(a: f32) u8 { - return @intFromFloat(u8, a); + return @intFromFloat(a); } fn baz(_: u8) void {} // run diff --git a/test/cases/safety/@ptrFromInt address zero to non-optional byte-aligned pointer.zig b/test/cases/safety/@ptrFromInt address zero to non-optional byte-aligned pointer.zig index 345d5cfc74..41cff07e32 100644 --- a/test/cases/safety/@ptrFromInt address zero to non-optional byte-aligned pointer.zig +++ b/test/cases/safety/@ptrFromInt address zero to non-optional byte-aligned pointer.zig @@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi } pub fn main() !void { var zero: usize = 0; - var b = @ptrFromInt(*u8, zero); + var b: *u8 = @ptrFromInt(zero); _ = b; return error.TestFailed; } diff --git a/test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig b/test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig index e7d3b66d6c..92e98d4777 100644 --- a/test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig +++ b/test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig @@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi } pub fn main() !void { var zero: usize = 0; - var b = @ptrFromInt(*i32, zero); + var b: *i32 = @ptrFromInt(zero); _ = b; return error.TestFailed; } diff --git a/test/cases/safety/@ptrFromInt with misaligned address.zig b/test/cases/safety/@ptrFromInt with misaligned address.zig index c2e1d351eb..afb8aa7eb8 100644 --- a/test/cases/safety/@ptrFromInt with misaligned address.zig +++ b/test/cases/safety/@ptrFromInt with misaligned address.zig @@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi } pub fn main() !void { var x: usize = 5; - var y = @ptrFromInt([*]align(4) u8, x); + var y: [*]align(4) u8 = @ptrFromInt(x); _ = y; return error.TestFailed; } diff --git a/test/cases/safety/@tagName on corrupted enum value.zig b/test/cases/safety/@tagName on corrupted enum value.zig index 43af9fbda6..a541771df1 100644 --- a/test/cases/safety/@tagName on corrupted enum value.zig +++ b/test/cases/safety/@tagName on corrupted enum value.zig @@ -15,7 +15,7 @@ const E = enum(u32) { pub fn main() !void { var e: E = undefined; - @memset(@ptrCast([*]u8, &e)[0..@sizeOf(E)], 0x55); + @memset(@as([*]u8, @ptrCast(&e))[0..@sizeOf(E)], 0x55); var n = @tagName(e); _ = n; return error.TestFailed; diff --git a/test/cases/safety/@tagName on corrupted union value.zig b/test/cases/safety/@tagName on corrupted union value.zig index a72755abdc..dd3d9bd3bf 100644 --- a/test/cases/safety/@tagName on corrupted union value.zig +++ b/test/cases/safety/@tagName on corrupted union value.zig @@ -15,7 +15,7 @@ const U = union(enum(u32)) { pub fn main() !void { var u: U = undefined; - @memset(@ptrCast([*]u8, &u)[0..@sizeOf(U)], 0x55); + @memset(@as([*]u8, @ptrCast(&u))[0..@sizeOf(U)], 0x55); var t: @typeInfo(U).Union.tag_type.? 
= u; var n = @tagName(t); _ = n; diff --git a/test/cases/safety/pointer casting to null function pointer.zig b/test/cases/safety/pointer casting to null function pointer.zig index 7736cb5034..8f399b66dc 100644 --- a/test/cases/safety/pointer casting to null function pointer.zig +++ b/test/cases/safety/pointer casting to null function pointer.zig @@ -13,7 +13,7 @@ fn getNullPtr() ?*const anyopaque { } pub fn main() !void { const null_ptr: ?*const anyopaque = getNullPtr(); - const required_ptr: *align(1) const fn () void = @ptrCast(*align(1) const fn () void, null_ptr); + const required_ptr: *align(1) const fn () void = @ptrCast(null_ptr); _ = required_ptr; return error.TestFailed; } diff --git a/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig b/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig index a79298b7da..9052913691 100644 --- a/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig +++ b/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig @@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi } pub fn main() !void { var value: c_short = -1; - var casted = @intCast(u32, value); + var casted: u32 = @intCast(value); _ = casted; return error.TestFailed; } diff --git a/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig b/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig index 6c4e9e256d..5d8c3f88c8 100644 --- a/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig +++ b/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig @@ -13,7 +13,7 @@ pub fn main() !void { return error.TestFailed; } fn unsigned_cast(x: i32) u32 { - return @intCast(u32, x); + return @intCast(x); } // run // backend=llvm diff --git a/test/cases/safety/signed-unsigned vector cast.zig b/test/cases/safety/signed-unsigned vector cast.zig index d287c0a1ae..60406aa8a3 100644 --- a/test/cases/safety/signed-unsigned vector cast.zig +++ b/test/cases/safety/signed-unsigned vector cast.zig @@ -10,7 +10,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi pub fn main() !void { var x = @splat(4, @as(i32, -2147483647)); - var y = @intCast(@Vector(4, u32), x); + var y: @Vector(4, u32) = @intCast(x); _ = y; return error.TestFailed; } diff --git a/test/cases/safety/slice sentinel mismatch - optional pointers.zig b/test/cases/safety/slice sentinel mismatch - optional pointers.zig index 33f4a1099b..a3b4a98575 100644 --- a/test/cases/safety/slice sentinel mismatch - optional pointers.zig +++ b/test/cases/safety/slice sentinel mismatch - optional pointers.zig @@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi } pub fn main() !void { - var buf: [4]?*i32 = .{ @ptrFromInt(*i32, 4), @ptrFromInt(*i32, 8), @ptrFromInt(*i32, 12), @ptrFromInt(*i32, 16) }; + var buf: [4]?*i32 = .{ @ptrFromInt(4), @ptrFromInt(8), @ptrFromInt(12), @ptrFromInt(16) }; const slice = buf[0..3 :null]; _ = slice; return error.TestFailed; diff --git a/test/cases/safety/switch else on corrupt enum value - one prong.zig b/test/cases/safety/switch else on corrupt enum value - one prong.zig index 2c0b58fcd4..c11227c3be 100644 --- a/test/cases/safety/switch else on corrupt enum value - one prong.zig +++ b/test/cases/safety/switch else on corrupt enum value - one prong.zig @@ -13,7 +13,7 @@ const E = enum(u32) { }; pub 
fn main() !void { var a: E = undefined; - @ptrCast(*u32, &a).* = 255; + @as(*u32, @ptrCast(&a)).* = 255; switch (a) { .one => @panic("one"), else => @panic("else"), diff --git a/test/cases/safety/switch else on corrupt enum value - union.zig b/test/cases/safety/switch else on corrupt enum value - union.zig index 358ecc89ac..a63c78597e 100644 --- a/test/cases/safety/switch else on corrupt enum value - union.zig +++ b/test/cases/safety/switch else on corrupt enum value - union.zig @@ -18,7 +18,7 @@ const U = union(E) { }; pub fn main() !void { var a: U = undefined; - @ptrCast(*align(@alignOf(U)) u32, &a).* = 0xFFFF_FFFF; + @as(*align(@alignOf(U)) u32, @ptrCast(&a)).* = 0xFFFF_FFFF; switch (a) { .one => @panic("one"), else => @panic("else"), diff --git a/test/cases/safety/switch else on corrupt enum value.zig b/test/cases/safety/switch else on corrupt enum value.zig index af04b7f4c3..7e050838c0 100644 --- a/test/cases/safety/switch else on corrupt enum value.zig +++ b/test/cases/safety/switch else on corrupt enum value.zig @@ -13,7 +13,7 @@ const E = enum(u32) { }; pub fn main() !void { var a: E = undefined; - @ptrCast(*u32, &a).* = 255; + @as(*u32, @ptrCast(&a)).* = 255; switch (a) { else => @panic("else"), } diff --git a/test/cases/safety/switch on corrupted enum value.zig b/test/cases/safety/switch on corrupted enum value.zig index 687be0b598..f890761911 100644 --- a/test/cases/safety/switch on corrupted enum value.zig +++ b/test/cases/safety/switch on corrupted enum value.zig @@ -15,7 +15,7 @@ const E = enum(u32) { pub fn main() !void { var e: E = undefined; - @memset(@ptrCast([*]u8, &e)[0..@sizeOf(E)], 0x55); + @memset(@as([*]u8, @ptrCast(&e))[0..@sizeOf(E)], 0x55); switch (e) { .X, .Y => @breakpoint(), } diff --git a/test/cases/safety/switch on corrupted union value.zig b/test/cases/safety/switch on corrupted union value.zig index 745a3fd037..fc93c9d6e7 100644 --- a/test/cases/safety/switch on corrupted union value.zig +++ b/test/cases/safety/switch on corrupted union value.zig @@ -15,7 +15,7 @@ const U = union(enum(u32)) { pub fn main() !void { var u: U = undefined; - @memset(@ptrCast([*]u8, &u)[0..@sizeOf(U)], 0x55); + @memset(@as([*]u8, @ptrCast(&u))[0..@sizeOf(U)], 0x55); switch (u) { .X, .Y => @breakpoint(), } diff --git a/test/cases/safety/truncating vector cast.zig b/test/cases/safety/truncating vector cast.zig index e81a6e64ef..501bf694ac 100644 --- a/test/cases/safety/truncating vector cast.zig +++ b/test/cases/safety/truncating vector cast.zig @@ -10,7 +10,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi pub fn main() !void { var x = @splat(4, @as(u32, 0xdeadbeef)); - var y = @intCast(@Vector(4, u16), x); + var y: @Vector(4, u16) = @intCast(x); _ = y; return error.TestFailed; } diff --git a/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig b/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig index f370f76557..bd35f35422 100644 --- a/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig +++ b/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig @@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi } pub fn main() !void { var value: u8 = 245; - var casted = @intCast(i8, value); + var casted: i8 = @intCast(value); _ = casted; return error.TestFailed; } diff --git a/test/cases/safety/unsigned-signed vector cast.zig 
b/test/cases/safety/unsigned-signed vector cast.zig index d4b80fb05c..cf827878b6 100644 --- a/test/cases/safety/unsigned-signed vector cast.zig +++ b/test/cases/safety/unsigned-signed vector cast.zig @@ -10,7 +10,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi pub fn main() !void { var x = @splat(4, @as(u32, 0x80000000)); - var y = @intCast(@Vector(4, i32), x); + var y: @Vector(4, i32) = @intCast(x); _ = y; return error.TestFailed; } diff --git a/test/cases/safety/value does not fit in shortening cast - u0.zig b/test/cases/safety/value does not fit in shortening cast - u0.zig index 9b303e5cf5..ec111a2cae 100644 --- a/test/cases/safety/value does not fit in shortening cast - u0.zig +++ b/test/cases/safety/value does not fit in shortening cast - u0.zig @@ -14,7 +14,7 @@ pub fn main() !void { return error.TestFailed; } fn shorten_cast(x: u8) u0 { - return @intCast(u0, x); + return @intCast(x); } // run // backend=llvm diff --git a/test/cases/safety/value does not fit in shortening cast.zig b/test/cases/safety/value does not fit in shortening cast.zig index 0e98a09787..a5ea41659e 100644 --- a/test/cases/safety/value does not fit in shortening cast.zig +++ b/test/cases/safety/value does not fit in shortening cast.zig @@ -14,7 +14,7 @@ pub fn main() !void { return error.TestFailed; } fn shorten_cast(x: i32) i8 { - return @intCast(i8, x); + return @intCast(x); } // run // backend=llvm diff --git a/test/cbe.zig b/test/cbe.zig index f0cf720fd3..b56202c7e5 100644 --- a/test/cbe.zig +++ b/test/cbe.zig @@ -642,7 +642,7 @@ pub fn addCases(ctx: *Cases) !void { \\pub export fn main() c_int { \\ var number1 = Number.One; \\ var number2: Number = .Two; - \\ const number3 = @enumFromInt(Number, 2); + \\ const number3: Number = @enumFromInt(2); \\ if (number1 == number2) return 1; \\ if (number2 == number3) return 1; \\ if (@intFromEnum(number1) != 0) return 1; @@ -737,19 +737,19 @@ pub fn addCases(ctx: *Cases) !void { case.addError( \\pub export fn main() c_int { \\ const a = 1; - \\ _ = @enumFromInt(bool, a); + \\ _ = @as(bool, @enumFromInt(a)); \\} , &.{ - ":3:20: error: expected enum, found 'bool'", + ":3:19: error: expected enum, found 'bool'", }); case.addError( \\const E = enum { a, b, c }; \\pub export fn main() c_int { - \\ _ = @enumFromInt(E, 3); + \\ _ = @as(E, @enumFromInt(3)); \\} , &.{ - ":3:9: error: enum 'tmp.E' has no tag with value '3'", + ":3:16: error: enum 'tmp.E' has no tag with value '3'", ":1:11: note: enum declared here", }); diff --git a/test/compare_output.zig b/test/compare_output.zig index 66b5624443..92dfd76b58 100644 --- a/test/compare_output.zig +++ b/test/compare_output.zig @@ -180,8 +180,8 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\const c = @cImport(@cInclude("stdlib.h")); \\ \\export fn compare_fn(a: ?*const anyopaque, b: ?*const anyopaque) c_int { - \\ const a_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), a)); - \\ const b_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), b)); + \\ const a_int: *const i32 = @ptrCast(@alignCast(a)); + \\ const b_int: *const i32 = @ptrCast(@alignCast(b)); \\ if (a_int.* < b_int.*) { \\ return -1; \\ } else if (a_int.* > b_int.*) { @@ -194,7 +194,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\pub export fn main() c_int { \\ var array = [_]u32{ 1, 7, 3, 2, 0, 9, 4, 8, 6, 5 }; \\ - \\ c.qsort(@ptrCast(?*anyopaque, &array), @intCast(c_ulong, array.len), @sizeOf(i32), compare_fn); + \\ c.qsort(@ptrCast(&array), @intCast(array.len), @sizeOf(i32), compare_fn); 
\\ \\ for (array, 0..) |item, i| { \\ if (item != i) { @@ -229,8 +229,8 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\ } \\ const small: f32 = 3.25; \\ const x: f64 = small; - \\ const y = @intFromFloat(i32, x); - \\ const z = @floatFromInt(f64, y); + \\ const y: i32 = @intFromFloat(x); + \\ const z: f64 = @floatFromInt(y); \\ _ = c.printf("%.2f\n%d\n%.2f\n%.2f\n", x, y, z, @as(f64, -0.4)); \\ return 0; \\} diff --git a/test/link/macho/dead_strip_dylibs/build.zig b/test/link/macho/dead_strip_dylibs/build.zig index ec073e183a..c226e03196 100644 --- a/test/link/macho/dead_strip_dylibs/build.zig +++ b/test/link/macho/dead_strip_dylibs/build.zig @@ -37,7 +37,7 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize exe.dead_strip_dylibs = true; const run_cmd = b.addRunArtifact(exe); - run_cmd.expectExitCode(@bitCast(u8, @as(i8, -2))); // should fail + run_cmd.expectExitCode(@as(u8, @bitCast(@as(i8, -2)))); // should fail test_step.dependOn(&run_cmd.step); } } diff --git a/test/nvptx.zig b/test/nvptx.zig index 0bdc9455f7..c3748570e8 100644 --- a/test/nvptx.zig +++ b/test/nvptx.zig @@ -60,7 +60,7 @@ pub fn addCases(ctx: *Cases) !void { \\ \\ var _sdata: [1024]f32 addrspace(.shared) = undefined; \\ pub export fn reduceSum(d_x: []const f32, out: *f32) callconv(.Kernel) void { - \\ var sdata = @addrSpaceCast(.generic, &_sdata); + \\ var sdata: *addrspace(.generic) [1024]f32 = @addrSpaceCast(&_sdata); \\ const tid: u32 = threadIdX(); \\ var sum = d_x[tid]; \\ sdata[tid] = sum; diff --git a/test/standalone/hello_world/hello_libc.zig b/test/standalone/hello_world/hello_libc.zig index 42ba4db4b1..992afd736e 100644 --- a/test/standalone/hello_world/hello_libc.zig +++ b/test/standalone/hello_world/hello_libc.zig @@ -10,6 +10,6 @@ const msg = "Hello, world!\n"; pub export fn main(argc: c_int, argv: **u8) c_int { _ = argv; _ = argc; - if (c.printf(msg) != @intCast(c_int, c.strlen(msg))) return -1; + if (c.printf(msg) != @as(c_int, @intCast(c.strlen(msg)))) return -1; return 0; } diff --git a/test/standalone/issue_11595/main.zig b/test/standalone/issue_11595/main.zig index b91f54cb9c..12aa6ac3cd 100644 --- a/test/standalone/issue_11595/main.zig +++ b/test/standalone/issue_11595/main.zig @@ -1,5 +1,5 @@ extern fn check() c_int; pub fn main() u8 { - return @intCast(u8, check()); + return @as(u8, @intCast(check())); } diff --git a/test/standalone/main_return_error/error_u8_non_zero.zig b/test/standalone/main_return_error/error_u8_non_zero.zig index 9f7de780ac..c45458fb21 100644 --- a/test/standalone/main_return_error/error_u8_non_zero.zig +++ b/test/standalone/main_return_error/error_u8_non_zero.zig @@ -1,7 +1,7 @@ const Err = error{Foo}; fn foo() u8 { - var x = @intCast(u8, 9); + var x = @as(u8, @intCast(9)); return x; } diff --git a/test/standalone/mix_c_files/main.zig b/test/standalone/mix_c_files/main.zig index 913d284fe9..d755ada04c 100644 --- a/test/standalone/mix_c_files/main.zig +++ b/test/standalone/mix_c_files/main.zig @@ -25,6 +25,6 @@ pub fn main() anyerror!void { x = add_C(x); x = add_C_zig(x); - const u = @intCast(u32, x); + const u = @as(u32, @intCast(x)); try std.testing.expect(u / 100 == u % 100); } diff --git a/test/standalone/pie/main.zig b/test/standalone/pie/main.zig index 89d204aa1c..edf6a3fcaa 100644 --- a/test/standalone/pie/main.zig +++ b/test/standalone/pie/main.zig @@ -5,7 +5,7 @@ threadlocal var foo: u8 = 42; test "Check ELF header" { // PIE executables are marked as ET_DYN, regular exes as ET_EXEC. 
- const header = @ptrFromInt(*elf.Ehdr, std.process.getBaseAddress()); + const header = @as(*elf.Ehdr, @ptrFromInt(std.process.getBaseAddress())); try std.testing.expectEqual(elf.ET.DYN, header.e_type); } diff --git a/test/translate_c.zig b/test/translate_c.zig index 966f3e2785..40edec57f7 100644 --- a/test/translate_c.zig +++ b/test/translate_c.zig @@ -351,7 +351,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\} , &[_][]const u8{ \\pub export fn main() void { - \\ var a: c_int = @bitCast(c_int, @truncate(c_uint, @alignOf(c_int))); + \\ var a: c_int = @as(c_int, @bitCast(@as(c_uint, @truncate(@alignOf(c_int))))); \\ _ = @TypeOf(a); \\} }); @@ -465,7 +465,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ pub fn y(self: anytype) @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), c_int) { \\ const Intermediate = @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), u8); \\ const ReturnType = @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), c_int); - \\ return @ptrCast(ReturnType, @alignCast(@alignOf(c_int), @ptrCast(Intermediate, self) + 4)); + \\ return @as(ReturnType, @ptrCast(@alignCast(@as(Intermediate, @ptrCast(self)) + 4))); \\ } \\}; \\pub const struct_bar = extern struct { @@ -473,7 +473,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ pub fn y(self: anytype) @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), c_int) { \\ const Intermediate = @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), u8); \\ const ReturnType = @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), c_int); - \\ return @ptrCast(ReturnType, @alignCast(@alignOf(c_int), @ptrCast(Intermediate, self) + 4)); + \\ return @as(ReturnType, @ptrCast(@alignCast(@as(Intermediate, @ptrCast(self)) + 4))); \\ } \\}; }); @@ -635,7 +635,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\}; \\pub export fn foo(arg_x: [*c]outer) void { \\ var x = arg_x; - \\ x.*.unnamed_0.unnamed_0.y = @bitCast(c_int, @as(c_uint, x.*.unnamed_0.x)); + \\ x.*.unnamed_0.unnamed_0.y = @as(c_int, @bitCast(@as(c_uint, x.*.unnamed_0.x))); \\} }); @@ -721,7 +721,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub const struct_opaque_2 = opaque {}; \\pub export fn function(arg_opaque_1: ?*struct_opaque) void { \\ var opaque_1 = arg_opaque_1; - \\ var cast: ?*struct_opaque_2 = @ptrCast(?*struct_opaque_2, opaque_1); + \\ var cast: ?*struct_opaque_2 = @as(?*struct_opaque_2, @ptrCast(opaque_1)); \\ _ = @TypeOf(cast); \\} }); @@ -799,7 +799,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ _ = @TypeOf(b); \\ const c: c_int = undefined; \\ _ = @TypeOf(c); - \\ const d: c_uint = @bitCast(c_uint, @as(c_int, 440)); + \\ const d: c_uint = @as(c_uint, @bitCast(@as(c_int, 440))); \\ _ = @TypeOf(d); \\ var e: c_int = 10; \\ _ = @TypeOf(e); @@ -904,8 +904,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , &[_][]const u8{ \\pub extern fn foo() void; \\pub export fn bar() void { - \\ var func_ptr: ?*anyopaque = @ptrCast(?*anyopaque, &foo); - \\ var typed_func_ptr: ?*const fn () callconv(.C) void = @ptrFromInt(?*const fn () callconv(.C) void, @intCast(c_ulong, @intFromPtr(func_ptr))); + \\ var func_ptr: ?*anyopaque = @as(?*anyopaque, @ptrCast(&foo)); + \\ var typed_func_ptr: ?*const fn () callconv(.C) void = @as(?*const fn () callconv(.C) void, @ptrFromInt(@as(c_ulong, @intCast(@intFromPtr(func_ptr))))); \\ _ = @TypeOf(typed_func_ptr); \\} }); @@ -1353,7 +1353,7 @@ pub fn addCases(cases: 
*tests.TranslateCContext) void { , &[_][]const u8{ \\pub export fn foo() ?*anyopaque { \\ var x: [*c]c_ushort = undefined; - \\ return @ptrCast(?*anyopaque, x); + \\ return @as(?*anyopaque, @ptrCast(x)); \\} }); @@ -1543,7 +1543,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , &[_][]const u8{ \\pub export fn ptrcast() [*c]f32 { \\ var a: [*c]c_int = undefined; - \\ return @ptrCast([*c]f32, @alignCast(@import("std").meta.alignment([*c]f32), a)); + \\ return @as([*c]f32, @ptrCast(@alignCast(a))); \\} }); @@ -1555,7 +1555,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , &[_][]const u8{ \\pub export fn ptrptrcast() [*c][*c]f32 { \\ var a: [*c][*c]c_int = undefined; - \\ return @ptrCast([*c][*c]f32, @alignCast(@import("std").meta.alignment([*c][*c]f32), a)); + \\ return @as([*c][*c]f32, @ptrCast(@alignCast(a))); \\} }); @@ -1579,23 +1579,23 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn test_ptr_cast() void { \\ var p: ?*anyopaque = undefined; \\ { - \\ var to_char: [*c]u8 = @ptrCast([*c]u8, @alignCast(@import("std").meta.alignment([*c]u8), p)); + \\ var to_char: [*c]u8 = @as([*c]u8, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_char); - \\ var to_short: [*c]c_short = @ptrCast([*c]c_short, @alignCast(@import("std").meta.alignment([*c]c_short), p)); + \\ var to_short: [*c]c_short = @as([*c]c_short, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_short); - \\ var to_int: [*c]c_int = @ptrCast([*c]c_int, @alignCast(@import("std").meta.alignment([*c]c_int), p)); + \\ var to_int: [*c]c_int = @as([*c]c_int, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_int); - \\ var to_longlong: [*c]c_longlong = @ptrCast([*c]c_longlong, @alignCast(@import("std").meta.alignment([*c]c_longlong), p)); + \\ var to_longlong: [*c]c_longlong = @as([*c]c_longlong, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_longlong); \\ } \\ { - \\ var to_char: [*c]u8 = @ptrCast([*c]u8, @alignCast(@import("std").meta.alignment([*c]u8), p)); + \\ var to_char: [*c]u8 = @as([*c]u8, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_char); - \\ var to_short: [*c]c_short = @ptrCast([*c]c_short, @alignCast(@import("std").meta.alignment([*c]c_short), p)); + \\ var to_short: [*c]c_short = @as([*c]c_short, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_short); - \\ var to_int: [*c]c_int = @ptrCast([*c]c_int, @alignCast(@import("std").meta.alignment([*c]c_int), p)); + \\ var to_int: [*c]c_int = @as([*c]c_int, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_int); - \\ var to_longlong: [*c]c_longlong = @ptrCast([*c]c_longlong, @alignCast(@import("std").meta.alignment([*c]c_longlong), p)); + \\ var to_longlong: [*c]c_longlong = @as([*c]c_longlong, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_longlong); \\ } \\} @@ -1651,7 +1651,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\} , &[_][]const u8{ \\pub export fn foo() c_int { - \\ return (@as(c_int, 1) << @intCast(@import("std").math.Log2Int(c_int), 2)) >> @intCast(@import("std").math.Log2Int(c_int), 1); + \\ return (@as(c_int, 1) << @intCast(2)) >> @intCast(1); \\} }); @@ -1885,7 +1885,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\const enum_unnamed_1 = ++ " " ++ default_enum_type ++ \\; - \\pub export var h: enum_unnamed_1 = @bitCast(c_uint, e); + \\pub export var h: enum_unnamed_1 = @as(c_uint, @bitCast(e)); \\pub const i: c_int = 0; \\pub const j: c_int = 1; \\pub const k: c_int = 2; @@ -2091,12 +2091,12 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ _ = @TypeOf(c_1); \\ var a_2: c_int = undefined; \\ var b_3: u8 = 123; - 
\\ b_3 = @bitCast(u8, @truncate(i8, a_2)); + \\ b_3 = @as(u8, @bitCast(@as(i8, @truncate(a_2)))); \\ { \\ var d: c_int = 5; \\ _ = @TypeOf(d); \\ } - \\ var d: c_uint = @bitCast(c_uint, @as(c_int, 440)); + \\ var d: c_uint = @as(c_uint, @bitCast(@as(c_int, 440))); \\ _ = @TypeOf(d); \\} }); @@ -2236,9 +2236,9 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\int c = 3.1415; \\double d = 3; , &[_][]const u8{ - \\pub export var a: f32 = @floatCast(f32, 3.1415); + \\pub export var a: f32 = @as(f32, @floatCast(3.1415)); \\pub export var b: f64 = 3.1415; - \\pub export var c: c_int = @intFromFloat(c_int, 3.1415); + \\pub export var c: c_int = @as(c_int, @intFromFloat(3.1415)); \\pub export var d: f64 = 3; }); @@ -2423,7 +2423,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , &[_][]const u8{ \\pub export fn int_from_float(arg_a: f32) c_int { \\ var a = arg_a; - \\ return @intFromFloat(c_int, a); + \\ return @as(c_int, @intFromFloat(a)); \\} }); @@ -2533,15 +2533,15 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ var a = arg_a; \\ var b = arg_b; \\ var c = arg_c; - \\ var d: enum_Foo = @bitCast(c_uint, FooA); + \\ var d: enum_Foo = @as(c_uint, @bitCast(FooA)); \\ var e: c_int = @intFromBool((a != 0) and (b != 0)); \\ var f: c_int = @intFromBool((b != 0) and (c != null)); \\ var g: c_int = @intFromBool((a != 0) and (c != null)); \\ var h: c_int = @intFromBool((a != 0) or (b != 0)); \\ var i: c_int = @intFromBool((b != 0) or (c != null)); \\ var j: c_int = @intFromBool((a != 0) or (c != null)); - \\ var k: c_int = @intFromBool((a != 0) or (@bitCast(c_int, d) != 0)); - \\ var l: c_int = @intFromBool((@bitCast(c_int, d) != 0) and (b != 0)); + \\ var k: c_int = @intFromBool((a != 0) or (@as(c_int, @bitCast(d)) != 0)); + \\ var l: c_int = @intFromBool((@as(c_int, @bitCast(d)) != 0) and (b != 0)); \\ var m: c_int = @intFromBool((c != null) or (d != 0)); \\ var td: SomeTypedef = 44; \\ var o: c_int = @intFromBool((td != 0) or (b != 0)); @@ -2707,10 +2707,10 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export var array: [100]c_int = [1]c_int{0} ** 100; \\pub export fn foo(arg_index: c_int) c_int { \\ var index = arg_index; - \\ return array[@intCast(c_uint, index)]; + \\ return array[@as(c_uint, @intCast(index))]; \\} , - \\pub const ACCESS = array[@intCast(usize, @as(c_int, 2))]; + \\pub const ACCESS = array[@as(usize, @intCast(@as(c_int, 2)))]; }); cases.add("cast signed array index to unsigned", @@ -2722,7 +2722,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn foo() void { \\ var a: [10]c_int = undefined; \\ var i: c_int = 0; - \\ a[@intCast(c_uint, i)] = 0; + \\ a[@as(c_uint, @intCast(i))] = 0; \\} }); @@ -2735,7 +2735,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn foo() void { \\ var a: [10]c_longlong = undefined; \\ var i: c_longlong = 0; - \\ a[@intCast(usize, i)] = 0; + \\ a[@as(usize, @intCast(i))] = 0; \\} }); @@ -3006,8 +3006,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn log2(arg_a: c_uint) c_int { \\ var a = arg_a; \\ var i: c_int = 0; - \\ while (a > @bitCast(c_uint, @as(c_int, 0))) { - \\ a >>= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1)); + \\ while (a > @as(c_uint, @bitCast(@as(c_int, 0)))) { + \\ a >>= @intCast(@as(c_int, 1)); \\ } \\ return i; \\} @@ -3026,8 +3026,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn log2(arg_a: u32) c_int { \\ var a = arg_a; \\ var i: c_int = 0; - \\ while (a > 
@bitCast(u32, @as(c_int, 0))) { - \\ a >>= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1)); + \\ while (a > @as(u32, @bitCast(@as(c_int, 0)))) { + \\ a >>= @intCast(@as(c_int, 1)); \\ } \\ return i; \\} @@ -3084,14 +3084,14 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ ref.* ^= @as(c_int, 1); \\ break :blk ref.*; \\ }; - \\ a >>= @intCast(@import("std").math.Log2Int(c_int), blk: { + \\ a >>= @intCast(blk: { \\ const ref = &a; - \\ ref.* >>= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1)); + \\ ref.* >>= @intCast(@as(c_int, 1)); \\ break :blk ref.*; \\ }); - \\ a <<= @intCast(@import("std").math.Log2Int(c_int), blk: { + \\ a <<= @intCast(blk: { \\ const ref = &a; - \\ ref.* <<= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1)); + \\ ref.* <<= @intCast(@as(c_int, 1)); \\ break :blk ref.*; \\ }); \\ a = @divTrunc(a, blk: { @@ -3106,12 +3106,12 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ }); \\ b /= blk: { \\ const ref = &b; - \\ ref.* /= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* /= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; \\ b %= blk: { \\ const ref = &b; - \\ ref.* %= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* %= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; \\} @@ -3134,42 +3134,42 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ var a: c_uint = 0; \\ a +%= blk: { \\ const ref = &a; - \\ ref.* +%= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* +%= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; \\ a -%= blk: { \\ const ref = &a; - \\ ref.* -%= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* -%= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; \\ a *%= blk: { \\ const ref = &a; - \\ ref.* *%= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* *%= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; \\ a &= blk: { \\ const ref = &a; - \\ ref.* &= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* &= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; \\ a |= blk: { \\ const ref = &a; - \\ ref.* |= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* |= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; \\ a ^= blk: { \\ const ref = &a; - \\ ref.* ^= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* ^= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; - \\ a >>= @intCast(@import("std").math.Log2Int(c_uint), blk: { + \\ a >>= @intCast(blk: { \\ const ref = &a; - \\ ref.* >>= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1)); + \\ ref.* >>= @intCast(@as(c_int, 1)); \\ break :blk ref.*; \\ }); - \\ a <<= @intCast(@import("std").math.Log2Int(c_uint), blk: { + \\ a <<= @intCast(blk: { \\ const ref = &a; - \\ ref.* <<= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1)); + \\ ref.* <<= @intCast(@as(c_int, 1)); \\ break :blk ref.*; \\ }); \\} @@ -3258,21 +3258,21 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub extern fn fn_bool(x: bool) void; \\pub extern fn fn_ptr(x: ?*anyopaque) void; \\pub export fn call() void { - \\ fn_int(@intFromFloat(c_int, 3.0)); - \\ fn_int(@intFromFloat(c_int, 3.0)); + \\ fn_int(@as(c_int, @intFromFloat(3.0))); + \\ fn_int(@as(c_int, @intFromFloat(3.0))); \\ fn_int(@as(c_int, 1094861636)); - \\ fn_f32(@floatFromInt(f32, @as(c_int, 3))); - \\ fn_f64(@floatFromInt(f64, @as(c_int, 3))); - \\ fn_char(@bitCast(u8, @truncate(i8, @as(c_int, '3')))); - \\ fn_char(@bitCast(u8, @truncate(i8, @as(c_int, '\x01')))); - \\ fn_char(@bitCast(u8, @truncate(i8, @as(c_int, 0)))); + 
\\ fn_f32(@as(f32, @floatFromInt(@as(c_int, 3)))); + \\ fn_f64(@as(f64, @floatFromInt(@as(c_int, 3)))); + \\ fn_char(@as(u8, @bitCast(@as(i8, @truncate(@as(c_int, '3')))))); + \\ fn_char(@as(u8, @bitCast(@as(i8, @truncate(@as(c_int, '\x01')))))); + \\ fn_char(@as(u8, @bitCast(@as(i8, @truncate(@as(c_int, 0)))))); \\ fn_f32(3.0); \\ fn_f64(3.0); \\ fn_bool(@as(c_int, 123) != 0); \\ fn_bool(@as(c_int, 0) != 0); \\ fn_bool(@intFromPtr(&fn_int) != 0); - \\ fn_int(@intCast(c_int, @intFromPtr(&fn_int))); - \\ fn_ptr(@ptrFromInt(?*anyopaque, @as(c_int, 42))); + \\ fn_int(@as(c_int, @intCast(@intFromPtr(&fn_int)))); + \\ fn_ptr(@as(?*anyopaque, @ptrFromInt(@as(c_int, 42)))); \\} }); @@ -3411,11 +3411,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\} , &[_][]const u8{ \\pub export fn foo() c_ulong { - \\ return @bitCast(c_ulong, @as(c_long, -@as(c_int, 1))); + \\ return @as(c_ulong, @bitCast(@as(c_long, -@as(c_int, 1)))); \\} \\pub export fn bar(arg_x: c_long) c_ushort { \\ var x = arg_x; - \\ return @bitCast(c_ushort, @truncate(c_short, x)); + \\ return @as(c_ushort, @bitCast(@as(c_short, @truncate(x)))); \\} }); @@ -3473,11 +3473,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\} \\pub export fn bar(arg_a: [*c]const c_int) void { \\ var a = arg_a; - \\ foo(@ptrFromInt([*c]c_int, @intFromPtr(a))); + \\ foo(@as([*c]c_int, @ptrFromInt(@intFromPtr(a)))); \\} \\pub export fn baz(arg_a: [*c]volatile c_int) void { \\ var a = arg_a; - \\ foo(@ptrFromInt([*c]c_int, @intFromPtr(a))); + \\ foo(@as([*c]c_int, @ptrFromInt(@intFromPtr(a)))); \\} }); @@ -3860,9 +3860,9 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ p[1]; \\} , &[_][]const u8{ - \\_ = p[@intCast(c_uint, @as(c_int, 0))]; + \\_ = p[@as(c_uint, @intCast(@as(c_int, 0)))]; , - \\_ = p[@intCast(c_uint, @as(c_int, 1))]; + \\_ = p[@as(c_uint, @intCast(@as(c_int, 1)))]; }); cases.add("Undefined macro identifier", @@ -3928,7 +3928,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn foo() void { \\ var a: S = undefined; \\ var b: S = undefined; - \\ var c: c_longlong = @divExact(@bitCast(c_longlong, @intFromPtr(a) -% @intFromPtr(b)), @sizeOf(u8)); + \\ var c: c_longlong = @divExact(@as(c_longlong, @bitCast(@intFromPtr(a) -% @intFromPtr(b))), @sizeOf(u8)); \\ _ = @TypeOf(c); \\} }); @@ -3943,7 +3943,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn foo() void { \\ var a: S = undefined; \\ var b: S = undefined; - \\ var c: c_long = @divExact(@bitCast(c_long, @intFromPtr(a) -% @intFromPtr(b)), @sizeOf(u8)); + \\ var c: c_long = @divExact(@as(c_long, @bitCast(@intFromPtr(a) -% @intFromPtr(b))), @sizeOf(u8)); \\ _ = @TypeOf(c); \\} }); diff --git a/tools/extract-grammar.zig b/tools/extract-grammar.zig index 9a72bf46e4..b251f57741 100644 --- a/tools/extract-grammar.zig +++ b/tools/extract-grammar.zig @@ -90,7 +90,7 @@ fn read(path: []const u8, allocator: mem.Allocator) ![:0]const u8 { const st = try f.stat(); if (st.size > max_src_size) return error.FileTooBig; - const src = try allocator.allocSentinel(u8, @intCast(usize, st.size), 0); + const src = try allocator.allocSentinel(u8, @as(usize, @intCast(st.size)), 0); const n = try f.readAll(src); if (n != st.size) return error.UnexpectedEndOfFile; diff --git a/tools/gen_spirv_spec.zig b/tools/gen_spirv_spec.zig index 91d0ba80ac..9427451a28 100644 --- a/tools/gen_spirv_spec.zig +++ b/tools/gen_spirv_spec.zig @@ -40,7 +40,7 @@ fn extendedStructs( kinds: []const g.OperandKind, ) !ExtendedStructSet { var map = 
ExtendedStructSet.init(arena); - try map.ensureTotalCapacity(@intCast(u32, kinds.len)); + try map.ensureTotalCapacity(@as(u32, @intCast(kinds.len))); for (kinds) |kind| { const enumerants = kind.enumerants orelse continue; diff --git a/tools/gen_stubs.zig b/tools/gen_stubs.zig index dab45350f9..1b70023666 100644 --- a/tools/gen_stubs.zig +++ b/tools/gen_stubs.zig @@ -441,10 +441,10 @@ fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian) const sh_name = try arena.dupe(u8, mem.sliceTo(shstrtab[s(shdr.sh_name)..], 0)); log.debug("found section: {s}", .{sh_name}); if (mem.eql(u8, sh_name, ".dynsym")) { - dynsym_index = @intCast(u16, i); + dynsym_index = @as(u16, @intCast(i)); } const gop = try parse.sections.getOrPut(sh_name); - section_index_map[i] = @intCast(u16, gop.index); + section_index_map[i] = @as(u16, @intCast(gop.index)); } if (dynsym_index == 0) @panic("did not find the .dynsym section"); @@ -470,9 +470,9 @@ fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian) for (copied_dyn_syms) |sym| { const this_section = s(sym.st_shndx); const name = try arena.dupe(u8, mem.sliceTo(dynstr[s(sym.st_name)..], 0)); - const ty = @truncate(u4, sym.st_info); - const binding = @truncate(u4, sym.st_info >> 4); - const visib = @enumFromInt(elf.STV, @truncate(u2, sym.st_other)); + const ty = @as(u4, @truncate(sym.st_info)); + const binding = @as(u4, @truncate(sym.st_info >> 4)); + const visib = @as(elf.STV, @enumFromInt(@as(u2, @truncate(sym.st_other)))); const size = s(sym.st_size); if (parse.blacklist.contains(name)) continue; diff --git a/tools/update-linux-headers.zig b/tools/update-linux-headers.zig index e3a3e9294d..ef701fc86d 100644 --- a/tools/update-linux-headers.zig +++ b/tools/update-linux-headers.zig @@ -112,7 +112,7 @@ const DestTarget = struct { _ = self; var hasher = std.hash.Wyhash.init(0); std.hash.autoHash(&hasher, a.arch); - return @truncate(u32, hasher.final()); + return @as(u32, @truncate(hasher.final())); } pub fn eql(self: @This(), a: DestTarget, b: DestTarget, b_index: usize) bool { diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig index 4584a87ef7..6616d5f077 100644 --- a/tools/update_clang_options.zig +++ b/tools/update_clang_options.zig @@ -591,7 +591,7 @@ pub fn main() anyerror!void { for (all_features, 0..) |feat, i| { const llvm_name = feat.llvm_name orelse continue; - const zig_feat = @enumFromInt(Feature, i); + const zig_feat = @as(Feature, @enumFromInt(i)); const zig_name = @tagName(zig_feat); try llvm_to_zig_cpu_features.put(llvm_name, zig_name); } @@ -790,7 +790,7 @@ const Syntax = union(enum) { }; fn objSyntax(obj: *json.ObjectMap) ?Syntax { - const num_args = @intCast(u8, obj.get("NumArgs").?.integer); + const num_args = @as(u8, @intCast(obj.get("NumArgs").?.integer)); for (obj.get("!superclasses").?.array.items) |superclass_json| { const superclass = superclass_json.string; if (std.mem.eql(u8, superclass, "Joined")) { From a84a8953257ccfb70567a75017c98830eca250e3 Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 23 Jun 2023 19:37:50 +0100 Subject: [PATCH 5/7] langref: update to new cast builtin syntax --- doc/langref.html.in | 124 +++++++++++++++++++++++--------------------- 1 file changed, 65 insertions(+), 59 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index 0b37db1997..1ebc737b18 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -2410,7 +2410,7 @@ var some_integers: [100]i32 = undefined; test "modify an array" { for (&some_integers, 0..) 
|*item, i| { - item.* = @intCast(i32, i); + item.* = @intCast(i); } try expect(some_integers[10] == 10); try expect(some_integers[99] == 99); @@ -2452,8 +2452,8 @@ var fancy_array = init: { var initial_value: [10]Point = undefined; for (&initial_value, 0..) |*pt, i| { pt.* = Point{ - .x = @intCast(i32, i), - .y = @intCast(i32, i) * 2, + .x = @intCast(i), + .y = @intCast(i * 2), }; } break :init initial_value; @@ -2769,7 +2769,7 @@ test "comptime pointers" { const expect = @import("std").testing.expect; test "@intFromPtr and @ptrFromInt" { - const ptr = @ptrFromInt(*i32, 0xdeadbee0); + const ptr: *i32 = @ptrFromInt(0xdeadbee0); const addr = @intFromPtr(ptr); try expect(@TypeOf(addr) == usize); try expect(addr == 0xdeadbee0); @@ -2784,7 +2784,7 @@ test "comptime @ptrFromInt" { comptime { // Zig is able to do this at compile-time, as long as // ptr is never dereferenced. - const ptr = @ptrFromInt(*i32, 0xdeadbee0); + const ptr: *i32 = @ptrFromInt(0xdeadbee0); const addr = @intFromPtr(ptr); try expect(@TypeOf(addr) == usize); try expect(addr == 0xdeadbee0); @@ -2801,7 +2801,7 @@ test "comptime @ptrFromInt" { const expect = @import("std").testing.expect; test "volatile" { - const mmio_ptr = @ptrFromInt(*volatile u8, 0x12345678); + const mmio_ptr: *volatile u8 = @ptrFromInt(0x12345678); try expect(@TypeOf(mmio_ptr) == *volatile u8); } {#code_end#} @@ -2822,7 +2822,7 @@ const expect = std.testing.expect; test "pointer casting" { const bytes align(@alignOf(u32)) = [_]u8{ 0x12, 0x12, 0x12, 0x12 }; - const u32_ptr = @ptrCast(*const u32, &bytes); + const u32_ptr: *const u32 = @ptrCast(&bytes); try expect(u32_ptr.* == 0x12121212); // Even this example is contrived - there are better ways to do the above than @@ -2831,7 +2831,7 @@ test "pointer casting" { try expect(u32_value == 0x12121212); // And even another way, the most straightforward way to do it: - try expect(@bitCast(u32, bytes) == 0x12121212); + try expect(@as(u32, @bitCast(bytes)) == 0x12121212); } test "pointer child type" { @@ -2921,7 +2921,7 @@ test "pointer alignment safety" { } fn foo(bytes: []u8) u32 { const slice4 = bytes[1..5]; - const int_slice = std.mem.bytesAsSlice(u32, @alignCast(4, slice4)); + const int_slice = std.mem.bytesAsSlice(u32, @as([]align(4) u8, @alignCast(slice4))); return int_slice[0]; } {#code_end#} @@ -2942,7 +2942,7 @@ const expect = std.testing.expect; test "allowzero" { var zero: usize = 0; - var ptr = @ptrFromInt(*allowzero i32, zero); + var ptr: *allowzero i32 = @ptrFromInt(zero); try expect(@intFromPtr(ptr) == 0); } {#code_end#} @@ -3354,12 +3354,12 @@ fn doTheTest() !void { try expect(@sizeOf(Full) == 2); try expect(@sizeOf(Divided) == 2); var full = Full{ .number = 0x1234 }; - var divided = @bitCast(Divided, full); + var divided: Divided = @bitCast(full); try expect(divided.half1 == 0x34); try expect(divided.quarter3 == 0x2); try expect(divided.quarter4 == 0x1); - var ordered = @bitCast([2]u8, full); + var ordered: [2]u8 = @bitCast(full); switch (native_endian) { .Big => { try expect(ordered[0] == 0x12); @@ -4428,7 +4428,7 @@ fn getNum(u: U) u32 { // `u.a` or `u.b` and `tag` is `u`'s comptime-known tag value. inline else => |num, tag| { if (tag == .b) { - return @intFromFloat(u32, num); + return @intFromFloat(num); } return num; } @@ -4714,7 +4714,7 @@ test "for basics" { var sum2: i32 = 0; for (items, 0..) 
|_, i| { try expect(@TypeOf(i) == usize); - sum2 += @intCast(i32, i); + sum2 += @as(i32, @intCast(i)); } try expect(sum2 == 10); @@ -6363,7 +6363,7 @@ const mem = std.mem; test "cast *[1][*]const u8 to [*]const ?[*]const u8" { const window_name = [1][*]const u8{"window name"}; const x: [*]const ?[*]const u8 = &window_name; - try expect(mem.eql(u8, std.mem.sliceTo(@ptrCast([*:0]const u8, x[0].?), 0), "window name")); + try expect(mem.eql(u8, std.mem.sliceTo(@as([*:0]const u8, @ptrCast(x[0].?)), 0), "window name")); } {#code_end#} {#header_close#} @@ -6760,8 +6760,8 @@ fn peerTypeEmptyArrayAndSliceAndError(a: bool, slice: []u8) anyerror![]u8 { } test "peer type resolution: *const T and ?*T" { - const a = @ptrFromInt(*const usize, 0x123456780); - const b = @ptrFromInt(?*usize, 0x123456780); + const a: *const usize = @ptrFromInt(0x123456780); + const b: ?*usize = @ptrFromInt(0x123456780); try expect(a == b); try expect(b == a); } @@ -7762,12 +7762,13 @@ test "global assembly" { at compile time.

{#header_open|@addrSpaceCast#} -
{#syntax#}@addrSpaceCast(comptime addrspace: std.builtin.AddressSpace, ptr: anytype) anytype{#endsyntax#}
+
{#syntax#}@addrSpaceCast(ptr: anytype) anytype{#endsyntax#}

- Converts a pointer from one address space to another. Depending on the current target and - address spaces, this cast may be a no-op, a complex operation, or illegal. If the cast is - legal, then the resulting pointer points to the same memory location as the pointer operand. - It is always valid to cast a pointer between the same address spaces. + Converts a pointer from one address space to another. The new address space is inferred + based on the result type. Depending on the current target and address spaces, this cast + may be a no-op, a complex operation, or illegal. If the cast is legal, then the resulting + pointer points to the same memory location as the pointer operand. It is always valid to + cast a pointer between the same address spaces.
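For a concrete picture of the new call-site shape, here is a minimal sketch modeled on the nvptx test updated earlier in this series; the `.shared` address space is an assumption and only exists on targets that provide it:

    var _sdata: [1024]f32 addrspace(.shared) = undefined;

    fn kernel() void {
        // The destination address space (.generic) comes from the annotated result type.
        const sdata: *addrspace(.generic) [1024]f32 = @addrSpaceCast(&_sdata);
        _ = sdata;
    }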

{#header_close#} {#header_open|@addWithOverflow#} @@ -7777,10 +7778,10 @@ test "global assembly" {

{#header_close#} {#header_open|@alignCast#} -
{#syntax#}@alignCast(comptime alignment: u29, ptr: anytype) anytype{#endsyntax#}
+
{#syntax#}@alignCast(ptr: anytype) anytype{#endsyntax#}

{#syntax#}ptr{#endsyntax#} can be {#syntax#}*T{#endsyntax#}, {#syntax#}?*T{#endsyntax#}, or {#syntax#}[]T{#endsyntax#}. - It returns the same type as {#syntax#}ptr{#endsyntax#} except with the alignment adjusted to the new value. + Changes the alignment of a pointer. The alignment to use is inferred based on the result type.

A {#link|pointer alignment safety check|Incorrect Pointer Alignment#} is added to the generated code to make sure the pointer is aligned as promised.
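A small sketch of the new form, where the alignment to restore is read from the annotated result type (the names here are illustrative):

    const std = @import("std");

    test "restore known alignment" {
        var x: u32 = 123;
        const unaligned: *align(1) u32 = &x; // coercing to a lower alignment is implicit
        const aligned: *u32 = @alignCast(unaligned); // alignment comes from the result type
        try std.testing.expect(aligned.* == 123);
    }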

@@ -7865,9 +7866,10 @@ comptime { {#header_close#} {#header_open|@bitCast#} -
{#syntax#}@bitCast(comptime DestType: type, value: anytype) DestType{#endsyntax#}
+
{#syntax#}@bitCast(value: anytype) anytype{#endsyntax#}

- Converts a value of one type to another type. + Converts a value of one type to another type. The return type is the + inferred result type.
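For instance, with the result type taken from the declaration (the operand and result must have the same size):

    const x: u32 = 0xffff_ffff;
    const y: i32 = @bitCast(x); // reinterprets the bits; y == -1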

Asserts that {#syntax#}@sizeOf(@TypeOf(value)) == @sizeOf(DestType){#endsyntax#}. @@ -8420,10 +8422,11 @@ test "main" { {#header_close#} {#header_open|@errSetCast#} -

{#syntax#}@errSetCast(comptime T: DestType, value: anytype) DestType{#endsyntax#}
+
{#syntax#}@errSetCast(value: anytype) anytype{#endsyntax#}

- Converts an error value from one error set to another error set. Attempting to convert an error - which is not in the destination error set results in safety-protected {#link|Undefined Behavior#}. + Converts an error value from one error set to another error set. The return type is the + inferred result type. Attempting to convert an error which is not in the destination error + set results in safety-protected {#link|Undefined Behavior#}.
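A minimal sketch, following the same pattern as the safety test updated earlier in this series (the set names are illustrative):

    const Wide = error{ A, B };
    const Narrow = error{A};

    fn toNarrow(err: Wide) Narrow {
        // The destination error set is the function's return type.
        return @errSetCast(err);
    }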

{#header_close#} @@ -8535,17 +8538,17 @@ test "decl access by string" { {#header_close#} {#header_open|@floatCast#} -
{#syntax#}@floatCast(comptime DestType: type, value: anytype) DestType{#endsyntax#}
+
{#syntax#}@floatCast(value: anytype) anytype{#endsyntax#}

Convert from one float type to another. This cast is safe, but may cause the - numeric value to lose precision. + numeric value to lose precision. The return type is the inferred result type.
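For example, with the destination float type supplied by the annotation:

    const wide: f64 = 3.141592653589793;
    const narrow: f32 = @floatCast(wide); // may lose precision; the cast itself is safe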

{#header_close#} {#header_open|@intFromFloat#} -
{#syntax#}@intFromFloat(comptime DestType: type, float: anytype) DestType{#endsyntax#}
+
{#syntax#}@intFromFloat(float: anytype) anytype{#endsyntax#}

- Converts the integer part of a floating point number to the destination type. + Converts the integer part of a floating point number to the inferred result type.
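A short sketch of the new form:

    const f: f32 = 3.75;
    const n: i32 = @intFromFloat(f); // n == 3; the fractional part is discarded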

If the integer part of the floating point number cannot fit in the destination type, @@ -8660,16 +8663,17 @@ test "@hasDecl" { {#header_close#} {#header_open|@intCast#} -

{#syntax#}@intCast(comptime DestType: type, int: anytype) DestType{#endsyntax#}
+
{#syntax#}@intCast(int: anytype) anytype{#endsyntax#}

Converts an integer to another integer while keeping the same numerical value. + The return type is the inferred result type. Attempting to convert a number which is out of range of the destination type results in safety-protected {#link|Undefined Behavior#}.

{#code_begin|test_err|test_intCast_builtin|cast truncated bits#} test "integer cast panic" { var a: u16 = 0xabcd; - var b: u8 = @intCast(u8, a); + var b: u8 = @intCast(a); _ = b; } {#code_end#} @@ -8683,9 +8687,9 @@ test "integer cast panic" { {#header_close#} {#header_open|@enumFromInt#} -
{#syntax#}@enumFromInt(comptime DestType: type, integer: anytype) DestType{#endsyntax#}
+
{#syntax#}@enumFromInt(integer: anytype) anytype{#endsyntax#}

- Converts an integer into an {#link|enum#} value. + Converts an integer into an {#link|enum#} value. The return type is the inferred result type.
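For instance, with the enum type supplied by the annotation (the enum here is illustrative):

    const Color = enum(u8) { red, green, blue };
    const c: Color = @enumFromInt(1); // c == .green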

Attempting to convert an integer which represents no value in the chosen enum type invokes @@ -8711,16 +8715,18 @@ test "integer cast panic" { {#header_close#} {#header_open|@floatFromInt#} -

{#syntax#}@floatFromInt(comptime DestType: type, int: anytype) DestType{#endsyntax#}
+
{#syntax#}@floatFromInt(int: anytype) anytype{#endsyntax#}

- Converts an integer to the closest floating point representation. To convert the other way, use {#link|@intFromFloat#}. This cast is always safe. + Converts an integer to the closest floating point representation. The return type is the inferred result type. + To convert the other way, use {#link|@intFromFloat#}. This cast is always safe.
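A brief sketch, with the destination float type coming from the annotation:

    const count: i32 = 42;
    const ratio: f64 = @floatFromInt(count); // exact for values this small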

{#header_close#} {#header_open|@ptrFromInt#} -
{#syntax#}@ptrFromInt(comptime DestType: type, address: usize) DestType{#endsyntax#}
+
{#syntax#}@ptrFromInt(address: usize) anytype{#endsyntax#}

- Converts an integer to a {#link|pointer|Pointers#}. To convert the other way, use {#link|@intFromPtr#}. Casting an address of 0 to a destination type + Converts an integer to a {#link|pointer|Pointers#}. The return type is the inferred result type. + To convert the other way, use {#link|@intFromPtr#}. Casting an address of 0 to a destination type which is not {#link|optional|Optional Pointers#} and does not have the {#syntax#}allowzero{#endsyntax#} attribute will result in a {#link|Pointer Cast Invalid Null#} panic when runtime safety checks are enabled.
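A brief sketch in the style of the volatile-pointer example earlier in the langref (the address is illustrative and the pointer is never dereferenced):

    const mmio_ptr: *volatile u8 = @ptrFromInt(0x12345678); // pointer type comes from the annotation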

@@ -8924,9 +8930,9 @@ pub const PrefetchOptions = struct { {#header_close#} {#header_open|@ptrCast#} -
{#syntax#}@ptrCast(comptime DestType: type, value: anytype) DestType{#endsyntax#}
+
{#syntax#}@ptrCast(value: anytype) anytype{#endsyntax#}

- Converts a pointer of one type to a pointer of another type. + Converts a pointer of one type to a pointer of another type. The return type is the inferred result type.
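A small sketch of the new form, with the destination pointer type coming from the annotation (names and values are illustrative):

    test "reinterpret a pointer" {
        var x: u32 = 0x01020304;
        const bytes: *[4]u8 = @ptrCast(&x); // destination pointer type comes from the annotation
        _ = bytes;
    }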

{#link|Optional Pointers#} are allowed. Casting an optional pointer which is {#link|null#} @@ -9522,10 +9528,10 @@ fn List(comptime T: type) type { {#header_close#} {#header_open|@truncate#} -

{#syntax#}@truncate(comptime T: type, integer: anytype) T{#endsyntax#}
+
{#syntax#}@truncate(integer: anytype) anytype{#endsyntax#}

This function truncates bits from an integer type, resulting in a smaller
- or same-sized integer type.
+ or same-sized integer type. The return type is the inferred result type.
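For illustration only (not part of this patch): a minimal sketch, assuming the single-argument @truncate signature documented above; the truncation width is the width of the inferred result type.

    const std = @import("std");
    const expect = std.testing.expect;

    test "@truncate width follows the inferred result type" {
        var a: u16 = 0xabcd;
        const low8: u8 = @truncate(a);
        const low4: u4 = @truncate(a);
        try expect(low8 == 0xcd);
        try expect(low4 == 0xd);
    }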

This function always truncates the significant bits of the integer, regardless
@@ -9540,7 +9546,7 @@ const expect = std.testing.expect;
test "integer truncation" {
    var a: u16 = 0xabcd;
-    var b: u8 = @truncate(u8, a);
+    var b: u8 = @truncate(a);
    try expect(b == 0xcd);
}
{#code_end#}
@@ -9838,7 +9844,7 @@ fn foo(x: []const u8) u8 {
{#code_begin|test_err|test_comptime_invalid_cast|type 'u32' cannot represent integer value '-1'#}
comptime {
    var value: i32 = -1;
-    const unsigned = @intCast(u32, value);
+    const unsigned: u32 = @intCast(value);
    _ = unsigned;
}
{#code_end#}
@@ -9848,7 +9854,7 @@ const std = @import("std");
pub fn main() void {
    var value: i32 = -1;
-    var unsigned = @intCast(u32, value);
+    var unsigned: u32 = @intCast(value);
    std.debug.print("value: {}\n", .{unsigned});
}
{#code_end#}
@@ -9861,7 +9867,7 @@ pub fn main() void {
{#code_begin|test_err|test_comptime_invalid_cast_truncate|type 'u8' cannot represent integer value '300'#}
comptime {
    const spartan_count: u16 = 300;
-    const byte = @intCast(u8, spartan_count);
+    const byte: u8 = @intCast(spartan_count);
    _ = byte;
}
{#code_end#}
@@ -9871,7 +9877,7 @@ const std = @import("std");
pub fn main() void {
    var spartan_count: u16 = 300;
-    const byte = @intCast(u8, spartan_count);
+    const byte: u8 = @intCast(spartan_count);
    std.debug.print("value: {}\n", .{byte});
}
{#code_end#}
@@ -10208,7 +10214,7 @@ const Foo = enum {
};
comptime {
    const a: u2 = 3;
-    const b = @enumFromInt(Foo, a);
+    const b: Foo = @enumFromInt(a);
    _ = b;
}
{#code_end#}
@@ -10224,7 +10230,7 @@ const Foo = enum {
pub fn main() void {
    var a: u2 = 3;
-    var b = @enumFromInt(Foo, a);
+    var b: Foo = @enumFromInt(a);
    std.debug.print("value: {s}\n", .{@tagName(b)});
}
{#code_end#}
@@ -10242,7 +10248,7 @@ const Set2 = error{
    C,
};
comptime {
-    _ = @errSetCast(Set2, Set1.B);
+    _ = @as(Set2, @errSetCast(Set1.B));
}
{#code_end#}

At runtime:

@@ -10261,7 +10267,7 @@ pub fn main() void {
    foo(Set1.B);
}
fn foo(set1: Set1) void {
-    const x = @errSetCast(Set2, set1);
+    const x = @as(Set2, @errSetCast(set1));
    std.debug.print("value: {}\n", .{x});
}
{#code_end#}
@@ -10271,8 +10277,8 @@ fn foo(set1: Set1) void {

At compile-time:

{#code_begin|test_err|test_comptime_incorrect_pointer_alignment|pointer address 0x1 is not aligned to 4 bytes#}
comptime {
-    const ptr = @ptrFromInt(*align(1) i32, 0x1);
-    const aligned = @alignCast(4, ptr);
+    const ptr: *align(1) i32 = @ptrFromInt(0x1);
+    const aligned: *align(4) i32 = @alignCast(ptr);
    _ = aligned;
}
{#code_end#}
@@ -10286,7 +10292,7 @@ pub fn main() !void {
}
fn foo(bytes: []u8) u32 {
    const slice4 = bytes[1..5];
-    const int_slice = mem.bytesAsSlice(u32, @alignCast(4, slice4));
+    const int_slice = mem.bytesAsSlice(u32, @as([]align(4) u8, @alignCast(slice4)));
    return int_slice[0];
}
{#code_end#}
@@ -10387,7 +10393,7 @@ fn bar(f: *Foo) void {
{#code_begin|test_err|test_comptime_invalid_null_pointer_cast|null pointer casted to type#}
comptime {
    const opt_ptr: ?*i32 = null;
-    const ptr = @ptrCast(*i32, opt_ptr);
+    const ptr: *i32 = @ptrCast(opt_ptr);
    _ = ptr;
}
{#code_end#}
@@ -10395,7 +10401,7 @@ comptime {
{#code_begin|exe_err|runtime_invalid_null_pointer_cast#}
pub fn main() void {
    var opt_ptr: ?*i32 = null;
-    var ptr = @ptrCast(*i32, opt_ptr);
+    var ptr: *i32 = @ptrCast(opt_ptr);
    _ = ptr;
}
{#code_end#}

From 67997a699a4a1ee20fb189f38077a4bb29c096b3 Mon Sep 17 00:00:00 2001
From: mlugg
Date: Sat, 24 Jun 2023 17:01:46 +0100
Subject: [PATCH 6/7] cbe: codegen int_from_ptr of slice correctly

CBE was translating this to access the `len` field rather than `ptr`.
Air.zig specifies that this operation is valid on a slice.
---
 src/codegen/c.zig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 317d77602f..fb4e619e9e 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -5855,7 +5855,7 @@ fn airIntFromPtr(f: *Function, inst: Air.Inst.Index) !CValue {
    try f.renderType(writer, inst_ty);
    try writer.writeByte(')');
    if (operand_ty.isSlice(mod)) {
-        try f.writeCValueMember(writer, operand, .{ .identifier = "len" });
+        try f.writeCValueMember(writer, operand, .{ .identifier = "ptr" });
    } else {
        try f.writeCValue(writer, operand, .Other);
    }

From 21ac0beb436f49fe49c6982a872f2dc48e4bea5e Mon Sep 17 00:00:00 2001
From: mlugg
Date: Fri, 23 Jun 2023 19:55:31 +0100
Subject: [PATCH 7/7] update zig1.wasm

Needed due to the breaking changes to casting builtins, which are used by the
compiler when building itself.

Note from Andrew: I re-ran update-zig1 on my PC and replaced this commit.

Signed-off-by: Andrew Kelley
---
 stage1/zig1.wasm | Bin 2494123 -> 2498705 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm
index e5d9258b4dc74b11cb5538e92f45ef1a88810f96..d7aa6c83a45f88bce7ce0f6702a643a1d8579914 100644
GIT binary patch
delta 825469
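For illustration only (not part of this patch series): a minimal sketch of the distinction patch 6/7 is about, namely that the address of a slice's data is carried by its `ptr` field, not its `len` field.

    const std = @import("std");

    test "the address of a slice's data is its ptr field" {
        var buf = [_]u8{ 1, 2, 3, 4 };
        const s: []u8 = &buf;
        // The slice's ptr field and the address of the first element agree.
        try std.testing.expect(@intFromPtr(s.ptr) == @intFromPtr(&buf[0]));
    }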
zla-fIl0ln+k%NJenVEr^nS+6inVF51lQoW6d9stBr}`7&r^3&Kd6>4ya!o$m%(z%| z6CV>-J!73RgX0IrY$XO21_cJkD|a?;5KP zKKX#SsMy6jT>=7HicCDL+>R6ewg@=d2xv2KPmU826X0RycAUSeNx)G7B;=M zYs2Ol0$GzAmBq9)m>3jU6<8eqY?>jE1>!I&FgP-qF)2VO3nm8z21ko5$K8u3w@TUrBh!|a z%?}i=Ffz4vPu`}qj_J>)$!W^&O#hZoUazdrv~0y>MhhdRH7h17{+F7pry|Jo;MZhF z6(iBb51R!XnFKT)84F!m92D3USRMCYo7|xy!_=^2@-mewk^8^eVG4Lz!14>fY&KE# zW90esxl15Rkzawy@yF-MeQFI%6ZTCOP}gVLuy3-Xx)IyXeO&^Mijz0K6`MT4gmv;1 z^>0j5E>9Lvmz>-%U6N_b<;jlfGL!FUNHXocJo&qZDdV;2AJiE|Halv1aELBm(kS4_ zhT^Knwv5q^+#7X2{^K&sC%`qS-_DSBE-w6z~b1!lBK}n zc!E1iz{>H#oh|`ICIuD^rVbV*P7vM0qQvNU=v)iT7+!71HM^#P6mcprI&S~cfF!X2 zAYaO8*Scmi<=$dZ>EngkrhVG=J8 z62JF0Bia52A#n|)Lr6e#@ z)MjKlyA76n*xqjI5^z)jCoYht#q$~j9EHI_2-EcJ|K?X#4;V$iG`7LgFHCCdrOEeg zjF>il-Yjg(&L}$h5 zo!n+`CbZ_lL~se<$l}h+sKDg7;=<%}_9bjru67AH+D)FIp~p1!8a#BB&A1&I6$Km_ z6`jClTzJzc;3x?()eJ1s`hW6PhiXt@89BN$eOwIAz%!SC19bipaDe`D6lL5#S-?qD z+&=wc@N zYC(g5BPUo3EHIB;+??Z*&B(Oz^W+DfqLbgbrmEfqsS*II;$;9^wfAB(OdnL_L!p~$p@(gd^=ffkFOHhcLsF^TM0g>a}U*y*RAOnwkx%=me- zNT3l@&zZ>{fksSkdM4KdDl^^gnY=Jik?D8u60CT zj2T}{E(!WBvT188ywFeq8@g)WbKS3*n}=S}7eHDOx&bh3S@BGcKOlaoT#K&5dZWeHa z%Wx_3g4Le~i9#|Z)c7ykCZ~l>O`U(QNx)GMX73b6MH{gC)mvHx9QmNaj*N=#V9~xa zNN#atRP<12gGv2A-GI=bz@ccO&<>IZ)nU6jCnto9$QTj)RLdv)tiZTjc7!^M;f)%zN zZh<98aQMrBMJIqn;jzM@sHsq@2$8wD6WJ^-MNP2!cOX${y<(M;c_sPM(@tw=uNP*heZhRQ7af$AVgvfBs}MRJgkLb)PD=F+-Wq!{K>6ariO z1SASAtJFZz!KqLImHPDwkpVz~$qClb_<8cINEwMw?QJO0;0TtS)-m~9q>R?&ukacP z99E)W$=M)Lc!CF|h!d|Ti$v)$UGJOh5)}?AT<1o)v+ZBnDd4CFu8BS@1r?4=--FTkXbFiAU*I(a*bYc6HGG{sFIs}>!sf|4qGd(4focbMQUlp|wr}#wXbqD2m+P){GRVAo<6=lRaWIM4o{p;GqbTnD}sVON=VhDCu_wTG9B16IW|_A@$KZsST&{ty^~kQ8Z!Qwd^=W!sejvK#yC}`*8a&F zafVFuwoQ(XQ)Zkwxjs&fX>;%7<#C3LODErqQ(@lQ&@?Gtg=tamWVLuh=Cch=lcVC5 znD*|PToiyVlQt0*S)oic676Awp44 zAr>loZdW_JVgOmg1lF^!A6c|ckx79=Q5r1sejZYtrN9LhZJOUC;8+hY>+%()6*v@? 
zz#49^L`|y@(YGK`Xj%nTik=F2Q1wee6*jC`b7WBT1e>xGBq|2YQH~6XfeQ6dnMP3j zf@?&DLU8>W2-YwQBn;1jplJWxGkJHSO=k1OCPb;`$f#(iFqKi!7ObazatoqZab!@m z1B-r})Pg8|9T^q<6efdofsE`11qM8AaVYXDOasY-q*^CVPDm0Fo;$Hsz)=e7Zm@Ch zCbuUUGacxkyeCOg=G@)}c*SJy$fzix(9fvotH7$j=6GoG$0SjU11(J`3Jet{fK-7L z90U0f(un6ZSD47CXz0kOXbRT5V8dj)WJ&R>n;QfinIV>&J2EKRgC##s&P$fk`h!&V zg5>1Da=$>L&=h127Fp9bc~i17)7-wv*ORl^n&)>4IO>AiT$|^EaT2b7QQ85ZEv*q{Xy(yCV>#wz-1b>)9H>08tSj7TlDX_@Np2@7K z%1pa^CTpeYh+IZ78x+X<`X*;c(h4gvSR52s91n10Ire?-5^z-d!^ObBqX=oGzv^oPwcd;z84Fn)Ky5Tf?kpuv z1y)DyY$Zm=?Vvh?1suX)GiC^7)jKjcD*fSR;9=xe0ChjkA8inDL{;M0br)_8RMqw7 zRt#0^kX11`fKB^;rX8jZ)XD-`aPxi(T!@#EJ5GVcafVQq<0WL>V7=Q;wZW1D%(0H& zFjQ>@B^o9HkcVAa92~V3nRt}A6<8cMTw=&_oN;gRg7gfD$CDcc9O1c+WKnzVJYb%tcXmUT@?nTb&`SRqX@60Gjh;bug;$dOUe)$zfvb^*l@g=UZ{P}KCT zM{dD@L@xABUXr2AG_`;7$qYlrg_D10NQ&(4X%cXh1_wU00o3$xvPPyd)3g7ReKV)R zx+YMcfUQ1qaq{;}MNm^&D(kl>v{8(#VC%)nTG@(BTR%_s&2ED=j$dUb!>H@Aen2h?>r!~o7m7ZE+f$&2!| zn6@9Dd@fIsspaeBk9jgoH@;5h&u?Wra|7Jj2laJ6+<^CWCQmS7nfxN(o2~JBmw=-J z*tjLv5z@^CO>7gcbO|`hgQ{S*O(3S!UFc$OOOWN6ffUMF0D_o4^l1rucPzEeE?;Qrq;ca*VpJXP2W5DL5&gHioIO|jw0Zmt6ps~ z^Vw~klQ-1pvEAR+Dc~py@;lpi5K|niLa5H2>E*@A(RH>=KPOF}a9C7i^13>Grk|50 z->)-bTrxfJH>1d8wR%&gQ`;xU*1NOyZ|@Xvw3-aEd4IjLWb&@&9ZctcO)hCU#5Cp5W{XyTW~Lb%H+OajF*7x6n>@EWg=tI6 zWageNAZlli3e$|olkfM)Fr9fidBX(D$!5LVm`?qi{I2&r)2W}E_xD|7RCx1d61Xp{ z#FVYXsKDr`?k(-en5D?Az~so2@CpS;>=D%_XI@$&*S?(Oze0P!@WBq~qT>_d+4Cc%X3M`HjE;3{} z?m!kfaDgGq@eZ=c36RJWWRU|$7_uCHf(2vA^g{0QQK^zLv0mEK<%vK(hUn7njKt;mgqodS-G0%_23 z8U<#@hDDPLrwUK@oGK!62PDaakmQ0$>M=cBICmGlJVpxS8K6M!Ko+?L5}EuMN!KNi$aG|pm5;gv^cfhe86SW` z>&GLgf#9&4_!ug810+{J6~?^);)3IA4YGCMI9-A)(gd<@JF<1)I6Vkb1BwMue1YTi zB(exNPA@=3z@D80;?#pu6F3&P!W_~7a>!1QB*-*yoNfXoqua<<-h`>%07?M|Kym)) zahHH&BNM33R$y^_behTBu3M`HcW=su!3M`XP&oq-nkIuGVQw1DZ zK(Q({OC1!e8)r#Pj-4f>fUa;6NFldl!=mW|3M`HdmnJLDQh=z}!BG46=TrelPyz(2 z?Lbm1HQ91DrrCWUvsqzgUpRpG}b{BZk#2hJ;6tTMS;PLsiOik^814!%Mm?n8bIcQ{5x4@F4%lf zAjZ$d^k_3oAvj=uoI!T@SuE;jfYieS=D}H*dc}E|fj$>S?Fx9H^v_FVyK@a(A%d#X zfAC>L?#(XqZ3N|~OllW!Ja?x{K!FKdb~qkja8qKGR$z2wECGeY{0(7DFZWLVzQK)Y z*Y3&o8*RkS-e?E4a&p1Sc^MQyDo_V<^9kRrzI*a(NSWPy_D*$kg+P(VceuFgB;{KMZo3 z92-buAI$Q#Hz7(Hj+ZE~I4-*hQZiX^OTJC>t#*X#!S?)yDg{?5;2`*nEHV>jF-~gIzxs8Xt@kqOX%g7@0U`Cw=2dW7- z$S8;#4kN`6#0|)vf~T(O5Kpb%F3pjhf9Gn4i6KzBa22q#8q=V2IX>txYCX%`ZoLT(_>Wsya+ME$;hoBub|-A4I1W!41M!5gGJ{5 zn7m}agG|%^Mgd0|NOy)oN;$?7TQnYd0egveJn+epR z6;Y4^g|yxwTTl$;9+HrO)@vtJvINqV*kC1_oq~;H!=lX#54kge6uvs5F0tWAE2!t0 zhEN3ZV8f!xua3x0_CCbLwEf8DprbL&Og~yCZ#YrG^l;x~t&_UUvs&6FN1eA;{V@Rv>EUnINWXrzZ2B^M`xw zI(gw)54P{14uuJ*SwH=J1S2bZ2dHAZ*fLq*oFCJxhm#Y|6*JwrJ^AW6U8WNcC$pS? 
z3w58^Bar*lpzh;kR)9Kh!3A}u?#{_4FI)#HTy#+rM4i1T2cq6y%m-2a*JUSHUjmEx zUss=e=#mIX^u{GKrq}l;3taxq)Z00k_lhagyw1t8H{&KRx+29iW%uNLSE4}5=3KL# z9DX%HY(eKVl)8sofejRs{@2x+Hh}agO_sRU1ko^8Y!gT^C(Nx33P_6gffUD0zII)T z>E8XxpRRvmI{;G5GFkA7A=7b?WCh#2-JqzR?08#r@`jtY*iM2JaDWwD0Vxol+;hi$ zvf^zirtaO7ZEr)JIall&NF6IyTknDOZSJ{~&d3g$J2GQpn0)50>}2tK3ZO`{yjR0E zcM*KF=`1*6r(fV=WZlele=Z|CDDi-_G(3>oeCB~Y6D0l1g3`apV>PDbyC>T`mK6dg zbXiD3=TzVW1yA;4TQg`1U!jhY!i5z0VeKw78CeBhP};xu*qTWUoX>c`s}F?TSR5P~ z6v3A2KU3j@Wjd%(;xiGZ?MEh;KeGTu!p3Ld47Tx^2phzr$s3=EOqP4j4U*J;-okX{ z_vDSwFEQ=-Jh}D7PLQC(%Q<3uCxeIZAX$KyLqT0Z5afwZFVz_jPnLQW0#aA-YLeJV z(7+utETGw8>LgyPGo8OZ+41!UP?#FO$z?jfXnG(Qqdwb9P}oX?I>b!vi$R$N9JZ6y z-UhPu@97e7WC!P`nR_;`ecQ+c(roa-oo(TgP60;^&|m`78OVg`CGdnPw=%cmrGpIu zj_aFRCNqC51-a_rSLw-9KNf+Qd|%Yr`oIezCQtk$J308%c91AYf%@mCASOt`oFz>H zj!cs${!y45{Y4&RW6c*$5Vib^JJX?6lRtdXXKGjtoO0tw)bH+0zgA9O_g$Z9*DA0f2UbB1VVi9H!wY13oo(I9P65Y*llTAAVfwTZZ03)Za5I1Zf|@D4 zIO#XceN5~BP44~U4D!{bKVWnI{&8npwFYj^yER~QzN~?pbLOuvQXNTWW;{Gk8tZ2h4Ass=bBcR|)Cy!@X5^4h_# z(o^#o3z!-fO?|+q&a`azbWTRbD@=bjO~22`=+1U3)T)wA}rI_ zFfj^&lpbJWbZ2`CT5DhjP7%u>VXy`q2Gcv38THtf9_SKqJO?Unr(a@bYys(tU}1D; z`nG2JLKa2?rk!iSrte=1_O8Qx*6nJnj1O2q>NvO<-IOK-ox z#mLLZbnedd``nCa&_O3*1vbYYpSK6{Ft#$Xodcy`d62i+K7g2@(IBbmT6~P&Y(1c{ zA2qPlRuB_B47B|yAEPEC$kGq|jP6WVmQ4O%37OGvfJ|DofhVm%zGr2dV4CGPZ};}Y z0*o`5nBF|y9xKeq#>n*j>Go6+5Totc_FPdAqxaeNQZW!?=Ckd!;vmM#XWLsP80{Fv zR^9FraMVOBBYE?*OTdwPdY}xW0@JnIpiu+K=^|2$uZ7SRet$Y$P?}K%t|&kni=wt? z(-{?z6!pk3zQ(Y!_gR;KBM+*TCuFfGoC#7WfU1y19*ear(X4e)V7$jP?ap)sMaFMT zH*as3P-3)UWZH0RdYCe!A=9f{U~g_;q|Eq)nQ6hz=^g5fhC=smc7d9P@bxvj*H1sK z&Zxrl_~!PH>Wl`A?9V_74lqpbQD@W@T6qh*>a`HFr!UiF{2=xgq{ti*){T1_1RPl* z{#gzQ>+R3A7|$>O-(}Q2r1xy$fnGSBB zZfwHnUcc%}lYpYABa5OC4BFWe15Q5Kv9&3o7-{! zJ`l?hq=Jjvan1=ikCWRGRKh5Vg5@~49YKYlq9_kLx8t9OtsvcO+>UIDLXO*Zfq3Bd z8jJ4qpC*jj>>bCN1sosUo~~`mXbQ@~Nv4eI8rwk)J5kWex-H+pwt(Eo#O=7~51hxy z?Rf6a^o^#B=1dKXroS>}R0GjMW{gun^a?XZ8xZ}>j4_byHfV|2K~VPH9&FCIj0coO zemgUIFdYXint?1ddk?NWwoi6p+``QX8h2C_aNN=|Js^NlnGeKuWD|%3g~67V>5T!5 z>P%Z&rmqTM3p?fYI0n7(GE&-Y188q7-grQjA!fw zxhN!oF^7?9>!s<35*ZB{UxCJfg*iYoLW+(p*QWC)G5%rt@p(IMGNUdVC`@cJ8Qs}d zfm)&ZppuXcGQZyDvKViGOPG7vjP6XQmcvV!Z;*vtf4~d5wlB+J{K5<} zaZUlFJKGP??70%iM5eir0&p>?0Gysu$e6@-V<&7Fefz^g#z#!jJ&UG;Mif~b96J_G z2d(l{WKm#NU{GKbxHf(LV^PiN?4^v3OcNGOmwh6tG(D%3QHpyaM2iBeK-=_*rHrzC zZ~;fgGJ&?~6P}35O}|yjC?$v_kp+>v^+Z$_*?{f3WsDk(pa2gqXEbA)Gj)4^Iin?G z{Xx*82PT1SOpc78C3B35U^h7~?EDUKaq{9qT2^IonG)EQ`p)G5`(`m>W zAMR)ctzUo&IkG^l0Odc&2!z$FP;pRhbcEaE$OaV#WlJPscBn8YeP!&w$GcEziX!I~j@i9)>?mUZ8mX&G7#_dV-8QGYbI~t}<&tAkR z&3vX|+Vsvvj8#m_uTN)P%xJ`P2{Qb?-DfdlFF5nuUc%_gw0!w=rKODeOox_F4_?Y> z#B>6bb-=FRoxXf2qb&cAJ4lP1Kirvqc`4&|{+7E)f`9I8pSg@NfRSmaXYdqGCTe{-3;QHLU;!tCYx}BvVmjwiUyFJF@(1oRPiwJ7;)Ewtv$6BtkIAg zlqei`?*dz70O9pOoTLxY`1TrDr5=PgZ%HG_P+d^X#O%28Bv?)d!u!?>HbtA;kwcN$ zal<*VoEAi`=>b?yliRT#Mc|7dVKaw;-A9=-(jwK|0NA7Z^4#4#Ihg5^{p)~|dD zHbwrGL+Wjgf;W zU9lM)Y_i;r%`d@L%Rt0$UI6o?A-oqm!4V<_G2s{_;3c^oSM6v6g^vWpf%_+b9U%_U zvv?WEgnAw^h-BM7uo0pV$#-kO)`~z3oCtBfFvQo}c7u%(g798L94`pbw;K|#0uX&K z`oJprA#!IRa(ocE4-mO}UWhTj)_@Zd55zI+Ad=h=r6+cPjp2gu7C;=s3E{mz0CqM9 z#EOTlAU}cHETH7L`6f6d!R;1i$0kUsVujdn17ZV<5w|0sBD3Q>NNh1fc)uZ5fX5G+ z9p69RDt4!R zy6rATHKq-H(^Gdb%Jb|w0Vy|G9XIrCpRkLOjR_KDn$r*LVQgmV zFQYn$GiL{*GKfC7gHd+-*S(DE7@1ygpT2ZIqXW~FL(`w{XAG&Ia;O>9u-nNBYIiV7 zIWj7M#^;zEH-m;>nFKJzPk|OsvSW&G0F5ZHV2V$D0&a(3m~(m;ctw{mhQ!Wu)6X7Y zbZ6W)UGyNM3Db@J+r1Amo@F-p{HGnXDhHyT0pw7~EYc&8I2+Vr1}RVyS735nb91`K zQN|ij`|8+H#y8-4bjxYRnM|PC#{V4SHbxMG{Q@H^Bmb4YiJ*3%892S4>6Z>~DhIJ1v@b=GnMW34=qFHx$IJt+1ehE_JxE1n9&jbVO zJ5&->yfQgnJXa424ju`JF+U(m#UYZ%AU+g>@b-XeN@gBWh(o7>ss&~q5s1FGtzbV1 
zLwJ8qw1Ipm1PO~iP~Vf8M-XDlH;6;Q6#oc7l2Apjw;B zvEfX!K#(Fc4;w`3ZHN`%nt;jiAjH2ckl_At59~u`h=rFxRXQ_hB8gRz$??Giuz$f7 z0h8m6Q`7I>V3cFtez;}&#k-7>(;L<^T7lYBoYSLjGV0g=+zqZm)j(cmc6rKpYAtZGVRqaOscaQMQOxYP=qFgE zJVfPYNG&M`iVbGR`}GhDWg(JXN5KsN8E(f8P{L;dr5QFwX2)p|%b*R1_7-quD#`8m z{1Dh^35a1W5Iy3cuw{0f1kob~ar^;@wW8dPyFu~D1WGEPHdXz{-C!exAr5%@1sobe z5KZ?XwY?xDY!U3Cg6#q) z6K2Qe?acy;^-Mg_2E(0RaH9a)V0g0ttcio$kynw~amHM*Ms{vT9z|xy4>Q0#XoKN4 zD7Ari3xe9hk03l2ZpZ5_;6^7i#1Z?hflX)Pc3f}=teX*HGSjLh)7x(|27^{}O?)mY zI{m;MMwaPcZ!;Q#L^SR&%F2U>{uu?T6__0rvQ!wj1wdONL2H1f3n(xOY?+>NhtU0ul@~c{F~MvI1CPO zPEa;xc0Bb9>~3fiYU4ey96Kb5&E5qLEjDh)8GFF0SV3Ewm>pkl2d5MkhzWZk(ZUSj zJ=qD?!vrw_oN1Uq^RJ*L8Ms}!z2zQbD5DnG`^=6Hn6i}E1*#pv{Y+37QGrR?5lpj5 zE3ttW!Axgo%J)4058q5b|A4U>6gVLd8C96(U!Pv}kTD-L zJp17VqYl&Z-P0u>F{Xj|jgKJwkIxvz!82CV1wbpbn6s2P1*#Q5TRs~W zO@H!;aVyO76HIW+WnuE0A2X^juAP44F{3f#mgziC7>yZsOm}#~XaTYbX0zN&Mg>u* z4{k7JDRCgozx#x-9^~k_r;K7Cy6h>V3e(Zs)8{;8>_;{t`Wc4f;YMCy%2MJ&7^(K0 z@i@q+_s=0lvcF(dVOq6oy6y``Bc^q`rYC~vO}nN~1<~7fO+Wd9Q5D%LkZU2yli3l? zwE_wtr!yoIr-(KF{AqYC5h=?C9I^dXlZ#58js#Kebsx$US#*Ae}K5>`~ya# z>F+);27>Hy{K%-n_ut@C)O7M$wzA zJ3(u#E`z8O42~VEK`oN)N53*EFbmxPP0TYeSTisv@H;YPIo|5q{{07|DHGGO8`JfE zGa50Sgsfr(4P)?bZ~D!s!7TXUGkCB-fdSO^=5YM-dHU{ujM7~5!L^+uiz3T(#vm!N z>2K~ba!mjCkC8VBG{^<&04)LKTxJk!Hz;>9JF+UWfGWUi+rTv=D`;&5ivp9tMOH;- z9u{uL?~uj|Gq>X^P(KZ#o{8JB`~LLU|BPxvcR0BDLFJQxBe+c{aF=6xfenlC^b`LX zO&NDj|M#C!no)786q6jI;#5l}4KC0+P+f+@9E$Q&vzauQuH2bAm1!1-jRLaV1g`+2hHtsE3i8< zmt-liD{xI0XJZOt`n!92HXDWV6A2nLSS# zvK+ZUjz%$iK`O-T?bDyLGX)88IdWuytULe}-t}#|HwTj!Blq-a984OF+|&1SFiDDl zx^XRx;JxcS3QQVI4UCE$)8B9~DLZjFa%Jfv88agzBg8T`cc_j2paEGnMG41WpphWS>D*jQ^3sbzqp+Zshyt6m zq9mwtVN+nzWjMpA$UfbXi%GH`JiGx49Q#5(H8wWTDGiJ;z|PLW!N|?X$qBLybPNXQ zU5HylzF!Bh2`ie6cvlN&eFEC^&a47I7$Oyb-Vn( zxC9y*K_WVeTp&tAk&mffK?r0ZvnPuKq**BB$O18E`gLw5jYL^9rUj{>pmBs&ZwsC> zWGP93nl%ctW=scC6__0lfRZwwKoxYU$8-S&xcr4wM@B_d+3D4SOkC5Qc$tJ)z$SpjxcHg4 zrq}Z_iKu{0V0HvK57vNS26=KtydpP>lT*{2; z0*ZGpfV|5N_AW>+1>_Y_sURj$r68#w0dlB-0{iqvKBhF7!{72TnX^IVwD_6Koh6wZ zKq-3(lOu;aICV;7DM&)1NJdctKj4K7O!-51FyP)87WK@&{1sl#3uttE% zjZt#?em*8uPDEl;6l78tyRs0xGZ?xm1+-lg9OR0E(DWeo6r_L=wj6~Kmb}0UK#8ON zB}f4aLIJY|(~5YIgEW{H#4AdG)4GBrsQd#n(Y-GPPUFxd32Huq5<93xtH7)PN)Vv* z4@x7BLda@MtdW|3xr_4u?+k_BqgXaxq#{(c`it=rX8bAx* z;QoiWo&%I-7Q`#^Ot%tda%1F~-YCo@BExOQ1UlCPe6k9=qfC|p2dH*ocNED|ket3z zm?@f(dpfTOla8Q0Nc$W{MF$W)gHh3Ox|ax(Gb8u(J`pB!9&SfYZ_x6+eJl#x(=UoJ zRmLkRFgc!O&(dXJP;>$%imssas=(xU9xM!AuOtbQI>*k- z2ug1ZijoR40xLk%bFKqD;1o^3$J-GNmzcPxljJl2uf4Ra6IU zi{mU*a#NI2U~;?(H5Fvy4R%GT={;ghj~K0{=ZiCmGb&AQ7iUsrRg&gbbf3OfoJm|t z$(38t8sxnQB`(JajE>BX3?N>NlJxX@;!I9VOCC;Fl3+4my6|Uuhy+s@x4a_VTT2B{~Wr||zxxc+gmMM^t zY3jG>N9CAYc^7_b7BFKvA*;ab*!yw1xIB{*Xf)Yco~Z&vZ^<6z}hU;~}dGF@MV$rM?JA1VX!fdcpR87fSA$O@*vR$l}U>6{q!zX6a$Tzj=Y&Jq{d{9q?TuTi5imz6(gDDHVlnT@+JaLdAOUZcpX>}$`7Bn*=z0bUXv`m;R6nGsSvK_Cyfj4V;9R;!! 
zj6nOUm>3jzK}|wq2nW=p`@!PKoSlO(wm1E(JC- zCK&}T1y(aA4h1fST2NDyOM%6VNkReKuw5Wx&cvg@rNC>(G=oKf3*0Ia1{ES)j%OIM z6hy&H1u-)w4+SpABMeyza$pfhhinA{FiXMEjLAlU3smGVJFXA`Whsa_qb3tWy*bkf zCXlI2SxyXKs~xW}WH~Z8+Wg@LwMZv0W+_QK-gwg`;J5>_8iNZod;r=>!=S(g(sBc& zoxzIX3&?R3-a*6b2}tw}QM7>LIo|m zc4QFvz{unPN}yX(mDs?|`vu@;Em+BA27xz>h?UbYtqNR@Q^0)&$IA>^0&kI2a5=6) z(ft`k?gw+0qM4}UfiOijW(Nf}aDxWYQcz%XTySb4=iK}fC-eE6b%%Z z9PdDwhSLRfnF<({r#I>{sY-%6SfGRlI=~AQpy17miu?*()A#E#?E)3J)p|@>jQny6 zOpZ%fvJ{vc7qDb0N>Bfy&y>QbJU!BYNrl4*oQo8Vr?(m~X|RG#oPN=UNqD-9Art5H za|TSjj2_ePV9IGRdQNvYWHLk3!cnhMr0Au29CdW5yS&H7^ zjvA|?G>HDgqR8&Zps2v?pdiiS01|0oRWtz=yZ0GD-IE_z7_t=DHJBKbOcmH2SwJD7 zWF~Nh5!8KN6A$XtGJ~2kO6Ce?phkeXf~gs(3#Q7f!L%e^K|#SJOCK^aq2S1r1v>V@ zkt0iyT|wHBFYi$+Y@oC=@{_AtAm$#hp^ro?(vPzeUEdKJwSm>hpW znC1#hn#>1S6rxVj_tC z%dVINqW`fgCQt7-VX78P0dY5QD5iqwjU0+;)1^(Bq_xu(m>egtWq}G^aPPk18bg*+ zh60o03?#u0kYMKYL{lbLsVtEGPwa}>Ao>-%Vh$)XTxG~o%AJ0|lu1M^Pl3sC2a?_! zAi4bMuS}Vg7z?KJnlVXo7J{^1;Q3$K*c$;V+)9FJpGm#ld_~7Sl;o& zb%rcREx_#f2E;a;E@sZ;p;Yb2UTDSeKU#rV!Nifd(2C(7h$-&KSZKxY7sOPU-eAt; z&T9uTdCpCSECrkCSIn7|Il0W37DOsAE7(r6U{b7425XrYqrj|?1ZK_!F%!YeIUr^N zm^mB7j8`ysWN3W(t@& z1;nh60kbE8*wJ9-L=ZCy%$xvXMuM6BAZ7%Z*#}~VgPFY`W)7Iy17c=_ncX0!7ns=v zVupd4ogii?nArhhih!BZ?587-!(S~3~eOM!(NL9)tVWgmHORNJiOTo;w zF;WW53fy4!8j!Fhn7JCnESdh%l1YKb3M{f5BqBLo){4oRCkQOE03;GPJ;jPi$+52U4`5H|zP{Q=@;PS3Vx3Qx#|i%z)Bkfo3Z=XQX&`7o}6y&2O2 zP)aQZTmBeixgwbP2*gyiW_$pmnKhX&gqkxqL@O{mE&&-JJ6*|!$%qFMu+KqC3a2O7 zFy$a>2~$33zXO!^Srz1`i`X)yig=naH83f#I{snIQqY6Qcx>;rWzu0{adA~}oxaze z$w}JLjA;!>(FUe01qX;CXHc)oPr+%rlmpW>M(gPx9hfv3Ri;ZjGJO#`0BeS5D6l#j zWI3LAw_VYRDTL9DTbF@BiP!NzBWMW-w}J>LGjl7jSTQgv@PPLF^YAdSGC3%4>oPDZ zu`94R{%0)o2W{gA4_GL`G)&*^%(PaMTb~hZ)_-QGVb+YG?LREmj7+G8O`qt(B+kLD zAOcn}eS-_rS`ltg?Z)d^57i3t68H2%S0)ijZVe_DC3atATUkK09QXElu1ubcjNH>- zxG|+oUtPi>H9f=_@HsMODe)*^Y1IAkb;yr-zW`KA%-gOB$UU)bChA-1s{vR-@3vmj}3X0S3_%R)0S}|$*5`QK& zo;~kDhkdeu>Yw-1ulX~{Gj04l{kuQYE~X9dr*95m3Sv9BzCpn8&~)xVCR0YY>5hR+ zu}nMOPhSwo6wAo9{a+x{8YZSKzqhXmVcNmO|DqFm-UDdz;6dl~;&7(LOfSw%mxy4p z6?kxFBKR-}rZ-FqOpaIXY)_3~;$UQ&zioSdBoj9W(~h3)HHl2zj7$gKZ|_WEy2%Jy z&XkhE^pc5j%5=YUrV_O&JEsY-Dlj=RS}`;+JI>fSO@LdM0W^ZZ%Ll4_nX;6a9e4Ch zf052~QvhA<^otoxwM?^iP7larn#wq3`m;=?gN#$QugGGuU;(Xad6dWG!HB8Ls(|Sr zrYLtI(H2j{s!W&OPYpui%n#HzsT$OvM9w$M*EY+?eho{wx|ngFu@brX{{$bOAxCTT{dmb2SkTbLXe z8ShPRYh^me-!Tc=Cjsq8ZJ4w@xsB--XlKxL`wpfBAngx3nEr!k`wk|h=`3ALbs)~e z4koGT^SYRngjV!GVwDM`Um(kIL(le0T}+ycOcy$)|LtZnLnr{b9HPLrhslGH@#gft zUM6j1c@{^A{OMk%P)4Sm9ov=qnVv8)ifm_}#PpGs@xk_=)0w7&62*2BGb#~FfC)`pDs6-$)C}Bd){28*NluJ+oR?)X)`iS*g3s@ z0n>6u|LOV*nUc6QKrLx@P#<1o`<#VLX^f!N#4L-Mj`M&RXV){mV`ciVa{Jz`Oj;a_ zbEd!8%akArTFq_7G=W!v$&uNNX$Oez-ZnjUA5*g!cu1Pbal-^qIu)o^;8oysY*;k? z+difmX(Tz=a5V!=LGOMhM^FmAu%AhpkrkXuCO0aJP3Jqv!~t5fFL97bh3V{q>2?R1 z+?c*CnBI1f$%yIqg6X>sGRcX9m4gx$rvfwLNMq*7jmqNF*$y#DGyPpKUHK4`5!2#@ z)5AgZiiOj=4>8>U^^j5zGi7r!%{evw(ix`xOz-ARpL~|dgz3|~=_k)Jg)q*TE_aSe zN@|LT0+Rx3mg9MzECppyM?j!(j%VbS#a=a@8@zRjD? 
zd7ep2{O0dw0niwt6~n|yObVdo%PeJ%H-As}I?og&@^fA%Xb~OsAQfgu@Cg5g^Gupz ze^F%=1Qa+xf%yJBlRi`9{Ot-Cm>x3mcg#di@H3|iTxK#igNFn&ID0|X|AEq$0+Ztf zo-8w_hKWoHY@ihZpmA*`$A*baSpwD56PuV~c#tKxKfTP904_4zZ!qyAWn0j^#-B~o zQ*S{s75i;S9@W3iBoFd^z-^{}5dH8r6L_b~!`n=<({1iRBp=>p5}96jhshQcp&RZn zy#l4MId_?S_*9TaADA5fFl0Gy*a06+Q!6ObZ@==E=^6{u^TpfOd}8{_&ENQ~1$18JZ5B}1>kjkuiSL*srl0u7 z6wmbH+H}qTOq-dmew=>qKhspEom;n-GAA)IUYdS`k@*_a+?CTeFflI&&8tT;Gdudg zY=@3XA&;5}Il^7TpdbXA+(z<)xZ{eC(4ia9j4lVrKii)%GcRXkTsggum3fu;e;y@H z$Qlq<1rEpK;4ZMBK*Mx@Hs)08MqVUI1rEm(3|ZjeDh|gPELo2C=YSJZ6GD+AW0nGk z;||6wf#&J&*_dVJA7dy{;6hfU&CV>#@f@UxNuX(Z06Vj2as$FR1rE^Qs{)Gxs{oQE z5K+gkP+iE%HJCgUS>WCf5hUprd%vutyaKo9ok&T_6#mU^t*fRYCC$k)5%XEG&W<$o7>5g2?R^n|4EpQ!7 z0u3NZVg4CR3LK6zn6e!IF(|MJw1EYA7k~s8Fl9O3V-NrvB0T*$7c;M%BLm3i*-ES+ z%2Qg2#gP%tf$E(u&CSd$-h#zSA%TYJw%p8FOzZllujFPnXKa}MkegXl9uXp-F?D9} zx$!GNLAHV=%dz3)c2OQ?9!925>!vI7G8;1eTsJ+8m)V%<-@57Dyv))T?Qome6gU(Z z&6pN|v@T%Da%@@;P7AuQ^ungV3yJ3+Oj!c$)9>*zOER^spZ=4V*@S7$j_LY*%o5hP zP20Sq3v@;Rl4;;#kTF|rqAVLR%H6KWBLI;W^=|CkROGaL4N1sXI5rx zn{LFD~Oy9ZVnAO&1klwqk6U9w2}c$4sbkJV}7LkZE1tc2+^= zDeTP0np&pMkziKmSq<6~zy_K^Tz7r?c?sqR%QCJak_{UvnA8UP1F5B^wUk#yFm1m&C`!cF`F{2**yIZ zh(5Dhw zdj~r9jTfSRo-#8h+aHjC5JX^uJToU#|C#B#<(bWxmYA;!kdJ4>@OgGO= zFH~VxWP5t1OTbYWqG5poGbhu}Gt*at6ihrj{Sk;>b#}U>BC{#e;j%B;wC{VZs=C`32dt2fV1->JwvmFdpe=>ba2ifk`I3PeB(M5p&CF>|o}0||(M z1VpBfP9I`POq0(~UkOsR{`~X@Ao|St>EbHPrc5u-gLEk}{X7qHts-0Ng)RX{QHVyc_hw(1 zz8$1+^M&c}K=hdl(^XZOO_`ov0BL}F@8boC_kMzOGeUHOz4!aV^rNcG9!&o(Oy^f) zR%GhEINeB%*^Fty#pzjU%$7`BEEv+Scx?K1b>;x3zPr;o zHJH_{jxTHwP-Fqke6uRDI`$rG2JyhFeOVoQPl0(%pw%0!jz2-`S3xTs96>5C-kP4J z!Mu`f{lP8)M>&Y+Kw-*$4kU1z0U`oUJhKi>kI`hdWV&!@`g9Qe?a=f~n#`t56An-3 z)B@#{!_yr=^tHp&%R%(_!_(J+=;=qMKLybTk4%@*W;SK|cw~Aoh@NzGdY3k{CDY-f zpwLj6{!*J+l>IyCOzAJbr?cuX>oFZWI^9%<*^=oWScL`Cw_Vd$=`iavojErBwhps8 z)AD1}S#+7Z7TU=1h*9 zphXKGKugU*Y!+_EKc~TJxIoKdZtMU%f&&!!Z$V3km>fAkh1`Yv(+}x0>w_9spY@rQ z75@Bf2kmoDQ(}R%8bB9#2sko2&X^BgYx{aSV+X%5r-TBtqePbDnt$7a44AJ8f*M>} zPRzW_+;b3n2)8@AGApxjzc@8f0Jf{Jz?)eYq;|eH^Cm8)rJ~c@EtyTXcZ4xdWM)|| z%AGO2z*|6cyHOOgJ&QD`^XAA1IavpMI|Yv-s{*Kxbo%G^o;YT1My69Qrq7IL)&@=L z9*bwr|q4biw6+6PTHp&fVGmKZ$uMBmWM>dN&5g4bP@; zPGPP91%+-Zb1R5GmC3BkxM=!~ROVg~H$9D60YrB~>D6h>ptCZj%cnCdiY){kE6ayi zfxr#vqR6K+OHD6GXXX&w1X93=P{0OheuEW!$YADR+ID#QrgUb1kPXZk%!(jdErYoX zM9<4$R$}}({a^;OFqA7b9jvEq(ez&#%t0VQzf9&T5Dj*8-}HBx%&AOw`nHE=F}E=> zuAB}FeC}N4y)4`GI4S;voL_hSNPp{Srm92Rkz1=Gk0<_>TG{8g;|i1QF{BYsm!)ajB}=2%wTS2 zoI8Eb4CXe*dE1?5GVf(}VK!qrAObqe&x&D>hysg(s5$cn5dVV+Xl|Xwaf3*fz$7LG z7F~ufBCfokCKF?pB8LJeXb6Z?fekdf!ZzJyF7rV~=ILznn9UiPx7*EQ7Utu;%;3-B z0NQafz0il-_I=1$T6M&0JA(J=X8A##XsF2M5#{C2UFYo z4>0pGGD=UMdyrXu!a*aSh^q^9pa#O%VTw(T(UY(}Au8PF~Zivk1aQlJSlrmsE1EQ3XS z`|Tsl3z+m)fKHcZ0o~%`$;ec%%-|@Ht;D3l;Ao-93|f7{g#?{l`9cNy~_-gyq6U;`;j4IPloMzT#T)q9CwTwxUSUyiPJeol z`7RIR8t|1p+h5&dj$;hE0bSe4bc0QS&G7+Smg56NE@X3j0+ap#T4?@*J2Dq|E7b2e1sy)y1Clwyk>z** zCc>b==6Hr9%kc!Vz#Gsuy65no6`SJ|jx5I(WPv|0ciaJ)_=O|OamsVh^()}%9yZ4X zoX|OOHqf;6^zMhu>ih?u!{^l59Ctk5e()i)3ZwppQ{W9>pf(CsoEdbsCd`}68VcaCq8kviA;w)eH9h+= zvo*_;=UoER8y_)iO~3M(S%GQF%kAGDGcRKjnehth-wU9VvTtx^IWBlLeZwu>EE6+Z)Q9^ebozQ1=9yG1!q7*F;_UV z9A7|e1KGt4>mh@p{su_F15U6V|6eex@P9#8_~zAgqnFI?4h^rtZiVRn0WokWHBbQ?&r3Zv~9E2gEzVo#nUz zS@9N_^c9fw9qug0J;>58xU(EjAPXFTseiz&zzj-JKe!c`r&qsXR<6H-tnLg<-5Zd( zH(?Sw?vi1g&EI1rH-b`0{%d99e1-3+I35NoQBPg%V zc{4roEpso^sTb2hl1$%56m*tr@Ut_0C5!G zGpkM4`M_)l;>3So?g7znKQQ|+oqDm|`Xh4zxT&!A6LStoLi#iFQ4qc1E3@8ofiKK4 zAdbT~W|{3>Uzk4^f4M_}tTE^uTy-Z%r!(~#tKf+NfE0&Msml!QQM)}KHY*a0e!kOk&&WI4`2 zHe(7$mg59u6$?1B92Ym>pMeWI3)t)&mOD4G<*|6_* zR!0_fR;DX=wr8=jFv&8_-!?r@iNy{y0KZO&C6ICZHf5IoT%h&@qaI5mh;GqiNn>2Q 
z{k0y81P5rtDyun*EO-m6iY3c_CQ-zoq__gJ0*fPLNb-j@OAdF-SwtH!--e}var^Wa zHZ0DJfzx$tStLMax!JOaGBWyakFjI1X4IBYVgfDQ05>uPdYM717a0Wlm=)L^bwEov zB(jv41o}Z-o2==F>{(ucOjzf@BErPDX8K_#79H-MWzz&4x7-3(Mfo-?CfoI#S%MjP z*TDCkiwG>A-si#+1R6JSVVTLh5iY|d(6GJTm1P@vm#)1xiwiSoa;4ReC6I|}&eZ8Q z0$84a1oj28#4~a(1YOg?>GpCiADKKg>J18+LFgw|| zJ2gyseTUhJL14;up->ieMzI-xrh;d5m_gG$6JVUlj+-Q>D?E~Cot_)U(#|+#`qwa) z6O2=)?+a%+z&K@leFTdQcqDyqBukeN)7f9!J#$!=Fq$s-I~B6-Y6|Ffw>gYijw}97 zg>4yTafB|cQsOmdS^!e8f)TW|YP(V%%TIRxGwO9gUIQ&p18wMGbu@uijjWC~aMlq92-A^)hlyK(ar)IV7B$9o)BlvQ$TO~+E>q4T z%cwBjs+=W-QDyr4a+YU|XQsDRuq3cgV`O5gXRMq4x`IX1QAL3bv>uN^flYx0Jo3e& zz@o{-q6n&??X4Jm6xb9v%$YzuurJsgJDIE*{1n&}7(jR5nK8*s_p4;d-~i7jFbb@h zzORyHE91uLB~>hbprQDQ&qbYOeokr=fUW=n6|wwT3QSp!iV95Gpz$B&YL*hl+0$oK zv&2phs9`Z-oIPEuhDB}qY6!cvhDD8W!Suiy7H!Uj9NeJY;R3U#Kd51`V=SL8U&|sU z9N@~!0CEk75`$yCn*xL50fy=BwJb)AN2gcSvZyewn?ApmrHFCqbi*uG$?e*8EISw( z*KU7O&oT*|Dk>UTDp^5U^>Z6b4~TATXPL)+T9Apio(XihU;|7p9(bq)mvf!WNUl{J4D!I_@jjHv@Oslg7OyJvFjV9IiQJaf8F7mI@Y zf$nJn0&|#^m?520&|D};`tOM@0Y@f*wCMufERxfgbg_ssa!lXV#p1%qG5voRO9IpE z+0zrdSrk+z%<2SP3$qYz0y8(r=H1}p#_sK9E- z;G@LfroiuHA1E!b1nvSR1y&6v0mpjKb_OP}6Z&U$fNonsb)@47Yo;v69iTyRW&wm0 z$N?N?Og}(0ry0`+bccKZJLJdot8WK3)m;G3dDM z4(2RJSggTflTm>Qw5oOo$O<+yrVY%33Y^e2*i4Q)n6n&b&z?T7m&LsP093^hkRx`0 zEk3}UC9oK71E}Ywz-PvE0;GV$is1%G1EU$!1rQ(N>J4CRC&1(d+^$B%6C*sH;vGOU z{>)j9CuesEII<(h255C9$fX$Z1oAVo#W%n+DNF)O;cf=00BQYuV*0#37O8qJM}aIQ zR_Jbzm_Q8x zZqQ<9Hqc5OCI$s=&@yOdD2GXtsRVRqsRmPxBC8_guYiT;@zmK-~@SqGuKbez39AJNj9oc|f`P0YjF+8fFFd>F@hlB0!UIo)cIsMEyXU zL3kY5tr+}3>rq)iVf0}7vFI(Fz7 zlq*P%9prjO1!jTu+heD&7;-Wmn%+N`4EcDLYW$7O!mEgFBB};)(;5U;ZE9i_kN0BTg z7Eoo(3|e*0p}?%bs>#G*&Ljh3X)u9SqBDUG0Rs)x2`pfCWGrNHaO75GcYHjvQvfvb z$L+`tD*FVK*rcJNki5?(Ce5wL25Q2YF=>F7LOW_?DX=N9PfuLJBFZX|B=7<^!>~ z{2duf6gU;Q+(1i+`Iwj(8Mr~aIKb|cP+)UnRA67*X3ngjz~(5CrNHK>kR|YfU5OoB z1G<9u17T5ahob%^Og$(kv4P@~L4!#^fx*!PbS)MKDC2<^$Oyb*2YEq5iG6z6au${P z*Klp%!PWwfQ;Y-%@cxF6@e={fw%06?4W&y4%sRU3hdmI%2{~ond=y98Ecdo z94Dw{E3v9DD6lFpYceq?37Rv{U{qiQZIlHEy%HOEg$jp);BqA{fd(c8Z&qD~3yh9} zkPOSJ!L)=?iEAxL7qv72T>|f>vz(9wZ6~rgA*sg5Jw5(}q!iQMNz;F=V9}O-&#uHG z@PSgaTbl`P_nAE!sGWRa1Yc)S&K6&a!~76LWs6qp1)PVZaE zQp@;hI?pN=BgW6u?N_mwGk%#~zKX?>@$2-Rt5`gkzOhelJS8c^bbao0#nmj*Og&4c z+pT7?Vca#ndNqp!fGFIyR_HpRs_HvKAA*&iwjtd2iart__3QIa}+8qH2Y zsIR_H_gc#$2eL40ElUXF&*}TuvY7B6Vu#kypfeFKT$s+Zj-`?5JNxv%XC)P;g-fr(e7wDK@?4oFpgHq&d?C*0V_RU$`(0)VTmf z5?IB$bCU9m2d77@XOUw(IQ`8zNqJC6@t&6qV*15CJ@LGx?sUlwEJBRmr|WEBN#*8p zWK`q;El5=0nl5-jQkwDLblnS*N{rK|2V9T@4bB!^kd&G(x{-xnj05Z|R>ukKC{9{` zK~iga%0?DPkRN0&O3F?@vXRA&@znH&i<0_`zo&1wC@IghV9xZIO)QFx?9=N&)Su}K zHnBu9?wP!RhZWN$RnHy`gwnQiJi-bg#>ja?`(VX36IH3%VU` z0cVy1o4}#z_b*FIO>f%*vD4tbq}cTPTUd0NR?eBuw-s!Q!B!R}&NfgmatkbEo_^+v zq#VTDcUQo{Gj}UXg~l&-MP?ozZcyejV_E{ z;MDnl8%u|CC~^8Nmfl}0dwXVpzac@;|%UBfhG<`W*#n( zqZV+3wwiNI|0v8UI=#S^on^ZD4i*bgPA=TRqQ>;4b^6R5EWM1))75vfC`)4`Z2@p1 zU=sMw4oZuPptRkwlO=-j=k$j=StJ=brvKi_BE=5N9x}UFTsgTw;kblbkz;z+E*2?9 z&gma+O3Imo;!}fZ1+OB=Q8Rc!Ar8+NT+j@{>L_l-@PJo=6||Rh3y9{Perp#?kj#Q5 z(*y)sI22g*87J^5F@aiNy!;@SIWAZ--D5Y44=61%-j-CHzI``~k{%>KAvHSr6hNIW zR?xzmLY8_3W>y7OM*%B_2Yd>w3Y^mmZ%f)UvQKy4!y;z_PeTw}z%>XjACm)EDubdpt=pz!22-${a%(J#%N zv~wte1|b-&7+OFHj?s*%K~RBJL1g;7yOLt|9UOZF-m-#*3|K)s=V0b{a)8pNKo-b& z2L)Epx+Y^r27xXPP*arE@rOW`f{=osK=<_9`&jH4d!|e6XVH{;b+i?9sVbth2NmZsv$ER=H&mzwBk$t+seM!aXulKXKiMDZAGu{BX?}8vG5#JEZ z68OYEea(GIb&+-s3#Jz!sRv-G7lK&=pQkq-V9^li;IL%=0h0LumiZx=CGchX-UBS! 
zjGfc{A4sY*ew{9OkVTYJ$eeirs2~@du78k4hVk2UpMxxlpsFMPAd7;Gkb)rCGuuQI zLB2j9l;wE;SSx6!^Frq7{0}9i*_YmJ5O6$wbNbnXEZWj1L2{f($%hx_OZMrd4hU=*D0dWglE6P|~szXBPw5pW(#f^Y>W@v%FC%Kir7EP-C6;)k0@7*uXBnK420 zVV2{W8Pl5&v*?9Fk_Kp)szVL0$s&8eMt2Bj3G|^DEri8rwj(Thp&cBGpo4h`J6d!v z*ystuSpxkiMhjvwy7LH&u?IM9YGfHRGJw*(CbI%47l1s`0P#cv_TT|^%a|QNZTl?8 z6RKGXg46AvN=nuXI`M}o2szobDF`~TbhIc6f|fdGD>5qxIsO07+^i(*#L}TCq#*3X z5Tqa+AkE9b4Kd=RY8Gf4H+a+v6znsEvjm>Qnhl^17wA?c@DL!h?EngP@Di|v%t#SF zea=x9Nqyp7{zElOK?v;f93ha)1wrl-3{VgR`9KKl10k3XKps*QL^$~;nv)k`IGGynNt5ZF$H6U% zImcOi89AoEI?f`?GXa#dCWvGyGAXc6&wegx?1oX7@IkZDcXp)U;RTh!TxLugL_qB@ zaEpEgs5ILllBK}lIAQ1Xo)at)Oh4JD+rE&LVCE8-IGyn%i;U1D4rDWUxD~jk>z`y1 zH^Q3bKz`;nWBMTia)2Xf`W006i9p*}=UzxEO`mm=MU|0j`hk-yBBtjVoGyTN2-mww zn=>x}C302`rU{}-9A->2L_w`HQIy&X+N|R|#bU+91sSAbRp6TLdx}Mq4N`GUuQ|n{ z%Q$V?D@n=ehfhITF)^HedsI9mZKs8mp8%sT;npz+Rs%@qhoMzExrTeP-JmrR0NGmFfltQFe&i5vw+U` zW>Me)Z3}g*cjpE5R#+7HKwQwk-v)67R>uwEh#=x+P~e^(a+XDbkIM zFdY$xPo%TzGMo{2WX$Gc0-xr?q`;hI$iM)~_l{y#3>zds6}1`D1PRbcx)nnYh%ac) z+yLUAkN}y<25Q_(WC?s_1ho>*NGLJ$@__txLPC)lRHZR1uo!@xsK^XT4a{atH$Y}^ znlW9F0PO$<1(*V>;{^$D-6R1GvG})=62d%;pw=C9G~@xu#s|}V&as$>zW}j6fOWh8 zbvp(2fE@7y#B>C^;)8@EWA<8bDI+kM1KeTKXZ!}U zCNhDXGTrc;M@YP+(W!1C0QK<}DO?K!GNmrNH6{_Pr86*#Dp@HPHD7ppNqkX$2NXNGO85 z1qnp~1zrU~(2yMXSOb1526s?U30O0>fhK2o%$R;igSrP+46QN>%)Fpp?GI^qIDrnY z;s%8utlQfmqrmFeA(Q3!YsPegi!4&?6F>qJWTppRWDx`9qn?kF()F510R{^B1)#H1 zAZ8)_EF??(0eL{socROD z2^(ZVS%TH^g-n)0J*(pf8Cbs9BCEvA3l8fIvao!?3Ck8j0lbVL-I1_@7qTya7+%W)5=IAQ@6N6`5PZVOPhlFb6mJ2^H; zW+?~>d}Ib?(HkJAfJ|W3WWFG4&invo7f2DS;{)00^DnVTv;P2@{6iL$mSodEK;88M zruGHcT_4cg#R&>#NW{HoRuHOD6awWd&|)(d&@MdCx(`Jb1$NMeLv|hpZU$~ps$0RW z2t`4Ovf#0l3oQ^!x z!#_(3xK2<~Uq+fm2M7Qe*;XzLL2F6pf$)c!3h6uv`J6na!9sfOH*z8o5Df zy8mZMiTWKN!5vCj0>?ltPi8Zw6G{p!j!a-f6gfc|4%8AImbUEnH< z6w?Z&>9t=Z#pND=1RtOo#R*QCOakAhCthU{XMX`w^FnEQ{Z$rmDK0an4@zKpR>uX9 za94ts=!{<_McEsaL6wE_^fOmkBIJy?7K!XA_(ZT>}(|}Y$yu1SB;1w#R=SOX-DJW+uMq#4r-keU}N)6d^vk<<>U;wxt+yS-?H1x!#z#!1f4l2SA zC@3-Wf-*0wCi4yjbLJBYj;p{I;ebk32GGz|Ex4Jo0Ic@}*rmTgMF&g0<0?kbAPhvT zfqgl*Km*70`dcjG^$S3yiQjuO3b{VRK=>vyg|{N`G6uQ4Vo!{_PHNWgeE{} zEyk(90`e0J$WJwjg3w9>H2Zx66k0bFrhDIJk*S}f0BVYX7AUZSrkbWGm@{7h$zK3_ zy@7qX5(8*UJ6M4uC{!3g8H%WR)0LMQoH?=Bbpp+<3n2X$z;+>8!fcSD z1C(}7C}t_hfd;i<1vyftzX8&FLvi|&yDWy&J#MmyaLUlY|i~uUMelTQ#dkdhxx*VeUTmfo4fF`Jso6nG58*0OOfgHYu^9(s~ z!?|9U0hBOV9C5Uqp$+G4atf>pJZ4NQKs2)z!&W)alqRTC=(s`-R5U?Gc|n~(aQcEq z!VZw+>=@fQ~lFRsx+P3+q;F0Cko&NM<>)!Aa;Os?U+92 z0gFOC$QKEa)Q|%1O!0s_R1DyLJUg=ks4L|Lt^!U-f|3G9mJ(>b5wwC?I_(e*o zo*C3i*I@b}1&T;eJc0)+z+HdP;SN*4oxHW?%s-?+gRv;(=?qZkbcS@6;{j0MBRAXF zz!A$fHjRkZiD866(DUZq(Q@N zsO>6{8pjQwt|zFaiqvb`A*}@VoC2uQk~U{PAPsgEE4XXL>UcmJWEFVD%=8kNsE0y03DZyMAihgEENU?=IM#MMRlfg zK4FpPIC}@QuTNm+bb}`>PW21H>oa5&n824FI5N0%_cAgvgH|ypWRtLpK@k5c1_8*r z0@x}B&;kG%C3aU9hv^TWun6;QWrNJODW^jA} zU8MqAYNEjCC;=L!0A(j;(DDdIM*%2*dfh)h88J}mPyjW}6%?2qRk9Qq9aXX%EvC0W zXGvoenf~rMOP8n(=s-1A1t!p17sxfxY>p?k_r759Wa0{B0Tn6?Rt$mDAG~5wVAPq; z_?pFvr)B3f0r23bz0EzUOc*auxBtVU&FD8h z`wxpNlTFt2O@CMn)O0`#YmP88IVf;Af(O!g85B5l83K736!;Wa1b%~0O$nUN@t4IJ zv}ND-FN-sy#`KAQSzP75Fo3$Iil7@1en6O@=F}eulWqE!zbxupehN(Bvj(o*nXdYe zMN=Hyd;u+sWda==sK5zfPMDtikHwAEQOS(y&GZfbSgci$Hu^I;o?*;#oPm6LK?_rs z@+ zdo8A}U{z#Vv196KRu!flJEneQO=r}Y9?8fm!^!FhS~*swz&5>(k#!UQovG6VKpnpm z>`JVTtDj7_XJW13ivZQ0Od3oxSd>^Cm8TzNVpU{*F?HJX_dKlT+ZC8u4H)6v>0K-11z9bzRb|&z09E1B^@lF<2sog zS+W!u1nw{^a)OTg3kDsysLK$v{Q?{7ZbnA`={+2*a*P4fS97p>NCkifR+%)IzOX1T zx`WPZ2d#+$6|e!*c{y2aI72`qX3QE)8qE zr|YaJt_-}L>0tZKY|j*Mbkk zCdAqeTG}iu%$mulGrd-r)e&ThFsm4&%=T--tkH~&v!)w~vd);^BE~v*db${^>~tG( 
zR{QCkVyp^`^QLQwvD!{o6laxYoIl-GoHd4V!S-3=tXzzY3%4(oV3lKHoHhN36l)aY z?CJ8-tdfj#rdvw0%FE4V2gT+YMps@&1wnB71>H@?s=y{NkDZ5cx{WleB$L4W=@X?{ z#Tge&Un$LM4BC@$Pny-Ak#W&LaaWLc*%En%PjPL_2pi0+kR-36sxr}xRT zmV-F#3aqn1^aKr7k?A`WShGQ%RaImK-Elj8gCc7MIe5vDz^%p1wqh zwU%kd#_7t+tbZ7nPFGN2l?HKbRah06ma$KFRAyD0E+fM#H+{Yes~X#K&=`yB^mrLo z>FLi@SaSqcuuFlGI)~#No-74!ffdt}R9VYFE_f%)syzL#Dr*&pQ>g}VYo!{i>+~vh zh+99a!`-@BqG880(5$m2WT=b@bT+OA6OX{6>Cu|3(o79IrdMdP&SYFXT}X@7iE;7v zP%YN?Y>WZZT@6`nLC(GZi*Lhp6C>7Nj6b*Q8?)vyGXC5?&xF;Ok?AML^a+`4BGaFn zvVLLwx&4kAt16?0h(IT^66kyj&@pHApkyY{&91=c#_Gtx&8@`%PWueH3<1+MEm&pv zdf2&PHxB9uteYNe!TN}?ce?xXu9Lul$ICF?VKTei^jd4yO^lbP8`!XF37%of)?i{#VuZ|*F*;tF zo^8XLB6W!Yl&-;dlAKrqKYE72@xYSlAB81Fr~k8I6=D20UD%d2jq%>}DqGf*jJv11 z*s;E6RG4mJ&l<$GS4e@$QDFK;H$h#YePA}|rXqRpf;@rA(@)#8<}t3DZsow*z&K_4 zZUk-E3+v{9eyBQg0Oy_cEeaX0U z`xkfCN=Byj_onB2vYIlTxVL?oC#yXp)5^cwpL?-pG4d}t3*XGd;5g^(^aLN)P-H`wTx;Lq<@}IPK5+i5bM$ zAIw_K#I)kycAGF(Uq*gEP+=XRzzeIVW`wiuWSX;TdT<15D&zm@J0n;PI4*o?5^z)! z=$QU7f>n{RbGmROYYx-wH`5yVs> z?SJQ25D*Wlfb>#0IKCzc4B>^MX#L1+72- zz?da4bGkwttMl}dI95Ua2cQvdfhf$* z&wfm2jAu1wTr}M@o;3lYjRT==NjzBF#dua}wFZcW2F5Ibr_A76Ye2g-%$Pbr?E%m+ zU?93a6cn2 z_`uo?jG$AXG?-Q}DuVfI7^knXW0f?xz=-Vf3t)#phYUb#%9uf|4bWo$8xWV?n0`2c z)q(Nwc9uj|M^LTcmc%-ZX-(_&dr7QDOdDIL3njDKFmGsRn;w|V8qK(J`r2gHOvY8y z#Zy@G7*|hkOJQ|lS~Yw6sT5W{rggKY|4L!iXWBG-`o~mO(diDUtk#UHr`M#iIx#Jt zJ^er`t3K0muv|Lh%IT45thS6hwogxEjb~zm#NGJEz~yWYuCkG@UJrRf}=c zcEc>zct%Er?Gv+EJ(-yvoSuFskJXx~;oNk|eAY^)F9)Vi$!ERC)UkegNdc=F(}wli z*A%d_GBWkuoW8k`btlurorYBdhsxa=E-ci9C0@ZHB`{4gn0q6z}h<0%t=>N{o=tfAhTFyUxzNmuubc1eIuj$&U z{9Mypx>+T;uYf#_v{`Waoo-f7#wFX;dRW6)L4Aa}iL8ql*Kb#x#5#qMaq0AJlUV~9 zmrduG!m7o%bi45s)=rS<-l?n;8JBJMpT;_okqxwo#*uyc-Z`w2YzJme6A<`5oplCl zw8D$U(-7gyti%fOE0Y3?;{(Pl$E91RH_TwoV_ZJ{{R~ze#ud|LX0l2_W7&EpYd0wD zPk`vI+0#R3v6?eYo;|&97OOnd^x4zb&tg?pI5HEP@hni|cg&goX%?$1)63J-EoZaJ z!WY=(Y5nxsb691VTF*@1Ifqr29kgu2@xKq0hi{dA4FtY(a> zrpM1^m18_Gy>2e6BID}m3+A%QF}<2G{lHvS@V1O=b6J&`e$1HuYc6X5D6RX!b*G!k5 z&uRu*gBLuX)td3m^#1v*VoclDZ(ly2^&5DH@cc!r2N}0aPh8A;pKDeErAwcBSb1GPQZPG7e`Mr``d<*e5k*KVJ<0@O5KyIo)v>l@Ji zzQ3zkpMmHnYgv;R8Fx;1U(XuGxNG{n^{nZPyQi~lV6|dgH{Edqs|MrV=~)|CZ5a1W zUk(v`w1L%wasPDfjbJ5d8(IAs4{YDIk=2%w@$U35n^@zR*3Ft8vYAyCw)3TaGph{a z$6Oz3duHEQ+AR3qCMq zIo2!iDTphufSOcnj*O6P0XL>=Y+;pW`Y~s^=N48uqxBrHW5hs4vYIjN09AoI7_%I? z1Rk@)e1+hDxyEFaj7FIK1(3$k?md*U|M?M&ef{h%XJ!22OV!{rQ)3nn52SU^W^JU~*x3=%jn0il8kU4RiJuwdf! z{GF_pOiTW3U$v9*(k~4*aJF@aL4qCyIGC-fBc7T=gm=Iaa^%u z`sLlMy-Y{WZ%^98s>Q^QWCFY6kN?wW?_>P~+LS(XKWnHObj$!Wzyhi*t}vin!N3f* zaZlHDkpryiuu%gg0dvsGLPz)^ggy$)j$68>M;>6!kUr2gO~8>^U?T@~6Aq}$4B9}# zDDZyzodc{YA|J5Hh#X`!XM8{1=OC*j(}o?>(+{$SF|9a1eg8pLKe@wQ&@qP}jG%+k zL1Pun4Cc%YOrSIR&rjDr#CnABBfgy!}+1! 
z1&yG9j)9my^Dt{0s9mObg!Lffr|FN6uo^Qxzc^juD61vYg-_FiLG+O`)BBFHS~Gr{ ze(ETzG1Kzt)Bhi3wN(DFsR6WLy@eTevKweykbvWhO*24u6KQhXSL#c&!GSs2NTdyU~oLrHNE9HYa!#;>EDjC zf{*dkJ;7?pw0QgUj1#O*j9;d&KEbNXxOn=_6Rc&7U#AD0WCb7ov+yKD(Vdg5I~l)C zpLUAXmht`cE2mha7~fCVKh2uR_`229`b zg|&q-V7uj4)}>7R8+wo|T+uWA?RVA&CeT^PKUmWk5hDkng6hwv=}&&L&H$x}x?ily zir~%N+ybCGOGV9?Ua)|660v|*{!}S|&c*c>4z7d%)6I=Ntb79ZPn zX2ws`orKuznH;jGw+OK*$biQu1(tzE%KranF63nd?SYfXQs8#9$(nvZh)qKgF`CH+ zAI)TgjApWd2CxFBa|*MmGwMt?6lPOrvdP*WCCsJ@I((r)gsqg_A`m{psvxMqDA2|% z1zI4GB3 z9910~7ES+P%vLsit`eI7lGrvSwoW82Sjj$R6eZV{F_qj`K~eHUh0TPi@&0r@RW>!N zf1sO$SwTnbaVs)8{@>LqpvVe3vyE4g$?*k*#{{{b`woN$?zJ*Ge%m#@Pn9iM51gi% z9bYhJDX|GuLw0V03)dfvS&mEsRgMTB2`I2lFH~WRonE8HrV2`ui`Cdnki7FijctO* z(cPeP?T&n```<^ zL8;`*o#{K&*}x~0@o2EANxc2tEa1o^kOphvgElF%fgIzUm2O?Eoi0gJ#6Fqpnwi%nRf6Qo{@korPRHudR-+HCERywB|TVY;9un}Wpt z9XM>O)?rg)y18Szf)1Or!~u{hY<7TDvE2m8vQOvJW>cPiN{20le+FW>hS_n#+3B{r zZ1%`SA|q%pwOf}BJdE*KmrWURVI&7ASwu}Q(__;Bg~kFsHt_hyEj^5+EV*z(gMgzn z>>N80q}26WA5(R2Z-aoN1WdIcl4==44As+Bj8L+Ptr4aYS7Q_15c>3pddp9;7My5Oer~A6IS*tDP0AHjIHbH^OkwpM1068%n z)S2GE!QC}IKull~==hT-VgjBZT3=j1dHV(rw!4h{TM&oLFgULHJAIB9+cS{ATD;k` zLG(s%Ht?{ujSn06*zPJHw)KqLr-M)AiSuQ9!^qUSaJr8_n}pb#zG(s)3=E3$jxV4r z1;;0S+pGN9cF6N?g&)qtE3kFDeLmX;PEb?kV+C6r(~1w%11s5{Fx~jD-Mos;kCFe* zd8FaWE9a+Asb%pK;6dCw*+n zjN7Mk^s~trZa6;?ataUVNFUH)OUQiCU0!gf@yKv1FbW)*9@@`l$+&BJe?OZQ)?%%;M<7{2O*ZF<0DHmT_^ zC$o7tZ9%vkG$sjlIp|_y(BK3pkO_zQ^x`RO%8Uo5PoKhOMM~K6LBf`4D%k%9Q`yWI zw@go;%4VgEFI24pxKZaZ0U^KrdQ2iiz637i^dLnVSD@-}r;Akg@*7j84!Q zw~UI+Jo(&?mu5D=czN88M<6^VMP{B{ZpR%x;1g{@yc}-Fhy7q4NMAO$<30!vB$vhQ zI0KBi!kA=k$J4Xm z4o%{AJOptl$T5lBj%?YQ5AVAg%{V1amuX1_vHb5VzxlUvQ%XxgAe{HGrKO!0oshVVFO+ z;}x(Rj~}<=X@pt6+>YxZX8CYCcKn5#<<0GQ4y+zzRy~gww_^)L6663+ZpWPnqdmAC z?|_x^xN|#RL>TSH?YJFcv@5scaxe>I7>^6LV-q3@ow*%BNBcp8+lkxpDA+8pN=I(T zMo}{=ds{+Tm>@LE+i}_{5X+H6v7VVnm)mjAIxrWsvtNhX@zfrW5j@)5jz>0t6CTJgEpEqe_rTVI zc$(afyRLy$If8f^+>Tp!f#pCO=GD0!*FAvCsc}2*KLIu!B&W*l*l-xE5~NRs+p)f@ z4KAt7?fBv>*u5Z0C2q&flfZJIpi<;^ybe13o5>NxQ{Z-d3Q-9%RG!=M!78{uIc~?! 
z6JRUY6q$KsxgDGSf#pCv8E(f@knjP8sWi9auKG0~NggR~$JXCqH-jW3xgDn;0b32y zB*E=?dm%`bBPe{txg8%I2Fro07UOoj{sYVd1+gf%L0s_Z-UzGtxgDSX0b31H#Ruw|F+2X90dgo0FX&8k zX2*LkKz;?S$zWGxcDxC83=cO%Zp&S;N-l0tpgAr+0#?ZhF=gEauv{nyx8wYIU?!h zJiqOR0#*5Rrnc1f?UfjNjnf)IR_n$t{7$$hcVftzT_GkQaKEe+KWOkhLar**6 z_A+Ls1s}Idh_b(8<%J*G#OlqazQRZdqChq8Zmt|Dn%5lj%qA^vMS7E{vC^-!Nde(Y^#am4iuxsfAJCGL+eYU`_xr zL6cDeSprwK+Z(d`GBRG9-fP7Ef$`P!I%9S###hsK8ndS}zM8IR!fwI%YI~Fkdl@66 z!t{%#>|IPNc25UYnM@0IPp>dzpQLc)?^FTD58FWlEu!4e`WAe8Ib`TNUePO=k~KU?Ee_WHZ-*eD1wHr`P_J!xgDqPX%+wtOh2?^muGr#V>**P zyPqtyZ?2K0#3E1)S~vo#_5t9NEDGVIVC`-S?+YcVbt;uH&;4yBq(HbC4KUWL98wY&bjJ!vryIMlOV-aj(=Gr$zQYkT*70IjGl<6tnuTX_yuPg!#Nz;E1t!N1P!-c ze&B~FWrT2N_DsL!!fwR)W;(Acy9LKh&;$|a7{L!eZ%IgsL;yQY73V^?E*JYCA2U0wQ1Ym0#6-bpQ>^O+b6l^CQQSsYmv7^Fe5 z7vs)u1?q!Md@d?F{lFbYJrL)^9Y)aFgzbOa*<+X(S8b2HZV1RHHcDRaFlW9WpeDf-VBmEdZ`IS-P-Qt&+f@2(FHn6g2|C5Crg1*fhk)6 z%x4tn+P*lD{TVysz3E+%?5&LVrVB)|KLB+mK1H$jFy5bDAI*M;@&5Lh81`&do}21S zBK6FmvnnqzPrsbVK9lnVXeo)9IrD+(rAh3w>kl(pF+2vHcF17O2s(BVv>4$KqY?w; z5=kZnhP6(N%?eE5+pd`e&a?6;aw{-_E@(Z)LN;W(oe!*dXu0djUD$dDr- zLqM0RJ^>rT-0Z|v12u;QWDe*kJq;$tBhx1&vm4c)U~~l^R>=%Ch{cNG8OT6ZD~KhG zRt!%-d^V_S*{v8Jf%qI&3=csxgB8OACMOA~1spI76o@phUIDv-lE?wauA9r$#!} zBME@coy^6v{4%5XXMCd(q}*<3Jr#XjAoFy5wiisjR-Wzz;Po5 z3YYbu#j&~!=NYXS>OnRlTn|cLXBn*-)`Fsz%Zgz;h~~6n*ao86tQfX}XbvleEg+gn zm*EVf5~CHv8Zb|j=?J4a(-IJy!Hj7ElL8{tzn^aDSia)iX;yP)kXM}${CZ|^U?2p* z(W1cZ$WfxeQUu!4w1Ek<=$JXn5fKaE0bM50<)2KTE@BmaTJz&aG zVo+cP#SsH2OT1tdcnA`ozzkYu#H7LW1iH%SK|P})1G9qyqbo1S-Ao!x9iXMPW=tKR zgEkpJ3*lJJn9eXWf$!h$0qud&0olu}z>=lFYR13-st9{PT3~w^z_*B>Va`%wb2Lz3 z&Q@XtPfmeWzJo>=K)%;xp26Je!~o(>V6Jz%eepu;tXuaR(_2AjDueFPV{+8U(qslJ zP-umO7)UWFpfs6R!1OL)c6!{q_(|uzS@-(tk@RxF^fI)Xg98_|cAZHR9Il$oJ7C&3 zFgqQdd3R;~@trr%++aY`F9Fjp&SqjXKI@wChAkTnOAF~-#52$uz%vJ>5 ztHP3{$N<@G$E3h$#qa=B(lJ^wfX-ueYHVm|5HM#31t_R21Lqu2B7DGY;KOWs(4|WEV6S4p(EKmeM0qck@=P2+AT%JBRgI%Hi0TXD+^dk-) z$ibap-enFR(80N2hlvS123;znz@#8%&J0Sv3gWhmD-g+oD+`n?;3r*cD8mgCIcXLocJG{L#WaRxIeO5jNbYl^wS08#@=CIQeS1GOC#QjRN_ zvotxGL9qX-HJMj{gAf#;^&q!F zLlhjK0z?KVXzMf+2?5%{2n(17Mp%eGV8{{!*$)p>SlBE82dObMY+k^_CIJ?vA0W|* zBT%iO8h^kw7N7;H0(zi=G7UHfO+TK^Zcz_z0|-E?Zb4|(Ed;H)g`riq2(;=Jg;w1> zpsE|xt%08Z%-|?dX2tLtl;Rm21Bf##5IN~MPE6@OecoZCF5RZaeJP=2K zTUO%Bl^|*eI}{uk_2QuTz*2}!eX;58*{>6Rq(RFtBt9q+IU(^uB^ane1s$7*y(D7* z)n}{<3KB;wUEYghPQvfmz_`^qf3)WfM?s$_nlu2sD6dUr@=( zq|0!DNr@SxlG#xJbixayvz4hmwRGfW`*8Nnuk+o0f5m_gtv2e*Ql6JxES7_>?OiGx~N;@qH3tFU7(AjZI) z2kKHQh^ZiH6I%|h*%?3^9iUceFoA*)Jnvo)76O?nh9l~BF8bPVZO`^ce?d!nm=&0u zkOjbzhb#b!J+xQ@H7h_(YmoC5Kurly^f7`~1~4ixB!X5$gI1Y?YzJp#4$vJ+SX+{_ zc6Tg2@&D?p1ZW~c;)C6R#7A<*^!x&LRW4A}LF}GBuYg?}TnU1X=0S1`3&<%fJdCWM z9j9;yGeZIkbpA@VBIr_EmMm}+2g$t>+@MogKv)6nZctn31o+Y%CQvDPfDzQ@c2WSP zRHiIPNJbF`%yHHisW14pA?T ztey`f&R|y0^nl4p04f32zzdRqXaKbkK^+LN1w06GIjDNDIC#A`#A47U29SENIb0z1 z5OH>>dayVrNE~85n*vikC^0yK4dMVvKs2x-YhXu+gVyoDEnowQLkt2fz6Xhe&0&R$ zD=;aDn=`*)a)M?GM*&bl4QlOxYTpM;PNE8sWDjuwsNn;VfNKP;oCg^U)(BoOkEKjo z(D!rh@lUr}uER10vH&`gR>|(6o+%dhUm|dnGi)*HRINJ5$*p~yN zh6QDkdZY}i1n*z4gYpvSAbN0@1;XJFc*5Zb?qh&5MF+DZBYH!}08)fbV9rut$WmYf z9gM{UZqP8o8#Egjvq1R>>}GHqFa*+q(`25&jHMCC25E4Ej`WAOT{ghmE)yVzfkFh_ z9At)=RbWWxd5cI0i3$P?LSE3R$@@#1a-m~ z6j&h?7nl;b0?G%D7nrx7DPs>|WqdYWv4%Z}@%i+U8umKFXB?o?p3zN_iH8rg66XM8 zmV&6jbA%`_OqAJi0#la2i|NX>?1_w0(_3rV%^BxR-&@Nr!#H>Pty*@_hPmIh?8c09 zrt8?X{GI-D8oMUb>7Ub8 zrnB3EF6>F3&c2fobZ4d9O!g!EbC6X}`8)mNOm-imBTK=n6_l7j%UT)HZtr1qM7!RH z(Q(1w>G`wRFMxu;aW=c3Afw;(32WGE80SvsUCaKJ@xyfHb?mQWg1~!sSV8NJL1zVV z+cAM|Zv*c`WpxDaJY@x~4h&NOQvry(ej35IcneI(`A9I9!1hbOw^l zboce_VW2h0^VhR4GC^9D%j9^432onU4>M@rGT4V8|1p7gz^g^U6!8}v_Ak; 
z`+i{0a=db9`kvG5`i!j8-<)Q55Cq-Q!U4IDW)FyFoo;)EUE7GyjOhS~%cQ|{fzgcV z3J1tk8E_*Xbd4FxMtIQ8XB&1*pLK>^O#q%`)_|HND|Sr3aE9HCe+lx2^*KAHi=Jh- z*AQ@IaMbz3&7i;sTG*-$3T;p~pCd~NG+MfVG0Txr;0vP?%k-MF?CSNbi2d*rn3Py} zK?8t4Kt(+0ej?C4Dxi@k(5*Tzm_XYeMZivd03S(pyup;EBm^3z0cpH|5Z}R+<%qJM zehm|7KRqIQfacG_4(JI0?9!mU zf{r(svp|Ppf;u$9EDj263L>EKLaFH$=h+nl`9Rk>us|9#%vlO73jCn?1SJ+nu%DE` zmx(bseqhc51vO{|DGTT-30ZUI7od?xS##za%nHnkpybG;!SsgN@zV5H=hibc1UHzoKu3!+InDrUe83Lc8#;px zw4-wlixLYj8_2tBI25F&r(R$;RN#RgF=)ng0%Y0=mMm^1PJx+V^>=rKt{5HCMz_URxl}n&g3#<0uB0TfJTSG=NE$Po57*PGTq@KyHq{D z8R&2VP#k?=$r6|acMV9@7LII?YZO=SBH>Uj2FlYpZ|*tt3Je1CKu5NL zVq1Yplj#Y&IWs6SG?*T+E3s>UFBj2eIKm1ti^=f@Tb2TkBM0;}6Hw=kElWWNNkD-| z6SRBc0-FMpf)JSbfDJT`-pC9}`1*`Z%!)i9ehafAmjaV!J@X4DMR{-vWKxs^`T7f! zA}5GG!lcLoqW6H0J=SEJ!=xyyz~s1v4Yg=+Jivxpcqqu4Gc5tx&TPhX1XL6-m@%CI z@mb86&OrG;KzvR!rax?+3QP)e=FAQ33QP*}=FA^J5}?}(tQdMgLOj-tT_741KvoQ$ zpbW%d#n1twVbwI#8jztoc1)MP!Y*B}!0y-p3hCFNqJ9k<==x?Rea0#5N-Utg!j3c8 zvlKv@C$MJ;%m=5WIWVaO?4X3UfGx{WMS%&VP+$RA!4jwf4Wfu1RO;LK*g^<;}xiS#~U!WfNBl4EXN}|rwM@0AJJgC zzz*g(G72mPJMRum3nVLn{CQ;Ow0iJ4C^sN#6}TW;{xAqEf#oQ0s4ZZHgc`^l$JaLq zII=?Bu>_P2L0&w8luMyTL*21}6=EpN9cN%#!0xDbgsW!KU^)R+dzb;D2g4yBz}A4Y z90J|auoUdGFHjRTnAWfN(Ch{ixM<^1V9!$GtVfh? zN-PTO3cOiL%#bomf!C3#M2R^|fk%NiTVNTuU|Rw@HCus8f!&d*1T;>r0IJqzuw@C% z0e83=tr-4*S30a=1eI^#if9F+qA;_Af`}tywvq^Frb-x8b}%algGxSOgzYYJSjv4GALU?*I%IUcyy3@z^z1gAG%XBTE< znLhP8yO}4rYF@#l#F1qNx~rN=gJ}T=*k@UwT)zZ#`#IE?5GiI*Lz&N!4O~1sf|3Li zxQODNE^>oiTM^q77DeRzy87%dp<|If` zDRP8x!@~XrgF7z^ zxDW>oxq$~L6*#5`++-K8hlGFclaC0-VWl0&A86v@s~~g++l^fy0rxBnwm$ z=`+p%C4FX)m#47uD6)bW6Ic}m6~w12-eOnbW(OVj2Rh9LBA_mfa2Ck7ETC$a1u{}A zK0WysyDHQVd`wKt4BQHQx(phS^G*32S*;i}Kry0_C7{FY2x_MDfJeJn&6r#im=pv- z_pC92Rz%xC+R+xElkClyOca^0n?M{w8EO6!umMqlk|06ygFOpmBd9U?g*^*oD~S04RAzz|fT}YIC3(=$ zlme3is9^@GL*&hwAAm|zP-vs6J;Z$pRn!qR2I!?+&|m1iu2PVW7mRz$LJpO_5ar zG(;+qrNl3A9n?@>!jz=|W%h7nDT0PEXK-XG3Ne8Wu>)gOW9qKI0Eo zC00-gi6b0#-ep&XSy@2poKlU3|Ck|d7j(xMmN%Bw%m>qSxTTn{0DQEzzk*uZby?W z(6|R^xX~a>5p-1__w+UQ*kz}iYO{jwt72lQ2ao5oIWj6RmpC$(D1wIdSwTm9u{r){ z1kFpcIx=M|vMR7SGG!^UK{76~7ACkBCIvQ7U17!u8eU<6=wZRChZ(MiQ9+FjJPrcB zQj^t@1EP%su8kYyd=>@n>CN}qwHf86ufNaUTF(cGWI1ytaMzj9jOhrc0+Ztjjw}U9 z1wK%39b9p6JBok`TSZ0%ZcQd0aE89ak)`mWGS(L(j_R}zTn7G z5?0^~;qbn7W7L4&TI0u>9g=1eW1a%B%k7O2Au8YU4~zzpi| zgNDLxfQma7aK*+1=7Ul;sLKnwb`x^JCbJ`Bp%SCCBj^H7Mrp+5q)ZB+HAkT4C!ZsO z;{i}pdJc0v>=IItl{eV45Vz)nQ&}M|lY)rATxNtSPJ!h}Jdj&G?->^Dls}T zXPGh0-~_EA)R1HX#RQ`xQCg;nDGF7maP)AD=))z zvw1S=^`Htt5tN%5iO$W)T8Pcf$a;`-b3Ojt3~uWP|X7=5xDBX*>(YFhHM5?mJ+DsUcd^f z5!b>R)8Ljks5`2`G>1un6Lfbsq|u`Qx>nenc>(O)MjmNJE=L9h9%)eFr@$`&F<}7{ zbWjIujw# z;0{Csn-Uu@xbgIb6;wazGW-D-=q!+24;uFYv$(;#^Fg|q1=hoZg_T=@3sfF+DS%u3 zjG&?GY(-XZqmRiE6g2z_pvzFem2xH|Ytkb(uG zuY(!tZ)RylNl=Qv!Kuis05YV58D_|W=?*X0#p>t4f{0OCQ3|AV29f~i?i)tXO;_I1 zil7TTI34R1nH?DwC73~t5eYX?o&}8(@F+4Wa4NDXuz)Hg&`o`=ybPd=WECW{lmr~X zO*JJk1p!C!MKPf3WCeUd*U1Vfh}FAEJF*CD1ohrQEq+D?MolILCxuQ>sn4b$m8B%+ z$dRQ4Y9J`eDhMcufr5pJK|u^OhQOe}ufPhbL&2h;h7ahnCWvZK(ohfpodC}OGD=K= z6=n{I%L;1$EnqcgVyIUTP~Zm*j50BR?iU2Lni(BgKnvg4K-C*)kAMIyu9!isVP*v$ zP|KZ1fzuH@9t1sVS_0JRkbp#~2Gavh&^Qygz32F!5p=pGgA${_8PJ||&`>U~0<*v- z@RYA;y%`gz!>%A|#xwzx4>+e)L1-UR3GOxITB};)*fyMJu_GoD}s7ssG(2&jwBslW{Ce}LP* zpn9GWa=kn_Gz7N7a|I}9nX>9ZeG140;@oaX1S;DdBrX* z&8@*?pup{D0g`Z($PqXVI?)1LO0!S@^ore(4{Vf)0;H(pnQr)+T|t>$zzGza4WNz? 
zlLpfVR!Cz10WMxOn7*(oGEXmg&8}O|Cau5;EzDS@L0Z5mlTBKQ6FS5Km1L7vfE9YI zAT7+`@fU<73zP|#3)Cz$01*~YkFeK+u2koNb{E*ekpU_; zKvQVo(PjmZM<5;qb$)*^gVrWkZEWD~J?M699Y;n* zR>-uS0cb2rB1>Qu8)!5_Q~-2|HmJ)78W4cw_y_D+AidzTnn3Pkas+if_(0+DghOC8 zc;pr|G9j=HbQ};!FZkdKM$lXt=*I8qPv5YsLXQap^`MwQmyIfbIz>th0^30quK{TN z?5gQHZ`md5kpw_e;NBDqw}P}IyA=cIlxWb&%b0rEAhQrZ7_yZ36{Hmy1lEEkJJ=37 zGM6}l_gR7S!URxb7Ic6k$N+F3cmiXVz*O*o#B&(Io4f0Axeo3aW^M&W1sRak09tIFVKd5xR3&a;|4efw1FP1{{&n}fx+0!A=H5#;tAj9Cf@c`kG24U7sLOyJ|3*Me#bW(}q# z;Dw3Ij-b^Y%#PqyF9Orq6qxlH&rGlR#xAL)%W#6xm6ulmd~_CQS=j|pi{l39EKksy zf6#GTps727+sxDTzp=~lXfWMivS50~4|cKo8{o1YDh0Z6o7wRJ$N)#s zh4joCObP-=*_0R+n4od-hFJ+ToynlU0y2V!8`NZZ!R*S*p#W-MXfQE=TY8StRtyVR z6qrFrqJ99m33Lh~C`>*u!@^<<3)r*M<5)Ok>o>5NGckZI*I;_UtjMAOT6F+EI0d z4Un@~%$Tl#Xf`t@P!C!ZJTeM$ALv*#NTg3;oi516p~N_0`o*8@`i!d68Go_M2G3vx zB{No#{WCxl2vGaiu!21UwVxXtDl0(8PcS=5Sus3-TEBqRj%f>s&1S~50b~$pOqyxB zBRhxmbXhhIF~$!dyX2=I`o%8F{sW}o2kZ2Ezu4UsH?Tp%vVl#3+3^HuL`i|!v4IU1 zmS@-W|2$mWyd2;L1oI6x2z>#hP0Wg+fgO_WAAtCv`75xO;l|8iNA?95 zXl;m;6~hIPCT0z$8SG|EOF(QkGo}TgrJyfBL++s9V28SciSf z`2bQSZ^f_xG`|VYDPKUM@SO4o#M=OxcvoO{`~WkFam)1jKkUl&8#qAef&+9b2^RU22OAJ!!E}-W4gm%c1_y_AXDV67%qTlxNnw#cyQmW0r4I{ zO@WW~_K1@X=YPCKR+5Sz`6sR1N+f(ulxFgrGI!5nag3+x>wE)F^N6I@_N zOuzV@U3hx^KlTzUE(Hch25Uwh(1B%E44|FLASIyazW}P>xGNYLp|^A}tAHkDz}MNxkF>2|^#9QBSI z9GnCI3rL=kfr%MJqheN0Hg*PXM^LwT0b`cH6IKOqhq1(Q&V!%qg}j{LN(o$na%6$q z&!9DW;ED`d5!Qp6wJ0?qCxMy})Yb(}px~+r*`fMz)x>O|Py}7vuD~R4hDQig%1eNv z0v5My(-pt4i$(~5qJ$M{)}19tO@S+L4yZi{_6%B60j3z#B4l<1-6y8NB+$YEvPxt6 z+x>z{j0)2Q4hTx~FoX62LGGkvnf_6jQ+#^BGa;6GU4{hEJ^?N^&^`f1CT0c}R?t=f zW(_6-a6t@ec7V$NMc}%)Mu`oyGEjjHlrlj@1gLDTP~tITYG4E{o)h@Rssv7{p#C|J zIdcc7N(HStWd>at0vbu;9gF3Ac?Fy^{x4|unKNtpRF#TWzU24h#n&V@qOk74H!(>uWgN*|!mb_5k?pvoR}7pWQ31yBJFFN>~#coSw!w`Aeanf`}^ zLri1_sB-7w1P7@LC^j{i9H#fPaM&<%PCvuKAt4HBc&tEacyLbt$-?0=z2^_RnEns2 z0iXgElpH|SMgt3YJG1~KIkd2VBM};h9MjuaITTcmvMI5F)(J7|GtOaAVh4v5J1C?; z=IAoaU~%PTnSP3uBN%K4`vVq`?>MGgv2i5$yaz|#99HDW{O-;Y||b8vdi&%C^CXl5j;rCI5>3p*kQ|Y*g+R# zPrvY;U4)Ti`cV!JbFh`{E7;AM7&cfHr;`XgN+lU3#8m) zx<40(IwRZkaxRWwi2MUC4z}q+JRIUIplm&zk()yjVcZc=B{X5ibS-WU9Y%10BrIsk z)BvgjK)p_I>&s+%H8+PkBkT0V+#JDBMJ%RFFSw@n|6`Y)Zp*{rGo6W-1GJ)AmX~89 zViF#--~i-B6(v?k%TNQ9d_d(CGzIA>vGRh-2m{bD*^q*QBg^sl^!L0RCJF~QL3(YJ zSa_MhJ_99T&`Hc74H{VjGp0N9ao8~SO>f}i$Y7i}{Uskqw-Ds)3kAqJ-1GAt1a?io z$Iqe5*f*U=fJ2vY<#a0n4mBPL&}Bd@`ivS%ti01}1vr%DMZlLH9brIOlf>fqf&o;Q zFf&Xyv=En=eoTNv8Z>?TSb#%^>A;TZf`S}MOgnZ=Hx=aYW1Ka;MUZ2X%$~Cipo1Zy z>s1*PK%?Ivf(3NqLA{N*3ghYN;X)j0{Lg1~z|O&EaC|UpdXErCBg-LHMwaPyKyY>M?7U(WWM$qC)&>_$oOrYESm~|N@FgkL83K&)emg#Du9Lh{b z*rrdIBP23CUX(+Qokid%o4_%)=?lF@MW**%5#^e`RFp%2t4orR6V&+nz&?GyC`Slm z$8-@f4pYY2)4jwvqS#*VY!(o>G<}sAhbr#`(0VNnP*8CRG)=!F#-Ypy6}rHXXqW-i*5}Y?Y^i5L)HW=jv-~(f>nt0X%$PbrOP3}v zfr=~+(86fY(pCnLu@hi5GT5z-7j{k)5LgbH>I03afh#;lYetA`9cO^X0UT$6EC-F1 zun0^A-G|Mg$vgvW2q$+v=x!lLNL+#zEP@J34#z#rS)fTy1_cfUZZoDG%nBT!rok2v z&1%N9ff>|<1T8#J;Bef)oF%Y^7i1wPD55$Tvy|8sI0aVmDzYlDLt>Im0VHyhTajIX z!>PZap`if0yB!>h^=m-hghV5&BRm>GQ!Ua(3hYSHxB{eb12hB|Fl9lbaSIc8e1SoM z73Ky|0B&Fc#p(`_mIL4*WCCqXfyU|?CPb{(vw)f?9J&lAKrJ?Kv^Fq965JamNP>F- zGKC#H=mL(`159>IH<%PSoEXiSFM#AgJE%Zu>jGr3g+rfl3Nv!7f-V&UIcfqkcn|fgfz3WPE}d(Gp_e0V}@%Q4R_f1!jQ`aB9DTG`<2+ronW9 zS&=12fdf1)15R%We2$ETEcFT;pvC8)77vOMcTkJ~r3ekC8_cW6=>@yMLuPJJD&PhM2&l2hrNAQ4fVk?KLziI#iz6R&|AzvH zBWMzo19CqSD6Mm7GB>c8GoOHXj|H?y;{;0M$QK_#Rw=LwG;@I>@(T-i zULI5rfK-6}$za9s2V^so8PgAt%?wryEvyP0poRU4pwtN(gXa|Z#R-Zw(4~wWte`@O zRbV36#2!{9CSGuv(*c^*;Bf3<%~E922U#s{#c+TXk_D!KXcqA7ABW=&umK#PaVuyk zc4zv1Sq{oS7Y--8VuDRRg&Zl1nNk;9B}&Ge6o98&eDRx^UE2Cb+>vl_HmA0h-g 
zumMtafYv%AY-ZvH<(nJqC^m0_+WY|26$KgnfgN-_Z#O>^Pd!teGK1rU>0L@328@fR z?^fcFV>~cD;J<*F@D3&g22CawMG*xK#}iDT%(P=VvoeRS&ks=T%;ETh3B0XYfdjM> z)=^TR3Y0B1nSU@Ti9!|uMzA<2a43j?4gvrb%U~lwM-qrG7mxzwst-&`oYU96mldxE zxmpvnj|5~FgBcU39RSjZ)uySp8w4CB1#+c8KK;Q2Drq&C8bC|eK$rA_YDXr}R5PfO z08LJS>KZ1{aW0@yKt@n24m_@+!05zc&ZGjSrXTPWWSzdDUr1=WrV58CX7eR!%>w!lBG~c=|^b4qe72)8$n;B&|Smtqcmx0vADbDkrFktHc9x zRtK}dCKe?DGo}YjpilvA{8!)gXnEq6iLm>;) zG+|KSf;a>0fE`RA2k?ULIpT190cyKJtrr8$7;`yZV({i=Q9WIGnX6%K>gneP)Ccw(Ig8LO3a|( z1PvWIUYhj1;0WGBXU6n{5qw04BFK19M}bj;2~?f`V9XNe z0aw*6po^nGCB#wC!B*hpufYWBDfEEY4A33}sKpOzD0VPq3G{*M+&N4TFLNmHnK7j= z04oNs6^D46NC`=Eu=8&A;s>7im4Hl3G zt?M@c?P*|BV27qLfgRHi=y1q0ZlC@@heL(+GJ`)${q(|E4q+z8%hTm_Ipi3drrYXr zcrtFE-mc40$GCGkn;wS($1Yw)76lH0-P85-IBXfWPtVcguz}br4YBpO9)~jH_UW(n zI1CuKPnXf>@MYXSJxia%k#W!THToRtGGG^pgIokUY@JsD*-6{q>vQOWZVQt&n!w_L*pCN~-?t?|s1O&G8LI-tuCBPxe23ml~A+VecAHADL5*YVR zmpA2*X52H~+LR-maryMcrX1=JPYX|D{i`X51mp7Qd}c6lq3QYh95U0rEjc)+ zCzx^QGcKP#!Hh$OaryMMa5aJ;HImamn{j|{&r34rP-R>`y~CUXbXffcb8zrIH0LnT z0S6dp*^C0C0=px`GoWaOHe`{7c%~cbafnP0wBQhjxQ~1K2arG$T!0HAu-Sq`oN@c~ z(-yGUbyw@*K7#bLv^eL9ylhZN(E>8jQoYK+^b`&e_>F>asUZOx%A4)P0ZZwzQn7pO#H z5ZJx_tThKO6XX8rw{1D>toQROvhgT`lo@~))UzsZ!qY6T0t;w}!bgDxv?j{HoXG_o z|C|aejz<_kcdmulaj2Pq6SD#bOFbx~^FSAcf-XwE%#aN+ih)&ujoXYVK!L>(G&IC6 z&@_FW9fyxN*lHz^)g6pkP@`D|4onxc=ZIlEI6cpvLxJNEXzYYlVE6QC_8hj1`=?*A z=dfYiKV8s)!;x|S^k4@L1IGQ+TO2ri*}$gBOm}eRkeUA3fkT1u@N^MJ4kN}R)7>38 z{AIw>(jdz|Frs^O`X)z?WsC==M>=s7F&>J@drzmz>)21oH^K;K}VNvb>q0fxPN+|JBJ8{x#)H#W zdveGj9CXo>LyvL)G%v7Owq6_tj7O*EfvEk{=X-G&sDnKWO}3nnlzbEv*F2Ce8B}Qh z^luP7>fRg%kTB!~g`wQ^YHyBnj4P&l`EW!tu9&{ehhqvqbkhx}`ez5-I_K%jag*`b zbY(w|X(D@hmDoYcn^-`rY8BW)ryL#Me%6m8gOPFXbh7{sCC0_uBLg^2Ff;C-t`x%Y znQ_H*u~3cx#ud|7M{-C_Zw%!S2VGilFOtI)G#q>}lp|JWFE41*2_rO=SwXwNSp`;r z2E%wkmkEMaJ+e6NU zsJ)M)FzglP1SK|5cL(N1m>V8N!`v`sx=$>J9OM4!*|8iQ5I0Co=ZnJ#3Su2H1>Ntr z;^BUGn68_^aT((GO^}#o1;w<)^oxlcnvimP|Fk3yQO1+g#gaG-`A+dF@F=i=I>ZX> z0>`F@CUNL89-rQv#9_>Mc>4Avjv!De4q6fbQw7>eEwE?0Tr!6;S1N}zPK>*!E2eYoW$c@NKb<3-al>@;3=Vgu|FfnyWN=6`UY|ZEgF{~Q z>#R-z(5N*tbj=+vBe$da^a~jrB1{`+PJfocp#!S1@Kd&Yw5^ zNG69F)AYa7|73E=${go`*@CWe`QPcLSsdz&Yo^C$ag;E=oqj2c;|Sx;>C>}0IvHZkVo<%b~}(VS02f$05<%yx@i(crKVtgJ}k%61%{r z=|*`Rj*Oe9m*jC&F z7(pW&3QPh|5C$>;=OE_{FH%vcU!T~zB{&*RO=5*^) zjx@&0(-)O;+((g_%y@ZvbUDX+#tqZGE5OPpRB&`MUfwQM$?=;J;cj-YyFo2`W`Qfy zwW~RTMXrK6o?K>3V0Xi0CRKB|GH#iEqncwe91-zM}5bSgih_-3_b?oNm&f+ufO%7#X+~ z*ue99W=tFkY)*^{>}%W1nFSQsK+B84seC6ab3z(PaODyR<)CH35};J3kR`ASrW}-( z*rqRN=a6E&F@1MChmP<~c!2=wVS#qR@1OpwokI#z;@p`o)xqJ(cw>4>2Zs&ggXs%9 zIAj>#Zr|6zk;Evxiw$~46lhliXarhd_jKbf4tK^q)2q8U^qHpYoW7=uBamsv&guWV zIMf;UPFL^dFc#Ry25MD-*HbVlFbnLQp4H7E15(`4&7mT&A0!Lv)qz(2vIy*)zNec* z10o9Uj46OF_1Zc8OE-st?g2K?vU|{=BG~QV z2_IwwA0q}Ed1i2&v2%J=4~IJAq3MfyIIKX?qKp63}e60O&xh2+-a!Go}R4 zf>Y4MA+&EU1nT)QgC?I9SQVH-TTuljvq6?=f&9y?%TN#XGRQR0a0B>Qq#F!b0=q$z zy7kN&OcNN*7$K9spuMR~j`bij8IWB9T4JieWX3cDG@lNdodeM`7@=k`YcMSU8v&Xb z202&)JVysvn+sA2+V%vtjah?f16UckSoUGen96 z@Io!H6cYm|SlGBh0Sj7w|BF?L1vKLZ3YqCk`Z<*Jk(9A>E3oJ?s5&xYgdIC*Q3{g+ zi@=fTU;8=ir>D#m6qI?v2s)com*Fd@Vt_3F0lD}E$WPOYCva#8BAE?36Vg#&`kDzG z63A(e4YUkw%FgN6CUE#L?w+nZkwX!bjoc=3sDqMd{zML0(W7iiEV>Lz;4%YrjFtj8 zF)y6RAt8$7XI7XKk4--}kt30D_jH3v90rz1PGAK!`WUPjloUX|N6j`!27$A|l%3OO zPU28y+&z8&Bn|^rgfwWDd;=3`9B~#4BNJ${`T`Sp;hzHYbn(d?{*3(7(MD!sr0g^y!m16yiY_mw<*s7#VnkKwf9GVgQYaFbm{ZGcxdim#QS4bu&#aEOO9 zL#04H4Y-se3sjUFGoB*LIF?4T8O;I%vx zm_Vryw0!`Sq$e;5{9@H$Qcz?Buin#On!%(98jAzTO#v|(!S*A92;}t(Ow;A2a+I+h zWm8}h*tdPwRE}AoQvyw=a}+Wy*tvbhbdDBA#zWI}XL3X{9-7`cljCarf`3y5K+DuY 
z8;l%hz&SkPAb)UVIbQgSB!2?V;Sqz%KY$A(+|Q`UJULLwYx+M0PPTeRK1V^EKnM&NnV4Bv+1NQaxwv_RAwWb#gjW;{`1n-> z7(ly3peu90hf_E*W%G&4%8JR!iAzYz%PS}-h)O9cDk>@|DM`z4JFb`x&UT8-JWBQ4 zjvuDMc#7PPU*H@CZpR;Rjy$*H9=IksZpS}x8Ch<}2Dpq2w_^*OBhBr&0M3!(cI<$2 zB)J`XpqzSU9tm#82~Zw0k2tsE2{=cL+i?z@Bg*Z#1kMrRc02;-2y;7bfOCYn9jCxK zg4~V=;2Z&DJ^b8`J3un^Obm+5Jbc^?+>SFqd@z>>*znMTs3-V<-@)tU?fVLtsXfQ=Mf{#!Eu|VTb;Ik*qm}I8sE#fd@WSzcn5r-z@ z{pqI{ad=wuID*cBhwi9x1T8A!0WDvJ@3G<0WGVn1JptNg+QSH%=TPD^W6DurR^XZL zznH_&4^#$bDKaqCgU)y;1lMq&jsb@Pcw;u`=pbIug6}MDMOLsH7VwG!&_R(UAk!d6 zVrH=_uyQLgDzHsIx0pkpnO}it+7gagBH)8*m~$wwq+>K=Isl$nVo(%SV0KiR-n5iMOB6b2 zqXga-$n3ZWeA?#pgG)IKY@w%gDT#vb4q$eKpN$CGd!qnuL6|XtcguWXLOGI?*>TQ} z>3YjJLY2U){+RU{Uob1N@PhWGw16%vfH-ji3+V8%=}VV!Xftw8zqpLU3be3~cR9zb zdWb)sfcBn%&eY=r&G|V#U;*vG{Rmq_%*&*}3pzshKWI9|krA{~QG@9VqXMrZONk?B zk1R82iLV9|gCHY=5)WwX34Fsf=;%ODr|-arzy^Wl%ux-x z!ImZPdAi>!4kqD9KP;gji4!SRE-VnSpr`megPed1_~l@NNO-m zU{?Yia*S>m?`jTHR(?lN-^YD5hXv!O=?$wnl-R&-WSqWuHHV(kSFm}Y*<4=GNkCBZ zZh*{V2A>$mIQ{c#4i#2@1$KdN(`D9hXfb}8?!E?LO4b?r3hMhzz8~Vk3m5oOOavv|Fs;VMr@8O;KTgc6nNcu85J0iQn~`@ z5(Et5l6-6gc4NFmoM;5g$XA5|<-Owi0Oi`^e7eE7x%-F|tiR zvyMZ8@zeCD>o^=4o2RR<=g@WJR$zxN*FVFl#KOy?zy%7A6P%#pf)SKt!M#9`^Ay-X z+uzy1d(W9c2knA(1u3vipR=ArLKYk#NI~kjfiuhT!RsagM?-;s(=V>)kPu=68GHe9 z2N4_S95S})zt?jZG4f51-^!tF#slgSC~ylLV*(vj!~kJ|Ixvpx;BuVC>NX;Lwr>r7zI(SQeIg1r}BXX3*Ns z7ocM91xuE|Pwwe@8#$8ee{oBJ+;@OWkrPDk-~z1){lKWetk3uba>ge!Xg&c{C~7c$ zU{T@}_zha4!K};B0@(rwT0sIeyaQC$fW}q9N5+XLFoV`Q34nv>26!9P0#;Dv0}5hh z1%5N8HLMEE%nF>-g*S0HF|th$+r;sod2VCp^oY$IrU5$|I|UpWVOOz(*Qw9o0F>94XsY`;64!*V-^LNi_K;%X&Fpvsbi&OEP*wQ{OQrgLCr1dQ#B>+X zrP&8|Oi$azkp`+X&+X!v%Q$Cx&TfumkW()1=1^k#$36Y)ZjN%s-_vvVa7>f`&#lM; zI?bKimBqo4QBfE)RKpBjhsN&M(=>g$(q0ZL#(mpk_j1TEGX4C!y?P(V6h_9=(*+K2 zm@{79?s0(QKOh{K(+Vf)EL9DR&T6TVE3Ji=kb2Pvf-!Rz+~ z8mG@Z0xodxUEq+K{^ke=2V>*(uSYoC`0h*xopcFS#w^e@-Sa4iJY&oB%%dCyqH`8c z16_$J20A%Q(6Qc~m(lUb^y%-8au_pynyz$=!D~F2JxGYbA&LqPQQMfBZ0APy2S~O#f%Em@0{RJV{D(!e3HXTt^>Ri z_z$RJ1TE8IRAAR&QUPVV1{TMLRnwzSawtlLgU*GqVhB*+0PmatZRBKdWOQT@_%MCi zNe)xS&go}Pa!h1Yo*s9KLrnMqCv@f;dVW!Sthon6*P>H4&>70}Br$B$pdq6n4o${S(;r;qux9$hJze$^M;c@QbeT&WlH2EA z;;3b;Z)9X-gx%-}HZ*o|PE&wf!Kg9+*5Ce2J zh(eYM19$s$om(7jOa~aYZ@$H`iIGutdfFY1ZbsGVpYCuJF&>(pdY5A!qv~{)dmMKd zRkuI9$6*Ch^Y9*r_;lk398OF<)25d{;K*lGo&Nj*M-Yf>{g7ibqw4hc4>@}3cl?L- z_Q7?Z;{iAybjuug-5=;K(gwINL|F%%&m#dEwqUSk1Sy*U7ltUC0p~+TPoT;cz=c5_ zERHP475}F*J?7A4+VFq7?qiN`jFxzu1s+a<8nmMqa-slusD>lU@j&nNn5P_Nj0dM* ze#$YGQFVIeGmiU=s@pxEb3`z*9A#5vozA7rsWJWR3l2F()#-0uaO@Dmv}*dcmmEi! 
zc5a&YD zf6bx9bZ7PU#jiP985xgm*L(|FmZLB|{2hlLqyO}_cO20&(?DBMm~?>VZNCVbhx={<)XD_0mV=vXvHD~7P` zzrJu(u`}-4UiycFgOO><-RbpzIZT#p=eWtZXX52Ic`olMyB_Bx7+b>hA^_t+SwxDs0A`h zWcnIj&LfQbrx)^Z8bK(r>AUziCo?U)y4|0jGlxm1Z%(s-K%*$P5NOaJG%KR-cyBhE zkgnsK9W4R^jnkhCa@sIX*sdbPxf(R>bwQXj%4#By5;JrHlpV6bl}Uq1L;x<#%MNK= zf^SCv6{8{oH<&e;I0Pn6PZ8nF6hSED05zvTwa3Km4@Ed*8N-?xnHVAayalqASXCGl zKvRyOMgxoEoGH@;9B(`TABp||bUZ(+J|hQs2^DywDEI($(Bvhv0;?u7gA&_vfro5L zj9_<8FFYh5CI~%luDNPCxO2e7y?wSA=Q2j7<#)C_OK>h{WIA_i`Ws13L#9W!rprrl z9$-8_{ecvxuK$Iz6A|N_99fPhkogDT9I&4q1+pA>z=a^0djm3m#o6sK(wuF~jQh7g zmg7`slqX>NbWH`$Se6NDOzhLU6gX=@Q$g&CoUa%UZU3amX#^_4RFygBGpbHMs?1p{ z&EwPnYK1l{ax3sG2W`z`(O_D_q{!~rGi`dT3a1P!yBSjh=k)akoKn+gsBmgAs!r!I zL2)gJITqJ{*Wr$fZTwqpUaa5hYUW+q`k!?DYHfJQG>hu_GPAOsVc^?WapkW8l zmIoGn#y!(JwK-*kcYyZ~gVqj#rUQ8t*c=t6*XnVKPd~5C$z6YdMS;cf1WT3@s{)H7 z=;RP4fmdt_pgju{nLty)44`eF8cZyToS>PWsf?hSlFc2o5EndR0@};Y0v_uDoeKvY zePdBz1+#8|%znWF+PQiIw9x{z##G=r*vBtelt7D3IFudFF@i=~7!)`_JD?%c8KAQk zSRBQy7#4sWr8<3y4rg$^F2gGp(6A7TF2fTRB@Rb>cNPaHMg@+wZHnBWHVTX50amz8 z3d{nJ!Sz(vX0~Fu!wNbTMuTa3 zs4i!e758!_4h41v4sU5ib_Fg_EU7p$x)*{MFoC@&@ESC(=n579?Zsemgb44M{!Eus zPDdb15xgoRTagoTrYzKI21hLE(m;>1AP_Sdfk(hWTN5CYZ%dd!w+(>%N-P>o3z!5z zbsi{gAh#WYT5c?kOrT}Tpv&r%IH!NqQw5qmrmt1y6cuLY1)Y({ z?#QUf0gC($?24SzXX|r%n4)AyP+HJu{Ku}y1WLSr*cI79Y2ydGBKX!=R$T@KM@Bbp zMb7CG2As0>pcRH3Sqki+qiR?{r^SMo81R5jICNrAw_p|6z)7Y7q#AU7EQ@0U z=)@0bk^!B_uFu%P3Em^d4oWlNqrpMec5#9(q5!d4IF;C8$$}l6EEu^JSf+0@;FNJ^ z1IPRhc305KB~zvi?BHb}pmf8jz`b0F9kl(=QyR1|7_^c=kqy*^fE`x|Ns1icjK^ch zX$c9N1?=FZ$e`oVLF3#U;Q40I;UL@!oYT_`IWrkKre84RlwjnT{?d?BP92oop~;X7 z;utnw(86RE(DoTbs+z86#HqumIz8HmGn$co`gS8ub5H~BgAu154{{_-*D~hRV&tA4 zYRqZPy@i#D6LP!HgXy!4Ib)}paC(M<4mD)aXXI1lQUKkWqR0aZBVI*N0E3T@1f3+| z2;OmtI6|7)aSdaZ0yk*9iCKZuieWA22RSyjvmT-J>1rf)Ifw9(>KU|FlgQ{%)~ z>%^eIQ`@Y>Q=iIzGL1T>%7(oY+@oY~v<1}UDU&6=43aODdFl8&T zPET}`mzsWolZkiwe+5oWsUm2Uu|$gM?eHIE{pmCY8ZcmQ!|4KWW7&3zC0f z#R;Cl=CxL8Z_p{3L3yr0Jlm& z1v9HY;|oS5=!q1{pb23fZqQ*0nbMQjSJ`iwo(J8d~5WF{~vu!5&n zSV8yRp?Gt~^uM;8Qu?z%CV_f0NDjfe6asV~lo`|Xhqjy&LJXiApH{#oszISLy}*uB z#205ufvyjLhaGrhJW|kr2TBnx00qQ`?HBDhGZ~p)T$*mt;B<9IPJQ(zQEq<* zrg}$41x818H&BlObRM1n_^=f!&d%2vlN&rW-)TA*cz!4Jz!w z=cjUmPpoBy7TN|(4hpQGBMO)V4uRJ5y7DqG)q&EDi4qsAK;~N82AW<2E6W09@U`5a z5=ns*rt>755}yKZm=c$?BBuhM0v?8wpj{?Wq2wnySp5=-h3VezJ3M`83p!p!s zQRxbxJChVaeKF8jHE2T#qqnpo3+Rqt&{4aEEDj2+(?2+K%GR@(F)27PDDo*VD4-Wz z=FFfeBsNEIqni~pk*EZ^vKq1cDi{^`pamCCZL`lZFC=f`Buu}Fo1d!`kW2BiA9Fx}9VQ=DB{LlUEPf{lkvgyiEf;xi~`e-yKyQqKAis2jZ;YoH2cZ`J_J+X5jW`E z{)FkW?woR_FBm`@0YK+d3OoUyOPK-g&HMh+nsWaoj} zVb}wb$&sxd5kx3nXGAuR0XbNikY!K;9L2Y==mSkw;PGEQgCk1ZqC_W(pHRGk5-2Dx zLUAif%%C_A=J0xOu)-b73`%ZHj*!Rz*@!PPPz;0_0X7fCUKAIh_>a+%87cfhaSw_o zP;N(r7#2sMScPsNB0k~fffmMzJ92pPvVcww7tT^t0~HPm0*?QgeW%Czae6bJn7-1F zQ-|@w^lN^c9*jGt%lmUGi9Y6bWCYclJPP0o1(iU$4@?j9=hS4}F}=Z`Q$ic20(6A% z3BfD{CdUh4@&M>;Sx{~kcnV%%&*Zpc`hoyX2{o_=P`3*lte`WY!3+Hu1RxT;pt~p> z4@_qa;55`cAqd`c3Mxt%9hpiL*s>rCHNd(c8o>AD9GD&*!0EyWu^|j>5J+5sZ>^G+ z0^?dIg|-%R<_CfbObS{~3T+D7E#}NO1VIf;#~WbBJ^+&-C-W$Pb`pcGVgS`Kj9GBk zADGS;$Y~q)26P`3gBjBckYQYAOdr6s0;2-xdcrKwDIs4#DnKpF>l;yYwyxIs< zG#_AuyBW0Tm^lk-&w=R+136WFK$^HfSD%53;RB3WSxTUtlL{;lKf|?xYeCTUgJ4I0 z5Ck1701iu#TbWoDKo_4Jm@X8=S;F)~aC%1&r@ByskOH#;uOsN-dC&>`ptv{~#3?5W z3n^9w76pcE1$G67Y=LL+==u}HDZv9)0%~3}IUbm<5X>oV_ycTjgHRS|DKNzSY?_5;)9O)m~I!! 
z+3S1+WS|fzTtVx3SQMBPWXzZjfCRxw7s-~F+)A7ZGN9e!T#BHRs}y9tL21UZV>(|L zrwikT>A_)~MVeElccY%8DbKiLx?wb@q!`$GX=w8ubeJWJf{?(g=?T%C4(4w_A;e+E z^a4a{nKM5Cg%OkE1F%sqz$7U1LXyvOa02F0;B%ioBbrm2e*;(^WZ@f-%=DYloSLj4 z?%V11F`N>fAe}4<5`VZDz-@kz)jS}pxj|b?Bo&wygg{rUFk~w*2)qIrpunae2})ov z8Odw~W(CP?foIbf#BeGz-k5$QhSS3I87PG?uLZ4m%u?cnBv$4dLZFSXS)c`fiVO;% zt3&t|1wqGLFe~tbtekEb%PGQmW4c=`rz_)*={>QWCdwy-6_`Lk_!12vBi z2xo!19pIdWE&*DY3ks&!+)4}zjM?CWF*pRCO*fC{oE8VN1#}KBk`<5w3N(1fpv0oU z=+6sU+6(G#fDH!S=!{|`iy0GGf&uI$CdUK9Suh*lP1jD~lw^E1-8F&Jfo%@Ro3+Kyif;%e4ud+PuiI%sPEv0z8hD5;+YyuYeK`ml@NA=`o3%l1dlAWGBGlPb_xh&E3rV%6xL+`O;te$ zy_wgxEmvgbVdS2klg4Qv;gF@oqynl$1hT;MG)j=+cZTVOZK7hJd-y=7-cEm)#;N3G zkOdu^2lWQPH+C>|$?}+yiD~+K4K6X3Uu;Yc z)8ivpMeB7L1Xvst*cI3{nK_g=LA_AUd&0yo|sgXxa;(>G*tii(3=qzJl# zQUX+|F@sKOV%KMsnQoBDDWW5x#AU{$06J$5)JbP|G{^#tBPg(d&LCk21w3eD*&;Rt zc6~-0unsK?C1%hakL(J}W=tLm?2e#q_8gGgi5)K>c5*N)aDl8BuLm9Rs6(98)k zYh4 zFo_3b%nBxSV?Z~bDX=>(0G|<~Bn=vjUfjJ~#;iE~;1?0)=?&`{Ij8Hd z;gxYK<=8roTUItB+w)H*wK&RGXSlPW7lAsz+4YLgO440 z1|PfQ4A8ZPpcW*%CNty+K7G&%LUvG&Cf~F}zYC)l`kfi{Md$K$xcepebrj1(-M4K!XeH zj-Wf{;g*8}7?fOJfXtP(VrT$)oLz%S!Hnq*Baa3mq>voD1A4q4B*qk21*XCxy&imI zAUJ#vKwScg8-Zt_15ZvswQ+(bDIL#Do+cph6XZa4ea17Odt}ctg4_q*vc~S%AO^~j z>>5lbn3X^ogvkMvwtK{sn0Y}f|JijJI>bQ5k)xay!v`@)vD6?|Z^v{5%mI7gfS3Zi z;|kF%@U~M0cEWQDlzjyeY!%_jOm3aNTpbo5)(MZyb#R-ot(%7s&PSO z6e9gX)*OJQe;gNxgYGzDa%=;IoC4(F#CiorZqUTPl#@coa`*xT&?qwG;5b9jXdw9f zEKr)!WoTe>WON6|9PG$Mc1O@zi=f!WlC?iDE3vrpg3>bB41Lh}H}sfRDLWWLihLmc5771Iur%@vd}0&0@yzZBx=#=^&;?$rhJ4N= zyFTLt7UUDhKyeHzm^xU@nP;$oN*9(a1@?N!8K8p=1zJJ55_Crwc$wb<7SK^=pkT)^ z!f^xS)NM%aXV+j_0h+-64$U%<+cWe)S%%Y*88lf7bMo+5vJiXx1E-IoQFuO@UW|M_?0+8Pf@vJg-0#yAxv& zFSwwEI~H_uBfBGL=AP5BfvM0HTG-bkis26|O1$9G9DIB!D7bVPUa)|T0i9h4N>&*8 z=LdMgUk?K$F^_fl07G>o>6|@tZLHw*o0X_4O-4VQm8MJm;0mWkof9+sZ;s^VSA9M^H*sT{pSD%B< z8_}$1W`I;Bpeth8K^I`dsuIULU`1P4voshO%orFTId%rCIr9OKL)bMKHnN&A+yt|A z88)yg@mn$6U}bVp09nTF_yDXMv`&E0kta)F8n~u=!m7j%ZpA8q-J;LURPUg`=LkN> zk=^kHSPLW+c5#9#(+{k!yx^i*llcX!l9V~~50KwMJ3m0B`wy`C39MNH)7U{}O$Qrt z1R8KFfb&iR8|aisa0Lh|G8OpNf=|Q;*QbiS^^W3-{5&eK^U^_y7POU>nTdf>9Tck* zpobd5V}3fcoaCg;89|NL?-xjMQ~930a*fS(6fV12>t9#H&%g4J;W56Fw)kQ4aF1xi;-cwBi^z&_>ykqYi0p5qFz zF<=X`KsBDg0gzqnn#?P}X6b@lue$-P2$UQIU~QBQU}+s_8UoD@C_qcS>HAAKMe7&v zL3|9-1U7dD*vm`!Ky?FXkT`n*6j4=>0=3l-GPr>g_b1E}Zv17vCgIAquzf51&`;RDajfEVXMW48mW2w`dip8~4@ zBnW$^JCt!s*LQ%fYX&6+s5Wq{Pe9fN%5gKH?wG&_atAmzXMoj!;sqX=Gr+PsNEJ1^ z2Gatt1=>h442oq?E&(<6m=rjs-z(!(1J~snL_h%r>2iQVP{E98hX{zVK?K}yIUoXx zryU|$3f$1dctixW)`}gZ=70$3e8v+Zpvnns4#?La9U%9AXLDiz^(pijuZSoygH!Ac zs3KO7<_q;AN}`~1mD%+fpFmV_gOUuW{?KK3AObo=72H9%Ap+`BJP-wy=-_780#IXW zhd9V#3q(QW185UWU(B2x=D!WGR61fGR zy9N{Jco622EbuJY1(2}^#6jV~?s!2o3))#bA`Wpq$jt}DL0z^TAg6$a5X3;ny&I>9gAWcgq1y+Gs;GEL}YE**rZ-ba3=-`86?260^>{_4#6x^BwodFLS4K-sr zAO@?8L8~1gZOYlx*H&;!*F%bgIf&Ny3UGP=1v;pCsK6>P7n){PfKvr1S2%6}t3gRK z8^E%llWJ6_wVnmuals<^3n=;& zK>_qZM3H6s!WqIMY#>){5S@OolT);Q1vq5IKv4y1Ei);w30wyCgD!}H*0F$E*`Pz_ z7l?ym7}V@FW4ZxKQ#VAi996&*yWlxYP-}33xRNwvau>A9nH{>ypaW$73b6SdqFD;k zp!r>PeZ~!-aA#0p_tIouA*Lv;zz&+@T>-L1+MF4yZ9lg(3plEP=6FH5NE~zsyav+> zad1v}A)2MYHr=*QK$_`;==9bqPDTDVqM(CF6ckuMUVkAv{bPi%Fe|$z^8vBxabINR zMA;orh{29cWdcnSvpZf8n^w&!$*3@0x|&nd;|gll{E)5^ zsHXwyMI8}SV1{j#4iNYaI=PkI5h<7%L6hYmpDqAr4@hV;gF;(+xe}|R4tPJUG`RBs zno$sBWB^r)@(S#Z6Trc@K^!!g2(A$wJHR3<#KAsb2Bi*ACsW*<8I&wQ5w-&CKF1Ye zS)k-|q8^kjn9Z2L`RIaZmLt1B6{J%Hs$WDwlet$w*QA2_T%u-72SB;!fOr<@*jtbS zG0^SQ%mQ1vK&e23>4c~m(+3epMo><1{2-F$$OtkG6q8CauoM8Qr=`uA!O{Ohv>uec zK=CgFjsFjzP8c^R*}M=1l|-QE-vDy6lsPk!+d++SkkeU^oc`{1hkzqHY=fLEvNr^- zLi=d42#13Gk_7V1caTO&gho*Lb3!yrU>g^Bkv1=qrVpSr4)WQgI~_>7@nk`XX@eLv 
z?TINe^T>jl7ohwEZdhp6gHk%EPq;%g3v^i!yFTL&vs0xBL6o!2Fx5)YIn7_1o~rGY8a0%*KD9uTXCl#mT#kg64w+?Axk zYqA>}9GVrS6u{|Ak(ozz&q`UCMSM-fmG28|Mcnl$3b8RCYx z8Pf}pvKQi6jyzyxC&aRpxD}WMV0i|t1e9c6h-5hmgOywm%TnS(Q*r=Q79J4Ia^w`K z2W7t-VxR#VW`Ra%)&m)HLKK>>PKd(u)eBKXz5-=IX>(?90tFQz?4S&&0LxYzK-r25 zl&x04v(*Ps29h#oMhXE|vwEf*pb!9^s}J%TCp^ctDvEk$6Dso-Q+kdUeduLJ~6M2x~QfYWHV#h0V*v(7t4Y&7N`Ls15QUDAYBmgEG0I0VYovasR02hFNG9P zB|(|Tu|qV=Q4VB=CNomQ5xi}hjYk@k(xlAmnbDFOQtOe8R|=Fok&+t7g{Ucw8R~9u z!v`EwAeS#|Y87ylfu$KxaKL>5FMhx+Dvf$36u%q+X@nPa3<^RD?2bE-3P*@0upYP< zFM;&PAz21apyHsUu|Ql=0<;8HAWMM@8Z+R7-+4g8W=x=4W1#KTiI;dp>c!2NBosIl zB+QsZK!F2odV*pD*27Zb0~bQMrI9FTpnD!D;ed0$ zF2e#bS6)UbM+Q(=PJ?NRm?ErgClW1$XhQ&v$&O*6+nTu zmKn4MM*wt>yaGF@U$r1kQ97COi60`#dc2NdOnD|L3Jvmb_R7?rJ#-8 z4Wddcykg*F3$LBQO+P6}`3>e1Iy&%wy?VvUx=!6@NECnf0 z&j@_KHE4W;TY(ca?+!ZjpId>0#R1$I1zk@7?(3drgU*id%7N}z0v#2>%p-?L)eW#F zP=hF_|NaNGI2}~?=7F1M;Kp{Af+%RR5Zs@`-oTXym3>p7gAXA8LUX5r8Pfz%gGc~c ziL;n75mkSq7QxW^8$7TE>9|3PV9*7opmr*UD=(<02TJD{B`@J}7c`>+YJ!?EJ%H8U zuypDODR)7U&+hmD)R={ryAMQ>I!T~%m)&uKC}@WuqMZs-0UpHXml>o6+>rqV3j;K}p%%VOW=!C5A<*SRpzN?iJj-$N+$N-=R~Qr$ z;A;MXD58le45{WpPQ4(iP!DdUIBo=KgcrTS;1OROn&AH157Gl~Q-G`V72=?xLW2q1 zHW21k09{Z4i9RL`CJsRc1|DhXz#OR8%aEnOQ4cw~REg7(Bb$$fQCXRZ0hGAK+ByeV$I8s0tjq{rp#d4cg4zu7+6}OgOrRzJXdMc8uo-6a4Y1kTAe$e6m7&=D04%Eo z4JPpL-V3lQtl{(mtXcy(oIW6_c2WS%KJ`O`>H|zMET|Si2HZex|a3hE#@8i0nKrabKcUjhZ`*n$>8vpXI@G&Mnoo(Y1o zA83Ka^wkqM75ub8Sp+n7$m+P^SCfFFHF!A`3lFG#09E=R^Wm-1^&sQny<&cl{0iuZ z>H={^79RfTjF$vN>iI!dTmYwBCUB9v0PK2)Y|!XcmcTDAP+Gphk38K5O3R=I&INvR z<_DnO)eEq8cE<<&kRd63#y9+kyW&~EITEzP9ds@bs87xh+9RzF&Rg|`j?mRSAE1UH zWx@~O%mtoU{Q;JR<*gP08|0q} z0EO9Y^8AMm=b1p=TMc{8RtAezgJ zX@&q==F)@>2Y~8LP{W!XIvfBV2S(0Z)3!>A)$@UDqby5dOO|ZltTF@aZ%~dxG!Q_k z1e^*%iBPE?tZsn-C`%zme?e-%SqfCjK(iF6e!<;{0QVNai*ZP6L)3!{1W;B(E)NcX zhH=?onF=)b2lolQ{|23;1UVJ%of9B^N(g49bx`7FmbAWiU`!i$ai_03SX9I`0s?L=u#nz_(*-;3yWrrk()17*vLU7Lc+# zUI6oFd<#lc`pZ-AH0Hi8DpK{I2JdxP0EnQwq^3-2!hHewEBMrR6SA=HA4{EPGEOj0G5RnQELQ|izrA5vqBKm z#Q`~dfgreI2RUMgAgE~C04Cv1uh(ZhAP6g?K-2s%x9<=HO~#%8RVdQt%ojkk6nF~_ zyCZ1p3_Ezw4`_9n0*3%33|^p_18qxyw%b7Ta0qm8ff}PAw>h>5fr~vAP!Il(Aac3) z17sJ68PgXK4c>2qR_@gkUG9PBzmOUgpl&&3B_P&D1*8ZBZOZ{ytV*aY26&mF04@Q4 z2!cvL)OH8B1Vn07fD$b35)gYg3cMnWKsO3o`8@?xD8WlWPzb?20xto1P?{Cdka68P zAdL#JB9I4s6&-RBh@=Tx1ZFwz1L=X6fZ#7qt2cRN{aaOe=8;yk}PgnWO=_3imxbxc#HR4W>MK<-zNo z5JUBhU~%w*H1Ngbpq%750lbl<0ptus`BKlW$vi;_lpVok&D60*3;lz)x`DbV3L;_X|1=7Q7$|>}XIqJ-vT6r$W6tqELSWYU|lT+j?r?F%(Va z148D^7eF?FcJY7$;(`#U5Cg@>9U_ZW0T=5R;4>slpo5Z8=Jr8uIv_T^&`elly8T>ESsfYDwH+4W znf`x;5c~8E)`DC*QluL&ms8dNHrNYVXpp7Ehdks9cCZrokT3ASFL+#QI(Xcd1v2i- z3>tc42haNpB9D^_g2x*{v;N@0G4R@Hf%V`CkP~9i`+(Rrm@bGZF-a>5f|@T!V8Tq& zkm-&yVv3-i4{RD_gJ_l_4`>*d$${BH!Ba>9v_lef0Fs~r3+OIEMJBKWi-IWVxMI-6 zENEB~v{8*giQ9~62FNVX(jQQ|n<1J7Izxb6pK*ccbh-JQvh|BZ!IxICgZ4^o0FA@( znlY^b(QIZ+paCby{0O%h(-x2*WR!ac$VkwjHaB#Ddk1)25Hi5cslbgsz?xQgOd1k z!3CVsvY@eTPQ=(Yp8}^Nd|X>#dO(1nNFaFR7~~kxiWKlDHYfThHpma))fBK%Y;gr9 z*eEs=bP$^vF^Ju=fK#m=F^CNtjut=+VuLoAato|w2A$_206rrXZ4evemJ6V!HIo^5 zD%kOcIBZo2WZ?ROc$N|qd}{iFIMQ4>XmF9IUV#bT%Ld855Qol~f~Lu#y=>69IBY=( zXvIA@IJJVuG7o?k!GKnZ!A8eVfR;8efwQYPZ2k~)bR6#Pda%Pm)h^iSw;v#{3IR1y z;NB2`O%OwefI+T>&-*_C>4T3fGAMv*(+h~6HfW#|l!8Ed;4|KzKzi!obs_^u!3i+=|Q!9137DHg2$L z7DZ;z=@QHe9MFkJ(8*we3=9xiHbrJ0&>b+a)l{INESNJu6aPpHuE4PjE0hn5@Nj@e zF4!Fph=7*oO0Y454V->%F{iW=V)mN_Qno`UznMUr*w{6h8^oqxED%ztX9so3!D}3N z&6wVZGC3gD>VcYRpluzqL7VaZ{|9d-03BKlZU7)=BVZxI0tpco(D*PbC`1_H1wO(t z;IIW3$dKte4h3-1Z9wuAJH%7ql|d5qj9@>3S}+jfnZSG=M$lQgVE;;ksyXNix@HCD 
zwNC&4GdF`a!b`QZfLbox+@K8#pa5l8;0JBi0d0+d4TLU*4E%vEp#%8}bBjC*A*g4pyI;u16aoo_AFc*ZWeGTf%aT7C~$&0`k>)JU4|JPAlo#UUa*61{$m0S z2eE4~Jz-bk1nqtSZKB~ZV_E?!?>BHjmgcYE$P%b;;Q&SR4h|(AaEFNpWFS8`C`2@v zHgG^zM}zLO0L3|IDho7FAg~NPhjN4iTwO6h_OXGZ_5|Dypv2e}1P3V1 zfn4hdx-$c0B6~e(*D7f3IV^)*04oA*NdSe@4Gu_l04?bPxf(JZ1DU3RTp-EmCayu8~9G*`~9&j?;p z4r+jamd}Cw4sMM%hzZODFOE7O23t_oT(2mtAcMR*QwB8p0os(!fLi`5NP}wtaEPLK zAGDzj+50a*-hYAYeHMuKQGE{y8J2p`wGJ9g44|d`Adlb)4tP+2$1Ff=%&~+4WX7#U zObMb0r~kn-M~W;8^-{}0XLmxD?=tA z0Ii)w4uTJ$Aou_d0_b!m=!Occ*|r|E7XiEt99%Ym7JMs!COSaL6gdT~0j+F3iX=s7Cz)Wr@1_gFgCQy$LlGLOXr9ce_l=cEB`+)~! zAc+N1v9N%l8zK}i$$$Q!wH51^?B&^#V!nIha_Z{BtYIG$jD4WN00 z>XsjHSHc!5f=6LAm>`Y?O$>-;Ic|H0ypEh3l-rO-(U^I-r-Q~j>On0j)Z`6nav&$~ z22d3SI%JmvwoU-li-ib$U~^@0V5(Q(L`qQXj-YdSIly(k6QcsBBU729ec{?Rfsf$n z8_-1)`iv7e!NW~b^`NumKotY1qyZJnGeEX204W0BHNhd!4xS@h!HL}V04a9dzzL~q z7I0>P?SgKk+`*~D#0#qQHgINfgSs&w*B$_=g{XqmN+&oW6-+&(h5=`CP-6|W*cp_c zIL(;ea6&fazW}+2*^1!-h|grkbOFSlzy-1V0w-(_+7vEi7lI4~1^EQfoE)h4zkmzU zQkubqXelk=GM8gw0AJw*ZuEj%e;j5^Ye25xGGki71nb3tn(AhAQq^StbGAp=11wGa* z19EmpJ!DZi8CeEgq!Spc2CexAEoePNsTV}S)jfED5NH7vxU2=W zgQ3+usC@)l8^;Uk6hWH#pb`MIMh-N@1+MhiLFJJGGq|V+&B%jJh5Nt;T2%z<(t;LZ zgNH0w)iufsSR+VA{e3X$6Bii$6ex0=Q`d zTI#L9slX!u+BK}t_y^q_@aFCxT%c3N*c}@nd!fOLt&rNlEhyWdL6$pqaAzshgTfrr zxX@r~;6`c$cW{GhVJ=V=0U1m`!HzzdegH`XGM2srAp(k)4eX%tbJ&~&sC&!~K28hd zJ;+2O54geo06ZzdG5z8kPBA$SPzRe$mx0q2+@A$^Zy_xJW_Ml&P#FeV?Xy7)y#G&k zg_r_(9}Z|i0<Q4=s7Veg!{E!zr%wzO5}E$qK#(;MJe&w>g+Ll9kX8{W zxkI-FvVh#*AO_nK7y&L#p}SDPjcrJq2(}<#h4}O(+c*{LIl*n@1@Hkt@E#{AbMU@4 z44alMYeZ}bYyh{%K-okQbch_0^8%n{t4vNT=1eLIAZmJpnXq6zyWP*@<3g@W3W z9N@7~aNh&65Cqgx0gr{&v+FZj=&|06r^B*Eft2__F!#`fTILrsW7;~ zcmlj+7#!Rjpb>f*5TT&N3_55THWs-;9Mtm#EzV_CU=uLlR$y{d0A(0~W1usN!9^<< z=#(^e4JJ@e2y`kE6R36~x29Q}AOcR7bN9v+2>Enasn;F7cN}xIrQXYL}Q*dQ> zTmUs0B(9K!<{l&~bxU7U&uR2FNHryC$;~X!9<(5;y?v z608u*5||D?dx~SaoG_Od^k9K`h||Kr^*NXi%I%IngrPMRylUzY0aZ<)%n7cevDQuU z;KW5l-INM#^@7R+SV=>2-SmGY=U#<8O@s@f*yVgpy9`Wx$g&F+bZD;KhRVZ&dQyfy8XhM76C^kJmH60P)}cfSb&SD z(B#AyniE7|p*aIRG#6loCb-e5!Nj1%0vd$|A0Q7p;~qS^2FgyL1Oz(na1{^urU%f0 zU}8#;aedHO8mJ8d>L)D_2UWwMQkYd>HE1rjqaKnkK)DUNWCF7M9kS`26?7|sE~w?s zqQE4uo*9&NLA|UMBG!y+Kz&hm$5r6?v|?BXK09TBxFVM$JLGyS1<=8h7}W;2$qvbl zpoUpJ$YAi17^}dW9WR3q#!zAc-P-~>D~jQwm=cRM;|h>vEKp0CAhzi;To40Y5e7a8 z?F_Vng`|7XAR)-D8cavTxIw*3^etxaq*gD3FRigF;BH5flhihYl9~)6sd0jjVmTqE z2#O1EUPVo7pu580)7prpB&f4x07_hFElH#V2fEz@mVK2t%$Q^pKzB7kTaqjaOrRo? 
zMS;_-o=HT31=K>9flw--8d{%mjfgdapaKi%z6=%x7Ep%f0CnZ97z7l+4Fw*sen;Uf z&?>BTBB0n}0j;*;HUsV80nL*#3qa0&0^J+~8C(Y6NDW?a#R{671y9JbI3iEPLI>hO zCsD(jCrTX9bP2ws9Xyf)nGy#14s-&;^h7ltk?H=DT*{!+Nx-MBfMXYu#z1uibUPX7 z3I|X%0ZI#4+lS`hq`(U9myyvvWChh2pyr`7QSHNfGaCgQ&7o?_2k zdrNUiGi?x?o*~7h!M{cfsXSgG1}={|4~T)gR|;&?H}nZe2|>oTn4m}Af|fx|ed45AxQo)%v1Lt@fIzb-w+Q+rJqH-vwoA%z2{1Bk z?4GVI%XN~eVbSz&vRpBYtF{NraaA$$HGmEXW)^5<2i>sNFum}Hpyc!~@?3>lb5=o5 z$6dgvzyi78LxBb45Cv8N@TtNuO&cJZRw!`EiZp<)Xj5PU*Chu)8t1H3x8805@i@61SrwTaUmpO=aX?gl z?FU)H!w!)}+QfbV?|QQ%Tgv|l+GE+TA9b=6$gX4zj7o506I6$Ynvj|LM zo8G7=A~MaHi?!ZV!4x!Y2|9=$bVIAD8PfsKz5_-phBKg!A!tC$oaqP?6DYSUWGP8& zFfo8`WM6f4)zvCRGsv=%~Qxc!n`cF&bnnPnM#%0;A&xhAhQW1xCjf#wOE| zDe5UOIv!!jQdCr6bacp8R8wFC-9x094!&~&lcPbFqJ;vZqe+&crUIj* zPL`rN*hz{)p!*{jvJ~Y(_jNF2DT;t^71m{_X8>KJy@*l4)R70|aK$nOR>w0US&FJ4 zS|UqPM}gJxr9hTqxdJO_A(~`EX4{1R>v1&S&A77td375vJ@*pv`Cg>l>)2d z0g)_6o+8Ce1y;~*1xJ=5#cBms#|HvgAm(DwU2z-4!R#fB^$M(@Q)d+Q6<9&T;fj`y z6Q)iR&}U>&TnbWmK_W}h0A!OumZFmatK$m6EJbIKQi&|Z8j#DxvlMGVE)&U8v{GPo zJRy>$n5Dq#*dPHmKLW%Q%~Es$SuX|_S_U$B#-of!7!NZZ zVm!dOpK&ka9>(2_+Znep?qb}@xPwtZNHAwB;~GX=8$0XOjE)X#8J97xU|i0)lu<`p zOLGb17RJqt^BCtc&SA8g&A5(n6XOQPjg0FV&oQ25yuf&#@iOCL#zl;Ix$0`FD$27M zCouLi&U9s*!8o098sk*P$&6DNCoxWBT*$b9aXw=oqrKiG#$LuA#%{(gMoSBYjP$hB zl;otug!s7F=$KB%4#sxIHpW)Q7RF}Ai_;%_aCtKFOlQpF60PU5VmKiI8XY%dIsm$i z47xK1l!7EcyK8=PfbNykXM7?6UIxGlDgqw}fW`_Rh%?nI$m=pNfUbB0B{I+j1fcG? zBk1@cdx1Y33gGE6R?uN!XG9cO!DCn4?3(s$`c{ku%8K7G4VAt9JvPpyK21x5KMs9_CR>ucmy&y~f zgDri6UFUz0MH);GKstYd*p4rt8W{v28sA{o_z$F5gXslGUcl`R33m*1%p5XY>bZ=T<$QKF=0s>phL+-Ams(P>;|Pq>Zc4b$VwGnSkm|b z3Mj_|(>MEYsUimvJh6gGGVtvJpxOy^_$G${EPJ#_AhJh;gc;KVF_4?#iXVt&If8Qp zt0waS2_@`F^E*!)*#})%cOH^U#Ff~1zksp< z=nNnZ1x9f86j$N|U2Fgecn$>?(DWua;~tPOV>$u8%1%I$1ElnXgd(?sra%*?A{Qv2 zO(1nCh-o9ODD23jz>%%U;>ZZ1*c3P%nWl^SamncFfd&S66*xeZl@cGQQL4o6_>3Xj zk=Y+~m5t*chHOVBe~`P_6?mt|`Elt6UuMWs;B+tI{l=&utiT8I1h}82$PNl7@M)JE zAYV0eg0kHi5hXU>A0S_KfK$#1@c0C%R|#q!fPzrOoVj88JwGl*{~N*ztd0y;3}-|Y zSV6ta6QZDWBLGTpCq%)dIEZYJ$Wq{TWGWMA0o(9I0A|AiK~V7l2{!?O*6BX}TrrG1 z(;q(Ik`U>D7K{xb8r%wDUNHTtKbKLcDA4{eQ2(-bIGlN1aqhrg`X#$}3JrkJ6 zk>%L46w(G`Vo+#dbbNx4ZUJ2h==cU9-OT8?Vh6YZr!<2J)X{1NtJtsuYyhah1$iDc z=PIF4&kDL-_6XQnW{}d9$&Bd$D4{ZGFa?0mL{U|M7^4bWW)0dr3JM<$CJr;E6Hs+3 zW=v0@OdT_(H&AAb8PgnaSn4xoSu?HzGj$m<9T^L)7*;}g^%)8rj#_S@Eu&TpD?oza zv}VTiLPS9yWS<3eQwtM=g1&+UG(=oLGDZj)7X>4zjHM$-mg514PeC1hOUE6Mpa6vf zD`=K0#EfYQ$Q~w5rUY~5`UPONE<+C3S!+OX&H&!5!s-aFO~BOopX#<}kivlj~^$7hu zppe6*4ow*==s;yfMx4eXl!G$8HRB#W1y%({Go~G&aNNO{#RDqB88w)6n3UM32YPdf zGV)A67R;qB{R4ddG-zZ8>is{w;P7R2JTRRngiF=;2QSFitd0$QSxTT{6tqqYyi-bv zU4cztB0H#9f}TLl;&>3ewhz5k07?w$iX7aIXQtPMa49l<;GMoOgiFyKoIJp{%7I#% zO5i))S(MnJhJN7%H!*M-3aaVT71=OgK8R3B#0?- zK$}w?;PP?{OeyGe1xAH>Rt=^NVoGe_S_YKPIs`!TfF}e%20McGp@RmBl~^FlejWKh zi@sSMUw}*H3!)%5fkPXVmQRS9Gd~c3_+|^pS6pUH8^pn@QNi66P*+DhOQ4fekqy+v zV|Dx>4qc|f>If>}*#x@4F8l(j2|)`fIIxX5VWht}JQZ1oDD#$+65yct};D6uIp3G{-3 zc>{Rf5wsqNQ(z+49r}!0grVWwAq3iZ%&5Qt4mVH-Nr4SoJ3E5*u(E=Kjz!=rXmb## zWdv%F*PAhc3wl-!rWrzzM!G^ht7E4GsNQwtC~^GLSW{%i1Zu=P-Vg>KPY-HvL7UNc zpmu}&cthBXX@d|bUcqUB)o}x4A1SLo;~7vufUnd(A*RT#pr#-R(r`haM25}BSF$< zM8HWLd~lqqf}}v;943W0MHX;b1LDtNg2WAMMF2Z^y-+_m9v2A1;}H^#+#szA5I?%| zeg+k9jt8c5M#5Tw5|RzPkOcaJ2a??;K=~ch6C$}J>t}#V7tnc)(9&fNymUb+p&<>t z1z^>n^!0-WJbgHWR|(V%>fp^1faJC%__VKpYX=?d56Y}UywE(b2Ci@Vlt?Z$e#ciY zngkpz1b%?;W8X2|K8j1FegfVREL1dPb4vLy#jW%2Ga&!P!0k2^-e&Y zzyhi@*q}Am4M;(Tq#ImYfKAk8xWWqxSp`;&dZrV+3gAncSV30?Kw7M4c%cFJ1iK-i zQjk@H=>e}I3lAiI7_1rJfFglagXsdV8TdW{#}~X=-~_sUK`D&0xe^uUO-DFQ0W9JW5J0Tbo8L$bVVy(v3geMdJ|}^0xEO# z8J9rw8@QMPw<;G1D}m4a2RFb!h%2#~F*Wdm6gz?o8c=rPL@Osjv&x`h7HFN%3MwZT 
z2%0m4Iu)RX-xkn`Y4vPoOdEt@1tnO^24Uoa5>yyKN_Eh26)ey+0xl3h*R*qFDT2H9 z8ca8Y5#=PPtA^0T2D&r=R6Iy1vGIb-$paFgQ~>ID*Mm-6hglCU*+C^U3uL@WU?MoH zoe)FKYVnX|6UciCJOYps=!7_0uyBL4!b_k}&=SZk4qgID zy#U>h%wop$05njcXvOdY%KyMUJu#9?L>^RtK<-aLDu!Bk;EkM%aa_Xnp!-_DX|jPw z2~r1v%F+%VP@0?oD%vLSWGSSfCPvT|DXfk^xKS$Y58UR=Gk6qO71GR@)_`cxc5l!* zc95h4x^;yWbZLq}CupM+tfd;u49%X9n^HhA@Bwry3^ZkK;Zb5k&6{kXA_>%dWOam` zECAC2YJTvbmTm{8ABZPZx`DGfVk`$?yav+_9#FalTYmw3hyjxV3#fEshn8*+c)-mx z^wJG%qAtT79#`H^ppdO+y1)Y~-ClsrVbx%|!UHYc-e5NbTtsRxz2H$~;eo_GgEiw9 zP_VLUFx}uWWBLG!n-4r$;Kar1cmixNC@daugZj3hXazObK5%Clf-{~z;~DHWfx}XR z=>*884~*Oj<_eHezyx_Kh6&IV)d5OjGq|%9lodDxCUJts0(2C?Ibs1fs3obvG>041 z)6rmB0-EV&bzH#>88%}Dub>4rQ$W{xUVqgj;Akw+0d6^Onck7erJ{nj>H)jW@xb&8 ziCo$&Yq%ZHPye6DC0gIXoh2|C*2xCP4{Fg3%5O{>Od%jAI_`ivg-wA28gD1K!C8Pw zfyIm|1eOp%1ApME?JKOybOdyv5f3Og?BIs90xy7#VwKTg+QSWvzANZvO<$kHCCzwY z`spMtd4)Tm@MG0rI>2qlbORKIH@LIFl@(}+)Ro0ydLk3A3}pO9iCKe*MG?Fnk3o@H zfz?soiUC~PfKIR%fQ~XLE3lvje}{x38_0z%5+FSqOg$2yXh2jn4H8Q1@V5OIaqz}K zRs|kw#xfm{R6t~0<@Tb)$xZ2C@;ML&-8)o50IJ$P`?T^ zTmU+K3N%2%s=+ix6kO|oh9khUG@vbc9H3()1g3C8Pr_w!04-hswGTi83ap?tt&m6n zH5vpVZS)1gSpr{JK>f-Ug5agE;HpzN3sr~XjEyq{6s171wL}mS?+XOM6*s%%Wd=`P zkS8D&J&2#J#O}yk!c?z-GDW7q?s#q0)m5z7;HjpU3~s!PQlQ{e2DRDQ!QF_L;J%NL z1``9Qpl5+5_&dT%?BFa1>eYaPTo%<^?BKKo8qs!KAeN=X4r$EY5LV&>b#T}fKgby1yd>FYE)IhBw;VcDa&~(QI@MvZhO4kZsD8o$ww;(Twm@z#8c^y<% zfN0PV7HC#vI;R=a6EOu=&`~U)hN9y`1}laapkZiObLW9rmII=_!-&sLMwrW?Eh5mu z0Z^t;n|?Q)OEKVrh!PvPcLwg2fo@F)cVy0pD1a^u0r$#S9kB-rC|9lE0+o*|xFF>t ztP)C;w_@1A1?gyQ-~yG^Tv#fh6I_r==m;06^wVHE!v!i89bx-USRFyzR6xZrmlBsF zN+ooMYkFrQmoolZ2vnVb28FmYxwNCNa5xWJVX2e?}d z4o7&U1g=-UfLm}6pnG9JeSFBM-UlvND`NV@OfC_FH(ai~??LSbrUzWGW&>!S3s~q0 z7c?9GLFg(h;u5LI;!3|m^cJLf2XrL85G2pm^BMVeqaw_mBvMHD;fCiG3*da#;Io7je zYcN4p+q{Dug$&Zn#;p*mpa(LVi9sROkppxUiXMo99fr>e+JLx%6SN!3jA;UhW_4V_ znFZeC&FXlA6V!-c1zXInzyhtkUvPrMOHUydYA}}qr-nKcc)cxnLU;p57HE7Fy;%n8 zBkE1(%jOcRf5Hh$9~w*#I3elg4rr}xfd|y&3CRXE^g*i< zE;E2i)z$UhybKBp85P(CW^jU5H!?6eD6oOM(4ZdK50HPj%$UA_Xl^s651im?iq%o~ z180`NOit(s3Rpc_0~D0vz)RU!K?@|Zxm4=mX$jmw1<5mkw)uh@2JBW09iS}2YsS<7 zN{FDHl8`oc0~e@TVF9gd5SRrn;ihnbn?6Wk15TSD^H?>QCU7YVgG+Nz5KdqR`FRhc zB8TIQjnXp&6!}2>1B{?m#~h9v*`O2Z1=^XFW=!wP;Zl>I!Q^Dwv0Rat8C1=2Wa%(4 zC~>+fu(=lTGBG(Q@J>&V6BM1k=DQH54EQEiR!2rF2E>#vC})dgDe_I{&E-;(hYkhI z0Ou80il2j;;&o@hQ#>fJ7l3@uV9mG$L@(gV0;hI9NTJW6z&E`=mrIWqDHaukr=QB@ zGK2Q(r{`bc;^73h6hI@7)AjPW5j;PGG{Mt=GQraTmPVT3sYjdNsRvDZK&E%<8Ib08z_U$k(5~tM3DCVm;I48u zC^vx$&KL0G(dwKSKrsZWt~WXw|H zHDj70slcsS&%~fO6SUL@ylTUY>4gNS!SMt%ImoKP^hQFF55%7!smKqa=SV6FfU3n6 zl8VA0dZi?2Wt1k<5edavObXyxo|)hoEEk1rXuf6DWL_W%wnCqAm87C5NZT4oMNoA$ zi^-g+9-KP3z*AU=`6}297J=CzG58#if~YlQ!b%V<1fH!D1~b8PR5O{(nZXlB0+1;# z(7J>fpavc<=psxIgC9I|#;U*vnGORfcmZnlaf4^dKs7CR))zEU!znNao>~~eg$H=# zL5TyB$3b;2xDWu$Ubx&C3LCWjqj=E>_5uRDVGPJbvl|%Eu0nf*TZd zP}hKJXi!Q54Gckw5778Hcn(h?0$hA(GA|GW&5(hXAUzNOozgvrNg+ak1vF2lgKeG+ zG;ao4UjeC?ggc-IihyS$AT`nSdPY_;URHg^864nJ9aPPNYr!cTuv)N#16B(z0PAPf zVCvz3)`CkAxQ-g^`flFWxxMEr&sKmj`@6-Sm5P!3RlwS~aV=5Z=P8h*T>5)p|1tpezM87&Ji+9o}VxPSmr2s!1DAx?+S+jDyu8rp&n&nBh|3 z<>|;$;0D$WcF-kPpxzy598i&khk=^`)Ij90=4QMDipCr4Sv;?iR{=?$5Xn+daRe{s z05$Ph6;wbWzL+mRBjcUVWL1QdT4hkILMcm*uVEl?RzeRP2ymSv{yp_W-(Edj`ypkmU=o z11FdqKoe9-EYQ>WpbHZ~^9`UY8bG%`IWj8pfKm)As9ObEzXxhuNq`#Fpw=R&RRh_B z$^&W!9FS0&%>;>n2o@Cw1y)ePV+PI5fu|zDGja;EnLxK2vFbB+NGS14ud3pbLbON0 zAq*-jST&g&BtU5yytWTC^$0c!s{@`~ZWeIV0&P10m248AFwkH+0UEmk-!H`K20-kDfTP{BLy~T1!jSToJvgKw8G+`z^ot% z+S3cFS7NHU64wjxE_Ehuh3Un$T;laNz$!pzh;j(*2l)b0wSeLp z6d^|r?=ttmz)(avK9iRz#P(Kk=zk~b$g;UNpe3^`pu*>c7-)tL zv>ZT@9n{Q$41|F~4iwU$*@PRyuu2DkAN7FNn6ZOPI8bG) 
z!L$K%MKdHIKw$>%FbFJ%r&neLCV>^4piB-b-_C#<_-x>7Wmz3p)jO`NS732uER?ol zxBwcM0!YRWjB9!>em=Y8?9AAJXKqLF0 zjkW?e`4l+x8FOH!W+;O0Jp2GtqYAp(f*q=;1g5A!kynAk@dro|sLZqvji!^|VF zczSdbm#x+e(Aqc9h!ck+=zKvI1$J10#Ex1Z@k~F|#ASq1%y57!o)w@SFW__XK<$2o znbQ}xa0zR`>P;RE$Yo$m+zK3;%r7`VG06ey+cj`1fb>tl*u*8ywgVJ#iql%TM5goh zaRrG$nl0d-9k&8+A-MH2eQi6JIpd7!3N2isGN2L@&4Hk?E>MZdGd-|{OOIHG>hnOo z$pv*N8#nmK4p7kIbT8A5o74Zaa7hI5fRYu5;|Y!|C6M)?gax{KQGvtp00%5#@qj$O z19U>80uN~Kr3!?S0UeJFI*^N2G^*OrNArD zJw2wKOIBtUlR5JVkThf)0EgoSCeS&pCz!GX&P|`y&gI286CCd%Gntkv!D4JCWRC&o zbiNKQvy?eV^&f` zosH8Ec5sO?a!$X|!DTnS;SVeKbj40CEzsN-(;JX4_|2GJFe!kJMdok>?M?t?{ufMH z0_Ugac5-QQfks_=KnFkbPhZf)vY{_z!%iV3v1WCe4U0*m8y(4A|%0%xY%cX3J7w{gKoK^b}2 zq37i)aDy|JBZDF*C~n<(89{vs4#y45pyCJ-xtyRDRirgzZ4O8Lb$=DT7UunE~c}P$_l+t{q&`b%V9v0cmGd z;Bb@x4OuEq;}xB5)6FFv%!84NG!*aQG9{*v$?M^gmVlLlU?==w2Bqx@Oj!y% z(--z|shTeZdv*$o5*sfsxZ+jdFlCy+qBs+&1_mXU87x`IHSqMmJzTPmT%cS$gGGr) zfz6Q-QrCjg5Xdla#$Lbz&XFrXdcl`8a)8$Vu!H9092r3Br$_g4sVQ`T-LM6s4XKI- zr2>vD1e0mthMutGgqjtA7dn`gdi_K$g?b*yVcnnxE{Ed*s9Qjh0J@9KaRX}>=oDiPea0hjy$4u9ty~Vr3#?fR ze2DfdxC*$!s=%QDxyAPc=;&Zj@PH1Jbv(hE1!}fJW^s906<7qo#TN@V_{2>X2T;pk zy5=MffW?;-~;tQMmYWeD+XB# zDpEkLDY*OpfZPvmYW-jZb>Trl57EAv(-Cy0y_5on;{>)WcLg5sU@@CO1E^f@U<2hH zP|?clj>XvUJ?j0r?L-e3debIHkEDl#BjB(emi z@_>T>37ZldFFPpeAFy#Na!-$(%%uX3$A-yVa-euzFqun6h(`d~LF^!-&Q0c$-~`pD zVB4p^pUkDq$TMAd3YRwH)amw9xK!#v)9sv&pu3DUKp}D)oYda1E3xr{qx%KBA`d9I zz$bu%f(Kk8fbJLtX#o%AL9~2ejP-Np_1)cxOBXEofw51->LS+YyC&(!9Os}8HB_+a~DvxWWNS4hjl9 z(>F}zGMXN1Aj&@d0H*Lup_#7K=&N5LHx-*-EIb#BIAMS2{X9lm>!5vZgWmz%$)tCYK894A8X2^pEKR!qXdOaoy8@?*fHH|7MH&eX!{Xp2t|QIU^ORb z=(=V4>{(nEf-{&Dz>8xWBtUa_td1SipUmP?N&sc21_{tjJ)j;tNVBBC9YzHfPyr4a z)q|}h2hUVIX9OMf4(iCC5X}M^4;}M-3>))wWE5Bf>y$$lC`?bA&7~mBE?LGy(`U`$Qio2jXjmgt!DWW(86S9%2$vWZ_`~Ek;_#2p$C%xXupt zJZMoVbjTLGYY{X+!UP>4VX$HVk20PR&r$+MEqGBkD7{xZ+FL+(E}jA9C?;;uun~M{ z4U{f2)E*{*58$cNdT$sSe61@oyc^@1zc)g&;)}>Taeij(9s$Kpm+wg8PFx!z%twl zXenUh0xomtp4Na1pfzKlonoLRbC7XC$app=PPT|E@qni9prhEJW!Bd&FEfK0 zGfJ>iwr4=P*PtW{8dhO(1&;z=08g|b#?e?E>p^o^@R2lTXvRJuVa^QN6AKzc0975J z{34L0#17d7rNC~+v;dTN7l>s!stSP4%m9r?vVsO>K_}FMeY`=)jA?@yXqE*uGAIT+ zpdP#;6yCZIc zavW%U8#LzWxcyOwfTJqx)NN4GMmQC8=ynR^5+o!iL!EmMq!E7VHaNGv5KvqWt|A`@ zD6U`xr4{hz4a6)AsE7ikcK9iAEZ|{Dk?9i?xkWgaGlB+;SQJ)FKe(JrW_rV7F3x($ zxHPzx2)Yl4i2-y8Ju7GcxP>3G-m8J1hUuanv>ppo@?a(mO4EfoI9-6IF7Tv_`VBY} z29482=i?3mNAr5*gn@7?C}H4A7l%L^G1A3KSh`q6O1dzgJ}Hq~m~$mGVXT_Ia4DBk z{Swf;8Jii?0%01)JSgHplQWnRPjQK$299~~#K2%I5e|W_u24gcc!X0y5s$M(cm>jk z5%a5IF~5eCn1_@ItDzCU22>(w*Mld}7Jx5E2JdGTmngkj|}=*($>-;gs^ z>zVPSw+o;}UZBcP6nQQbT%CXlZK7%*HU)6s0C}z(QtFFkIVuTMnSz{mKmuMlfKEzh zg|sU{H7=-;$m$5H?%;L93jrmb>GCVNWa>eCfEhuRs!*0AgTNMWvtfpi5)WwdPnThq z5NOo_s8qcJ&09C1d5aBvsyS>Hm>sfy2B|#_I=TW<>wsD&5bHtP+M%^AY}&UTTHS&= zEqJS2Qc6`GXobcDs?b2o;Ki~W1;BX=)H*{cRqL4`)h)E{vH_{mWPTt356dtX2T+uQ zI^vKZg#vfc$~qE30rjw+CwP>siGE$ft&c?`u6+pCILqQ z6Y4qZr^u3p_NiF4};Er&yNbsvg)8wV>Xu zDQL9@Xt5$_2NL)oZ{$@QINJ+`T)gt2*$f5H4U9@W)1B9FX`AwZdbg03843)b-AODw z_duOiP%i|0<%fb23roF%kOmV2E95j9ZUvs{6ZLpRr%!O>V{rtvAwi8Kaq#kWB@Xb8 zgDfS`2_K3Akj@M!0~`QP26HH2m?bd%?;0*C%Ohft1{vrCddR@U0Wl?B(CjCx0yk*9 zO$mJ1UxTo~CKgb)@c<~xv6wN#f}OQimQaF&9gKoe+$Re^o2(hR0RUItK$7*tL< zf+V@2JrUuguF~_qEpySR#dnD_@y`B@W-M^q_JSg%{2xkdw<3x&lHb~_2 zfmVRAm@$0-t#Mfa?^}Viu{wSb&Jx%TE^5|*TK~MDk%|@Kifo{kA!C*z3+Ox)##+dV zEpE_~cP0hK=}GIjMCxG$4rmZ(g)nIM)DCgfWhdh1pp96F;s;bMK(n#L8s8wgJ(Bkjr|4C{n4PtKBzkgI-pKC3v^~s z184;=hZ$1`Xn>+aB+HRgpb8Wjte|71m3RbpfY$7Dm@!QN$%D?176;wEqrf8oIvqxn z`G&C4EabfRLj9^64DAv!@W!X2F?$0n#$Bqll~2t_9| zI64W%CKGyW)`N-!BBJvkC}QBz$%q-9jG!S6(0UW3*gOl;Bms*}Mg>sVA;%`80&6{5 
zbb=NvfS37#rcNOJPssQHD4&BTc3JfquYk%u4$vB*3t|v;ke(!@%-Y2XlKCMHUTy^P z?+0;n<^!O13uxUCBV^^xOz6s+dd54T0t1v@PVj-Ij6mZx8^l4!X|X8qg7%hZFx?P$ zWOQT{*bOdU&hRPm@IsCq;{$Ep0v7iB{md|4Q) zKI0#Lu)~mTg}VO-Kg?E8ZUG-O_5;*jVAN#dsW)ft5CAO)6;K3iL;)>1gPQ=FI)kj^ zIwJrn2^=9EGf-C;PHK4)S0NzoHC?Zs1oGg&fHS-h^WUsRTidS{rFa zF-HbPe$X^LvjbDSg94`_=;}XyH(mw>0R?{OjwMAFP)!6HG8WEK;D%f)2MPpms}oYO za4YZ&>;=ck8&F7r1{)uULx;g$fHpgV=G3QONEDKlR$`M@lyw9z{spgTW&y8}RbZ1= zl$hSPg-fg+d>EV@WQzfKb&rCC8!wXrj{^8$ILCjWyAZ(@ml))rEF}f7D(LQ2J_R1o z;#(!*EYQjCMxfbnF$H1J5+~4hHPD1IQ#NQ!lSe_;otJ@I0cw#WOSU49f`SGU%XHZi zZqPL*pu@+#r6DmY3JsDAqTs<)QAiLg@MtnK*fB9EaDZ;RX3q(`G~{-%Y1LZX#rH9!&fbWSKfk~8V>|O6MU??3>zdIL4F10#0e6P?1hd@ZoHs- zxIu@agT}t)tr$LlPO)ILVmJd@ohM_)R1a>=i6Tx4Iv@}a{gg_H5j)GPUA3)V53-nYu*g=e-o(b3x5tt!!K!$)b^9&)@ zdIc74NIM_Yd7bdUhs-=o(+d`Ii|DX|S~1}9Ey9LP7v$uT z=0h@sTZw1-{_R|H(`y!Ta|wZZ9^fE=O~mj_|G6D}K!^MeE(s^_xGHF=BeY@4slX1( zkC28b52RrVYNOo%7nPit4O1R)!?gYmA9Pj;(K5Zk2R+XbG$G6g-f0VIm$I8Nf!n3* zW=u1LL75(gp_!ip+6d)v1Lc3vfaeKNods%-fu}q`#UZ$B`U5mf2dc_CJU}T7)IEiC z%~>5!fDW_* zIA1DpfU+}a8!Bjh0c2Z(0;?k@@)QraF|WV{4l%F_P)M%;&niG?Dxh-<0$B>6b;qEE z*sK~%E5P##Ye4e~py6xq!d}qE8+J%r7j)Q=0H`C5{JMcPUvng&_Fn- z;sw=V$C!9HK-U~|fEEDoII@5WEjCBSY(>yyEdw`qD5yUz1gfC9pa6Nda0(2ILFsS!*1Js0oRDhs{1djr*z*g{-Xam3FSq4bz1P#l8q88Ma;Dy&( z9H7Vo?MzxA1`Q98%RpXVAeJRizYP%}tUTPHb{uHD0n{i54TmAT0zTso+Ra%ati-|# zPO%Gw6*<85I%p1Q1IUe_rV_|?N7=yzBcB-)_^c%s@Kh7XEeIn)Zm0(>6h91e11}fY zFi=Yl$*tgx@B&A`5qk$@H5aIszX7ru-0%eT%fvx-f+MIUhVVJK1qVJ;0TeT!IYH3n zTr3KpgMyEOO*bV77` z;yx~edLERn59n|ZPAkZ%b+B1baP|ZDbU>G&Tn4Q(V7MRxN|m6VCa8vmxDs?GGH8NH zJj+p9pbDHFK+E|hKqo!@|Ns9#Kcw>rIT2fd2fU*GK|QFn1qBm0`an}(0#cxYtOHa4 zFn}811h?6MQZ49Y&_2IKsLLdkN`JiKyl0iigFncp#Yu^1>f=#2D0o0 z=;9t;Mg<-PP6hBSO`yKU2JtM=k{8hBIiTbUN)w<90>G_?2VzP*pbK7j6j%_KzJWX| z0$R|(qQE&lzFa_D8F?cQv_?msWCAUSnl68UOI8%rZa)p%Obfa&1yo>74?Mu7ssVC2 zJRvG@f>u_t>NA2jI)Dlu*k%W|=>gtcVi>gz4|sE;A}^%20gwOjnlXW9+rW!^l_9kb z52(zBOou}%8BjiC1J!LHcYt=LXfSO6S2SB-70nmWOf+c52~?&*+&4ows~!|mAH!LH<1 z;7|mQ7=khh=&Tsylow{u0wGi=xiELy9ada zr2?q=1!|W5W(9i>G9V2u`e79YxSCMlS6~x34Jr#j)ged|=P0%lC0oUH(9sVmH5vIC#bsW5}d4sw_$sO1J~ z!-IyEK{gzKH_pLT1bFMp4PkH{#H!0M1AF5fWC&=?3be=qWJo>e^!%6LCjJ@FJ{NFi zJt4*lZk&TpiUmz_fF>9~=fyxe+|U9Nl7=Cb3@4=jvf*g)+dm~K#h-3{)p zp8&0V0JjWIiLy8(D6qme?(%~7@uq2#`}iH7LX>;8qVP zQ-O+HCdVm^pk-1TOa~-D%lugN8IOoe?>)jLUjIV~l-MDi?gsGD;gEU<;-m)9`Y|R@ zW2H$%Q3jIjK&w{RAp?M*dV|ND8PuB4V6ssJXFC&6xee|lfFs(mLj=@RM6`CnRVK6> z&LLe7%7WldxF{&UKM_!r1JMrz6xl)RY(d!%RQiLC2Z1z)z^x!f76neoP&Fv+LFTbo zpyNFU#6V*vtd0{zvJ^n|<#bR13@Q0opaUA<5)V?^DS$MyL(Brz5D&!cm>76ixE&cF zC5hPd_$^$b&Ky{~?HtmI5|GdX?Q8{IvB(H6C72b!rGx^al;BX1m@ajUOHNG#4e{|4|lGidA`6a|ph=>{<+4&?Uf3Q%PW8b3$0()d9w0R=X* zdZq)gLyJKTR8SRgKpZ?84_S-D%K@H(0qwH^H-7A2^Rf^D}1rw~N*6uj_;#f+)`gSZ07EeJC~PWT|6C4gw2f-i*vxAdR^0>jYY{grOL-2LbjD=m0NJ zTap2MAT@^uQ$1+EJmh>dkncf(4e|XEP579^v4NIuvY0V}7705x zO#gA3OHLY8bAh+;3!H@|Ww@P&XSmEEOQaz~A?%RGDX8MMg&zON25y{!k4%9K9)Zr? 
zf(;yiGAkr^g9aBtbM$c6odFHvf|eSAMwZ1v3*12au0idd1>lw!lLKf6Igo#$hNObmhAL$G;1P`8-X zu>o|R1AJy4b()V2RFHs9+7nm=%|qaV7j{%=gM=avr0oWYaPaU~J*Wu|Sp)*gstx>E z0*k<{xhJ4x1d0o=Vo(PNo(Q6N!6$@KB@xs!If8E7K~Dsr1~>H+fgo(I7m*0+L8$~D zo}vO(3V0I%xRn5kPP~Z#oP>x=1YrB&sq`giqE8Su(FfY<4(fFZXTc|XL5IwsCxX8q zO`-@*pn*kD!ht0MaBmeQ5rBq#9L3x~MW#KX6ahB|1kQ1SS{WRmeZWCk@9K(_;-?qdaa88A~)ATJZBol0p+sz*Pp0;xCwg$DIg5`5JF zI3?9XmJNW)6IgKqnrDWjBv4fh>VIMsD4;VN!2|Ai(-L^#hd^4Y2d!cT4d%0(G2MXF z{WpZO98u?*88C|!a4msWq@d0-nhv^63$pb@GDHk= zRu_0RIF}W}4k#bIxC6S72zulTC}#B;kBBL;gOKA z@ALUY%@tV`_`zKzHP9e8i!KAJB8LKtE(41qs88bnK7bR{zhV*C$gIQ;S>3^?z%^aq zD(GrHfpc&{&_D)gOZ*j3YO6P6f}E_O!2}vL2RWC=(Z0};sl>6N0dyG%_;d|WUj|f~ zpAgG}T?PVjH+V7H4KdK^L<%ed=h+a}vO0bc%K~p>WGr-LaR9Xw7_AsSfO^lM_6~>! zowRMn^aM1x@jxI8v;_e?C%!=t8iJsGBMStxxRpQyexT)SpyfG^vQ`WeKwYPLGp0Wv z$Fi9*{Q%J{U=^UvCE&x>!94JZwV*y07s&nxVzBwfHR4L_;B})<8LSv~fL4Kl=6}S& zm&Is;dk~-@%eicz!iYteff01#sV)P9q9~}eIl~A(*>Z+}A}FgcYA{`31TB65A8pC5 zzz;s!l4&`}>)?~F6nPx$c^FMVNfLZ&pAwHFV>Y)EXCY)Y6zJ?rZltp>*<2NPT#I-a znH&_jr!Sn%E;{}7H7?G2e$Yr6_|U~Ipd|yKIRwywFdKxj6oo;3DUhLxpfOa&ECrC! z;E`?6Ae=4(=puE{6g+4*7-%FA6wn-wJfL$|6@)?Qh24=SCku242!|pks3!nAO&m1s zq6pfo$dLs){9tM3x-gGS&PP|!n+BwMpS&^W`htSFAXyncdTB*nD$jIu(%c{UXUG65A zG^5~jtD9Wv^`J>p@NpI_u;X|@o(7F){Q!+2JH8MHI|$U1hKw9O5I1N3Ag;gx8Z~rP zU{erwyv&g0Uc}3yAPZjsp&;N6TDS!o2R;M3>k7103^Yl`V8sAl%m znbz}xhJ*MNctMjlyb8h~o-k-W26TV}JBSB5n1V4&k%Nbk8+=wBxJ@bozU32|5J2fs z0=$!e)ltQYVTCZ%GSCU?b3tun$QgW~No??8fZ)?o!A@bW>>A|LQREbw)s9U@tv>qbG_ z0wKyl8jDVsrsN4XZrv+ZI0_sG8M*SE;c^K3rf*cPDa)JhE zJt7xmW((A<1bGV7kAu`U9H3|%in z;2ho~q{I$NQsCwG;KiO7L_oz0lE&$McerHgxj~f^y8<`pI`-xdc?^nkex?!+}wQ$wrA?Itx7K1@5MSw$!tO2JE0J z*rh@K0Uy4msQ{W--5_j~B`_Cy;}>Z031nyiG{^)wLvjwt8%&N$irfmkj%OG^;lu&1 za;F#EzlbB`0vTw6VuIac#ST8nA9{ZbxXTUl2k3}(&>kye=;?3J zp&d}S;SMP6fgA`L-kHk=GY34FbU{Rk9eOBwJ!o|hXj%zm+!+z@QjkAf3=E*FVOT+% z>jdU7fd(ZQK@%VFDSH;!(1t2#7?dA$69DAA*B?+e(qV_}pyteNMpSv|Cg5v~vY_D) zQ2!bGxsITl@sNiiKw}V)2^P@W3usdZG&KmGvjsQ*8o*1y!P$#Z0kmNdBn=uB;RO#n z+bDwjhbEw%l%OGdMHZ%dXi9~)i#Q`1&=gQ;0&S=T9TX1no+@p=+vlt93PI-oTGN?@Ky7LyWa!vyHs2~g$CuE3iG3VP5ozVG1mexR|E z>9Zb!4hRsKzUv{EbiJbjbj-<-57agSC8Vpc)j>f;NYP&g z$3G0?OXI&ww;w)LAp^bA#A?R=%Z0Q21^9^w$Jl;Wr)}W#dGW`Ia z(^P`o1O*yIQUJvW=qOh3orNsC4AGEP^w5+5(gYbd0&NTi4|a=!HY!~MS1h2RWKibP zV7dWbWDZ`=0tyl6NTB1M_u#Vtu7g#75TEY*giD&~llb(kCtMmRdkJ2Er(#whmi&XJ zxuEO&L2K8bgN)#9TcBw&+cGlOk%>s^Ah@~Ci%a~4p_Tu0jv*7`s4UaTv z13y#H?iu*_2F5%t2}|(GQOKGbNF57_GEgqyaO_z%O~A43XOn=VhQJMQyqypMA7cO- zL_hG1OBQrC>-}e3+K5a9od*D)0|B~n0$iJb${bJ@f)uf^!<}J^OF>(kKxd+Ya~Eg} z4#)|hK^)M0_D#IG=rS}HP49fprR2o}YEnS%m4<~ov_+0Mk>m_$r7tK8pblPxH&%gX zKR{#XqSL=V=TZy+wP!%*s)8bqf9o0`=k*kjDgB(8mN} zd({lNc$Mrxbr`6|VG+0mE7rj`A%TwN1r^g|RF#9vSy0}=9fzRl zF7SB92~kkG0k=XofT#XIz6ZC)V9Cq6yFP>_vq5`bB|qOTQkYAL9d18VJ@kWd8O`GPhohcgL) zjt7`-FoBDGI@cR60eFcH>f@m2HBjvdtrj2?M69}m7S$P{1k4O<*@6Sy!GLCrdeD9} zP|lbE+mi(f8&CyBxa0+|l>z0&LlZj%9E}h;13b9`4i@k%A82oi29t~eiw2W|5)XK` zQeZK7TgMAASIFWk4h1#^b^!y>gfBQ3fSUcFuzey1TA;508fyi&sh|t$IA9CvV8!+V z4ju*J6Y!%L;06Bl32(Wi#GwrvjCRezw_N%%pg?AFoC6x%fn2i-nu-Mtdw~jXo_Aal z9Il{sSgt&0reA!^B@bFXq`>4j0jqkHrptL~p{ppR;OfYjrRd6Y8Z@q|z@)(A_=Z8? 
zHmt-3Esa22b^~tpoP%rSIR)4A8mxy0GJyrrH2vB;E(Jjzfjg*XO=o}4B_|2FP6D(Q z0KD|-2KWFpMGl1-)9v1KeRlzESXAg(4jxPe9YjRe}Bj5BjAGo|g`|K1zi;;Li zJvz`aJCG*Q9bqL7-f~c6f>vsAD{#S1uK^V&pyOX&Or0j6z$E}VyylD;NVQ`Rxbd(pYMz~$OzEU9Aa6J{`U!S zMHZf7kUr3)1TW~gPl1cfFiSQFLo9LUEdnc205zV>nQsUys3`J-#$Z5;)xeiPfv-*l z^%+35F06C|H7h}-WBmiMEJc3s-dqOodJfRW6HxKT13kA0=4O7!hE<>={v6rM9Phno z5O4%n!eUthcfosD^%=n)0Bt1#O(cWrScPGiYN1=vZse zQe6f}%MUy$%jU>g=Ez)V#jpf4;Kc(O00Z^!r-1mJW=x=gFwmgeDhUPFdIdH|wleTF zGAlvDEKJr6%Rw}Q6~j^q1y-kqhK7a@Eau?BH1L(=Ah&@A&AP#EJ0q$Db{nfB2Xu1{ z$Y-EUHlPp?1)Zn^I;6%CR93NoCS9S`7PvJg@RgCN-T~Bng5Ipb<)~~1x=I6->>dax zajZ3G;!xlQ9SQ(i<^j4}kxk$fGwgy2P-uZFWKfFYS6~;o4_e#DF7N<;g9QV~=p};S zTAEFu7c`yBDbT~L06K&DA!zf1Ci8-NLC_jzg^uMgQ^9BSD6;VE2Nz}_ZJ^=}qz^>1 z3xG8uGCug$sRQ5%j0bF4;0*(y#TtCd42}!9vXxj=Kr>E|qzS5UZ?I)St|Q}Ms#jpw zXJlhi5}022g)2@~LqWii36y3T9d$rsso*QXK@3F=(0RXEiX7Yu0@J^I;Zjr=aOA+M zkPoKNkwKA5L14PgS1v7e9y2Bb1r|pO&=v_M1qDbd7I0)ObG*L0S-=r=c$7hwz}@M+ zU%7N7pvjb1fklByfgPMoT@+Xpc&A_d%B7*f3u@uAgHs`k22+I+KPVA`7E(4aW+|{M z@PkemRFDzqov!$eOU0U3fjdh{2~?^maw%{qfXf%f9MAls1LV9au4 zP~ekR5(H&?Mn{n>#SPONzHu4a@R>190IlTZH)HAn(VS*X9Uz)RLC{laBj}u4M$p1x zGo~pZnn^*!vEE%lz_Ed`P-)Zjx8JxF(@VZsdFbt^wa{y%24Mu2SDR6=ll|JJgs1Yn0Og9)6H-mPpatb_{KJ7b~e7%4I zqk@bXQv_&OFhNleM5TZxX24mez?>-oB$A;h2BLBlB@`H85*Z3Cjvp97XE}0YDadFr zNeG-_2D!I^3B0|AQ9;a%DMNu#LBfnFM}bj6)Ql-bfl)yabZ|K%C{M&FFe(U`sWC+; zuqkkXM$FhjrS2T2EJw(_Ti}c2SR7X{WjP+$F+J)Bmonpw>2*K26bwN(K`|+SHox03 ztpVKy44Mf8(F|rxTR`-R9n%Cr6IM)9K)fA0rl0@8RT=@h*qa+v$ASW>gDFcPM}Zr3 zQ4|-rHUdfTE69RwB4TlTz?da)pIL!8KTMzVlS?8R6n&tq>i7eEf;Grug)9XPPz}SP z&-e!tC}di||BI{F??1OSBZ~rgCYUir@Nja2+CoPd6+yM3`t(=7xOCF_z%JgwoTb1H zIviMGJ!86KeX*l_p(7*cxK>6tX@L*yph(@rti%OcnnIrS(`$coNjXDITEh%Z9%LGH z>o=E0I(d3QX`Dp?v=0o_x;w$lhQNDf1-|K( zf4J;bXMkez04FG&Ki~wF6C47UL3%;UrZ}M!>C^vm_0{u(N*K_P5R(J5187bG z!UmC`!*bn0^Ku$YI*OphzVe`%UQqk;gW)B?4NR^Or9cAy)JCi4eQc;JSCVhOxX;|3^jAAkcF zbVDVim(8Nj_<~c37o8M&<_K;`uXW(5|< z1+1X*n#B=x#5tG1zUhWc-0FfG89}ub3*^pC$DXF?(=(a46%m?FfbU>KXj;U?t;#rU z`Uxg(8O9mYpE7aF3xOAlBw_xTrWo(+B$IPw8IAi)u zW^Qk3P*3a>IDU7qD{=EOgBC(;VdrH5)!P4=xxE-aP4{Bqc98f4Dx_zCQjlW@{8}m& zg&ETqv2fclHcx-Z!Y#+xGL4lxS(e|CvBZ(7(2C&+2gnf|AV+|sj3Z0n)AR;bZcWA+ z(^s)_>oLxlev_43jTa^9Oy^Cm;9Fad=keype_Tj@00Y_n2 zl!Jn&0i1*yxUvMcO%Gt_mS+0)aC$B~w-Hm{qv;FTx!uJ!Gnz3m@CbsI#)yIvf&iE` z{U1AbCF8W|6&&0Sj5DV15Oja@Lr&0I z9gu^-#~@=7X92A^z#`6!LmadaO@UJYY%uTiRb1TO!n}}u+n_>l2McI$<`Cy}Mpka= z>B`*P()CZ+U?YwwoltOtQiJIMn<5L(E>PnV+Pm2Ro?R|vsaGJ>v*7@hW1tr21FV+K{6!{d`90fqFO0dDgpk^DWK;%;p1{ov_ z?I3}MI~c7Pz(;iPfYxR~`fK1JbQVa{k`2_R+qtz_z!9`M05p>b?;wFXi3|$t;9ZHJ z?tz#Rr@(I}rFw3R9ulPK3EnXY8b_5-KYL?Lu*x zF@d^J;G-r$E&z3d9)SmrpNK=6nCze~4ycz0I>S|wm#N;7NrBC=iKUR2LCTRqL14N* zFSk}b_z(g%=&iAky+oktq%7#?)Q{WcpWe0VApuH-{SQMz21Ue7c?3%pSOxv=6j4&XKXu30Lw(n(>1t-v$Y2 z@&%m@0@@b@PNUOj@^LHIgIDW-`svWoWz=NIROa}nwFTNw7f1BdpNNBR?tv#maBuoI zlM-?&1g*P(b%4OBkPW??enK=06poO^2ataH8BrxR)MO|MNrs@k72MG7F&Fgg2~O}9 zGRV$YP+9^-_)Ac;h*h8Qi2&Gz$PxZPK#2|98|DRVvH?Z-0>OGnjB_aP!()7jAXq14 z{0kmT}gUuoxCbiDEWT zIl6bcj{vtseK&Z6IefbuC<}wO@`0vZK#l@;{uNk2eJjw$TJSDnP!Ak3Mg|JP8B8E6 zcYsPhL{}R;#66?o#{@nP_~sRy7*08mK;YMwF(yaL@H2HE`g0d$a= z2GaxZw89fcfjCexu|iCVjW=6?)v39mq2U0BIWuU1C1?m{iI@`mT61R5YH|&x1!Caw zrUsF$dT==fT6p;yydASe1fn0Lr9lKV3!p`vpqXAVGp0QtXWE!C?EujXRt(?;pWJ3l;CTqp zLQ|0Q76`%?Yu4X^nZsbk06H+qk->^#4QS;clNr+raZoVa5Ca`Q0J;rEU_G-Eizbtb zIr9b~P;^XS1Z|LEP~vvw)dBHAYa78knG`^U*ajg`n|&JiBEJX1h+w@V3<}mOp!Hp> z8ud&!gh7{E!_NCW0}_Vb1^`YyAd^mrW;rqmJO!Qp21ywUKyd<^H0M=d6ENV(1efdh z&u>FYu=}Sp3pk2G<_bXdKD3Sl&4oj!(Qb&sn_iGH$N(Wqww z+w2G$zGqZm5_ksQUvfYMR_LJT&8=AS=1#B}>O3pFSP^&*8~$_U-3*$tL#>1AA!Q1R 
zW+tS?I^cVIL06|B90D%l6_^|?K?MnDr!MGb1hh#BaM1)d8nQl!XA{Vc;4%X|B+UwL zuPJdU)WZg&K?jF`(idp`189w~BRAwgdPgqsItOt5$L_dc=QPL>FQDUJl{mpg9&`X6 zIyuG#UVa4%deA@#sI>rD2F>Bf1X}(H9%%=i3ZO#yuT7?4J2s%h(q8HhXUvz76n#EH7f>i z{mtQMU#P@l#Q-{SM1jSMp#{{Ef;K$C;-JBG7AuBU2?b5~U@)j9)y4^1tjz)%DzgAh z-+=GlWpiXHRAf`&1P!0C)q|=Bh?_xUzbq(=YFR+L3|Ry&!55Z;N;7R~=Loj%yBxrNW z0yfl22EG^%Qk881r3Xh<=mH=%M+SFLSbP8#8sGrquwnqMhyVrHK~R8kSTTTBOMu7C z!KqjxOW*~lsaX$d9e~z~!&a}bf^MT$Vi9-=4?adv5A!lZmLdxz86s*>@VdHfOh}D+ zP-9wIfkR*tXgw9b6~hlu*&$%X09*fhLL8J(!R`bt#o|}U2dy{+-I57j=?89cv4R)F zuxc=!5r=Qu`@;`uh=2;iAN(LQR){+?LT*A}a!_CeT@|aqE-;k?wp1Fl?wbYNGeDk; zejq&E@GiHwHs~y1O(y6DJWyJN6q#5jqR&fmOL&8`7P#RHUPI67xB+r>38>CpAP64| z0Sz=k4%-E-+yqTkLG~vhs@@A?;Hr1Jw-mRqF5<92Pzn*xa%2_Q37!VI0cul%Pfxie z4oaY`jyoVO_W_v;ntSO1Re3CCOdSH?t{kK#0ZPH(6LdfcoDMr;6>N)YXVO2nK8Wpoy!NB@p@6u52~W7K#{1)d_oK!l%SCXXi$RE5a{d& zaHu1K5?rByxBuKg92*L5Yk=B5NRH!x+`kLz+T7p+Sp+&u(Xr)Chk&DkKz%NFv>7zl zjpSCebBiHPRsbJ^=(r4|kr6gv!2n8<7sM2|fZISP#1yxJ^9y(#0O4cQ%$cT7PUIG< z-vT`rd@CcUdk=2LD}h=GA3*2NYA`V%r)0>DpOBdgaKd+F6}X1D=M6L;1)erj$ z71?-LK)1E9>N30#SK@YL0}5T8&A%BFsKx0)Fb&+@Ujb?`f+j^lO<&L| zAJ7&8&^~od<_+k*ItB$$r)3+XA`1_wi3J)EILHLrT?A^Wi9t#y1~aA=pjI*{ccPW2 zp5UzQ_-trHqY z4&2EEIhYkZCJSi?++YK((|iX`wQsPGr-8L-Fuh0dW}p#rx`JufKXgD&<|b%f8BF({}ya-dAVfe-5dhaxAWKLJW{ z6F?1iMo4qSv4P2&aRo?#2{Ix8UMOJ31Zt0fm?~yWPe6%>NrOqpjOmStfB*3Fr;9*6`xP+tv8|vFV&jpixjUP46xig~05fz@oqn zny7)S#|4#~T;LWiWN8X$u#Feg{A7c!h5(f+po0_@K}RHj$|xpiTa*(z)Q2b-K{YNL zKRTyq6<(h(D=rqyM*06LC@6?Dkc6%mDc z@Wt~dM9slXXi$X=PL~3ZvcR(C+UBA_Y)+}Z&ztpOPZT6pic;cvTuBMaQkkm3@& zx*61L{s3D71ri4rbHdO#Ymmrt+*S|LjPNiog92!96r}MZsAmi6zY8OU&n}R1Mz~$B zyiDAn&EH_(fCmdfEelYa8{9GkIT$=T18UiV_U(Z;qB~xk-2r#75%ewuM^HN%+~yR( zaPbXf7hIn`U0an~#TGT*&fe(~a74HZbfB^04CXAzVu1`L@S;Np1r}-0h`XqQ!1Nwf zZWTs>=^ItKMRF&6=@I}X2?Z8!P^VK+K~jN3UI@izzN8x0q8s@P!a{L zo|Och6!l0z;3QZIyx#?!9vUPR`4t2-n3gbuXCFYfD4b#hEnAsBQCV1cdQ1Sbs2VuM zDF_Oj1aG%LA_3jK0&4zCfR1XD0L9js=?!Y!qV;0T4hq}~GN6N7BtW%y3CO_$Gnqge zgh74+oubO7z@f;iz^%y1?4STT8Kz!=uf(ywxKK&Xk+CF8Nm!SG0o4BkZ@3ax;Iv|3 zP!LvNvtnRW;0Db=aPu&+Lbi`GDsh3T2gX8w76%13MJ^sjEp7#<4lxC=X%Kz~=2?e%3*;_`*XBFIm2s-Q*g5XHg@pvyTGz@Fm6^psdVT2=s^MZ@HvAmF%!8I=4W z6EduhF9fm_1RWuDBDhl|2&xmA!3$|Y!2)i(<4on?HL=jN07{0SowT3>{**Xer=QZ~ z=4TTH9SX=ZUGM{k*z~WO+~Ua{5?Jj6H+Vs7`@n^af~W$A0*50@mLsDATZzCYRvu8@ z09wq4&8_Hqr{`;Ni`J87k-!(w?hsvuC!pi+L3Jdk7Y)h-JW1S$FyjZ8C!nGOBn8Uh zJ)nHRs=+h?l<7es01lB32}L$`kaWv*PHk>=E=VXOg66udw7E_2WiT<&DUXZ_93_hE zkURyM(2`J;P!I(b8ElUA;5Y~68ATap(49D(3ZVI7nL;H#a8bdl&&Z%84BA+%#O3%O zTwn-*O0zl4ptY7+W=wPHnH6{yxIje+H)y3um5K(^ki{RKr;$v5;)_7?FXBM>_`E~{%p`~BHW;<)hwj|HU$9`*Rq3L3w0~6 zf-uNsykK*9l0p0M|1%aUuz*gx2f2mQ6LhRQR2BFPd`01@=h>9%@s`Mf(u%yGtGzLT zRSZk8N`QUCrXT@wBvVP266kypP>}v-E(C=sq-a75iF#(x5vriVic^8bnvsbomfMk0 zK@=3y;DQYlq;9;iQVPP30?-is&x}1tIYH~y7{HF;1L*~uha99J2ZDz{L8k()={agxskm6GTbVx!LXelkSTiCc^4O319&>%1CpzA#@4MbKdypgICHyanDaA`b6ZbVw+1@rHs6C<#Sj5ZxdFT2v0+ z8w*l)7~Eq%0Z|6-JAm3Kpdw9`>4=0BNXG#QC2sIhwV*PeO@RZ{Az@eG1Jx~xT+<~D zxs~c4K+fm~b-P$W!*7r-73fSV7Ldaj6u3e6&Vmm|gbN5*F^GT~rEmd3D+UEnGafD= zWW}HZN;q%Wv{@D8fd>LgT%cqD zKB{>7$Jv4+e4y4kXce~-zXIqW7Iq_U$j%6Uflr)}j1M|Sjwc*^Vjifk1nFUe>Iz;3 zVFhl1&*1qQ#Bu=0(ofz{XcuQS6E}FK1G0FTsMtz|TvyA*8w_%D3&H3LRzQiaAs$MY||edk=Q)Ykfg_G?+R-`j>;) zjuTMyi$V2I!LEM|NQ(y31d#S+Ox)8qrt*nR-}8l=2Xe;RRDQ97qQwy{{y>|VL1C@I)B@UP%&NiE0NM-)?g=O$#!$%F2cyKZ9G`-; zB2ox1qXH)^X<^d{KA8=)G~gw=J|0k-~dr%?!=-GI@K?8=M z@p5q7f|3S!>osBy>;-7)h?fGZW<4{3Gw?W}PT>SCw}4EPJAUkJ5OBQ50GrD409_-2 z?5sS<)R+gTChd@b497P}D6;W*fP);oZ5>olKM)5kUW06aU-Y0!z)=^r0bW9hjThuR z<^vL-poAoGP%K{9IZXhxE&?(z4^ji(Hv>K_tsZ=`E34x^M#xdG;PFVXJpx$*pxv3^ 
zi9$%LR}r$Gal1Ps#8IHWCOoTyvN$Nk@mn!~&V~Rd1kiE;a1I3(w6Jsu3SLCAP?`S5 znp@ok)YE5mya2yf36eI%%t3v9^t1sQGQ*cP+@Wa$RJMQ)P=`#Jg4Z-aE`kS7dV}Jk z3MCT6LF@2Y!OQ62ORmB9y0Y=O!O{jeWq}JPQ2IiojTNB9d#>QLu|Njxz^&}MyzS3rrIMc^BJj2omGGzJ9PKMUUDz#;G*+}e5|4jG6APvts- z&IUwGS%69rn7R|9iYz>i+~D<(ynza=j=LFQOG+Ta;GhBD$KsC6;3YVqga#6L0-8Zp z;;>=>56`peGCTqcuvsxc@{<+AG0v`W6?iEIXf7Ib>;fd!f#;s>Kt&~J z5*`$n9YRXnpx9sqac+o%x<|aDkujsa!jVTs`=pXvnek$dceyKM*Bg z6|gI$u81nJ@!Ekx`hqB^m#N8o0(3DmcnSn`L?OGtFVG}AWMRkzCQuCq5&q2ya@Z6m z(9k7lQid-}0i-q(x+sR#5wyt;yck1)T>zrE2cj6h&>VEVBv=z@7T*HN5E#D!*}mzn zj@+vCpiyZS&~O!p0+#}xz#p*Vco})DK)3uUa0>hd3$QqV_H#lxps5F>r~)nD5%>oW zH+!T&fdm_}9IN9G(7kR@m;UCQ{t~w%7`S;XK;2-)~fwx0+nZu55TKJJNQ9eba2NPTubwVSKxuR;6ln&P^N_3FAF;10MLVsC5TkYy&Ew!L>hVu?=XE z4X9NCx|GiG1%H-fJr8)b4XBi11ubt;VuP=@xxr_~1U~NX2Y;3$yFitKlLD$_2YA`6 z0OX8s(1g7>VigXwvM~n*BB<5?E!_gw$KYB5QP~7t0JTKSKp}JjG2aiWb-`Q1z!faS z^&DnQVApeiRsn(2G|2tGCN~N=^1xampjPsO>AW)BGE#7tcT8yn^?KmTb)2U2%WzAD zz#TskBoDvRmjRR$E{H4c1edud#1(fTmgQW4hdX$tj1?64;N7X9z15%<6-PP_tNstk$%VQ>xrmzpmGVf}4T5e7QNA2jLv2Gn>kf|Qgm z1VL@31)z<|Ah&^(u{tgg$P%dk56`kjpu;r5CV`Gl1DWszYJ%em0gwf(8cZJqVOKMP zPNNk7pF zY1BiO9Do~U3xpvn%R#lmAJA+#yBX6DL1;k&TI0&<_(L#Dpn*$~jmHQSEe+sX{y-iA z&1*D+e4k^`Pr6l=#e;Zh))--AarYygLF_1-X+F+yc7+3TF-QEtsH2 z4%7u_pb9mZP6&g}+W_5huE3}OyZ-PCRN4`8dk`qRpyqrK1~n`|-NNtSNCU6iHv;** zK?J444ok0?y75)mK$?O{f zSprR5pu%bbY^gZ3X=lx23|iX83sSBylgXU<0O(YY1K=}{z~+Ef4MVP-X9VpWK$vs` zwk8U0k_jl8Z4d^XL%`w63yLvN2?{IF6rc_R%Yc_2qPna@0JYHxa+w13aEu!uLwStq zVWl!?mjI{^g_#L*J-B{EHS&Ns79+u>vVx=nJ9H7v1<=}4W6-W=P*k1~2c<#{rW4{y zT#%E46*xg7*Wjfzd7$%PML>CgL!cQv0{#M|*%Z`l5Czq6pm1XaI}F@%b3D)BCJo-j z08xDb)X)K`2HkTBX$^r376lf8+u)IpI}qbQxYKp46e27JjINCA9o3v>%4=#EU#8YpyR>8O#{%HL=H%E7c|1=I0M}J1c`cb2>b@0 zl%v5khl!7gfeCcj8)$`)0>pg+E#TYdK=(EBf)=yh5QaDayk-pIFmR=MLSlM>gP3?d zsGS8)RuZr}1CoLzvOvQWP-lUB_yBxqI}J~C>=vWtMQjCRXF4$?P1G5XkVxYspruX}Di`Opz-9HFP zf1rib{3x|Oc<2+90YGP7BMz#(1Ih&2kaf%;Z$jOq0dA2Y)d_Dv9d3|1P<%sT2y}WJ z_yB7q4)Dd2_44g@s7BLH3#59(Edn(5$M%x;Kh zIr0lsfp@!st~+aERA2>3f)>t$3M|krK2RV%5U9s!f9t|ZdQis~G~)&uVC2t2w7l9Z0U8?vx$64LCILr&SmRqu!4*_;ft=XR z2pIv?0+pMf*+Ixkda!F>f;7ReRnY>)(F*}kzX`N~M3Ie0YkE)sw{rav(CSVVXt68~ z*|q8j>ZyZLCuEl@sNn?end^Y80WA+VW9k4kus~;7gKDr2Az1M|LkP8aZfE4t0>u>M zz)7@}2a5W7&@dM$U%!D}i3}>;AceqkcP3wSs zxIqX!NCC>~Ea1!oEoQ*IA05yL6}Y^I3}J&-bk^%LE&(-ZKyn+zvJ{vc?}C<*gAS7w zgI?9R1uicRlGg++C)Hru0G9^UfjXev3U+|FBa1*A7i_V(4k%r4m@$D?BRZ-;SCtw; zhdV)=2W&t^7HD}XXuw62`2$)Dpq-IN6BK?k*s~P592vm3HHtYhD9R`^I6g3$t`)>B z!+2x5dk}Yw`n7{CpmmH3*_a$aCr!C1f!au13gDAC7(sh@*)^CPl=!Bf3gWI{y76JU zOE9-OBk%N_U~XyF6QElgrXP%x6|PrcSqnah?gEnnyW2m=xF*_{^9dfMh>_4SWD5 zUx1CB!U0-249C;YbMvxJZ&=Uh!m7Zo!SrEzP6)SZy|OtI11KSJIVpiyn%oLpPKqFw z28g8&VyS~zY9N*xh@}c*se)KqAeIV)N+4NnkSzGh30?&jfgg-! zOg}&_ZeY$*sAmTsQp73nicNvtaRKPS8^;c2*iZmFxCcFfS%F=FU6Z+i*_?R>Xh*~f z<}A>~&yEsVjyu~LK{II|7(pSp2C@i~3$zG^g$HDz;|8#aGeD#U(+XyRliUjIj*vr! 
zCNO6yaMpu@Vgs{48#~B=1DFQf0BOZ=(g`FdU0?<|ZU-}{jAaKOY{RR-4szS#rbbY* zfw=7nrqQ6IZR*(_L1(S7YcPQhwE;Qq1K2{)(LbCZx4nS5?FWVdAZtG`n=?1CD6l&= z=zzimbm&I|C~Oz#g1GD&Of5Q~mJPdO2MehG#je4$K-ZD65TpSlARyVmz8rL}61zTQ z4-0tY6c&2ZCnkuB)N?4XJ5FE$mqA=0`)06!;!**W=wWfW0A&3N7Ep3t05%gGm7t?& z*c~^ppho2m7D!aCU@>Pt0Md7X1?)~xaJ}ByEa1otIsk}WpYaOBG4)W_fGl*p0k-1+ zh}2-ZzygWO2OzE^=sXZ`aNS^mM&%2L0m%A0SV3+?b`rZL^8*%h<_{oOfhvn1AO^c8 zGst-jtP1RokrPb@zg$bkiR-VCtu4Iom3X#y+6d7uMz*d2d>!UYtY zGgzU{TQOZQf?HU70jnY?6h46D1sd4FNeI-9-@vNC0XjF%Q332^)+~Yl)BPj3rRw*v zDu6PD9n%hw^A50rBh_&SE4YWuuFrS`!(AY253rgsod9XLzzRAVgWd52D@o1m(qg<_93DABa#vN#uV}YzG==cC^?12(9iKVa$y zE#zQ#M0e5;BquenE3nsVGQVI04J5FGS8hNHod<^+Kr`SFw?Wr#K*JO4wi)c86a`vo z!VX%!0dd;`upnsp3OEzZV28SG1!V09vi<{Lr69M}Gc%YogC?&*IcEbqBx8fZYX`Wr zUcjz62UHTBV220X!*eYHj@-~3bOzJx3lOtGqz2Ooc8J?2K?E~0tH`o>Dg51`?0e9Qa>kR^qoKUy5a3JSyaJY4V&Wv*W0J1=X zsewb_BzHZiXatMCfH|&%0~&BMFby~WR*LL4c1`9997>>(mIWLN?BF4n6(9yUJV6X- z`r832gh7XFL8JKS+ZF*wUIA$OJA-M}1&CE3QiJIP$gQ9T2Z-yqfddwfFsDAi)ZYL) zTn;7uv1>Bl;4o)?0df_{c^^0w*y}-g6BOS+Kg2vC-9YIItLBp=; zP7`Rb9g;ncV48gbZ1x5asljvr>b46YuHy<$XwaR2xUF8F@dl;=pcD2m^X>yAx4i(n z?E?5bICjS$h;X}nx*3sm|6m&3zy->>FF=mbVEVx+4+*#quqf#4IdI->;DQF+gy|dN zKx^B%%$a9^jy71p1u8As9T#w9fhUm#_Hl!nJPWwc<42ca5f^B<8L8QMfe+M{1LuPk zVCz@7q!id4Pe9#%fGZ1h2PwNg;~5kyAvJ{t(+Ms!rVAi{W0Ef*5E=6tyHn0OOaAk3W?hghvqi=wM2hx55-Pnq?{iMmfKzF$!pJTlu zC?U<@0@Xto`5?24pcb|Q-*lY>ZWYFc>46E{V!A&J6gZ%}?AaYbZ83Jo9|oXX+gT7N zHnJgiyyb%$bU-C2s2yj<^ubtx-SLBQmg4~ifhtEvMQ%{XLzB5d2i`jdZMO$C1k9Ot zm?*G2zR&?BjU6ViZZW$);|HDT_4B#qb6)6xTNR*#2RlGkfU+-EyQa-*6aZP6D+Q{b z8gxLxqrudnqX;@E4|FD$;|AR<@ca$vY)Vk>bzGqft~HrJvq$Wj%nPPpT*ECoU2Xxl zY&|GS93Oxa3naAu@PhoO$^3xJocRMN#D9PSQGrchJ~$2i;Zj2CRe^MaT=9d;jHvJ0rCv~(4Q>&A02|!Eoh8uD1xkTaxY2aivuiMQa4YhH^xgnjB=C$Gbe1T1rw}NG zK#UTY13Jl+2b{8zl?l1?vV)d@gAVsVQau-;dImSBG++~$3_7;YzR=O$jaOQMU6)}E zH#q4|=S=37t)B;0FN@p=nZX0<=kS1{YX%P}6@eQe2fzX=c)-Dls8o3%LAHR$oOuUG z-w7UgA&b&lKEs2Yi9z-`Uf=Juy}xJKm#wx zjmS;{RRTQb%r8K$0;Qr4AO^zW@RzMROD8x4Kf@`Q5ypZ&N0O2^4g7yfe*(bne zZvc@ROb4KDy8z-kuHXelJG%zc37Fe%VCV-`Jr{V*nIF`H%8C~t-+3v10v{~1EWyyO!8C)9)e&?z{T4n2c5nv- zM1z_pD?ldhfI49XUzWgYW>ALPGu<(bTeN-$Xx0Iw0(7+?s2OkoGMxZ!f}h|6HNj<& zQ~w2!+6Q3wTmYE~?^rP30LgsdLrp-i^z(%eIsJf4bo>D_#Lp3ImIl)YK1c!rP0+G~ z76B-L3;Z8^&;-=Mk72+BesHjAFg5Ul&S_xRXPh!!Kb>2&egeN4(+qx4F69S>BfH}a zene+!0YAu3(#U>V0aCkxADml2W`h0XcmXW1gC8|VAK-`N=neel%qKwlZt%l>{Ou9A z1qyA}-9d3N$U?^lU?Wd}NDZbN{16|5ZeL_~JOHv7ln5U1Lw)=KQ-24z5Jz{?4+T!scTz7OAsLKrYQo*p$T@|46xY^AX0;Af&j#A3qV}ZHFl8Lm>~dl z+X@W*pe(aMzz#HNEDoyHmV;KPY!HB?#T^2mF`MZ(GPus58iUULV%K1zuy&oEpT*5u4;o75RA2{B-?KX&5XchP1#Z%v!4kZXq2&vpkOa+P zgR<%c0YtLCAplC&QlMlFDn=iG)PWX(vV%sx6*$4glP<#t0asp7A;zxB{6fGyoVh_z zf!(n~5R^+G)hTFLNf1=2UE%|+BLEF52_h#IkOhu21hYV^dKeVgLDU40852NeI6~`! 
z>0a5~ilFWKFw+#cK~t>k`i!8dHt={Tcmo8e7yu0eg67keKm%W(fyE6V;|>UdGUEoY zOF-4D0?1E0Kr$Bu;VJs#DR3_ylA^DmI8%ljG%k4qYzo*c4W z5QHY_7w86n`>}P;3hc8&yJwAdhCv%Fk=x|!U5e13~GB!5po4>KVt{&VVffa$viWJrWdZ}mSi-U z&X~z9R&RpTE(JAt*&RWRMx^N=c&ik$dW%CizGcc1}V9dqUdx{!E&p#v)U>On14CC~vL#-O7z&6su=f%3LdmZKnO zX*lEn8pj33Sptoq5mwNuTq98X7t|a!f}NHGN`8ozFzB=-P?2HI3~n`pn%UsCGjxK8 zU7xYRP>CC~%3hQCN4){0WMeR6`T%keC}BWchSOmSFLVev3c_ZHL>1VP9Jw9ZyATEW z4P=5D(+7};J{V;=ZUO0opOqvEN-ZCB>LHEl7dndEJffhjxGd040IvoUhadw3NRS;g zt{|8ta2u4T76?IWDF%=MECTDmQw2+e&~p_?c7c!?(+VL_fgl9RlI-BcIWyQ4z?Ja^ zAy5vlmjq>T*vRb;km3WNvvM4Ffb4n7`nw1)akv#`C z05oyM?uhIpc1`9VLgvgJplfSE&YJ*YK%F-ObldX^VYu^NodVDCfpYqE`+ROy3CJXX zFgOhhL&JB2ut3}NntX0KbZd49E6rsxXFdV40A$kz5Cdw{4UkPQ5H?+z+a%z~HvL9E zw^lvak{@78F2F4LAPn;(0LU>QSDgScpkZ(UbZzYegk#>GZV__p<+6Hnv0(gi*i3Pl1 zn^OT+?QlcOLC_SZCNt5EtF60Q1RTu~0nGv`QNTkNY>wbY=my=adg#~%8)%a;xW0vl+Nm4pI#Gr9<9qc>=2D+_3r6Wjm+XIn-qhAW_& zgvX5O0*D3;` zV(8ITU{~NVV`>1=oMudaKn?({u+~vv2bntwBo6A}%79k8Susr1RbY2)YTz(q=mj+b z>KmXJV6}k6XXrxQ_(CU3pd0MQIlACpEsFvl=*V;|F1-M{8%cv{hOQF7z)5CBP#cxQ zis1stRz5SPB_NvLj0x1>aC`wW9kjGd3DjIU0UjNzU!a>M@D)5!{03$fKjbbe>^Aa3 zj)~_5rKK0TN)q5x!zIj^K!f0OK-YJG{P9B<61(h>zV-)@#RBl=?;j+K>Or?=VYi9h z@yeYp0W+o+Jy4^~@dQ(r8B>Fv0?3CAdRZX*f9NX73VZ~OErC}{aVmhK1auOQoB}Ac z-4zv>z?ZZrh$+Z|7AdkTfLz$22Wm@zOmOVbs|N+R7_=!pLk}9*xI+Nc(gRIwYBEpI zgSW32fJ|HgHgSO-Xw@>PeeHMxEU-Z@OGyxvr{DuvJ3#&c&8O;_Gasl2X}zGA1v1m| zgie+tY73k=2RYsVTXg_LYA{{UgH&Y?KwQTiAP0d0>xLe*Dtm!!XuSf~qy_S(V~2j0 z0>bH_X<!U)kpZ^=>8VOEdLaJ+`-BU&UModRB($!8V3vX zK{fsln5$>#LtVWBk0D?)I3ca@11OFLIdp?Qq_wa@&zyOOKDb!ZhfIN-KyvZCwtDac z5~x9>&v=GN+b)1zy8}dOFrCnc1pEyU*KvbBC~2{4FkOIo;Q@X_>jfS{{m@{57VtOp z&6!_-yaDph2M_}q@IOES-(i3V_>V`BW)pi1a0fhcm`yMMg*o>Rkh?XQIt&C(PLHqR z7ORIeQ5Fzs$_^A)gIv1801@&V3_u}21KQO%faK(#$dimmkSxPhHh>f739y?tfJhCd z0|tRCfh(FBsJSWp`X*0!mn* z9nR2(kN~t}He-5V54U_IsBZ}h9OEoArVh}IONU98BV_alyedqS`GYZ*rq2V=M9u{h zP?~#Sj5IlO!$gS<-0~5a{;`%@y#9g-s2$7>nxy#w>KTKQ6jm!wKW|5jB7HE1bSHlp zD+(xZLmE0KOki^~+>mz80TXaXj)?(O?rJjcFafo5xFPKvFeRhF&7>eOJ+YTtN?QVS zT^EixddSHm0Xp|)x#M{TcV0%?1gRdlaZcADoq3`~z)=s{cBFccs*B?fQgLXIPOom@ zmZ5qI9f06`b)1&LSuch^I6-Bb;{-!chWh}jSfQhBQ-~`s7u4SryZG^+N&7-Tspw{wF#R^V=x zAgnZD0^gegIx!YBs>aRB2s+B0M+7t~!)?a2#1M2Agc;KULrCs|T2N2E1+LItjNouN z0WOFd;3WjR;{mY94`Xl<0dBT|W}b}A!L!Ps;n@o&3hbbs%LNngs_qFOE}I$C3{Vj` z!z9a56+CRW036^GOrXPdple=CKpq24qL?5J+g&h$myw8JJ8*FdDu3A>JCG-^H<*AA zzGv5Du3v#D9&dnL04g5AE@O8DIZVKe3G6TdB!_+d*CF7jiaczGa3t!m-35~@Go}R~ z4=pgsa-8$O19ZSBbgw0N*zQKXiJ~AV+Ao+WvVlhEKu3Y;fX@~e&_E2^Ig%T^5Kpi{ zJ%Jp$pq;HU3heccprCC6jfjGS78k)> zwTMl!1QtO@L%}13pol>V){P)NQn2U{0)^%cc`-0Lnk~0J#0SF3wxHCRBH3&Gu z_xgf*hzKXa(zO66cz+l}5-q4k;}O6XyaJ$7XC~8fB^fiO6NVuBWz3ik7%H$kUI6>@ z1*lmMo%*?92r5bH6$L?Cr@(U?7YxmrAAl6UFa)(lYU>m~t5LxF#=eao)ABDfl% z^`NWPctnu~*+55FIx<@^s4B37+ExMr6PZB!1VDi+09stG!6c$6%Iu&Zs2~dNayw2i z&Qbuiy1>Vxf>uUygIOKM3ZVJ64rB0&B*-}&N}xN##2jCMPcZ~Hi*a=UKzA8vFr8q8 z+=vENioIR$_<})^jR!Qv2D;~X2E@`C##su23Ty%oxD`PcGvjR9DR9&~zF<(`0B?Gc zR%GLmWCgb<6}c4zL1&eM;+=sTlpi^=6hI;2!~kxwYA|s?f`UhpjoCqg4U{<@S6~l^ z1;#KV*+3%~pjEXCj6q`v@0k^5FxA&6&S2tU1lgeg8gSnLYC7)#SEd`ls{ZhTGMg^L z0b`IQpjg;p3|drn0_0O@B4T$uVVtEfgGt~DtlI&)RTx*V;~^)HB{)#cd-iHLp5K#Bm*o;X5bOtPo6@w}$tZo=1C2&sAEp4E_=mXg1 z8x*(IV~%=(Ds6qnJI0_66=?B|G3pK=4#-J{xZ@O?9?-d}pdlnzUSUwP12kNx0cy}I zFbYn8*Uc?Y^(N>5bdr{JAWgM{%0EzIp;jkJz8+_Qg94OYgK3SS5*z3qa|Q)A&>iC} z3Tz6XAa=82;8kFAv{#tLRJ9z`FO_z*FSKF+Eu1<5Dng+v)P&tY$=k8P2$6IIK7*%> zTa3UdF#%5*0BW6ZfPC9v1RnVWmC5kq8CXC^i?f4<-UN<;4*kdFkLmLL+!6$Q>4@xP z@S-|)#|~sCe*rtW2jXO6T-<@`VqI`aL_H^i&a>8KP*GxYWOM^9v2vUMb}c)o+HzzD zT|UIF&p3gwb3u7hmtm3-I8Q3fWU^zLVFWrLN`r}^-i&DuX!dsj*kpK#G{Y!M;43&Z 
zmKcG0{rG|&JSrxj%b>!lz{U-k>H(*Q1KQfEJ%BaDYlE1(3g|H%{OdsXw8Ug+DoP z@(6Q-b%FF`36PweVEP3fGQ)?01mU%CJ&xgi(9&Q#rVFsJHDkI0^3M&_puJ#}CGZ^_ zw08){9H=#^!E^%>R?vHem^7x_P2lEYdI1Tv`UfCOK0pKQ0k%NP68Hfdg(o&~f+t!) z!A>v$>wB?R?Vy<&4h3%H5EOuiAhrM$gilh5gX*Cl0$BlKu13_ z7dkR#^Dzqv3r^oOiCdx`vU5)u)H;N%6qum~>T{@pdbTsvKnr}p!~O@r0xQ%Iy+=?3 zc7qyZD0G3EIr9#Xz7uNjnd<(AW&uYb=t_YzYRD7)Ap0CIsDT@TJ3t0&Fr83?jCI}s zaUC~+EC!Wr7u29*oewY#Xix{a5!p$gN>I(5`31;TAm@DmF+i(U)y$cHfSlK%4tE}S ze3p@Oy7pvlEyyZ?8DR51z^tC2F3>hTcQUs`{RyxHnng3z!P_}jsDqchn=@|!F+i5? z05PDJ9#99huo0H7+T4oR!gxg;BTR09Z8`uVHJC1_LxQCK0f_IoLmj$&;f6XiOkQ9b z(4c`HCLkw$KyuO#becU8hF_#bNcJWHkY7@DGqnG?+RxATFB$ z7W@En*aQuz!xo?#0Mfq$tQ0v6K@MA?fk;0aG(ZlUp#j`GSTy^8=9cW~efmGrs__ppp7P z1GHLA6CQT&e>Q?vf-IW~UIlS+Dz}(w8+eQ93{8}91Z6VNJ|InV<^`I|9nVi+KaX36 z+AAKkzyk%Ko(+L1dQdt6jfW%6(1SM-5WmXt-GLU+!MIe}L*OdR4O%$r_(Cm9U<)^R zTy7TAawSnva}~M(f(?8d0HfOgu6-~V=(P{)8=D0j4WQ%K4LYDT2heRBG+g@tF0EH+ zg4(F;jw{qrrixZzByrt(hEwd3&47bJ2XKJ zCk>_znu=ly>;{epz;1;%$`)vXYBBgc{Si&jHNuPvTngYF8zS7GMUxWL&WDiEc)H}n zUQj-SO=?iM+PEGZ*(X2&2wkIlKofLg61zU*1x+Px@QPe;unB`MKH>&d(PuOj`9bsv zO(hBN$qIZ-4$KY;dBCF zB?<7k0WUN`lW<_oFTgPfZih_J0>%Fi%`7DWl;sBvT9BILgQhuihZbn%rWU+_MyYk@ zXer5pt(JvZ?YIDJW(SA_oi(NfsoYk8_>K))&k+QH0k;1Dh}2-Z0Cm&@5Z7@B%uzRBj(Pzz0HMD@8{JVK zkR0^`?5GD?;E^524sB>)1!Os*EOO}4Mvo}R3EJQo069d1sY4qQSTn$aA7Jj9pbZVI z1=A58*LYr1~o&o*pyqTcmL68byHc-k3FFpg`-N;VDzB#4%4d6gqp$#gY zKxG+dIvX_Zy+fM?G}6ef!L&hJNdUCydIl3@CYDJ7JUo9uTY=s209YURE?{0zX$Bhz z6}Sp6Z6#<^t{KuVtoMM*HA6&YM?qN2s)>VoKl0w7NjL?*5(VxeMQ+e=CkyC0IX+Mi zn-A2(rm!l&zOnN>9XEEanA0fWc%Feefd;7z9&2YgGFUS(fENClG2GV%O#y%DfpuYbfX$mA3$3#@$O^Pgms`p$$80x}zP3OvZ{H*!b|lV`|- zBh>MMTo!a;@*H{am^L41{jk3RCuod|QvtfMRft=G(-FEnd4{|aKV*6GntFNA`eY`^ z{xPN{@(S#Znr2K3Ks19D!(kBJ#A3~GP#(ONmbuW1;Q)xqW5%=xM01-l?EumIW=vZ^ zYEMAjv_L*f;1Q<+=xhObaQlW+fgdy_gYGl|4Nw~q?6wz>1<5Dml>`KivMGu()jNU$ z@dPMi2wdcwZoP_IzrH~MQ#fEJo|W`{g^J83Vd2Zw@K;uxl{Q zP*4Pw2@AkxLA>{e7j(*lF2f2176%3JGKmEWpiP^gDq#cIci=m`+1Yg&9w<2SxFc`n zz_k;B4SXsgsKvts*^R)9x@<-ez8e9&ss()bB<@WK8sOZ@0xBTT4o;|t>=(m*asp^u z0u#3aXu1ZnB0&(c&I3NG3Yse#eJuix8t~j)5AUOcY=G@gAY*$1uNf13djhCy z%?27F2Bi-L$WjbfUQy5x3VLd)XMklj@Qjt?2T*DOckLVxD1ep^3N-M5GQtm{lL=@6 zlLpfV1tmF0@J6*S3QA&sxEUNFQRLX52pWIp$x@I7_Z#c=89Nk-P3gkij*NFQ_2v2HMW*I71Os*?_Z%f*`arxJDl>`#-j_M3Wct3T8B4`^P z*xVJ0@W#dku)q$*EF}p|=6a;llMaCV0NP}wXwG~Br1gd(tb9OSC4WazNr|Adlwi(s zd;qra1c=mNx}gYZio5`E9S_ul+yx5G2a3?j^#f^!Pf*HIkbw8ML7Ora&6$6Id;)5s zG$=uuP#YA@nLCs~eQzbWm%zPAO%~`hz#Jt4ewzMs9k(20$@~f>P*gQ2fvXV>rUgn+ zr*9x+2B=a&gv$vOr-Pik1If7ult5+13MEK4_yWY;^`Il_!1DvJL!z!Ik&;9lZ-AYC z07PmqT~LC=#RCx6aRsGfnj!i53>S#RfI@?pdAZJ=FA^J0sRAP<_}QxLRUZj z!7vSZD?WuO#n<#$x_fzkPztN z0V$fJ;>ybmy2a0oX^DygJ7^Wl0u}H+DkT=j>6NRw1)u|R`0EeQ+6^VN1(&dG%xs{( zo+9}CXLiRHl`PN(FVI3AUNfdY$`C7mfM}4<%$OQfK>OQOU{xu&_WPlnC1BSBE*^VS zh%LjUK@+H;6KPbGSQJF33vA_T$_(f*1IJR%U{>eSzj2bh1Fz zE@;~^=-dv_>M`)iL1N(LY710A`Q?KVXkjL3#~^5+73CZu#Bw#1+OQsJt@;NY(0SXS zEP;PJo)Rx)_cLhsIouiD%?$#MVz708psmdaC&8Ahv4cvX4jssHHP8Sy4|_dmy&7y! 
zATL_w%LX1|oT8$n1deJ@5`{$F2572T0gAUB5U+!?7HlWM2^Au)Qa8!N`6DBmrCUa0Bc-cE=SeS&mH5B@Z`<4H9s|(`C4&0;)yXK^fB}Bw*upe>9e+R%WC7yNU&apI4Iab z@wtLPc;DTLH@ra$LqMU@qpAREUfMBrfYuF8P(>*{I#ja+`oMuQMU~jX17xrU(*#wJ z@0c8COy9GeTaj&r3TPq1^o85F#Rz**j94#*@4)Lt@Zv~F6|g}SRGWi}N$~Nq8cYXN zmBbWeeLhA5_8X z3|SP!+*ur$928g-Py?^J{Gke3oeElO*r1jr(8i-A z0bW=H+RqFUlmIV&K~9rjIvEj9%iM+SjTaPam(Rzx8C2Zz;S+>l;dhZ?Ac z2d&_S8dneA$;u5{tO)9kKY(4iMzGll_7P~>MxSwt8mN!}pF1%Dd~h_@ZV0T#1GUXS zbKqc=BK6pMEXYd1_6UR4F;dWT62#eanqIz}n-@?0D+p=tL(Z{20Xm)Vh7_ohJs|~a z=Yx-%xd4)RAqC1M;KQI>kk4p%BL&X<(1Z>$(eVS=5U^1iOfNj7Ann5+V9^U8qd`U7 z2PtTKw?P`+08st>L&}`FLmJd(kp{Wt60J^9pyN2$3o4F-)q?|?(TZV;G-M!A(n#ZBH$d?OT3ICx8xOl94W3YCb7Zgrr+F5EhoI~Mo?aD18V|c6 zt;7u(5979Cm;f5C<~Cz`0_kF92!WAPUcvkg;-TKXi*Ia-ITN=(q!H!L&gXk|z&< zxQ;VG7K8HS4pC^HJb|hI0az)_N%f$f2B@=n0pTX*8=|0`dq5PjP5TAHZIDrSsN3FP zn*9N6_6-oJ!Sn*^wjUs_;{}-8KET}8Ag08^GTr0=w}>6gH6X`;CKg1^nLEV5O%`+J z2_ObLIIx&!h=JOmV(_qn6kyYv4}e!RtvkRi77JO?bO2;F=#~O-!3+*J(8X5lj%YUR z5CdOR25KCcGhaZm=?2KA7YLg^G=sLMPggw1t>p}!4fr7jifYj1XW)}+K8Qg>ya968 zC$ty^*{sR@1AGM^JNTkL8Xh8}03KWfHL=-I`cp)o%S!x`s^g#{Pys%82-)&W-t@Ap z6ey5DCu1pbn=wrgS6~O7f7>Ars{W^6I86QX_2{_rtP6Amj23l*sU8$epyGTED2RB> zm}Y>2Xn{DQ@PpM9*LODyID&c)GsIyv#S-*X39l&@h@;dL?BMIcU{myIF0awcQgA^Hav`YrSs@Nu4X^{`zIrpJEg<)@m@#bt(V#tYAaO=3h67;Q zkqOjc1|7~KZN+d9By>U?mcK#cmEu_fC%{v#^$+md%mz6b1$=PM3Glrtpp{wt&E}x9 zQ5=7WgR<)lair5g96x}?E{JC-iNc2rK}F#UaYqJ4KG4__2s4A02no3H)`QjvfiC}I zckBR#34D!EhXnYJ6!Z`Q-%eujiO^9GRC0}`+RLLG=bA_3mPh3*hB(9kQ$A)t#jK|u<#NQ3Es z1f=Q!-KxayxB}!5P~4r6fEHvo&`hg`R{IF!en?~~AlwRCiYj5w`~c)!a43Qp?3&CV zXMX@WyFn7}?BhL1*H*Mh;txN^4oOf@O<#SSn+r1HIsq*30_NlnNvM-&5HJZ;t4)vu z-*>Y>5>%6b&3OPa2RbUbhJXq68cZuBLCeS-H%Njq9=qcLuo)fVSQs^^Mpp| zsVO)PXyh|v0(+1T$%Civb<~5-riJcD1~uE^z7>FtF@g@x0w0rZ#p=^L_aHs+ZRKpBKD6Tp!}=_NtIQahKx4AtK$&(Qc~3dG zVf_I*+zCDjk_|N437hNWM~rrYTm7t{f$|&R=zD=Et@#B`f=lWL;4I*U(U^yq9J&m5 zBta`-L8BvYpy~MqHPSO5>f9_WJ#$0SGcRawB6#H+CFz+L5?dg*!w(9D3~avu2MGM2 zQ1H;RBO~~5eu|PIFK8r5vmSJm%nZ;_CXRFnI%f=YP7A1T0BsrCVU*=~8+76Te5f0= zbOPZ>STY3N-UCgB@YL}cqz`^jC@ZL9sQ;j&2%gt}p`*yl!^*9|r@##jZg}!QuX@0@ zk%6n81}UU7l^lOaW+@=*Ty{<76XNE~A3#N22V__QsaTx`-l8x;s-8eCF#|j-@c~}R zfZW_6C2$IyBo|2G)y)SQJ>vt<_eepHtK~Ff+9Cz%f-tQCjhiH@nlY^aF?L9SN`6p8 zfX9?qNM#8$g9p0yNa5}$LnfnlNP#!SF)=9d;VHIZw=;ltJ@6{B~b0mrAnuOJ{`zzsSI$WbDz9<=cfS2_Yuvivas z&FUzC)N3&PFaTY+ETh1u!0V`!ljW$Btpr}64LLfISApM*NkoAcbh3~Pgi=x9g`7cz zxF>=aw38kj!Wv8rT%cvFyp9rC3IfxAT;Nu!_gCOl05yF;I}X_$4L?c@hFT;O+HaD!qR z;#u&)Nh%6pN=AVjbP1n?0yn6f0-X~HO|RSnhd7mZr@y_(EzSe;nF5yr?{tAn+$vEp z5zwFrN~k~r1?_$f{fm#Y0Oc%I8GbN5UxU;}Jzzwn#K>g3@mRGq87;j8pb(LG1 zX92ij!3;9`#`KF4>L+|X4FF3#}1wjod1~aBVAeZo(F@ZL8gA&;X5TDPH zxljSLYS)peq)1^7cqO`t*>vP8DNEh->P>3jkhsv?9&CGlHvV=;p8n(DCDppvXp>Vh1(a5Szxx zSdhj7npUZ21EmvCK?s`I0&UTPjL?Cc{lFwkg#mQy?HtJMZLE%{S3NHPUG=QNG=p7{ zg=ZFs?Z|G$0KVs(QG-cGfy0rh1hha6e5o$v7G=;yy3b()(5}2QnG}?f3ULh-n9bypu8$jj?TQTeaT`$Z59)1Dc)C|5IUBrsv2UPe2=+@yCLZEBW zxdqOHuX$f1q{P9iqQL67LLjRibRoM2(+VLaZh?!;kPCrV2!JZ!4FaHh#05ZSr9iGR zVg(%%!RiP) z>Igbh9pp$Q$brKm8cg*J0>43r3tt8u#{#;63$pfc1{3(qGInT(mBkh8UeIU?tK$bT zxYdpw5}lR-@%S~AZE_oAfdntYDnDxIf~U0bR9W|8Pf+rNZ9=lRA6=dA(-W8 zRxePc!0HG(CZ5&tgJ2e@m&ppc`1gaL8B>FhBcmedZsrCd*m+{Cn#?zZQO*-%RR9f! 
zoB#)wFvu~W8!{k!Sy}ZNcL*zSfNoOPWZobQYIA|_iGBcb6G+z$VTF2DkoCe@N+_-a zU&an{-``Ii0*+<^&~7FJNUada5|C42y*~y>tU+81x~N(x%W>;xK;8(p|X>vuDPvKfoQ9!5}$ zixJdhVpinfQ9-^?fF06}GlW42S1e25 zJNP!~BT$7JOb3L)w@L31fVp6UIH;m(e$)gC_a5~)}ZqB?x7%7v0 zMju&0_rfc1fLA4hD-B3-X$#tWik>G_A!!_R=QijPbg?WmCU8PLA&}*03LX?;)ntAk zrZjWN7qNo__x>w7E1^$3NhftXJm-UBi9@k-c7k3P_M&v4+d`LrnsXrl2kj zs|M2pF_7Ohm|ln}g055pCrE<%K>{>*#{y{(vp7w!f5a`L3A&Klk-ZR{47^z!6mTX3 z+6JNpDAMT=i045!U|Uc*5Oq~RtFAc|xExut9N7yU8GW$_B%1=O;|^hPK-S|(jgVFs zQue`?8r6u+J&--%{Ge$zNNEqxM1qhi4PM+o5S~s>nruDWO#L)juL3HCPY8pSBJqN5 z8)MaHJOe5VRU!4o31Ltze*;u;oe%?+h~V~sz)WzVc|}Z#gI5()6kGtUy<&Bgvtsxl zp}^``Z^m>)LV*?3>pTGBe*l}$>Ucl`Jmb%*&-evohnfOwJ*e#dAZ*6eAOfmXL?Bi) zh=AMCteVUnBB0?5g^uNl9H59&OaRp-td5}BAxI-@gHV=&us}DsSiT|-agXB$p)5sy zu)D-vd6hsufebE909o=t9ApW%Kr={BJ>wgwCS8UX;-Ipb)p3DXmLfloGB-ru1JEpz zIr9Nnd2s{OqBE2#pPjj zEemKo2uri3@DaCGy%IOLtOhp>Ky6fI1rC8ppn9L(is1q%J@8vGTmUtp1*{lefST0c zS_8ED`2;B0u$VEO0EvPYz=(r<{Xz`f00&JyLLv;5o>?`Ro`?x_a)H`4U&NKzc;!J# zKZq-GfFcKUe;`CHDE+bs{N@0uYvG5e1F3A_2U!T}IDpbWsKxF$1KcFt3C^q=gg{C` ziBOkelMp1Cf|g^jI=(<{a6Aw8abF@bwF8^p34K}XR-N?35#g*VO@h=IF1td1MRvm7PBO?dEyY%`%H zbr^W778?(!O9gJ8gL(nrOa$%;fYu@iNPzp0Aq zz^()Jv&29l?Fe#T%QeuUwDri%b5IWl;#joiIf|Pnfiy}W>C<3ZA*i?)+&o_(sJIW_ zJa@bxf|iU=h=4;A);d2RVm^IRBDb*hUTFJ#AGm$a>ewIwDnVEsL1_xq8Uc?-LmPAo z?4U7cMg*GT#3K(%9H1Wc^ul-C;`Ik46hLdK zHJCuFYgrxnp7O0m;GjkSRe>g9qf|X<*l(x)?FW0jkMC0S~Il!45-&Cn%BYGm_vc(3r*n z5rukI$CHeZIE4f+DELLP1p2_~1<9eP?wpFnoiia~B3TN^rE&+zv7l4~ay+XB(*Y4s z0S4;tg9aZC)JuS3|A+`Ajexs)tQt%wL-Guo%A;1E|^q4L_|A1dkTggUcWeGo}Y(ka`M~VIj>mXeNUD z>kKsUg9cfYSU{D82Ga>KC3bKN0_=QYGbT`bSpk%}K(Wmt@C~-m8k`#-<^BfYECmjM zM$q+G(k1nd(xpl)jvSynslnqSCnOyI*A|*FodDh1;K<+sYUzLs2Z>ih#UYJ2kV`?W z!Z+YCr!(S^PzS3OH)8^q$ek0kv0j8BT~m46fH?J|JSw`~Xw~gF+f) zD5yjOm*WD{*g=JOhlCOfFUU_PM6)0s2PHhvp@ZNNRY*=+AP6r3VDSOX^-3(@0J#CW zHBX?0OM!)3lbHb)b@h-n_TZDBK-p1B z=NTL?Feu3DGB8LhvhegVaf8{sLJEx2SA5}Ch<+geX)`=PbQVD24K6Yr7kz0GaMTrm z44<3;B|<^at}jp<{gjvzha;mmFB3TJDsdp!L5iTv^FnNTfrFTMJt%dt^nqbhn5}dt2g(GB02|9`ftA6W2c?+geEDMyK7YI6j`rIVos4wsXTnj@( z4^-=;W|T?blnN?k*udAMv4C_pNPsr;usVWb9V~DJEWiW_3Ma;98-^SODbW1A6ewj* z-0hc=BNUiQqW|E7dhJhk%x{zGY_CL2a;NliyR(T zUXJMxUT}$kRsw^X?1)(sQ1^?P&ga78d<6lEDo{%a$p^%GVEWw`T;d?tgA*8m0uvTo zSEn=!ID(g&gBo)ZAa6noOz=r&tVjhWv?q@!Fj+te@&LHNL=4E$*bN1)(pC_!Do>HQJx((y`M3Y?(%X(iA;EtZlj$C^R`Wl*d5goqLkcqj9CQPy)556?s5+3V;#{hzFY5 z2j9HQ0Ggu)H4P3(I5H^mfSYv^SqhQ@=a>|DTzOeRPWZ#5$nQ8~;|u{sVR$le%1)hM%3ihyr&Kv+u(xbL}K^-b+3$}}i8{C4OzUK=!Pd#{z2@7=K z3)F;!v{^w-VGhTsUzs@ygeO77>G@UU3E!Q$1ui12p=% z9qSc^LAeGw5paX@5?Hj5Tag`-cK8%PNkowonsXGn6u1?6m>m?@75KB11a%k~lsH@! 
zcwCEI!5j0rKz9j&aybVmm)C;=j1QEj*kSoxz>Sv`WF@x(s4)&oN$d*XB;zIxI);-6 zly*R?z8JF#q~(s>KBBV)E1Qwy^Kw*o)tk|PcUZf_+HX$3A=o@UWt z`ojb%^H@N+cX|OUj}++OdIb(?B^Ho7KsReKD6lw!vN;QAs0VaM5-%vbu_&-vG4Oyw zO<0$K1(bh;LB$_Cw*tQ-54d<`gLI@JZCv7;we|RGVvr9(Hv_VOidgWtJl1q!2&#xN zT85CZc2EJQ0G%AgY~k`tfoddB>jr%PD97{*nS7#oSkol-B-sK=lHe0qQQJ2>3Pe}R z(G32CtNK!zk( zbr~**fX)}e2rhU>OBf}%pa+TbgM$lHp@8NC!R0YzN(R~-1oh-0E4Eqn89@OCDrN`= z6x`Bzt6Kydg%Np|4cdAEH7lTrK!Zsp3Y3-=lz6}u9B8>Vt3KlkF(n?*a%~RKX%e92 z+AIo^0>_wm*g)%2!4p`ZB}xhk;6C~TF(n=|CK&}D1rFG%Y)F+NGJOFDk3s~Zg^p#= z1GGKOjERAV2{hf$!@~k<8G%<`5T5IpF38Cv4Z03qgj)f!1WAbp(t%`xmw}B;)BAaO z#O>i_Fb`;dToV(>8ptJ7prA)v0@#4i{|7Yrq98eaB`1$eJ=_pBkRc!`R>u{BS)jrR z)RY0`T2@C!D~2_oIU{~pf`Js{povv*q5(~%a6p|2T6e||8YzTK0?z=CnEc^a;sA|c zYB2raSL9VtQ;>9I5%|og#9aKi3qNRDULXrR`q{t_sr)pVI|QI(pRfsOZiR&DjJ!M& zR`9R@uW$ST3MS}?7JSMN)z$?h6^NR3@F}6>y=jBmH3)e587*UwXWX=p* zzN7%3HwRUxppXQO)*`0OJA`1a1hp8JD_VnEZQ!XwMP3CCa5)B^_yff!EA zg4(QzO3V?o>y;I0Z6mmu3rg^iq{$5G?0`op%CeR-T~e^A1Kx z$pJbF0=%{vmeWB|v_ROL`2lDV2%{Cl8!>2B2QAlN1@$+SLFGEciM&iq4hl>HD?qsc zT=+ObbI|k~LOe$G3&cR(gau2c2`F%brkFs}*`W9YI}t3m1DyLot8_G&7KnkTq4gQT z#R4RK2R_H*UpMQv@ zjCcwWo4#L|M?#61O4)=@i*d(v6%ihF#skxRL`cxJefmZbjOySH#C}*Wh6_~n-o_$! zi;-J_SAh%Es8-}~WZ>py26Gg|99Lakb+t-CP&x~8?4lr8JPV{qk&|126Ld|1BU6cj z1XLlH7)T}PWG&I@iK0AW^$P^enR-|hSRALYfTuWC2!d)^aGqNs$nAKBL6L>$9IA`J zmEt9k7E1?4iBiB0Udx1O0S^x zULX@6h{0B7-9a+&hPWBi3D6Y84L;C{9Z=$71+T6KFX?i$7pQLmcRKIzL8>JUrW<@9 zZ-AEDC~!DV*f~u=fkog0$fb-spyoQtS}|+@Rg9pTOWcfU4rrF%#*AqOh-R>2m;jRF zHe&)0VSyHeft=R>Uo`lJA7<4Henl40s;D2JR+8hH#mxead;*Z=9e?18e(;0c1FCg7 z9D7zx6Hwp+Z9IfH=?Bb7f{-+D^!%U`gu%vxMq8jh?tr=iWNZV!8Pglk`Y$dsCh+Pob~C0YAU%EY{9I-2}4hgn$_ncuxqZ zv;!3v9YU}|Z3bH79RSI*I5N0fF?sP{|KkTmhQw1FbQDtgwgdSAjN} z4>LjvTqegmQ>O_y?gH&Euo3vjrpUpqa2OOVkj{WB?=eUezzRBW5ps%=TVeV;NgfeD z9!F;Iz9U6u1xCjn#w^fS0fPdg0<#%Y3n>0Uol+)8Gl3_Zpia3y;}H=hM)0Z$Mr+0^ zY#=9qJEs@eKsCjM=>bwa()wUCm6$6I!0?ohyt+3Pk18rJijW{>hAivB8l<3w56}bYi3?Kj+KdMRS&kC44pO*H)1I{mI7(0{ zNUe^5Rx%u5%i=lC$gRNS$SBZ{9;PTNz(IQetY^A`9FK|(IB9D#ftK!o&-`LlfJ~Zz ztJn+R@&(kcRu-55D%(Lt4KuXZIUzoMf*g-n{S(L`O01wIS$n_{3@M(NLB;O_&^b!Y z;IiwCs1gS_o;j=;Vd~hd7@+GyoD@3jK=UW8;03453M>K>!MZ?&3A!$ENFfGV&v8Vg z9#PPMN-9tRqX23lf;Jnz1Xs381eM@<6KXNYX`pp!;ASeQp9CryK-E5EWT8a@Lx z3!E`QElXJGfo$IvNDyi;HB9GI;E}9H8Q@l8Q2+%^2e_ee1j&_K*dPt^4G33)N)AU* zC`{twVFZo8Gb%U=Ool9D2lp>QZ7XnSGb%8{m%BX>01a4yiw{u2!BQ{qj9Ch_jtLy= z90Hp_bu_541homY@C;nIvp~*QR$_7Fb7ygIyujdi9yDId?PNBpT0UOJi!L*#)@i$1#6fTHgK_SBg5dy8})Mq>*fhfwM z0RrlvZh(ZS0*^K07I2}mfh~*Y2spkH;qe7oRtqu`tz<_I8YEx_%|bDPr{xsD3k*Ocd=v}M!u5Jk&ICmTDCdBhh>!&@H$Xc&K`U`U z=?S!gS{Sqj1iXYEvQqI1bZhMmVWj0cBH)sm2UL55hIrwXALuj>$Y2bls0A&v0w?d; zO!dgiaTq|=(gVbB0BB__csKybaV(IfmY@;_v|I>uM4{uKrH$Z0Vn~7nZ9oMLJ3|}_ zTeZ##DU49uJ8@Z~jDVvEtnUvV`sk2Ql!BBbiagWxRd^)pd8`?iuz^N(S+PYR&Ev10 z2{gJvaOVM15P?Qa(BluhhKH>9gAC_@hL=EdrWg@OjrcnVia!-tG=Pd_r1-Oeo(Y95 z!953QgvTGaT0@RM9_HzWDm>zzkQf9HFf4$U2P}?9KzjlvfeS2>8cfm|Jdh2;&^cBG zW`Sv3&?UGm4hqcRSq4zC3{Eznxk^wISKuKtn9sk1q}T>7Wk7WX#5Pbdw_v)x zI*&B8m;*J8k!wHD`fSjMEU4iI8V>{2E0FpC)R1Oz?0yYi`}qjE(tHJc%fb@ymId(E z1*}tepg}Cy$~2JS_1G8Fup_qMpsg(gt*}5U*s!;{ABZb}R-ra5nhtXO1Mw_@YS2nu zNcn@(_^x+=PUeA^1A>|rkd_dl^$nVj2CX_^S73$~u{_Yt-=IJR6=#rgngP6Y1X7xT z>TyUV30~!g=9WBgIRaTRYXDvW#z9)#Lz?FGb3kpv8EjcRprJ@s#|hvR>m;x>L0rgtJ zmkpnv?_drd5dhU~CD4WzXz~uUCJ+>UB3S~EMfKp_8PHK8(0-Q_q6$p)kOf&^#!Jb z8@WAf^;iQKtX_ku0~El!zCsXiE`jPynQLCs^wbR!9Su$59um^$#oD zR!QV-6`+k&pu+`N6I!;J^h(efmI2-a}F|o0UKKeEwcyhcGv+9?gP^w>hNfd z2An_^Jm9_%2)OOgfJ@Qi(P8A7K2eWHRBDHCmJ++80%+kE`23d*!U`&&2HXbW=?C<9 z#H7HdfRGWV({S}2gE_^W*tG}zKEm> z9`glD-w+2UO~~0B9PlnugRmLov<%1@8sL=23Tn+A05wxV$7q1IcSFX8>sbU2gG;{; 
zpve^mQ0ezc7<@uT19-6~c%wN~NrMO^kLWTqi70}{4ncD<;3F_VPGAL}*#T~Ef=VE< zEP)yDX${Z-7btmxR$j3>HV9`4%!J6*GZ*qQC|H3)?1UI-hXkwR1<-tn;|^i?xck6=Wpm0^-CNQ2sf?2U!yj8UqKJw?P;(5D#MC5H@E9wedj{y>~#vg0PX>deGD? zXzZN{I>Za!vH-5gz|&}uX&TV36Of#DB3~CbU>Eqd>}k{*ERBtT7v{=xy$r(hCHB61m?`J zX+`Auen{DOLpV#|51S$z52ziA*nWRP3_7O+K0)P#s1j_|0C@QTczhhRS^zrT4_dhl z4ku9fz)b^h)n~1D>=4RwWL01km<#sd4Uh-HwfZeFP^}Mf21w&PE{HQYLBimeflMnw zS~cJab5Q#aH1Yz9J;)AOR?v99zXoNo=gO#OVB|BpdHAdBMe}Hg~NA{`J4q}$evpPniBzgZk0GMB#+gzf;xi& zV9$XLVMF#DsQ3c+Bw<^zk*tI)L;(4sLj-n;0s5RH$P)_SgXO@MC_-8}Fn@sOxEIt5 zf`S7I0(r0z_1&whJz zB(M;kRX{x!#|6UEANui#O+RbGBcyW%ywi07+Ro@iZiPgowHez$LF{;7I=AU)4KZ3n zfcgxBzlPu)ts%h6N^}{>tRawU2GCL@kt_vJakmIu++nFB>S6P{=ygOAed>sMP(ut} z0aCk;0C@s`9Z?US0$~L&nu9EdVs!)+%Ai173=T9B>WC!L>j;1FFf-CBA4b?3A4=+o zGHaZ5L=v|`5>g$p6b=_+W>CF zB6<-UKqr-hBxZn@_s-zYQh@g&W`LSR;O68k$T@opz-o5zgAP%GbTk&gl`Ila2F9Lj&17cC40i+kP z0^Y3v@2_bAwckM@4Wd~!nLj{I^8>d_K`R(Rqg|jD$Q@AU9@L8f(V$)gsH5k|WX5y{ z6gA)^1{tFVr)@~96VgIy5P|d}KuuvtFQNz9u>$qcIz*JX%$Qby#v{4Rm^OgM0yaQT z>r@1-V*s5I1u+eD&?0Ccqk%uG9@?jv0yRj3X#%K+#_D(gd;}OM!-G_Vx)ck9v!IHP zfLg}jF2w;ca6b}sf+DLUC_cd*3RdtTiL8#0$eRV4UcDgzIeiA=B6z3cLn8P(8_*1! z5@;DIuOmNNodOxI;D+`()_~f8;8e0g5Yp*bAPDJnfXBlW*rDp)Kwao~L2UX2cV2O( z6JpZ~#W+N#C)x8zLJq3tZYq;Ajrl58an;~I>iXo(FMl>I2ay?XDM;iE3hcA3ABJub$S76 zhl6%5gU+x6t6Kt^$K+)NAG>%#47}zBlsZp{L3%czt}3XD13pL#v|bCnX9J2Ru=}|{ zZfF3X4tj$Rx$R%y06HIv3*><&@aa&HQ~}ZmZv2Baf=-A9_hTKI;XR@iaDA&#^???# zgO`qN;Lj3R3hEtkaVtQ3M-BX0klqrccl3iFwtWn|jutYO20m#HdN3Gx%^9?xbOAKB z2Hpy;$$UZ-e2)vLg9z@4fLDe?&S?d$vPHP^2H2ILu{&LcTSB0bJ4evbqu~CRBa;H7 zKs}^?_XDI39L&E!svv;?DLmlC3EVTRpw1z9nNNoRtQJOJbDqq@2{sNCYb>DYGSJ#w zaIAs4gy8-t_{`pVP_GYXw1Lb3jdOq!4oZKt0aWgCf@4h>)Hj1T7#?e2eV`*zSwUxl zA;sDZxV~8sef8iTFyvq=G@pVNLxQ&OLE>rws&R8S8Yd@vUD1Sv=w09ECX?jE@P13GvO)FT9S_dtDQNOx}o>?9rV z_Q2(e$)J6H;36NqruqO{K5_&d3XHUqCmXy&qX)DA5wfMH0d>_rXcZ!&AIOeT$Abz) za5ombJrY+xkOP$cHcY?g%%fQk=>ej5ijzTmX;?w03@d@ADBwjSczQtzvUes zb3t5*&6Sr0q#jSWvcURH=uuk_&XUY9X{?dL1d~PA4Au*7ct8dd`9Nizm>XzWAAB_< z6J#}`0vBX*0=$jM4m@Q6TDh$d4%*!S8efL2T3G`s^PrO&UDK&Mo*m@&-|0&U2YsJ{U|Dhjkf06x5W2igG9V7egy zUSa~O9Y7;pkePrtpn3sZExrIx?>Wu@pK-|oA6}gyV#c%pG={hWEP-e-tq?I|+5nQ+ z0UkPCAd;m}&j}qn-2ttcbs2VvD1weSX#kIqY!CrmBnD|1H9%`SU4|wxMLuZ52()^X z73>67N05Dx5)9nqz|t^+tH#nWnjr)VK~~2d;JW030D8k{0{rN2Xu}BkEN~7prZ0k^ z>0Ibx+pL<*Cj`uyK?NbGTI&!p2QSaV+Asp2l+I+v1l}I;NFV06ZGYtH9^TLsYxSaR=D;6@u_M0tNB{ zL33vCz#XKK#DU&O+93{ZB-L*ahb+H=kf54ohZyM27Vt2&Ci4a{bLJbMW4S@KE+^z1 zQ*iYPYWRWAOb1u5klJ+yXjL>D$c$P1uDlGO+Xz4f2dI(6R9#)o1a2Yi;D@vdA-=B% zhe!iIC`33FIG`;g@bYTV`VmNtzC#?kryArj(1F?Db?~5i59%U#Lk<*2;D!)*WfZi_ zA&lG*0{1x}ZWQ2UP^gE5JxCw89mERS2L)Q82deuOS-=C&U|T`ek2&)J(0mAJq6BU+ z$c2tC_&^yG+=>Ga);faXoKpZUzyjG?#R}S(Va|L4WC*Br&j~spwnHpSU^%$N2k%$` zx4OZ_5Ub+}u*)HbfwO`xBotTyRtWACv4g^SkpS3aM@VlCq6V^;?E~7-qZ_CqVRZz} z(|{^gkgXtvCYsC*@H2QJ1F3GH&Bx%4=HP+@IS@d*mCTttrt5q2DAa@JEI|9QV2uM% zw;b#Q1;|8$xB{oZO0a_;fEo&%kmeD1azzZ}_$eZwmM$a$AY~CKV?w$ipow@9fmNVp zC8$yb)iR)74(PT51yBxgl&>#z6mth}nS%Cw6vV+Jyf;8K3;4iQ@Jtx1BdCt$6j;qA zu$cuES^A8iniZ6WL5A)S0A=Gd;-EaQ!E{0#T*QMK6ReJ)4h|?gJ2HTFek-ywJ1B4{ zaD$E)0=JavnL$%W;G;Fc%~u6b@@)nmMh9--gDX2t<_F^D%%FBVxG4y4{(?7TA?W}& zdO;m8NDB$n#)phLL%LO*0&BqG2yP#Nl>Ok(QU>iC0QZ*<2!K-GZSd+)@XQaB0z1f8 zKlrB`&ft-*N8Wq@S^^K78U}4R0EGc~stdFi0@U(CRN3IY5TIFV25UypDYy!(rc50I zpk=j?T)_qshP1*!eJW6@Islr+10P0t1C(E!KnFXs)hmFGJcOn{(CS;v^aArAc!L`_ z>w@MlKqi7lo*)gt6==-^H}LW@Q2$>ER60Y76L4wrK^#_^fLl`ajG)aRuDnbj4Up6Z z%I@G|0z7aJYAT{n8Gu`gkSPOrOA(SrQCo^k7%fFe8iv#epk^$q4|vNAXk!uD1cDNa zz*_he0&JN8L_6qG0?;-`#1sQ)2F&q*IB1GNrJh@XLjcmgL>wIr%e>%b5~x833Oe)cqQ-kS)AUHchYJLvroe!{DUW4feq{Rhl=zx|V!*4D@Zdid@YA}f%B4$htpq2nA 
zT(W)BTY(-%#>La^-wSMDTr~Z|djWsOMbjNV2>9zw6lUUp6vUuGVGh`?3Qh&k32F+A z3T&WeBD*8_QU^tz>3cs2NHH#%e&d6H3^T7l)Aa8j1mf!%%$Rf(IKYd7&(C)-XHrq% z0Cn3H6gWVGL?ELSz;`oq=rfuiuL=THc%asTffu$dbX);il{SM7)Er1r0@bYGV-;9H!=N1CW;v7N9|llUO@k>x;0(A@ z&OuWb$>4Mbyo4O&8fb|Ns+lc7jV|aK;ESNiat=-A0+1^}$3|!{*(h>>TFoY)Sv3tN z9tAegRh#^v(F@QxI%sx>f4ZZpsPJ^D&jO-6e9+b{m$V|+bnDLoO7$GzE&(KPxD?nN znZXxE3e4gL1w@4ssK>P&I?Mqs@L53t#tk|ej{(xGK}1*!r~x5h2Rcxd12ov60E)8) zMp&dxVMLBKkP!+T8ud&QAa~PoK!>S7lS|-nI}UxuH5fW{8CHNomjmRu1&rp*8$j^` z8o2_|prI&OJV6#Vfh+-^s0Z>N2lzN6(D_r~CDM?vjL>9004~@-dx4>?DtIt}=N`aK z$~%n6Df0@*qZ}GcCm=Ht;PZjOp1lC~?33vZ5*(t_uk#A?*CTuB4~mySmg+M6V07hW zRRH_x1EV=}11Q!(x4VIZj|p`CFlepJG|7CHe8(hr(gPy+Q`IY849pw%fH z;1ZPubcY;BogyO(Xbl<%XvP4n5bPdD21iDL3tS*G*GymYRX~IB!1TuN0%F>rp>j#2WaG8gNXq& zw#Wo(*>QkI@4+kK85Ed63F!pW^g=UX_2~(}1h{0_K}mZ96X@7SP@)0d6~!pf1J1Eq zrbm3INjRy}C7fDbw+J|@QYoCKzxW{_MS~=xLYJU8^rKn8QH44|!LP6Yo?18@K?^{T zix>`l#wFAHe+kI2PXIL&xbIKj^h;n0J7|D|MWAJR)NcWK@nhVIkX;L)QFX^>pPB?5 z4=@P)n?B*UfI4(_{A zM+QX(Zs=(v3Jl!4MVQzjmHPvxY$XmA1_el+2I*Qsdm|SZvm84ZK_eNC0-z%SKw~Z- ztiYkmFoDsLDVxulnVFf9g^7ugk&%&!k#V})KLHEFj||{}9DzbzQ6QIdNE`g~Wpb-IZ4GmKC10(}lunP8@zf`3 z9ZX6*Amf=~#&auigF1bMN_>u2KojDOOw0`23Ve=?*}TjO4B#nV0Z`4)#jU`oF@1}L zpfEFsW5e`-{{mw5KNvy7iY$u!pt2WSq=GyHx)bs;vjYEeB^GF*%@3L!R$|p)Vi06x zP-1pvad2c*WP(%>pas<&p!x(_TylV$AsnD7Z%}#347x9XLzB6K2{g;a0a_mi@)>CS zf~h_WG`j^JF<>%dIsx+V38pMZPJt?L#rT0SONmXO8B}jFfnpI7caBN|RiJJjT;c{u z-wmcLM}BZI0G^x$b^4VAAT>uA_}(ObaBCl2pD92>USN5>fE1`I+W=ZI3m#oq!2}Kp z1~aAupsI=AoEh%cEYLz_uv1x(oce!zi-031Y?TBD%-I4hjNmxo0wa1kuBx!E}HLG}5KPw1Y{J3ACXHRDwY2 zSXK=t4nYRcE}!WeEd<4=7T;{3YL4#lts+pRz@vaj2=Fiwq0LsG`n6eyg1**VV<^)rg zz!gvf4%A|10+pAb5(m`3V#;!46sQ8Nz#uM@S28)j8tgY1?8^@TxQI*`erVM>r&K&uitL2;+SB;feJxuy{0R|Cg}Kg@+t zpIiVfZ~%AiVZ-M2$Touv1L+As44Ol122D>xt>(~YMCmGnu7!s!b?4AvN-$$$03Tco zT4Mt4q(Unt@X3MTnh8`VF)=7GgWB*J(-YNrM5fne%F1d(PdQT{Ru?1Z^!iL$6=l-Y zOM{Eh2TarFu?s43%stm6;K(hoaQg{%!R1U$$NQ!iatWF+-RRrCoJ&xdk!gD0^fTOo zsf@R_8}SI*GBRE3-CoEmSjx!sX4>=@e1gx|rcdbv?e)6CFE~ML0vB{zlNodwEBLZh z9)auAs{{ml7@MZE3kpUu_DxR|6clD$IK5O*(3w$T`c6T?^`M#JEFr-*#>vw^3JHpU zxa`7$=8Q9@+Y1XOv8yNuf^M6c{y|t!db+%%pa=(-0*?Z}29wJ4`@(|f8Lv%WFCr+% z^y9?z^CE(2phXYnqJqjC=k9a~I5G%4nw}&osKoeqdaJ0QB^zjdRp9RQBcg%Bi!MMqEc3vcVVnF?vtW5f@ZooG`stT(Ah_GDZo( zb`aeqAy~wyGX1ZFU^2|qG)ciYu_N5jQ3ud2Xbq+bjEd}zGd51YB`Nq@y@OMM4Rp^5 zEBJ^oHqcSXypE79!LZHtyjhNyxBrn6WMO8Sv2i<-oZu=ZanQAvjxwO5>)M#5z(-*# zFge;}O+Tz4sKlr<{h5Mb3FFc2(Taj=85xgm=TsJ)&B)j{eW{9I4F`O;qNb{#F5{x< z;i`g)%JbPko1|3`dk?^av*5Fvz^8*K2)vm-TU9WQal!Oos)8+SO%EFc1e&L}s0o7Z zTX?P}IF*s{*7RBp!A`a>Q>O_C+?vj#DVW5u0JaXKOj9s20eKw=Y+;k*n*|N<6(69i z2+CZ{D62!j!&0ENQjU8sGz&QLK-P?a_Q`>_#3_MxNpfU?mV)SO34)e_Bx?z3GVOjo zy-!Qfmhu1eGg^YiOs7vxXVex1Ed?>x7Sv|?{%CrNwxB6!g~)7eLD1TdOWJ}uOmCh~ z(-Bk==>)CL2cP5t8j%GZV6p4MbPFA@x_li$9j1oo)93048iCe`T+|UXWc#@_Aw^#sis8Fy^|t}i%^k?HHb>HUU+2N<_a4=@rmFg(xTX2rk>+DNwn z+>wfgoS&hPt;D1P>c=xciU&tVMP|?u`T_^0Z!{8=lWF8sU~-fM_c2%s9hu4?7sDuk zjzDCdF0fO`f4ZQtpf;o5bX#M={Y;yBr~fk+oXzhC>it|`0IT9Yqdk3viJgzzSM_Z=k^HC}+js0pWp8d=SY}#0PJd)As3-D)DGM}Z z#-P9o>TQA(8{2dh8$rE#&>9b>k}M?-1t!p#wPQ`8fHG*&*9Imf7SJRls|M2wCMD2X z8YMnZvlO(_2()3OrZ5ZCss^oK;R)&D%% zil7-jj%-B%1#U;4Y$Z?wUQt+q5wbKyQNocyk%ifT$w7fdfgiNRl0|_#ONW8M5j+zi ztiS^D7)TR~q9mvkVNeuQ04;~D)1hE-vqY^LJJAR-D0bMt* zz*MNn2wp|)jpu%YBqs8^^6KEuAo~rHJI+O zC^CW9)`AuUID!%f6N4)+lLC_?M>c2yus}PrlH78pdIcuXU<-pHvtvD|uxElK6(wdz z#%yjSc0_b>fdh{T99=xF3e2uWyr2S`OMwRxT|A)ZV&P>_;G6!pNL;-B3aBUs%|w98 z?h8y=ir}NoLB@h|7AU$vhJ#}Zw17*OAp?|VKureFnPJe`PEOca)8KPYSfD)uR_KB> zo-8F{1yD7kBq6Yw1(eM);H{o|M(`dvRt=^UMIKPul%U9`zzQBY5>}89IL!xAUIAAQ 
zQ7xgs>?mKT#GB>Fk_Fzn!BpbNlcmVwsH6xk-%1oE75G4>I%qHzC)3%3;-DY5-k{2`V30L5Ca({N+%BT$c%+{(`G?1WnU}){QB3Kqkpq z^%1($v^k)l$OTfPq6liB zNq}Sop)wMRf(p!zJXs)V871(>3~*NnoGq9gK|6jyUS$R)eg*{&P{4xjZV&*Kub@f6 z>AVhtJkruxj;vXZOl1n(S&H1?oX85EcoSCOo$eSYD_jqY%q5_+(OE$Y;y^9}AEYR( zzzdDe9k6rdz`MTLA*H#ZG{|XN7!?H-xFAutfl&#xz>!mto!LPFv_1e9cWj^tlTl!U z7KE}2tk79Hw&}+l1f>}{r$2HKG_02vcm_%)j-1(iOpFYmwSA5^7_*dwz~kiLC77VS z1qNB5o~#h)2tR$sJB&&~pv=dhAOwn29#EEj0N3+n$ut243_VZ4dO!y*fc1bT4HekT zm>59&!dO9bf}k`2zquB4_!!7&aCCw^0**~qP&budK^7WW0*bPrzz_jB3N+`$uOI`J z;ZT%;M52#H7HJ1zI7+3ff(z2u^&Ug{e#>Sqgj#Jk$R=3+nSrvN$LRK(4G;WKs~A zZtWtd#s;os1g0~(3d#xafVQPFf#xefR}(4lOxJf6lv5IL6v$E(P>=+Lvm_5AE3<=w zAPZ<~E)RJ5CzAp%#3Wu&{ltsMBwk*SNxTYzAd>{AN4W~BvvGhlaZGP_71Uw_t&33P zn7-XrP@P#^L3H{>o|6sR)mVJVnDWE2vn0>XAKFl8yR3xIYmuz*I19T^ot^KqcTPey?%kZV9Y6a*B&N;H{QFe%AF`dgrR z3|0lm*0dR*#1Cp!u-1d7UKHe(3rK;y@B!on&}o(6dC`~@$N;$F(g$Ou~YYmlwPtO6Rf16_*O z3_3iV88nu|#Nsgh=RQHT>0xeyqKxaO=eP+LG5z71{=iMpg=d*C*e@&&j{Tpv8@LPZ zV`S?6HvNN#pefVxZ`;*91(!22HF8Y9;3e1!k_h(}RA744J-re{pZzs`wzr@y)ADcA zZ+Q#eWn4LZR+Qj6#^&js(Slx#>!(kN7A&-w#ibyuz#*`k4}S0w=pZ0=@QM)*fmvLj z-P$V{9aY_VLEH5~=Zb*tg%;?YZW|*g!MJLASd5@LWApU77(o@rRnrgp35qd2SUG)n zjGztcYOb{cOQ-*d5wvD(o^BBGsKjTA*_qvy%lw7-vmCkSr+0xO@8TWI-9m z>C=BC3z{;{n68&1sLQx+dQ6I-JmdQ5H7SDCjH{>rND;JRY?>YzE~w9VW_n||prr`N z^`P*dz^TM*#xw(TQ@7Og4XJ_-Tp$?@rVpHsQqzB>3R*I*ooJ$TfX_nxG-$)ae(~1nn88PZvxVv|yYu-78%%kn#TXIq8CmjQ6MSOBbx19vUU+ zGFTJJJxD^ihbN)j!RVtEs7cRm#UP=;>d0@!prOF(C}70^8bSok#)A$!5&#_r4LZz(6;y~C zfJ$8sP)phoJn#!@kWD}7EUXd3A#k1#K?}c? zK%4Zzt1!WX5bU5PG{^y2pnVAntf1Anpx&UsjOmJ*f>QM<5SJu?m!7kL3tvYu(3}&i zBk15nMo^y}(#Ko#*+pJ*B0u69LjRJKu>Onz2gAs1n z48|-aE^b9oWd=KG4Yd0e6l|cr2dh5g7L*Upq&H+X<9 zBV$ki_n{y5d1CK+7c*AU!j7fmvLj5ns@Gw$T0%3pge~Cvcwuow&_u#&m)a zv^+q9se^NRVzi8?a090TBWMvHuL7&%1WwR!Zo_olY(c~N9bnD^PDGq-0E^7v1n&c5 z290^KYBEpYG-qDH326Y`-~>ga;|#fp+&u8ajsiF6s2i}S8bDkB!3$i#vjCtIQ$VYfTfjpfADEPQz;56H z^<_bh21U#ZCUek{3yeWhBGV0oIryg^$`zEUZvYKHu{t&|XTeT@21SV@=y(9Is3YjC z%eh>j%+pXj@5Aj=+;b7QUOmd{9wuwSO?ZK14EA{^8{u^ULJmK z1vXwro_Zc`1vXb+@bC<;Ir9RDYZjonW(6}Sl`jBwLKPSVAg)<~VzLO>HLIYm*?_|} zJD8CtvO#+4HJLYnoWsSf!0F1%rNFAdXU=?r8A2Zb(Y)r&7ohYFX2*jJpr~{_f#g7t zpaLuS06pj#+Pr*H3apL~ppG~Ic0A}vJ}!YX;3)sV4Db7b2H1qb{rCsWAd@wiUNH0U zaXTt1vVg(`)Hpkp&ZWf!Q+ z0XnNrfdzEeHmg444V2U21i^_Jb_gA4*A}R31_kj4kdOGxnd@J$Fgbum6hWpyj(`HE zzaL=#d|&}p4X_wTJ$Vikd%6t2SU@H^Hh@pfVFfMs0A+M|^6Owl_KhevC?7PiD)RDx z5}e}%R`BsaT%gEefu~rM1HwSII6_YOVs(50b`C2@p8}V_3UC5nfT3KMVG%265SA5s z))%I$HX!s&S11%@XVzq1F+HwOP_q62s{*S6uQ~G$R&c0*4*7zmDNslF1S=>X8~_dS z!Slfhgkg*|_CDGEkKXaSiC)GvqY$0PLC@D6V;c;u?@Ibr~M9x$@S7oDVu63>2^EspbQUYe1Sb znO}fh!wxPxp=D$PJ0yC3K%=LF9Tq)55W(ci%gF6`kb#E_L@Ka4f{q>ox!DzT(H|!$ zo+hw^g97aK2KFokcsv~j#S`NK&{=2>uv`E5zy&Dy1VL5}rWw;WmI%t%uYi^?3s@jY zVFgkN!wRxu1GESPofHZ_r$T{C;2hZW1L!AdvT87GU;&l8pgYk(M=ya=BGVnvNDT*g z62&%(DL%rZr=0MdxsDy)0OYC7s-f9F=2kKrNVXud*S2(~9YVGb|R|Ll> zdzJ#!fCKDV0zKdY>R#ysGtOy0V?*{vp^%c z;JYCnu!9yZvq91!Bv_d1LE*x!!0L3C!JO#|lLGH@MK;H?pn+!4mU&S4zW^V!2WvNd zVMop&tl-M(13Rb}09t0i4mwbYRe{Zn=?};;pfh7Zj*+)w*uVklZuEd?CNriE4h2@w zl5dp;5QEk62WZ?C)GTs5!2!CEfz`2r1H6Bb6*LC%zD!WG{sbrvLE3L{Kr-S54(QHE zR!!y`93a1e+E9)UK+=vpS&mH5d9oKA$ZiIy(O`POp~M1;0R~XGKLL--JAMFZRA6$RiE(-2O`#@SwP3av4GTn-~i2rvO50YfDD&|R(XTYe7*+S8pf*0`~&P5@Np2} zuG0>9Qy+Xv1FPc(NK2myv>lBVRI7X}7c^yW;L1{A;C?+_w?Z(3bE5$0Ob`(zw&{$= zglwnxq>FG&e_SCrk?H&H>1CCImt-MLO;+$RP@oxU2~f)rGFQXqHa)URP=&E&dR>*E zCC4t%VkUlph12&}3D&wD*xVrC$OKu7WC03D&{*n@xeWr291uaop*E*)wSev=03Tli zS~UpXQ3h^U9GKozEoj2HWcv1MK~1Jx@1{Sh7Sv(fF`d5#Eb3AtsLeF>{`A5cK_jNq zx27+x0WU$iQzNLuvHWurXba4b=?5)2g{PlgDZn*dw^q=NseyfZX|14z<1bFwPKOT+ 
z(7R5-9c5+(7J=WO%f8w58CjHA9QQMVdSEQPpgAMResc!U{4fvW^v3tXqO2SW?2Z=T zb7t!eApJ1Vk(Z!A0_|kvP+$R_vdXtyi4(deAADe&5@^63v?iYeG)>H?2%5`Q0N;-a zK0O`WiDvDG-O(Z25ppUk3%J9`uE|^43v=|Ou< zm3Y8&R!X4B8jvyU-~|BY%%BheM+WHpQUx~1zBX1}1_sa(+@N(PD4qs~vLj@T4-3fR z2t?4rCP`~f|8>;XeAQ622+GLXbuw;w4lRF z(SjDVXagR!u#g39ok9*-@NvV?kex5ZB{uz%lBB#VQrLmF9fIy0fL7;Bpnch_;Avt; z1rC9S%!=T=&7r_8a13-068I7U(245|(^u9C`ZKPW{*@2_ z1x=YQzn*@zUC@ZB>CJT5`7|x{>I@1@pfwjt;PcrSlo$klFiL?|dV-c-F@iY^3XE>t zo1hn~86XB=e;sZRaAXr$2yXm>3UJUe5(WkE=nH7v4OFc%fX*xhjS_-<0czrMWC=j- z2N3}uzA*i@g#b5rSWp0TlN4y=4}9t=WFQuO4KQd>6kKtzf}>u8Nda~_1*;~r2Kd|n zP}@(1f%{(*BNKl;3+Oo21>BI$b>IvKDj>l%Br9kLmjjgZEtKj-&AOg1cfX?w! z;Dio~vb!mQ&YN;%Pz3eCm>d-NL52vy3;`WRE9A(CFoO>~+6=n62;?tF^M&0_kqdNK z8-pSjH%vd1yON*+PmwFj^hN6h#925TUvNzi{4XG?{eTNnyNfGuI3C~vZPb3i1!|Qs zL#h;U#I8H>>2t#+C4<3x=2mclwpKW<;L0*%x&hk0af2(%Q5IYXLN~mv;8K!+)Y_nh zxf}}O=FBrd3O|7Dn8B5$AOWhkIrJHSa82KET1Y0BLzDRfmmm=o6xqVS7?8^)hys>O{LJz28A{#cfha$GpIAn07<8U zj?8Z0NKg`SWOgf3;8NfzR1j1UaVshUmA>FZG-9V9DfdjPZhaW1g z16o?C!K9(c4+?w{HdfI7N^oKqt4B)fE8yu6boey*SOD?mN_^1PtT=dwDQNKrs5J}S zVG3S6!r^!UtaArEiGfoW=zJ(h8iVaHUBP9}e1J;Lpf{-JVJ0x8pjDK8_#NvTmny0|S(2SmAp#8@SC7X-yfdYyoqY61M`Xk{R5N zjEbNF0+fN^+c-bKt1d|9sTVhA23K-qXC8KF=3zHuf@hvN4>|-K#bD_ew38C<5dqlL z9(cdDMm-aXcea2u!jHn|PyiJG4cu91n!tMCndT-)4}3ll+!Xu41vzB|oM|{fi`m$q zg&*h^3_%74Xr^J|spkOIkOGeNZVKRHnU_I97}7T9a0E?Rae~^k3ZNYU3c?C}0w1_R zDSHhw^15U$ZUu1bY6UZBp*xd<0*``-YoRM73xY}?&{)<6(7w?f;MfGsY;l4P#^=yw zIFJXP?&1KqQ^2!U9FCxACWy5h;7Kabv4YHSyRTri`vS~vVaScH;NupbfRa6*8Pg3A zEwI*{=?*0Q*Mm~U4=&J=o*b~Y1jGeO;^xc`K;(~NYG0oxlhdE0D z4fB(4AA>b$k%io|z3fv!^rR#NI-W$6c?GEYX9sOW zKEO5oW0#DGlejsPhyrL73RIiH+dJSvH*li|R39-hfF=q!G?+r5eI-!aEJuk4yxzur z`oi@BV#sMti(nHB=1TGDfs&#k^%U10pvDX6nn(0Dm#{z;_`Fh3!s3RU-oc7gX=s3t zdEl;x%Mh2As9jamJA&GWmv40lI10m(1S`zl@N+#N=RAO#ScqEh1xSwuLJ#PeSx_|r zJJth94?G!7y4@k*2v0_=3LF|XOh1qtSFE6m?O>$=dOG6ZR^W0JC{qIM@)0Og;u*}b zFM>bzH3w7d&pg*5;3xu%NfwOQ2c56M0d8O;Vt+kIk0wG7dh8?Vfye$WkREvKvw%;w zLy3LxU0rx$A37ZK8r0$kH3H3;I#@u9kwM4#fNCm%*Pu%xLEH2=9Or-pAgxeNGo~pZ z_lTN-?mL8yd8zy|L}pf`y?Ep=##i9Dd70WL9_LHlUH{X`{Dr4RBX`Y^=wgft=0xWEE<#}qdD z0vZ`%g4|)p1{ux=_aKqSLEsSv*$EA~zK#Rr93^qckPD>U$_j3vfLpGhrU|6mCk`KV zQ373O2kt8>y5bpif!p@#NHcUtG-T8Tlt7p^fJ+B(w~}@GJ24>%%NJarD?&kQ`asf)HD=0#sr(k<^+jxXfQqD0-d*{06P3agXsa+^oe}}!mJ#c%m=up2Z#$P2qOk< zz@yQi5Cz|TR}Weg`VhS25LB-@G8HQDIwBvd%ZoTx7d(!$fCV%L07`HI@0izeD@ou! 
zh8Nr?S^-iInn>q_s27g{cN0N_no7)$Yo@PRFTf41)S!cC^{_!SNt8h}SX+?EK|yl4 z5;wH=l>`qhz$VIAA+161*=ZON4URg{$eIGUZx2psE4Y-TAdw6iSCceno>7lFuqK6w zVrEy+VtLS-Pe>~gI#UH|+JnOnw22uyV+9_419<>8`X-5E^zGNR1_4I}#CQ<1LOq8D zC?SBB(x45$v4XaRA+;4iv-O||2Jc&SVgVhH$_k=nARAsKAWL>ZC(uZkGw%Qu3Y=z4 zTR^m^8Pf(9aDN)K;(`Noo2fvn5M*dR3UucnD|jkG0lF3yltDpbjwe8RIn9`kfM`gW zbO2%uXx#*d;{lc|xG~sF0dBMyB=SoQ^Lt;(_K}3DEEgs9gZhykO1) zuIV2mgo(<$)}RxDV3`*-2neA;`=tt%1VJNxypG&uN?eZeg$n#27LTKRp&KtdC^8@e zdgvJsRDK~wuOS%^RO-Vr9b{kvG|>m@g0b>4g4XncW*R}pD{?7->;sL^LmUEH!3RAO z@CPI4+zllGN0n?oHqg0$%%J1!1RN#7yHLEi6nKgp0a$fI0IGNT~oRTrRL=3DmbkOBc*yfg5}QCj)5w1XRB9z|DOEGLIR&Pak{~E{8eu z3y8Up(gt)XFc*TeE=j06Q!XC}b%> z(y9iN3TW6uNAbC~{3Vd@C$g4@!So;7eMd3jsKwYem45 zd5|I(R7NmC76fr(tp!2NJn-ZJXyF=YI0MwDfVd!%1;XO14P;S@&EihMb zDT2;h1h>RlK#P@RKm?A;9aI`IgDQ7+aB+>=HV2O$!7>>rZGjr+ptU%l^$|>tIyqU6 zI@wC#Q>PDbO^^E`E9Z&a!h^TfVZ)~2r6bJXr6Zu_6YK)8+Fya+u>st62TzMIJ8~2j zO`pGBK!O$>K5z66pFro3>gqx6YsXoJQ@N|R_e2x)0`~##>Zzi1_4pkdSU`Qf29`os z1!hO-5=ZG$M+OgirkbuHm1-a>{^*rz&Ygs<{u|&_gEBw>YSM$|nV<^`xZsn%jvv4) zKV9injonz|wf&nyxt0Za83|{@Cz^ov-+_9$65p-V?xLVL;PB3Tw0UB!P0N*dq z;rN3k3wji356Xrf(D*#a1s$xQ-QyELBe#&Q>|OAfB9iute2(>yyO6;XU{4UXK466&Nea4=3v?s-Bk;{nT%g%aaRrG8upyx9&N&>x zE&-hw3O>H}2TE*!&eqXo_yFGK!=cIif>lXMLA)sda>geEcz|e^g&;59rW1H$tAql$ zX#k2i_>wCH3B-ac3Ghe}tYZgVgu(>w*%7e_r5+pwSXQBcCPc7JSP6mKy-W&HjslL1 zNNpJp}3G2 zH2;Y{Nd6L}8$PYVtN`k*f~H7zg6}Ou(F1RS{Rin`gAJ^M@9FpfT6xBy!SsPkkqNTL z0n|0G2c4V@I|`ebTLH9H6`HUhNgTWwqk#>yg@9e433Oa8hbD6a8)zQ}D2ah)#5+JK zbpl%!Xno4gX`tIoIv^azEF~s^CU$|3;1iQ)pln%!_D<_TO?yq|32fjcb)XGlObU|A zK|5;-J*%nv}0 zTEGrEdx*pF0ULOi35P!85|q74+}!oxxlN`8>}E_WK(n8q-9n%hGoY2gDkMa;Oe2L!GYJ6 zf{%OQ1Z8w40r1@tpz;MiT>+Y-03|?Z@p1v=+zTKNBFZMv^1d4&@dr=`++YV^Bgdi7 z_yA=~56G3e43F4d>p}4g()I$R4YUggw#HM)ofou>)bRsokCEdK1_f4urQmV~b#oBN zB+$Md1t!P4;HLfucE`gE(3_Y3U|6cb^n+ayGT6Wis+8*83Ryt+OEiFY<$!BBfyaEH z)deja$Qx!sY0a?%v=hhiI@p>94umy5m`W#rm1=?OBhWSw$W2gFQ1%Fc%&ga7n!v%T zz{2gwB+$kMjSNt*gYNSIU*`il$qyPE3ZU{AT!Df@YzAmG%mT2xIUHwjzyf*!iXEWf z(PdZ!zRsM(aRq1}gd_MoHE}1#T7i$u^&qcqz|i8j18VLH$nGalHHM)`gJ}l`=yY_) z17Oc0MbQxsOm@a@E09qss zj!bA=LUwK;$K@Rk@X~JRN^nrPgZFPSIbN>^N9GMAYo1`LeF1h6I5Hm~-2Mi|Js?9h zm|j5Qk{M@Qg6?<(A2h}R+SjE7PJ0(1Z6I-TX3$P8@#T*H`)fc$x(y$|r?5ktK_2GJ zA3!nQzzHc+K7h`G2N!HDoZy`y&=kW9n!*Py{@_#sEwliY79E_R1KcgI|6ie1g8QUv;{PUQ;7vs8E7y~-~_E21RX5_N~NIE1a#>XhK-Jg8Lb#r zfa*C`Go~dVnnQzW2B#U*0+0hhCr^L|A{KCFf%XY<=reACnotkk62Svw*ea1hWh)shmpdIWSj?);e7&@R!e|~V9F@b^%WMTto;%fr9@x7#GqQjL%$PuzktiVMX`y3V=(Dt-nMu%W6=Jct9vkSK zIQWPhq%{lL?a2XJkcpVE1P`slR+;-tajNd?|?pGejhS7K4ChRY247u%SL&Bh%pJWIW7}!DvuU08e*OXI>R@@f^wHkh|2~_)CaiCr+fD>N$G%M z40IC-6R45}Uui9{k_R&N4w@WeQ(%XTAcLj=r#r^VN)np3;wDta5g6_yE>c0``t_hb z9gZ1Dc%Sb2wic|zorp+f1MOU8(_q3eo(`HU2gfKpD40MaBFx+hBH&Fburd$K!Hmvy z;-ixdI(iP;BL|62L@fs1eGQr*!e5Vpsv}oWl!6N{AxB2gKoMx?lM*XUHxxfL*0NGW9Jhcz^nlhCtCO7b~0im383{){G6RMa91tb%N3&Qx}wGwD@aRK*s zwJt#gM$pYX9^HbyAo_Z@pcik?s%Zj_OP@7?RxjO{uGb@|$_BngcloqM0;1FNdIW_S zpG|M<5fo{ch`UEF4GHv}my>)_ME7SZR(|INeegzrHJV{U)L~BeE zY!|!vvt7VZ9yTh(rT{uTs$tRe|C0ozr$3k^$SL+4q`(@R0u`p6zox5B7W^;Xuz3dL za%pA-CdWUUpd60rd2Ygr(_^LxCW5TnKSfZP1AKG>w?OrDhe?9b(_Nyc|lSl;PVogHJAhxK?@xvvJ@B` z@4aaeaO4tLINfTxpd#a<=`qs<{g~cxPG2)!u#|;g;0@<=)fs|Q8NW{7Jws4ecm-%V zC%8+m!Nj7#;K(4bZTjySf|86+wu{abG-PDFI^BJipc2!UmD96l3EFZrd~6bMG#7X@ zeZwq4bH;hoU(6CzW}GvfXSSe+*kmryS*@TmvJ_yo470$x=~=S{HDsrN2FIYgjzI%# zte_((m>hpGC@>2w-M)S{D6;<^nSODO;0DHd)0^fBT8o`$$mU~WU<94;3#!{dl@_bO z(&?Ay3YsD=LK2uKsLi-&y3IU6MaKElX zT`Xw8IA{8~#e$Kbb8I!22+A`yP4`?PsK7XPde#!b7>PZgbL2q>`>{LrG=WY|)@QuI zDDaPW`n@HBPK>LkD=rljXKbEsxm3`E=|AuE{H22Fj1AMTFBMb+asMwB)R0`wrNHLP 
z3p$$(eEu_Nha6}tqd@a?hh>6Bj4jiPmqAp|UM3jA*f{;eGC>{2mg%y~1=V=|^Fl}7 zSRHRnKd2}oHa&H@pe|$6^vTNwHN}2|Zife*LFM=Zdc>SVwm{SLOUnfV8C$2TtPoUV zJUrcdgDg-qjTxJ#&tEHO zz}P(f!dgKM#-r1Jt`(d*-C~WPDdW-Ud20ky7>`W9yoP|TV<26p7>`XqvQ99EanE$a z^@0+Nd!~D>7gS-KF+G31;9bVg(|tDxS}=Z?-nK!|-()$b5@h%pJP64FY7;PVgX>qM zVMxdcU96x9BL#MWJ<~Zi3aT>wQP7WZ-Sqm6g1(Gfrk~v?=*ieOU3`;Z7~`4g zd7A_!7~7_|ZxYmFJU)HfCP4$n8PlI{60~9LnXa-~(1EdUdg^9DBgQM!XKWVCWbB*H zv_(*z?E?7h#OeB51pOG#Ot0G_sKnSdec={CZN@9p&u$TvV>~nc%@)C8#KhqEF5Hw?I z`aAu{4nZTP?!VJ@b_%949lSq%@=n20#x2u1cL~ZfZkevROK^hdRzYa{kr%Wq7_`)y zNnqRbbGrn^M7P1EKu6O+q_$4~xl1sV@$hv2-GbWUn>ihs3qcJOW(GzEM$lqb&^g?9 zr%&81I7{;kgIf`3d=O|H+cUFFQWn|J2xzvuCPx~T7Ckf0{GM*$HO2+?6A@tyw({sVWO~4P=)Di z-*mTqf)hBGF*7oQ&kCRZV4tA9hXNDm5;+zJ(7c*}5|iU42GF%v44^y4?kt%m09vZe z#Gt_7cm>V@El&im2V(*6uVWJU3(64F7dmi?OwTWoVXa@lm?iLpRS`5pR}V^5Ob(!n zctE473ap@2i;gnce4H$-jLZxmz|IXi<$Mk3WPPR*5OV>ek&)K|8j2 z8Kz&H%Oc9l3_6UL5j?^o06quZb9&$bK{>{^(+ds=%CTKp-6SB;JblUm!7#?v(_bGD zv|*ezUE?75{PBQ;f|iWU)4L7|S}?Ade)yo^IgS;eL-HISFizVp3sV2#5LkWTZdo11 z=IIBw%YxMZ-Y%=hIB&Yv4%u^z^QKQaA{fFui%WqG)I()fU>9ha{`rWYIYh_+bTuWr zz_RJ)M+F@j*G;cHD(JwtW%{n8g3gS6)Bhb6^pH9OzMXFlqY^vlejLyd>Y%QYBbPwi z^n_!AiHxhKPhgUhn*L#tkP_pp>Hm)jYA`lWe_tg8a)|-6oY-{xY9T46B|_8x*9o2H zSRw?vzlR9-F9*B7emPV<=s2rR=#ldm7{Ry1G1W6UD1gpLQDShE@C99b?8vCVP~zCo zaD}Uomq|*2!BNl+l>C{%iG~qOG6;aL@nq6vIKT)RePq&Qn8fJFSPYu}1znu2!~i-m z1ipiL`np^hb^!)~<7|$M#h~j2E-+@PFid|nN%HITU8e=Nv7P+fBH$=FefAVdL#8L6 zr>{ICXvFmP^YpuC1aC6FpT6gTwAS>8X9czDS1g+*0G(k1H9#B}AoFLyI2;PhjuT)U z22lOZ>?o1tcmgg2PDYLgkoh}c9FXP>Fb)GK%YZe%012fqGk^x3!B#&23BdR_U>uO{ z3os4?C^*2nJC>umy8)U11EwCN`NOj52k#5@Os_gG$j!KDdfRzHcgEGz&z%RKTmSLA zpcP~Dbe#)=mNMY92s(p=QGpF~S^=n70Uxy_@Ky0u|N*ja<_=Toe>zTr~aGHd(FhA1?|9F*0tQUUOAYcKXe80tVAhUlmlD zu6tfUglXrU>8CFXno3@|(;?u<2P^5A6hLhsP`R^hy5to>X{IN4wwqlMbcLGI&h+fg zbk=KvrczCJk+p#eNH);H7y{e4roU|$lwz86cYDD#!HbMc=O;{ez9DGD^lQTQ@*9E$ zjEwK6Cq5FiWo(;1|B;|HdmD$qK@ovp(+@rplw)k4{@{_IG)p_j)c)!J9|_7bc1)Lj zEGW&`G5x?TK~=`i>G6*RwRJi{N8Eu%I271H$Dk@GusUjF88b2n91;Pwnlza~7eq*a zMSf4Wy)9_T*gE~gV?j;E*69mx3yLuwo^JR=P?YiS^mDfb_b@J*-hD?^y$_b#eqT_Qv2FU*`-0aQmrYMl zmXl*#w!B_hj*)Tc^s^5IO&NQpGd~iH#Tqm>kb{P?efpnAf(nRmv3yKcxZK1OF1JD9 z(mnms6G3Ulx6|J}5tL*p0D^I~xE#<+Mo%X2|{P+|o|l_KaU zFYpzVkca`FFfNb@R6_*1Gutw zWORdEuEG0KkcE+P>2{%4pu0Gmr+d8?RAO8*J@>VsDdW27i(d;WGAd6$`dUzv?*XWp z4?4CTbdb;8>3?1eYH0_64(bG-bq#9Ng03+ZxX%f?@mSyirxN_YN)82Pfrrxr-w3KR zKAc|pMo^LO$@dNcM+3)A-$4gZiB3=WB?t-(#ZTb6>fHeUsUuxes_KYhYa!J~}tryG71jK!|x9Yo1HJWAd|l)S~GMFli@$^>d5GIJ<^54mFo)z=IH9bAfxpl-J{Be?tM$XIB_z@)%3 z{k^_y>-6`WLOhIHr~ly;sx^7DtOaz}F7(Q9ka6G(72x~O!6)WxFhzh5NCb669KSA` zzLrbqjNue^Cf0gJ(9vEOm>{P~f|{g?915W0Tvycv`@L5GjAJ9124%Pk~P zFTeykOAu5FL(jG11s&za4!USoMu8o4;gJNa!>hpVD3K*_2YdhuBk0OW1<<`N5Up$= zvlT$+2s(0PDRC=s2rTAQ|A#f9Pl>s~Glt&WoNp@MnTxYrz9%zm^0T$fG(LbXHHPyS+2;X0BW~` zuc%@N-$lqFunaVC!pi_TJWzqju@%&@aL86*5ts_LBL}pF5)`MfaYlAWSu2JGAXW9? 
zt0urf*a70RS}`<$uCd|*oo2=kzT=KVftMY0ccH*bW{`uoOpoUk60P6BXvVYyq~id% zaAODc)IsyL3?Nq@U<571VFCsJ39vMi0_f-o(4nR50_~9dau|^h@CPZ?Ww^iy_xA}# zbLJZ$<8Lr#DR2l}1l>Xe?)8F0PvCDoFDQIL+CW~Jzyyi3KOm=rE?EHSWVK@W0OE6* zF+BkBS1^HY_F#8>zzAMr#IDb{h6(xDYLH=|$XdZ<#YjYuon3AamZppqz*Z|Bj_eBCdUs93M`DqT z&0}nu9v~=W%RK=!T*IWxz#z~veTJY=G2`CpJVHYL&;iZqi9$kZ#?Sc_z|QD^Ib#A7 zs7G;Cgo&%336!NbFhNf1;{fdgWB{FfH2okquSh)yXvhqd&y}ESDU`S!g+LjY+mY9e zmr;S+kJ$adV*g?|~TxLub3gE?C zpaTLyN9Tc8r)e-bfG+p&P=K5bh_DQ_5X%vikvRosbAv~mKuaUQD_;ipoPPro(p&d=K)B@ z0#BWEfC8Pvv4aVGjM$8N5SP`AX#scz3R9M&H25SGP_E?w-NOby4rvCH8Pf_T=vhZA zm_UP*RiK?Ln#>(cO2WviG=i!x0j1!dT*M`ZsYQwR+HMY|7&W z-TQ3dXaV*9utf{huL1&9j`fV7(Hzj+D@F0bGy&uS)M#NgW10aHWkyOalV>!84i$wq z3z$JS5+fW5ix=?1Sq&zXc$p2-C;*F3@VwUqCeVU84WO};qK%Js&B{r}$$jhMNK3XIbkWUr(6_7n7P!B&N8?;ETgDDFs1>Kv`AmGReO9+e# zf(lSCK@vJ#Cuj-m45loBc1Q<`7vx89IfmrVw;;3N$DDx1eIbTKA!*WJn!p4)@DZBM z7`Z`LHbT=GEc>&9hO9vCM^Fh1F1SI*?t&L+J%ALgObiO(?Q1ut8}LDuF?8;9ejKtxEThnjzC)|E;GZ9um;y7prZ>RRTfhC zLl)A2Mw1;tN$bG$0x==cG;nRu0cweXYXc!@Z7=~|8+0&1YXhbepjZ_$XU1y!(-az{%Z%v)zXGr01^z5YQ}BE}ivq7E^MmR3 zazf(56Qn_U4hUo^*nt9@SD$gl^h`M+jm!zsiXbx?%$Qz)OtLj+#%kivhGqdrQ_!3| zuLjctF+~>80p~Bo6j>F(VZ!V9LIl?204Gjf#|I+d76&+6@Pfj!Fj-i1I={S-2IH>j z7V<(BjL)ZUlo!%x?3n&gUPw$Bn-94Zgv^w&x(BjU4s_!cen0jq2q_Tj#~D*#3*sPt zWQO_?RO<+Uf{!RaYE0i?Eyy+9Ls3YcQF?lYqL48@A0AZ{(wE2TLrw6e7r5`lu*9IW zlJNA2KZIq8_28z-&Cn%DlzH%-l8`s!?&(&_LR%T7r+-iuGGzMveY&!WP%op%^o=S) zU*x4hL3e~#$v{!mkwH<&kwKB)kwH;_*+D^+#X&)O`Vm#3dPe){E^0#hj8fA})P$@V zcTeA-CS=YiHT{K}kO|}N>9XoVdW?If`=|@4iXH*&Q{i=-z@Mcc1+Fd~83guDZ&MdC zV%#@W}As@!w z(|2hIDG5u1azC$QhhUb1h~tSJ(*y+ePk*l=B+V#2om*4L(6*ahkzIk;aRNwQL0Lfp zbX2(t=-MzQ4JHYJUrY+T`ixTql>~TM6jVT-ogkIKS857*)~lE?sVImk zNa!*Ifk#n-Ae>+@Cm6yB0dqnioKP?)6vWYB5-?*D0I_r#d>!Qrtr&bk91SK7GbRlM zG0;6Yeh?~5K}z^Q!1?oBT2uxv9;C1{Vk_9q@L4nuti%1s87zTxU zUdIn2Sqkc)o(ZqxACNAvDNG<$EuvWpVCz9V1zU5bJ0NR>%$RO~Xhk!oHz2E&%$UxI zD)55mDGJP)4~Q!8DoB|zZ2{3@W=tE97&V!A%$c5mwEhsCo~141TK`29>`buF zr-){OhI>HvFA&XgL^{r&*Kv(#mg5Y>`To41K;Ht2UuiR@1`y3|#`FhjM};}_1u#>C z=?0@2(*`jGUMB`~rXEoRUdI*Cy8xJHfOr$?L0l#VbEYLAZoL`P2@w4O;gBbyS&mDP z>;)Yv@dBiV!HQw4m;$fk9GD4rK%y;TS&j^jI)Au9o;V=}DyTX{vJ}KX>GTXUxKYQe z&v-*jNdPp%!|Pc8MGVcO4dS4S3#Nf?frrG*0kA0-#K0vuI5qHUGM^ArvNLDi0rI$% z8Pf)F1zu3dfy*vsXzgKV$J7E6XS8BC0iqeqn0i1oECiU|h$-;aJMI9vA2e_F17sMu zLNjA}0`l1m@hp&UK|Wc4V6H$g8LSviio>IyX%C3aV9vA!WDA2CQwN04JORwNVmJe) ztr<^&X)~rH;tIT)jz7e*6x9@X9p8v$DXKGpZn0k?3JW$~4WNo`Q@F^i!0R|e0<_XdKwvi; zsDzjyp(Mb|t-!0xFiQf|LFaW`0G4NR1Q%)Su)^#ISoI1CB>`7nW(5JqeT?oPZH^nJ z@6;2LQ`je{!0UJbqEi8MT`{Pfb3DQz0MU11`gc7c?RtS`PLQ1~AUin}cpW<=K%RoM z7Ji6>H(c=QGxk8GH9+Z40_3GNqS>H1dJwZi0P}`>ibdH%MC{m_}83;xA zHpqb5O)^;uLK;j@n3Mz+*cG@0*06&{Z&(}@col5TnP-5MERX>?Wrhss1e47ypfFt_ zqa?t~0dmh08PLg28cYkO|27bkVe0^?@0c!YD5Sx-V7i;3kc-j;8Ca15O3EMw=mt+; zV<_aw12PkwBSD5S8VMQ6b%54*Gbn)aAviZHaIJN^z)4K!9GU!?fP%nm8LC}opfusVjf}$DI9S|*V#&iQj3z;!300kE$cyCB% z2^IX1o?*@oII65r+t8mGbRTG4RF9YzK{fE zHeSaIl3CCU_X3nkctIKNm82_Z$pWwA2bgyhtOS0sDHtmVf{o+VWd0zjWNXI+nxc2Q z#Gq)apy}$%%c#H$x_x?slmf4UvKdo@lmf5g2`Nws!0Xr`1s*fz)n_~-1P3bqSUpd4RQAbX_&k3fHgxJ4gm^cj`mgzJ_=%volMq@ehOem-H=vdUu(|9 zp#YA@7a-#l&6u8mXn8ZH2OwI=jOl|kDDyi$kOo}~$LsjPQyP*GK~>}*kQ!+-rXSLv zh~;oJ0r~ugbe6ypCPe`Sbx^@1lBL8hFo#J&O@UqD9;npZA(ka@AH=vZ{h_&#X#EYO z!e;`!@VNsn9~pUAxd|F0k|l5&JhIE{$Y90rLPCKTv-azdyw!_WpYe+vsHg%r|1nBA4W7xBX2;o>RUs!GJ<_2l7&_40=QOi zfdSFVxNy3GwU8*^Awh0X`%~Z`lY)qX^z>kBq1__j%8%C(bkY;3%i6(}C2)AUl8uly z8%Rdr$n;vh13}TPA{|-GL)GInk)kqPOP8=stxkX46q(%f#&J^Y=sUp_DyfH6Utz`IsLVr zkeb0!K?Md-@pFP3G=n9Cyv18V&QTq5!2_@31-KcG_Cm#e+xQSu`3wr+yC+4s6&S#` zI5O}EbAz?=a)E4L!IP!H9AJ5dO%X*!dGkOqezX#J3pg7kDF2O&Mi 
z`_mI0gtUCb%$Rn7TLj?F39sW0!7Om=#PNk-mV!Ab_KpZD34$(K6tKA9fRF;%W{@W?OyBD$B*lL~2xJ8)J%Fv4 z{>)KGS>=aN7P6aoL4naAY{s-eSOHYBFA&ZW_&Z(KNk}^A4ky%2yx{p(Mh&I}C3a96 z;dNXgjA_UQVKb%+!k~5$NKycvqS&D+3Y4~IISKh|JP=mkRRHx`Ks37<(;bke7s8;8 zxx9`ygkfdOo9TkiLK2KGrfWG1Dc64x&QbzRe)EAU2T*cnS6~ykiR3~B1xIU88NloK zLl~0SK~13@f>{cD0!IZwY4wM&k}YUPNKrt+cDW)SDBWl)Xe)>boaPaD3rYW=5NHrV zNqH?IN&>tLOpa%#-*Xn4CV80)G@;4~N}?N}Q?{%M%#KGsPVaRQQfHK$zSTv@ka5EF z=Pp9#_=Ytex(VqRb8~|`K}Z8ipsBPAJdXQMvN<0A*RTrzyca6nV#V$WWqF+W4fZLusAcQ9ffnm>YQ|2{=l^{045zf&B*d8l#6$626%1^$^lU_7JNX6aE-{>LCR4 z5cbFeT?_;oJitHjh%53~uteV8>CNDS4#7nXFKlQM7J17&g$&t#ESV-C&^G<9r;u`Z z%ToB#XkN!HOj(XSOOg7%Q<$R0yahNeZ03W9-0v@JV!VIzj;Yx6ZXHv2?XMVw?z^lM$#xw=gdSWnRngF8V z{p~p*9)lUv4rWmI#*S$Vh-L>}$fGb_&Q~aq>A;TZg}y>|OgnZ=-{>nOAwHW^i5(n_ zEDj1F$NgXc9SZYnI%BkuJF@GiPw*4cVMG?^0r@IvdXB#kFQfGIDu1C0s5E2$bn5`2 zEl?hkA!*YW2MU=p9oaekPN0yqAb8Ah0jmPY$+KCr1ddPV2ojQqYGdr5UJxWSM_3Ac z-=@HCa6@Vi=y>LR)8&JOj8(z9Zy@OwRAhqtfLY)dhk}R#^K^wIAqQq=f&S_D(uIg$ubO0>m&zC~f+j2q7*Mq2%cXQ9>*dj-V5S zKo!aa<}AkvOQ#8d_8>3_%$lATDI|ueL7q`+dO(zr0ptDYjZs2zN}yi(4~8rSW`Pre zpbqvK$Z139Nsh69x?zk^`t%LaLKY~7wlVISz9U8`SybALX$!joFKDJ^1E_KEgFQ<@ za=JmRkfe|#s1*Of4mzC(+z#mBn4TIdWP~v1L99>-$2C5X{dYK~AIwB{_=8xX^yy0R zLPne?1(jI9Sz`LZ6d@55-O1Bs6NJ(jCrxij5OQU_KK)dJkSs!hL88z!#tGApCkmM| zUZ4Iq5n1j*tWfgwV@X14OgA~F|4S5-L1=prE0j8YN3xI<p97cd4N{Q#IbP75QhtVzq7c$d6KH1!uj7m9 zi!y{X*g)gPjyI-X$PiLR1ejf>P&MPM>ANzKV?!ZJXr-W(g0Kb?hrn+pN0uzdR}2Ct zr{Bs#R^*T^R0^4i;Dim^J>mkTof*>)WD1ERtN~$7qXwHn9?w&8C%y@HpcD|4yLX%;PP_ho_*afJ6;d5```Ca6>H)c>x zaYF<=zdQYcqd5EY{sJKdMZ`=NcfWBR!wA#uqL4n9u7-lF|!A6qq!bEfo1dl!2mv0uyL@Mo>XWV8Qf_WkQky zcSRJKKym^gLPB8q^jl>@F^sL#&C7*AI|vdXRCl?MB;$kWOUs2eT7b7Pg2upD;a7LF zg4WluK~8}Kk9>i*fvuPdZ!|DCE|@xfVx^E7XA|f)SOs=Rsp%Igg;d0!3W7Qd;2U{C zT@TQz)H8evtkWNI%SlgHs}d5JZd@s(MxmYe!Ir@7d;sR6*m(}w&LrY?Lou$I{;y8x zLckh9$Yv|Smw%D5y27Pue?&T)#M?ju-|&74_60el5IND{QUx?X{i6}$=-oOA@H zP3LSDlAd1QB*dx;UJgnR>zNc7!A@X?TF*FPx@xnKI^)*uzRf}fj7%Gww(oBdvS4I8 z0ji6{rYm*|`EB=Z6Pm`z^y13)$L&HEj7%LXx65`4$uNp@33Py>j+a$|4ZJu7blwS* z!29VzT|$M7OQ#>{5^`hQHl4d$NQ-g%bhBG+N5 z3H?GjjN7MAvSGFHRboNj(|KYEQf5AYv8NNfEsq7VqZ6@8T7gx8P2eA!H6zFLk2b7| zTwo?>{jk9F{s}@MOpRRApH2`m;JmPMnt&pY4 zoIZWRBq4dmrs*3d37IoCO@A>-$N{8NYqF3Zh_0F}WW(4refwmga>l0V`cs7dfcB_O z71Ck)&9*&is?bzMrqeU0f1D;{#B^)sbk*rX9*oV?^QH@#Gd53OHeINPqnTZSQ=jnw z^Rxv5BGdh52nm8VmnY5;%4ckzesPA-KgOlgFU%AwX6|X4KHY7WP$(N{Yq`Mk=?iBG znKPF*#s?Kwg^j5DT3%n|y^*tgwp zuFy_KrtLSUbIunsVmftmx-E!)aC3X%e4)*ZOr7(lt1T3I#kKlRi+~~%sMMIgFhfds zy6z$&K9g4$n*NLw~P^D(hPGIYcAz9mAwVgd>*pwr4gn>UoK7!)8piRmAg2uas# zK!%9Gbvx*&SApLgOb!Yx`iwf@<+d!K-MJb{9H6}mETHWZ778qm4%y&51kT)Sjt9Or z2{@_|HfL7HCPk*&kNUeScBj~Ek z1@L8WEa0VQETDyS&}9lN3M`t;6Bx~zS1>BDfTr7SfGl8fyup|Sx)b>Yh{tNi^np=< z1(caVw}~sTfR}BvfEMzB)-rx@-l+lbAl1C71aT?Ul^Z}B;Y*pBKymYc5j1KAT7{qpT8akR z+6##kP@L=mZ4m)iN)s4C8)9DXY!(o>1S;-ad6~cqWYt(46|5L8fV|3V23mXvK2#VK zK`X$K0rEGK0_${HZ(+G8S6((qs80Z?2gjiq(*aOeA7IRK)B*R3SwSH(0UnB=D=)x7 z0!j&>Xi{JSMY;kk6dy2xZb@UB&bUfQq#m@p+!3^>8&tP|k{jr#0MN#s1Dv2_8KcAs zasWsLsN4m|7A)jvfLz3B&WzP%-%hs(IO>49BrK4%!JvSf!3bN-#sb>KqQS%<4O$EV zUd;whc%UQ)Ud_g$$-Dxx$kYC?2+#EYD}?x*E-=8doB?P`43vum_Jds+0$zp8;&_1} zi(8Q$n#3%?D`gmzK*PKTK@-p{4&W7+%Am+65CPLAyoHs6TzOgWguNPg@fbL7z|t?l zke~j0rI14A4S3?k8-&e`tc}%X@E}iSR*9MxNN%r8lkIrM(Ah7X0^LH>KWq>ZovyP^h@Y`*y3INvYsMYZE7l3= zNxcwMUA%EH7v{o+<>r|tU#L`@i( zn*L1x8YpVWH2KeTjUZ7&rul!SM+b>coqp-C5NPvhJdd2zbo(RVBM;{A$bpVLkk^wF zo4)XfkOX7%^j$}UPIEW&f!Zqvzy}8KoDwo%d^_Ftl#m|d+vz!{gbW!EZl8BbD1wpc z>%{3lP75`IPPYlTz%Md=;u)b>#ud}wpAj-+Ts~dxtdJ7pp6Q-vh13{ZrWb&ybNoD=c`9oE39&-jK>iJ6yKffG~`gAZ!}bz#q(6H0Yo z&Ivth1l-aB9a0C{i@*U|OTx>dzyv*ln3qL?6ST8NffKYppG#m58|c(+CeYd20-)6; 
zd!|>M7iwZ$H~sf{p;X2*(-SWU88LpCJ_SUzOh0}>$bzwL+C?F4#%a@aE($p@9+{qh zQOJ(5Y5Ka0Lhg)brhmIAq{_Huy38e^AjZDw`Im%}7+a=ax+G-8xNbV@Ww77kFN5Va zT?YH@_GP%=*03ru^D-)Mg3h-E`;ASYZMyRnA#KLC>G@ZLVnOG_ToF=ZJTv{v6`^3p zzUdxUh0Gbj8`V@9r%qpXRY;My~^{%!Otk&PZSe z9q2k`=k&@$LV43;Z-Eo&hFe0QgGAom5&~_umbxvZ#n>?2^|sJ)#)j#xwl5n!u3 zR-heVfz1B_QxDSo0mfkf4J?2)uK+1l2d()79c&BoHAn!)p8?~5bWd0TKK_Ddk17)n zWS8&*w(0-l%kd;g3nP*i9x>1{bxdYVYe1&+N0>7$0Wm=P+(0yg8Pg0X4Yzh5$P6Zg z8Da{|pkpu;m_hAgxV1Botvz&ndioG17PL$YtnUKI3Vw6u z6CfI76{sD40K{Q6t7kd^3WyVIS&mylo#UoJ2o(9Iob<6hPy!+ z;)V`p1!l(%@BwEa?>l zvH>K`V#c%pB)0&rTakfBh+BclaRw+lG?)|w?(o5lSpgbG1G{zwbC%=sQ`5Dd3TZ2^ z=YaWK5Uz3q$j}YUS&r90=R<2w&wn8#UcUqEZdj~<&Dp`6<+yQH18AEeJo5=SGAJ@J zJ18(Iu)6Vbf*SM;ifjr@ptG^SOWw_x4uDKQzzph`AOcJX6s{a*OjnpehjN)QT>#OL za6SQQ4zfsq&VpwGt?C2C$_Zvra=XBsC2)EAuN1kUfCpgpNQv~~-*(XEHMqNE9G{$P z7I1{_U}JWCz?>zJ4)eMU$m>YS_3q#4C!YyPbH4yN^#ya56ij2|FXKbW%|CpS&Eel8@<-2jT?29_+x{`b=pp9@I~c7OyrSh5^< zJ#P_k6ctFDo{%Od$~^%jJb@+4@#u}|Yo7~A8_oa;%wWlK+<&SGGz|&&jwla5D8wKk zv;d@P0ZW$Sy5-YgcO(^53o$1{z6DL_5=$gxZnlCeNb2<z9AuFc!9McstA4wl%Ca{=!Z(nF6&ZN=xIu-9fgg7f;uS*+@Q`5Xru;oECs0V!ll51lyqN!biZK9a=bowy4@=wW$q6kfe$QMj^}4h z&v_*zmC*v4`3A*`5}z5<4^TisZV7JyE#GNi1*KZhMNrq^C53@-#%0Dd14M&u1+6>)aX8nSGtB`pKsPFZ z%FY{X)Ae5qi8;&wyI}$=sGtUo)w5(Ng3gI#$x>uhU))FN?1zyle*098L0;KN*D>%;Hyb)5YzW@@rz?$Va8MKcVDIN2G z794Up+JkzlFx{Y~4URWhvmE!V10TBr&r0CywU|Ls^#Ekv16J@r1X7jF18M`afQm6z zPymB=CV~zuV40qgBgbd_0;K5$7T<6qg6acE%?H*j$5WsKXPKwRyb+RP+B$Fg+qXh0 z-VJP!ItQLB7tQNLDVe#P7!}ypwt<>OY*|RvI~PPBJO!?vH$CQ^kgRbBDA{zdWjUS$ z*~W#)Dj*G@#Ae1c0i;9%G*-|C%JUPZuX!h=5IX}TIs??W0J#7uErV}VK?%(TAf*e~ zFhi3IG|$Kk8WUXsQnCVklM<*7ov!&_NHuu_NN@vNmgBTjE#T&WA_Js+U@&9a0g~In zmgP9Rrww%8Ej)%eARavcl0N`9P!V!n1hjIx0V=w0FoP2M#`i)-h9^Kn5+@k59QU4S zMah>e;GMltT?as_Kx1v4XQs=15R$In0TS53nB{l?qz$QU3OacaROW%VE^h#-+Q69Q z*a0fWkXm7k;6kfLi4EKoP+)QV!&s=q;P{8py~q_jEnDqf2%2>S3$fHWfkt1L${g(r z*S3N3JZrrf(*=;(po46g1l}>Pg{2`SP;mfl$igH1$7Iwh9HjFGBXSa9RA7T=JkW-b z2OuNZk!ss%AR`bSaNq?^>I#&BIsuGX0za8yr8ubl$&OUf&O+4((gxNuz44=vr~r$j zK$#Mo?C)ii+o#D)|W|3K|cZbWtgxrfD(y9{)ZG^hy% zs*hMeYb*soS&<2;HUz0;LkgW9Y%Zv8U_v;+jHv_E9O_`oa%=(>UTjF#f{L347G$*( zKx!v2WjWqND~=&v<0u3rE@tpqpPUH%vGEDWt>rV0zL|@EFAM zpF+iqN0&?g5@KZBKV9axP%zu;wJibyjoV9q3kiWWcK#FE&BQck>UK9q;mb@Qqoi4c z61vX(NP?wAy)R9mCpD?7rBJhffsUCFt(;Frw4i?a9QJ^jk8+d&K z1L*ukFzW;pXba;DCeR7A3gAP71SWzGD+0B@*+F}Q*c@3(vOsHVv%u;>Iw59EU~*&x zC0uq;TiuDF9&{uOyW=9JEYP?jg91Bf5(Rva5`#740wzZWZjcqA4y^)M2WSwS8Fa=} zH+TYJ5fkW4SStpQ`Rtm^6PV1ID-_s42ShDkvS#Q69lXS5#n8d10PzZF)`3xh9W*5_ z011)lavZ|qj4!8~aR}QmzMfvpA-qW*BBRN?0(^cHc<(%D0)Sb8Re?p|zuuV zOz%ES|HdV3#x(KcbUkk2{fwukzvC8O#MnH2B9Cy4_-W7uMWC5?5dDB53v~ZQfxzG)o1ik;sp;Hf)B4^=2l<@%?&6pg2r?`lz68%@Cz%+azRJEgh6cp zR?r{^cy^A}kt0js^Ys1v!l{D1p#6TV`iwO&;K2~1fET+@>Ug}oT>OkXD` zENRGK#&m{BffYOhc7DEtInxm)(0sBP(*Y)svq9<=Si#-)H(;+_nJzCRELwj7)CUKt z0ClmM990xp9d9sY2`pd(dH)F$@}*Qv+#r8HU{YiO4cs$3fJW&-O)pl^=F%@HcE17H z4H~I5V|u})zyoR~gPM>(psskq1U`L@RiCl`4>UO)&yqI?Ru>GjB}Wk*g*RWplVsT6*xidUQPvW5XG&) z1EP2om_dy+P~8Gb7|5r=sf%#U|%pRAg9Yc%*dzEF@tJ!mMoCY9n;N3gysAW zfWipW0|v$H0p=_P76nd$YfyKc0J#g~ste3nDh%A0g_w9C)71-DvXxjtBemeUUeGEV zP#aE>Yx>76BC6AmiwN^`NGPy^rm_X5-xCp*sD~ca&k9=G#thos1gg9sGuW*9j4Eg+ z>w(X#S5V>xZHs3GU2kcizzUj10w3qkC9sRrkpbj=@SHCzc!`uFW400>=;RUyC4NT+ z_bh1m1wbRR9(*w@sD=T>hXBa`K8oxh%0rP26sDl_utA+CNGgR)PjV`NFIj*liWqR> zf!LfJ$xff$lH~E`j}Q3LM-@911+(O|Syr!TGC1i5GO<2`^}u4LGhjz-RS>odGU# zK+XUa!AdL&Tp)i5C~!HhVaWomdJ7Qvt;VFlEU=#s>A+GB1s(+!sNp?~ilCB$BWrq( zn6Q+p1$c&)6*Te)D!@Rq_@IIvl-e1=$q^I?({G3gYZ!AXuz_wA1}&ukr9uTx(4lXv z;E_v4P}77#;2=2bEntL)-gE9pyk2zN+u!h>3tHyyrw+hB?PPryk<;KK*cvVc%cC+cr+JO 
zA3={cWCb5>$P7&a)6Yr>Yl0Sru!6>W6<7rBKr=4*EJFoQJVD$8>HvX@bR}kN$%9FO z6OuX@tr*UL7G8l00|C%N?&X}&qyaGpbQC9OGZ8Z=+E^VmvIPEe@IfOC=B6r1VJT|{ zGp0Kz<@^RlNLaicQn!48)GZ)6xGTUh z4@&bIOdpsOL6@i)fT!M|Ya!UzDhesEDT2xgX3$~x3W5qkfuMOYQ2heRf}r{ZoJW}z zz!?!-Gxabd*NQF7^~hBWs48U!ZBBz!DUhmh3N!eqOBAPZg93;hT0Knwou&pVVj+b- ztK$r2@UDASea0pD^lC6IU{b)lj9!-kfsgH3LK!?jnlE7B}=o0aOHoZhz5W0uAIsBcp`{UL3+5 z0uDd0o(_;6Xk;|7C~!d{0~{BKNa$fv;&SChj|493dPWvVW}g7PvmOx(6IehKI&hcD zf(9K}9cO@S10Q`3b_%TOpMkCzbU}^+XBH@h6o4j)pzR!JNyfkpcFF=4eo#YUs}hp} zWR|aC`%hV6Zbs7+ELjR70w)+j>E{Fs__736#|tc=`xRJq8BVbXoCHf2(Ai5-FR8SQ&LPNpxQuSCX*t&0w1VI0PWT0RNz$oNxsm1rAVOh)03biUHJ}2dydLVFFz&&!fx0s05n)|Ib+H&*GrKsmKPZ z&rvl%%va!6vzqG-k*EO(N(~Xv4@iy;4b7 zMNk0Lcm!Q)48aNl)7L2p$At2LhL|;&LX`MG;~$XJ&I(Pyjtr32=N(XLWz}GMz@o$^ ztq9&5cmQtYF|4ax$% z3e2D^z^cIQ$e3lusOeI-Lyr8(}1SN2=XZ|yT5&_JCprp&Iz--M3 zZk0fN!;0|DaaCb?xNks;(S39A3k%pcfe4R)lz}}$C~HefD+)qlLI@PNA6OKnAThxW ziU|!S22eItWCwLinCh89TQ->$xIxWQ_Ch62M^M7%1+Ds5U9QCC2u|8iU-5&|H?IP- z94P6sD)2+Rr68!l56beOehz3wCC_vlbzwOTWIb@TV9UT(fVyrhAm>bPQWw@{uZFO(B)Hs$6y?m)W=wxTZD3XnrZ+6pZ)ym;bA!$xfOZh3 zYikP6WV|!|sHU)fJ-AQ9XvMIF8PqfdO%yvyK-#UK_7DeX*pd-6Y6)`@Pa>;W|{ z*udLpSsg)b5n%;Z#~sYD@rE;y#sUKjuVa+L&lME?PJ%v48#9aLF?C)Pks8jveN z^UWY_`t^)oAngxmXOtV-?EJuN#`FWUcU25&R0Px(VN`$(agpN2x>e)n<^bF3T&XJ3adWj6c+fX$n+=L!kRju zMh0q=5Zu1t;Q+Of7O-R~$SR0HyX-5bTj~hQN-SYf1UF$9uqbhWPfig5_b)a~&(jeW z6W_w3Bmf?6697ds7c1xn7Y(Kj(`V}lt48hsC9VS?mw_Ub1vH+*0w2;i0BYBO4u0lW z;D%)jZZK0p655~;m(EgT=4Apkk5Cm&m(&$D7ZQMUl|bW=5HB5^o}??RB+07)+O!6q zpJo6l0ZH;Ium~KQK37-RfP)8=JDC*(reD<+wqg{RE}$pu12YS>CLW}mS>W^ZJU!ub z#^&kQ^n@F=1r$}d6+|7GvJ^!@ZHFvH0Z0a8b!=b-wX;4+F$qEjWHwB1(ic{mzEDJ1 zdis8SVP{rOP-Q-y-9T7Y9lTF~RiDuX+){=nXwYC2sPu6F-Cy7Vnj4+&Z6Msv$T|J8 zfv^@M=k$LD!g7pU(`5~X6&Vjsw>K1?!pm0N*xdbN?TzwAM9_vHv|6hR|P33PZb%-H8f!itQCrgIt# z%fZdlGZt0>a~Kpjr$-tK>w`HUNAw#D8!+yje!y5*6fSnhSlCdu8EnZHSeG2l5-k&9 zGw12-pp?xBI+XPRXviD7JBR^ZhJaRlf$M4oW`R9?pv&M{9W}C)K)cRBt_3+}sfn;Y zH>gu^f+0(RMPTpr`zFF-V3&Yo|Ck7aF052A6_(O^0B*TjfO0V?4X}bP8!|9wzQF<> zJW~`?fK}l)rl*(+n+42Z1@$`}XRv0OF|7dIIRlz2R{$@(2e&CSnP;#nK`#*pEdXa# z5Hn|P04W4b>4VdTf;eac1FJsc1Xd*;&~c(tIjow@9jsu>7|fU!fUFQRXU1yPjfKqu zjtZca^sE|8Ggu+bk_D`a&?8e>9e=RE`~qG`&+7Pr1$@o{cp*KjCi9Ew1!ltGr1<-z znXtYmR>$ap7rnERW3>sp(fNg!SaPz&B5@f~NF$fQCR0fOfQk*W!ULojW~U(Nb7UU^xeOJmjR$_y-}n)Kd5XBj@xsD`CI-JJ1oI25`B?j9TGMVL=|y=wSh6b2Fw6Q27Wd zkwLkjg9TRM&0$djEl*)kU;&k>ps^KD`7i^tE)1k$1*kv;kNPiQfsOjFVVUl3Ei6#) zxB=SP1Wz}B2GBH^R!xc4udR^WgA!&xCO3Y zF2V*m#&HMO4#=d#4Dg`n9;jJzkUAQy3S90xP66!{Vbx&T!6I;mbNWVWVNq{ZM^GOW zRLUZH#3x{Z1`-AhML?Svm@a@0c%5JXjd)FGTqGb;4{6bZ91ki1*c7-0z|%~un#>o# z#xj9o24ZjrW0vDn(B>;s@TNUbaH5tb3fuy+psph;sFXn}6G7t8fo4!mxdB}2Uj}Uh zWt^^NE3D4AXu6-Ra2Mm=={IeKHQBB=vhGB2n=UUo3WkQNjQuNw19sj=z7|G7vXh`=cmiL z3WHW)N4pB|QfLIvsqjE5d=5}!2~;G4=Jy1iF@x^kG<6f+&D5}H`X@JG@J-6%?!xV2 z=RsF%8Xz`Ufz}!|ESmn#U08bhd3Rw>v8Mh`q`QnjTh}>2Gv)8xg;kh(`lpL{2s<#& znI7sP?8rE0`g9NBQpQ`;c|C<~8ShN@22ppXcX|pZGJ-FCp5`UY$9QHtzn8EDBctkc z4{zZ(!Tq3#D(J2}cFw<181NABx&6sQySV0SFEfiQm8}UG@Ex@;xvHA%A zWcu-Any;`KXu-OLukc*P57V#t3M&ZQe+XWCfAAsrZZPf()7kulwHYNq^D2stY#K}~ ziUJCtMPQ1G3apOoRty0Ote_(qJrr0Sm8}>OAiM|#R!0>ph5`sLLxIsz2UK7y2uz>i zC+yCwq#!ZvI?LH9BxHP@Hy&=;tCR=VShzF1p!d&1LX7- z&%wK)l%~7-3#&3pPS5ri)?{2XeX75(DvPARKd$Nf{Dmdul|ZwC3|RsPxIwkt0TCrv zP%lA|MZtBsq9hN?^v3tXqSLtpgzfpcp@;E<6)5pcj|>nl5>o_uj?mU=&H9*)* za@+1^=wa}z8ca9RAxY#xx}x@Ul|W$!DQ(c<_Uxdg&7idQKm=5y?*KVea(Z2$a2SiE zCKJQ-jhVv2(_aS)TPt;dmRU(d%lZ|mirU~pCsh%Yw+=wGc?1b7)JrO8D{zC3m1kEF z2OZrHn%f5{J(8}(V#cHZX>A@zSKnh#?Iou3SD@iQn$g1Rqi+~8v;LC4QNWL5+( z4q|oO0Cu0iF3`4ZP*YezayoCYuqEVV;E8bdC^Vel7t}`xn$p)}F2yBCO&d0Gg8#0ENX15tP%B 
zSRG+!Cow-jgtG-`!wXB9k~CA~N;?4r4Vn_KhMFRykN0~B39R)VWhK7a)mO@9lGDSlL(DW%&Nfx)Fh5~FdNeOg_ z8n_F#fYFR;1!%qsw0TiN;2E=`?sWT5VKGLD>0zP5Zt{|Fl`j}UR|*pCK&nqLW(jmo7n~w2#mGJV zU6`;jk#0+$Nx%U5#1;IQ`=hVUg(qF~VHa<0lETPVbEt)^vkT zu7gIS6uEhrxEVl)(}5B(FL+%A$ZMc0dK3kE_@Jxbpd;qsxWq78#EV-&(^0O(QLfaH z!Gl*+LDLa@c{DE~0f7?>uL!)K@PZ*rpcmRdz+$MwY+;V+T`|JKGJRmbL2o6)={I}u zEpB30+_M z+GZT!iC>5l8iUQNi zV}<2JB^6j5S3G3^O)j%K-T*NK1m;a&6)UXB3OeXta{7f>VWoP|G4Sk;60l`R-~vnm zbd5H2T@tv6Qvltc4HHmc2eUxM2WXL#0(h~Tr~;_GSIKg;_`}5j@(8E^2RRee1OU}; zGe8%y@+z=pgO8_YRbVq?>Jd?3Wl~_D?ieR59|I||pnYNmHb>?X$3L592teHR0W?zb zK{!ibK4@wlRJ?O5u_=JUVJ)))n=XR@_&^m_(Ap@lvsgj5TYwiN3M;TGuutC;C#=B9 zZpL&##F0@^VEWTIVI@Y6=^XLGii`r&wc>@9CE3lG9)Q)FF|7~*jrKaO5Xo}<-OxEb zEne7yd3Iyx^o8-l!a zLHkFU6(m755EHltQoxyH1i>R9lH8!7FAb(SX-WdqO%sIm7zL)+32@4DyMgK=Hc+J^ zIlV7I*g!x+pr22HRYAgx2|OdqHT_(Ia0R2pboWHzD5mvmr!P(vwqV-6cKY2!VHt4& z(10N;Xp_BygaA10yYdPv=x!HC5|(FV`u%jeVX|-qCt6MdC;D^A!kp+5pxnz3o~INB zrEt)B_7c;1Q-l@dB@_e{mywek>2#dcF@e(71QAFly+cG19AyI2)3Susjsy+x zP6UV27iQ%23Tl7IPmj+N)`X=~p6L_vgk`w7K*By1>osk=qMQ6JjDxseWOK|DxT6rVn$NZ3wQ9MoOn z1TCl&0S)8|JYGc^Q4|DMilF8RsO=3}3daIjpa~lCht?~QP#2gE_TmbZGzD^r{Pg+~ zVYcbUCBlL-5>T}>z-o73SF2t>ZRa!rMPrcF8cb&xl~h6NGeL_;7!}2s928U?nSEIt zz{x_@ktv&xiIIVsTR})c%n@|-yMma&Oz4HQpcadws{*@%7^vyO#GoLi;0mfuLCqQ^ z(7J0yR|Pf&F-MLpFb^cf22wFywNzL@QOr>wO9|AT1`YYLf#;DSwrFI5iU5I`)5A)I zGjyTWf#!B`*~g*D#GvS^z@Z>E{dK9ZOc0vM?4Zj%m>^~&`vA0D5SNi?UYH(PCahQw zHkSk159I*0wZJ>QRUH`>RMQ>hi%T4t%9LdC6onOJ92v6}R0|aq92vkp05)*fMAZ}2 zEp=*W>X>n0L1Qz!IrD=Q1s(+j&>`3|{z{-j`h>GU)0k}8irfn93Nnri3Y^l}iaZKp z3T%!dB01oW1?UhnP~?EH0{3*@a$&jpS$v>on@EF}&{Mn}+H8ZckO6oJD7&7DZTW=0Mi7G#$(I5Hw!h7yJ-9$!eX%4M6nzt&QM&B;$Rf-qu7n&DwNv_0LxUWjl}Pal(+v(I6f032gW@$*`(ddL)XK(XKZ-5rW+9>yZWMUZ-I2qSmjSdY zMM2o{Kd87<5O!oLQBd_(5O%Ep&){9;%HjZ8LJi8lpjuXmO#za771-7)xhinNGcAV# zn*u1)a<5f#b5dw)F=u{}qQI))2F<=4(%B%T917t4%?(z{4pFM)-eS)D0d%o0G@pYM zaw^F9*DLWVu!B`{C7(f;J2M7h~nhFWb1|Pu>(#GK^l9Q#xr@#l-25M*t zfetN(nBvL{?(RJh0jYvJMTrY+xdN90AL!Z(b_MR~{58Uo^*j*&If`T}@Iv&13R7^8 zln=B5En5jxDzIfMabKl>@2XKko^c2RRT?OBl(e4fddrT zJZqg4+C*E-nHy3SSQT8I6xtNrTFjY$q$r3@-%}$jmBt0O5axGC=z zSjeFOa)uHc$Rq`E1vUi^&{1a)lR?=>2qF)fZF(S*1*&4%z!5WjU9GS*x3~h>`An<| z;?u9x3MyrA(c z_O*~IjSZ6Ip$T~{XqX$6J7$1foJdI;bQd`&8?k}HTEVrY-kf2%9A}C9OnXa(9$4sw1S&D;8eq{zzJ3k4SfYrik<;# z>asd6cv{bprNFHqBrul`+?;b{0JVt0<35fIjvAnAs=&h{7g8M=73CF#6@&$@LkL)oeC`$Q(SJ$d4a4NC;f=ePkP#lVaiU@GPazIN6eNYJj2~KEH1Ca(TONI+8 zica5OFD%B&qrk4nHT_n-uoSbH0@w8K^};HtppK&%(+5zjfSOU}%r`(xeKV#nso)R+ zk8rU%egFjwkAjfEJU%531${SO1_d6_QcWHOHc+FNO@Tv^i-(aL+`i zq)cjhLxZqrIv1#+$DzOu%2S-6cy2Lg0^N6@?hYD1RA5)&%TnT7tH7?Ho~0xLk!QY; zs=%%&uE3|r&BF@1U{=bJK~YXY9(>*uBPgI885QLe*c7-G#Ir%SSg|T_P5;v%tX0nq z(g!NR*c8}7d+zxa)It4wMn#ATAVb*{xfS@7coq3U%CsP%4AP~*rpV31g0M{)VjE1# zkwHj1FKrYSWE7jey;0cELf?#OLK-+U z!Cfg<#|fYjE>LLB2Ztue|6Gcoo9`4*gH54HSeH?Kx?hv9c%pi?qPPO5q6j!F*tiuS z9s+Hi$W|10g!oHdffM8>MSf5d$p}>3aw_nGq!|?XKvJNyx^gkVCjZZ31W-03{;m(l$^b2Ft|epy*>$6j5MT zGjdJe-z+R<0P54SEAT`5v@5{j1S-10*s2=fe3AcM0pNQ%{Q1BhY=6@)?p^Fbr_j9E%@-~yRV zK`tAVN!h@qsK6XP1x^Jn1)eO>petw)i!ob~T>;5g3Tz6zp1h2pJSYqbhU`2gBgY07 zPbGT=J_QkX^&&+FQ0I3;s*)p!-jWKM!BJNbQxFGPr>L)B1hS25`q>s?n|e7=Fes=h z$SJXbGKqqM1-JtMx&jMSX0k(yMGkNo32r!WNM|crD6lEG&AV9 z1vUj|kY+0dYmjEpLRHHwB~a*t@`ROwHAs(?f+bvwf{=m~JU$e}6!<~@Q&NT(!^#R$ z3jEoi6|dQ#Vi?kIQ~=f8@ERu^T!n%9bNY(xV3kVp3S0$BoC^F|N*s!it`d(wDD@(G zXW+s=O9`~B6V#0ajif7yg4U(0DS$2m706N&ogUXFtSKQ1xpGxY0X(kD?kEE4GqEXf zO`qQ;tWdA4zzHqxLH^@Y5CIirVha2Ud$h1! 
z0rVzKfmK1?jOhi4b~R^ykftCu-M2$nynZgY?gS-jQ27l~`v;^(-i+x7h;}t+{s78l ztd1W*Zf14-0iiB{>IWf#MPS`vD|*r)R&;=9S99iubZ`mS0Ggy_b?g99LIR8VxIuUA zW-GEYIVd==I516r4042+0*3-0*e@V|se$&9Wq}&S*$V6e3&2*0DX4)`9!y3JRP(EW zk{oC?AR}lsAZP&xd$tlkI2b`y4lg)3%$aYbDX=TbEAWCg5JE~`c?EVw9#C;+q@b+8 znXM$j2HL9tYbq)$aAbp;3fTg4rcdkywM0S9Z_pwPUPTTCPDN!_aM*K#-KzjXOF=nHL0Q4YTUt>WRMvp%XHbEo$fE!?i5t?;U{vH)5K-XER$>Ef&v0b)mR1CX z0UyY>iX4vhik$Mypixv_P;Ca8TI6%f&Q_jHdgVfT8_?TeuHH%gIt0u<^z zjvSEb9|i?h1xUepAYFk&fj3);OOXwfAwe_V;-G}+&Qh-+t{@H$fINhYl=!k0MHDy` z*}yRhnl}QSw+mhx%?7H(*&RhxVJ$UQN02-use%Vwl-LwRz$@E9O-_>Jhd~Si+~s4hms#gA&{zRNzry z&sJgs*9wkI+2Hd^*&OSOvlKbN2?e}@8!{RWiZfPl400-RfwI2>Xv>j;5GbR93Osg! znbSY_2&?*l!dcN(0n}XL$Wj9FK*bmjXfzB|-w0$Wfo1^|#lb32#>K=zwH>>o2%<)s zp3*C>n$e0Cbf^va&GxIC3feHk86`Ausq|k>0uLu&17GI&R6AS z08QY7wonUa39RG;%?~05qvhB^D-yvII-vPnkhHNb0|U6>%AlY*{ow>*QC^g3c<>|* z3m3P7=5+aq!lJyZkmu+@lG0kp?H?j0%6urBD7yffVBt1nIs>A)rgKgbmR1Di3Gm{F z6Uryr@oy>6dIvo_Y?CMuAE3{Q@nKo&05V8^`m0I8LX4Wz|4tHC zz!F;1ohA!QVF@U9WXA}D$M(UKUkp|ZJ3yY{p1xzUuq}AjY6ILKM=)lsJYX|y@l%AQ z)xj5qUP8ZN_ zKG3Wu8`4bI4^ZzNG*bzpdCZtTfabRvz*|H?EYQV>petD+GYsHZ`M{JVum&6}EzC-s zES#Vd77%H2deu~6Jv%N(7SO~bw77(X=b0(2#0e_y(;OKUd8V7s6n0?d;boleJ6%{& z3Zc%7=|?JPlE)F$?R30(X8P8d!Zu7l&rJV3Q&_{DOM^*9fy+?=wD3`h2h^R1EwOlz zuEYa6E*~_8CSU+6=fDRh+(`#@j}$=Li$N<5LH5Wf@JvsdB^(FZ@0P8|4H^Le)qsqk zYd}Ej0zpxOND*e>r48KN3S2A>3XC9gK#OPvPO~XNu0#TbCg|P~E`f*4kR1gOy`Xhs zT;ReD!Urueo1QaUn4=!D{>Ysdyf1JIsBNGD9+70sQh>#?4oH?!gGqs<9yA*&!V1~L z!mY>!ntoHzP-It70k6>FcI9OU$2J#e{S5=?gdZg~N84;A4o7ZZN6?TrM~P#jY@swS zg94l5Cx&b#j_IFg3+vduWXR@YW@KVy03GnB><&5!4|=p8H)u=%5-K2nfwB?CdMHp? zE5QXyA&5jR1z7~bi5h&<56ux)loWJS0iCB0iWOGSg-Xy#LV@4Y|I88AQRWtCV1Z^7 zR~83QQxTkBkW=q;@43R#pu#3wk$Za1TwzHw2~af#CS(NGfiv_P(5Wpf4xk`%WPt1& zUcn5u7rcQT9uM+dpnd!Zm&@~kH)w-a6@ZEn?&*T_gryjHrt8cTRsxrRpaXFrWyTwn zbtaGkxN{!zy#20u!g8XZfdkMn7@(0cP>_n})0z=8{K0zu#ha4i^n91sRh82EerV@B!l-2PHE9gec>D&v1FM}?je6T>+A9M+& z;X+|K2T6f$P(kyBHA|5XbU0*@A{S_0{s|j+eFJDCjYXCcBj_qaJ_Sb5J>ppWAu(M( zN0?*!`h~*!`s=|l*}$#@Iu4dt?W&7}RqLTkVZf*7F={aBD1kO+GnHgPS44u+@dS3z zI0Puiflg5bZ9W90p$Uu%%;55b7j{?N4>rgGX%6tUXP^}`pmNvo16!5?4=6t}Kz0_f zfci8XpcNmA%uJpR3VNWaaDLFx1k?)90Z9sy0$+Kdmra98NT{EfAVI=}El3)#3QJEn zUMw8#D(P6Szyw`GE3LrfC|{<)C9nZ>>=mmf^9^=Ta~x_O__jzs1qN*9ffrdmUo0$L z4_ZdVz|9SA^fM{2fO`DWj!Y#WKj|p(f;!;Z~5&QUs0uf-jf`p9KO92W|y{ zEJtQgcM`VP6x725AHFkzF-u7tG}X)O09g_&pa5FgssOS>kxhXk(UGwPG#&wR253Tu zJxfU*d=?EnIzUtFNU;F20laq+6!h{SXF`J>yyBA=)OrUQ2#x|4CI50pQMGeUe za?hp0-kzX29Z;|{!-Jg}HP}~hkP_@4mkNv4i^Dvr1UdZ&n(ElHlt4#hA^Dfp@c;+t zOi-vY&}mbkou23JbO|^z2>bvo*uMZ)!0On+p5@4_09v8)f-y^A1vuAT-~gXn0!_!J@#Rz&(BQGGSePi2fVc_2bYk1=0>`!h;%)x(v5CK$VCzXk&r& zbjRhwqTJw>;);~V%c14M{@|>DGfrl3k`gCbD}=e+F*B=%f(p5r_1X$yekJr^#}RBS z3JgwXrYo-$*6{+_sN;BG$}|Dc)<6YNvNM!cFhI)VCrI+x+LgkBUIJOL{iMvGnp|0d zML{}CiP@3CUBM7ELkyY)0PUS7(zzZW=Q4l}GGl@?I$;eSaGGMs5`c8Vpr#QlhH6#` zb9o}dp4IUJCn&))DS*O4QeYZ*m*)je@NP1wnecRsBS5D=UnML;Z0Of3FgTrsdWXqD zK|+IxLxV{`m*EI2sBnhg1p=xXA}01ZsqXHZ2ftGrV3aEG)QT z2l$#j(E4srl&#n?oqwIMHnE9;Ujeag+R#-|e|qye;ZRHPp7|YYir}*pjH8AxVFM&2$vFb8hVN(>DF1TLU$oBxmNuYrpR>vJXAe-ix z7jP-CDu_5T7FscU0WoE*89#w&ez4F-5K|n?`~YIQnKQot(LB>9tQQt%NUz zG{u2WLE{0PBLO-;1iS=Tfzi<+TTvaBCKZK1tCAfV6y+URz^a8ESrkP;Q??4y;9Vzj zpur(fzX5dLn1URr9RO}Ju|t~xptJly6BVF4KEMqCMJWYl@R{e#(M?e=op18MI}cD zMbM1^3gA`=hk^oVE`u4|`~WRv0nK@^YA{V;Q~+D2#17gm2U_~y5y1m4Il$-XfqOro zLKIv{g7#~5@R&0lKU!v?CS*g(MmI!qEYN~gf;2vRB`uyK0*Mqzm|c5olW zAzO)2K}10myhol_L3q02Sz#$DE(NUZH%0})>EOoOgQvo*Onlssu#(#(%x(voD-eJV zZ80i9j>Z$1&8Z{+9{W(@0hK;H@M8eUvNajJXMi8n-(a$0*a7mYf*I2m5G`lMv;huP>_J^DA8cL!3DadUD1(2U>_@LoHevmT?Oy}PsEGG>KPx8(21qUJ@w}LciKv){IM+D?j4W>VwN}wSiGp0Y_ 
zk(eKx3Ou0X@5l(w=%5xI78TR;E(?okW7DSqTHpycnH{8m2A2}g^!1m8C89tkgO|jx zIyUfRIf80hXwn1?;(!+(uxc_l@R&0{0EO@iaBQ-I$0gdhK^4gd9wkbX@AM;Eg=Gyk zL%L(UO6;I&AAH8a4{*i;kD7osz>=FiH-Yg{(P}+oMcTmuQ^Sa{^F|6PP%~F{$EdkMTW=sn}+Z#6ULh{}MURvip zp6LcVgju;k=Qy)E9smW;bcY?n;z4p|OfNt#VmD*@0$Sb0>WG>vUx1FZbo|1b`xtSOD#fKEVeo<8CoH-edqZ z6uxjtD@cO(LkgS$d-Mz+cz}}C(Se_wI>mO7#r=>i|r%sX&zQ0j{tAYTZ8&bq2s-~pX;@&N9iZwv|o z3OrC7pYVZC1tK%`<-iJb(0Ocp+~6(BOrYipc(-C^0LDW|=X$fH)2c%%ErkHRT+cO2BOkxFU9R zMW9RpRm7nvtKiGd4cdnYI_v_{5N36}!4F#I4CT zLn(Ln@F|IdQy@6uk?Im3aZrK-&2(rm?ch@sSAZ5+;-CWS0IYOy1Q%H1&;siSMFo}} zxWIyB>;vEe3pz{84jRsZEDV5l-Bdscu-K8Y5Hzm`&U*qYri&dB)@78K?s7!fmQiAQ z(-C21i7n81a))ds22c_JpJWYc+8jIyZp~vIVPSx@=9Q$OdxT+~%R*iz1@O590t&j* z^^OWlNebvPFhE*q4FaIH4Y(z$z$UPKdg4)GVQx}_Jn*`(!t~2Wg$+s9m3&OtN(7_% z$E_$ked94O@SbiDtUh8DULR zl+^M@5F@paT|y~Le{)7yi=v#!^F=(kK{kW2pSF3VCoP7?@1SMWKooze*2uTp{@X^m#V?EB2`fq zRBEqDWp!lWHe;F~q`;~mYsSU1sWq_rlj0LSn&9wunKBAJR<~3hwPA23t|f>%p6G$0GK7D zr9+S|DSlSyPzz|2Qpa*dHqe<;%#MtT+MwbJ+yQhYDMUd7aEt=eEw6x!jD#x`mt-%l z2pb@Uo8yit(*ztD9nUjB!VOZ|lNWA{SB1Su^_@yG^@!{2%64hbwu?0 z7{RlNsB>Eao2CceCEZ3<*$%EOffb-NdXOgW3h;RlupzAJ@9qlAk{WHw({t|$b1;*e zXu#UpSseKUwoSWFx{tsafpvPrePID9&>?A{46VU51+)`R0ore6gY;X4r^ntB<|Ea2 zD%15I2urhznlWt#MBp^Ax?pN=ZY}+s8ljaPOyd2 zkA!oTxCCZ#DlmX1pl%3*)+B%pBe~NhDGFLsL3%w5c1OZvVb*#c#FQI{f)Go+0z2qp zSkTIH1zAPVnO~sZB(%@oz?Y>2TA~7(l~NEuUve^m543CwGQutn8DWPs7ZrFA1MP0S zpgrpBpwfazfzd~UNrziO2{h&a-X;T?@dvfnSRH5ZWq~6CRB(dE>S04gGbrjdD}cMr zpw)-E472z^BSWC&py1J=6`-TBQ z>@USLan!TaDKj`8nBM$GIEb-h`n5O0#Y_iyrVIWS7M0w=134@mv>oyUPnH6&;|?D1 zHCNL&vWloppZ`{vwf+NG{02CNLFt^=5j5Y)>v(|&d^$NZg95JtuO{;e$ZRMtXaV{J zUXYmM1l}wqRx_p*ARen3(*|AzUdIi*S&otdRUj54$i5Z4Sprv>LCQ6lR`8lJ?cjA} zRAdD?1T+mMP{owv05W|7uaX$}HV6JNUXX>N=FC5M6nGsQctM8z;K@=D10BuItIycM ztHcUYqRHIAs|fNJgBjBdkPAf3nUUPZ40RVXlDj6|o8JFUSj-0GrmK+FHZKdZ)1b~; zd#^*lkx>A8=rfA~uLjc$UPUlHVfw>&!kUbmr*pp-c3|8!-RHfq0%OPY-1ow|j2+Vj zehZ5UH}E3D`~f&@8>Y|sB&55l_=>(4=GstrnKxarZfzH(Bb^HKN2jD~M!HEHS>Mj!~q8{)}zxY8om2uZ}^N(P! zM|>1^W85`;{zqXY#*XQSJ__4QZRP}>0Ll#7N68Dl4v|TLQ-MXGd%E-|VRQD2pjmcT z-suK!g(V4un!XQKZ-|1Aw9)Uw%^AC=m;Msg6WakQCB)2` zH?S!1I)aYMXH;MjXr8|Fm#{hOZ_vro(;dSlC75{~@fQL5o>&vJ5E+S?q)fkKVyl3o z5XFINI=$eJFpuIcP6b|Fh6gO*QXF&{5g+7gKVI;~bbJDrrqBH&?7{|0#R4m)KmQ}F zt#FACTv9J#&2r>(WKdw02Av1apup?cF{48O)N(NTE3C%KZ^qQYIz1sxNP_Xe^ms-Q zF=1Hs1=_a^t-hwu|0`@rgF1@~TxWr90^xOB0jk#owsL_AX;`fVts0aCsuV;O>RAPL za4Ik>K(CNz6{rSPYD}=240_23q$U%?sL8;WjG)(KATPpdGBK2zj15|ou_0=*>F53l zOQwOUv29$A{~@Q4@`6foNDT{h>+;qH0Y_z6HO2%!69J?Lc6R|2!UteK3*^DgLay+9 z{|l>ta#HbsVPg{oK7kG{MOM(hc;YLJLNj6M>F1}2r)KKRTeQ6odIeSfIP~q z!0QOQOre=z`K@-l@ts**%yQV%8F;eF@V|oBm&1c4R2SkgRGl5zZj{M-;EfI;x zno&d`HJ2(XmX!6-tzlyiX@)KDRia(Er;-dH+KKvBr?#?tBEm_<~WZY-TH!y=;3*gf5cMI@WCd-@I*kqBr~m@dXDV#dfn z-IrBF98^gBU;~}_E2hKicz`EMfy)uRfEL_xRp0_2`2s68kRyLJD@`Jw2V89YU?ULu zQeb8E9qiy#PAEjAG$KTr_>i$l=CYv}R51f*69R%{4? 
zPPsvecOg(ggNR=%b`dqk=IM#-A|58~?4Ty%26jbIX?B1e6euUyvK06PKoX7@*g;)6 zCIxPgq66$oq6%WQ0!ydgWf#$ATs@tWLqv;l+H?~R5q-wX(^EJ^bQo7npU5F%$+&d- zX%3N4#%0qLIYq3{GdRd5SO#z9q(&Ss0Cgsy-5QX`9R;mDda{P(Iuz)=7ejRN4( ze*te6{N@?~Q0)fl!Xc^^$BC~y1RQk`de}f^GjEmvtg{5xqrn968nl+#57GngED372e?J_+_5&4bU`5v>WL~zK>CWf`bHAa zzT%AOraU4VIYjo2Zk=itaMVRKlfWIm3!r0sSmAvm(DHZ&P+0?>R!8m~v4T?d1D@&k zp9)A%f5IaY3G0OOfIDuGp5Tt@3A`fW^$6c7F+t;w2~x|06C9*1a)JYte?W8w!a|N$i3OxelNn#&DIupkSi)QQr$fMz3zmqu6nGusK7rO9X@$JdwiU>+aR01D z)5ru(eXzm`NzV?DWgyS(1nGgNJ}ywk1IHMscRT$ypNJ0Qis>BuBKC~Sr+e^=s4$+H zp35&{!q_%_5xrab-qXcNUfip|s-E>_65f#Rz(}M&=>=?VJPZSU_ zs&51x#0NHh18ALk0|#hIgx7HgXO1gLZ{W zi3v1P%%A|i*h7$8p`O=q0vBlDir4W6C+JG-7A}y&DJXUea)Y9O0+%9a32oS=T8FyxLk(AW=`BQv8w+w==U zBJxV$i-bTEdAyDbKrV8e4!vcbMPMo?$W2S83kr(}+b-Zzv#j~)E}H#0@K(*9(ln9-Xji;1OZU;`T(|{*YN>YmcR@)1qM)p{J>?-#K6PPtsoiz zO7V_AplUz{HGzgKc{Q1Tz|??l%Ti(iZHt-S`(Bur(jf^gdJIYI-rFSLsD-E&!0lVo zhs+G?LC2bDFc~Ncf{xHYF9KRXXPya~F*Sg~nZb;y2gFCuwoTw{+W}HCfjdiq4^(1t z!?NuZZsd#zvdnP>Hz+m?4XpBNFiqe#W10ano>!B30h>AV0+4x(Rt#&v ziA@3AA>+;xSk8%2#_)lb=Ml(R8^8nhs5xs3$Z69<#YKeKHgG$F@=~6-h+aK-Y#%mw zpa9AvU)Tgzf^*RxZtzqg^j1Y7aAMxUtq4Aqlh+ZFpFjgIpkjj8@c`6LX3!WTSpON2 zjSirRLQq}?l?=KJCm=;KuO{;WZgb`f+)|)K57G&9!Ua&63S8n7SUG)@xQKH71MVyZ zCX5XJ1jT6}qcxZwfKF88b$kI1Z;0!_qa+`=Aw|UxkTnWS*t7c|EC&7n`4x0s6=+VC z30?p+K=;})C}1f7T6mBP0EojHKnwp71%SDvh(hLnKJak^%;5D1(BcA|Tspv}^E%Gp z24`o|cYOP~i_I?tJYKvG1^ za|K8>s2JrjV_E`YgBlH>v24&LIZ!pQfG10!4;*V-Fs##H+Q6d-x{;YlV8wKKDG_nT z_tVX#M2s05re{lufbQb!kP^}4YvBboMHCcRK@Mt|zF$g2oaq72^glNQhB`=cB*f8BlK|~G2jZzRXVq87FS3yLav3dGZ1rdA3mg!HSlDvu{ zA&iaFlN3dC7+a?ID?+qvR}}GOY?{uhB%+SwV9*ufkd?mzP1Bu}M6wuHPhYDfV#+vc z`eP*#ZN}#5{K_H*jLp*>lts+g8`zgCun9CzuT>V=4d>Xah^RBp-JY%@qRGhA*))BU zs>mV6)zd@Oz@~Jo!AudIenw41nz4EM3pEi9#+K=V>LME%Tc#gY7l~kOoi3vx;={On zdZvbmGUJ))y&57)j4P&Z&=3)0+%x^ShKQ!%EYL1>&;_t;3S0^tprB*a6!BwRH$6a8 z#Fuf)^m&>ho{W9dUucSiF`k)juO%YE*fu>vOGKaX%=8W|5og9N)6Zy$s4T zn6YoVyS9iUW83sjZIK44Hqe@L(0;fB?4X&zM=VS%^^BmIzz5R}r-?|ofj0Sp&VK|S zXTc)S3_1{=Ri9A=ye5G`0e+~BE(3Tk3ur|JhZ3hbGw8Mug)9X&@KrOM)5~;3)EHZ* z&(#qT7h-d40A0VU!K5N^gIR-#L!fo~NgWY07Ds!5mg&s8A}X@2(6t#IjG#sVBeX(+ zUJpBcqpPS$J>(qoIgARd;NHmj{SM|#Qy4)xoEmTwqjUQ(zIeGQB}hL|p3zIO~9R zkAn6_vMDe_JbDpy_Yvs)7*HU9V(0OTJH65?dYD5*XaXfat0W2g9RRtb)YkS)v;ArM}?oEe)zyu@{)EzfYXaXyT z8>-{j*#@zvpOKNNUYWsBAzO)6g+XEZ4}Fnb#tYNa3`8!8{pJN#l;E@X6+nCKG(aa2 zZl3OEC?d`Dn|FG)p@=SH_w<>DA|_0Kc&DE?6tR~03)TXSWKe$?;{D~|cvb=Je1|r- z5iJJLNreocO7$Is0<*x<=}|@^3XE;j8?Fk839veX?uKV_YzAHA%PcT;`a&ZS1%W@H zVGJS17q1{UyKbC*(MTkK@$hsdV-YRJ!_$3@MGP2cOs@m+Hcwv(;@zEo%UDE-@%Hq; z#v;=h&rhFfA~KJ0)^rn75p|~7T+;(C@QZMBffvCuD)0%+nOaItyMOeBo) z#&lD25g#OzRxp7EDHkw-_8Egrnq@8$Xn`>4028SEg_;CvFLP-!A7BDyA4zTnF2@sK z-8pYxbzucFex$df*kRHDNB))M+$Vb1J86_dl5xOp6UMfA}YeGxdb+| zfRvu#S7PC10-a_(o!?SKnQ_+ib@t#wF5Xha1XN-=h-fh0o4(LrM0fftOOg4E_ojDQ zi8wL+W}AM_N<@+I&-4#gB4Uh9)7h=TKGm=miG)hZOR)?50cRwzdpCg3284QN`W|bM z7{>e4C2d4p7*|b?vk`G)d@y~fjff9YP9b4}mxD5A~SHvP4uh>0LL{=o_D2k7WB zfeX_OoJ7nR=T1*|60v8zFnzg`h$f@d^ovd+@{AX!e{>R2X527c)LF!paozN2XAvXD z71R5i;c=j72#EnPP>SgQ53@pJ0GgmWKu1~eh=QaW*s~P41ujgtb^+U0QI6)a^0w*lP=y-_e zfxPVjcFkH3uxlQ8pd^qN;8Sj3P6+S>M}DCvSmOduu*OfGV2!F?A}&ZN0@4wOX>9Nk z(Pw-*eS;U+wcOq!I-)HcpbYeZONp76OM%Pr1y>fgBL8$BZxJO>oECVCXp6RSgOp6* zR$}G_9T4BaoyDyvIDLb+hys$YKz%VT#|7LlU%m7e(P8EnST|k5N5r35P+;BkEFZ7~ z=lX~Qg2MTOkBA7U_2Ucni<&P~d8jW``Ep;d@>{-;Xi@M3D>wBMaW;D-2y)O39%%V+ zfd^DRfR0#H03GqmufXNF0(1tb0Oaf)G0?I>E(HQ*2G=am`Jk){Oo}|yW&FWo{_&O~ zveOg%VR>Nxbn^faGsY*=^8!SikbDkW!^7nWTH*pJN>2uexPv?hDp@2T30DA=GC`w| zh)nJs2=;eQAUu^LN*aEU@(sL5<;Juikr>9N>9IlJM7JnN#EtRk^p`;*52UelV_Bfx 
z*y&G#MKt8kEuAKy!0gEA&dVg_cw*@^(BWy0r$96#x8s?m)3rlHq!^D+cMB0Q2h}to zA~uXY(|3o6I5752{}Uo&#CT=8eyB($W8d_dp(5&R7nV*F5NMfxEL6mg@yvA5FcBrj zw&@mOBHE5upqX(7Kd5wP0r_D8KWI>kS%FVM0JOJ}L4iw?c>!3O86LrbW-BqNFfi3KIVdoIj+A3?lmuN1!sy7Tz)<4Y(6C6fke5kHfx%G_ zw02a13EZM%1d|K`4WP~_lP<#nMqWlK1twjFNsOSAU_gt0lo+Nj%#{%lXAn3J?(SS* z1T`>^GD8{<2d3X&A*nfidANuqD)608 zfy14b0W?46$XGbtUz}5F`oBmKz3FMuB0HGYPu;E%Bf`YU)Ual|N~{PoBctDRjX04M zu~ppA!3ti`tSU#A0+#~2z@6!f;zTAgZG1f4G+tyS?is&#lPp?fB(H8|5)GQ7PY@qcxjE)yBOcM}jntm`* zM9d7lj7fpjk+BeTz7K=bdFZiM3Jgvcz)VKa*`FF&N}LLy^&%IU9nUj3UYO3CB$BQQ zv6exB!I8n55wtoHw8Rt?0iYveq3y`!(;MfCh)>^;BqGeXbo!Ab5naZv>7SBBG}$if zoF*W!e7a(?h$7?C>CVX_&5WJXk0*xubCMc-QD6mw- zf=R%U5j1zt3Km!_cjrD3LsJY)a#z)X=7mdc=Vgu#&`%duw)_|z!SJPCti z%amyXplMkq@Xl?p{0wCI2~(yAWr;NMG_Wf$3EUJ^VAf|0nckQ!B0c?AmPi8AoK@3f zvPC4>n!wW2tFuKcr5o54K_@58Sp~JGLV+3Fcw`Z1ntnc8WH005>Ag83wmkN3ybMx~ zjG*Hp7`I=`5z%5~Yv0l;;HW&^yH+H3dU2k}FXji0&C?6=MUF7;pKe+pa*YLaYUT8a zg(4Eu)eA+W7;jH^Dim>NTs^(3P(+dO_VhJ{BKC~Cr@tu_F;%yBQxs7U)@4}5q$mU0 zDO~8PAm(WAR>TY1xWc5ssUYmAktMKmx<`?SALEJX(~Cr!8Fx*WE*42;JUqRzSVWW! zw83B4efpYWkqGwfvpT_RVYo{~%$S%QraP91NKc<1D#SKDpi{(k`uq|RE~bV>(^r>> zII=i0DvC{iQz8-%67ek+QDK_hKE1e9#E5BC`}F0dB2jXC!ReYoLD&(}U*>dV5ZKA4 zAgs@LgIS4tdO(qg*ml!0k%Np(bB|5`Q7&S}bmG`_g9?#JOq~y=AE^+rVEX@X`o9Vh z6@lgb?E;D-JgnS~5{iF` z`sGTIXCQa&t`boO(GRLbWI^=*Dv?eQ-Bm4O0i~s<->(KsZvR&;;?BtQYwC3OT9G@F zlewT>2VqChWw?xv_ujOCCY2UWx2Y4c(Od-f$Q>3XPEgL}&}Cp$FGd54}>=RL7+`4^LpNJwOAjOgOqn`6rn6lZG2i}sl1Mfq)19{I8B;_|nY!Ay&z~algi*H>Jo5tT zkb@>{KwU}&Hi5t3!GMtpFEf011kM8Z=DY3T(@{m6#P6A$N3mPA@R!7vYBJ z0eNA=^euBl3K*NGtIicMVf-{bYOaVPEdCnjil{J7oxW_YNI28aNz?z&6>$d9F7rfG z1bY@u2XBnyWpvb5WOnRWG`(`3h?Zc(qUiz(i~@NIERF`Cuz51Qevya` zi)b-+Pk*pj#Fz2XbiE}aVzSEx6Xf{oGO!Tgm0HX%$}3 zG$lKb$a765qYNneA5>#6H$`+ z%K>ZkfX-0@m6S}5b3l>IC2(f?gJmLm(*>4`sHj6cZvdK#;Bd46H7P+?0dRp%mgdlB zbWsA$i-1n5jb1LI%hJFuuylIQauG$=CiWm+hUvSOi6BQb88DnU z=rC@a{(ZHG4`au4mo*~FAa$8*M6B6C1HWoXLdu$VtXIwiyW1C14 z(=XoXXSRvBfoPHKBD##t)19`9I5IX*Z`m&5s0nUTf(j2dNDv7?1VB|88@OnIH#6UD z7x95w#=#A0R!(=@0Uo`{+99ILIBWWh9U@v#d3K2Wxg8>EOh0(1f7=0G^I*0UJT@H# zqLxmt+X-^w_IW!++8LSFADyngTf~g%z|rmTyFmg!c&C4yAR;pT&mIvI##hq~_lgMD zUuI=w0rfQ=FlH-ps4ysSD6oKUI_3Zkm@v0bzA!;Sfde$h$6Bwz;V1zb z1!i&pB@G=V`20NRkTL}h&=Lv_B~CLY0|n4f_XNf)u=@lyv6wMgfb@V;J1exC3+h%V zaDYzP0cQeM1vY`boFHR;5L1mjptdlFE`tXsuP`{OFjzD4fUMDEb}(lSP~ZSnb_$H3 zGJpX*MaH4e7&EzNgp6xapsZU1@z)WWztZFCV}4RhmMMD7KNRv zD+cNzv4TR*@xYGhUB^Vqg;qcgv{jNZ2Ty7^?%1(i__&BIBNwRe$e_R_Eih+#)CrMf z#{TL1PKabP&Y7-tQe-aU^yw!~icDhcn;v~iq)qrTL)KaaCP&aAppGY)vlMs)7Ek|j zO2mz^d%E>$5ieG-x2E4YEg~a`=(U1QTyW!M01GoYU6_9Lw1~IXfy*5Njv|h$E<@%S zQ05>SZgdDZiaEZx4wjp)yIV+sanW?Y-9mAUi?*-bE!4%xxMRA-ULkwN9n&lJ3bipl zn*M*UkeOuP{{{g^Hbe`ROMxBKP8QfX-EW_eG}H9|)3f&p88L16KYji_Au&0`a&>ml zDYOlsm2yl9?2ap#vK)8*pMH6tkOO1$bdmi+?u<{S$LtsKXIwsQk$|WGJ9wl6q>>{` zfkmKY`o;Z1#VlKy8QG>gG79-IuAV;cjED{6tm(JUh-ff2Pv6Kaq{as7IByr6KJllZ z-}Jh(A{C6=r++^y62E2gFo-@vvzFtdGX1f1XkwV74 z=?AZh$O}#04w>I+U}0nim$eMjKV21ZV*EG#;58A^>0Z}FxV#!U71%VG1VAgBK$kme zFiC)RSTTaG7-3`vT`>YmzzUE-3U+9D$L`Jxs^(eEm^2hXmy>{Jbijs+C~7c?C~_#U zIU2xm&Oc^JXuPN;AW!&BWM%=v`!K<6~+cCbJ+zh@tHF-fXY`kP@Tapum`;7 zA9QaDxH2(g@=#!NJi!2(cRCB{{)sXuutDVl6xbXufaUf~@3}4_DG;Fm8c9%K1DD9` z0{f?Ly)L34-oy^F15(U@Z{!7yz&v1>{^q(!I`5Uw9RiNhj@v(j((Cku8zPd7Gp1MF z5K(75G=0$xkqpQ@Cf`jFMaI_Y1~)}SIR7z1qhz}8O%bK~MovgmTEMY^smP2;K!F9+ zO<_}D6?g+$qQIihD1*EN2iEeIFk@0sU;$NVpi-`x6V%!^L072DU;u7ruxK)Cm@``_ zuz&)MO@ReegRwX&fEH!dgEV>|FUn#9HLF3v0&37ZWJA_uv1l>}fZF3A>%pPJqyREN zfmxshGyut>$sA$M!~oLyfC1EQJ_a366v$R$QDIZ;Pl1vpKF| z$pUp;1WvLG{AN;ORbZRmaa%-=g;j$|Vfw1uBKnN;rr)|P;+nXEF-w8fQ37_6Aq#kI 
zDvZ6;3)afSF`l1(eyxlHtqxd&rdg8C!?r%9x|~B+Hu4NZiYGD09gZXBu|)L zyiO*Vamw`L>tswBCr|&oPDY$@(sa@FG8-)4!0JX`(6SoHswoQvP&?Iec~2X3dJj?# zxbiZBwmpEl@Js?d)BmlP(Pw-!U2lVo0%%AKTrz{=8Z;LXyFn(AX<5(o0~=%{8P83> zwL!*0XBTK04mg!CDlj>I0aZ;m1e6%S_5G zj!%zw2?%se&)F#BCkdMU05wQBl(@jnc2Ki`QGrw7-1N&E!JQVqO)^%D=cjval2K(m zIX!=qj1l9z>5DeWI5C|U08QSDO#ih>MvCn@xRE)1;Sm{;>3W-G_!u`&FW4m`&IWPc z^r+1;(rjBm^PC(IHT|1q1laa~9Y1~PW|<_>CU!+g$q$-k1SM=xi*dTl78zCfYanNH zfEqaP*?DdSo;n2pXPXQ+ zL~qM<+ifx`peg#~Z8GXiSLRIb*(Re6?z?W2kz)G@8n$PdK4F`T*!1_?WVm47hx$-_ zyNoSY6R1hi!IUL%e0tn=85ySUf2P-L2hXTXJSZd14(iWuo!2>CVTX*=^atBzL>RA6 z|GHg912jJ&ze7e7)CX%kA|nQJp%?SbGp*Af9+Hv6?t$r_cgUzS-4vMK2#ODboiYN9 z=cYUElu@28wNplHdeu%D8Bp}?zaS$%ef>@uR;DMs)9>7q(U^X3rwj)(XfWgDPMHK) zq)yl-Bf`rBX<-OtDX%NXK_e?*jApF4fwZg6Nh?vxSJQ(!Y=`oIJ#GaWxLWjWpfB@yt7 z75qs=XOB!c$ma#6GNRLG?U9+ocxt-&UKuOK>(h(&%4mTc4PH+2eygj zAfqJo0X#VH0o;{ffz+p)4#>F59)OkzERfj?1#o8t(sUF6ry-^3W(UDZVB$d;WyYJ+ zvk%IsGM<|b;!V#wC?m$n0@`Qe*l?y}dhK=@sp)SI%7`#c6NaQ7fg>_}(6lEtz4nNV zxC}VdLBsqU0!RZAI~cP-0}}I&%ScY|JtV`)yzoTF^nlAU;-HKMn&Se+B^yW;Br4^n z|34(d14)gi6jVTs35maOw~X|3u=_r;gB%N1 z1QX<7VG{t2GaZouJ6n!v>(=RWkH|PNUZ4Kp2zXcq0ZZV1VCx0 z0mRWbCZi<9;&_4yG}i-;3&`;Pb#VXv%=CvxWkjbRJ|-i|wgFQ92_BOX=e~eYKAr!V zjKp+_<1)gaNC-SGBQo9gqzoV9_vr!0Wo(fLfk2`8@7(lx$7Pfd!F}tvj1b5%fyZTF z0U|N|)^QnLt_G$oM@W-u`oR-OnRfq48Mf&qCuGz?feg;3|BuVCvV%IfTmq+2)q-TE z|2`qZ$%rOlcU(r0@gvBIh|<9BxC}Qln*iA1M$-#U$goWRc1(tE`sI@{mO|i$B8vlP zd_$kHfl1&AFKEyXoGnGCdz_Mq=RX13)(#mT1&z5(-*yT-9Qfpvj4^0h1fJE|K!cbQ z&rH`kEu+l%eY)Ri8GXj5)9X&ls4h^zrpug>kp-3E)@NjtK;>%u z85udo@6+qf$Y{!)s7sGP)KBrJ${^ zOlC|MKuT{gf;y4lMQUx_pq(EN7?qg8I}Skoc(5V0XJvv!p1~~wFUe8>t+$*0_^ix# zogdpF(=p)P^&j9IZUxBkdfXEgm^kVgL7T}MSf^KQl+m7UcwR<01Uw@KUaSdTCBot; zk);HxYrtjoEG`95S)stLzzRAJkVS(@Ly;3y<0~kFWS~=O=LA5RTA$Gdbg%+w-`w=S z`((6~*x&;eNXtM$er6Sbbi#9{OI(nVvB?1U5kccl3FgcN(Dik!0`J&i!faVpQOCWGVyg32lo|1lb3& zVFqaI29$7E1^NWRp$u97#ReU2TEmFgW6Z-YHTAQW7 z;&^}&G~5VY%^?c9WEecc3Q2L`^CeG!>;k1c5PbqXUc`|lZ~piveLCLL(ja#aA009N}Jfm+FqGnleKm#iu< zJ2ICjv4O_FS@ap_Fe!1k@-jhAo8tlPT7*<q@%ZP}uLt3m5!=b5g`oYUGA_6Rq;5@Iu0&=OqH6E}iCkxn3*XMRlyDB5e%j5t) z(-@TI9WPAxyCS2&1u}^loS4BHup7ezF$OYf3U=c|e-ML)M^b59A6Ch{Cle zrvJYpqsjPWy5?0G>G~N=N*rcP3qVt%9A-=_Kr37~KqG?9kue)`La@MO4rqLAVFHhh zf)D@W0Uf6RE@(C|nKA7EX*~eas=(s7g9&{3bD0vez&~D4pZEk8eVWV%nCi`$FMu?H zhSOO=tDr!GDKD6^90kDX(eVirXw35s6X*yI1s2erh6fm5Fk_kn>Rf@21Tbg5 z0rCKg;|-9)8KC~_0%nlL8Q?vLpkZMpkZ&AW%N!ZAlsE)FfJg7PFe}x=Po9DJ#&H5z z-v(xQmT>FzP;~p6B~ArS1!jS%u%*TI;E@2hT5xg&^)?+FKpQUwJ_T^&Qw;HZ!I@p!i^OWN>U~ zXlSTVV3sZdxoiSRl-ZHN&x+vyD8x=MgT};J92bD1w%&0EGicWcD2-fT1~1HJ0r?#g zbnxH+)g>pG!MjgEgcojo3-eS;XHF#bFUf!@c)?-9H876~A>X8h^FcWu})nf^4f`Nt< z4&Ykn0VrBQ+kHXyfTPv%26L9cS8%n{z=AVY9cQqBDiOyYh`i+p+O5mt_yHjTiuxDK z=FAOh$2{YabWR{8qQ?H z!R-i&PLSDq=T1+!EhCzNY8<%x36jR|Uv_j`zvIa)5HqLWy)7eE5Apm|&_E+3cu27e zvfqQSRgBOHDo8Lu?16YQ8WwEO84^ft2elc%%g7)aa1_gga?tdPcVtBCLH+^HjzP>q zwj9ZBa1L7lDv?1u)j?4Ls$5wdFEGJ!*a18_476#S1+)bnbjU2I@^S=iHD__$z>-xD z69VP16)fh=Cy;U&sBy@W<+v2oyaDGgh#SGF@&q`4fwsc`hZT|_anSkZpjrr=zgD7Y zbmawY-$p5HM0h~Mw;&Hef)-qyPdC)U)?NaMLfbEpb-EIwtjMh`ShRq(o1-_kpc4k5 z=|=_y@N6GwULEQfP_FMHk@5z`7K@tQsPC)7#rYqeC4;we$m(gXM zFn#@f88hz744%9UpyPEMFHiq)U&fB{{B->XG7gM~rWZVrkzqVP{UK4`l?{p%d@Z6(7lnu!BZ}L1Sm@AIcaq zJ^&f03R>&~8A#>@w;Kdt6X;+wgg|Xl(DF|3^rZqjs4a*vX~v1^@sDJb*=~cTjM+iM zl_JyUJdzPLe*mjiAwdSN^gt#-Okr|F$^pFKpcDZ0TDw3)kmA$-Kavq*d@^0=v5XDV zykpY?9?R@wyfK~siHtb!uD|U9j!cf*{E_bQX^OQ3&x;VT(O#-r0WzmhQo`R&;&8Fi*FN2l|@meJ(x6IOtR4ZFa>>8`J3 zESMG_n_mA~Mw{`&^wqCr>=+MDfAt!?dx`Ijj49ifqpjd|?;x+mzL61NJUBh?4LImF zzL7Cu+&}%<8yPjm-s!w=WxAODoR~iIt&Be7lj$el%A_$q*>3Po#*>lp=JcxfGTltK 
zc&E$UkdvLR@!uVx6-$xl+#?{+>KFX9ZG0vHO@UzS!#+lnw zzQ|NFGQOJr;Hyjw|4cz8Mg^8^P)f-bc*Z^5F`QkG3v$ho0<*xJ>CN9{wlGebZt-2F zfN|3FmEUDTynm)JNkh&xm>`<1!~t422)ag&Ux5X5Y67SU!Q`O8s=x{A!h`ncE2)4^ zs%iKj-yFcptiYk5vK+K>>4S(Pv*PrFUqlo+6gV6|h-4}7PcQr-Obw!r{Eh`s#ry(`ri=WPQB?-r z5v0VW!0jkssKl+nrNHg2Bp|KG3u1w=r?e9HbU_#0RaIX0hkC&HXjQwKR-7=w<9Z932QbV6Au?3w<8;v%jV9@%frp>$PVVQ zyYuq!aB(|=&JO2wlWrWCax`8vJBA)`U66k{=>$6E33S#0N0uU|0uSh{1JH;EXx>@^6orhx zWz-qDrfdF|kzwSS?)F>8ngeWxe4*p?^%}CG)BQPQc|cc3@;LryEOca4Grz~T6UDN6yw`N{?k2uDUM z1_lLAP-1(*1nMe-PCQf+0G;E<3!3Erz?7vR#00vi;;)R17?%QvDbp7w&{>&G+%P)@ zrd$1$kxK&|j02jWRN!#jzzjagfkU7153`amxa1QC9my&RN}n92Oh1^-nHabg1i+Sa zD{zAX2UJ6FfL8Z4uqbdif|}l-@jPZwYG=t(;1!r6sKBAm*fV|KUm5ZG4i->6gU%S> zGh;ddIvtGLis1~C0tdL+57Ed1S`neZE-(+QaRrMKBiJE~AcqKnVi0ty4hQHY9wkO- z3x)&Kz)|3EoWYW%!~_#l;Lv2Az+%q4fklBsm*EKLk`Fcpu;BEle=V7Am3uNlr43 z42pb?42rzWpmK@V9h~M_6nH^b3n+2wGJub_fFyh+9y6vFptKJ;i-Q8Ef$4O@K&kYH0Eqk}AaIrk6v_t#l{i632Ru9ix8A10^fa<*p(MLZBn`IP@7W2!a<@LY>FU4bpu^ zP*E5}pAb|O1Z7V_kk?=u9$?XMM^I4`q@n(XASA@VI#>nzSU^GW0gH||f{OAW9WMkG z#Xydd{Y5He$80FCi-D5#h-&k$1Ja0J~r!{G?JYm{Gs z9dvLOhd$#9Athc=8du;2`Cb%sxHSjp{+a0ynPi3QLD%j;$`5e`c?DSo1!(zkLI^n? zML-b^y5v&Ykpp(h7l-3Hp)3U{M<$SaL5|{3kOrT$azRLeLqSTPfx(*bj1b778-x@& z!DWdM_(UGiRpX8l;H?~t(~Fs9HR>-2fsU+ZRNw)X7mT1dJ|kq!AgTa5Qw)?~;O7A| zI*Nmu4d;ZQc7nqilK4UAub44C0ENN}Ay5M2aC{(?C9sPRWZVZKB_>`*P~H>DQse|x z#vBSFV8{LtQsAfuEfJGZ5CumrX#7&(6}TK02H6F==#oQ&3EVv45r_ks)*%cvtw9)M z8u)-?CTQS-`$-a@JEDa^$MrCP>Nf>wUB~3W?0~IKu7_008-zgzHG(Y#-5Uss!M&Uy z`;G`Bu5p&&kpRWT1+cOM!XVdzmYgssaDa;F9m3|!CxjjALCqmhapekXk~!WG22G)I zIGzyB5}3gTGVuB$An${War^+*%b~&aLRg>~?EF6v#S98?MY;?>gu%@bP38~6iYkf<5{?Yk z3=9eqj!apgN(1B@Wpm~R5d{tfZYzckk?E&dWu+n~fcOw+&H%L`Cy0O|r~%xD?BD{G z$-KLuCyXAw}PSqFQ`~%2Nh!MptQg{-GxooP8W15p@;&jfFvk{W(XrD zeI!5?7l#JZ6k$a+P;h0|tpJ6Q;{suLJ_WZfRtQgj&L*qJ$TMAx zT~=+nvbwBbdbT1L_+&m%402jATmbn;(u`>fh!!(r+5n=3&6t*eXhAck1t3o{fG#I> zWGqx-ls02p0}^3T5LEziR)9D!K)wg%d=Xd^BCHr$yHl#U!375PBb zF(bIt2OZ*S#`Hiyfl+}Mv@)N=5p)GS2WY;ON#H$X^_LPTx7`o`EslOa{RoGwTK$x( zkPV)oC;%;JRb)}%apcJY9fb&r2~P0Yx9~OtIEtV(CpR}J2Z5?JM+QY^M+QYEkRL!f z3sm|-N?iq31@N5#oUjrWlzJzCwrhh98)MgCieP}|6UPMtS)l1fP$Gk5yaoZ#4Gf?& z!IjOKJ3t&|bLJVKc0vOv96*g^`9dWwM;1^Oz~#sVqTp2}52(o?r8q%=1r%^Iz#4vo zR+4}RMp2cC)GKf}t^m)(b2xSgWC^SXmrff5kn^_y=&)K2P39E>=1dHrhLQsKEGh=j zVmA(5h6x~d?GON`e6VGE*c=($6}XEaH?=76flAt40!r-CpojxI>Hug}IB1u>f(g4c zs9B`I4?5hJL!WVn04S{=5b)(?QDBFxr(*|=IDuLc3OtI;pmrDE^ut`T#?udR$x2S= z=a%KwJpnNeWRMao)F{UbU|+$^y&#|jE+DgM%kjwM=~H=R^|(HO^nq?S z;Gce$N7l`D4zfCqEXO6te1R;-HE=!;2dM7>ZYV(;kUY}^cx6?&9)OH|zz^#3)bq;P z)$@YF5?o?|TI?_QK^X;9n}Ae6l82)}HXjoMBWQUSI0u0>qey{=L;2izSwa0isQHka z9~Ai96qywm6+z8n(8VdtysT0@%#icmIUFDGXW2q}YVw6pTfjqjAR}JzgU;U6U}A9N zWdWrRMg>q?4-{)~(|DMt^K!~cO`psstKTkXx4<cjtrm@-m!rr3)0KvaQwlZ1-dnmL4gBAeE=P_$^vRKLi!%!pt6z^(vR1Q{r`GE_7r9Cq`ya zk3-s#Nl{dRQ&EP8iFoH>CjOJFW|cwqq-a+M_p z&X#jPogNMirWss{T%cwZhb}_{mn$#0>HmP!m6r?DcKX4o2&Qjvg3>PN3LVgxv;wn& ztb&AqF~sl=poXsi4YYA+GJoJyQUYvQETCcvNsJ3k43zpo z9>Pp=)(oQH8%?HH%FD{*OFg3C5Q2sRD5fqzLjhEPAcgYcc8&= z0~E%rAjgB7-yE7uOE|dE^8!8xfqHbh3`h=ofpE|VBnN>mXoETE2a1CrCVYW9=mW?> zEMNyQgBBf;>>y!mi4u~6K&hTXgXsq+s0G&m8U+KDq@apx0{m_<4#x&=P!jFnh9%J% z+{md29DJ?}Q@CM~%?Qp-8cY+o6}fo8SJERA0$2_zjv49Ta>gA`Q2`1W1#UB@6(FZ_ znK3N@Idub)Q$g3maX793Idz(}th|f{(*}@jETFS4!BL4Glk#|M6XsTsp1x5-R=8dU zQtE*&*a3~fa)8`$0_uhXAUA+6iGy_ZIUK=vx`8f?gQm6{+{igv7@S_OV1(8MkUN>N zxzn0qda=B$D!w!)L}U~{0J)jljOhi)%^#4$?EyH7Ux305Tnha_iDFQ&>$)<0fjb(M z$3bPz2auy#K$mwSq8Ka(6~__92KWOVhc5g904ZlXKq;EbjH!VKG|s~VD#tg$LBA*I=5!13rHkmf2uQh8b29;&X@~w}SF=MJ{dyE=5pz zh^Uzs@F3SrD|i&RKsDI~9&p9P4U0bk$9gvfUdMX3BJh|w=(aj>1t|pq1#W>4;IZH* 
zJW70^6O1a=M#}_=HDG5;)2L(Pw zAyDI%!|?-HE2Q28HN=nbf`&abn11k>F&zN2HJLx~m^1GH%>vY);LU3uLM^MprCcUzzg%dW4)pf$m0!spvfaf1rE@KXiNAYw%y=0V_E=aYcgNpHD{i| z2kO=FK`fiW2eYhx1s|GipwlfWmvEB9I|!cF+I;cue#I)C(WjvjpBSgF1#^_>dEyv) z7Kkb^OfQp{Rl=8txUp#k7q0kpagn8qlPp~vWa(nZsS970WCMjRBP4W@q6QS?psoum zsHX}_XbtR$z6o2Fk|?NK0_&ZC4%7z?^C&QahW2@(6C|LfJ|}pTWcpG8S+RN+1@OQR z=-Nrpb*5%a9gy9w9N^1+pi^&3?2cU7e9UZ&p!;6gK?{``6xeuKK_g9!SpqrWZqfvH zaA^g)f*LxWFadM{4rtY=FE1;o@CMx$#4i9@g8*vLfu={M`zgwb*Mm|y_(~v{t&N-t z;GrgP*G_}!4;!Sf^96MC6o&@W0(Q{FMH);q*g-iHl%PMbnK7*Z|E33hO>fGz+6DHee%zQT^&6agu&2i^ap#OGMA zz^1_GCauT@niYhtjR*CsxY7DmH$XnT0S;<#H%Uc-9n?htucZg6XGc~qWXALWr2YXy zJzBR)6wtqfp;l{IhJk;@uT zE(eV+fF|WYW69uBN}H2gK@^k%!FPcxDTA&n1`m7BU^fTd!>+vCk+}#o=v#5iP;EPfqoq0akyfk#A5i~Bu4bC@^xoPMKBM11N0wvIt3!4Jc z;s+MU;s;RW04kC}7h8Dq3oH->jZ1@SY{wOBpo*TuaRwWx!v-29(q-7e29nZbUcsiQ z0vV#_WmHg6R94_o6ackK;iWat^bS>7Rbw7Vp$%T-4GB~pE>K1T*~TZ(#11nB)X#(( zGyQ?8tVk*rjhrBjpcC;SO+wW9;~Q+q(E(Db!E}I4kr7lYUSU&W0_A1}4o#*bY@n$d z22cw;OUy{Y|!Z%9#An1xd4(wff0N>K8J#`9n%9)fPtsr>;8x&M;1$@!0*ZFX^w{(tYO{xf z%m_*ljv#}X1e&I6X~-IeZeRtSGROpqUS&|npA|Gf#xJmo6J#DJLT-STt#D{EUtm>K zRp0~_CL9V(3hV-#Sd;`nw@x@RD)K6DDl&tzB1Bw40Fv=&JXD}9m3ta_ObLIyi zUx6;D-~c-TWC_TE53CRs9Gc88Sk0M#uqtplf@YIJH%)LjJ^+UTcm_rq+9riXvVa*= z2dJymzxL~X!0gY>M zINo3Z)n^wV`q&h>LA4cht;h|~`US8yC2rVS5d|*r4B`XO`VG)MLi_^1*&OR3C&O}p zuO{IDE$LL?7x=@g0Lp70SYSg_T%bXU7c8K4C5-TOCEVbIiY1X|E5a7xfNo>NmRl#l zFA&iJH5NbzMrbhoV1Z=U9#%wl{lfy!uD#R$X~`-IwXmY)*8b^++OncXpo<#$6&MBH zfamb$z%789kDL|3_oVPJf=2OWI6+McM@=_g2Jm3x^j+Gra&ay25qAX!M~*DV9%Q~i zmg5vS9~4XzM6*;F__-B075TvZ69Mo{3ZJ4NXdDLAT>&?7I3bfp4BQOU{dHs`8JniB z)RB$m*aN!C&6UMrdZGfS^mKJySsBLZ(_M6B4H&0SFV&R|5!$e1ngDdE3)n3?mTbSR zD?5*oQDJ+PzHAF4)6ccj3}p@28rQW5II>OO$R%__K@QY%2b~3Q`w!?0D91}6O4M=p zpAG?mS)$x`rXO@<7oEPwkyYhCaSMKsB!lCDThkL7B{ir2HIWTwoH0GnRJNLN()6>YvLTF61PuD+2Iv|SLq9zg=K=Ml6M zQh`-qKlGvs(7tOX&@GHC8cd*5SsOSCU3nR#92+2{f@US7ZmB779AIk_qAv76-?Mh6+Uv z(0m9p=*nDy5=GFpjvymIUDl8++fRC>J+#Ym`#}&>R1)HxvQ3gFX#l1bc2r(W%88f1|6RX z>PPy3whL-7X_zs2D6l9%rc2G3LO}PPIyQW;VJ=i)b8KKMRA2%ftI1SSq`+BJq`*;x zICLMhneGGwxZwo8>LdcZx(syE{q%5a*?PwP)3000HZe|^9%Un2#5isGSsU3t_g$jg zhgg|dAWJ(xFlH;Useo>#V+76VaG5hRfJ;p_1y%vbCI9*^+;0E8CC=FtAftk~vd&w#@E}QOWC#%S~ zY+}cm(^kH*{7vn_gfiD?i=GTb6%%sGIC>{*5p{GAeL@{1)#nD-RO{ zmE&MRsGoMZ%jPmMt|8OiD)1t%;*(k;x(|JTCwYgb9#fIYn#w>xJ>FWb!Yq_2*nI-_5_Yl}Q-7HA9hx-S(4G${f z7^eqZlogx)I7oI5!(+R z%Fbn6KV2eBb_=7z^s`~IC5-#GJB7<$W@0=zT`WrW0OP^!*P~=5nK+N^oCZ3^^xU24 zKVxLo1zOn^SV09N_@)63f!6H?v9jWfOn0VD_l%Q0#`u1^K)kFp(}yL~b>n3LX?rmxPBHDbDSW%~ULStrS*Y@qr{1-aN{DeQ}z+#rRlO+ zvNnvnr$=SUhA{r!zCKG0418ShMAS0tOtIAgj{L z&p2y(K#8oB@Lxet&^9nCv4gID1n2eXwI#BWOcyRppHU*~q`I05bfdVVq8ZZ?&}u_Q zGo}Tg;}uqbN>c?k@U>BmT+{!T$o4bxPwy|4jbq$1{avZ76yx0KoMp1TLT4^O8YYb3 zl_w02CoW81Q6{U2P5g10tclPGgnH1~NsebOOxG-zm1Vp(-Mw7a7+D%L&jFI|DVHr^ zI&oq8zj9f5rZX3&%U8(OFn*Xmr$W}0@znH76|znsS@{ZCF&B1Ff721&NepI;44`@# zB8%<-uq-3D0_gBl29SLPm9qNWC)r@7$%E;uDrI#Uo2TEXlywk;RC~Oj6}g~`JRoO{ zG)>p50xOBBlC=~^QUX5gAEstam8`PD1n_w>pkaK_#vI7S!3rD#vxF5G6gV}RCrp1| zB`Z@8ISH2&bl@190w?(L98hOe;FO>Or#@o=w4u&q%?MiY4_f1|$(&)%T%o|}2s&s1 z#QUL2Ld&LZG@EWFB}89q8NzMg>;T5`xir9tO?DlkF=ic296?CcF-XM?Q)-AfIsTR^wbvOup}R$zyR z+77TvCT<0GMGlZJPk<7yBYcLR6VyXH!w6c=5AKRVk9%NN-~<=v%cmy_35iZWQzI+l z3OYFywACGSq~#VaNSs^%o6ZO_7-d`uH zCU{y%5qv5%XsiWvS?#v%$LeHL7#TNC*KUxtk=P>Q3YT|c0Pm}5XmAkNJiWd_R)cZw z^c5g#>GX>Yve}GFr`tBlI?JsRhF&fPS_sSnTHP(g0O<96NA9^>0g^<)fiVzmuZ%jW1KqOx>;61e5x>{1_zy)2PzN2 zm%0cnonF)|Tg>=z`m<))d5o*4x31lCVa?3UGMTt0nbx2!DV+Ue^d)Y)!XX~y;2Uv|s-F*0tQZqY02&$woLN3X0L zqb9`P9W0>Z13)1F+DZ)(`@sYg+W}%tKi?;tC$neOGy#FF{GjDB(DO(@sSC7IP+;lw 
z$bQ*CgEf$_0Pj`Zzz8w|RM>!w-oTip#Hs*dePGBESRjvH2?u4r3<%wMo-y3qt#8&$BoE(l2}Fgq~>gX{+F@>75u4Nz>x zq`k*Wqb+@0_jcfXY8EbK1o)Iv32^YNwNya(%_PN#?I+?Cdo=N%@dyfeUhvO(?+)Gij!qS zK}Q`{OqRXIJ-vFqY%yc=^!M{+Wtke-r}Hh4EoD42y=8%{9^?J#|D41<8Rt*`yFgZv zarbojg|ZHe_R|v<%9;u8Wdogk$e_Rlx?q4^ppAd}@`bWjK~0gmMY7%E3!6Fx9FH=< zni`0fgZkS)jG={L@!2 zh8W(oM7CZ0HOTNINQQ$tQ4NcxgAL!*+&SHSsceA4VKz`V2!cVUi8e6vGAMBAGBhze z@@6^m7fwIERMuCPMOq1TXal1Hm*e(XodS;M?sN$#ut+Phm@(~OoW8I}LTvi^#j=Le z|16YMoX)>e)@l0fWwH`%pbMiL7ES-TOx6*!@{D`B)pFTPv2AUw0*<^ePlI+6vw=LF zze-kg`nlz@{7lE%rr%#KYs7S}ZMyIZSye@Q(EJ33r$K3#0d&@kz|QHBD`e%F+W5Cu zt&o*s1O@N3m9pK;zneR!tE`eW6`Rn~Dd1>~u#pvXlgpy%`Kx4An2)t|ZlAVF_8=qE z&g0V~*T|YNojX2#;u={OM*Hd4*2qRHf)7dK1>La;Dgm$ZDRT3$b1N_@FgoghSM;-P z4_Yhxh)H45uMW@vMB}dx0fAnm0SINs|0|k60}vq_WR=B#vq?d_ndZ#UL#jX>(CK{} zWF;8)O#iS&R-SR$bb+m4%3!N(@N}zp|iF5|4}+c(O(i$eww zSR52s!38IH^=wbm^y#9TWOYndb18yuv{p4^GEray71ss|Y@ox>L2+V`CD6zPYE0Qc z+ER`dS=*K8(|_)e)igwo2&CRVD0V;{OVF8?Y@jnO z!518|3hZH3;iUlNeu4-?&fKj`7v>5Bp@b96{PZ(?_6jQ_yWf0H*m>#*5S69FUb0nt@h~PT4tK z=Af)Bj&dstSTX~T}`#}3PSFs;}zUEqkU1Ji*W(}Rx4 zx-;$AF@5$CS$oDO({CS(v z&KVu&fR5N=6*x2f;89t5wiz2|2!PJuJu0ipczZg}Fd}*8@O@7;CSbBvw)+Rz{=^*kI5QHd=*p#dx;Iw+;zMTx*wcb;LLRW zQ_0j9lwk8CmO<85}<_Om{mi8weWZWYcAkaAYn79WAH81{%ubFk@0s0DF%e zx^i2{9W2XX#-yRZ=BSYcYK4N!4R!?%fyrzjZ8l1r3ansdpb-sFVpCvJ;80+2Y+=k& z0wpPCM+OBJ1y;ukJEsXag2U+=mx2PDK4Xpo8|Y#J@K`sS0z0H=;sRx9Hb;Z$ac5+m znC9)AzUYjs5y!Hf%>s^U0=d)ApOuy8e?F^2K$C&NoPj}s$??Ig>3`44N(n)VwG>e6 z-;60ifz9#3^w=}9&WzKh*PN9#V>~~7^I2JIY+BtJ_e^&^Co9jmetPOT*&xRC(|4Sc zoyoLQaQXxZIV;AV>3!#A^%=iU-*;Ztg6YE2=|9fPx-!0=Zg)Y}i1GFG!V9t{jEko) zyC5sixOn=p3$o4{OF&&7MhzwwftP}g4DO(zR929)6hMInmSh4;PB*+LE5`U^y2nLX zXSP}0Edm1l(%&!!){ENja2^4avimt~Lh%(G+SuV(_C+xJ0m z`h_d97WG03oS@pDOMwwoL|kMB^_Vo6Km*i{8+J|;0FBK;&RyelG{^$)76)xU6SxTO zdnj8mcqnix2$_NQNGouIN4zaSbw6mi4``Jzq<_Yxz~l%XoZ$dh^o|SyKR{hKjVuM8 zdT25c1T7)u1m8~q?%F{#@(4`f0PVL<08d*^rh#37lerI2JU<&RhbT z7*b*d4U93v9BctPeUL|B6)&j7SKt9{ofcTls|dP9oJZg$w<5a&XFX^*xde1Jff-YQ z0w-wxs73*F#-8IJW>8z8P};G91$1MQBV(a-5ongbqzE+fUIaQm5YnXtxe3&UeZvgu zC$=zx&m3X^9oGcwfod={FoKSw>Hu}`K)qZp1ttY9Q1Vu&2Zv@iCrIlYMkUZZCxZej ztn;eNFay-Xa(TyMs-2h>?(2DOtEI33xo7&d_TpzblF9TTYksKBz;oOuCA zG3dGmkmD9Gf;TpC>N7rpc48rYST=42&U#SrK43IsdI8b_I?)w0I`x7P(L??K>brpo zVNUSP4NRbf!UNii2`YXB4uFpt(`5buRt4@yH!x)>AUp_8RuKR7Fd+vGNUbhIN(X2j zoYN6>+Z{N9K<;T^0H0c1x^L9`&bk>vOrcbDDY3`za^_A%?Eb8B4{%O2U9(>18DmjD8ut9 z@=y1@I+zXQ>dh=5XJ43Je@j->n;+~vP7S6L%!+~_^Ug3Ui6J@m1~Vw3fh-ef z0*}$$VOA0WjdCf7fV|5G8j2MJJLJan`?qAJ>xIFA)gY+A>G%Nbiv~eZM`tcL_q||7 zY;k7c;Q@!$6J{lDN0t&r5s>WtmOx&gylA4`U0Za&6qxbhFTg}Kt6#Ml7b3i3Y?&0$OO)V9o8~^ z!);kljR`CYoC+*vOdTMa!;Gm1l+I_cfFc8Q^Z}0ovx1N#gTR#Oig#p%wHCl9${E4K zOQ4Bz1xAo#Kua+h;cjx=Fg@;$tXSX*kZC(uK;s9Tj-b$z( z+0B?vfHZ)n`$1WqQ32xU6D(O!N8g!#^p31+%mPsOJ05^Lh7;tN9W3U|GeD653Vu$< z8G>1$tSfLC6cQT*vlJKw76^kY5l(%^9fC?spfNHHrY(Y?ETqA-K~RwuG&&_XJ?*Zn zqT~sP0%lMQ9T5aIKfufbg3}k=m95d1P+)XoF=tYNP%;XP3KHf_BA~j|u|p7itRfTW zh%W}t%&IYCX551;_!)L?qS;@I$wxe&Bh2z)l-50JVBR!E)pfdxDl z$*Et@*usifX9r1#Ag?vBf;!9{tf2LE3jDsH)1DyLQi1l5f{JbiGp0GLpaUMwm}Y>& zr{0Wd3WyI{OaR(8H31~J0BYR?R;YE1OIVfQ8%0=n__#$NRq6s(Go}?F9UEA)l=!Ce zJ&={O+rb7-qzasl9ju^Kw}Lgx5j0E>s@cKinZP6-Q0~3KhH9h&r!K<qVzgrDUaN7I925K09mi2RjR-3RWNtiRw2mv4dqRIS$ z&764yA|-+nAF~3Zz-4A82L*}cO3ct6xdeE{GbkiMmnK4L07B^1DUu0e46Mo&SJdT@dRWjzH@D$7!01visG7axI2Nu*@R>9~eH3*sAQ zh=aM!m==J%&kkOv$qDMME@4+-hI>_s8{}C~fc+2vwRJc_>p`K$gR|NMcJq4X51#d%o{)<49;pN zK<<<<$M5?3EXQ+yI|Lj>V8b<>$X*e+3O!v8bQlo2f1ZH!86fn5MzAjkW?|Fw0_19t z-=_WR5O8Ef=;5r_U^*eFCPwwAL0>^@1B7 zpe6{90HocthaI`y1S*bn8FqjsY`{^xfgO~j4}fyV0Z_g`nmITDN>25V3=66v!BYn( 
z*v*+3cz8j6kp(Z2=L9u@-++=Cn;Fv!kU8vD3=cqj2|K16AX;FpIr9aO1ZYDgs6}~! z9oC|p!U35UfwU+=rh+^=fy0bx2B-zKfFny2+-#e{foM@K-~hRR2XwLzsAUIhWi6ln z;EAkw{R$3nb)dn-qX0^`9jsaKM&=5zMo=@xaRUdeFmlKSg%t-ZtT>Rv3Zzt*VFzgX z3KCWv;IIO@47A~p2RW?326710H$kU1PJk@~g|_1bjx0#;0hAgWz`YJrrYjug%oD(E z)EgX%pb~fjC#V_#&v1d~ZDw$SoX7zW_!*o~Z*zhcZY}^N!xdm9oQ?}P!J)&c&$uRm zQwdbSfDaCb6#tGJz^Xu9VonXF6(D^M*#h68ZQu=HQ@Jq$lM`_)EVM7e4O-~Q?>oK! znXDYw0gy2#IJ1=4!G(K0XxAL5CI<%%XO<)AB5U-5{R5X0DAh42u!Ead+@ON}1s5-q z0yk)_Jm_qA#w_sU3n*><;DSaY2RIsMFoPT9ocfG^xG+4}z^xBb4DRav;1Xzo#y|tN zITHg9C=MNGaAzq9EAR@e6atk6E4ZiEJ(rbJ;0Kkc+@Km^0XL{F2SqM}0;l5?Zcv%8 z#5aAgD~2CDkb3w9j~UYkP>yTh1%(hN zX#bAD3rMq&7fqwYawS1W`7B4)GVrWFxUmXu3pMb9_bP+D&#b`C0dBc6EAUO1dnqen zBn;{Fb_gnPI<308>MEBxGpK&U zPY?u!G`O;IoB$SqwyKyxt3Ek3nHvPnnLBtD_&_00U+f54^Cp;G2HF;&!0*WDCS3@w zs@L#lf!p? z;Q&#^3K|BVQZo@hXTrGG{4?W|=Yd@G1x@h=Pm&$*ti9oibSRhl_z>y6_uW!TJk)kQLkD$UMLg z@*rqq>Kf2^)d_yk@@>$_0!ZrtekIUE7Zb=RP^X_$pYaSoco_hv;~hRw2M;B7!7Bs6 zg~|o6sTV*eCVDaR9H7B;hY#GB0Y`%413pk_DKQHy5>{fd0M8+_IDi+SqiBbB zFlT}0&3br20v*$%-pZOWc1)l6R#qhQ4X8^lX2$daM005}v6wSI;8);sv;f(p$PCIJ z0-(AJTt$OU!kEr!#`FcGn9+>s1E_d?#9+nn1Ej?S6a%0mIXE3Z@Mj6MaY84a!Bt(y zbiQ}8S&RbHE8of5DhoJXW&j_W&*=zGN}!`284;Z)PR9-ba4*290aU@y5QO9g&_OXg z3Ow+(t-x=1&H%YwQ3AArW4gk7S^0X2@u$3%F_S3hFC@_H!W`v}R1;7Uc>- z&~eLEpn*zF<^zICl8}~X1jzjgpfMqkTOiF&P$O9q(d^^^&q5>S=a{GOdoRnIg|8Wk zWM})!76C_1fm|t2>>LmTB@J+ckQv+?2UTG3B9sx7X*iKO1jyZp=^Ofmc&3Mbkd>_m zoyh|qQbiv_0yV5B2$(Z30Qm=WP!5kHs7eH#*ux{Rn_CGqs#)(QEie&0VzNd6oJJ8X z1Wv~dptF4xK;ZE7sKf@^@xiRXtspeL z>XWRL`W!*fAg}_ccc{TMLlD%{l2Kq&V0P5W$#T@mR$>O(-5@wUQH@7r`uR_?Jc@|w z6?7RVs38v@vtS0zB??Xd^GQ}k0lAOM3F+0M^!cVce3lihN6r%P?uwZ}l_R4f6J)pr zoH5{CGEfc9OrT2!9-G16B{O4!SY{`f<@mn6MZnPvkr`M)QFcNQ+%ZHCO(q2c!RPf=$KL^$m;lM z)0w`?>M%~5uK!imoALVe>aVi$OakYo&x6tjzsj0!cl;)Mj0v=wmGh^pC=+AEbot-1 zij02K9e>N3F#QvpUjAD)im`e6rQfoPSRCyITBcY0k(B{m2sQ1GtOld?^!Ya zI9^~-VA5p>ny&R%ww`g#^i_XlPcXIfY>&^D6lI*=`CoP-)0HbzW#mkmo?V$5EN8^@ z<;v7{xk-%u)1?^YG#Rz07t9cqHfdNiT|j|RAYFmUQ2^vaanPh1qa&l^HPFfZjESTzf>|n{vwxW# z53FbdsZa(T{LAe4^ItQFrv%~shwv0ZrvNiMUb@%>l2ZVk0?h2V{!9aiCl5LWnAve5 zgeM2l_u~p!PL{jgkxh}=aqT5AR|aCtzh_`8q#?%ahw!98#|JYzzJu^2xgEI`nH?WN zcoN)>Jc`VY7caGdEEMN<X2;r@aL`jRWHQ`47N6 zc8C?fAo|!KDmR`2o5ITND4@vf_;Ohz$cHQtXP<`fm?6Ad5FQiA6h~&q?f1YM86lGU z{(*R)d;>oDmjN^)#lkHOPEVlq-U=*^%YJkSILbM;{pb)7m>@cRVV{HyQ?tl%po)xqxR3Hyx^ZQna%|?=N z-#f1R+9BYm?AZAgvSbHIX40(=0Y^Q@M>jz-)6X-?9bjBNeFBplXj#lzCOHkzTCgf1 z#pzyBaw4pp3S0us(+!yAESNU)OHb6Dl% z89S!$VU?3+?%-H3{fwfV5@YA|_lk1b;+-4{pqUeP1y)UF0R>h^i7aDA27x`(4VC09 z8C$3CmzC3CY@NQqP)?L_*Ytm~a&ps8D9PnacVd@QcG<(Gzy{t~V9sm+Ssug&T7nN+ z1fjqxu#6ABRLMhu4HTFPY>p=wvLGu4pzy(2@)W zYevulC(trfRs}AB?}7?!(+{4P6`jT@XV2I(-GNi?597q?wp?;^1eb#5&p}h$brXbz^yuPFs_&iS=$HRZo6RW^fYcc3(iM^pjmfzN2%!- zDuu+S@8g!^5_=351FzzNthfa&Ks`JC9=BWrb@rvI@y*aJPc;mI0{O%rpIytlYHe=Y-@Wg;YVqC2Y_Wra=DfpZ-%w&O~M- z7i5+l9D)~+L-54Z=^uq9MW@FJ%c-I)J(HZiS6FTdZ@(@g z*Urdv?DzBpF*!rFFTXnk9J!|R?~>4EYWOqVZkI&g^bm15U94*?7-vrJmyokYSz=*0 zT}M*Rh;iricu6_X659ik;CT5fDW{E;%cL10IgD}6^!ZZQBj=8k+y%xt)7MIaBL_WO zakg{3^!<69NCq&jpI$E_*~2tfXgaT`qzPls^cV#>OU9qmrzyxq z<0v*BA{QI1?HmG+M8KY7gcKbt9UKCWr=M1oQ^sF*Jb|S%HV>;uq=?Thm za*WHC*DK30@-7f!asY3CRAO{go!+A&XUe!|`XLoLKYsLdCa)@I%-AzMSXFK<1En->b&bfGmNk`bJJI8%V{ybnSK>aP1BLP#`tpjbsf2fcys)G zyg8nGz7V$pyWgq4LqAUZ&+IY1?$lAwZsf}p@-5hbAoAbPs1xttMW=kyA5IUQ?IJqKF) z#HqjmDi#ftK>NikvWyuSl(+<*h$w(=TmeZaD1p`wYJep`>4ZyQ&-DA|a-#AeHJZ!} zitGxYU3nZJ1)%jFpmQ#r7zDOV7q*ZyXKbDBZ6T+f3@%(hFl2$|G(m=fb{&D}4n`#| zfu|tLK>K0X6}Uix1z;P0Fo4Ijc?5QWq%SaJfus@?z}x;nTLak@z&bgh_HCYi(?aeX z@$^n6>n zQpOq6pW4b9GOnI3ZYS5vcz*g)JGolvouQK}Yt&`Jk<^tgN2W0w1>Dc97Em?J=x3mXsAk 
zYL+-Cuz~LZ5@=wbZtf%($+&U)L?^i_F_abvXayIz1u|XHSuT{ZXL`A_oU_O#A<)W2 z4JH-^CP&sX&@_O+%jxHw<&Ghn4{8({fG#d?V4uFqMJ}9sqY$E`<8hUfWZW}d%~j5l z@!9kQS5VoueTu7G4-=!pbVU!j`HWrD4|>R@aw%jhvO97rGV(A_FZ7g?-R|otm(9c& zK7FURoTs#iA}h0l0(iWDLjknZja8tNS&37DNuYbWtdCqfWBBx?K5|a{A_ASvp!>wZ zn{EVlPXFd3_ldE4`b%FqX~vz?S^ea8C^SuJ5D=Iy%AF7C5IbZkF{yxW7YA)9WN=iO ze%DXVknzrRet)@~>AC#u1=C;q%N0)d6JYn|`6A844TA)z#C9N>xq~_Hxo?(#X6rkQxAw{1z+CF3|ctT0OGTlFg03Q+!H6(<2I!D*kp18Q5UAqe=LUI8Ly`MwL6Hx{ z)lp(~tcUoA&zyM!Xh$4q`~iGNDzhW#@;+wpl}mSo6_^zm!1tdrgIj-~4F!9Jz|M8N z!;}RI4bYB;D@<7mtZPB*TENOLfDGg_XFdUTwRPpeEVi4q4Go}V+uuY(J1-^%E5+oGsnL0pYqo6BSAqIiw$RSG2m?nT3DquaJ zMS2hwaI0p3bb_Whp=L2H011HB<3SXHmXk4oZy9FRU{Vmc1DYv>IA#TC=o)M+Wdk#K;F^aU;e#De*MPHxf+EPDJD9T^*#xQ_859|q9TXT8SV7Hp zCeSh^76k?t2e?ZQfQ&rAoFyD^K1e%ItQebdo z%mO`Jj?nHjGiVi_Ks8#tG741jh=JS&34<3P<6kgmIf@HZ@rWYCKY+wPK*dE6;y*y* zKbW%|>tzM1Kqn!>+}Xea%H}LtjuHY@JVFQ!9U$=zmMlk6fhy1~1u*p!K;jcvz>8nN zISQO)&6s9@#AiUwfhT1%rUfAJ1uR*P(gNV*mIyOw1xR8AOO~UgKoulSL439WB)$PE z&Vz8!4v_c`mMljBfhy!!KL8RJ$WmYsXc7WtOP1;K;c{Z$CqTj+FkuZQ9#C{?FmZsQ z94YWYIf(%>jKS;(TBipJceK)Iy248#QE|{3WG3)5466o{2xuCHNr6$|?({X`aWcLM@>G#6rr0Z{hI5$|b9C-w)KsR_XYcLr=N^K2A7D$$3133!3KIs8i zLX(;S|z2(3ECb2TBC}bgkON{dBFm@jl2ppYFqE7#N*21P!Biz z14seridIg6DsJ$?R%XybycSld3MF1MrXMT{%%G7S1~aAx&oKR_Ie$WuQ+qCZ%t8$`=V)i;2WA#%aQCQ#+XsKBABBv1u086tiFBz}M~%aKK(3bc<37CbvZ;yW0@sRk065cL~C;`N{#QyIaT zN`t9J33Q7$bD;tQ=o}#>&}l;MMXm}Aj{pBxdl!OwBVZvG&;gAMhyxlSO%af3py5C! z0nmYtu-pw=xCm;GfoPCVK}{_-P$W3sUK1{*XsXxE>T-jIdw?+4TXt zNz9QZuF>Aiw=!geDXQ$A%BgpoPbf00vF>H-MUJ436Ao0$uE&!j(ml4MZ`3 ziZxiVU(c51$SqI>It&Rk1H|muz?229Z&}dOHUsF~Nu*{%2dGWa!31tofW}@MSP-_G zF--u;PGEu-6%3A`W1ZMX2%C? zpjoL;s!Tla9>f1QxdLd9Aq^4iu=?u(LzY0E5-5={7xIdO;)x+kpoLM9fkzx<3up@> zWFru^{sg$@f%hjEK>Z1R1%|cL3myuIWigpCf%^~qX#EFJJpqb4=H%mbL_;>!wkB;64V-E zLJo0|K4uLjl5WNp67}#-2`Jt`eG-tjKxGN2+qi5l=+<3? 
z!$lOBK|@pmi&$XJ7J+0!c)HyP(t#9lAbD0ZCX|i~B7j9eO$kVO3Gc#u0o`fMD3Ghj zz=PD*h4o}W?VSeZEJu5R^XxD;fO-+2-6EjoFsx6rPe_pg(j*3TLlC_hkbYRN2IP)( z$btHm`Dv{lc*-7E;V z5Z?2-_NW28d}ey#3n5Vnw2lwRBv8j^EjgYLitISAY^hj)i|J)a(xCMV4e?*|>dF}(p~4!i@j z|5Ou-Iildv3}|Br-W^)EeEOL*IcY9&k7;r;hVbssl;w>Aj*puz>TvK*`p+&$7@g7-r~ z{ULbd@PonvDRN%@o&F?U&WiEObomT9HKlX#=!DhMcA>ygSqdatRMO z?}A1lz`dgBYcu4eJwcry(D|m0D`zzbII<%g3%ZDvMS&aChX=Q^Il;@7L1(oh)zR>N z(Dk{~1vBN8x#9hu^RuSgXUa*XBYHfbJEK889?)trj2_Q+c)=y0zzn(+0A6t2oYN`b zh*WTa$HBPFm{2-G`=%erloPFA0_yB=fp;H4A`djWgW3Z^Zc;C4ZV+%psysm_w;}pN zh{_Cfmm8=@#HIl47_ozR>-Uy^$gc+9%3#ypC26@G7u6%6o&(3UFjpU@Za7Zy-0bpxvh|$MdtMpU9R|w19V^ zCeK8zf_Ok3DlXLC1iVAFXWewg961Sfl+F~Wn1__|JfLG8pgm7UQ0FQkM^20z-nqIy zcY0%voTMPUUv=tdJJ|QrwahlyT zJupvBR2`+;1g_{fKuRHr13n6NXnJp+oDny?$F%p%^mBP~()IAJQsU=cz_g!d`u@eD^8DvP*EfR3P(cTl3Y?t&cCn}?tPEobkx?U6pcbjE|>3tdtX0I3fg3Rq&&oBNSL1Z!lyz-enMYJYA$pPK9yubjvC^ zceW=xrwIt$m|n14R2ss*GktlL+-j!dLeu+qi<&ZCo8GXUQ4_>HxQ9`S@%Z!$)pG72 zF3%1|1*WgU)Ae>Rsxf|@K6j6(9OLonjWu%WAkjHH7?nZv!5xgUAl3JGFiJ`CfEJ%I z2y7H_WN_ye0C7MU@-PZ)nXXtXC&PGX`h&Bg64S$K9!4W(TuyM&uoxOX52OX zZ-ZPtbQ7tXkI z`hzApb*8->(;aK&B&Kt?NbxZ4oUYd_C&T!5x;KdWJl)?#N||x*^qyw0B_EpQv>884 zmuZo6XWTX2+f_=D@y+yHS1DP>z0>x_lMR7SIK_j8+W2V6EUAYI_*76hJ2~HV9Ae>X9?zW&~CB z9F7860+Xg+>XcJu`vXdCSEv8&lyl-RP~dhv!yqtux_g&gA=AQ((|2{rxiEg4{=Z8u zg0X3OM7Nxd@V#@>1oRof13tWrJWSkPYm>xMD#xv79d*m$SFEfC&yE;9zPi`sWx9Pw8e!pK%lF@hrvgisp2h?c*Z_5Ln#&7^Gtia&7WAAqDiE@lgj8mrT zPL?~tzW`Zs#@^{HQ{+4u`=5@ zF;1CoGhMEc@xk)006qrD(61Wt2niV(|xSO50oInRXoR}jQDez60Re=e# z#uBuHnoVHZbi=uFqKs3fyU&%&mwnJWT>x}(2xuC{@eYUsnh^w5FgIGKPrp1@u9tD@ z^u&2`T8uNM_sj$PYUMn+GR7Ix73RxHGESLpHD9ic@!Rx+^W{nyr%X3nAa_=A!oFz& zpe3|Q%-KpT3M`I47`&yyZE*!=N0t)D2m7Y0ER;J9I_dD^Lb=mCjN7K~T_KC#y!49m3 zuEJ*m&1`{hSpp4IGdUI!JHXkb!69Y47u$oPQ$+49Y-2GHw7I?|8z^2d0g1pz1 z1#~+rs1N8U51vA$M`Zj z+JJ)85qe@3=zeafwFVFa+4LDjkga6~Su2sHzyz^Y0HO+Pt%Nxf`1D=4C6LA>Lp`Xq z$L^>B3TH*op{neleHctCp!FP%Dp^XPW44$;YgH7o9PeD4Cg7+c(83JekOdllWdogK zjA{x+XU*MJyg901K8U=G^knI*( zj;(*D2{_6loXiW_Z7c!0{GHuV2IS{@B^F1fEP*ee^+uq(us{kxU0@j{Mn}dhGbR}j zM?!%SwB--7{g53bf=yb1(~b$`02a_)0oVgX0UWx_pmP90tE|`@4LCqck}a|XzD;-B zCMQ-8iogpDpfPGl4eO}xxOm}o0f7aguqqccRCVXejUtY3e@_z-SSZS^$H-W( z%-|>hUGD_05+EyY1b%`R4Y5GdANZVD6$Wlkh;rESD90buS8bG2W&AY#!bUkA#);Ga zZj=L^*{8ZmPPP6izXF2-yCyRzA%T_@Du5h0iwR-^XaftU3IpWyAVviifo9N_Dh1G* z6mSj(ZQJAot)>Dk76L^sc+)Oq;NObp!Mv!SQkZkCf{oH#vZvz$7|hqKcJ6xalgPVe3E3u)@LL%>s`9?zadiK>BBn;%-We(vIwooC=K6ps9@`TR}C*!s$P^%DFRw z)1Csv(X0YTr)O@Hvr=6ypui3)UO>Z}h#+MLMIxy8b%p^H3e!()lQUqPIQ{1~IX!N$ z>sUdj#Vwj{v|Y}Hanbbh?Q*V+i>B}0E~m!bat;(#8cYHLi>ALr77!6wG+lm&TnOW~ z=_Nbl9y0Eno;XcfV|v|AIWG3g44_HQr_=j)%4xwZU=Ua|{m4!^bsi)ed8ciX6P?bp zOD@m^95?Km%o^ah0Ufoe!oc0g#K;WF8V*@X%qpO%D+WiEEHm)h1qMwfhUtGkN$InF zTsR%HAl&e?lsKq9_4+JjT>lRw#t14wK%H9f%?Qk(GEW0^bgrcWi{lFha4(s;P@4NLBV^T^0{p@R4JID2 zDGW>;4xpMsK!J67&RbClJArNdN{}e!Wl&%R=Wh)r9t9SG9sCNc`iwkEY+&&pj8+U1 z3akpu){J7HHLjr5m8=S^(*u@COS6Gin+jm7B>BL3dLt7f3n-)%ru)4WwX#OG5Oji^ z5;zI4J2DG=W<)pzRGNZzC2BA+D6u&*XMr+2+w{DJQtJ8~pz=~83*=T74JJ_Croifm z-GOXoOajvn&XE#l5y-;oLPmw@dAsDI8QZ6y+9fB$cxC$YU2^tJOaDyQ*ez$t_+@(1 zZn-GQ*6Y(iBiit43S6Xv&*{CoTh5qi{`Kixd%*3%4SVD!GR~OJSSTeuJ$A30G}CiP z5iz}QuN)gBi!uu=nm%W*oHFCT=?C`8*)eXL{(rBW4&#pLI{W10n9eXvj}4WVU}JGq z0J%47pPUS1`}FpGati8~8T>)x5sIv!=;KjhapcJoXyR00a=grtt;7zl)(x`xPEJ3! 
zPj18X`u%dsjAy1V-Y>U{;}YnQC}x3|(^C(~nK5phKIed3l)x+j=)vKoT}neKl`PKoi%^rAy@>WpWm&pIS0UH^n1W`#756lmKO=ww*|foE`GDIQ6furQLa zB##74SXJN+T(tyfew0B0WQYcmusCSJ6-2m`5lLLk1~lyekvPMEBq0i#Xn=^bAPI|r zCYK<>tO9SQ7aW!olR#K51X^7Mk!28gGkxA+IWc{NtN>{803yqdWF$Xm8bif6yz@q8S$K?{4_FkX9>^Qhf_WHP- zy3{Xz$Z_7BpzGCHKr1alcM&Ua3QU}?b3!hKv0?he6LQ9k6Q>_NAs5T|a=P+KxeCU! z)90L&o6LB2y1^+qZ^qx#Yfi~^G5(p(ep=3!anW?w({i9rYr$zbQ^uz0%TCJ`3SMT& z0v&kGroaPE;SG$_HO|Ndu`7W*De!uF;~6@d@#S>Ib8^n?FTs~M{h7{tT251f!SN_a7mEhd4^}01f!VO^FkRrZoci?3 z=j3iNPMp5(yqqcH%jr+f%Xu-LovwdDu8#5S^mP~HR2hFyzj8q?g>mh4%ZqZlY7mp( zuqmATS5!Q0O7t9F7g!rU?jqhbI%r+1wlg5BU{YL0Y68r<{jx z5tDK3IS<)}#|+B$EilfC={GOQ88iNw&U;x-QVtxREE-HFn2920dIWyhhoxZS;Sz)^WZ8>{|%fbpwj^LUFz8U!5QP9A&IVZ+T z)9voc$$_Ne?#f9ru9;qOSI&TO&GeOb<+QDL{%Z$~xGrQ?VuIu|P_r4d?}6Fz21Ay> zG!7(j(7I;O3JB26GLHNHP3ONSC&n~&?R52fa#{-O&vg(|PX8N$P$A$ukNp zWL9JXjYuv4ZQM~{;)Vwu@;>UW=>hlUGbGpxHN;n6=4NNfh*H>9?C^>{FyghK!HJ^YkKcPIW>hfGaJCikU>v@ z0(A-*br~2Op{ly4Uw$ZO!uWAI$0NC9#*Nc+9?5ygygAh&;K&0C1?ZV*44`U~L4iSF zy72Ue>*XbxexI8D`jMQO47k_FDDa#ewxj&F1`{9TT;T`PZ6C|cW9qpu{qAEqUBmsL z4WaC2OdCMGST+r&4~$Bnxk%78I*?%_(8xX;Xf?azfeX_F1ZGUPd?HsWyq}G`MM)%p zmq&rk@dxO}5`n$b4?dBLU|c#~>ZzPr{R+@=$e_gsAR1&mXu%|-6~i77AGCG_#6JMO zNfc@UE2vAigEdQFhA?Pg-~%h-tX%_8t;puMjx|e(OM%VtF)OH;v|?ZYbzAir9ZbB0EU_1*;-Ihp zOy_$pr@**(y8d%HD@MNQSl;A(OISeHsx#Fqa5}PpmUFT>c5r|$_F!{t;K)+o z5ctZbzzV9A*&HXJh(BXfVC4pdBIr!iD^P35saR*1166l_8@LB0QI3Ty!DzG_j z;K%|Ej4(jPM^=C=W3*y80J5CLjA;+}wEdkNS)e#`Jjjs+j;o^_S>SnHHeH4#97-&p zb7tz<96?9Efr6aP@drC7ea!$_qQC|UL1qPJfk`}|VBY|8EZnVP+@Nq>!3l8`C%D6` z0CW8h(9PuN`fs4<7YFOVz{$%P!Oc_;84Cjq1!*wN;Q*}$1&vv;I-X;210B2W_<$3% zpO+PMoHCm(!vroz33py*aFXN%rDa`)7o5B-3T)sbs<;%u*E%fVnr`@3Sd4|uv4ea1 zgnmI$kp^zimTneMns%JP4f1CL_w@NM<#fQ-gD%1XpAxJD8oc6TWn^SvU}ONDw+1Ta z99gnJOO8Rs6_Wz9z*S*Td`WUcg8l$ED2q8B;07OHbOFQz%?^WxTW)ZJ*UN(p=K|Sy zfg7|k+mTU`1>{*x<^$YHqE4~U!y_3K*c3#}nOA@`ZU8%Q1$UN$DCh_uHhsn&+)6B< zF)2;v4cuTqfHpXQ>=H3&MsfkG857tAtY%C%KnC5IKJk^DsNw>+_1mFcP4LZXC%6^C z;dp?1dfq8%nHUYGC!FBm=Y`C{AUq3-Fwi!Y6`-5kvzWb~GDSToLH1VuSQBc#-V8Muc7l+tw>4sa;)Pv84RPImf+Z*uI6 zN2bfak&~?70CM&YE=W2B<;4wLSyoE2Xn9p`5N>K3p$-sFPhojY6*Uq0Z1`0^na zBriSTg823+7w9Hwn6Hm>q4@eJ7uekqd$vJ`nh9=O5<34s$_pb*eyIKrjK0xE9z zaDk##gJ}o&x;9OwGt*zckuwqc0!q+~W=tQrK*`3DQIUVT;afRR_cg2_wgQVHJ7_60 zYRcpTEnO1;MZyAZ&_E=p#|$nI*+54Sf-b!Uoh<_z&73g(;9EKK`nRkQKfGjx__d!6 zr1>msmXe?&6T(9x=1eW1no!t`sR2X_>N7A{GxUO(LRJhtYzji)@?Zv7cL)5)TJX4O z1DiSX1kl++pc~2|r48sDU(gMwSy_&t!B&ne(27rJadUu831k?9f)FUH@q(s{*g&_W zgNiIR#}nWi)IqcC4Pd(#fa)0Z66XaQV*J_woXT_=9!!7uPELk#-SmI&B6;#IWnx6PxPP`s;oG%+_)w?50kphRn1n}7M2R6{Ua5l#uVB03JWq~BYPFLU% zxXlbIAOE1(#t+JSpaXt+K~esLO`w6vksovopDEKEc1TTf0$kRD>;_eF)7e1^Pp}(; z6ggf1%R%h{oub~x4zlA0J7Vk*;v-Gw3)3||$jL!`B+Uv^wPt$y2RSo{2*{7uKgfA7 zE|{+HQO=5S()8$$a!!nsrqBB*XTZom{rpF{V8#X0l|RYpGcKJT{7FuSaq0AiPjb4( z^=3>@KxHbQ8Pfw0Eo#nu19V9aqZLCBDBxd!?#NbP1KpJ^@ERNfZ>FFBBqz%Cf(5(^ zfX(s4^mm`+q}Z>4GuV&m{Ga7S89z){{VZoF4%(-}=C}c5gupINP*u{w>dMP7J>iR- z$n?g~a49J5)EOsCulypX#mF~($rrg!M!xBKU*%HS_(4M%{L^QCl~XoHWGF~8gqH<$pgX9g z^??hN4!|uLUPe&S&kCAYWKiT*U{vIr{`sq1Im9{YjEEA(laYV=uWxcnjC|8&zsn^v zKA7J4T~3E_+4Oba<^D0woWAyl9O#DUb3f$bn3i)+FSsHkI^FrFoD|1}3!ouOfvM9| zf67f^{4xF4PdN|9FVmfW$>}rBnO^)$&YSVe^!>l&92w6~XZ$Uv&9q|YbiLnlwjy&u zr-HC(Fa>}vg<%IRYhSZ-di`%XUBa9?xUPROqLZ?H##v0Y@UAOpPa^YlOj1S#<|nui{#80=T4ti zBxl2PW$E-QMRK7a=V}+rnJ~_uo=_|&#W;U@b+McRW9#%e#d0c)t#d3Cx>!)j$$b~YlpI%=grzEu%l*PcaP~hszk-5+cbbA4-sK^5vOa&e3$p^Z%fK353BMB1c1f3I*r6j1p0TL1rST{Y4 zO@4;NQeg!~1->jL(C$l+H`xT%2`aG(te8HbNM4h1#dHI9d1G6!A|}w4NeWm})1;gm zs*Jm*>+r}+gUs{fk(X!OKRuI2UY)UN`a~XieWvSkryt^xS7BT>{W*`kCew+Z(}j5D 
zl^73BH|3RAXWTtKmRH_F5_HMA2Ga~C(C#$_W`PEFMNUwg&5?QfI$n7L#^&kwdFADV zz-RFAGAXct3p0?i>HK{1zU-H0b%3seo8C83UP=0;up$F!B$EZ)5jA7F0Ge>Vz>?+o zZ^rZmeDYo(2fg8wS7dCS&c`orB)kl^hryMXkw=P~+p+)i^tws%N+6e(^MhSFi(h^M z)4_?;bp+&PnGVgI?kym1!T4%=i-5c)tE(#AJcUV>@wiRpg@<-?e+vQO`sDz7!YUP#`E2i(zSalFBnCGdLsULko^ zo&^#LERG8#vK-r*JEp%Al9!pjvR+<#ddxI=5yr>U3#ZA8O%D*3H)4AO3Oe5D@(uD< zOmBpz|DGlv#`to&w1~W7{n6{*~05GK&TR}Wa^2s^Bq#G=5W$-IEwocRSfv1u@EUm6H;V$B8PgID z1z60_05RD?E}Fmrn)L-wEHB_tU;+30SR5B{WC^?ld6}CVu4e^EdId+8<4w?lU0zrO zOrInwFC4K2WFTnM97qx9G;v1Id4t&s%mVO4#LERY>HtX90gfz3=tc}s*nzfeLeu35 zkn{z^UK{x^sd{V6liLa|fpa zi(>;PDE2xyp|*1(!e{~~=qeutE`entmdrCiYC(Nk7RMPNwbTEJ$;a@PIQ*U;)1(185}F$QcLNbWk{HFiqf61WiP9nKM5Csd>Pag<7h~x5_KU zyx;+aumGqb&I)Q)vFI~?;89}W1%)(=Ci4p(bLJnQwTC}=K)Mly-wUoR1y+z&h(rUs z4FhNj5ws8+ytbXiaSHGBgo*MZ;q2felVJhdz^lOG2rVnS1QZyg&6p;DPPk@qe8H7v z#?%3l2Avts0zQHlT#kZ#15I-?K+-dK!R6>VNqPH(1t5V1yznLJ@FETr6^<)-K`un` z+y-7H25|mh(PUo1YtFO>WI3A|(+<#@c9828SR8loLVJw@%ckc_$t&oc0NHYaH_Pz^ zXbm|lEJGIZB022>NX-S_EXRY>cT33!)IR|k42oGL(DpeH2eP~G0f-6e-GP!GsBpdx zDrMObW(q5?I6eS7{01+ml!X=A!k|oT#y-1Y&a58^fwAEQ7`3FeX2S@<(WeGf;eq2^wZhFu{c`3$a z)8yopruQ$DS7qEYed|JbO{PYn=?@mli!vUa{%xVWHq%?->B@`btr?F^k6$EjV)7Qb zNdZcyjuSwqj|=Q#lY)Ex2B=2A!Jp;0dTEn@zoI zvw=>-@>G+b$M|vj3pM$@j1#9XQ&=Q%ukeNg#fsMjEY@jvnkh04Iw40tGOJMhOCvEu{jl1j$?BHhqM$o2BHi1>( zgR4N-s4|1r?Ofte0A1Glgk$t%N(G21oE=rvjHjCNK$Hp02JVZ?CnV9V(3MI&jcKTnC$V6F4w^f{wf=7s7edAM41Q zg7_c@G6`&)uB0pPC9o6hAcSGlYjow+88=T~qAPFCIAQu7UHLAiI~S&(m68f(d^cTD zT51d9vgw8T@-d9hrk~T7U&MG~dZB^*RmKC;*T_n#Fz%mzQC3Quap&~EvQmnSGp7q$ z%Zu}LfEIUx&RftGcs%{Cp?odl$LR;;q>>r8P2X=Mudn!MQ-grxp>53q3aq*e9MWb? zFF>neAFzT}UA|z=68JP-L0L*-dXK!61LMc(XXT}g88=U#V=N!QxMTWTWBC@wozrhB zNKIf|H$6*H%80Rl`dmdR7shAPt4-ylL7j{lrt+~oJ)j)R1ZpM=^h{?olTT#YEIj>; zl2kC`XOQtG?9A6Eoz)VP2iUo+Z#TUD{qgk#WoPT6_7B=}9*7lG9(= z%gew$%j+O7IsKx&yu@@n2l*w&&tOxfu+^ZTO}gOYY3{KrFr#n0RLBx|&OY7Xq`WxO ztH0Cr9OaEHw+Vxk34jO@&=L&L?iLM@GMIs&J~x9IQ-lI|*4T09Y4C2mh0N18o|IQ$ zyf=N9qr9}qFHp_NBCrs)DwdbYap~O#0msufTLcuC9N7yUncbuv;oD~{Ko(4wKP4~j z&B&Dm*>GS)PsY*z|N~`8=i*FScKImX~B?d_4W5i+l~^ zv*~%R@+pkZr(brJcV&DvUD8e7fbr#YFE{z+oO4e!LnhUyzd0>0!+GgwGpGb#$UI%( zjJ*7GJ9qg6rn85pFL0NyMYDLiy@$LM2h1_k6FuZpneOvUcYH1?KK;IjJTK$^>2E#c zdl?UGulAJx$Y``{K4dXDwELyN4cbHFxCvD>OMw$~`xlb}tK;VR)8~21>ocvHKmEM7 zyadzQ`O{x|%eyh{oC2k1&OkV)(3Pv7q& zAI-F5{&X2%d3~mx^QZgz$~!S_n?HS`ue>_b_W9Fyf%seIPygpDuf()<{&Xcj`BRKj zr@!@+_XRB)oaiqvHa(+)MV6`m^7KM~c{!+{3DcCz(~l$gV$=Wn%gZt?ygXedKzxjaokVBYj!0rJsom#$0`aAcp3yf%^P&Xwsa1Ldt0 zP?r{R3xS55K#d?S#}`f01q9|!XAY8&X1aG}dP0yqcmo00ks$lozJZKooURZcKhNeH zhaxMeT>xI8$O>I>?5OHEX8~jd@(d;>M#!-lp!L2SkOTS@m|YcE9WPCv5G*gwdYK_x z;OX==!SdFOm!`i6me*vQFR-Y|J{rOOOipgRp_fTp3L3*W35 z)-WnCIvxWp+F}-%JzXqZ-k$xB5NLdT&-9Ps^78zkmJPeWLw@Mmgz53&^3tF^v)sH4 z3XB4Crmqi|H{mdJWPogCc^xjV!#HKSM1*`a+~5rk{$J_hg(iT_izXm-`k2XkDA5MwY;<={^baDx8-YvK<)|I2#hpQ!BoD2~_z? 
zYfHspsgW*2YNQD|QWDc&+RMwL)JTlGLADu8H^`E2W}Gs8OO|{9Oc7 z%d1FV2DK+anF^6JB^_^dv^6E@n>jafRsf<}*-Sig)@`pjg+B~2u5J6+WpaXE3p~Kn&pbJ8>mB3@epdCJZ z@L_F!fpya#6yhG%{yrT%to?oZt|EEk5X@oi@0i2d-yy@=JfPV`1s-qEu(p5#m%vS7 zB_;(y1#ShVp%ijjx8Il;r++*pUXpBKv; zGTwp=b1$84Um~x^xO{qEiTpyw9n%F$<@FiEr#qC&Ys!Jo98_R*Ji?L%YVv~5E@cF< zKzl#$Pj4@k_m=Hu2VI5S#7>=O*$cQCSn#`ajP&+xnOP~}WOT7eg6~Wg)gRaw1 zVCFuc#l#9({3hrNCo=w;?q4CF1sjroINh*PUU>S?3VB5wt}MTW zc_gND^T;bqpH(TZfjR;|U8+i6g6|=6pHzDK#<}vs(}Szzi^v~(x388Dgb%$>ua;M0 z{4;%5wR|RQU>(&)#K8LW?i%^mjIXAb*UBp~{+d3kRz8;v(y^S*TPH8gHUl)uGGn@4 zoxBfBya7pkVV!&zLkn!)o!aRJ_e@>NGfEn6X zFE4Ei8V(0_8RtlV_T`!}%>a!M&H(qJUkGG5u4Q%vjoHl*$#QI96nHEQ8gT@zEL;In zwqp9TdU;dEf74|e0WDvHxeNCoc!LPU{dkAK8$>`yvA_XC%?$F4@G63En z0$vXcbu7N&?*{O4VHORh1>orsU4{*;N(_$7pjB=xpc^~E)pi3Dc-jMbP}~uHP<$pM zvR5&O#Q%b$2o$QIlJaq6JlIA5q!zxvf;KDm_&eABt!vP*Q2eH_v&z&tVgLUZK2tIVq0Xg^+ zwAh-6G4ps_W9B@dIV1AN%t6OgDzG?CnZBS^Uc#>jF-nLwX3m6v%=`~HR$yrgI$}Qk zT(!LXbiKLqVnOgBbMX0~EI0?rK_h8I4U#{c-rFXxAOjyHX9XRR2(uqDEI$2Yo4hx{ zA#q3nEC4=75@is)kr^~(X~qOkS)lU?1wjr(9rVUJ=KT;fLJ2xIclruxc`1Tp-*88O z4j^Q4Y=8{L@`6_JVQ~a_I2z&z*yuLs@G{UaF3R9G*b#!<3T)G-&6nq^$34gmRtPo~ zl&_Emxp}Y@e4x=|&{QDEI+T%aM7f7$q?=K|1ALw@8)`9%d*B;k8puM3yC4JK(-}ME zC6jTFc_VbfQZx935|A;VBeX%;9aP(aj-`Z;f`jI)`9SAgDzJcRJV=~RukDnVl>P(K z2%4P*dH4tN;QGVq8$0Di**ZW2+8xu+bjnNCcYuex_i#eC*}&4{1WLI_*v?qWf#9W^AFJ8To%V4{8^5$v3}5(3$w4MJq7FSAfJFH z^#8q(l`!Ugh?OvprYAsG!mR9*S7DqnUAj_Ue)=J9d11zb(;55a^%G4zKb-5>idIu9k zvK+6Rm_B`qydOfvh7;32ps3h;V!Fmu`6Q-a+~CzQOglNJHwwv1s_f)cWB?sV3Ud=? zzoAuR`aU6fmGlXqR?-B>!Wqy)15iZsbAxjE0#-%P1;U`#6)UuL#RzR(F`@J*9tdP9 zF~Ry2Aa`TyQ#4GIw`6Mr1r=oV4P)E%U(@8nnJ%zTml2UyfUUj(Sr3|AA2zFR+Cg3h z`3PE8!n+#K)i=;%a;Kl1Aur62*4LQc-ytt*f_u3QD6qgwJs>Fy(#3!*w=tZFf4NNu z$XP@!w*jjJdjM}=1FQ_mF%6sw?l_m*fE6Qkj_~z2Iz*=b7n7GIX}!%XMndatT0#ED zTyG;>k1aodeGke9_E0d4aS_=K+s7^9Rr>oeyZMbUs)#J1ZXD&zDft0r3!gL@#YA+{~g>=U{-)FL0Z9!Sf#Uo*PLk!$Z2e5 zOdF6^>1=>3Y*Anqcm!Uh!+ij>+~ojxmCp3@bLAyP;XM-QoIfbgIY1YENKU^mC9hL| z2V^QJXM&dKfI2GRfjKj#8(?uWc%AMDU7`bbHYg3;058$Gz?&t2IEECuL;<(e6(Ai2@X8)Pe@>;HhQg|5o^0OCN}L*NBEj7ZLbuF8Qs z2ec~Z2iQ3uP@Dr@l>>JUbX5+>IpFFJ;vDc|AgFU1Kn?4L>Ea#o%6wBmjco=qrU@Xi z3DZ3~-<&TmKRs7QUTXTR4tWvo9U!S4kcBjc3*?ojH+0CWh=R-j zg|HdZ1(5s&$V!=Bko?&Wd6nsQo${PPjAl$PzzUK4xB;?B9_~k81$d(!ble3W@@dt<(1jKfDC1yuFxqjH=R>XUXrZ=)MjpgbPpOp?Q{Y3?g6L+p}?uY z)nd*J>M1ya+TARml`{f}W5&UW7Ni}ui?9UTls97n^$Va?3Al4H1Ki$Tz@H`1E&^SD zC?1FYFUckE)5BSZQKR~(^AFy{RT0l*6CNm~bKLQ$ZKR{Oe06U~X zAWNVbRKj3cWdv^7v4SE5s;>jwsbK360JVLm|6VMwT|Wh6IwU7e05#G2V1VBec@-Tyzdt)qLIstO)3B>XxxEnxcRYH9W@BJVOI8bL1+Tj2- z5<#w%aAZ(qcpoD#tK_X1cTC^E zN|cvFT;3k`kB@8l1x8mPk+8zUWak|^fhr&rt{sG7oWamoxCvPvgs$*LDu|#UI*^qh^?15lsGt} z6SU7fO$jkyqQD^V1G-=Q$c*V#>*Wo^&tX?J{os0e3D8VD|5O7@E9j1FCdUuY8w3=Yc~~HFUqF|6GlLQUt0I%*-bpPW zIVR90;7pFcZ-9A>p!=(tKnDrT+903H^mEem35P{Rr}saQHv(~LYWTIMzk49xI-O~g zyaeOa>C&6zjiH*S$vklO;c*%phT)8B8AmzV#BP-KHm zk?vM`d8RpAr~7S{mj)S}4dO4|I=u(VUkT!G-ZK3Zl>dCIyuAD+kb^*rrx|%Huomi@f{9ZAq{tYXqUi1N@@yR->o})3Y?l`Y1=UQD*ow_9pku4{LB+0v4BHGA0~=<5 z%`nj&^70-%8(Rb%xxtF`u_!_f9zFEnXaGOk#YLLSCAwzafiIT{O5Ho0*<_3 zQ+2VJI{iFI(GielF0djUY>Iey%FE05ffPa9r;SaK_fC0vw#gu$a!)VV2?>!-kl52r zAg4`V2NgRGGOQV-jvs8J1~$W3cgf4gegbI}04q|*q6jra)X+oZDaZ;zux>Rhx~Dhp zl9!iX2Z}*R!d1nlX#XyGd8Q{@r$5*Q$qj!&noc4#DPz-Qy<1+M?Ho9jO;6Yj35m+x z^72fRwoae58)DG*-SYDC`#?_Q13MACtO~uT__|wO-eW%~6d);49*b?Lp&*AI3bWwp zczWPlNV+K8BQGz17GyQVR9P&hPG1jFw0Q$O@yK9P^cJLO6DVCk!dV)dBJI8M@@x-5 ziiD>J?1h9zCP?fOD8chi?}Lgh-zzUKe;wp3NOG0HX4uoc^764K5g{UuMGSXh0$mJ??&*p9~%yv$cbqBs9GaDrSE~US9q?C?p^`L;#!NwID^SL54y6!jDbS zZjkBELCzGK{@@72#=rye@@#WKVxrUKjzYxx4}dBza2$bCBM&yi&Vm$mf;0+)6>(!z 
z#C=d+o^2P{SF#8OV?P)AOKW`p4wu<@bQZM8Sq}U^6Tiq^K2?krcp+ zK)2pOYeGbQy84*BJliag+cl?OI0muN@sPYc+a-|c!qfSVL&WM2$;-z7gj~!6nGePv?MFo zr8rZdeE;1h0Y?_FB4KQP1}Do`|C&M77D}=_Kc@|}$`LtPGMzmdu>IfGIl)7O8~ z=Rx_P#Kv@e&h$G_J}99v9iKB@>L^4#IFTJW45|#kj=`PC4sB}{a1;S6!kx%kjxoLF+KY`o$;i+I6wMoZ1t1! z3ZOOEo+srE`O(*4cY-8$o}Rw?q`Vq*g|#Mhg>@QyAvDef)?%mR73`qPs*#klWTCIB zPKTR?v8cM_l)NhA<)74JP z&u4r*{n%-FIi|TMrawC^uf%le=ya|#V8aZ~$SW{Do*sBcUP0k8Xy*j@s9_e+sUS?y zo}q;TlcPnJ(C z;k>-G{0GpE1||V`#|ktRCE)mC(+mLxMu9^t)9XYPBsp&G0%PMywiSw56; z#`L($^6Gq3K!=-yj$P(g5D=I-eez{_8^#&aFI<*a;{{78WGQfiWtgwX?_iuUea{tn zS?(Dk3QVA7d29mx)1O?Ck7ArK-TtclEXMxrx30?XVAP!eDg-r{zA!0*`??=Md(oLR zn0_#ED>B1;&H}o&krm|m{nzC`fIPkNhP)z(zH&oe6%-i1Z^(Ot_#QXq=Yr_xH|489 zbn-2Ty0%;Lx*&C1Z^>JOmh6AJC9lB<>Fmo*x4a`?1nJt#GPX@Wbw}Qg@!@pNyYeMS zJ$caCEDVm!(^uY=w`Odc{`4;Pp1k-yq{Vrlo;>59>67ls7csU?XT2|P%=mD+<$d`Q zrXG>$f~E4((@))(R}$(G0i|FL1xCnTACNcy-IuQd^>wj!d}pw)5am9~%*Z@F@u$4# z^y+8w+|$=Sl-Ff^GyV2M`3;PdruRIOFJ_!H?XkQBH#il7I-mkw)0H2~D{_D?lV)=K zz%X5xP09gBf4N$L$q~|D?vb0m?y>y2=?Z;p9PExvS$v_vA=B6XWV4%o_=&s_OF&>y z==8v+@+z$C0#!Un%n*cVFqk?0*Aw}m=^viTvuqD|D(}L`$|+FA<1u~WdwGrN2cOBS zvT_Mj@wh>h@;{f?WaSp9;&FlS4L-=TFgion0lP-fucc z-E^ZD@~YDtUdXdB+JaRv3QTW&A+NzIC{V>?4HD5{v;tGp|G$tg2Rp+I#8+YDo4(+s zyargr1mdu-FXjEFgKadNUieC0gVA6*<6C(RMt!gbu*~#BujKt0Ij0N0me*kA6R6_R zhN#p6Q`7rj%d4^q2~_cDfVD7+PrnCJ2og~Pi-4Uv-S~~XDprZ0ReufeJ*P{kt+*2X9WmSB|=sN#`? zFvSF_cqG8g?YZydmoRfcE2=fqB|ph~a=@n9r+2TCH)h;1ecmVeB-rMo-07OD^D+LCR`m z51O@zDB>J6`}0-a1L5}Eb@FQ1ht3{>ht3dF@ITifPr=Ksm)C-gqvayiX$lMiy&~X6 z#~eo><7c_k8`jHf!A8^2)lOgVU0wpVpC=Ej5j3WD`@4J&&TQX0-RY;i1Y^ha=%4b+ zj4!6w|CG07d^3ITPkC*on=_`r`YAtM?mRoxQLelkpwV_F1s2df0^o}A`Niq;f5|I= zsaRo&C4GGUNH_+P~#xtf7?|+{K`QM?@jgEdsTGmj$#16l}f$ z_)-hUo4ckr{gzi^JU@NuZ}|iqB}*Q>WH|;OiSzv<|AOP}FG!s?{qJUZNmb~0TrQ6B zIH*$VE%K7EQ8|!OoTGBNu*M3mQMp`2M);`Q$G`AVIgp`PN9FYYAx7muim;B#<^F?= z%Juz|mqz%naI3r|)6%We&!C8|-zqQ3u^H6);Sk83eqpP;C_=~Et@4tp(9t@OW!OgR zpdq5YOhw?zCVjarkXHt-thYsa|6k#38 z^JP+ySA`DcffQjI%7c0(aHqT^Y(Q@V6LMJf@06G1fDG#i>UYaaa!dk^6mkmWPG7JaIgIx1mX}n8 z4*r2Gz&7{?wcz(|c}dt1AV?|BAwYN-;Ti%2r6<@B;6_#j_z)n-RIEdQ??H;7Lx3Pf zScd?0*%ahep+kTmMc9S_pz z^-3@Xkz_d(z=MN!912J!y5fF$N!So!1BU{5TEUT5L7oFLL@1Cuyw0sLg^0o?gHt*+FfDaym48=MU^p_h{Zh*!B#lQiBZ6pY4Gvi@-NjB&ZVge7uW<@~-c@D@B zqCoESgv0W>lnf$*Qz~>25#(arDHS@12vUSIrK&;)5kZQu4I)Cl1x~)up+uDA3mHm8 zOTM7N#09*NG8dFmL4%3+c#*RMC{coj6lM4zs=x{IICxkRJwZYT7C{cgogkqDiy%dW z5+rnZ5hX!Fh8NKiBxrzf0zbrkptJ`XU_8f<>_uyK2~xPl^+(87%~z_;I7F@Vl(WO7`var!(-1v49jIOvcRkT_hOBlr?n z&}{;YB}y!g2$dicSU@HSNGZr8nam72xCJEcD5ao>Bo11w0#@HBrJyE?FbA|fl?k+1 ze!<4+d!!W9MHj#w36cUGwdS~R3W4J#RrvAlu;0sL~zn;LJOWj24T&aK?jktI?i}D{fC^w zS;k4zkI5^U23vg?o%sr~kE7c*Qt%`U@)sBgSddrL7ej88>a8Ypsya$hdVoudTv!M#k;i zzuGCpvxw{#QRL!Q;AW{)U}gp1?glzRDZ^Qzk$IWM=Gd6RDg~k;C5mFtNOtJRV6TGdSj%5sx;UPhis50u!0*%3Jyjps7Qeo zfQHy|)BT1F}8gZ9NlDL9Jk7XhhI;B#V7;8b9B1YMB5fBL>C zgpl(C+o$Pep zcm)o)7h$TCtiUA?oXI1RDVj#GdVFRFoGNajzOq}Nd8?Bub?l!Uqk_C!o$SvBBARN z@>}tzy#mWZfs@m1GZf7APJkp7m>n5BSR7m~DH_+;+!Q*Xz_5Fv09cCx!*00f0)dm$ z7iB1@nw}DIWG>`mW@KV!;8tMNX668?1zoxdVnPfAiOvEEgYJ$13vd6Ep}@k#czQZx zwt^4Sjo;J#vlWt=-u&LaC0jv^iSfwx)42+#85xgkpPsLv%EU+Ob;ql zc*uBUx=E2j9@B=2+m{q6Br!3boz7mWpvrh|`i2z>qUMf_0_R0Q(|wHE%mN@6NeG+; z`wPwotC$5^Gzk|HIJ>>FR6&4=@xu15a)khp({5KP`0`&6QDg!s1eJS?0vEQMS1F`2 zu`d%5I3pr3Z@S%F1%>H{YZUe{?YXtRu~wmriRr|x>AVdJcNmXMf7PI%2~v3(uF|Se zVFlBTTic&BDl{@G+_*hMAWNH>L5Wp?Re?c)#f1S3b{tx;5Oh-B?dh$}3jT~oreAGV z&;^-u4Q`HRi-IWQrs?)A3ZT=!8d?-U7xC_DQSek+07^8VBRw@iwXc%crZhDg;xK)H1nXsZ3zU^x16+dXO}xHT_GQf(YZSX`2)jr)#w<1T)^5 zF0ZVRH~me!f`|A%5e0V8ji#UrvB4P*beN97uIb(#3MPu1MOYj_Q^SySE5geNy7Lrr 
z=_p88VBhow9SRbRyQc5#P>>VZECRarh#howGI;N#0*k=D>90E!6dCVL=j~KbVZ1lp z2ttK-Dwr}Jo!;B25XiW1`u$D?dB(ld|9673m~G|I67UgM?ryc z*L43L1p~(W(;IpeK!@?I=uvnBDk#`N=MsU!w}DZC-LV5)jevEn?^RG{yg&VNFIf5S zUWH)BqtpHS6k_x?i?Gy#?$bmrTy`H|5ZDJwL7>}YK{K)7D`c4&1ny10*{5(PZ5ODr za>xc1Zvjed3apNhVps`s&f#8=tO4j$VRn!T2PF;#R?uCpn#>GJoC0831xRqRYBC!r zusVW^XNUwS$ttiYa42vJ?42$>K|zjj*L1513Vw`tr*}?J&=#B}q5!K-MFbv+2rS%w zbb`V)X8x@rpbIcS8{nB0SOm6CUo}NRneo8%b5j&R*POkdq9Dq6Xgcdug?PpT(^ID^ z_=4IiQxz;hXNXNzNM<}RJ#d;r5aWUAOQtED<_DjuuK?OF!K=U`ux)zXbOjCReW1F6 zRg+mj33O%|IPHSZ_+b<{JpJHw1uMbLB906`EDoR*>)@;RHc#i7p&-e)Z@StH1t-RR z(=%r%STpXMzI2AdBF25wJ!UFIGVYtcXr@9t7SP>7&GpguD(oRF5}7R zN0%v>ik%SY0Uso)zyhl@_Ad}PFXD=_bn+Ohr$C_8@?^`yy2kC#KgyR4iJq zplyR$Pr!`@)eizEK_)Oks}Qhp(Ar{_z{%;HD-^02&rfezp`gtOF6PBN&Wg-}I0{~d zgNt=V2GDF1Tuk6B$XJL;kjen8YL+6XvH~xtnXWKbK^znZpI|{e-DjnOB;&H}X)6^p znHc9ypRih?n(^Coo;3>gjNd@MR%CHx5coE|5F)T~je-i}x9Qi`C@2e@5>aGSVA5s= zP0=$J3VfT+xmMvDRR>%vYH%zEQ!AaoY5_ zjo|p5v{50K@z1tR3N1`b3wCaA+oGV%JYDa;f|x@Z^u|Kaxox0AfI;9O3qptuI-mf$ zUkiLg4d@_ac5VeWNARgh?A!^{Rqre4a6oh(=b4^-UqNSj{0;?Yxt$ye%%FR7Sy({# zX0bqTA_t!)%P6pS`hgt^s*F3QS6&g7o6fRRp_6gS^uC=6paZ_{?^JMSoU&bgmx3Sb z_P_%QZFLFexZ9DsW6+cUwey z`i>(C0*v#fpFE;qm%$A>>V?B`1tX{%Dgi$Eo*8tO+5)hE0775}SbzgApuh<_C7*c$ zBdD0tU{U}b*06z5fx~eNSfNT5^a?)?aAC|f-S()0kt4SwW0oQ}=vESzEJaWcmN83_ z5xm_*fdjPPqr~ygrWsI6L3dFLWC_gYQQ&Zt$l_LF1Z_T=zTv1sCL`B$sbdODxwyF% zI2=DPW+`w@KY2_+iJ41*d-}U$3K5Ll+ue^VEM;PhoxWhZg8X#39SXeDD^4jSF<#q# z`;-DBGyjQO@T)Ev91q-@p3o?%G@bXH!f&A+oS;KFLFaLbfDTyzttpW@udq{WCnsc4 z5hGMug9&_a=gad7lctyMSJ+dJW+deHOwbkGERF`C4HVOunV3@k#^j*D0jlj;K+=vJ zpzHiV()Dn84+RcTJ>$qIPzAa#+?(T%U~y3309|y(06N4Nbn>182k0U< zZqPMi9QuqBh*SSKKy5}2O=bZ_P$)B)F=;4pC~$+WoyO|mdJE9i_G|*swO62u8{~9> ztI$0&pk%^?MJUE7%+kqeZQ_S7er6Hp>S~e zj++W*j6bKpzp1d1aohBTw-lx_?wnq@Oj>OEgN0Ho%$FImrtf?trLeu^w!%6_{yoTP zaLcXfhIbWAF%ssvYBqUseo%(t!0_Pq)pr%vGr|@_Jl~#lU!k2<?pLbj4(X>qE8Lt;*5dY*G) zafw1nzCvPgacWVCLP36JUP)?^!ggr^#l@@;bp<6wFjdJQ)i5IEf5>I!L@sW~YiXTkIo=VT_Qf&_9C^D4K0Zd9Diq*|0(T$)p&P*Pct zs*qW%P@0#WmtUTzkWva(n3z`ylG$F}qWG0r5@fVPa(-TM3Ct@gnZ?P8MJdzcI}|Hq zz|v*;If*5iIjQI>?sOGnN_I_7|Jpd^U@KPZXxxEK5~LRDi?;vWk$(f>b@%qN4nw(CtnC6t$%xVUw7WQWRW} zm<$he)L>eaRC z3ecpR3^q<7Gp{6n`$0kF9w82}Y(`@7^g`s>eHz$O{z`)Mn_yZ)ssKwyea<@x>n_UFl2}_z05T$>7)nV(@^R0173DGgg4apdPpZV)Hb7_{9X0XGlFO%iuWa z6G$^7gX4D?qvJCuph2cEGBPq~IBtOP!8dw3-T-k8dMy~4m>C!twyQHRfuc|T_H?%? 
z$_h+VzD&=UqI|Ocbu=S5EI<|&|AVLn8R?e52v!er0pkh=a7cj6*}I5=iIahW0TePK zD;Su#7#J8p;t!)4nYbAkz)2+YA0rblG^u=F&cGxDRinQO!UpN*TEW01!oW}u)@;tm z!0;u51#DsPJ%~dO{9t5KVQ^gVy-R>w^hX8@*af9B5SN^ZVqkJ(aC``o6v>2Wbb1GI z@giXcCUpkKIX}7txD`NNVSy@G{{*5y1FV37fr%l2sh+{{4pM0s|w1;~kJX1yI71f+}-_T5AHeRsn47 zoZlelfoe#3sI?r2A=b9RtW{W`$l!PftPT`?%9$)o^&odnUjk9c168O1RygMm$bq1^ zQiobB26bNm)O`$0%nXiqK+5Dm$v_8cZ`o{!aZdyx-eqWEaGdkEOMqJult2tJS-|!9 z5dj7!c7}QehBQG2CM{6DQefx+`4^;8l81qT!5nI>)oVzGTXC3y$&|ry&c7}JZb?x3 zvdLrtr|la(AeS&OTsXwQWX9ll2dsdPfq}t3lLeeu^B+SJtw9RJg#RED_!$@&JnErt z{s>KVD~^GJ-ti7tg#ZHsgAddZhs7X{IGW7BWWwM$r=eSbTT+mLfgv!H1)MYs9z&Am zlVAoWJqE`+UW@$NApTf?lz~Z;!SN1Q z1*jlRhPv{I7$k259c5rLVsM<(1TsRDfq@}ClLegP3L6=~bf`HalO==W9k2p11_p)_ zs0lkIAttaLV_?!}aGX=$3^GETfq|g{>IjdA5Jz}KGcXx5INkv(kYHe7sD}FE+kJ>X zW}7jBTsfx&WP&6E149$kgcuo!3FZZiOkNC*cfbmy7#J8@Gg-jpMc*q(x#2&Lp`J;V z!EsJ&w*a?-Gy?-eKhzGj7Z5x4{9s_RWnf@neDMD%gX0~LI!PG@28L-+tKLFGtQ8Vs zbJ{>|k!4_DmtuZv#9<)s^?aYQ*8!5F-|PGBAlUINkv( z02O&3pbD5DLKJjE6wK)dnE)#7zQPn-fhZ`2D7XVwzyqrPenI^qGzk*v8{HY0#26gs zOaK`HD&-imAhpf%(-0%#p+d$ zor&E7_1u!6QjQ<0g5eWH#aW0Qb0&d|0F`ngS&%e*RuD6N;wm#f~hwkX<}g>Buz{OnE)!}te{qS+=W>2yFMC}{v7Xs zRe(x4XQ&Y>?;%F)D})#^4P*qUl=Fmo^wd>IP&pMbFv&1D-T^640F`onSu9MToMZKj z5ljacf)s$-SD?HwXF8}@0F`wCAa(T&3=H#cKoma8gDd1)S@ zBOwb?i}l=qGvCgpkt$2qe>Mu3X7#wh| zYc;4?sb^pal7+NvZpVXj)0~AMBlH;<7;ZyT&CFMj?Dm9>5uDw?3Je$+7#>2?-G(?w zIejn@Y{#5Mpn}|xfq|j48j=-kRx^OJ0$&s)g)=cY-T^Bz0;T__&>S`A1|&xvf>tr0 z(tI(h9H?>Wcn7S&gn@zKJJd19A|Q_GZ2@&i9OoiY z{)Psb+bT$q?T=(&k^$AfU=?Pd7HBpoNEjGe)gU2q!5Hk}IZHuCm@_aiuxCRO=oV{mV=D2W?*1Y$%Yh@LH{9T*rr%;!{rWGfeix#gKjpY ztZsb_DZ<_rK(guzkO`pHlrdDn$*YX@;1-EW9ys;f0jscMU|_I>I`jQHh%+}uGk`l| zE4u}_MeU%HHqnsyN;?itICnsjlAu4q3RVKb1Ng1g2U}12)15%{~YFA8ys#2JXrV8X7ka_Dt=7E~8bD%0YW}vAQU~s&% zz8<6u)XG={RrX^7nlex>+yGJqDvwq`Rpoc1sRFg~z^Xv)jdf5}*Pf%P0`;Odf|`t= zHpdo_DvZ zvhWUA0jTM51Y{Ef1A}J(B(;5ygfy`>gWEr#rpKvl7ABBN_xlWB`mz=ylRbmuoz0-i z8r1Z-3r&yxuOaF2TOi00plAU(cFq=%I#3Jb0aRU#6-3?YHb{B`^_s2!IfcFfrYG6B@|P=zYcn+q{v3Pi!3UEuZ)sOg~rRUtM5q9PHZV$N=m z5um1rHdF!IM2G@Mh=My{1)!#f9#lbXH$;IVM8TXrAQM1M4@0PeC(j`Y7J|G1s(-*L zKur%5sEVBzASx_DDi|E+>;)MCYI>OGKw5(DenVP<*P|gtKUe{%>0u3xFV4@9Hddo6 zBdDpkU?0c?7pP>@Cx~RGDiL4~(3RMpH65LJO-BS52Nj&t^d3OrB& z9tKsU3sqzZRRk)dz=}X+csx|mEog&J5vm9@_;mne5(}vRlL1u~2UW!hRRt=ZK&m7` zg?V8PqzuS?2T96ib3pmjan3=Izd!|fHB`acXAlK^5CwO@3P5FeD^!8NO^AYLv%x0R z&p8A#0#s`DLsjfL4^goXqT&u%0jL0<2~}Ww1fpOXM8TZHAQM2n!NpJo$x9&$@*oQC zfE9pB?X^$^hh{?*xXrEyyJF4}kP)ELdOK7_64awgP=A0GfXeBEPzAP7kN$$#G3O}A z1W*xu8meH`4Tv3=APVk)6@W_Tt55~^&M`7^fy&=i5EXNdfs6o^%=e)xULA%Q(F9R& z2dn_p8+-{>UI_vyVDv}Vlpt?P=FM^U|9`y_i_0Tc9b{_^N zZ3f3VXFx`P91xJp!UU=b_}+soW?;BFi-F05!SN1Afg-34lmS)n(Hf$_f|Zde5nLxh zvg_Gy0d7T5_G!pvVFJ}+TsEMQ0S1N=7DlFMu)=z<$~$0{pnNnnmxT#bHC(lZ)NdD9 z7@5++D!~fpoC7%yl%F<06~0^qQOLR&JgRvItN@gqPUo^PffCb=MGRoN+Kqw9j=^!x zd5{U9>~tFx1EBP0Z3F2@<@^N~{CB`AK>6u$E~FtVVE}0$*cyVXz&RH{Mu76uOOO!^ z3=9fT6BG(SL&=VJzzRUw>0>SnxazaAg)|Lb)gIs)9KiJTQ6( ztOAsuSo0u8e6xWVA$NAFoDXbgu9@&3F;_d#E6?9BS4Ls*?Ew> zd)bT;oQI#8f%}1XzzRT(nzeZ>Od!`DQ3Q2t7#KoNfa~Eow?HOdJae8wRj*w}ya6D(2h<83Ah4JcFvxaD*sGFk}P|KZ6y3 z8a3bZAm!7qcc40ifkFK}1Cs@V;w44}c?M<8QCV=VRUP>UI%37-2Z zBWN1p4p;?fkYx(g;^?0ciz8zpV?~caMt}xcmOvHQLKSGVg4=v|zzU2Q7#OzXvw*wE zakh}Ivi(0uG5VxifLmM-G`O-K>UKfsIJfBe>AahjrRwj1lt_X`SB^j}dF%#p#k7x% zO#TdxbDn}+0UBMo0=2{yYRQkkU`y_R6@Ug;?&q_BGj10&<7#v=f~RSpflL4mu6)ji z6tt_LCDf(A^^D-*KadK2(CErvXjQU%+6f)w#!1)w1pj{-<6 z71}^zsW_gY9yHuG=M~5Zdj&Bhw-rA@@&Q;C zAQK!J7#OMxAjQ&AYe=!Q;5-9p{_PG}ffEA*LstQ$$dJAdDl+OB7|Nkp=?%yTX9fm_ zIRz|CpmO-W1*lA9V5oS<2r6~%fE9o?I&6a)(fAQ$1OtPR5hGJ9gX5gHAQM~}7#L0z zKn7->E`SWoM0^9!Bi;ckaARO#xCT|Qw*C%^iaGBU@%mx>p 
zb3TGh@MU0N@GOKBY%b7(O*sQpWH{adEARt#j)Dpy&9SbfPsOb9jai_REUDTQ^5-6dYH~TKnaq8LHr{lQyoLS;~lVy zFwoFz5hP`5T0-jNBP}4K9Orxk84=FFz#v%!$>sCKAes1N3nOTb{0>+_1Oo$uY7r#e zC47V=noDmP!41;yAQK`P7#NJ9Cip=oeEaI-!7cJTU=>lI^#nzb{PX-WBe+^W{vFbC z_yIBk)S&i*+R=Rol5(L6?tm4mX_3C&+|Y1_p-gB1obM`pCdk z4{D(@d;*vEcfcy*Koj*)BR&N{jF^=VRxsxm$cT7Qlovq?t2>U6u9e*fNJ9>+Ac28_ zVPX-auv!MK%kCa$0FP4t2AKfrKrDoKqn;rq0pbrPH*mFn2dpBAfq`K?)DgjtKtpK^ z3^%$V4bnd#Ba#^y81@yhfQObYE`oHgHcw_`Dgmt)0VzlU&0s)1I#C5O^7`ut#1Vf% z3Q`#u7%mspLn5qA5fWh!j(}QQj(5N+(m?YM5N9&X=!Yl>4h5wi$2tE%Mu2)P?~5Sq zG(Q`VP6mcMA&^QNtRMrlVy74q+H>0=)xKURq!aibWI`sWaVT00X`M`f&T+(CgS6zp zDzZT7t{CFcEwYeaOy6}#j5hQLa7$)0Ffb?1Mv#I$1_lOqs2!$CkR}rA zbp}wQcTQ7}0JmN~0|P?<)P%r3hzZZmfg%*NuzQ0knnl+AL6uTkcvVE28N7c7A8>Npm7Ey9c-KhDT%=fiWnFe>WU%R^2|a= zwp0N37aixcfJ`U`RXN3wBF@GVQp9E1LHcklJpvNkdL;}D3`>h4$yL7(()L((Zu+|2 z%2M@nT0v%%GB7Y~Er!g?v_(PYOvGA2$=2}>SV0*B1H+zTNX89Nf#jJ#Es&Z1HjoMB z3=9mXp(dQvgAB*c*$-}n-T^D9U|?Xl1og*;MG$`^sMLcC>pATpBPtmf816x%u}&8f zja9Lbcm^w|VqjqSPz)(1n-@X4vnC0UjN1V+p_+k#;af4JjZkaA2=1;o=`%2eF*x1< zE2v>$VBju+l+|;b8JNUDsne+)yhvwGC&-9e1_lPD5=a}N!x_>>U}*>E%R68Nbqov) zrX?)kL4|Y9kTHX@c5rad=>nMmGSasMQms9?52@B9V;~^`R?xt}zz_#Dq5k4eh!H*w z;F5MuH^_)a1_p+*5=iAbl|plRKqfSU zro^ENepx^iyy9g7_n*KDT0m{u5|(;!7ZsDPr|@eWwU1O^5M|58Zl>ZAfGJtP{yJ(D?;K}Jjj z)rO^z5Pqiv$=NYpjGzU44?qe;b4nq_;;$=^hU9)9a7%N}6p(I4Q0Y`x3Q0yvS0Qa` zV_!xlYtVWSkP6Tk+vHM6Lgc*)av1}|4iGuary^ebORP&GDZHpm3fa7j)XB+A)sAVugAZ$>6t2FE*K z1)!m{x-v+Fia%y#ss{~gJqQF()XkX#G6K|GpH#-e1R5RLcNWzDWMDWSzz7qQC{BV9s2S38100YfuFzmp~N!@`soJRsgc|2Sh;?B@qEf`(@1EC3k+8l0L?4pE?g6{4Uo0Ic8+SOKUDx2hb{ zPJFKj8QJXiWCX2rnX?dN0%$00OF1Na=4e2&=Lb)4jNbt(kO7rHN1*<2JO`>I7#PZY z7(qkSa~6S&01c%*gBo!U+SCkz8Ua=S8cO?C4vEp|GmtFG1kIw0K_-BP(zq&Ez)KfZ z)EU8aksl+IGlS!u#o*QvXe{l)Du_S+L#OArYl2&ObC!UN01c(7S3s)Gmi>?}P8&2? zffaxfi(LhzW_Fwc>Aj`IZHt%fQHgsDj+3|;5$ft6dJ_HR1Yd@z$!pvX^~JB z&mKWkBnN`aqB+YzMu56Dc~AucFCc~0I%r`9Rsb4GYp!5n0=f3T4yb#@z%bd1k;$FG zan5p(38100B^4}8AjfQe1R9=TsArH2grrQc3ef!E@d`*g$dV7zikTS#?j$W((Idbu zdai;6yjuFPEM$C_EelknIX(bOYIQ*Bq~Cgwvit<3!M|W7Naw{07I0hssvM+=>rf3E zKC5@U1Cj-=_&8GwG5hcw$aIL12dKgAIA;~e?VytQW(A~GvFkRZRdMDvxT$^zq(Bx_ z62Gc|G$g0}gES1afzn?%8xwf646FiF8nabG z>b(d~NI@811X-@V24n=NG(NHv;?j^_NGO!5fC^p5BOnEgpgBhn1H9@(v=Wl6uH1uk zqpGzb&78F$qc}kIk37_}tE(WbN;_Ld@FHWd3Q)1ER|zT4#3zF+VPJT?AKamwvkqhg zs927!V*%In(TgB8y>T{pWyukc0`)0w!@?nuG%WwK9&Fj1^&qoA1$1~N z#AoFukT}?z0V+Bi4}uhc%>psxKm~OaG#K6AK^mf=Paq`(19+`Rj{vtEsI*Ris!DkZ zQDuds>dppG{U--1v(uo;9$beglSfiEXCufkP{Ex8Rki&rL=_8?sykp+pu)Qds>0jZJ%73f`+ko>PT8IpM=3mKW}K`n_bpwI`E=JEq@ z2`b&^SJp$!+kF>e9=A8R2{>mP$VH&yeNiO~c+uUtSjf_hDMjE`f$|`avFLU^pen z1a4k{6@ZF)`6@_`ey|Xdqq(&pZrB4d0aVQ2hlYseBS;x-6H^biV;@KbIF3OKNl<~0 zJ~Lu?13dQd52S#R5wtL*M}S)iRPaBmhm_C;eUQ->iAqTE2cEeVVQ@UZw?}}Rk%htW z4v3)ynpIJ*f{ZClTvQKQD9OOE$qQ8WfOUY2-Tr+bJ)rUz#E=9{wCGhq0{HPtNC0oT z$pC6|oChfY$0mrO1e$TNu7VVvH&#O07GH0m8NMH6IHl# zfmMR$UBaO%<&QHm)q~pD>^`VU=NtqX3YviV3Ju#6P(vSrn-?GxK`W9O9PfZsN`hu$ z#2O&hyY)iq$)s{nhtYA)AyC|dW@6kLAmPVV4++1fa!`(|cf12u0h);kZh-VR(x*W> z3mkQjS@XjnBR~@|i4BluxT!hF?+gr`XBa?(MR&joKoc<~4UjB3X$2%pUX5S?4I<1r z0x|(K5ffhp$&v+@kc<{pp9U$az$!p9F=O|i0?o+eLWA)> z2PC!kMS_PK_k$FGQs^BJLlQIzQwoi{U~rss0%SU9?q)hP zFD(>@gRrAuA>4fcWa5*`UWz z9p9n5tU_SMgK{kcDCj_~Hjr{j(A3ZysPgAf<)&cejtyr)?gph@5JM6)NAwk{@?kwx zsTf4*L68b?2!j}EpxL5-P^C+tD!(Vfya@IobZHDI=0Hh06Kp>T%ihL?m=^JPmfK)I# zF*x1~q$N<_Qa{#0O z?l` z_Tpk0i!{=ND`4BWM zm0Vp9sVYnFK(a4;0I2`xcn7QkG%uA4-9;gH4pMl;dw@%qIoCi&fTpGLp*~Xqb&D7n z81&hgnA8~@?|>D6rlrvLVO+Zk>aIJ^xehWR1{DA0)sWV}YCFjMfM^z|edBlsq(T)m zFVzI~=o?0eN0|aabIagd&cN8f$iTwjIOhh)IM5VTYc-@I)G>!7>nv{Y(A^!d0?-sy 
zFI0gtNCC+HyWHR*;yE`#Mu6t1rb1OnfmARsFf4{BxC2%InxmQv_4I<(kj6k>Ah@Rd z2U5TW>T-gM(_0|(KoeGrp$X|3bS3jYFztBf7O4J_1kG3-gr=m&w;(C0H60wxeYZi8 z4$6xlh9qd}Y7I270<0i`Wl#y8mAwO20Ghhm4t3j1sN23*FoO3c-T}D=G;_7L8d8hb zO=e&c2Kj%|WKg%x@eWu8XzJ=j1EfD}XbM`0!oc8R3h8~`1sMUFy1D>OqtE3ab2mx0WMl#rfcHQqfM%|)gLeQiFw~#82kG?h-w*DR+yScq zOj3r5-$7&Zq=w)-Uh_b+H`pfK`B|uKra+8cqKuK^jfmg`f#T z$2ku{MihW%MQR{LW5Hxd8L^)OG`#3|2dn@zNhMOl0`3Xv9EM~dH)sZW1Tvurv|iMkP)CsDw!HkQO>}yz!I{wt!Eav z-G2wH05nIXQ3J{5Mjephgpfi;CJhG1IZwdtU(h6#Q4ORXU+@aj_S^mnoB;1U0oehX zq>5~W_+wEF#2<-^Azi+wAO)aFs`y4oVaRwB(j1$s0;;DRkAM__+vgyLBxthA0qWe! zC`P7w(CEQ+DR6si&NGl%pxG*)8pznizu%Cti+m}tS$Dt+K(keGP$NwLKq9VB3cTcS z&U26npxLUd8c3OP`3|ImaW4%tg6DV#tN=7wRSNNNJ%i3&&`=Zu!`?JVK6wE$0yJCI z3=Oo0Vvw@&FQn6a2dn^;Z+f8$-ar+kLln$;2{HjxIin8_F8mJ3s~`mmpvkJ~HPC71 zo3|jr9##ojh6*m(K)L=E$RyCD)tg303}@Vd_}9<{62o8xph>HxHIOkhVFO5ZI^hl) z(sZ2j8e{@!(rOhn5p9KTPnF6C^*J2t?|@Z+CapF=y))SWl1SK%K(ynWHy|TGlUBQ+ zX}wwzGLG+332t!T0V@DaS{<%|41i5v2pN-F4(_`<&Up(m0W>{z2^uROgCViPzNj9O z%s?tQLGx30!P&*}14w{1k{>h_@7VDUR0@LHeIN!WXqM`J4JgkzZU70e?&Jl{3OJqs zaTqxm9PfY_oS?a?M^Mv10<3CK)84~O12O73K{HlQpvHkjSi`s>#(_9s<3J2a(7e@i zuyG6wYoVK+g`p;XfSCwlNP;G?UV;r|V92$EluakNz-I0PDFF3iPJkI443718KrBhn zoYotto_MI9Ef78bK?=A)%eukI0u&4%LE#CSu+0y3I` zff2MF-|-Get>k0|28LfyUu=Sw%0iIH>G}){BM?J!3IhYfUyw>rIb8`=D$WBAr2QZj z;D84)B&UMxsfGA6i5F6m&*uU+pZFIDuky z`o!DHVlsEYil&1W_CU>j^#B|g3@qH!`)(^sdd&F>^7;%=`3_Z~2W^ez@PljyISK4_ z(5V0*wOTVlbE{A*>56xhCEf0TR7lQd zU|te+LC0sGS31=*|JH{eY@ugL+VpdwRniWy$(GAQjqk85kI3pep{X zfCRw@aDoE6or4k7c>x`m;{a->{s1MYc?=8;@=z_@&;tRW)}IC`0Cj5bfEXt8L6iMZ zuRLdhc-9fzqGez(Q($0#n$92~-~fshu#KSN>}QVvx5)wq28P2;kkaZ@5~L*I;hXMv zS6LzO4oHRWLIwr~wOUAxwfZ)sU~K0B)vF8)PN3l-Q1mjia4>LifZJKWK*lZtt?q*w zyA5jWBi`xz?B|@x816vz7C`k{ z^G;uX59C6yishhnMNk!KP!(#t(_e#B%=rs4V+8{P!!xLg5U7g3Jk!rRt5$iXd-^eGkyMjkPAU7bXPMlFc?77 z$VyH~Yhww|^xOB9C1vLP2PN}0pv7#pkS6%N+mPBSn`gSf17%5>J0KM%YeCB!ptgL0 zCe1rs(_J4ZE6B`g=oR2LSqEB=0`q9onjy(-22@2q@ARJ_6?ecYHiAxxf~shNsz~LXZu$`9jwX;9n-~}vxS&JB zR?zS`z%#w@p|V2#9gqs8&7j2!(427lI;7#_mk+AbK-NQs6Z)Ea1-KbO!;K(@(iYHK zzFJ6&P8fQa#NivDmMK&VXsi&VQgSP3r##f~O=ls7-^c|S?$}q~(ksBt)Bx%af>@H< zKubDnA?@9ziy&FD!3$)n<2jH5CIQe!I}l59J7`H(Eu`7{dnKgVnsJi>G?d)e3Uexm zp|FF2fguv=R34~PCqkSGYX5-xUFSgx!2=c`hU88L28MX3ll7rTK-oZ@+y-*71E|>v zVoB~|U|>jwx_B|{uoIWRc(w}YGx z8khz#B=>+u3!qMCg*rVR>~zQTAO)Z!(e8j4l6x5#7%FQat>2=vkkV$$Rd6@BsRLvt z59mCdUIA{&eW3M?&~mB10or5U?E)TWy#rDzxgWH^u@=$@`t<|S2;%w)?v>B!1Q~Gv zbjU+3q%9ZG3&|a+y`T<`;~lVqgA5D|YoI1ntcIAdz#G!I?E;x_h=GA&cP$HK@BAtq zM(`eb=orB%kP7f<0*E1bn1O*o0-AiYcp;q&Q^Io%+$j)2zBLSxJ96C{DH^#HdE z_k$ELH83(bo&~W$4fqEjhUig9?l)sdg>HD&flRml1IaQo9uQ=3oYT`Qz^x8yv7CS! 
delta 821444
[GIT binary patch delta data elided — base85-encoded, not human-readable]
zXsyozW>D!5s+Dhm`mmsL>_J5xsGAJB%oB2LCkto+B9;qxnZSh=s5vR%$lwUug9@q6 zcyFe8o6DZt4{c2FZ8SD&4B2TPWMfa3>H$7(IK6 z4uUe&@fp)sJz|&Q5CK(qjt@AezrV{aJ$Q0(J_icnkvTpy3V*cGM_ka=gL`I$N2^aRqCZ5{m*S=(=IhG%7e_DR6=^5p(?lP%CN% zQj|%nNSteqjZb6uJz5z~wh1B&RbvazHXVw*siE0@BSPun`_Cte~S(nLrIN zHc(R(bbKcK-Wf>Hh$--cf(A6lR}TpresI{pg9cRDEnv+O*aQxk6-=PKtHCq}l+!@- z*&vO``Z}1Q{$`O@lmL112B#v60?3dKW|$$UVWY$>4f2=IEZLQp!4Y)0 zphT9Epd+}gR-z#2$OCH2m4UY6gKukbQs^|VXJSxbR*(cKRCV`3s%U)0Lsmp%nMi{GGd^S zO(syIk`WR)EZK@|^`Idg(Di66jsn?=poTl>o@vHxMIHqXP!pX4w7wEFOvLC2T2`eX z3W`!u@R${-Kf|d6njr-p@a6cQQGrpHfkBBy;0&VzO9^OnmS2HUU^95y8gv^vsCUT@ zK2?Vabj=RGz!o+IrVI@x4p66FgNa8Gbh9-lvx5Ss0yk*-lgV)fOO^r$=o+VzEG1B) z(_~^$l7$2SBZw_$&b$CrL9AfSQji7BXE1^4Em$f7^=nwO1h#_H(46UgPua!lmoR~I z4agIq%)tig9iFjDu`qY$id2NW(9Ug@?cjG0%aE_1_dEV@?ZzWIRiwJK|u({ z14TOnOoc#}5`#uK6Udh!C15V-uxRl14Dc`|t0PaA0=occxyA~%EF}hTCrb$w(BL3< z+`tAZ)Px-cvXmfAR0ah$Xn`=D=Ow#LJs+rx$pTGfpq7XN1B?Uea4cX0If%(|3ul%R z%&`#tppz0ILUIZW5FsYV9n2s%$bsz!jg@U+hFr(Kff-~Bs8Ir1TMe~@PCx&WT}uVb16`W~Zc9PinhLCrDxmh3z$(yUUDoNcuh^xfSv8mp6j&WC zKoX7;IRd9aXG?&4m|W9?U$Gkof{ij!fRtctp!Ex&#yY6854szL6*OOM#u!9=z;0_(wml{kKP%m=TgR&wUw3-25cMmQu zAYG{$OrZ1VofIHm2h~?Uz>&hL!6YDX6m$s`1E@900%C(}Pl4@h;Ql-4qHY~WMnx7L z(Bc&EDpW^_EP*v_plM=I5`(mz9A7ZFL2~*7_AHQIM$majAa_FM62N0bPdEftgU50~ z)w95M(5bzk1{LUrN{|hZp`ht*Z`f6#oiI?Jh{+Lj;xV)r2JW#LfL8ggncn+`U9uiY z0MwZSb=g7A7I$Q~VgQ}(j9o7qXgZt;bQH7_uY$M&gTPvFOY)#2bBQB(4;Ls?GQ*Bh z1ou@T1qfr70B8lFKI0rlB^DM3(3m6+8^EW33+RACiBSQZ8fGvGtYcOXcjUnDGVnqW z9EO784dg}8bRUR4fsG4%cFXovZ`nH-8ShUwe$W1hLqU-PbjFCnblDH=u8a!PlRvP3 zW?a2J{3CllBlj9{CeC^$$i(u+PwbYAw$ph&vzzd*=z_G~nLx{$7j#Yc|IDty*f_o5 zGrOs20y8MM6hIv=i7bKZ(|3Gk52@dQtZ4(B1G;V$R1i3xfD0)wI39p=cmzR- zgCooF23$yi!SMo|1KLLq*8c)7q`=_#0M6mz=LQ!p)4RX0H`Q-o%u-+n6=k5y8I-s{ ztqEqw6<|S*EG2GmRDx<|P?2lSyns;wB&5g#DS~+wm>o|rW`TOg;1Yt_5wxg+2UJus zEAWDk>;}!Ja5;hxsAtw-S^{3&%?w@XS_4L;48bd?hQs)US0)e z(87xgjONS_7!{ZuUw{tV1f38FI`RrMJt=UTS%F!f@eN3eB##Eu3r6sG1-qlj^t)f# zr7aeK0va;T$_(mOfDX$7^@_fLnj&mwOdmj|?f~t7P+$htP698%=Ixno@QqzNcn1@x zUR_IJ=^J}+HmK3(%}1D2SM|pdNa|Jl*gcyU6q<-`U0L zUx4cih`q8rJRn;@Gn-71;MHJK5IDvLIvN!kAAgv^c@%v101sHXF2fIIS6&VUPzywZ zi2>B+V|J9bV%WfsvdNVIRn^4 z4WDkb$69HM8Rhkg-sg-GLYg4K9#JbQx~2xbku;Fgq$( zF-!m*8_ulBe1XM|=>^DXEM`nkKs1{f(*qU-W)a9Bv;wo^1GpQPOyB*JU72yg^!q>A z<)`cXWEWskcAFk}jb9`SG@=ViLn~Mnm>oB;f-bHEHM3yJb`LAqcTmTG6zekV0G(jL z>?mc$@B!+U4Xk!dM?h>gGo}L|gFq*MffCCBR#-5#uub3dlU<3afo=MOpX|n59iRff z12noceLp*g)O7P->{<#3K;vj^W=s=6lAtL;kQo!$U}oHz-uR1Mx&8*Ifx*MY&C8*{ z3>txW!3LopfOLsjF-%}r0B4I2AU=5F3KSS{Q`WE}dx;B_P^7FF9)L74YcQ<<-8>Fr zvzak%08NB~##lkl*uV~T1{33->8iikl^K6b_x#OnX4?Qd_D$Z3VFQSU=bRQ051w;+ zK)eGSS>PhFfdgt1;}MSO4J;f=^#?f2nHWF^U@(JDY}WvfcpEOuz)*Y4Y*-H zy@iECj%fww^agegx#_e2uyfgL0GTCk#qa<`!#%YH#DjZk4~X{xYUTz`n5Vu>fB1)8 zx&8wu$WQE`YnPcdm{iP|et?uWaDh_T50ECPpL)2E{RC34%h16E8U_aW=>Vv1VX|VF z!v*nE1D74s6c8JfNxDXysHwI9u38VA?ulAu@uRk@J%IH+a? 
zvv>riGxiCJY6pPi`ME*qQovEw9b5`MfZBB(T)bz%;%~a+H+G47P38oU7C!I=AfQ$o z2t%|BXY+|Nu(E>ANM;66FwDXVbs!a*3}N8172HrEvCROQYWgz=94Md)lELu{oWsL6 zo%cJtc)bRb0C?mXbOA7<0+YZJHc()fD1o|`3<_+p>J8-E0wo?ZrV0gC1r~u{tV-bY ztiY(r#AD9f0E#ZqHH6?(H$jW0SOhk+fDD=dX*WXaB+x2O4W=GOMR4GEFeesa}B--Z+^9Zk+Id95(}eFCa6>K`iFX8$b@) z!3gU0aZP{1$YEUn44km;Fe)*DRzE5*fjlMz8U=#1^Eg0e++b8<5_rxHZH$A}u|U;< z?wnx~c*qRm3WCa4(9&g)B{LYaKr47aZ5u`(VQ9F zi~+6J0ykn5m>oeE+p$6$M}II30PRWORs;=gf`bLzT4Lfzp1$iFyBPZkP@{*3cX}){ zhqwT|Ji7ue*e6VHVdl^Qw<1JlfLamI%3B2#*BVR;({C|z*yw@w34ms;L0T3tXDKj( zy3uS3ECP^5&I*)94$pKK77mZ;Cw{Vv>Hh#53933l$p=&?HL!qpkwcSD3kz~fgnRl$ z77hiKV{A%n(4d;bq6FHl!Jxnn3M!B}x(qW|TzT22i?VVAgYB4J$I8LR$UVK4l_SCD z8#wajup&nu4GeO^IY3RL=?QEc?u^{i z9e=QkPQUk)olS>}8{F0gXE*3zArB`wy-FyufodlcM3zT0#irM>bFgXRHc3N~5#(feY^-JH&|&19ev6$$k&$cqA9fD&=|;cV#n@M{ zn=>)+a83{4;BaB&m_C(*L!M&_J1AdufU8-B>329dIHtF-b4XZfG7C7>JAs2j#}ox*{is&h+lT>|*>Ez?C+r!U8!@VtNrLhdLws z^jVx7!PDRUVHfk-0jmL_V;zXHl?@z30+1#qsL{;{x&a5=J3j!bCE*G52#B{~!gOCQ z4xQ<~oE&1)`?)ySI6?YBmWoWD$Hk$}$Ts~L7e_EeUZ0zTZTjxN?Ap_PxH){L`~PE? zWIR7T^B=peDAwv}`s#n|veGh)jP(i(ps8sVB_>DEa(vJgaiF=3dDAy@b4=ubtxK5h z!^0uNxMX@J4~I14{^_ke943tWr|;t7kYk)R{U#5G4P)O~2C71zc! zMCuv9B_~4`$Y2H~R(OK|l&vN(I&x(5G4t~9f%*rau^mRoC3{;ytJW8TE^}Z8U(3M& z?%jb#>lhu6E^8EUg1 zRnXixqvL&$r}^Q!BS5;rhj%krGqNbCFo3$$44^R@aN`l2A;4Lb0W?6w3d(;8)0>4k zl$nmQO@H7oEjxX8kremz3&I>+oXi?b2?EF11ddOCAk3l9bwZMn6V$+*z%gA?gd>Ep ze|nw>hbiNt=}Sa7qS&5oZx#@^Fr7t|LzTA!ymSo|He3P?)6GOVl=+}S7Z|df7zCQ9 zr;BnZNwk12zvci<--C)g(9%z5bPd_Nip4C~$z9f>Rh3I22gT zm?khPaDdJQUI*fXZk@JbSPNo;uKu=SSOb+^4Wj@5XSQanUjbq=m@zE@sp1A-Hnj>Q z1R9g!RNw$5Xo24x3LN^3TNuHoA~GnjS~IeM_KR?UE;`!42wFhA1El2uBe=!{ow_N| z#R<}Kh7r-~f@slYI02fZ;Bb_+VrXDe;BauPHv`?w26p@lkSXj|3=cqV;m~A0z-Y&G zgAwdO<_jP>(4i3^M_ph9_aiv;8K+Da6z34FpTK0sGy}A=YXK9;4IH2)n4o3L3oPU$fLw65%XWb8a+v~+#1&veIJp(L z6j&W0i2)R8pxS}MvHlJ-^wK#F&?1f-%nBR|TxLvHKs2iv(*;o32%5-XQ{ZsCz?>zp zofl*wCny?0cfqhj;+a)}9TLxM3LudOpiAsI9QS~ix-&6=gMn!a$dkNgOdCKngB8O$ zCIt=!Hec`x)w3WrE0}o(Dt#J6gCqI`h{a&WbOfY`9URZ6KtiCc#GoMEz?21z=PT0# zBsfItFED{3_6A7J18|^%hDBJQ5&MP-IbuN`(Pel6YKMX&b^)^j2Pj^9p#JXw@!72y z8bEvwP38woc1%B*6gZqf?MILW8$iLQz~T6T2^Ozg+?f$gBS_K#r#5Er9z71w$qWkM zNCK@^0Trg8`++fO5U=e^t>f6JCFK{)hz1@DXcU0M5j1iIidAONXD-vjQKey5oWTMrWu7rR$`^v_A08G4hUs=v9D?;v*dgj4u!G7M&_Wgk zP@Hmr$MSc8&Q9XcXYAonVggnAtf1i4VCvuyc*x8RO0C?W5L&>JrN9O4@xm#K6b|Qu6^cQK-SR0=$}r1DpX_%$Z-Xfc*=Sb9@0dg;iiDuL6fI!ygu2P#WlA zg=Znq?enaVEYt#`SXQ3akQ+ zT%eep!m0#bH_xEJ3{nC1CxaEk9FWbRVg+O~gB8OP5IunYYafE zZ3X(lCaz&sV&Vmtjw?V@O<-w7rh1Uo;#Ld~KnAm#F>L|SEM`m_KnDK+`CfqoG`I&E zH|5Z0{KE>~6@ug{P|W`Tg$1Y>0gcLWIL-hqGX^CsHgLTIPJ%sb$Toe6AN3XJt2 z{T*xquxe=nbc7DPSpnjqiR?-&j`eP!3pzndsG+HqMS)dd4=*UF7OM(&rk;4(R>4r^#RbT@rD3#T7g1rjOtjOWs0X7VjF%jWD zfgL&ALHASYGW39VcOZv*2fG=pp#j=X04WJr;XyQq-IW&<6rj7a*+H2Rl&C>P0+Sij z0#K2#fgQAD9O6`(0&3&Py#%_ z4o-mepmN~^IE*-Og#>780jzL@rNA2~ApvrWF2fa!khlO22@Y89hqw_D`0U&Yp&X8& z6}S-N;l=R_q_Ag(mgsNTF-!Cp?4aUUfmMM`fmeZDU@y2F(q#THeVaUoJmbFU*W@|O z7`IR7Rp5}SM~!zzP$AF&FUR0Eg9fl6jsYF>0jW#=px6v5TQ!(|pxV42YI6gqn+q~} z0te{K^5og6Yc?I1CsUO~0+)ll%8%dO=t8MSD)^t z#9_j?dwR7JhmX<$aKWXc#0jcoG?+9LK`9+H{|vs+SV2T!#q^g-9LkKRr}HXv=rS&w zZmrBAX*C0M4i)GiGEj?!6VwO>t(Debn!zlvi3M~o`UlXEFSx;?z$pN!YuVium>n-n z?^EVb7HH&D6jopc->fdIz~y*h`axw5Y1Io1{wxj(LIRDP3S3!^7Z|d285kT@vmO64 z7CJKeg4U07INq4fsKTL8e}V~gQ8bspMUVq{Kml@s31l^|8Mx)-2nutU#bTgQX)e&< zH!mX(6R5yf;5B3FU{>H!5D?e|T6@JSu!#kAb{owIR#d7tqlT~_YT<# z3<_Wi`4u>(FHqqqWL!2~NR=arv1xj#Du)GQ*YtI&9FB}jr+-l8kY~I%T|kXPobmp2 z4K)rQ#?#YF)i`YZmvO>whXSo+;Q$S{|KUIgaX5lzQ9(P%6u1Q%z@=#eljFbt;8pt! 
zjwV^4kO6Jg%LWaAIi8=+uFhe?)WbC0R-Hqu8 zHLSt*n1R~bpg~fQB^#Ks`~(j2DsqC>Vz7fFOdT|2#sRvQN`cYw4`_OtOW-uvygeZE zz#07j$e9YDi7J@f5hl>xmmH1@rYmZ2=ri_i57gi=XJR|VtH{EmF#UoKhvf9-S{w@2 zU;%m1UF8N@3cLzz3hbcuP8v)s0*84OSV4_z9|cy>c#DBKlMDEIchLTgBMeyrho&=W zbErsx(ptR&X#9pjgNX;aRFPYO#qq*)2W<{H#)j$f+8myYho-O8=BQ&lGF?N5LxJNc zFQ}m^aBO;j4u>t{q3L}(95#%Hrk~c~aAZ6*omZE`fbr0DJ6#T6#zWJ4bU7Ruk550R z%b_j;`Rz0CoM1I1(67Om{TqkY+qSJ=T~bopI&# zqsAQSjEAPbHRf<)TshsqghL$?LqZ@+Wu{k}a9GH#gzYon6@<7Kv{Wt&v_e&ZOJL>n z^KeaqAWf3fMNK*K7*|ejG38KYTseJ}Da=fP=?Bd?M5ezsNOM4>oz210(dHZpjEAQ0 zF$YJywFQR)TXOJ%u8jC=&0)uQYPyXL zha}_S>0ve;@{FgZm)UTrFtv`T|2XB)K!NDzI^zF$I9qEdyvlT*LGWHXJ_U zr+5|Fc$7ePfaYjGxfIl%KRw;pmLrDo%=8Ji910v~K_e%u0>`HBu;s92JT?8BEr$)` zsp-ac9FA;Y{c_Vcx^ak1@3Z3&XFN51nH`5OL|g_GVZzgS>^U?U&rLV8=P&{d&f0VM z%Yd~>gRK3)n57`5z^cFraVLww;ptcGIhHY=o?h+1QN(y=`a1^>bH+2%)f_nt5iW^$ zJ(+gX_I3 zIJ~*sIMf(VO*eD{`zFr~95bD697>F*rmu11Fkn12{jM8F0^_OacJ3TfjHjnZx^o0E zo|(SJokM}~%=EMF9CF;SpyA~af`kE=2RICrJivy=dvF*qUYI@sM4g&`(1XK39qe;x z`s0Ko$qS%3fX+ogg-%VE@Z>Ongd!Kj%#5d|y99B3W?VJhBA6q9annxXO5W`h+l!O%Q*wL;M*X&QS<)fFY{gu~2Inmrh?5 z!6D5EwTCql!=BBNNcONq!R(Qn{v(QG3gfBiz0n*U5G$mn8^oad5vx5@P`u&?^@YQ9 z|2U4z5MO{zFPpwSo`I2ByuP-uG*fG$WhKH zzKRW+J3y;}!DcZka0*ONG8nH-Kbpc}#dvL6Du)x}vFXmK9D5mirZc8-gfp(49+}4BF85_hCuoE8LT30S zqL5w)2dI#_GJRhfhrISHkQz<_WHpYepjw{=JlYR0mK>K&p3afZp#!-$^Z9hEbPhd{ z7qDAL!Rd-4OMy$^`t+J~4jq*i&?;nJWMdV$6nH?B5&{Y=ptC0hp0H21&ESw=nlfYh zvvdwKrY%3F%VluLGG3Z)lffa$bmYhM@C*)h##PhnGdM~ZUrqm#!EuD~$@HC>9G#3$ zr#okH9AvybT`-qJjPcoYnQRV2#!b`xvN`k^H%+h2<~St!j2ARE&ITG{2JLs9!KlP8 zuz7k&4u>P-mg&=SII0-$Lv%l%u93@O$hdWSR4#`cDOsI~SU%;Wm4LV;R+&>W5 zIQ?1yhm%;tyDso#z5=H==z>2`PmN9B_H?~Mjv#~0YzmB^QC}WVs6u7nZ5i;i6OSS{ zsNDz}QezglGkrrLhdAS==_d*~e0cBjDuU+GKod9uo2Dxkal|v;o8DN&k;}Mg`uidd z7sk8OUzTuaPLD0-NMpP={YWv#eH59=jQ6J3mU6sj+%!G646J-p8Am7Mz3n#T9KRV6 z?q&zO8&pv;3*4XXTgeeD@&MF#Q_JkG-LxBrDgA4v*w)Q)8}<^NDJ%-?VbP~8wX0$%mTZnpX}t20cm;O$)O@}03-|Q&Vii5BCu<^ zbQgz)5m>YV-0J~dn6?WvGS8yH)WBrM$iTzK4Z3H|jHv^(Wu^mkHmbluw&@341VqHa zYm0g|LYA;HGJqC?{aGZcgn86Y^yExPt4^L<8=CA@e$+nwA zRtQNy_%`1O8>i=>iG$a)PuMtpb~lGKdxI$Va&AVJ>F?Kw7&F>V|J2Q4R*z%`Xo<2T z5+AhG2Q;53pu_|!`k57&%$On+n8AB^S)pS9LZIq_*%7k#3e=MYp9BCNqyv>o%(@Ka z;Q1No@o%8UDR4ttOuFNfb0^G9LV+wAibdV zHXwQeBh(CL4W=1jBS5pxAO}l;?^lMbs0OKAfTVH-SS4th7^G69-i!&h&|Th=mFRZ%odp1sI>Elq+ z2b%y<#?Gz4qRXJ_2wp;k5_as6=^++@qtiqCIPCer#kvw`#2U27dwPE#hnVyOMo?V> zzN{9sXddeB2cWQ+ez1>2!y3sZ(21gsBA{6TR?q4p6qilBTqrJq9`l!Ev6bI6JwV^d<$Wl%y(>+kzHBt((Igcat6udnO+9ZmpBNJ}3v`7e59knM2nLmy%%H(C1s3r6!jM&w z2N*&7#j^Q0_<4DG1$c#o7`VX)@A5J8@Cbmq6po-fZ9tRuj0}){2aHw>ObQI3&2U)) zIo6B}JfNkIpdbZ{N;+~tg#|!C%79QjJ#i9;xHJdoz_Al-Abp@dH0U@zus%nQtm(az zIK)jjvK*NOAPPX;7i0yFEKmtz_uDDaC_gGoV=5wvgtoL?0|BX=OVDU9F)=|H9F=cybmjEsk;S4`)KW;{Io%5;va^*6pv70?7-%?;WSaskfa z5eJ#hk>%I{7Xlqp2j_qeVFoROa-0DdQebeL0O#8s~(q%t0xE;yG%gYocmtGOJRjEAQ$naiQ3jAkM%C_ugU z2GE7c+`n0-zn#kwFx|n3pQWBbiILAv00$5R14g7oF9ZR?!os{FV8Ex$4?f2WRJcB2 zRRA9v;>eWECoUr+Dl01{E+Ho;FE1}5si2^sps1)QCC%-4q90t}DKhgYa@RXffb$f% z9jCxK^4yLy;2b$_$2)M2EVttvI7f!taRHno&F#1Z&XMAFJOJlNayzbob0oMO*T6XS zJmTDr8(=&hF>c2faE>Uq;}JMVgxm26oFmNbcmd84;&$8u=Lm8;K7exskoE9$JKliH z@NqM6JMQS8CQuJvbOyR-Y!6(L8` znYkSw)Wg*=Av=~4DI7rKaiDW5xDTr{2|;F8E^vX%N6-><(0XuA(20+rwdf*BYy!WT zKnFbXWjXQ}IgMRZ=?r>IC!yr7PlfNSPe@dXp&xm6Euhkk_GLW zXUt+%;N(_fRA8O1y@*4fnNNXxdh8;OSt2|j^O$rQR3M8-*%g=sUQFj+%%Lg+@;hib zDCihR#}n7Ozz1(IgO=nd@J{zx%uy}D2405(-v4m`JQu~FC<@v*GyVQz4lPmGQ8v)+ zTzkOhm`;~l!eJl;J$Fq>)SP(*Xf036hUp1QIAo9yrep>kOxeAJBa~5Q`okq0@}T8` zOiMYm8QG@mE#Fe-v1IiQjp3Ot}09Z(SriUrVJT;QV` z75P946hO!DFgWsLX@GWJvpLp-)G|BXV9injZM6V3$(cb*Q@{t4DX%m@sta4KGOuYEN;*NrwpLQDd3)f&Gh@rIn3?A7VlsK?ZSXN 
zPk|Y{IDkz6V%8ouMHU`VGZeH;l1X496DXuP6gab#*rrFW;INSan*o}|M|J!Swk&~P z(>Jc*kP_q)I1L+;2Q>>7c&8VvD;S0^ceq4w_3%az{oZ|dKHHXE1Lql zz~AXDt2ne6e@rXN_#A*;oirO3#wzzsSG0W|L<0!o4m zprZspMI0!hGg&b#0j=0(n6{2X!X1))zn=2Kpk2J1zv$;OrX=npe#1f zaRTh%!j?mU3zVBcd6-p!3*HX18*Y)t(+2Q;F&&YJx8)gBaaj)WgOrFE#lW;+QG%Gz&4$8 z1BZ+hn?Mr}r0v}T*#*d8#n1ybX1e_b4haQtVBFvY<;MlkW92|YYJ6r)Ye1^lrk8Kv zaAIVgzH0-=f94&(I;QX0$YC0A`B#U4BO`1*H8_}NaDfhH(_oswrN{zm$})nEdtw2( z>JN(&8|X|p4h1%+r40=Y1?Ef)3Tz6jjx2?a4DMD80t&3t9XD|(GO|uj-o&9Q#p=jX zrpT(m0E$mgN(7A|f)8Y1oxW%jN1!;X853w)kOi~|9kK213?tQe1OKe(GihLLI7&+Yg3a7+PpA4>Ldm@}Twy+gF*q_hG6=k#{_8l0DdU9c1}8WsYC(=^2kAY*p~SYLT6||CrL4i@=!SvTBI7}FArYoG}5M!J;-TWlTbKA2|awIY`U;WiF{ro8o zQ|9--I;OLq=15?CH9g}rM*yUz1D#3++ffBtB+BeKgELED()25*Il>r!PS-xek-|86 z`h+taC5*eKvz`S9h22>WKgKE3d(U!sGX9-@{VcdL0t-%^9(ayJiShsRl5-r%%nW8s z({G*QP*(tlC7-|p@W%5k97-&p6Czn0_c6Njf=VFf=_`(MD6oS|ixV8vJ!iRty1Ir5mEoS6RpGKV-*%gN~+S2%*0mYm!kbA>~dQToY= z83LeN+7y{UV*-qh9L1o9G-HWl%gO0;u0m9PxXNM0$aLi7cAe`SMT|^OPEKENgF{@P zZOJJXNf;II;%0b2HbiVbx53Ft_31<*K0+jOy;9BquY+vnZn*u==FJU#F> zM>nJL^t-n?iWm=154giIk5PI0`#T(W7?ro5xyxY%Qp0|qBa&%N-}I#W9QlmO(=XlU z2m*1{9&l`CRGxn80Y{JJhwrf7Cb*b)`~m0lNP_yt4AzXGR^1M`Fhtpb@6+c$e4qaIA%`Z*i|Hy`mvWB znxHfHKfdHB2hmxtIFy*Suh`!Aildd0@z{2eH=qS1D$||ba_BJzO)q@Q5iQdP+JeEP z%V5vTAmum-%CQ4+`Zi8~@s>kPcn|pWtOz9rUPecdYR5?%rz^hWsA6h(w|(wA4m(z^ zNL~ieK$I0j< z%l27+Id~Y^ChlwzaMT8iO#kqYV?B%`4$1k!wg^r+xIeZ zMlrI@{nsSms1FtaX{nojpP93d>B;Zy`7E3vjBJO0gAN0SNb|CBPGsCaeLfrKU8Wu9 zrq{7^nlYU`H+>sB=LYu2Ad`BW}m^*o$+7!OSE=j9ZeUc}2O17WXZdVgWN6(45~lTPQXW&wdFQEnm7=rm{~U*GZO zOf(@~$6Z@m1O%FU7fQrT$+s_Dd#xk;`Ff!IpHxT6%ab43hO~CQ;Jy0kJ zJYZvTP+-+(F$l`T9TXx7|%?9Ey=0t-*9ZAfF^8Uj3dkO$5ABy2RH}Z zz;hJHa(n?7g61@2{*9yC)1^4um>Caje<#bS&L~g7^ywz@oUtq&YD^r{r^<8IFxpNR zR^WWaczF9i1x_PS0b{7dIiFE^`b8zqT4`RV22ks%S&>J9cRA=-4i*ijB}|GOj%)g+ zXDV~bFmg<9Q|9zwRGyxw$0;@ar81`;Bgb?J6;3Ngj_JWFoZb)_{q5&eIAfWF)_{r( zCh+NGp!TQZmhaPT)j1>h=X{?Ex|0CZDPVA%@_qUebxt3q7vHCUQs+$IUx6&S;QRDM z4NeOeOp{Z^j*k}0w(fDg&!0L?s1 zWdzk$Z0?|YtH7Oh(256EP?v~_L6Hq~vM7Th4@kEo4|qI>=?)8IZtMn#<}nAKya{UU zgBCu4Eb|7fh!l9v1~!aEpYa6?c)L26<2go97mPuH3*;3c@U+J(7FS*t1vXGe6FlG# z_6Ccim=(hUkmHr7cW844`#2t8&4QT@x>*n$+Gki1`+9glhxoAQGMr#_WpM!c?EtGH z_}C@zbQWm&hL{yY185phdAf%VXO0vbXqlp?G-R6^XkduZy%4BFFT_dYm4nC^<@z12n+=k6n=oME_w|WCz9G z4|YW^P=lFOmqEdi(T!V?W4eMqr>q<31VRqb0+CZpATvRyVzYqG%41`808JQyMuS*E zy*+kt%z`_`N}Og)Euii0>}E_2oC++C9h}n@RX9ZyIyk}miP#;_F~ZL123g$&+HlY5 z%HlA6mp-SQJ!Bqh2Rkp=!VT=;#iI(G3LFZY-qN63EI?^Wkrg!AoDGf~ZqV>HD5gPf zg%%2y2AuMW;80k=uE+s8Sc*XrG_MBglNLfx+ECz_UTMIYDa8)HxPS$6ZnD4|@cc0- z4i(s^|1sc{Q%Bn5&IL|vJJ@(ZyY*N!m^QE}v4bY3J*5>nraKyP>M$x#&oktVX5^TD z%8=6>)J9@9;?(0oj)3X5Mx0uVJkv9bIIX!au`+Q%ZqoTMeVq|!ENC!bBV(2#*K}@U zPS1LgoGj4nn1CW^yY)Lp(3zbqj>4c*96@)Fv4Bq8<5ghMXAo560SzvJcJCn$;AK|e zv1WY3sK5oJxNi)B;8YPH+zZbY=q+G!&-q zHRjaN;!|K*tHfL5#8~UZpuk((ti)TR$Omc$gN~SFaAfqBUJF{G!l%ePo!^AhoblLn zcN0!oW`0G!>FFk%@v^+o31cRKcg#v0YnAw*M)B1)EAmc%Y{IF+!ds)rJDuBQ%0UWd`xVRq3Hum)A>DQrKc~L z$RPk)FsH=kXgmFk8K)0uvQf&Mvzl?w^l9dtri_!OpE2i@Wty;Y`)hMfDMm@ed?2X0 z0o{E88chRD#>-i9sx$7H?qpopvlQ8<$60Yo)C+*34m8l@cn>tg3W-W3PEc%smiL3=lN)sS1*oKC zS73A$%+hCIP~rkb9XlxMz$#h6-Fa@%X*BM&!dRY(_;E2udF3Zx0voa$0YVF6I zE6~1HHWXVXFe$;a8Utt(AgeCJB!n$SHk?XAGeG+o!DDWqLn>JH8Rtw-v*C=8Spaf8 zXqh{!BWOD;vg21wzh%QIRlf>k5~w4B3JnHOqihFwZ3lQ15lJ@-$XlQd!At^^*unRYGgvc%8<~#v#nbPabF!O&>n5a- zVgfk`i4R??0ckIQrj}V94{Tp&%bCf@wB*!uNqf#$jEAPPIB@DaHH&iFLXI$S$Wmfb zVPL9vWK>{uRCVKJkOCbA37(7w9VM*GVBpB-4i?p5VsQl>z~_*y!oa;?`i?0aCewF2 za0)OgZ9n6{X~0-NorQ_54!j|J17o%lmkI;;>}@4p1x81eKU@q93S0`jjx0q2raI@;IkM7_JdZR zg3=E!sGfxizN=?em;6XId5-x5~;iSk7x`r5ZrduJ4g94YM1?UKKn1`4Z 
zpk*CsX;M7{Xib}f0t4uJy0uCy&~l2Uw%ME+G@HTW$N*Z?#s!*}QUcux3JDaD87J8k z*%Y`y%L@?_OmzyNL#DvR9wRt*7`>&HctE#_bFWomQD8%uufSTP#I8`!gJP)?rviH| z$U^S5O6&@(3aGLQY&CEfvez~%aVoIXg1Q8ppmKphfk%N`fm?ycQ`(G4VL7)VXx$sX z8IuMm51TWCntg(1OezWtpo8}f6u|!0QE*^TU^QnJP+$PmQdgV-D#=9 zz`fgwNdr<6n4$CWdO@%i*hS58w#k?HGQITaaSOuy*Lsgw#@R|z^sULs52B{%3Eoec2a z1rE@LA!g9U9893)s$#kfTA+l(1X?w!09F5l8Kk}dMLiR!+r^~IPy{NaG?`cwwH25g ze@u^XkbwCjsWpK-zT+islNnKOhGwG@~@<3l$7n$Dd&S|F$ znarC4vX9&G1nBxDq@zDUH;0OWw$5y30R_#1>F?Y*#p>rUDvE9T+b27y=8cYAQ^^PFJJ0`WN%KqB8fIF-O63?MO(h>9ntE8~UfS)QDM^%n%Q zlo%XA%@eR$O3VVUL2IZO9GM)M72q5eM^*%f&5<3!VQ^$Za2Qebupwkvk@YYl^wcvr zvN|HEV{im*aD+O68C4UK6PS?oFrw-~GK~pU2FU1PY3aP~3_VGbql3IlLYmtZ>INgVG)>GC(%siwqP4VMc(>L$Mdd zMJWDbbY!kaBvDY@gW?I)Tt}n`ERI023f({~<}q?}gH8qZ+pHcw>5lFQ*RUgXwF1IXxIpO#kM~sU-S}+Yx*wEsp~DmO{|&4~`e6>-uqO z)`RmmSX*1R9?JU#|i;0@T!ir~%K~f-Xi70GsW~%gn97%;b1s`VBu$LyjAQ;F}{(Oc(U$ z6ypY~09{_-cwxGcKc|Zn#GTkInLfdvQ_TK@paPSEmXku8f_95J^9w-*kh@=iUHkz| zg51rcz>uv3o+^bLxK2tulpA0WfH%$OR46qppa6c`nFvXz*#K#L7p zKsTv@+`{6>RHDF?<#-0X90pXpUtomz3v|92XiXtxhaSj(6#<;8J|ImXtCc{t=LN>B zEG%IK@h~`me}Em`AOzYd4-P(%TbWoDK)0t{nEo<=vxKQbXnK4gr@GJtA@E(EpnD;* z!Kc}um_8$rQ?4EshM*gc7_t@E6&SJw-lD`A=<>2GB~X+oFe67hq^iHb2x_x|&Wd3G z-E9eOih@G516&_Z5CYu;2XWdvZt%#8BdBf410TzzeG(cX9}UqIHWD8@-gUmm~7Am^Pq`GmMnp{)BlHYx^XTLQqlx@Wyy5Ea84D?EkcS4AnpbsMMV(3 zMo3W!MDGw%lmgLngcOxQ^o;4V!#R&Io|v8x!6|13OHKR=A`0x;3XBTu;MBwn>e_(P zHpHb_N(`WWA_K&^08_ES0dgSCpoL%x za8I&qzS{Y50Fya7lPXk`N=F)4t?1|dvH9+XxD&H8XF zfDRC5Q4kXNINdyo(_#9yC{ATDP(c7mX79k6fk%PQoyB4LizrUbumf;=K7-|$6eP`< z<_Ie=DR7uE%>dC_=FAg>!G+5NVNe{*0Fxk>eF5F?pdgv82)f-*1Khw;hh*sK`q7+X zq93_I?vYerRuIZk;#6SBR$vhLI6W$wQ-<;F^zvvB@LwdU6b>iSiAQGr7%}u7GG(Go}l|3Tg@rkbWV! z^C0kQ`mPvGf5sWpxnemdGoF|}FP2jx0vx}fX;}s(76oQc(3UAC1%9Y8EMQ}p94`n% zu3J-Jz?6U{1c6W7N(>5&+2C_4I0W8K=ZoW<2Cinfz^eh_(F}5-n z2VLzTV#ag^6pD;yOrQ>nf|fb+0Z8N@fJ8flWXuxyGW};fr=iOeP!8rYV|oC}yPzr} zO93=i4J!9Rg&wF91s6d4pezopgWhv1ae*!*VOC%PC5gAw^Ab2?Ef^i^LHE}lVw46= zm4L3|Rb*D+030)3 zmoh$|-kii4&G>Tr{UlBsM#i@3GAW#!7#pUaPvMkc+&TS43a69gMJ7fT(0Y7@Y$X;I zP(lV>+6%fodwNhRr$NvK21p;23Dj`{EujIo89*)sU+w~$>4D!80$FYe>KB6!j(i6? 
zU4sd<3klT9fap-jQed0DJ(W`_(;*u)zyn@}$pkv>4tCQB%nSoiSTTU|8|V;0fxVz( zQ$fo;1iYm|M`h|WdN}@P@Rn8tmqr&DvQ)rB2B6z>zzcn#y*n01f$8~kWkrQRL)IKw z3XB3ESAoW{6{fqVaVj%)e4n0^#;M8kgOQ05ayIh?hUtp7tXlP;t{Ibq0=p(NgA&Jb zB^F0JP|3{#x&s?@feR=&KodcbEeQe*ObYCvLz)G$`B<2lm{`CAP8?Z|FXlkTn3zEG z;13Yz34_M8z=!uR3;brYX5;|1w866!pdka$fh!51ZZ&9^7O0y5Q35g(q6c(3jKCkT zKF~-Ox*#YEfP;j)xiY*Jt#ZelVR=M9%|sHEaNA;2pFbi5)f`qrd_>JA@s)Lz5A%CkL!YD+6?^qKX1Lc(4%EwPkk%9eK?T8lP}vP=Hv(s=z8RlLutb4JPm; z7I^XsR5K{BgU1)y9WQ{-bx~3UB~y0C74=M@`yoz%1tCJ9aZPs6?FP_UTy~J%JDALw zXD}(KE_aj%ufO92r2z#MbLI!23t?Y?ccrm|&Wuq2J4;{|j{*Zo-wW{3eoT;EHLQ-9 z=L1fE?Jv!#%?>)AP61>T$WJT+Oy*ue+7FhU*H05$ms*kmJ6)c;_} z0-Frd0a`k&!0sq##jt@{fgN-fNDDJ0{u@C26UJR%M%shR3Q+2nvYC{2Q*Zi0kDJtWnN+c7;6 zS6~MPVt^Ub191g*#|a>VKrJ?Q#|PrzBN0K_4rC70uM@=0m==Ik-jK*rVgfb$92baZ zflj<+0u7RayoyN3kOdl```{gCh-HDU*KlkD#fktV9Z%s@VB`jspeob*b2!E8O+X{3 zpv4qCpp>G^uz|^u(H$IB9grijAt42cP%OE61q)~)87K*Z4KV@@>w^z#Wl&JDV`AVj z1jP_&QXUr24V<84zyi+WELruiEDc_a&+d4D1$@R6GpP8u0ammFAp*+28(6^8<*3-Y0Apj-wr?L$3_IdcQ60=r`eE67ElqdB+* zn%P0I!U~Pv4pz_^W}qlxMK=O`A|ELD!9@%xPA7oMyYHaQo$Q*-Ggv{33k<=zN|)gr zixMv{$X}53x(GBL;93t}@dDKjDmYQ{46mvJy8<_;LQ~>Y03AkU#7dI&K;;mqv|tD4N(D{@9)V4e(+1fcLGqje4eU;gptGOY!3SPI9KC_H9^`A# z71`{%3};v{%WPJ}neZ$;pit9gxBxnmwTJXz3O#|+BT zAGpDdNyihQOZvbyI;ha+QDRZx7B~ei_H`M~K;rfVNGGUn1sz|c!E}NLbf_Y`KH~=- zB_>GhGJ#SWIGFB$)P{gs;q0KE?F}AC3+)Ps2KDuL6xbaf@PJ*e07};vc(Md0gVV|r z9wpG}y9^32!@%pCAMhwaBWAk2sHj}MF{oq*U5Np@^#PQkKq&xh9=qcU9&q&mi4-1C z%z=X2v4Izqup#ABE4ZcC!s`mknHo$Dyr7^6a0l@mJ9r_^f|OgJxH}HAj9rttgBN6& z0k;Bsz2gM1DzF*>SR-cwSY98Rx=(CUSiq;i4mxi43oj^pYcRdwHDmg~3u^50K^j~icwtrb9zNt0 z1d3abCG|V_%$N@FDX=@9-~)Mr-SGe)IM6{&Gz=Y%7r=_35eU+Mf=_`}U;#9DUEl)+ zD>&oa;6udGS5O(lV#f3UWX%h(3G9v!AlB5g>odOL15XShLWkY)16UQ3Jug7^KtlBk zpAw5BC_q&ez-=6G_XjkL#je5h0d%Q7s0M%<22L+OPz(d5g9T6r|KJM(xfqlX92>xq zh?))>K$m&*=p(fj*fp3s_(3ZL^^g(=D1m?rLU5X#&R@!@Ru5`F?2rTnDWuZ_3akJ# zrUQ~7#tunE4h43{1(KjZIv@!-W)@UBE|CN+*aEk^7Dy@ynK7M^RA6^pAqp}F6dE8M zAP;YV;Y`h$=CIQ~L!mh$2>y1uN=BmBc_tGqdY6-Vp<<0G}AC$Op3Jh8XD7 zRZv;Yd_fFU27eHRB%c5?rU_!8>QxNnunA!D1ITjF82v;Jklq&X<_J*HBBlr`xgD|< zKwbr%qVt>8ktIus1+@Bry&mKW2L(3JQDorx#|m-N znvNg9p~D0&Mt6v32`uG-Me-L=C@F&c_(4pOW%|Yy5@Iaujx)rk8#Hl>)=vO6x*v#x zqf8vM6oyUUGN{w_K^(N23Dmd;U3NAD6t3)`*0veb1yE#M5X*8@0ne#}r;|Yq%^6}! zs*rj0Fi;y1dGcI5 zTR;^xh(@>V%cf=lM-|ZYI=cqb3^7G;Qe7Yh$qVA3A)X#V0U1wm(An3Fpq*I?90CTQ z16sg$;2i_q0Ia|Q3WX)&pz}@?Ae;r_pum*@jqW(=IdnKo)@fXyHa(n~wDQJTjyMmAz(+f~U zLHcu`&Jd`p_e6a9!x~OuCZt$k1TA9#1;h++&H?q9K%+31nH5x*E3rE2ID#%0R&`S7 zSPq`i5oBZl)wlAX!WSHSE5txUf8e^-@c>w4ff(5BppFJ4e$1IcX$BOZHzYuL))Ca% z0$o;L56Z&KW=!CGwm~e*kzJq)R5gQ*xgnaR#4Kzxj>JHxS5$!{#4VFMatvFR^>R2ESZlZ&dhZ;0_ ztN`UpZjh!0;-I1mlsX=O+^k~GjO2C}Go~9Lr-QlyAUA=We&T9}fFrv=F8D|;URh*s z2wa8s9Ayy>1^Xo# z&w+D=ngToc>P3*h)$Ab2NENeh4N5PdQWR7gy<`TJYI8t29+WZ|tQjE%hAGnwF-Yt? 
zJ^+O_$aYXEF@p^h#t+1mRKd&g8yXy%6;%|#sZ5a>w91GDR3d^pO@G9*K!qZxbB8Up zGlLTz=#GdFpw!L*>Gy-eBLJEKn9U%g0xSxAup$*S(DwtH6FMN}QC5REXo(~XxJUr` zjNK8GE)Y3^2_+|h26;g_fyEUZ(KF@Nc{;==rWrzeUJe4|0S{4>;NkMWD$f58cISh(}b}pc-yNJ*ZA)Mk>3d!6^@v zxgbp{SQ6eKX2t|gRG{)r3R1;`($NetMB0Ov-r%+as4#;J@WIj^xNZj*-Zvy*h4%um z{VL|nNX~+m+Mq57xD*Bz!asL33ph%{q860d5H5t3-ja~ChvLv_JDUX@rI7S#FfD+# z5oU-fGV@5*gAxhox*|x|RRGf76alq2K@9{&e$W7!M3y3`x$;9SOHmXQ!4`@FAj(Ej z(2)Ur_BpQtxZBOHAfUhtZ=LQC1-IXKh=Q8p@a86i0vC7?6=Xnw8I#EmPy`4;!WNQZ zn9Z2}fCQP%n7)W9Fr%a#P?3wCa=`Nt;AmeU4!Wj{8I*EVmMbwsi$oQa#sSE7M@B_9 z9vP6&L8TC=*MOFUlt6L>5>Y*9yiP0&bfj1V$hn|GHeM-E+C)lc zAQz%0GiEa;u)D$S8*og4Tz>d(i-4mHEWv=PHMlR}g&e5O&8|_;gyNTnAdT=+jzIxb zlsAYWN=Aq#upYP<--GnXAz21aox-4`F+)sIf*ZW_5_Bd$cxsXhbYuu<02|bToI;2RbmFERvrx|(2a+n&akKwv!e#6yr~D>K&!zd z0XuRX+~Nk6V4&vR2C*y!7J+4;fgEr_pv!PW+?AJ6%8>!o!P8(mBd*8{+Rw-gx?ce_ z2dE?o>Q^{EW5@y@I?n`Js==nf3z;)|AP%|?ln1&Vom~Os93^LFGo}^^1#tx)M~y5; z6YxYHXk2PNvmz*$uv;;#07bf_0&}(*6Sxb(Y{oPPB*bFI1S)PEXNXPT(#okBa%9yE z0r2iLW>A6vC0K#wAeS!?&*D}B-7df_u$CEg(Vc)RXvH`?s2Bwmr;r*D($)l5CZdoe z`#@YtWx8P-r*J(txSI+t#UOPtsI#g9ZOqOPS7PB61Eo_$T@3CMfM)VQbuxHT1j+b$ zD_R5`MWJ1t2jY;Bd?5}Fcu;Z34O*Emqrl~;0E!0> zU80>+v0fFF@|YbZvOptvkTdKQR3KskS)di>4BQI5ptC@D!7KB*SRB9|PteXJP_s7+ zbchYK*)6BQsK5m}sDhbC4v}CNz#2LWzy;_}(Cz_H4GWqnQeeu2l-CMkpy^3)PQcz) zl?Rn%J+OWl_^KxGxIlmzQwJ!Fg`jmciy2c3G1W9`VGFIML4$56g)L}pHK_f`>B=ij zjcWP>tS1IbppK9d78L93jvr8ZVjo12I&7d4mff)f)TC2Dbk;yhz=;x6wMs+ka!?6~ zQrdz$>ni4uQWo4dfmG*6#Vj*O4Y*GN3Kj-vHbX6Dnar5LBSB1%;ue&<9RJN}LMmj1 zL4!l!`s;%zqP!G_)aD?kZV*#Q05?V)=gbA)p$P5734=#9aA<=2Zxu)nyy*ZgTo;Ie ziU{y<0W*&QK@E8oJr3MdKRx|~^oRbVC53iAzM zjk=)Zy92BatO`DwyaOz)10OJijU`Y2-^D2o9d?3RrKG^_cmQk!s&8Oc9RM4p2eRn| zSQ)BKC&1FWpyPhT+?iawyP)&n{%>{mQrg#P(ZREhYfu^2SU>Y=dZh#Hf2H6b` zdx2Gup)ye8_5q46&0!fX7U=K_-ERz_p-(0SgKNNSli#n~#Hu zQ9(h0k%OE<~Czm1EN*TnU;WP(6Uc~>59Fa5={78iqOup3St-u(ozIf$DqQC z$(2`#;=+r?jOhTljSrgF0W}XnBZS(ZdeU(LICKt(frfWsV?+m#dl`^nqzwX~VI&rK zBN3#eK^z)F4dU=lFj7O27c_AOE4pq-fQOMl^)FJ<1-gq5wE6?l69KghP>U{Tw+J*a z2Or#Qy4NA#s10kf34;2|FmJ#|gajc)7s#RD9+M%c%d{S(5k8CrS|HBucmUBp1Z`p$ z1Z4xzZLr+a|MqbzchF9#EkGs;og4z}u3iK<2|cviu-< zL{DdWdOxRZ{SE>6Xue|y*wvt`;~^QLksH)VI3R#Ly#`9dpj5d-z?}Jn0I0$R8^G>( zLI5_Mb437g!T<{>(<4uuTmT)2$fE^n#>y8ua)ZxzW_P>+H3U>j!sf#w??2aG6{sNhO6zroPh+G-W2zDX6F2gVI zW!G%F3>=`wEdzLr>w+X`)4>9A-=n zf++b+6V{gp)rO#!Fsv^R>yg2RYgMKf?voUoZa4E2&V^=$* zkdAr-&+{T>n%k2&WwU005(B6hgeR;i?4Yc&KoC8vIIa)`W2cLePS0+(9~+ycMB9(w`Nho7h zUI;!BLM{X!fUM*;W4Z&PIn0=D2%!{$@BvMNg&=rX3aPOH>T*+78e%O_AjKhQ-wL?G zRYGk^z>7l#aB=uR2vi!fV3vlUAqApJL+m{z(D6>>G&kN|=@4*)mxiDaf_nsB8iH2M zuxl_uOT&8bfWPB6kVXYqamWKYu?F1SfE9;Gn&8FZ3{VjYZ$j{(HX$JAs)Du?LZ%#< zHJCUA85nqYKsDwE@U{YW#|=VR^`OO>?D~vfgpi9qwEXo!2vqceG6kgQgErMU-FQKR zk)U!6yo6ebOW+MVXuUO96^H{K$XDP5Q=Yu?;Kk;Mp?OBf|NrZmK|B5#6u{*c*oGhA zT{R8RoC~`Vn_ZLn2RQG7%eDq#NYRawwS_@hTNAYm=)j_|1E!GOae{C?$RJqrWsWd% z76z4&pt5;}FsvAuAZ*UO0M!3oAq*-67Jv+Q1h2IMWk_gw06NtH+?LrO44T?y1nsc~ zEu{iG98w8D3R+HgUO9#7muGT{)N3J{j?Y0&M_XvqQ4>7AqRG5M*qnI>$YRh=5>UwO z5Qf!RM}(0hP7}3YJRl5;I1NyogHFg`cU*v2qY7F>2O1M*$i|)V!(4d8Qpmlgorx-wDpmixsSh%>T3O}t5u-9|3O@dNm>!y zD0%`C2MIGtD}qN*-iRxLYMc&nDkU zuh0Uo6`TPYwBs{lngXKP%$PuP0FY@DE;FV%AVJ7*H)zEVcyyZ!I@0|@40JUaWTcy0 zfeU@4`;4d(6R76n)?iWrEl|*4Iw7hA+AyTSH2vFLPC++N!31j4gGN0-!|2R}N4J@n zL5r0bK_@nIfDR93P!gT)JC9R30yLftI?Nt&ln6U$ONt6;@kxt>f`|e)VmO-_>`st# z9AU%Rq6$o~;cO=8a5ghyIQ!K+PPKZ(a5ij~6KOabbdm}eXgHfymq7r0fGgT?Hpm^I zWm2GfcR*9Xjwd8w>pnnTXm-aA$t)!%_>^>qB+?8qk3f|IPrU*Yysr+DogfLFDFscB zGlFVa(7c6`3T(*-sISfiPLklkP0$EAc+yA#Hcoy5v}}P1lv59g!R8Gy$I0RDu4e+x zU4vVpAg6!3iM;Lu)Te`cLjX203?7%!U;?=oKHvWzqz`F62UPcVNFv53k@Uc4x_fVR 
z2skFfYfuJIE_xvjnJj!D4mz?1JdCNprNF7dBm*K8lt2fDzz6Afh=JEraw!NZ2nZN} zc3p9T%-A9Z@|^-`=3IkmgV^+U3pgb>R3NA8sZQrx$SDJAuZk!zDS$`inLwlRpuV>P zxV!?7%7aTJ(Ct6q(RfCXQJ~ewNTcu!(D8Xr$VfbRJ1IZt%x?~E1uoDEUKVc99zX@K z7#lZOHH#uM=tyv81y?TU_%{orSpbn{Q)K1=-30?%B1LpL%+CrMSdj#;jd5m%7zSC- ztH=!6od%wo1)cE+E+rA2XBJ2)37xrQVgN1X(q!HtK3#Akx5)HNJ0YIw6Bco@x`3BD z@R>0!5eFZ30;y<0ryPQN>;j8mhkb&#D1e%NpmxD&Hs}$cyx^dketi+AoD#xra9Fd0 z)>wfyc7Rrffs!o9ZfNBK_S6Likf&soR6tD^NJuk*{RcX(j!_ZpS5;7zraJwihm>%= zN=pl<6~f1@zzJPvqrhXuumOB54s1wyEo6WYv_TKz6T~J1_#OjLas_XvfNrMf03Bfm z-b`V}Gy$~m7jy_TsK3eXIDsA17U=+yy7kz$M(kim?%MEyS4(eT2h9aI9$*JGe?Vm< zxQmFiHR1p}$RE7m9U&S_7g$htQ9&9oY@jV1pw zfdimTN*Md7POyUpPNDmv@b9By0<|(Attjv)c;}T_FrI=mS`q34G}z=yWaE zRxi-tz!!GtB4{=R7GBWOu^;T<)+{VKLDRqNkSJx>XZ*vC+)flmZ72R<2elK0KsGjT zfRYm=L_uQ{jvb)AuL=++IR2Oo@{%q?6NfbDG%E$Py=V139AGzrHZ_13X0to)fM^E| zM?$t5ba24kw1-29(Uli;3l_Nf54xWTvOFH__X$v)Ag_I3$ilLvYXkUfa8}r&dLiz5 z1>^zE9iZDJK$moZ(jh#Jg4(=@Bnm1m4sakRQP8$8O{OCp;JWJp)D7&8pq*jh-78o& zA2o17b67oi>nUhpSeM}k2QLe#`*4rLkqI&&afSmlX%4!K6uhIQ1LUy@oRCHO8(6Xg z8aY7GIs;}H*er0IbZ~;=1f&$=4sHQR*B`tXdI2XS4}<2sK)p$ZEP*%RF_tBq;M$2n zff>~O02{Odsuo)AXfQ1R-A@hM4z+^QoC$oX89V423Q&m$jWy6@4ouMoup$9yW&kN* z0cBoriJ(-^>&TwX2f3q?*HI9%S{ss{RONx%75a>zm6YHKZB@`NL~sil z>O1gYC}?I;fmL7$Xsxv>CRX zG1d;=@dNffcsv8T4-`~9AbT4!OScEKx&g13!Lv<@ED9>iK_>u0*61m5f_GuCD1eTJ zMdUr+dQfo!x*ZRaodrMz3raEot&l`c1}8v)Z~`0%&}mT6{TR?>>Imsw&p_l_W^f+@ z)GGlkeg?-SO03sAfTHmZXgMyZCWRFLpuUF&(+vr51058zkO^`&0Z@(yC8h_k-K(G# ztDt}cH!l=GcaMN)dfCBY2_7ofVEO>sG7D0~DzJ`6kr_2%g@e0iGsFZIa62(5u$wZ0 zx^Mc7pmZgzr~+y)Ft2s`|DU-TbOezKIHB+(!U&v~9NEEz254sE3MXV6^#x9pp*s@i zFY3WIP&Q%*+L;ZScmu7p1>k-10uw{s#-BauuOc2LHmNqzE z1nGqL3-~~312NB_$PBu`1TnG(Z9;=ts2`kQgCL{q^`M%N-LZiSQsI2y%mSM%0BM?Za49kIf+k@axUxV;Z-E!RP5`Ne zsDjo_T*y@lI9tnFFpi!G1r2WiB}Bj7U^W{4@W@bFFN*vKgZDs(`#6}Zd=jTL}qTrPr2f5?Tb z;DK;(9S+LA;G7RGQ$cNAXcZ1B$)|v7Cr~K}Dak>u*I=3eUQ!1d6jK5P9%$hwGq@}V zFUe+i1nq{aXBPOtwia~8ToLHW z2095Pa2p%F+-Kq80+rja3Ikm1-;e+o`=GrK`t_i;2MaGKDT79%Kz@T(YS2cx~9g0J^F0iI*v=AN#!g;P|78`OE=3i{h9eA{PA_dlY~YbP|DS-B(f(nJ{AGdLeFV)hl zeIX3mQ3@*l{s<#i9;*11!#w(>{DYR0|yE;M+=}=^s954T^KndT8kKSWuim8vo!pXV+(3Af^O5 zjh{V`Q2K}4c63*ZfTI>P&Ub*)J*X}dhfIG;fKIy<01X-oOm9>c6|sCEp~L|yu|aph zaDq0pB8LeFsDIAOtpMJo0_wDYQUjQCLt^^FU?E{1@QP_rc41avnJ%!MQ?ed0DF#~m z4O;L5pY;N-^5!%Hol5|!Z&(!s1a5Ld$3j5`5VF40D(<{dZNQug2KgTzvr|wm5eCl$uaK-)1fSHh zKvGc()Y$p}uGM)!=ih-2E?44I;6&`#`XK>6Gm=?BN#+>16{u3+T&utZQK+g=4^jguIKgEO{2U2WP?>|R$boG}1{XP|pm3QWsR%kz zfx(OkT;!;kGb5G1*j)R0YJ-5I0600+GZ*seD?lAAumiF#jaMI08o|>;+q4D&M^&&U zupZEf9y=tnkoBPDeSL0(R~11AJ1H_La5$b~08NFlJ6;jbQs6c7X9A_-Gl*kFKx-dB zi(q)on2vyG&_PTfngenk3}{iu4smQ{258)nqDTdmyP!yg7p0&`1@|UFy>-bfB@Xza zN>E0Dmz&C<3d8XPc!wH93HX>FkeU+&iVK{Pj8d<_A{i7*ut-L!S3paUz^(_4GD?Du z?*f$;cP@3nVwt&+S5E=SEAWjTdZ38bU;?=n?w@xceaf(i)&p-*0~Hpqg_lTr;E7}M zW#k*j-Sj}?!z&~~Bc|YbfRzWdISX_@&k<0$uWHV;2ShWOG3~%w{*w}yrWhF%8VaC6 zJ-DYJyA44*+1VXIsTQ_bV2319=Hvpmo56=mzzg+y{xDt>TzL^CnoOX15pOhs5*LcY z)}3fZ+DWJbiX~Cd*bm5&pb)SB&s@l>1BoUS$DRRcp?ZBAUMZ@s$ETtYF87K%^{rtczQSt(gd$| zH5JgST}@EJKxuM=S4i`kF+BlgbWSs-2Oyf)jOh-D<^%7>XOsZ-K~ej9kem-5d&QsK zbugj^CEGx=JEWC;16-C7Xl09#n%xa?MG{JRYzU1c+~qNfyW!b=^G#587X`-?Gw51e zcF@rXkTw9`?0yQAb&*=xpmoKBvild19?a|xK5%P+q$21v0Y~Cm*>Vc90tTQ%9>Lol zK9JbTmP5|&^{`g9ENIO+bTkc5c866pJ)ntjPBW$s5Y21G)B>XUz}x0I6hW&OKx3}p zp1xR?zy=;A$e4sHuMUp^w7_5jk8ktA#vj1rsMv=eK-U+7dVY>Cpu-Oa=*bePVuxIk zhgz}ohk={9OxW7kDCtNaoQ~@8_Uqx<_4gy#U3u)_mJc&DdxDmOg5r@0bo2)U=uTn= zB@P7+jNTGx5Rp~D02Cmwcw_=a0*g4vpf3`jrXB<6Iu%%VY5K!oa>DhXm9ya8IiL|y z@akDskS_3=S$0qmfzNpa-JAqo%ZBJ68E`A`f>sVGaxyz8@VW9DfKF&OW10erIS#O< 
z31YZg+pr$`bc-fVUi4A}Hb4aGo5Mz_kvb+^&~6kLQa1{6Iv;2uJ*1TeJ~oUCDI!6w z19osn2{w=oK2nDZ)X}SF2aTsOm^0(+dK-Wv6Hi=%4%-Ab(!lNl7Yd-^qNSjCg&)2L zY62i!30;I;&*A`H@&X#Cm&`(P^g57Uq-FqQjD!~yp`bAmY#nJX(2dWqgHPZcX(k0; z0Rxa%VMzmgI2{Yfpf3_i^<1DR2VHUkJ?|0Rg;r4jQ=pzS6LKDhWpQ4xCdm90?kvu& zz~RV}rO2Vc2f7EG4}4n{DD{GNDuFxLOpa>=S+W$!$+-G>GOiVt7CCByi@gbEZ%y-1SHb_ZYInT@REMz-y{u>7LAFk1zNwuqS&O1pnj7%>s@V zlqY-Af}i|k59?V#8VckjdtE%m4UTXJb-xK0H#(#w`;U{T6YjbsC42J2T^Cy1p!OL^ z8K=SQHn2kGI#9;k_%M2Opw0tmkW_&kq!2!!ql;}I6`}^-7~@9CM)09kZCu?4ls=v| zI64Uot%670z&%sYJ`d2STf^)|0Y^Sq=RpzN#{+c&U_+~lh&~?NFB3s}bP;+$eLNg` z;C?#@(gUA(QB1{^q|&COuAtVe zq~XL6q@b#x-t5E> zX@ZBQ97br$fwmkY>SfeGC8eCg3{-Ibh7>LEBr1+kPJsq&F{XReP!cQX6#4r9|M|ms z!QqRo=LuVY2AzuCpBij!dv9 zWKaMt@&V~V8^s6d0eQV1yn3JyqzP%t0CYeA4o&bXeF;bp`dFZ%Jjg){Bo)C24}Cxz z`NuMZRIk9Spr9ZxV8Ek=Xd9vo6oB>sDljW>z=n{(0|ZFp1Pa*33FL_!C*TZHWad!; zjcEvEDR2mE1`kz;fNogPU=jc?1?0$5;Be$9gG%v$4u92P;!xzp*32NKfYHEKz#tlE z&~a%{qU2QIgDeYz%tUd4*Bl{F=YxiscojGxilEY<1LX-!=cCt)%6OtWwDiGU zX<};_|EveC$AG6O3D9n8i2J~vfUP3}9r_Q>07$Ku3BNl89O2_n5(;pi;nE3Dk-I=T zp(!$;kXHhf6p`96OrRrp*+J8~dqBk%s3`}cLCbwWG#~gFS1fHDqNf@ZF`^V@$Q~M{ zyrAL=bbAhDj2_p_BItZ1aZ;xm)QE~wHG)x!;%;zp1nMlTybqdc;6;=zkkb_)@q$*q zpv=+lxZfe*2tOQ&K>>8QJE+HiMGrhOZ-MkcBNJTGftG93D}pc3Ssmt2;R|7T!Ir;P!=%YQ6n?K z2_q*s_}rE40&0w1iJ0v8%e(8>y_^aq`i$*aH#sVkt;MAT&}L=}801k)ilcfYUy*CF7@ z4=eUW&^!UF%S534z$_%+H2&`paD*R{DWU-P8QA++biz~PGLTMaYOF8h6#=C}l)6j= zb-)BX{DD}m2D&B)JXgZ*2)ZT+d`t~HXf+!6S|DCCrZ=EfYJAW;fjG<{cLFhiuM5Jc z0y!WzsNiU^!zRu^^LF4-6XH?>sGkqpC;}QZgRDgZc@w4T0Ifg-Zyr%%1+~w(6dz0+mdlg;1b%P{N=Tfwo+2 zB}gB9U_cmSxf+rlcv`ps(gRHkg}iRy1Hn+1tAWZUP`l<1s9I4qXSxESnZTPxF(Q++ zxKzQ&+1Qq=fv*k$Z`XuOgp#ye4Rp&c;VML#VAjRvudrBdW1jQ0~ zxfUV@-xi4x-A{n`ega&`;2%U}ZO7Z`xcS&b~vu^}@>1t4-d zkaAW7OF7F4-L*h$)>6iZ4r~P$sGm!)pam^4gcnE11uf{}C&?^F4wSJVc&1V!s-RUO z7+cuf{jq*$6H?!mA4fsU56V`Mbw}_D{~Jgr{2Cd490e_sPI%^;x~oaRkpoh=`xf$o z5AsGSXhHkbL3#gy7?T5d5*f4?hh3N9p%~Us>tIh?|onA3~k9ysCk7|PtBl>SJ}l~)PGcnl5_B}mtp{2)<+ z2gw&ikboQh6aRusuFo0$%VK>-7ln}i?05q>exF%O1FIwM%?puIgHV7KAgL?%Yt)Sgo=$natfb_v< zE4eYE6-f^~T9>1YJ=k-DN@J90 zkta3Ez;0a+IuaW;15wY*0p6kqY9B+l=z-3jhRs00liv}LCiu(_2Y7Ef4n6SbdIQn} z&xPO)?+QuCLAnbh6*+h~xD~K1Att5dl*fo2Y*`E(J1G0AKp73oasW^~VeG4d%-P}S z)xehnfTkOD{mK~spJ0SVA%aE$G8N}!`dK%281FGyr5@hPx6a%3s-f>-;3wxqCvr!c@rCqtUq zCnP}kr!x!G!xmgC34_Nam@uN72{MZa8W#ce?7^iLq%RA)*%K1oi0ML<)&eBDSBPgR zF(J(vf|S5Ba6M=TC6?BY3{hPu8G_M`HZB2n{U=ZiKs&tkh2Zmk;9h}OlZ>Da6xz7N zB+yhNJi0;0PJq3SO%FV}*MRiEqni<0OM*Mg3nUde>mlol`OKK!fcA2L+QOiDMeyx; zppE#VSxOuVyaM1uDSn88rrB7EVEjL#@TM@R1q?bM0Mxe;P+&%^#OIX;A88NmRw1_a z!cKU02H(R7UyjVFAShtKBMt2ae2_ru7=3|mKVVW|hRrg9&%eGQ0Y0g|o)a`AORzVj z0A9&12pXsbt@Kmm05=H~nYp2N%0iPdp8^qS_cF5rB+*JKushW=Ag`i8T3jTGy^ju> zNG4pKND@k5aEt1*98dLhg12&mFKk`_?vR4!V?d)GIKv+`_(8*W2B6!W!Q&+agCDZ3 z8g!}>DvJ=+#MkEqe1y+pR49uXN3IYZ^l87_{?(~7nvHC9( zO5j}z%nGcqM1npQjdjU7*p-BLrAUH`M)dgwBv;n!Gs2THsK_w@&1S;F65&jmxw4*_ z0d)En_>u^aGm(cPm=%~Y0+UGrl(`Lf#1VlBSz!(;Ca`5V}7sIGK&?;MS=?j`^1D(JJYV~1h zor78uB(=^##U|mEyrQ5$!gr7dXq_Fntp}R&mV{5|eg!qcAd_pL4Soy?P^ZHW>;ToS z=&il(lgP~p&`}7GdIHv_N74hYC$@m}!0QRvBVsfGaV}4H0b4q;*z$1jil@R+ti6JL}I;BX~H)h_rl zQrug`z$eyXL^(cJ@{{JudT`9++|UL|D1OL zovMft$*|rGw$?N^Hg^eQX69j zY|f-v9)Y%?Ik8pPxJqwO;mv`=m5`zXRL2lkctdJJZ~`V=$AE7^K@UuPu4E_ZN*dIJ z?AXE*Dauh=74;kf20UyyoJmm~13Cc*PZh()t-ztk%u~<6%>cSNfC+S3C?|*ozD6Bu zNaOMY$P0{QdLfXK?kOYabW_ABuv{QNuuYHK!zodJKs-x<+i?j?mJ(#p0n`QH)?}K) z0y%gXbkw90D|EgONp1#cViYns1vU6LCgEinZf7% zLe|;bkjN64&IB4^s^N1VI}^&JD$r66x1olFclm>7Kf z1n7<|*ujf<4<>%KvPr;E9=hT6fjDS?w+7P-aZu+8d=50X0#AYl=-w^R3?fQZu|Q0T z8FXMRXvT{J{lHrAsj;wfWd``RGw2!t$XUjqH3IOVFqF#Ufe5HG4(^MC%O7Z8{DTPW 
zP&??gTrWgmhuVRzgL3?V5!M}|kg#UJ6V^(Iu&xI+m5DzEnw+p+y|M+oSPwkLg&x*v zVxSYz5#vWBhBd5oBq^+aU=QmKQM9mb5QT;{_>^)`N?>-ZcjIMHVAEh?QDlV-LxN_z zK;z?#3d{mqd6?=!!vOk>pu>v6iw{8es(=y<=nRwk8Dd$E;3S~HDzFW75g#avvqLg6 zdY;@NuE46xz~HFs&daC(THUgq88lW8ih&8D){Ij?V;Jm?lfbcU#V}12H2gC|Op(Kp zU6B=ZhO7$cN-xwCjq4d%Ky!BBoTmUe2M%N~csPF&_=uj*;-JK%#AL+)%5v=b3?Idn zn5`KnfGlH%TFL~mO_$+=xDqQUC$MWUy%7g3(1PV44WD z4XG_a)eMsY3uwljgU~7D=HRGig%$8S#8Jz47VvZv=qe&`R6u41VH0%=#FSV;bqlDv za)uS~2SC*b+9~|-Q^=cVHVQbJL!){J_{4eeDg2W?T>IugYFoJIl`&Ie+N8g zp#X{nP9pRr-O;~qiD>E@?63z^m|n!f53rxa%cJ5<5+v!^(% zMgFlVvMaDa&+9nAsKDa5X3=z+)0}dQ4b!bpb0#ykPM?38vxKp6y2u$$MW%mj+s)5# z3NQ*j_&H4gRKl?+FoVhjfu`vZXE|#b4@|#ymUA*=?4X7pivr_XbLJhO z3!PaUcQ8U`2^*)MJ;y1<*f{;gIZhebMt0C8peza;=FAsBZn(jirNH8N0i;%-dAj0x zPC3Sw=?>>P^%xtc=bz`4VQiei+ z3Mev97rY^=FkSu%r)>R-mL>s3W*%N{M2d(>bnkDls0Lu6>nrA>+pFXRmT{ zFbb)FHsv@9fGQd4?>#Sk%l z>kCeEMwRVvUvP>uN_2Ej69C^93hK!SWI0ago+bdQ(55SH;S!#%|BCY>)3PPgnO<|A zv5QvVRFJV^2vguxP&Z@JP~cRMGhH4s|o|?K2*mu3|ZhC7L+6um=#zArm;=?BrKxM zq`{=3WTs%I!NlOmm<1|jSryF8m@a^hhAc&A1xCj+ zj9H2?3XF~)7_t;A6c`;_7_$^r6c`=%FlH$(XH;NvdnV6fG4P9Zj+nH5C{gb+QySz)n&WQebj? z!H}gW4?VJ8)Uj{lGyzaEg0WC>1*3wQBM&$^RVuJL&JoR0R8wGe{3DU2sI9>2I6*W^ ziN%qnNKr?D)p4RkmSU9xtK$a2EXR75BE?JvR>v9qSs>|Z1y;uf{wzh%sb4MpS&Aze zK>|WqiZu$Xjw{5o6tfjr9j8iWDb^~mI&KxsQmj*8apWmd%u!%<{2&N6q8`K)2QycJ zbRQ7SQZxYR7R^$$a@^26O+cTKL2)%mszEZVUeQp2)o}rTmSO`)WQSmuqKg8n;}5|s zMOOt@#}3IX#YT|C1_`i<)(WhSjr>`PO(3xaqFIVIAh(NVDdsA$I)cvacLSLQv3(6l z%?8OV#XOLhWR_wxNZ^P>mSPKtu|pzDv6ayuq)#GCu?-~jfFnz>9mKf8k);?5@(0A$ zI0aV67RfBd4v^dri7drV5MzR5mSPtpQ$4c-vjdZZf~W=)gCfiHhw_{n+Yh|wyu#?D zV6MQdAmzwZ0*V$TK2S-k#No)8&G&;bEHos*-!C{Q(8t@$!`;)x*=aT7Dn@xZS(zV< z-xZ#^a2~7>_a@VLZ%uknsTHKE}O_ zdl+{xZfD%hxQlToqkxcL&NjxijJ7s*)@v9Y9o8`}XI#m+f^iw6j<%NOQpT-}TNvjv z&SRX*Xf=m%J>zD^jf|TZH!z-OJjZyE@dD!&#wCo48S`@0)l^lKXERP@oWMBCopC1P z494k<(-@~PPGy|TIEir)<3h#-jQx!EdY2jd7<(Ce7`quQEfg}+(^6BClM)l+<6@&@ zx)?hdI~dy;+ZbCJTNp1fHc$WeiPMvjZ+hZaPKo*-5(=!~tHLB82@!IeA$YuuL*OR| zlY;`QKI0ZaCD09Dtf2C7gCMB6^@ES8UO^tx8Ud#`CD65zpv?V)FUyfZ;1`Di3n(k= zIDsZ!6?7aqvS2KMEJp@NdB9@ER6jvffz^@0ieVjyZfIaJV_XGd{r}Hw&A0-@0=0fX zOQj&0N8mRH$U)%yvltb?4g%euHG^LPloEdMWeNNN-2n?)*2oJI*&van#G=6Dr~*o! 
z{8<8v*c4c}HJKTd<}-nAwoEN#;LN#DS6xX!juYbi=B_^n=fgiN;CCj8NJ^OFBP5 z0qJ;Qde=8jRag+t5QXF}uv0fkKvEF67EnOSU7*|D7$Avxg9NzW1_>ER7=f;M0%s^z zP38|0u!M_d`FDurD2eGeqX6V2DMc1gNzRw0z&QP*ERSdepCdD1>VDQF5bPCuW;CCVr={pNR0JzafJSDsq|e5r&I54gm}c-p3p;Rq3##70 zMt~w{K9fK**!(SmF!MhMg31tZsB91vXqm3^lQV{qZ+hcTP6?46(Bg9ghz7T3m_JNk z`;$|#9#jj@m^)2Cpp{bz9$8(Cj!WiF69DzXm>3kg7#%sX9M{Z+G~<{U6gn9le;}kg z!O{)$;L;t8jte$`+ki^*nLs019bgqJHh@)tie*qL;xl9VAfZst3OeTL3)op^OjAJV zkO`7yKrt5pKB`zv0b-0AXpRZohh_yya+ooJ(=Dh82fj@NEUIG$I!GSOi~%o-V^v_% zXUwu@`~@~!mm$-UvCxX)CzMy8p}^s&=~e_5`~eaKr)lsh^addNEFC$rpy6PkU%0 zz#Jw8Xw?CaR7lP74K)W!f``XJWw?a`EWd%Wi{panPk(c&r6GAmOOuHKWS17Im*z8> zGtU4m7+nF%xu8WwU?&Qk1!vkFd`cWF4hmWd5(4)?*LtymdIC%g3R)ny&)@@xisOap zg?~83>bLN*I4JOea`g&6&_EZf;{mXdtQt&f_@ITr5k7Eb1~pF$6c4P93-~}SCQxvr z8jDiQ`~i7fgXsXDA`8qpsMV;W<0~Ez^cG# z#&iM{Y$y1#ctB+`-ms8cZgAe3i zR>ui^kg^bTjV=qQ?Wn}Az$Vbo4k~-5Kmv=!@gO7Uf(=$j)XEqX>lun1+>R%wNBrYd zWNP4>Uj2_#k&$ou+<%M>r=!RKVea0tZN^HCgpdu732TJxo__9E$;enVE z2ee((4leV)z?9Aq1R2As!Sq2)i49y0fYi?r1fAG>LkQF;bi5&y1@4b2u?RpeZR2xi zai~{db=)Bc>a;B2hb0|wVLXH1oOy#Fq%!;>uE46mWybVD9NO620M_wAJWHU1Q;`kS zY-Dv@0Iw`(@MkHo33P%TxCB(_gYMirS zy-;q(83?}z%0CU^c7qJrAd;oPqQC}iL2m&$7gSw<@|cJjQ-d(58UzO)t7C(3mcT4F zP)hFs1sww@=sLs|*%j0kBtYy1qSI$Haw!Qe5LIMTPzR6Zf^vug$Ml73xrOV&A{<~5 zHb`cA!eGVl0~G!2W=tPIG>aM27eSDX;N|Y%9)v(III?;`QOuyAsGtaHK(IipQ{u=1 z8>GS1A)y$LY$dA((;QI+kY#_k7#I}P6eI)!=P)V6E3$w~5Du3M0FgDXd{iMk9= zctPQ;z^b9nbc0uc4|MN5tK$c-IjkB?cX**8_yumr^o=ZBBHkZ(6~YI0>_uF@2aWz{D#$eF7^NOFbl2K`Rtck)+S~MHpJ*LJBcZF)FOY z0xk`}$yq{)&5Y>+KS;47xEKPZbWWsV6f{aB4k|@|2%?mt9|X;r7l2AnPBW%CA_}Yu zY-aULGen@pD5zP(>NrCLxflgydPq0ogJ6~d3pCw;Ge0BfXs|3ra2H>LX@dx&7zM4I zKxkqUfRv(tKp7KUihhs)#W%Qr-H{0{GC-lb0bGiMybS7Yd=ShMm;laV9pb22ECG@m zK_g-C0u@vQGb*rwHrc@oRB*S10jbFO&Bi5^O-mX!$aS9~?Qj%9k1ZiV1kimrzK7qL84-!t<4p8(iMN%9jpkmBj&SM?%Y& z4p@f`TAL(rgS5iSmrv00WZ#2IY9N6w*=C-v%B~`s)CXWP-yO z)UIscF=wt{0dh~e8PgsR&1S~5g9kFX0!p-CO*?o%DbSh`lnfvZBWO-M0Coo`IvRLD zr%yoY^dme-HfelpNK%9O8MGLD2(*Xez=O4X0_)Occ*En$`w86pd%y!Lo_>Ix#j3&dga=wY z{lRVqxQx?a`oW{f!UKtH25ZI^Xkqn&$Be0gS0RGcv4J-WoSax4Z-7k(h0zCYPz-?L z4%A3%;DO{Pea1W3Z32gv2Gb3YO&=J!6)Y6^KqGvj3G!A93!q7A1}I6b;LcJ|R^Skr z2;TLp0~#FAVA{Z~$O$T&)^LM5DH=>$KvT-Bjys@tnzK620J%nkX$7|um*b@uO#+U_ z0&U>7@R8{T+=Rj(>^8>>(<`{Sv~~7yJDy>1Q(}=;0_As31r})~4h1eytBN~IU=pme z433`*)6a8rN!5dHXa|)?YziFE8s!EzIJq-{MlM5;N8rHK(pN^%S^`#GhAZ5#j?W2h zNUINWpF67t(;04P#67`l6etaN!K~3>dcdv7!t;?4bagh18B+)n6EwhJ&G-fs(5xCv z7r4!sUVsAo1$P#>k^&8ty0SP-7nJ6cVdR@W;RLs6y}T6zxPoCeV**z&poXHd0t;%y z?2rHre`qjmkx&Gey?Z1;5d>`>gW7HqN}K|)raq{73<@|<4%)*7X-I=B7zHNBGbg4A zfMR$7s94+}m?cpE9h`rz@I$g6vK~YQ04~%um@e=u@gY?MsCKY`n_pRqYzm+`c8)Ai z!%vY5w6Nlb1n5L-1ttYHPy}l*fh!Jh)N?2>*DJ7t;+|On)S?DC2qo?nSgaZMaKWnz za1{qSUKP}N0u@CQM9rCZK)eXPyB;(m!u$brBoVmZ3>xqO)i4~eCi@Lh&;T?usC|A$ zG|Q3EiUHhK2VE`(?y^9;&#a(T6c<242dtXR8wAan4}jSkOix6?(^yYH;|HKg9nf$$ zhXR`dv%qB7?ba*~3ZNqIgK!pTlz>%%8SDda2@kG(L0vH6EP*d9poaAi!FurRg;U0(zt@%}*&TvKy8USRM9_vRoqJBXjH#OcUf!c?yyt--|L%HjZ-NK@c+ zyteAc+ zAE1$G#|L7Nt_>*Ri9iMj*%Y`1o-s>-JSUc=B%%P~{{q!f9pDzs1ko(V4$ycWr@$0Q z`(7N9-r%;?07GJF!}W#k6WBY=iHxfIwzQ48w{fD+COku2Qd!^o|m z4hjTe(B9AgnX&{vGb%w$!5+xTqTmVlaoe9(d7hYVH>E5t#?CqHOS0ITByaipdWBX&C>Rx*Mkc6yN@mk^`+ z^j1MG#efB(N^Ib+7Pt?`2+GPJ=gbjRU~^Pq;AZAQn&&<#>P#TnBN0`+wlDgSSe+Rm&Gp1UY_y zx(U>+hYZIxaKoAq(=SSMiPZn$a^-ywZZv%0f;AdCz?CVh2GbWVXx-BT*9B{}fjdTE zr)n^DfGV5!U<(#kT7`B)TK6(m4I zr_2X96f~ya5(ZVH9H4`mdN>vM6xkHa71$kFvXs~rz++pEEZLyc3MwkzGJtwupb;cR zHg1Jj1$~eyObiOKppEOC3i=?53szc!R!!{y%?q%ZF)aYmtd2W4v&@)2fSN2XI6*PT z>UaXQ^htrG9$I(*-~i+0+>zjXY*d6F_-j0W=M;^q;?Y6;-$k6g>+YY{IKlY;`!^lC9K4I%Ih0Sjb?0JKU$QDFL3F)pP9 
zXn%YK*tM{9yM_xKg-Gs0PPZ$#6j^w{=@t}f8$h0Aux8u>qBn3s(yRca?B-Aqm~Jl4 zrN@gD9SS1Ti^RFipj~iLP|*fz<$=4skO`Izg43^wbIH|%mQ64TOl5;juP`ZSI)Z0h zX0s`1I^OA8)Ps8M&{-2c zGbZp15F51H`9UH}i3`&7$OB~(hVslW}IIamN%O2Y~oW;A1(Aqgt2r${PtgXlS+b7Vk$ zCP_tb)9;L=B0s2Ec0*E81VrDI1TA^eWcnhZxR6N!JomByJg?-YkO$41teVUhBo#qc z=ri7uR1^kjyCbOxs(|VjGMO`hlMSdu5>-IVBEjaBtnkm-h=YeNK{GQ7!q90XL9h^b z4oL*e1kW5TU@~V0Pv`JMCZ0g+2VRKQE3hi?K<9h}!1GzGpkpNWfLdq_;CUui1#a;C z6{zk6Rq3F6)f6}d=E749BY2%8cpO2A1Cnt;0|wx7dk$n&F$+>OI$mJN0`;8PK^ORe z!gdC~zrb7A@_f+B9%xH>jwncWfvCV5R%p6(Wu#g=p&C3LCH*64` zzDtTLiDiMP636twWx~SsPzQkOTu|}>H6a#){J{NKm*$vObb9qV1lZR z9UPD{0_Gh?h<6x4-l^AC0C7R#0k4gfwe5tHMIjnf1a%@vp(|NsC0pB*v-!)V0-$^oFj2hSlhBh@J8;6mxc zG5J=kuRAz#vL_o9g5};Y0IbidDNGNgef=B+pNI+|L2_;658x%l)21%X66rn?p-3P}!6r>#p7o$PYEU)<8w{GghBjIlp|j{LpiZR?DETnL zXRX0%5%byH3e0dR@WxJLDOOP2yCEPG5CT#i{~}c zG96K-8KTqAw{wXyO%R>lq{!vOdx60dbR?rA18D8<^oxpICX5x+*_5~x7%Qf$D{(2* z&u0QT5n+^hZ?At z^8wr^1tnbA$c-V0PqgF1-%M*%B_9Z)`atjZ#HCgo;C*?ZwWI>SnLve*;{@;;p#|{0Vvu=l5p(7a z3GfIMB$`0w2Y8SIG@Ak1vH?n1Y_NnieXSaogg0pU5v0Rz#&iR;RN)3+mLursE>=zE z8T?8Mz-b3L>0AJ-DR1h?M4hmUah8=?7mJg`Cxk1pJ`Gx?(5x4p$& zKoM%3F2fImOFjsiGrtgmtoc-6b$lTN8hK$9cmy7@Tp$WHv;i`s3#wQ_{$$l;o**i) z1Z;7OFhq?8Q-iP)mn$!5PwEZuH91Q`(G2!7XsASiQGpY>qHK*gM2jxN3UTn{)&`L* zMRrhI12Vz{4mpV|$atklmH=e@^ozJsJqu`cfg@-g8oMKS!5b)(v4ZM@J)ok39b80# zDmx9P51{Rjkaip>%)sq-fn~5lk{7g9Y&j<=?}AQk?g6#4+02+aK;t{B>K#|sE3h~+ z7D`(&OaO_1=Gs7xs_%dg`7DtD)g4@G6&El$fff}pJ1ER&Qdqzg2#y5s>JZ0{jnf1a z<}(Sb00-0)F(rueK?lvk9j3&h0FKTFVjvqG83a~>6@P&!p3mgOSOZG$5cS|WP*9;G z21+_2@DOtR!yvE>*4TFCW#9(6ja8HR2Omf;E2tR{UcUk=(y-=+3SuEiB<3_2IpAsd_`&NC};=rg*2rwTbVm>d+@ z6*wGEfOa}4aD(~*9H2_dLy2p8zBZRcq<|Syf&vF{chM7}f(e(e?T((;CnG`r2 zcYrT-y1)S%G+>97Fzm1r2C}S~b9$N%mk~-y!U3*?R)F@7ya1c9f+I^|!Sr1^T+;HK z8cZC53=E*?;m~A$!2yaM4p8(oa4K*p%%2`_$tA(Gch_`2U9M7Q9tFPXZMs~xrf9AK zEswg)3|cdzgytG<4JHOqMat>Q%QC%ChfBI%OrXkj>P0*5B^4h~RvoD*`^7nqV!;8b8YV*=d}#RfVP zhr@9LN0t)LbOjwQk?C^!TtbXo(+%~x)Y!Qphd53baNts%-mK4M%s7Agc6}}-mHEgX z<7Ea}gBkwo9k`^Y>p5_-Go8IYUD1HcEb8I)W&uaY@mI{ClM*>Jm>M`0c|oc22Zthf zUWCJO1`~9tJqM`OGJy%)+vRhtcT?bTtak%75*0Wc7l7A^gA$}fmI9wZ*Yu4BTr$S< znZW01bAZaQ8KBe9KzVcmjIjZn4&fR#vJ`j(x~3U&$ucgSE@#Lk$qp`K1kOyiH01Kq zS^!oJPII7C3r!~rn3gMXLR0MmNUG(UzSfY-EO{|f{l}pI>inan+QrCW22Ko|3a}){ z1x*ZG(8K_axECCtAq+%{UI0na>l&sf7;%X)a!oHc;<97pn!e45%LFu$#PkN_WL`6- z7fjF$eS-8wwGzoS@QZ56D%V z3Se1KF{;b3gPE5Rq!% zz@f|VfteQ+A{tqWykJWhvJ_yeW!OL+0}jUrAff4=rd-aLWh5KUGIEjIoM8} zXc2I{KE2kGD=VDCaR&>?Ru0fGm;$H30dOB}2aBSB0*B)PmMkTH1!x>Vr4Fzt3W6$3 zsH52w_#Htp2&&jP96^n3M~y6jnbRXIxV%V-urn51=EOu8N~MO+myDaHS6Fi8OL4jK z!pgP}pb-1O0!m{4*pwDbXSCvy@Z?rlz$EYlT%Z2|1u-WmW`3|Jf|5f6DBh<98 zDzWi`)ikgwvV$@xG*|LXPqN}tW9AgtJ-yqCOP=Ws*Yq`3T&m360t=>JvEouzw+Cfm z$337H8k+)}0-Ga)z&vhH#$5w45*+L+SQWXa%Ug3P)N=~#hK)dSI39pHgToP&hd?=; zHA~lj>Xc>xt854+hyuk*_ z)jMstRAfN5NMs4j;QaQmN+zt&VV%Q2<9Ar@&KiDtp7O z#KsGb?icKepz`_yJ0z8Xnz>4B3VaG&U@hQLGl-Tia4jF$!NLE79aMwd1l1s%0;j>{ z!ymYs9}r_3IC!9n*cG@0PEYT%;}YY9q+AYx)6>`5aY;zPYaL$DSwNun&J<8XihKH9 zJ1!ZuUa-MSI2746s1~?I%W1q+)G9sW5%JKK-f#mzjR+4dkX111L@#Kof=>8caVp6mhs5H28_b z<;v6h9J#DTA?HMZdXo>C719usCQGj!}U{;1{?HJO|VT1~1>%WtbtV#Kr=e4FUIm zA?xhH{cZ5hV(21k&=yL8+sq2Ay84V?Ks|2oB*zCqRs{}j$c88dHdiZ#IsBjzHWei< z(5w}w0^jr}PFyN1^O+RpPj_6zEizrfnTthjhXk};30^G&>enI<#6m_u7Kl&xa^^DO z`5~di;tCoA(_s2Ay~~-)h;hU8z0O=Rj60^^a^~_E0xfz1O*SiV2(01+4VrJ6ZtcQl zAvm82)X&yr-T*#7m(_8{^cEK`r36q;+92V`0E#DY-&sLI;0~h#3%C`CG$09{6nM_a z4O;3A>XhPC7CQtcG>HL2h)sFrCenOF@{=kpV-(5+cDUum(*>4n%?h zLt?%wmjPrWpr|>&6$5xY95%xW3M5G985FM^jyvyym(;BV_p(1oz$aZ~+_+>JKTWrC z<5Gvth@x%*0S%-fwt!4=<5H>zuPI}JjZ8w 
z0I~oF6jI?vIRo|)MW;peFWR^0`e9lP(i+D7x)R@BMdH1K!?17H_C$c1hw^J*PrFt3KlnP&k72fo_n<0xhr5W!NIY z%Lolt@TeQOtYH(_zzL2@NN!W&gsK3!i%no7I9<=-S7PC1;)aY8gN7+V@xiFT1W7zx z0_(u4kW!8udeQ++eZthu5uN_flS|DDnqVM40;Mg;%m#}F69+h+K`jP!Nj9(yw*p!U z2=wAI2W7LLoDu;OKqGgcxkJ!;Nyq>oWIP%aCtt*sI6>27&{1es(72uwJ7{1Cye0jF z0H|iWAppu`APm~@GyS6%muUSGkg1@>G7{j_u7o+v%x=Yiw8;y!;sG=o1Mw4ht2FY! zG6%E+4c(Fm+5`w1Xkl>$5BDt)1&!ZA#>hZT9#+Tt8DOCw65w$S@T>`Rrz~iPGH8?l zv=S9G9VeKj#17dN1gh{KfJXKnh-Eo~ZnT7K&VuX`x&lpR4Z>zjAH*CPK`8`u^Ohr{ zKow|sS(AB#AiTvM2Hqb9E<3=v0W{YRN&4^+?FIahfYoH4!4L5_XeuAH4Fg>AgBaji z4Ybb}v}H+w!;A@hw;d~JZKC6)+Z_Uqs<3-MKuH@kMhbB%=)RB?(9M?M;tVt{401AP zHbyMV@h3li_4MF13>3*ZNsfr=C8aK@S>>OY8O;Ybsp=m147jgrKYI~@X!=Jm*F0^wFrn!uGL z?t(O8B#HH~B(Z^%Bw;>XXd<^T=XyraSRsqThUt#}TuSv{KvQCDW=tQ1X&CdMhzCv9 zU`9N}g#qXyD9F?go|wlS@t_6Rv@8tnKI;&0gx~E1DhwbqE}-3wpoqs=7_>e|zJw24 z_3jW<+z5;LO{By;xG><{2#xqn(-Q-^wCll>WT2Xn5mbH%W(mv$H|qZgDsh4eZqQ5^ zcmfJE!N%$cDLO#&dZ1Y|@LVLQ9t15@gdByi22>ldfQnnt)D@^93o3{fh?_Id5P=rS zpo*G9U+%rI)rUvKk4)H8U zC4nkakn=uB!0QFX964yIGw3)Z=y?_JYGH?<66f^&L0mHRpqW5WsNE0(9W%2T++uhk zq{IoD;L~MzB?MX;04i0tfLc6kW=tDIAbw>-na>5Sq##_;LPx5RXK)$kPyaB3N3HZ785Hyc zs#Z3r>)1f91Enf()%xjYlYpZDv>2@iH+~R~1!X5(Axf;^228|p*=kFnD7Bn>nYIWWa$`ZK6 zhFHm>z@h+J&j1>}0IjzGucZS|PGVhe0Z+)SPMxqLS3&JR&_WAPdmFT3i3K#HguLJa ze^cSZF&=Kve1!swz(Zyw&gqXsxwK6=K@-oAr4+nA`5shJpeqI;5Y-kxddbgivo|p7EVwy)nJ+-?s$s9 z9dvdqiv#Ec6j1XWwmTNoga^fbhe(#dR-|=I3T%+r=L4;;XE9?s0NOhA0=%UJG`9}Y z#_D)LBuii$xU6{t3KOumUx1#N{w!x&iX*4UsHI(DkaIMZS&~M6#4P1-65h z@`GB|AbHS9)8e4}0Tnm}K&P{5GH(!3S_qw^3xk$E9H7txwcTp*&v$;%EZed=e3AX@eyhk&$!`tzV}2grd~+&5`@gMcIa{!|uFGGPU|RRDIk zBWS+^*nehB4`7a-2ht=CD|lEyH&lZ3m ze4q^)8cZ`J92p%M1$Kdpjyrr1W`J9R zZy?Iyeo$gj;HZZf`+`q_#SyZ&2DToP6?Et{WZ@a8dGtaU)~K8UawK?7s0oU71%81&;P`0)*$>(d|AP-Y($&Dv%LrP{&E?3T0Nyq< zT~kj;v|fo#T2U6Vz81Vrm<7CWRe?=fQ4%zIEoK6<8El z6u7;mLEBD1Czygpfpi%b@Pmg|g(0z_z^Td1V8_Iuz@f;+4Vn+mQsn1h1m8u?SO^)f z0uA256CP+S3fPi8;8X*u;Gt_3LCgI>W0>F!v_TLw*~hBO@Ib;5)!YsLgH01WM|lZSbIDa2Tx^{(uJIWT5M5h0U12r=)>eHzJ^6B~Y&gd_E5-DSVK~ z5_kzdaO)1JlL1bh^*4k-)BP6&f0UK|Cj7{L34S)eDafiCz3Z9@TdRzRjq5QUlY z24o5cC~dzGVpU)P?IHuMhXpUp10_gdSU!9r2CgsR!J)(gD^nggh+*V@79Mu^0M!O@ zSi=C?>RcfXZWn+qS^$@6;^02%^bf~)*cmye-%H?W0=QkvrN9o#=a6kS`tPRbG9uzkU>C zLPz8^m>|ts&>$$Nk-~1q^gp?RZN*tiS**Y(fGAaO z2xTe3W?WtfgBB=)3l4#O;EW7f3k=!}1@75^4#~o%bU#?>8_=?04sHc51<;B-xDS*- z=Q&J2#Ka|1&kgcFs4oN>+v3DIyEcPAOW*+5$Sk@PcFT=zoK-B98*3 zAp))f;gun%UkY9x2AUrO&B3yQ$`H`1XV5Y;R!2~!{Ry^SmX{T@6GVYaky7{DVPtQt%&gu#6;P%8yeuY%Trb1Lu&Yyr=wUf_2;#Q<6*1wQ6? 
z21psGF@uOU&@qDG!y<3+LFa5h?gRzI4L)#J2{uW>%EJw6DS`(rK>i1fra{UlkOuH5 z2DFp(1+@4YvfD>kkptXX0L_kp`v#z8#vs=nVh5#I(2hOuNk*U%ClLjZTM$Nq+yGii zeh3yryj)K$JMapAXbTT|iZR^7N}xUpc;C|kad;6A3wq6baZsxPl=#3|8WDy9C&0FX=k`DgmOyy~)TGD) zb$Kw$>Prh!*2H5}#Cvdca7AeCw z-+)iH2F-56d!<;%eZWJLpp%MsFKrQUl!h)!{~!U`YW_n4-1Y&*G3Rt$F<}Yg6_BK) zzzHhEK)0EI8or=24v#VMaDf)euxc=Y5`Y<#0;p%I!L&ep`bSR@VIIgT4sdPFHT`2g zkE9}EpAMFRCD3ZA>2orC|5Ar%a0rwS)@ZkrFfzTg13Isw(j3Y?%~8ZuiBsaHT# zZ)~9I3>0sm5ef~a1K_&l2&`^d0!pf&St(G#1##aC;jDU4?p+|E1gbSb=fa!>C$KA^ z`jHjV8vq|`YyuheQef0zvXNE<5A*scfUhlw@9S4&fz+d*6$CGYKrMFg{I(K@w4xx0 z2iniSQ*k&lftu(d3X&k@pt_ueTY;rs zK@i-GhuQ^dowF#2gJeZOl>o$9AQyqouvdge4i9Lh0;o~XtpGl3*%8`|=K;GCbcY+b z+683+&}l5-Ej7?qJa`ERn*zTArvj$}zqfQfWS9632}K@IdvSw=;!;rg2AT&3Pq~7c zSfDwS->hKofkufTXL~@(cVW;hDX5eeP+$`{1zMj08sh?K0=05ML#4t3OTp_Den9I$ zkS!ks!HuUGA__e9j{m{?u$UD1Ax(D$D~1K2?e9yO>|lG~>>!PH&~cKW8VxiO4YC2z zTnEwYaSiHa zgAQbYbk?E8A0!Qf)}?@s69E_HGx%Xexgrbb9v4{g2`R((fV=uLAZeRbmtht^ivy@` zWrwzCAjh0Tj0GK~bMQ%%fTN|rA2yg_kR=r0Gg+Xu3b>&I%2c2NmC11mBj{igP+JGI z29Q;saSCY34J)Ya#Ub#JS&;>_@Eg{dX9D%$>%on3P!koj^GY~N0OH6CpqiBlR8m|L zQIvsXL{JNt9Wssxnl0inXMO;_t=vWtoDoexLwewKMc}A+ydeVGVgPUQ?f}I<6Eqt` z@+7E3Uk^HFK!a(EprRb8B-kLR$nMAhTKNL%=Yw*90vn_m1aAB&f&Gua#^m`buhNpgX>9TNi& z3pcFuFFxHqpG(f_0&<_91K#K71m{Z@M@VtO4C(VLAc_kP1pj_UxhNL5c)bP?4Yj3J|c>9Ec)ex`RB6i560Vp6gOm8UQQmGdQxd(LO z#C%Z03zkCXGueSo;zuOW!=NQ*R*)ph0%|aVns%V=YM?TK6+CheiVH~V^n;iZ2Xg!L z1xO)i^c~S^;|DbXz$ab8H*bO(ef*#fm;_P-m6roNgQLJ^#sqHVf`;@6oun0IjV7)jQ+Rq~374o5ctVs3 zBmizlbAac_K+Xh>7;`Cz2*`q(GoWi$z+-%%gSAeSz!%TC;aTg-2ESDXR8c{~9MmX~ z0QF))>nA~35wxb*al`buQZ6}ZP&EeLt}k!~mZaf!PA>)9dA^iOf(yLv6*S{7IQ?TO zmxMVe*F!QPXj~E0ErZ+K0~!+ttvdpx8wt=tH_#4iPy^?K@N~ud%#zHkjvuDSlz}Jf zz?*#-v!D~1ENcUK!F3x0+S)x()xr+#(ty_V2!bj^=!pZMN}3hiwPA-$MdO*|<8TFa zUqR=`feN^K@CH}VAOv{3K4@J8p0#_RaZSiHI6GuxJ;+PooU{RXl5YlLl8+s_q!zR? 
z12oCU0@@7?UL>$V0yN22kJO{$1oc~3!9!c1-MZl2(~#B-e3owoXnF;F1_I>z1n@LE z_+Vb}Bp-hFp-%F#fhUUULB|BbXZb+ASFrz(T#Y)*2Refi(imC<={WO(3(p;(S$Rm0 zm=n?n1C?YPkfAG3y$M-a2`XzY@Mj4u0k_Kj@F{WDgChg17}S%2CxR$m@VQ$wOa!2Y z^8(R&P-7Z-1t;NUCinwcP@oDNu%K{-&GaHC0#Sh~(1?gY6C@GTgHr)YA^_Lvpy;Y5updYfVSR%)@#AndxCeEPY|81u!T#co(;6Lj{|h# z8EC};sA&yqShIm{I7b=xW&@R?m4q>%M}JtTNGE40(F`|7dD|3d>{)!6U&aMbI{;r zCSdo%6BW2J0G-Z)RI;GXL4&TChUkGMD#*e>Xvw0#K?2mwhNK!|6HN$~M8gD{EP*Yp zgH4vOg7#CPEkDMa97Qor=2qgI9$(8Prvx(;+YAio9G_c^(*>mYWI+qz*YHYA_ddtX z>%nEk@Iwr#mk(V!1U;Yy6wdmLE#gY-ko8>+;)?v>4x$1-bclr?JVmO+H~qmkUQu~P z76tGve2!|MfoT?923AE51r}Wf7DY}4ZUv|5adlkc_1xeQLl(&KE1-poEDqqqWzR7y zv4c;d0c|A!S1?mRorZcdCh#T^(7X{O5*0Wd?F${5N*o&+YKqL5z(;0))(wF&An3%5 zH;|?$$kpK01T#Pv*n(!U&a)}8JJy35H{ey68^p7~I~ExW!L1cgQ~3aBAvz0W3=A}6 z6k^7-1w6tim<1Z91$U<#grMOClK3E)#jOMyy#pOi0a|zOC~L)V0qX8Op!Hgylhi>p z3s}VhanOb?7SQok%nattpmU5sN7^WW+`d3O3pywH2I_Xlr=a^aK)OL)VsY@wFHO*m zKaQXQ$9Zg^l88l@fl-kc6v_;Wf}laOGmNk^BSk>>cxo_RU<9p*;Be%~2JKn(5@-kQ zOkWQ2I_Ll-21QQCdXVQhASdD}aXK<)b1QM-IAM~_Re{sBh?fz3!lX?Dms~v`Xv7PA zC`t=x)c^}*l_BV^5k(R3b~(`W3Mc5ekt_v}so?Qv&|sP_184^sXpcH*-xp{^58@0S z(3zzQBB1UxyCY9d7HGKxqzCIzrDT zRuDu=a7uD!OrQx%P`?u#Z50>;oZ2Q?P7KtxfJ33MGAByk~}243#s$RO~aWBR&A zE@_rVPDSDAXB)YU>jf0pK~W?EYQSeJvNJn?_Tz&_B84Gi4l)Yt;Gqx&(2#=cA4B8CIpafoMpd^6MDd1@2&I?+9$Lq+b z=*G(mnlu0fI|pb!fE_d%!LGmynl#{5;7|~rzMzRqy&g2*3GUCpE^KSzS7HaP(sf(` zS#bltAPCg61ed*_Q(?@R7f2{@fJXmZK|?&C&F1b!yetZ`@Wl=a{O+JtTcGjWJD^jM znV<(KF<3D`*6o2VHgW_l$1w#5An0xbi7Y1uCq@OqwQZ32v3HY(ZV`LK2X+y+f)nVF zK2X+~p3}^wpZWkAw`V|cD{01b0>lSd0pf#lJ%|sQEd}Mq4e%8Ldj!D7v4V5s4bZL* z&}^yz=&BC|NNxnRE$4x*24Z!*Fs+5lR73z2b^@@l69Mr=rhB$U>m)K!ahRb2gdk9Y9&`frtV(D2YKjy`amNm_U2ZH3b?u6*xf2+tCPAfq-w5Y5;o# zbSV}5)F{xPBfo+OWFkZ&OW-^>Pu~$zVh1H6@M3y!FMEL~s6YWV{vmzM2Vz-FO6=1& zv~h_iDDi>P2D<{E1`~?{yQ2~4KqF|RFgu=PP=p%5?Z^mm3FsJ1(3(#r&?*K{F>9g( zD`h|v6-w;V;9KvQ!8`K!6nLg5v~h`=FA!1C1kb{O8m;r7S73owk3f9Rp}-40dhrd& z@l1{iiaZLujwh$DY2%U?;DMYx4(geMk20NpyA5_x3V3}4C?rAWiGxF^s4jgpL68H%2Iilc|7k{`I7(iE`u!6S13CsbV zU;-+`K@+MhVUQVfmMrjvUZ|?HL1T~tpi}Zei4MN}gB{fPxXs9|fGiJPLvjt&M07+t z>hK^FmndW_HDnP4Xy6_)xdB@00Bx6m)-P-U%`t(SY8${yw88magXDlTV%WebPtA?R%>gCzq5gX#4&F;?cW2tnu7_n0~ch3CuF%b#1AZv7pB*A za!Ig*+{&TA25yMCf}9B5Bg>+|p#XOxBtL+Lc|poqK!X;Xu$4V9XTuIo<(&StlgkOz zTKvf=Q4j9yL&``-4JIAPAToG`4rs9kXx@ehLn6XbXwX>L#%0$sN$09s;)v~Qhr`tEKn^-M@&0I%)=ErI~~ z6X~EcBHmptK0m1R1CSEgJx@3FrIR=ogREI4jp1Z8uO^I#Y>fW1fpv`3SbnQ8ioUM|G| zPE9eRUs~W<6-&6O=nZvy>nwgN8p@ z1eQT+6;S^h|3U}2TV|hb7I3seEOY=*Xa0~-1dVCn>5KA!Z~R6c8<`%V1v&*|`qX}K zS_9ppXNi)fK*0qnCc!fo3LK7e7EKdyZ2aCN;HV*R9UNUVM8T&5fX11{CveGvjw&^q zz@?3-0YC@FLbQW33g~WQL^gpGmF&=r3tOED8ZH1`;{r;B5}?I@ASZwZNk9`5H_&qm zthfW8EOD6;vbu5lg9%(pUXa`hIxZHR4?*D$FRekz3RJw!0WFmUrF_&eV(^M>CD5z_ zC`t2A_nFA0SWl(&1{*|x?jk{K4u|g909C2jmevG-*VYJuQX(h_!E0+^XOcqFBDh|I zj0A)Bi-X2ZpfkOQvh&HV`<-7Cf^KT43{YajSr%5F*`yTO#0e z2kJnBn)F0&!TxZJhtCd{|5*fX!pe2g7erGb521;^Z zhn_V7IzoV8frFL+KvN))ZA5qy03<#I!ROIJ(ykT$1OPe?1eyRqH9JZIn6CJlM|`^8 zW&t*Wu@ATRXlJW{Bkau258x&n_!wi*O(kfh4h!VSPo(8hppp}|0sp)u7kfPiXlEdJ zOAqQv3!r`ndQJqDLMXE|gcqP0p=6TzXq%!KLCZv;$A3Wl2k!ZT%45i}kC5XvI6<`* z;c^+CKMr(t3OE`e@&|Z^;|KVjFmR)Z1GHRA21F=?Dsh5VH)aVe2Jf%{P0xU4J3tp# zv4hs6L3ciaTJE4^xSTj%3kDsrJALP{1=e?g0(EK<v>0A=Z?x4X5cb?PW{LZAn=lF&};1;Ze2dyt~yfFRe zbS?!EKF8OXGP*Ok6a@JMZlh_4o53X~2fcJalbHd$e(44Hh%rSD(4{K|9m}U*c*`d< zeSw)c%k%|}0zA_vtdn4!-q0$3Q4pj+p<_8XSU`;plj(D3a>+;-7#f+$%9+Y5C@Lwd zsH&-JXliNe=<4ZDzc7;-6|+S|iGvrscn-AW ziCcjSc1jGWi~$|9($P0fK!FQ-NKB77NVQ`^l=a9oPJOftwTHu(h=^EP-Nk$2I&LMG4g^=krTMc46|g0 z2*eV1-YT#naD&O5d4q_8iXuN~462aDK|y2s!tGolkdxb4pm$s?5YJNN2QT`A+`|C6 
zFi5oq(+iM?jX)Y7z%?>JH9oz&M8bTZG(^nklg^u}yr>DWo>p^W+&^i;) z_Fx83st2_mA){%a`8cFaWm%wz4ge7$0v{Mb{Xu<3(zlq^>odZKnknC71{%T!k4zJ> z#mo>Cei!&)I~~E1VLJWH30@^ezUl8z@M=Q_*I`TLl%QkE;ElbI<~X!52%648ERy~M z-TMd%7|`N5$gl=zH43;_2^xb06@;MiXV4JqPw)UVY)2$W6X>@04$z5$^{k*Z7z$_; z)sV%^SPELuVrI}pHF%*q$kni$Z$Wt%+&+WNQ$yVi>W+Y&A#eab4-Fdp1}*3SwPSNY z6&!dtRp1~vO@0Bb+X1y{!E3RgtJ@%D1bBxRbQ%bJLoR4&1G+}6orzna9aK8Prj8B3 zArA|9@ET1>?nRyu0Iewkg)AdvCIDluj2&8n;o02h$RN;-f0S7t#U(7Ppz_BUX>B}o zN)l!V#IK+!Nzf?K5^*Iq(C{!gY(c3GJRivco{t1YIvcE@UH6b_X>cod0eC?oXt>D{ z)c6J^S1AQn1@L{nO6&@30^nH$P}}aq^pj_JRqA`7p6-Ax(Evp=BmqG@4Vp}YjW>f9 z%0LzIQ+r6%vFS>cMSx z&^i>zvXEBrO42#dYoEcl6ob0>AnoAE8gpjQOdxn<3%ol56!t6vhqys)F;G?kWf{;y zR>*u2d{a6!E@px+6^2?Zk>z+|V=T%tJCJTrvSM=V0x?0} zN(sd-Qg$4egIeLB)qc~ZCUSFgb}~V>9&}AFwBu8*2d{vIY*)L%k1~pL0W^vODvJ(? zWGR6b6|sYhYw%bUBoTu8zia~U!G*#F&@MPoc?&8nK*K$dB}Ofv-Ki{~VqKS^0dzGp zxCaN`sbJ20LO`J&GWZ5s0p|!wqes9~tDq&-H^BC?LdExkrimb%xgqPI*#wS)tJ()3 zo7q7twI7Lq)*XTuZk$%Ha_3CcyFstG!+h1kNm;4&{2HfJ?Ja|oRvT> zuLl)hGaz|shIp3avWp!8j@+=k1jcyf0F@SZE_MibVCKJWSpMrFCI5k^E$X|W*{%n+;|Jsn5v0wTpkn}_ zt4_fOpMZt|Sl}BBAQ=ucd-IzOGLZ@%@`WTiF3|pN(0~f)@NZT}(9NDXj^OJ|%t2fD z91loj)hpyU3V<#+fr!i!&r+y!WXjTIV6Xx$15wBUcWb}}TAe-vgEb>4(SS;u8KR&q z7NF1sO~HUJVPn;0*dhuUc?D1GLvkayz=P(<22iQV2Fj655}*@?(dDz%g!+b9=;D6;?2EJN{=Z z1hrPP!6&W2^8OZ3b^_(~H6R9y8PgIF4XT>OA<=+R7lKAfpjGS-@hpw$+YWK@4~8;; z4U~03>n1_zPdrNrHJ5|p*%4F*ND5RbWP{28jjcKb*8t>|0q?GM zfDX1r+RF)Q4}uFbZ21}Pn{Oa}l87=O8{&CvC!^?hqHopK`OExpo$Ds9fBrrKo!{r z$er4(n#?m`Ys;W*>PXO34Q###WJZ0Kz)bLn_7YId%nGTQ7r<*~(A3Qh(98jNN&qrf z0e^2%;dP0Q6UnvewkID5xhwR)PiABU^UTTdLZT~Ake}EI|_@9TOm@Bg~tF9`d|SC zSl2<1hk3f=VIC3ieo21#kQBIUs1NHJ`mpdyg!}-HgF{xQ4xMFn#oiWJjxIhG@ z;Wh)j`w7&qG-H|rYM_8Rh?q;KzzsE`+VIma-Qg9XLhBvWm0>eu0-sBAgD=Yww5$el zW(}(&XipTp^)6w?v;fxLSU|Y-4(?EcMvK6U6cAIlpap86Hi0Jd41V}nH?Gz@$Z?>H zEg^gJK+Zb?YQ3w$N)2%99hBi9ZbfT8fab42o8dtnf5$f{B^>N^6kynaIfG?BsTct8l!34o2+ z)N3#u5He!|PjOxl0@c1Jz;=L}>7af;XjlYN((D0^g@Ky`I|RX2gKK#~b7pWs1M0?u zTI-;xP*%qaLa-(QypV&HM2JG}g^<$xdT`GYTFRB-SXl=06uvBi=+=S;NFa;pkQ(G% zsO!wQLF>#I8I(9&d09XcT@8{TZy!K(pdEjJ$4j6M6;RU)>{AKInqtuU1W-x^4Htt} zoSHG60F@Uk(CKN=?6UxaKr{$VQi3P#lQfnAlMukvz0)F z`~=Y~kOWw9n?RO61A{dK185H@g9qp?7-VIRx>g`5aPwA)Q-LkZQBi>{8@34Pgm9Js zXz3j|r-Q}};dMN-9@r0TppHMNdY1s5&;s%~sPuylo9^z5D$Ea(t_#m!zsXd8&br8hF;(U1mGeDd^iPC5d$7e0`-N!m@)kTbs9O% zm{x!ycZG14Onnz#|I!ogt0gTo~jPdYCsyZK(Yp&%3gw;%7n!X zc&d9aZ90Y-pmYTpT>*Du(cKDKbMZqcOW*`#LJ@R&BxFJjX_)8*$YN$}7K0K7sK@sZ zWHBoi6*DA23jk+GWZ?-U(7`1a__G|(PU{eGl)z#LyvEx(vq8WSTL{4OFQ$rmZ~}sq z7LF%`vm7^rl8pkAH5yDaL_w2~pyRs~q5HK!onz3JDabI)A5am@0_x)Y5Cn~pfXXG% z76s5rdeHL;KYboCBw_0R|5tTfP)UUB4RBdEIgAf5$Y zD-WuBI0Txwz&;X(O?87Vd0eho1}cM~domGo)Szn)L6cJO-XjNC?{vdwyxP2FNV}YM zre{9mRX}U;>4DFegv`D``plq3|DbDgK&{&>1?c=ADEOhv{Xs2e4-nx4pZkMP<4Hn~ ziiFM*fyxVT-waf7f!0Prwzxw#EI_6Zz(+(nA?@fPV*XAObqpFbb)ySzxkCH_UVjK) zVgjlLLC4a8&j1B2I~4fI0qP=vw<&-(T7x{Y0kp3P`#3;TWh~s9 z%nZ=Tg|*v3tAdv+vhZktJvW{ACDIYL5|A1Xyp=~3I{Sq-f1iPE{vNbR1k^Z(g*a$= zEOZJ5G_wTmkATzY1X0jx?^EERf1GpJ>JXRxLb!|zTs0tE0zPyBJZ;AS-{JSzlbA)0K{Q9=Oom?4r9s^n116G)-9Kag+od#jx1TGkZYSD;|BPg3ywq3 zs1W1|i}_3fE5LJ3I33LaNl&07{xlpJ1x|wl6sNmY!S0^E^EGKvjdiI8EIeVs4Gm^c zdl)mS8Q^Zi8QCgex4~v=!BgZKu+;;5-|)iEKLH;f20FDEJYo#Gw3VqO3%=|Zv_1&b zB4z}gbB;(8;6pCJ9f=Jh;EW0y!9pC<4V!iZO*ev*45)O3k6VKZ7je)i-U10G2T(b^ zKw^5qTV9b!@a!Tdcyb!NUY`wR?>{KB9%O*-z5y+U!R$hUN;%N9C3FCr9oAl+F8G5> z9DJ~WBlQ>OtAg5R&}s-YAPVa>Uf>7yw4rl8GeolLL2X#bl4KAGD&(=w{eY${!G$hl zm;~Om18=4Q&;1bCmca&^umkN81oiwur6qU|!SDLp9RiN3uxc7KU=5jI0=XHj_Xu%1 zXpy8ymgAy3*yesF!g`OBi0wVHfCg;AgU{2s4fy!#!Be;j90I4g92pBiT^Mkj<2vsF z#{qPp_9>|D5zIn4?*U{S_^@_Ri3^ekZ8U-JDi8<780hj>38nedWj^rgXM)2VRM|kn 
zoQ&fg;D-NS)gs_14qcr_&*L53!2>AZMhK`LWC2}DqQK(F;K(4b5T& zUcCT{^Xcb5@rtHmDNjKiL-0yBfsJ@-14vCEa1R`III95g9R?Ssdw%BChBRJC_Jake z*A8lEK$~hPegI7=fo>ON)nKxLH`j1@DG!UEAoDa(4}*^+I0IVB4my7gG!g}kISzpq z{JvBG#}rCsq5|Fr3_5t=C#MA00?Q;B0kLroFn+)oXr8v@{6FrYy=a6>~7 ze4!SrKI0rgB^F-rOcUsoDM8o}qY$Kv09r%N2D4WPq*4LAUkcRuQ4j++;z5VSfINLe zNQuMo0z)?FiYLeO4BpaaOf8_^I%o_Ed^(Z9S$K4^C}=B42+Uzp;80}YQQ=l#pZ?$* zuV_8!baT*<2k0V2P&hMKF+2h7vta`7vte}Yp#2K$pp6+^pb0I|cp+$@^%65|4>5T9p8+zF4T^&=pz&hRK0^=j zYOJ%FF`WSM8Lb!&fcT(|*C3ingDJ#}X$wdSwAuh=@)$whFA@XInV(u;5q@) zdIGP55}3hO53cq_6z4-$J%g$*(CH?SlnH9wfDRpd!I!1Pp#aJZpt(M9f0)Hl1+>i@ zJW~R`LIu2`4e53Za0dlcV@qT?!c#T)HWg4H!>fjR@JdiN&_Q~jvl}5J69VVKx&H?r zXi!=WoQObcvq4n_EZ{&#XoEU8irflnirU=Jq=s24fUA%j@RhBsj`tWG?=mPb=`!R< z3!LXxsAtt@yn}t2D@dyb(+y@t79Lr!!EjSp1TKI(_E)fL1XUv%Ocy{JWk4EFz%?>M zHJ*WMtVb!sK!vvk(+QAHX^_qXaGgw!Hy9kRBRQN^pYaIX6cj^1Wupeu0gxe5AVYS* z4Pk^@zXzc)Fbio3v^;pWcLzwPBuM85xK0MB&MgR?$QtFq8aIG6N`P-PVC0*g_KR1J zQD%DUFJ2FwK*txLb1ay+6}U8*I21V`o4G*8!n1&;q&OTovK4t1xCPpomF7?X`HNT4 zRG?6i1yr+Ib}U!q2j8yEk)^}Hpv2>Jwsn6!jA3%%Cs=jk|*vHh_0xfa*W+C_Km|!qDEr2VqcezDj{Z zfgLu`3~Hr=2AYxgT7wqGfX?qg+L;d?ZiKa(puGrC+hPIJ@_NWA>fmk!Bd8c6(2d}P zb|b(J1C{T>S&n!1wjhm%vZFf?x*Zoh=L71$f;vE;GD#%M@xk8u76C{2x)*j(8HutV zmmShM#j;`vw7&>ZLm}b{+^PZl5j4^R?elTK`h4K|7$tV-90`jnuOo5V;t*tD0L`0&V+B;2f{%&^Z{Gk# z3RbJvg2Ge}(P01&>im#UDW4Tb$_Fht0cUHZl;1fa zuUWuR1=jHfkD8*SeC%h9fC8Ajls`Rj3Xg;m*0ct{ae?R~A;HNUv~^4YwpSjO%t5Qd zKv6(sGQZj1jJOfu1L%B6R`9Wh9H82Q6*T(?p3?%2NiaF8fX3J*vIG`^H|5l$AJ8(3 z5j4RHxkiLV;0Ggk(K__VPEdaX+T;N*6ah`PLq}?%v$L}pK~@sI-`<>Qy6{A9q3P$h za`R?@)+b;NQ9x@G(0w-G5(82)fkva)6gco6LJ4jnf(|=T02M%>ZZ)_Fnh(AWqaJ$p zB&eYa8JqwW<&+FgG%afpa8y8)HQ-_xe)c5z0uK!)83h(c1<(qP>3jV7_;8*@$qGK_ zki4@f8e#nRs=#&MiS;#BsB|&>E8NsJ!IbN8?#wW@s3_ijD(hCROGQ|SA+zE6* z6{ue)WyNp-bV4Ny>Q$j&EQzX7xc zfY*#^0jQg~0a-I-4h13v*6hj)x`RZ4*No`^Nbv;WEYSH2jthjdK>3Rmbgjr5kZCO7 zDQ58K9t+BdE~p{)Lku+haRGcLk{J`j^fOC&xU@frAs?&+8Uq0@gGXwTu|ezH`P0E` zlQ)33{Wl=4x^jd}>HQGQ(qv`;uQ3I+ctBH(piVBR2?i={Af*hb33dY1B?h&4Knw3L zh-Eo~XWboFfbUH?A%?gv;DneN(+x34M(~2u8)D#g8gwQ90_1iWXcaxEHUh6C2MvEi zmZic+bAO0KQjaF{2gHFYD?nXLkS-+0fqJ4K$ALz5#lTSta^CZI9pH=%-n7aKUJHb9 zE81u-#L1wwb7EPJotSHZR!AsLVFIP21rmx=5u>?;Z>T|C3nVcGx}H*_KS1#s550ZoPvgu#2* zZwQ00{D$noeIX25L&7g`5xf}bjj$`|z$y)<7sB9q4FS+PCD2$m=z>{L7zrq_JBs9h zj@UpfXs&l$!4Fza0!zf8$v*Hf4tQ7@G)@2+h64?8fll=RRZ*a6ic8>L?H^%KTGD0s zA*^Twvf+!cVl;^UAgrj%13v8N0XQ3iFVE&>a%2E0cZ7(2U{hpacBlvK%XR~~8!XJD zz%FozU4b8xVONNN!j=^-1D^B*_ocu?#Sk}v_I0xfK-{xO1Z06O!wwOMd$x!`+_OPM zQJ06ERe=X|P&y9}BR9B5$Le@ML>p8b!9xFth!PK|Uj*{P0TDB%6QFfJprckm^EW3% zrVH@!Nz{Xnd;=XK0^Xyjz%Ota9Qs#8TtNpUXfRz60Us9+UcLmnQ{WVM+q(wnjtWOe zAJLo{ywC>tE%n8zP|ign4F)1jI8FBp{yYkpOw79yIo$s0(UfvpPcda9~S@Pe5J) zwZj^~hnFMlVbuT~3<6qaF+(s5yipo_#s`lpFC(b+&!xZ*K2uNN0vBj%1GK;ayhfB? zflJ^ymjbJ9JwuNOXnj3sKb{D9S0F4p{lUixPY}sc0(C7xqM%b}z;OoJ(ur__0QhcM zm^YU|<7@#m&gMYlYz8RKz-xCKz{!9SG7lO6Ne=7+AK2DHXGB{-er4wdtzKnN)CCn5 z{8^x45_;+F1%4i8Zpbi#03`dtNKlm~JpC&#pM3oRa5|mHt^~R{7hKhVmcAm~%%Z># zx>}r7pYes55)UsAX#K|%G0=zvLJcTr#gs$@&VbGd0S{C2fO?SdL#RRNRRlaJug^FK zREC12Uw}PV4;~YnA+96<*%$1QZAjq6#cvbHS(fI5vo4v>hNq;85UY z0QG$ZHo7SJRCFZ6f^E;FV-Acg#9Oh5R* zZ5y<%KdAiwAvS&gQXcVo5*h@Mem`h5334J6FKB-UW$go=dNZa2pgNexj0x0Ga6BQF z<){qmf;xf*R#_bnh-E492sDB=WwB~79S}2Px&UiOfc7_JDe%DeE`cP`+Yg|o2}0kFmcXvr67eJyJ10etfi;noA#<=|EXC>wy?-g%)zz)>02BLz(b!~G$!9e(IE zxTgwo?}d7pZ`Oe{B3)SoYP%f}!`Qq8(sTo)2h>2?2GYZbWSIuj3JJw&uolO3B3c}< zGuL33;$_%@HZE|2HZAZ#T0pD{0-&Kq(2+Qx@f-!nRdfQNGs1o|DXKKGvmg=9W(L_x z^g%4kQ3{kMK!;s|I-HQBGC{Sn0z0@h0KO&()F}jCk_0;Z1TrG`Lr{qaq)e0fgCM*! 
zy#h4i1xf?B90pAjj_XhcCO~IkLAG?>5X*wcCCKyO#WF~4-3Zbr1&bDDP_eW^9MpgZ zuk!&F_mEYq(B($xO|%wcP|3mt($*jW>4hpCHp!@z9;9Dm^%@o+(39Q_p0qpfh*o4%&{Y&r zFalNCyo}tAjGz{SBWR{g41A!I5UL*MIbBaMp*?h1rDgu4?sqP+IisS z;0+-~a46pq;#L$_V6At209vfAz^I@LspVdPJEu$vpgJ2=IfCw6G|GZFR>4T%GI%%a z8&C(6Q9;mApiog#LC}$-P*E1LF#iR=8Pf-T1ttw90Yy3RQGn2ieozbj1Ai9i)=XCL ziOnL83JM~Qz6zewiV6xwj!Y$rY~U`lBTp7+GQp8K3)FvBR0U1;WPmR`6;LozWKrM% zU+}{LsWc_B6p#$on7&?&PlD->#Pqc?d=m8{{=5uQjy9mg@IgX>4KxIy#0jdD89)O( zpdECeCRc+1sJY4N_=6vEjiNqdivT!5$t&i`?Wq@W7+1n5AcdhTomK7l^)bY%~^k>Hwhfe0`u7v*c7<3Ahsh^@q#8oz-PZ@D^9;J&L?0j zP^2iJz^=&6!^F+N4LUWQQG+P~d|nWXE`v5GVKRU!R6a+>Y()-GH(x<`x{?H+TD^dR zq$7iS5h(eBW@iK)*$W+6-C*ZjFj_IZ0gZQnHiU!vARnOK2PK;ipdktH9Tt$Y1_c#l zL5DbTDS%eLf@WA*9bX7!30wgur7r?XJfIw;!Sq2u5gaNrz(&<0uECli06M68fq(+| zj13tDc7eBSj*Nx8(je~$fE>u;pdjh00Ci#!Xwt%Q0W_FE(!Ah#ydNNQ9oM{Q5pd)Z zSPbsk{efBzN+Lf5%$QbyywU*PumswaBmmob(gG?{K|6p!yKe=}m^OgabqHoD$S6oS z?qA;^;K&Lcdg+0xgl>BRwc9p;c5`Ylb%2I`ZU|%v><6caDNuzPOcMmbebE(QYgrvP z2!PWeXl8Q_RE^^iM(F95Y>)|ikb74MnlXW<_`#cBm=xqd#j9|Zz*TT3a1SWYgKp6P zIpBssmSepms8?3ET26u0kepwmr2kpUfM1#QIsFg;$H zPpp0hI1bQ9=H>`0@q)S^8cZ_;LES&_9Fdp;mm_PL5+}5fT_FxrvqW4GJjS*FT>F7` zn}R3bc?2#pgKh%=b%lgLB@Pq#>@&~^EQbQ8BLk>p^#RRh3xN&d0u`;o3S6L~6;ySz zYB1dp0`L6*7p=0eq7^j32=Z@(1gvQNBB3PdsGuM@-9d&=QD4N7K@l{u$*;f(Ipgs+fc8%#1WcZ|+=7>THP)S4qIzbd#fbzl%P+l`8@Jbs{l?ECi2d%sS zT^R%3O34eW%(xYKG0RL|Nc9FF_7;Z>0DUbykJ7iFhR}h&#O_tB59#YsT zfMy;vm;{tqAbTqr67b}Q>ialONP}P&Ez^@>TCbM2X^@avBgh;umx4H`AhZ`y5Of6Hv8DiC@ge{%2*s3mp#`BBq#y(}yg)bh zfmUsRvg->zP&v3mVEP0(KE--rSC~^E1s@|=DrlIH5w*bk0!pUPg6xAh zbnF_m_68JzC*bAX5($V~K(>M|jRhYb3o5y}L0j>`WnMi}p?8HJq8zj^6xzN6t%_m= zt(gJM^a{cXy)VKLg&IsBAOmoaK?ra)0a^zSTI(bNQ35XWz?&J_APZzbZkZthJ|GEn z8kP-c@slj1aRDv#wm`IkLLVI7&_WLyVbTi1;6iVMh`_w*ujToq>yhr41#jvAug8aE zOSG7CWQD{WQpACVkwB#pXlz0eS=-VfMzs!lY?NE0yI@1nf-$g zQgR~i3gCrwOkleL#6S^zVft|eJ{3m3>2DPH6p^p@#5SqTqQE9_88jRLI&c^^`8FTY zy@gF{gDO}@Mnw*AGZHjhgLT&L00YY867E04T4#YhrTolI38d?%rk+nVF1l(!y4^~e$6`;K#RPLEgOlSJGS%K_Ru0$x0`0={^LQQ#W5PF;h2@eD|{2Ga^= zMHU`mP^2t?Yh-|GT!LMr1bEfW0%k=PCLST=)sK+V=0I!cLF>Xypr_4&mZ*dHpsqD& zEf?$A0)B_zJuF3@B{fkWj79J*U$FmHMXTV9+@eG5Tv?2?S z2&jDvYJ`9;R|S=MpzZO{ApuaC1KJq_x{k{bY>omO=)fx_lOBxq1n+VJ?XH8JA*jUW z$mOoU=E&k+2x{6mHt>TjsRtz*(BydwC`{O)8)iW%2C{n?bJh)VizN6~MDTDZYN-L< z!U~!#0qt7_xwk zpv;-UXK#R7O^~xU5VO~yEr8(d3>y%m9#~e&pt=Y=gavZb{7W4Ij`FbK3kC&NKO_gD zO@O@+HiI9Q>v#a95p_7~hlJv6aAbdwP@DrgnA&j$|8!d|J~>a&c3RLDaTbL+j0&?E z1q`?qI26E#WAK1jtQt(9Tx!Oo09jBugCFEt83phFNKTfcPBv(8AH45jo)Mp-Iiirm zGW{))r7)Y(jEMo10XaZZXKax9i5rlZP64kHV=-fz1Bz+TGB8l@3^Cdas_h|%jzN;s z8_*4noS<^$g*YfVfm)IZ;M0&{Ny?EWn~$B5iGhiU3AAn+bb13gO@+YI6lgjUl%_z% z7}05p5q+5zC`?{VH_+r06NXgNFObvF^hRwy&}GL+LuR14J0cPfc#kDniRT{Z3?^yR z!~>pd0=W!d;`t5Ih>>{a!V=Fs zi$Xz5@IyVH>eHF>UcpI`BbV4!e&ejBGaep@$o~hm|}yR z|DeGHl7XLKpaU{MlX-(6;*uTc6+xh}Nl0TGG(iGt141fBcw-xs$Uyx{(84wNbO3l9 z1T-B0YTAK1QG!`Y^`Ipo(0utq7+f=gj_8=Zzd^uJ1Susm--CoT9TQ_Yf0F67(w@MfE!(4@1B@$WWcBCF91E-2z2NrsIA2Uo#$bP z%=0*cdfJeD0qVYj?^6)C0d8}FkEj8+xpoLBf^W;NST&@dv11~th+k;`tz1Rf`W3?hR2Jm8se(3meIdCUQ|QNeAf8G@kX0c}J5 z5Ss38%qLrqd`CS-=|xf!!E7_3#0Y4pSR~64bO;_~vItz~q9qZuMigifJ`s&5b|R`= z6t}=r$1+grKsv;Z)ek(61$G+V)Nv4`5u*{c7?xC)ASV?KP*MRmqLwf!ECyA%9NeHa zT!@A@))cY?l0xbUrI5v-Di+*$0#$8qK;zwz`>Cjt<@u2sQJ_Gae$SLoOc*r&3JE26 zqY0G7(c%xZFoTHr1CN#vXflDUh8H*R_&W@W2Ke1-;2sphW%!y*4?!9+;%_M|{+3~n zzh#UHOF{9+PE7nQBQpM$g5nR=(3FkwpN}PJ-~CF@thB$O7=$o9xgnl%R$X8=@h^ zZpH*Uq7$_t1Uh#K>p7jEWiPOrr5-XOi5MLO%^sjsF`(8F+Ljma^^=s{J_&0aWoKb+ z9YIdHhHSCG);fX~SD>T6IAJ@O2sV?Zf0)TDQV(k4fV&8|8b;tnFGy!wVQUz1Ku)Xz z%@ZNDi=KcQDe%LuK~unRAHa(%&_Y^JqXng5^arF7sbK{76QoU*2Og>d4a&fD!rMhn 
z2O0z%G1^75VC|yW80{iy(An7d8bw8h>LEm;oAM z0L=}HgHG}S7l|{(K}&rhb5tzg$u>~Fe!~YksuxrzfR_+3psi5@Ee>4(o<08|3|g)L z=_P?WMZ(~_2M{?IGO7+r)f|X^4O-5Hjp~E8NP>0(f;PFZI@XhMt{-S1A2N{x%D|vA z=|J5Z_*t?B;I<}cx(aekGQ1uF1&0L4YS5ZXl!GomAoXOxX&sbPK|@lYJ``w>mOl$| z&?Wd1R*bPdP^kjah2%I;%>*r~8zjKd06P4v?LmitqXDel3Cfd>2)ClwOkgKNox201 z5v^ttRGiBM%9tMn73U%H8=|ua?T0}7ilF=lY7u>q0Qc2F6A#m~fADZ~&V}Z?d7xwJ zHPS&v2WS^LWHf#aXsiviN0wEWVTCwor3=W61>&Gv)S&0(B96{x1vjEWW9gu=-v)4g zLtKglIVmg+67>(J+4G6kgO6?HfbJ**&C@|DFGybolAc#`trb`TUZ!vdbW$d06&PrV zw;0IdHw2(l-ry4~L37&RLyoqHK-Lv)5K-b*WM_5&T?htF_Tc^IklhL_3LFBBN#G5J z(8Jf@XRm?p_=FT2p!PHP%wy2i?*cc$c^rNQ8{`;Gh(b^fWD&T{3_8*34(Rk=@C}6; zOgE-qbmx(&KLFbM&S=Fj2ehG>%Zv$p_6WBb6L^C$C+Lhs(D*ZGzyi8$9kS~ld=4;p zg_{P`3(%B1sF;8hage$la`YQ|k(S281Zp^dn*E^dwkI~$qg?I*5(kaYGdUW9Rsew4 z1A|Ti)?^0XxCq)C58Y0d25vt>SAv4Nn&3fwq_AiJooo!+oyZEh@e?{C4xZ};Wl=HM zE#x^L(2k2s<5oyRI{u9hRGPwP-1SFFL*0dg%D$mbJ8rw2Om$yk8rkf4syU}AA( zfVu=c^#zI+=#d2omw>fjn7-5rJc)GCiBCcbJWmaquoFO@u!9_50`@w1e*~z-1vdb=6Owef{ z;3)zu_vifOggTs;0aWFHCec9!Cg|V^(6lOOEDBWHY!J*6_z%wMutWPmdHjPQcoj8x zjVEZ~1t>Lw=jUOIQ58CtBaDYc5GN>9QNx@Uya!uHVY;LXp9mx0bUhb7(|T|V3lvr$ zXBa{E&4VU4L40E?2JlK%7HHEK5pbX>A5QRg1IKxwP*WB_4>i!d2xNc@R8~QP4OFhe zf^CZkD0J%~>l8PLfKIc5E>?gHZ-P!JxdT7N1Js}cwdX;lGGsCjd`<>f7@nlT8bPO$ zffBkPY^D`_Oa@3HDAFMA00lQ_m7j!TeebI#(8kpt;7QpX5+FB0mPh>nwW&Z0RG|mz zf&yJ(J}Bpa5)CV4aRq29asgN?Y~B|XUqASu#W^IwKu!k%t;GQi`hjLb5b?#&3!6#h z07uyLkFI>0jC|7_ba^%E4XqeZlCm*!Qihb0&>#bOjf2?e0I#Zoc?2FEphIn-!+#qf z>8&0TSfJrs@bv`(sL=rm#Q98)e~yFZx51$U>Q)Otf&)GIg7->+q90T_4`3nc%VvA33|mgJ7nn- z$hjB5+ZjPMoLH6ux4;KBC9bs!ENkl^X)u5Va__wWs4#_`FvIBhX3jJL=+0_bnOnac zS{Ih{fLDZq)>Mfpz}g)@#FW?|tINKKDGGyZ{U8Ru*A`R#jrq9fmK1!jHv}g^O!L;NGh;8v6wT1^R=)U(-%#3M=S=uj$}rQK)hRohGeN<;axf2)WM0QPGM4 z+%ppaA8rUk0}(naKzEq1f=)aE1>h3!wl)o> z1rkc!ioDEx4ou+o8z^YF75JxjdhzMjgZ<0`@-qwM@D)~&Gr)B{JG3zcnvR8}MNqlT z37W(Pr67Sz;FZ2DAS>AwctI96NPxOFh}Ftj;5A2}eLccvOrWjLpab*46((qZGq(bd zK>b~C17Qa2{!n-dVFgXufVNb#I^Gb=QsP%&Q{Wbu3OY-K7rZli2S3Ov@Km+}L~;wr zWjtAq^&pqb5Le7=dco}zXB>9I>3Sr zU_tP9Vn^uY7-TmBe2PgBG#3WR`%DfB^Xr+GgHi=JC4{tsa#HeL=;eFthN zg0djE5A{LVi9ukh2&hZJ;lvOGUSjzIv^E+X-k=UKt0pr8tYiSS|3M9f51>`d^%_hs z1eCbeDlK4ASjni6rN99zeWA)gA^Jf;iPwy&LlktMw}2vei9P7bSm?5LHc+Bsg%)EE zxWW4}8Lb#Tf=AN9*NT4tO<{Zx$x`G8Wi0TjZLrH;@MYC2%x8i+><`pN4W=Ig;GzoD ze}RMtI5MEO0WJZpg5dy9oj^8&g1kLL1RN0!pav1lDT~1Oa@B$6!bCu29;mtk8N})c zS`W_Z2#VR(dT>JC12Pjbw=BY{FrSIroVi0#f!mQ0ynRlWp@$#50v*(~Vs!+q66bJa z5NHHYf#`XfF7Qfqt7@+ zP>Iczmj$#hl-2P9C{i3h@S%(qyx;?0J;n+euWA9+C!h)hwD1YGMFG-dWOZy1$P#!6 zPOLp3XMsn)I|LNL*O@ssh)$pH$ETS9+R!NQn+Y^9>Bs?HE5!~yv<;NYp-ZHY&ZWSz z3jkDx9dL^+uPJW>bg+kuLF_}U(D?+=`Ik?z$27ptKB1S(HK?Jv-ooDV>g zpbx~d9Q)=s3xMp(l>#S83DE9q@cAI1TnZ`*IY3o9>}ZPVyZ!n2^+9DKXc~z_fn9+U zv^EENUlk|pWD4-rgNPkP|32`FF-;JizVI!Nf-qug0D2<#0@3N7A9-b_Cj{`Zf)|#H zDR4o1&I>^EKx~km*BVT71QkUwpmCFGM-I^DVJ1*(5wz+Y zT&6IYF>D4g{{LsTX50W`F_D&#O5<8EMy70q zg3=yniHIX3_;_x8#y1cRERKv>^^nux7;8ZNGl(A0qJGf1f#B`OkcAR_S&n}g1eUQu zPe6C&W#Hyv2eqz1MJlMyQ()C(YT-9$2Cp#yU8e~3$^zIq+&!R@88iTWKm@crTo073 zK%?jNplzxG5PyIdaDr_;067%^bZ!8slK{F&5j3`=&j?<&3B6DW3NS zkZ}=37NiJX0F7YqU^KXC52^qmXO7UC`hfs15|}6=zrA7JxLr8qmg+^uPm)prVuk zdLhpN=;jvKE_2Y;JpA>#42+Hp?gEd&ZMXwMptguY2dtH^2O2tK1yzYCLw17DAv+%E zkex76D--0x38JtQMZqmgP~iZ|lM>KvDWF0g6s@3bDWRZv#T;rY=T<0J)B{Hz{5mY~ z2^sKV4DhfRc)+KDISX78^MIpAnZa=a-}HB(eEN)h(-qEfi*ay*13{5@x=k2g4I}UL zond^^^}9KhKzj&X71$g#vJ|)kc5^C;gZ5_>fjU6YP8OTv3T{w?1AOxk8|Ym072IY_ z8@LtN9Cv^vz#HM%K(jFyxE0ty$DN!2Nh(<}Jb?0VfM_-irXAd7Ob0-68@R!}uX;8m z0R=Y40}vjYl3kp*;I z1A`)$f&l282|&zTsA?=i&tke*h_P90`QCNY^v4JN`L0kc*tb+%n%#lHn6Qrbt zM^P3;H}HTrg>o@FC~zqVxUqn0P61Fqj?+<5QN|JE*m^b%raRn<3ScFIZoG^NY>o?f 
zvLHdCq^iIL=_zn32xoyzT)+d$7i^9zc(N2Al3O?xI2HJ_lvF{|D|nQYKnj#Vodp&L z1y#_2vJ8$&iZY;0-P{VCpbD2$feVz6xWM^n`h^HSmih@`8#i!+&TB%maRSuF89bn^ zgdjBKrh4D0~}u!55lWZ@IGoAt2&ZYoPPba1qM)Aqk z{{VRe)H?_H?EqZ+2VPJ$4GOCj+*wfFM?kt+Ai58Lx3WQ_9)6S!C_f6DFKd2xDxfCtG)iWueWjH2K#3))ZctFD3L4nOt$%-KY!V6Gfb9}%6DmFlV zbjVf!`A-4d_hi#&Y+wX$U}jg~0c~et1{t(~5fofM7?66QY>qR)f*(MF(|e-%l=wi? z|2&``CTN=)|MWf4eC~=(d~ib!Fv4uSzyJzV(2jY5`_olo_*5C^Pxp%9bGDxk@1e3e zE?@#3I|Du@n+-H_(ZQ_1<~V~n3lwsoePoUwx~B<1l9;rTB>0%&>1Sg2oVXwd4zuYq zE}4ElmQQ}Vc`V;V#&gq8#`5Vf%1-|l%a_db=lJyKIKE3vy|1S0$MdO3gE|5$SQOYC zH?SbPk=wEN)%3i0K5IizfH7wYoZ(Vn(`P)vqQnV4GJq50H-2vLm?Efn(PcQr0y;B@ zO=14@EAf0mAoDd6_*R4Hs|kEQj7z2~CGr`EFX03kbA}Z;oWZ-Rxk33!gXsjT5>J*A zuP^xauM4bM3cPET7BYc7{(u=&6D(i@8E}PF33SMh0yL(0rZ<`kh_Zs>^2PLZiF{(( z4|w4xm$5l+02d^ne1tUT3t!7R{e2>zcs(a5vM2C@BKrUjJa0I5fJJujfTABX-T=-m z8+go_L0K8Jnh=z0K$#kJRJ0k>4Nzv_Fk=E&G$*(rTQ)!@-{8$s;uUBEl?R~91i?bw zkekSO6nF*N>Y*|pKr$b=vmB2z2vmV~lrcGg@@E4dsFs26=>Sbmnlm?mau+C{LCSr| z!c|c1z^lZ;%K*~$ffrOEv4PG*?BG*?>H;y?92@wuKm(*;2XmP*)x%vp@oWpyh7Jx; zv^d`2%@Sy5gsiXNP+)`j*Nh2NeS+#P$IT!;@LRGt6hIEcp$G24-5@>38KASvBD@@+ za;$?7l$bP_8u%1BcsM`{8^n3QRr>{2CD8Hh(|;xNDb_#W26qb{u!7PiYZmAL!W})+ z1QbDaHn_O}Sv|l88dd-~7AywU^a7;m1#1?!k^rbX#mLQ6uK+%;f(v<^h0XB;YnBoZ zc+8#+bh`luXjp^?l=S|v)`QcY6g=^PqO5@plzTWpyTaJ?8GG22c)(?M16vliB98(i z=xhdI1s+`n4p0jKWTGMu=u9y-1s+W%&;>^3%oEtaE&;n0>VD9O51Zo*HiS#S#YeqB z7bhr)^%8!Is6XgmA$H22e+o4OHNQodCKIc?alh1ZZHu zhIQB+4}i^s7ME;}6WHoOnM5Ee1Hu)sB-~g4%jOQb8f)R0r`T> zaR(d790iy;PeA6df>P}bHqiCV+~&+LK#})>4RllmFRbOmro>VYE`h+4lib{(RP%*R zQ5byY1f(+jz^23ltM|a;vD|Jf4vq{8yb9c)eLySoYo8tvG&>mF|&_Dy5KI05_B}vc-54(aSsO;ngrJ4oopj5*G8pQ_{+U(#7Sat=l z*T4!^fE9qk9#s0VX)rBdcidCIahd>VZz7vM;~GpY8z5R7!B=&$X)vu|cRYNpOTdvq z-~l+kc3@}$h4}_{bLIo=pauZg!w0}5$WAWMIss74kI+`n$O1Yi2hs=tUm^gl#-TkS z=z$sFMhCAOFQX$gM7be_I@o9p(10_C0v9AiZ-7IL9~{J30`vh`n$7VABtVU!A^L({ zi60W8{O}O{0QNmBM8CitJzXw?Pu2qzCeV=k1DAs9ZQuaK1`|BwexQY12L~*ErZ;Es z8Tf;i2Z3rP4W=0!pw=d8D1cg)(5M8ZQcy4L0;qxl-C_qSLxs(lR)A<8Go~dVT5vj3 zCZCh=4OUQs`@oR}I!Fo9_nRJ`$>%QyX-;s0R&9b?C*bwBJk$4N@+sEyIC3DWEJiDa z9!}7_3;3|322N-pW&kp`finv)AoQj}&>IKx9C*aO}gJvhF!qZ@S z0orQA2^v{iz{Cuij#F5`6v*PBz^%Yr4b0?rluD_hML&)8K-b5fu_vZ6~H^FxxnSw1TIBTN|}MI1vKWwrq4KsO9_;X*cG@z zK?qSkqn=928~dOVnFUimD z>DHjzHJ`}|F%re*XaG8d2h>0i02MP_3T%!S7$752(+zU?H0w8jgm!@JfQ-N_1dlec z=`-%(f@LI-C%`SQ15mevI^!U12=^bsP^`gpfD61H=L8odg>B%o#)Lvx)5(-SUGp3z`>z@@|m?QhNpb-+LwfeTunf>H!8a*kL48F~dR z&th}@0P-FrJYXVhWY#K~I zKzbaqp=EvxH%j2K)B%Afz1(At#B#ufI89ap!$9Rw>c97_~2FW4rn&V z3B0fx1iTQF4b<}i9oY%}xq!3iFv-TFgQFw%Hsf@MWnmfR=oYDIO+nZfpAzGw=@U!%BpD}7UtPi{DLsi@kr_07%mg~6kHL&7L4gS} zmLxD~`hAGHpCx?tjFYBUmh$N`9-F?rluw><()8n{eBq4Wr^}b|l}U@_fU+6LE8uL# z?D&E)3p4;OAaHE@hBCe+#);E~%lW(+ySB%d^9eFB&e&d9$(PBcIzCCJ2P>xkuH%zoTs&Q(o-dwp$@GePz8JfzUZ7dETF?~iBfd?Gi3XB4mrq5~MO9z?9X~eo=`qoB1C$`ViIt2vU zrvGi^vt&9lbGk(np99<386dIkbxnN6ER5@>Z))R9VO&35yqzzDal`b6cD~QD8`+iE z!4(Z?+R5<;OO}EFcn!i0hAe?i(f+nOxMjO~ zH=i#fgeyCJQV-uk#x2`TdierDVr_kVyBN1@ckJiWVdUJ(4!-T}#I@;V6ZkZk8(yxO zzHkCxDC4&2-zV@{GH#!4IFZkbamVzUiG1daJEw1%$fv=$Yx>=Ze3p#6r;ASl3;Ivu z^I+UFeFlVg0YvTH&M_I}f~nKPrtry$>|+O=l*b6(SMIojF-w6{VE^=vDSS$d2d1x{ z!sp6#;@b32Q}|37r%ujfMzhkrX8H#GL;|8cZvg%$Ptc z(l#)GiggwRPJyG-mrUnd#CUAF`wTug#^ck|X7Kqj9+|#%2461Y{^`my`7#*yPw$z@ z=g4?u`nj2Wb&N-*`_AIa(m27+t-zqb4r<)6Ixb+%Qeoh3RAb_(XRK3ZaBSerR$@^B ztuj`a9@r-!Dl4JD;wX_Nu$3Kjl@zEXVlZRU0DFhUQDeH`Y(7m^0|gdGgX#9O`NCy* zv-!9gnV4Bv+1NQaxwv`2%K$+6g~jo~_D!?-I2pw(6j&S`vXxjBSR5^~6xamzvMI0% z?45pb4xg!vhXRWu=;CZPfs<^CJPIsO`vMeL94|~)oXaPy!lS^rwgEI8#{wF|i2^Bc zWG=C0j8I^41f{gSJc^9d16J}$^McON0c8Lt1!jSr(;MdUnVCH}IRknws0NP!)M*(C 
zERG*QL$i)A7(mWx;8bAIW$0s6Vs~T$4IcFTnEq}qpV)L4V|E!i(2y|Xa!W=>27w(s z3M~4JD;Sknd4&{M92ZQtoyVsx1sd^XM)b;96d28zHZUr%C@@cFT*4|`ufXECfiX*g z399`9NV}i{i!Q?%(DXe6s7N@$2#RY4$LUL(1VAM{i{lMYSpn9^Ca{Y~fx%HA%MrBn zN`V1<%()p8j{*b8F*g{k895YK93L=dLE0)%<)9PU^%);9S~K!7C@?56fhJA`cJuHE zPJh3WPp%#m7`hBSjEdj`-Fg`nnINH}$jamZGPaFTi360NS@an@z@<89xPt|p+}Ru% z6*wH3O9bAsgH*nOdIuz@$ja@g?x>-_DD9}~sHVUut-vI$$i(EJz`#_m!0re-hRzYZ z-jkQXki>Zeblxv4ygQg6;jPQi!UU>_STvX# zn7F|KroaRWEe1!OEKuSAc@&gbK~c>BlG0EBOMw;y^6*2We*(y-Gr-nNU;@SeGd2Yl z1s07!CeZ%(1t8f4U~y=s2FZeEAz&>*n|sUat2eDRXPfEl9zO*H~pj$kPU$F+x=1z?FxpYa0|B$fSO%7UFi1x}QZRQ7|(nh_NC zObU#SOai;7A6&?%!UXew2Q%3JQ4P!rERLXk6D*DmpzR3o>@cGVw1&j1#=d-908pl#wPF;bmq?vi1|DlK<0xk6J&ARFui{fpI96ux1InQ&j$*L zBh0S6;1x{=Ku2xBikb@`eW2SDSsX7ggVMtaWh|hRiWHc288{r7z`0z5X$FfTE2vnS!UA%g2Gax<@FJ-L(+!vKiCUgu z0hMi_h8pt@7EnPAcFYbid4eU&vBnW}!Wvr^I7NC&E5QoB8`JBT@QDXJ0O@%Eatoy1 z1g+U|{J@e0J_H|>ommuE&6s{5EO`Oa(7*}`p%-9FKurypp&hK#pDp3Ds-FO=UM7GB z)F3;C!DYDun*yW2bao|HaOkr-Du8NIW&uP_td6Yipt#vG{p(UbJ)RvPb>Ja%fn(G4m+`4G zonV{3z?fY;>i|dwH1^B_8M=pOsvB$&$AZUEL3arm%yn77eBe(`}aXX>lV8qzlsvm-C5u3S=oTJF*lB zoB}Ne0;fa)kWHX=JtMP&0;t+#fpis^e}LTng9DTg-ardwP=G^3_Dw&xoKHz4-Lbyd zQNB=#(Td?5Xq6J96~o!-%q#dby*j^vN;yzY2My0doDa@{%%G}m0XrzSF@kzsjNA$g zj;3bd%TPG76c`j31@`cOvIqwxhww}Yp$9-}PJm+&G!X#0B4;XS2^5P4(+PGnrVAkH3!q_qNM;4i`?5IR0FS(b)E;FA zmmHwtLYLtNICb1)_n&UFn$L;p1tig!JOG&i8mwn=d;qoxWF5BvqAVt z6xqRJ8Z3?-kYJQ*0EOBFaCm_S?BU6K#`GVn`NHLPfTDT_IBy|&@Bk~=g9oPPuHkcx zIRR1wnq6jbJi!V&a-0n`4*{z2nJ%!JG2H+)c(_5`Qdow40MhUPsubSTX8{kZu)#tB zlw>}zf+Ji01xUjWNcjXNLAG&0{nx-Y-EJ+P661vFiEH`9q&h$ub_O`hcYw1RtdX-| z`oy(-woDD2(=V*$GZb%l*9C64Dlqwjt~miWYXuJROuH%4uzkilzSB&Mho+}&{!}GxlN}R9#IgUZoh+1 zld)rZ<_3OM?c&?dxM6zPZob)!o2Cox;WJ=t zo^JR`N_cwM9zH(zgFK)F25k?3no1H`N}wWI095ocgW?kuK+FQ~n3Wh|nVrdsfl&dp z-i=X#RT?5Vecv8FTgC&^|LoxlXFN7NWG~+Y#u?L}?&Y&!jGErxD6h$QYPzf@zXZR8 zKqoV(*9^KNmr3CC^!$B%v5cM5&+g-sW;)G2o$-oj6yurcF;_$t7|%?vy&~$xcy{{6 z{a`ix2l(t5&rdg0;&))YFunf(p9|9k_USv6_&pg~@ySZho74ch zs54E86XF$6uycSqSpt`)>mTP+;(*Rd-kTnIoKKMx#$%r@d6!R~t3m<18k|eu_VhK! 
z`6QSQUz>jHIG?`6o1+~9ijdQ%6hO0H3fzty*`T|5c~9_JfW|4@Pw?q7-k)B2f-jKi z0sHjc2Ylkw@1Nik6MV{id)YxdR~a2|fM!&$PS-!l=fQNB zeY)L4J~5EjCZ6Q;)4#*6#02&Ms660M;LH$s$gaTY$eyLZS*XbA$f(E#zMv39uz(!G zcZ$!I@y_&+Q+$dRJ5E5lnu?%S@ERzG)sX?Iqrxo;PW+BXezXfX@;Pq#(Jmm+HGTal zJ`2WM)BB$A1@mnKJCXt7$eYvOKjG7ecmVHLFoPxpnZZp50|j;kc1MLQf$i*|ky~a9 z1$IY+ECqH)i!6aha1BhLib{b=ff=S29J4UfPoCye7e<)Q1S*6TvIO={XFkJc$oOEo z=@~wKrZdy0r=8(bWxP7Q=M0~Y^c}d_potRDp%;t_?2a9bS&l8!FQ4IaVFL&6&FTN1 z@u_lUD6oV2*p6qWPxm>?r^xjD%k=!SeCo{e|F=w^d6rKZ6ln{e^NGPU?S0NCJ^kxh zz7nvCi+33%r#Gx;OrHMY9G_$T-SbTXicCC~+>YFe%#PO}JPU3|Rz+sV9cLOqa^~EQ z%!!*uRzQ*;&$XyWOiH+ zG0%|Okw=l)@dU`cdL|wNh|+x!WAwQlg%p_`S3`Ju5JSJc23xJm?Z~Fc?6~n<3&?sM zh?Cbqc-q{KuOL=yK~&v;2iBtrapW-wPlMZ$U6I*w+k3E_I=5pz$iVFot{S%^ha$7% zqz_<8Rfy8b5S|J|?#@TBoH9i2E`+B9k^2u(sR)s4fXFF8te*z4ULK-yIfN(2?Z{ED z$n3atQ5(oVvJlBvi@`h@h%tZngL%>r-q)?|AeB-O-qS8HPZGlWbOX$jfSB?bVv0Do zqktl_<7`M6i9uBU-vf?vQBb(oGdn&!1oo2%MCtBR;J_8;b`(=&cANp>2|=Ra=PIyD zK}Z0vSphaifZLH@k=b$4bFdsgB&OeO1g8Q%ZbvRfX2(qsIbKLu{DsKzfK}Ez9)U=5 zgIvSx*tZF+k&D|=RFTe0 zK?gP}GV!oLY`6^3#|%k?a&sVtGC_iE-ZZfFjF7N6(Ldes5}ys@gXwjb`1G0Xuupd^ zk&$AWdSv?POMH={x7k5cgba?H;1#?qj-239wD2;YgaT-dBxojg8LR|n2A6-lOrW+R zsE8}P%%>o=3M9)dkfsEe1JJK%+y4;VmGKFr+vr+>M^r@*vk{dBRbeA2t5~i9?j#oWAQC zpAyrjL(}hFFU?{eDr>R5;rr%y$Xzu%!Q7O*?de4jNtfYay$!4 z-^`#TOB{+!j#H;Eyv`>o0+N7vW4tvzXD_4N^ock5G(ci=b}%Y~=z}{LWvBnW$tR-= zHT>c9P60;&$Me%W1q8Z9VKvwXkSLeq6R7B+>0Y<^Oc`%YZ@2|+WW2b=r^mQ$y7+BA zE5_&3gKqQbF`FR)9tgIUH9oWjRir zUVn#Ait)kpS$FtkWuQ%sh0IEzb<>a&R24WJ8yK?$9!$S{hfj&=Chzp9<9xC#7Fhx} zr%T-B(~`aozE@a9ksDNTD=6|Pa4CY8O0zj0zBWDfE}y;<&X$IN0tcva6L>Ox-CaHd z9&pnNCi3bopAIPLN!;U8VLUS3_8wm{-qG6`+TlO*Ljto ztt(LK;sA}53QXpJi#gVV>v)K9rrSP*#K**wd`i{#s zWIHl@nt;Ha>0cl5sfyg?QDov_15IEsf+trT83a~M*L%!oEW3&WTv#eFgT>95EWoWD zW=D%G$2D`N*FEOb5uOZE!o;n>?8t7$6r;fGxM{`oy^r}cq$YE~Y-dzpR$wq=s!(8d z{K1grxNiFY$9!^(lc&o);ZtY4H{IT-L z$SAOo88#ry1a6%PT;f3z08PhsfOLQwPp=M5uYJlV#|LeD!y<6yQ$9<^z0)5*nJr?bE0^ALZ^0}A9077#fB8o3Mt zPo}56-6`p`K%d_P1ku!kb^!|5z(mI!aK_#Ilak%obffgn)DKLYQI}<3WKRVw8 z)o(RlREKl(9++kM^F@srUr)Dx&!@~eZ6>&b_KIiv`T3%f+~9HyG$}6dW_mwJel}cQ zeu1bT?;X(6H6}+;1@?w#des6^%jy0f__&yt>}{D2Zk7wI5|y4l;RBy?{hI&aW|$ba z<6lUtR+JmuIDPpG+~^X4G|5}owklZV@J?qP7-h#TUNiD$t)E=coh*Jdz} z6C!ux3D`0YZpZsO!Q$)?N9>2FV&isXQe<|#z5^`B3USiCCa{NDAS!kK9R+ilAqIYd z@R%SQ^i;nQYZKRxmb zpC03y=^bDAR2kPyUk9RAO~3Mm&y#WPbh)p5vW)Yl+kE9yV4Oca_A8$v35 zzMWqDgHPE?V4Vo4>B0mm(IOO>Kog`2i~=Xw1XhcHHX$;sRbXE0q|nv^;xln8Aowe% z=YHce=UN0-#@}PKY#Fr zFfuNi?)M8_$@1wJe(_Z(&jGuJW%<6{Alq5?^Du!Y;RKe5fR?GSD6k4FpPv1juUz0Q zj{>s-vnI0uh>#FiG5ynTK0U?_(-r^l=`pUI9`J`xjHmBmgMg#0fHniSW8e0oKYX7U z85c}X{l}-sxMF+zKfafYOdI;9H!|^yP5*aE)P!k6-_(8lX^_znCQv!R$nOtVAv0N& zWBP1HepQgjAx8dWggRX&{veQOBNM+nh(5){pO27rWajT;oHYG{fRv%oOVAn-CT;}| zM~*Bd(46*_>HIAGix}Tc-zF$!D)r)62Pk7MWJWY_LB$G_z?bQato)%&t2m}d3rQ(X zuVm%-V_ZA^w2)NxbYnJtA*Lrh)4hbHRGB(9PA?ReikW_jjbDQ4EBo|k!cwLp2iO#t z9YHhBtP0GID?pRP0`I5mi%4lOuALqsBBeFGft_E7@yYbb?EE%N?~YBs$j%?Y)Xy

;ptu+{ECd1rsr_*OEUEyp5D&E@6EV&`ehD&6OetJoczY@pz&XBf%ns$Ir%}R z=5X@cFg}^Sl9S(-@x%1zocs!m{nI(P`0b?L!F>o>PRR?J5n)u|6zHFxz{M}abhmwa z9T&ee9{ijpAn<V(Cuml zjsn?y%nYClLmU|uL7f0lEzIDm$UOZSNaK^~0fPLVN_#;&F+iIWK(iF!ogM4~lQfN2((cK8UhzUYb-A?W;t$}K3!Cl-xj7nO_V={@zM0t zqWty&CqX?MP9z7wf*WBVxYr0C5}N_)lY*wiyFj`)Fm!>O9VG^K_C+y%8Kx%(r@t5D z7iWAoon0IzD$M-@WD01S`_$p>bHw>gSy{J;a-V0Mt|-eUxBZI@e*mK>>S`;2%fg^l zRof%w_zyFJ29Oo_r?5`}FO66+-EohgHPq$O%my(5!F)w7s9AtjLGkxMgKB?*b%KSUcwy`TRfZEY! zOacnbpdK-U1`~?{v!g(kz&6m3q`*pkMOIKNoEbDN&7lBV?jo>#dYuZt4CD6cvsL&V zM8PUpxfPIA?3n&RgVp?5usO;Xx)<@XLbgkR=L517nG`q# zR`G+jR)Ur;gDNtxVIU_7WC`qIhqzD!;xvscfnC$ztMZFW?S?xZwB7?U@C)*U!0zb^ zYWxw5d!|>a@yklX28AJhVT8JrL4jFd@AQpo{Mw9rr{7iM_h(!=U00nyhjHcfS?c@| zj0dKFR_Ff@n;*CUo*xiEogetjGreFRpXBs7O@1#>jnT-&FE{;!Cch?#bBc*ydODvL z{~N}Q(|>F6n=)?NZm7*4%gDHKdcO|8p}=MiP*up_sFDTBTLPQ6U(n${z{t35`b0hc zXN(J{&v%uQp5EXk#loh*=qM(zefnH|{&L10(?t#VgBW*A&oJQkWZW@*lL5aH_ZAKX zCVj>TC1w_f>4jcW!qYz)@EbGkn675XU&45L`fNjfX~r|tw;S>+Fn*hU!;s&AamRE{ zBmN%7Z_}q4@f$Mkn0~^DUxxA9^ruGrMvObAOBnO3Fn*hEYs_!PxMO;WF~2I~_vy2Y z!AiCp^E)!`nEun4UtVlG2dI!e!mPx?E2O~ecmlM#LtyuGJrjO8ZHU|znA{7n+z%d* zhBq)l(DDgJfuGaMP52cVe@&lh!Y|FZcluTn{!GRl)8$S1)fkUVcQfTTXZ$|B&Xiw_ zaohB%ru=%0zo#ED<#&?Z!2xPBG_WY~f)1jDTz&?rAsGdZPFFVLuV&maeZCpL1mo7} zJIwe?8UIYzGv}9K{5jp*oZpf0*YsX~zKPA|0J*Jj)@eXa$+ z595yMuPpc#7>`Wnw&Vw`TMV=0kA;Tc8YU$cUQo^guMQH}J^iF5znljo{Pw`)4lrdY zFbY7z?+8rr1X%DNI8e_(1vQvXFe!2>FoO>GQ~(98z7;rd1FgWG&9&msW85+QjupQd z%R2O=nM%WuHAcY1&=zdYlC=>@j@$~L>eA^3+;3AE^hS%c{Zqaw&y&}uJ6 zfd*bsa%cg$oLzw#w75$V)VS#Yd#jOm`gvP^TgKhfdF}W!#CF018&uSQ22vCl1%6HM zw&Pb}JTrZR9XM5fvg3DU+&kUEp1+cD=k!hX{24}@IFuM1trZ|sZPMI4kcpU63<5o( z+(C>?^~wy60@+H;Dh%MF4K%wy-NykOR0~X{6sLDO@QX9G@lIdl!0*JkbNW*U{TspTy5U{frZT8sm=X z#?JiKj60_1JM(KX9spS($t)(YcRJXT>(2b?OijGg9bEVo823+4bm6yR+%bKQ3%@2v z^tcPZJmdc9&t3Rk7jh5bH@v`UhTj|PoEUF@H<0~HyutQ=@aA`7+&$gc2W(EP55GFdoHif+ z5XSw}Z~O3TF&>`I=F6|kxM{kHFMj~zj_IwwU{f#nf=y-j1DhJ+2QCkn`+>vmgde{w z9OWBO%(epkjF(o$+P+)ZY#~^S5T&9XBG4p~)nFJI;_b`HvHWk}C`=!vS5%4_-FK2$f@SoUn2F+faTJ#@*AE!oc~>Jq%nm)P?Z}GM<=z z0Yq(|t`yE644TUi=ikM+f4X4=e=y_1>8%m`<&2xA|Bc{JVB9i2E|TAf@yPUvk^C}D zoxIc6NAgPxLW&3;CD3w3Mg>-ZlhbcU@=Mu33I`E{FgSyOwiJMd^)#3S6nPYw9l4J;=C@!x zIb9=${}$t^>CCbGnv5r>>&Ei`)H($kO9F2_V*ssu18;r+8w_^^BdF2=1xY|0zqVjI zFSNn|%~CT9bns5^iQ{(@Z|9Zb=T=}60G%?D<=D(<#lWDzByf28%Q*g1jN7MgiRZUw z+&}$8JiiL#uIZu){K`xXywj}`_?4I%d8fxE@Y^vSnLamxUz2h7^y3Nq28_F>|485u zXWTJ8AQ4>FPfp~Y4=TZu_#cAWAIbbaj7O#?CG#usu9-AVz|p|*!^CL<0=uVAOyaB%`eKk0xs>i zX4CYdZ2nTl%hO+E^Sd)%ooIXb*QiaNSF~M3{G}8 zwtvXsS7rt^_P{N_*g{ClFRp<9J~qMb?ZQR;x{Qpy(;bWXC*bzt5!_Ou(@&N1Yccjt z|60nQ$2fI*N*TW|qF%$li0RRx>8ES>6By4<*Qw=~Wn4eqtCqiuX+_iYYqk8gOe>nU zi`Mb$F)~h{?o!VmD|O$3Ne;4)U;=Bl61xiMNIeTs8)e4y-SzxwOfz|>Kk%29W#)6d zFx|OHOo1r7yffmzdcHt_EepK%-BUmZO0JpMs3Qyy*tb{Em{dc|o-| zv~YngsF=W*B`|+_S2MpN`v zWo`U=OpiBBzt+aThVjdEo?iYakO$iN;~1Atx9#BfXPiB~w}ZbxWEn4LeFZb<>>YN{ z<;qGt0?VfhcJc=>&Yqss$?wLtVDmHqNAc+ocYtb9n9a+>1zI2SgBcod3epOk0&~DR z|1c}@Ot+uH@5VTLdT(@*!n^)k+$p5DuE&D6XFRdl;UAHNx+{7PO&21O>5lzSRA>s`CRy>8&2dGoxZA{UvB%!etus@DX@A$xOxS+TF2=I z6ZuW1r%vQgHdw{0AjZqcS5t~8lH zk!2>Y64Ug+8T{TN;6%d&N?M?8{f--$vIJI7e>RzaKI81^ZBzJPGOn3kFqPkrarX56 zQ~A}{E-+-7F)>Ux?BzFQoIPD}8h<#`<}K5!rty0-ewlu38h?N?*g&vj!CRLnus~g_ zAnmwj-82CO34yiKEvJK1OXqZc8KlHN1Ev%l2Al%(rXQFNPBelu_{A9^o}F$#gP)sm z!F2B#FmH;1+|B{oBL(uN1jL)R)8EhF7v*nQKTQC<9vqaneyp1=Jd@uBG;BK)Z2f|n zDApgqu>Sf?{s5%F2m6Nu>K#bnE3z-(75L1kBn}QtNVtomI7k@cpgy)E$EOK6nt&3$ z4K(pG&Yu2tHve-_k#}znzbA-h^kh?>?lYHPoAK-Rin;tUj76@4F$7((Q z-+>zZh6>Eki5G}n5g@x_K>Dnf^II}?HB2vE&L69F3FI|pB*P69n4x=2A^H+P`cgpp z{($uTXqs-mf?(eJrunO<`>o;+ 
z*Ln(yQE4P|v>;RHaC0g^=G1`9IlqeElBv0II@fCcSl!mfP65!7SBN~NslW_czya6T zzzEXW0uJgatNAUN&Nog!znVXm>3{Qdy*2!jf{^g(04wVOD~njeZ^^W{W%}eb{INRA zK_RP#-a61X0}bgzm7ka=~&Bj zyY+BStpF=q16G!?p5Kz`Q_J+l>-qH=?@hnFp5K6J(uwJ;8~F8^4jrFv2BKFSpPmk) zCmo+Y14REkKK1<@VLr{4q7yXH*i z-^{Pi*fiaKGrs}T(O=UGLEHz^7lOEZ_f5YH;`UEx-okId)cAP1=@x!{rafP$r-A4b z6Q@t#!mrP`Zu;>p{1HskUQbut%C7;MB=z3PZ^^Xk_4Kx_{Q69-jnlVpuw`AV=zh(M|ZT!iM)24@P z=Qk2x*x4!IsE8DFp#IsBnbY5I=NF&8Wjnu;oZMQ zIz1Xhe_ArV2SneTGkq_Jo9cn7 z`!Y_O{$eM;zT|B5G6;055!mqtyZ9}|S71tmz1^^j-;!x-+w|SL_`M`|wl@hlB9~*J z1=?UUd4$J~G*+47e^ch8z*uVn?Y@iC65p?RTN*3rCI>@EwOfsN*$U%z&SwZI; znt?7VY+x~CG*STXmb7LBt-)h3W71Jz1?74M(Cif}XrnM_)1?BdW4#3UNFi2zMjH%! z7_1n4Ku1VhF}NtOf;tlp3apNxiyT2=1X(B@0NOff0X~@>bdn>~P|(T-$c++n7?nU{ zE0FEcprLs1(b6**&6pN|PD5D1=mS~srvN(t0lXV{tw1yQT!b}@h`|#UkWP@lN!vPSV8N8#I6*Tx3V#c%uB=rMSwtzM$u!6F+0K_+c7{LP%3<}_u zCMYgIj`_i8#?$~lrx-M(3W@+G@X;F#pcC~wn9P|R89=c%feF-4e8|Ma0_o8?WI+c+ zLEGjWC9lcNBLuLM~W<0vqFLLY}{JqM`8yodxsXn7CX(8^r0*6GK8=H?(?kX8n3MmbQ2TLg5Xfg%ei zp@6o8Fl#VzD6)Zg5=u-g_4Nu&Ypa$!GL^78{$b=+1g(2#10_6W(D&pMS|qva2)`8n3%CkK&=D8YA0OeLF9FvE8t!3GV047(oOP7ni*eWXr$_lq7?}<~ zogRIh-<0Y3)9I6r^Xo8f-oF1he-$Is?t{~&*qg)m! zTSFsB#*u;BiUGV?)tm`5l@HmU4GLHpYX(s7npuNMM2X3oQLJ8p0c5;@5@@#5ia`!E z{pBc7X3Z!8>Vtz>A_@$S{Gc^fa-e7jjc0+p&spZkSZKu{3)axUROo8OAOmJ|K_sNX z9PTpk>10w042}#=pwMJ0bF?p9+qPVR(eV#sq2nJeMmK55bf?1fytDiYTt68h^8gnZ zrqAdRRGq%(EI+3i3;3o7P*I}91o1nQ<9P;97=R{6LAjU#9AHcWkObr4DD1||G+pK# zzpOM1XwM)=mgAo}klqm!Xq(g*FlYK4AwdaF7RYwsdb6nuIi#i=EAk6*GJ_UfsW5PF zn;v$KKZtST^p)rM<<;z%L=+f7JKR~o3%M1T1nzSp&fZ`Hl>$rx52k-S$DhVHdwRrq zeoeVPP6Y-9PSD;E$Uqn;q||0nU{zof=$}6SJijvI%;|^E^PlB>$im11D&P-HpLu~l zl(B#M`wRRfj9t@HF7n$lwoYGkk>84O%JjPz`5ny~z(=|+V08TVAGDcYff0K83X9_f zhHOXBrnvJA-qK3Up!jl}4;pn-U~^nCJ?av_b^U3Oi`X=oS1^Lc&p;>lDu8YWM@-;v zfzIxOoCq${4)Q{46gKdj8k6I92FS{C7J(^H!*+lS106bafDu%9NrLBfK>6A6!>0}b zM=k?DXWz{95&A!6MLYMj(Iqvw$zVNBAuZ zcp~T{-G~MtDzq%yE0??Kp(3qAI52zq_P~sMNHofo) zzc=H?>3gs6OENy2e*Fr+w-8)0XcHN;0=K}%=_XhCRT=3tOy&I_hhI$F?yR*}GUFDZ$J9A>1fI!3ayI1+mn3_1JuU{h| zK3(S;za(g_sQWd3O~&8T3$O88GESSm`WnCF^c5HQg+a#)b4L{>*ZhvwGQC1AjkW=cd89|3ZGng^iD6lCga)Jv3 zCy*;Zdx1gahl>Im_~a3G1ulW#9Md;m=htKGn*QKAzlX?DPRKYR*r|>uKpV!EO}Dwh zufaHFdg=}Sg-imi)46Z*8#5l7ZhwDfmdf{#U-HfNU>)zq#WMtgD-TW@UBqQV2>4Eq7H6?epI1Fn9Xo zd;AWJHq)=%MJ&he1noaxo|?T!!mwHX<^rsqE5H)K3Cef}eU z4Ulh6KH_&r^^HJ^I6IGHJ7_qT1Jur*GTrVmf0V!l22WlF@QFi;psl(WrmuL+uO<3| z!JQYpOo>5*NkoAiTo}BV{`@h2lvo3wA`2+{f#M%jPAjk~unTle4}Ze1qB0+}e;d?v z0Hp&34gqk=1Q`!L)sk6(U7&$)`ur#Sri}g5uRh@~W1KPF=PAD-_GyYWl!wlJctW1oI+}sSH(Mu&aUKRy*$6E|;ysQfBj*X0Nyr8Xd z-xxv1rL#M>PoMjYUzYLq^xe<+WfdUYSSU{_#MU>5kt3u=QfIPPHtwGtpz5VIqw3KEz;{ogx& zL&n+D4c_xBGR~bI^qyad526B8gL_L0ES%o-p8qG~h3PdP_){1!On>-+-;%L$y7Wi> zD8_};%Rcg(F`k>g_9MS4IGESWC|C!&6antm+&-^Zov$mi9%&*MI*fah8 z7yf3IM$jb)j>e!?3Zn*-iQ}rPtFBgo8VEMh3S3!AT#l+{Ow-?e<(HYh=_`K& zfB4lH7frwOhhLd-|FpmSdW?Ig>;L7KXIwVj|1ZCxsi z(?5P;cE~+LQ$QjT(@XyGOEB)A-usW=jeRdK$c!Z*3GwM4|M5#QE}PE%pI?jRB!j^0 z>6ZWb)dj&8gAR-Voq+@vF8t5Gm~q-vQ2}+v)~&Vz3mHYAMGtIJ4|Kf4oavt!1q|7G zmb3^6^iS7h67XQ$w7rN)U=kzSuiZ@oj%?FEgo_w5Hf^_I5%6GS+_$}zRe+C?=ejZz zXFU`6U@ea6^VtNf7#B^y#U>D=_-uQ#fWQSVNJR{uTY}uws=y8kc5Z=&>EY}G7WJSL zpad>(DexA8`c0q{bRE}#&JY7ni79ZIF|7d2n{k^lEdkN2;BG1;mO;DQ7BFTBYzMcR zz%v*p7(k=w5?KnI0u7**OY9m<9s-*{hr6&legVr}VE}bQAcnJ=F=f;%uq&`@GWnP@ z<$zfFj0x6^4IrD?HJKyKnJYkox(rF+>a+<|8!%ciw1MSy8B)RR@>VD>15GfG3(j6D=K#W=s=6I=~%w1<)p%37`ef z3e2E0J{c6)HJN9Cntwcy85Ks5Up1h9W!Ggm#|S#lh8cW=Bs5gPfj@mJr+`X*J11yr z?g}Gvi;SBaWbFk;Rs|N&ZHjNWK&Pf@GT(q%0_r3qq7l>`V|V<65;;FW-sLu9`U0X^ z&6qyGnzW$E0d4GpL=Go6=r}A;W&k%a6hQNmEFhr=pynB<`3gE=M*!l+Cn# 
z1F#zf-a=jY0_;NYya{;K6xal~q2N_kU?CO-W~iZYpygJeRv)A-3JO6DrUoXE&q3t` zyFTL-Cgh+2nGFh@4kjfQGo~3#kR4A8Kyd;(5sMvkkA(uKz!5f3sIFj|F3Tk#Q@@JI zkt3UrhnWQo7?>DYm{~y8FldDvw*rem19X2TXw*QL;Q^B)A9Ty30=wf6&5T}SptGsiHJDDY2rT6Q`RN7=a%6)-&JlDV70A&S zSP-H002*2fkOG$7aRKPms(J-hfeqlE3m4dh(4>zXj-X?z(8KWqMi}TayaBrtIT&An zgAtU;!3SzWA_?I!&=h$+s4M{;hXqOwGa%XI2h3+3tjInCnFA^&8(7VmCxBYKpfjyN zzMQ~{@Z}6vP?`W=Z2+;3Qvlg7paZc`{8G=Z&$xmWxo`kkqsy>_6+KOWPRU{bmCms6 z1G$zN9(>?31$3t(E2K2p0CM3DR#1l-G<3)AxPdiGU)UA0G+nU z?zn?J%Z%v*NXrNIEJsb)eJr4oNt1cUbbo#U(bOLxY0yDk?2bR!vlIlD3rK+y(*`#1 z+Awxa<`ry;0t)P)fz1OTQw7YKv6|jHuMsqXmn#KI(mO!MY_e-G9bi}FQebzyF#ROI zfIQ=k=`Z;OH2JQuE3tw)#w;L}7p5x+2#7Ntm~J5;pr~_#4SY=v=(7GbRRD@|@l(AfSdTV3S-%ph+c z00)rch3OrF;DA{nD4@xIgyaIgbrw+8WgT<)BW;= z#HTw635a?efTuC=aF_y*BZI(Ra5_7Jl+IW{J1yD4hekotKPZ-1AvGwt@o{4Md?5jO z#(mTG3kgWogVHu=6Bwju03W6TItL1(6>5*bTRupW;sQ9&K?a?e93b^SXt4_D=x!7p zjtm0(!7jVQ3OR)tI(P)CD?lSEFF<+b1#6ZY0JgXsgS5|0_v4^Wl^UBAty07`W~ShJKk1b(wBf{GOIxm2J7LqX$k0w=-kq#jTj zX9Pt@2ix?6!UD?m6WA2k6$H(hXMjqh1#F=Es#q1cz~!$7(*ia{J_UA1P+nnF01tk# z>oaZvn*qvEx(plGz!?yfhE}jC37RwS02u>1;*LuIHdw{(2+FaG1VZ zT!3f#Ul9S0dQi~;J;3kfHVc5toGT#rgN~?T*I>H9uEYeY2!BlP z5EU?yz~%1g8^i^~!6Bx=X2!GtWCxEK(+rSfSFnRl)ns>^0d_9vBs?yGX&?olI~G9- zAi)4Fl-aXDgEo#6*s>fM1m5t0oO1-^9Pk>h1K^|m!0oji?B>kiA`^6c94H!puxD|A zuD@Z|WWE4#4Kt{j#ROWK0Ul%HRN{b~J_1^~#i`4nzzbTz3LdEdCl^Q1!Ex-4p!=7& zKxZYg>oS}H+Xoul)?i|AWCD#wLBb3;&NVsJX8$p8wxKkP~z(uxA$Ro>u=1$6cbXldUBwk(17d>}{v z00ozfH0V$|Na?_?!E^(;D0(pcxwwEXQv=6zF$sZ>jN7MkND5R*u07Kt;HV6}b&D5t zA~CDu3x+I#9n)t^3P>?sJ~Ms0q`)7=rJRbOHOG#Oir^))ObSe-vW zN}#x95xB^#zzND29H3%H05l%LpvcAK01iCRK=lsp=?BFGg@r*I-ym3l6I7vb32d64 zkSron&!xZ#J`5RDkiZPP!3aK1gVPbzh2jM70^t(a$*I7p%kY6wkw<~k5j;BzDS9~d z8C#f?*m;>i(hW?Cyr5~3AB>6u3Y?Cx-V&(b?&4J71b2JDN%o?D>M~4V zVgcPD!KumI!DP-ngGqrC-0%a%2ODT;j44ZjOW+?H=;n1!ea0mq8<`b2L6bIXKzG`4 zYA`Ke0-qtw2|Cq;)$zfK83No&+ybiwLDp^osfViDz+}d>19biguPZNuo&u+WfI0I4 zkbR)tKZx>?(-FM!M}gCE2NSq_a&0CFKG=wM@rD;1a(xCGXKJ$wP= zVHQx@&}2RV3Q$mBb%3^qfdUiKWdVgX^K=D80Y&K@pl%d6DwqYn@j>UlKvC*=VS0?B zfRywVkn`BUQ`9FQ&H)(+a?Y~p6G34DD@H))kt%>f780-zK;8izstgTSUQp_N0h%X= z9H-6c2$8(Q2fF@+Q=jn-$ld%u4K_3llgE9p^GyF-(Bw#s+3^r1l{%0cZH4uM@zbq=5&3^O>vfyNra>R3SI7@Ycy zAxcQ=J3wl583I66z9zGWA|I%1f$BgM0Vzm2rn9RE2-Qa@@`D-==FAxiphI&xbr}*s z6%wZczd3V(0;ePBIC)U!2OG#}#ZUuMy&Tj;tpG(0cu)saCV~nkhAe27Z$Vi<0*Zf7 zbT%-WF?E0<6g1Su2`a7?IOV{bR+!*fivn91ML0$vRnt}=~M~*CT`2w|R z3nTK%8L+|=;L$SpO=938dIO^w(+-f;j8+V1Ky@BFI6EC+1m|v0DmlQIC9rJz1zurM zX->xrpfl}3$Ap1WD9OhpDsqqbpqsEj^A#FQJ_<~riKUQiN2Zc2$Me(Ys|yr^ zZp_dSa2G?G6=2k4l2GJTU<6H{6=(>!GWJd1rXirqxN!Ot4S__)J=5JZ1)Ldergv!y z$TL2gzDiR-hVlIL%@k-xxLv4Ww z#@6Xw+5&Qn7pAY&7HDI%nQovXpagO-R3CT>4Bf#6Is#ryXHHDtqazT(cxJk&u7Cx{ z>Sc`rj=Tbkr-$ka)W+^V)*|4@BCuIV0kYr;lx`hg-f97Dx(5q_0tr;XFgxy>-V7JL z0Zp;Yj{D~{fQIV9YF{vbE`6K82)eG2)p7r^>H2yC#*B-nC+i7lGJShJy-!cThVk$8 z(|Q61OfPRu|Enip%D8yCvA%#d(~>vS6Z8dC1lu9EZGcwNfi9ldH+}k4eE~DZ#naE| z3+ON%d_Db_zJM81JKuC80|7(EztdAd)c)zy37f(NDAfUt4`+EBy0|7rq#>?Bi zjRayCL2LL{8Vks?AMI!raO~PK{k*Y&BuMa;u|Ow?E;SJVpXIsUM4*##%XCXqfy4Zr z>`DRxznOFy0>P(vPmi?`kQE0VR{~1cHxTnq42~D(PPaD`aB+Bo5Cr$?ITYBzcfx{B z>|uAj1D%0lmQY{^ErS4^Z_K8_#G=Hnz@ov#;wErw`(ZNyGbX10Pp1F25SYQ(Iem(y zz;njX?KM^cW{li#IH4P^SRAHbv=k7T{=i0nefkY+0Zrapf>0^Yj&&qSO&ft%Oj|Zj z7qt~IvS3z#tP_}MYN++M(t`Qp!x?W^qtRKUS|(LvxiC?sb&3aBtO{hWTl zQNW0)?dS9_jsix^^M7`1*K`svVPsrCJ2BTv zhKy^bS9l9V3Ljtt7j|q4912{Zq{1TbZTb^$0bj;t(G)BAh`1eiMbr!VjkP-lEJ{e+J|Irn!aCT7U` zs|C}&dFBQbN)& zgAq0)#h}2V$vgo(DZx11@dvxG>RRw+T`Z0Zz{_;@Gcz$mCdno+f^VN;0(CYnOh30q zRL&2vrU-nj4(N21i=Yqz?ZH!E6PU>c@`eneM&kj^0fP>!%2r|m1s3S2Xk7*g(6s`Z z%mUNp{RQ+T6ckuMNs9&CP64fLo5u_~0^eqOn!i8*FX(z2aL>eX0%O*6U4H?oFi>+C 
zyrzpqm!ZH>5Hu>l3~I}RE|XddIyWEOAOWdj1Zh$LsRiAf0#Ypu8cYX=IXJLD^IePr z6F`SwOg|SOpu#v|`sV-vHDTyXl9U1)IJ^}&1fFl#2^2WR$n@yt^dCV2rcAG2PS+0> zFk)(dH9cUFpvd;}V1dnyOb1?0*9jFcWxMpUL%@+|`aTgsJ*EdQr@s>sl$pLcRNyP) zlIbhM1PmD$Prn@|AgS^JG~~pl$*cg1ZqQmAP(ZMQ*HMA4MqpN85x6*AB3wXI?<43E zbI@8U@P0uNP`pDbAAZNJzgq+ZCWvy!Gctl!iWy`pu}r@hE+8@ef3y(m^qz15J*H-! z>3hNjiWq-x*N+egW@P%wH=Wl*pq}#p8zU=7&xPqcQ3AeRqXLIxy&2OSMv(oSW=u0c0!&s66BrdZ z6gV`Q3+zBA3351snrI+rfR46&$qX{~3`)BXy!ntrg9&tZ&;^hgptB`G^aV!nU=;(% zFv#2`$O+(HGN=>X1a{&ZMkQ{RdItp*N4@~vtN?c90?=f6y%|#v6DSzXm^wgwb}NPk z5T8Sn`2nLH(+`k?+18pfgAcC+P3nQ13!3GGICl#Z@>~SWxscwi0tcv9D5L;#FR1%E z19Y?yhbHrZ>1$#Hlo`KGzYrrJ!;ZY5s-6Sf-vtehuqkkWia&u(+{go3o1%CiG9swHZ1FbA)a)5MXpo680K^Jm>JG=^@aX!!h7pO17s=xv7g21v3 zrzNOT=2Bn>Wf~5FiQvAMfC8%{sMH1>kpmj>ge*s9*JTg@Td2sc!0D)wH7!L_rk)El z1JCK$!U#>YoQ|MnE)1X+Fr@#$30nCHDsY*>Jy3=$1x`>S87}ewEaH$26Hs6W9a_l= zO8cPh2DrDx90BT3>oSCZXWOTM+VGsZ455yUh4oep;9)FIU4|g=*0!T|s>kR^;JU0QD3>S8{QIu6M~&1mD@rm<=8&7McD!PC&7~j}c_1AGns`1YOk# z5&#MLDDY${@mMqVfCleD9X1beZ;=xe9Ub5$(VWmuC6~ZEW+irTZ;ss+w7!PNk-dHb>^N zqKpz&1vYMwp&&nix?rI3u^*776r7+22iVCBASbhc3RZB>?gFC{n;Fv$M)3Lv1x`>g zGy&AxuQy}*14<-pW}wW>>BwZo@Bzf7Cc}CPdanzGp<3;)L;uhchzkG znFt!E0r?EHzZIOPIW?JgFhQ#x$Vd>lPEmk$&(AP{!w<3lf>W2_1QY1CRPa#20VZ>1 z(8!bH4e$UE=-w+(mQny;RnDo;cn8BdV9$X%av;xvMoK`Q138V|is1u@&#A$5gUO8P z1&GbM)|?qU0tA}K2YZwW>`@=k5FX0(IumI49yEvtnyLmFzX3W{wE{9$1@8K8V21h> z>__OzQOJ5w4Nk`$%vqpYI2AyHM>iO=AjKCbd_cND(RqM5ONC*&&jS%z4iV7i5hj6c z(+eMn2v0Xm5;(~8VdeBMNdo#zzgA9HO%`~>cJCLcPM*%2A|T7SWV&{WfCA&?={_j} zDoo8n(~D9B{1{(NKbRum%-Fk~HC3RIk#W-W_A~)g##zhvrU@`H-k!cML%^8v@$^p_ z0+ShcPjAZf+Eu|X91tLt`kd`5+?B-Bm0JqCP_bz}M zNsJ0?paEr2$pX66ly|y8iGUR2vgzI>0y4~e0*%wNN(AETb)ZEohocE}9})*>5r~2U zxS^l|X${yZa5y%A_90opngtf%Dj#egxIPzvRQ@i=BUqq{6x3#LFk|vi03X%t$Wo-l z<7 z!ic7j!RaJ;tvtx3;Kn~Fm2-e9DFx73ap21O9hZU$xZ%+OZv8WHgTm8BkrPCjC<=lC z1a#gYW44kIXj}&ri=c@_q3H@`0vbGm&{dP1(u$nZL&^k{>OmE?BC7%ixRDLA4>W+u z=Ez(En(P)>$PMz)6qMmDZjg_bz^Yje(CRr*YmWicxYA&nzzAt@fvQbV3k=j|+y-id zfYZVz5DQ!hf?H`j7zIIdT^x>}S_YCD_Dp|RCLmhBgVBuX07x;U9s)NUK;w-Z;1U>m z-56-|0?0MG3>QEl4sP6?U^HjG0n!Lr>BRvW(gw|mfy3+pILts=z!MuF?{a{vLge5D zsjO#)toQ<*I^g($5!_P*-ER#E7x1JD56FiM3j7dC02%}!@AiS3a$xUvfmmSgf}3}s z>kvWVt-uOR`cs&Y6DY_sP{NwPWL6Ja0mcEJ|3IYZH5fW{8CEd4^0I1sSNU1fBT@8j}Fk1EA(A=$L;{jt1ptaBGPB&Gc!t0#ihsK%4kj3>jFI*xD4> zoa_Uo1)8TD)(OaqpW_A}j0zbFbbR=sNx<;{gTSBZ>2(6?&^-;W8Bb5IsTWX_y#U^% z#sJOh080%xc1uNP3~01xX)3oM)du3o?ryf2H%0km6Gpn-q7NrQk7 zOstUBO9OMZ5~%e8zAOS%`7tPQL%UI+ zGjBT>L6>zq3V^1ls-xHCB%L8TlwWZ{7lk0Vny9}6oBBdBxC<0#TLjhc*gJv^8OYV3PMJV`)P9@M8#e$3sptZ7~ znh>Lc?FX>a|a{14aW>}o&wlup!IPRnCcZcKog@c zK(2nll;y}NP^AE>UM7G_Hi2f)I0a}iHc0vdQ{Jy}XRL==g0~&}>1O)+u8PfyMI0>&gvP)Ua zm?nTkS-^b{4$%Cun4_$e|?Mx9^!yMEo;Q_V7tr=7mIGh?98X7+QueUd6k^vnC22KYY z3LFaTu)TVqHoHca5@^8_E2sm&;b@Sh09rkA1l*!EP~vpu1+NMPH`PGZF%zu%gR~rp zNlo>j1_UcKl30-3N9Q4+}Y1*Z&NHU&`3fUY5grYB@g@SO4wq=_Gv z2*Jh2113;6O@rwMlOi(@+w?#~5fL+Xqy!AtdkmC-1z~zwK}8RWURJPPHdc^c?&(L{ z1XP(8Fp*KtaA`0xK+72xP>}@c7l9g);Pk}+?NbnwGC&CfoNGbl3=2}w*$PyF+kr2b zvIMSx3ReydrWZ`m0v1%DFbY%=8~wz@emzpb2X>GG2k06Zc+qhEbqDB*gj`5Wf(jdO zF(Ysles32CD6~bL)(guc%1qw2N*JN<$G8j3sK{gwL zrldi2HV1f14~ITuj1ng=3n*+fm?F%W5)@!PSx|Wd8p#ChLjMlVyg7=TkhTKo40_Pq z1UG1;lmk>7LPjc;K*s1Y1VctKl_5OPn5Gq|@Bq#2DWF(gqQnWV=r}Z(3P3}o;LD-F zJ#R>77-4k_BUmZ;!XnVR3kF99MNZHs=0uPxLL9@LIxQrpe6h`P_v{)R2)f7c+ZUMEbIIS5$nV^ArTDO1_ z$BZ*g0*>4Qi??fa3oK`1d(zn|;K)DSTua1|v2#0TpMWwW)0WQZdi?^aj5oJW=@+nN zWcu8({qh8XQbwkS6Q+Aj5_rbe)7J^w8D}!ra2zO$G|;NkQ;lLdMh z8>eSZ5r`D+1#fGXQDg(1BRcak{{H#>>+MX9&oF?z=ObA&>@I z*fDd4fHLgX(FfCy%n(pwd@%j_3;|0v(7HQ;JJZ!>3Is5Im|i|pK!x$a^f@yHG8i?c 
zv(6IeXS^_d>MQ{x_J53pjz<{Wr(c{UpupHS{nIRgB9IHyW(%}~==ZY)ia0bB1Ql2m zgqKg}nl7L}ea0LCU6}Ew=Lp1!o#9r3FA!k_jXfx`J3d%3-F~jXZ}oQYNeVnltl(n` z*)*6qlz1H>+o@p_puAa*7q%zP6JTLxdaz=9`a*$KOyZ!E=N)B0`zhO)r65Pqf$s5{ zuCYWwiP2!X_Y#2;#$($LED>1C$arjf-ZFvNjEue0xmF0&aIAst>+D+rp4s2CLI8Aq z%)J!?QXJs(;#dV4j?qk~ogSmbBbnBM_N@yrjh;8r)iXn-$Xxd30hg1pcLR3w0t zkK^OB%>s@*kTo%&aV_v+jY^=sqa0bFRV`E2f>*ViSSz5(wC(Bi&uay28UIcv~}QN$2sc+w3$9Vn0|7dfGN|`V^em6a@zIR;kva3|<)MFlp;6CayE9n43}p#8cLO3a|m#|+bN>=58{5>Q|h zSOGqENkdT#v{_jPG;zVI!4v@DhHxv2C%F-Nl6~kyax@>uqw!b zoA<1s8AJtE(A1=YJg9lks?T_X3G7(V#YNCs5LAV7fGR=I`AVA17nl^~K!;F)TZ~|n zw6WTM@=Jq&qaLV>%?hq!nL%ECF+F#$KpZ2}^j*6I(h2!McYuBH1LOl;tUh32;#LqP zDjM_$*c019yRYmgUtf6)8FhBa3>TUW&`Y#Gs_zU9L=!$L<2JL&Y;BNc!woR zkpq-RV-(pyLk$Wnj!(c@9W*V#l=*T zrNpkl1R7j+tSJ;w2Ib%lOpxJ2P_I4pru%HpfLwfK>|9of)&)8hd2jxni%*-Fp#C-0a(yweIQTDf#w<=85BXq z2B>UOU=g^(sKCez=3ijSQeaUKQxFk|1BLGmQ26pPI)j>S;E5XW@(DJOqc1Rl$SX`B zKftDB7(rY8m~~EXb)PiV_NZGK%aVc7c+RBC`VD^oNH9#OnFM8%02I2%35YM~osTsJ{fML)byf zWEeol)qxjKgATBB$X4JI_{jlsMGK=67kIv(%Nnw@99)Bdn(-Z=RL2UMp%VDRp#-@m z7FDGqXmK^8sscpOEH;pNQ!vZ}x7cK-&pslc1lc#ms=+ja(F`=2#^K0V23qm}TCgOb zzzMCs6co8YepFEeHCH6E6a*Cbp)wMR{0hvDJm8}NWt1dA$$}X)E)U-P#q7wE1sXYF zP+$ff@Bm4qitM18JWG*Tfz6RId%De00iJrvEJxNXN2W3b?kq)a9!74^c?6&%2?Z5+ zpb-f=b`b1UW>BC&VsZ)SURG8OrUjtu>%oTwDu6UV;&KNg^6qZXJp-WVkOX;S3!@^x z0v9COHZUsjD5@|ka4K>@*LW~$FzG0;WGR6TA5#RaVS_}U^z_$91ymWCrwbkvFszq^ zj6^zeX7e#IGJua5xxtvF#0zQ{Lbg}1DS%R{z;`wUUeFnh`iyrNm3UowK^ND9*Kx3c z{P+N_XUg1Z0ty&cgVdVxL4`ar}0NQiG3R*`6N(XG<1_Zd-0a+XY+B^t8 zP7NHR;5D}b3ewOB6Ht@}g@FjjQJ{ztP>_Pka43S>-H@o{QR0P1r4)n(+93{RDM*{u zGoi;N2dInzosLDW&cP(TfoFh8J25vvB%3q~c-4mDN{rZ0!0jHk)s8qRR!9s22NwFpf%5;5Z4KUDbU0#dUkV*X0J5LFyv$2CTu}|N2 zNeF5KMQ+|z%Y6_96anl5opz@2f|^n`N) zMeNO>7Pvsx^!jar_B=}=4O$il$7AodtDG0u$H=te{q!dn1WcI@z27cb%6z3{StF!v15`r8?dS&lQ?r{`W4P+)rXb$b700b8a+@28);EO3`` z`SiX=0_zx?rrSOi@M7FGz2mV!A;%0Z1z`mafuqwIp9qLa&ENu6hAS8ymEC#4H3vIr zhFgI{pnJOE69Eav71O<*2&gkQPM>g1Kt*ze5O|j&Bl03hPHx8!3#YGtB4ES1l54HN zlIb6w2v{>VPS<%VV8FOydhAnyM8><*_dgYoV!Si`)>DBQjCZF`ydfY9qMivDGp?L| z^qGJaac>*iwjst!L zae*n*t6vD5Wn4Ag^QC|iW7G7UmjWK@Q17rgDue5BcE=5Dpl!FHP01Vr5XlW}juO*v zzZB45Ts@uTm4K6Aw-Bg~0afo1Wh z%IW#<1eBQmb57s*K){f3%Jie}1ne0XO=o>CV8OU}y4`z$K*qb%C%hL>WV}25!b5?* zgEj5kfux-~((OnQ(6C6o{0l8?Ky!9`HW~sku2~W zrNBf`gNWUVK|z7lk>82|bmFss6@!BUxXH!=It4}ov^5fRRxc~4wQK>Zdj-G^EXWop zRt+W|f%D+LyFMe*rB$FgV})#14JHpo(D}@e!6goX^Wag25G4+9b;W^b3k87d^BW9V zN*oG|3e2FnBv#ORZzWCz=-M*S0vJVhPyL{7dnc8M#5Rat9rp#FR_AGUV&OS&@p6IP>WQ7)zJaeiviWXtf1qUH9$KE z!7K}N<_6Hg&fxuntf0*XGeBG>Go~q^BcQ>pa5K<>SgiHnwX~3Cz69ua>K6=Ij-aX) zbXUze@DXfF7?n66Z5^=hnL!(jST&d;K#SH_fQ$t#e}$L;8oLL1RbUo3sJXEPq!Zk8 zy1|eIYS4l<6@vEcF(`nIV&Yb)2gln2Mz~=M7_*ePxD`QdwH?s5Iu~ft+)*P7)U;#O zXWWCO5wn949K)dPW+0bf;u8xS&pFDqIw5Vs(8Vv z1fP8dS;%Y7d;ye)Zh)O~fip`1JYB@9&-j2-iJ2FCkoOHvMUZD0%$PoaT)=D2jN~p3 zGbXURIFQ`6|8s|cqXldp4wQZ_FlN~+KpcsfGD31I)SW*;8X1u^f+{0LaJTfu^sX-g za`n)y2m(#u#P|edfgUI=>M}e4_ewyAtARS2(DiQ2Pz_(OX!yVg%DbA(FBrkzglzEV z61WKNlKuhhW`J*IU`~f7_aBT(+|X7iXw5Xpi=b90cuX0z{R})zsKGP=v~e1|=z$e- z?wI2T&~nK}a6hwwS&0W+jPQVp5s>3S5%dGJ8i9Fw!#WAk=>k_c`KGV@Dj=ga0bD{k zf>w92L(VY)MU5k9%@-?3)R9SnQDF9T`ELT^^|}nRm_faDR!7jBX6VB=-v;o(+bbL9o~0u^}8nKwY}1}#fQwi~qQnALFu zs3i}w8*K0nW+fKT3Tx8 zT*Jk!!0F1%rNFAdXU=?s8A4wG(Y)r&51{l5W=A_v{DJRO1-Y*tbi^*Rqn!e)Bj|Eb zkPt|OPYN{L0CmCzW>CI-zzo_LejOao4J^o8N`$#V^~48ekii;EKbU#=xE&Q0SwNuz zat^OK^90brJG|!19V}4m!Pg{#qNbj)kQbC5K!=sFI!<5#6`+g)v%%prg9SOtg}}ix z3v@XZsH|WC?G(Wbo)s*}!2{Bx$-IC?kr#9>EUV)U7Ra_-@V*F8o0L@meBv0p0*gQ| zxY&I$UGRs1X#E2gbLI~$plvFkA{Z2+4Xlt5{Q(kXb!=b-osA4WBL|$eI#@wr)BqY6 
zfX8nK$|**msM2NVVg(uK2x>}WOKUS&!Ojl_Uj@m+BMMGz6Ic~_K}$(k!5619Du4>K z4=k_*fqDiQ$QICnT?!!6e}ID%qK!*n*>t_1-~_)3d_owjBjk)P%rHEF;ciXl9UwRG zLJ~YXIKhL41V9P?0w`d3&6!WIGC6?CSG0fu9i_zTcmWVE3?s4`|{NSO!jR4Q$By z9Hd^Cp$U9I600NRFeXe_O<)7Z7c{y+dNi3k*dWP`6SUrz6%<4Z*dS?V2AcwFJxE{$ z8#Eux0CkZ-`GB#Iml2!`L5DTLav={W7lL|Gtd1astd5{g5*PTcZUuIM8{n*XfDJh- z3WBrZ2GD6stQt%^*m!tABa)n;GzUt=C)k)k6`SJ;Bx^w3Jy+4%3BuipVG9Qn+dH?V-M03VVFwSpC7#Q|th2Rf9I)e&?yB$vQV zu;~|2ieWCW=?7RK#qbI6IUgWzGQ9!SuN>gve#kYpjtZbdE?FH9fXJz-MdU9QOHcnWkRHs~-;Q22w+5Ce5mAdRRN4&+jT6NMuX`EhY|~@kH-M2yWentobC9715^xjFlB)@x(Q4L1!DszB&WY& z%E||AkYLqkY~e(XNzi=@AR`($K@&}^jvbt!-BqlP4d96sW`Vn)sS{RB<_=C!*n{`E z3xI4n0dLu}fChG1LCt&6I#VXl_7_%7<{g~V1t)TgOwY6vk^!}?94ByrZkD(|l}j*# zansad!Fx=9c1;gv6uczJ3R?HW0y-FnT>w0PKi!E*P=&E|dODM!CC7cx!U%qW#nV?X z3D&yp*bH9O0zRq(bXN>$b*$rtxeWr29FWBhkd7|sFtnN1S_B+fA)=tp8F&e!66i#Z z9n-U!1x*+iPhY?+sL6Eo&GggEf;x;FrvHG5>aqxGGflWVJ&Hxph-v1v>3uAMpmh&N zSp;>M7JZ!lnME*@=^xwlI95T6>65qwy&2a}zsDu0&Zsn$otH<@-TWUPXxYvK(DDIpZt%`AfUyvBA^8ZRt$QelC6Qoj8O}Ad6ouv{siQ81_f4bkl%iTdo3o@FNz9^*cg~G zS%8Nez*m$*hDBVE%Ms8N5qJj|s0YppE|E1r3*H63vrQMUlN8zhI9yN$e9Q=Aq+l=8 zk(tvcL<)NG&H>#Czu-xefTM!I_36(e1y$L=+rPkv1Bp%7h!PYOea?^#9^(NU4{B;# zWC`?c_lpvYWMb<5wtZ!c;ABRoOW&s3#R;}D?fE?YXq@0zkOh0<1(iYc!+61Vv2S16 z1svsJT}?Ldiv1%qr`IJ2s))__+K#kI5m`=}yMsxA%@I_fT>CoxUV`9%{u8Ta2!Oiu zil7H9a*|u#|;Y;5FCud#Qp` z8GlSKNfXr71#dV2bp#nSKsP2iG6)>tQ(}NkR)Gdyz>5zVz()seKbIzG$jEqQT85w! zQ_G_1Dj9;d9KYW+2{@VyJeZ!DA!yDxclyE%L1o6-(~o8ddWcPg9yremnyCcEG3W^J zt<#k=1vMBaP4~$ZlxAGMJtI?4fsyI-f$3dYf*Tm;Pq)q%v=%$V06UBud_Dze-#x3q z^65Ru>z@s7@pjZ`HKAkmJP*0`@G!h3|js%+cyutt)6!{?t zJzH`5#9Tpz>G`>W5{%2IcjOALV4ODHFi)_9v3L5eJi!f&lc#6r3re#+TMRm~e|t~9 zpgkkwwCVO7!s5K3nQvBbN@N$9G5teHLL)vW)Mi>lO-XFwUADRw!t|IBR-) zp32&7)fmrCXDJiRV_d(zxJ*!pk#XhpwsOJsjOV7CRS3p0uAKh5O3;FF#`JR)g2s%E z(<7<{4Hz4zcUB8(FrJ?tQz>|g@%(i4D!~-SbJKgO2xzzf(lB+pdySwe~z2<)BCTq~%;xOloot>9h8Pt)1z1T7eUPPeZU^k-Z)y}wRS zS^}P}SV1S1f<_sjIuF$esxo!*O@CP@=*PHbx<$R9FX%>(dO=Ud-su~_Gyh%`rv1NKxlb|-^rRi-=f^v+fr!Q?11RZN8 z*(_+wcw~Bbv!D*+k?AeXg6Z-N?4Z-6K?_tEKr8S-CrcZErga+Fl>`HMS*Hg+k`UQ0 z*8(a|e|()@*(PYlH22%|jctNPOv}Daf7&LP%Jlrsbf0#?QpIh8(CJ&yam)&WptC(e zwILh$P6dZ-fo;sEns$dM-9CL&hhQk< zk?9C!u7(tCO&`CZIrh9b?&eA;10J@e2bUwa7mI8~wKX%Xr z8fdbe9aNHnrrtm;LV{3u9s{~H2)tqzG+6*%OTq*?c9>b0AwY@AQ5H1% z%;YHS4m!sURC$9A!{UI{@I0*Cps{LD4Xevw;K-cK$I8se$OKwa!r;VU?hn2q4yG2g zQW`XWq5xVz555{$kqJ~EFoOg&K!?vTJHBAZQe*-z1%ck-3tlxMu!|3Lhg8MXS`H<~ zf~gBQq*XzOI$^ts4z!~i)U;~=9n3Uc?vJSWbl*Nf4#p4D!}JtoO zTsi$>pP&unjOl#+f}q)9vwlHK#>VLd{el*Z%cigF7d*$h3^Wok-EW#;B;(5IAGXVa zROcU%1r|{Yl#x>L9Ckr|-ZkoPevY<0#@AM~=1w9x~PuHCyD8bk=-EE3sBIC;Gj(l=b z)8h+-lo)4BU+5&M!Pq#RZz|Z{hkSBk)8nQJN--@GnttCy=se@1>0Z+WWAVCkDcGI7 zONBtEB0T_w5O>)0_f@iDwtU%2%#NI%EDny0j@OwKm_b($@-j+sD}b(c1g$P&5NH5x z8fMaEFaVvu$)wA`N^%fE)8P_^$7*C92=1NKNdhGIY5^;!#E6}%?qIWIUQFZD;CIdT!74< z0pox)Pk?biX8?nnFpeicLMhA)pd(j6*}(AtNC3v)0poylZ-8;YM+bwgegPK(r$xsH z$ov~H4oLHbh0|r13ia^tf;$unOiuLz3#KoaE9lO+a{7z8f=Y~y)4AsfTCq2>E3oM^ zI!qUwCnz#KVVJ*`cfg z4MNjD%oDU=Trgc>x2%&mcqss=gTMmbh$djp!~m+Rr&sQlRp(gA1zu*XJbmMQL2=Mj z`oZ0@VvGy6Kb$XE!^pUKdfY<6R;J00cZHw=W9#&NAYSYATPp-r8QZ3< z6x3#Ho367`P(t{y2xyW^lNog8kwlg;BWRE9g^hxij7`%gt`yYNYXS`rfF@u;XD_m9 zGADqhD>4L*fVwUY*&t4U0xRg4K22r@fq&BzHwku6FJ2{RXtxN|Lf&aa13Y?%p8PM@fpaXaWAg9ejPoLd8{n08xCD5XV)q?VjOQ)-@7F17A1$`M$ zPY+ou=*4(?`uw$Exr=KBWf@zhzh5hOopIxIL47$n#*NDj^yNS&n>=4HXv)|*U3PZI5QMeFX6f!QHF1J0z>pRd9!@L+oOE({< z|Di*`(ZKQS2hc#o^n<$uLl{?1PrL}O><;b&x1Sr23u-X_ot}77@F?To=>;diH3t?Y ze;`W!;8F4$qU1LoCBGm_e&JE_6QblN9wk2@N`Bx`@*Se&J02z9AWFXBQSud{ z)L=X^J^iwv9%K9TX_p19Stf7@yq$javY;K~#_1wg1a%qrZg;vOD8R_LaeDAoK^4Zm 
z(<`nDmN0Id{`RV%8spySBG&{rGj5uG;+mizF=*9GMncTYFJE+}hY3TaacAdLZm zde5NB2h?CxU<8dFfrmdi%orIISf}@27o5nrdAi~a!CI!{i>EKRAtptBu8mq@XL zwwH05F@gFJ0@Kgm6qKl!U~*7k2W43f=%NW;P_vaCbf<`l0y}6qvI68t1yCEzQ6Wp< zz94A+n-SDz*T_-=Q4*lu$TQIO8Uk5LTnZeZTUt3mTi8Ljw1U=N!ER{<9m&h2qreWi zrIj6Y2#u+;0z0Vn4!Wild}EG{0=oj2qkN$igA>Sl*v@Yk1$J<+okM{ebW7`jdxE0% zpuU0!XzfXW0z0Te0TpEm&@*V*^%+x;*M&otH$u&ULpcw~CvZ9&oc1B~EX-`T+< z380%PPQVUw1l^bc9%ukxD+5_v!32&NP|F51cOuZr1#;FClywjw)w&E17(taQ*zY$O z&6!_-Oa`qn<`DP}s(Ha{8$gXCJ!XLp@Nt|VZJ<-sz_+_W&VU3bhz`(IXsqBfAlVhT zrgLkGsxvZ8H`Wx@;s-4p2ajk&M*%0^5mc0gcfr}g+p3rxKQJh;2rLKR2zF)q`8$H* zY!{du83kIV2fh~;p8o%iAgc~&4Fr>8YrSJjJ!s*3p)~039tKb!5q#q_J7^=lIjFnD z?)YH3{#`+9rZtPE=iC)cX52OX!d<}twsl8aKphsfdxH8hQ$cr_fEK8O#_8EXBVdpe zufQs>XL{j1L0`tE>HF^q&SPwt9(P~Rmiqx{goa6%fkB{Y`m+0i#f*EVOFak}*K3YLQfdial9qR=^Eu#ib z1<0vlpyLogQy8G5G9bCdQ3JMEi34;{E$Dbsm;%tgWeIa;3(y8@GhRlpk%}ObEI?*H zV+Nh?FXYC{2I}pDZH<5)iOK=aYg_`)*c4L0+sG4?m_ely2dGq1fL#qB1S&>gLW{Ve zM;(GrQwH^ln8CLR6o8ISV?eSFvwSdVrE33)2n8=?9xY z6H$ziMO#83CxN%~Utk2C(8UZoXGCbZ5+`Jvx`L3CLdSCO)&$W0fAE$d4$x>Y$X-U! zC@^SY7YBH%hy%3r2svyXFq$)iPBsPYgjxaFLcfA33v|xNj(QN6)r{!?XczkdrYuKk z(EfjL838`P1bzm<4kpmPLq|qM(8*;dK+{^Fa{xd)TR`WSC<#M$9)^Jqc~B5CX9gV$ z0y+c>bR+>&mVz*7=OO4|C?+LVkP=Pi8BB^GFV};Pg#x)j$ebC?X&_N#m%Vt{A>b$t z>xzJ48Fa`q#Fen6U5ucu^pGnSPk?*`a%k%#fb zu|V6FK^r02K_du|bEiN-1&%*ZuN8DqC@7Rc;S0L$7nJsXFiwA%Co3}jzLp3(3#S>A z$n=dXBtU1$KqJ{5e3mU}Jd^`m5HcySfnpbQvNfpf28xykV3&gqv;_q-V_-y8;rl3}#F(K&A?rGh;RVR$r%pqbg{x z9ft4NQ=vv4TmFb^7%xNjYWII7G`4V7LDOpCU0mAVp9(nH5~@f>#Q2DR2r* z2IssUMn&+jA*jfn#|U1V#-_mS2^wEi;Dp4JBZDF1*XD+Tf(X$q7nIY5^^Avw4H+KdLI6Q3AC zTk$~dgnAI66;!Z*a!Wg;+X||vA%|-r6?soVW{DuuALv|cG(F%H$q9;gP>SSXwC4tm zra-Gn77ZrwK6WP1uq3SbhNLG2@T4QSii8&4NJ+Jx6V#w$0v)*}0vgH&wJ$k9qgkMp z9SU3mYnc_e6j&6P1oj9*55NGa0F8`7ibBxlWCbSBF)85W4%rB7#soTk1$6s_0xRUK zk_+HA)$$dLOa_of)dQJqC0-Q=j zCnyVv3vZAH={X>frCZQFDNFA&9mp<3Yu}ioHm{JouCvW#A^~* zye17g<6!!NY7vQCUQK2qy*PPNvw))-!i&sMFS3y0#XSOs(|O+u$}>t$*L^Q&jL%11 z?*;V{K05ebPz>rLF|0lk1|O5+=MbLzC&-6!M1oasAO&9zss4BW=<1_&UUdIXiSqjqN`_~*91olt&{wiq1 zcwlmr%pe@=`(~Z6f`Y`UD zUjI!{NnZ+7xA8i52xcjWI3CzAO+etFpaQQxV~?Pc056MzEa*&ob_F9)+;#{mNlm}- zP0&!blU2}`*J?k|< zyL-eGBp~O}TQP(xfH`5{qw2#ToNzEF9KwkJb0R<-4JHB5;00Ji0Cb6r{o4a%2?P#H=I$ zDXdu(c)Rgf3B!3_584sbCtgEvb_5L6~IGbr#XD48*>5mDe( zP%~p%0ixB-n3jNOMKh)aAevW`c>-v0zDT_Suj2u*0SiR3KqDAD0#n!&cpY0rvp|{| z6nGsQM6(psK|K>*#~#rvP#G|R7j(D)uj3TaECsNgOdxd%W=wZP6?h#xM6*ElgPdV& z&h!Rkosb#R3lObn#`H%-ffqcXQDDw|K~zDTS3%m0>4>NTuY#Bv(*aS0e<$+Psb%M6(=`mSgcc?h(y$>_M#0;swRV5m3ZS znK4ZO(d=eSJs_Gt1~0phjPZKXh1sbR2UI4TB8oiG#LfJCQ&bUNyQ&YR$MydefE z-DikoDTrw>$q1Zb26r2H^%-A?DG7k)f_NS4Tg1^kIzc?k5$WbJUdIbyQyz$c%WZIS z;MHWlA*N(!&U^yoVQDj_1L6w2pi?8j1+g-;&aks%ngSAMv|_jcq8ZGX=74Be2r&H- zQ{b(4+yHVvXx6Pm99(6+5XmxQ`U3LV3USbMGRQ{94G88A1e3vv;ifn|ikZ%U*bL@O zM?khPm@&Vd^=W{T zrUWPm_K1RSsRJ=rNSHA(fNnbEb^L&&`v6>b{TGmSc97aFq6+BRKY+Efa4YaS&X5EJ z5xAuyFbC}MIg&~Oylfx`YcS1_1l=tG$`hb{ZWlliw?Go%+7Dt`3c?DaAX`p|2=sA+ zLU{>TyO{#7;|f%jkWl8;U|JxlWC=1J9AKaVXoaLX^A1S`UdJ8N{s~6ZFOUI+kW7|> zkOtEeCM7}8n61D{P?H^;XB2GBnRkGc9FPIIWQPoBh-Whk$nFy|N&>tbAm<#BQRD*A z2V|54Am)I`7fg;!CE&gMydYCnfJ|8-lcgZ2z%C%GAfzB*#`J_~`t^T;5{w6?fA}Zp zqO?H-fZ_(EiPv$))CWQq z@+Tx=3Ez=Hk>8O~Q3Is^iX_CFC#IV-3VA5mxG8BW@`JQ~fQ7vy14!K#Q3Y^X*fV`0 zqmY#42T4U`CIrgO6h>DFHXm24m2 zj<5t}K{1em4^p5N-n@=Kz~+lNvScZ-gNqrEWP`LB(*kLbNBY5JuQVuqOGEs-KpN)X z71E%r<9Gmc?Jut`!zyWBM#ue(iUN)-ZbhJdXN(HT8VwFITNUq5j11! 
z(jA`8$SoyZziv;ffFp}Q8aRtPE>Ot=U7*Z;i-m~^a$3>?(7s(21_e-o&g94>uz~}$ znqQxh1$4R_q_GDf!Q((mOpdDlEDqovloGR}rW-Hlpk+whayWiv0B_1=cjILR9qsAP z%gn>V%B{dMeLlC8L_PRmRM4ro3M`HiS&m;nH$&=bCU9L1YF@B%D?lm}0XJSIZYBpu zMg`^)$A9~N{AVw8RbY0M^9C>gGHX6_OtX2*toE3Ptu z*71U7H5~smTv)|g=*r7zE9JPMcbb4cBZCsNBV!@hr5t5EjNFbdS}}yVK_?9|IKE_1 zV3cgof&ESIS%P z8HVtGknnR1;b=aoaKRVYgjJ@mc*SQd_!2`-n_o&pVlDVqb{-`rP}>ib*O)b!I0TLh zP1k+R=fk;4Sb@n=0d(S7BeR06#6!@By z%%A}-4$$enYznLbSElo`@GoL~H+`F+l&J;!R1s($KXgn3T5KS42<#|wP@8DF0xQ3g zD5C!bS>OyhJ@(x6C?P4(`lebTDPP95(@zUYWeY!mU)cuf%W()CpYACvr3RXLDioF~ zW}Gwqsj!rp$N|vBfd@ciObX1Pb^Qt~0`I5mi%4lOuALqsBBd3G#ULTrRa>kI;N_DH zW=s!2o9iDiLd!qs^gm`{~Y{{2-U+aPn&* z83`(z7!|;&U==4nc(K(@+&Yc=AX{V#cwMGy<#^F;T@<$_@^gw@k=w^ zZJ%BvE+xfunrHfKE`9~J2S29?2)v(ukc%I@R_=kgls?moW77pCq@J}&+g8CiMFaTX?3ECpe47wabU_L+0MW7o98O@kZ zfFkJxTbASfAJh3IrBo5i=b-wT!POL}z+(RCf;{~4GT;kwd$~a;z0P1&V&Mf{(Foez z%i!3tak>u=e;CuvAJZR7O35(JpZ-r$N|NdRkLi+9QZh`7_^1Ej;dfzNJKd3&KalbL z^m9_+q_Bq80A2k`MrGp%1SeYvz$o)TyR2V6gZ ziZ@o!vD|C|lQ_VW1|X+9{+K=8Tt-S$Y0HFZ0*dUQ9Y6xFL1U1hiqcU7beah(=*&Mh zfxXk4WTb5P9)l(pEwVuC#2Ex0Pd_6g<(Bb)U5O1`Q-C&kgR|;veuN0DBJBW87IwfA zE=&WWB{~5lKY=mJ5qf`R8q5etBZNa>>hvmEDbNbgnX*!$jK`+Ghw%91q%1*+0cs*7 zfsdfeHK@&y_yl?SG{mS^3SehCC`!pNJvlf%PEkrs0h@^zKqg*b0yns)Pf-MGdIi#S z>hN?9C8-5WH%?4nr6d*1_b(zmXBe4IbA?SY7yh*=?84&uQHyQ-l;03 z!gyf%I#nrc#@*9zsY)p_PM@COE-yY^!A_ogx}2I+9pjeibJe7h8MjVnRF~3MytS!8 zz;W-EW&zMpEr+xj(+gGwCP&ala3;qWtXTrvrU$A^MKErezDixn7O+J_UQ&1 zQmu?TrXO^WpTM|by1S;75o6EvDorUD#+TFgYf4Fjme1VLl#1g4?_XeYT)>_s&^bN8 zNj{Nr+4L1!QXz~xK?X}qmv@$zQ9OI1RlsrWyH>a(8rT$=91pN&DKI(yUc0b!XZ4+@BPdEkKX&X)%T#JhZOK)iL62L;5-0(nqC*tyF?143zf zWT8AL3>ZD+6BxHnw=9y^XNQKt^!y@uNycr{6@BC-rXMhna$wvtozYOri*fsO-C}u= zcW--wy_@4D5AyDOFL{u6_ms$ky?e7n9`4-bVqs+5F@2V~lnLXG>F3R*Oc{4fXSI+r zW85*_20|5ENLer*nZCwCN{eyF^Z*+v+38;_r1T|rb0{$BGq$iPG4qOo&N2ZtPa4<+ z7EZUel+tHBGCkW;Y7*m)>HjRHG9i@8^d2iID;WE=l@veYmg&E(q+A$xOn0!B(q}wA zy}(*ZMQl6R;4`dB%)I=d-5a3u1O#?ZUu7*Nrwx(21Cs-#Hb#LTJRp}mfeF3i zv$a$V#KlHp-@!`0z?6WF6JQkBKYfOclpf>n>BnuPoFFP?c?7r>m>fZa&x`_pryJW! zDKYM!9%d`$$hc$r3|pxz#vRj{?W8;ycTD%OlgeS-F@2|jOq$Fmzy~%7gCs71HU$c7n66+iCC|8ix&w&XFg*c8ZJpj=FJ&VHF$L@Z@QGmp z>!+Wymy(d$05xm{_+TzRkYS*6dKd-PPjirxVB9cW(m^VNamVx~2Ps{~HPbgZNChzN zp3dkfrOvo;x^A1Cps@9n;y|rFs~5OrPv770I|``dfFf z5*rV&k^~R1l5HMRYZ(7bU)C#U%D8{}onEAdP*rX?w=mfCkMKfv#w80i*e!f zm3?yNj9aEZ?30UU+%es;Uk+s7gnl`h>6zYAR*c)H|Ll@eXWTtqv0F}64wA$~kPl&F z1O>T7mI9-|kLmnAQZkJHrZ;xWf$RzJkxFLVF`ctVPMh)f^anmt?uznOZ~99qGyb0b*Iz1-amRH304XWP?bEXYz_HaGAT@(=`*f8+sR)pLfl`@L zJ3wngm^49$&M|@R%urx*oB-ZXy?a^^IKni8q|6w1PfvjGI)kJX8MjYg9RxPWC0Ht! zamVyU!Qi~g6C&l$_%zcU<$IXa0%@=VnIXq;f;wBwpuNQkj9_(}!odmcN4S)u z7);#)aMl6u%VH2XGTlEy%A0Y=^w|+o&WuN<-;V%?ieV&3aC>c}lpy1DgE%R<>1(2- z3`HPOsG`KoE3Uu<-T*GJd-}&HsU*f7)88jZ$xJVZmf~UDKD{(`iEF4 zai*;t(THOb&&1)LTfGNf3itEWf_h(am}P_%;7z5tmjuzR{s zij)@P@9E_!2p3&WkqWMdBn^V5dORjRe#wpmYN|6b+JYn4zK|3qYb^3mjpo zDMtykqJ{~QDFh(?1SKG_;RT8;pau=dMWAE^svxH?O$8^HgE3Mf(?6w3DRM7_?^|M? 
z{vTB8E5u5%O}9;xdJk%cq)X{AZkZmHE_H};$8^~YsZOR3i>9y0kV<3P&^Nu2iC=8` zze}QQ({(bX^q4mEP2I<@IK4Jgs+jTO^pBZRc8u?)8)QkH1y$ZzQtk>1Hi54zP~>r3 z0%h?!u7I-m9M^1`o|-M?FaLlYI>`eXQQ`z0k*<-Y06MIJ%~2&wVDs4arXP!@=j2JrFl|{py(>@Z zwZe~$(*zs^1zMPuz(){+hMK^GydZ)FG*U7(U&@B5XVdh{`BGMlpQiH_NCiplUph^| zQNnS_(rE$$b49r`LBm&|>olhBpI%iUrL6MjR)c^d6Av@wUW>bzTR=P}ZpY^n!7N5@ z$4?WdA1shk);zIknt;GOQEnx$TS2>$9fcgfgN)~LdZ+l~QAR#y?##SyDs>G~Uq&TFJ|3#c&nW z!2;c5$mpo4z#?#J`tMRHXAX#&0&Azcl}TAjp8*|t0G=cPZP5pxnCz$zy5LM;-t<{z zQf7>2r(Z0SvS3^|ovU2Rf*o`W4#<(7}|3Q(_ZXFrBeVDur?W^t39eFXmJ3OcQ`^ZUpTpoxz{w*aH;;59KpEPTHQ5-w;1nHU9jPhZ`c>+$v?pIDfiUn^Z94f$4Q^Qc{fbr_XGYQe<2(eRrGG%&-H(phH7s6q!Mk zgd!+c3uGy9fLi|&N}K`^Y0!=bHVq~LMIrD3s0y4QX&EIRs5FlvCrCGkqOby+0*?Z_ zz=7$j+NCsX4hk#qD+pwP4*n5>7^x$0P#DA&h8ST0<%%fqCjLv4y1+cUZYfn(VFgZs`O_c3ML?=Ey20|ty1}YH zbW2q-&flKZBc%hn0C9S+lo8{>>8E<7lo=OHf7c6=oSxAq70Eb%dRC8A=Jfi0De38M z{ZdC*?lCytoz6EwN|$lhbejoMQj8a-hfk0)Ve7jKPCyeUNEtHjn|^qLlnLYh=|3h& zsWKjzE;muCSM(r1XxG1hBBug7D6ScG8Ds?xPCqbF%9QcY^luZT%oz_&*PA3I%Xo0Q z&m<|(y(m?aq|6x)Pv102stBsv0IXXhO986eaxz$V{$#N3<&(j>k4^^b{tOa4JY9Q= z)I7$^(@#y2Qf53j{oNEP4^aBHm?~u{dH(b?0Y^zt*$TO*1XM~WfD6^GsZy3quTM`u zI#p^d!RfWLr6d@SPMaOA`56;vqLs$vWWW(7ZYne6X+oO2Mpj%;GlVLNZ|{bE&>e` z^C+=_&U$I&1iLU>i5;{i#nCXE?*t>`bpHiX^3$ajNcB!%wm>S7|0HPXIY=Yu{9A#! z)7chE*>PNAP+(SI5qLG-XQ7m~E2v;+VRBGlLNSyLY^Z29XgP}`3wUZufmvVzXj&Dt zg9o&$0CfH62Z&MEr|(%PCC9jF`t5~MKDw8M6hUKT%w|j>prJA|rU1~|X$C8X2nBXf zvS0vFSIK zNC`1snErZ+lr7_n>6%NW?Cf9iE3kkj1zAD28iL9S2FFtj3ZQit;Hw~Cfu(ss_uYY) zSSTu1#s03YAkbP34Gqpv|LJ@k@5I+sTEQ> zMo)zlSacc0KnpHdbQy#}^EwKki>ZzagN8~NK@(gOphM-D96OFp6Hs6lSUbI9g_I}b z=jn%5fJ+d`l~Pj?+2zzqsR+i;)0J0AX)?Z;?z>9Lkq;6OV4pfJSUr8tDk&|W(+qB) zbAVVK`N3Cuvp5QXhO`+Jm;_n`pjLu|3mg<+&&xnPuaKp{;Mf8VDG4P`a7YQQ28UGe zYAFK|sKJa-gBcXq1%6GRz8W5eAy8vL9RP3`f)4`@5O_2F(P}9z18}R0RbVZsy~6>D zJx7fg~DnI5o4N?jP_a1PLZVMb6yg6g_g)4SG4 z*)p!3eqxPOIODJB3TvfI7++5hT`Q%=_-uO3TB%4z^Xb>tN_A?l_&rU)Q3kmTcl_Gb zCg3=|yA9Mx{LfhEXznKM$OLLCcde5$Wj_9U+Vn%~q&z?uD~3sn%QJ%NcTh4`U~&Ze z0kqJYxdgPh8GL7=<9aDkrUk2~N3569vVw*YJ2Z?yXQ3;BPL^W<2R`_YP(}=MSit7& zSubS;3u7jze$X_?ujyPHz@D?;AO&jsrf!hZE%2Xe3@=(ZDf(7DziFALn6&c8{@gK_@!h)q(mj0>ihZIVi5oIm}} zCMg|w+n9f|RH|Z&04Np}*ujP28t~#wNF0Jz7%H#{9GgCMvy?95+UbWkODQp~p8jmJ zlpN!)>8x9%l&7!TE2k#*3{+fjXfkV(>0;#6B-31ra#*v zB`0aGAh_I-F`Jh~3UqE1n8G|!1%ZNVhm@hf7SP^0@YNGspjdx2J#L4TA>*#;6Lv_2vu)v1 zV0XMQ{p}8^WsKLSPuVFI&Uk(Lo1Iccj5nso?~CJnj0vPXrOp)FGagWqaM#e+aSMQUO zQUrURO_O;8c)*h#SMe+ecpa4d!`kg)9>$> za%B88UGadF3Hx0kMRpz`?&UNd zrfVFO@`Z_Y9h9fTR3$or6-IFmbIzQZ{T8-+~%MDB`C;;;TX8YAE7aho!`s zj+~v|d|1j{ZtMC60r14NBby?lg5;hbi%nFR4oIB@;lpWLCx6|(&k@A7L*!HMYhU^!R7F1K2 z6xbXaAg0QlkTPa#dk5;aOn-1hN_=|W2`M9({>WoeHf&QtlENrH`w0?X2@*#Qp_#{} zGGtGJ#KlpZh;H9wkP;?z`|4$%fyBk|YW)vVf}vGp`jwMXHf&ArF(RkrlvEkhocGh; zoRX4agajhf()ZJ?PD@EK-4mL=;)s+H+kTKXKDcuvrtdv1B?$=w;pwkVONBDt*zRyf zDwL7uJ}4b;V9at<6SzHn;aRD0#{1KMo|Q6HUHl!qdgcKr{j)eYayT+8@HsMrPD*6X zcHB9qLBNq!;KB6Zb5a_D2ewWVP~cPGb_8$8W)^rbz2}^iG}D2t(^sC8n#j0uy7hS} zO_>k(K^I>;vXm%sfNuA3`~l{Jm;y(pH=LJJkOZj!oeBuLW8(fa0WecoK}cZ#^quFW z)U7`}gor5#Ixz@L6#>OKV-`p)OkBi?K|vTMw}3GV#8Ch%o-Tbss)%vp^u7yH+Kl_B z@46thj`894%!^WKjEoPrU$`Vy$jJC`y3-Y@tD+0uOoQw(17B_4!JMVQA@FFr&Q+;4 z-A6(Spu?&Z*}-e29c>&L6hL<>I0||5GDv}r!-i_*R^kwNJpKMvDRb_}px{*iExdj* zUGbWf4)-IFfB-}w;+j;S)^ZUgae?_F3W5qk;ESWdJxfhy2G9YwETD$DxFz#}SOqr6 z$I}I_ONlamp00LX%7F3N^ziFao{Zb4FT5_*&-i4z;SDKs#$D4hZ%7%zi|j==r1TiS zO}}_UN|^D&^upWxqSJrfkkVpYK3(;u)B?s6(~sQ**FJAo$hr@DunsPpJ~$< z-jcFs{51XHEh!(S!}q7_-!Fk)50@G#>3O)pGrxKe&z>_ zfy;nayD@7#J8B zxVgF6x!FMkR5KFASGM8jIvSjgafL8y3 z2AdfRc{vnV6u=T1Obnna!dXEVVK6DO@_=e8&_!jC_7QuQBO_?A%hAM5krlLD9X!Sb 
zx^SJ*k*Q?*-Dgtb4o`(Z=d_8r^0I<@ZMqDQt&B_zY;empm{^n;dD%c63WTkSoC*vY zOgf5O)7_p+DQQ7Wk#pr`0huNVUZ%>x$O3jUs8zt_$daYN>Bv-)HGS4|sSw7`)89Xr zGGbh}UFn6?Tt>#H(+|9qvQYa3zE++=fms05hj3&m0=`UYP=?N|1 zRbX~xaL-cWQD6h#QMq8c<{PPCfs?|Zb4A&}CmqfgP+*?E?~Rnw^mT8fco^qT-~C4F zKGSmE=@$}2#HVk5DxvDWJh^{OzxiHD(OeO9 zNazH{EG1s>(w0U61#VCn@G0;*wlMzTVqgFr$s;fW)TsQ&Sm^kI!A*h3k->es!3QZR z!-nvs9f*=(y)=jVeAmzaLclw?WQf}<8_(8{!?wLN} zqm+g$_>w0v1#a*?EFdR1+9Whko!P~j&VD_V9s*?{LU!|HsLlo1#O4%@den0)nS1A`r zUqNiT{x>Nf#^uv%zDZ?r-r`r{0xuDoUhrMYWxCOKDHEvgmP{A;A>}iD=XWVhrjFy& zpL~~6Wn4R*>xWb-5KcuV~mrh^vLn@l_(R7ZVQjN^|0?VfN{FJ)Qcy)TlFDV_y ztJA0clA6wR-gIlQ2iQzA+n;O-uHl>j5?z;IC47j%aqy9Sd0=#n3SSJV6cOQ|xR zo4)10lr7`M>7V{fxiOxfYAHPr;v}W1zog9=mrpljls00#Fg=A)+Jte{^qGv(a^?q) z!v~Iq9e02-Bxqqe=!9-ij}5#sUIJvNLpCTuE3kux+xZ06PJhQJohS$z%KN3akPvr+;RZR^bMD2sAdq zAh3425}UL&#}d%o375dC>8Whe#yTIrL*`@PgYW!~O+V0tG#n>^gceOd!X`aM>A_Y= z7G*L}U~sg^a(n^fgTlli+wsHJ=~e8~hrGA*D?vsnR%ARj z-HcOO!S4k~!At?zYJBi@vI5|1i4>Rw7P5n?QbtFCEbzK9O3>iqr3MNpmpHo&JnVT9pl4E?k=~$SrNbp|8NXL3jj89$^blTY zPz!1*ue3i?=lSW+c%?xD%;$uq#ip;~ldfas1rI{%@=F`>frhCxm>iTO9S<-FyqTWM zFI}$i2(+kmLM?l)0>G7B8Qw5|!<=Hu5X&oidwFck;UHG5{pMszxb4eEDaxXD~wbL~O zrQHO;iIPKLEjX*kC~-_T5R{gJ7&})`+KsL4>ofsJ+37O{q#YQ)Oa~dw#mcQ9tstQw zCeS)PK}cGg@%8i`A?ZlS+?=4m+UakFr2CorCQh#tmiC9}S;@G4dWDFz49|1$g#|6# zN_?)o(*?z)m6;shO+P9kZN<22ny7Syh?e7P1_c3WP^G{RDtlTOkzB+vy-!qHh;iZc zg`(1CjE&Q8h)OF7&j1}0JOh03_bCPiNpKMJib)qTHcqb>lNMJ7kF;(9UmIh_v;lM{ z%MR8o1%5|U1u94Opu_>PyFmbC_mSxz#idK9pA(lB=F)Q1 zPykDvn*Lf`x&vHSNk~^te3OgC8bpuUr(PSDc#2N7vvar4W=8c z0&k}4NlCZyyk&5F!vHbm&-4RQ(oJwv_DV~~h&O^f@_{u=iC=*aR#)>2%${x_BmEpy z*GS4re*#rrymHdw;>*F&)xx194Zh|;B1=I~fnVUy^oyF(ozsuVNy{+an*LBuT7(fQ zR4*?r&3Iw@YamEJo;yNV86#p&*^Xag;$pT7ges&U6k% zX;YYS`XbVt(=RAW^U8sfj{r3J%;5kZMGh{bk=*lMS(JO}doiw^{zpaH4Cdtb z%F?XU52{M5GX9?aTvgf}CcZ*VnvZer^zCZW8BAM`O;=P0XQ4E8gbH;HX)_K3M|}kj zX@O0fCD^z3!?Fu=2Y+xfLeRnvbf7vrXlXN}BRGqI)<)|xFeveZ zlIyAI^*Yj$lJJs|7hHQXW+`!js!ULD=t!%IfHTSgW{4=e;}XU!fyU{tbfk?LUrY}$ zl=hpRqbn_Gh}6JicYFbB?VSMa2nG$Iv1B>~j?WmNwsf!}Y?-Yuoy`p%FJ*;{lL{J0rwf2GIyl@Q*?bAeAnWN945fvp zUp0^x-~r`zerY8^P$4w^qk(iB<5^GuMKCR7pB`u^?LYmep|l`l<8*E#X&1r%ddI$c zkkwGH{g|F$B;5zfryq=@^%<8>S1^|DX1q0hjj{9`p$iOIpxu8b*d0N41+hDJ9G`Az zE-g2`!bF;%<1&K+54iN|H<6wy(_8P@jbbi1=lo$;;$(4{p6DnYK3%~~T4?%8Q)v;# z<N>2pY8E7VL2(c`2dYrSg!1OO>(%LwzwPtLbo@*|x$$ko45X_nGXCd7?UCTmR z2wGBs!|2a+4-4s;jAy3@+DT8CUSug9&NzGee;etE)1$4VB^Vb@Uu!QtQH;q!fe(`E zShF0DGAIZs2nfs(n7+}*{5d_zL%yW<)*(6GrPkcAVqK-rF8T9F@gd8A_pN0z``UdZXv?2bFQKohPV99atd z3W5T^rt{cKFMzmb7Rc^Wkk(_GEigUMU3$axV~*0Y;uGr~Cm@IJD{v58 znSQ`Ydc*WpPSX60%ct*hlD-QH)0xiF(u}vJ2e?ZwhJHYT70&I?; zvtZ{=*LRmzV0;9!WC!Ei?VKLc%#2KPj!ozDly>3$!htZG8)CMBgS6muPA_SBv6Bo6 zpiK!}3ZT;(`307NYpyRKkIaK4>33ez;_!lIJFmC&A4bN1)46=5!x+DT8pURe|EACI zm3G%T#Q>h+dIg>kP%;j zO=tF(R^)HwR1{GV0BHbSx^`f?vA?tu&vAZM&>biu3Ty&%rziSL>oNYCKEYqwNcNYI zf`C4w4)W9`Gq-|(29pN2K-2X5{?ZDJ7p4VBt1zCLt{Nb1$oOk|c!0DS39%%6TCKpNc37XtV4`4m_bL=*%C7EBiil=fk4oE{q}ZOeFa`s_eyGp7Fx z(=P`~t1&80{}m`LBMNR^fX7<66xan$@PooiX!@NXX{G7DLDHrmqVD6oMxRQU(BH%m4HJBJc_nkT}5CP?-2_m3rBG3{ZcF=N;4$wL5BH)}iLnO;_dhc}T zm>{Gt!3(-qm&>spd@cgJV~0qVz()>fKLb)0;4*ZD27)F9`GjeQQ+9Uwl zs_G^UBHcmbj-bX7$ckmG$X47C%>vz#hQ;9*L_yV+XqMx8kR2SLp+>l~K^L%r%xYyu zHtPZ|=bivL_k>86<1Uar6SeUI4$r3;fGl)Nt%uE7BHpozra~(k*odWVG6N-`Sj+FXSe7kqJ zG$-Tq5C7OX>)CY~6hH%g6|TRx*fV$SR3F04D}>5}m-IINdl*T2u|>2zJnc%Z!c>KvgJ{z-@3CxhR2e zu~c9Ktu1Jot{W{aCGrg1M2J8Qz36CZ6@KtkD?9Ys^Jd2Bz0uMNqL4mji4qq$u(?16 zH8V~>7%eR(x)NMH)}Tqfj+R!C2X|yZH}69GGA)csJYWNOKnAo<*NTz0wR#~0>KOJg zg1b2k3QQoWX7G^M1knB%1!hn?w}TP16c#iC&aTNk0lW{OiE;Y;7-`wS4$xu|HmD7A 
z7?F2rfQ`{$ngKe9h#frP59+u-I66%L(q=ve4xS}wR!`@Sm6of&4cd0k4x08Ccn!68 z1Jqt_1$IZsW$Vx-5HQ>KFd~O^6UY-9Ogk7AL4_k|-!C@XkD%BN)&uhJ^rf-V;`MhR zjyS=X1#$$tKI0Wk)s8n1!E^yRm~Mcb+X%ac2;_(vBH#r!Am1|x%mFPuVb^Dz10IY7 z4ef)D(op0ASwBNWk(J4z9#jiLrXhGhH~TU(FfxH|jRfT%P&Wv)1WbWT0krY@r4XpZ zJ0YUP4_tYbOP#RW(5`nE)6CZB5VVNyCa(w z1E`#FWVT`eXJ}Sv)?WcN58@s+1r~w7;8d_iM2Q1j#e%95&=?}S1{0_jVG(!+UWj=? z6sy;`6&Muwo&Nv-|DV5EfnzQBG-oG;HU)u}7SOV;2ISC(dd2a^IYkaac)r-CAkw-$73ss_^x5i?K)$F3k?#{{bLz%?PG zqbhhL22@eK=67OH1ZUAJqDmaRb)XB}!N!8~)df*Vz5?B)ETSX`^@yT?f*>dw1slNe z$gdy>a+)FsPu=va1Zf2^aEZtPZvVgw_-!+$FG!FUM=jjZ>unBDLkU!Gb6}~rKO{(t z)K_zZCW^tzc8DmjK~6CPw}ccJ9a}-k8`8Dv5mDmetp+944iRwvIw1n-g)@N$E7=_x ztQd}inkx)e3`apUDCKi0usa?R&4L6cs91tW{u~i-@x`uD&vZuwdpBA{E# zS$Jx=L2DmDA~Qskm;_*}g4i8FsR^8t91RrM6qp3&fya>6h$wM^mbtNO;4%&x3v2>s zVbda@cBTeXJr3(Zi<>~rwH+d$lV}#QL+4T;nsDiXG>brKnGGD=C`s9%1D-#@IUZt2 zy&Jm6L6^0QfR=N!IWlJnKnnXKpun$EfJKV{C}0kVC~+#V2|!D8r~(b94iWGjxX`2z zkDH^S3LK6SS&B^HNk~VYEYO9mjz>i8vJ`niYidCra|Fd1IB*qsal{#K4QLrTg93LJ z$O$t1Yd9j-A#;Na1Fu7#z9d=N zs~!~StOB1Ip@Gg@t-t}ghFD-e*yDFZl$ao=a)U-DML=i4gGNE?&6w_pfNBa@c>)@U zKq^X*!=DF~>KzY&vn3O(a0b7&Vx9z`1FK2rLb)5Xo|61bGKq)RZgmf~Kv(Ypp>g#&&*KAq0^TRRT}CusJdz zYnslODs5>okI{4GST#iImTzu_)F8lB8;LMoLZs9H7E^$`fHGMz^cK-ft+bMc)?*a1Dwdf_L1UOK9MW|B+pJ) zNRyUiET7IdMcPwlIm~0MP>(@Q$N_t7y5bCJarO(MSxQ_2yQUwQEv;4$8WjZX-~z4o zM#^UB1uA-ZS^+-RoIw#Zln#n1CD29cpvL0?5d}tFhI$qU1!hMLP!AH+rBr}5W0_Hl z-n%ncrVBWVVrg8Xm%kw6!Hr{PaDf1h3`kJ~>NoX^3D@(Kfzm8Wc4dSXrHs&`v;&k* z9iO3;#T=jvizw~NKq(ehc!Ep>r$^BJfS~w<#3gcB3~RlK)Pv_mk;+XBlfXqTqvJ*J zxbO@SCGaYAcnyx%8Q?OU8L9LJWlgYS91Rqhz-2={xNrc~lxXEQraM4k!~`n8A=wD8 z&7hZu3ydl}7`SU~Xr8UxBwWCWdk1`1C|LF2|-3gX;=6f``g z+~C}+$OEb?eOVkJ_KgCU^Eu%L9N9SP{9Sy zZwEj_1)wy?1F4DxAbk!q)K$FLc+@J&G zK*0`4Mi@a-ug?f>2$q1Z1&~kx4Towltq=i~Q>>u#^jM+icPW4t&?s_&Pt%9wB1T6+ z1w;i1Nvj9tM)`J2Ylr0z?pJI*9LX!Pd zNOWxGE3nst3kuMdC+yLg5Aq!G(Fu*sLZV~S4^;X={4azaodqO&yKc@sOUr z=>m?rSc+^O=y(XRLtWx=wIDZx)VO0y-u+8qz(;f4q#r42lC_Xc$3!wDDfZ{}4VLYw5#r+QEiolAkwj0sepcYw}Q0ByG6f;3DZa~?fRScU;Qn84i> z@CghGOpfTDp2CDY{sFRt9Ta__dv)i4;)l(QX$GkNoW8F>T1go!@3;bFy8^Q#N?<^a z1ZiZP&R8fNKAxoF8p!lw4`tL$%4 zHmrg4fms9~k^Th5;;QL+MbcvYqM-2ySlxL+bozr?($XxVpzVNybEU!wL_Pn#mm z!&o)_VUe_##We;i22cW3V1*`7cpY*A-1y~DVsaD!$1l*aECy&by@VM#8C6W5ULviN zumaru--0RyGGzzY6f`gI0NYXyl0N{JXL4Kt^#sh8BPd3dPggF5+j0d}3S`O+uqkM^ z+yL7GY8pB|0LwEuJ^&5mAvI2(pcqv)eSN94PAtSPUr?n$w)_B_f@aGPuq~w^`34qn zh$%3G`baR3w6GutSLt+%GHD%Ch!s;%r9ei^02_g3#S9iuq?AmbStf13cy#)yGHGR^ z(`^YT-6~JdEtl340wr8Wc5B8npwVaWU=_HV!3s&C2WCra$S;7{4>|#fAqz5Y!vGpP zS^#!J@pOR-X(g3nkV`i}RkCX^tpIJOf~BAhV3kGF!$2yFKq?O)RPI1lc>t`kaQYmO zN>CN1%AeA2wDqkS0`~X&& zH$AUXT8S}l`ov0U5h<7-8dyOAo(t060Xo;o(PaAmN@;O#u*d|E2z1Dp6)A`&fHmcS zG|d1jVFHg`f>N8~3PyqVLJHuJoB>vrJzcR%T68*pl{B~B0Rp9bmQTAhn?5%3x|?J$(kp zH3+o_z-rS#YEK}X<+x?-Gy#Qru&>r46rTVqP6a8xfTS4Vh;<0H7r<&$Kx%Iwsf8{0 zU~t@kP_jUZ3IZ|4ys0HpQ@R<*|vYJY&$`h(Oq zuwzMUl8(m_iW}HLsTka?>0qC3*d{F^ae`6c1GK#B0L%JiSXBGc{L zq}e3SAtZNzCEY<<4?wimpGU|Z0L!|8WKTfC#_4Z&aflb`5>jB7A9nn^a8li zeS%$)2_yz;;a>pl*#V7vLXHyxx1zyw&2EsMx+4#GwZsK>@K7QaMeLvf;XCZ0y)Pg$ zK|`ZA*cI3vAAp>v!2}xAxWS$!uoK+;euCjP#~+MV3?D!m)U6rcfQ(_+V0yrA#`FTj zX4GY9U}Ujoe8&!HEWBU`9dHS1EY$x1nXiB{CJC<6x5KO!C}Uy0=Z`gSP7Hk zfpw4$46G@+2gNiekpDpr+QDJQbOhuecwgiI$UP@OrZ}pB`d@JOoIy1KWZ4CfGKhOl zAiL)R*gcLQB{#rIkXz5-4plwl9S-DPC`hda(+v)AC-ebS53B_M8y0$k;sTHskQW|s zm@&Npd4bK0=>^E~A3)k2L37Ot?2a!uU~|)7kagGdfV4aQ04ard-~+M;etBWKwfL$grq`ntV24VEu6@n0;$qqYTyKWsso}LTJiIM#uFebXnQ!P2Q*2G z)OT>2G0osqU{_!>W10e@;ZZUHG~l!Vq~B3df!%QeXO_TQ@T!C*oXF`FWS!#*kTQsm z7H}f9FjsJb3R-)RyElN9FgebEClOGT)ax^DL2*4utp?Kui0gMi^`J+|9uyaVw1A>y 
z2d5d+36K}q%$SaVXt-AnfV^@6WSFBCD0RWTa-|-_B*z;dbr8Q?K=#WGuwU#zet7^^ zf|>oEpqK_y3Ub#2PBW%ADDHUya?b~lA@z>>pya{{%YI)lOmO@GQU`I*2W0pB0K3N) z3j) z1;Yf#86ag4_e|hIit8C%pt!aHxo5%jjcwAR)9YHKxp@|Vg{?utD=7=45aJ=*YvnnX(^ryU?EeG&<(JQ#kd4ML)~)&EN23edjOH+hRZzw z%Nc{@UV!DMzi*Z1;&}lUHUbHMfCx*qNptah01F#}gnxjA#rfbS{Q%1ufaDsu!R-@q zL4kMB7E}W_D4g{{k{wV<5x8UrSW*upIRPRmaf4CdBh>X1z_Pj^*%=U7yPF8v8DLo* zkn93(aDqZ~2gL=xK+RYHR;3M6wSs%PLA$hM{R*&<7D#9VvPMTnfv-?q8^B65K}vR@ zC}Dsr*#TCf0a9`Rq6E@&0~d6lvO=Hn2sd(i1}O!l=lTQOW=v;L2Gmc0Ot}Cu#SyfT z{scEHJzv2v!SM!286-Vl0No1>tKM#a-J=dN=>b>?6NY=9aDxh74Fv|sDl(8-4Wn$ay6S7(-#m8FRng->}=ox9Rnq;!0z~g8&>kR z@F4piWRPPANEyWS4baJSaATo^2b4-WGeE~Mr|^Kn2o^*jr5a2Vc)&q4 z1F8j4yhEaS4vIHHT0r47gU5_%1rMaAS^}bB;Z@JH0MuaI05Z){64aXEfd$wW43ivp zfRsT3Yy)zD?EnXuD#$Abz)Fy#8&YQ;LGcPmsRq*l9>sdlq|ynf9)w>Ydd{G@0Hg=x zmlHf@OjkgDU^8R70CM~dkaovgklCdRJh0Zo9Sq%$4?rp*?!N(Xe?4TF<^k9PDj*NM z0IOkge1g`>e}m!ykWvk%7m&F40M&x-{x2vl0BHfa{{xR1(;uk&f7F9K(7+2yuD2l` z_yPAo3omkt1!;He0I7s{paHZ|5tiRNctI&v8RY&6U?tER!BJS?8?+Re09K*|QZfUf zWI9KWG;jS3upD^B%mRd*0;{7aT>ApB5(SX<6(~x?;7V42mB@pXY=9~Omp_n())rpm zvzx zRB13>;00&&8xYkPVRMHUyuyq@0lan=qU8o}JvcxgK=u59reRp%JmFOW-SN$!zzv$V z0qFrPmHEM|zz$l~_=OizK)m1uCDR9xaBkp()Mo6CFL<*+Z3iA`cd3OBIX*#VX)rbL z1%g7_u>+#X5f(YHt%p5)$SE44r-KhP*-m`qOz4*upMH9xH2?JXQ>8_xpYE5ohQtmV z<{S-1>`VYV3^R5n@PYD=EGTwnfR&)c&Kwl4f>dcR&ENya&H{*PjM!Piha5W~EufID z-vK(}irI<*bkr57*RX;Q6x0j&z_E4!Viu@#$p;QAyJW7b)LGdwI6`K{q z2T1*gBa0P74?iT$ zH}HcZ;s+!mX7Iz(d;>p9M9kqw&Ob1-Hh|1xwqjTVHERLbtQq{^2-?B#4Kr*3#4uPC z?cqnx3?Rcan0D}!6+=AJA4~;hh$3klnFn~{3L`%p0Q(d(KOEo(;nUb4Wwu3_o)I2PxHHI>8T4$QPhm(Cfo1=vrdgHJC0y!s!N@9$4DHgAz*8+@RE( z0tzGzrW^cZlpMRJg92)*w7;|oG))sxZkRs+yACs)9)QD13KUK+z)Db3@tf%jXGjYx zynuwt2Z#cABM&xF{RONl_yZ¥3^h!VVS=e^9~!WTOVt4>D5=qZDYG&`ahaDM_V4 z0F;g~gP=hGlvE@^LC^tKg4wn25kO8VAf*~i9RlF;a{`(cShk%afSfrbxfP}_m?_O# zKS98ZX^sFS%YwH`F97Lxd<1Da%@BaK{g$9>XLnoyQi-v)0ql72(IudRyOGza!Nvi$ zAUVFCg$JZogJ}cA^*g|3B0LOp{T>wSL0Uki%MJlErXx_t9{>#np8#ohd<=2?0l4GO zVCZ(d08**IT#vdk;{w?A;-H|v0ak#L`tBe(oP`IZN`vVJ=)@Ct#|IGA7`4_DELt82 zfXlHLAT9Ms%iCZH^9>d~F9bk2)bRsI50VxhXt(AIiU%Ps`v5+d8kD=Sx6!~WjHf@H zB`t=uUIW|~u%9a}INffxv>)-!fFEFoVW#CD-~bf^7cvasqO3s>l)yL@t5ltTiY;6b1Wm1tcIhplN|+tSxXY^`I6kM9&6DK<+@(0}IGK za6JeuJ0Jmh08I-lAdjE~B*c~jr~xSm-rb2OAQ?rcJIs|XgaoAq-k>}Ib_Qlpo&X0W zWX(TV339g@QVU-}@f=902Ga#WaM5}Lq6O4uXAyw(!S0~=3#1xUF4f-bWB(7_<>06U zsnuZm0dabR5IEO4E`YW z(U28jcVGtC8X@F{tPmu-2!Ttf4G=BJQN0D(hddzFprG0yWX5y=6hvHROnX2yivnA= z8I$)8P(Ymknc}Do+KUJes52PGI9>oLgT(m>A*3=w+hx`X0uLC^>xXfKVB8PgMluOEOs{Q_iyqYh~P zR0uZE`36I`;|Gv3u&4Q%ULgDW!}P|b(n^p~LNx!b>Ypy)NS*W{faK>NV5eaE`45V# z1)zTZA!NqX0zN1a9McWLpnM<<8gJ57V0UZ~hWWWi7&&W!bURJ}DFgdifT=?mDW)d~ zPyf3NN2WLf^0YEFG6f_5bj4-TvRH%m1IQM2YS`k=4=SW)fV}Fcr@$ic4m?da2PHJX zN)~{WfJ1|626A955C$cCK5(+f>ed+(rVBV?TPh42EGD{`Fy#Zebp_ZVm{q|V6gPvE zDu7n&2~SU)A}#F9?zjV_&(TnU-Ejjv*7soOaXbK02Jz<(WPctIp6|#&iXeyg~PpgLYGaQuqy!A&y3%(Df0p#N!AVVCDK~o-ZAOFEHz_9_e)e7R{AHoVg2p>0yfRZscH-YWPoVXQCw_h)< zSI^&~AOPN|%z~FcUyQ^FS1ICjBEu*!}ei*-9*+ z8%YrT1O`|Wj0L;}(HDA47ccmrSsu^^K^D-KJjgm4*mV}5*02IIXo(DqJ|p;c2L%@J zx)=!s7SMSCA_^>^BOp1ZPh2S_!3MhFK>&QW4CJy5`wI+NYZciX85CJTPG?2DWk8F>l^7M6vII`@PrqLwB{^MggR~jr=jovvq|er0I0-*q zlfm)CN$7cz4E)>*42~RGj(3oyuV6^OK$d=hA^ine`VEG3!>MW70vb#VpdCpJjz2Ku zdrl#_t>x7AGaIFkGfA$vISutZ6AlH?X=eiWrmxu|t;KZg=JXp|q}>_6Ojp<{9j$y4 zT{SyYwGixV7xw8Fc1bHRJ-<2qz*ew6j&0IbOs{TEci9H!_H2`mVd}g!{na*UJ;_D4 zrU^K5Ar9K$WmRAW-EemfO?6?|>P{4AI(ld-|-M(t47UZlk#b zbW<`1=sf+0JEgUl=HH&qy9=h338YngdfqN+d!~iAr?1~7Eyj3n`mtT0;JQ8i(=M3n z7!m3uPk_wVL-HvDG~lMM+byld^z-)gTf4#GqqIj_nXUCB_^Kc8J<=LJknRDC5*sg* z0z2p^2FRUHkVUcrkoh&x`B9)98ys2OpgYl^+t5LqdO!zBfij5_XrHGIc%Q0S^ 
zes7O-Cgb+$9($!XG5(qUd#|(t%E`7$((?KEBCkEqD>4f~|6d6&KK z5O9=q?0(-NATVkBkA2c*j7&EhrWYKLHe&kJFntM#Zf=}@#fqA;zA!)VxXWN?v1TJtXGJ+0=Vg^n1 zfUZ6h$Wnlus4dU{y4VPG))Q#^Ea*6DX3zz34%y)2f8l%!ka`KwVfo;z;Xs!_379h* zfR4=8a|CUjbYxTn4filPPG8ytvJZ4l8j}Kxf)A4;stX(qvJ@Buo-sSh7do=L^FlNs zlrVx0-ev?{(R6_UbXY4R_em=z&UzNmnNuI8KR6_<#mF_C`>?bfBiD4V!_vl#yQbG2 zmiA@bJ^jpKX&1H~v!)3M?3yliM7n};+w}e;(jOW3Om93YeL!f#dT8mxp}^oMkma~z z{q*2t(uIuMwjVtvJ%y2R&-ADh(jC~8eK{c=!FXW0?@4JbiERR)b9N7~DzSi9V6j*; zO7QS-D{xKkKPj!p$US}ENofg2uIblLN-Khznh<{~u{g53DX>iEI3=yAwL=ILlpLUP z6BKuB3e4c)iw!~wjG9a=iu?*}jt7LY6xbX$OpiV#ZOX_!ec~x;B`r{I6BJ|60u83^ zgCIm3=tON$=aS9w!}PnSqz&sqof$UA2|{pt96`MxHpdPjSR}H6ZUSf!GG{&@q`(F` zKk9~%0-FMx;|-xKB^EQL1t1=a8Pf{TT>>kFvK%D@suV!3TOgFB#3QhsQ-Mu^#SGME zU~_ySl;tQdP^BQK0FrQIRAi|K9Z0Ond_zb{2y#+J80cVP1p#yB86fjO{W>4ul`< zNWTR_SpqvCNA>dZ!~BKh-1#6)@(4|WpnGmm^=L3X5Q1FQcVl|g8EG{(7J*GHN<7os z&PeCzE#U;Y`U@-KSRe@=L2l5!Rv%auSrynoH>iV-A5dUc;1^gjUE!>>JO_&zlZXO? z0@rl+v(hTTtT|>(3_Jp$^D?+Vr4)-}J?P*yHb>BsQqT&(1||hIQ0(%6wz;3+P+(I4 zot4A^iX!k@K_&%e(6qI{7d8QiYwk?ncUD@QjZ1@xMTuql?X%KKA}ktAEQ-7eY>pGy zKxqoJJfC~I;5lh^9k5#`uqp8bG(F{&00$j$(bOmKm67`C{s@lxF9WGze_*?e9APN;|+FL z-UO9L3T%!S*uhzoi2>X=0>uGB0Avzm#WpCBL6$^=t2}5-X)v)Ua5^%VfDejx>|o6j zKv?tx$)b7zkXJt-3bBQ85B>k8&ZJ0jeqO__pXt^hwBWM*c2Rx(?pz8#O^bXM4M^La$|8Nl{q#tmi zh4c+hw2;1l5CHlA1jr;Fer`y)*Z?Yq*c=-m<>CtvkHw4$T#n5U0^Mm-r2s0hUI=9g zZ07`}EpXXa&)gscJ0=KH`0<-Fe*hITps^h`@Cc(IwD5ZXFZ^x@folK;GtdwpIKv~^ z&I&2NKy^6W_PJ9UKxeZ-3qNkSOLo*l3qNo^?|>F$4MK`6;A{UBm>hK+nH(8FBWoOj z4B)!}*&JVhSCvBY7ncI46hlo(4P0me`2!Jdkc9LBE}+B!3dtAXkmLoWq$#W*&ET@3 zfi+8kU0~nzb(f{pV=yabSi77Dtgi#SP?OE^2TPVfKR+npOyEK;b-{`t>rX*0XVYZv z;DQ#2bEX?!krtMn!38dn7k~`{so_@;Xn`tOGClE%v?L?f^y(|pI*bdZuec(u%E&$a z+!blndM-1j4WKi-xXhSVfM^yorVCu4;umfZo8tv8P=;E;l_daa8hqeFPSo5Wj~@Ul zdBByW#G=W}04mHFK&ccg`hqJ$?&`IuRmm{~yX`T_C= z=o-r1;9~vH^oLiah3kKCDX}PUf!ZWYAZIpkXMrvraXiBX+WgEeFcn-5LzXcrFoVv} z0H>23T*xKP0&Y-LgPJ$cqHP8@T85i|5CA0;$O=_%Zp1-qkV*`6+75Fz9}5d31L!z4 zHpdm*SxTVV1;shY>r27<7#+dAWCll8H)+(85!Ib1(A{}}I~vWQJ5U`8S+WXBa`2k@ z0;rziH)p;93RBRcP&V-DQ8rMJDe)?>3w&UMmYUOjuS<*TKH*k`#3HC>bO6OCixP(; zg97LtLr~4A$OF!@54aUM6u75Pye_Tf`hgpi$UsY15oO>HZsdf<%MChRPLufqXniW^ z(kdQMIPrq+B7jC2&$JuTV)dy0U%-PNPdsSepMVem#}jxtA;^KCNfpNpTv-CYK}#ph zK#MTh96_rsLFr=!PnH6wz)}uSX5GO9KB$I4fy0_nq8`*(W&xSBfybN)6p;sbu%s{O zx=Bd-0u6P5iXv2}KR|c-4TMV}MbQO>0La1+oZhYxffjs{Z) zpAs*$A;<$MQ9#SZKo==Nv%G*6!v#K2gABAn%#3LP$S%-|Bv5oO;Dc4^Px!#+`7J{ew44fmeYC+OqtB>`Wd`XqtWj z*3RbmgAbIZxe;j^w1^3|_@5W4`GcBLX7Hm$-30zDO=ejJb3{X{10(_w00nFV|8%e0 z()#t>u*AIp)bClrpQV7XT!U!^Kcr#9p9RhfphZydG|#UDzoS%whXWLhpc@(a!AV|$ z!;I+w$Up%rh7TYbv;YMZj0gB(!Pp|8#LnU{{l{%-nR@Ut6a_9&UyDV7%ds9*sR&+Z~d>Cm;ks{_X&;5@6>B_bPU?Nr76t^%_jz>;kU0 z1*{kz2tfLM8=&+GP}2vr>;UA04FWJ9{1HG(aNxVWOqqTNfZH~pl?UvO2S6QlNIS4a z5M8YTn<-O+pgHpdK?QSg((VAYn?S1r*c?GAhRv}iOfDizM(F*VcFgr8@IYMT*L3bl{Y@Gi4uCy9j z3Ht!u{2K`4pv4qI0AxO7YMc#JmmC4Nw!i`Z0y5VO_QwWjP5cDb?Rz7LII~1T1$0>& zgCm1AqXej{2^m{r(_{v3h5;?O2gT>~iC3h>R6rBk;CrCKqrGgJ%;0@23NX!}p)&62 z|L#f4DP4dMBQQZ6eF8Fyzy!KMf=!e8fY5Y7DFKn`>+eg;OwYS7%}t691@;d@puzC# z)34o^){{CQpva-Xs=xqhfiQ0Xjf!nx%u-+yn8Gn#;em7$8w03KaAx|l2htjhZ>FDr zAZ^dsJe~icv?*iDbk~Q{?u@O|dmlUZxEFHwyIQ`XQ zX)VU>(?y?12QVI-p7umqf$`Av_9xP|?4YKwf+ENC^G~Ej89AmueIl*QT+ie%o%gA< zp&1i1BRA;UI2I)~NAL|x@U{h47U;}YX3)WkYzpAZ!L1k=KtgN^oYM=QN*gL}7l3XZ z<>lm7-~^qG#i_srDh#<4I2=W?6gj5fdn#?Be^5Y?4HS8-j-bOtm>fZ8`hq+w&;S~H z1s%%4HYv2as|tP#lhWA76GspCeSSa>}E_qKsJF|_pG4ix&o8H-svBnOKUT3 zoG$Z1TAK0jbc+|#vQj&5bqF|WIQHM_5D@4SCX;Jz>Wq>tQ_t1qR1GuyH&FP+Njomw^Fv7BqM|;R3R{6DaDy@^_HsuOQ^PxfM7B z4hSfME+Jz#W161zQd)`e&Gd`21+}(se<^Lv%-Asf^BZYz#+}pc-%9@!KOz7s)7cc* 
z&6qZT+AyFqM3@8)P4|B%J+b}=3+R|kfh;A^&@-s;Wpf0b-2ob0L=oLG0k9AU{l~TXKnyBQ9$Q&fG*>H!Ui7V z0fjqLy}(H}kU3kogr7a-m zOn|OT1=WTfYzhp}5jHkQ(7+axzxlDKUZ?mu#R?>>0w*EZ`Ubl@#E_2kKyg zau7&Q1A9^y==ual&jQeU=te2MIC?OaafNaDoEw$A9KRN9Hm` z(6tQ*7<@su!|F0jn$G-5x?sA(XK79oM+ODXY)AXTwMx8g3cT$CMqwz+->L<(#(n*u91L!1D491d(6Oc&Teql%Dh#%RUx2Be=6yyYJpW>?tFn4YjhS}YGha-g#W zm=)L^85DdtK|Lm2hDq!K$3SfvP<~-IWBLM8%?3F*-~*Vg%kYNXk+IN<;Rnb7(BLc+ zsA1&zfjvv06Wqn=;FvD|MOu^>BnJvhP~|;gy5kpVAI6#6`@Tr4F^czcC~zont~Fkz^wVFZ|FZlO;$fNI@J(8B`qOXHNsN29dwiGnWn^45ecBIcQ^uLo z&-{=!Vca*J@u#%3&=Od&1)auNvwphXPialYebXa;N-Oj4KooI|jvLlb@A@fSqr6p= zo0|cYSsZx;wu*A|gV~^>z>!s8u_$*76C=}fKPds3>8`(|ouu~(C^CaG4il)pV=!a# zP+)Qd%?~jO9Gt%BmvlVivFS{|rI(6bkY{4AX9dm69AL{h@t`; z=(q(?nF<;cgw*8^K)C>P$Q`&Y|MFXUJr}zIlL8a?;;QLu|44@fegqZ#hTwu9G*!u5 zrUaT}t}j*qU%|>)D4nImsLQ~h$m9rW7qAzWC^CZ`>3D_#bb2SaFHi)kemOwf<(U+i zrw9C%)@FP&{r(C`t?A4DN((bJb4*|JPg-yK%fHeG7+a>V`X~LFwUuM9BJcEmCK+{R zMg`vKXZ}m8GV)IU^k3S=fY*^hfgyWs$8yjivkVGMg&^ZWvr`<|3cLyoYul7~K~s2+ z^~Hs2o&Ga6Pw!`z37@)GMpC#9H0sHbWz5I`Dx5W$85H>tRxDtY5uL8VD8s`EG6a;7 z_@>)3%E&SEDez8@W0Y}ZKZ-3aN&-$6Z3+TT4gZ*%oiy57 zoD4c_88{Rer(b*|Eipa#k+hx!uYzEk0-qCOhXRA6eev3k7A0Pg9^UD}OfotGO&p-c zrUj^`&}24%_+Ce!SAh@gU&sFpg#yz>6c|C}jsVE(0uZl*mTM`1-Ne_Xz&rgFlZ+fQ zuL9q6Ze|%5K2+V)W0_@S#lUI>+LZVdc#)hk{jHFU*7Ua5(wdUy44^`sS3v+CWbY#p{WK>{K;9Z`j$N;|D8&VrJ zfEp0s#+|?+q^hHXMS)ELl5Zw}RLo$3R0tDTK*jrSP)h-H#u=Mq2Mg%LaVCL@913jU zbEFx}KnIzzIqm?dRA6&lF?|onY2bzm=*(p{#|;hJE<_#bdHn4&QDnRC-_zPsYBdDQ-XeG1hGag`7;@|}}D0Z+SH@6k)p`pRV zt-u00FBx2Ip8y#TI%f4$$#dpvxmbXKZr{9G=d?DHF%YJ3X0G#)y%3`ZP`%Lmgg64sdx0K6{=?fyE8f zv|v&Ibz0wbvWZfj8#R1i=Q>QEAx?k^-`1Gbl+)sekWiE*uxV21*mCIiFtgO8+x#gv4Q zbqOd4DF}8b2~H0amPuf2n7&TZ5;yNm&Lbr^v53WDH%3cDjDYrba&U9JJn zZS0WlCHN!>F30JwMP<0^K^utl85gi3CS5tedwam$5O!p3pw0-pKI0lxrL3S)D-EU< z;LZp)sFw}uY>Q+ma)SCI*@_$rTnYlqvlO|YeG+y@c`Jqopc`2ktr+&O)+?|p@R~90 z0Hu9CGbZqrt&mO$_|{g?fq9@qWkEN#zGMbnIPixRG3vs>BMM3`4AzXGl|1aGOpw#_ z!21w5&6pZMtKY%h8uZ=+y8@>fQwK;o0n&p3bsyNlOMA?iCV<3efV&Uu;Q4zd0o3k; z0vo80BM4eS04`L(rY!*J0v&3{?g*O82Nx>r`iv{42aC%nifjPM?tq#J%I{19`=@t^ z%NQ}Pn!a0HMvq;wM6ek)-9b`Drv3oPl?T8brju--RHMKK zPKGBy;-D@YyCdinJkZn_tbf3+&v=6kv44pJH2)6TgvqAF<_KN_$nFU0AAk%1^$*w- zz~`4;VKZZT0%C)<+k)gkojB$o1r`NRa^e8pW4DS8)IR{LegQg^1Ed;suo#;e(+4nH zm*EW?q<;YF+kt|F-SGn(sBZvT5CSR_83h`r>q^OpX9{+JS6zV$1gv>fff00pqXM&| zKo++l1A`L-XdWvY+!~uE0y@8xK>-vO3ZTIRO=boK!4_^%4g@W$*#JtNELIE$Kt~`8 zPG2h}qo5!FvCf!*0kudF1l^DcDoEZ+$w*5Iv~Yui7&P7rGKATRVFAbxf$56UGV+W9 z(;cN{qy<6tg4RzkSur$#Ww+-_%g8V?HLy?bmXT3rT(Et;jEplg1V84pjd zSCsMMcnVG?0^6ovRFrW=@Z^+ad=R`MB^g!5tJ7yF$;gYYWmjNOV9{g-oqDAJS^dVn zYWf)^8E3{*(*=}eOxV&L>x&)br+X^PxH8_E-mffU!MJ7m2@r4VbUqcCY{sqAt5jr^ z86Qrcry_F_g{L`vtE$W*#;wyM)MT_6A8v0}lbOxP^x@2O1r3=T#`DvAHDnAJ&rjc{ zA!E;YVfsG}8F87D0t(8L;rV0fX z#|H2nQ>+4~r>DEfXix9cl+k3oIDLnvjFsL^9>gME7Dvc6S1h2*wgeuc$gE(@a$GxY zx`LLBG~@p1Hd-?JqF>lyTA37BKv!)!KKs``y+KPxp7G}N1zIwaA_ya3Dpt*yepX9H zn`z7O>A$pO6mTnCeRR5qwv4<8rX?p2Pp{UNQ2}YymXQ=i*ulsRb;hSd)6Z$k$TMD= z{!Uw_iSgF-LLC_i#u?MQbY$cik4|5uBcos@k?qLpjkrTtfeAcW!60xMeA=>(5-Vsa zr=vm^Xl#QWv_6ngfm`7GG+l6H80yM6N?s6vc^|Z_^9Mtg5-VuvN8sf2Ze1BA#?#Z+ z>B?9!UY!0?7bTcMiXp+w4vH6~U^kgFa@F;-C9Y`z4 z&`*cD1RR+J7BV9{8rjTC(~b3IVmL1___H{GPSBY?LtjRf>A{Na`}Jkmm>922KV~SS z#dvG_M?;yJOcSO~?=g}w zA|^#(}Bf`1D3o88yb6 z)7O~G$Oy9{QaBrUgiD}t`gKzoS*DJS)4!R@Br@%qHa*fzMpmIsScw_3)(&*%8t4FK zP>MUjl;wC}TBm>`)AW57GLq95n9FcOjh=qpOok6+%yToDM5e>jrhA#=HRdE%W3(WS z@11_fTt*IL+8=Y7B&OE&(-SOYWEtD0*ICF&GWD#VKGQ-*o@v7R>ANgsCNr&HIo;n< zMuPGE^bAXxG{#%g&s)kkFrJ?-U?ro+cz(LQm5hhP1py^?h<`vWW5*W^SDzX%lI;0n4W7bBhPqodat#NBje5K=dB@G5oVwQ zo8t^fGW%mKqsQ1jUE4-Rp7GsuZyOmi+{({PpT6EkMv?L6^ouqq=7Bn?AoI>lpZ*V_ 
zfbrpUD_a>|&X1sU$1JdrdAg#lj0EGQ>7BMRO^oNK^VorNlC7PLoe3f*f$mg0fyhbF za?ZnyDL{b*T*9$HZoUziF@3$Ai~{57=@;x|Y#1+2=e0-4WFT`EfD1iv@rabkKr$~F zvK*HmnqF!zqs;hj`b>KnYo@2ireCv{Q9vsW@f&ew`g9cs83m+l2r?YM9(ZuTvLU*b zOVcMg$W$|)pU&qfV+l$Ikh2xR*9L23DX~G5LA)clM5}NF2f|E88B4~C(=R*9nCsr; zfz9lImal^n320o(AsZA1NM@aYgn_w}j3VQ^>5)#5Fqq~fBkx0k2?*~X8I5G;rRj{$ zGLs>-m>x5m0Jt7gpZ?ZaMgm%y@qsEcaMUs?unXLpuI(Zt#W-WSn+w=u=`J#Mj2EXb zbCI!N>zO@GK;Y)|d9IMuhqTG$2Sb+QFxh61XInKaq)|u%bl?qHZd8W^Im61TI#$aYaR7kU1WH~YjK$S5*oF3walHx&{ zA(fEn^c!w6K~^UP6j(u1dK{qpCqP>`6j(tSm`z{?Xk{C#29tymyBU)LlY;^S6ZrN< z1y;}v_iO@3r>D5fD2PBSEd^Fb3vhX$z#?#E`V4m&RmRKH_qof+GM=A)$6dw{lzKd5 zoEa}n5Au-FV>~^*(L=_T@#6IT9x_%SPxX4qNJu^8QDT9XFbb^T0%6_p4gp6NfrZoO zd&-D%++fIZWCr;Sq?+j=&-5No8M*0Ao-!7Aj94|b6PFQKEPM$nQ<)(aO2bSAg*KYQ zJ{_9g<^`_LmwCw?V!Okm$i%}qePe^1H2*CgaDz+%)TCtv1%|gw6ywe5pS@-D7(1pb z`N$}PDkXOx8AYZu)2COn)#(Moe+n(GJk%%?p_k!NBNvhXFJl1)kx2GX0^C zj1<$U!_)tQbbmTDUBOqzmhsl~IA0kl;TfP~DcJ1}>8dk0(PUnjPUeYe;IyIM1nZH3ap^khvQms6inYZQ$}R^A%7WR zru&DcU-y^M#;R$$b%2aE?<6wrbS)WlX` zMhYz!NC>eCoS&W$BxBEbe)@tS8E?i5)87Qim_kaB4sdDDCUAPXez1%S$oUW3WJK8= zK{55E&637KoY|odS+L(+vYf_e!5P$j4|W+ z=?UR7HZB+7L&OZAt=X`)2I%w@b`7QkGo}pCPDh3;B~VxF1p{au9Vq>SoB+BQ43utf zO@9zBBh5Ht`k!!cL&QGX~vc5nvpW{ zOc%~h_llI!XKbHd6$x&U%!&lJNN{KeH8>){EfR$&6!Ss8SiqR&cxL)^-zc~O#)s28 zqhtb2?}IN@;!$J*ZSdq!1kJoPfNs%oTmq^T8aY4%DIH8o9AIaHFZ^*_GJ86Iw9E_+ z@Wt+|0#Bx|kCqAMgg2?DD@K8vHYzbPzCshu&Va6QVgXNYZ8$r2ss$h=JBPq69!OPTVaDX3zzUx1Vg+3e%`PxwdQ7a0 zFXQRyt72uWcrFTn7D7y6RAS?0nEugEMt(YfoQycA9RsbALAC!3aOPkIRkKLV0FcZB z25`47Bu++8v0WI^asU^FN}vfZP`?svzrefci{oU>NHY4&^y%#JGK!$C4AeOwU!yo@ zx^ujYt`gRU1hTqI(YrU?j~p6;J06Q}fx zU4ao48K6i2SBZ#V0_}tZCFApnGOCO_r+-b9(PY{(XSza?jEu>aIiU6mEV^BJnLyL( zkm8!%@c>8?vWTSuyloIvvLBc;y&y?Ol^f9jW1L=?CL=X{SCWjj1d^5xaCC$Etd0lf zO#hoCBLgaPrITf*L4B!==}V}Wz{$l1oLcf-GD{@j~ zq@)n?jG%Qm7r;dUqXLJ(t?3I=Wc)++gC?UPwGpVi-7p)zHip%4#caqPH|7Fx#pnnc ztzZ{82pXPZaZq3dFEKV_DuC2SpgY>;P7_ez5STLEIaS7-argAnL(F(mw*aJR!3%tX`rP_DB(N=~a-cSx7fWjr-KFI`4c71A7G6?n(2 z!~_`?Dzst{1?{W^O>Rp=gr;9imyr`>R$z2|!w5>nO03?z3<}KCGGt^pS$%mKxIt>C zt7gc^D>Az(fKKQC|DVyj2o%EX3ZSi$%q5OY3QUg7B}%N*Q!`{#Bw0ZxP_R0Jc2BTr zFfj-+GAOZv=4)7|FUpWnVrLdm2c_ZZr!!;}L|H)-(X5V)S!PUc7(wH@FBlbAr)A1C zh&8yb7$k3_Px&t;3+3*1(Nk27&d{d2?kn8P`uY&y{(? zxPH1!o{S#j!|mRAGWv`N?y7tlLlE~)fs88S!|hCkGA2w4^E*2Q9RGB53J5F_I<#~J-C0*>dKrf)Bo zQRD`3xD`1aAN-s){c*XBjRd$RVsSLc5_kgd=rDo`1M>0uQz>ljx|XRVY8 z<2@+=9=-)loeP|wo>3`dz<6u=%t{#r#tYMTRmwOq{+j-`QbvvO^mLUfnHrIsNIe5) zN6_HW+G*3bRLK}iAY_<9nFn$WEvvxJ>Hn)_JfZ9o>jHjoE*2%ar-k3h8 zPR0;q8mPg<3mU^zU{qjnbjTLC1!{VGF)f%g-L_svO>qIJyuK-*1j>udg`o5U9`S^! 
zzAZ4FuTDmC`rLY%OptfLDIC#9?b0`46$r>qR_H(wD=6Y`Z=c*KW5dYwxncUnCYb|_=ci9< z28Z0WW*G&>ThqTc%cL{jnjYUGV+4|Gk+EVtKmBZr%vxBtqQ6y!mr-K++*TPyfw$}m zOp5HF@CFTzEIT&+T&s*8C>%inGktxV4C{1>HW@|1ZM>j<2l!|Qkis*krhB!?C^79j zJUy>XMhg~_U=7S?rguV`+YjnwSf^iZlQA*>0qSmmMG^FkQq!Nb%UCdNm76Zt zArm7EGL_Ns1Y?#GA7}wGivpk7^Z*SR8EqxdDh)+m1s2fx5#ZCT1VEJ%iz8D>mJ&Ot zVIeGV0Fo`IU+<97&XiDM1!YqWCK)9*@csnQ^~o%b8d*x5pt&cI4na^;1e84$7(uIR z1x^Wovb70#1wLpVhfQD`s{(@pC#W^g3<`CNECmtJDHsMyLIP(%N5-*eFj-8`>y*(F zamZF+R1g$64>rOFV#M?foigGk4xnZ8pm`T1K?N32#)3|jLI%k|!Kabs$ON@K1gi62 zr;NCKfEzdpL2Wyb!xgv%dWAtQjDadD?2-{y0l6o_j445Z1)A(2V`-4`suMVJ&gr?>aWn7XfE%u?VGcnj-^xq=!2$R>bN3J)lyoB+4wdBF(` z6hkK%L1RW2KxY=PI9`~}*DE7uX2x^_6#EYtK}nLu@djg-0yk*wwi3I*HPC>`3-GnB zut=d$84E$t3p&q0Ltq&*G*^R!5P{DG@(v#?@E!Mn;{FReNU7d`X0UclwV*1(Q4?XJ zD=(u0%k+Xi84)Q^{|)5f4~$s?kmT6|xu_E?Zz2IsWD}Sm16m+kpq7AjvkHLQyEpn| z)cCIOfO5eam}dTd8AV3+>4yC>UTO#jaey4p1=>KvssPQ%VE=G(E3iyo)h}Z!3Qd-) zIG}d9^0F(iO#k06V4Ez=iImZ`FbIuXi7pBOFK|*o|Gi2BkY9=UZr!!8KQRaKk z3l)OsFr6x+2g*N)&K<-&u!h>HGS+;M$b#NF$}*jCx{S#5yHjQ4Tv5slgf6HLK^;6$ zB(Q*5vMiA7djp%Jrpt)9VDS*h0XU7~f)_;yd%-S4EmIJZm?nYp7&!DFFlGrr(lso_ zfxHL`6GWMUFlf5|bQwv;_tQP6%NS3upDv>kG zj0zl%7Fhzf1VBwR;pxj}%b0>%S$Aj4s3~B!vQYHgo~}GcriAU30F#3P^Yo3Oa>CO$ z%#qQDj_7j2E1Bs9`}icMOU#v#VeUNKI(=e@yzq3dxia!hy@#h~&y^`*oHqTzTp2ya zz0>*U$!M@INx1I9Dk%NNQVWMpc1x7}*7Obat( z-}L>b>1SNB{laP)Ek+1ebGph}nJmU7+xyqbSb)S% ztdr?vT(UiAz05&I#+B1WHp-MR9ho`3YokmlWB;^GG8PgOgn3v&#VNR^g3S2dV8{~a zpB}JD#)0X?we6EO$(&>s`!}fpG`WxqZb9%eDR3+BIUbogy>Oe13S;Z^Y1?Gt7|%@q zx=qG`apH8d?J^3CXQqd5mr-OqGre-Vj67q<^qJdbf*B`Hf4g1Alx2c257YF-?J~kl z0{zpicgSpDd^!Ei4jC!Ad+lw|iE>EBbme6P4cvhG!b}34(^YoL=(D|K2aQ5upMhxJ zDU&F37i0)%))QJD@`5{-3e2G4qiMTjETVQp$3MWvI!=N1E*QXG1KG#~TB8CwAsgJd zfr#@mayv38GJqNZ0-$4v7`R2j#yT=PJ~-GV;CSm`mw>?1=~H&e_(_81ML?|s4ka$o zTobr$%c#I9aDMvlUEq#@*=`vt(8`G2GOCPMrcd83W5o2DeY*TU8L{aPcgtuqT@V0G z;fqW!IWH>0!2<3Dwj7;qw?{@zW)r6)bD<;XfF*GE2h#Y{$O8Lfy5k`k5pHlt0@QI_ zIeq&4Ju(_B9do7$fTgD2-y;(s+Q6;|DY@Z#83b-lciAhW%61y$6OQQ$M`a{rF7Y5w z>a&?K{Qyns|6s~;ymoN<;=MA;Odk(TKe1OvkMY9v?|Ws!7`ILL+b5F@nUg;~efvHc zE9im(iRlVwWY~Ej);(YZHw2~~l92{Y4kg}^k(i!vT85S917nr~D{PrT>V6p&(2Rcf zei?N(=zxOf@(5wCjb;;Q+~7yII>LtctA#c`u+nl0*p7OUpOEm&Ae(x=XAk) zGUC(UpORsn&UH{mi1EgB*@H3~OfQa27q}-QF+KjEj3DEe>4gVn5}7BQXq{evQAQlQ zx1FZ<9Fh^6{{Em07dxo`%q(zo+94V7=?#Zuc$jVrOlLeKBRYNYAsK1L3)A->l96S6 zKmG0@8AE2!=!4*4nFO{L(A*jObjAZRBGbDM%kU6R64M2a$Z&$D-~!LcNKAiqSVoxf z=JbDuWu%eCc-b|w96R7i1+3-cq3LNyWDKz<8E_PynLa)6jEpqcJJa7Ckzr#5`9^Fy z_fZ*sri%xs%N&)FQNWj!x&$1VLF+@rrso}%v1Pn4ea%ssO^m0f#~+iiV!SbZ)-f5a z*qgAV2rjRgK~V-C3I)&HF$vrffTmOwY0%ye7EqSQulY7;hRjpyCJ(nJ=zv8AT?P(E zM#$a~27N{bf%nr}j>`lw9-e;vxQsgE$?5-(%ZM_bpDua=JW&{QLdKf${PaE$@7DCw zCuFvP@-AqOZ~C;8GW?7Z)0dr;Q4)F!&Xb@N4r&@IWC@&}e(R)+E91fG3a4bGA!$iz zdfX{kiW8rH;gpQh^cAQA;N)@ffQ%T|4#q54yW-;k8Hwp)r)7khCJTeoj>vTT(=vRJ zq$oZ;^|Xu{@~{C&3)0hXpO)d`Xkdc0GN&U+xk8o5 zvV%6@atWN7b{3w5WVyg%j?85!!qO1eyaQznaW=5+&;(O|R)!17mD1BMoRMK?TeZFu zlmQjb%7{;Ya8^d0@y2wHb23KYL?u3b<~bRqX=h~E*iOA`g)0j)9+u9;RFrS95^i_G2QE;4DWRQi!!Vr2fz&hh3&*kGSbt1FUs&i+zcws zr`unGWL>b_%!@KwaD@*p%5X5A0X6@?`53I=!9^K%R&XRBiz{c1Czje-sx+P%4kh5xGWRTe-gCcAAHLI8)&#= z`n}6Cx{Rl%^IVZJ22E|i7lyGpDr7mXJ3c+=ii|Sjm+56!Wb_$7OkZ{dyx#2W6`4ZS z2S29?2)yS3?I+1mVg_xR0PRJ|0L>_{fu}p!9XmEoPrfRn1S)phugb_Vewn`Ps*EP6 zn7%3_4XQBSUX=-A{5aj=nv5>v&FOj9WTa)lE8Kd)=ZQUFRAT03RA6%i^}!iIrY*fD z6D0BxRz!e?`M~b{bZ9#Fb(!zlUtpW-Ku2)Bfpb9ny$nEG)K@DovDY(#w!AHvu6skq zo|#kN*7W=vGEzA+xWK2QDzGcCf=*O~EugXhA11&ITFL_2djlG!f*z>|9z#+9wK8D> zpivwJbI`eD;CUNx3j);oX3=NN0UZYcy3h?=7EU*;m66kdtu$qWk4`i&nlW{NN*vJe zFK90_s{o{rJ!krZlQQBukXG;vMo?AA0>Q1!yt`Yj!u7nQ%0H(RA)ozC^s0Dm>gN$ 
zdFz>^6j&T*Fo7ma!SM_oX<5Rg#0(mWVNd|qil9LdeMU2o7&9oZT7a6g?!3$jERH*v zK+P*gP%F4Zi5WEH%%ab@he?Uem6r*03Ir%2fzBmy+`t4H6kLGnWQiF}=FBTVOW9$* zUI7`->o;nO_0BSP6pKf(eM!J3n6X>F@1E7OWL6?bun#>pAAogXsd38Pg4r)(0T1pv9M^9PVd z(3mJIXn_={ztX{+Af(o7k%;wAsK=v$P&T?dQWK`f@F7Qndx_1)XonOG5 zC9s80fdO=C6c6ZB3>L=)%%Clh913g#lVFP{>QU9Qfl@Wt9ScA%7x*FwO?{}!zV!Fg`n8(010qdG0b2E zU7ZfPAsZyX;lRQ_2W@#`ah$-Ko_$4Gj$q6$;GKMIe`L0Esd?GWc0BJOG6lXp1Vyf&-v5Qtx}=c(5wf=5ZoGYp4@S; z6?BC4LS_ub$ZA1FJ;WIYn6d=kvEx+?$$7U9wL;erLfROhIIRaI3RKUMVKR8+9APM? znK%QjUV)rYLknIR?U3KmeQ(ZG@= z@Rc30{08DuP||B)&2sz#(#Ty8nzw-HbOkl0kU|5N7ikb5pyj%tIXI@QdIeU2qkQmL z(i5QaQ{X+b(?Nz1a9sc^7o83;gn>&cMu9H^){LMbV@LtR1Fi!>b;(IA$qOwda3EI= z;B>2jC*g8%JA%>;$m~tir{_PB5zT6sG$I!B7tZCWn!p$B<1LJ zK1S&<{lQZi(Rz@7z;jd(vyd%EHVl-*4uEnPXb(FmN^XFPPR9>SSWa=mIGGtl44rrSd)L6SDF#X;k z84WpTdIwFYpvE;w^MdIHFJ$5vZ%yxiA*0LKJALm988hw+44%B8y^pMp7pDJvA!EmQ zVLIb08S&{MFJ&|t_fM~TDI>#pVfyTsG7606r|)?w;{s7&!FYPQ)hiiuHpuM6^n>fWK=;hk=2fmh(V(L9KJ@>VYGTQ~vJT7#YZq;iUQO1YU54@I96on6F@v?xt zE0E>*=};GBypZ>ej1c4d=?ZUTY?vk=m>%~=W*_6@>9TKS#CflSMkbjYFaKy45STvQ z@vY1Rsl99pte`dngBg>B0(f~UcxM8ez~1Sv-^wI2PMYrjPDV-up5MVMP(d9Wfz#7# z-$B;#JbWi3!U0+@6pDe8B79(l@P_7r&FXO~`b2{4x8FSFEuKfoYMW%Jfrzd@oQDOWtz5RoXoh*F)61d=H zv1SCH#soEU`Xi9a)6>~M%2={Zo75@b$T3~;lZ^QE*pD*&j5num_$VXAwge>33>Rl- z<#1#cfXYGR1*C@y$^9QcLZXEClT0P!-s$b1z$=nhev)xyJTU$BCmCg?F5&4LKgx(r zSNkl(&v<6K*=HFo#_s8fpJikjd#2ZamT_dfF#X_X8ACScYW;~{WW=WPeUag1T77W3 z>=zkL-fm$9sEzCbXQu~$k+ER9b#Qv`7a48Fx6^lik+EYuJN?HO@OCGeuQH}=t3g#I z56BxL)3d+I2r!lp$@GpNGTls1d8f-hm6M%r^i!rBcD#?gK(FZZ6+dN^ z82?T`{Zqz;@$YnjUoy6go3{J?k||+goICySADKmrv$m)Gm8oWA`N++^V|qY1yZdyV z|1v^+vjjo6Vq}AoS+>C2=|2BuEEwlbZ~ZT`g>mLoE7=0ZnNwHGhIsGDV3LNMx9~tH zTZuyja?~zpQI8@QvjeDc0a`_^zzI4=6m*WTl8O_kMf*X%Ie?d0fkQ!Mxsrex(*_|& zX2t1=ljOyx3#=94X6JAO-3h=uy_iwfQ9;0rX@`&k2lyfc4#yorSqi)Y4V;ebjvNXc zx(xS(re9~2)fW~pV>%$@$nRJHRmm%`YPuwotg0%HqkN$fj{-M{;#S~R;PF)AmsaFc z;86hcyrq@66?rBHDk)8GWs;St=TYEuWOC;fT6OD;V8DvVsbn3Y-dp-qMN!py@YI zNPqap&M6EE9dKQ#$f>{ziZTVR={>x%64TE!%d#`_PQT79D<{YWIuQ5AY;RGGaUm9HvZPm_QdpF>xzE&F7zP&n7EZ4>}PD zbnt@$2dFm-I;x&SpYaQ`k|4Mo69k=WD+*dn!(qzwf!Umifm?webg}>gw*oiF-=N_c z4p0LA!K}dH2s+pYG&2EWgU)W`6PPNfz@g9B!h$$+Pl87nbms{-Xdzq!OFbyIxD~)g zoB$m(#%;xL26Sc^=%62nQQ)OY3hV;&!A5}&?*TiF5#%%>&0o`9A$gRMq!Ni~_;K-=R&8+~sm`agH0d#bTBIuM$#%x6{@M%Y&%V``L z6geFk6uG8x$jV8>QweASo>P%gfk}}Mw3>!Vfp5AchpY}G-}E#NS+z(Z1xU&fb7WBD zaRgb!pvcDzTFuVq&f)+X(^TLCU3{R#smlO91%pF@6SOQ4bZ9#$g{;`I>e-JFV?pl$_|gMtW)g94`le=$@&ha*JA2Q~#BaHeB`8(_u+ z+I1of8ft-@nF@(W5e0-EMIKQ9AGAD#NeYyp__7ql6u3a=^KdwR;LB1FN&&_C4n7dM zhfm-J4=8+p@F{VEk`Z{gM}kKPlsZH}sjQI2!I4pspNAQgYxosJ6}dnm$_#R^fEzC} z_)IFO<(vw<0*BZYxExus6uG9~=adz#=fiZbAUM%|;Zp>!!u-IeDC)?-t;nyyrzi|6 zix?r<4V0unM=c8}@PJO@xkzoH<>8x8!5pb(hApQV7%FoR#I9_d76 zh>j`zigF+w6ZjRy6*wG02Pg}HhH+Uyfv^IL6-)RPBjpr}%>z@Z=kUa@(AUqQ){1Ed6$x%jg{)fe~> zXS~TbMaU}arpvr6T1Slf~11Hf~%QK#sR!U{K&tkO50|2qd_on!^!v!!?Hn(+oicX2(0-(*y*5g7vR~XyB}RNzqHwqiITIDIdltW^935Fg^q8z5(10DBd5fi;IC=%#00fi^Bs zS7eZTf9?S;=@%C~;abOb}AwP>?fY`U3KmxEa$25DmG7nnOXvjOhW$ ziwp_^;No0~QQC~@4M>DVK~w?6c>&_A0JV1&I2=J2RKH{fxp!o907_Gi zjEVxFT9*;pngRvd0zL&s1wPPHbq>b`e4x}Ok)^~W@CI^_g%T)7fsA1icr$&2psbqY zg)@+?m!MDwO^PXk4yWe;)Z<4K6!DO`Z$9Sqf~RL&(5810Z?C@d0m^0)z>!=gpW7@PdwG)nsB&QZ{Ek z0pci|YcbydwOkH>ZWn+wQk8fdSwI%>ID+kg*Wz5DW`Kw~D4cW|F7UE|W|D4zwM~JH zoPo~$29?L4HUn6(L_IHPq?W_+1aFqWE^wjqffqUB34qSmA!_#c?~-FvOtp(;HDF+0x#(7K@P_Wd?2quIQone_>{nwbnq#$drwakkrkfq zEh1YdFQCAs$OJm%mM2S*4XLdMI$Vu`n_+slsH_Ut4p0Q_0EOgqF;Q7hr6b*ty@^bq zbitA3cm|m-kmY!#dwQd&tRB|}kUr3LHoVh!ipsj#u0dAEk>$7rnJvQEdUw00PK`(Fs5i5A1*QvPivhXm2hD|}uI#8n+ZW>6K!I1$}dom~pOfM9dH821zJ>bYv1YH2e 
z2&(fKKsSPLIG*6ia%3%1;&x=pQWS7x0N?8hZhlX{C@w1&vIDg9MH$gGXHejVl-B|v z<3UwBj~UYm9!Pn@ZpL%~#N@JK0PmOqH{M-AEoDVENQdkIPnH7jbZZG&-TDhWphN`9 zz@U3vI2?EIfYQ1|mIAW^vp_39D8YW8ZpiIgEW`Zun1*I%@M+QYu5yr_peWDAy*z}zm zvI2Ts3LM~3InbEK2M%a*`M?3nq2N0%mAD+)K;;gXqmUafM~5@RN``E0w+FZZbwE%5l1FPE(K0S;pvW2vI_OWkSoc+H#2c$f&0avwy6Re zXsi<4Sq66;e{d*pfMRhP=;dRyKXpi8GdlO@^XNhNF7{? zTp;=bry{7!vI1^_kbKz>J6 zaMDn~tBMPziXX)+(B5EV!^lWb;^4v!9xxw34g_5q2N}zOgtsU(tZ#5B@q^F$c zE1+!);4r-a^8vzttlVICD}mw=?oRM^E?DyuzJxD^#hnk}(T3{I7bxz8=zIcq=L1Z4 zvS4#33uxUv$?g;dCt)TBPzyR6lBQuv5S-#VK)0%VfIA#ir2pW^f(Gyp6o*3s_zT$K zpe7zD@qiPpKc)|$Aq`q*1W9Eeg+Dl;sRcPkK#Nwe6=sA2-?0IHs~#k@AsHKzS30^UH+!*e_3|wXgXsIfXr}z`)RuGvkqb)0+FA6HhC*X0?3=}7Ul^@xDD_JH;y939FHz=`vGz3AxdhQeo#SHUI*lx4Vaz*?{pzK#|eYWSkRcN7)ml@ zRODCSg12Sa-9h8*3VaG;0-IROm=1uF&jIc%MJ`Z1&8^G80cvmyKxq!g6WpNTD{ciY zfhp_?9QyT)7r0S}#6i`tCi4ky@WqN8j-ZPbc@+d;>KR{PRsMh*)Cv3mvik-%sEGo) zIT7SW=okV6Xr=_zIGVwk1*%#Eo-s=)a4GN!)Hkpzsz65EW^f8@Vgc1&3d(j&E#OwK z8B+s@RxxM(0W!J+)Yb)6lH6GWkXosS2RVC!1B^q1se=bOyg>KaL8lr(a}%J|c%TtA zaM7Vq4{{{D)`z-Kflr`;T~Qg?Ss?4{&6wtZoCAtq5Up&^JOR{PT>y2~1Rj{Pmhd2l zDQIp0lrR>6odueM0uRKz09{Q|&!NF|f&(;eumaRCV02UkHCj0wK{q+VdMn`D+W8b1 z1varbs=9;QSD>5@>KOAw#+KPZO*`mVxFG1@An-M09N>E)LDl#Pjx2%S9H0)s79Pau z*%Ca0+!~<40!XihLxX7psDlr>R~Sd;z?Y@?ah5Llbn%g;ih$;RJQ$%$eqZnjhdK$F0Dr$PY?Rpc|gK93>EAyf-+q6u2A(5CWjvpFtOvfrn8+ zJwwp6t~v7w&_&wZ+zRf{@gGjms2eAEwg)uU&Ia#nfyQt^8Om`1JGesynyY&NVS@Tr ze4rjXvjfxg^%}BL^{fiufh$nBUd&bGdm+Yc;1Z# zRQB-jvVnT)j9CIXilFQOy4eV(2{KqRgI$3Kv_!hl7qq?uCI?wD0cv)EQuFi~EVAPD zpe`lIwF)pr4WMwl!44`?cCcqD@xbQLK$mudrXtuO*Z4wuN1&^VI6zk)DG9*Tfts`n z*v**_uq!}EA3!5&KR`x-rU_BBe_*eNb{N>Ql=xsKfbQs40=1<<7fOl3L_y}hU^8ca z018v^{X?MRj6j2jV7D${hqZh&F%Uw}f0*@~f!-H}m|57cMpVdkDLr!6b%2D+~T&AFgD73RwY zf=Zy~$^mftULdF>q9CLo0_rD!U$=3cPdz&+eQV8i#YLwdL%9>}u4V9~wokRI+|cF^(( zs2Sk-i3jZF%rDqLOPRn!PF#+lF<%AbtPh#46{v^J^g~BYxImL8NRwyK0bdUAJrYVP z=FAh=6p&Vmus~Ld@R>1zCN~^suz~XaB0?lu5rA<)Hbpsp^+?HkxYQku*Q z*c4S1WWm$%yo?GeipmOHiu|B7gEZ=UQAZYh!8d5012n}3s((N^16*K&Y!d+0y(*Bo z1<*(r%orwa1!V=5Kwd@#glUl457`(_P|3Q34Kff0TEY#w26Ys`bM_#$ z8caLb6d6IDIK!sI1kR|Sb{ZS###jag4t<8R(--Q>DtaAZ0~gESK~(U7$ayx{fCwj| z!c|~2W4glzD)H@@Zh(>{=vo6%1$=`oOW;3vgyhL|9z9tZo(F886wkq}AfUiE-9%4T zsUE$F0vf=^GOrIE>*a=yq%nbF*O5UHR+fV%=2;mzLFLW{Rz($FMg>kq zHc)KyuyTXP*`}N8%W8{*i(62-0AD1+B+$UFpd84{IQ@^ltnBn@`m%hXD_FCXm=!=W zipro-LRQd7C9l9vPLS6@mRLtZ6*wWqAff>bNp6raWYASp9N?>_ctJOvaDxhc z1_khx+Xc{K69teqC2sKi5U3dfx;xXH`3C5+$s1tHrf@jcLyqd@a0FdP#Q|EmtH2Ao z+X<8(U$B6td_iqg9#E=zzyewT#0Xyi#0{=Tu%va+8Wu*-C86NOzmS0&)I~TQ@XKJd zK!phC91jhq4=j)z-NK5<(O+2LIl6Vaqmitl*&i0P9No?e%h90j8ssKl4$xK-1zrV4 zf!AE1Vt>l?8Ah@)j1#7BHIh|lRG)rOR90d7Un5z$`YG_~5XjxBbCCH0S&mEKd{Bry z0NuRH4{8buDDWt9gL+4d+zJAUf(qQAYv@6ZKG5heqG2b&$XE}$a!){siKSkF33RX) zqrf|6Zthvkj4bua4AUEpWdj)-re8Lejb_|EUEf63j&bVrJQG<1#;Mcio5+R;U6?Zs zR&|1JWx6qEJFBVeJVs6x1<+XmB?_$5*QtohZa--z8_&peWz}>o3t0oEJFBM0Sje8? 
zozKj~4B3HsfgyXkgA=>N^z$u}JPx7)RiK8Cqd}Gui@+6T&{Ts4lL6>FS7y+uEbJg@ z&`csL=&VwNw1)!tkUU4m>DrF$annz>ND9?o``RJkC@KIw`T`u=pd;X}GJ~6ryo~5( zzXhphN2q62V8*Thzyhvek?;rEsQGpq<+Di%)Yz(g4id>+B>_AHlmDm-SK&K>w1|b|k zQKksqdkH>iP=QsT8+2F?Xefe3mqE>uxv(Cz^d58$EolFo5_bTL1E?(GcH)R)aZuo{ zc47(vt)&DVQwTbdS)hS^xgs0rFj#?i%!(WkSFkuZ{`=3Y$OI~mnL%sem_X|$K`Yb1 zhs?8pb_szm&;)x=pr0SKT`NF|m6uV0MYEpSLy;RagJht{=?L)_OPvC@0<$R-LmjIE zH@5=&+Lo&2C7?P3v=fh85wu0%322Wbs32njm8UVFbw}W{TohQ%m@+^+{j3GjlYt2{yx~kWjF%KkX&6oqau|}6c$x*&A)QX`9!czs$bd*9kDqv0tXj35Qh-wWc z&=zLUK{3C<=k(Mlv4YRjV9{WzfUJiB4?442GqQlMz?uHcLAIXp!1Odn*(S!`=`S5+ zix{U&k8zUi)7&Ga^EhQaA9g94K#69ZU8T9FG}msx`^bY#p@;E-0} zlor@9-O*K65he-VJ&Y__=_;$k*tva~t85Pwq5AsfzkZ@R3f?0?3M z(`CJ6>(thP{14tnPPG4R!T#q$_@AjxfeYkScW_X^eeTZ7D8=n`h5?izz)b_tgc{fM zjn1-4j4(e=|K=CGMal22gvd>3U5VnFKCHomcWMTPXlEYVG>|B z!z7^Yb_K#@$V$rYfE-o`y6^*X?l&m+u!2rtbbPRQ z`qB{Dn@;POwg?FHi*k!V%QyvQM{dXKP{ApP+K4e*iB*L`ffbsNIUsu~SU~CcP^fH_ z6)4RjynPnrbI@_X3?TnXL(?a#0vqTENU#GGSiz?Nun6p(9ug+&z_yW3fmMNdYJq~t z^mSpfj*Ppee+ZK`W$c`;7A{-M)dD)m8FaGM?&)j8WqTNZPq&JY-DCGs(2)^z(kN)V zhzf%OJEEi(b9}s_ML?iQl>0an6BndL6@cVL(BfxB7RLuGwwpxCPGMx+H~nOktUBYa z=^vwH=Q8e^-XATyg;8a?ON?v@`Y8!vl+@$h!P1X)QY&OIBafsVmF zd~JF`lB~KwGrIyS5}5^&Uq)wZCPF)QdKEAJdx6|qeqIWvSDmjWkLPI~%+ z99d4L*Uj5k=g8VIG95fO{dKOaDbxRR)0OgMjhNP)pB|Pc>m<1xd}F5yq(290MT3^% zD=;{nn!Yhl_7UUx=~MD$Z5a1VznCu@!uV&qT7j%Kqu4~yBsu8lXeEBo!Ka{+bpc14 z>FI^Cs*HWpdkbad7#B`oQz+}pxM%vWLRlrw*KeBy9FH&vJeaOpB&)^PKRuvG)`(;2 z!4^BH0O?o8QCKC>P5rOWU|9Fgl)Ra4X_vQQ!j~2FIYl2Rd3xVAu4-Vp)C0 zY11bb%O*26P5)XfYsA<;U86*{2+5GeC18urmB=a}nZ>NY2ioPU05S`7-rlb1oTV_c zM5g7Aq6&l#s)?ucF>_A z&?1CQm!XMK+MKxqq#kqt1{=t;j%Q9x=dY4=;#kS02$GPW9$6(T&GeshdRdihKO^sS zwrbfp8SvIBP~Bn9ETDk6Duz>mQDD~ehHBYfp%W({Z81jhA}$8UBPXWI*T`yO6OXEq zH4!?3P!DdzpExmnL5-{|_}dRyKg?$cgEtwX*U| zCr(VCRx4Y>_;$K*ovbP2iRqqovQ8k`-a1(sknCy@edNUSlOX!UiRsVlWcArP*+FgP z>GeA$#HSnA%StmgP4};tbr6FsY*XL_wR;#H!6)G~OkY|rtIOCl{c^pmr8trj&>8Y@ zHOdXL%4(p~LO|s#vjUp}s{#+G7IIVI5ST8kz@WgX$-ICORGhI)XKa!cixbFF;1XEL zrO2ef=_qf;l%v22+UA&{zzI6RRsodXK<5$v=TzX-XDoqslbEa-!MTVNd>Q}g48!KPG8s{D^gzpj&)8@;}3iUa0erJMJ(7c6CjQOEggm2aKH(= z-UPI04Rjt0Xq$q-GA@u|GZ>NcFblT=r!K=RM$kZv(;3kA6Y%h&(^)VRyb^T<_*4*9 zfu?$}z?*Msb2O}slg2E59 zSCmPC4Rm39jz9}&tvjp0X-6n&T;(-71pvSN%|rf+MN)nJ@8{XU4=FrA}KHk)z7^pZANXStQa&5x?m zUnvagI27`NjtFBt=*Zv)>I*1-gsD;IoEJ%a)(l35Brvsod5`QOM#k0Kz4~N@nHbkhkDDMX%eZEG%>-F##&y$Y zOpw)Q+%)~f1X)?@HNw#2cEE?hbbvSX{{U}$zQCF#fFTRo_Xj>QhEZVMc7=(uevFLU zrsq$R^=DkYeg7m`IY!Mbe4qgAfCnY$+zE)-4<=af?f|i-vrUoBlR2|!nt;GIe$d&4 zjG&1`7SLhfuuzyVMK;i2H6#qd8^AX(f{Xx#CCHKujG)^cK`hXY=GDTAj0&s*Tc!(6 zm6fyF!l%FpjvDB311cbm3JjptZwD5kt}oqz%-^tRdgfGF8#b`JE>2%PRaTC1%k)!I zWz!hHOgEe+o5=WT`h*V>I@9-0lhp>bwHO3mO#d)VR*$iHy8LumEym{QUejfhgnBlj z)&mnZPTx0OR*A8B`orn63c5(r;9`FQLOCO7y)5`9G4N=xz+R#0dNX7-nDz-xkC-7F z3OX`${S4V_+&VgpjG*;dpnI_#>)pXgkU@iqMG4d_<6~uHWME)q01e48aw{-SZ&@N+ z!q_yTGG3Y9w^UY-@!s^kOJzM7S5IeOCacJ}XS(JxSqDa&=~>HU z%>?&>PQ>J80MGI%unTPFnZ9wE>{U>csbjfpxA>ml9RiL=8DQ-qCh$7pBQvLKt&mk= zZu!$OJ#dArHKWb+-W9UqNM?afVg{MDbp^z%o|Urg;%$FAz)ctEa0=*75Ef7y3vAX| zkXfr_0}PI^fjWK=tiYwu*uV^Ooi0NY=o}VdZ(i^+YDY!|wi3sV4GM*>(+{qa^_69n zR$?(@+QX>8<+x@_r-0+(Yh3~gtkO!Ld(9cAzdtV_HvQ%bSwkf@fy>Mae9M*C9Cg6A zcJMhVbSzf_jS_(F?qGD~<(SU5PS$by)zz{RY@qJ-k(tv!uau637GF*T||cJ#L(Sca5wO)9c3Rf@@_}6>UHZ=(#|J9J>G_kU?cQg95W7qrk4| z;cI2(nKtumuUIQ9#Rv-VDeGjrnWz2jn69*5)>Lfu-wpvsV`R(NL6+sNmsMeY__t&G zl=ZR)8JXG-PY>THYsNI^@bvzTvM!7^)30oljaEDej#hTiRY(d<0{8h8xp~+@Ytb2@ z{$|=9uu1k2lfvH59iZWxIiEWO1p1JMZTe58GvJ7-vk+KOn2a*f`x`qM#*X z<8*~Bf_jWIr~7XaJjXb5`lOw*I%ImOn{mc;sBb>(lGSINI9*}4tP{dFEU3Pz+%2mP 
zYHxxryHcLMalW89sJ(e`x2zcB#OV*`3u;XdohBHB*VU6CuAVgg`(9a11LTO{08e;= zBLWmVpy4LanX_!5J-VPpzzVDad#4BNla*uKI6Z%#tUTk!>HYg;uQK*cPu(wT!8mjJ zy#2DKOr0Fly{8Gr;B`X}#0@>uuN;(J!}w}?<0090nb(2}44~m(4uLmaCQ zJJW^@(`TNPwP$=h{pLwo8OANsznzqom;3=cy$aNf0H0k9&V&N5r|X@PtrCZwY79ES zP=V2L4(P->(5=(^Psz%&Jy1IAyx}X<1jsP1AEv%UUw_ zPG5dnR#oyMY_<*DTT)Ex}Y z!N_%!m65exnZa=a<8+q`vVownUpCNP8VYQV8n6}TY@oyaK%RomI)e)VQ2K`P71%VH z1wq%SFrvj@xSPSTI z9ER!dF33vio!B@{z!4l^SGYiBOb@7|IDs)sg@GSbc(R)@g@6`$gYLy;bG$H}=aQ^5 z)6DJDD=x|!F)iFaea%H#RsNP~9RivR4CV|B3QUfFr%u0kNmfb}QjE2Lc7bx5F*SgU zZD5?vdr8)bv2S|7C0R4ZH7jPs{gU6qw* zoIicmRas~I1)yFUqXrX;z#BnF26s?~1WiqIDu99sEXf3y1f6>c&ZG9Aya;N}|DG;$ zP1c$1YkP};z=Y|k*JM94&fgw+UDlqF>CXM>vu?o9G`WRF)Xb;=)#`=dZXD<;Wp)J~fvvotYYr87K;2P+ZM=$X3Lw!3+=}(=3Y?BTjF4R$ zkcrS1P{)kb43suFLFMB-5Fb>DTQST9F`2=m@N+=DMg}W}*&rIchhqka#bCxX1*C}G zQNGZMVHQXT)H4UU8gy>qZw^qmb_pZ+^f1s7JoV6CH7L**Fe)*FZ~ftP1f96arNE@X z<=6u1YJ#@Dc7c1edl;2KlkE%&pizAuR&bAa2dGEP=_qT(@Bw6By%`g@Z^;b41clR) z-HPD?$Vr@_OXBR9PJq}fYt5MtfE9yo4gtCD0Am*DSOrdf#ug^zPB$B98#Si}Qv;J3 zQwM0k0Cb!tXo#SL38_O5KD7bl8u0m;OrXTX1DgK;T_`1R3Usy_rzZ0Zuqse5-f;m_ zmIBy+^)L@|2tYizh6x@vjNncxr!K<^(Ao=5@Tr*K7z76v6KKOeII-?vGGqDxavG>J z1#%kb6sZ}Y&Mv1W^ACij4a}gsZ5cI~IKWQ(!H^~Jo)hY}2IlFz@5nkcT1@|cM^?*Q z(wun$vw|eJ4FNhM1+>t zLLns~1$F^RP_jJ1tOPx70yNgc%?%3XBg~4tAo>8aB8ww~B8LLIA~&;x0%(;sr{e`? z(0LyS2P%L^I8fa9fH_M6>_$HDK?u;?EcJj{Q9ywcboDdXi7W~nSs+^k6a=R4z9*~1 zC@}rrJy~&P0Y$#)Kkmt@)PoLK1i5oF3&^1#m=P;zS$IIp<{X)_!K+-rWyK3-MM02h zZH{<($&n?{03KcX!>lCA;-J8wAPVv?A7~U>5bTT}%%E934$v_T917fy zjM>v?-n#LlMcXc;?!UQ9Vp_+Qlcmda!mt^ zB0DGqnH>~36hz%XDT-Zz19UI}X#eC!ct2Ufam~i*0uN**jc2eZa4K+{F--x{>}E_8 zK${5{uz=Gbq%;*&5Le&?ogX4_4(x~}(_jATS@)Z|8xYYrqKFgwF{&01aG*gPJy+n#?<V_DsY02F9IF6F};vcP(p@NpK%97B{N9v79lex1<D?e2T z!0C8`6_hZ4ux2?jD2RZw9GDQ8$pb2eRZy#W-xpf#kN zjvLrPtrtdU)^lW31hs$|tr%{wgQCui=?aKuHexLHL%f&>64$z%JOUwQj`RZ>VwkxbVh%15r1$t zVFVTE;Jl&43Qmd)3eaK&k$E^B|A2y5gNXsu*a0mh6EtJ`0P;G!8Pg9C&1}Z>WxD(` zS+V*Tf>}zSz<7b=TTV@8&}!EQpi&rA{&Ip&WKdvJU8tl(8>Jm7X5^9CVM;}BE? zfDQ;g0OEm;g9e4n385@U5rHaD;Q|il143C!Y@iiN;P%7;Av2~6LJFMqju(Wo91Xzg zet;H(E1;^mAq0)Y8$zIiwyQvazd=Yz(&_*I|Nr^Jc)39P%)vG843MRuh~RXbA(W*c zxm-XB6uc{hl-NM3G?^EG0+bW9)NKdIos#At*MsITa613xw+;bE5!jd%C$d)rV5_1y zK>?t_1oIErJO4rY3=sN2^8*KjvasnIQ+DoEM<2zj}dXoQM-qK`nbmNGkxe4_E=(AOWj2tH)(9HY*Gqau(w7ePAu2K*)WBLI~ zj~!f~BJ2kzhy?9{29?d=_8+LF0dDqjDlnTdO#p@P3@(uOIUOf(fkT>8pK%TsaxVnp zYOo^E3S~|WrWqh@pgY*UgB$wNn#>EpD~h<$V@6=vbpKbf;!Fbm(=%Sl%CT<%Y23jz zz4w)@O#K0FkiB3d9XD`gIWj1)gG+cY0WIbqa3iC4uQvU&|`1b?|^T z#(`FVgWBr~pmW(2I2AyLwwN(30A+&}JXuQo(;cNWphUlbC(DsRfgcRyK{Gg@B2SOOjOmA<0;kietE;YZnKOf${NQTVoaqm& z-r-f?&Q=m}WXlnlJKgcEtXw_l=oWC@2d()y9YF`ga5_S(K4wr=2Wc2G|KL>+&|qS4 ztS@#1Eua+4E>jYBtXJT3WOS1*1n0Xsd|BY0ivY-J0w9@e(D{0xYyUtU2R_iEbf^&u z5Uur4y+V!(S&BlS?k6Yc;20%d1tx(WP<8|z7RL+ev@&NX2o)*uDzGT)XINRI+^JROe$6SRc{YQCdbC#t{* zwPXvwBBKJrXs9z8;m+K_56&HYppz##HJCd11!jX2%^nwiB~X&Z(8>$f`hyqT1zf=A z$eblGPgsc;JhKU&YKDc%^t|`7GW9=rmB78sKfFq!panxpypD{Z=0C3^3)BP%j{~$! 
zLJX8ML40uGx`0oKS3%T~IZH__%Z%v{uY#b07{~}vm z@*pcgt4;J7&j^6CEoegqN)lrRMF7YF8cb*S!Cg0SL^xjH2ZfvxGbkz`*Qv8OD1db9 zplD}-X$M*QhgXSBfjJ8_4fzK=c=2Pp@kd#+dRTIXyB1Xa+z|jBCuqiW14M(G(&o$; z1QfU&EkO2yjttUZ5>NyUnSrW}8vTqTo`4iHnlU{94PrcEuwr-t(qaM%xeMU% zdmxY{(8>v&?gv%Yjz4&(zxXJd$;dxF=##9i5xCQXSPt;aKa&Eim%uz-PMni{`n=DwY*|>FwwM-QebOS}s40*u1&YB9LZH;5 z!L&mN6a(;H1+-+v=uIOk`<_Vy(e1>3_ z0*@o8`UD;4$0KlvTZvhL*|FYDTA&|1N;F3hoMNF(I#3b?AJqq{E;%9R{edcbaI+Ua z4yX!ltS$hJ0f0{M;dI;}2HsGObLJHwi&qFvfA>XJ)^Z0Sr|u9mXFdVZ0jilf z9Z!IqEO2PL%U4;&cm+_RTOpVwa2u2%LFe@GfI?6KM5qYt1h+k|pm>1`)J_2%;s@~p zNDFvO6x9o0AKVa}e)y}biX`Zy7;qbwLxEp`15}=XrB`rG`zEW-3L2VWo_^trh^PRk zKH~&Y^ z9-$c%yl-PBQ02&|2-%JR8X`yPz=5i4W~?2!Fhtr14?N-M!NHR`+_qCKEdq{ah-A(R zil-ey;IV)Xq3LJ7%Stm%5Ssq-yR52ukB|}*XoQLpq^1KLhB6Aw3QVwIV*&@8;PjY% zoN}Vb0mTH%G1D`C$g1dez=y33K#Ao61E{z97_@VpQ@v8{2?pP zIA!|HAF?{^Q`kXsZyM7d{*V=!uJBXV(Dx>2+LcX%$pbX)3O;TQynF^U6$oAupuj3{ z2Gova(_pdyDFLsramZEx&9g~>4h}Q`T?%#vJg4U{ja5)=`i7sflI~uNjG!q<0q}Zl zrg{ZNNA^NTR?z$rXdseFfkmJTe8x1Z0s}ZcpewmSEAUvrM^P4o)`1CZKl)Sl6cf{f z727BNkricPjGn&ypR6Kd;PjLKWKEcwgr@)hCmY4sI6dmW>>?H?27%@&axzRE8>h<2 zX)sz&b(c%zI>X=y+OnX_5IS{}Ts`B;=|+rlCm8#-|7Mh1&d9iKx^b<|!`Bs%^7JP8@5 z7NO|}PYEd+Nx~I6o_yCK;Hd1lAzLwL|8c$xC9!fKjf3MVA{(!Jy1 z-s!z+a+=c{_~n)}?wzh7AlJkAX8J~T5uNGhb>sv=>!x(%q*9x9YN{nsO7wF1qi??wofCkXn6<9T)>vfG883guD z{~#!5$=Ec#KvPbGv1xjst(++1?&<3_<>aPU>dECy|12b@%(!>DxUifosQHthfcOh`ceqUtF$% zapiOY32^Mhb_s!E=YpjiXxYmK2{|jq$1UON^7J|CW@CWqdQ;-d;#+ z`h5X8VaDd^)1~C(8Jo9nmy%;;Vr-edS4K{nxrJlObbmoPS;p4sKSBIfjzvgOAv)b( zR!$5OAG``YsPS3Q;UOBnm63oFQhc1CzA$k{UO724jR zAlJ^w^zh4cK_xjuwtmp=JFe-urzCWlXMgROZg51RcRH)GoG#WGcgE?{9aQA3Q6}7z z8Fx zygG80jDM!P>&Qjp$moxdGCFGuhrnYIn5RIQou!pS;K}qFT{&g^x&0|Dbs^{WXQ0#t z&h66~H%=GSmy=`MxZFTrj*)ks5U4*38VP1}RGw~YAZN0_MUE5ZR*uEU2@R6rIY4)vA{W4iu|>%dc$DN!|7a_xjT$eEi>KS#gY#;! 