upgrade to multi-for syntax
parent 127d44e375
commit 8d6e8aa4bd
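For context, this commit tracks the Zig 0.11 change where `for` no longer captures an index implicitly: a counter (or any extra operand of the same length) must be passed explicitly, and captures may be pointers. Most hunks below are mechanical applications of one of the two forms in this minimal sketch; the names and values here are illustrative only, not from this repository:

    const std = @import("std");

    test "multi-for sketch" {
        const xs = [_]u32{ 10, 20, 30 };
        var ys: [3]u32 = undefined;

        // The index is no longer captured implicitly; pass an explicit `0..`
        // counter (the old form was `for (xs) |x, i| ...`).
        for (xs, 0..) |x, i| ys[i] = x * 2;
        try std.testing.expectEqualSlices(u32, &[_]u32{ 20, 40, 60 }, &ys);

        // Same-length objects can also be iterated in lockstep, capturing a
        // pointer to write through instead of indexing.
        for (xs, &ys) |x, *y| y.* = x + 1;
        try std.testing.expectEqualSlices(u32, &[_]u32{ 11, 21, 31 }, &ys);
    }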
@@ -38,7 +38,7 @@ use `ReleaseSafe` until it is fuzzed.
 Dependencies
 ------------
 
-1. zig 0.11.0-dev.1580+a5b34a61a or higher.
+1. zig 0.11.0-dev.1782+b52be973d or higher.
 2. [cmph][cmph]: bundled with this repository.
 
 Trying it out
@@ -87,7 +87,7 @@ pub fn init(
 {
 var name_idx = try baseAllocator.alloc(NameIdx, usersConst.len);
 defer baseAllocator.free(name_idx);
-for (usersConst) |user, i| name_idx[i] =
+for (usersConst, 0..) |user, i| name_idx[i] =
 NameIdx{ .name = user.name, .idx = i };
 sort.sort(NameIdx, name_idx, {}, Compare.name);
@@ -102,7 +102,7 @@ pub fn init(
 {
 var gid_idx = try baseAllocator.alloc(GidIdx, groupsConst.len);
 defer baseAllocator.free(gid_idx);
-for (groupsConst) |group, i|
+for (groupsConst, 0..) |group, i|
 gid_idx[i] = GidIdx{ .gid = group.gid, .idx = i };
 sort.sort(GidIdx, gid_idx, {}, Compare.gid);
@@ -116,7 +116,7 @@ pub fn init(
 
 // verify whatever comes to cmph are unique: user names
 var name2user = StringHashMap(u32).init(allocator);
-for (users.items(.name)) |name, i| {
+for (users.items(.name), 0..) |name, i| {
 var result = try name2user.getOrPut(name);
 if (result.found_existing)
 return error.Duplicate;
@@ -124,7 +124,7 @@ pub fn init(
 }
 // verify whatever comes to cmph are unique: group names
 var name2group = StringHashMap(u32).init(allocator);
-for (groups.items(.name)) |name, i| {
+for (groups.items(.name), 0..) |name, i| {
 var result = try name2group.getOrPut(name);
 if (result.found_existing)
 return error.Duplicate;
@@ -161,7 +161,7 @@ pub fn init(
 defer baseAllocator.free(user2groups);
 mem.set(ArrayListUnmanaged(u32), user2groups, ArrayListUnmanaged(u32){});
 
-for (groups.items(.members)) |groupmembers, i| {
+for (groups.items(.members), groups.items(.name), 0..) |groupmembers, name, i| {
 var members = try allocator.alloc(u32, groupmembers.len);
 members.len = 0;
@@ -180,7 +180,6 @@ pub fn init(
 members[members.len - 1] = user_idx;
 try user2groups[user_idx].append(allocator, @intCast(u32, i));
 } else {
-const name = groups.get(i).name;
 return err.returnf(
 "user '{s}' not found, member of group '{s}'",
 .{ member_name, name },
@@ -197,9 +196,9 @@ pub fn init(
 
 var user2groups_final = try allocator.alloc([]const u32, users.len);
 user2groups_final.len = users.len;
-for (user2groups) |*usergroups, i| {
+for (user2groups, user2groups_final) |*usergroups, *user2groups_final_i| {
 sort.sort(u32, usergroups.items, {}, comptime sort.asc(u32));
-user2groups_final[i] = try usergroups.toOwnedSlice(allocator);
+user2groups_final_i.* = try usergroups.toOwnedSlice(allocator);
 }
 
 return Corpus{
src/DB.zig: 49 lines changed
@@ -252,10 +252,10 @@ pub fn fieldOffsets(lengths: DBNumbers) DBNumbers {
 // skipping header (so index 1). This used to be an inline for with stage1,
 // but that and a comptime assertion crashes the compiler as of
 // 0.11.0-dev.1580+a5b34a61a
-inline for (DB_fields[1..]) |field, i| {
-assert(mem.eql(u8, field.name, meta.fields(DBNumbers)[i + 1].name));
-@field(result, field.name) = offset;
-offset += @field(lengths, field.name);
+inline for (DB_fields[1..], meta.fields(DBNumbers)[1..]) |db_field, dbn_field| {
+assert(mem.eql(u8, db_field.name, dbn_field.name));
+@field(result, db_field.name) = offset;
+offset += @field(lengths, db_field.name);
 }
 return result;
 }
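The hunk above relies on `inline for` accepting several comptime-known operands, so the second field list no longer has to be indexed with the first list's counter. A small self-contained sketch of that pattern; the struct types here are invented for illustration and are not part of the repository:

    const std = @import("std");

    const A = struct { x: u8, y: u16 };
    const B = struct { x: u32, y: u64 };

    test "inline multi-for over two comptime field lists" {
        // Both operands are comptime-known and equal in length, so the loop
        // unrolls and the field lists are walked in lockstep.
        inline for (std.meta.fields(A), std.meta.fields(B)) |fa, fb| {
            try std.testing.expect(std.mem.eql(u8, fa.name, fb.name));
        }
    }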
@@ -530,12 +530,12 @@ fn additionalGids(
 var scratch = try allocator.alignedAlloc(u32, 8, 256);
 var scratch_allocated: bool = true;
 defer if (scratch_allocated) allocator.free(scratch);
-for (corpus.user2groups) |usergroups, user_idx| {
+for (corpus.user2groups, idx2offset) |usergroups, *u_idx2offset| {
 if (usergroups.len == 0) {
-idx2offset[user_idx] = 0;
+u_idx2offset.* = 0;
 continue;
 }
-idx2offset[user_idx] = blob.items.len;
+u_idx2offset.* = blob.items.len;
 if (scratch.len < usergroups.len) {
 allocator.free(scratch);
 scratch_allocated = false;
@@ -544,7 +544,7 @@ fn additionalGids(
 }
 scratch.len = usergroups.len;
 const corpusGids = corpus.groups.items(.gid);
-for (usergroups) |group_idx, i|
+for (usergroups, 0..) |group_idx, i|
 scratch[i] = corpusGids[group_idx];
 compress.deltaCompress(u32, scratch) catch |err| switch (err) {
 error.NotSorted => unreachable,
@@ -624,17 +624,17 @@ fn groupMembers(
 var scratch = try ArrayListAligned(u32, 8).initCapacity(allocator, 1024);
 defer scratch.deinit();
 
-for (corpus.group2users) |members, group_idx| {
+for (corpus.group2users, idx2offset) |members, *idx2offset_g| {
 if (members.len == 0) {
-idx2offset[group_idx] = 0;
+idx2offset_g.* = 0;
 continue;
 }
 
-idx2offset[group_idx] = blob.items.len;
+idx2offset_g.* = blob.items.len;
 try scratch.ensureTotalCapacity(members.len);
 scratch.items.len = members.len;
-for (members) |user_idx, i|
-scratch.items[i] = user2offset[user_idx];
+for (members, scratch.items) |user_idx, *scratch_i|
+scratch_i.* = user2offset[user_idx];
 
 compress.deltaCompress(u32, scratch.items) catch |err| switch (err) {
 error.NotSorted => unreachable,
@@ -668,17 +668,20 @@ fn groupsSection(
 var blob = try ArrayListAligned(u8, 8).initCapacity(allocator, 8 * corpus.groups.len);
 errdefer blob.deinit();
 
-var i: usize = 0;
-while (i < corpus.groups.len) : (i += 1) {
+for (
+corpus.groups.items(.gid),
+corpus.groups.items(.name),
+idx2offset,
+members_offset,
+) |gid, name, *idx2offset_i, members_offset_i| {
 // TODO: this may be inefficient; it's calling `.slice()` on every iteration
-const group = corpus.groups.get(i);
 const group_offset = @intCast(u32, blob.items.len);
 assert(group_offset & 7 == 0);
-idx2offset[i] = @truncate(u32, group_offset >> 3);
+idx2offset_i.* = @truncate(u32, group_offset >> 3);
 const group_stored = GroupStored{
-.gid = group.gid,
-.name = group.name,
-.members_offset = members_offset[i],
+.gid = gid,
+.name = name,
+.members_offset = members_offset_i,
 };
 try PackedGroup.packTo(&blob, group_stored);
 try blob.appendNTimes(0, mem.alignForward(blob.items.len, 8) - blob.items.len);
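The hunk above also shows the multi-for replacing a manual `while (i < len) : (i += 1)` counter: each `std.MultiArrayList` column is pulled out once with `items(.field)` and zipped with the output arrays, so the per-iteration `.get(i)` disappears. A rough self-contained sketch of the same pattern; the `Gr` struct and values are invented for illustration:

    const std = @import("std");

    test "zip MultiArrayList columns with multi-for" {
        const Gr = struct { gid: u32, name: []const u8 };
        var list = std.MultiArrayList(Gr){};
        defer list.deinit(std.testing.allocator);
        try list.append(std.testing.allocator, .{ .gid = 1, .name = "wheel" });
        try list.append(std.testing.allocator, .{ .gid = 2, .name = "users" });

        var doubled = [_]u32{ 0, 0 };
        var first_bytes = [_]u8{ 0, 0 };
        // Instead of `while (i < list.len) : (i += 1)` plus `list.get(i)`,
        // pull each column out once and walk columns and outputs in lockstep.
        for (list.items(.gid), list.items(.name), &doubled, &first_bytes) |gid, name, *d, *b| {
            d.* = gid * 2;
            b.* = name[0];
        }
        try std.testing.expectEqualSlices(u32, &[_]u32{ 2, 4 }, &doubled);
        try std.testing.expectEqualSlices(u8, "wu", &first_bytes);
    }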
@@ -714,8 +717,8 @@ fn bdzIdx(
 assert(keys.len <= math.maxInt(u32));
 var result = try allocator.alignedAlloc(u32, 8, keys.len);
 errdefer allocator.free(result);
-for (keys) |key, i|
-result[search_fn(packed_mphf, key)] = idx2offset[i];
+for (keys, idx2offset) |key, idx2offset_i|
+result[search_fn(packed_mphf, key)] = idx2offset_i;
 return result;
 }
@@ -904,7 +907,7 @@ test "DB pack gids" {
 const k4 = bdz.search_u32(cmph_gid, 100000);
 var hashes = &[_]u32{ k1, k2, k3, k4 };
 sort.sort(u32, hashes, {}, comptime sort.asc(u32));
-for (hashes) |hash, i|
+for (hashes, 0..) |hash, i|
 try testing.expectEqual(i, hash);
 }
@@ -34,9 +34,9 @@ pub fn init(
 var ptr_end = @sizeOf([]const u8) * members.len;
 var members_ptr = mem.bytesAsSlice([]const u8, buf[0..ptr_end]);
 var offset: usize = ptr_end;
-for (members) |member, i| {
+for (members, members_ptr) |member, *members_ptr_i| {
 mem.copy(u8, buf[offset..], member);
-members_ptr[i] = buf[offset .. offset + member.len];
+members_ptr_i.* = buf[offset .. offset + member.len];
 offset += member.len;
 }
 mem.copy(u8, buf[offset..], name);
src/cmph.zig: 24 lines changed
@@ -46,13 +46,13 @@ pub fn pack(allocator: Allocator, input: [][*:0]const u8) error{OutOfMemory}![]a
 pub fn packU32(allocator: Allocator, numbers: []const u32) error{OutOfMemory}![]align(8) const u8 {
 var keys: [][6]u8 = try allocator.alignedAlloc([6]u8, 8, numbers.len);
 defer allocator.free(keys);
-for (numbers) |n, i|
-keys[i] = unzeroZ(n);
+for (numbers, keys) |n, *key_i|
+key_i.* = unzeroZ(n);
 
 var keys2 = try allocator.alignedAlloc([*:0]const u8, 8, numbers.len);
 defer allocator.free(keys2);
-for (keys) |_, i|
-keys2[i] = @ptrCast([*:0]const u8, &keys[i]);
+for (keys, keys2) |*key_i, *key2_i|
+key2_i.* = @ptrCast([*:0]const u8, key_i);
 return pack(allocator, keys2);
 }
@@ -61,8 +61,8 @@ pub fn packStr(allocator: Allocator, strings: []const []const u8) error{OutOfMem
 var arena = std.heap.ArenaAllocator.init(allocator);
 defer arena.deinit();
 var keys = try arena.allocator().alignedAlloc([*:0]const u8, 8, strings.len);
-for (strings) |_, i|
-keys[i] = try arena.allocator().dupeZ(u8, strings[i]);
+for (strings, keys) |string, *key_i|
+key_i.* = try arena.allocator().dupeZ(u8, string);
 return pack(allocator, keys);
 }
@@ -126,10 +126,10 @@ test "cmph pack u32" {
 const packed_mphf = try packU32(testing.allocator, keys);
 defer testing.allocator.free(packed_mphf);
 var hashes: [keys.len]u32 = undefined;
-for (keys) |key, i|
-hashes[i] = bdz.search_u32(packed_mphf, key);
+for (keys, &hashes) |key, *hash_i|
+hash_i.* = bdz.search_u32(packed_mphf, key);
 sort.sort(u32, hashes[0..], {}, comptime sort.asc(u32));
-for (hashes) |hash, i|
+for (hashes, 0..) |hash, i|
 try testing.expectEqual(i, hash);
 }
@@ -138,9 +138,9 @@ test "cmph pack str" {
 const packed_mphf = try packStr(testing.allocator, keys[0..]);
 defer testing.allocator.free(packed_mphf);
 var hashes: [keys.len]u32 = undefined;
-for (keys) |key, i|
-hashes[i] = bdz.search(packed_mphf, key);
+for (keys, &hashes) |key, *hash_i|
+hash_i.* = bdz.search(packed_mphf, key);
 sort.sort(u32, hashes[0..], {}, comptime sort.asc(u32));
-for (hashes) |hash, i|
+for (hashes, 0..) |hash, i|
 try testing.expectEqual(i, hash);
 }
@@ -55,7 +55,7 @@ pub fn uvarint(buf: []const u8) error{Overflow}!Varint {
 var x: u64 = 0;
 var s: u6 = 0;
 
-for (buf) |b, i| {
+for (buf, 0..) |b, i| {
 if (i == maxVarintLen64)
 // Catch byte reads past maxVarintLen64.
 // See issue https://golang.org/issues/41185
@@ -40,8 +40,8 @@ pub fn ParseResult(comptime flags: []const Flag) type {
 flag_data: [flags.len]FlagData = blk: {
 // Init all flags to false/null
 var flag_data: [flags.len]FlagData = undefined;
-inline for (flags) |flag, i| {
-flag_data[i] = switch (flag.kind) {
+inline for (flags, &flag_data) |flag, *flag_data_i| {
+flag_data_i.* = switch (flag.kind) {
 .boolean => .{
 .name = flag.name,
 .value = .{ .boolean = false },
@@ -79,10 +79,10 @@ pub fn parse(args: []const [*:0]const u8, comptime flags: []const Flag) !ParseRe
 var arg_idx: usize = 0;
 while (arg_idx < args.len) : (arg_idx += 1) {
 var parsed_flag = false;
-inline for (flags) |flag, flag_idx| {
+inline for (flags, &ret.flag_data) |flag, *flag_data_i| {
 if (cstr.cmp(flag.name, args[arg_idx]) == 0) {
 switch (flag.kind) {
-.boolean => ret.flag_data[flag_idx].value.boolean = true,
+.boolean => flag_data_i.*.value.boolean = true,
 .arg => {
 arg_idx += 1;
 if (arg_idx == args.len) {
@@ -90,7 +90,7 @@ pub fn parse(args: []const [*:0]const u8, comptime flags: []const Flag) !ParseRe
 "' requires an argument but none was provided!", .{});
 return error.MissingFlagArgument;
 }
-ret.flag_data[flag_idx].value.arg = args[arg_idx];
+flag_data_i.*.value.arg = args[arg_idx];
 },
 }
 parsed_flag = true;
@@ -65,7 +65,7 @@ pub const ShellWriter = struct {
 if (shells.len == 0) return self;
 
 errdefer self.shell2idx.deinit();
-for (shells.constSlice()) |shell, idx| {
+for (shells.constSlice(), 0..) |shell, idx| {
 const idx8 = @intCast(u8, idx);
 const offset = @intCast(u16, self.blob.len);
 self.blob.appendSliceAssumeCapacity(shell);
@@ -177,7 +177,7 @@ test "shell basic shellpopcon" {
 
 // copying section_index until https://github.com/ziglang/zig/pull/14580
 var section_index: [max_shells]u16 align(8) = undefined;
-for (sections.index.constSlice()) |elem, i|
+for (sections.index.constSlice(), 0..) |elem, i|
 section_index[i] = elem;
 const shellReader = ShellReader.init(
 std.mem.sliceAsBytes(section_index[0..sections.index.len]),
@@ -39,7 +39,7 @@ pub fn name(s: []const u8, err: *ErrCtx) error{InvalidRecord}!void {
 error.InvalidRecord,
 );
 
-for (s[1..]) |c, i| {
+for (s[1..], 0..) |c, i| {
 if (!(ascii.isAlphanumeric(c) or c == '_' or c == '.' or c == '@' or c == '-'))
 return err.returnf(
 "invalid character {s} at position {d}",
|
||||
if (s[0] != '/')
|
||||
return err.returnf("must start with /", .{}, error.InvalidRecord);
|
||||
|
||||
for (s[1..]) |c, i| {
|
||||
for (s[1..], 0..) |c, i| {
|
||||
if (!(ascii.isAlphanumeric(c) or c == '/' or c == '_' or c == '.' or c == '@' or c == '-'))
|
||||
return err.returnf(
|
||||
"invalid character 0xD at position {d}",
|
||||
|