zig 0.11

commit 993a29d2f8
parent 37e24524c2
@@ -33,7 +33,7 @@ Project goals
 Dependencies
 ------------

-1. zig around `0.11.0-dev.3735+a72d634b7`.
+1. zig v0.11.
 2. [cmph][cmph]: bundled with this repository.

 Building
@@ -41,7 +41,7 @@ Building

 Clone, compile and run tests first:

-$ git clone --recursive https://git.jakstys.lt/motiejus/turbonss
+$ git clone https://git.jakstys.lt/motiejus/turbonss
 $ zig build test
 $ zig build -Dtarget=x86_64-linux-gnu.2.16 -Doptimize=ReleaseSafe

@@ -183,7 +183,6 @@ keep the following in mind:
 - if the database file was replaced while the program has been running,
   turbonss will not re-read the file (it holds to the previous file
   descriptor).
-- requires a nightly version of zig (that will change with 0.11).

 The license is permissive, so feel free to fork and implement the above (I
 would appreciate if you told me, but surely you don't have to). I am also

build.zig (12 lines changed)

@@ -47,7 +47,7 @@ pub fn build(b: *zbs.Builder) void {
     cmph.want_lto = true;
     cmph.compress_debug_sections = .zlib;
     cmph.omit_frame_pointer = true;
-    cmph.addIncludePath("deps/cmph/src");
+    cmph.addIncludePath(.{ .path = "deps/cmph/src" });
     cmph.addConfigHeader(b.addConfigHeader(.{}, .{
         .HAVE_DLFCN_H = true,
         .HAVE_GETOPT_H = true,
@@ -91,8 +91,8 @@ pub fn build(b: *zbs.Builder) void {
         //"-DDEBUG",
     });
     bdz.omit_frame_pointer = true;
-    bdz.addIncludePath("deps/cmph/src");
-    bdz.addIncludePath("include/deps/cmph");
+    bdz.addIncludePath(.{ .path = "deps/cmph/src" });
+    bdz.addIncludePath(.{ .path = "include/deps/cmph" });
     bdz.want_lto = true;

     {
@@ -147,7 +147,7 @@ pub fn build(b: *zbs.Builder) void {
         exe.want_lto = true;
         exe.linkLibC();
         exe.linkLibrary(bdz);
-        exe.addIncludePath("deps/cmph/src");
+        exe.addIncludePath(.{ .path = "deps/cmph/src" });
         b.installArtifact(exe);
     }

@@ -168,7 +168,7 @@ pub fn build(b: *zbs.Builder) void {
         so.want_lto = true;
         so.linkLibC();
         so.linkLibrary(bdz);
-        so.addIncludePath("deps/cmph/src");
+        so.addIncludePath(.{ .path = "deps/cmph/src" });
         b.installArtifact(so);
     }

@@ -187,5 +187,5 @@ pub fn build(b: *zbs.Builder) void {
 fn addCmphDeps(exe: *zbs.LibExeObjStep, cmph: *zbs.LibExeObjStep) void {
     exe.linkLibC();
     exe.linkLibrary(cmph);
-    exe.addIncludePath("deps/cmph/src");
+    exe.addIncludePath(.{ .path = "deps/cmph/src" });
 }
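The addIncludePath changes above follow the zig 0.11 std.Build API, where the function takes a std.Build.LazyPath (for example `.{ .path = "..." }`) instead of a plain string. A minimal standalone sketch of the new call style; the executable name and root source file below are illustrative and not taken from this repository's build.zig:

    const std = @import("std");

    pub fn build(b: *std.Build) void {
        const exe = b.addExecutable(.{
            .name = "example", // hypothetical artifact name
            .root_source_file = .{ .path = "src/main.zig" }, // hypothetical file
            .target = b.standardTargetOptions(.{}),
            .optimize = b.standardOptimizeOption(.{}),
        });
        // zig 0.11: addIncludePath() takes a std.Build.LazyPath rather than []const u8.
        exe.addIncludePath(.{ .path = "deps/cmph/src" });
        b.installArtifact(exe);
    }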

@@ -120,7 +120,7 @@ pub fn init(
         var result = try name2user.getOrPut(name);
         if (result.found_existing)
             return error.Duplicate;
-        result.value_ptr.* = @intCast(u32, i);
+        result.value_ptr.* = @intCast(i);
     }
     // verify whatever comes to cmph are unique: group names
     var name2group = StringHashMap(u32).init(allocator);
@@ -128,7 +128,7 @@ pub fn init(
         var result = try name2group.getOrPut(name);
         if (result.found_existing)
             return error.Duplicate;
-        result.value_ptr.* = @intCast(u32, i);
+        result.value_ptr.* = @intCast(i);
     }

     // verify whatever comes to cmph are unique: gids
@@ -178,7 +178,7 @@ pub fn init(
             // continue;
             members.len += 1;
             members[members.len - 1] = user_idx;
-            try user2groups[user_idx].append(allocator, @intCast(u32, i));
+            try user2groups[user_idx].append(allocator, @intCast(i));
         } else {
             return err.returnf(
                 "user '{s}' not found, member of group '{s}'",
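The `@intCast(u32, i)` to `@intCast(i)` rewrites above reflect the zig 0.11 change where cast builtins take a single operand and infer the result type from the result location; where no result type is available, the cast is wrapped in `@as`. A small sketch of both forms, with made-up values:

    const std = @import("std");

    test "zig 0.11 cast builtins infer the result type" {
        const i: usize = 7;
        // Result type inferred from the annotated destination (u32 here).
        const narrow: u32 = @intCast(i);
        // No destination type in this expression, so pin it with @as.
        const also_narrow = @as(u32, @intCast(i));
        try std.testing.expectEqual(narrow, also_narrow);
    }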

src/DB.zig (34 lines changed)

@@ -270,8 +270,8 @@ pub fn fromBytes(buf: []align(8) const u8) InvalidHeader!DB {
         const end = (start_block + @field(lengths, field.name)) << section_length_bits;
         const start = start_block << section_length_bits;
         const slice_type = meta.Child(field.type);
-        const value = mem.bytesAsSlice(slice_type, @alignCast(8, buf[start..end]));
-        @field(result, field.name) = value;
+        const value = mem.bytesAsSlice(slice_type, buf[start..end]);
+        @field(result, field.name) = @alignCast(value);
     }

     return result;
@@ -293,9 +293,7 @@ pub fn packCGroupNoMembers(group: *const PackedGroup, buf: []u8) error{BufferToo
     return CGroup{
         .gr_name = buf[name_start .. name_start + name.len :0].ptr,
         .gr_gid = group.gid(),
-        // TODO: how can we use bytesAsSlice in a way that does not need
-        // this ugly ptrCast?
-        .gr_mem = @ptrCast([*:null]align(1) const ?[*:0]const u8, member_ptrs.ptr),
+        .gr_mem = @ptrCast(member_ptrs.ptr),
     };
 }

@@ -342,7 +340,7 @@ pub fn packCGroup(self: *const DB, group: *const PackedGroup, buf: []u8) error{

     var i: usize = 0;
     while (try it.next()) |member_offset| : (i += 1) {
-        const entry = try PackedUser.fromBytes(@alignCast(8, self.users[member_offset << 3 ..]));
+        const entry = try PackedUser.fromBytes(@alignCast(self.users[member_offset << 3 ..]));
         const start = buf_offset;
         const name = entry.user.name();
         if (buf_offset + name.len + 1 > buf.len)
@@ -364,7 +362,7 @@ pub fn packCGroup(self: *const DB, group: *const PackedGroup, buf: []u8) error{
         .gr_gid = group.gid(),
         // TODO: how can we use bytesAsSlice in a way that does not need
         // this ugly ptrCast?
-        .gr_mem = @ptrCast([*:null]align(1) const ?[*:0]const u8, member_ptrs.ptr),
+        .gr_mem = @ptrCast(member_ptrs.ptr),
     };
 }

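The same applies to pointer and alignment casts: in zig 0.11 `@ptrCast` and `@alignCast` carry no explicit destination type or alignment argument and infer both from the result location, which is why the long `[*:null]align(1) const ?[*:0]const u8` spelling disappears above. A minimal sketch of the pattern (the types and value are illustrative):

    const std = @import("std");

    test "zig 0.11 pointer casts use the result type" {
        var word: u32 = 0xAABBCCDD;
        // zig 0.10: @ptrCast([*]const u8, &word); zig 0.11 infers [*]const u8
        // from the annotated destination instead.
        const bytes: [*]const u8 = @ptrCast(&word);
        // @alignCast likewise drops its explicit alignment argument; nesting
        // @alignCast(@ptrCast(...)) is the common 0.11 idiom.
        const back: *const u32 = @alignCast(@ptrCast(bytes));
        try std.testing.expectEqual(word, back.*);
    }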
@@ -373,7 +371,7 @@ pub fn getGroupByName(self: *const DB, name: []const u8) error{Overflow}!?Packed
     if (idx >= self.header.num_groups)
         return null;
     const offset = self.idx_groupname2group[idx];
-    const group = (try PackedGroup.fromBytes(@alignCast(8, self.groups[offset << 3 ..]))).group;
+    const group = (try PackedGroup.fromBytes(@alignCast(self.groups[offset << 3 ..]))).group;
     if (!mem.eql(u8, name, group.name()))
         return null;
     return group;
@@ -384,7 +382,7 @@ pub fn getGroupByGid(self: *const DB, gid: u32) error{Overflow}!?PackedGroup {
     if (idx >= self.header.num_groups)
         return null;
     const offset = self.idx_gid2group[idx];
-    const group = (try PackedGroup.fromBytes(@alignCast(8, self.groups[offset << 3 ..]))).group;
+    const group = (try PackedGroup.fromBytes(@alignCast(self.groups[offset << 3 ..]))).group;
     if (gid != group.gid())
         return null;
     return group;
@@ -468,7 +466,7 @@ pub fn getUserByName(self: *const DB, name: []const u8) error{Overflow}!?PackedU
     if (idx >= self.header.num_users)
         return null;
     const offset = self.idx_name2user[idx];
-    const user = (try PackedUser.fromBytes(@alignCast(8, self.users[offset << 3 ..]))).user;
+    const user = (try PackedUser.fromBytes(@alignCast(self.users[offset << 3 ..]))).user;
     if (!mem.eql(u8, name, user.name())) return null;
     return user;
 }
@@ -484,7 +482,7 @@ pub fn getUserByUid(self: *const DB, uid: u32) error{Overflow}!?PackedUser {
     if (idx >= self.header.num_users)
         return null;
     const offset = self.idx_uid2user[idx];
-    const user = (try PackedUser.fromBytes(@alignCast(8, self.users[offset << 3 ..]))).user;
+    const user = (try PackedUser.fromBytes(@alignCast(self.users[offset << 3 ..]))).user;
     if (uid != user.uid()) return null;
     return user;
 }
@@ -588,7 +586,7 @@ fn usersSection(
         const user = users.get(i);
         const user_offset = math.cast(u35, blob.items.len) orelse return error.TooMany;
         assert(user_offset & 7 == 0);
-        idx2offset[i] = @truncate(u32, user_offset >> 3);
+        idx2offset[i] = @as(u32, @truncate(user_offset >> 3));
         try PackedUser.packTo(
             &blob,
             user,
@@ -598,7 +596,7 @@ fn usersSection(
         try blob.appendNTimes(0, mem.alignForward(usize, blob.items.len, 8) - blob.items.len);
     }
     return UsersSection{
-        .len = @intCast(u32, users.len),
+        .len = @intCast(users.len),
         .idx2offset = idx2offset,
         .blob = try blob.toOwnedSlice(),
     };
@@ -677,9 +675,9 @@ fn groupsSection(
         members_offset,
     ) |gid, name, *idx2offset_i, members_offset_i| {
         // TODO: this may be inefficient; it's calling `.slice()` on every iteration
-        const group_offset = @intCast(u32, blob.items.len);
+        const group_offset = blob.items.len;
         assert(group_offset & 7 == 0);
-        idx2offset_i.* = @truncate(u32, group_offset >> 3);
+        idx2offset_i.* = @truncate(group_offset >> 3);
         const group_stored = GroupStored{
             .gid = gid,
             .name = name,
@@ -690,7 +688,7 @@ fn groupsSection(
     }

     return GroupsSection{
-        .len = @intCast(u32, corpus.groups.len),
+        .len = @intCast(corpus.groups.len),
         .idx2offset = idx2offset,
         .blob = try blob.toOwnedSlice(),
     };
@@ -733,9 +731,9 @@ pub fn nblocks_n(comptime T: type, nbytes: usize) T {
         u64 => u70,
         else => @compileError("unsupported type " ++ @typeName(T)),
     };
-    const upper = @intCast(B, mem.alignForward(usize, nbytes, section_length));
+    const upper = @as(B, @intCast(mem.alignForward(usize, nbytes, section_length)));
     assert(upper & (section_length - 1) == 0);
-    return @truncate(T, upper >> section_length_bits);
+    return @truncate(upper >> section_length_bits);
 }

 // nblocks returns how many blocks a particular slice will take.

@@ -20,7 +20,7 @@ pub fn open(fname: []const u8) Error!File {
     errdefer if (fd_open) os.close(fd);

     const st = try os.fstat(fd);
-    const size = @intCast(usize, st.size);
+    const size: usize = @intCast(st.size);
     const ptr = try os.mmap(null, size, os.PROT.READ, os.MAP.SHARED, fd, 0);
     errdefer os.munmap(ptr);


@@ -27,7 +27,7 @@ pub fn init(
     const buf_len = @sizeOf([]const u8) * members.len + name.len + blk: {
         var sum: usize = 0;
         for (members) |member| sum += member.len;
-        break :blk @intCast(u32, sum);
+        break :blk @as(u32, @intCast(sum));
     };
     var buf = try allocator.alloc(u8, buf_len);
     errdefer allocator.free(buf);
@@ -70,7 +70,7 @@ pub const Iterator = struct {

     pub fn next(it: *Iterator) error{Overflow}!?PackedGroup {
         if (it.idx == it.total) return null;
-        const entry = try fromBytes(@alignCast(8, it.section[it.next_start..]));
+        const entry = try fromBytes(@alignCast(it.section[it.next_start..]));
         it.idx += 1;
         it.next_start += entry.end;
         it.advanced_by = entry.end;
@@ -85,7 +85,7 @@ pub const Entry = struct {

 pub fn fromBytes(blob: []align(8) const u8) error{Overflow}!Entry {
     const start_var_payload = @bitSizeOf(Inner) / 8;
-    const inner = @ptrCast(*align(8) const Inner, blob[0..start_var_payload]);
+    const inner = @as(*align(8) const Inner, @ptrCast(blob[0..start_var_payload]));
     const end_strings = start_var_payload + inner.stringLength();
     const gids_offset = try compress.uvarint(blob[end_strings..]);
     const end_payload = end_strings + gids_offset.bytes_read;
@@ -110,7 +110,7 @@ pub const Iterator = struct {

     pub fn next(it: *Iterator) error{Overflow}!?PackedUser {
         if (it.idx == it.total) return null;
-        const entry = try fromBytes(@alignCast(8, it.section[it.next_start..]));
+        const entry = try fromBytes(@alignCast(it.section[it.next_start..]));
         it.idx += 1;
         it.next_start += entry.end;
         it.advanced_by = entry.end;

src/bdz.zig (10 lines changed)

@@ -21,10 +21,10 @@ pub fn search_u32(packed_mphf: []const u8, key: u32) u32 {
 pub fn unzero(x: u32) [5]u8 {
     const bit: u8 = 0b10000000;
     var buf: [u32len]u8 = undefined;
-    buf[0] = @truncate(u8, (x & 0b11111110_00000000_00000000_00000000) >> 25) | bit;
-    buf[1] = @truncate(u8, (x & 0b00000001_11111100_00000000_00000000) >> 18) | bit;
-    buf[2] = @truncate(u8, (x & 0b00000000_00000011_11110000_00000000) >> 12) | bit;
-    buf[3] = @truncate(u8, (x & 0b00000000_00000000_00001111_11000000) >> 6) | bit;
-    buf[4] = @truncate(u8, (x & 0b00000000_00000000_00000000_00111111) >> 0) | bit;
+    buf[0] = @truncate(((x & 0b11111110_00000000_00000000_00000000) >> 25) | bit);
+    buf[1] = @truncate(((x & 0b00000001_11111100_00000000_00000000) >> 18) | bit);
+    buf[2] = @truncate(((x & 0b00000000_00000011_11110000_00000000) >> 12) | bit);
+    buf[3] = @truncate(((x & 0b00000000_00000000_00001111_11000000) >> 6) | bit);
+    buf[4] = @truncate(((x & 0b00000000_00000000_00000000_00111111) >> 0) | bit);
     return buf;
 }

@@ -25,7 +25,7 @@ extern fn cmph_destroy(mphf: [*]u8) void;
 // minus first 4 bytes") for further storage. The slice must be freed by the
 // caller.
 pub fn pack(allocator: Allocator, input: [][*:0]const u8) error{OutOfMemory}![]align(8) const u8 {
-    const input_len = @intCast(c_uint, input.len);
+    const input_len: c_uint = @intCast(input.len);
     var source = cmph_io_vector_adapter(input.ptr, input_len);
     defer cmph_io_vector_adapter_destroy(source);
     var config = cmph_config_new(source) orelse return error.OutOfMemory;
@@ -52,7 +52,7 @@ pub fn packU32(allocator: Allocator, numbers: []const u32) error{OutOfMemory}![]
     var keys2 = try allocator.alignedAlloc([*:0]const u8, 8, numbers.len);
     defer allocator.free(keys2);
     for (keys, keys2) |*key_i, *key2_i|
-        key2_i.* = @ptrCast([*:0]const u8, key_i);
+        key2_i.* = @ptrCast(key_i);
     return pack(allocator, keys2);
 }

@@ -86,11 +86,11 @@ pub fn putUvarint(buf: []u8, x: u64) usize {
     var mutX = x;

     while (mutX >= 0x80) {
-        buf[i] = @truncate(u8, mutX) | 0x80;
+        buf[i] = @as(u8, @truncate(mutX)) | 0x80;
         mutX >>= 7;
         i += 1;
     }
-    buf[i] = @truncate(u8, mutX);
+    buf[i] = @truncate(mutX);

     return i + 1;
 }
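For context, putUvarint writes the same LEB128-style encoding as Go's binary.PutUvarint: seven payload bits per byte, the high bit marking continuation, and the return value is the number of bytes written. A small usage sketch; the module import path and the example value are assumptions for illustration, not taken from this repository:

    const std = @import("std");
    const compress = @import("compress.zig"); // assumed path of the file containing putUvarint

    test "putUvarint sketch" {
        var buf: [10]u8 = undefined; // 10 bytes is enough for any u64
        const n = compress.putUvarint(&buf, 300);
        // 300 = 0b100101100 -> 0xAC (low 7 bits plus continuation bit), then 0x02.
        try std.testing.expectEqual(@as(usize, 2), n);
        try std.testing.expectEqualSlices(u8, &.{ 0xAC, 0x02 }, buf[0..n]);
    }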

@@ -15,7 +15,6 @@
 // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

 const std = @import("std");
-const cstr = std.cstr;

 pub const Flag = struct {
     name: [*:0]const u8,
@@ -57,14 +56,14 @@ pub fn ParseResult(comptime flags: []const Flag) type {

         pub fn boolFlag(self: Self, flag_name: [*:0]const u8) bool {
             for (self.flag_data) |flag_data| {
-                if (cstr.cmp(flag_data.name, flag_name) == 0) return flag_data.value.boolean;
+                if (std.mem.orderZ(u8, flag_data.name, flag_name) == .eq) return flag_data.value.boolean;
             }
             unreachable; // Invalid flag_name
         }

         pub fn argFlag(self: Self, flag_name: [*:0]const u8) ?[:0]const u8 {
             for (self.flag_data) |flag_data| {
-                if (cstr.cmp(flag_data.name, flag_name) == 0) {
+                if (std.mem.orderZ(u8, flag_data.name, flag_name) == .eq) {
                     return std.mem.span(flag_data.value.arg);
                 }
             }
@@ -80,7 +79,7 @@ pub fn parse(args: []const [*:0]const u8, comptime flags: []const Flag) !ParseRe
     while (arg_idx < args.len) : (arg_idx += 1) {
         var parsed_flag = false;
         inline for (flags, &ret.flag_data) |flag, *flag_data_i| {
-            if (cstr.cmp(flag.name, args[arg_idx]) == 0) {
+            if (std.mem.orderZ(u8, flag.name, args[arg_idx]) == .eq) {
                 switch (flag.kind) {
                     .boolean => flag_data_i.*.value.boolean = true,
                     .arg => {
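std.cstr is gone from the standard library as of zig 0.11, so the flag comparisons above switch to std.mem.orderZ, which walks two sentinel-terminated strings and returns a std.math.Order. A minimal sketch of the replacement pattern; the flag names are made up:

    const std = @import("std");

    test "orderZ replaces cstr.cmp" {
        const want: [*:0]const u8 = "--verbose";
        const got: [*:0]const u8 = "--verbose";
        // Old: cstr.cmp(want, got) == 0.  New: compare with std.mem.orderZ.
        try std.testing.expect(std.mem.orderZ(u8, want, got) == .eq);
        try std.testing.expect(std.mem.orderZ(u8, want, "--quiet") != .eq);
    }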

@@ -499,7 +499,7 @@ fn initgroups_dyn(
     if (limit > 0 and size.* == limit)
         return c.NSS_STATUS_SUCCESS;

-    const oldsize = @intCast(usize, size.*);
+    const oldsize: u32 = @intCast(size.*);
     const newsize_want = blk: {
         // double the oldsize until it is above or equal 'remaining'
         var res = oldsize;
@@ -510,7 +510,7 @@ fn initgroups_dyn(
     const newsize: usize = if (limit <= 0)
         newsize_want
     else
-        @min(@intCast(usize, limit), newsize_want);
+        @min(@as(usize, @intCast(limit)), newsize_want);

     var buf = groupsp.*[0..oldsize];
     const new_groups = state.initgroups_dyn_allocator.realloc(
@@ -520,7 +520,7 @@ fn initgroups_dyn(

     if (new_groups) |newgroups| {
         groupsp.* = newgroups.ptr;
-        size.* = @intCast(c_long, newsize);
+        size.* = @intCast(newsize);
     } else |err| switch (err) {
         error.OutOfMemory => {
             errnop.* = @intFromEnum(os.E.NOMEM);
@@ -530,7 +530,7 @@ fn initgroups_dyn(
         }

         any = true;
-        groupsp.*[@intCast(usize, start.*)] = @intCast(u32, gid);
+        groupsp.*[@intCast(start.*)] = @intCast(gid);
         start.* += 1;
     }

@@ -605,7 +605,7 @@ test "libnss getpwuid_r and getpwnam_r" {
         c.NSS_STATUS_NOTFOUND,
         getpwnam_r(&state.file.db, "does not exist", &passwd, &buf, buf.len, &errno),
     );
-    try testing.expectEqual(@intFromEnum(os.E.NOENT), @intCast(u16, errno));
+    try testing.expectEqual(@intFromEnum(os.E.NOENT), @as(u16, @intCast(errno)));

     passwd = undefined;
     var small_buffer: [1]u8 = undefined;
@@ -613,7 +613,7 @@ test "libnss getpwuid_r and getpwnam_r" {
         c.NSS_STATUS_TRYAGAIN,
         getpwuid_r(&state.file.db, 0, &passwd, &small_buffer, small_buffer.len, &errno),
     );
-    try testing.expectEqual(@intFromEnum(os.E.RANGE), @intCast(u16, errno));
+    try testing.expectEqual(@intFromEnum(os.E.RANGE), @as(u16, @intCast(errno)));
 }

 fn testVidmantas(u: CUser) !void {
@@ -657,7 +657,7 @@ test "libnss getgrgid_r and getgrnam_r" {
         c.NSS_STATUS_NOTFOUND,
         getgrnam_r(&state, "does not exist", &group, &buf, buf.len, &errno),
     );
-    try testing.expectEqual(@intFromEnum(os.E.NOENT), @intCast(u16, errno));
+    try testing.expectEqual(@intFromEnum(os.E.NOENT), @as(u16, @intCast(errno)));
 }

 fn testSvcGroup(g: CGroup) !void {
@@ -683,7 +683,7 @@ test "libnss initgroups_dyn" {
     };

     var size: c_long = 3; // size of the gids array is this
-    var groups = try allocator.alloc(u32, @intCast(usize, size)); // buffer too small
+    var groups = try allocator.alloc(u32, @intCast(size)); // buffer too small
     defer allocator.free(groups);
     var errno: c_int = 42; // canary
     groups[0] = 42; // canary

@@ -57,7 +57,7 @@ pub const ShellWriter = struct {
     ) error{OutOfMemory}!ShellSections {
         assert(shells.len <= max_shells);
         var self = ShellSections{
-            .len = @intCast(u8, shells.len),
+            .len = @intCast(shells.len),
             .index = BoundedArray(u16, max_shells).init(shells.len) catch unreachable,
             .blob = BoundedArray(u8, (max_shells + 1) * max_shell_len).init(0) catch unreachable,
             .shell2idx = StringHashMap(u8).init(allocator),
@@ -66,13 +66,13 @@ pub const ShellWriter = struct {

         errdefer self.shell2idx.deinit();
         for (shells.constSlice(), 0..) |shell, idx| {
-            const idx8 = @intCast(u8, idx);
-            const offset = @intCast(u16, self.blob.len);
+            const idx8: u8 = @intCast(idx);
+            const offset: u16 = @intCast(self.blob.len);
             self.blob.appendSliceAssumeCapacity(shell);
             try self.shell2idx.put(self.blob.constSlice()[offset..], idx8);
             self.index.set(idx8, offset);
         }
-        self.index.appendAssumeCapacity(@intCast(u8, self.blob.len));
+        self.index.appendAssumeCapacity(@intCast(self.blob.len));
         return self;
     }


@@ -115,7 +115,7 @@ fn execute(

     const info = Info{
         .fname = db_file,
-        .size_file = splitInt(@intCast(u64, file_size_bytes)).constSlice(),
+        .size_file = splitInt(@as(u64, @intCast(file_size_bytes))).constSlice(),
         .version = db.header.version,
         .endian = @tagName(db.header.host.endian),
         .ptr_size = db.header.host.ptr_size,
@@ -194,7 +194,7 @@ fn printGroup(stdout: anytype, db: *const DB, g: *const PackedGroup) error{Overf
     var line_writer = io.bufferedWriter(stdout);
     var i: usize = 0;
     while (try it.next()) |member_offset| : (i += 1) {
-        const puser = try PackedUser.fromBytes(@alignCast(8, db.users[member_offset << 3 ..]));
+        const puser = try PackedUser.fromBytes(@alignCast(db.users[member_offset << 3 ..]));
         const name = puser.user.name();
         if (i != 0)
             _ = line_writer.write(",") catch return 3;
@@ -131,7 +131,7 @@ fn dump_passwd(wr: anytype, num_users: u64) error{IO}!void {
         const gecos = fmt.bufPrint(buf_gecos[0..], "User {d}", .{i}) catch unreachable;
         const home = fmt.bufPrint(buf_home[0..], "/home/{s}", .{name}) catch unreachable;
         const user = User{
-            .uid = @intCast(u32, i),
+            .uid = @intCast(i),
             .gid = 1000000,
             .name = name,
             .gecos = gecos,