commit 0ecd6172fc
parent 5d3dfdc8dc

stage2

 build.zig | 71

@@ -4,14 +4,15 @@ const zbs = std.build;
 
 pub fn build(b: *zbs.Builder) void {
     const target = b.standardTargetOptions(.{});
-    const mode = b.standardReleaseOptions();
-    b.use_stage1 = true;
+    const optimize = b.standardOptimizeOption(.{});
 
     const strip = b.option(bool, "strip", "Omit debug information") orelse false;
 
-    const cmph = b.addStaticLibrary("cmph", null);
-    cmph.setTarget(target);
-    cmph.setBuildMode(mode);
+    const cmph = b.addStaticLibrary(.{
+        .name = "cmph",
+        .target = target,
+        .optimize = optimize,
+    });
     cmph.linkLibC();
     cmph.addCSourceFiles(&.{
         "deps/cmph/src/bdz.c",
@@ -49,9 +50,11 @@ pub fn build(b: *zbs.Builder) void {
     cmph.addIncludePath("deps/cmph/src");
     cmph.addIncludePath("include/deps/cmph");
 
-    const bdz = b.addStaticLibrary("bdz", null);
-    bdz.setTarget(target);
-    bdz.setBuildMode(mode);
+    const bdz = b.addStaticLibrary(.{
+        .name = "bdz",
+        .target = target,
+        .optimize = optimize,
+    });
     bdz.linkLibC();
     bdz.addCSourceFiles(&.{
         "deps/bdz_read.c",
@@ -70,56 +73,72 @@ pub fn build(b: *zbs.Builder) void {
     bdz.want_lto = true;
 
     {
-        const exe = b.addExecutable("turbonss-unix2db", "src/turbonss-unix2db.zig");
+        const exe = b.addExecutable(.{
+            .name = "turbonss-unix2db",
+            .root_source_file = .{ .path = "src/turbonss-unix2db.zig" },
+            .target = target,
+            .optimize = optimize,
+        });
         exe.compress_debug_sections = .zlib;
         exe.strip = strip;
         exe.want_lto = true;
-        exe.setTarget(target);
-        exe.setBuildMode(mode);
         addCmphDeps(exe, cmph);
         exe.install();
     }
 
     {
-        const exe = b.addExecutable("turbonss-analyze", "src/turbonss-analyze.zig");
+        const exe = b.addExecutable(.{
+            .name = "turbonss-analyze",
+            .root_source_file = .{ .path = "src/turbonss-analyze.zig" },
+            .target = target,
+            .optimize = optimize,
+        });
         exe.compress_debug_sections = .zlib;
         exe.strip = strip;
         exe.want_lto = true;
-        exe.setTarget(target);
-        exe.setBuildMode(mode);
         exe.install();
     }
 
     {
-        const exe = b.addExecutable("turbonss-makecorpus", "src/turbonss-makecorpus.zig");
+        const exe = b.addExecutable(.{
+            .name = "turbonss-makecorpus",
+            .root_source_file = .{ .path = "src/turbonss-makecorpus.zig" },
+            .target = target,
+            .optimize = optimize,
+        });
         exe.compress_debug_sections = .zlib;
         exe.strip = strip;
         exe.want_lto = true;
-        exe.setTarget(target);
-        exe.setBuildMode(mode);
         exe.install();
     }
 
     {
-        const exe = b.addExecutable("turbonss-getent", "src/turbonss-getent.zig");
+        const exe = b.addExecutable(.{
+            .name = "turbonss-getent",
+            .root_source_file = .{ .path = "src/turbonss-getent.zig" },
+            .target = target,
+            .optimize = optimize,
+        });
         exe.compress_debug_sections = .zlib;
         exe.strip = strip;
         exe.want_lto = true;
         exe.linkLibC();
         exe.linkLibrary(bdz);
         exe.addIncludePath("deps/cmph/src");
-        exe.setTarget(target);
-        exe.setBuildMode(mode);
         exe.install();
     }
 
     {
-        const so = b.addSharedLibrary("nss_turbo", "src/libnss.zig", .{
-            .versioned = builtin.Version{
+        const so = b.addSharedLibrary(.{
+            .name = "nss_turbo",
+            .root_source_file = .{ .path = "src/libnss.zig" },
+            .version = builtin.Version{
                 .major = 2,
                 .minor = 0,
                 .patch = 0,
             },
+            .target = target,
+            .optimize = optimize,
         });
         so.compress_debug_sections = .zlib;
         so.strip = strip;
@@ -127,13 +146,15 @@ pub fn build(b: *zbs.Builder) void {
         so.linkLibC();
         so.linkLibrary(bdz);
         so.addIncludePath("deps/cmph/src");
-        so.setTarget(target);
-        so.setBuildMode(mode);
         so.install();
     }
 
     {
-        const src_test = b.addTest("src/test_all.zig");
+        const src_test = b.addTest(.{
+            .root_source_file = .{ .path = "src/test_all.zig" },
+            .target = target,
+            .optimize = optimize,
+        });
         addCmphDeps(src_test, cmph);
         const test_step = b.step("test", "Run the tests");
         test_step.dependOn(&src_test.step);
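For context, every artifact above moves to the stage2 struct-options build API. A minimal standalone sketch of that pattern, using the same 0.11.0-dev API calls as the diff (the project name and path are illustrative, not from this repository), is:

```zig
const std = @import("std");

pub fn build(b: *std.build.Builder) void {
    // Same option helpers as the diff above.
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    // Target/optimize now travel in the options struct instead of
    // setTarget()/setBuildMode() calls after the fact.
    const exe = b.addExecutable(.{
        .name = "example-tool", // illustrative name
        .root_source_file = .{ .path = "src/main.zig" }, // illustrative path
        .target = target,
        .optimize = optimize,
    });
    exe.install();
}
```
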
@@ -199,7 +199,7 @@ pub fn init(
     user2groups_final.len = users.len;
     for (user2groups) |*usergroups, i| {
         sort.sort(u32, usergroups.items, {}, comptime sort.asc(u32));
-        user2groups_final[i] = usergroups.toOwnedSlice(allocator);
+        user2groups_final[i] = try usergroups.toOwnedSlice(allocator);
     }
 
     return Corpus{
 src/DB.zig | 123

@@ -6,7 +6,7 @@ const meta = std.meta;
 const sort = std.sort;
 const assert = std.debug.assert;
 const Allocator = std.mem.Allocator;
-const ArrayList = std.ArrayList;
+const ArrayListAligned = std.ArrayListAligned;
 const AutoHashMap = std.AutoHashMap;
 const BoundedArray = std.BoundedArray;
 
@@ -37,20 +37,20 @@ const zeroes = &[_]u8{0} ** section_length;
 const DB = @This();
 // All sections, as they end up in the DB. Order is important.
 header: *const Header,
-bdz_gid: []const u8,
-bdz_groupname: []const u8,
-bdz_uid: []const u8,
-bdz_username: []const u8,
-idx_gid2group: []const u32,
-idx_groupname2group: []const u32,
-idx_uid2user: []const u32,
-idx_name2user: []const u32,
-shell_index: []const u16,
-shell_blob: []const u8,
-groups: []const u8,
-users: []const u8,
-groupmembers: []const u8,
-additional_gids: []const u8,
+bdz_gid: []align(8) const u8,
+bdz_groupname: []align(8) const u8,
+bdz_uid: []align(8) const u8,
+bdz_username: []align(8) const u8,
+idx_gid2group: []align(8) const u32,
+idx_groupname2group: []align(8) const u32,
+idx_uid2user: []align(8) const u32,
+idx_name2user: []align(8) const u32,
+shell_index: []align(8) const u16,
+shell_blob: []align(8) const u8,
+groups: []align(8) const u8,
+users: []align(8) const u8,
+groupmembers: []align(8) const u8,
+additional_gids: []align(8) const u8,
 
 pub fn fromCorpus(
     allocator: Allocator,
@@ -79,10 +79,12 @@ pub fn fromCorpus(
     var shell = try shellSections(allocator, corpus);
     defer shell.deinit();
 
-    const shell_index = try allocator.dupe(u16, shell.index.constSlice());
+    const shell_index = try allocator.alignedAlloc(u16, 8, shell.index.len);
+    mem.copy(u16, shell_index, shell.index.constSlice());
     errdefer allocator.free(shell_index);
 
-    const shell_blob = try allocator.dupe(u8, shell.blob.constSlice());
+    const shell_blob = try allocator.alignedAlloc(u8, 8, shell.blob.len);
+    mem.copy(u8, shell_blob, shell.blob.constSlice());
     errdefer allocator.free(shell_blob);
 
     const additional_gids = try additionalGids(allocator, corpus);
@@ -180,12 +182,12 @@ pub fn deinit(self: *DB, allocator: Allocator) void {
 }
 
 const DB_fields = meta.fields(DB);
-pub fn iov(self: *const DB) BoundedArray(os.iovec_const, DB_fields.len * 2) {
+pub fn iov(self: *align(8) const DB) BoundedArray(os.iovec_const, DB_fields.len * 2) {
     var result = BoundedArray(os.iovec_const, DB_fields.len * 2).init(0) catch unreachable;
     inline for (DB_fields) |field| {
-        comptime assertDefinedLayout(field.field_type);
+        comptime assertDefinedLayout(field.type);
         const value = @field(self, field.name);
-        const bytes: []const u8 = switch (@TypeOf(value)) {
+        const bytes: []align(8) const u8 = switch (@TypeOf(value)) {
             *const Header => mem.asBytes(value),
             else => mem.sliceAsBytes(value),
         };
@@ -248,12 +250,11 @@ pub fn fieldOffsets(lengths: DBNumbers) DBNumbers {
     var result: DBNumbers = undefined;
     result.header = 0;
     var offset = comptime nblocks_n(u64, @sizeOf(Header));
-    inline for (DB_fields[0..]) |field, i| {
-        comptime {
-            assert(mem.eql(u8, field.name, meta.fields(DBNumbers)[i].name));
-            if (mem.eql(u8, field.name, "header")) continue;
-        }
+    // skipping header (so index 1). This used to be an inline for with stage1,
+    // but that and a comptime assertion crashes the compiler as of
+    // 0.11.0-dev.1580+a5b34a61a
+    inline for (DB_fields[1..]) |field, i| {
+        assert(mem.eql(u8, field.name, meta.fields(DBNumbers)[i + 1].name));
 
         @field(result, field.name) = offset;
         offset += @field(lengths, field.name);
     }
@@ -271,8 +272,8 @@ pub fn fromBytes(buf: []align(8) const u8) InvalidHeader!DB {
         const start_block = @field(offsets, field.name);
         const end = (start_block + @field(lengths, field.name)) << section_length_bits;
         const start = start_block << section_length_bits;
-        const slice_type = meta.Child(field.field_type);
-        const value = mem.bytesAsSlice(slice_type, buf[start..end]);
+        const slice_type = meta.Child(field.type);
+        const value = mem.bytesAsSlice(slice_type, @alignCast(8, buf[start..end]));
         @field(result, field.name) = value;
     }
 
@@ -343,7 +344,7 @@ pub fn packCGroup(self: *const DB, group: *const PackedGroup, buf: []u8) error{B
 
     var i: usize = 0;
     while (it.nextMust()) |member_offset| : (i += 1) {
-        const entry = PackedUser.fromBytes(self.users[member_offset << 3 ..]);
+        const entry = PackedUser.fromBytes(@alignCast(8, self.users[member_offset << 3 ..]));
         const start = buf_offset;
         const name = entry.user.name();
         if (buf_offset + name.len + 1 > buf.len)
@@ -374,8 +375,7 @@ pub fn getGroupByName(self: *const DB, name: []const u8) ?PackedGroup {
     const idx = bdz.search(self.bdz_groupname, name);
     if (idx >= self.header.num_groups) return null;
     const offset = self.idx_groupname2group[idx];
-    const nbits = PackedGroup.alignment_bits;
-    const group = PackedGroup.fromBytes(self.groups[offset << nbits ..]).group;
+    const group = PackedGroup.fromBytes(@alignCast(8, self.groups[offset << 3 ..])).group;
     if (!mem.eql(u8, name, group.name())) return null;
     return group;
 }
@@ -384,8 +384,7 @@ pub fn getGroupByGid(self: *const DB, gid: u32) ?PackedGroup {
     const idx = bdz.search_u32(self.bdz_gid, gid);
     if (idx >= self.header.num_groups) return null;
     const offset = self.idx_gid2group[idx];
-    const nbits = PackedGroup.alignment_bits;
-    const group = PackedGroup.fromBytes(self.groups[offset << nbits ..]).group;
+    const group = PackedGroup.fromBytes(@alignCast(8, self.groups[offset << 3 ..])).group;
     if (gid != group.gid()) return null;
     return group;
 }
@@ -467,8 +466,7 @@ pub fn getUserByName(self: *const DB, name: []const u8) ?PackedUser {
     // bdz may return a hash that's bigger than the number of users
     if (idx >= self.header.num_users) return null;
     const offset = self.idx_name2user[idx];
-    const nbits = PackedUser.alignment_bits;
-    const user = PackedUser.fromBytes(self.users[offset << nbits ..]).user;
+    const user = PackedUser.fromBytes(@alignCast(8, self.users[offset << 3 ..])).user;
     if (!mem.eql(u8, name, user.name())) return null;
     return user;
 }
@@ -483,8 +481,7 @@ pub fn getUserByUid(self: *const DB, uid: u32) ?PackedUser {
     const idx = bdz.search_u32(self.bdz_uid, uid);
     if (idx >= self.header.num_users) return null;
     const offset = self.idx_uid2user[idx];
-    const nbits = PackedUser.alignment_bits;
-    const user = PackedUser.fromBytes(self.users[offset << nbits ..]).user;
+    const user = PackedUser.fromBytes(@alignCast(8, self.users[offset << 3 ..])).user;
     if (uid != user.uid()) return null;
     return user;
 }
@@ -508,20 +505,20 @@ fn shellSections(
 
 const AdditionalGids = struct {
     // user index -> offset in blob
-    idx2offset: []const u64,
+    idx2offset: []align(8) const u64,
     // compressed user gids blob. A blob contains N <= users.len items,
     // an item is:
     // len: varint
     // gid: [varint]varint,
     // ... and the gid list is delta-compressed.
-    blob: []const u8,
+    blob: []align(8) const u8,
 };
 
 fn additionalGids(
     allocator: Allocator,
     corpus: *const Corpus,
 ) error{OutOfMemory}!AdditionalGids {
-    var blob = ArrayList(u8).init(allocator);
+    var blob = ArrayListAligned(u8, 8).init(allocator);
     errdefer blob.deinit();
     var idx2offset = try allocator.alloc(u64, corpus.users.len);
     errdefer allocator.free(idx2offset);
@@ -529,7 +526,7 @@ fn additionalGids(
     // zero'th entry is empty, so groupless users can refer to it.
     try compress.appendUvarint(&blob, 0);
 
-    var scratch = try allocator.alloc(u32, 256);
+    var scratch = try allocator.alignedAlloc(u32, 8, 256);
     var scratch_allocated: bool = true;
     defer if (scratch_allocated) allocator.free(scratch);
     for (corpus.user2groups) |usergroups, user_idx| {
@@ -541,7 +538,7 @@ fn additionalGids(
         if (scratch.len < usergroups.len) {
             allocator.free(scratch);
             scratch_allocated = false;
-            scratch = try allocator.alloc(u32, usergroups.len);
+            scratch = try allocator.alignedAlloc(u32, 8, usergroups.len);
             scratch_allocated = true;
         }
         scratch.len = usergroups.len;
@@ -558,7 +555,7 @@ fn additionalGids(
 
     return AdditionalGids{
         .idx2offset = idx2offset,
-        .blob = blob.toOwnedSlice(),
+        .blob = try blob.toOwnedSlice(),
     };
 }
 
@@ -566,8 +563,8 @@ const UsersSection = struct {
     // number of users in this section
     len: u32,
     // user index -> offset in blob
-    idx2offset: []const u32,
-    blob: []const u8,
+    idx2offset: []align(8) const u32,
+    blob: []align(8) const u8,
 };
 
 fn usersSection(
@@ -576,15 +573,15 @@ fn usersSection(
     gids: *const AdditionalGids,
     shells: *const ShellSections,
 ) error{ OutOfMemory, InvalidRecord, TooMany }!UsersSection {
-    var idx2offset = try allocator.alloc(u32, corpus.users.len);
+    var idx2offset = try allocator.alignedAlloc(u32, 8, corpus.users.len);
     errdefer allocator.free(idx2offset);
     // as of writing each user takes 12 bytes + blobs + padding, padded to
     // 8 bytes. 24 is an optimistic lower bound for an average record size.
-    var blob = try ArrayList(u8).initCapacity(allocator, 24 * corpus.users.len);
+    var blob = try ArrayListAligned(u8, 8).initCapacity(allocator, 24 * corpus.users.len);
     errdefer blob.deinit();
     var i: usize = 0;
     while (i < corpus.users.len) : (i += 1) {
-        // TODO: this is inefficient by calling `.slice()` on every iteration
+        // TODO: this may be inefficient by calling `.slice()` on every iteration?
         const user = corpus.users.get(i);
         const user_offset = math.cast(u35, blob.items.len) orelse return error.TooMany;
         assert(user_offset & 7 == 0);
@@ -600,7 +597,7 @@ fn usersSection(
     return UsersSection{
         .len = @intCast(u32, corpus.users.len),
         .idx2offset = idx2offset,
-        .blob = blob.toOwnedSlice(),
+        .blob = try blob.toOwnedSlice(),
     };
 }
 
@@ -608,7 +605,7 @@ const GroupMembers = struct {
     // group index to it's offset in blob
     idx2offset: []const u64,
     // members are delta-varint encoded byte-offsets to the user struct
-    blob: []const u8,
+    blob: []align(8) const u8,
 };
 
 fn groupMembers(
@@ -618,12 +615,12 @@ fn groupMembers(
 ) error{OutOfMemory}!GroupMembers {
     var idx2offset = try allocator.alloc(u64, corpus.groups.len);
     errdefer allocator.free(idx2offset);
-    var blob = ArrayList(u8).init(allocator);
+    var blob = ArrayListAligned(u8, 8).init(allocator);
     errdefer blob.deinit();
     // zero'th entry is empty, so empty groups can refer to it
     try compress.appendUvarint(&blob, 0);
 
-    var scratch = try ArrayList(u32).initCapacity(allocator, 1024);
+    var scratch = try ArrayListAligned(u32, 8).initCapacity(allocator, 1024);
     defer scratch.deinit();
 
     for (corpus.group2users) |members, group_idx| {
@@ -647,7 +644,7 @@ fn groupMembers(
     }
     return GroupMembers{
         .idx2offset = idx2offset,
-        .blob = blob.toOwnedSlice(),
+        .blob = try blob.toOwnedSlice(),
     };
 }
 
@@ -655,8 +652,8 @@ const GroupsSection = struct {
     // number of groups in this section
     len: u32,
     // group index -> offset in blob
-    idx2offset: []const u32,
-    blob: []const u8,
+    idx2offset: []align(8) const u32,
+    blob: []align(8) const u8,
 };
 
 fn groupsSection(
@@ -664,10 +661,10 @@ fn groupsSection(
     corpus: *const Corpus,
     members_offset: []const u64,
 ) error{ OutOfMemory, InvalidRecord }!GroupsSection {
-    var idx2offset = try allocator.alloc(u32, corpus.groups.len);
+    var idx2offset = try allocator.alignedAlloc(u32, 8, corpus.groups.len);
     errdefer allocator.free(idx2offset);
 
-    var blob = try ArrayList(u8).initCapacity(allocator, 8 * corpus.groups.len);
+    var blob = try ArrayListAligned(u8, 8).initCapacity(allocator, 8 * corpus.groups.len);
     errdefer blob.deinit();
 
     var i: usize = 0;
@@ -689,7 +686,7 @@ fn groupsSection(
     return GroupsSection{
         .len = @intCast(u32, corpus.groups.len),
         .idx2offset = idx2offset,
-        .blob = blob.toOwnedSlice(),
+        .blob = try blob.toOwnedSlice(),
     };
 }
 
@@ -707,14 +704,14 @@ fn bdzIdx(
     packed_mphf: []const u8,
     keys: []const T,
     idx2offset: []const u32,
-) error{OutOfMemory}![]const u32 {
+) error{OutOfMemory}![]align(8) const u32 {
     const search_fn = switch (T) {
         u32 => bdz.search_u32,
         []const u8 => bdz.search,
         else => unreachable,
     };
     assert(keys.len <= math.maxInt(u32));
-    var result = try allocator.alloc(u32, keys.len);
+    var result = try allocator.alignedAlloc(u32, 8, keys.len);
     errdefer allocator.free(result);
     for (keys) |key, i|
         result[search_fn(packed_mphf, key)] = idx2offset[i];
@@ -751,7 +748,7 @@ fn assertDefinedLayout(comptime T: type) void {
             if (meta.containerLayout(T) == .Auto)
                 @compileError("layout of " ++ @typeName(T) ++ " is undefined");
             for (meta.fields(T)) |field|
-                assertDefinedLayout(field.field_type);
+                assertDefinedLayout(field.type);
         },
         else => @compileError("unexpected type " ++ @typeName(T)),
     },
@@ -793,7 +790,7 @@ test "DB getgrnam/getgrgid" {
     var errc = ErrCtx{};
     var db = try DB.fromCorpus(testing.allocator, &corpus, &errc);
     defer db.deinit(testing.allocator);
-    var buf = try testing.allocator.alloc(u8, db.getgrBufsize());
+    var buf = try testing.allocator.alignedAlloc(u8, 8, db.getgrBufsize());
     defer testing.allocator.free(buf);
 
     {
@@ -835,7 +832,7 @@ test "DB getpwnam/getpwuid" {
     var errc = ErrCtx{};
     var db = try DB.fromCorpus(testing.allocator, &corpus, &errc);
     defer db.deinit(testing.allocator);
-    var buf = try testing.allocator.alloc(u8, db.getpwBufsize());
+    var buf = try testing.allocator.alignedAlloc(u8, 8, db.getpwBufsize());
     defer testing.allocator.free(buf);
 
     {
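The alignment edits above all follow one pattern: allocate with an explicit 8-byte alignment via allocator.alignedAlloc, and reassert that alignment with @alignCast(8, ...) when a plain byte slice is known to start on an 8-byte boundary. A self-contained sketch of that pattern, with illustrative names that are not part of the repository, is:

```zig
const std = @import("std");

// Illustrative sketch: make an 8-byte-aligned copy of `src`. The builtin call
// forms match the 0.11.0-dev compiler this commit targets.
fn dupeAligned8(allocator: std.mem.Allocator, src: []const u8) ![]align(8) u8 {
    const dst = try allocator.alignedAlloc(u8, 8, src.len);
    std.mem.copy(u8, dst, src);
    return dst;
}

test "dupeAligned8 returns an 8-byte-aligned slice" {
    const copy = try dupeAligned8(std.testing.allocator, "example");
    defer std.testing.allocator.free(copy);

    // A []const u8 view loses the alignment in its type ...
    const raw: []const u8 = copy;
    // ... and @alignCast reasserts it, as the DB accessors above do.
    const aligned: []align(8) const u8 = @alignCast(8, raw);
    try std.testing.expectEqual(@as(usize, 0), @ptrToInt(aligned.ptr) % 8);
}
```
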
@@ -135,7 +135,7 @@ pub const CGroup = extern struct {
 };
 
 // size of the pointer to a single member.
-pub const ptr_size = @sizeOf(meta.Child(meta.fieldInfo(CGroup, .gr_mem).field_type));
+pub const ptr_size = @sizeOf(meta.Child(meta.fieldInfo(CGroup, .gr_mem).type));
 
 const testing = std.testing;
 
@@ -3,7 +3,7 @@ const std = @import("std");
 const mem = std.mem;
 const assert = std.debug.assert;
 const Allocator = mem.Allocator;
-const ArrayList = std.ArrayList;
+const ArrayListAligned = std.ArrayListAligned;
 const BufSet = std.BufSet;
 
 const pad = @import("padding.zig");
@@ -40,7 +40,7 @@ pub const Entry = struct {
     end: usize,
 };
 
-pub fn fromBytes(bytes: []const u8) Entry {
+pub fn fromBytes(bytes: []align(8) const u8) Entry {
     const inner = mem.bytesAsValue(Inner, bytes[0..@sizeOf(Inner)]);
     const start_blob = @sizeOf(Inner);
     const end_strings = @sizeOf(Inner) + inner.groupnameLen();
@@ -65,7 +65,7 @@ fn validateUtf8(s: []const u8) InvalidRecord!void {
 }
 
 pub const Iterator = struct {
-    section: []const u8,
+    section: []align(8) const u8,
     next_start: usize = 0,
     idx: u32 = 0,
     total: u32,
@@ -73,7 +73,7 @@ pub const Iterator = struct {
 
     pub fn next(it: *Iterator) ?PackedGroup {
         if (it.idx == it.total) return null;
-        const entry = fromBytes(it.section[it.next_start..]);
+        const entry = fromBytes(@alignCast(8, it.section[it.next_start..]));
         it.idx += 1;
         it.next_start += entry.end;
         it.advanced_by = entry.end;
@@ -88,7 +88,7 @@ pub const Iterator = struct {
     }
 };
 
-pub fn iterator(section: []const u8, total: u32) Iterator {
+pub fn iterator(section: []align(8) const u8, total: u32) Iterator {
     return Iterator{
         .section = section,
         .total = total,
@@ -108,7 +108,7 @@ pub inline fn name(self: *const PackedGroup) []const u8 {
 }
 
 pub fn packTo(
-    arr: *ArrayList(u8),
+    arr: *ArrayListAligned(u8, 8),
     group: GroupStored,
 ) error{ InvalidRecord, OutOfMemory }!void {
     std.debug.assert(arr.items.len & 7 == 0);
@@ -127,7 +127,7 @@ test "PackedGroup alignment" {
 }
 
 test "PackedGroup construct" {
-    var buf = ArrayList(u8).init(testing.allocator);
+    var buf = ArrayListAligned(u8, 8).init(testing.allocator);
     defer buf.deinit();
 
     const groups = [_]GroupStored{
@@ -3,7 +3,7 @@ const assert = std.debug.assert;
 const mem = std.mem;
 const math = std.math;
 const Allocator = mem.Allocator;
-const ArrayList = std.ArrayList;
+const ArrayListAligned = std.ArrayListAligned;
 const StringHashMap = std.StringHashMap;
 const fieldInfo = std.meta.fieldInfo;
 
@@ -72,11 +72,11 @@ const Inner = packed struct {
     }
 };
 
-// PackedUser does not allocate; it re-interprets the "bytes" blob
+// PackedUser does not allocate; it re-interprets the var_payload
 // field. Both of those fields are pointers to "our representation" of
 // that field.
-inner: *const Inner,
-bytes: []const u8,
+inner: *align(8) const Inner,
+var_payload: []const u8,
 additional_gids_offset: u64,
 
 pub const Entry = struct {
@@ -84,27 +84,27 @@ pub const Entry = struct {
     end: usize,
 };
 
-pub fn fromBytes(bytes: []const u8) Entry {
-    const inner = mem.bytesAsValue(Inner, bytes[0..@sizeOf(Inner)]);
-    const start_blob = @sizeOf(Inner);
-    const end_strings = start_blob + inner.stringLength();
-    const gids_offset = compress.uvarint(bytes[end_strings..]) catch |err| switch (err) {
+pub fn fromBytes(blob: []align(8) const u8) Entry {
+    const start_var_payload = @bitSizeOf(Inner) / 8;
+    const inner = @ptrCast(*align(8) const Inner, blob[0..start_var_payload]);
+    const end_strings = start_var_payload + inner.stringLength();
+    const gids_offset = compress.uvarint(blob[end_strings..]) catch |err| switch (err) {
         error.Overflow => unreachable,
     };
-    const end_blob = end_strings + gids_offset.bytes_read;
+    const end_payload = end_strings + gids_offset.bytes_read;
 
     return Entry{
         .user = PackedUser{
             .inner = inner,
-            .bytes = bytes[start_blob..end_blob],
+            .var_payload = blob[start_var_payload..end_payload],
             .additional_gids_offset = gids_offset.value,
         },
-        .end = pad.roundUp(usize, alignment_bits, end_blob),
+        .end = pad.roundUp(usize, alignment_bits, end_payload),
     };
 }
 
 pub const Iterator = struct {
-    section: []const u8,
+    section: []align(8) const u8,
     next_start: usize = 0,
     shell_reader: ShellReader,
     idx: u32 = 0,
@@ -113,7 +113,7 @@ pub const Iterator = struct {
 
     pub fn next(it: *Iterator) ?PackedUser {
         if (it.idx == it.total) return null;
-        const entry = fromBytes(it.section[it.next_start..]);
+        const entry = fromBytes(@alignCast(8, it.section[it.next_start..]));
         it.idx += 1;
         it.next_start += entry.end;
         it.advanced_by = entry.end;
@@ -128,7 +128,7 @@ pub const Iterator = struct {
     }
 };
 
-pub fn iterator(section: []const u8, total: u32, shell_reader: ShellReader) Iterator {
+pub fn iterator(section: []align(8) const u8, total: u32, shell_reader: ShellReader) Iterator {
     return Iterator{
         .section = section,
         .total = total,
@@ -138,7 +138,7 @@ pub fn iterator(section: []const u8, total: u32, shell_reader: ShellReader) Iter
 
 // packTo packs the User record and copies it to the given arraylist.
 pub fn packTo(
-    arr: *ArrayList(u8),
+    arr: *ArrayListAligned(u8, 8),
     user: User,
     additional_gids_offset: u64,
     idxFn: StringHashMap(u8),
@@ -146,10 +146,10 @@ pub fn packTo(
     std.debug.assert(arr.items.len & 7 == 0);
     // function arguments are consts. We need to mutate the underlying
     // slice, so passing it via pointer instead.
-    const home_len = try validate.downCast(fieldInfo(Inner, .home_len).field_type, user.home.len - 1);
-    const name_len = try validate.downCast(fieldInfo(Inner, .name_len).field_type, user.name.len - 1);
-    const shell_len = try validate.downCast(fieldInfo(Inner, .shell_len_or_idx).field_type, user.shell.len - 1);
-    const gecos_len = try validate.downCast(fieldInfo(Inner, .gecos_len).field_type, user.gecos.len);
+    const home_len = try validate.downCast(fieldInfo(Inner, .home_len).type, user.home.len - 1);
+    const name_len = try validate.downCast(fieldInfo(Inner, .name_len).type, user.name.len - 1);
+    const shell_len = try validate.downCast(fieldInfo(Inner, .shell_len_or_idx).type, user.shell.len - 1);
+    const gecos_len = try validate.downCast(fieldInfo(Inner, .gecos_len).type, user.gecos.len);
 
     try validate.utf8(user.home);
     try validate.utf8(user.name);
@@ -166,7 +166,7 @@ pub fn packTo(
         .name_len = name_len,
         .gecos_len = gecos_len,
     };
-    try arr.*.appendSlice(mem.asBytes(&inner)[0..@sizeOf(Inner)]);
+    try arr.*.appendSlice(mem.asBytes(&inner)[0 .. @bitSizeOf(Inner) / 8]);
     try arr.*.appendSlice(user.home);
 
     if (!inner.name_is_a_suffix)
@@ -190,26 +190,26 @@ pub fn additionalGidsOffset(self: PackedUser) u64 {
 }
 
 pub fn home(self: PackedUser) []const u8 {
-    return self.bytes[0..self.inner.homeLen()];
+    return self.var_payload[0..self.inner.homeLen()];
 }
 
 pub fn name(self: PackedUser) []const u8 {
     const name_pos = self.inner.nameStart();
     const name_len = self.inner.nameLen();
-    return self.bytes[name_pos .. name_pos + name_len];
+    return self.var_payload[name_pos .. name_pos + name_len];
 }
 
 pub fn gecos(self: PackedUser) []const u8 {
     const gecos_pos = self.inner.gecosStart();
     const gecos_len = self.inner.gecosLen();
-    return self.bytes[gecos_pos .. gecos_pos + gecos_len];
+    return self.var_payload[gecos_pos .. gecos_pos + gecos_len];
 }
 
 pub fn shell(self: PackedUser, shell_reader: ShellReader) []const u8 {
     if (self.inner.shell_here) {
         const shell_pos = self.inner.maybeShellStart();
         const shell_len = self.inner.shellLen();
-        return self.bytes[shell_pos .. shell_pos + shell_len];
+        return self.var_payload[shell_pos .. shell_pos + shell_len];
     }
     return shell_reader.get(self.inner.shell_len_or_idx);
 }
@@ -226,25 +226,18 @@ pub fn toUser(self: *const PackedUser, shell_reader: ShellReader) User {
     };
 }
 
-pub const max_home_len = math.maxInt(fieldInfo(Inner, .home_len).field_type) + 1;
-pub const max_name_len = math.maxInt(fieldInfo(Inner, .name_len).field_type) + 1;
-pub const max_gecos_len = math.maxInt(fieldInfo(Inner, .gecos_len).field_type);
+pub const max_home_len = math.maxInt(fieldInfo(Inner, .home_len).type) + 1;
+pub const max_name_len = math.maxInt(fieldInfo(Inner, .name_len).type) + 1;
+pub const max_gecos_len = math.maxInt(fieldInfo(Inner, .gecos_len).type);
 
 pub const max_str_len =
-    math.maxInt(fieldInfo(Inner, .shell_len_or_idx).field_type) + 1 +
+    math.maxInt(fieldInfo(Inner, .shell_len_or_idx).type) + 1 +
     max_home_len +
     max_name_len +
     max_gecos_len;
 
 const testing = std.testing;
 
-test "PackedUser internal and external alignment" {
-    try testing.expectEqual(
-        @sizeOf(PackedUser.Inner) * 8,
-        @bitSizeOf(PackedUser.Inner),
-    );
-}
-
 fn testShellIndex(allocator: Allocator) StringHashMap(u8) {
     var result = StringHashMap(u8).init(allocator);
     result.put("/bin/bash", 0) catch unreachable;
@@ -252,13 +245,14 @@ fn testShellIndex(allocator: Allocator) StringHashMap(u8) {
     return result;
 }
 
+const test_shell_reader_index: [3]u16 align(8) = .{ 0, 9, 17 };
 const test_shell_reader = ShellReader{
     .blob = "/bin/bash/bin/zsh",
-    .index = &[_]u16{ 0, 9, 17 },
+    .index = &test_shell_reader_index,
 };
 
 test "PackedUser pack max_user" {
-    var arr = ArrayList(u8).init(testing.allocator);
+    var arr = ArrayListAligned(u8, 8).init(testing.allocator);
     defer arr.deinit();
 
     var idx_noop = StringHashMap(u8).init(testing.allocator);
@@ -268,7 +262,7 @@ test "PackedUser pack max_user" {
 }
 
 test "PackedUser construct section" {
-    var buf = ArrayList(u8).init(testing.allocator);
+    var buf = ArrayListAligned(u8, 8).init(testing.allocator);
     defer buf.deinit();
 
     const users = [_]User{ User{
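The switch from @sizeOf(Inner) to @bitSizeOf(Inner) / 8 above reflects how the stage2 compiler lays out packed structs: @sizeOf reports the ABI size of the backing integer, which can be larger than the packed payload itself. A small sketch with an illustrative packed struct (not the repository's Inner):

```zig
const std = @import("std");

// Illustrative packed struct: 25 bits of payload, backed by an integer whose
// ABI size rounds up to 4 bytes, so @sizeOf(Example) * 8 > @bitSizeOf(Example).
// That gap is why the pack/unpack code above copies only @bitSizeOf(Inner)/8 bytes.
const Example = packed struct {
    a: u16,
    b: u8,
    c: u1,
};

test "packed struct: bit size vs ABI size" {
    try std.testing.expectEqual(@as(usize, 25), @bitSizeOf(Example));
    try std.testing.expect(@sizeOf(Example) * 8 >= @bitSizeOf(Example));
}
```
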
 src/cmph.zig | 14

@@ -24,7 +24,7 @@ extern fn cmph_destroy(mphf: [*]u8) void;
 // pack packs cmph hashes for the given input and returns a slice ("cmph pack
 // minus first 4 bytes") for further storage. The slice must be freed by the
 // caller.
-pub fn pack(allocator: Allocator, input: [][*:0]const u8) error{OutOfMemory}![]const u8 {
+pub fn pack(allocator: Allocator, input: [][*:0]const u8) error{OutOfMemory}![]align(8) const u8 {
     const input_len = @intCast(c_uint, input.len);
     var source = cmph_io_vector_adapter(input.ptr, input_len);
     defer cmph_io_vector_adapter_destroy(source);
@@ -35,7 +35,7 @@ pub fn pack(allocator: Allocator, input: [][*:0]const u8) error{OutOfMemory}![]c
     cmph_config_destroy(config);
 
     const size = cmph_packed_size(mph);
-    var buf = try allocator.alloc(u8, size);
+    var buf = try allocator.alignedAlloc(u8, 8, size);
     errdefer allocator.free(buf);
     cmph_pack(mph, buf.ptr);
     cmph_destroy(mph);
@@ -43,13 +43,13 @@ pub fn pack(allocator: Allocator, input: [][*:0]const u8) error{OutOfMemory}![]c
 }
 
 // perfect-hash a list of numbers and return the packed mphf
-pub fn packU32(allocator: Allocator, numbers: []const u32) error{OutOfMemory}![]const u8 {
-    var keys: [][6]u8 = try allocator.alloc([6]u8, numbers.len);
+pub fn packU32(allocator: Allocator, numbers: []const u32) error{OutOfMemory}![]align(8) const u8 {
+    var keys: [][6]u8 = try allocator.alignedAlloc([6]u8, 8, numbers.len);
     defer allocator.free(keys);
     for (numbers) |n, i|
         keys[i] = unzeroZ(n);
 
-    var keys2 = try allocator.alloc([*:0]const u8, numbers.len);
+    var keys2 = try allocator.alignedAlloc([*:0]const u8, 8, numbers.len);
     defer allocator.free(keys2);
     for (keys) |_, i|
         keys2[i] = @ptrCast([*:0]const u8, &keys[i]);
@@ -57,10 +57,10 @@ pub fn packU32(allocator: Allocator, numbers: []const u32) error{OutOfMemory}![]
 }
 
 // perfect-hash a list of strings and return the packed mphf
-pub fn packStr(allocator: Allocator, strings: []const []const u8) error{OutOfMemory}![]const u8 {
+pub fn packStr(allocator: Allocator, strings: []const []const u8) error{OutOfMemory}![]align(8) const u8 {
     var arena = std.heap.ArenaAllocator.init(allocator);
     defer arena.deinit();
-    var keys = try arena.allocator().alloc([*:0]const u8, strings.len);
+    var keys = try arena.allocator().alignedAlloc([*:0]const u8, 8, strings.len);
     for (strings) |_, i|
         keys[i] = try arena.allocator().dupeZ(u8, strings[i]);
     return pack(allocator, keys);
@@ -5,7 +5,7 @@
 // golang's varint implementation.
 const std = @import("std");
 
-const ArrayList = std.ArrayList;
+const ArrayListAligned = std.ArrayListAligned;
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
 const math = std.math;
@@ -183,7 +183,7 @@ pub fn deltaDecompressionIterator(vit: *VarintSliceIterator) DeltaDecompressionI
     };
 }
 
-pub fn appendUvarint(arr: *ArrayList(u8), x: u64) Allocator.Error!void {
+pub fn appendUvarint(arr: *ArrayListAligned(u8, 8), x: u64) Allocator.Error!void {
     var buf: [maxVarintLen64]u8 = undefined;
     const n = putUvarint(&buf, x);
     try arr.appendSlice(buf[0..n]);
@@ -221,7 +221,7 @@ test "compress putUvarint/uvarint" {
 }
 
 test "compress varintSliceIterator" {
-    var buf = ArrayList(u8).init(testing.allocator);
+    var buf = ArrayListAligned(u8, 8).init(testing.allocator);
     defer buf.deinit();
     try appendUvarint(&buf, uvarint_tests.len);
     for (uvarint_tests) |x|
@@ -245,7 +245,7 @@ test "compress delta compress/decompress" {
         .{ .input = &[_]u8{ 0, 254, 255 }, .want = &[_]u8{ 0, 253, 0 } },
     };
     for (tests) |t| {
-        var arr = try ArrayList(u8).initCapacity(
+        var arr = try ArrayListAligned(u8, 8).initCapacity(
             testing.allocator,
             t.input.len,
         );
@@ -274,7 +274,7 @@ test "compress delta compression negative tests" {
         &[_]u8{ 0, 1, 1 },
         &[_]u8{ 0, 1, 2, 1 },
     }) |t| {
-        var arr = try ArrayList(u8).initCapacity(testing.allocator, t.len);
+        var arr = try ArrayListAligned(u8, 8).initCapacity(testing.allocator, t.len);
         defer arr.deinit();
         try arr.appendSlice(t);
         try testing.expectError(error.NotSorted, deltaCompress(u8, arr.items));
@@ -286,7 +286,7 @@ test "compress delta decompress overflow" {
         &[_]u8{ 255, 0 },
         &[_]u8{ 0, 128, 127 },
     }) |t| {
-        var arr = try ArrayList(u8).initCapacity(testing.allocator, t.len);
+        var arr = try ArrayListAligned(u8, 8).initCapacity(testing.allocator, t.len);
         defer arr.deinit();
         try arr.appendSlice(t);
         try testing.expectError(error.Overflow, deltaDecompress(u8, arr.items));
@@ -298,7 +298,7 @@ test "compress delta decompression with an iterator" {
     std.mem.copy(u64, compressed[0..], uvarint_tests[0..]);
     try deltaCompress(u64, compressed[0..]);
 
-    var buf = ArrayList(u8).init(testing.allocator);
+    var buf = ArrayListAligned(u8, 8).init(testing.allocator);
     defer buf.deinit();
     try appendUvarint(&buf, compressed.len);
     for (compressed) |x|
@@ -316,7 +316,7 @@ test "compress delta decompression with an iterator" {
 
 test "compress appendUvarint" {
     for (uvarint_tests) |x| {
-        var buf = ArrayList(u8).init(testing.allocator);
+        var buf = ArrayListAligned(u8, 8).init(testing.allocator);
         defer buf.deinit();
 
         try appendUvarint(&buf, x);
@@ -20,6 +20,15 @@ const Endian = enum(u4) {
     }
 };
 
+pub const Host = packed struct {
+    endian: Endian = Endian.native(),
+    ptr_size: u4 = ptr_size,
+
+    pub fn new() Host {
+        return Host{};
+    }
+};
+
 pub const section_length_bits = 6;
 pub const section_length = 1 << section_length_bits;
 
@@ -30,28 +39,29 @@ pub const Invalid = error{
     InvalidPointerSize,
 };
 
-pub const Header = packed struct {
-    magic: [4]u8 = magic,
-    version: u8 = version,
-    endian: Endian = Endian.native(),
-    ptr_size: u4 = ptr_size,
-    nblocks_shell_blob: u8,
-    num_shells: u8,
-    num_groups: u32,
-    num_users: u32,
-    nblocks_bdz_gid: u32,
-    nblocks_bdz_groupname: u32,
-    nblocks_bdz_uid: u32,
-    nblocks_bdz_username: u32,
+pub const Header = extern struct {
+    magic: [8]u8 = magic ++ [1]u8{0} ** 4,
     nblocks_groups: u64,
     nblocks_users: u64,
     nblocks_groupmembers: u64,
     nblocks_additional_gids: u64,
     getgr_bufsize: u64,
     getpw_bufsize: u64,
-    padding: [48]u8 = [1]u8{0} ** 48,
 
-    pub fn fromBytes(blob: *const [@sizeOf(Header)]u8) Invalid!*const Header {
+    num_groups: u32,
+    num_users: u32,
+    nblocks_bdz_gid: u32,
+    nblocks_bdz_groupname: u32,
+    nblocks_bdz_uid: u32,
+    nblocks_bdz_username: u32,
+
+    nblocks_shell_blob: u8,
+    num_shells: u8,
+    version: u8 = version,
+    host: Host = Host.new(),
+    padding: [40]u8 = [1]u8{0} ** 40,
+
+    pub fn fromBytes(blob: *align(8) const [@sizeOf(Header)]u8) Invalid!*const Header {
         const self = mem.bytesAsValue(Header, blob);
 
         if (!mem.eql(u8, magic[0..4], blob[0..4]))
@@ -60,13 +70,13 @@ pub const Header = packed struct {
         if (self.version != 0)
             return error.InvalidVersion;
 
-        if (self.endian != Endian.native())
+        if (self.host.endian != Endian.native())
             return error.InvalidEndianess;
 
         // when ptr size is larger than on the host that constructed it the DB,
         // getgr_bufsize/getpw_bufsize may return insufficient values, causing
         // OutOfMemory for getgr* and getpw* calls.
-        if (self.ptr_size < ptr_size)
+        if (self.host.ptr_size < ptr_size)
             return error.InvalidPointerSize;
 
         return self;
@@ -80,7 +90,7 @@ test "header Section length is a power of two" {
 }
 
 test "header fits into two sections" {
-    try testing.expect(@sizeOf(Header) == 2 * section_length);
+    try testing.expectEqual(2 * section_length, @sizeOf(Header));
 }
 
 test "header bit header size is equal to @sizeOf(Header)" {
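Since the Header above is now an extern struct with a defined, C-compatible layout, its size and field offsets can be pinned down at comptime. A standalone sketch of that kind of check, using an illustrative struct rather than the repository's Header, looks like:

```zig
const std = @import("std");

// Illustrative on-disk header layout check, not the repository's Header:
// extern structs have a guaranteed field order, so sizes and offsets can be
// asserted at compile time.
const ExampleHeader = extern struct {
    magic: [8]u8,
    nblocks: u64,
    num_entries: u32,
    version: u8,
    padding: [3]u8 = [1]u8{0} ** 3,
};

comptime {
    std.debug.assert(@sizeOf(ExampleHeader) == 24);
    std.debug.assert(@offsetOf(ExampleHeader, "nblocks") == 8);
    std.debug.assert(@offsetOf(ExampleHeader, "num_entries") == 16);
}
```
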
@@ -29,7 +29,7 @@ const ENV_DB = "TURBONSS_DB";
 const ENV_VERBOSE = "TURBONSS_VERBOSE";
 const ENV_OMIT_MEMBERS = "TURBONSS_OMIT_MEMBERS";
 
-export var turbonss_db_path: [:0]const u8 = "/etc/turbonss/db.turbo";
+export var turbonss_db_path: [*:0]const u8 = "/etc/turbonss/db.turbo";
 
 // State is a type of the global variable holding the process state:
 // the DB handle and all the iterators.
@@ -90,7 +90,7 @@ fn init() void {
 
         // argv does not exist because
         // https://github.com/ziglang/zig/issues/4524#issuecomment-1184748756
-        // so reading /proc/self/cmdline because
+        // so reading /proc/self/cmdline
        const fd = os.openZ("/proc/self/cmdline", os.O.RDONLY, 0) catch break :blk false;
         defer os.close(fd);
         break :blk isId(fd);
@@ -99,7 +99,7 @@ fn init() void {
     const fname = if (getenv(ENV_DB)) |env|
         mem.sliceTo(env, 0)
     else
-        turbonss_db_path;
+        mem.sliceTo(turbonss_db_path, 0);
 
     const file = File.open(fname) catch |err| {
         if (verbose)
@ -1,7 +1,7 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const ArrayListAligned = std.ArrayListAligned;

// rounds up an int to the nearest factor of nbits.
pub fn roundUp(comptime T: type, comptime nbits: u8, n: T) T {
@ -16,8 +16,8 @@ pub fn until(comptime T: type, comptime nbits: u8, n: T) T {
return roundUp(T, nbits, n) - n;
}

// arrayList adds padding to an ArrayList(u8) for a given number of nbits
// arrayList adds padding to an ArrayListAligned(u8, 8) for a given number of nbits
pub fn arrayList(arr: *ArrayList(u8), comptime nbits: u8) Allocator.Error!void {
pub fn arrayList(arr: *ArrayListAligned(u8, 8), comptime nbits: u8) Allocator.Error!void {
const padding = until(u64, nbits, arr.items.len);
try arr.*.appendNTimes(0, padding);
}
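A worked example of what the padding helpers are used for, under the assumption (a guess from how they are used for section alignment) that roundUp rounds n up to the next multiple of 2^nbits:

const std = @import("std");

// Assumed semantics, for illustration: round `n` up to the next multiple of
// 2^nbits; `untilNext` is the number of zero bytes arrayList would append.
fn roundUpPow2(comptime nbits: u8, n: u64) u64 {
    const mask = std.math.shl(u64, 1, nbits) - 1;
    return (n + mask) & ~mask;
}

fn untilNext(comptime nbits: u8, n: u64) u64 {
    return roundUpPow2(nbits, n) - n;
}

test "padding illustration" {
    // With nbits == 3 (8-byte alignment), 13 bytes of payload need 3 bytes of padding.
    try std.testing.expectEqual(@as(u64, 16), roundUpPow2(3, 13));
    try std.testing.expectEqual(@as(u64, 3), untilNext(3, 13));
}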
@ -40,7 +40,7 @@ test "padding" {
}

test "padding arrayList" {
var buf = try ArrayList(u8).initCapacity(testing.allocator, 16);
var buf = try ArrayListAligned(u8, 8).initCapacity(testing.allocator, 16);
defer buf.deinit();

buf.appendAssumeCapacity(1);
@ -10,10 +10,10 @@ pub const max_shell_len = 256;

// ShellReader interprets "Shell Index" and "Shell Blob" sections.
pub const ShellReader = struct {
index: []const u16,
index: []align(8) const u16,
blob: []const u8,

pub fn init(index: []align(2) const u8, blob: []const u8) ShellReader {
pub fn init(index: []align(8) const u8, blob: []const u8) ShellReader {
return ShellReader{
.index = std.mem.bytesAsSlice(u16, index),
.blob = blob,
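The struct above follows a common "index + blob" layout; a generic illustration of the technique (not the project's exact on-disk encoding, whose index entries may use different units): all strings are concatenated into one blob, and a u16 index records each entry's end offset, so entry i lives at blob[index[i-1]..index[i]].

const std = @import("std");

// Generic illustration: index[i] is the end offset of entry i in the blob,
// with the start of entry 0 being offset 0.
const StringTable = struct {
    index: []const u16,
    blob: []const u8,

    fn get(self: StringTable, i: usize) []const u8 {
        const start: usize = if (i == 0) 0 else self.index[i - 1];
        return self.blob[start..self.index[i]];
    }
};

test "index + blob lookup" {
    const table = StringTable{
        .index = &[_]u16{ 3, 7 },
        .blob = "zsh" ++ "bash",
    };
    try std.testing.expectEqualStrings("zsh", table.get(0));
    try std.testing.expectEqualStrings("bash", table.get(1));
}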
@ -175,8 +175,12 @@ test "shell basic shellpopcon" {
try testing.expectEqual(sections.getIndex(nobody), null);
try testing.expectEqual(sections.blob.constSlice().len, bash.len + zsh.len + long.len);

// copying section_index until https://github.com/ziglang/zig/pull/14580
var section_index: [max_shells]u16 align(8) = undefined;
for (sections.index.constSlice()) |elem, i|
section_index[i] = elem;
const shellReader = ShellReader.init(
std.mem.sliceAsBytes(sections.index.constSlice()),
std.mem.sliceAsBytes(section_index[0..sections.index.len]),
sections.blob.constSlice(),
);
try testing.expectEqualStrings(shellReader.get(0), long);
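The copy into section_index works around an alignment issue: sliceAsBytes over the original u16 buffer cannot prove the align(8) that ShellReader.init now requires. A small sketch of the general pattern, with made-up sizes:

const std = @import("std");

// Copying into an explicitly aligned buffer gives the type system a slice
// whose alignment it can see, which bytesAsSlice and align-annotated
// parameters can then rely on.
test "copy into an aligned buffer" {
    const source = [_]u8{ 1, 0, 2, 0 }; // only align(1) is known from the type
    var aligned: [4]u8 align(8) = undefined;
    std.mem.copy(u8, &aligned, &source);
    const as_u16 = std.mem.bytesAsSlice(u16, &aligned);
    try std.testing.expectEqual(@as(usize, 2), as_u16.len);
}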
@ -18,6 +18,7 @@ const File = @import("File.zig");
const PackedUser = @import("PackedUser.zig");
const PackedGroup = @import("PackedGroup.zig");
const Header = @import("header.zig").Header;
const HeaderHost = @import("header.zig").Host;
const section_length_bits = @import("header.zig").section_length_bits;

const usage =
@ -34,14 +35,14 @@ const usage =
const Info = struct {
fname: []const u8,
size_file: []const u8,
version: meta.fieldInfo(Header, .version).field_type,
version: meta.fieldInfo(Header, .version).type,
endian: []const u8,
ptr_size: meta.fieldInfo(Header, .ptr_size).field_type,
ptr_size: meta.fieldInfo(HeaderHost, .ptr_size).type,
getgr_bufsize: meta.fieldInfo(Header, .getgr_bufsize).field_type,
getgr_bufsize: meta.fieldInfo(Header, .getgr_bufsize).type,
getpw_bufsize: meta.fieldInfo(Header, .getpw_bufsize).field_type,
getpw_bufsize: meta.fieldInfo(Header, .getpw_bufsize).type,
users: meta.fieldInfo(Header, .num_users).field_type,
users: meta.fieldInfo(Header, .num_users).type,
groups: meta.fieldInfo(Header, .num_groups).field_type,
groups: meta.fieldInfo(Header, .num_groups).type,
shells: meta.fieldInfo(Header, .num_shells).field_type,
shells: meta.fieldInfo(Header, .num_shells).type,
};

pub fn main() !void {
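The .field_type to .type change tracks the rename of that member in std.builtin.Type.StructField. A small made-up example of the pattern Info uses, mirroring a field's type via std.meta.fieldInfo:

const std = @import("std");

// Made-up types: Mirror reuses Sample's field type without repeating it,
// keeping the two structs in sync if Sample.count ever changes width.
const Sample = struct { count: u32 };

const Mirror = struct {
    count: std.meta.fieldInfo(Sample, .count).type,
};

test "mirrored field type" {
    try std.testing.expect(std.meta.fieldInfo(Sample, .count).type == u32);
    const m = Mirror{ .count = 7 };
    try std.testing.expectEqual(@as(u32, 7), m.count);
}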
@ -115,8 +116,8 @@ fn execute(
.fname = db_file,
.size_file = splitInt(@intCast(u64, file_size_bytes)).constSlice(),
.version = db.header.version,
.endian = @tagName(db.header.endian),
.endian = @tagName(db.header.host.endian),
.ptr_size = db.header.ptr_size,
.ptr_size = db.header.host.ptr_size,
.getgr_bufsize = db.header.getgr_bufsize,
.getpw_bufsize = db.header.getpw_bufsize,
.users = db.header.num_users,
@ -180,7 +180,7 @@ fn printGroup(stdout: anytype, db: *const DB, g: *const PackedGroup) ?u8 {
var line_writer = io.bufferedWriter(stdout);
var i: usize = 0;
while (it.nextMust()) |member_offset| : (i += 1) {
const puser = PackedUser.fromBytes(db.users[member_offset << 3 ..]);
const puser = PackedUser.fromBytes(@alignCast(8, db.users[member_offset << 3 ..]));
const name = puser.user.name();
if (i != 0)
_ = line_writer.write(",") catch return 3;
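The shift and the @alignCast go together. As I read it (an assumption about the DB layout, not confirmed by this hunk alone), member offsets are stored divided by 8, so offset << 3 recovers a byte offset that is 8-aligned by construction, which @alignCast then asserts for the type system:

// Sketch of the assumed convention: `offset_div8` counts 8-byte units, so the
// computed byte offset is always a multiple of 8 and the cast is safe.
fn recordAt(blob: []align(8) const u8, offset_div8: usize) []align(8) const u8 {
    return @alignCast(8, blob[offset_div8 << 3 ..]);
}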
@ -249,7 +249,8 @@ test "turbonss-getent passwdAll" {
};

var i: usize = 0;
const reader = io.fixedBufferStream(stdout.items).reader();
var buf_stream = io.fixedBufferStream(stdout.items);
var reader = buf_stream.reader();
while (try reader.readUntilDelimiterOrEof(buf[0..], '\n')) |line| {
var name = mem.split(u8, line, ":");
try testing.expectEqualStrings(want_names[i], name.next().?);
@ -79,16 +79,18 @@ fn execute(
return fail(errc.wrapf("open '{s}'", .{group_fname}), stderr, err);
defer group_file.close();

var passwdReader = io.bufferedReader(passwd_file.reader()).reader();
var passwd_buf = io.bufferedReader(passwd_file.reader());
var users = User.fromReader(allocator, &errc, passwdReader) catch |err|
var passwd_reader = passwd_buf.reader();
var users = User.fromReader(allocator, &errc, passwd_reader) catch |err|
return fail(errc.wrap("read users"), stderr, err);
defer {
for (users) |*user| user.deinit(allocator);
allocator.free(users);
}

var groupReader = io.bufferedReader(group_file.reader()).reader();
var group_buf = io.bufferedReader(group_file.reader());
var groups = Group.fromReader(allocator, groupReader) catch |err|
var group_reader = group_buf.reader();
var groups = Group.fromReader(allocator, group_reader) catch |err|
return fail(errc.wrap("read groups"), stderr, err);
defer {
for (groups) |*group| group.deinit(allocator);
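The two-step construction above (and the fixedBufferStream change in the getent test earlier) avoids a footgun: io.bufferedReader returns its BufferedReader by value, and .reader() captures a pointer to it, so calling .reader() directly on the temporary can leave the reader referring to state that no longer exists. A sketch of the safe pattern (countLines and its file argument are made up):

const std = @import("std");
const io = std.io;

// The BufferedReader must live in a named variable for as long as the reader
// derived from it is used; lines longer than `buf` are treated as an error.
fn countLines(file: std.fs.File) !usize {
    var buffered = io.bufferedReader(file.reader());
    var reader = buffered.reader();
    var n: usize = 0;
    var buf: [1024]u8 = undefined;
    while (try reader.readUntilDelimiterOrEof(&buf, '\n')) |_| n += 1;
    return n;
}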
@ -32,7 +32,7 @@ pub fn name(s: []const u8, err: *ErrCtx) error{InvalidRecord}!void {
return err.returnf("cannot be empty", .{}, error.InvalidRecord);

const c0 = s[0];
if (!(ascii.isAlNum(c0) or c0 == '_' or c0 == '.' or c0 == '@'))
if (!(ascii.isAlphanumeric(c0) or c0 == '_' or c0 == '.' or c0 == '@'))
return err.returnf(
"invalid character {s} at position 0",
.{debugChar(c0).constSlice()},
@ -40,7 +40,7 @@ pub fn name(s: []const u8, err: *ErrCtx) error{InvalidRecord}!void {
);

for (s[1..]) |c, i| {
if (!(ascii.isAlNum(c) or c == '_' or c == '.' or c == '@' or c == '-'))
if (!(ascii.isAlphanumeric(c) or c == '_' or c == '.' or c == '@' or c == '-'))
return err.returnf(
"invalid character {s} at position {d}",
.{ debugChar(c).constSlice(), i + 2 },
@ -70,7 +70,7 @@ pub fn path(s: []const u8, err: *ErrCtx) error{InvalidRecord}!void {
return err.returnf("must start with /", .{}, error.InvalidRecord);

for (s[1..]) |c, i| {
if (!(ascii.isAlNum(c) or c == '/' or c == '_' or c == '.' or c == '@' or c == '-'))
if (!(ascii.isAlphanumeric(c) or c == '/' or c == '_' or c == '.' or c == '@' or c == '-'))
return err.returnf(
"invalid character 0xD at position {d}",
.{i + 2},
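For reference, the validation rules in the hunks above boil down to a simple character filter; a simplified standalone version (not the project's exact function, which also reports positions through ErrCtx):

const std = @import("std");
const ascii = std.ascii;

// Simplified rules taken from the diff: the first character must be
// alphanumeric or one of '_', '.', '@'; later characters may also be '-'.
fn validName(s: []const u8) bool {
    if (s.len == 0) return false;
    if (!(ascii.isAlphanumeric(s[0]) or s[0] == '_' or s[0] == '.' or s[0] == '@'))
        return false;
    for (s[1..]) |c| {
        if (!(ascii.isAlphanumeric(c) or c == '_' or c == '.' or c == '@' or c == '-'))
            return false;
    }
    return true;
}

test "validName" {
    try std.testing.expect(validName("deploy_user"));
    try std.testing.expect(!validName("-bad"));
}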