rename lib to src

This commit is contained in:
2022-05-25 09:42:42 +03:00
committed by Motiejus Jakštys
parent b126b95885
commit c602b7a039
20 changed files with 14 additions and 6 deletions

276
src/Corpus.zig Normal file
View File

@@ -0,0 +1,276 @@
const std = @import("std");
const mem = std.mem;
const math = std.math;
const sort = std.sort;
const unicode = std.unicode;
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const StringHashMap = std.StringHashMap;
const MultiArrayList = std.MultiArrayList;
const ArrayListUnmanaged = std.ArrayListUnmanaged;
const User = @import("User.zig");
const Group = @import("Group.zig");
// Corpus is an in-memory, cross-referenced representation of all users and
// groups; the file-level struct itself is the type.
pub const Corpus = @This();

// backing storage for everything owned by this Corpus; freed in deinit().
arena: ArenaAllocator,
// sorted by name, by unicode codepoint
users: MultiArrayList(User),
// sorted by gid
groups: MultiArrayList(Group),
// username -> index into `users`
name2user: StringHashMap(u32),
// group name -> index into `groups`
name2group: StringHashMap(u32),
// group index -> ascending-sorted indices of that group's member users
group2users: []const []const u32,
// user index -> ascending-sorted indices of groups containing that user
user2groups: []const []const u32,
// largest single group record size (max of Group.strlenZ()); presumably
// sizes the getgrnam/getgrgid result buffer — see DB.getgrBufsize.
getgr_bufsize: usize,
// largest single user record size (max of User.strlenZ()); presumably
// sizes the getpwnam/getpwuid result buffer — see DB.getpwBufsize.
getpw_bufsize: usize,
// Builds a Corpus: clones the given users and groups into an internal
// arena, sorts users by name (unicode codepoints) and groups by gid, and
// constructs the name->index and membership cross-reference tables.
// Errors: TooMany when either input does not fit u32 indices; Duplicate on
// a repeated user or group name; NotFound when a group references a member
// name that is not among the users. Caller frees with deinit().
pub fn init(
    baseAllocator: Allocator,
    usersConst: []const User,
    groupsConst: []const Group,
) error{ OutOfMemory, InvalidUtf8, Duplicate, NotFound, TooMany }!Corpus {
    // indices are stored as u32 throughout; refuse larger inputs up front.
    if (usersConst.len >= math.maxInt(u32)) return error.TooMany;
    if (groupsConst.len >= math.maxInt(u32)) return error.TooMany;
    var arena = ArenaAllocator.init(baseAllocator);
    var allocator = arena.allocator();
    errdefer arena.deinit();
    var groups_arr = try allocator.alloc(Group, groupsConst.len);
    var users_arr = try allocator.alloc(User, usersConst.len);
    // while cloning, record the largest record sizes for the lookup buffers.
    var getgr_bufsize: usize = 0;
    for (groupsConst) |*group, i| {
        groups_arr[i] = try group.clone(allocator);
        getgr_bufsize = math.max(getgr_bufsize, group.strlenZ());
    }
    var getpw_bufsize: usize = 0;
    for (usersConst) |*user, i| {
        users_arr[i] = try user.clone(allocator);
        getpw_bufsize = math.max(getpw_bufsize, user.strlenZ());
    }
    sort.sort(User, users_arr, {}, cmpUser);
    sort.sort(Group, groups_arr, {}, cmpGroup);
    // re-pack the sorted slices into MultiArrayLists for columnar access.
    var users = MultiArrayList(User){};
    try users.ensureTotalCapacity(allocator, users_arr.len);
    for (users_arr) |user|
        users.appendAssumeCapacity(user);
    var groups = MultiArrayList(Group){};
    try groups.ensureTotalCapacity(allocator, groups_arr.len);
    for (groups_arr) |group|
        groups.appendAssumeCapacity(group);
    // name -> index maps; duplicate names are rejected here.
    var name2user = StringHashMap(u32).init(allocator);
    var name2group = StringHashMap(u32).init(allocator);
    for (users.items(.name)) |name, i| {
        var res1 = try name2user.getOrPut(name);
        if (res1.found_existing)
            return error.Duplicate;
        res1.value_ptr.* = @intCast(u32, i);
    }
    for (groups.items(.name)) |name, i| {
        var res1 = try name2group.getOrPut(name);
        if (res1.found_existing)
            return error.Duplicate;
        res1.value_ptr.* = @intCast(u32, i);
    }
    var group2users = try allocator.alloc([]u32, groups.len);
    // uses baseAllocator, because it will be freed before
    // returning from this function. This keeps the arena clean.
    var user2groups = try baseAllocator.alloc(ArrayListUnmanaged(u32), users.len);
    defer baseAllocator.free(user2groups);
    mem.set(ArrayListUnmanaged(u32), user2groups, ArrayListUnmanaged(u32){});
    for (groups.items(.members)) |groupmembers, i| {
        // `members` starts zero-length and grows as member names resolve;
        // capacity is groupmembers.len, so the appends cannot overflow.
        var members = try allocator.alloc(u32, groupmembers.len);
        members.len = 0;
        for (groupmembers) |member_name| {
            if (name2user.get(member_name)) |user_idx| {
                members.len += 1;
                members[members.len - 1] = user_idx;
                try user2groups[user_idx].append(allocator, @intCast(u32, i));
            } else return error.NotFound;
        }
        group2users[i] = members;
    }
    // both cross-reference tables are sorted ascending by index.
    for (group2users) |*groupusers|
        sort.sort(u32, groupusers.*, {}, comptime sort.asc(u32));
    var user2groups_final = try allocator.alloc([]const u32, users.len);
    user2groups_final.len = users.len;
    for (user2groups) |*usergroups, i| {
        sort.sort(u32, usergroups.items, {}, comptime sort.asc(u32));
        user2groups_final[i] = usergroups.toOwnedSlice(allocator);
    }
    return Corpus{
        .arena = arena,
        .users = users,
        .groups = groups,
        .name2user = name2user,
        .name2group = name2group,
        .group2users = group2users,
        .user2groups = user2groups_final,
        .getgr_bufsize = getgr_bufsize,
        .getpw_bufsize = getpw_bufsize,
    };
}
// Frees everything owned by the Corpus (all allocations live in the arena)
// and poisons the value so use-after-deinit is caught in safe builds.
pub fn deinit(self: *Corpus) void {
    self.arena.deinit();
    self.* = undefined;
}
// cmpUser compares two users for sorting. By username's utf8 codepoints, ascending.
// Returns true only when a.name orders strictly before b.name; equal names
// compare false, giving the strict ("less than") ordering std.sort expects.
// (The previous version returned true for equal names, and its two
// prefix-case comments were swapped.)
fn cmpUser(_: void, a: User, b: User) bool {
    var utf8_a = (unicode.Utf8View.init(a.name) catch unreachable).iterator();
    var utf8_b = (unicode.Utf8View.init(b.name) catch unreachable).iterator();
    while (utf8_a.nextCodepoint()) |codepoint_a| {
        if (utf8_b.nextCodepoint()) |codepoint_b| {
            if (codepoint_a == codepoint_b) {
                continue;
            } else return codepoint_a < codepoint_b;
        }
        // b is a proper prefix of a: a is longer, so a orders after b.
        return false;
    }
    // a is exhausted. a orders first only if b still has codepoints left
    // (a is a proper prefix of b); equal names compare false.
    return utf8_b.nextCodepoint() != null;
}
// cmpGroup compares two groups for sorting: by gid, ascending.
fn cmpGroup(_: void, a: Group, b: Group) bool {
    return a.gid < b.gid;
}
// Builds a User whose fields are all zeroed except `name`; convenient for
// tests that only exercise username-based behavior.
fn testUser(name: []const u8) User {
    var user = mem.zeroes(User);
    user.name = name;
    return user;
}
const testing = std.testing;
const someMembers = @import("Group.zig").someMembers;
// Checks cmpUser's ascending order on distinct names — a < b, a < bb,
// b < bb — and that each comparison is antisymmetric.
test "users compare function" {
    const a = testUser("a");
    const b = testUser("b");
    const bb = testUser("bb");
    try testing.expect(cmpUser({}, a, b));
    try testing.expect(!cmpUser({}, b, a));
    try testing.expect(cmpUser({}, a, bb));
    try testing.expect(!cmpUser({}, bb, a));
    try testing.expect(cmpUser({}, b, bb));
    try testing.expect(!cmpUser({}, bb, b));
}
// Builds a small but diverse Corpus for tests: five users (including one
// with maximal-length fields) and four groups with overlapping
// memberships. The temporary Group values are cloned by Corpus.init, so
// they are deinit'ed here after init returns. Caller deinit()s the result.
pub fn testCorpus(allocator: Allocator) !Corpus {
    const users = [_]User{ User{
        .uid = 0,
        .gid = 0,
        .name = "root",
        .gecos = "",
        .home = "/root",
        .shell = "/bin/bash",
    }, User{
        .uid = 128,
        .gid = 128,
        .name = "vidmantas",
        .gecos = "Vidmantas Kaminskas",
        .home = "/home/vidmantas",
        .shell = "/bin/bash",
    }, User{
        // a user with long fields, to exercise record-size limits.
        .uid = 1000,
        .gid = math.maxInt(u32),
        .name = "Name" ** 8,
        .gecos = "Gecos" ** 51,
        .home = "Home" ** 16,
        .shell = "She.LllL" ** 8,
    }, User{
        .uid = 100000,
        .gid = 1002,
        .name = "svc-bar",
        .gecos = "",
        .home = "/",
        .shell = "/",
    }, User{
        .uid = 65534,
        .gid = 65534,
        .name = "nobody",
        .gecos = "nobody",
        .home = "/nonexistent",
        .shell = "/usr/sbin/nologin",
    } };
    var group0 = try Group.init(allocator, 0, "root", &[_][]const u8{"root"});
    var group1 = try Group.init(allocator, 128, "vidmantas", &[_][]const u8{"vidmantas"});
    const members2 = &[_][]const u8{ "svc-bar", "Name" ** 8, "vidmantas", "root" };
    var group2 = try Group.init(allocator, 9999, "all", members2);
    const members3 = &[_][]const u8{ "svc-bar", "vidmantas" };
    var group3 = try Group.init(allocator, 100000, "service-account", members3);
    // defers run after Corpus.init has cloned the groups into its arena.
    defer group0.deinit(allocator);
    defer group1.deinit(allocator);
    defer group2.deinit(allocator);
    defer group3.deinit(allocator);
    const groups = [_]Group{ group0, group1, group2, group3 };
    return try Corpus.init(allocator, users[0..], groups[0..]);
}
// End-to-end check of Corpus.init over testCorpus: users end up sorted by
// name ('N' < lowercase letters), groups by gid, and the name->index and
// membership cross-reference tables are mutually consistent.
test "test corpus" {
    var corpus = try testCorpus(testing.allocator);
    defer corpus.deinit();
    // expected user indices after sorting by name.
    const name_name = 0;
    const nobody = 1;
    const root = 2;
    const svc_bar = 3;
    const vidmantas = 4;
    const usernames = corpus.users.items(.name);
    try testing.expectEqualStrings(usernames[name_name], "Name" ** 8);
    try testing.expectEqualStrings(usernames[nobody], "nobody");
    try testing.expectEqualStrings(usernames[root], "root");
    try testing.expectEqualStrings(usernames[svc_bar], "svc-bar");
    try testing.expectEqualStrings(usernames[vidmantas], "vidmantas");
    // expected group indices after sorting by gid (0, 128, 9999, 100000).
    const g_root = 0;
    const g_vidmantas = 1;
    const g_all = 2;
    const g_service_account = 3;
    const groupnames = corpus.groups.items(.name);
    try testing.expectEqualStrings(groupnames[g_root], "root");
    try testing.expectEqualStrings(groupnames[g_service_account], "service-account");
    try testing.expectEqualStrings(groupnames[g_vidmantas], "vidmantas");
    try testing.expectEqualStrings(groupnames[g_all], "all");
    try testing.expectEqual(corpus.name2user.get("404"), null);
    try testing.expectEqual(corpus.name2user.get("vidmantas").?, vidmantas);
    try testing.expectEqual(corpus.name2group.get("404"), null);
    try testing.expectEqual(corpus.name2group.get("vidmantas").?, g_vidmantas);
    // membership tables are sorted ascending by index.
    const membersOfAll = corpus.group2users[g_all];
    try testing.expectEqual(membersOfAll[0], name_name);
    try testing.expectEqual(membersOfAll[1], root);
    try testing.expectEqual(membersOfAll[2], svc_bar);
    try testing.expectEqual(membersOfAll[3], vidmantas);
    const groupsOfVidmantas = corpus.user2groups[vidmantas];
    try testing.expectEqual(groupsOfVidmantas[0], g_vidmantas);
    try testing.expectEqual(groupsOfVidmantas[1], g_all);
    try testing.expectEqual(groupsOfVidmantas[2], g_service_account);
}

812
src/DB.zig Normal file
View File

@@ -0,0 +1,812 @@
const std = @import("std");
const os = std.os;
const mem = std.mem;
const math = std.math;
const meta = std.meta;
const sort = std.sort;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const AutoHashMap = std.AutoHashMap;
const BoundedArray = std.BoundedArray;
const Corpus = @import("Corpus.zig");
const pad = @import("padding.zig");
const compress = @import("compress.zig");
const Group = @import("Group.zig");
const CGroup = Group.CGroup;
const PackedGroup = @import("PackedGroup.zig");
const GroupStored = PackedGroup.GroupStored;
const User = @import("User.zig");
const CUser = User.CUser;
const PackedUser = @import("PackedUser.zig");
const ShellSections = @import("shell.zig").ShellWriter.ShellSections;
const ShellReader = @import("shell.zig").ShellReader;
const ShellWriter = @import("shell.zig").ShellWriter;
const InvalidHeader = @import("header.zig").Invalid;
const Header = @import("header.zig").Header;
const max_shells = @import("shell.zig").max_shells;
const section_length_bits = @import("header.zig").section_length_bits;
const section_length = @import("header.zig").section_length;
const cmph = @import("cmph.zig");
const bdz = @import("bdz.zig");
// one section-length run of zero bytes, reused as padding by iov().
const zeroes = &[_]u8{0} ** section_length;

const DB = @This();

// All sections, as they end up in the DB. Order is important.
header: *const Header,
// packed bdz minimal-perfect-hash functions for the four lookup keys.
bdz_gid: []const u8,
bdz_groupname: []const u8,
bdz_uid: []const u8,
bdz_username: []const u8,
// bdz hash -> record offset tables (built by bdzIdx).
idx_gid2group: []const u32,
idx_groupname2group: []const u32,
idx_uid2user: []const u32,
idx_name2user: []const u32,
// deduplicated shells: index + blob, read through ShellReader.
shell_index: []const u16,
shell_blob: []const u8,
// packed group and user records (PackedGroup / PackedUser).
groups: []const u8,
users: []const u8,
// per-group member lists and per-user gid lists, varint/delta compressed.
groupmembers: []const u8,
additional_gids: []const u8,
// Builds every DB section from a Corpus: bdz perfect-hash functions for
// uid/gid/username/groupname, packed user/group records, membership and
// additional-gid blobs, shell dedup tables, and the Header describing all
// section sizes. Caller owns the result and frees it with deinit().
pub fn fromCorpus(
    allocator: Allocator,
    corpus: *const Corpus,
) error{ OutOfMemory, InvalidRecord, TooMany }!DB {
    const gids = corpus.groups.items(.gid);
    const gnames = corpus.groups.items(.name);
    const uids = corpus.users.items(.uid);
    const unames = corpus.users.items(.name);
    const bdz_gid = try cmph.packU32(allocator, gids);
    errdefer allocator.free(bdz_gid);
    const bdz_groupname = try cmph.packStr(allocator, gnames);
    errdefer allocator.free(bdz_groupname);
    const bdz_uid = try cmph.packU32(allocator, uids);
    errdefer allocator.free(bdz_uid);
    const bdz_username = try cmph.packStr(allocator, unames);
    errdefer allocator.free(bdz_username);
    var shell = try shellSections(allocator, corpus);
    defer shell.deinit();
    const shell_index = try allocator.dupe(u16, shell.index.constSlice());
    errdefer allocator.free(shell_index);
    const shell_blob = try allocator.dupe(u8, shell.blob.constSlice());
    errdefer allocator.free(shell_blob);
    // intermediate idx2offset tables are only needed to build the next
    // section, so they use `defer` (always freed), while the blobs that
    // end up in the DB use `errdefer` (freed only on failure).
    const additional_gids = try additionalGids(allocator, corpus);
    errdefer allocator.free(additional_gids.blob);
    defer allocator.free(additional_gids.idx2offset);
    const users = try usersSection(allocator, corpus, &additional_gids, &shell);
    errdefer allocator.free(users.blob);
    defer allocator.free(users.idx2offset);
    const groupmembers = try groupMembers(allocator, corpus, users.idx2offset);
    errdefer allocator.free(groupmembers.blob);
    defer allocator.free(groupmembers.idx2offset);
    const groups = try groupsSection(allocator, corpus, groupmembers.idx2offset);
    errdefer allocator.free(groups.blob);
    defer allocator.free(groups.idx2offset);
    // hash -> record-offset tables, one per lookup key type.
    const idx_gid2group = try bdzIdx(u32, allocator, bdz_gid, gids, groups.idx2offset);
    errdefer allocator.free(idx_gid2group);
    const idx_groupname2group = try bdzIdx([]const u8, allocator, bdz_groupname, gnames, groups.idx2offset);
    errdefer allocator.free(idx_groupname2group);
    const idx_uid2user = try bdzIdx(u32, allocator, bdz_uid, uids, users.idx2offset);
    errdefer allocator.free(idx_uid2user);
    const idx_name2user = try bdzIdx([]const u8, allocator, bdz_username, unames, users.idx2offset);
    errdefer allocator.free(idx_name2user);
    const header = try allocator.create(Header);
    errdefer allocator.destroy(header);
    header.* = Header{
        .nblocks_shell_blob = nblocks(u8, shell.blob.constSlice()),
        .num_shells = shell.len,
        .num_groups = groups.len,
        .num_users = users.len,
        .nblocks_bdz_gid = nblocks(u32, bdz_gid),
        .nblocks_bdz_groupname = nblocks(u32, bdz_groupname),
        .nblocks_bdz_uid = nblocks(u32, bdz_uid),
        .nblocks_bdz_username = nblocks(u32, bdz_username),
        .nblocks_groups = nblocks(u64, groups.blob),
        .nblocks_users = nblocks(u64, users.blob),
        .nblocks_groupmembers = nblocks(u64, groupmembers.blob),
        .nblocks_additional_gids = nblocks(u64, additional_gids.blob),
        .getgr_bufsize = corpus.getgr_bufsize,
        .getpw_bufsize = corpus.getpw_bufsize,
    };
    return DB{
        .header = header,
        .bdz_gid = bdz_gid,
        .bdz_groupname = bdz_groupname,
        .bdz_uid = bdz_uid,
        .bdz_username = bdz_username,
        .idx_gid2group = idx_gid2group,
        .idx_groupname2group = idx_groupname2group,
        .idx_uid2user = idx_uid2user,
        .idx_name2user = idx_name2user,
        .shell_index = shell_index,
        .shell_blob = shell_blob,
        .groups = groups.blob,
        .users = users.blob,
        .groupmembers = groupmembers.blob,
        .additional_gids = additional_gids.blob,
    };
}
// Returns the buffer size (recorded in the header at build time) that is
// large enough for any single getgrnam/getgrgid result from this DB.
pub fn getgrBufsize(self: *const DB) usize {
    return self.header.getgr_bufsize;
}
// Returns the buffer size (recorded in the header at build time) that is
// large enough for any single getpwnam/getpwuid result from this DB.
pub fn getpwBufsize(self: *const DB) usize {
    return self.header.getpw_bufsize;
}
// Frees every section allocated by fromCorpus and poisons the value.
// NOTE(review): a DB obtained via fromBytes points into the caller's
// buffer (no allocations), so presumably it must not be passed here —
// confirm with callers.
pub fn deinit(self: *DB, allocator: Allocator) void {
    allocator.destroy(self.header);
    allocator.free(self.bdz_gid);
    allocator.free(self.bdz_groupname);
    allocator.free(self.bdz_uid);
    allocator.free(self.bdz_username);
    allocator.free(self.idx_gid2group);
    allocator.free(self.idx_groupname2group);
    allocator.free(self.idx_uid2user);
    allocator.free(self.idx_name2user);
    allocator.free(self.shell_index);
    allocator.free(self.shell_blob);
    allocator.free(self.groups);
    allocator.free(self.users);
    allocator.free(self.groupmembers);
    allocator.free(self.additional_gids);
    self.* = undefined;
}
// all DB fields, in declaration order; serialization order follows this.
const DB_fields = meta.fields(DB);

// Serializes the DB as a vector of iovecs: each field's bytes followed,
// when needed, by zero padding up to the next section_length boundary.
// Suitable for os.writev; fromBytes() reverses the layout.
pub fn iov(self: *const DB) BoundedArray(os.iovec_const, DB_fields.len * 2) {
    // worst case two iovecs per field: the payload plus its padding.
    var result = BoundedArray(os.iovec_const, DB_fields.len * 2).init(0) catch unreachable;
    inline for (DB_fields) |field| {
        // refuse to serialize types whose memory layout is unspecified.
        comptime assertDefinedLayout(field.field_type);
        const value = @field(self, field.name);
        const bytes: []const u8 = switch (@TypeOf(value)) {
            *const Header => mem.asBytes(value),
            else => mem.sliceAsBytes(value),
        };
        result.appendAssumeCapacity(os.iovec_const{
            .iov_base = bytes.ptr,
            .iov_len = bytes.len,
        });
        const padding = pad.until(usize, section_length_bits, bytes.len);
        if (padding != 0)
            result.appendAssumeCapacity(.{
                .iov_base = zeroes,
                .iov_len = padding,
            });
    }
    return result;
}
// Reconstructs a DB view over a buffer produced by writing iov(). Zero
// copy: every slice of the returned DB points into `buf`, so `buf` must
// outlive the DB and must not be freed through DB.deinit.
pub fn fromBytes(buf: []align(8) const u8) InvalidHeader!DB {
    const header = try Header.fromBytes(buf[0..@sizeOf(Header)]);
    // At first the tuple below had field names too, but moved it to comments,
    // because it segfaulted. https://github.com/ziglang/zig/issues/3915 and
    // https://paste.sr.ht/~motiejus/2830736e796801517c1fa8639be6615cd56ada27
    // section lengths in section_length blocks, in DB field declaration
    // order (everything after `header`).
    const lengths = .{
        header.nblocks_bdz_gid, // bdz_gid
        header.nblocks_bdz_groupname, // bdz_groupname
        header.nblocks_bdz_uid, // bdz_uid
        header.nblocks_bdz_username, // bdz_username
        nblocks_n(u32, header.num_groups * 4), // idx_gid2group
        nblocks_n(u32, header.num_groups * 4), // idx_groupname2group
        nblocks_n(u32, header.num_users * 4), // idx_uid2user
        nblocks_n(u32, header.num_users * 4), // idx_name2user
        nblocks_n(u16, header.num_shells * 2), // shell_index
        header.nblocks_shell_blob, // shell_blob
        header.nblocks_groups, // groups
        header.nblocks_users, // users
        header.nblocks_groupmembers, // groupmembers
        header.nblocks_additional_gids, // additional_gids
    };
    var result: DB = undefined;
    result.header = header;
    // walk the sections in order, slicing each field out of `buf`.
    var offset = comptime nblocks_n(u64, @sizeOf(Header));
    comptime assert(mem.eql(u8, DB_fields[0].name, "header"));
    inline for (DB_fields[1..]) |field, i| {
        const start = offset << section_length_bits;
        const end = (offset + lengths[i]) << section_length_bits;
        const slice_type = meta.Child(field.field_type);
        const value = mem.bytesAsSlice(slice_type, buf[start..end]);
        @field(result, field.name) = value;
        offset += lengths[i];
    }
    return result;
}
// dumps PackedGroup to []u8 and returns a CGroup.
// Buffer layout: a null-terminated array of member-name pointers first,
// then the NUL-terminated member-name strings, then the group name.
// Returns OutOfMemory when `buf` is too small for the whole record.
fn getGroup(self: *const DB, group: PackedGroup, buf: *[]u8) error{OutOfMemory}!CGroup {
    const members_slice = self.groupmembers[group.members_offset..];
    var vit = compress.VarintSliceIteratorMust(members_slice);
    const num_members = vit.remaining;
    // reserve room for the pointer array, including the trailing null.
    const ptr_end = @sizeOf(?[*:0]const u8) * (num_members + 1);
    if (ptr_end > buf.len) return error.OutOfMemory;
    var member_ptrs = mem.bytesAsSlice(?[*:0]const u8, buf.*[0..ptr_end]);
    member_ptrs[member_ptrs.len - 1] = null;
    var buf_offset: usize = ptr_end;
    // member offsets are delta-compressed offsets into the users section,
    // stored in 8-byte units (hence the << 3 below).
    var it = compress.DeltaDecompressionIterator(&vit);
    var i: usize = 0;
    while (it.nextMust()) |member_offset| : (i += 1) {
        const entry = PackedUser.fromBytes(self.users[member_offset << 3 ..]);
        const start = buf_offset;
        const name = entry.user.name();
        if (buf_offset + name.len + 1 > buf.len) return error.OutOfMemory;
        mem.copy(u8, buf.*[buf_offset..], name);
        buf_offset += name.len;
        buf.*[buf_offset] = 0;
        buf_offset += 1;
        // TODO: arr[i] = buf[...] triggers a bug in zig pre-0.10
        const terminated = buf.*[start .. buf_offset - 1 :0];
        member_ptrs[i] = terminated;
    }
    // finally the NUL-terminated group name itself.
    const name = group.name();
    if (buf_offset + name.len + 1 > buf.len) return error.OutOfMemory;
    mem.copy(u8, buf.*[buf_offset..], name);
    buf.*[buf_offset + name.len] = 0;
    return CGroup{
        .gid = group.gid(),
        .name = buf.*[buf_offset .. buf_offset + name.len].ptr,
        .members = member_ptrs.ptr,
    };
}
// get a CGroup entry by name.
// Returns null when no such group exists; `buf` backs the result's
// strings and pointer array.
fn getgrnam(self: *const DB, name: []const u8, buf: *[]u8) error{OutOfMemory}!?CGroup {
    const idx = bdz.search(self.bdz_groupname, name);
    // bdz can return any hash for a key outside the set, so the candidate
    // record must be validated below.
    if (idx >= self.header.num_groups) return null;
    const offset = self.idx_groupname2group[idx];
    const nbits = PackedGroup.alignment_bits;
    const group = PackedGroup.fromBytes(self.groups[offset << nbits ..]).group;
    if (!mem.eql(u8, name, group.name())) return null;
    return try self.getGroup(group, buf);
}
// get a CGroup entry by its gid.
// Returns null when no such group exists; `buf` backs the result.
fn getgrgid(self: *const DB, gid: u32, buf: *[]u8) error{OutOfMemory}!?CGroup {
    const idx = bdz.search_u32(self.bdz_gid, gid);
    // bdz can return any hash for a gid outside the set; validate below.
    if (idx >= self.header.num_groups) return null;
    const offset = self.idx_gid2group[idx];
    const nbits = PackedGroup.alignment_bits;
    const group = PackedGroup.fromBytes(self.groups[offset << nbits ..]).group;
    if (gid != group.gid()) return null;
    return try self.getGroup(group, buf);
}
// Copies `str` into `buf` at *offset, appends a NUL terminator, advances
// *offset past the terminator, and returns a pointer to the copied string.
// The caller guarantees buf has room for str.len + 1 bytes at *offset.
fn pushStr(str: []const u8, buf: *[]u8, offset: *usize) [*]const u8 {
    const begin = offset.*;
    const end = begin + str.len;
    mem.copy(u8, buf.*[begin..], str);
    buf.*[end] = 0;
    offset.* = end + 1;
    return @ptrCast([*]const u8, &buf.*[begin]);
}
// Dumps a PackedUser into `buf` and returns a CUser whose string
// pointers point into buf. The four strings are copied back-to-back,
// each NUL-terminated: name, gecos, home directory, shell.
fn getUser(self: *const DB, user: PackedUser, buf: *[]u8) error{OutOfMemory}!CUser {
    // shells are deduplicated; resolve this user's shell via the index.
    const shell_reader = ShellReader{
        .index = self.shell_index,
        .blob = self.shell_blob,
    };
    const name = user.name();
    const gecos = user.gecos();
    const home = user.home();
    const shell = user.shell(shell_reader);
    // total bytes needed, including the four NUL terminators.
    const strlen =
        name.len + 1 +
        gecos.len + 1 +
        home.len + 1 +
        shell.len + 1;
    if (strlen > buf.len) return error.OutOfMemory;
    var offset: usize = 0;
    const pw_name = pushStr(name, buf, &offset);
    const pw_gecos = pushStr(gecos, buf, &offset);
    const pw_dir = pushStr(home, buf, &offset);
    const pw_shell = pushStr(shell, buf, &offset);
    return CUser{
        .pw_name = pw_name,
        .pw_uid = user.uid(),
        .pw_gid = user.gid(),
        .pw_gecos = pw_gecos,
        .pw_dir = pw_dir,
        .pw_shell = pw_shell,
    };
}
// get a CUser entry by name.
// Returns null when no such user exists; `buf` backs the result.
pub fn getpwnam(self: *const DB, name: []const u8, buf: *[]u8) error{OutOfMemory}!?CUser {
    const idx = bdz.search(self.bdz_username, name);
    // bdz may return a hash that's bigger than the number of users
    if (idx >= self.header.num_users) return null;
    const offset = self.idx_name2user[idx];
    const nbits = PackedUser.alignment_bits;
    const user = PackedUser.fromBytes(self.users[offset << nbits ..]).user;
    // validate: bdz returns an arbitrary hash for keys outside the set.
    if (!mem.eql(u8, name, user.name())) return null;
    return try self.getUser(user, buf);
}
// get a CUser entry by uid.
// Returns null when no such user exists; `buf` backs the result.
pub fn getpwuid(self: *const DB, uid: u32, buf: *[]u8) error{OutOfMemory}!?CUser {
    const idx = bdz.search_u32(self.bdz_uid, uid);
    // bdz may return a hash outside the user set; validate below.
    if (idx >= self.header.num_users) return null;
    const offset = self.idx_uid2user[idx];
    const nbits = PackedUser.alignment_bits;
    const user = PackedUser.fromBytes(self.users[offset << nbits ..]).user;
    if (uid != user.uid()) return null;
    return try self.getUser(user, buf);
}
// Feeds every user's shell into a ShellWriter and returns the resulting
// deduplicated shell sections (at most max_shells entries).
fn shellSections(
    allocator: Allocator,
    corpus: *const Corpus,
) error{OutOfMemory}!ShellSections {
    var shell_writer = ShellWriter.init(allocator);
    errdefer shell_writer.deinit();
    const shells = corpus.users.items(.shell);
    for (shells) |shell|
        try shell_writer.put(shell);
    return shell_writer.toOwnedSections(max_shells);
}
// Result of additionalGids(): every user's supplementary group ids.
const AdditionalGids = struct {
    // user index -> offset in blob
    idx2offset: []const u64,
    // compressed user gids blob. A blob contains N <= users.len items,
    // an item is:
    // len: varint
    // gid: [varint]varint,
    // ... and the gid list is delta-compressed.
    blob: []const u8,
};
// Builds the delta+varint compressed "additional gids" section: for every
// user, the ascending list of gids of the groups it belongs to. Returns
// the blob plus a user-index -> blob-offset table; caller owns both.
// Users with no groups all share offset 0, a zero-length list.
fn additionalGids(
    allocator: Allocator,
    corpus: *const Corpus,
) error{OutOfMemory}!AdditionalGids {
    var blob = ArrayList(u8).init(allocator);
    errdefer blob.deinit();
    var idx2offset = try allocator.alloc(u64, corpus.users.len);
    errdefer allocator.free(idx2offset);
    // zero'th entry is empty, so groupless users can refer to it.
    try compress.appendUvarint(&blob, 0);
    // scratch buffer for one user's gid list, grown on demand. Its length
    // always equals its allocated size, so the deferred free releases
    // exactly what was allocated. (Previously scratch.len was shrunk per
    // user, which both corrupted the free() size and made the growth
    // check below compare against the shrunken length.)
    var scratch = try allocator.alloc(u32, 256);
    var scratch_allocated: bool = true;
    defer if (scratch_allocated) allocator.free(scratch);
    for (corpus.user2groups) |usergroups, user_idx| {
        if (usergroups.len == 0) {
            idx2offset[user_idx] = 0;
            continue;
        }
        idx2offset[user_idx] = blob.items.len;
        if (scratch.len < usergroups.len) {
            // flip the flag around the realloc so a failed alloc does not
            // make the deferred free touch already-freed memory.
            allocator.free(scratch);
            scratch_allocated = false;
            scratch = try allocator.alloc(u32, usergroups.len);
            scratch_allocated = true;
        }
        // work on a sub-slice instead of mutating scratch.len.
        const gids = scratch[0..usergroups.len];
        const corpusGids = corpus.groups.items(.gid);
        for (usergroups) |group_idx, i|
            gids[i] = corpusGids[group_idx];
        // user2groups is sorted by group index (ascending gid order, since
        // groups are sorted by gid), so delta compression cannot fail.
        compress.deltaCompress(u32, gids) catch |err| switch (err) {
            error.NotSorted => unreachable,
        };
        try compress.appendUvarint(&blob, usergroups.len);
        for (gids) |gid|
            try compress.appendUvarint(&blob, gid);
    }
    return AdditionalGids{
        .idx2offset = idx2offset,
        .blob = blob.toOwnedSlice(),
    };
}
// Result of usersSection(): all users packed into one blob.
const UsersSection = struct {
    // number of users in this section
    len: u32,
    // user index -> offset in blob
    idx2offset: []const u32,
    // concatenated, 8-byte-padded PackedUser records
    blob: []const u8,
};
// Packs all users into a blob of PackedUser records, each padded to
// PackedUser.alignment_bits. The idx2offset entries store the record
// offset divided by 8, which is why the raw offset must fit u35.
fn usersSection(
    allocator: Allocator,
    corpus: *const Corpus,
    gids: *const AdditionalGids,
    shells: *const ShellSections,
) error{ OutOfMemory, InvalidRecord, TooMany }!UsersSection {
    var idx2offset = try allocator.alloc(u32, corpus.users.len);
    errdefer allocator.free(idx2offset);
    // as of writing each user takes 12 bytes + blobs + padding, padded to
    // 8 bytes. 24 is an optimistic lower bound for an average record size.
    var blob = try ArrayList(u8).initCapacity(allocator, 24 * corpus.users.len);
    errdefer blob.deinit();
    var i: usize = 0;
    while (i < corpus.users.len) : (i += 1) {
        // TODO: this is inefficient by calling `.slice()` on every iteration
        const user = corpus.users.get(i);
        const user_offset = math.cast(u35, blob.items.len) catch |err| switch (err) {
            error.Overflow => return error.TooMany,
        };
        // records are 8-byte aligned, so the low 3 bits are always zero.
        assert(user_offset & 7 == 0);
        idx2offset[i] = @truncate(u32, user_offset >> 3);
        try PackedUser.packTo(
            &blob,
            user,
            gids.idx2offset[i],
            shells.shell2idx,
        );
        try pad.arrayList(&blob, PackedUser.alignment_bits);
    }
    return UsersSection{
        .len = @intCast(u32, corpus.users.len),
        .idx2offset = idx2offset,
        .blob = blob.toOwnedSlice(),
    };
}
// Result of groupMembers(): every group's member list, compressed.
const GroupMembers = struct {
    // group index to it's offset in blob
    idx2offset: []const u64,
    // members are delta-varint encoded byte-offsets to the user struct
    blob: []const u8,
};
// Serializes each group's member list: a varint count followed by
// delta-compressed offsets of the member users' packed records
// (user2offset comes from usersSection). Empty groups all point at
// offset 0, a shared zero-length list. Caller owns both results.
fn groupMembers(
    allocator: Allocator,
    corpus: *const Corpus,
    user2offset: []const u32,
) error{OutOfMemory}!GroupMembers {
    var idx2offset = try allocator.alloc(u64, corpus.groups.len);
    errdefer allocator.free(idx2offset);
    var blob = ArrayList(u8).init(allocator);
    errdefer blob.deinit();
    // zero'th entry is empty, so empty groups can refer to it
    try compress.appendUvarint(&blob, 0);
    // scratch space for one group's member offsets, reused per group.
    var scratch = try ArrayList(u32).initCapacity(allocator, 1024);
    defer scratch.deinit();
    for (corpus.group2users) |members, group_idx| {
        if (members.len == 0) {
            idx2offset[group_idx] = 0;
            continue;
        }
        idx2offset[group_idx] = blob.items.len;
        try scratch.ensureTotalCapacity(members.len);
        scratch.items.len = members.len;
        for (members) |user_idx, i|
            scratch.items[i] = user2offset[user_idx];
        // members are sorted by user index; NOTE(review): delta
        // compression requires the corresponding *offsets* to be
        // ascending too, which holds since offsets grow with index.
        compress.deltaCompress(u32, scratch.items) catch |err| switch (err) {
            error.NotSorted => unreachable,
        };
        try compress.appendUvarint(&blob, members.len);
        for (scratch.items) |elem|
            try compress.appendUvarint(&blob, elem);
    }
    return GroupMembers{
        .idx2offset = idx2offset,
        .blob = blob.toOwnedSlice(),
    };
}
// Result of groupsSection(): all groups packed into one blob.
const GroupsSection = struct {
    // number of groups in this section
    len: u32,
    // group index -> offset in blob
    idx2offset: []const u32,
    // concatenated, 8-byte-padded PackedGroup records
    blob: []const u8,
};
// Packs all groups into a blob of PackedGroup records, 8-byte aligned,
// mirroring usersSection; idx2offset entries store offset / 8.
fn groupsSection(
    allocator: Allocator,
    corpus: *const Corpus,
    members_offset: []const u64,
) error{ OutOfMemory, InvalidRecord }!GroupsSection {
    var idx2offset = try allocator.alloc(u32, corpus.groups.len);
    errdefer allocator.free(idx2offset);
    var blob = try ArrayList(u8).initCapacity(allocator, 8 * corpus.groups.len);
    errdefer blob.deinit();
    var i: usize = 0;
    while (i < corpus.groups.len) : (i += 1) {
        // TODO: this is inefficient; it's calling `.slice()` on every iteration
        const group = corpus.groups.get(i);
        // NOTE(review): unlike usersSection this cast is unchecked — a
        // >4GiB blob would trip a safety panic instead of returning
        // error.TooMany; confirm whether that asymmetry is intended.
        const group_offset = @intCast(u32, blob.items.len);
        assert(group_offset & 7 == 0);
        idx2offset[i] = @truncate(u32, group_offset >> 3);
        const group_stored = GroupStored{
            .gid = group.gid,
            .name = group.name,
            .members_offset = members_offset[i],
        };
        try PackedGroup.packTo(&blob, group_stored);
        try pad.arrayList(&blob, PackedGroup.alignment_bits);
    }
    return GroupsSection{
        .len = @intCast(u32, corpus.groups.len),
        .idx2offset = idx2offset,
        .blob = blob.toOwnedSlice(),
    };
}
// creates a bdz index using packed_mphf.
// hash = bdz_search(packed_mphf, keys[i]);
// result[hash] = idx2offset[i];
// In words: permutes idx2offset so it can be indexed directly by the bdz
// hash of the corresponding key. T selects the key type and, with it, the
// search function (u32 keys or string keys).
fn bdzIdx(
    comptime T: type,
    allocator: Allocator,
    packed_mphf: []const u8,
    keys: []const T,
    idx2offset: []const u32,
) error{OutOfMemory}![]const u32 {
    const search_fn = switch (T) {
        u32 => bdz.search_u32,
        []const u8 => bdz.search,
        else => unreachable,
    };
    assert(keys.len <= math.maxInt(u32));
    var result = try allocator.alloc(u32, keys.len);
    errdefer allocator.free(result);
    // a minimal perfect hash maps keys.len keys onto 0..keys.len-1, so
    // every result slot is written exactly once.
    for (keys) |key, i|
        result[search_fn(packed_mphf, key)] = idx2offset[i];
    return result;
}
// nblocks_n returns how many blocks a given number of bytes will take
// (a block is section_length bytes; the byte count is rounded up).
fn nblocks_n(comptime T: type, nbytes: usize) T {
    // B is wide enough to round nbytes up without overflow while keeping
    // the result, after the section_length_bits shift, within T.
    const B = switch (T) {
        u8 => u14,
        u16 => u22,
        u32 => u38,
        u64 => u70,
        // the message previously omitted u16, which the switch supports.
        else => @compileError("got " ++ @typeName(T) ++ ", only u8, u16, u32 and u64 are supported"),
    };
    const upper = pad.roundUp(B, section_length_bits, @intCast(B, nbytes));
    assert(upper & (section_length - 1) == 0);
    return @truncate(T, upper >> section_length_bits);
}
// nblocks returns how many blocks a particular slice will take.
fn nblocks(comptime T: type, arr: []const u8) T {
    return nblocks_n(T, arr.len);
}
// Compile-time check that T has a defined memory layout and is therefore
// safe to serialize byte-for-byte: fixed-width ints, arrays/pointers/enums
// of such, and extern/packed structs. Anything else is a compile error.
fn assertDefinedLayout(comptime T: type) void {
    return switch (T) {
        u4, u8, u16, u32, u64 => {},
        else => switch (@typeInfo(T)) {
            .Array => assertDefinedLayout(meta.Elem(T)),
            .Pointer => |info| assertDefinedLayout(info.child),
            .Enum => assertDefinedLayout(meta.Tag(T)),
            .Struct => {
                // auto layout may be reordered by the compiler; reject it.
                if (meta.containerLayout(T) == .Auto)
                    @compileError("layout of " ++ @typeName(T) ++ " is undefined");
                for (meta.fields(T)) |field|
                    assertDefinedLayout(field.field_type);
            },
            else => @compileError("unexpected type " ++ @typeName(T)),
        },
    };
}
const testing = std.testing;
// Round-trips a DB through writev into a memfd, then mmap + fromBytes,
// and spot-checks that the header counts and index tables survive.
test "read/write via iovec" {
    const allocator = testing.allocator;
    var corpus = try Corpus.testCorpus(allocator);
    defer corpus.deinit();
    var db = try DB.fromCorpus(allocator, &corpus);
    defer db.deinit(allocator);
    const fd = try os.memfd_create("test_turbonss_db", 0);
    defer os.close(fd);
    const len = try os.writev(fd, db.iov().constSlice());
    // mmap returns page-aligned memory, satisfying fromBytes' align(8).
    const buf = try os.mmap(null, len, os.PROT.READ, os.MAP.SHARED, fd, 0);
    const db2 = try fromBytes(buf);
    try testing.expectEqual(corpus.groups.len, db.header.num_groups);
    try testing.expectEqual(corpus.users.len, db.header.num_users);
    try testing.expectEqual(db.header.num_groups, db2.header.num_groups);
    try testing.expectEqual(db.header.num_users, db2.header.num_users);
    const num_groups = db2.header.num_groups;
    const num_users = db2.header.num_users;
    try testing.expectEqualSlices(u32, db.idx_gid2group, db2.idx_gid2group[0..num_groups]);
    try testing.expectEqualSlices(u32, db.idx_uid2user, db2.idx_uid2user[0..num_users]);
}
// getgrnam/getgrgid: miss and hit lookups, member-list contents, and that
// a buffer one byte short of getgrBufsize() yields OutOfMemory.
test "getgrnam/getgrgid" {
    var corpus = try Corpus.testCorpus(testing.allocator);
    defer corpus.deinit();
    var db = try DB.fromCorpus(testing.allocator, &corpus);
    defer db.deinit(testing.allocator);
    var buf = try testing.allocator.alloc(u8, db.getgrBufsize());
    defer testing.allocator.free(buf);
    {
        try testing.expectEqual(try db.getgrnam("doesnotexist", &buf), null);
        const all = (try db.getgrnam("all", &buf)).?;
        try testing.expectEqual(all.gid, 9999);
        try testing.expectEqualStrings(all.name[0..4], "all\x00");
        // members come back sorted by username, null-terminated.
        const members = all.members;
        try testing.expectEqualStrings(mem.sliceTo(members[0].?, 0), "Name" ** 8);
        try testing.expectEqualStrings(mem.sliceTo(members[1].?, 0), "root");
        try testing.expectEqualStrings(mem.sliceTo(members[2].?, 0), "svc-bar");
        try testing.expectEqualStrings(mem.sliceTo(members[3].?, 0), "vidmantas");
        try testing.expectEqual(members[4], null);
    }
    {
        try testing.expectEqual(try db.getgrgid(42, &buf), null);
        const all = (try db.getgrgid(9999, &buf)).?;
        try testing.expectEqual(all.gid, 9999);
        try testing.expectEqualStrings(all.name[0..3], "all");
    }
    // the bufsize is exact: one byte less must fail.
    _ = try db.getgrnam("all", &buf);
    buf.len -= 1;
    try testing.expectError(error.OutOfMemory, db.getgrnam("all", &buf));
}
// getpwnam/getpwuid: miss and hit lookups, NUL-terminated field contents,
// and that a buffer one byte short of getpwBufsize() yields OutOfMemory.
test "getpwnam/getpwuid" {
    var corpus = try Corpus.testCorpus(testing.allocator);
    defer corpus.deinit();
    var db = try DB.fromCorpus(testing.allocator, &corpus);
    defer db.deinit(testing.allocator);
    var buf = try testing.allocator.alloc(u8, db.getpwBufsize());
    defer testing.allocator.free(buf);
    {
        try testing.expectEqual(try db.getpwnam("doesnotexist", &buf), null);
        const vidmantas = (try db.getpwnam("vidmantas", &buf)).?;
        try testing.expectEqual(vidmantas.pw_uid, 128);
        try testing.expectEqual(vidmantas.pw_gid, 128);
        try testing.expectEqualStrings(vidmantas.pw_name[0..10], "vidmantas\x00");
        try testing.expectEqualStrings(vidmantas.pw_gecos[0..20], "Vidmantas Kaminskas\x00");
        try testing.expectEqualStrings(vidmantas.pw_dir[0..16], "/home/vidmantas\x00");
    }
    {
        try testing.expectEqual(try db.getpwuid(123456, &buf), null);
        const vidmantas = (try db.getpwuid(128, &buf)).?;
        try testing.expectEqual(vidmantas.pw_uid, 128);
        try testing.expectEqual(vidmantas.pw_gid, 128);
        try testing.expectEqualStrings(vidmantas.pw_name[0..10], "vidmantas\x00");
    }
    // the longest record fits exactly; one byte less must fail.
    const long = try db.getpwnam("Name" ** 8, &buf);
    try testing.expectEqualStrings(long.?.pw_name[0..33], "Name" ** 8 ++ "\x00");
    buf.len -= 1;
    try testing.expectError(error.OutOfMemory, db.getpwnam("Name" ** 8, &buf));
}
// Decompresses every user's additional-gids list from the blob and
// compares it against corpus.user2groups (mapped to gids).
test "additionalGids" {
    const allocator = testing.allocator;
    var corpus = try Corpus.testCorpus(allocator);
    defer corpus.deinit();
    var additional_gids = try additionalGids(allocator, &corpus);
    defer allocator.free(additional_gids.idx2offset);
    defer allocator.free(additional_gids.blob);
    var user_idx: usize = 0;
    while (user_idx < corpus.users.len) : (user_idx += 1) {
        const groups = corpus.user2groups[user_idx];
        const offset = additional_gids.idx2offset[user_idx];
        // groupless users share the empty list at offset 0.
        if (groups.len == 0) {
            try testing.expect(offset == 0);
            continue;
        }
        var vit = try compress.VarintSliceIterator(additional_gids.blob[offset..]);
        var it = compress.DeltaDecompressionIterator(&vit);
        try testing.expectEqual(it.remaining(), groups.len);
        var i: u64 = 0;
        const corpusGids = corpus.groups.items(.gid);
        while (try it.next()) |gid| : (i += 1) {
            try testing.expectEqual(gid, corpusGids[groups[i]]);
        }
        try testing.expectEqual(i, groups.len);
    }
}
// The minimal perfect hash over corpus gids must map the keys onto a dense
// 0..N-1 range (each key hashes to a distinct slot).
test "pack gids" {
    const allocator = testing.allocator;
    var corpus = try Corpus.testCorpus(allocator);
    defer corpus.deinit();
    const cmph_gid = try cmph.packU32(allocator, corpus.groups.items(.gid));
    defer allocator.free(cmph_gid);
    // these four gids are presumably the corpus gids — TODO confirm against
    // Corpus.testCorpus.
    const k1 = bdz.search_u32(cmph_gid, 0);
    const k2 = bdz.search_u32(cmph_gid, 128);
    const k3 = bdz.search_u32(cmph_gid, 9999);
    const k4 = bdz.search_u32(cmph_gid, 100000);
    var hashes = &[_]u32{ k1, k2, k3, k4 };
    // sorted hash values must be exactly 0,1,2,3: a perfect, minimal mapping.
    sort.sort(u32, hashes, {}, comptime sort.asc(u32));
    for (hashes) |hash, i|
        try testing.expectEqual(i, hash);
}
// Fixture offsets handed to bdzIdx in the tests below.
const hash_offsets = &[_]u32{ 0, 10, 20, 30 };
// Asserts that `arr` contains every value of hash_offsets exactly once
// (putNoClobber fails the test on a duplicate).
fn expectUsedHashes(allocator: Allocator, arr: []const u32) !void {
    var used = AutoHashMap(u32, void).init(allocator);
    defer used.deinit();
    for (arr) |elem|
        try used.putNoClobber(elem, {});
    for (hash_offsets) |item|
        try testing.expect(used.get(item) != null);
}
// bdzIdx must permute hash_offsets: every offset used, none duplicated.
test "bdzIdx on u32" {
    const keys = [_]u32{ 42, 1, 2, 3 };
    const mphf = try cmph.packU32(testing.allocator, keys[0..]);
    defer testing.allocator.free(mphf);
    var result = try bdzIdx(u32, testing.allocator, mphf, keys[0..], hash_offsets);
    defer testing.allocator.free(result);
    try expectUsedHashes(testing.allocator, result);
}
// Same property for string keys.
test "bdzIdx on str" {
    const keys = [_][]const u8{ "42", "1", "2", "3" };
    const mphf = try cmph.packStr(testing.allocator, keys[0..]);
    defer testing.allocator.free(mphf);
    var result = try bdzIdx([]const u8, testing.allocator, mphf, keys[0..], hash_offsets);
    defer testing.allocator.free(result);
    try expectUsedHashes(testing.allocator, result);
}
// nblocks rounds a byte length up to 64-byte blocks; result must be the same
// regardless of the counter type (u8/u32/u64).
test "nblocks" {
    const tests = .{
        .{ 0, &[_]u8{} },
        .{ 1, &[_]u8{ 1, 2, 42 } },
        .{ 1, &[_]u8{1} ** 63 },
        .{ 1, &[_]u8{1} ** 64 },
        // 65 bytes is just over one block.
        .{ 2, &[_]u8{1} ** 65 },
        // 255 full blocks: the u8 counter's maximum.
        .{ 255, &[_]u8{1} ** (255 * 64) },
    };
    inline for (tests) |tt| {
        try testing.expectEqual(nblocks(u8, tt[1]), tt[0]);
        try testing.expectEqual(nblocks(u32, tt[1]), tt[0]);
        try testing.expectEqual(nblocks(u64, tt[1]), tt[0]);
    }
}

71
src/File.zig Normal file
View File

@@ -0,0 +1,71 @@
const std = @import("std");
const os = std.os;
const fs = std.fs;
const Allocator = std.mem.Allocator;
const Corpus = @import("Corpus.zig");
const DB = @import("DB.zig");
const InvalidHeader = @import("header.zig").Invalid;
const File = @This();
db: DB,
ptr: []align(4096) const u8,
pub const Error = os.OpenError || os.FStatError || os.MMapError || InvalidHeader;
// Opens a db file read-only and mmaps it whole. The fd is closed as soon as
// the mapping exists (the mapping keeps the file alive); close() releases
// the mapping.
pub fn open(fname: []const u8) Error!File {
    const fd = try os.open(fname, os.O.RDONLY, 0);
    // fd_open tracks whether the errdefer below still owns the fd: once the
    // mmap succeeds we close it eagerly and must not close it twice.
    var fd_open = true;
    errdefer if (fd_open) os.close(fd);
    const st = try os.fstat(fd);
    const size = @intCast(usize, st.size);
    const ptr = try os.mmap(null, size, os.PROT.READ, os.MAP.SHARED, fd, 0);
    errdefer os.munmap(ptr);
    os.close(fd);
    fd_open = false;
    // validates the header; on failure the errdefer unmaps.
    const db = try DB.fromBytes(ptr);
    return File{ .db = db, .ptr = ptr };
}
// Unmaps the file. The File (and any DB views into the mapping) must not be
// used afterwards.
pub fn close(self: *File) void {
    os.munmap(self.ptr);
    self.* = undefined;
}
const testing = std.testing;
// Test helper: writes the test corpus database into a temporary directory
// and exposes its NUL-terminated path.
pub const TestFile = struct {
    dir: testing.TmpDir,
    path: [:0]const u8,
    pub fn init(allocator: Allocator) !TestFile {
        var corpus = try Corpus.testCorpus(allocator);
        defer corpus.deinit();
        var db = try DB.fromCorpus(allocator, &corpus);
        defer db.deinit(allocator);
        var tmp = testing.tmpDir(.{});
        errdefer tmp.cleanup();
        const mode = os.O.RDWR | os.O.CREAT | os.O.EXCL;
        const fd = try os.openat(tmp.dir.fd, "db.turbo", mode, 0o666);
        defer os.close(fd);
        // the db serializes itself as an iovec list; write it in one call.
        _ = try os.writev(fd, db.iov().constSlice());
        const dir_path = try tmp.getFullPath(allocator);
        defer allocator.free(dir_path);
        // join with an embedded NUL so the result can be re-sliced into a
        // sentinel-terminated path without another allocation.
        const full = &[_][]const u8{ dir_path, "db.turbo\x00" };
        var result = try fs.path.join(allocator, full);
        return TestFile{ .dir = tmp, .path = result[0 .. result.len - 1 :0] };
    }
    pub fn deinit(self: *TestFile, allocator: Allocator) void {
        self.dir.cleanup();
        allocator.free(self.path);
    }
};

102
src/Group.zig Normal file
View File

@@ -0,0 +1,102 @@
const std = @import("std");
const mem = std.mem;
const meta = std.meta;
const Allocator = mem.Allocator;
const Group = @This();
gid: u32,
name: []const u8,
members: []align(1) const []const u8,
// storage of name, members and members strings. for no particular reason.
// Everything (name, members, member strings) could be allocated separately
// (one extreme) and only once (another extreme). For some reason I chose the
// right extreme, but it doesn't have to be this way.
_buf: []const u8,
// Builds a Group whose name, member-slice headers and member strings all
// live in one allocation (_buf). Layout: [member slice headers][member
// strings][name]. Caller frees via deinit().
pub fn init(
    allocator: Allocator,
    gid: u32,
    name: []const u8,
    members: []align(1) const []const u8,
) error{OutOfMemory}!Group {
    // total: slice headers + name + all member string bytes.
    // NOTE(review): the @intCast(u32, sum) truncates member data over 4GiB
    // on 64-bit targets — presumably unreachable in practice, but verify.
    const buf_len = @sizeOf([]const u8) * members.len + name.len + blk: {
        var sum: usize = 0;
        for (members) |member| sum += member.len;
        break :blk @intCast(u32, sum);
    };
    var buf = try allocator.alloc(u8, buf_len);
    errdefer allocator.free(buf);
    // the first ptr_end bytes are reinterpreted as the []const u8 headers;
    // buf is only 1-aligned, which matches the align(1) members field.
    var ptr_end = @sizeOf([]const u8) * members.len;
    var members_ptr = mem.bytesAsSlice([]const u8, buf[0..ptr_end]);
    var offset: usize = ptr_end;
    for (members) |member, i| {
        mem.copy(u8, buf[offset..], member);
        // each header points back into buf itself (self-referential).
        members_ptr[i] = buf[offset .. offset + member.len];
        offset += member.len;
    }
    mem.copy(u8, buf[offset..], name);
    return Group{
        .gid = gid,
        .name = buf[offset .. offset + name.len],
        .members = members_ptr,
        ._buf = buf,
    };
}
// This could be made more efficient, but clone() is never in the hot path.
// Deep-copies the group (including all member strings) into fresh storage.
pub fn clone(self: *const Group, allocator: Allocator) error{OutOfMemory}!Group {
    return init(allocator, self.gid, self.name, self.members);
}
// Frees the single backing allocation made by init().
pub fn deinit(self: *Group, allocator: Allocator) void {
    allocator.free(self._buf);
    self.* = undefined;
}
// suggested buffer size in bytes if all strings were zero-terminated
// (for CGroup).
pub fn strlenZ(self: *const Group) usize {
    var count: usize = 0;
    // each member string plus its NUL terminator.
    for (self.members) |member|
        count += member.len + 1;
    // one pointer per member plus the null terminator pointer.
    count += ptr_size * (self.members.len + 1);
    count += self.name.len + 1;
    return count;
}
// Name type should be 0-terminated, members should be null-terminated, but
// sentinel is not part of the type (but is always there). Adding sentinel
// crashes zig compiler in pre-0.10. https://github.com/ziglang/zig/issues/7517
// C-ABI view of a group, shaped like `struct group` for NSS consumers.
pub const CGroup = extern struct {
    gid: u32,
    // should be [*:0]const u8
    name: [*]const u8,
    // Should be [*:null]align(1) const ?[*:0]const u8
    members: [*]align(1) const ?[*:0]const u8,
};
// size of the pointer to a single member.
pub const ptr_size = @sizeOf(meta.Child(meta.fieldInfo(CGroup, .members).field_type));
const testing = std.testing;
// clone() must deep-copy: mutating the original's fields and member bytes
// afterwards must not be visible through the clone.
test "Group.clone" {
    // TODO: how to do this on stack?
    // dupeZ gives a mutable copy of "member1" so we can overwrite it below.
    var member1 = mem.sliceTo(try testing.allocator.dupeZ(u8, "member1"), 0);
    defer testing.allocator.free(member1);
    var group = try init(testing.allocator, 1, "foo", &[_][]const u8{ member1, "member2" });
    defer group.deinit(testing.allocator);
    var cloned = try group.clone(testing.allocator);
    defer cloned.deinit(testing.allocator);
    // mutate everything the clone could possibly alias.
    group.gid = 2;
    group.name = "bar";
    member1[0] = 'x';
    try testing.expectEqual(cloned.gid, 1);
    try testing.expectEqualSlices(u8, cloned.name, "foo");
    try testing.expectEqualSlices(u8, cloned.members[0], "member1");
}

149
src/PackedGroup.zig Normal file
View File

@@ -0,0 +1,149 @@
const std = @import("std");
const pad = @import("padding.zig");
const validate = @import("validate.zig");
const compress = @import("compress.zig");
const InvalidRecord = validate.InvalidRecord;
const mem = std.mem;
const Allocator = mem.Allocator;
const ArrayList = std.ArrayList;
const BufSet = std.BufSet;
const PackedGroup = @This();
// GroupStored is the input to packTo: a group whose member list has already
// been serialized elsewhere, referenced by members_offset.
pub const GroupStored = struct {
    gid: u32,
    name: []const u8,
    members_offset: u64,
};
// records are aligned to 2^3 = 8 bytes within a section.
pub const alignment_bits = 3;
// Fixed-size header preceding the variable-length name + varint offset.
const Inner = packed struct {
    gid: u32,
    padding: u3 = 0,
    // stored minus one: group names are 1..32 bytes.
    groupname_len: u5,
    pub fn groupnameLen(self: *const Inner) usize {
        return @as(usize, self.groupname_len) + 1;
    }
};
// PackedGroup is a view into a serialized section; it owns no memory.
inner: *const Inner,
groupdata: []const u8,
members_offset: u64,
// One decoded record plus the remainder of the section (null at the end).
pub const Entry = struct {
    group: PackedGroup,
    next: ?[]const u8,
};
// Decodes one PackedGroup at the start of `bytes` and returns it together
// with the slice of the remaining records (aligned to alignment_bits).
pub fn fromBytes(bytes: []const u8) Entry {
    const inner = mem.bytesAsValue(Inner, bytes[0..@sizeOf(Inner)]);
    const start_blob = @sizeOf(Inner);
    const end_strings = @sizeOf(Inner) + inner.groupnameLen();
    // the varint was written by packTo, so Overflow cannot happen here.
    const members_offset = compress.uvarint(bytes[end_strings..]) catch |err| switch (err) {
        error.Overflow => unreachable,
    };
    const end_blob = end_strings + members_offset.bytes_read;
    // records are 8-byte aligned; skip the padding to find the next one.
    const next_start = pad.roundUp(usize, alignment_bits, end_blob);
    var next: ?[]const u8 = null;
    if (next_start < bytes.len)
        next = bytes[next_start..];
    return Entry{
        .group = PackedGroup{
            .inner = inner,
            .groupdata = bytes[start_blob..end_strings],
            .members_offset = members_offset.value,
        },
        .next = next,
    };
}
// Rejects non-UTF8 input. NOTE(review): appears unused here — packTo goes
// through validate.utf8 instead; confirm before removing.
fn validateUtf8(s: []const u8) InvalidRecord!void {
    if (!std.unicode.utf8ValidateSlice(s))
        return error.InvalidRecord;
}
// Walks a serialized section record by record; `section` becomes null when
// exhausted.
pub const Iterator = struct {
    section: ?[]const u8,
    pub fn next(it: *Iterator) ?PackedGroup {
        if (it.section) |section| {
            const entry = fromBytes(section);
            it.section = entry.next;
            return entry.group;
        }
        return null;
    }
};
pub fn iterator(section: []const u8) Iterator {
    return Iterator{ .section = section };
}
// Field accessors over the packed view.
pub fn gid(self: *const PackedGroup) u32 {
    return self.inner.gid;
}
pub fn membersOffset(self: *const PackedGroup) u64 {
    return self.members_offset;
}
pub fn name(self: *const PackedGroup) []const u8 {
    return self.groupdata;
}
// Serializes one group record (header + name + varint members_offset) onto
// `arr`. The caller pads to alignment_bits afterwards; on entry the buffer
// must already be 8-byte aligned.
pub fn packTo(
    arr: *ArrayList(u8),
    group: GroupStored,
) error{ InvalidRecord, OutOfMemory }!void {
    std.debug.assert(arr.items.len & 7 == 0);
    try validate.utf8(group.name);
    // NOTE(review): `group.name.len - 1` wraps for an empty name before
    // downCast can reject it (safety panic in debug) — confirm names are
    // validated non-empty upstream.
    const len = try validate.downCast(u5, group.name.len - 1);
    const inner = Inner{ .gid = group.gid, .groupname_len = len };
    try arr.*.appendSlice(mem.asBytes(&inner));
    try arr.*.appendSlice(group.name);
    try compress.appendUvarint(arr, group.members_offset);
}
const testing = std.testing;
// The packed view must have no hidden padding.
test "PackedGroup alignment" {
    try testing.expectEqual(@sizeOf(PackedGroup) * 8, @bitSizeOf(PackedGroup));
}
// Round-trip: packTo + iterator must reproduce every field, including the
// extreme gid/name/offset values.
test "construct PackedGroups" {
    var buf = ArrayList(u8).init(testing.allocator);
    defer buf.deinit();
    const groups = [_]GroupStored{
        GroupStored{
            .gid = 1000,
            .name = "sudo",
            .members_offset = 1,
        },
        GroupStored{
            .gid = std.math.maxInt(u32),
            .name = "Name" ** 8, // 32
            .members_offset = std.math.maxInt(u64),
        },
    };
    for (groups) |group| {
        try PackedGroup.packTo(&buf, group);
        // records must be re-aligned after each append.
        try pad.arrayList(&buf, PackedGroup.alignment_bits);
    }
    var i: u29 = 0;
    var it = PackedGroup.iterator(buf.items);
    while (it.next()) |group| : (i += 1) {
        try testing.expectEqual(groups[i].gid, group.gid());
        try testing.expectEqualStrings(groups[i].name, group.name());
        try testing.expectEqual(groups[i].members_offset, group.membersOffset());
    }
    try testing.expectEqual(groups.len, i);
}

295
src/PackedUser.zig Normal file
View File

@@ -0,0 +1,295 @@
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const math = std.math;
const Allocator = mem.Allocator;
const ArrayList = std.ArrayList;
const StringHashMap = std.StringHashMap;
const fieldInfo = std.meta.fieldInfo;
const pad = @import("padding.zig");
const validate = @import("validate.zig");
const compress = @import("compress.zig");
const ShellReader = @import("shell.zig").ShellReader;
const InvalidRecord = validate.InvalidRecord;
const User = @import("User.zig");
const PackedUser = @This();
pub const alignment_bits = 3;
// Fixed-size bit-packed header of a serialized user. String fields follow it
// in this order: home, [name unless suffix-of-home], gecos, [shell if
// inline]. home/name/shell lengths are stored minus one (never empty);
// gecos_len is stored as-is (may be empty).
const Inner = packed struct {
    uid: u32,
    gid: u32,
    // when shell_here: inline shell length minus one; otherwise an index
    // into the external shell table (see ShellReader).
    shell_len_or_idx: u8,
    shell_here: bool,
    // when true, name is the tail of home (e.g. /home/vidmantas) and is not
    // stored separately.
    name_is_a_suffix: bool,
    home_len: u6,
    name_len: u5,
    gecos_len: u11,
    fn homeLen(self: *const Inner) usize {
        return @as(u32, self.home_len) + 1;
    }
    // offset of name within the string blob.
    fn nameStart(self: *const Inner) usize {
        const name_len = self.nameLen();
        if (self.name_is_a_suffix) {
            return self.homeLen() - name_len;
        } else return self.homeLen();
    }
    fn nameLen(self: *const Inner) usize {
        return @as(u32, self.name_len) + 1;
    }
    // gecos follows home (suffix case) or home+name.
    fn gecosStart(self: *const Inner) usize {
        if (self.name_is_a_suffix) {
            return self.homeLen();
        } else return self.homeLen() + self.nameLen();
    }
    fn gecosLen(self: *const Inner) usize {
        return self.gecos_len;
    }
    // only meaningful when the shell is stored inline.
    fn maybeShellStart(self: *const Inner) usize {
        assert(self.shell_here);
        return self.gecosStart() + self.gecosLen();
    }
    fn shellLen(self: *const Inner) usize {
        return @as(u32, self.shell_len_or_idx) + 1;
    }
    // stringLength returns the length of the blob storing string values.
    fn stringLength(self: *const Inner) usize {
        var result: usize = self.homeLen() + self.gecosLen();
        if (!self.name_is_a_suffix)
            result += self.nameLen();
        if (self.shell_here)
            result += self.shellLen();
        return result;
    }
};
// PackedUser does not allocate; it re-interprets the "bytes" blob
// field. Both of those fields are pointers to "our representation" of
// that field.
inner: *const Inner,
bytes: []const u8,
additional_gids_offset: u64,
// One decoded user plus the remainder of the section (null at the end).
pub const Entry = struct {
    user: PackedUser,
    next: ?[]const u8,
};
// TODO(motiejus) provide a way to return an entry without decoding the
// additional_gids_offset:
// - will not return the 'next' slice.
// - cannot throw an Overflow error.
// Decodes one PackedUser at the start of `bytes`; `next` points past this
// record's alignment padding.
pub fn fromBytes(bytes: []const u8) Entry {
    const inner = mem.bytesAsValue(Inner, bytes[0..@sizeOf(Inner)]);
    const start_blob = @sizeOf(Inner);
    const end_strings = start_blob + inner.stringLength();
    // written by packTo, so Overflow cannot happen here.
    const gids_offset = compress.uvarint(bytes[end_strings..]) catch |err| switch (err) {
        error.Overflow => unreachable,
    };
    const end_blob = end_strings + gids_offset.bytes_read;
    const nextStart = pad.roundUp(usize, alignment_bits, end_blob);
    var next: ?[]const u8 = null;
    if (nextStart < bytes.len)
        next = bytes[nextStart..];
    return Entry{
        .user = PackedUser{
            .inner = inner,
            .bytes = bytes[start_blob..end_blob],
            .additional_gids_offset = gids_offset.value,
        },
        .next = next,
    };
}
// Walks a serialized section record by record. shell_reader is carried so
// callers can resolve table-indexed shells via shell(); next() itself does
// not use it.
pub const Iterator = struct {
    section: ?[]const u8,
    shell_reader: ShellReader,
    pub fn next(it: *Iterator) ?PackedUser {
        if (it.section) |section| {
            const entry = PackedUser.fromBytes(section);
            it.section = entry.next;
            return entry.user;
        }
        return null;
    }
};
pub fn iterator(section: []const u8, shell_reader: ShellReader) Iterator {
    return Iterator{ .section = section, .shell_reader = shell_reader };
}
// packTo packs the User record and copies it to the given arraylist.
// Layout: Inner header, home, [name unless suffix of home], gecos,
// [shell unless in the idxFn table], varint additional_gids_offset.
// On entry the buffer must be 8-byte aligned.
pub fn packTo(
    arr: *ArrayList(u8),
    user: User,
    additional_gids_offset: u64,
    idxFn: StringHashMap(u8),
) error{ InvalidRecord, OutOfMemory }!void {
    std.debug.assert(arr.items.len & 7 == 0);
    // function arguments are consts. We need to mutate the underlying
    // slice, so passing it via pointer instead.
    //const home_len = try validate.downCast(u6, user.home.len - 1);
    // lengths are stored minus one; downCast rejects anything that does not
    // fit the header's bit widths.
    // NOTE(review): `.len - 1` wraps for an empty home/name/shell before
    // downCast can reject it — confirm these are validated non-empty
    // upstream.
    const home_len = try validate.downCast(fieldInfo(Inner, .home_len).field_type, user.home.len - 1);
    const name_len = try validate.downCast(fieldInfo(Inner, .name_len).field_type, user.name.len - 1);
    const shell_len = try validate.downCast(fieldInfo(Inner, .shell_len_or_idx).field_type, user.shell.len - 1);
    const gecos_len = try validate.downCast(fieldInfo(Inner, .gecos_len).field_type, user.gecos.len);
    try validate.utf8(user.home);
    try validate.utf8(user.name);
    try validate.utf8(user.shell);
    try validate.utf8(user.gecos);
    const inner = Inner{
        .uid = user.uid,
        .gid = user.gid,
        // shell is stored inline only when it is not in the popular-shell
        // table; otherwise the table index is stored instead of a length.
        .shell_here = idxFn.get(user.shell) == null,
        .shell_len_or_idx = idxFn.get(user.shell) orelse shell_len,
        .home_len = home_len,
        .name_is_a_suffix = mem.endsWith(u8, user.home, user.name),
        .name_len = name_len,
        .gecos_len = gecos_len,
    };
    try arr.*.appendSlice(mem.asBytes(&inner)[0..@sizeOf(Inner)]);
    try arr.*.appendSlice(user.home);
    if (!inner.name_is_a_suffix)
        try arr.*.appendSlice(user.name);
    try arr.*.appendSlice(user.gecos);
    if (inner.shell_here)
        try arr.*.appendSlice(user.shell);
    try compress.appendUvarint(arr, additional_gids_offset);
}
// Field accessors over the packed view; string accessors slice into the
// record's string blob using the offsets computed by Inner.
pub fn uid(self: PackedUser) u32 {
    return self.inner.uid;
}
pub fn gid(self: PackedUser) u32 {
    return self.inner.gid;
}
pub fn additionalGidsOffset(self: PackedUser) u64 {
    return self.additional_gids_offset;
}
pub fn home(self: PackedUser) []const u8 {
    return self.bytes[0..self.inner.homeLen()];
}
pub fn name(self: PackedUser) []const u8 {
    const name_pos = self.inner.nameStart();
    const name_len = self.inner.nameLen();
    return self.bytes[name_pos .. name_pos + name_len];
}
pub fn gecos(self: PackedUser) []const u8 {
    const gecos_pos = self.inner.gecosStart();
    const gecos_len = self.inner.gecosLen();
    return self.bytes[gecos_pos .. gecos_pos + gecos_len];
}
// shell needs the external table to resolve table-indexed shells.
pub fn shell(self: PackedUser, shell_reader: ShellReader) []const u8 {
    if (self.inner.shell_here) {
        const shell_pos = self.inner.maybeShellStart();
        const shell_len = self.inner.shellLen();
        return self.bytes[shell_pos .. shell_pos + shell_len];
    }
    return shell_reader.get(self.inner.shell_len_or_idx);
}
// Largest possible string blob: every length field at its maximum (the
// stored-minus-one fields contribute max+1 each).
pub const max_str_len =
    math.maxInt(fieldInfo(Inner, .shell_len_or_idx).field_type) + 1 +
    math.maxInt(fieldInfo(Inner, .home_len).field_type) + 1 +
    math.maxInt(fieldInfo(Inner, .name_len).field_type) + 1 +
    math.maxInt(fieldInfo(Inner, .gecos_len).field_type);
const testing = std.testing;
// The bit-packed header must have no hidden padding.
test "PackedUser internal and external alignment" {
    try testing.expectEqual(
        @sizeOf(PackedUser.Inner) * 8,
        @bitSizeOf(PackedUser.Inner),
    );
}
// Shell table fixture: maps the two "popular" shells to indices 0 and 1.
// Caller deinits the returned map.
fn testShellIndex(allocator: Allocator) StringHashMap(u8) {
    var result = StringHashMap(u8).init(allocator);
    result.put("/bin/bash", 0) catch unreachable;
    result.put("/bin/zsh", 1) catch unreachable;
    return result;
}
// ShellReader view matching testShellIndex: index[i]..index[i+1] slices blob.
const test_shell_reader = ShellReader{
    .blob = "/bin/bash/bin/zsh",
    .index = &[_]u16{ 0, 9, 17 },
};
// The user with all fields at maximum length must pack without error.
test "pack max_user" {
    var arr = ArrayList(u8).init(testing.allocator);
    defer arr.deinit();
    var idx_noop = StringHashMap(u8).init(testing.allocator);
    defer idx_noop.deinit();
    try packTo(&arr, User.max_user, 0, idx_noop);
}
// Round-trip: packTo + iterator must reproduce every field for a mix of
// users exercising inline shells, table shells, name-suffix-of-home, the
// maximal record and an empty gecos.
test "construct PackedUser section" {
    var buf = ArrayList(u8).init(testing.allocator);
    defer buf.deinit();
    const users = [_]User{ User{
        .uid = 1000,
        .gid = 1000,
        .name = "vidmantas",
        .gecos = "Vidmantas Kaminskas",
        .home = "/home/vidmantas",
        .shell = "/bin/bash",
    }, User{
        .uid = 1001,
        .gid = 1001,
        .name = "svc-foo",
        .gecos = "Service Account",
        .home = "/home/service1",
        .shell = "/usr/bin/nologin",
    }, User.max_user, User{
        .uid = 1002,
        .gid = 1002,
        .name = "svc-bar",
        .gecos = "",
        .home = "/",
        .shell = "/bin/zsh",
    } };
    var shellIndex = testShellIndex(testing.allocator);
    // every record reuses the same (maximal) gids offset.
    const additional_gids = math.maxInt(u64);
    defer shellIndex.deinit();
    for (users) |user| {
        try PackedUser.packTo(&buf, user, additional_gids, shellIndex);
        try pad.arrayList(&buf, PackedUser.alignment_bits);
    }
    var i: u29 = 0;
    var it1 = PackedUser.iterator(buf.items, test_shell_reader);
    while (it1.next()) |user| : (i += 1) {
        try testing.expectEqual(users[i].uid, user.uid());
        try testing.expectEqual(users[i].gid, user.gid());
        try testing.expectEqual(user.additionalGidsOffset(), additional_gids);
        try testing.expectEqualStrings(users[i].name, user.name());
        try testing.expectEqualStrings(users[i].gecos, user.gecos());
        try testing.expectEqualStrings(users[i].home, user.home());
        try testing.expectEqualStrings(users[i].shell, user.shell(test_shell_reader));
    }
    try testing.expectEqual(users.len, i);
}

174
src/User.zig Normal file
View File

@@ -0,0 +1,174 @@
const std = @import("std");
const mem = std.mem;
const fmt = std.fmt;
const maxInt = std.math.maxInt;
const Allocator = mem.Allocator;
const ArrayList = std.ArrayList;
const BoundedArray = std.BoundedArray;
const pw_passwd = "x\x00";
const User = @This();
const PackedUser = @import("PackedUser.zig");
const validate = @import("validate.zig");
uid: u32,
gid: u32,
name: []const u8,
gecos: []const u8,
home: []const u8,
shell: []const u8,
// deep-clones a User record with a given Allocator.
// All four strings are copied into one allocation laid out as
// name|gecos|home|shell; the backing buffer therefore begins at name.ptr.
pub fn clone(
    self: *const User,
    allocator: Allocator,
) error{OutOfMemory}!User {
    const stringdata = try allocator.alloc(u8, self.strlen());
    const gecos_start = self.name.len;
    const home_start = gecos_start + self.gecos.len;
    const shell_start = home_start + self.home.len;
    mem.copy(u8, stringdata[0..self.name.len], self.name);
    mem.copy(u8, stringdata[gecos_start..], self.gecos);
    mem.copy(u8, stringdata[home_start..], self.home);
    mem.copy(u8, stringdata[shell_start..], self.shell);
    return User{
        .uid = self.uid,
        .gid = self.gid,
        .name = stringdata[0..self.name.len],
        .gecos = stringdata[gecos_start .. gecos_start + self.gecos.len],
        .home = stringdata[home_start .. home_start + self.home.len],
        .shell = stringdata[shell_start .. shell_start + self.shell.len],
    };
}
// /etc/passwd line template: name:password:uid:gid:gecos:home:shell.
const line_fmt = "{s}:x:{d}:{d}:{s}:{s}:{s}\n";
// Upper bound (in bytes) of any formatted passwd line: format max_user, the
// record with every field at its maximum length.
// Fix: line_fmt has six placeholders but only five arguments were supplied
// (name was missing), which fails at comptime when max_line_len is used.
const max_line_len = fmt.count(line_fmt, .{
    max_user.name,
    max_user.uid,
    max_user.gid,
    max_user.gecos,
    max_user.home,
    max_user.shell,
});
// toLine formats the user in /etc/passwd format, including the EOL.
// Fixes over the original: BoundedArray(u8, N) is a type and must be
// instantiated via init(); bufPrint's error union was ignored; line_fmt's
// first "{s}" (the name) was never passed; the written length was never
// recorded.
fn toLine(self: *const User) BoundedArray(u8, max_line_len) {
    // init(0) cannot fail: 0 <= max_line_len.
    var result = BoundedArray(u8, max_line_len).init(0) catch unreachable;
    // Format into the full backing buffer, then shrink to what was written.
    // bufPrint cannot fail as long as every field is within max_user's
    // limits (max_line_len is computed from max_user) — assumed validated
    // upstream; TODO confirm.
    const out = fmt.bufPrint(&result.buffer, line_fmt, .{
        self.name,
        self.uid,
        self.gid,
        self.gecos,
        self.home,
        self.shell,
    }) catch unreachable;
    result.resize(out.len) catch unreachable;
    return result;
}
// fromLine accepts a line of /etc/passwd (with or without the EOL) and makes
// a User. The field order matches line_fmt: name:x:uid:gid:gecos:home:shell.
// Fixes over the original: fmt.parseInt takes a radix and returns an error
// union (`orelse` did not apply); the field order disagreed with line_fmt
// (it read uid:gid:name and had no password field); a trailing newline was
// not stripped.
fn fromLine(allocator: Allocator, line: []const u8) error{ InvalidRecord, OutOfMemory }!User {
    // tolerate a trailing EOL so raw reader lines can be passed in.
    const stripped = mem.trimRight(u8, line, "\n");
    var it = mem.split(u8, stripped, ":");
    const name = it.next() orelse return error.InvalidRecord;
    // password field: we always write "x", but accept anything here.
    _ = it.next() orelse return error.InvalidRecord;
    const uids = it.next() orelse return error.InvalidRecord;
    const gids = it.next() orelse return error.InvalidRecord;
    const gecos = it.next() orelse return error.InvalidRecord;
    const home = it.next() orelse return error.InvalidRecord;
    const shell = it.next() orelse return error.InvalidRecord;
    // the line must be exhaustive.
    if (it.next() != null) return error.InvalidRecord;
    const uid = fmt.parseInt(u32, uids, 10) catch return error.InvalidRecord;
    const gid = fmt.parseInt(u32, gids, 10) catch return error.InvalidRecord;
    try validate.utf8(name);
    try validate.utf8(gecos);
    try validate.utf8(home);
    try validate.utf8(shell);
    const user = User{
        .uid = uid,
        .gid = gid,
        .name = name,
        .gecos = gecos,
        .home = home,
        .shell = shell,
    };
    // clone so the result does not alias the caller's line buffer.
    return try user.clone(allocator);
}
// total byte length of the four string fields (the size of the buffer
// clone() allocates).
fn strlen(self: *const User) usize {
    const lens = [_]usize{
        self.name.len,
        self.gecos.len,
        self.home.len,
        self.shell.len,
    };
    var total: usize = 0;
    for (lens) |l| total += l;
    return total;
}
// length of all string-data fields, assuming they are zero-terminated.
// Does not include password, since that's always static 'x\00'.
pub fn strlenZ(self: *const User) usize {
    // one '\0' each for name, gecos, home and shell.
    return self.strlen() + 4;
}
// Frees the single string buffer allocated by clone().
// Fix: clone() lays the buffer out as name|gecos|home|shell, so the
// allocation begins at name.ptr; the original freed from home.ptr, passing
// a mid-buffer pointer to the allocator.
pub fn deinit(self: *User, allocator: Allocator) void {
    const slice = self.name.ptr[0..self.strlen()];
    allocator.free(slice);
    self.* = undefined;
}
// Reads users line-by-line from `reader`.
// NOTE(review): work-in-progress stub — the loop never terminates normally,
// the read result (an error union) is discarded, nothing is appended to
// `users`, and `users` is never freed or returned properly. Finish or
// remove before use.
pub fn fromReader(allocator: Allocator, reader: anytype) ![]User {
    var users = ArrayList(User).init(allocator);
    var scratch = ArrayList(u8).init(allocator);
    defer scratch.deinit();
    while (true) {
        const line = reader.readUntilDelimiterArrayList(&scratch, '\n', maxInt(usize));
        _ = line;
    }
    _ = users;
}
// C-ABI view of a user, shaped like `struct passwd` for NSS consumers.
// Strings are presumably NUL-terminated by the writer — the sentinel is not
// part of the type (see the analogous CGroup note).
pub const CUser = extern struct {
    pw_name: [*]const u8,
    // always the static "x\x00": passwords live in the shadow db.
    pw_passwd: [*]const u8 = pw_passwd,
    pw_uid: u32,
    pw_gid: u32,
    pw_gecos: [*]const u8,
    pw_dir: [*]const u8,
    pw_shell: [*]const u8,
};
const testing = std.testing;
// The largest representable user: every field at the maximum length that
// PackedUser's bit-packed header can encode (see PackedUser.max_str_len).
pub const max_user = User{
    .uid = maxInt(u32),
    .gid = maxInt(u32),
    .name = "Name" ** 8,
    .gecos = "realname" ** 255 ++ "realnam",
    .home = "Home" ** 16,
    .shell = "She.LllL" ** 32,
};
// Guards against max_user and PackedUser.max_str_len drifting apart.
test "max_user and max_str_len are consistent" {
    const total_len = max_user.name.len +
        max_user.gecos.len +
        max_user.home.len +
        max_user.shell.len;
    try testing.expectEqual(total_len, PackedUser.max_str_len);
}
// clone() must produce an independent copy of the string data.
test "User.clone" {
    var allocator = testing.allocator;
    const user = User{
        .uid = 1000,
        .gid = 1000,
        .name = "vidmantas",
        .gecos = "Vidmantas Kaminskas",
        .home = "/home/vidmantas",
        .shell = "/bin/bash",
    };
    var user2 = try user.clone(allocator);
    defer user2.deinit(allocator);
    try testing.expectEqualStrings(user.shell, "/bin/bash");
}

30
src/bdz.zig Normal file
View File

@@ -0,0 +1,30 @@
const std = @import("std");
extern fn bdz_search_packed(packed_mphf: [*]const u8, key: [*]const u8, len: c_uint) u32;
// Hashes `key` with the packed BDZ mphf (thin wrapper over the C cmph
// bdz_search_packed).
pub fn search(packed_mphf: []const u8, key: []const u8) u32 {
    // keys longer than c_uint cannot occur for our inputs.
    const len = std.math.cast(c_uint, key.len) catch unreachable;
    return @as(u32, bdz_search_packed(packed_mphf.ptr, key.ptr, len));
}
// length of a u32 key after unzero() encoding.
const u32len = 5;
// Hashes a u32 key by first encoding it so it contains no zero bytes
// (cmph keys are C strings).
pub fn search_u32(packed_mphf: []const u8, key: u32) u32 {
    return @as(u32, bdz_search_packed(packed_mphf.ptr, &unzero(key), u32len));
}
// encode a u32 to 5 bytes so no byte is a '\0'.
//
// TODO(motiejus) figure out how to use cmph_io_byte_vector_adapter, so cmph
// packing would accept zero bytes. For now we will be doing a dance of not
// passing zero bytes.
pub fn unzero(x: u32) [5]u8 {
    // Split the 32 bits into 7+7+6+6+6 bit groups (most significant first)
    // and set the high bit of every output byte so none can be zero.
    const high_bit: u8 = 0b10000000;
    return [5]u8{
        @truncate(u8, x >> 25) | high_bit,
        @truncate(u8, (x >> 18) & 0b0111_1111) | high_bit,
        @truncate(u8, (x >> 12) & 0b0011_1111) | high_bit,
        @truncate(u8, (x >> 6) & 0b0011_1111) | high_bit,
        @truncate(u8, x & 0b0011_1111) | high_bit,
    };
}

146
src/cmph.zig Normal file
View File

@@ -0,0 +1,146 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const math = std.math;
const sort = std.sort;
const assert = std.debug.assert;
const bdz = @import("bdz.zig");
const CMPH_BDZ = @cImport({
@cInclude("cmph_types.h");
}).CMPH_BDZ;
extern fn cmph_io_vector_adapter(vector: [*]const [*:0]const u8, len: c_uint) [*]u8;
extern fn cmph_io_vector_adapter_destroy(key_source: [*]u8) void;
extern fn cmph_config_new(key_source: [*]const u8) ?[*]u8;
extern fn cmph_config_set_algo(mph: [*]u8, algo: c_int) void;
extern fn cmph_config_set_b(mph: [*]u8, b: c_int) void;
extern fn cmph_new(config: [*]const u8) ?[*]u8;
extern fn cmph_config_destroy(mph: [*]u8) void;
extern fn cmph_packed_size(mphf: [*]const u8) u32;
extern fn cmph_pack(mphf: [*]const u8, packed_mphf: [*]u8) void;
extern fn cmph_destroy(mphf: [*]u8) void;
// pack packs cmph hashes for the given input and returns a slice ("cmph pack
// minus first 4 bytes") for further storage. The slice must be freed by the
// caller.
// NOTE(review): on the cmph_new error path the config created above is not
// destroyed (no errdefer) — confirm cmph ownership rules, looks like a leak.
// NOTE(review): callers free the returned `buf[4..]`, not the `buf` that was
// allocated — verify the allocator tolerates freeing an offset sub-slice;
// this looks like a pointer/length mismatch.
pub fn pack(allocator: Allocator, input: [][*:0]const u8) error{OutOfMemory}![]const u8 {
    const input_len = @intCast(c_uint, input.len);
    var source = cmph_io_vector_adapter(input.ptr, input_len);
    defer cmph_io_vector_adapter_destroy(source);
    var config = cmph_config_new(source) orelse return error.OutOfMemory;
    cmph_config_set_algo(config, CMPH_BDZ);
    cmph_config_set_b(config, 7);
    var mph = cmph_new(config) orelse return error.OutOfMemory;
    cmph_config_destroy(config);
    const size = cmph_packed_size(mph);
    var buf = try allocator.alloc(u8, size);
    errdefer allocator.free(buf);
    cmph_pack(mph, buf.ptr);
    cmph_destroy(mph);
    // skip the 4-byte algorithm tag; bdz_search_packed expects the payload.
    return buf[4..];
}
// perfect-hash a list of numbers and return the packed mphf
pub fn packU32(allocator: Allocator, numbers: []const u32) error{OutOfMemory}![]const u8 {
    // encode each number as a zero-free, NUL-terminated 6-byte key, since
    // cmph consumes C strings.
    var keys: [][6]u8 = try allocator.alloc([6]u8, numbers.len);
    defer allocator.free(keys);
    for (numbers) |n, i|
        keys[i] = unzeroZ(n);
    // cmph wants an array of C-string pointers into the key storage.
    var keys2 = try allocator.alloc([*:0]const u8, numbers.len);
    defer allocator.free(keys2);
    for (keys) |_, i|
        keys2[i] = @ptrCast([*:0]const u8, &keys[i]);
    return pack(allocator, keys2);
}
// perfect-hash a list of strings and return the packed mphf
pub fn packStr(allocator: Allocator, strings: []const []const u8) error{OutOfMemory}![]const u8 {
    // arena holds the NUL-terminated copies only for the duration of pack().
    var arena = std.heap.ArenaAllocator.init(allocator);
    defer arena.deinit();
    var keys = try arena.allocator().alloc([*:0]const u8, strings.len);
    for (strings) |_, i|
        keys[i] = try arena.allocator().dupeZ(u8, strings[i]);
    return pack(allocator, keys);
}
const testing = std.testing;
// Ten distinct 10-byte keys used by the pack/unpack tests below.
const items = .{
    "aaaaaaaaaa",
    "bbbbbbbbbb",
    "cccccccccc",
    "dddddddddd",
    "eeeeeeeeee",
    "ffffffffff",
    "gggggggggg",
    "hhhhhhhhhh",
    "iiiiiiiiii",
    "jjjjjjjjjj",
};
const items_len = items.len;
// Packs `items` into an mphf; caller frees the result.
fn samplePack(allocator: Allocator) ![]const u8 {
    var vector = std.ArrayList([*:0]const u8).init(allocator);
    defer vector.deinit();
    try vector.appendSlice(&items);
    return pack(allocator, vector.items);
}
// The mphf must be small and map all keys onto distinct slots in 0..N-1.
test "basic pack/unpack" {
    const buf = try samplePack(testing.allocator);
    defer testing.allocator.free(buf);
    try testing.expect(buf.len < 100);
    var used: [items_len]bool = undefined;
    inline for (items) |elem| {
        const hashed = bdz.search(buf, elem);
        used[hashed] = true;
    }
    // every slot hit exactly once => minimal perfect hash.
    for (used) |item| try testing.expect(item);
}
// encodes a u32 to 6 bytes so no bytes except the last one is a '\0'.
// This is useful for cmph-packing, where it accepts 0-terminated char*s.
pub fn unzeroZ(x: u32) [6]u8 {
    const encoded = bdz.unzero(x);
    var out: [6]u8 = undefined;
    for (encoded) |byte, i|
        out[i] = byte;
    // NUL terminator for the C string consumers.
    out[5] = 0;
    return out;
}
// The encoding must be zero-free except for the final terminator.
test "unzeroZ" {
    const result = unzeroZ(0);
    try testing.expect(result[0] != 0);
    try testing.expect(result[1] != 0);
    try testing.expect(result[2] != 0);
    try testing.expect(result[3] != 0);
    try testing.expect(result[4] != 0);
    try testing.expect(result[5] == 0);
}
// packU32 must yield a minimal perfect hash: sorted hashes are 0..N-1.
test "pack u32" {
    const keys = &[_]u32{ 42, 1, math.maxInt(u32), 2 };
    const packed_mphf = try packU32(testing.allocator, keys);
    defer testing.allocator.free(packed_mphf);
    var hashes: [keys.len]u32 = undefined;
    for (keys) |key, i|
        hashes[i] = bdz.search_u32(packed_mphf, key);
    sort.sort(u32, hashes[0..], {}, comptime sort.asc(u32));
    for (hashes) |hash, i|
        try testing.expectEqual(i, hash);
}
// Same property for string keys.
test "pack str" {
    const keys = &[_][]const u8{ "foo", "bar", "baz", "1", "2", "3" };
    const packed_mphf = try packStr(testing.allocator, keys[0..]);
    defer testing.allocator.free(packed_mphf);
    var hashes: [keys.len]u32 = undefined;
    for (keys) |key, i|
        hashes[i] = bdz.search(packed_mphf, key);
    sort.sort(u32, hashes[0..], {}, comptime sort.asc(u32));
    for (hashes) |hash, i|
        try testing.expectEqual(i, hash);
}

335
src/compress.zig Normal file
View File

@@ -0,0 +1,335 @@
//
// varint64 []const u8 variants
//
// Thanks to https://github.com/gsquire/zig-snappy/blob/master/snappy.zig and
// golang's varint implementation.
const std = @import("std");
const ArrayList = std.ArrayList;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const math = std.math;
// compresses a strictly incrementing sorted slice of integers using delta
// compression. Compression is in-place: each element (except the first)
// becomes the gap to its predecessor minus one, so consecutive values
// encode as 0. Returns error.NotSorted if the input is not strictly
// increasing.
pub fn deltaCompress(comptime T: type, elems: []T) error{NotSorted}!void {
    if (elems.len <= 1) return;
    var previous: T = elems[0];
    for (elems[1..]) |current, offset| {
        if (current <= previous) return error.NotSorted;
        elems[offset + 1] = current - previous - 1;
        previous = current;
    }
}
// decompresses a slice compressed by deltaCompress. In-place: each element
// (except the first) becomes predecessor + stored gap + 1. Returns
// error.Overflow if a reconstructed value does not fit T.
pub fn deltaDecompress(comptime T: type, elems: []T) error{Overflow}!void {
    if (elems.len <= 1) return;
    for (elems) |_, idx| {
        if (idx == 0) continue;
        // checked arithmetic: predecessor + 1 first, then add the gap.
        const predecessor_plus_one = try math.add(T, elems[idx - 1], 1);
        elems[idx] = try math.add(T, elems[idx], predecessor_plus_one);
    }
}
// Represents a variable length integer that we read from a byte stream along
// with how many bytes were read to decode it.
pub const Varint = struct {
    value: u64,
    bytes_read: usize,
};
// a u64 varint occupies at most 10 bytes (7 payload bits per byte).
pub const maxVarintLen64 = 10;
// https://golang.org/pkg/encoding/binary/#Uvarint
// Decodes one unsigned LEB128-style varint from the front of buf. Mirrors
// Go's Uvarint, including its quirk: an empty/truncated buffer yields
// value=0, bytes_read=0 rather than an error.
pub fn uvarint(buf: []const u8) error{Overflow}!Varint {
    var x: u64 = 0;
    var s: u6 = 0;
    for (buf) |b, i| {
        if (i == maxVarintLen64)
            // Catch byte reads past maxVarintLen64.
            // See issue https://golang.org/issues/41185
            return error.Overflow;
        // high bit clear: this is the final byte.
        if (b < 0x80) {
            if (i == maxVarintLen64 - 1 and b > 1) {
                return error.Overflow;
            }
            return Varint{
                .value = x | (@as(u64, b) << s),
                .bytes_read = i + 1,
            };
        }
        // accumulate 7 payload bits; the shift add also overflow-checks s.
        x |= (@as(u64, b & 0x7f) << s);
        s = try math.add(u6, s, 7);
    }
    return Varint{
        .value = 0,
        .bytes_read = 0,
    };
}
// Like uvarint, for inputs known to be valid (asserts no Overflow).
pub fn uvarintMust(buf: []const u8) Varint {
    return uvarint(buf) catch |err| switch (err) {
        error.Overflow => unreachable,
    };
}
// https://golang.org/pkg/encoding/binary/#PutUvarint
// Encodes x into buf as an unsigned varint and returns the number of bytes
// written. buf must have room (at most maxVarintLen64 bytes).
pub fn putUvarint(buf: []u8, x: u64) usize {
    var remaining = x;
    var n: usize = 0;
    // emit 7 bits per byte, least-significant group first; a set high bit
    // means "more bytes follow".
    while (remaining >= 0x80) : (n += 1) {
        buf[n] = @truncate(u8, remaining) | 0x80;
        remaining >>= 7;
    }
    buf[n] = @truncate(u8, remaining);
    return n + 1;
}
// VarintSliceIterator iterates over varint-encoded slice.
// The first element is the length of the slice, in decoded numbers.
// Fix: the struct declared both a field `remaining` and a `pub fn
// remaining`, which Zig rejects (a declaration cannot share a field's
// name), so the function could never have compiled or been called. It is
// removed; callers read the field directly, as
// deltaDecompressionIterator.remaining already does.
const varintSliceIterator = struct {
    // number of not-yet-decoded items; before the first next() this is the
    // slice length. Public accessor: read the field directly.
    remaining: usize,
    arr: []const u8,
    idx: usize,
    // Decodes the next number, or null when exhausted.
    pub fn next(self: *varintSliceIterator) error{Overflow}!?u64 {
        if (self.remaining == 0)
            return null;
        const value = try uvarint(self.arr[self.idx..]);
        self.idx += value.bytes_read;
        self.remaining -= 1;
        return value.value;
    }
    // Like next(), for inputs known to be valid.
    pub fn nextMust(self: *varintSliceIterator) ?u64 {
        return self.next() catch |err| switch (err) {
            error.Overflow => unreachable,
        };
    }
};
// Builds an iterator over a varint-encoded slice: a varint element count
// followed by that many varint values. Returns error.Overflow when the
// length prefix itself is malformed.
pub fn VarintSliceIterator(arr: []const u8) error{Overflow}!varintSliceIterator {
    const header = try uvarint(arr);
    return varintSliceIterator{
        .remaining = header.value,
        .idx = header.bytes_read,
        .arr = arr,
    };
}
// Like VarintSliceIterator, but asserts the length prefix is well-formed;
// Overflow is the only possible error, so `catch unreachable` is equivalent
// to the exhaustive switch.
pub fn VarintSliceIteratorMust(arr: []const u8) varintSliceIterator {
    return VarintSliceIterator(arr) catch unreachable;
}
// Streaming counterpart of deltaDecompress: wraps a varintSliceIterator and
// yields the original (pre-compression) values one at a time.
const deltaDecompressionIterator = struct {
    vit: *varintSliceIterator,
    // The previously yielded (already decompressed) value.
    prev: u64,
    // 0 only before the first next() call, 1 afterwards: deltaCompress
    // subtracts an extra 1 per step, which is re-added here for every
    // element except the first.
    add_to_prev: u1,
    pub fn next(self: *deltaDecompressionIterator) error{Overflow}!?u64 {
        const current = try self.vit.next();
        if (current == null) return null;
        const prevExtra = try math.add(u64, self.prev, self.add_to_prev);
        const result = try math.add(u64, current.?, prevExtra);
        self.prev = result;
        self.add_to_prev = 1;
        return result;
    }
    // returns the number of remaining items. If called before the first
    // next(), returns the length of the slice.
    pub fn remaining(self: *const deltaDecompressionIterator) usize {
        return self.vit.remaining;
    }
    // Like next(), but asserts the stream is well-formed.
    pub fn nextMust(self: *deltaDecompressionIterator) ?u64 {
        return self.next() catch |err| switch (err) {
            error.Overflow => unreachable,
        };
    }
};
// Wraps vit into a delta-decompressing iterator. add_to_prev starts at 0 so
// the first next() yields the first stored value unchanged.
pub fn DeltaDecompressionIterator(vit: *varintSliceIterator) deltaDecompressionIterator {
    return .{
        .vit = vit,
        .prev = 0,
        .add_to_prev = 0,
    };
}
// Varint-encodes x and appends the encoded bytes to arr.
pub fn appendUvarint(arr: *ArrayList(u8), x: u64) Allocator.Error!void {
    var scratch: [maxVarintLen64]u8 = undefined;
    const written = putUvarint(&scratch, x);
    try arr.appendSlice(scratch[0..written]);
}
const testing = std.testing;
// Shared round-trip inputs covering the interesting varint boundaries
// (the 1-byte/2-byte edge at 127/128, the 0xff/0x100 edge, etc).
const uvarint_tests = [_]u64{
    0,
    1,
    2,
    10,
    20,
    63,
    64,
    65,
    127,
    128,
    129,
    255,
    256,
    257,
    // NOTE(review): in Zig, shift binds looser than minus, so this is
    // 1 << (63 - 1) == 1 << 62, not (1 << 63) - 1 as the same spelling
    // means in Go — confirm which bound was intended.
    1 << 63 - 1,
};
test "putUvarint/uvarint" {
    // Round-trip every sample value through encode + decode.
    for (uvarint_tests) |x| {
        var buf: [maxVarintLen64]u8 = undefined;
        const n = putUvarint(buf[0..], x);
        const got = try uvarint(buf[0..n]);
        try testing.expectEqual(x, got.value);
        try testing.expectEqual(n, got.bytes_read);
    }
}
test "VarintSliceIterator" {
    var buf = ArrayList(u8).init(testing.allocator);
    defer buf.deinit();
    // Encode the element count prefix, then the values themselves.
    try appendUvarint(&buf, uvarint_tests.len);
    for (uvarint_tests) |x|
        try appendUvarint(&buf, x);
    var it = try VarintSliceIterator(buf.items);
    var i: usize = 0;
    while (try it.next()) |got| : (i += 1) {
        try testing.expectEqual(uvarint_tests[i], got);
    }
    // The iterator must yield exactly the advertised number of elements.
    try testing.expectEqual(i, uvarint_tests.len);
}
test "delta compress/decompress" {
    // Table-driven round-trip: compressing .input must produce .want, and
    // decompressing that must restore .input.
    const tests = [_]struct { input: []const u8, want: []const u8 }{
        .{ .input = &[_]u8{}, .want = &[_]u8{} },
        .{ .input = &[_]u8{0}, .want = &[_]u8{0} },
        .{ .input = &[_]u8{10}, .want = &[_]u8{10} },
        .{ .input = &[_]u8{ 0, 1, 2 }, .want = &[_]u8{ 0, 0, 0 } },
        .{ .input = &[_]u8{ 10, 20, 30, 255 }, .want = &[_]u8{ 10, 9, 9, 224 } },
        .{ .input = &[_]u8{ 0, 254, 255 }, .want = &[_]u8{ 0, 253, 0 } },
    };
    for (tests) |t| {
        var arr = try ArrayList(u8).initCapacity(
            testing.allocator,
            t.input.len,
        );
        defer arr.deinit();
        try arr.appendSlice(t.input);
        try deltaCompress(u8, arr.items);
        try testing.expectEqualSlices(u8, arr.items, t.want);
        try deltaDecompress(u8, arr.items);
        try testing.expectEqualSlices(u8, arr.items, t.input);
    }
}
test "delta compression with varint tests" {
    // Round-trip the shared sample values through compress + decompress.
    var scratch: [uvarint_tests.len]u64 = undefined;
    std.mem.copy(u64, scratch[0..], uvarint_tests[0..]);
    try deltaCompress(u64, scratch[0..]);
    try deltaDecompress(u64, scratch[0..]);
    try testing.expectEqualSlices(u64, uvarint_tests[0..], scratch[0..]);
}
test "delta compression negative tests" {
    // Inputs that are not strictly increasing must be rejected.
    for ([_][]const u8{
        &[_]u8{ 0, 0 },
        &[_]u8{ 0, 1, 1 },
        &[_]u8{ 0, 1, 2, 1 },
    }) |t| {
        var arr = try ArrayList(u8).initCapacity(testing.allocator, t.len);
        defer arr.deinit();
        try arr.appendSlice(t);
        try testing.expectError(error.NotSorted, deltaCompress(u8, arr.items));
    }
}
test "delta decompress overflow" {
    // Reconstructed values that no longer fit in u8 must error out.
    for ([_][]const u8{
        &[_]u8{ 255, 0 },
        &[_]u8{ 0, 128, 127 },
    }) |t| {
        var arr = try ArrayList(u8).initCapacity(testing.allocator, t.len);
        defer arr.deinit();
        try arr.appendSlice(t);
        try testing.expectError(error.Overflow, deltaDecompress(u8, arr.items));
    }
}
test "delta decompression with an iterator" {
    var compressed: [uvarint_tests.len]u64 = undefined;
    std.mem.copy(u64, compressed[0..], uvarint_tests[0..]);
    try deltaCompress(u64, compressed[0..]);
    var buf = ArrayList(u8).init(testing.allocator);
    defer buf.deinit();
    // Length prefix followed by the delta-compressed values.
    try appendUvarint(&buf, compressed.len);
    for (compressed) |x|
        try appendUvarint(&buf, x);
    // NOTE(review): this takes the address of a temporary iterator and keeps
    // using it for the whole loop — confirm the temporary's lifetime on the
    // targeted compiler version.
    var it = DeltaDecompressionIterator(&try VarintSliceIterator(buf.items));
    var i: usize = 0;
    try testing.expectEqual(it.remaining(), uvarint_tests.len);
    while (try it.next()) |got| : (i += 1) {
        try testing.expectEqual(uvarint_tests[i], got);
    }
    try testing.expectEqual(i, uvarint_tests.len);
}
test "appendUvarint" {
    // Each appended value must decode back to itself.
    for (uvarint_tests) |x| {
        var buf = ArrayList(u8).init(testing.allocator);
        defer buf.deinit();
        try appendUvarint(&buf, x);
        const got = try uvarint(buf.items);
        try testing.expectEqual(x, got.value);
    }
}
test "overflow" {
    // Encodings longer than 10 bytes or exceeding 64 bits must be rejected.
    for ([_][]const u8{
        &[_]u8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x2 },
        &[_]u8{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x1, 0, 0 },
        &[_]u8{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
    }) |t| {
        try testing.expectError(error.Overflow, uvarint(t));
    }
}

105
src/flags.zig Normal file
View File

@@ -0,0 +1,105 @@
// Zero allocation argument parsing for unix-like systems.
// Released under the Zero Clause BSD (0BSD) license:
//
// Copyright 2022 Isaac Freund
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
const std = @import("std");
const cstr = std.cstr;
// Description of a single command-line flag.
pub const Flag = struct {
    // The exact argument string to match, e.g. "-h".
    name: [*:0]const u8,
    // boolean flags take no value; arg flags consume the following argument.
    kind: enum { boolean, arg },
};
// Builds the result type for parsing the given comptime flag set. Flag
// values live in a fixed-size array, so parsing allocates nothing.
pub fn ParseResult(comptime flags: []const Flag) type {
    return struct {
        const Self = @This();
        // Per-flag parse state: the flag's name plus its (untagged) value.
        // The active union member is determined by the flag's declared kind.
        const FlagData = struct {
            name: [*:0]const u8,
            value: union {
                boolean: bool,
                arg: ?[*:0]const u8,
            },
        };
        /// Remaining args after the recognized flags
        args: [][*:0]const u8,
        /// Data obtained from parsed flags
        flag_data: [flags.len]FlagData = blk: {
            // Init all flags to false/null
            var flag_data: [flags.len]FlagData = undefined;
            inline for (flags) |flag, i| {
                flag_data[i] = switch (flag.kind) {
                    .boolean => .{
                        .name = flag.name,
                        .value = .{ .boolean = false },
                    },
                    .arg => .{
                        .name = flag.name,
                        .value = .{ .arg = null },
                    },
                };
            }
            break :blk flag_data;
        },
        // Whether the given boolean flag was present. flag_name must be one
        // of the declared flags (unreachable otherwise).
        pub fn boolFlag(self: Self, flag_name: [*:0]const u8) bool {
            for (self.flag_data) |flag_data| {
                if (cstr.cmp(flag_data.name, flag_name) == 0) return flag_data.value.boolean;
            }
            unreachable; // Invalid flag_name
        }
        // The given arg flag's value, or null when it was absent. flag_name
        // must be one of the declared flags (unreachable otherwise).
        pub fn argFlag(self: Self, flag_name: [*:0]const u8) ?[:0]const u8 {
            for (self.flag_data) |flag_data| {
                if (cstr.cmp(flag_data.name, flag_name) == 0) {
                    return std.mem.span(flag_data.value.arg);
                }
            }
            unreachable; // Invalid flag_name
        }
    };
}
// Parses the declared flags from the front of args. Parsing stops at the
// first argument that matches no declared flag; that argument and everything
// after it is returned in .args. An `arg`-kind flag consumes the following
// argument as its value, failing with error.MissingFlagArgument when the
// flag is the final argument.
pub fn parse(args: [][*:0]const u8, comptime flags: []const Flag) !ParseResult(flags) {
    var ret: ParseResult(flags) = .{ .args = undefined };
    var arg_idx: usize = 0;
    while (arg_idx < args.len) : (arg_idx += 1) {
        var parsed_flag = false;
        // Unrolled at comptime over the declared flags.
        inline for (flags) |flag, flag_idx| {
            if (cstr.cmp(flag.name, args[arg_idx]) == 0) {
                switch (flag.kind) {
                    .boolean => ret.flag_data[flag_idx].value.boolean = true,
                    .arg => {
                        // Consume the next argument as this flag's value.
                        arg_idx += 1;
                        if (arg_idx == args.len) {
                            std.log.err("option '" ++ flag.name ++
                                "' requires an argument but none was provided!", .{});
                            return error.MissingFlagArgument;
                        }
                        ret.flag_data[flag_idx].value.arg = args[arg_idx];
                    },
                }
                parsed_flag = true;
            }
        }
        // The first unrecognized argument ends flag parsing.
        if (!parsed_flag) break;
    }
    ret.args = args[arg_idx..];
    return ret;
}

111
src/header.zig Normal file
View File

@@ -0,0 +1,111 @@
const std = @import("std");
const mem = std.mem;
const math = std.math;
const native_endian = @import("builtin").target.cpu.arch.endian();
const ptr_size = @import("Group.zig").ptr_size;
const max_shells = @import("shell.zig").max_shells;
// File magic: the UTF-8 encoding of U+1F937 (🤷).
const magic = [4]u8{ 0xf0, 0x9f, 0xa4, 0xb7 };
// On-disk format version this code reads and writes.
const version = 0;
// Byte-order marker stored in the header; Header.fromBytes requires it to
// match the reader's native endianness.
const Endian = enum(u4) {
    big,
    little,
    // The Endian value for the build target's native byte order.
    fn native() Endian {
        return switch (native_endian) {
            .Little => Endian.little,
            .Big => Endian.big,
        };
    }
};
// log2 of the section size: sections are aligned to 64-byte blocks.
pub const section_length_bits = 6;
pub const section_length = 1 << section_length_bits;
// Validation errors produced by Header.fromBytes.
pub const Invalid = error{
    InvalidMagic,
    InvalidVersion,
    InvalidEndianess,
    InvalidPointerSize,
};
// Fixed-size on-disk header of a turbonss database. The field order and the
// trailing padding keep @sizeOf(Header) at exactly two sections (verified by
// the tests below).
pub const Header = packed struct {
    magic: [4]u8 = magic,
    version: u8 = version,
    endian: Endian = Endian.native(),
    ptr_size: u4 = ptr_size,
    nblocks_shell_blob: u8,
    num_shells: u8,
    num_groups: u32,
    num_users: u32,
    nblocks_bdz_gid: u32,
    nblocks_bdz_groupname: u32,
    nblocks_bdz_uid: u32,
    nblocks_bdz_username: u32,
    nblocks_groups: u64,
    nblocks_users: u64,
    nblocks_groupmembers: u64,
    nblocks_additional_gids: u64,
    getgr_bufsize: u64,
    getpw_bufsize: u64,
    padding: [48]u8 = [1]u8{0} ** 48,
    // Validates a serialized header and reinterprets the bytes in place.
    // The returned pointer aliases `blob`; no copy is made.
    pub fn fromBytes(blob: *const [@sizeOf(Header)]u8) Invalid!*const Header {
        const self = mem.bytesAsValue(Header, blob);
        if (!mem.eql(u8, magic[0..4], blob[0..4]))
            return error.InvalidMagic;
        // Compare against the named constant (previously a literal 0) so a
        // future version bump cannot drift out of sync with this check.
        if (self.version != version)
            return error.InvalidVersion;
        if (self.endian != Endian.native())
            return error.InvalidEndianess;
        // when ptr size is larger than on the host that constructed the DB,
        // getgr_bufsize/getpw_bufsize may return insufficient values, causing
        // OutOfMemory for getgr* and getpw* calls.
        if (self.ptr_size < ptr_size)
            return error.InvalidPointerSize;
        return self;
    }
};
const testing = std.testing;
test "Section length is a power of two" {
    try testing.expect(std.math.isPowerOfTwo(section_length));
}
test "Header fits into two section" {
    try testing.expect(@sizeOf(Header) == 2 * section_length);
}
test "bit header size is equal to @sizeOf(Header)" {
    // The packed struct must contain no hidden padding bits.
    try testing.expectEqual(@sizeOf(Header) * 8, @bitSizeOf(Header));
}
test "header pack and unpack" {
    // Round-trip a header through its raw bytes and re-validate it.
    const header1 = Header{
        .nblocks_shell_blob = 0,
        .num_shells = 0,
        .num_groups = 0,
        .num_users = 0,
        .nblocks_bdz_gid = 0,
        .nblocks_bdz_groupname = 0,
        .nblocks_bdz_uid = 0,
        .nblocks_bdz_username = 0,
        .nblocks_groups = 0,
        .nblocks_users = 0,
        .nblocks_groupmembers = 0,
        .nblocks_additional_gids = 1,
        .getgr_bufsize = 16,
        .getpw_bufsize = 32,
    };
    const bytes = mem.asBytes(&header1);
    const header = try Header.fromBytes(bytes);
    try testing.expectEqual(header1, header.*);
}

153
src/libnss.zig Normal file
View File

@@ -0,0 +1,153 @@
const std = @import("std");
const os = std.os;
const fmt = std.fmt;
const mem = std.mem;
const process = std.process;
const DB = @import("DB.zig");
const File = @import("File.zig");
const CGroup = @import("Group.zig").CGroup;
const PackedGroup = @import("PackedGroup.zig");
const CUser = @import("User.zig").CUser;
const PackedUser = @import("PackedUser.zig");
const c = @cImport({
@cInclude("nss.h");
});
// Environment variable naming the database path.
// NOTE(review): the constant is named TURBONSS_DB but the env var actually
// read is literally "DB" — looks like a debugging leftover; confirm the
// intended variable name.
const TURBONSS_DB = "DB";
// Environment variable enabling verbose stderr logging when set to "1".
const TURBONSS_VERBOSE = "TURBONSS_VERBOSE";
// Environment variable overriding member-list omission ("1"/"0"/"auto").
const TURBONSS_OMIT_MEMBERS = "TURBONSS_OMIT_MEMBERS";
// NOTE(review): this three-argument signature (name, value, overwrite)
// matches libc setenv(3), not putenv(3), which takes a single "NAME=value"
// string; binding the putenv symbol with setenv's signature will misbehave
// at runtime — confirm which libc function is intended.
extern fn putenv(name: [*:0]const u8, value: [*:0]const u8, overwrite: c_int) c_int;
extern fn getenv(name: [*:0]const u8) ?[*:0]const u8;
export const turbonss_default_path: [:0]const u8 = "/etc/turbonss/db.turbo";
// State is a type of the global variable holding the process state:
// the DB handle and all the iterators.
const State = struct {
    // The opened database file, or null when it could not be opened.
    file: ?File,
    // Iterator backing getpwent-style user enumeration.
    getpwent_iterator: ?PackedUser.Iterator,
    // Iterator backing getgrent-style group enumeration.
    getgrent_iterator: ?PackedGroup.Iterator,
    // When true, getgr* calls skip the group member list.
    omit_members: bool,
    verbose: bool = false,
    fn debug(self: *const State, comptime format: []const u8, args: anytype) void {
        self.log(.debug, format, args);
    }
    fn info(self: *const State, comptime format: []const u8, args: anytype) void {
        self.log(.info, format, args);
    }
    fn err(self: *const State, comptime format: []const u8, args: anytype) void {
        self.log(.err, format, args);
    }
    // Writes one "[level] message" line to stderr under the stderr mutex;
    // no-op unless verbose mode is enabled.
    fn log(
        self: *const State,
        comptime level: std.log.Level,
        comptime format: []const u8,
        args: anytype,
    ) void {
        if (!self.verbose) return;
        const stderr = std.io.getStdErr().writer();
        std.debug.getStderrMutex().lock();
        defer std.debug.getStderrMutex().unlock();
        stderr.print("[" ++ level.asText() ++ "] " ++ format ++ "\n", args) catch return;
    }
};
// state is initialized on library startup.
var state: State = undefined;
// constructor: runs when the shared object is loaded. Must leave `state`
// fully initialized even on failure, because the NSS entry points read it.
export fn _turbo_init() void {
    // `state` is declared `undefined`, so assign every field before any
    // getpw*/getgr* entry point inspects it. (Previously a failed open
    // returned early with state.file — and the iterators — still
    // uninitialized, and the entry points' `state.file == null` check read
    // undefined memory.)
    state = State{
        .file = null,
        .getpwent_iterator = null,
        .getgrent_iterator = null,
        .omit_members = false,
        .verbose = false,
    };
    if (getenv(TURBONSS_VERBOSE)) |env| {
        const envZ = mem.sliceTo(env, 0);
        state.verbose = mem.eql(u8, envZ, "1");
    }
    const fname = os.getenvZ(TURBONSS_DB) orelse turbonss_default_path[0..];
    state.debug("opening {s}", .{fname});
    state.file = File.open(fname) catch |err| {
        state.err("open {s}: {s}", .{ fname, @errorName(err) });
        return;
    };
    state.debug("turbonss database opened", .{});
    const omit_members_env = os.getenvZ(TURBONSS_OMIT_MEMBERS) orelse "auto";
    state.omit_members = shouldOmitMembers(omit_members_env, os.argv);
    state.debug("omitting members from getgr* calls: {any}\n", .{state.omit_members});
}
// Decides whether getgr* calls should skip the member list. An explicit env
// override ("1"/"0") wins; otherwise members are omitted only when the
// program name (argv[0]) is exactly "id".
fn shouldOmitMembers(env: []const u8, argv: [][*:0]u8) bool {
    if (mem.eql(u8, env, "1")) return true;
    if (mem.eql(u8, env, "0")) return false;
    if (argv.len == 0) return false;
    const progname = mem.sliceTo(argv[0], 0);
    return mem.eql(u8, progname, "id");
}
// destructor: runs on library unload; closes the database if the
// constructor managed to open it.
export fn _turbo_fini() void {
    if (state.file) |*f|
        f.close();
}
// getpwuid_r NSS entry point: looks up a user by uid, filling `res` and
// using buf[0..len] as string storage. Returns an NSS status code and sets
// *errnop on failure.
export fn _nss_turbo_getpwuid_r(
    uid: c_uint,
    res: *CUser,
    buf: [*]u8,
    len: usize,
    errnop: *c_int,
) c.enum_nss_status {
    // Database was not opened (e.g. _turbo_init failed).
    if (state.file == null) {
        errnop.* = @enumToInt(os.E.AGAIN);
        return c.NSS_STATUS_UNAVAIL;
    }
    // NOTE(review): `&buf[0..len]` takes the address of a temporary slice;
    // confirm getpwuid's parameter type — passing the slice by value may be
    // the intent.
    const cuser = state.file.?.db.getpwuid(uid, &buf[0..len]) catch |err| switch (err) {
        error.OutOfMemory => {
            // Caller's buffer is too small: report ERANGE so it can retry
            // with a larger buffer.
            errnop.* = @enumToInt(os.E.RANGE);
            return c.NSS_STATUS_TRYAGAIN;
        },
    };
    if (cuser == null) {
        errnop.* = @enumToInt(os.E.NOENT);
        return c.NSS_STATUS_NOTFOUND;
    }
    res.* = cuser.?;
    return c.NSS_STATUS_SUCCESS;
}
const testing = std.testing;
test "nss_turbo_getpwuid_r" {
    var tf = try File.TestFile.init(testing.allocator);
    defer tf.deinit(testing.allocator);
    var env = try process.getEnvMap(testing.allocator);
    defer env.deinit();
    // NOTE(review): this early return disables the test entirely —
    // everything below it is dead code; presumably a work-in-progress
    // guard. Confirm whether it can be removed.
    if (true)
        return error.SkipZigTest;
    try testing.expectEqual(putenv(TURBONSS_VERBOSE, "1", 1), 0);
    try testing.expectEqual(putenv(TURBONSS_DB, tf.path, 1), 0);
    _turbo_init();
    try testing.expect(state.file != null);
    var buf = try testing.allocator.alloc(u8, state.file.?.db.getpwBufsize());
    var user: CUser = undefined;
    var errno: c_int = undefined;
    const ret = _nss_turbo_getpwuid_r(128, &user, buf.ptr, buf.len, &errno);
    try testing.expectEqual(ret, c.NSS_STATUS_SUCCESS);
}

53
src/padding.zig Normal file
View File

@@ -0,0 +1,53 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
// Rounds n up to the nearest multiple of 2^nbits.
pub fn roundUp(comptime T: type, comptime nbits: u8, n: T) T {
    comptime assert(nbits < @bitSizeOf(T));
    const mask = comptime (1 << nbits) - 1;
    const bumped = n + mask;
    return bumped & ~@as(T, mask);
}
// Returns how much padding brings n up to the next multiple of 2^nbits
// (zero when n is already aligned).
pub fn until(comptime T: type, comptime nbits: u8, n: T) T {
    const aligned = roundUp(T, nbits, n);
    return aligned - n;
}
// Appends zero bytes to arr so that its length becomes a multiple of
// 2^nbits.
pub fn arrayList(arr: *ArrayList(u8), comptime nbits: u8) Allocator.Error!void {
    const pad_len = until(u64, nbits, arr.items.len);
    try arr.appendNTimes(0, pad_len);
}
const testing = std.testing;
test "padding" {
    // until(T, 2, n): distance from n up to the next multiple of 4.
    try testing.expectEqual(until(u12, 2, 0), 0);
    try testing.expectEqual(until(u12, 2, 1), 3);
    try testing.expectEqual(until(u12, 2, 2), 2);
    try testing.expectEqual(until(u12, 2, 3), 1);
    try testing.expectEqual(until(u12, 2, 4), 0);
    try testing.expectEqual(until(u12, 2, 40), 0);
    try testing.expectEqual(until(u12, 2, 41), 3);
    try testing.expectEqual(until(u12, 2, 42), 2);
    try testing.expectEqual(until(u12, 2, 43), 1);
    try testing.expectEqual(until(u12, 2, 44), 0);
    // Near the top of u12's range.
    try testing.expectEqual(until(u12, 2, 4091), 1);
    try testing.expectEqual(until(u12, 2, 4092), 0);
}
test "arrayList" {
    var buf = try ArrayList(u8).initCapacity(testing.allocator, 16);
    defer buf.deinit();
    buf.appendAssumeCapacity(1);
    // Pad 1 byte up to an 8-byte (2^3) boundary.
    try arrayList(&buf, 3);
    try testing.expectEqual(buf.items.len, 8);
    buf.appendAssumeCapacity(2);
    // Pad 9 bytes up to a 1024-byte (2^10) boundary.
    try arrayList(&buf, 10);
    try testing.expectEqual(buf.items.len, 1024);
}

187
src/shell.zig Normal file
View File

@@ -0,0 +1,187 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const PriorityDequeue = std.PriorityDequeue;
const StringHashMap = std.StringHashMap;
const BoundedArray = std.BoundedArray;
const assert = std.debug.assert;
// Upper bound on the number of distinct shells in the index (fits in u8).
pub const max_shells = 255;
// Upper bound on the byte length of a single shell path.
pub const max_shell_len = 256;
// ShellReader interprets "Shell Index" and "Shell Blob" sections.
pub const ShellReader = struct {
    // index[i] is the byte offset of shell i in blob; index[i+1] marks its
    // end.
    index: []const u16,
    // Concatenated shell strings, addressed via index.
    blob: []const u8,
    // Reinterprets the raw (2-byte aligned) index section as u16 offsets.
    pub fn init(index: []align(2) const u8, blob: []const u8) ShellReader {
        return ShellReader{
            .index = std.mem.bytesAsSlice(u16, index),
            .blob = blob,
        };
    }
    // get returns a shell at the given index.
    pub fn get(self: *const ShellReader, idx: u8) []const u8 {
        return self.blob[self.index[idx]..self.index[idx + 1]];
    }
};
// ShellWriter is a shell popularity contest: collect shells and return the
// popular ones, sorted by score. score := len(shell) * number_of_shells.
pub const ShellWriter = struct {
    // shell string -> number of times it was put(). Keys are owned copies.
    counts: std.StringHashMap(u32),
    allocator: Allocator,
    const KV = struct {
        shell: []const u8,
        score: u64,
    };
    pub const ShellSections = struct {
        // len is the number of shells in this section.
        len: u8,
        // index points the i'th shell to its offset in blob; the last byte
        // of the i'th shell is index[i+1]. index holds len+1 entries — the
        // extra terminator marks the end of the final shell, so its length
        // can be computed from the index alone — hence the capacity of
        // max_shells + 1 (the previous capacity of max_shells overflowed
        // when exactly max_shells shells were stored).
        index: BoundedArray(u16, max_shells + 1),
        // blob is the concatenation of all shell strings.
        blob: BoundedArray(u8, (max_shells + 1) * max_shell_len),
        // shell2idx helps translate a shell (string) to it's index.
        // NOTE(review): the keys are slices into self.blob, whose storage
        // is inline in this struct; copying a ShellSections (e.g. returning
        // it by value from init) leaves the keys pointing into the old
        // copy — confirm lifetime assumptions.
        shell2idx: StringHashMap(u8),
        // initializes and populates shell sections. All strings are copied,
        // nothing is owned.
        pub fn init(
            allocator: Allocator,
            shells: BoundedArray([]const u8, max_shells),
        ) error{OutOfMemory}!ShellSections {
            assert(shells.len <= max_shells);
            var self = ShellSections{
                .len = @intCast(u8, shells.len),
                .index = BoundedArray(u16, max_shells + 1).init(shells.len) catch unreachable,
                .blob = BoundedArray(u8, (max_shells + 1) * max_shell_len).init(0) catch unreachable,
                .shell2idx = StringHashMap(u8).init(allocator),
            };
            if (shells.len == 0) return self;
            errdefer self.shell2idx.deinit();
            for (shells.constSlice()) |shell, idx| {
                const idx8 = @intCast(u8, idx);
                const offset = @intCast(u16, self.blob.len);
                self.blob.appendSliceAssumeCapacity(shell);
                try self.shell2idx.put(self.blob.constSlice()[offset..], idx8);
                self.index.set(idx8, offset);
            }
            // The terminating entry is a byte offset into blob, which can
            // exceed 255, so it must be cast to the index element type u16.
            // (The previous @intCast(u8, ...) trapped once blob grew past
            // 255 bytes.)
            self.index.appendAssumeCapacity(@intCast(u16, self.blob.len));
            return self;
        }
        pub fn deinit(self: *ShellSections) void {
            self.shell2idx.deinit();
            self.* = undefined;
        }
        // Returns the shell's index, or null when the shell is not stored.
        pub fn getIndex(self: *const ShellSections, shell: []const u8) ?u8 {
            return self.shell2idx.get(shell);
        }
    };
    pub fn init(allocator: Allocator) ShellWriter {
        return ShellWriter{
            .counts = std.StringHashMap(u32).init(allocator),
            .allocator = allocator,
        };
    }
    // Frees the owned key copies and the count map.
    pub fn deinit(self: *ShellWriter) void {
        var it = self.counts.keyIterator();
        while (it.next()) |key_ptr|
            self.counts.allocator.free(key_ptr.*);
        self.counts.deinit();
        self.* = undefined;
    }
    // Records one occurrence of shell; the string is duplicated on first
    // sight so the caller need not keep it alive.
    pub fn put(self: *ShellWriter, shell: []const u8) !void {
        const res = try self.counts.getOrPutAdapted(shell, self.counts.ctx);
        if (!res.found_existing) {
            res.key_ptr.* = try self.allocator.dupe(u8, shell);
            res.value_ptr.* = 1;
        } else {
            res.value_ptr.* += 1;
        }
    }
    fn cmpShells(_: void, a: KV, b: KV) std.math.Order {
        return std.math.order(a.score, b.score);
    }
    // toOwnedSections returns the analyzed ShellSections. Resets the shell
    // popularity contest. ShellSections memory is allocated by the ShellWriter
    // allocator, and must be deInit'ed by the caller. Shells seen only once
    // never qualify.
    pub fn toOwnedSections(self: *ShellWriter, limit: u10) error{OutOfMemory}!ShellSections {
        assert(limit <= max_shells);
        var deque = PriorityDequeue(KV, void, cmpShells).init(self.allocator, {});
        defer deque.deinit();
        var it = self.counts.iterator();
        while (it.next()) |entry| {
            if (entry.value_ptr.* == 1)
                continue;
            const score = entry.key_ptr.*.len * entry.value_ptr.*;
            try deque.add(KV{ .shell = entry.key_ptr.*, .score = score });
        }
        // Keep the top `limit` shells, highest score first.
        const total = std.math.min(deque.count(), limit);
        var topShells = BoundedArray([]const u8, max_shells).init(total) catch unreachable;
        var i: u32 = 0;
        while (i < total) : (i += 1)
            topShells.set(i, deque.removeMax().shell);
        const result = ShellSections.init(self.allocator, topShells);
        self.deinit();
        self.* = init(self.allocator);
        return result;
    }
};
const testing = std.testing;
test "basic shellpopcon" {
    var popcon = ShellWriter.init(testing.allocator);
    const bash = "/bin/bash"; // 9 chars
    const zsh = "/bin/zsh"; // 8 chars
    const long = "/bin/very-long-shell-name-ought-to-be-first"; // 43 chars
    const nobody = "/bin/nobody"; // only 1 instance, ought to ignore
    const input = [_][]const u8{
        zsh, zsh, zsh, zsh, // zsh score 8*4=32
        bash, bash, bash, nobody, // bash score 3*9=27
        long, long, // long score 2*43=86
    };
    for (input) |shell| {
        try popcon.put(shell);
    }
    var sections = try popcon.toOwnedSections(max_shells);
    defer sections.deinit();
    // index has one extra terminating entry, hence 4 for 3 shells.
    try testing.expectEqual(sections.index.len, 4); // all but "nobody" qualify
    // Shells are indexed by descending score.
    try testing.expectEqual(sections.getIndex(long).?, 0);
    try testing.expectEqual(sections.getIndex(zsh).?, 1);
    try testing.expectEqual(sections.getIndex(bash).?, 2);
    try testing.expectEqual(sections.getIndex(nobody), null);
    try testing.expectEqual(sections.blob.constSlice().len, bash.len + zsh.len + long.len);
    // The serialized sections must round-trip through ShellReader.
    const shellReader = ShellReader.init(
        std.mem.sliceAsBytes(sections.index.constSlice()),
        sections.blob.constSlice(),
    );
    try testing.expectEqualStrings(shellReader.get(0), long);
    try testing.expectEqualStrings(shellReader.get(1), zsh);
    try testing.expectEqualStrings(shellReader.get(2), bash);
    try testing.expectEqual(shellReader.index.len, 4);
}

17
src/test_all.zig Normal file
View File

@@ -0,0 +1,17 @@
// References every module so `zig test` discovers and runs all of their
// test blocks.
test "turbonss test suite" {
    _ = @import("bdz.zig");
    _ = @import("cmph.zig");
    _ = @import("compress.zig");
    _ = @import("Corpus.zig");
    _ = @import("DB.zig");
    _ = @import("Group.zig");
    _ = @import("header.zig");
    _ = @import("libnss.zig");
    _ = @import("PackedGroup.zig");
    _ = @import("PackedUser.zig");
    _ = @import("padding.zig");
    _ = @import("shell.zig");
    _ = @import("User.zig");
    _ = @import("validate.zig");
    _ = @import("unix2db/main.zig");
}

10
src/unix2db/main.zig Normal file
View File

@@ -0,0 +1,10 @@
const std = @import("std");
const flags = @import("../flags.zig");
// Entry point of the unix2db tool; not implemented yet.
pub fn main() !void {}
const testing = std.testing;
test "stub" {
    // Reference the import so its declarations are analyzed and its tests
    // are pulled in.
    _ = flags;
}

15
src/validate.zig Normal file
View File

@@ -0,0 +1,15 @@
const std = @import("std");
// Narrows n to T, mapping an out-of-range value to error.InvalidRecord.
// std.math.cast's only failure is Overflow, so a plain catch is equivalent
// to the exhaustive switch.
pub fn downCast(comptime T: type, n: u64) error{InvalidRecord}!T {
    return std.math.cast(T, n) catch error.InvalidRecord;
}
// Validates that s is well-formed UTF-8; returns error.InvalidRecord
// otherwise.
pub fn utf8(s: []const u8) error{InvalidRecord}!void {
    if (std.unicode.utf8ValidateSlice(s)) return;
    return error.InvalidRecord;
}