tests for initgroups_dyn

This commit is contained in:
Motiejus Jakštys 2022-07-09 13:00:45 +03:00
parent 5fa4a71ddf
commit f327fb24ba
4 changed files with 106 additions and 22 deletions

View File

@ -260,10 +260,18 @@ pub fn testCorpus(allocator: Allocator) !Corpus {
var group0 = try Group.init(allocator, 0, "root", &[_][]const u8{"root"}); var group0 = try Group.init(allocator, 0, "root", &[_][]const u8{"root"});
var group1 = try Group.init(allocator, 128, "vidmantas", &[_][]const u8{"vidmantas"}); var group1 = try Group.init(allocator, 128, "vidmantas", &[_][]const u8{"vidmantas"});
const members2 = &[_][]const u8{ "svc-bar", "Name" ** 8, "vidmantas", "root" }; var group2 = try Group.init(
var group2 = try Group.init(allocator, 9999, "all", members2); allocator,
const members3 = &[_][]const u8{ "svc-bar", "vidmantas" }; 9999,
var group3 = try Group.init(allocator, 100000, "service-account", members3); "all",
&[_][]const u8{ "svc-bar", "Name" ** 8, "vidmantas", "root" },
);
var group3 = try Group.init(
allocator,
100000,
"service-account",
&[_][]const u8{ "svc-bar", "vidmantas" },
);
defer group0.deinit(allocator); defer group0.deinit(allocator);
defer group1.deinit(allocator); defer group1.deinit(allocator);
defer group2.deinit(allocator); defer group2.deinit(allocator);

View File

@ -313,7 +313,7 @@ pub fn packCGroup(self: *const DB, group: PackedGroup, buf: []u8) error{BufferTo
member_ptrs[member_ptrs.len - 1] = null; member_ptrs[member_ptrs.len - 1] = null;
var buf_offset: usize = ptr_end; var buf_offset: usize = ptr_end;
var it = compress.DeltaDecompressionIterator(&vit); var it = compress.deltaDecompressionIterator(&vit);
var i: usize = 0; var i: usize = 0;
while (it.nextMust()) |member_offset| : (i += 1) { while (it.nextMust()) |member_offset| : (i += 1) {
const entry = PackedUser.fromBytes(self.users[member_offset << 3 ..]); const entry = PackedUser.fromBytes(self.users[member_offset << 3 ..]);
@ -647,6 +647,11 @@ fn groupsSection(
}; };
} }
/// Returns a delta-decompression iterator over a user's additional group
/// IDs, reading varint-encoded deltas from `additional_gids` starting at
/// `offset`.
/// FIXME(review): the returned `DeltaDecompressionIterator` stores `&vit`,
/// a pointer to the stack-local `vit` (the iterator struct holds
/// `vit: *varintSliceIterator`), so the pointer dangles as soon as this
/// function returns. Confirm and restructure — e.g. make the
/// delta-decompression iterator own its VarintSliceIterator by value —
/// before relying on this from initgroups_dyn.
pub fn userGids(self: *const DB, offset: u64) compress.DeltaDecompressionIterator {
var vit = compress.VarintSliceIteratorMust(self.additional_gids[offset..]);
return compress.deltaDecompressionIterator(&vit);
}
// creates a bdz index using packed_mphf. // creates a bdz index using packed_mphf.
// hash = bdz_search(packed_mphf, keys[i]); // hash = bdz_search(packed_mphf, keys[i]);
// result[hash] = idx2offset[i]; // result[hash] = idx2offset[i];
@ -830,7 +835,7 @@ test "additionalGids" {
continue; continue;
} }
var vit = try compress.VarintSliceIterator(additional_gids.blob[offset..]); var vit = try compress.VarintSliceIterator(additional_gids.blob[offset..]);
var it = compress.DeltaDecompressionIterator(&vit); var it = compress.deltaDecompressionIterator(&vit);
try testing.expectEqual(it.remaining(), groups.len); try testing.expectEqual(it.remaining(), groups.len);
var i: u64 = 0; var i: u64 = 0;
const corpusGids = corpus.groups.items(.gid); const corpusGids = corpus.groups.items(.gid);

View File

@ -145,12 +145,12 @@ pub fn VarintSliceIteratorMust(arr: []const u8) varintSliceIterator {
}; };
} }
const deltaDecompressionIterator = struct { pub const DeltaDecompressionIterator = struct {
vit: *varintSliceIterator, vit: *varintSliceIterator,
prev: u64, prev: u64,
add_to_prev: u1, add_to_prev: u1,
pub fn next(self: *deltaDecompressionIterator) error{Overflow}!?u64 { pub fn next(self: *DeltaDecompressionIterator) error{Overflow}!?u64 {
const current = try self.vit.next(); const current = try self.vit.next();
if (current == null) return null; if (current == null) return null;
@ -163,19 +163,19 @@ const deltaDecompressionIterator = struct {
// returns the number of remaining items. If called before the first // returns the number of remaining items. If called before the first
// next(), returns the length of the slice. // next(), returns the length of the slice.
pub fn remaining(self: *const deltaDecompressionIterator) usize { pub fn remaining(self: *const DeltaDecompressionIterator) usize {
return self.vit.remaining; return self.vit.remaining;
} }
pub fn nextMust(self: *deltaDecompressionIterator) ?u64 { pub fn nextMust(self: *DeltaDecompressionIterator) ?u64 {
return self.next() catch |err| switch (err) { return self.next() catch |err| switch (err) {
error.Overflow => unreachable, error.Overflow => unreachable,
}; };
} }
}; };
pub fn DeltaDecompressionIterator(vit: *varintSliceIterator) deltaDecompressionIterator { pub fn deltaDecompressionIterator(vit: *varintSliceIterator) DeltaDecompressionIterator {
return deltaDecompressionIterator{ return DeltaDecompressionIterator{
.vit = vit, .vit = vit,
.prev = 0, .prev = 0,
.add_to_prev = 0, .add_to_prev = 0,
@ -304,7 +304,7 @@ test "delta decompression with an iterator" {
try appendUvarint(&buf, x); try appendUvarint(&buf, x);
var vit = try VarintSliceIterator(buf.items); var vit = try VarintSliceIterator(buf.items);
var it = DeltaDecompressionIterator(&vit); var it = deltaDecompressionIterator(&vit);
var i: usize = 0; var i: usize = 0;
try testing.expectEqual(it.remaining(), uvarint_tests.len); try testing.expectEqual(it.remaining(), uvarint_tests.len);
while (try it.next()) |got| : (i += 1) { while (try it.next()) |got| : (i += 1) {

View File

@ -3,9 +3,11 @@ const os = std.os;
const fmt = std.fmt; const fmt = std.fmt;
const log = std.log; const log = std.log;
const mem = std.mem; const mem = std.mem;
const process = std.process; const math = std.math;
const heap = std.heap;
const once = std.once; const once = std.once;
const Mutex = std.Thread.Mutex; const Mutex = std.Thread.Mutex;
const Allocator = mem.Allocator;
const DB = @import("DB.zig"); const DB = @import("DB.zig");
const File = @import("File.zig"); const File = @import("File.zig");
@ -39,6 +41,9 @@ const State = struct {
getgrent_iterator_mu: Mutex = Mutex{}, getgrent_iterator_mu: Mutex = Mutex{},
getgrent_iterator: ?PackedGroup.Iterator = null, getgrent_iterator: ?PackedGroup.Iterator = null,
// allocator used by initgroups_dyn to (re)allocate the caller's groups buffer
initgroups_dyn_allocator: Allocator,
}; };
// global_state is initialized on first call to an nss function // global_state is initialized on first call to an nss function
@ -98,6 +103,7 @@ fn init() void {
global_state = State{ global_state = State{
.file = file, .file = file,
.omit_members = omit_members, .omit_members = omit_members,
.initgroups_dyn_allocator = heap.raw_c_allocator,
}; };
} }
@ -329,24 +335,56 @@ export fn _nss_turbo_initgroups_dyn(
_: u32, _: u32,
start: *c_long, start: *c_long,
size: *c_long, size: *c_long,
groups: [*]u32, groupsp: *[*]u32,
limit: c_long, limit: c_long,
errnop: *c_int, errnop: *c_int,
) c.enum_nss_status { ) c.enum_nss_status {
const db = getDBErrno(errnop) orelse return c.NSS_STATUS_UNAVAIL; const state = getStateErrno(errnop) orelse return c.NSS_STATUS_UNAVAIL;
const db = state.file.db;
const user = db.getUser(mem.sliceTo(user_name, 0)) orelse { const user = db.getUser(mem.sliceTo(user_name, 0)) orelse {
errnop.* = @enumToInt(os.E.NOENT); errnop.* = @enumToInt(os.E.NOENT);
return c.NSS_STATUS_NOTFOUND; return c.NSS_STATUS_NOTFOUND;
}; };
_ = user; var gids = db.userGids(user.additional_gids_offset);
_ = start; if (size.* < gids.remaining()) {
_ = size; const oldsize = @intCast(usize, size.*);
_ = groups; const newsize = if (limit <= 0)
_ = limit; oldsize + gids.remaining()
else
math.min(@intCast(usize, limit), oldsize + gids.remaining());
return c.NSS_STATUS_SUCCESS; var buf = groupsp.*[0..oldsize];
const new_groups = state.initgroups_dyn_allocator.realloc(buf, newsize);
if (new_groups) |newgroups| {
groupsp.* = newgroups.ptr;
size.* = @intCast(c_long, newsize);
} else |err| switch (err) {
error.OutOfMemory => {
errnop.* = @enumToInt(os.E.NOMEM);
return c.NSS_STATUS_TRYAGAIN;
},
}
}
// I was not able to understand the expected behavior of limit. In glibc
// compat-initgroups.c limit is used to malloc the buffer. Something like
// this is all over the place:
// if (limit > 0 && *size == limit) /* We reached the maximum. */
// return;
// Our implementation will thus limit the number of *added* entries.
var added: usize = 0;
while (gids.nextMust()) |gid| {
if (limit > 0 and added == limit) break;
added += 1;
groupsp.*[@intCast(usize, start.*)] = @intCast(u32, gid);
start.* += 1;
}
return if (added > 0) c.NSS_STATUS_SUCCESS else c.NSS_STATUS_NOTFOUND;
} }
fn getState() ?State { fn getState() ?State {
@ -459,6 +497,39 @@ test "getgrgid_r and getgrnam_r" {
try testing.expectEqual(@enumToInt(os.E.NOENT), @intCast(u16, errno)); try testing.expectEqual(@enumToInt(os.E.NOENT), @intCast(u16, errno));
} }
test "initgroups_dyn" {
    const gpa = testing.allocator;

    // Point the NSS database path at a temporary test DB; restore the
    // previous path when the test finishes.
    var test_db = try File.TestDB.init(gpa);
    defer test_db.deinit();
    const saved_db_path = turbonss_db_path;
    turbonss_db_path = test_db.path;
    defer turbonss_db_path = saved_db_path;

    global_state.?.initgroups_dyn_allocator = gpa;

    // Caller-provided gid buffer: 3 slots, slot 0 holds a canary value and
    // `start_idx` points just past it.
    var buf_len: c_long = 3; // size of the gids array is this
    var buf = try gpa.alloc(u32, @intCast(usize, buf_len)); // buffer too small
    // NOTE(review): if the call reallocs the buffer, buf.len may no longer
    // match the allocation handed to free() — confirm against the allocator.
    defer gpa.free(buf);
    buf[0] = 42; // canary
    var start_idx: c_long = 1; // canary is there
    var errno_out: c_int = 42; // canary: must stay untouched on success

    const rc = _nss_turbo_initgroups_dyn(
        "vidmantas",
        0, // gid, ignored
        &start_idx,
        &buf_len,
        &buf.ptr,
        3, // limit: besides canary accept 2 groups
        &errno_out,
    );

    try testing.expectEqual(c.NSS_STATUS_SUCCESS, rc);
    try testing.expectEqual(@as(c_int, 42), errno_out);
    try testing.expectEqual(@as(u32, 42), buf[0]);
    try testing.expectEqual(@as(u32, 128), buf[1]);
}
fn testVidmantasGroup(g: CGroup) !void { fn testVidmantasGroup(g: CGroup) !void {
try testing.expectEqual(@as(u32, 128), g.gid); try testing.expectEqual(@as(u32, 128), g.gid);
try testing.expectEqualStrings("vidmantas", mem.sliceTo(g.name, 0)); try testing.expectEqualStrings("vidmantas", mem.sliceTo(g.name, 0));