2023-08-21 13:50:52 +03:00
parent 37e24524c2
commit 993a29d2f8
17 changed files with 59 additions and 63 deletions
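The changes below are mechanical: Zig 0.11 reworked the cast builtins (@intCast, @truncate, @ptrCast, @alignCast) to take a single operand and infer the destination type from the result location, falling back to an explicit @as(T, ...) wrapper where no result location exists. A minimal standalone sketch of the pattern (illustrative names only, not code from this repository):

    const std = @import("std");

    test "Zig 0.11 single-operand casts" {
        const i: usize = 7;
        var dest: u32 = undefined;
        dest = @intCast(i); // 0.10: @intCast(u32, i); the type comes from `dest`
        // with no result location, @as supplies the destination type:
        const low = @as(u8, @truncate(i)); // 0.10: @truncate(u8, i)
        try std.testing.expectEqual(@as(u32, 7), dest);
        try std.testing.expectEqual(@as(u8, 7), low);
    }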

View File

@@ -120,7 +120,7 @@ pub fn init(
var result = try name2user.getOrPut(name);
if (result.found_existing)
return error.Duplicate;
-result.value_ptr.* = @intCast(u32, i);
+result.value_ptr.* = @intCast(i);
}
// verify whatever comes to cmph are unique: group names
var name2group = StringHashMap(u32).init(allocator);
@@ -128,7 +128,7 @@ pub fn init(
var result = try name2group.getOrPut(name);
if (result.found_existing)
return error.Duplicate;
-result.value_ptr.* = @intCast(u32, i);
+result.value_ptr.* = @intCast(i);
}
// verify whatever comes to cmph are unique: gids
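As an aside, a self-contained sketch of the duplicate check used in these loops (generic key, not the real corpus types): getOrPut reports found_existing when the key is already present, which init turns into error.Duplicate.

    const std = @import("std");

    test "getOrPut flags duplicate keys" {
        var map = std.StringHashMap(u32).init(std.testing.allocator);
        defer map.deinit();
        const first = try map.getOrPut("svc-account");
        try std.testing.expect(!first.found_existing);
        first.value_ptr.* = 0;
        const second = try map.getOrPut("svc-account");
        try std.testing.expect(second.found_existing);
    }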
@@ -178,7 +178,7 @@ pub fn init(
// continue;
members.len += 1;
members[members.len - 1] = user_idx;
-try user2groups[user_idx].append(allocator, @intCast(u32, i));
+try user2groups[user_idx].append(allocator, @intCast(i));
} else {
return err.returnf(
"user '{s}' not found, member of group '{s}'",

View File

@@ -270,8 +270,8 @@ pub fn fromBytes(buf: []align(8) const u8) InvalidHeader!DB {
const end = (start_block + @field(lengths, field.name)) << section_length_bits;
const start = start_block << section_length_bits;
const slice_type = meta.Child(field.type);
-const value = mem.bytesAsSlice(slice_type, @alignCast(8, buf[start..end]));
-@field(result, field.name) = value;
+const value = mem.bytesAsSlice(slice_type, buf[start..end]);
+@field(result, field.name) = @alignCast(value);
}
return result;
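A standalone sketch of the section-loading idiom in this hunk (a made-up u32 section, not the real DB layout): slicing at a runtime offset drops the comptime-known alignment, mem.bytesAsSlice reinterprets the bytes, and the single-operand @alignCast now takes its target alignment from the result location instead of a literal 8.

    const std = @import("std");

    fn sectionAsU32s(buf: []align(8) const u8, start: usize, end: usize) []align(8) const u32 {
        // runtime start/end lose the comptime alignment; @alignCast reasserts
        // it, with the target taken from the return type
        const value = std.mem.bytesAsSlice(u32, buf[start..end]);
        return @alignCast(value);
    }

    test "loading a section as u32s" {
        const buf: [16]u8 align(8) = [_]u8{0} ** 16;
        const idx = sectionAsU32s(&buf, 8, 16);
        try std.testing.expectEqual(@as(usize, 2), idx.len);
    }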
@@ -293,9 +293,7 @@ pub fn packCGroupNoMembers(group: *const PackedGroup, buf: []u8) error{BufferToo
return CGroup{
.gr_name = buf[name_start .. name_start + name.len :0].ptr,
.gr_gid = group.gid(),
-// TODO: how can we use bytesAsSlice in a way that does not need
-// this ugly ptrCast?
-.gr_mem = @ptrCast([*:null]align(1) const ?[*:0]const u8, member_ptrs.ptr),
+.gr_mem = @ptrCast(member_ptrs.ptr),
};
}
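A sketch of why the verbose @ptrCast([*:null]align(1) const ?[*:0]const u8, ...) spelling (and its TODO) can go away: the struct field being initialized now provides the destination pointer type, so the call site only needs @ptrCast. The struct below is a stand-in, not the real CGroup:

    const std = @import("std");

    const FakeCGroup = struct {
        gr_mem: [*:null]align(1) const ?[*:0]const u8,
    };

    test "ptrCast target comes from the field" {
        const members = [_:null]?[*:0]const u8{ "root", "daemon" };
        const g = FakeCGroup{ .gr_mem = @ptrCast(&members) };
        try std.testing.expectEqualStrings("root", std.mem.span(g.gr_mem[0].?));
        try std.testing.expectEqualStrings("daemon", std.mem.span(g.gr_mem[1].?));
    }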
@@ -342,7 +340,7 @@ pub fn packCGroup(self: *const DB, group: *const PackedGroup, buf: []u8) error{
var i: usize = 0;
while (try it.next()) |member_offset| : (i += 1) {
-const entry = try PackedUser.fromBytes(@alignCast(8, self.users[member_offset << 3 ..]));
+const entry = try PackedUser.fromBytes(@alignCast(self.users[member_offset << 3 ..]));
const start = buf_offset;
const name = entry.user.name();
if (buf_offset + name.len + 1 > buf.len)
@@ -364,7 +362,7 @@ pub fn packCGroup(self: *const DB, group: *const PackedGroup, buf: []u8) error{
.gr_gid = group.gid(),
// TODO: how can we use bytesAsSlice in a way that does not need
// this ugly ptrCast?
-.gr_mem = @ptrCast([*:null]align(1) const ?[*:0]const u8, member_ptrs.ptr),
+.gr_mem = @ptrCast(member_ptrs.ptr),
};
}
@@ -373,7 +371,7 @@ pub fn getGroupByName(self: *const DB, name: []const u8) error{Overflow}!?Packed
if (idx >= self.header.num_groups)
return null;
const offset = self.idx_groupname2group[idx];
-const group = (try PackedGroup.fromBytes(@alignCast(8, self.groups[offset << 3 ..]))).group;
+const group = (try PackedGroup.fromBytes(@alignCast(self.groups[offset << 3 ..]))).group;
if (!mem.eql(u8, name, group.name()))
return null;
return group;
@@ -384,7 +382,7 @@ pub fn getGroupByGid(self: *const DB, gid: u32) error{Overflow}!?PackedGroup {
if (idx >= self.header.num_groups)
return null;
const offset = self.idx_gid2group[idx];
-const group = (try PackedGroup.fromBytes(@alignCast(8, self.groups[offset << 3 ..]))).group;
+const group = (try PackedGroup.fromBytes(@alignCast(self.groups[offset << 3 ..]))).group;
if (gid != group.gid())
return null;
return group;
@@ -468,7 +466,7 @@ pub fn getUserByName(self: *const DB, name: []const u8) error{Overflow}!?PackedU
if (idx >= self.header.num_users)
return null;
const offset = self.idx_name2user[idx];
-const user = (try PackedUser.fromBytes(@alignCast(8, self.users[offset << 3 ..]))).user;
+const user = (try PackedUser.fromBytes(@alignCast(self.users[offset << 3 ..]))).user;
if (!mem.eql(u8, name, user.name())) return null;
return user;
}
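The offset << 3 in these lookups mirrors the writer side further down (@truncate(user_offset >> 3) in usersSection): record offsets are stored as 8-byte block indices, so they are shifted back into byte offsets before decoding, and the record start is known to still be 8-byte aligned. A minimal sketch of that convention (assumed layout, not the real PackedUser/PackedGroup encoding):

    const std = @import("std");

    fn recordAt(section: []align(8) const u8, block: usize) []align(8) const u8 {
        // block << 3 is the byte offset; @alignCast (target taken from the
        // return type) reasserts the alignment the runtime slice start dropped
        return @alignCast(section[block << 3 ..]);
    }

    test "block offsets are 8-byte aligned" {
        const blob: [32]u8 align(8) = [_]u8{0} ** 32;
        const rec = recordAt(&blob, 2); // third 8-byte record slot
        try std.testing.expectEqual(@as(usize, 16), blob.len - rec.len);
    }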
@@ -484,7 +482,7 @@ pub fn getUserByUid(self: *const DB, uid: u32) error{Overflow}!?PackedUser {
if (idx >= self.header.num_users)
return null;
const offset = self.idx_uid2user[idx];
-const user = (try PackedUser.fromBytes(@alignCast(8, self.users[offset << 3 ..]))).user;
+const user = (try PackedUser.fromBytes(@alignCast(self.users[offset << 3 ..]))).user;
if (uid != user.uid()) return null;
return user;
}
@@ -588,7 +586,7 @@ fn usersSection(
const user = users.get(i);
const user_offset = math.cast(u35, blob.items.len) orelse return error.TooMany;
assert(user_offset & 7 == 0);
-idx2offset[i] = @truncate(u32, user_offset >> 3);
+idx2offset[i] = @as(u32, @truncate(user_offset >> 3));
try PackedUser.packTo(
&blob,
user,
@@ -598,7 +596,7 @@ fn usersSection(
try blob.appendNTimes(0, mem.alignForward(usize, blob.items.len, 8) - blob.items.len);
}
return UsersSection{
-.len = @intCast(u32, users.len),
+.len = @intCast(users.len),
.idx2offset = idx2offset,
.blob = try blob.toOwnedSlice(),
};
@@ -677,9 +675,9 @@ fn groupsSection(
members_offset,
) |gid, name, *idx2offset_i, members_offset_i| {
// TODO: this may be inefficient; it's calling `.slice()` on every iteration
-const group_offset = @intCast(u32, blob.items.len);
+const group_offset = blob.items.len;
assert(group_offset & 7 == 0);
-idx2offset_i.* = @truncate(u32, group_offset >> 3);
+idx2offset_i.* = @truncate(group_offset >> 3);
const group_stored = GroupStored{
.gid = gid,
.name = name,
@@ -690,7 +688,7 @@ fn groupsSection(
}
return GroupsSection{
-.len = @intCast(u32, corpus.groups.len),
+.len = @intCast(corpus.groups.len),
.idx2offset = idx2offset,
.blob = try blob.toOwnedSlice(),
};
@@ -733,9 +731,9 @@ pub fn nblocks_n(comptime T: type, nbytes: usize) T {
u64 => u70,
else => @compileError("unsupported type " ++ @typeName(T)),
};
-const upper = @intCast(B, mem.alignForward(usize, nbytes, section_length));
+const upper = @as(B, @intCast(mem.alignForward(usize, nbytes, section_length)));
assert(upper & (section_length - 1) == 0);
-return @truncate(T, upper >> section_length_bits);
+return @truncate(upper >> section_length_bits);
}
// nblocks returns how many blocks a particular slice will take.
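nblocks_n rounds a byte count up to a whole number of fixed-size sections and returns the section count in a caller-chosen integer type (widening through B first so the shift cannot overflow). A worked sketch of the arithmetic, assuming section_length_bits = 6, i.e. 64-byte sections; the real constants are defined elsewhere in this file:

    const std = @import("std");

    fn nblocksSketch(nbytes: usize) u32 {
        const section_length_bits = 6; // assumption for illustration
        const section_length = 1 << section_length_bits;
        const upper = std.mem.alignForward(usize, nbytes, section_length);
        return @truncate(upper >> section_length_bits);
    }

    test "byte counts round up to whole sections" {
        try std.testing.expectEqual(@as(u32, 0), nblocksSketch(0));
        try std.testing.expectEqual(@as(u32, 1), nblocksSketch(1));
        try std.testing.expectEqual(@as(u32, 2), nblocksSketch(65));
    }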

View File

@@ -20,7 +20,7 @@ pub fn open(fname: []const u8) Error!File {
errdefer if (fd_open) os.close(fd);
const st = try os.fstat(fd);
-const size = @intCast(usize, st.size);
+const size: usize = @intCast(st.size);
const ptr = try os.mmap(null, size, os.PROT.READ, os.MAP.SHARED, fd, 0);
errdefer os.munmap(ptr);

View File

@@ -27,7 +27,7 @@ pub fn init(
const buf_len = @sizeOf([]const u8) * members.len + name.len + blk: {
var sum: usize = 0;
for (members) |member| sum += member.len;
-break :blk @intCast(u32, sum);
+break :blk @as(u32, @intCast(sum));
};
var buf = try allocator.alloc(u8, buf_len);
errdefer allocator.free(buf);

View File

@@ -70,7 +70,7 @@ pub const Iterator = struct {
pub fn next(it: *Iterator) error{Overflow}!?PackedGroup {
if (it.idx == it.total) return null;
-const entry = try fromBytes(@alignCast(8, it.section[it.next_start..]));
+const entry = try fromBytes(@alignCast(it.section[it.next_start..]));
it.idx += 1;
it.next_start += entry.end;
it.advanced_by = entry.end;

View File

@@ -85,7 +85,7 @@ pub const Entry = struct {
pub fn fromBytes(blob: []align(8) const u8) error{Overflow}!Entry {
const start_var_payload = @bitSizeOf(Inner) / 8;
-const inner = @ptrCast(*align(8) const Inner, blob[0..start_var_payload]);
+const inner = @as(*align(8) const Inner, @ptrCast(blob[0..start_var_payload]));
const end_strings = start_var_payload + inner.stringLength();
const gids_offset = try compress.uvarint(blob[end_strings..]);
const end_payload = end_strings + gids_offset.bytes_read;
@@ -110,7 +110,7 @@ pub const Iterator = struct {
pub fn next(it: *Iterator) error{Overflow}!?PackedUser {
if (it.idx == it.total) return null;
-const entry = try fromBytes(@alignCast(8, it.section[it.next_start..]));
+const entry = try fromBytes(@alignCast(it.section[it.next_start..]));
it.idx += 1;
it.next_start += entry.end;
it.advanced_by = entry.end;

View File

@@ -21,10 +21,10 @@ pub fn search_u32(packed_mphf: []const u8, key: u32) u32 {
pub fn unzero(x: u32) [5]u8 {
const bit: u8 = 0b10000000;
var buf: [u32len]u8 = undefined;
-buf[0] = @truncate(u8, (x & 0b11111110_00000000_00000000_00000000) >> 25) | bit;
-buf[1] = @truncate(u8, (x & 0b00000001_11111100_00000000_00000000) >> 18) | bit;
-buf[2] = @truncate(u8, (x & 0b00000000_00000011_11110000_00000000) >> 12) | bit;
-buf[3] = @truncate(u8, (x & 0b00000000_00000000_00001111_11000000) >> 6) | bit;
-buf[4] = @truncate(u8, (x & 0b00000000_00000000_00000000_00111111) >> 0) | bit;
+buf[0] = @truncate(((x & 0b11111110_00000000_00000000_00000000) >> 25) | bit);
+buf[1] = @truncate(((x & 0b00000001_11111100_00000000_00000000) >> 18) | bit);
+buf[2] = @truncate(((x & 0b00000000_00000011_11110000_00000000) >> 12) | bit);
+buf[3] = @truncate(((x & 0b00000000_00000000_00001111_11000000) >> 6) | bit);
+buf[4] = @truncate(((x & 0b00000000_00000000_00000000_00111111) >> 0) | bit);
return buf;
}
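The masks above split the 32 input bits into 7+7+6+6+6-bit chunks and set the high bit of every output byte, so the result contains no zero byte (the cmph wrappers below hand keys around as NUL-terminated strings). A standalone property sketch with a local copy of the packing, so it compiles on its own:

    const std = @import("std");

    fn unzeroSketch(x: u32) [5]u8 {
        const bit: u8 = 0b1000_0000;
        return .{
            @as(u8, @truncate((x & 0b11111110_00000000_00000000_00000000) >> 25)) | bit,
            @as(u8, @truncate((x & 0b00000001_11111100_00000000_00000000) >> 18)) | bit,
            @as(u8, @truncate((x & 0b00000000_00000011_11110000_00000000) >> 12)) | bit,
            @as(u8, @truncate((x & 0b00000000_00000000_00001111_11000000) >> 6)) | bit,
            @as(u8, @truncate(x & 0b00000000_00000000_00000000_00111111)) | bit,
        };
    }

    test "no zero bytes, payload reversible" {
        const x: u32 = 0xDEAD_BEEF;
        const b = unzeroSketch(x);
        for (b) |byte| try std.testing.expect(byte & 0x80 != 0);
        const back = (@as(u32, b[0] & 0x7f) << 25) |
            (@as(u32, b[1] & 0x7f) << 18) |
            (@as(u32, b[2] & 0x3f) << 12) |
            (@as(u32, b[3] & 0x3f) << 6) |
            @as(u32, b[4] & 0x3f);
        try std.testing.expectEqual(x, back);
    }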

View File

@@ -25,7 +25,7 @@ extern fn cmph_destroy(mphf: [*]u8) void;
// minus first 4 bytes") for further storage. The slice must be freed by the
// caller.
pub fn pack(allocator: Allocator, input: [][*:0]const u8) error{OutOfMemory}![]align(8) const u8 {
-const input_len = @intCast(c_uint, input.len);
+const input_len: c_uint = @intCast(input.len);
var source = cmph_io_vector_adapter(input.ptr, input_len);
defer cmph_io_vector_adapter_destroy(source);
var config = cmph_config_new(source) orelse return error.OutOfMemory;
@@ -52,7 +52,7 @@ pub fn packU32(allocator: Allocator, numbers: []const u32) error{OutOfMemory}![]
var keys2 = try allocator.alignedAlloc([*:0]const u8, 8, numbers.len);
defer allocator.free(keys2);
for (keys, keys2) |*key_i, *key2_i|
-key2_i.* = @ptrCast([*:0]const u8, key_i);
+key2_i.* = @ptrCast(key_i);
return pack(allocator, keys2);
}

View File

@@ -86,11 +86,11 @@ pub fn putUvarint(buf: []u8, x: u64) usize {
var mutX = x;
while (mutX >= 0x80) {
-buf[i] = @truncate(u8, mutX) | 0x80;
+buf[i] = @as(u8, @truncate(mutX)) | 0x80;
mutX >>= 7;
i += 1;
}
-buf[i] = @truncate(u8, mutX);
+buf[i] = @truncate(mutX);
return i + 1;
}
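putUvarint is LEB128-style: 7 payload bits per byte, high bit set while more bytes follow. A standalone sketch with a local copy and one worked value, so it compiles on its own:

    const std = @import("std");

    fn putUvarintSketch(buf: []u8, x: u64) usize {
        var i: usize = 0;
        var rest = x;
        while (rest >= 0x80) : (i += 1) {
            buf[i] = @as(u8, @truncate(rest)) | 0x80;
            rest >>= 7;
        }
        buf[i] = @truncate(rest);
        return i + 1;
    }

    test "300 encodes into two bytes" {
        var buf: [10]u8 = undefined; // ten bytes hold any u64
        const n = putUvarintSketch(&buf, 300);
        try std.testing.expectEqual(@as(usize, 2), n);
        try std.testing.expectEqual(@as(u8, 0xAC), buf[0]); // low 7 bits of 300, plus the continuation bit
        try std.testing.expectEqual(@as(u8, 0x02), buf[1]); // remaining bits, no continuation
    }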

View File

@@ -15,7 +15,6 @@
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
const std = @import("std");
-const cstr = std.cstr;
pub const Flag = struct {
name: [*:0]const u8,
@@ -57,14 +56,14 @@ pub fn ParseResult(comptime flags: []const Flag) type {
pub fn boolFlag(self: Self, flag_name: [*:0]const u8) bool {
for (self.flag_data) |flag_data| {
-if (cstr.cmp(flag_data.name, flag_name) == 0) return flag_data.value.boolean;
+if (std.mem.orderZ(u8, flag_data.name, flag_name) == .eq) return flag_data.value.boolean;
}
unreachable; // Invalid flag_name
}
pub fn argFlag(self: Self, flag_name: [*:0]const u8) ?[:0]const u8 {
for (self.flag_data) |flag_data| {
-if (cstr.cmp(flag_data.name, flag_name) == 0) {
+if (std.mem.orderZ(u8, flag_data.name, flag_name) == .eq) {
return std.mem.span(flag_data.value.arg);
}
}
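With the std.cstr import gone, the comparisons switch to std.mem.orderZ, which compares two sentinel-terminated strings and returns an std.math.Order, so cmp(a, b) == 0 becomes orderZ(u8, a, b) == .eq. A small standalone check:

    const std = @import("std");

    test "orderZ replaces cstr.cmp" {
        const flag: [*:0]const u8 = "--verbose";
        try std.testing.expect(std.mem.orderZ(u8, flag, "--verbose") == .eq);
        try std.testing.expect(std.mem.orderZ(u8, flag, "--quiet") != .eq);
    }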
@@ -80,7 +79,7 @@ pub fn parse(args: []const [*:0]const u8, comptime flags: []const Flag) !ParseRe
while (arg_idx < args.len) : (arg_idx += 1) {
var parsed_flag = false;
inline for (flags, &ret.flag_data) |flag, *flag_data_i| {
-if (cstr.cmp(flag.name, args[arg_idx]) == 0) {
+if (std.mem.orderZ(u8, flag.name, args[arg_idx]) == .eq) {
switch (flag.kind) {
.boolean => flag_data_i.*.value.boolean = true,
.arg => {

View File

@@ -499,7 +499,7 @@ fn initgroups_dyn(
if (limit > 0 and size.* == limit)
return c.NSS_STATUS_SUCCESS;
-const oldsize = @intCast(usize, size.*);
+const oldsize: u32 = @intCast(size.*);
const newsize_want = blk: {
// double the oldsize until it is above or equal 'remaining'
var res = oldsize;
@@ -510,7 +510,7 @@ fn initgroups_dyn(
const newsize: usize = if (limit <= 0)
newsize_want
else
-@min(@intCast(usize, limit), newsize_want);
+@min(@as(usize, @intCast(limit)), newsize_want);
var buf = groupsp.*[0..oldsize];
const new_groups = state.initgroups_dyn_allocator.realloc(
@@ -520,7 +520,7 @@ fn initgroups_dyn(
if (new_groups) |newgroups| {
groupsp.* = newgroups.ptr;
-size.* = @intCast(c_long, newsize);
+size.* = @intCast(newsize);
} else |err| switch (err) {
error.OutOfMemory => {
errnop.* = @intFromEnum(os.E.NOMEM);
@@ -530,7 +530,7 @@ fn initgroups_dyn(
}
any = true;
-groupsp.*[@intCast(usize, start.*)] = @intCast(u32, gid);
+groupsp.*[@intCast(start.*)] = @intCast(gid);
start.* += 1;
}
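The surrounding code (partly elided in this hunk) grows the caller's gid buffer: double the current size until it covers the remaining entries, then clamp to limit when a positive limit is set. A standalone sketch of that policy; the names and the exact loop condition are inferred from the comment above, not copied from the file:

    const std = @import("std");

    fn grownSize(oldsize: usize, remaining: usize, limit: c_long) usize {
        // assumes oldsize >= 1; "double the oldsize until it is above or
        // equal 'remaining'", then apply the @min clamp from the hunk above
        var want = oldsize;
        while (want < remaining) want *= 2;
        return if (limit <= 0) want else @min(@as(usize, @intCast(limit)), want);
    }

    test "doubling with an optional cap" {
        try std.testing.expectEqual(@as(usize, 12), grownSize(3, 7, 0)); // no limit: 3 -> 6 -> 12
        try std.testing.expectEqual(@as(usize, 8), grownSize(3, 7, 8)); // clamped by limit
    }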
@@ -605,7 +605,7 @@ test "libnss getpwuid_r and getpwnam_r" {
c.NSS_STATUS_NOTFOUND,
getpwnam_r(&state.file.db, "does not exist", &passwd, &buf, buf.len, &errno),
);
-try testing.expectEqual(@intFromEnum(os.E.NOENT), @intCast(u16, errno));
+try testing.expectEqual(@intFromEnum(os.E.NOENT), @as(u16, @intCast(errno)));
passwd = undefined;
var small_buffer: [1]u8 = undefined;
@@ -613,7 +613,7 @@ test "libnss getpwuid_r and getpwnam_r" {
c.NSS_STATUS_TRYAGAIN,
getpwuid_r(&state.file.db, 0, &passwd, &small_buffer, small_buffer.len, &errno),
);
-try testing.expectEqual(@intFromEnum(os.E.RANGE), @intCast(u16, errno));
+try testing.expectEqual(@intFromEnum(os.E.RANGE), @as(u16, @intCast(errno)));
}
fn testVidmantas(u: CUser) !void {
@@ -657,7 +657,7 @@ test "libnss getgrgid_r and getgrnam_r" {
c.NSS_STATUS_NOTFOUND,
getgrnam_r(&state, "does not exist", &group, &buf, buf.len, &errno),
);
-try testing.expectEqual(@intFromEnum(os.E.NOENT), @intCast(u16, errno));
+try testing.expectEqual(@intFromEnum(os.E.NOENT), @as(u16, @intCast(errno)));
}
fn testSvcGroup(g: CGroup) !void {
@@ -683,7 +683,7 @@ test "libnss initgroups_dyn" {
};
var size: c_long = 3; // size of the gids array is this
-var groups = try allocator.alloc(u32, @intCast(usize, size)); // buffer too small
+var groups = try allocator.alloc(u32, @intCast(size)); // buffer too small
defer allocator.free(groups);
var errno: c_int = 42; // canary
groups[0] = 42; // canary

View File

@@ -57,7 +57,7 @@ pub const ShellWriter = struct {
) error{OutOfMemory}!ShellSections {
assert(shells.len <= max_shells);
var self = ShellSections{
-.len = @intCast(u8, shells.len),
+.len = @intCast(shells.len),
.index = BoundedArray(u16, max_shells).init(shells.len) catch unreachable,
.blob = BoundedArray(u8, (max_shells + 1) * max_shell_len).init(0) catch unreachable,
.shell2idx = StringHashMap(u8).init(allocator),
@@ -66,13 +66,13 @@ pub const ShellWriter = struct {
errdefer self.shell2idx.deinit();
for (shells.constSlice(), 0..) |shell, idx| {
-const idx8 = @intCast(u8, idx);
-const offset = @intCast(u16, self.blob.len);
+const idx8: u8 = @intCast(idx);
+const offset: u16 = @intCast(self.blob.len);
self.blob.appendSliceAssumeCapacity(shell);
try self.shell2idx.put(self.blob.constSlice()[offset..], idx8);
self.index.set(idx8, offset);
}
-self.index.appendAssumeCapacity(@intCast(u8, self.blob.len));
+self.index.appendAssumeCapacity(@intCast(self.blob.len));
return self;
}
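The extra offset appended after the loop suggests the layout: shells are concatenated into blob, index[i] is where shell i starts, and the trailing entry marks the end, so shell i spans index[i]..index[i+1]. A toy sketch of that layout (assumed and simplified; not the real BoundedArray-backed code):

    const std = @import("std");

    test "offset-delimited shell blob" {
        const blob = "/bin/sh/bin/bash";
        const index = [_]u16{ 0, 7, blob.len }; // one extra offset marks the end
        try std.testing.expectEqualStrings("/bin/sh", blob[index[0]..index[1]]);
        try std.testing.expectEqualStrings("/bin/bash", blob[index[1]..index[2]]);
    }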

View File

@@ -115,7 +115,7 @@ fn execute(
const info = Info{
.fname = db_file,
-.size_file = splitInt(@intCast(u64, file_size_bytes)).constSlice(),
+.size_file = splitInt(@as(u64, @intCast(file_size_bytes))).constSlice(),
.version = db.header.version,
.endian = @tagName(db.header.host.endian),
.ptr_size = db.header.host.ptr_size,

View File

@@ -194,7 +194,7 @@ fn printGroup(stdout: anytype, db: *const DB, g: *const PackedGroup) error{Overf
var line_writer = io.bufferedWriter(stdout);
var i: usize = 0;
while (try it.next()) |member_offset| : (i += 1) {
-const puser = try PackedUser.fromBytes(@alignCast(8, db.users[member_offset << 3 ..]));
+const puser = try PackedUser.fromBytes(@alignCast(db.users[member_offset << 3 ..]));
const name = puser.user.name();
if (i != 0)
_ = line_writer.write(",") catch return 3;

View File

@@ -131,7 +131,7 @@ fn dump_passwd(wr: anytype, num_users: u64) error{IO}!void {
const gecos = fmt.bufPrint(buf_gecos[0..], "User {d}", .{i}) catch unreachable;
const home = fmt.bufPrint(buf_home[0..], "/home/{s}", .{name}) catch unreachable;
const user = User{
-.uid = @intCast(u32, i),
+.uid = @intCast(i),
.gid = 1000000,
.name = name,
.gecos = gecos,