Skip to content

Commit b45387f

Browse files
committed
coff: write base relocations for the dynamic linker
This means we can enable ASLR by default, as other COFF linkers do. Currently, we write the base relocations in bulk; however, given that PE/COFF already has a padding mechanism in place, I believe there is room for making this an incremental operation (writing a base relocation whenever we add or update a pointer that requires one).
1 parent 4ba0ad2 commit b45387f

File tree

2 files changed

+154
-8
lines changed

2 files changed

+154
-8
lines changed

src/link/Coff.zig

Lines changed: 142 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,7 @@ text_section_index: ?u16 = null,
5050
got_section_index: ?u16 = null,
5151
rdata_section_index: ?u16 = null,
5252
data_section_index: ?u16 = null,
53+
reloc_section_index: ?u16 = null,
5354

5455
locals: std.ArrayListUnmanaged(coff.Symbol) = .{},
5556
globals: std.StringArrayHashMapUnmanaged(SymbolWithLoc) = .{},
@@ -98,11 +99,16 @@ atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
9899
/// with `Decl` `main`, and lives as long as that `Decl`.
99100
unnamed_const_atoms: UnnamedConstTable = .{},
100101

101-
/// A table of relocations indexed by the owning them `TextBlock`.
102-
/// Note that once we refactor `TextBlock`'s lifetime and ownership rules,
102+
/// A table of relocations indexed by the `Atom` that owns them.
103+
/// Note that once we refactor `Atom`'s lifetime and ownership rules,
103104
/// this will be a table indexed by index into the list of Atoms.
104105
relocs: RelocTable = .{},
105106

107+
/// A table of base relocations indexed by the `Atom` that owns them.
108+
/// Note that once we refactor `Atom`'s lifetime and ownership rules,
109+
/// this will be a table indexed by index into the list of Atoms.
110+
base_relocs: BaseRelocationTable = .{},
111+
106112
pub const Reloc = struct {
107113
@"type": enum {
108114
got,
@@ -117,6 +123,7 @@ pub const Reloc = struct {
117123
};
118124

119125
const RelocTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Reloc));
126+
const BaseRelocationTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(u32));
120127
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom));
121128

122129
const default_file_alignment: u16 = 0x200;
@@ -150,7 +157,17 @@ const Section = struct {
150157
free_list: std.ArrayListUnmanaged(*Atom) = .{},
151158
};
152159

153-
pub const PtrWidth = enum { p32, p64 };
160+
pub const PtrWidth = enum {
161+
p32,
162+
p64,
163+
164+
fn abiSize(pw: PtrWidth) u4 {
165+
return switch (pw) {
166+
.p32 => 4,
167+
.p64 => 8,
168+
};
169+
}
170+
};
154171
pub const SrcFn = void;
155172

156173
pub const Export = struct {
@@ -274,6 +291,14 @@ pub fn deinit(self: *Coff) void {
274291
}
275292
self.relocs.deinit(gpa);
276293
}
294+
295+
{
296+
var it = self.base_relocs.valueIterator();
297+
while (it.next()) |relocs| {
298+
relocs.deinit(gpa);
299+
}
300+
self.base_relocs.deinit(gpa);
301+
}
277302
}
278303

279304
fn populateMissingMetadata(self: *Coff) !void {
@@ -307,7 +332,7 @@ fn populateMissingMetadata(self: *Coff) !void {
307332

308333
if (self.got_section_index == null) {
309334
self.got_section_index = @intCast(u16, self.sections.slice().len);
310-
const file_size = @intCast(u32, self.base.options.symbol_count_hint);
335+
const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.abiSize();
311336
const off = self.findFreeSpace(file_size, self.page_size);
312337
log.debug("found .got free space 0x{x} to 0x{x}", .{ off, off + file_size });
313338
var header = coff.SectionHeader{
@@ -378,6 +403,31 @@ fn populateMissingMetadata(self: *Coff) !void {
378403
try self.sections.append(gpa, .{ .header = header });
379404
}
380405

406+
if (self.reloc_section_index == null) {
407+
self.reloc_section_index = @intCast(u16, self.sections.slice().len);
408+
const file_size = @intCast(u32, self.base.options.symbol_count_hint) * @sizeOf(coff.BaseRelocation);
409+
const off = self.findFreeSpace(file_size, self.page_size);
410+
log.debug("found .reloc free space 0x{x} to 0x{x}", .{ off, off + file_size });
411+
var header = coff.SectionHeader{
412+
.name = undefined,
413+
.virtual_size = file_size,
414+
.virtual_address = off,
415+
.size_of_raw_data = file_size,
416+
.pointer_to_raw_data = off,
417+
.pointer_to_relocations = 0,
418+
.pointer_to_linenumbers = 0,
419+
.number_of_relocations = 0,
420+
.number_of_linenumbers = 0,
421+
.flags = .{
422+
.CNT_INITIALIZED_DATA = 1,
423+
.MEM_PURGEABLE = 1,
424+
.MEM_READ = 1,
425+
},
426+
};
427+
try self.setSectionName(&header, ".reloc");
428+
try self.sections.append(gpa, .{ .header = header });
429+
}
430+
381431
if (self.strtab_offset == null) {
382432
try self.strtab.buffer.append(gpa, 0);
383433
self.strtab_offset = self.findFreeSpace(@intCast(u32, self.strtab.len()), 1);
@@ -605,6 +655,14 @@ fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
605655
.prev_vaddr = sym.value,
606656
});
607657

658+
const target_sym = self.getSymbol(target);
659+
switch (target_sym.section_number) {
660+
.UNDEFINED => @panic("TODO generate a binding for undefined GOT target"),
661+
.ABSOLUTE => {},
662+
.DEBUG => unreachable, // not possible
663+
else => try atom.addBaseRelocation(self, 0),
664+
}
665+
608666
return atom;
609667
}
610668

@@ -1179,6 +1237,7 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
11791237
try self.resolveRelocs(atom.*);
11801238
}
11811239
}
1240+
try self.writeBaseRelocations();
11821241

11831242
if (self.getEntryPoint()) |entry_sym_loc| {
11841243
self.entry_addr = self.getSymbol(entry_sym_loc).value;
@@ -1216,6 +1275,83 @@ pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !v
12161275
log.debug("TODO implement updateDeclLineNumber", .{});
12171276
}
12181277

1278+
/// TODO: note if we need to rewrite base relocations by dirtying any of the entries in the global table
1279+
/// TODO: note that .ABSOLUTE is used as padding within each block; we could use this fact to do
1280+
/// incremental updates and writes into the table instead of doing it all at once
1281+
fn writeBaseRelocations(self: *Coff) !void {
1282+
const gpa = self.base.allocator;
1283+
1284+
var pages = std.AutoHashMap(u32, std.ArrayList(coff.BaseRelocation)).init(gpa);
1285+
defer {
1286+
var it = pages.valueIterator();
1287+
while (it.next()) |inner| {
1288+
inner.deinit();
1289+
}
1290+
pages.deinit();
1291+
}
1292+
1293+
var it = self.base_relocs.iterator();
1294+
while (it.next()) |entry| {
1295+
const atom = entry.key_ptr.*;
1296+
const offsets = entry.value_ptr.*;
1297+
1298+
for (offsets.items) |offset| {
1299+
const sym = atom.getSymbol(self);
1300+
const rva = sym.value + offset;
1301+
const page = mem.alignBackwardGeneric(u32, rva, self.page_size);
1302+
const gop = try pages.getOrPut(page);
1303+
if (!gop.found_existing) {
1304+
gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
1305+
}
1306+
try gop.value_ptr.append(.{
1307+
.offset = @intCast(u12, rva - page),
1308+
.@"type" = .DIR64,
1309+
});
1310+
}
1311+
}
1312+
1313+
var buffer = std.ArrayList(u8).init(gpa);
1314+
defer buffer.deinit();
1315+
1316+
var pages_it = pages.iterator();
1317+
while (pages_it.next()) |entry| {
1318+
// Pad to the required 4-byte alignment
1319+
if (!mem.isAlignedGeneric(
1320+
usize,
1321+
entry.value_ptr.items.len * @sizeOf(coff.BaseRelocation),
1322+
@sizeOf(u32),
1323+
)) {
1324+
try entry.value_ptr.append(.{
1325+
.offset = 0,
1326+
.@"type" = .ABSOLUTE,
1327+
});
1328+
}
1329+
1330+
const block_size = @intCast(
1331+
u32,
1332+
entry.value_ptr.items.len * @sizeOf(coff.BaseRelocation) + @sizeOf(coff.BaseRelocationDirectoryEntry),
1333+
);
1334+
try buffer.ensureUnusedCapacity(block_size);
1335+
buffer.appendSliceAssumeCapacity(mem.asBytes(&coff.BaseRelocationDirectoryEntry{
1336+
.page_rva = entry.key_ptr.*,
1337+
.block_size = block_size,
1338+
}));
1339+
buffer.appendSliceAssumeCapacity(mem.sliceAsBytes(entry.value_ptr.items));
1340+
}
1341+
1342+
const header = &self.sections.items(.header)[self.reloc_section_index.?];
1343+
const sect_capacity = self.allocatedSize(header.pointer_to_raw_data);
1344+
const needed_size = @intCast(u32, buffer.items.len);
1345+
assert(needed_size < sect_capacity); // TODO expand .reloc section
1346+
1347+
try self.base.file.?.pwriteAll(buffer.items, header.pointer_to_raw_data);
1348+
1349+
self.data_directories[@enumToInt(coff.DirectoryEntry.BASERELOC)] = .{
1350+
.virtual_address = header.virtual_address,
1351+
.size = needed_size,
1352+
};
1353+
}
1354+
12191355
fn writeStrtab(self: *Coff) !void {
12201356
const allocated_size = self.allocatedSize(self.strtab_offset.?);
12211357
const needed_size = @intCast(u32, self.strtab.len());
@@ -1277,8 +1413,8 @@ fn writeHeader(self: *Coff) !void {
12771413
writer.writeAll(mem.asBytes(&coff_header)) catch unreachable;
12781414

12791415
const dll_flags: coff.DllFlags = .{
1280-
.HIGH_ENTROPY_VA = 0, //@boolToInt(self.base.options.pie),
1281-
.DYNAMIC_BASE = 0,
1416+
.HIGH_ENTROPY_VA = 1, // TODO do we want to permit non-PIE builds at all?
1417+
.DYNAMIC_BASE = 1,
12821418
.TERMINAL_SERVER_AWARE = 1, // We are not a legacy app
12831419
.NX_COMPAT = 1, // We are compatible with Data Execution Prevention
12841420
};

src/link/Coff/Atom.zig

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ const Atom = @This();
22

33
const std = @import("std");
44
const coff = std.coff;
5+
const log = std.log.scoped(.link);
56

67
const Allocator = std.mem.Allocator;
78

@@ -100,11 +101,20 @@ pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
100101

101102
pub fn addRelocation(self: *Atom, coff_file: *Coff, reloc: Reloc) !void {
102103
const gpa = coff_file.base.allocator;
103-
// TODO causes a segfault on Windows
104-
// log.debug("adding reloc of type {s} to target %{d}", .{ @tagName(reloc.@"type"), reloc.target.sym_index });
104+
log.debug(" (adding reloc of type {s} to target %{d})", .{ @tagName(reloc.@"type"), reloc.target.sym_index });
105105
const gop = try coff_file.relocs.getOrPut(gpa, self);
106106
if (!gop.found_existing) {
107107
gop.value_ptr.* = .{};
108108
}
109109
try gop.value_ptr.append(gpa, reloc);
110110
}
111+
112+
pub fn addBaseRelocation(self: *Atom, coff_file: *Coff, offset: u32) !void {
113+
const gpa = coff_file.base.allocator;
114+
log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{ offset, self.sym_index });
115+
const gop = try coff_file.base_relocs.getOrPut(gpa, self);
116+
if (!gop.found_existing) {
117+
gop.value_ptr.* = .{};
118+
}
119+
try gop.value_ptr.append(gpa, offset);
120+
}

0 commit comments

Comments
 (0)