x86/vdso: Error out if the vDSO isn't a valid DSO
arch/x86/entry/vdso/vdso2c.h
/*
 * This file is included twice from vdso2c.c.  It generates code for 32-bit
 * and 64-bit vDSOs.  We need both for 64-bit builds, since 32-bit vDSOs
 * are built for 32-bit userspace.
 */
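
/*
 * A rough sketch (from memory -- see vdso2c.c for the authoritative
 * definitions) of how the double inclusion works: vdso2c.c defines the
 * ELF()/BITSFUNC()/INT_BITS/GET_LE() helpers in terms of ELF_BITS and then
 * does something like
 *
 *      #define ELF_BITS 64
 *      #include "vdso2c.h"
 *      #undef ELF_BITS
 *
 *      #define ELF_BITS 32
 *      #include "vdso2c.h"
 *      #undef ELF_BITS
 *
 * so each inclusion instantiates BITSFUNC(go) as a separate go64()/go32()
 * function operating on the corresponding Elf64 or Elf32 types.
 */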

static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
                         void *stripped_addr, size_t stripped_len,
                         FILE *outfile, const char *name)
{
        int found_load = 0;
        unsigned long load_size = -1;  /* Work around bogus warning */
        unsigned long mapping_size;
        ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
        int i;
        unsigned long j;
        ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
                *alt_sec = NULL;
        ELF(Dyn) *dyn = 0, *dyn_end = 0;
        const char *secstrings;
        INT_BITS syms[NSYMS] = {};

        ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));
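        /*
         * GET_LE() reads target fields in little-endian byte order, so the
         * tool produces the same result regardless of the host's endianness.
         */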

        if (hdr->e_type != ET_DYN)
                fail("input is not a shared object\n");

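        /*
         * The vDSO has to be a "flat" image: a single PT_LOAD segment that
         * starts at file offset 0 and vaddr 0 and whose file size equals its
         * memory size, so the stripped file can be embedded byte-for-byte
         * and mapped one-to-one.  The walk below enforces exactly that and
         * also records the PT_DYNAMIC segment for the relocation check
         * further down.
         */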
        /* Walk the segment table. */
        for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
                if (GET_LE(&pt[i].p_type) == PT_LOAD) {
                        if (found_load)
                                fail("multiple PT_LOAD segs\n");

                        if (GET_LE(&pt[i].p_offset) != 0 ||
                            GET_LE(&pt[i].p_vaddr) != 0)
                                fail("PT_LOAD in wrong place\n");

                        if (GET_LE(&pt[i].p_memsz) != GET_LE(&pt[i].p_filesz))
                                fail("cannot handle memsz != filesz\n");

                        load_size = GET_LE(&pt[i].p_memsz);
                        found_load = 1;
                } else if (GET_LE(&pt[i].p_type) == PT_DYNAMIC) {
                        dyn = raw_addr + GET_LE(&pt[i].p_offset);
                        dyn_end = raw_addr + GET_LE(&pt[i].p_offset) +
                                GET_LE(&pt[i].p_memsz);
                }
        }
        if (!found_load)
                fail("no PT_LOAD seg\n");

        if (stripped_len < load_size)
                fail("stripped input is too short\n");

        if (!dyn)
                fail("input has no PT_DYNAMIC section -- your toolchain is buggy\n");

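        /*
         * Nothing ever applies relocations to the vDSO; the kernel simply
         * copies the image into each process.  Any DT_REL/DT_RELA/DT_TEXTREL
         * entry would be left unprocessed at runtime, so reject such images
         * outright.
         */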
        /* Walk the dynamic table */
        for (i = 0; dyn + i < dyn_end &&
                     GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
                typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
                if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
                    tag == DT_RELENT || tag == DT_TEXTREL)
                        fail("vdso image contains dynamic relocations\n");
        }

        /* Walk the section table */
        secstrings_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
                GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx);
        secstrings = raw_addr + GET_LE(&secstrings_hdr->sh_offset);
        for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
                ELF(Shdr) *sh = raw_addr + GET_LE(&hdr->e_shoff) +
                        GET_LE(&hdr->e_shentsize) * i;
                if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
                        symtab_hdr = sh;

                if (!strcmp(secstrings + GET_LE(&sh->sh_name),
                            ".altinstructions"))
                        alt_sec = sh;
        }

        if (!symtab_hdr)
                fail("no symbol table\n");

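        /*
         * For an SHT_SYMTAB section, sh_link holds the section header index
         * of its associated string table.
         */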
        strtab_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
                GET_LE(&hdr->e_shentsize) * GET_LE(&symtab_hdr->sh_link);

        /* Walk the symbol table */
        for (i = 0;
             i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
             i++) {
                int k;
                ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) +
                        GET_LE(&symtab_hdr->sh_entsize) * i;
                const char *name = raw_addr + GET_LE(&strtab_hdr->sh_offset) +
                        GET_LE(&sym->st_name);

                for (k = 0; k < NSYMS; k++) {
                        if (!strcmp(name, required_syms[k].name)) {
                                if (syms[k]) {
                                        fail("duplicate symbol %s\n",
                                             required_syms[k].name);
                                }

                                /*
                                 * Careful: we use negative addresses, but
                                 * st_value is unsigned, so we rely
                                 * on syms[k] being a signed type of the
                                 * correct width.
                                 */
                                syms[k] = GET_LE(&sym->st_value);
                        }
                }
        }
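
        /*
         * Illustrative only: the vvar area is linked below the vDSO text, so
         * with, say, a three-page vvar area the symbols come out as
         * vvar_start = -12288 while the vDSO text itself begins at 0.
         * Keeping syms[] signed preserves those negative values.
         */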

        /* Validate mapping addresses. */
        for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
                INT_BITS symval = syms[special_pages[i]];

                if (!symval)
                        continue;  /* The mapping isn't used; ignore it. */

                if (symval % 4096)
                        fail("%s must be a multiple of 4096\n",
                             required_syms[i].name);
                if (symval + 4096 < syms[sym_vvar_start])
                        fail("%s underruns vvar_start\n",
                             required_syms[i].name);
                if (symval + 4096 > 0)
                        fail("%s is on the wrong side of the vdso text\n",
                             required_syms[i].name);
        }
        if (syms[sym_vvar_start] % 4096)
                fail("vvar_begin must be a multiple of 4096\n");

        if (!name) {
                fwrite(stripped_addr, stripped_len, 1, outfile);
                return;
        }

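        /*
         * The generated file has roughly this shape (the image name and the
         * sizes below are made up for illustration):
         *
         *      static unsigned char raw_data[8192] __ro_after_init
         *              __aligned(PAGE_SIZE) = { 0x7F, 0x45, 0x4C, 0x46, ... };
         *
         *      const struct vdso_image vdso_image_64 = {
         *              .data = raw_data,
         *              .size = 8192,
         *              .sym_vvar_start = -12288,
         *      };
         */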
        mapping_size = (stripped_len + 4095) / 4096 * 4096;

        fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
        fprintf(outfile, "#include <linux/linkage.h>\n");
        fprintf(outfile, "#include <asm/page_types.h>\n");
        fprintf(outfile, "#include <asm/vdso.h>\n");
        fprintf(outfile, "\n");
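        /*
         * raw_data is declared mapping_size bytes long, but only stripped_len
         * bytes are emitted below; the remaining initializers default to zero,
         * so the tail of the last page ends up as zero padding.
         */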
        fprintf(outfile,
                "static unsigned char raw_data[%lu] __ro_after_init __aligned(PAGE_SIZE) = {",
                mapping_size);
        for (j = 0; j < stripped_len; j++) {
                if (j % 10 == 0)
                        fprintf(outfile, "\n\t");
                fprintf(outfile, "0x%02X, ",
                        (int)((unsigned char *)stripped_addr)[j]);
        }
        fprintf(outfile, "\n};\n\n");

        fprintf(outfile, "const struct vdso_image %s = {\n", name);
        fprintf(outfile, "\t.data = raw_data,\n");
        fprintf(outfile, "\t.size = %lu,\n", mapping_size);
        if (alt_sec) {
                fprintf(outfile, "\t.alt = %lu,\n",
                        (unsigned long)GET_LE(&alt_sec->sh_offset));
                fprintf(outfile, "\t.alt_len = %lu,\n",
                        (unsigned long)GET_LE(&alt_sec->sh_size));
        }
        for (i = 0; i < NSYMS; i++) {
                if (required_syms[i].export && syms[i])
                        fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
                                required_syms[i].name, (int64_t)syms[i]);
        }
        fprintf(outfile, "};\n");
}