35 | 35 |
36 | 36 | #include <linux/uprobes.h>
37 | 37 |
| 38 | +#define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
| 39 | +#define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE
| 40 | +
38 | 41 | static struct srcu_struct uprobes_srcu;
39 | 42 | static struct rb_root uprobes_tree = RB_ROOT;
40 | 43 |
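Aside, not part of the patch: UINSNS_PER_PAGE is the number of out-of-line slots that fit in the single per-mm XOL page. UPROBE_XOL_SLOT_BYTES is arch-defined; assuming 4 KiB pages and a 128-byte slot (the x86 value, an assumption here rather than something this hunk states), the arithmetic is:

	/* illustration with assumed values, not from this hunk */
	UINSNS_PER_PAGE = PAGE_SIZE / UPROBE_XOL_SLOT_BYTES
	                = 4096 / 128
	                = 32 slots per process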
@@ -1042,6 +1045,213 @@ int uprobe_mmap(struct vm_area_struct *vma)
1042 | 1045 | 	return ret;
1043 | 1046 | }
1044 | 1047 |
| 1048 | +/* Slot allocation for XOL */
| 1049 | +static int xol_add_vma(struct xol_area *area)
| 1050 | +{
| 1051 | +	struct mm_struct *mm;
| 1052 | +	int ret;
| 1053 | +
| 1054 | +	area->page = alloc_page(GFP_HIGHUSER);
| 1055 | +	if (!area->page)
| 1056 | +		return -ENOMEM;
| 1057 | +
| 1058 | +	ret = -EALREADY;
| 1059 | +	mm = current->mm;
| 1060 | +
| 1061 | +	down_write(&mm->mmap_sem);
| 1062 | +	if (mm->uprobes_state.xol_area)
| 1063 | +		goto fail;
| 1064 | +
| 1065 | +	ret = -ENOMEM;
| 1066 | +
| 1067 | +	/* Try to map as high as possible, this is only a hint. */
| 1068 | +	area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
| 1069 | +	if (area->vaddr & ~PAGE_MASK) {
| 1070 | +		ret = area->vaddr;
| 1071 | +		goto fail;
| 1072 | +	}
| 1073 | +
| 1074 | +	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
| 1075 | +				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
| 1076 | +	if (ret)
| 1077 | +		goto fail;
| 1078 | +
| 1079 | +	smp_wmb();	/* pairs with get_xol_area() */
| 1080 | +	mm->uprobes_state.xol_area = area;
| 1081 | +	ret = 0;
| 1082 | +
| 1083 | +fail:
| 1084 | +	up_write(&mm->mmap_sem);
| 1085 | +	if (ret)
| 1086 | +		__free_page(area->page);
| 1087 | +
| 1088 | +	return ret;
| 1089 | +}
| 1090 | +
| 1091 | +static struct xol_area *get_xol_area(struct mm_struct *mm)
| 1092 | +{
| 1093 | +	struct xol_area *area;
| 1094 | +
| 1095 | +	area = mm->uprobes_state.xol_area;
| 1096 | +	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
| 1097 | +
| 1098 | +	return area;
| 1099 | +}
| 1100 | +
| 1101 | +/*
| 1102 | + * xol_alloc_area - Allocate the process's xol_area.
| 1103 | + * This area holds the instruction slots used for executing probed
| 1104 | + * instructions out of line.
| 1105 | + *
| 1106 | + * Returns the allocated area or NULL.
| 1107 | + */
| 1108 | +static struct xol_area *xol_alloc_area(void)
| 1109 | +{
| 1110 | +	struct xol_area *area;
| 1111 | +
| 1112 | +	area = kzalloc(sizeof(*area), GFP_KERNEL);
| 1113 | +	if (unlikely(!area))
| 1114 | +		return NULL;
| 1115 | +
| 1116 | +	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
| 1117 | +
| 1118 | +	if (!area->bitmap)
| 1119 | +		goto fail;
| 1120 | +
| 1121 | +	init_waitqueue_head(&area->wq);
| 1122 | +	if (!xol_add_vma(area))
| 1123 | +		return area;
| 1124 | +
| 1125 | +fail:
| 1126 | +	kfree(area->bitmap);
| 1127 | +	kfree(area);
| 1128 | +
| 1129 | +	return get_xol_area(current->mm);
| 1130 | +}
| 1131 | +
| 1132 | +/*
| 1133 | + * uprobe_clear_state - Free the area allocated for slots.
| 1134 | + */
| 1135 | +void uprobe_clear_state(struct mm_struct *mm)
| 1136 | +{
| 1137 | +	struct xol_area *area = mm->uprobes_state.xol_area;
| 1138 | +
| 1139 | +	if (!area)
| 1140 | +		return;
| 1141 | +
| 1142 | +	put_page(area->page);
| 1143 | +	kfree(area->bitmap);
| 1144 | +	kfree(area);
| 1145 | +}
| 1146 | +
| 1147 | +/*
| 1148 | + * uprobe_reset_state - Clear the xol_area pointer for a new mm; the XOL vma itself is not inherited (it is VM_DONTCOPY).
| 1149 | + */
| 1150 | +void uprobe_reset_state(struct mm_struct *mm)
| 1151 | +{
| 1152 | +	mm->uprobes_state.xol_area = NULL;
| 1153 | +}
| 1154 | +
| 1155 | +/*
| 1156 | + * xol_take_insn_slot - search for a free slot, sleeping until one frees up if the page is full.
| 1157 | + */
| 1158 | +static unsigned long xol_take_insn_slot(struct xol_area *area)
| 1159 | +{
| 1160 | +	unsigned long slot_addr;
| 1161 | +	int slot_nr;
| 1162 | +
| 1163 | +	do {
| 1164 | +		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
| 1165 | +		if (slot_nr < UINSNS_PER_PAGE) {
| 1166 | +			if (!test_and_set_bit(slot_nr, area->bitmap))
| 1167 | +				break;
| 1168 | +
| 1169 | +			slot_nr = UINSNS_PER_PAGE;
| 1170 | +			continue;
| 1171 | +		}
| 1172 | +		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
| 1173 | +	} while (slot_nr >= UINSNS_PER_PAGE);
| 1174 | +
| 1175 | +	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
| 1176 | +	atomic_inc(&area->slot_count);
| 1177 | +
| 1178 | +	return slot_addr;
| 1179 | +}
| 1180 | +
| 1181 | +/*
| 1182 | + * xol_get_insn_slot - Allocate an instruction slot for the current task
| 1183 | + * and copy the probed instruction into it.
| 1184 | + * Returns the allocated slot address or 0.
| 1185 | + */
| 1186 | +static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr)
| 1187 | +{
| 1188 | +	struct xol_area *area;
| 1189 | +	unsigned long offset;
| 1190 | +	void *vaddr;
| 1191 | +
| 1192 | +	area = get_xol_area(current->mm);
| 1193 | +	if (!area) {
| 1194 | +		area = xol_alloc_area();
| 1195 | +		if (!area)
| 1196 | +			return 0;
| 1197 | +	}
| 1198 | +	current->utask->xol_vaddr = xol_take_insn_slot(area);
| 1199 | +
| 1200 | +	/*
| 1201 | +	 * Initialize the slot only if xol_vaddr points to a valid
| 1202 | +	 * instruction slot.
| 1203 | +	 */
| 1204 | +	if (unlikely(!current->utask->xol_vaddr))
| 1205 | +		return 0;
| 1206 | +
| 1207 | +	current->utask->vaddr = slot_addr;
| 1208 | +	offset = current->utask->xol_vaddr & ~PAGE_MASK;
| 1209 | +	vaddr = kmap_atomic(area->page);
| 1210 | +	memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
| 1211 | +	kunmap_atomic(vaddr);
| 1212 | +
| 1213 | +	return current->utask->xol_vaddr;
| 1214 | +}
| 1215 | +
| 1216 | +/*
| 1217 | + * xol_free_insn_slot - If a slot was earlier allocated by
| 1218 | + * xol_get_insn_slot(), make it available again for
| 1219 | + * subsequent requests.
| 1220 | + */
| 1221 | +static void xol_free_insn_slot(struct task_struct *tsk)
| 1222 | +{
| 1223 | +	struct xol_area *area;
| 1224 | +	unsigned long vma_end;
| 1225 | +	unsigned long slot_addr;
| 1226 | +
| 1227 | +	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
| 1228 | +		return;
| 1229 | +
| 1230 | +	slot_addr = tsk->utask->xol_vaddr;
| 1231 | +
| 1232 | +	if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
| 1233 | +		return;
| 1234 | +
| 1235 | +	area = tsk->mm->uprobes_state.xol_area;
| 1236 | +	vma_end = area->vaddr + PAGE_SIZE;
| 1237 | +	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
| 1238 | +		unsigned long offset;
| 1239 | +		int slot_nr;
| 1240 | +
| 1241 | +		offset = slot_addr - area->vaddr;
| 1242 | +		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
| 1243 | +		if (slot_nr >= UINSNS_PER_PAGE)
| 1244 | +			return;
| 1245 | +
| 1246 | +		clear_bit(slot_nr, area->bitmap);
| 1247 | +		atomic_dec(&area->slot_count);
| 1248 | +		if (waitqueue_active(&area->wq))
| 1249 | +			wake_up(&area->wq);
| 1250 | +
| 1251 | +		tsk->utask->xol_vaddr = 0;
| 1252 | +	}
| 1253 | +}
| 1254 | +
1045 | 1255 | /**
1046 | 1256 |  * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1047 | 1257 |  * @regs: Reflects the saved state of the task after it has hit a breakpoint
@@ -1070,6 +1280,7 @@ void uprobe_free_utask(struct task_struct *t)
1070 | 1280 | 	if (utask->active_uprobe)
1071 | 1281 | 		put_uprobe(utask->active_uprobe);
1072 | 1282 |
| 1283 | +	xol_free_insn_slot(t);
1073 | 1284 | 	kfree(utask);
1074 | 1285 | 	t->utask = NULL;
1075 | 1286 | }
@@ -1108,6 +1319,9 @@ static struct uprobe_task *add_utask(void)
1108 | 1319 | static int
1109 | 1320 | pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
1110 | 1321 | {
| 1322 | +	if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs))
| 1323 | +		return 0;
| 1324 | +
1111 | 1325 | 	return -EFAULT;
1112 | 1326 | }
1113 | 1327 |
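For orientation: pre_ssout() returns 0 only when a slot was obtained and the arch hook accepted the probe. All arch_uprobe_pre_xol() has to arrange is redirection to the slot that xol_get_insn_slot() just filled; a minimal sketch under x86 assumptions (illustrative only, not the in-tree x86 implementation):

	/* Sketch only: make the upcoming single-step execute the copied
	 * instruction in the XOL slot instead of the original address.
	 */
	int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
	{
		regs->ip = current->utask->xol_vaddr;	/* x86 program counter */
		return 0;	/* non-zero makes pre_ssout() return -EFAULT */
	}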
@@ -1252,6 +1466,7 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
1252 | 1466 | 	utask->active_uprobe = NULL;
1253 | 1467 | 	utask->state = UTASK_RUNNING;
1254 | 1468 | 	user_disable_single_step(current);
| 1469 | +	xol_free_insn_slot(current);
1255 | 1470 |
1256 | 1471 | 	spin_lock_irq(&current->sighand->siglock);
1257 | 1472 | 	recalc_sigpending(); /* see uprobe_deny_signal() */
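Taken together, the slot lifecycle for one probe hit, distilled from the functions above (a flow sketch, not code from the patch):

	/* breakpoint trap */
	pre_ssout()
	  -> xol_get_insn_slot()	/* take a free slot, copy uprobe->arch.insn into it */
	  -> arch_uprobe_pre_xol()	/* redirect the task to the slot */
	/* ... task single-steps the copy out of line ... */
	/* single-step trap */
	handle_singlestep()
	  -> xol_free_insn_slot()	/* clear_bit(), atomic_dec(), wake waiters on area->wq */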