Lines Matching refs:insn

23  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
31 * The second pass is all possible path descent from the 1st insn.
33 * analysis is limited to 32k insn, which may be hit even if total number of
34 * insn is less than 4K, but there are too many branches that change stack/regs.
54 * 1st insn copies R10 (which has FRAME_PTR) type into R1
57 * So after 2nd insn, the register R1 has type PTR_TO_STACK
103 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
104 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
105 * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
114 * If it's ok, then verifier allows this BPF_CALL insn and looks at
120 * insn, the register holding that pointer in the true branch changes state to
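A sketch of the complete pattern that header comment describes, assembled with the instruction-building macros from the kernel's include/linux/filter.h (BPF_MOV64_REG() and friends). It assumes a hypothetical map_fd for a map with 4-byte keys and at least 8-byte values; the register numbers and jump offset are only illustrative.

    struct bpf_insn prog[] = {
        BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),         /* init fp-4 so the key bytes are readable */
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),         /* r2 type: FRAME_PTR */
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),        /* r2 type: PTR_TO_STACK (fp-4) */
        BPF_LD_MAP_FD(BPF_REG_1, map_fd),             /* r1 type: CONST_PTR_TO_MAP */
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),        /* r0 type: PTR_TO_MAP_VALUE_OR_NULL */
        BPF_MOV64_IMM(BPF_REG_1, 1),
        BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), /* fallthrough only: r0 is PTR_TO_MAP_VALUE */
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };

On the taken branch of the JEQ the verifier instead records r0 as CONST_IMM 0, so any dereference there would be rejected.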
318 static void print_bpf_insn(struct bpf_insn *insn)
320 u8 class = BPF_CLASS(insn->code);
323 if (BPF_SRC(insn->code) == BPF_X)
325 insn->code, class == BPF_ALU ? "(u32) " : "",
326 insn->dst_reg,
327 bpf_alu_string[BPF_OP(insn->code) >> 4],
329 insn->src_reg);
332 insn->code, class == BPF_ALU ? "(u32) " : "",
333 insn->dst_reg,
334 bpf_alu_string[BPF_OP(insn->code) >> 4],
336 insn->imm);
338 if (BPF_MODE(insn->code) == BPF_MEM)
340 insn->code,
341 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
342 insn->dst_reg,
343 insn->off, insn->src_reg);
344 else if (BPF_MODE(insn->code) == BPF_XADD)
346 insn->code,
347 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
348 insn->dst_reg, insn->off,
349 insn->src_reg);
351 verbose("BUG_%02x\n", insn->code);
353 if (BPF_MODE(insn->code) != BPF_MEM) {
354 verbose("BUG_st_%02x\n", insn->code);
358 insn->code,
359 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
360 insn->dst_reg,
361 insn->off, insn->imm);
363 if (BPF_MODE(insn->code) != BPF_MEM) {
364 verbose("BUG_ldx_%02x\n", insn->code);
368 insn->code, insn->dst_reg,
369 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
370 insn->src_reg, insn->off);
372 if (BPF_MODE(insn->code) == BPF_ABS) {
374 insn->code,
375 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
376 insn->imm);
377 } else if (BPF_MODE(insn->code) == BPF_IND) {
379 insn->code,
380 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
381 insn->src_reg, insn->imm);
382 } else if (BPF_MODE(insn->code) == BPF_IMM) {
384 insn->code, insn->dst_reg, insn->imm);
386 verbose("BUG_ld_%02x\n", insn->code);
390 u8 opcode = BPF_OP(insn->code);
393 verbose("(%02x) call %d\n", insn->code, insn->imm);
394 } else if (insn->code == (BPF_JMP | BPF_JA)) {
396 insn->code, insn->off);
397 } else if (insn->code == (BPF_JMP | BPF_EXIT)) {
398 verbose("(%02x) exit\n", insn->code);
399 } else if (BPF_SRC(insn->code) == BPF_X) {
401 insn->code, insn->dst_reg,
402 bpf_jmp_string[BPF_OP(insn->code) >> 4],
403 insn->src_reg, insn->off);
406 insn->code, insn->dst_reg,
407 bpf_jmp_string[BPF_OP(insn->code) >> 4],
408 insn->imm, insn->off);
411 verbose("(%02x) %s\n", insn->code, bpf_class_string[class]);
695 static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
700 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
701 insn->imm != 0) {
707 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
712 err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
717 err = check_mem_access(env, insn->dst_reg, insn->off,
718 BPF_SIZE(insn->code), BPF_READ, -1);
723 return check_mem_access(env, insn->dst_reg, insn->off,
724 BPF_SIZE(insn->code), BPF_WRITE, -1);
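A hedged illustration of the encodings this check cares about, using the uapi struct bpf_insn directly; whether the destination register actually holds a writable pointer (PTR_TO_MAP_VALUE etc.) is decided by the two check_mem_access() calls, not shown here.

    #include <linux/bpf.h>

    /* Accepted shape: word/double-word exclusive add with imm == 0. */
    static struct bpf_insn xadd_ok = {
        .code = BPF_STX | BPF_XADD | BPF_DW,
        .dst_reg = BPF_REG_0, .src_reg = BPF_REG_1, .off = 0, .imm = 0,
    };

    /* Rejected: BPF_H is neither BPF_W nor BPF_DW, so check_xadd() errors out. */
    static struct bpf_insn xadd_bad = {
        .code = BPF_STX | BPF_XADD | BPF_H,
        .dst_reg = BPF_REG_0, .src_reg = BPF_REG_1,
    };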
917 static int check_alu_op(struct reg_state *regs, struct bpf_insn *insn)
919 u8 opcode = BPF_OP(insn->code);
924 if (BPF_SRC(insn->code) != 0 ||
925 insn->src_reg != BPF_REG_0 ||
926 insn->off != 0 || insn->imm != 0) {
931 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
932 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
939 err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
944 err = check_reg_arg(regs, insn->dst_reg, DST_OP);
950 if (BPF_SRC(insn->code) == BPF_X) {
951 if (insn->imm != 0 || insn->off != 0) {
957 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
961 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
968 err = check_reg_arg(regs, insn->dst_reg, DST_OP);
972 if (BPF_SRC(insn->code) == BPF_X) {
973 if (BPF_CLASS(insn->code) == BPF_ALU64) {
977 regs[insn->dst_reg] = regs[insn->src_reg];
979 regs[insn->dst_reg].type = UNKNOWN_VALUE;
980 regs[insn->dst_reg].map_ptr = NULL;
986 regs[insn->dst_reg].type = CONST_IMM;
987 regs[insn->dst_reg].imm = insn->imm;
998 if (BPF_SRC(insn->code) == BPF_X) {
999 if (insn->imm != 0 || insn->off != 0) {
1004 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
1008 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
1015 err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
1020 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
1026 if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
1027 regs[insn->dst_reg].type == FRAME_PTR &&
1028 BPF_SRC(insn->code) == BPF_K)
1032 err = check_reg_arg(regs, insn->dst_reg, DST_OP);
1037 regs[insn->dst_reg].type = PTR_TO_STACK;
1038 regs[insn->dst_reg].imm = insn->imm;
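A sketch of the distinction the FRAME_PTR/PTR_TO_STACK branch above draws, again with the filter.h macros; register choices are arbitrary. Only "frame pointer plus constant" survives as a stack pointer in this version of the verifier; adding a register, even one holding a known constant, leaves the destination as UNKNOWN_VALUE and a later dereference is rejected.

    struct bpf_insn accepted[] = {
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),         /* r2: FRAME_PTR */
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),        /* r2: PTR_TO_STACK, imm = -8 */
        BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),          /* store to fp-8: allowed */
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };

    struct bpf_insn rejected[] = {
        BPF_MOV64_IMM(BPF_REG_3, -8),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), /* r2: UNKNOWN_VALUE */
        BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),          /* rejected: r2 is no longer a pointer */
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };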
1046 struct bpf_insn *insn, int *insn_idx)
1050 u8 opcode = BPF_OP(insn->code);
1058 if (BPF_SRC(insn->code) == BPF_X) {
1059 if (insn->imm != 0) {
1065 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
1069 if (insn->src_reg != BPF_REG_0) {
1076 err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
1081 if (BPF_SRC(insn->code) == BPF_K &&
1083 regs[insn->dst_reg].type == CONST_IMM &&
1084 regs[insn->dst_reg].imm == insn->imm) {
1089 *insn_idx += insn->off;
1100 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
1105 if (BPF_SRC(insn->code) == BPF_K &&
1106 insn->imm == 0 && (opcode == BPF_JEQ ||
1108 regs[insn->dst_reg].type == PTR_TO_MAP_VALUE_OR_NULL) {
1110 /* next fallthrough insn can access memory via
1113 regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
1115 other_branch->regs[insn->dst_reg].type = CONST_IMM;
1116 other_branch->regs[insn->dst_reg].imm = 0;
1118 other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
1119 regs[insn->dst_reg].type = CONST_IMM;
1120 regs[insn->dst_reg].imm = 0;
1122 } else if (BPF_SRC(insn->code) == BPF_K &&
1129 other_branch->regs[insn->dst_reg].type = CONST_IMM;
1130 other_branch->regs[insn->dst_reg].imm = insn->imm;
1135 regs[insn->dst_reg].type = CONST_IMM;
1136 regs[insn->dst_reg].imm = insn->imm;
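The CONST_IMM comparison handled just above the push_stack() call also prunes provably dead branches: when the compared register holds a known constant, only the branch that can actually be taken is explored. A minimal, illustrative program:

    struct bpf_insn prog[] = {
        BPF_MOV64_IMM(BPF_REG_1, 42),             /* r1: CONST_IMM, imm = 42 */
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 42, 1),   /* always taken: only that path is walked */
        BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),      /* dead code, never reaches check_mem_access() */
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };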
1145 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
1147 u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
1153 static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
1158 if (BPF_SIZE(insn->code) != BPF_DW) {
1159 verbose("invalid BPF_LD_IMM insn\n");
1162 if (insn->off != 0) {
1167 err = check_reg_arg(regs, insn->dst_reg, DST_OP);
1171 if (insn->src_reg == 0)
1176 BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
1178 regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
1179 regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
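ld_imm64_to_map_ptr() reassembles the 64-bit value that a BPF_LD | BPF_IMM | BPF_DW instruction spreads across two consecutive struct bpf_insn slots. A userspace-side illustration (the fd value 3 is made up):

    #include <stdint.h>
    #include <linux/bpf.h>   /* struct bpf_insn, BPF_LD/BPF_IMM/BPF_DW, BPF_PSEUDO_MAP_FD */

    /* With src_reg == BPF_PSEUDO_MAP_FD the low imm carries a map fd, which
     * replace_map_fd_with_map_ptr() later swaps for the kernel map pointer. */
    static struct bpf_insn ld_map[2] = {
        { .code = BPF_LD | BPF_DW | BPF_IMM, .dst_reg = BPF_REG_1,
          .src_reg = BPF_PSEUDO_MAP_FD, .imm = 3 },
        { .imm = 0 },   /* high 32 bits; every other field must stay zero */
    };

    /* Userspace mirror of how ld_imm64_to_map_ptr() joins the two halves. */
    static uint64_t ld_imm64_value(const struct bpf_insn *insn)
    {
        return ((uint64_t)(uint32_t)insn[0].imm) |
               ((uint64_t)(uint32_t)insn[1].imm) << 32;
    }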
1243 verbose("jump out of range from insn %d to %d\n", t, w);
1260 verbose("back-edge from insn %d to %d\n", t, w);
1266 verbose("insn state internal bug\n");
1292 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
1365 verbose("unreachable insn %d\n", i);
1481 struct bpf_insn *insn;
1486 verbose("invalid insn idx %d insn_cnt %d\n",
1491 insn = &insns[insn_idx];
1492 class = BPF_CLASS(insn->code);
1495 verbose("BPF program is too large. Proccessed %d insn\n",
1523 print_bpf_insn(insn);
1527 err = check_alu_op(regs, insn);
1532 if (BPF_MODE(insn->code) != BPF_MEM ||
1533 insn->imm != 0) {
1538 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
1542 err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
1549 err = check_mem_access(env, insn->src_reg, insn->off,
1550 BPF_SIZE(insn->code), BPF_READ,
1551 insn->dst_reg);
1556 if (BPF_MODE(insn->code) == BPF_XADD) {
1557 err = check_xadd(env, insn);
1564 if (BPF_MODE(insn->code) != BPF_MEM ||
1565 insn->imm != 0) {
1570 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
1574 err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
1579 err = check_mem_access(env, insn->dst_reg, insn->off,
1580 BPF_SIZE(insn->code), BPF_WRITE,
1581 insn->src_reg);
1586 if (BPF_MODE(insn->code) != BPF_MEM ||
1587 insn->src_reg != BPF_REG_0) {
1592 err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
1597 err = check_mem_access(env, insn->dst_reg, insn->off,
1598 BPF_SIZE(insn->code), BPF_WRITE,
1604 u8 opcode = BPF_OP(insn->code);
1607 if (BPF_SRC(insn->code) != BPF_K ||
1608 insn->off != 0 ||
1609 insn->src_reg != BPF_REG_0 ||
1610 insn->dst_reg != BPF_REG_0) {
1615 err = check_call(env, insn->imm);
1620 if (BPF_SRC(insn->code) != BPF_K ||
1621 insn->imm != 0 ||
1622 insn->src_reg != BPF_REG_0 ||
1623 insn->dst_reg != BPF_REG_0) {
1628 insn_idx += insn->off + 1;
1632 if (BPF_SRC(insn->code) != BPF_K ||
1633 insn->imm != 0 ||
1634 insn->src_reg != BPF_REG_0 ||
1635 insn->dst_reg != BPF_REG_0) {
1659 err = check_cond_jmp_op(env, insn, &insn_idx);
1664 u8 mode = BPF_MODE(insn->code);
1670 err = check_ld_imm(env, insn);
1680 verbose("unknown insn class %d\n", class);
1695 struct bpf_insn *insn = env->prog->insnsi;
1699 for (i = 0; i < insn_cnt; i++, insn++) {
1700 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
1704 if (i == insn_cnt - 1 || insn[1].code != 0 ||
1705 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
1706 insn[1].off != 0) {
1707 verbose("invalid bpf_ld_imm64 insn\n");
1711 if (insn->src_reg == 0)
1715 if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
1716 verbose("unrecognized bpf_ld_imm64 insn\n");
1720 f = fdget(insn->imm);
1725 insn->imm);
1731 insn[0].imm = (u32) (unsigned long) map;
1732 insn[1].imm = ((u64) (unsigned long) map) >> 32;
1758 insn++;
1782 struct bpf_insn *insn = env->prog->insnsi;
1786 for (i = 0; i < insn_cnt; i++, insn++)
1787 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
1788 insn->src_reg = 0;