/*
* This file compiles an abstract syntax tree (AST) into Python bytecode.
*
* The primary entry point is _PyAST_Compile(), which returns a
* PyCodeObject. The compiler makes several passes to build the code
* object:
* 1. Checks for future statements. See future.c.
* 2. Builds a symbol table. See symtable.c.
* 3. Generates code for basic blocks. See compiler_mod() in this file.
* 4. Assembles the basic blocks into final code. See assemble() in
*    this file.
* 5. Optimizes the bytecode (peephole optimizations).
*
* Note that compiler_mod() suggests module, but the module ast type
* (mod_ty) has cases for expressions and interactive statements.
*
* CAUTION: The VISIT_* macros abort the current function when they
* encounter a problem. So don't invoke them when there is memory
* which needs to be released. Code blocks are OK, as the compiler
* structure takes care of releasing those. Use the arena to manage
* objects.
*/
#include "Python.h"

/*
   Code generated for "try: <code for body> finally: <code for finalbody>":

       SETUP_FINALLY    L
           <code for body>
       POP_BLOCK
           <code for finalbody>
       JUMP             E
   L:
           <code for finalbody>
   E:
The special instructions use the block stack. Each block
stack entry contains the instruction that created it (here
SETUP_FINALLY), the level of the value stack at the time the
block stack entry was created, and a label (here L).
SETUP_FINALLY:
Pushes the current value stack level and the label
onto the block stack.
POP_BLOCK:
Pops an entry from the block stack.
The block stack is unwound when an exception is raised:
when a SETUP_FINALLY entry is found, the raised and the caught
exceptions are pushed onto the value stack (and the exception
condition is cleared), and the interpreter jumps to the label
gotten from the block stack.
*/
static int
compiler_try_finally(struct compiler *c, stmt_ty s)
{
basicblock *body, *end, *exit, *cleanup;
body = compiler_new_block(c);
end = compiler_new_block(c);
exit = compiler_new_block(c);
cleanup = compiler_new_block(c);
if (body == NULL || end == NULL || exit == NULL || cleanup == NULL) {
return 0;
}
/* `try` block */
ADDOP_JUMP(c, SETUP_FINALLY, end);
compiler_use_next_block(c, body);
if (!compiler_push_fblock(c, FINALLY_TRY, body, end, s->v.Try.finalbody))
return 0;
if (s->v.Try.handlers && asdl_seq_LEN(s->v.Try.handlers)) {
if (!compiler_try_except(c, s))
return 0;
}
else {
VISIT_SEQ(c, stmt, s->v.Try.body);
}
ADDOP_NOLINE(c, POP_BLOCK);
compiler_pop_fblock(c, FINALLY_TRY, body);
VISIT_SEQ(c, stmt, s->v.Try.finalbody);
ADDOP_JUMP_NOLINE(c, JUMP_FORWARD, exit);
/* `finally` block */
compiler_use_next_block(c, end);
UNSET_LOC(c);
ADDOP_JUMP(c, SETUP_CLEANUP, cleanup);
ADDOP(c, PUSH_EXC_INFO);
if (!compiler_push_fblock(c, FINALLY_END, end, NULL, NULL))
return 0;
VISIT_SEQ(c, stmt, s->v.Try.finalbody);
compiler_pop_fblock(c, FINALLY_END, end);
ADDOP_I(c, RERAISE, 0);
compiler_use_next_block(c, cleanup);
POP_EXCEPT_AND_RERAISE(c);
compiler_use_next_block(c, exit);
return 1;
}
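/*
 * Note that the function above emits <code for finalbody> twice: once
 * inline for the normal exit path, and once under the `end` label for the
 * unwinding path. A rough sketch of the resulting layout (opcode names as
 * used in this file; other versions may differ):
 *
 *          SETUP_FINALLY        end
 *          <code for body>
 *          POP_BLOCK
 *          <code for finalbody>         (normal exit)
 *          JUMP_FORWARD         exit
 *     end: SETUP_CLEANUP        cleanup
 *          PUSH_EXC_INFO
 *          <code for finalbody>         (unwinding path)
 *          RERAISE              0
 * cleanup: POP_EXCEPT_AND_RERAISE
 *    exit:
 */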
static int
compiler_try_star_finally(struct compiler *c, stmt_ty s)
{
basicblock *body = compiler_new_block(c);
if (body == NULL) {
return 0;
}
basicblock *end = compiler_new_block(c);
if (!end) {
return 0;
}
basicblock *exit = compiler_new_block(c);
if (!exit) {
return 0;
}
basicblock *cleanup = compiler_new_block(c);
if (!cleanup) {
return 0;
}
/* `try` block */
ADDOP_JUMP(c, SETUP_FINALLY, end);
compiler_use_next_block(c, body);
if (!compiler_push_fblock(c, FINALLY_TRY, body, end, s->v.TryStar.finalbody)) {
return 0;
}
if (s->v.TryStar.handlers && asdl_seq_LEN(s->v.TryStar.handlers)) {
if (!compiler_try_star_except(c, s)) {
return 0;
}
}
else {
VISIT_SEQ(c, stmt, s->v.TryStar.body);
}
ADDOP_NOLINE(c, POP_BLOCK);
compiler_pop_fblock(c, FINALLY_TRY, body);
VISIT_SEQ(c, stmt, s->v.TryStar.finalbody);
ADDOP_JUMP_NOLINE(c, JUMP_FORWARD, exit);
/* `finally` block */
compiler_use_next_block(c, end);
UNSET_LOC(c);
ADDOP_JUMP(c, SETUP_CLEANUP, cleanup);
ADDOP(c, PUSH_EXC_INFO);
if (!compiler_push_fblock(c, FINALLY_END, end, NULL, NULL)) {
return 0;
}
VISIT_SEQ(c, stmt, s->v.TryStar.finalbody);
compiler_pop_fblock(c, FINALLY_END, end);
ADDOP_I(c, RERAISE, 0);
compiler_use_next_block(c, cleanup);
POP_EXCEPT_AND_RERAISE(c);
compiler_use_next_block(c, exit);
return 1;
}
/*
   Code generated for "try: S except E1 as V1: S1 except E2 as V2: S2 ...":
   (The contents of the value stack is shown in [], with the top
   at the right; 'tb' is trace-back info, 'val' the exception's
   associated value, and 'exc' the exception.)

   Value stack          Label         Instruction     Argument

   []                                 SETUP_FINALLY   L1
   []                                 <code for S>
   []                                 POP_BLOCK
   []                                 JUMP_FORWARD    L0

   [exc]                L1:           <evaluate E1>
   .............................etc.......................
                                      JUMP_FORWARD    L0

   [exc]                L2:           <evaluate E2>
   .............................etc.......................
*/
/*
   Code generated for "try: S except* E1 as V1: S1 except* E2 as V2: S2 ..."
   follows the same scheme, but collects raised and reraised exceptions in
   a list so they can be combined into an ExceptionGroup:

   Value stack             Label   Instruction     Argument

   []                              POP_BLOCK
   []                              JUMP_FORWARD    L0

   [exc]                   L1:     COPY 1          ) save copy of the original exception
   [orig, exc]                     BUILD_LIST      ) list for raised/reraised excs ("result")
   [orig, exc, res]                SWAP 2

   [orig, res, exc]                <evaluate E1>
   .............................etc.......................
   [orig, res, rest]               JUMP_FORWARD    L2

   [orig, res, rest, i, v] R1:     LIST_APPEND 3   ) exc raised in except* body - add to res
   [orig, res, rest, i]            POP
   [orig, res, rest]       L2:
   .............................etc.......................
*/
/*
   Implements the with statement from PEP 343:

       with EXPR as VAR:
           BLOCK

   is implemented as:

           <code for EXPR>
           SETUP_WITH       E
           <code to store to VAR> or POP_TOP
           <code for BLOCK>
           LOAD_CONST (None, None, None)
           CALL_FUNCTION_EX 0
           JUMP_FORWARD     EXIT
       E:  WITH_EXCEPT_START (calls EXPR.__exit__)
           POP_JUMP_IF_TRUE T:
           RERAISE
       T:  POP_TOP (remove exception from stack)
           POP_EXCEPT
           POP_TOP
       EXIT:
 */
static int
compiler_with(struct compiler *c, stmt_ty s, int pos)
{
basicblock *block, *final, *exit, *cleanup;
withitem_ty item = asdl_seq_GET(s->v.With.items, pos);
assert(s->kind == With_kind);
block = compiler_new_block(c);
final = compiler_new_block(c);
exit = compiler_new_block(c);
cleanup = compiler_new_block(c);
if (!block || !final || !exit || !cleanup)
return 0;
/* Evaluate EXPR */
VISIT(c, expr, item->context_expr);
/* Will push bound __exit__ */
ADDOP(c, BEFORE_WITH);
ADDOP_JUMP(c, SETUP_WITH, final);
/* SETUP_WITH pushes a finally block. */
compiler_use_next_block(c, block);
if (!compiler_push_fblock(c, WITH, block, final, s)) {
return 0;
}
if (item->optional_vars) {
VISIT(c, expr, item->optional_vars);
}
else {
/* Discard result from context.__enter__() */
ADDOP(c, POP_TOP);
}
pos++;
if (pos == asdl_seq_LEN(s->v.With.items))
/* BLOCK code */
VISIT_SEQ(c, stmt, s->v.With.body)
else if (!compiler_with(c, s, pos))
return 0;
/* Mark all following code as artificial */
UNSET_LOC(c);
ADDOP(c, POP_BLOCK);
compiler_pop_fblock(c, WITH, block);
/* End of body; start the cleanup. */
/* For successful outcome:
* call __exit__(None, None, None)
*/
SET_LOC(c, s);
if (!compiler_call_exit_with_nones(c))
return 0;
ADDOP(c, POP_TOP);
ADDOP_JUMP(c, JUMP_FORWARD, exit);
/* For exceptional outcome: */
compiler_use_next_block(c, final);
ADDOP_JUMP(c, SETUP_CLEANUP, cleanup);
ADDOP(c, PUSH_EXC_INFO);
ADDOP(c, WITH_EXCEPT_START);
compiler_with_except_finish(c, cleanup);
compiler_use_next_block(c, exit);
return 1;
}
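/*
 * Multiple context managers are handled by the recursive call on `pos`
 * above, so each item gets its own SETUP_WITH region wrapped around the
 * code for the remaining items. Roughly (a sketch, not emitted verbatim):
 *
 *     with a() as x, b() as y:          with a() as x:
 *         BLOCK                  ==         with b() as y:
 *                                               BLOCK
 */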
static int
compiler_visit_expr1(struct compiler *c, expr_ty e)
{
switch (e->kind) {
case NamedExpr_kind:
VISIT(c, expr, e->v.NamedExpr.value);
ADDOP_I(c, COPY, 1);
VISIT(c, expr, e->v.NamedExpr.target);
break;
case BoolOp_kind:
return compiler_boolop(c, e);
case BinOp_kind:
VISIT(c, expr, e->v.BinOp.left);
VISIT(c, expr, e->v.BinOp.right);
ADDOP_BINARY(c, e->v.BinOp.op);
break;
case UnaryOp_kind:
VISIT(c, expr, e->v.UnaryOp.operand);
ADDOP(c, unaryop(e->v.UnaryOp.op));
break;
case Lambda_kind:
return compiler_lambda(c, e);
case IfExp_kind:
return compiler_ifexp(c, e);
case Dict_kind:
return compiler_dict(c, e);
case Set_kind:
return compiler_set(c, e);
case GeneratorExp_kind:
return compiler_genexp(c, e);
case ListComp_kind:
return compiler_listcomp(c, e);
case SetComp_kind:
return compiler_setcomp(c, e);
case DictComp_kind:
return compiler_dictcomp(c, e);
case Yield_kind:
if (c->u->u_ste->ste_type != FunctionBlock)
return compiler_error(c, "'yield' outside function");
if (e->v.Yield.value) {
VISIT(c, expr, e->v.Yield.value);
}
else {
ADDOP_LOAD_CONST(c, Py_None);
}
ADDOP_YIELD(c);
break;
case YieldFrom_kind:
if (c->u->u_ste->ste_type != FunctionBlock)
return compiler_error(c, "'yield' outside function");
if (c->u->u_scope_type == COMPILER_SCOPE_ASYNC_FUNCTION)
return compiler_error(c, "'yield from' inside async function");
VISIT(c, expr, e->v.YieldFrom.value);
ADDOP(c, GET_YIELD_FROM_ITER);
ADDOP_LOAD_CONST(c, Py_None);
ADD_YIELD_FROM(c, 0);
break;
case Await_kind:
if (!IS_TOP_LEVEL_AWAIT(c)){
if (c->u->u_ste->ste_type != FunctionBlock){
return compiler_error(c, "'await' outside function");
}
if (c->u->u_scope_type != COMPILER_SCOPE_ASYNC_FUNCTION &&
c->u->u_scope_type != COMPILER_SCOPE_COMPREHENSION){
return compiler_error(c, "'await' outside async function");
}
}
VISIT(c, expr, e->v.Await.value);
ADDOP_I(c, GET_AWAITABLE, 0);
ADDOP_LOAD_CONST(c, Py_None);
ADD_YIELD_FROM(c, 1);
break;
case Compare_kind:
return compiler_compare(c, e);
case Call_kind:
return compiler_call(c, e);
case Constant_kind:
ADDOP_LOAD_CONST(c, e->v.Constant.value);
break;
case JoinedStr_kind:
return compiler_joined_str(c, e);
case FormattedValue_kind:
return compiler_formatted_value(c, e);
/* The following exprs can be assignment targets. */
case Attribute_kind:
VISIT(c, expr, e->v.Attribute.value);
switch (e->v.Attribute.ctx) {
case Load:
{
int old_lineno = c->u->u_lineno;
c->u->u_lineno = e->end_lineno;
ADDOP_NAME(c, LOAD_ATTR, e->v.Attribute.attr, names);
c->u->u_lineno = old_lineno;
break;
}
case Store:
if (forbidden_name(c, e->v.Attribute.attr, e->v.Attribute.ctx)) {
return 0;
}
int old_lineno = c->u->u_lineno;
c->u->u_lineno = e->end_lineno;
ADDOP_NAME(c, STORE_ATTR, e->v.Attribute.attr, names);
c->u->u_lineno = old_lineno;
break;
case Del:
ADDOP_NAME(c, DELETE_ATTR, e->v.Attribute.attr, names);
break;
}
break;
case Subscript_kind:
return compiler_subscript(c, e);
case Starred_kind:
switch (e->v.Starred.ctx) {
case Store:
/* In all legitimate cases, the Starred node was already replaced
* by compiler_list/compiler_tuple. XXX: is that okay? */
return compiler_error(c,
"starred assignment target must be in a list or tuple");
default:
return compiler_error(c,
"can't use starred expression here");
}
break;
case Slice_kind:
return compiler_slice(c, e);
case Name_kind:
return compiler_nameop(c, e->v.Name.id, e->v.Name.ctx);
/* child nodes of List and Tuple will have expr_context set */
case List_kind:
return compiler_list(c, e);
case Tuple_kind:
return compiler_tuple(c, e);
}
return 1;
}
static int
compiler_visit_expr(struct compiler *c, expr_ty e)
{
int old_lineno = c->u->u_lineno;
int old_end_lineno = c->u->u_end_lineno;
int old_col_offset = c->u->u_col_offset;
int old_end_col_offset = c->u->u_end_col_offset;
SET_LOC(c, e);
int res = compiler_visit_expr1(c, e);
c->u->u_lineno = old_lineno;
c->u->u_end_lineno = old_end_lineno;
c->u->u_col_offset = old_col_offset;
c->u->u_end_col_offset = old_end_col_offset;
return res;
}
static int
compiler_augassign(struct compiler *c, stmt_ty s)
{
assert(s->kind == AugAssign_kind);
expr_ty e = s->v.AugAssign.target;
int old_lineno = c->u->u_lineno;
int old_end_lineno = c->u->u_end_lineno;
int old_col_offset = c->u->u_col_offset;
int old_end_col_offset = c->u->u_end_col_offset;
SET_LOC(c, e);
switch (e->kind) {
case Attribute_kind:
VISIT(c, expr, e->v.Attribute.value);
ADDOP_I(c, COPY, 1);
int old_lineno = c->u->u_lineno;
c->u->u_lineno = e->end_lineno;
ADDOP_NAME(c, LOAD_ATTR, e->v.Attribute.attr, names);
c->u->u_lineno = old_lineno;
break;
case Subscript_kind:
VISIT(c, expr, e->v.Subscript.value);
VISIT(c, expr, e->v.Subscript.slice);
ADDOP_I(c, COPY, 2);
ADDOP_I(c, COPY, 2);
ADDOP(c, BINARY_SUBSCR);
break;
case Name_kind:
if (!compiler_nameop(c, e->v.Name.id, Load))
return 0;
break;
default:
PyErr_Format(PyExc_SystemError,
"invalid node type (%d) for augmented assignment",
e->kind);
return 0;
}
c->u->u_lineno = old_lineno;
c->u->u_end_lineno = old_end_lineno;
c->u->u_col_offset = old_col_offset;
c->u->u_end_col_offset = old_end_col_offset;
VISIT(c, expr, s->v.AugAssign.value);
ADDOP_INPLACE(c, s->v.AugAssign.op);
SET_LOC(c, e);
switch (e->kind) {
case Attribute_kind:
c->u->u_lineno = e->end_lineno;
ADDOP_I(c, SWAP, 2);
ADDOP_NAME(c, STORE_ATTR, e->v.Attribute.attr, names);
break;
case Subscript_kind:
ADDOP_I(c, SWAP, 3);
ADDOP_I(c, SWAP, 2);
ADDOP(c, STORE_SUBSCR);
break;
case Name_kind:
return compiler_nameop(c, e->v.Name.id, Store);
default:
Py_UNREACHABLE();
}
return 1;
}
static int
check_ann_expr(struct compiler *c, expr_ty e)
{
VISIT(c, expr, e);
ADDOP(c, POP_TOP);
return 1;
}
static int
check_annotation(struct compiler *c, stmt_ty s)
{
/* Annotations of complex targets do not produce anything
   under the "annotations" future import */
if (c->c_future->ff_features & CO_FUTURE_ANNOTATIONS) {
return 1;
}
/* Annotations are only evaluated in a module or class. */
if (c->u->u_scope_type == COMPILER_SCOPE_MODULE ||
c->u->u_scope_type == COMPILER_SCOPE_CLASS) {
return check_ann_expr(c, s->v.AnnAssign.annotation);
}
return 1;
}
static int
check_ann_subscr(struct compiler *c, expr_ty e)
{
/* We check that everything in a subscript is defined at runtime. */
switch (e->kind) {
case Slice_kind:
if (e->v.Slice.lower && !check_ann_expr(c, e->v.Slice.lower)) {
return 0;
}
if (e->v.Slice.upper && !check_ann_expr(c, e->v.Slice.upper)) {
return 0;
}
if (e->v.Slice.step && !check_ann_expr(c, e->v.Slice.step)) {
return 0;
}
return 1;
case Tuple_kind: {
/* extended slice */
asdl_expr_seq *elts = e->v.Tuple.elts;
Py_ssize_t i, n = asdl_seq_LEN(elts);
for (i = 0; i < n; i++) {
if (!check_ann_subscr(c, asdl_seq_GET(elts, i))) {
return 0;
}
}
return 1;
}
default:
return check_ann_expr(c, e);
}
}
static int
compiler_annassign(struct compiler *c, stmt_ty s)
{
expr_ty targ = s->v.AnnAssign.target;
PyObject* mangled;
assert(s->kind == AnnAssign_kind);
/* We perform the actual assignment first. */
if (s->v.AnnAssign.value) {
VISIT(c, expr, s->v.AnnAssign.value);
VISIT(c, expr, targ);
}
switch (targ->kind) {
case Name_kind:
if (forbidden_name(c, targ->v.Name.id, Store))
return 0;
/* If we have a simple name in a module or class, store annotation. */
if (s->v.AnnAssign.simple &&
(c->u->u_scope_type == COMPILER_SCOPE_MODULE ||
c->u->u_scope_type == COMPILER_SCOPE_CLASS)) {
if (c->c_future->ff_features & CO_FUTURE_ANNOTATIONS) {
VISIT(c, annexpr, s->v.AnnAssign.annotation)
}
else {
VISIT(c, expr, s->v.AnnAssign.annotation);
}
ADDOP_NAME(c, LOAD_NAME, &_Py_ID(__annotations__), names);
mangled = _Py_Mangle(c->u->u_private, targ->v.Name.id);
ADDOP_LOAD_CONST_NEW(c, mangled);
ADDOP(c, STORE_SUBSCR);
}
break;
case Attribute_kind:
if (forbidden_name(c, targ->v.Attribute.attr, Store))
return 0;
if (!s->v.AnnAssign.value &&
!check_ann_expr(c, targ->v.Attribute.value)) {
return 0;
}
break;
case Subscript_kind:
if (!s->v.AnnAssign.value &&
(!check_ann_expr(c, targ->v.Subscript.value) ||
!check_ann_subscr(c, targ->v.Subscript.slice))) {
return 0;
}
break;
default:
PyErr_Format(PyExc_SystemError,
"invalid node type (%d) for annotated assignment",
targ->kind);
return 0;
}
/* Annotation is evaluated last. */
if (!s->v.AnnAssign.simple && !check_annotation(c, s)) {
return 0;
}
return 1;
}
/* Raises a SyntaxError and returns 0.
If something goes wrong, a different exception may be raised.
*/
static int
compiler_error(struct compiler *c, const char *format, ...)
{
va_list vargs;
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, format);
#else
va_start(vargs);
#endif
PyObject *msg = PyUnicode_FromFormatV(format, vargs);
va_end(vargs);
if (msg == NULL) {
return 0;
}
PyObject *loc = PyErr_ProgramTextObject(c->c_filename, c->u->u_lineno);
if (loc == NULL) {
Py_INCREF(Py_None);
loc = Py_None;
}
PyObject *args = Py_BuildValue("O(OiiOii)", msg, c->c_filename,
c->u->u_lineno, c->u->u_col_offset + 1, loc,
c->u->u_end_lineno, c->u->u_end_col_offset + 1);
Py_DECREF(msg);
if (args == NULL) {
goto exit;
}
PyErr_SetObject(PyExc_SyntaxError, args);
exit:
Py_DECREF(loc);
Py_XDECREF(args);
return 0;
}
/* Emits a SyntaxWarning and returns 1 on success.
If a SyntaxWarning is raised as an error, replaces it with a SyntaxError
and returns 0.
*/
static int
compiler_warn(struct compiler *c, const char *format, ...)
{
va_list vargs;
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, format);
#else
va_start(vargs);
#endif
PyObject *msg = PyUnicode_FromFormatV(format, vargs);
va_end(vargs);
if (msg == NULL) {
return 0;
}
if (PyErr_WarnExplicitObject(PyExc_SyntaxWarning, msg, c->c_filename,
c->u->u_lineno, NULL, NULL) < 0)
{
if (PyErr_ExceptionMatches(PyExc_SyntaxWarning)) {
/* Replace the SyntaxWarning exception with a SyntaxError
to get a more accurate error report */
PyErr_Clear();
assert(PyUnicode_AsUTF8(msg) != NULL);
compiler_error(c, PyUnicode_AsUTF8(msg));
}
Py_DECREF(msg);
return 0;
}
Py_DECREF(msg);
return 1;
}
static int
compiler_subscript(struct compiler *c, expr_ty e)
{
expr_context_ty ctx = e->v.Subscript.ctx;
int op = 0;
if (ctx == Load) {
if (!check_subscripter(c, e->v.Subscript.value)) {
return 0;
}
if (!check_index(c, e->v.Subscript.value, e->v.Subscript.slice)) {
return 0;
}
}
switch (ctx) {
case Load: op = BINARY_SUBSCR; break;
case Store: op = STORE_SUBSCR; break;
case Del: op = DELETE_SUBSCR; break;
}
assert(op);
VISIT(c, expr, e->v.Subscript.value);
VISIT(c, expr, e->v.Subscript.slice);
ADDOP(c, op);
return 1;
}
static int
compiler_slice(struct compiler *c, expr_ty s)
{
int n = 2;
assert(s->kind == Slice_kind);
/* only handles the cases where BUILD_SLICE is emitted */
if (s->v.Slice.lower) {
VISIT(c, expr, s->v.Slice.lower);
}
else {
ADDOP_LOAD_CONST(c, Py_None);
}
if (s->v.Slice.upper) {
VISIT(c, expr, s->v.Slice.upper);
}
else {
ADDOP_LOAD_CONST(c, Py_None);
}
if (s->v.Slice.step) {
n++;
VISIT(c, expr, s->v.Slice.step);
}
ADDOP_I(c, BUILD_SLICE, n);
return 1;
}
// PEP 634: Structural Pattern Matching
// To keep things simple, all compiler_pattern_* and pattern_helper_* routines
// follow the convention of consuming TOS (the subject for the given pattern)
// and calling jump_to_fail_pop on failure (no match).
// When calling into these routines, it's important that pc->on_top be kept
// updated to reflect the current number of items that we are using on the top
// of the stack: they will be popped on failure, and any name captures will be
// stored *underneath* them on success. This lets us defer all names stores
// until the *entire* pattern matches.
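// As a worked example (a sketch of the convention, not code from this
// file): while compiling `case [x, y]:`, the two unpacked items are on
// top of the stack, so pc->on_top reflects them; as each name pattern
// matches, its value is rotated *underneath* those working items and the
// name is appended to pc->stores. compiler_match_inner() emits the actual
// STORE_* instructions only after the whole pattern has succeeded.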
#define WILDCARD_CHECK(N) \
((N)->kind == MatchAs_kind && !(N)->v.MatchAs.name)
#define WILDCARD_STAR_CHECK(N) \
((N)->kind == MatchStar_kind && !(N)->v.MatchStar.name)
// Limit permitted subexpressions, even if the parser & AST validator let them through
#define MATCH_VALUE_EXPR(N) \
((N)->kind == Constant_kind || (N)->kind == Attribute_kind)
// Allocate or resize pc->fail_pop to allow for n items to be popped on failure.
static int
ensure_fail_pop(struct compiler *c, pattern_context *pc, Py_ssize_t n)
{
Py_ssize_t size = n + 1;
if (size <= pc->fail_pop_size) {
return 1;
}
Py_ssize_t needed = sizeof(basicblock*) * size;
basicblock **resized = PyObject_Realloc(pc->fail_pop, needed);
if (resized == NULL) {
PyErr_NoMemory();
return 0;
}
pc->fail_pop = resized;
while (pc->fail_pop_size < size) {
basicblock *new_block;
RETURN_IF_FALSE(new_block = compiler_new_block(c));
pc->fail_pop[pc->fail_pop_size++] = new_block;
}
return 1;
}
// Use op to jump to the correct fail_pop block.
static int
jump_to_fail_pop(struct compiler *c, pattern_context *pc, int op)
{
// Pop any items on the top of the stack, plus any objects we were going to
// capture on success:
Py_ssize_t pops = pc->on_top + PyList_GET_SIZE(pc->stores);
RETURN_IF_FALSE(ensure_fail_pop(c, pc, pops));
ADDOP_JUMP(c, op, pc->fail_pop[pops]);
return 1;
}
// Build all of the fail_pop blocks and reset fail_pop.
static int
emit_and_reset_fail_pop(struct compiler *c, pattern_context *pc)
{
if (!pc->fail_pop_size) {
assert(pc->fail_pop == NULL);
return 1;
}
while (--pc->fail_pop_size) {
compiler_use_next_block(c, pc->fail_pop[pc->fail_pop_size]);
if (!compiler_addop(c, POP_TOP)) {
pc->fail_pop_size = 0;
PyObject_Free(pc->fail_pop);
pc->fail_pop = NULL;
return 0;
}
}
compiler_use_next_block(c, pc->fail_pop[0]);
PyObject_Free(pc->fail_pop);
pc->fail_pop = NULL;
return 1;
}
static int
compiler_error_duplicate_store(struct compiler *c, identifier n)
{
return compiler_error(c, "multiple assignments to name %R in pattern", n);
}
// Duplicate the effect of 3.10's ROT_* instructions using SWAPs.
static int
pattern_helper_rotate(struct compiler *c, Py_ssize_t count)
{
while (1 < count) {
ADDOP_I(c, SWAP, count--);
}
return 1;
}
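// For example, pattern_helper_rotate(c, 3) emits SWAP 3; SWAP 2, turning
// a stack of [a, b, c] into [c, a, b] -- the same net effect as 3.10's
// ROT_THREE, assuming SWAP(i) exchanges TOS with the i-th item from the top.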
static int
pattern_helper_store_name(struct compiler *c, identifier n, pattern_context *pc)
{
if (n == NULL) {
ADDOP(c, POP_TOP);
return 1;
}
if (forbidden_name(c, n, Store)) {
return 0;
}
// Can't assign to the same name twice:
int duplicate = PySequence_Contains(pc->stores, n);
if (duplicate < 0) {
return 0;
}
if (duplicate) {
return compiler_error_duplicate_store(c, n);
}
// Rotate this object underneath any items we need to preserve:
Py_ssize_t rotations = pc->on_top + PyList_GET_SIZE(pc->stores) + 1;
RETURN_IF_FALSE(pattern_helper_rotate(c, rotations));
return !PyList_Append(pc->stores, n);
}
static int
pattern_unpack_helper(struct compiler *c, asdl_pattern_seq *elts)
{
Py_ssize_t n = asdl_seq_LEN(elts);
int seen_star = 0;
for (Py_ssize_t i = 0; i < n; i++) {
pattern_ty elt = asdl_seq_GET(elts, i);
if (elt->kind == MatchStar_kind && !seen_star) {
if ((i >= (1 << 8)) ||
(n-i-1 >= (INT_MAX >> 8)))
return compiler_error(c,
"too many expressions in "
"star-unpacking sequence pattern");
ADDOP_I(c, UNPACK_EX, (i + ((n-i-1) << 8)));
seen_star = 1;
}
else if (elt->kind == MatchStar_kind) {
return compiler_error(c,
"multiple starred expressions in sequence pattern");
}
}
if (!seen_star) {
ADDOP_I(c, UNPACK_SEQUENCE, n);
}
return 1;
}
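// The UNPACK_EX oparg packs two counts into one int: the low byte is the
// number of items before the star, and the higher bits the number after
// it. For example, the pattern [a, *rest, b] has n == 3 and the star at
// i == 1, so it unpacks with UNPACK_EX (1 + (1 << 8)) == 257.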
static int
pattern_helper_sequence_unpack(struct compiler *c, asdl_pattern_seq *patterns,
Py_ssize_t star, pattern_context *pc)
{
RETURN_IF_FALSE(pattern_unpack_helper(c, patterns));
Py_ssize_t size = asdl_seq_LEN(patterns);
// We've now got a bunch of new subjects on the stack. They need to remain
// there after each subpattern match:
pc->on_top += size;
for (Py_ssize_t i = 0; i < size; i++) {
// One less item to keep track of each time we loop through:
pc->on_top--;
pattern_ty pattern = asdl_seq_GET(patterns, i);
RETURN_IF_FALSE(compiler_pattern_subpattern(c, pattern, pc));
}
return 1;
}
// Like pattern_helper_sequence_unpack, but uses BINARY_SUBSCR instead of
// UNPACK_SEQUENCE / UNPACK_EX. This is more efficient for patterns with a
// starred wildcard like [first, *_] / [first, *_, last] / [*_, last] / etc.
static int
pattern_helper_sequence_subscr(struct compiler *c, asdl_pattern_seq *patterns,
Py_ssize_t star, pattern_context *pc)
{
// We need to keep the subject around for extracting elements:
pc->on_top++;
Py_ssize_t size = asdl_seq_LEN(patterns);
for (Py_ssize_t i = 0; i < size; i++) {
pattern_ty pattern = asdl_seq_GET(patterns, i);
if (WILDCARD_CHECK(pattern)) {
continue;
}
if (i == star) {
assert(WILDCARD_STAR_CHECK(pattern));
continue;
}
ADDOP_I(c, COPY, 1);
if (i < star) {
ADDOP_LOAD_CONST_NEW(c, PyLong_FromSsize_t(i));
}
else {
// The subject may not support negative indexing! Compute a
// nonnegative index:
ADDOP(c, GET_LEN);
ADDOP_LOAD_CONST_NEW(c, PyLong_FromSsize_t(size - i));
ADDOP_BINARY(c, Sub);
}
ADDOP(c, BINARY_SUBSCR);
RETURN_IF_FALSE(compiler_pattern_subpattern(c, pattern, pc));
}
// Pop the subject, we're done with it:
pc->on_top--;
ADDOP(c, POP_TOP);
return 1;
}
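// For example, `case [first, *_, last]:` never unpacks the subject:
// `first` is fetched as subject[0], while `last` is fetched at the index
// len(subject) - 1, computed at runtime via GET_LEN, a LOAD_CONST of
// (size - i), and a binary subtract -- which is why subjects that don't
// support negative indexing still work.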
// Like compiler_pattern, but turn off checks for irrefutability.
static int
compiler_pattern_subpattern(struct compiler *c, pattern_ty p, pattern_context *pc)
{
int allow_irrefutable = pc->allow_irrefutable;
pc->allow_irrefutable = 1;
RETURN_IF_FALSE(compiler_pattern(c, p, pc));
pc->allow_irrefutable = allow_irrefutable;
return 1;
}
static int
compiler_pattern_as(struct compiler *c, pattern_ty p, pattern_context *pc)
{
assert(p->kind == MatchAs_kind);
if (p->v.MatchAs.pattern == NULL) {
// An irrefutable match:
if (!pc->allow_irrefutable) {
if (p->v.MatchAs.name) {
const char *e = "name capture %R makes remaining patterns unreachable";
return compiler_error(c, e, p->v.MatchAs.name);
}
const char *e = "wildcard makes remaining patterns unreachable";
return compiler_error(c, e);
}
return pattern_helper_store_name(c, p->v.MatchAs.name, pc);
}
// Need to make a copy for (possibly) storing later:
pc->on_top++;
ADDOP_I(c, COPY, 1);
RETURN_IF_FALSE(compiler_pattern(c, p->v.MatchAs.pattern, pc));
// Success! Store it:
pc->on_top--;
RETURN_IF_FALSE(pattern_helper_store_name(c, p->v.MatchAs.name, pc));
return 1;
}
static int
compiler_pattern_star(struct compiler *c, pattern_ty p, pattern_context *pc)
{
assert(p->kind == MatchStar_kind);
RETURN_IF_FALSE(pattern_helper_store_name(c, p->v.MatchStar.name, pc));
return 1;
}
static int
validate_kwd_attrs(struct compiler *c, asdl_identifier_seq *attrs, asdl_pattern_seq* patterns)
{
// Any errors will point to the pattern rather than the arg name as the
// parser is only supplying identifiers rather than Name or keyword nodes
Py_ssize_t nattrs = asdl_seq_LEN(attrs);
for (Py_ssize_t i = 0; i < nattrs; i++) {
identifier attr = ((identifier)asdl_seq_GET(attrs, i));
SET_LOC(c, ((pattern_ty) asdl_seq_GET(patterns, i)));
if (forbidden_name(c, attr, Store)) {
return -1;
}
for (Py_ssize_t j = i + 1; j < nattrs; j++) {
identifier other = ((identifier)asdl_seq_GET(attrs, j));
if (!PyUnicode_Compare(attr, other)) {
SET_LOC(c, ((pattern_ty) asdl_seq_GET(patterns, j)));
compiler_error(c, "attribute name repeated in class pattern: %U", attr);
return -1;
}
}
}
return 0;
}
static int
compiler_pattern_class(struct compiler *c, pattern_ty p, pattern_context *pc)
{
assert(p->kind == MatchClass_kind);
asdl_pattern_seq *patterns = p->v.MatchClass.patterns;
asdl_identifier_seq *kwd_attrs = p->v.MatchClass.kwd_attrs;
asdl_pattern_seq *kwd_patterns = p->v.MatchClass.kwd_patterns;
Py_ssize_t nargs = asdl_seq_LEN(patterns);
Py_ssize_t nattrs = asdl_seq_LEN(kwd_attrs);
Py_ssize_t nkwd_patterns = asdl_seq_LEN(kwd_patterns);
if (nattrs != nkwd_patterns) {
// AST validator shouldn't let this happen, but if it does,
// just fail, don't crash out of the interpreter
const char * e = "kwd_attrs (%d) / kwd_patterns (%d) length mismatch in class pattern";
return compiler_error(c, e, nattrs, nkwd_patterns);
}
if (INT_MAX < nargs || INT_MAX < nargs + nattrs - 1) {
const char *e = "too many sub-patterns in class pattern %R";
return compiler_error(c, e, p->v.MatchClass.cls);
}
if (nattrs) {
RETURN_IF_FALSE(!validate_kwd_attrs(c, kwd_attrs, kwd_patterns));
SET_LOC(c, p);
}
VISIT(c, expr, p->v.MatchClass.cls);
PyObject *attr_names;
RETURN_IF_FALSE(attr_names = PyTuple_New(nattrs));
Py_ssize_t i;
for (i = 0; i < nattrs; i++) {
PyObject *name = asdl_seq_GET(kwd_attrs, i);
Py_INCREF(name);
PyTuple_SET_ITEM(attr_names, i, name);
}
ADDOP_LOAD_CONST_NEW(c, attr_names);
ADDOP_I(c, MATCH_CLASS, nargs);
ADDOP_I(c, COPY, 1);
ADDOP_LOAD_CONST(c, Py_None);
ADDOP_I(c, IS_OP, 1);
// TOS is now a tuple of (nargs + nattrs) attributes (or None):
pc->on_top++;
RETURN_IF_FALSE(jump_to_fail_pop(c, pc, POP_JUMP_IF_FALSE));
ADDOP_I(c, UNPACK_SEQUENCE, nargs + nattrs);
pc->on_top += nargs + nattrs - 1;
for (i = 0; i < nargs + nattrs; i++) {
pc->on_top--;
pattern_ty pattern;
if (i < nargs) {
// Positional:
pattern = asdl_seq_GET(patterns, i);
}
else {
// Keyword:
pattern = asdl_seq_GET(kwd_patterns, i - nargs);
}
if (WILDCARD_CHECK(pattern)) {
ADDOP(c, POP_TOP);
continue;
}
RETURN_IF_FALSE(compiler_pattern_subpattern(c, pattern, pc));
}
// Success! The loop above has consumed the whole tuple of attributes:
return 1;
}
static int
compiler_pattern_mapping(struct compiler *c, pattern_ty p, pattern_context *pc)
{
assert(p->kind == MatchMapping_kind);
asdl_expr_seq *keys = p->v.MatchMapping.keys;
asdl_pattern_seq *patterns = p->v.MatchMapping.patterns;
Py_ssize_t size = asdl_seq_LEN(keys);
Py_ssize_t npatterns = asdl_seq_LEN(patterns);
if (size != npatterns) {
// AST validator shouldn't let this happen, but if it does,
// just fail, don't crash out of the interpreter
const char * e = "keys (%d) / patterns (%d) length mismatch in mapping pattern";
return compiler_error(c, e, size, npatterns);
}
// We have a double-star target if "rest" is set
PyObject *star_target = p->v.MatchMapping.rest;
// We need to keep the subject on top during the mapping and length checks:
pc->on_top++;
ADDOP(c, MATCH_MAPPING);
RETURN_IF_FALSE(jump_to_fail_pop(c, pc, POP_JUMP_IF_FALSE));
if (!size && !star_target) {
// If the pattern is just "{}", we're done! Pop the subject:
pc->on_top--;
ADDOP(c, POP_TOP);
return 1;
}
if (size) {
// If the pattern has any keys in it, perform a length check:
ADDOP(c, GET_LEN);
ADDOP_LOAD_CONST_NEW(c, PyLong_FromSsize_t(size));
ADDOP_COMPARE(c, GtE);
RETURN_IF_FALSE(jump_to_fail_pop(c, pc, POP_JUMP_IF_FALSE));
}
if (INT_MAX < size - 1) {
return compiler_error(c, "too many sub-patterns in mapping pattern");
}
// Collect all of the keys into a tuple for MATCH_KEYS and
// **rest. They can either be dotted names or literals:
// Maintaining a set of Constant_kind kind keys allows us to raise a
// SyntaxError in the case of duplicates.
PyObject *seen = PySet_New(NULL);
if (seen == NULL) {
return 0;
}
// NOTE: goto error on failure in the loop below to avoid leaking `seen`
for (Py_ssize_t i = 0; i < size; i++) {
expr_ty key = asdl_seq_GET(keys, i);
if (key == NULL) {
const char *e = "can't use NULL keys in MatchMapping "
"(set 'rest' parameter instead)";
SET_LOC(c, ((pattern_ty) asdl_seq_GET(patterns, i)));
compiler_error(c, e);
goto error;
}
if (key->kind == Constant_kind) {
int in_seen = PySet_Contains(seen, key->v.Constant.value);
if (in_seen < 0) {
goto error;
}
if (in_seen) {
const char *e = "mapping pattern checks duplicate key (%R)";
compiler_error(c, e, key->v.Constant.value);
goto error;
}
if (PySet_Add(seen, key->v.Constant.value)) {
goto error;
}
}
else if (key->kind != Attribute_kind) {
const char *e = "mapping pattern keys may only match literals and attribute lookups";
compiler_error(c, e);
goto error;
}
if (!compiler_visit_expr(c, key)) {
goto error;
}
}
// all keys have been checked; there are no duplicates
Py_DECREF(seen);
ADDOP_I(c, BUILD_TUPLE, size);
ADDOP(c, MATCH_KEYS);
// There's now a tuple of keys and a tuple of values on top of the subject:
pc->on_top += 2;
ADDOP_I(c, COPY, 1);
ADDOP_LOAD_CONST(c, Py_None);
ADDOP_I(c, IS_OP, 1);
RETURN_IF_FALSE(jump_to_fail_pop(c, pc, POP_JUMP_IF_FALSE));
// So far so good. Use that tuple of values on the stack to match
// sub-patterns against:
ADDOP_I(c, UNPACK_SEQUENCE, size);
pc->on_top += size - 1;
for (Py_ssize_t i = 0; i < size; i++) {
pc->on_top--;
pattern_ty pattern = asdl_seq_GET(patterns, i);
RETURN_IF_FALSE(compiler_pattern_subpattern(c, pattern, pc));
}
// If we get this far, it's a match! Whatever happens next should consume
// the tuple of keys and the subject:
pc->on_top -= 2;
if (star_target) {
// If we have a starred name, bind a dict of remaining items to it (this may
// seem a bit inefficient, but keys is rarely big enough to actually impact
// runtime):
// rest = dict(TOS1)
// for key in TOS:
// del rest[key]
ADDOP_I(c, BUILD_MAP, 0); // [subject, keys, empty]
ADDOP_I(c, SWAP, 3); // [empty, keys, subject]
ADDOP_I(c, DICT_UPDATE, 2); // [copy, keys]
ADDOP_I(c, UNPACK_SEQUENCE, size); // [copy, keys...]
while (size) {
ADDOP_I(c, COPY, 1 + size--); // [copy, keys..., copy]
ADDOP_I(c, SWAP, 2); // [copy, keys..., copy, key]
ADDOP(c, DELETE_SUBSCR); // [copy, keys...]
}
RETURN_IF_FALSE(pattern_helper_store_name(c, star_target, pc));
}
else {
ADDOP(c, POP_TOP); // Tuple of keys.
ADDOP(c, POP_TOP); // Subject.
}
return 1;
error:
Py_DECREF(seen);
return 0;
}
static int
compiler_pattern_or(struct compiler *c, pattern_ty p, pattern_context *pc)
{
assert(p->kind == MatchOr_kind);
basicblock *end;
RETURN_IF_FALSE(end = compiler_new_block(c));
Py_ssize_t size = asdl_seq_LEN(p->v.MatchOr.patterns);
assert(size > 1);
// We're going to be messing with pc. Keep the original info handy:
pattern_context old_pc = *pc;
Py_INCREF(pc->stores);
// control is the list of names bound by the first alternative. It is used
// for checking different name bindings in alternatives, and for correcting
// the order in which extracted elements are placed on the stack.
PyObject *control = NULL;
// NOTE: We can't use returning macros anymore! goto error on error.
for (Py_ssize_t i = 0; i < size; i++) {
pattern_ty alt = asdl_seq_GET(p->v.MatchOr.patterns, i);
SET_LOC(c, alt);
PyObject *pc_stores = PyList_New(0);
if (pc_stores == NULL) {
goto error;
}
Py_SETREF(pc->stores, pc_stores);
// An irrefutable sub-pattern must be last, if it is allowed at all:
pc->allow_irrefutable = (i == size - 1) && old_pc.allow_irrefutable;
pc->fail_pop = NULL;
pc->fail_pop_size = 0;
pc->on_top = 0;
if (!compiler_addop_i(c, COPY, 1) || !compiler_pattern(c, alt, pc)) {
goto error;
}
// Success!
Py_ssize_t nstores = PyList_GET_SIZE(pc->stores);
if (!i) {
// This is the first alternative, so save its stores as a "control"
// for the others (they can't bind a different set of names, and
// might need to be reordered):
assert(control == NULL);
control = pc->stores;
Py_INCREF(control);
}
else if (nstores != PyList_GET_SIZE(control)) {
goto diff;
}
else if (nstores) {
// There were captures. Check to see if we differ from control:
Py_ssize_t icontrol = nstores;
while (icontrol--) {
PyObject *name = PyList_GET_ITEM(control, icontrol);
Py_ssize_t istores = PySequence_Index(pc->stores, name);
if (istores < 0) {
PyErr_Clear();
goto diff;
}
if (icontrol != istores) {
// Reorder the names on the stack to match the order of the
// names in control. There's probably a better way of doing
// this; the current solution is potentially very
// inefficient when each alternative subpattern binds lots
// of names in different orders. It's fine for reasonable
// cases, though, and the peephole optimizer will ensure
// that the final code is as efficient as possible.
assert(istores < icontrol);
Py_ssize_t rotations = istores + 1;
// Perform the same rotation on pc->stores:
PyObject *rotated = PyList_GetSlice(pc->stores, 0,
rotations);
if (rotated == NULL ||
PyList_SetSlice(pc->stores, 0, rotations, NULL) ||
PyList_SetSlice(pc->stores, icontrol - istores,
icontrol - istores, rotated))
{
Py_XDECREF(rotated);
goto error;
}
Py_DECREF(rotated);
// That just did:
// rotated = pc_stores[:rotations]
// del pc_stores[:rotations]
// pc_stores[icontrol-istores:icontrol-istores] = rotated
// Do the same thing to the stack, using several
// rotations:
while (rotations--) {
if (!pattern_helper_rotate(c, icontrol + 1)){
goto error;
}
}
}
}
}
assert(control);
if (!compiler_addop_j(c, JUMP_FORWARD, end) ||
!emit_and_reset_fail_pop(c, pc))
{
goto error;
}
}
Py_DECREF(pc->stores);
*pc = old_pc;
Py_INCREF(pc->stores);
// Need to NULL this for the PyObject_Free call in the error block.
old_pc.fail_pop = NULL;
// No match. Pop the remaining copy of the subject and fail:
if (!compiler_addop(c, POP_TOP) || !jump_to_fail_pop(c, pc, JUMP_FORWARD)) {
goto error;
}
compiler_use_next_block(c, end);
Py_ssize_t nstores = PyList_GET_SIZE(control);
// There's a bunch of stuff on the stack between where the new stores
// are and where they need to be:
// - The other stores.
// - A copy of the subject.
// - Anything else that may be on top of the stack.
// - Any previous stores we've already stashed away on the stack.
Py_ssize_t nrots = nstores + 1 + pc->on_top + PyList_GET_SIZE(pc->stores);
for (Py_ssize_t i = 0; i < nstores; i++) {
// Rotate this capture to its proper place on the stack:
if (!pattern_helper_rotate(c, nrots)) {
goto error;
}
// Update the list of previous stores with this new name, checking for
// duplicates:
PyObject *name = PyList_GET_ITEM(control, i);
int dupe = PySequence_Contains(pc->stores, name);
if (dupe < 0) {
goto error;
}
if (dupe) {
compiler_error_duplicate_store(c, name);
goto error;
}
if (PyList_Append(pc->stores, name)) {
goto error;
}
}
Py_DECREF(old_pc.stores);
Py_DECREF(control);
// NOTE: Returning macros are safe again.
// Pop the copy of the subject:
ADDOP(c, POP_TOP);
return 1;
diff:
compiler_error(c, "alternative patterns bind different names");
error:
PyObject_Free(old_pc.fail_pop);
Py_DECREF(old_pc.stores);
Py_XDECREF(control);
return 0;
}
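// For example, `case (x, y) | [y, x]:` is accepted because both
// alternatives bind the same set {x, y}; the second alternative's stores
// are rotated into control order (x first) so that compiler_match_inner()
// can store them uniformly. By contrast, `case x | (x, y):` fails with
// "alternative patterns bind different names".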
static int
compiler_pattern_sequence(struct compiler *c, pattern_ty p, pattern_context *pc)
{
assert(p->kind == MatchSequence_kind);
asdl_pattern_seq *patterns = p->v.MatchSequence.patterns;
Py_ssize_t size = asdl_seq_LEN(patterns);
Py_ssize_t star = -1;
int only_wildcard = 1;
int star_wildcard = 0;
// Find a starred name, if it exists. There may be at most one:
for (Py_ssize_t i = 0; i < size; i++) {
pattern_ty pattern = asdl_seq_GET(patterns, i);
if (pattern->kind == MatchStar_kind) {
if (star >= 0) {
const char *e = "multiple starred names in sequence pattern";
return compiler_error(c, e);
}
star_wildcard = WILDCARD_STAR_CHECK(pattern);
only_wildcard &= star_wildcard;
star = i;
continue;
}
only_wildcard &= WILDCARD_CHECK(pattern);
}
// We need to keep the subject on top during the sequence and length checks:
pc->on_top++;
ADDOP(c, MATCH_SEQUENCE);
RETURN_IF_FALSE(jump_to_fail_pop(c, pc, POP_JUMP_IF_FALSE));
if (star < 0) {
// No star: len(subject) == size
ADDOP(c, GET_LEN);
ADDOP_LOAD_CONST_NEW(c, PyLong_FromSsize_t(size));
ADDOP_COMPARE(c, Eq);
RETURN_IF_FALSE(jump_to_fail_pop(c, pc, POP_JUMP_IF_FALSE));
}
else if (size > 1) {
// Star: len(subject) >= size - 1
ADDOP(c, GET_LEN);
ADDOP_LOAD_CONST_NEW(c, PyLong_FromSsize_t(size - 1));
ADDOP_COMPARE(c, GtE);
RETURN_IF_FALSE(jump_to_fail_pop(c, pc, POP_JUMP_IF_FALSE));
}
// Whatever comes next should consume the subject:
pc->on_top--;
if (only_wildcard) {
// Patterns like: [] / [_] / [_, _] / [*_] / [_, *_] / [_, _, *_] / etc.
ADDOP(c, POP_TOP);
}
else if (star_wildcard) {
RETURN_IF_FALSE(pattern_helper_sequence_subscr(c, patterns, star, pc));
}
else {
RETURN_IF_FALSE(pattern_helper_sequence_unpack(c, patterns, star, pc));
}
return 1;
}
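// Concretely: `case [_, _]:` compiles to MATCH_SEQUENCE plus a
// len(subject) == 2 check and a POP_TOP (only_wildcard), `case [*_]:`
// needs no length check at all, and `case [first, *_, last]:` takes the
// subscript path above rather than unpacking.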
static int
compiler_pattern_value(struct compiler *c, pattern_ty p, pattern_context *pc)
{
assert(p->kind == MatchValue_kind);
expr_ty value = p->v.MatchValue.value;
if (!MATCH_VALUE_EXPR(value)) {
const char *e = "patterns may only match literals and attribute lookups";
return compiler_error(c, e);
}
VISIT(c, expr, value);
ADDOP_COMPARE(c, Eq);
RETURN_IF_FALSE(jump_to_fail_pop(c, pc, POP_JUMP_IF_FALSE));
return 1;
}
static int
compiler_pattern_singleton(struct compiler *c, pattern_ty p, pattern_context *pc)
{
assert(p->kind == MatchSingleton_kind);
ADDOP_LOAD_CONST(c, p->v.MatchSingleton.value);
ADDOP_COMPARE(c, Is);
RETURN_IF_FALSE(jump_to_fail_pop(c, pc, POP_JUMP_IF_FALSE));
return 1;
}
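// This is why `case None:` (and True/False) matches by identity, while
// literal patterns such as `case 0:` go through compiler_pattern_value()
// and compare with Eq -- matching PEP 634's semantics for singletons.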
static int
compiler_pattern(struct compiler *c, pattern_ty p, pattern_context *pc)
{
SET_LOC(c, p);
switch (p->kind) {
case MatchValue_kind:
return compiler_pattern_value(c, p, pc);
case MatchSingleton_kind:
return compiler_pattern_singleton(c, p, pc);
case MatchSequence_kind:
return compiler_pattern_sequence(c, p, pc);
case MatchMapping_kind:
return compiler_pattern_mapping(c, p, pc);
case MatchClass_kind:
return compiler_pattern_class(c, p, pc);
case MatchStar_kind:
return compiler_pattern_star(c, p, pc);
case MatchAs_kind:
return compiler_pattern_as(c, p, pc);
case MatchOr_kind:
return compiler_pattern_or(c, p, pc);
}
// AST validator shouldn't let this happen, but if it does,
// just fail, don't crash out of the interpreter
const char *e = "invalid match pattern node in AST (kind=%d)";
return compiler_error(c, e, p->kind);
}
static int
compiler_match_inner(struct compiler *c, stmt_ty s, pattern_context *pc)
{
VISIT(c, expr, s->v.Match.subject);
basicblock *end;
RETURN_IF_FALSE(end = compiler_new_block(c));
Py_ssize_t cases = asdl_seq_LEN(s->v.Match.cases);
assert(cases > 0);
match_case_ty m = asdl_seq_GET(s->v.Match.cases, cases - 1);
int has_default = WILDCARD_CHECK(m->pattern) && 1 < cases;
for (Py_ssize_t i = 0; i < cases - has_default; i++) {
m = asdl_seq_GET(s->v.Match.cases, i);
SET_LOC(c, m->pattern);
// Only copy the subject if we're *not* on the last case:
if (i != cases - has_default - 1) {
ADDOP_I(c, COPY, 1);
}
RETURN_IF_FALSE(pc->stores = PyList_New(0));
// Irrefutable cases must be either guarded, last, or both:
pc->allow_irrefutable = m->guard != NULL || i == cases - 1;
pc->fail_pop = NULL;
pc->fail_pop_size = 0;
pc->on_top = 0;
// NOTE: Can't use returning macros here (they'll leak pc->stores)!
if (!compiler_pattern(c, m->pattern, pc)) {
Py_DECREF(pc->stores);
return 0;
}
assert(!pc->on_top);
// It's a match! Store all of the captured names (they're on the stack).
Py_ssize_t nstores = PyList_GET_SIZE(pc->stores);
for (Py_ssize_t n = 0; n < nstores; n++) {
PyObject *name = PyList_GET_ITEM(pc->stores, n);
if (!compiler_nameop(c, name, Store)) {
Py_DECREF(pc->stores);
return 0;
}
}
Py_DECREF(pc->stores);
// NOTE: Returning macros are safe again.
if (m->guard) {
RETURN_IF_FALSE(ensure_fail_pop(c, pc, 0));
RETURN_IF_FALSE(compiler_jump_if(c, m->guard, pc->fail_pop[0], 0));
}
// Success! Pop the subject off, we're done with it:
if (i != cases - has_default - 1) {
ADDOP(c, POP_TOP);
}
VISIT_SEQ(c, stmt, m->body);
ADDOP_JUMP(c, JUMP_FORWARD, end);
// If the pattern fails to match, we want the line number of the
// cleanup to be associated with the failed pattern, not the last line
// of the body
SET_LOC(c, m->pattern);
RETURN_IF_FALSE(emit_and_reset_fail_pop(c, pc));
}
if (has_default) {
// A trailing "case _" is common, and lets us save a bit of redundant
// pushing and popping in the loop above:
m = asdl_seq_GET(s->v.Match.cases, cases - 1);
SET_LOC(c, m->pattern);
if (cases == 1) {
// No matches. Done with the subject:
ADDOP(c, POP_TOP);
}
else {
// Show line coverage for default case (it doesn't create bytecode)
ADDOP(c, NOP);
}
if (m->guard) {
RETURN_IF_FALSE(compiler_jump_if(c, m->guard, end, 0));
}
VISIT_SEQ(c, stmt, m->body);
}
compiler_use_next_block(c, end);
return 1;
}
static int
compiler_match(struct compiler *c, stmt_ty s)
{
pattern_context pc;
pc.fail_pop = NULL;
int result = compiler_match_inner(c, s, &pc);
PyObject_Free(pc.fail_pop);
return result;
}
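/*
 * The generated code can be inspected from Python; for example (a sketch,
 * the exact instruction stream depends on the interpreter version):
 *
 *     import dis
 *     dis.dis("match p:\n case (x, y): print(x, y)\n case _: pass")
 *
 * shows the COPY of the subject, MATCH_SEQUENCE, the length check,
 * UNPACK_SEQUENCE, and the deferred STORE_NAME instructions described
 * above.
 */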
#undef WILDCARD_CHECK
#undef WILDCARD_STAR_CHECK
/* End of the compiler section, beginning of the assembler section */
/* do depth-first search of basic block graph, starting with block.
post records the block indices in post-order.
XXX must handle implicit jumps from one block to next
*/
struct assembler {
PyObject *a_bytecode; /* bytes containing bytecode */
int a_offset; /* offset into bytecode */
int a_nblocks; /* number of reachable blocks */
PyObject *a_lnotab; /* bytes containing lnotab */
PyObject* a_enotab; /* bytes containing enotab */
PyObject* a_cnotab; /* bytes containing cnotab */
int a_lnotab_off; /* offset into lnotab */
int a_enotab_off; /* offset into enotab */
int a_cnotab_off; /* offset into cnotab */
PyObject *a_except_table; /* bytes containing exception table */
int a_except_table_off; /* offset into exception table */
int a_prevlineno; /* lineno of last emitted line in line table */
int a_prev_end_lineno; /* end_lineno of last emitted line in line table */
int a_lineno; /* lineno of last emitted instruction */
int a_end_lineno; /* end_lineno of last emitted instruction */
int a_lineno_start; /* bytecode start offset of current lineno */
int a_end_lineno_start; /* bytecode start offset of current end_lineno */
basicblock *a_entry;
};
Py_LOCAL_INLINE(void)
stackdepth_push(basicblock ***sp, basicblock *b, int depth)
{
assert(b->b_startdepth < 0 || b->b_startdepth == depth);
if (b->b_startdepth < depth && b->b_startdepth < 100) {
assert(b->b_startdepth < 0);
b->b_startdepth = depth;
*(*sp)++ = b;
}
}
/* Find the flow path that needs the largest stack. We assume that
* cycles in the flow graph have no net effect on the stack depth.
*/
static int
stackdepth(struct compiler *c)
{
basicblock *b, *entryblock = NULL;
basicblock **stack, **sp;
int nblocks = 0, maxdepth = 0;
for (b = c->u->u_blocks; b != NULL; b = b->b_list) {
b->b_startdepth = INT_MIN;
entryblock = b;
nblocks++;
}
assert(entryblock != NULL);
stack = (basicblock **)PyObject_Malloc(sizeof(basicblock *) * nblocks);
if (!stack) {
PyErr_NoMemory();
return -1;
}
sp = stack;
if (c->u->u_ste->ste_generator || c->u->u_ste->ste_coroutine) {
stackdepth_push(&sp, entryblock, 1);
} else {
stackdepth_push(&sp, entryblock, 0);
}
while (sp != stack) {
b = *--sp;
int depth = b->b_startdepth;
assert(depth >= 0);
basicblock *next = b->b_next;
for (int i = 0; i < b->b_iused; i++) {
struct instr *instr = &b->b_instr[i];
int effect = stack_effect(instr->i_opcode, instr->i_oparg, 0);
if (effect == PY_INVALID_STACK_EFFECT) {
PyErr_Format(PyExc_SystemError,
"compiler stack_effect(opcode=%d, arg=%i) failed",
instr->i_opcode, instr->i_oparg);
return -1;
}
int new_depth = depth + effect;
if (new_depth > maxdepth) {
maxdepth = new_depth;
}
assert(depth >= 0); /* invalid code or bug in stackdepth() */
if (is_jump(instr)) {
effect = stack_effect(instr->i_opcode, instr->i_oparg, 1);
assert(effect != PY_INVALID_STACK_EFFECT);
int target_depth = depth + effect;
if (target_depth > maxdepth) {
maxdepth = target_depth;
}
assert(target_depth >= 0); /* invalid code or bug in stackdepth() */
stackdepth_push(&sp, instr->i_target, target_depth);
}
depth = new_depth;
if (instr->i_opcode == JUMP_ABSOLUTE ||
instr->i_opcode == JUMP_NO_INTERRUPT ||
instr->i_opcode == JUMP_FORWARD ||
instr->i_opcode == RETURN_VALUE ||
instr->i_opcode == RAISE_VARARGS ||
instr->i_opcode == RERAISE)
{
/* remaining code is dead */
next = NULL;
break;
}
}
if (next != NULL) {
assert(b->b_nofallthrough == 0);
stackdepth_push(&sp, next, depth);
}
}
PyObject_Free(stack);
return maxdepth;
}
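/*
 * For example, compiling `def f(a, b, c): return a + (b * c)` loads a, b
 * and c before the first binary operation, so stackdepth() reports a
 * maximum depth of 3 for the deepest path. Generators and coroutines start
 * at depth 1 (see above) because the value passed to send() is already on
 * the stack when the frame resumes.
 */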
static int
assemble_init(struct assembler *a, int nblocks, int firstlineno)
{
memset(a, 0, sizeof(struct assembler));
a->a_prevlineno = a->a_lineno = firstlineno;
a->a_prev_end_lineno = a->a_end_lineno = firstlineno;
a->a_lnotab = NULL;
a->a_enotab = NULL;
a->a_cnotab = NULL;
a->a_cnotab_off = 0;
a->a_except_table = NULL;
a->a_bytecode = PyBytes_FromStringAndSize(NULL, DEFAULT_CODE_SIZE);
if (a->a_bytecode == NULL) {
goto error;
}
a->a_lnotab = PyBytes_FromStringAndSize(NULL, DEFAULT_LNOTAB_SIZE);
if (a->a_lnotab == NULL) {
goto error;
}
a->a_enotab = PyBytes_FromStringAndSize(NULL, DEFAULT_LNOTAB_SIZE);
if (a->a_enotab == NULL) {
goto error;
}
a->a_cnotab = PyBytes_FromStringAndSize(NULL, DEFAULT_CNOTAB_SIZE);
if (a->a_cnotab == NULL) {
goto error;
}
a->a_except_table = PyBytes_FromStringAndSize(NULL, DEFAULT_LNOTAB_SIZE);
if (a->a_except_table == NULL) {
goto error;
}
if ((size_t)nblocks > SIZE_MAX / sizeof(basicblock *)) {
PyErr_NoMemory();
goto error;
}
return 1;
error:
Py_XDECREF(a->a_bytecode);
Py_XDECREF(a->a_lnotab);
Py_XDECREF(a->a_enotab);
Py_XDECREF(a->a_cnotab);
Py_XDECREF(a->a_except_table);
return 0;
}
static void
assemble_free(struct assembler *a)
{
Py_XDECREF(a->a_bytecode);
Py_XDECREF(a->a_lnotab);
Py_XDECREF(a->a_enotab);
Py_XDECREF(a->a_cnotab);
Py_XDECREF(a->a_except_table);
}
static int
blocksize(basicblock *b)
{
int i;
int size = 0;
for (i = 0; i < b->b_iused; i++) {
size += instr_size(&b->b_instr[i]);
}
return size;
}
static int
assemble_emit_table_pair(struct assembler* a, PyObject** table, int* offset,
int left, int right)
{
Py_ssize_t len = PyBytes_GET_SIZE(*table);
if (*offset + 2 >= len) {
if (_PyBytes_Resize(table, len * 2) < 0)
return 0;
}
unsigned char* table_entry = (unsigned char*)PyBytes_AS_STRING(*table);
table_entry += *offset;
*offset += 2;
*table_entry++ = left;
*table_entry++ = right;
return 1;
}
static int
is_block_push(struct instr *instr)
{
int opcode = instr->i_opcode;
return opcode == SETUP_FINALLY || opcode == SETUP_WITH || opcode == SETUP_CLEANUP;
}
static basicblock *
push_except_block(ExceptStack *stack, struct instr *setup) {
assert(is_block_push(setup));
int opcode = setup->i_opcode;
basicblock * target = setup->i_target;
if (opcode == SETUP_WITH || opcode == SETUP_CLEANUP) {
target->b_preserve_lasti = 1;
}
stack->handlers[++stack->depth] = target;
return target;
}
static basicblock *
pop_except_block(ExceptStack *stack) {
assert(stack->depth > 0);
return stack->handlers[--stack->depth];
}
static basicblock *
except_stack_top(ExceptStack *stack) {
return stack->handlers[stack->depth];
}
static ExceptStack *
make_except_stack(void) {
ExceptStack *new = PyMem_Malloc(sizeof(ExceptStack));
if (new == NULL) {
PyErr_NoMemory();
return NULL;
}
new->depth = 0;
new->handlers[0] = NULL;
return new;
}
static ExceptStack *
copy_except_stack(ExceptStack *stack) {
ExceptStack *copy = PyMem_Malloc(sizeof(ExceptStack));
if (copy == NULL) {
PyErr_NoMemory();
return NULL;
}
memcpy(copy, stack, sizeof(ExceptStack));
return copy;
}
static int
label_exception_targets(basicblock *entry) {
int nblocks = 0;
for (basicblock *b = entry; b != NULL; b = b->b_next) {
b->b_visited = 0;
nblocks++;
}
basicblock **todo_stack = PyMem_Malloc(sizeof(basicblock *)*nblocks);
if (todo_stack == NULL) {
PyErr_NoMemory();
return -1;
}
ExceptStack *except_stack = make_except_stack();
if (except_stack == NULL) {
PyMem_Free(todo_stack);
PyErr_NoMemory();
return -1;
}
except_stack->depth = 0;
todo_stack[0] = entry;
entry->b_visited = 1;
entry->b_exceptstack = except_stack;
basicblock **todo = &todo_stack[1];
basicblock *handler = NULL;
while (todo > todo_stack) {
todo--;
basicblock *b = todo[0];
assert(b->b_visited == 1);
except_stack = b->b_exceptstack;
assert(except_stack != NULL);
b->b_exceptstack = NULL;
handler = except_stack_top(except_stack);
for (int i = 0; i < b->b_iused; i++) {
struct instr *instr = &b->b_instr[i];
if (is_block_push(instr)) {
if (!instr->i_target->b_visited) {
ExceptStack *copy = copy_except_stack(except_stack);
if (copy == NULL) {
goto error;
}
instr->i_target->b_exceptstack = copy;
todo[0] = instr->i_target;
instr->i_target->b_visited = 1;
todo++;
}
handler = push_except_block(except_stack, instr);
}
else if (instr->i_opcode == POP_BLOCK) {
handler = pop_except_block(except_stack);
}
else if (is_jump(instr)) {
instr->i_except = handler;
assert(i == b->b_iused -1);
if (!instr->i_target->b_visited) {
if (b->b_nofallthrough == 0) {
ExceptStack *copy = copy_except_stack(except_stack);
if (copy == NULL) {
goto error;
}
instr->i_target->b_exceptstack = copy;
}
else {
instr->i_target->b_exceptstack = except_stack;
except_stack = NULL;
}
todo[0] = instr->i_target;
instr->i_target->b_visited = 1;
todo++;
}
}
else {
instr->i_except = handler;
}
}
if (b->b_nofallthrough == 0 && !b->b_next->b_visited) {
assert(except_stack != NULL);
b->b_next->b_exceptstack = except_stack;
todo[0] = b->b_next;
b->b_next->b_visited = 1;
todo++;
}
else if (except_stack != NULL) {
PyMem_Free(except_stack);
}
}
#ifdef Py_DEBUG
for (basicblock *b = entry; b != NULL; b = b->b_next) {
assert(b->b_exceptstack == NULL);
}
#endif
PyMem_Free(todo_stack);
return 0;
error:
PyMem_Free(todo_stack);
PyMem_Free(except_stack);
return -1;
}
static void
convert_exception_handlers_to_nops(basicblock *entry) {
for (basicblock *b = entry; b != NULL; b = b->b_next) {
for (int i = 0; i < b->b_iused; i++) {
struct instr *instr = &b->b_instr[i];
if (is_block_push(instr) || instr->i_opcode == POP_BLOCK) {
instr->i_opcode = NOP;
}
}
}
}
static inline void
write_except_byte(struct assembler *a, int byte) {
unsigned char *p = (unsigned char *) PyBytes_AS_STRING(a->a_except_table);
p[a->a_except_table_off++] = byte;
}
#define CONTINUATION_BIT 64
static void
assemble_emit_exception_table_item(struct assembler *a, int value, int msb)
{
assert ((msb | 128) == 128);
assert(value >= 0 && value < (1 << 30));
if (value >= 1 << 24) {
write_except_byte(a, (value >> 24) | CONTINUATION_BIT | msb);
msb = 0;
}
if (value >= 1 << 18) {
write_except_byte(a, ((value >> 18)&0x3f) | CONTINUATION_BIT | msb);
msb = 0;
}
if (value >= 1 << 12) {
write_except_byte(a, ((value >> 12)&0x3f) | CONTINUATION_BIT | msb);
msb = 0;
}
if (value >= 1 << 6) {
write_except_byte(a, ((value >> 6)&0x3f) | CONTINUATION_BIT | msb);
msb = 0;
}
write_except_byte(a, (value&0x3f) | msb);
}
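/*
 * The encoding is a big-endian varint with 6 payload bits per byte:
 * CONTINUATION_BIT (bit 6) is set on every byte except the last, and the
 * msb (bit 7) marks the first byte of a table entry. For example,
 * value == 300 with msb == (1<<7) takes two bytes:
 *
 *     300 == 4*64 + 44  ->  (4 | 64 | 128) == 0xC4, then 44 == 0x2C
 */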
/* See Objects/exception_handling_notes.txt for details of layout */
#define MAX_SIZE_OF_ENTRY 20
static int
assemble_emit_exception_table_entry(struct assembler *a, int start, int end, basicblock *handler)
{
Py_ssize_t len = PyBytes_GET_SIZE(a->a_except_table);
if (a->a_except_table_off + MAX_SIZE_OF_ENTRY >= len) {
if (_PyBytes_Resize(&a->a_except_table, len * 2) < 0)
return 0;
}
int size = end-start;
assert(end > start);
int target = handler->b_offset;
int depth = handler->b_startdepth - 1;
if (handler->b_preserve_lasti) {
depth -= 1;
}
assert(depth >= 0);
int depth_lasti = (depth<<1) | handler->b_preserve_lasti;
assemble_emit_exception_table_item(a, start, (1<<7));
assemble_emit_exception_table_item(a, size, 0);
assemble_emit_exception_table_item(a, target, 0);
assemble_emit_exception_table_item(a, depth_lasti, 0);
return 1;
}
static int
assemble_exception_table(struct assembler *a)
{
basicblock *b;
int ioffset = 0;
basicblock *handler = NULL;
int start = -1;
for (b = a->a_entry; b != NULL; b = b->b_next) {
ioffset = b->b_offset;
for (int i = 0; i < b->b_iused; i++) {
struct instr *instr = &b->b_instr[i];
if (instr->i_except != handler) {
if (handler != NULL) {
RETURN_IF_FALSE(assemble_emit_exception_table_entry(a, start, ioffset, handler));
}
start = ioffset;
handler = instr->i_except;
}
ioffset += instr_size(instr);
}
}
if (handler != NULL) {
RETURN_IF_FALSE(assemble_emit_exception_table_entry(a, start, ioffset, handler));
}
return 1;
}
/* Appends a range to the end of the line number table. See
* Objects/lnotab_notes.txt for the description of the line number table. */
static int
assemble_line_range(struct assembler* a, int current, PyObject** table,
int* prev, int* start, int* offset)
{
int ldelta, bdelta;
bdelta = (a->a_offset - *start) * sizeof(_Py_CODEUNIT);
if (bdelta == 0) {
return 1;
}
if (current < 0) {
ldelta = -128;
}
else {
ldelta = current - *prev;
*prev = current;
while (ldelta > 127) {
if (!assemble_emit_table_pair(a, table, offset, 0, 127)) {
return 0;
}
ldelta -= 127;
}
while (ldelta < -127) {
if (!assemble_emit_table_pair(a, table, offset, 0, -127)) {
return 0;
}
ldelta += 127;
}
}
assert(-128 <= ldelta && ldelta < 128);
while (bdelta > 254) {
if (!assemble_emit_table_pair(a, table, offset, 254, ldelta)) {
return 0;
}
ldelta = current < 0 ? -128 : 0;
bdelta -= 254;
}
if (!assemble_emit_table_pair(a, table, offset, bdelta, ldelta)) {
return 0;
}
*start = a->a_offset;
return 1;
}
static int
assemble_start_line_range(struct assembler* a) {
return assemble_line_range(a, a->a_lineno, &a->a_lnotab,
&a->a_prevlineno, &a->a_lineno_start, &a->a_lnotab_off);
}
static int
assemble_end_line_range(struct assembler* a) {
return assemble_line_range(a, a->a_end_lineno, &a->a_enotab,
&a->a_prev_end_lineno, &a->a_end_lineno_start, &a->a_enotab_off);
}
static int
assemble_lnotab(struct assembler* a, struct instr* i)
{
if (i->i_lineno == a->a_lineno) {
return 1;
}
if (!assemble_start_line_range(a)) {
return 0;
}
a->a_lineno = i->i_lineno;
return 1;
}
static int
assemble_enotab(struct assembler* a, struct instr* i)
{
if (i->i_end_lineno == a->a_end_lineno) {
return 1;
}
if (!assemble_end_line_range(a)) {
return 0;
}
a->a_end_lineno = i->i_end_lineno;
return 1;
}
static int
assemble_cnotab(struct assembler* a, struct instr* i, int instr_size)
{
Py_ssize_t len = PyBytes_GET_SIZE(a->a_cnotab);
int difference = instr_size * 2;
if (a->a_cnotab_off + difference >= len) {
if (_PyBytes_Resize(&a->a_cnotab, difference + (len * 2)) < 0) {
return 0;
}
}
unsigned char* cnotab = (unsigned char*)PyBytes_AS_STRING(a->a_cnotab);
cnotab += a->a_cnotab_off;
a->a_cnotab_off += difference;
for (int j = 0; j < instr_size; j++) {
if (i->i_col_offset > 255 || i->i_end_col_offset > 255) {
*cnotab++ = 0;
*cnotab++ = 0;
continue;
}
*cnotab++ = i->i_col_offset + 1;
*cnotab++ = i->i_end_col_offset + 1;
}
return 1;
}
/* assemble_emit()
Extend the bytecode with a new instruction.
Update lnotab if necessary.
*/
static int
assemble_emit(struct assembler *a, struct instr *i)
{
Py_ssize_t len = PyBytes_GET_SIZE(a->a_bytecode);
_Py_CODEUNIT *code;
int size = instr_size(i);
if (i->i_lineno && !assemble_lnotab(a, i)) {
return 0;
}
if (!assemble_enotab(a, i)) {
return 0;
}
if (!assemble_cnotab(a, i, size)) {
return 0;
}
if (a->a_offset + size >= len / (int)sizeof(_Py_CODEUNIT)) {
if (len > PY_SSIZE_T_MAX / 2)
return 0;
if (_PyBytes_Resize(&a->a_bytecode, len * 2) < 0)
return 0;
}
code = (_Py_CODEUNIT *)PyBytes_AS_STRING(a->a_bytecode) + a->a_offset;
a->a_offset += size;
write_instr(code, i, size);
return 1;
}
static void
normalize_jumps(struct assembler *a)
{
for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
b->b_visited = 0;
}
for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
b->b_visited = 1;
if (b->b_iused == 0) {
continue;
}
struct instr *last = &b->b_instr[b->b_iused-1];
if (last->i_opcode == JUMP_ABSOLUTE) {
if (last->i_target->b_visited == 0) {
last->i_opcode = JUMP_FORWARD;
}
}
if (last->i_opcode == JUMP_FORWARD) {
if (last->i_target->b_visited == 1) {
last->i_opcode = JUMP_ABSOLUTE;
}
}
}
}
static void
assemble_jump_offsets(struct assembler *a, struct compiler *c)
{
basicblock *b;
int bsize, totsize, extended_arg_recompile;
int i;
/* Compute the size of each block and fixup jump args.
Replace block pointer with position in bytecode. */
do {
totsize = 0;
for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
bsize = blocksize(b);
b->b_offset = totsize;
totsize += bsize;
}
extended_arg_recompile = 0;
for (b = c->u->u_blocks; b != NULL; b = b->b_list) {
bsize = b->b_offset;
for (i = 0; i < b->b_iused; i++) {
struct instr *instr = &b->b_instr[i];
int isize = instr_size(instr);
/* Relative jumps are computed relative to
the instruction pointer after fetching
the jump instruction.
*/
bsize += isize;
if (is_jump(instr)) {
instr->i_oparg = instr->i_target->b_offset;
if (is_relative_jump(instr)) {
instr->i_oparg -= bsize;
}
if (instr_size(instr) != isize) {
extended_arg_recompile = 1;
}
}
}
}
/* XXX: This is an awful hack that could hurt performance, but
on the bright side it should work until we come up
with a better solution.
The issue is that in the first loop blocksize() is called
which calls instr_size() which requires i_oparg be set
appropriately. There is a bootstrap problem because
i_oparg is calculated in the second loop above.
So we loop until we stop seeing new EXTENDED_ARGs.
The only EXTENDED_ARGs that could be popping up are
ones in jump instructions. So this should converge
fairly quickly.
*/
} while (extended_arg_recompile);
}
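/* Return a tuple of the dict's keys, ordered by their integer values
 * (the indices assigned when the keys were interned), biased by
 * *offset*. */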
static PyObject *
dict_keys_inorder(PyObject *dict, Py_ssize_t offset)
{
PyObject *tuple, *k, *v;
Py_ssize_t i, pos = 0, size = PyDict_GET_SIZE(dict);
tuple = PyTuple_New(size);
if (tuple == NULL)
return NULL;
while (PyDict_Next(dict, &pos, &k, &v)) {
i = PyLong_AS_LONG(v);
Py_INCREF(k);
assert((i - offset) < size);
assert((i - offset) >= 0);
PyTuple_SET_ITEM(tuple, i - offset, k);
}
return tuple;
}
static PyObject *
consts_dict_keys_inorder(PyObject *dict)
{
PyObject *consts, *k, *v;
Py_ssize_t i, pos = 0, size = PyDict_GET_SIZE(dict);
consts = PyList_New(size); /* PyCode_Optimize() requires a list */
if (consts == NULL)
return NULL;
while (PyDict_Next(dict, &pos, &k, &v)) {
i = PyLong_AS_LONG(v);
/* The keys of the dictionary can be tuples wrapping a constant.
* (see compiler_add_o and _PyCode_ConstantKey). In that case
* the object we want is always second. */
if (PyTuple_CheckExact(k)) {
k = PyTuple_GET_ITEM(k, 1);
}
Py_INCREF(k);
assert(i < size);
assert(i >= 0);
PyList_SET_ITEM(consts, i, k);
}
return consts;
}
static int
compute_code_flags(struct compiler *c)
{
PySTEntryObject *ste = c->u->u_ste;
int flags = 0;
if (ste->ste_type == FunctionBlock) {
flags |= CO_NEWLOCALS | CO_OPTIMIZED;
if (ste->ste_nested)
flags |= CO_NESTED;
if (ste->ste_generator && !ste->ste_coroutine)
flags |= CO_GENERATOR;
if (!ste->ste_generator && ste->ste_coroutine)
flags |= CO_COROUTINE;
if (ste->ste_generator && ste->ste_coroutine)
flags |= CO_ASYNC_GENERATOR;
if (ste->ste_varargs)
flags |= CO_VARARGS;
if (ste->ste_varkeywords)
flags |= CO_VARKEYWORDS;
}
/* (Only) inherit compilerflags in PyCF_MASK */
flags |= (c->c_flags->cf_flags & PyCF_MASK);
if ((IS_TOP_LEVEL_AWAIT(c)) &&
ste->ste_coroutine &&
!ste->ste_generator) {
flags |= CO_COROUTINE;
}
return flags;
}
// Merge *obj* with constant cache.
// Unlike merge_consts_recursive(), this function doesn't work recursively.
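// Illustrative example: merging the tuple ("a", "b") a second time yields
// the already-cached tuple object, while 1 and 1.0 stay distinct because
// _PyCode_ConstantKey() includes the type in the key.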
static int
merge_const_one(struct compiler *c, PyObject **obj)
{
PyObject *key = _PyCode_ConstantKey(*obj);
if (key == NULL) {
return 0;
}
    // t is a borrowed reference
    PyObject *t = PyDict_SetDefault(c->c_const_cache, key, key);
    Py_DECREF(key);
    if (t == NULL) {
        return 0;
    }
    if (t == key) {  // obj is a new constant.
        return 1;
    }
    if (PyTuple_CheckExact(t)) {
        // t is still a borrowed reference
        t = PyTuple_GET_ITEM(t, 1);
}
Py_INCREF(t);
Py_DECREF(*obj);
*obj = t;
return 1;
}
// This is in codeobject.c.
extern void _Py_set_localsplus_info(int, PyObject *, unsigned char,
PyObject *, PyObject *);
static void
compute_localsplus_info(struct compiler *c, int nlocalsplus,
PyObject *names, PyObject *kinds)
{
PyObject *k, *v;
Py_ssize_t pos = 0;
while (PyDict_Next(c->u->u_varnames, &pos, &k, &v)) {
int offset = (int)PyLong_AS_LONG(v);
assert(offset >= 0);
assert(offset < nlocalsplus);
// For now we do not distinguish arg kinds.
_PyLocals_Kind kind = CO_FAST_LOCAL;
if (PyDict_GetItem(c->u->u_cellvars, k) != NULL) {
kind |= CO_FAST_CELL;
}
_Py_set_localsplus_info(offset, k, kind, names, kinds);
}
int nlocals = (int)PyDict_GET_SIZE(c->u->u_varnames);
// This counter mirrors the fix done in fix_cell_offsets().
int numdropped = 0;
pos = 0;
while (PyDict_Next(c->u->u_cellvars, &pos, &k, &v)) {
if (PyDict_GetItem(c->u->u_varnames, k) != NULL) {
// Skip cells that are already covered by locals.
numdropped += 1;
continue;
}
int offset = (int)PyLong_AS_LONG(v);
assert(offset >= 0);
offset += nlocals - numdropped;
assert(offset < nlocalsplus);
_Py_set_localsplus_info(offset, k, CO_FAST_CELL, names, kinds);
}
pos = 0;
while (PyDict_Next(c->u->u_freevars, &pos, &k, &v)) {
int offset = (int)PyLong_AS_LONG(v);
assert(offset >= 0);
offset += nlocals - numdropped;
assert(offset < nlocalsplus);
_Py_set_localsplus_info(offset, k, CO_FAST_FREE, names, kinds);
}
}
static PyCodeObject *
makecode(struct compiler *c, struct assembler *a, PyObject *constslist,
int maxdepth, int nlocalsplus)
{
PyCodeObject *co = NULL;
PyObject *names = NULL;
PyObject *consts = NULL;
PyObject *localsplusnames = NULL;
PyObject *localspluskinds = NULL;
names = dict_keys_inorder(c->u->u_names, 0);
if (!names) {
goto error;
}
if (!merge_const_one(c, &names)) {
goto error;
}
int flags = compute_code_flags(c);
if (flags < 0) {
goto error;
}
consts = PyList_AsTuple(constslist); /* PyCode_New requires a tuple */
if (consts == NULL) {
goto error;
}
if (!merge_const_one(c, &consts)) {
goto error;
}
assert(c->u->u_posonlyargcount < INT_MAX);
assert(c->u->u_argcount < INT_MAX);
assert(c->u->u_kwonlyargcount < INT_MAX);
int posonlyargcount = (int)c->u->u_posonlyargcount;
int posorkwargcount = (int)c->u->u_argcount;
assert(INT_MAX - posonlyargcount - posorkwargcount > 0);
int kwonlyargcount = (int)c->u->u_kwonlyargcount;
localsplusnames = PyTuple_New(nlocalsplus);
if (localsplusnames == NULL) {
goto error;
}
localspluskinds = PyBytes_FromStringAndSize(NULL, nlocalsplus);
if (localspluskinds == NULL) {
goto error;
}
compute_localsplus_info(c, nlocalsplus, localsplusnames, localspluskinds);
struct _PyCodeConstructor con = {
.filename = c->c_filename,
.name = c->u->u_name,
.qualname = c->u->u_qualname ? c->u->u_qualname : c->u->u_name,
.flags = flags,
.code = a->a_bytecode,
.firstlineno = c->u->u_firstlineno,
.linetable = a->a_lnotab,
.endlinetable = a->a_enotab,
.columntable = a->a_cnotab,
.consts = consts,
.names = names,
.localsplusnames = localsplusnames,
.localspluskinds = localspluskinds,
.argcount = posonlyargcount + posorkwargcount,
.posonlyargcount = posonlyargcount,
.kwonlyargcount = kwonlyargcount,
.stacksize = maxdepth,
.exceptiontable = a->a_except_table,
};
if (_PyCode_Validate(&con) < 0) {
goto error;
}
if (!merge_const_one(c, &localsplusnames)) {
goto error;
}
con.localsplusnames = localsplusnames;
co = _PyCode_New(&con);
if (co == NULL) {
goto error;
}
error:
Py_XDECREF(names);
Py_XDECREF(consts);
Py_XDECREF(localsplusnames);
Py_XDECREF(localspluskinds);
return co;
}
/* For debugging purposes only */
#if 0
static void
dump_instr(struct instr *i)
{
const char *jrel = (is_relative_jump(i)) ? "jrel " : "";
const char *jabs = (is_jump(i) && !is_relative_jump(i))? "jabs " : "";
char arg[128];
*arg = '\0';
if (HAS_ARG(i->i_opcode)) {
sprintf(arg, "arg: %d ", i->i_oparg);
}
fprintf(stderr, "line: %d, opcode: %d %s%s%s\n",
i->i_lineno, i->i_opcode, arg, jabs, jrel);
}
static void
dump_basicblock(const basicblock *b)
{
const char *b_return = b->b_return ? "return " : "";
fprintf(stderr, "used: %d, depth: %d, offset: %d %s\n",
b->b_iused, b->b_startdepth, b->b_offset, b_return);
if (b->b_instr) {
int i;
for (i = 0; i < b->b_iused; i++) {
fprintf(stderr, " [%02d] ", i);
dump_instr(b->b_instr + i);
}
}
}
#endif
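/* A hedged, debug-only sketch in the spirit of dump_instr() and
 * dump_basicblock() above: dump every block in the current compilation
 * unit. Kept under #if 0 like its neighbors (it calls dump_basicblock(),
 * which is also compiled out); not part of the build. */
#if 0
static void
dump_cfg(struct compiler *c)
{
    for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) {
        dump_basicblock(b);
    }
}
#endif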
static int
normalize_basic_block(basicblock *bb);
static int
optimize_cfg(struct compiler *c, struct assembler *a, PyObject *consts);
static int
trim_unused_consts(struct compiler *c, struct assembler *a, PyObject *consts);
/* Duplicates exit BBs, so that line numbers can be propagated to them */
static int
duplicate_exits_without_lineno(struct compiler *c);
static int
extend_block(basicblock *bb);
static int *
build_cellfixedoffsets(struct compiler *c)
{
int nlocals = (int)PyDict_GET_SIZE(c->u->u_varnames);
int ncellvars = (int)PyDict_GET_SIZE(c->u->u_cellvars);
int nfreevars = (int)PyDict_GET_SIZE(c->u->u_freevars);
int noffsets = ncellvars + nfreevars;
int *fixed = PyMem_New(int, noffsets);
if (fixed == NULL) {
PyErr_NoMemory();
return NULL;
}
for (int i = 0; i < noffsets; i++) {
fixed[i] = nlocals + i;
}
PyObject *varname, *cellindex;
Py_ssize_t pos = 0;
while (PyDict_Next(c->u->u_cellvars, &pos, &varname, &cellindex)) {
PyObject *varindex = PyDict_GetItem(c->u->u_varnames, varname);
if (varindex != NULL) {
assert(PyLong_AS_LONG(cellindex) < INT_MAX);
assert(PyLong_AS_LONG(varindex) < INT_MAX);
int oldindex = (int)PyLong_AS_LONG(cellindex);
int argoffset = (int)PyLong_AS_LONG(varindex);
fixed[oldindex] = argoffset;
}
}
return fixed;
}
static inline int
insert_instruction(basicblock *block, int pos, struct instr *instr) {
if (compiler_next_instr(block) < 0) {
return -1;
}
for (int i = block->b_iused-1; i > pos; i--) {
block->b_instr[i] = block->b_instr[i-1];
}
block->b_instr[pos] = *instr;
return 0;
}
static int
insert_prefix_instructions(struct compiler *c, basicblock *entryblock,
int *fixed, int nfreevars)
{
int flags = compute_code_flags(c);
if (flags < 0) {
return -1;
}
assert(c->u->u_firstlineno > 0);
/* Add the generator prefix instructions. */
if (flags & (CO_GENERATOR | CO_COROUTINE | CO_ASYNC_GENERATOR)) {
struct instr make_gen = {
.i_opcode = RETURN_GENERATOR,
.i_oparg = 0,
.i_lineno = c->u->u_firstlineno,
.i_col_offset = -1,
.i_end_lineno = c->u->u_firstlineno,
.i_end_col_offset = -1,
.i_target = NULL,
};
if (insert_instruction(entryblock, 0, &make_gen) < 0) {
return -1;
}
struct instr pop_top = {
.i_opcode = POP_TOP,
.i_oparg = 0,
.i_lineno = -1,
.i_col_offset = -1,
.i_end_lineno = -1,
.i_end_col_offset = -1,
.i_target = NULL,
};
if (insert_instruction(entryblock, 1, &pop_top) < 0) {
return -1;
}
}
/* Set up cells for any variable that escapes, to be put in a closure. */
const int ncellvars = (int)PyDict_GET_SIZE(c->u->u_cellvars);
if (ncellvars) {
// c->u->u_cellvars has the cells out of order so we sort them
// before adding the MAKE_CELL instructions. Note that we
// adjust for arg cells, which come first.
const int nvars = ncellvars + (int)PyDict_GET_SIZE(c->u->u_varnames);
int *sorted = PyMem_RawCalloc(nvars, sizeof(int));
if (sorted == NULL) {
PyErr_NoMemory();
return -1;
}
for (int i = 0; i < ncellvars; i++) {
sorted[fixed[i]] = i + 1;
}
for (int i = 0, ncellsused = 0; ncellsused < ncellvars; i++) {
int oldindex = sorted[i] - 1;
if (oldindex == -1) {
continue;
}
struct instr make_cell = {
.i_opcode = MAKE_CELL,
// This will get fixed in offset_derefs().
.i_oparg = oldindex,
.i_lineno = -1,
.i_col_offset = -1,
.i_end_lineno = -1,
.i_end_col_offset = -1,
.i_target = NULL,
};
            if (insert_instruction(entryblock, ncellsused, &make_cell) < 0) {
                PyMem_RawFree(sorted);
                return -1;
            }
ncellsused += 1;
}
PyMem_RawFree(sorted);
}
if (nfreevars) {
struct instr copy_frees = {
.i_opcode = COPY_FREE_VARS,
.i_oparg = nfreevars,
.i_lineno = -1,
.i_col_offset = -1,
.i_end_lineno = -1,
.i_end_col_offset = -1,
.i_target = NULL,
};
        if (insert_instruction(entryblock, 0, &copy_frees) < 0) {
return -1;
}
}
return 0;
}
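/* Illustrative result (a sketch): for a generator with one cell variable
 * and one free variable, the entry block now begins with
 *     COPY_FREE_VARS 1; MAKE_CELL <cell>; RETURN_GENERATOR; POP_TOP
 * (COPY_FREE_VARS is inserted last, but at position 0, so it runs first). */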
/* Make sure that all returns have a line number, even if early passes
* have failed to propagate a correct line number.
* The resulting line number may not be correct according to PEP 626,
* but should be "good enough", and no worse than in older versions. */
static void
guarantee_lineno_for_exits(struct assembler *a, int firstlineno) {
int lineno = firstlineno;
assert(lineno > 0);
for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
if (b->b_iused == 0) {
continue;
}
struct instr *last = &b->b_instr[b->b_iused-1];
if (last->i_lineno < 0) {
if (last->i_opcode == RETURN_VALUE) {
for (int i = 0; i < b->b_iused; i++) {
assert(b->b_instr[i].i_lineno < 0);
b->b_instr[i].i_lineno = lineno;
}
}
}
else {
lineno = last->i_lineno;
}
}
}
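/* Rewrite the oparg of every cell/free instruction from its symbol-table
 * index to a frame offset, using the mapping in *fixedmap*. Returns the
 * number of cells dropped because they alias an argument (mirrored by
 * compute_localsplus_info() above). */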
static int
fix_cell_offsets(struct compiler *c, basicblock *entryblock, int *fixedmap)
{
int nlocals = (int)PyDict_GET_SIZE(c->u->u_varnames);
int ncellvars = (int)PyDict_GET_SIZE(c->u->u_cellvars);
int nfreevars = (int)PyDict_GET_SIZE(c->u->u_freevars);
int noffsets = ncellvars + nfreevars;
// First deal with duplicates (arg cells).
int numdropped = 0;
for (int i = 0; i < noffsets ; i++) {
if (fixedmap[i] == i + nlocals) {
fixedmap[i] -= numdropped;
}
else {
// It was a duplicate (cell/arg).
numdropped += 1;
}
}
// Then update offsets, either relative to locals or by cell2arg.
for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
for (int i = 0; i < b->b_iused; i++) {
struct instr *inst = &b->b_instr[i];
// This is called before extended args are generated.
assert(inst->i_opcode != EXTENDED_ARG);
int oldoffset = inst->i_oparg;
switch(inst->i_opcode) {
case MAKE_CELL:
case LOAD_CLOSURE:
case LOAD_DEREF:
case STORE_DEREF:
case DELETE_DEREF:
case LOAD_CLASSDEREF:
assert(oldoffset >= 0);
assert(oldoffset < noffsets);
assert(fixedmap[oldoffset] >= 0);
inst->i_oparg = fixedmap[oldoffset];
}
}
}
return numdropped;
}
static void
propagate_line_numbers(struct assembler *a);
static PyCodeObject *
assemble(struct compiler *c, int addNone)
{
basicblock *b, *entryblock;
struct assembler a;
int j, nblocks;
PyCodeObject *co = NULL;
PyObject *consts = NULL;
/* Make sure every block that falls off the end returns None. */
if (!c->u->u_curblock->b_return) {
UNSET_LOC(c);
if (addNone)
ADDOP_LOAD_CONST(c, Py_None);
ADDOP(c, RETURN_VALUE);
}
for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) {
if (normalize_basic_block(b)) {
return NULL;
}
}
for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) {
if (extend_block(b)) {
return NULL;
}
}
nblocks = 0;
entryblock = NULL;
for (b = c->u->u_blocks; b != NULL; b = b->b_list) {
nblocks++;
entryblock = b;
}
assert(entryblock != NULL);
assert(PyDict_GET_SIZE(c->u->u_varnames) < INT_MAX);
assert(PyDict_GET_SIZE(c->u->u_cellvars) < INT_MAX);
assert(PyDict_GET_SIZE(c->u->u_freevars) < INT_MAX);
int nlocals = (int)PyDict_GET_SIZE(c->u->u_varnames);
int ncellvars = (int)PyDict_GET_SIZE(c->u->u_cellvars);
int nfreevars = (int)PyDict_GET_SIZE(c->u->u_freevars);
assert(INT_MAX - nlocals - ncellvars > 0);
assert(INT_MAX - nlocals - ncellvars - nfreevars > 0);
int nlocalsplus = nlocals + ncellvars + nfreevars;
int *cellfixedoffsets = build_cellfixedoffsets(c);
if (cellfixedoffsets == NULL) {
goto error;
}
/* Set firstlineno if it wasn't explicitly set. */
if (!c->u->u_firstlineno) {
if (entryblock->b_instr && entryblock->b_instr->i_lineno) {
c->u->u_firstlineno = entryblock->b_instr->i_lineno;
}
else {
c->u->u_firstlineno = 1;
}
}
// This must be called before fix_cell_offsets().
if (insert_prefix_instructions(c, entryblock, cellfixedoffsets, nfreevars)) {
goto error;
}
if (!assemble_init(&a, nblocks, c->u->u_firstlineno))
goto error;
a.a_entry = entryblock;
a.a_nblocks = nblocks;
int numdropped = fix_cell_offsets(c, entryblock, cellfixedoffsets);
PyMem_Free(cellfixedoffsets); // At this point we're done with it.
cellfixedoffsets = NULL;
if (numdropped < 0) {
goto error;
}
nlocalsplus -= numdropped;
consts = consts_dict_keys_inorder(c->u->u_consts);
if (consts == NULL) {
goto error;
}
if (optimize_cfg(c, &a, consts)) {
goto error;
}
    if (duplicate_exits_without_lineno(c)) {
        goto error;
    }
if (trim_unused_consts(c, &a, consts)) {
goto error;
}
propagate_line_numbers(&a);
guarantee_lineno_for_exits(&a, c->u->u_firstlineno);
int maxdepth = stackdepth(c);
if (maxdepth < 0) {
goto error;
}
if (maxdepth > MAX_ALLOWED_STACK_USE) {
PyErr_Format(PyExc_SystemError,
"excessive stack use: stack is %d deep",
maxdepth);
goto error;
}
if (label_exception_targets(entryblock)) {
goto error;
}
convert_exception_handlers_to_nops(entryblock);
for (basicblock *b = a.a_entry; b != NULL; b = b->b_next) {
clean_basic_block(b);
}
/* Order of basic blocks must have been determined by now */
normalize_jumps(&a);
/* Can't modify the bytecode after computing jump offsets. */
assemble_jump_offsets(&a, c);
/* Emit code. */
    for (b = entryblock; b != NULL; b = b->b_next) {
for (j = 0; j < b->b_iused; j++)
if (!assemble_emit(&a, &b->b_instr[j]))
goto error;
}
if (!assemble_exception_table(&a)) {
goto error;
}
if (_PyBytes_Resize(&a.a_except_table, a.a_except_table_off) < 0) {
goto error;
}
if (!merge_const_one(c, &a.a_except_table)) {
goto error;
}
    if (!assemble_start_line_range(&a)) {
        goto error;
    }
if (_PyBytes_Resize(&a.a_lnotab, a.a_lnotab_off) < 0) {
goto error;
}
if (!merge_const_one(c, &a.a_lnotab)) {
goto error;
}
    if (!assemble_end_line_range(&a)) {
        goto error;
    }
if (_PyBytes_Resize(&a.a_enotab, a.a_enotab_off) < 0) {
goto error;
}
if (!merge_const_one(c, &a.a_enotab)) {
goto error;
}
if (_PyBytes_Resize(&a.a_cnotab, a.a_cnotab_off) < 0) {
goto error;
}
if (!merge_const_one(c, &a.a_cnotab)) {
goto error;
}
if (_PyBytes_Resize(&a.a_bytecode, a.a_offset * sizeof(_Py_CODEUNIT)) < 0) {
goto error;
}
if (!merge_const_one(c, &a.a_bytecode)) {
goto error;
}
co = makecode(c, &a, consts, maxdepth, nlocalsplus);
error:
Py_XDECREF(consts);
assemble_free(&a);
if (cellfixedoffsets != NULL) {
PyMem_Free(cellfixedoffsets);
}
return co;
}
static PyObject*
get_const_value(int opcode, int oparg, PyObject *co_consts)
{
PyObject *constant = NULL;
assert(HAS_CONST(opcode));
if (opcode == LOAD_CONST) {
constant = PyList_GET_ITEM(co_consts, oparg);
}
if (constant == NULL) {
PyErr_SetString(PyExc_SystemError,
"Internal error: failed to get value of a constant");
return NULL;
}
Py_INCREF(constant);
return constant;
}
/* Replace LOAD_CONST c1, LOAD_CONST c2 ... LOAD_CONST cn, BUILD_TUPLE n
with LOAD_CONST (c1, c2, ... cn).
The consts table must still be in list form so that the
new constant (c1, c2, ... cn) can be appended.
   Called with *inst* pointing to the first LOAD_CONST.
*/
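/* For example (illustrative), `x = (1, 2, 3)` initially compiles to
 *     LOAD_CONST 1; LOAD_CONST 2; LOAD_CONST 3; BUILD_TUPLE 3
 * and is folded to
 *     NOP; NOP; NOP; LOAD_CONST (1, 2, 3)
 * with the NOPs removed later by clean_basic_block(). */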
static int
fold_tuple_on_constants(struct compiler *c,
struct instr *inst,
int n, PyObject *consts)
{
/* Pre-conditions */
assert(PyList_CheckExact(consts));
assert(inst[n].i_opcode == BUILD_TUPLE);
assert(inst[n].i_oparg == n);
for (int i = 0; i < n; i++) {
if (!HAS_CONST(inst[i].i_opcode)) {
return 0;
}
}
/* Buildup new tuple of constants */
PyObject *newconst = PyTuple_New(n);
if (newconst == NULL) {
return -1;
}
for (int i = 0; i < n; i++) {
int op = inst[i].i_opcode;
int arg = inst[i].i_oparg;
        PyObject *constant = get_const_value(op, arg, consts);
        if (constant == NULL) {
            Py_DECREF(newconst);
            return -1;
        }
PyTuple_SET_ITEM(newconst, i, constant);
}
if (merge_const_one(c, &newconst) == 0) {
Py_DECREF(newconst);
return -1;
}
Py_ssize_t index;
for (index = 0; index < PyList_GET_SIZE(consts); index++) {
if (PyList_GET_ITEM(consts, index) == newconst) {
break;
}
}
if (index == PyList_GET_SIZE(consts)) {
if ((size_t)index >= (size_t)INT_MAX - 1) {
Py_DECREF(newconst);
PyErr_SetString(PyExc_OverflowError, "too many constants");
return -1;
}
if (PyList_Append(consts, newconst)) {
Py_DECREF(newconst);
return -1;
}
}
Py_DECREF(newconst);
for (int i = 0; i < n; i++) {
inst[i].i_opcode = NOP;
}
inst[n].i_opcode = LOAD_CONST;
inst[n].i_oparg = (int)index;
return 0;
}
#define VISITED (-1)
// Replace an arbitrary run of SWAPs and NOPs with an optimal one that has the
// same effect.
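// A worked example: the run SWAP(2); SWAP(2); SWAP(2) has the same net
// effect as one swap, so the cycle decomposition below rewrites it as
// NOP; NOP; SWAP(2) (the NOPs are cleaned up by clean_basic_block()).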
static int
swaptimize(basicblock *block, int *ix)
{
// NOTE: "./python -m test test_patma" serves as a good, quick stress test
// for this function. Make sure to blow away cached *.pyc files first!
assert(*ix < block->b_iused);
struct instr *instructions = &block->b_instr[*ix];
// Find the length of the current sequence of SWAPs and NOPs, and record the
// maximum depth of the stack manipulations:
assert(instructions[0].i_opcode == SWAP);
int depth = instructions[0].i_oparg;
int len = 0;
int more = false;
int limit = block->b_iused - *ix;
while (++len < limit) {
int opcode = instructions[len].i_opcode;
if (opcode == SWAP) {
depth = Py_MAX(depth, instructions[len].i_oparg);
more = true;
}
else if (opcode != NOP) {
break;
}
}
// It's already optimal if there's only one SWAP:
if (!more) {
return 0;
}
// Create an array with elements {0, 1, 2, ..., depth - 1}:
int *stack = PyMem_Malloc(depth * sizeof(int));
if (stack == NULL) {
PyErr_NoMemory();
return -1;
}
for (int i = 0; i < depth; i++) {
stack[i] = i;
}
// Simulate the combined effect of these instructions by "running" them on
// our "stack":
for (int i = 0; i < len; i++) {
if (instructions[i].i_opcode == SWAP) {
int oparg = instructions[i].i_oparg;
int top = stack[0];
// SWAPs are 1-indexed:
stack[0] = stack[oparg - 1];
stack[oparg - 1] = top;
}
}
// Now we can begin! Our approach here is based on a solution to a closely
// related problem (https://cs.stackexchange.com/a/13938). It's easiest to
// think of this algorithm as determining the steps needed to efficiently
// "un-shuffle" our stack. By performing the moves in *reverse* order,
// though, we can efficiently *shuffle* it! For this reason, we will be
// replacing instructions starting from the *end* of the run. Since the
// solution is optimal, we don't need to worry about running out of space:
int current = len - 1;
for (int i = 0; i < depth; i++) {
// Skip items that have already been visited, or just happen to be in
// the correct location:
if (stack[i] == VISITED || stack[i] == i) {
continue;
}
// Okay, we've found an item that hasn't been visited. It forms a cycle
// with other items; traversing the cycle and swapping each item with
// the next will put them all in the correct place. The weird
// loop-and-a-half is necessary to insert 0 into every cycle, since we
// can only swap from that position:
int j = i;
while (true) {
// Skip the actual swap if our item is zero, since swapping the top
// item with itself is pointless:
if (j) {
assert(0 <= current);
// SWAPs are 1-indexed:
instructions[current].i_opcode = SWAP;
instructions[current--].i_oparg = j + 1;
}
if (stack[j] == VISITED) {
// Completed the cycle:
assert(j == i);
break;
}
int next_j = stack[j];
stack[j] = VISITED;
j = next_j;
}
}
// NOP out any unused instructions:
while (0 <= current) {
instructions[current--].i_opcode = NOP;
}
PyMem_Free(stack);
*ix += len - 1;
return 0;
}
// This list is pretty small, since it's only okay to reorder opcodes that:
// - can't affect control flow (like jumping or raising exceptions)
// - can't invoke arbitrary code (besides finalizers)
// - only touch the TOS (and pop it when finished)
#define SWAPPABLE(opcode) \
((opcode) == STORE_FAST || (opcode) == POP_TOP)
static int
next_swappable_instruction(basicblock *block, int i, int lineno)
{
while (++i < block->b_iused) {
struct instr *instruction = &block->b_instr[i];
if (0 <= lineno && instruction->i_lineno != lineno) {
// Optimizing across this instruction could cause user-visible
// changes in the names bound between line tracing events!
return -1;
}
if (instruction->i_opcode == NOP) {
continue;
}
if (SWAPPABLE(instruction->i_opcode)) {
return i;
}
return -1;
}
return -1;
}
// Attempt to apply SWAPs statically by swapping *instructions* rather than
// stack items. For example, we can replace SWAP(2), POP_TOP, STORE_FAST(42)
// with the more efficient NOP, STORE_FAST(42), POP_TOP.
static void
apply_static_swaps(basicblock *block, int i)
{
// SWAPs are to our left, and potential swaperands are to our right:
for (; 0 <= i; i--) {
assert(i < block->b_iused);
struct instr *swap = &block->b_instr[i];
if (swap->i_opcode != SWAP) {
if (swap->i_opcode == NOP || SWAPPABLE(swap->i_opcode)) {
// Nope, but we know how to handle these. Keep looking:
continue;
}
// We can't reason about what this instruction does. Bail:
return;
}
int j = next_swappable_instruction(block, i, -1);
if (j < 0) {
return;
}
int k = j;
int lineno = block->b_instr[j].i_lineno;
for (int count = swap->i_oparg - 1; 0 < count; count--) {
k = next_swappable_instruction(block, k, lineno);
if (k < 0) {
return;
}
}
// Success!
swap->i_opcode = NOP;
struct instr temp = block->b_instr[j];
block->b_instr[j] = block->b_instr[k];
block->b_instr[k] = temp;
}
}
// Attempt to eliminate jumps to jumps by updating inst to jump to
// target->i_target using the provided opcode. Return whether or not the
// optimization was successful.
static bool
jump_thread(struct instr *inst, struct instr *target, int opcode)
{
assert(is_jump(inst));
assert(is_jump(target));
// bpo-45773: If inst->i_target == target->i_target, then nothing actually
// changes (and we fall into an infinite loop):
if (inst->i_lineno == target->i_lineno &&
inst->i_target != target->i_target)
{
inst->i_target = target->i_target;
inst->i_opcode = opcode;
return true;
}
return false;
}
/* Maximum size of basic block that should be copied in optimizer */
#define MAX_COPY_SIZE 4
/* Optimization */
static int
optimize_basic_block(struct compiler *c, basicblock *bb, PyObject *consts)
{
assert(PyList_CheckExact(consts));
struct instr nop;
nop.i_opcode = NOP;
struct instr *target;
for (int i = 0; i < bb->b_iused; i++) {
struct instr *inst = &bb->b_instr[i];
int oparg = inst->i_oparg;
int nextop = i+1 < bb->b_iused ? bb->b_instr[i+1].i_opcode : 0;
if (is_jump(inst)) {
/* Skip over empty basic blocks. */
while (inst->i_target->b_iused == 0) {
inst->i_target = inst->i_target->b_next;
}
target = &inst->i_target->b_instr[0];
}
else {
target = &nop;
}
switch (inst->i_opcode) {
/* Remove LOAD_CONST const; conditional jump */
case LOAD_CONST:
{
PyObject* cnt;
int is_true;
int jump_if_true;
switch(nextop) {
case POP_JUMP_IF_FALSE:
case POP_JUMP_IF_TRUE:
cnt = get_const_value(inst->i_opcode, oparg, consts);
if (cnt == NULL) {
goto error;
}
is_true = PyObject_IsTrue(cnt);
Py_DECREF(cnt);
if (is_true == -1) {
goto error;
}
inst->i_opcode = NOP;
jump_if_true = nextop == POP_JUMP_IF_TRUE;
if (is_true == jump_if_true) {
bb->b_instr[i+1].i_opcode = JUMP_ABSOLUTE;
bb->b_nofallthrough = 1;
}
else {
bb->b_instr[i+1].i_opcode = NOP;
}
break;
case JUMP_IF_FALSE_OR_POP:
case JUMP_IF_TRUE_OR_POP:
cnt = get_const_value(inst->i_opcode, oparg, consts);
if (cnt == NULL) {
goto error;
}
is_true = PyObject_IsTrue(cnt);
Py_DECREF(cnt);
if (is_true == -1) {
goto error;
}
jump_if_true = nextop == JUMP_IF_TRUE_OR_POP;
if (is_true == jump_if_true) {
bb->b_instr[i+1].i_opcode = JUMP_ABSOLUTE;
bb->b_nofallthrough = 1;
}
else {
inst->i_opcode = NOP;
bb->b_instr[i+1].i_opcode = NOP;
}
break;
case IS_OP:
cnt = get_const_value(inst->i_opcode, oparg, consts);
if (cnt == NULL) {
goto error;
}
int jump_op = i+2 < bb->b_iused ? bb->b_instr[i+2].i_opcode : 0;
if (Py_IsNone(cnt) && (jump_op == POP_JUMP_IF_FALSE || jump_op == POP_JUMP_IF_TRUE)) {
unsigned char nextarg = bb->b_instr[i+1].i_oparg;
inst->i_opcode = NOP;
bb->b_instr[i+1].i_opcode = NOP;
                        bb->b_instr[i+2].i_opcode = (nextarg ^ (jump_op == POP_JUMP_IF_FALSE)) ?
                                POP_JUMP_IF_NOT_NONE : POP_JUMP_IF_NONE;
}
Py_DECREF(cnt);
break;
}
break;
}
/* Try to fold tuples of constants.
Skip over BUILD_TUPLE(1) UNPACK_SEQUENCE(1).
Replace BUILD_TUPLE(2) UNPACK_SEQUENCE(2) with SWAP(2).
Replace BUILD_TUPLE(3) UNPACK_SEQUENCE(3) with SWAP(3). */
case BUILD_TUPLE:
if (nextop == UNPACK_SEQUENCE && oparg == bb->b_instr[i+1].i_oparg) {
switch(oparg) {
case 1:
inst->i_opcode = NOP;
bb->b_instr[i+1].i_opcode = NOP;
continue;
case 2:
case 3:
inst->i_opcode = NOP;
bb->b_instr[i+1].i_opcode = SWAP;
continue;
}
}
if (i >= oparg) {
if (fold_tuple_on_constants(c, inst-oparg, oparg, consts)) {
goto error;
}
}
break;
/* Simplify conditional jump to conditional jump where the
result of the first test implies the success of a similar
test or the failure of the opposite test.
Arises in code like:
"a and b or c"
"(a and b) and c"
"(a or b) or c"
"(a or b) and c"
x:JUMP_IF_FALSE_OR_POP y y:JUMP_IF_FALSE_OR_POP z
--> x:JUMP_IF_FALSE_OR_POP z
x:JUMP_IF_FALSE_OR_POP y y:JUMP_IF_TRUE_OR_POP z
--> x:POP_JUMP_IF_FALSE y+1
where y+1 is the instruction following the second test.
*/
case JUMP_IF_FALSE_OR_POP:
switch (target->i_opcode) {
case POP_JUMP_IF_FALSE:
i -= jump_thread(inst, target, POP_JUMP_IF_FALSE);
break;
case JUMP_ABSOLUTE:
case JUMP_FORWARD:
case JUMP_IF_FALSE_OR_POP:
i -= jump_thread(inst, target, JUMP_IF_FALSE_OR_POP);
break;
case JUMP_IF_TRUE_OR_POP:
case POP_JUMP_IF_TRUE:
if (inst->i_lineno == target->i_lineno) {
// We don't need to bother checking for loops here,
// since a block's b_next cannot point to itself:
assert(inst->i_target != inst->i_target->b_next);
inst->i_opcode = POP_JUMP_IF_FALSE;
inst->i_target = inst->i_target->b_next;
--i;
}
break;
}
break;
case JUMP_IF_TRUE_OR_POP:
switch (target->i_opcode) {
case POP_JUMP_IF_TRUE:
i -= jump_thread(inst, target, POP_JUMP_IF_TRUE);
break;
case JUMP_ABSOLUTE:
case JUMP_FORWARD:
case JUMP_IF_TRUE_OR_POP:
i -= jump_thread(inst, target, JUMP_IF_TRUE_OR_POP);
break;
case JUMP_IF_FALSE_OR_POP:
case POP_JUMP_IF_FALSE:
if (inst->i_lineno == target->i_lineno) {
// We don't need to bother checking for loops here,
// since a block's b_next cannot point to itself:
assert(inst->i_target != inst->i_target->b_next);
inst->i_opcode = POP_JUMP_IF_TRUE;
inst->i_target = inst->i_target->b_next;
--i;
}
break;
}
break;
case POP_JUMP_IF_NOT_NONE:
case POP_JUMP_IF_NONE:
switch (target->i_opcode) {
case JUMP_ABSOLUTE:
case JUMP_FORWARD:
i -= jump_thread(inst, target, inst->i_opcode);
}
break;
case POP_JUMP_IF_FALSE:
switch (target->i_opcode) {
case JUMP_ABSOLUTE:
case JUMP_FORWARD:
i -= jump_thread(inst, target, POP_JUMP_IF_FALSE);
}
break;
case POP_JUMP_IF_TRUE:
switch (target->i_opcode) {
case JUMP_ABSOLUTE:
case JUMP_FORWARD:
i -= jump_thread(inst, target, POP_JUMP_IF_TRUE);
}
break;
case JUMP_ABSOLUTE:
case JUMP_FORWARD:
switch (target->i_opcode) {
case JUMP_ABSOLUTE:
case JUMP_FORWARD:
i -= jump_thread(inst, target, JUMP_ABSOLUTE);
}
break;
case FOR_ITER:
if (target->i_opcode == JUMP_FORWARD) {
i -= jump_thread(inst, target, FOR_ITER);
}
break;
case SWAP:
if (oparg == 1) {
inst->i_opcode = NOP;
break;
}
if (swaptimize(bb, &i)) {
goto error;
}
apply_static_swaps(bb, i);
break;
case KW_NAMES:
break;
default:
/* All HAS_CONST opcodes should be handled with LOAD_CONST */
assert (!HAS_CONST(inst->i_opcode));
}
}
return 0;
error:
return -1;
}
/* If this block ends with an unconditional jump to an exit block,
* then remove the jump and extend this block with the target.
*/
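/* For example, when several blocks each end with a jump to one small
 * RETURN_VALUE exit block, each of them gets its own inline copy,
 * trading at most MAX_COPY_SIZE duplicated instructions per block for
 * the eliminated jump. */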
static int
extend_block(basicblock *bb) {
if (bb->b_iused == 0) {
return 0;
}
struct instr *last = &bb->b_instr[bb->b_iused-1];
if (last->i_opcode != JUMP_ABSOLUTE && last->i_opcode != JUMP_FORWARD) {
return 0;
}
if (last->i_target->b_exit && last->i_target->b_iused <= MAX_COPY_SIZE) {
basicblock *to_copy = last->i_target;
last->i_opcode = NOP;
for (int i = 0; i < to_copy->b_iused; i++) {
int index = compiler_next_instr(bb);
if (index < 0) {
return -1;
}
bb->b_instr[index] = to_copy->b_instr[i];
}
bb->b_exit = 1;
}
return 0;
}
static void
clean_basic_block(basicblock *bb) {
/* Remove NOPs when legal to do so. */
int dest = 0;
int prev_lineno = -1;
for (int src = 0; src < bb->b_iused; src++) {
int lineno = bb->b_instr[src].i_lineno;
if (bb->b_instr[src].i_opcode == NOP) {
/* Eliminate no-op if it doesn't have a line number */
if (lineno < 0) {
continue;
}
/* or, if the previous instruction had the same line number. */
if (prev_lineno == lineno) {
continue;
}
            /* or, if the next instruction has the same line number or no line number */
if (src < bb->b_iused - 1) {
int next_lineno = bb->b_instr[src+1].i_lineno;
if (next_lineno < 0 || next_lineno == lineno) {
COPY_INSTR_LOC(bb->b_instr[src], bb->b_instr[src+1]);
continue;
}
}
else {
basicblock* next = bb->b_next;
while (next && next->b_iused == 0) {
next = next->b_next;
}
                /* or, if this is the last instruction in the BB and the next BB has the same line number */
if (next) {
if (lineno == next->b_instr[0].i_lineno) {
continue;
}
}
}
}
if (dest != src) {
bb->b_instr[dest] = bb->b_instr[src];
}
dest++;
prev_lineno = lineno;
}
assert(dest <= bb->b_iused);
bb->b_iused = dest;
}
static int
normalize_basic_block(basicblock *bb) {
/* Mark blocks as exit and/or nofallthrough.
Raise SystemError if CFG is malformed. */
for (int i = 0; i < bb->b_iused; i++) {
switch(bb->b_instr[i].i_opcode) {
case RETURN_VALUE:
case RAISE_VARARGS:
case RERAISE:
bb->b_exit = 1;
bb->b_nofallthrough = 1;
break;
case JUMP_ABSOLUTE:
case JUMP_FORWARD:
case JUMP_NO_INTERRUPT:
bb->b_nofallthrough = 1;
/* fall through */
case POP_JUMP_IF_NOT_NONE:
case POP_JUMP_IF_NONE:
case POP_JUMP_IF_FALSE:
case POP_JUMP_IF_TRUE:
case JUMP_IF_FALSE_OR_POP:
case JUMP_IF_TRUE_OR_POP:
case FOR_ITER:
if (i != bb->b_iused-1) {
PyErr_SetString(PyExc_SystemError, "malformed control flow graph.");
return -1;
}
/* Skip over empty basic blocks. */
while (bb->b_instr[i].i_target->b_iused == 0) {
bb->b_instr[i].i_target = bb->b_instr[i].i_target->b_next;
}
}
}
return 0;
}
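/* Depth-first walk from the entry block, counting predecessors along
 * both fallthrough and jump edges; blocks left with b_predecessors == 0
 * are unreachable. */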
static int
mark_reachable(struct assembler *a) {
basicblock **stack, **sp;
sp = stack = (basicblock **)PyObject_Malloc(sizeof(basicblock *) * a->a_nblocks);
if (stack == NULL) {
return -1;
}
a->a_entry->b_predecessors = 1;
*sp++ = a->a_entry;
while (sp > stack) {
basicblock *b = *(--sp);
if (b->b_next && !b->b_nofallthrough) {
if (b->b_next->b_predecessors == 0) {
*sp++ = b->b_next;
}
b->b_next->b_predecessors++;
}
for (int i = 0; i < b->b_iused; i++) {
basicblock *target;
if (is_jump(&b->b_instr[i])) {
target = b->b_instr[i].i_target;
if (target->b_predecessors == 0) {
*sp++ = target;
}
target->b_predecessors++;
}
}
}
PyObject_Free(stack);
return 0;
}
static void
eliminate_empty_basic_blocks(basicblock *entry) {
/* Eliminate empty blocks */
for (basicblock *b = entry; b != NULL; b = b->b_next) {
basicblock *next = b->b_next;
if (next) {
while (next->b_iused == 0 && next->b_next) {
next = next->b_next;
}
b->b_next = next;
}
}
for (basicblock *b = entry; b != NULL; b = b->b_next) {
if (b->b_iused == 0) {
continue;
}
if (is_jump(&b->b_instr[b->b_iused-1])) {
basicblock *target = b->b_instr[b->b_iused-1].i_target;
while (target->b_iused == 0) {
target = target->b_next;
}
b->b_instr[b->b_iused-1].i_target = target;
}
}
}
/* If an instruction has no line number, but its predecessor in the BB does,
* then copy the line number. If a successor block has no line number, and only
* one predecessor, then inherit the line number.
* This ensures that all exit blocks (with one predecessor) receive a line number.
* Also reduces the size of the line number table,
* but has no impact on the generated line number events.
*/
static void
propagate_line_numbers(struct assembler *a) {
for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
if (b->b_iused == 0) {
continue;
}
// Not a real instruction, only to store positions
// from previous instructions and propagate them.
struct instr prev_instr = {
.i_lineno = -1,
.i_col_offset = -1,
.i_end_lineno = -1,
.i_end_col_offset = -1,
};
for (int i = 0; i < b->b_iused; i++) {
if (b->b_instr[i].i_lineno < 0) {
COPY_INSTR_LOC(prev_instr, b->b_instr[i]);
}
else {
COPY_INSTR_LOC(b->b_instr[i], prev_instr);
}
}
if (!b->b_nofallthrough && b->b_next->b_predecessors == 1) {
assert(b->b_next->b_iused);
if (b->b_next->b_instr[0].i_lineno < 0) {
COPY_INSTR_LOC(prev_instr, b->b_next->b_instr[0]);
}
}
if (is_jump(&b->b_instr[b->b_iused-1])) {
switch (b->b_instr[b->b_iused-1].i_opcode) {
/* Note: Only actual jumps, not exception handlers */
case SETUP_WITH:
case SETUP_FINALLY:
case SETUP_CLEANUP:
continue;
}
basicblock *target = b->b_instr[b->b_iused-1].i_target;
if (target->b_predecessors == 1) {
if (target->b_instr[0].i_lineno < 0) {
COPY_INSTR_LOC(prev_instr, target->b_instr[0]);
}
}
}
}
}
/* Perform optimizations on a control flow graph.
The consts object should still be in list form to allow new constants
to be appended.
All transformations keep the code size the same or smaller.
For those that reduce size, the gaps are initially filled with
NOPs. Later those NOPs are removed.
*/
static int
optimize_cfg(struct compiler *c, struct assembler *a, PyObject *consts)
{
for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
if (optimize_basic_block(c, b, consts)) {
return -1;
}
clean_basic_block(b);
assert(b->b_predecessors == 0);
}
for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) {
if (extend_block(b)) {
return -1;
}
}
if (mark_reachable(a)) {
return -1;
}
/* Delete unreachable instructions */
for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
if (b->b_predecessors == 0) {
b->b_iused = 0;
b->b_nofallthrough = 0;
}
}
eliminate_empty_basic_blocks(a->a_entry);
for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
clean_basic_block(b);
}
/* Delete jump instructions made redundant by previous step. If a non-empty
block ends with a jump instruction, check if the next non-empty block
reached through normal flow control is the target of that jump. If it
is, then the jump instruction is redundant and can be deleted.
*/
int maybe_empty_blocks = 0;
for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
if (b->b_iused > 0) {
struct instr *b_last_instr = &b->b_instr[b->b_iused - 1];
if (b_last_instr->i_opcode == JUMP_ABSOLUTE ||
b_last_instr->i_opcode == JUMP_NO_INTERRUPT ||
b_last_instr->i_opcode == JUMP_FORWARD) {
if (b_last_instr->i_target == b->b_next) {
assert(b->b_next->b_iused);
b->b_nofallthrough = 0;
b_last_instr->i_opcode = NOP;
maybe_empty_blocks = 1;
}
}
}
}
if (maybe_empty_blocks) {
eliminate_empty_basic_blocks(a->a_entry);
}
return 0;
}
// Remove trailing unused constants.
static int
trim_unused_consts(struct compiler *c, struct assembler *a, PyObject *consts)
{
assert(PyList_CheckExact(consts));
    // The first constant may be a docstring; always keep it.
int max_const_index = 0;
for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
for (int i = 0; i < b->b_iused; i++) {
if ((b->b_instr[i].i_opcode == LOAD_CONST ||
b->b_instr[i].i_opcode == KW_NAMES) &&
b->b_instr[i].i_oparg > max_const_index) {
max_const_index = b->b_instr[i].i_oparg;
}
}
}
if (max_const_index+1 < PyList_GET_SIZE(consts)) {
//fprintf(stderr, "removing trailing consts: max=%d, size=%d\n",
// max_const_index, (int)PyList_GET_SIZE(consts));
if (PyList_SetSlice(consts, max_const_index+1,
PyList_GET_SIZE(consts), NULL) < 0) {
return 1;
}
}
return 0;
}
static inline int
is_exit_without_lineno(basicblock *b) {
return b->b_exit && b->b_instr[0].i_lineno < 0;
}
/* PEP 626 mandates that the f_lineno of a frame is correct
* after a frame terminates. It would be prohibitively expensive
* to continuously update the f_lineno field at runtime,
 * so we make sure that all exiting instructions (raises and returns)
* have a valid line number, allowing us to compute f_lineno lazily.
* We can do this by duplicating the exit blocks without line number
* so that none have more than one predecessor. We can then safely
* copy the line number from the sole predecessor block.
*/
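/* Illustrative case (a sketch): an implicit `return None` block reached
 * both by falling off the end of an `if` branch and by a jump has more
 * than one predecessor, so it is duplicated and each copy takes its
 * line number from its own predecessor. */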
static int
duplicate_exits_without_lineno(struct compiler *c)
{
/* Copy all exit blocks without line number that are targets of a jump.
*/
for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) {
if (b->b_iused > 0 && is_jump(&b->b_instr[b->b_iused-1])) {
switch (b->b_instr[b->b_iused-1].i_opcode) {
/* Note: Only actual jumps, not exception handlers */
case SETUP_WITH:
case SETUP_FINALLY:
case SETUP_CLEANUP:
continue;
}
basicblock *target = b->b_instr[b->b_iused-1].i_target;
if (is_exit_without_lineno(target) && target->b_predecessors > 1) {
basicblock *new_target = compiler_copy_block(c, target);
if (new_target == NULL) {
return -1;
}
COPY_INSTR_LOC(b->b_instr[b->b_iused-1], new_target->b_instr[0]);
b->b_instr[b->b_iused-1].i_target = new_target;
target->b_predecessors--;
new_target->b_predecessors = 1;
new_target->b_next = target->b_next;
target->b_next = new_target;
}
}
}
/* Eliminate empty blocks */
for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) {
while (b->b_next && b->b_next->b_iused == 0) {
b->b_next = b->b_next->b_next;
}
}
/* Any remaining reachable exit blocks without line number can only be reached by
* fall through, and thus can only have a single predecessor */
for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) {
if (!b->b_nofallthrough && b->b_next && b->b_iused > 0) {
if (is_exit_without_lineno(b->b_next)) {
assert(b->b_next->b_iused > 0);
COPY_INSTR_LOC(b->b_instr[b->b_iused-1], b->b_next->b_instr[0]);
}
}
}
return 0;
}
/* Retained for API compatibility.
* Optimization is now done in optimize_cfg */
PyObject *
PyCode_Optimize(PyObject *code, PyObject* Py_UNUSED(consts),
PyObject *Py_UNUSED(names), PyObject *Py_UNUSED(lnotab_obj))
{
Py_INCREF(code);
return code;
}