OpenJDK / graal / graal-jvmci-8
changeset 2874:d90bf514d647
Renamed packages.
line wrap: on
line diff
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/C1XCompilation.java Wed Jun 08 08:59:54 2011 +0200 @@ -0,0 +1,310 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package com.oracle.max.graal.compiler; + +import java.util.*; + +import com.oracle.max.asm.*; +import com.oracle.max.graal.compiler.alloc.*; +import com.oracle.max.graal.compiler.asm.*; +import com.oracle.max.graal.compiler.debug.*; +import com.oracle.max.graal.compiler.gen.*; +import com.oracle.max.graal.compiler.gen.LIRGenerator.*; +import com.oracle.max.graal.compiler.graph.*; +import com.oracle.max.graal.compiler.lir.*; +import com.oracle.max.graal.compiler.observer.*; +import com.oracle.max.graal.compiler.value.*; +import com.sun.cri.ci.*; +import com.sun.cri.ri.*; + +/** + * This class encapsulates global information about the compilation of a particular method, + * including a reference to the runtime, statistics about the compiled code, etc. + */ +public final class C1XCompilation { + + private static ThreadLocal<C1XCompilation> currentCompilation = new ThreadLocal<C1XCompilation>(); + + public final C1XCompiler compiler; + public final CiTarget target; + public final RiRuntime runtime; + public final RiMethod method; + public final RiRegisterConfig registerConfig; + public final CiStatistics stats; + public final CiAssumptions assumptions = new CiAssumptions(); + public final FrameState placeholderState; + + public CompilerGraph graph = new CompilerGraph(); + + private boolean hasExceptionHandlers; + private final C1XCompilation parent; + + /** + * @see #setNotTypesafe() + * @see #isTypesafe() + */ + private boolean typesafe = true; + + private int nextID = 1; + + private FrameMap frameMap; + private TargetMethodAssembler assembler; + + private IR hir; + + private LIRGenerator lirGenerator; + + /** + * Creates a new compilation for the specified method and runtime. 
+ * + * @param compiler the compiler + * @param method the method to be compiled or {@code null} if generating code for a stub + * @param osrBCI the bytecode index for on-stack replacement, if requested + * @param stats externally supplied statistics object to be used if not {@code null} + */ + public C1XCompilation(C1XCompiler compiler, RiMethod method, int osrBCI, CiStatistics stats) { + if (osrBCI != -1) { + throw new CiBailout("No OSR supported"); + } + this.parent = currentCompilation.get(); + currentCompilation.set(this); + this.compiler = compiler; + this.target = compiler.target; + this.runtime = compiler.runtime; + this.method = method; + this.stats = stats == null ? new CiStatistics() : stats; + this.registerConfig = method == null ? compiler.globalStubRegisterConfig : runtime.getRegisterConfig(method); + this.placeholderState = method != null && method.minimalDebugInfo() ? new FrameState(method, 0, 0, 0, 0, graph) : null; + + if (compiler.isObserved()) { + compiler.fireCompilationStarted(new CompilationEvent(this)); + } + } + + public void close() { + currentCompilation.set(parent); + } + + public IR hir() { + return hir; + } + + /** + * Records that this compilation has exception handlers. + */ + public void setHasExceptionHandlers() { + hasExceptionHandlers = true; + } + + /** + * Translates a given kind to a canonical architecture kind. + * This is an identity function for all but {@link CiKind#Word} + * which is translated to {@link CiKind#Int} or {@link CiKind#Long} + * depending on whether or not this is a {@linkplain #is64Bit() 64-bit} + * compilation. + */ + public CiKind archKind(CiKind kind) { + if (kind.isWord()) { + return target.arch.is64bit() ? CiKind.Long : CiKind.Int; + } + return kind; + } + + /** + * Determines if two given kinds are equal at the {@linkplain #archKind(CiKind) architecture} level. 
+ */ + public boolean archKindsEqual(CiKind kind1, CiKind kind2) { + return archKind(kind1) == archKind(kind2); + } + + /** + * Records an assumption that the specified type has no finalizable subclasses. + * + * @param receiverType the type that is assumed to have no finalizable subclasses + * @return {@code true} if the assumption was recorded and can be assumed; {@code false} otherwise + */ + public boolean recordNoFinalizableSubclassAssumption(RiType receiverType) { + return false; + } + + /** + * Converts this compilation to a string. + * + * @return a string representation of this compilation + */ + @Override + public String toString() { + return "compile: " + method; + } + + /** + * Builds the block map for the specified method. + * + * @param method the method for which to build the block map + * @param osrBCI the OSR bytecode index; {@code -1} if this is not an OSR + * @return the block map for the specified method + */ + public BlockMap getBlockMap(RiMethod method) { + BlockMap map = new BlockMap(method); + map.build(); + if (compiler.isObserved()) { + String label = CiUtil.format("BlockListBuilder %f %r %H.%n(%p)", method, true); + compiler.fireCompilationEvent(new CompilationEvent(this, label, map, method.code().length)); + } + stats.bytecodeCount += method.code().length; + return map; + } + + /** + * Returns the frame map of this compilation. 
+ * @return the frame map + */ + public FrameMap frameMap() { + return frameMap; + } + + public TargetMethodAssembler assembler() { + if (assembler == null) { + AbstractAssembler asm = compiler.backend.newAssembler(registerConfig); + assembler = new TargetMethodAssembler(asm); + assembler.setFrameSize(frameMap.frameSize()); + assembler.targetMethod.setCustomStackAreaOffset(frameMap.offsetToCustomArea()); + } + return assembler; + } + + public boolean hasExceptionHandlers() { + return hasExceptionHandlers; + } + + public CiResult compile() { + CiTargetMethod targetMethod; + try { + emitHIR(); + emitLIR(); + targetMethod = emitCode(); + + if (C1XOptions.PrintMetrics) { + C1XMetrics.BytecodesCompiled += method.code().length; + } + } catch (CiBailout b) { + return new CiResult(null, b, stats); + } catch (Throwable t) { + if (C1XOptions.BailoutOnException) { + return new CiResult(null, new CiBailout("Exception while compiling: " + method, t), stats); + } else { + throw new RuntimeException(t); + } + } finally { + if (compiler.isObserved()) { + compiler.fireCompilationFinished(new CompilationEvent(this)); + } + } + + return new CiResult(targetMethod, null, stats); + } + + public IR emitHIR() { + hir = new IR(this); + hir.build(); + return hir; + } + + public void initFrameMap(int numberOfLocks) { + frameMap = this.compiler.backend.newFrameMap(method, numberOfLocks); + } + + private void emitLIR() { + if (C1XOptions.GenLIR) { + if (C1XOptions.PrintTimers) { + C1XTimers.LIR_CREATE.start(); + } + + initFrameMap(hir.maxLocks()); + + lirGenerator = compiler.backend.newLIRGenerator(this); + + for (LIRBlock begin : hir.linearScanOrder()) { + lirGenerator.doBlock(begin); + } + + if (C1XOptions.PrintTimers) { + C1XTimers.LIR_CREATE.stop(); + } + + if (C1XOptions.PrintLIR && !TTY.isSuppressed()) { + LIRList.printLIR(hir.linearScanOrder()); + } + + new LinearScan(this, hir, lirGenerator, frameMap()).allocate(); + } + } + + private CiTargetMethod emitCode() { + if (C1XOptions.GenLIR 
&& C1XOptions.GenCode) { + final LIRAssembler lirAssembler = compiler.backend.newLIRAssembler(this); + lirAssembler.emitCode(hir.linearScanOrder()); + + // generate code for slow cases + lirAssembler.emitLocalStubs(); + + // generate deoptimization stubs + ArrayList<DeoptimizationStub> deoptimizationStubs = lirGenerator.deoptimizationStubs(); + if (deoptimizationStubs != null) { + for (DeoptimizationStub stub : deoptimizationStubs) { + lirAssembler.emitDeoptizationStub(stub); + } + } + + // generate traps at the end of the method + lirAssembler.emitTraps(); + + CiTargetMethod targetMethod = assembler().finishTargetMethod(method, runtime, lirAssembler.registerRestoreEpilogueOffset, false); + if (assumptions.count() > 0) { + targetMethod.setAssumptions(assumptions); + } + + if (compiler.isObserved()) { + compiler.fireCompilationEvent(new CompilationEvent(this, "After code generation", graph, false, true, targetMethod)); + } + + if (C1XOptions.PrintTimers) { + C1XTimers.CODE_CREATE.stop(); + } + return targetMethod; + } + + return null; + } + + public int nextID() { + return nextID++; + } + + public static C1XCompilation compilation() { + C1XCompilation compilation = currentCompilation.get(); + assert compilation != null; + return compilation; + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/C1XCompiler.java Wed Jun 08 08:59:54 2011 +0200 @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.max.graal.compiler; + +import java.util.*; + +import com.oracle.max.graal.compiler.debug.*; +import com.oracle.max.graal.compiler.globalstub.*; +import com.oracle.max.graal.compiler.observer.*; +import com.oracle.max.graal.compiler.target.*; +import com.sun.cri.ci.*; +import com.sun.cri.ri.*; +import com.sun.cri.xir.*; + +/** + * This class implements the compiler interface for C1X. + * + * @author Thomas Wuerthinger + * @author Ben L. Titzer + */ +public class C1XCompiler extends ObservableCompiler { + + public final Map<Object, GlobalStub> stubs = new HashMap<Object, GlobalStub>(); + + /** + * The target that this compiler has been configured for. 
+ */ + public final CiTarget target; + + /** + * The runtime that this compiler has been configured for. + */ + public final RiRuntime runtime; + + /** + * The XIR generator that lowers Java operations to machine operations. + */ + public final RiXirGenerator xir; + + /** + * The backend that this compiler has been configured for. + */ + public final Backend backend; + + public final RiRegisterConfig globalStubRegisterConfig; + + public C1XCompiler(RiRuntime runtime, CiTarget target, RiXirGenerator xirGen, RiRegisterConfig globalStubRegisterConfig) { + this.runtime = runtime; + this.target = target; + this.xir = xirGen; + this.globalStubRegisterConfig = globalStubRegisterConfig; + this.backend = Backend.create(target.arch, this); + init(); + } + + public CiResult compileMethod(RiMethod method, int osrBCI, RiXirGenerator xirGenerator, CiStatistics stats) { + long startTime = 0; + int index = C1XMetrics.CompiledMethods++; + if (C1XOptions.PrintCompilation) { + TTY.print(String.format("C1X %4d %-70s %-45s | ", index, method.holder().name(), method.name())); + startTime = System.nanoTime(); + } + + CiResult result = null; + TTY.Filter filter = new TTY.Filter(C1XOptions.PrintFilter, method); + C1XCompilation compilation = new C1XCompilation(this, method, osrBCI, stats); + try { + result = compilation.compile(); + } finally { + filter.remove(); + compilation.close(); + if (C1XOptions.PrintCompilation && !TTY.isSuppressed()) { + long time = (System.nanoTime() - startTime) / 100000; + TTY.println(String.format("%3d.%dms", time / 10, time % 10)); + } + } + + return result; + } + + private void init() { + final List<XirTemplate> xirTemplateStubs = xir.buildTemplates(backend.newXirAssembler()); + final GlobalStubEmitter emitter = backend.newGlobalStubEmitter(); + + if (xirTemplateStubs != null) { + for (XirTemplate template : xirTemplateStubs) { + TTY.Filter filter = new TTY.Filter(C1XOptions.PrintFilter, template.name); + try { + stubs.put(template, emitter.emit(template, 
runtime)); + } finally { + filter.remove(); + } + } + } + + for (GlobalStub.Id id : GlobalStub.Id.values()) { + TTY.Filter suppressor = new TTY.Filter(C1XOptions.PrintFilter, id); + try { + stubs.put(id, emitter.emit(id, runtime)); + } finally { + suppressor.remove(); + } + } + + if (C1XOptions.PrintCFGToFile) { + addCompilationObserver(new CFGPrinterObserver()); + } + if (C1XOptions.PrintDOTGraphToFile) { + addCompilationObserver(new GraphvizPrinterObserver(false)); + } + if (C1XOptions.PrintDOTGraphToPdf) { + addCompilationObserver(new GraphvizPrinterObserver(true)); + } + if (C1XOptions.PrintIdealGraphLevel != 0) { + CompilationObserver observer; + if (C1XOptions.PrintIdealGraphFile) { + observer = new IdealGraphPrinterObserver(); + } else { + observer = new IdealGraphPrinterObserver(C1XOptions.PrintIdealGraphAddress, C1XOptions.PrintIdealGraphPort); + } + addCompilationObserver(observer); + } + } + + public GlobalStub lookupGlobalStub(GlobalStub.Id id) { + GlobalStub globalStub = stubs.get(id); + assert globalStub != null : "no stub for global stub id: " + id; + return globalStub; + } + + public GlobalStub lookupGlobalStub(XirTemplate template) { + GlobalStub globalStub = stubs.get(template); + assert globalStub != null : "no stub for XirTemplate: " + template; + return globalStub; + } + + public GlobalStub lookupGlobalStub(CiRuntimeCall runtimeCall) { + GlobalStub globalStub = stubs.get(runtimeCall); + if (globalStub == null) { + globalStub = backend.newGlobalStubEmitter().emit(runtimeCall, runtime); + stubs.put(runtimeCall, globalStub); + } + + assert globalStub != null : "could not find global stub for runtime call: " + runtimeCall; + return globalStub; + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/C1XMetrics.java Wed Jun 08 08:59:54 2011 +0200 @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.max.graal.compiler; + +import java.lang.reflect.*; +import java.util.*; + +import com.oracle.max.graal.compiler.debug.*; + + +/** + * This class contains a number of fields that collect metrics about compilation, particularly + * the number of times certain optimizations are performed. 
+ */ +public class C1XMetrics { + public static int CompiledMethods; + public static int TargetMethods; + public static int LocalValueNumberHits; + public static int ValueMapResizes; + public static int InlinedFinalizerChecks; + public static int InlineForcedMethods; + public static int InlineForbiddenMethods; + public static int InlinedJsrs; + public static int BlocksDeleted; + public static int BytecodesCompiled; + public static int CodeBytesEmitted; + public static int SafepointsEmitted; + public static int ExceptionHandlersEmitted; + public static int DataPatches; + public static int DirectCallSitesEmitted; + public static int IndirectCallSitesEmitted; + public static int HIRInstructions; + public static int LiveHIRInstructions; + public static int LIRInstructions; + public static int LIRVariables; + public static int LIRXIRInstructions; + public static int LIRMoveInstructions; + public static int LSRAIntervalsCreated; + public static int LSRASpills; + public static int LoadConstantIterations; + public static int CodeBufferCopies; + public static int UniqueValueIdsAssigned; + public static int FrameStatesCreated; + public static int FrameStateValuesCreated; + public static int NodesCanonicalized; + + public static void print() { + printClassFields(C1XMetrics.class); + + } + + public static void printClassFields(Class<?> javaClass) { + final String className = javaClass.getSimpleName(); + TTY.println(className + " {"); + for (final Field field : javaClass.getFields()) { + printField(field, false); + } + TTY.println("}"); + } + + public static void printField(final Field field, boolean tabbed) { + final String fieldName = String.format("%35s", field.getName()); + try { + String prefix = tabbed ? "" : " " + fieldName + " = "; + String postfix = tabbed ? 
"\t" : "\n"; + if (field.getType() == int.class) { + TTY.print(prefix + field.getInt(null) + postfix); + } else if (field.getType() == boolean.class) { + TTY.print(prefix + field.getBoolean(null) + postfix); + } else if (field.getType() == float.class) { + TTY.print(prefix + field.getFloat(null) + postfix); + } else if (field.getType() == String.class) { + TTY.print(prefix + field.get(null) + postfix); + } else if (field.getType() == Map.class) { + Map<?, ?> m = (Map<?, ?>) field.get(null); + TTY.print(prefix + printMap(m) + postfix); + } else { + TTY.print(prefix + field.get(null) + postfix); + } + } catch (IllegalAccessException e) { + // do nothing. + } + } + + private static String printMap(Map<?, ?> m) { + StringBuilder sb = new StringBuilder(); + + List<String> keys = new ArrayList<String>(); + for (Object key : m.keySet()) { + keys.add((String) key); + } + Collections.sort(keys); + + for (String key : keys) { + sb.append(key); + sb.append("\t"); + sb.append(m.get(key)); + sb.append("\n"); + } + + return sb.toString(); + } + + private static void printField(String fieldName, long value) { + TTY.print(" " + fieldName + " = " + value + "\n"); + } + + private static void printField(String fieldName, double value) { + TTY.print(" " + fieldName + " = " + value + "\n"); + } +} +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/C1XOptions.java Wed Jun 08 08:59:54 2011 +0200 @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.max.graal.compiler; + +import com.oracle.max.graal.compiler.debug.TTY.*; + +/** + * This class encapsulates options that control the behavior of the C1X compiler. + * The help message for each option is specified by a {@linkplain #helpMap help map}. + * + * (tw) WARNING: Fields of this class are treated as final by Graal. + * + * @author Ben L. 
Titzer + */ +public final class C1XOptions { + + // Checkstyle: stop + private static final boolean ____ = false; + // Checkstyle: resume + + // inlining settings + public static boolean Inline = ____; + public static int MaximumInstructionCount = 37000; + public static float MaximumInlineRatio = 0.90f; + public static int MaximumInlineSize = 35; + public static int MaximumTrivialSize = 6; + public static int MaximumInlineLevel = 9; + public static int MaximumRecursiveInlineLevel = 2; + public static int MaximumDesiredSize = 8000; + public static int MaximumShortLoopSize = 5; + + // debugging settings + public static boolean VerifyPointerMaps = ____; + public static int MethodEndBreakpointGuards = 0; + public static boolean ZapStackOnMethodEntry = ____; + public static boolean StressLinearScan = ____; + public static boolean BailoutOnException = ____; + + /** + * See {@link Filter#Filter(String, Object)}. + */ + public static String PrintFilter = null; + + // printing settings + public static boolean PrintHIR = ____; + public static boolean PrintLIR = ____; + public static boolean PrintCFGToFile = ____; + + // DOT output settings + public static boolean PrintDOTGraphToFile = ____; + public static boolean PrintDOTGraphToPdf = ____; + public static boolean OmitDOTFrameStates = ____; + + // Ideal graph visualizer output settings + public static int PrintIdealGraphLevel = 0; + public static boolean PrintIdealGraphFile = ____; + public static String PrintIdealGraphAddress = "127.0.0.1"; + public static int PrintIdealGraphPort = 4444; + + // Other printing settings + public static boolean PrintMetrics = ____; + public static boolean PrintTimers = ____; + public static boolean PrintCompilation = ____; + public static boolean PrintXirTemplates = ____; + public static boolean PrintIRWithLIR = ____; + public static boolean PrintAssembly = ____; + public static boolean PrintCodeBytes = ____; + public static int PrintAssemblyBytesPerLine = 16; + public static int 
TraceLinearScanLevel = 0; + public static int TraceLIRGeneratorLevel = 0; + public static boolean TraceRelocation = ____; + public static boolean TraceLIRVisit = ____; + public static boolean TraceAssembler = ____; + public static boolean TraceInlining = ____; + public static boolean TraceDeadCodeElimination = ____; + public static int TraceBytecodeParserLevel = 0; + public static boolean QuietBailout = ____; + + // state merging settings + public static boolean AssumeVerifiedBytecode = ____; + + // Linear scan settings + public static boolean CopyPointerStackArguments = true; + + // Code generator settings + public static boolean GenLIR = true; + public static boolean GenCode = true; + + public static boolean UseConstDirectCall = false; + + public static boolean GenSpecialDivChecks = ____; + public static boolean GenAssertionCode = ____; + public static boolean AlignCallsForPatching = true; + public static boolean NullCheckUniquePc = ____; + public static boolean InvokeSnippetAfterArguments = ____; + public static boolean ResolveClassBeforeStaticInvoke = true; + + // Translating tableswitch instructions + public static int SequentialSwitchLimit = 4; + public static int RangeTestsSwitchDensity = 5; + + public static boolean DetailedAsserts = ____; + + // Runtime settings + public static int ReadPrefetchInstr = 0; + public static int StackShadowPages = 2; + + // Assembler settings + public static boolean CommentedAssembly = ____; + public static boolean PrintLIRWithAssembly = ____; + + public static boolean OptCanonicalizer = true; +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/C1XTimers.java Wed Jun 08 08:59:54 2011 +0200 @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2009, 2009, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.max.graal.compiler; + +import com.oracle.max.graal.compiler.debug.*; + +/** + * This class contains timers that record the amount of time spent in various + * parts of the compiler. 
+ * + * @author Christian Wimmer + */ +public enum C1XTimers { + HIR_CREATE("Create HIR"), + HIR_OPTIMIZE("Optimize HIR"), + NCE("Nullcheck elimination"), + LIR_CREATE("Create LIR"), + LIFETIME_ANALYSIS("Lifetime Analysis"), + LINEAR_SCAN("Linear Scan"), + RESOLUTION("Resolution"), + DEBUG_INFO("Create Debug Info"), + CODE_CREATE("Create Code"); + + private final String name; + private long start; + private long total; + + private C1XTimers(String name) { + this.name = name; + } + + public void start() { + start = System.nanoTime(); + } + + public void stop() { + total += System.nanoTime() - start; + } + + public static void reset() { + for (C1XTimers t : values()) { + t.total = 0; + } + } + + public static void print() { + long total = 0; + for (C1XTimers timer : C1XTimers.values()) { + total += timer.total; + } + if (total == 0) { + return; + } + + TTY.println(); + for (C1XTimers timer : C1XTimers.values()) { + TTY.println("%-20s: %7.4f s (%5.2f%%)", timer.name, timer.total / 1000000000.0, timer.total * 100.0 / total); + timer.total = 0; + } + TTY.println(); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/alloc/ControlFlowOptimizer.java Wed Jun 08 08:59:54 2011 +0200 @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.max.graal.compiler.alloc; + +import java.util.*; + +import com.oracle.max.graal.compiler.*; +import com.oracle.max.graal.compiler.graph.*; +import com.oracle.max.graal.compiler.ir.*; +import com.oracle.max.graal.compiler.lir.*; +import com.oracle.max.graal.compiler.util.*; +import com.sun.cri.ci.*; + +/** + * This class performs basic optimizations on the control flow graph after LIR generation. + */ +final class ControlFlowOptimizer { + + /** + * Performs control flow optimizations on the given IR graph. 
     * @param ir the IR graph that should be optimized
     */
    public static void optimize(IR ir) {
        ControlFlowOptimizer optimizer = new ControlFlowOptimizer(ir);
        List<LIRBlock> code = ir.linearScanOrder();
        optimizer.reorderShortLoops(code);
        optimizer.deleteEmptyBlocks(code);
        optimizer.deleteUnnecessaryJumps(code);
        optimizer.deleteJumpsToReturn(code);
    }

    // The IR whose linear scan order is being optimized. Needed to identify the
    // start block, which must never be deleted.
    private final IR ir;

    private ControlFlowOptimizer(IR ir) {
        this.ir = ir;
    }

    /**
     * Attempts to reorder a single short loop so that the loop header becomes the
     * last block of the loop instead of the first. This lets the loop's back edge
     * fall through to the header instead of requiring a backward jump.
     *
     * @param code the list of blocks in linear scan order; modified in place
     * @param headerBlock the loop header block found at {@code headerIdx}
     * @param headerIdx the index of {@code headerBlock} in {@code code}
     */
    private void reorderShortLoop(List<LIRBlock> code, LIRBlock headerBlock, int headerIdx) {
        // find the index of the first block after the loop (i.e. with a smaller loop
        // depth), giving up after C1XOptions.MaximumShortLoopSize blocks
        int i = headerIdx + 1;
        int maxEnd = Math.min(headerIdx + C1XOptions.MaximumShortLoopSize, code.size());
        while (i < maxEnd && code.get(i).loopDepth() >= headerBlock.loopDepth()) {
            i++;
        }

        if (i == code.size() || code.get(i).loopDepth() < headerBlock.loopDepth()) {
            int endIdx = i - 1;
            LIRBlock endBlock = code.get(endIdx);

            if (endBlock.numberOfSux() == 1 && endBlock.suxAt(0) == headerBlock) {
                // short loop from headerIdx to endIdx found -> reorder blocks such that
                // the headerBlock is the last block instead of the first block of the loop
                for (int j = headerIdx; j < endIdx; j++) {
                    code.set(j, code.get(j + 1));
                }
                code.set(endIdx, headerBlock);
            }
        }
    }

    /**
     * Reorders every short loop in the block list; see {@link #reorderShortLoop}.
     *
     * @param code the list of blocks in linear scan order; modified in place
     */
    private void reorderShortLoops(List<LIRBlock> code) {
        // iterate backwards so a reordered loop does not disturb the indices of
        // blocks that have not yet been visited
        for (int i = code.size() - 1; i >= 0; i--) {
            LIRBlock block = code.get(i);

            if (block.isLinearScanLoopHeader()) {
                reorderShortLoop(code, block, i);
            }
        }

        assert verify(code);
    }

    // only blocks with exactly one successor can be deleted. Such blocks
    // must always end with an unconditional branch to this successor
    private boolean canDeleteBlock(LIRBlock block) {
        // never delete the start block, a block with more than one successor,
        // or a block that branches to itself
        if (block.numberOfSux() != 1 ||
            block == ir.startBlock ||
            block.suxAt(0) == block) {
            return false;
        }

        List<LIRInstruction> instructions = block.lir().instructionsList();

        assert instructions.size() >= 2 : "block must have label and branch";
        assert instructions.get(0).code == LIROpcode.Label : "first instruction must always be a label";
        assert instructions.get(instructions.size() - 1) instanceof LIRBranch : "last instruction must always be a branch but is " + instructions.get(instructions.size() - 1);
        assert ((LIRBranch) instructions.get(instructions.size() - 1)).cond() == Condition.TRUE : "branch must be unconditional";
        assert ((LIRBranch) instructions.get(instructions.size() - 1)).block() == block.suxAt(0) : "branch target must be the successor";

        // deletable only if the block contains nothing but the label and the branch,
        // and the branch carries no debug info

        return instructions.size() == 2 && instructions.get(instructions.size() - 1).info == null;
    }

    /**
     * Deletes all blocks that consist of nothing but a label and an unconditional
     * branch (see {@link #canDeleteBlock}), compacting {@code code} in place.
     * Branches and table switches in the predecessors of a deleted block are
     * redirected to the deleted block's sole successor.
     *
     * @param code the list of blocks in linear scan order; modified in place
     */
    private void deleteEmptyBlocks(List<LIRBlock> code) {
        int oldPos = 0;
        int newPos = 0;
        int numBlocks = code.size();

        while (oldPos < numBlocks) {
            LIRBlock block = code.get(oldPos);

            if (canDeleteBlock(block)) {
                LIRBlock newTarget = block.suxAt(0);

                // update the block references in any branching LIR instructions
                for (LIRBlock pred : block.blockPredecessors()) {
                    for (LIRInstruction instr : pred.lir().instructionsList()) {
                        if (instr instanceof LIRBranch) {
                            ((LIRBranch) instr).substitute(block, newTarget);
                        } else if (instr instanceof LIRTableSwitch) {
                            ((LIRTableSwitch) instr).substitute(block, newTarget);
                        }
                    }
                }

                // adjust successor and predecessor lists
                block.replaceWith(newTarget);
                C1XMetrics.BlocksDeleted++;
            } else {
                // adjust position of this block in the block list if blocks before
                // have been deleted
                if (newPos != oldPos) {
                    code.set(newPos, code.get(oldPos));
                }
                newPos++;
            }
            oldPos++;
        }
        assert verify(code);
        // discard the now-unused tail entries left behind by the compaction above
        Util.truncate(code, newPos);

        assert verify(code);
    }

    /**
     * Removes branches to the immediately following block and, where possible,
     * fuses a conditional branch followed by an unconditional branch to the next
     * block into a single negated conditional branch.
     *
     * @param code the list of blocks in linear scan order; instruction lists are modified in place
     */
    private void deleteUnnecessaryJumps(List<LIRBlock> code) {
        // skip the last block because there a branch is always necessary
        for (int i = code.size() - 2; i >= 0; i--) {
            LIRBlock block = code.get(i);
            List<LIRInstruction> instructions = block.lir().instructionsList();

            LIRInstruction lastOp = instructions.get(instructions.size() - 1);
            if (lastOp.code == LIROpcode.Branch) {
                assert lastOp instanceof LIRBranch : "branch must be of type LIRBranch";
                LIRBranch lastBranch = (LIRBranch) lastOp;

                assert lastBranch.block() != null : "last branch must always have a block as target";
                assert lastBranch.label() == lastBranch.block().label() : "must be equal";

                if (lastBranch.info == null) {
                    if (lastBranch.block() == code.get(i + 1)) {
                        // delete last branch instruction
                        Util.truncate(instructions, instructions.size() - 1);

                    } else {
                        LIRInstruction prevOp = instructions.get(instructions.size() - 2);
                        if (prevOp.code == LIROpcode.Branch || prevOp.code == LIROpcode.CondFloatBranch) {
                            assert prevOp instanceof LIRBranch : "branch must be of type LIRBranch";
                            LIRBranch prevBranch = (LIRBranch) prevOp;

                            if (prevBranch.block() == code.get(i + 1) && prevBranch.info == null) {
                                // eliminate a conditional branch to the immediate successor
                                prevBranch.changeBlock(lastBranch.block());
                                prevBranch.negateCondition();
                                Util.truncate(instructions, instructions.size() - 1);
                            }
                        }
                    }
                }
            }
        }

        assert verify(code);
    }

    /**
     * Replaces an unconditional jump to a block that contains nothing but a return
     * with a direct return in the jumping block.
     *
     * @param code the list of blocks in linear scan order; instruction lists are modified in place
     */
    private void deleteJumpsToReturn(List<LIRBlock> code) {
        for (int i = code.size() - 1; i >= 0; i--) {
            LIRBlock block = code.get(i);
            List<LIRInstruction> curInstructions = block.lir().instructionsList();
            LIRInstruction curLastOp = curInstructions.get(curInstructions.size() - 1);

            assert curInstructions.get(0).code == LIROpcode.Label : "first instruction must always be a label";
            if (curInstructions.size() == 2 && curLastOp.code == LIROpcode.Return) {
                // the block contains only a label and a return
                // if a predecessor ends with an unconditional jump to this block, then the jump
                // can be replaced with a return instruction
                //
                // Note: the original block with only a return statement cannot be deleted completely
                // because the predecessors might have other (conditional) jumps to this block.
                // this may lead to unnecessary return instructions in the final code

                assert curLastOp.info == null : "return instructions do not have debug information";

                assert curLastOp instanceof LIROp1 : "return must be LIROp1";
                CiValue returnOpr = ((LIROp1) curLastOp).operand();

                for (int j = block.numberOfPreds() - 1; j >= 0; j--) {
                    LIRBlock pred = block.predAt(j);
                    List<LIRInstruction> predInstructions = pred.lir().instructionsList();
                    LIRInstruction predLastOp = predInstructions.get(predInstructions.size() - 1);

                    if (predLastOp.code == LIROpcode.Branch) {
                        assert predLastOp instanceof LIRBranch : "branch must be LIRBranch";
                        LIRBranch predLastBranch = (LIRBranch) predLastOp;

                        if (predLastBranch.block() == block && predLastBranch.cond() == Condition.TRUE && predLastBranch.info == null) {
                            // replace the jump to a return with a direct return
                            // Note: currently the edge between the blocks is not deleted
                            predInstructions.set(predInstructions.size() - 1, new LIROp1(LIROpcode.Return, returnOpr));
                        }
                    }
                }
            }
        }
    }

    /**
     * Asserts that every branch target, successor and predecessor of every block in
     * {@code code} is itself contained in {@code code}.
     *
     * @param code the list of blocks to check
     * @return {@code true} (violations are reported via assertions)
     */
    private boolean verify(List<LIRBlock> code) {
        for (LIRBlock block : code) {
            List<LIRInstruction> instructions = block.lir().instructionsList();

            for (LIRInstruction instr : instructions) {
                if (instr instanceof LIRBranch) {
                    LIRBranch opBranch = (LIRBranch) instr;
                    assert opBranch.block() == null || code.contains(opBranch.block()) : "missing successor branch from: " + block + " to: " + opBranch.block();
                    assert opBranch.unorderedBlock() == null || code.contains(opBranch.unorderedBlock()) : "missing successor branch from: " + block + " to: " + opBranch.unorderedBlock();
                }
            }

            for (LIRBlock sux : block.blockSuccessors()) {
                assert code.contains(sux) : "missing successor from: " + block + "to: " + sux;
            }

            for (LIRBlock pred : block.blockPredecessors()) {
                assert code.contains(pred) : "missing predecessor from: " + block + "to: " + pred;
            }
        }

        return true;
    }
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/alloc/EdgeMoveOptimizer.java Wed Jun 08 08:59:54 2011 +0200 @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.max.graal.compiler.alloc; + +import java.util.*; + +import com.oracle.max.graal.compiler.*; +import com.oracle.max.graal.compiler.ir.*; +import com.oracle.max.graal.compiler.lir.*; + +/** + * This class optimizes moves, particularly those that result from eliminating SSA form. + * + * When a block has more than one predecessor, and all predecessors end with + * the {@linkplain #same(LIRInstruction, LIRInstruction) same} sequence of + * {@linkplain LIROpcode#Move move} instructions, then these sequences + * can be replaced with a single copy of the sequence at the beginning of the block. 
+ * + * Similarly, when a block has more than one successor, then same sequences of + * moves at the beginning of the successors can be placed once at the end of + * the block. But because the moves must be inserted before all branch + * instructions, this works only when there is exactly one conditional branch + * at the end of the block (because the moves must be inserted before all + * branches, but after all compares). + * + * This optimization affects all kind of moves (reg->reg, reg->stack and + * stack->reg). Because this optimization works best when a block contains only + * a few moves, it has a huge impact on the number of blocks that are totally + * empty. + * + * @author Christian Wimmer (original HotSpot implementation) + * @author Thomas Wuerthinger + * @author Doug Simon + */ +final class EdgeMoveOptimizer { + + /** + * Optimizes moves on block edges. + * + * @param blockList a list of blocks whose moves should be optimized + */ + public static void optimize(List<LIRBlock> blockList) { + EdgeMoveOptimizer optimizer = new EdgeMoveOptimizer(); + + // ignore the first block in the list (index 0 is not processed) + for (int i = blockList.size() - 1; i >= 1; i--) { + LIRBlock block = blockList.get(i); + + if (block.numberOfPreds() > 1) { + optimizer.optimizeMovesAtBlockEnd(block); + } + if (block.numberOfSux() == 2) { + optimizer.optimizeMovesAtBlockBegin(block); + } + } + } + + private final List<List<LIRInstruction>> edgeInstructionSeqences; + + private EdgeMoveOptimizer() { + edgeInstructionSeqences = new ArrayList<List<LIRInstruction>>(4); + } + + /** + * Determines if two operations are both {@linkplain LIROpcode#Move moves} + * that have the same {@linkplain LIROp1#operand() source} and {@linkplain LIROp1#result() destination} + * operands and they have the same {@linkplain LIRInstruction#info debug info}. 
+ * + * @param op1 the first instruction to compare + * @param op2 the second instruction to compare + * @return {@code true} if {@code op1} and {@code op2} are the same by the above algorithm + */ + private boolean same(LIRInstruction op1, LIRInstruction op2) { + assert op1 != null; + assert op2 != null; + + if (op1.code == LIROpcode.Move && op2.code == LIROpcode.Move) { + assert op1 instanceof LIROp1 : "move must be LIROp1"; + assert op2 instanceof LIROp1 : "move must be LIROp1"; + LIROp1 move1 = (LIROp1) op1; + LIROp1 move2 = (LIROp1) op2; + if (move1.info == move2.info && move1.operand().equals(move2.operand()) && move1.result().equals(move2.result())) { + // these moves are exactly equal and can be optimized + return true; + } + } + return false; + } + + /** + * Moves the longest {@linkplain #same common} subsequence at the end all + * predecessors of {@code block} to the start of {@code block}. + */ + private void optimizeMovesAtBlockEnd(LIRBlock block) { + if (block.isPredecessor(block)) { + // currently we can't handle this correctly. + return; + } + + // clear all internal data structures + edgeInstructionSeqences.clear(); + + int numPreds = block.numberOfPreds(); + assert numPreds > 1 : "do not call otherwise"; + + // setup a list with the LIR instructions of all predecessors + for (int i = 0; i < numPreds; i++) { + LIRBlock pred = block.predAt(i); + assert pred != null; + assert pred.lir() != null; + List<LIRInstruction> predInstructions = pred.lir().instructionsList(); + + if (pred.numberOfSux() != 1) { + // this can happen with switch-statements where multiple edges are between + // the same blocks. 
+ return; + } + + assert pred.suxAt(0) == block : "invalid control flow"; + assert predInstructions.get(predInstructions.size() - 1).code == LIROpcode.Branch : "block with successor must end with branch"; + assert predInstructions.get(predInstructions.size() - 1) instanceof LIRBranch : "branch must be LIROpBranch"; + assert ((LIRBranch) predInstructions.get(predInstructions.size() - 1)).cond() == Condition.TRUE : "block must end with unconditional branch"; + + if (predInstructions.get(predInstructions.size() - 1).info != null) { + // can not optimize instructions that have debug info + return; + } + + // ignore the unconditional branch at the end of the block + List<LIRInstruction> seq = predInstructions.subList(0, predInstructions.size() - 1); + edgeInstructionSeqences.add(seq); + } + + // process lir-instructions while all predecessors end with the same instruction + while (true) { + List<LIRInstruction> seq = edgeInstructionSeqences.get(0); + if (seq.isEmpty()) { + return; + } + + LIRInstruction op = last(seq); + for (int i = 1; i < numPreds; ++i) { + List<LIRInstruction> otherSeq = edgeInstructionSeqences.get(i); + if (otherSeq.isEmpty() || !same(op, last(otherSeq))) { + return; + } + } + + // insert the instruction at the beginning of the current block + block.lir().insertBefore(1, op); + + // delete the instruction at the end of all predecessors + for (int i = 0; i < numPreds; i++) { + seq = edgeInstructionSeqences.get(i); + removeLast(seq); + } + } + } + + /** + * Moves the longest {@linkplain #same common} subsequence at the start of all + * successors of {@code block} to the end of {@code block} just prior to the + * branch instruction ending {@code block}. 
+ */ + private void optimizeMovesAtBlockBegin(LIRBlock block) { + + edgeInstructionSeqences.clear(); + int numSux = block.numberOfSux(); + + List<LIRInstruction> instructions = block.lir().instructionsList(); + + assert numSux == 2 : "method should not be called otherwise"; + assert instructions.get(instructions.size() - 1).code == LIROpcode.Branch : "block with successor must end with branch block=B" + block.blockID(); + assert instructions.get(instructions.size() - 1) instanceof LIRBranch : "branch must be LIROpBranch"; + assert ((LIRBranch) instructions.get(instructions.size() - 1)).cond() == Condition.TRUE : "block must end with unconditional branch"; + + if (instructions.get(instructions.size() - 1).info != null) { + // cannot optimize instructions when debug info is needed + return; + } + + LIRInstruction branch = instructions.get(instructions.size() - 2); + if (branch.info != null || (branch.code != LIROpcode.Branch && branch.code != LIROpcode.CondFloatBranch)) { + // not a valid case for optimization + // currently, only blocks that end with two branches (conditional branch followed + // by unconditional branch) are optimized + return; + } + + // now it is guaranteed that the block ends with two branch instructions. 
+ // the instructions are inserted at the end of the block before these two branches + int insertIdx = instructions.size() - 2; + + if (C1XOptions.DetailedAsserts) { + for (int i = insertIdx - 1; i >= 0; i--) { + LIRInstruction op = instructions.get(i); + if ((op.code == LIROpcode.Branch || op.code == LIROpcode.CondFloatBranch) && ((LIRBranch) op).block() != null) { + throw new Error("block with two successors can have only two branch instructions"); + } + } + } + + // setup a list with the lir-instructions of all successors + for (int i = 0; i < numSux; i++) { + LIRBlock sux = block.suxAt(i); + List<LIRInstruction> suxInstructions = sux.lir().instructionsList(); + + assert suxInstructions.get(0).code == LIROpcode.Label : "block must start with label"; + + if (sux.numberOfPreds() != 1) { + // this can happen with switch-statements where multiple edges are between + // the same blocks. + return; + } + assert sux.predAt(0) == block : "invalid control flow"; + + // ignore the label at the beginning of the block + List<LIRInstruction> seq = suxInstructions.subList(1, suxInstructions.size()); + edgeInstructionSeqences.add(seq); + } + + // process LIR instructions while all successors begin with the same instruction + while (true) { + List<LIRInstruction> seq = edgeInstructionSeqences.get(0); + if (seq.isEmpty()) { + return; + } + + LIRInstruction op = first(seq); + for (int i = 1; i < numSux; i++) { + List<LIRInstruction> otherSeq = edgeInstructionSeqences.get(i); + if (otherSeq.isEmpty() || !same(op, first(otherSeq))) { + // these instructions are different and cannot be optimized . 
+ // no further optimization possible + return; + } + } + + // insert instruction at end of current block + block.lir().insertBefore(insertIdx, op); + insertIdx++; + + // delete the instructions at the beginning of all successors + for (int i = 0; i < numSux; i++) { + seq = edgeInstructionSeqences.get(i); + removeFirst(seq); + } + } + } + + /** + * Gets the first element from a LIR instruction sequence. + */ + private static LIRInstruction first(List<LIRInstruction> seq) { + return seq.get(0); + } + + /** + * Gets the last element from a LIR instruction sequence. + */ + private static LIRInstruction last(List<LIRInstruction> seq) { + return seq.get(seq.size() - 1); + } + + /** + * Removes the first element from a LIR instruction sequence. + */ + private static void removeFirst(List<LIRInstruction> seq) { + seq.remove(0); + } + + /** + * Removes the last element from a LIR instruction sequence. + */ + private static void removeLast(List<LIRInstruction> seq) { + seq.remove(seq.size() - 1); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/alloc/Interval.java Wed Jun 08 08:59:54 2011 +0200 @@ -0,0 +1,1173 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.max.graal.compiler.alloc; + +import java.util.*; + +import com.oracle.max.graal.compiler.*; +import com.oracle.max.graal.compiler.debug.*; +import com.oracle.max.graal.compiler.lir.*; +import com.oracle.max.graal.compiler.util.*; +import com.sun.cri.ci.*; + +/** + * Represents an interval in the {@linkplain LinearScan linear scan register allocator}. + * + * @author Thomas Wuerthinger + * @author Doug Simon + */ +public final class Interval { + + /** + * A pair of intervals. 
+ */ + static final class Pair { + public final Interval first; + public final Interval second; + public Pair(Interval first, Interval second) { + this.first = first; + this.second = second; + } + } + + /** + * A set of interval lists, one per {@linkplain RegisterBinding binding} type. + */ + static final class RegisterBindingLists { + + /** + * List of intervals whose binding is currently {@link RegisterBinding#Fixed}. + */ + public Interval fixed; + + /** + * List of intervals whose binding is currently {@link RegisterBinding#Any}. + */ + public Interval any; + + public RegisterBindingLists(Interval fixed, Interval any) { + this.fixed = fixed; + this.any = any; + } + + /** + * Gets the list for a specified binding. + * + * @param binding specifies the list to be returned + * @return the list of intervals whose binding is {@code binding} + */ + public Interval get(RegisterBinding binding) { + if (binding == RegisterBinding.Any) { + return any; + } + assert binding == RegisterBinding.Fixed; + return fixed; + } + + /** + * Sets the list for a specified binding. + * + * @param binding specifies the list to be replaced + * @param a list of intervals whose binding is {@code binding} + */ + public void set(RegisterBinding binding, Interval list) { + assert list != null; + if (binding == RegisterBinding.Any) { + any = list; + } else { + assert binding == RegisterBinding.Fixed; + fixed = list; + } + } + + /** + * Adds an interval to a list sorted by {@linkplain Interval#currentFrom() current from} positions. 
+ * + * @param binding specifies the list to be updated + * @param interval the interval to add + */ + public void addToListSortedByCurrentFromPositions(RegisterBinding binding, Interval interval) { + Interval list = get(binding); + Interval prev = null; + Interval cur = list; + while (cur.currentFrom() < interval.currentFrom()) { + prev = cur; + cur = cur.next; + } + Interval result = list; + if (prev == null) { + // add to head of list + result = interval; + } else { + // add before 'cur' + prev.next = interval; + } + interval.next = cur; + set(binding, result); + } + + /** + * Adds an interval to a list sorted by {@linkplain Interval#from() start} positions and + * {@linkplain Interval#firstUsage(RegisterPriority) first usage} positions. + * + * @param binding specifies the list to be updated + * @param interval the interval to add + */ + public void addToListSortedByStartAndUsePositions(RegisterBinding binding, Interval interval) { + Interval list = get(binding); + Interval prev = null; + Interval cur = list; + while (cur.from() < interval.from() || (cur.from() == interval.from() && cur.firstUsage(RegisterPriority.None) < interval.firstUsage(RegisterPriority.None))) { + prev = cur; + cur = cur.next; + } + if (prev == null) { + list = interval; + } else { + prev.next = interval; + } + interval.next = cur; + set(binding, list); + } + + /** + * Removes an interval from a list. + * + * @param binding specifies the list to be updated + * @param interval the interval to remove + */ + public void remove(RegisterBinding binding, Interval i) { + Interval list = get(binding); + Interval prev = null; + Interval cur = list; + while (cur != i) { + assert cur != null && cur != Interval.EndMarker : "interval has not been found in list: " + i; + prev = cur; + cur = cur.next; + } + if (prev == null) { + set(binding, cur.next); + } else { + prev.next = cur.next; + } + } + } + + /** + * Constants denoting the register usage priority for an interval. 
+ * The constants are declared in increasing order of priority are + * are used to optimize spilling when multiple overlapping intervals + * compete for limited registers. + */ + enum RegisterPriority { + /** + * No special reason for an interval to be allocated a register. + */ + None, + + /** + * Priority level for intervals live at the end of a loop. + */ + LiveAtLoopEnd, + + /** + * Priority level for intervals that should be allocated to a register. + */ + ShouldHaveRegister, + + /** + * Priority level for intervals that must be allocated to a register. + */ + MustHaveRegister; + + public static final RegisterPriority[] VALUES = values(); + + /** + * Determines if this priority is higher than or equal to a given priority. + */ + public boolean greaterEqual(RegisterPriority other) { + return ordinal() >= other.ordinal(); + } + + /** + * Determines if this priority is lower than a given priority. + */ + public boolean lessThan(RegisterPriority other) { + return ordinal() < other.ordinal(); + } + } + + /** + * Constants denoting whether an interval is bound to a specific register. This models + * platform dependencies on register usage for certain instructions. + */ + enum RegisterBinding { + /** + * Interval is bound to a specific register as required by the platform. + */ + Fixed, + + /** + * Interval has no specific register requirements. + */ + Any; + + public static final RegisterBinding[] VALUES = values(); + } + + /** + * Constants denoting the linear-scan states an interval may be in with respect to the + * {@linkplain Interval#from() start} {@code position} of the interval being processed. + */ + enum State { + /** + * An interval that starts after {@code position}. + */ + Unhandled, + + /** + * An interval that {@linkplain Interval#covers covers} {@code position} and has an assigned register. 
+ */ + Active, + + /** + * An interval that starts before and ends after {@code position} but does not + * {@linkplain Interval#covers cover} it due to a lifetime hole. + */ + Inactive, + + /** + * An interval that ends before {@code position} or is spilled to memory. + */ + Handled; + } + + /** + * Constants used in optimization of spilling of an interval. + */ + enum SpillState { + /** + * Starting state of calculation: no definition found yet. + */ + NoDefinitionFound, + + /** + * One definition has already been found. Two consecutive definitions are treated as one + * (e.g. a consecutive move and add because of two-operand LIR form). + * The position of this definition is given by {@link Interval#spillDefinitionPos()}. + */ + NoSpillStore, + + /** + * One spill move has already been inserted. + */ + OneSpillStore, + + /** + * The interval should be stored immediately after its definition to prevent + * multiple redundant stores. + */ + StoreAtDefinition, + + /** + * The interval starts in memory (e.g. method parameter), so a store is never necessary. + */ + StartInMemory, + + /** + * The interval has more than one definition (e.g. resulting from phi moves), so stores + * to memory are not optimized. + */ + NoOptimization + } + + /** + * List of use positions. Each entry in the list records the use position and register + * priority associated with the use position. The entries in the list are in descending + * order of use position. + * + * @author Doug Simon + */ + public static final class UsePosList { + private IntList list; + + /** + * Creates a use list. + * + * @param initialCapacity the initial capacity of the list in terms of entries + */ + public UsePosList(int initialCapacity) { + list = new IntList(initialCapacity * 2); + } + + private UsePosList(IntList list) { + this.list = list; + } + + /** + * Splits this list around a given position. 
All entries in this list with a use position greater or equal than + * {@code splitPos} are removed from this list and added to the returned list. + * + * @param splitPos the position for the split + * @return a use position list containing all entries removed from this list that have a use position greater or equal + * than {@code splitPos} + */ + public UsePosList splitAt(int splitPos) { + int i = size() - 1; + int len = 0; + while (i >= 0 && usePos(i) < splitPos) { + --i; + len += 2; + } + int listSplitIndex = (i + 1) * 2; + IntList childList = list; + list = IntList.copy(this.list, listSplitIndex, len); + childList.setSize(listSplitIndex); + UsePosList child = new UsePosList(childList); + return child; + } + + /** + * Gets the use position at a specified index in this list. + * + * @param index the index of the entry for which the use position is returned + * @return the use position of entry {@code index} in this list + */ + public int usePos(int index) { + return list.get(index << 1); + } + + /** + * Gets the register priority for the use position at a specified index in this list. 
+ * + * @param index the index of the entry for which the register priority is returned + * @return the register priority of entry {@code index} in this list + */ + public RegisterPriority registerPriority(int index) { + return RegisterPriority.VALUES[list.get((index << 1) + 1)]; + } + + public void add(int usePos, RegisterPriority registerPriority) { + assert list.size() == 0 || usePos(size() - 1) > usePos; + list.add(usePos); + list.add(registerPriority.ordinal()); + } + + public int size() { + return list.size() >> 1; + } + + public void removeLowestUsePos() { + list.setSize(list.size() - 2); + } + + public void setRegisterPriority(int index, RegisterPriority registerPriority) { + list.set(index * 2, registerPriority.ordinal()); + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder("["); + for (int i = size() - 1; i >= 0; --i) { + if (buf.length() != 1) { + buf.append(", "); + } + RegisterPriority prio = registerPriority(i); + buf.append(usePos(i)).append(" -> ").append(prio.ordinal()).append(':').append(prio); + } + return buf.append("]").toString(); + } + } + + /** + * The {@linkplain CiRegisterValue register} or {@linkplain CiVariable variable} for this interval prior to register allocation. + */ + public final CiValue operand; + + /** + * The {@linkplain OperandPool#operandNumber(CiValue) operand number} for this interval's {@linkplain #operand operand}. + */ + public final int operandNumber; + + /** + * The {@linkplain CiRegisterValue register}, {@linkplain CiStackSlot spill slot} or {@linkplain CiAddress address} assigned to this interval. + */ + private CiValue location; + + /** + * The stack slot to which all splits of this interval are spilled if necessary. + */ + private CiStackSlot spillSlot; + + /** + * The kind of this interval. + * Only valid if this is a {@linkplain #isVariable() variable}. + */ + private CiKind kind; + + /** + * The head of the list of ranges describing this interval. 
This list is sorted by {@linkplain LIRInstruction#id instruction ids}. + */ + private Range first; + + /** + * List of (use-positions, register-priorities) pairs, sorted by use-positions. + */ + private UsePosList usePosList; + + /** + * Iterator used to traverse the ranges of an interval. + */ + private Range current; + + /** + * Link to next interval in a sorted list of intervals that ends with {@link #EndMarker}. + */ + Interval next; + + /** + * The linear-scan state of this interval. + */ + State state; + + private int cachedTo; // cached value: to of last range (-1: not cached) + + /** + * The interval from which this one is derived. If this is a {@linkplain #isSplitParent() split parent}, it points to itself. + */ + private Interval splitParent; + + /** + * List of all intervals that are split off from this interval. This is only used if this is a {@linkplain #isSplitParent() split parent}. + */ + private List<Interval> splitChildren = Collections.emptyList(); + + /** + * Current split child that has been active or inactive last (always stored in split parents). + */ + private Interval currentSplitChild; + + /** + * Specifies if move is inserted between currentSplitChild and this interval when interval gets active the first time. + */ + private boolean insertMoveWhenActivated; + + /** + * For spill move optimization. + */ + private SpillState spillState; + + /** + * Position where this interval is defined (if defined only once). + */ + private int spillDefinitionPos; + + /** + * This interval should be assigned the same location as the hint interval. 
+ */ + private Interval locationHint; + + void assignLocation(CiValue location) { + if (location.isRegister()) { + assert this.location == null : "cannot re-assign location for " + this; + if (location.kind == CiKind.Illegal && kind != CiKind.Illegal) { + location = location.asRegister().asValue(kind); + } + } else { + assert this.location == null || this.location.isRegister() : "cannot re-assign location for " + this; + assert location.isStackSlot(); + assert location.kind != CiKind.Illegal; + assert location.kind == this.kind; + } + this.location = location; + } + + /** + * Gets the {@linkplain CiRegisterValue register}, {@linkplain CiStackSlot spill slot} or {@linkplain CiAddress address} assigned to this interval. + */ + public CiValue location() { + return location; + } + + public CiKind kind() { + assert !operand.isRegister() : "cannot access type for fixed interval"; + return kind; + } + + void setKind(CiKind kind) { + assert operand.isRegister() || this.kind == CiKind.Illegal || this.kind == kind : "overwriting existing type"; + assert kind == kind.stackKind() || kind == CiKind.Short : "these kinds should have int type registers"; + this.kind = kind; + } + + public Range first() { + return first; + } + + int from() { + return first.from; + } + + int to() { + if (cachedTo == -1) { + cachedTo = calcTo(); + } + assert cachedTo == calcTo() : "invalid cached value"; + return cachedTo; + } + + int numUsePositions() { + return usePosList.size(); + } + + void setLocationHint(Interval interval) { + locationHint = interval; + } + + boolean isSplitParent() { + return splitParent == this; + } + + boolean isSplitChild() { + return splitParent != this; + } + + /** + * Gets the split parent for this interval. + */ + public Interval splitParent() { + assert splitParent.isSplitParent() : "not a split parent: " + this; + return splitParent; + } + + /** + * Gets the canonical spill slot for this interval. 
+ */ + CiStackSlot spillSlot() { + return splitParent().spillSlot; + } + + void setSpillSlot(CiStackSlot slot) { + assert splitParent().spillSlot == null : "connot overwrite existing spill slot"; + splitParent().spillSlot = slot; + } + + Interval currentSplitChild() { + return splitParent().currentSplitChild; + } + + void makeCurrentSplitChild() { + splitParent().currentSplitChild = this; + } + + boolean insertMoveWhenActivated() { + return insertMoveWhenActivated; + } + + void setInsertMoveWhenActivated(boolean b) { + insertMoveWhenActivated = b; + } + + // for spill optimization + public SpillState spillState() { + return splitParent().spillState; + } + + int spillDefinitionPos() { + return splitParent().spillDefinitionPos; + } + + void setSpillState(SpillState state) { + assert state.ordinal() >= spillState().ordinal() : "state cannot decrease"; + splitParent().spillState = state; + } + + void setSpillDefinitionPos(int pos) { + assert spillDefinitionPos() == -1 : "cannot set the position twice"; + splitParent().spillDefinitionPos = pos; + } + + // returns true if this interval has a shadow copy on the stack that is always correct + boolean alwaysInMemory() { + return splitParent().spillState == SpillState.StoreAtDefinition || splitParent().spillState == SpillState.StartInMemory; + } + + void removeFirstUsePos() { + usePosList.removeLowestUsePos(); + } + + // test intersection + boolean intersects(Interval i) { + return first.intersects(i.first); + } + + int intersectsAt(Interval i) { + return first.intersectsAt(i.first); + } + + // range iteration + void rewindRange() { + current = first; + } + + void nextRange() { + assert this != EndMarker : "not allowed on sentinel"; + current = current.next; + } + + int currentFrom() { + return current.from; + } + + int currentTo() { + return current.to; + } + + boolean currentAtEnd() { + return current == Range.EndMarker; + } + + boolean currentIntersects(Interval it) { + return current.intersects(it.current); + } + + int 
currentIntersectsAt(Interval it) { + return current.intersectsAt(it.current); + } + + /** + * Sentinel interval to denote the end of an interval list. + */ + static final Interval EndMarker = new Interval(CiValue.IllegalValue, -1); + + Interval(CiValue operand, int operandNumber) { + C1XMetrics.LSRAIntervalsCreated++; + assert operand != null; + this.operand = operand; + this.operandNumber = operandNumber; + if (operand.isRegister()) { + location = operand; + } else { + assert operand.isIllegal() || operand.isVariable(); + } + this.kind = CiKind.Illegal; + this.first = Range.EndMarker; + this.usePosList = new UsePosList(4); + this.current = Range.EndMarker; + this.next = EndMarker; + this.cachedTo = -1; + this.spillState = SpillState.NoDefinitionFound; + this.spillDefinitionPos = -1; + splitParent = this; + currentSplitChild = this; + } + + int calcTo() { + assert first != Range.EndMarker : "interval has no range"; + + Range r = first; + while (r.next != Range.EndMarker) { + r = r.next; + } + return r.to; + } + + // consistency check of split-children + boolean checkSplitChildren() { + if (!splitChildren.isEmpty()) { + assert isSplitParent() : "only split parents can have children"; + + for (int i = 0; i < splitChildren.size(); i++) { + Interval i1 = splitChildren.get(i); + + assert i1.splitParent() == this : "not a split child of this interval"; + assert i1.kind() == kind() : "must be equal for all split children"; + assert i1.spillSlot() == spillSlot() : "must be equal for all split children"; + + for (int j = i + 1; j < splitChildren.size(); j++) { + Interval i2 = splitChildren.get(j); + + assert i1.operand != i2.operand : "same register number"; + + if (i1.from() < i2.from()) { + assert i1.to() <= i2.from() && i1.to() < i2.to() : "intervals overlapping"; + } else { + assert i2.from() < i1.from() : "intervals start at same opId"; + assert i2.to() <= i1.from() && i2.to() < i1.to() : "intervals overlapping"; + } + } + } + } + + return true; + } + + public Interval 
locationHint(boolean searchSplitChild, LinearScan allocator) { + if (!searchSplitChild) { + return locationHint; + } + + if (locationHint != null) { + assert locationHint.isSplitParent() : "ony split parents are valid hint registers"; + + if (locationHint.location != null && locationHint.location.isRegister()) { + return locationHint; + } else if (!locationHint.splitChildren.isEmpty()) { + // search the first split child that has a register assigned + int len = locationHint.splitChildren.size(); + for (int i = 0; i < len; i++) { + Interval interval = locationHint.splitChildren.get(i); + if (interval.location != null && interval.location.isRegister()) { + return interval; + } + } + } + } + + // no hint interval found that has a register assigned + return null; + } + + Interval getSplitChildAtOpId(int opId, LIRInstruction.OperandMode mode, LinearScan allocator) { + assert isSplitParent() : "can only be called for split parents"; + assert opId >= 0 : "invalid opId (method cannot be called for spill moves)"; + + if (splitChildren.isEmpty()) { + assert this.covers(opId, mode) : this + " does not cover " + opId; + return this; + } else { + Interval result = null; + int len = splitChildren.size(); + + // in outputMode, the end of the interval (opId == cur.to()) is not valid + int toOffset = (mode == LIRInstruction.OperandMode.Output ? 
0 : 1); + + int i; + for (i = 0; i < len; i++) { + Interval cur = splitChildren.get(i); + if (cur.from() <= opId && opId < cur.to() + toOffset) { + if (i > 0) { + // exchange current split child to start of list (faster access for next call) + Util.atPutGrow(splitChildren, i, splitChildren.get(0), null); + Util.atPutGrow(splitChildren, 0, cur, null); + } + + // interval found + result = cur; + break; + } + } + + assert checkSplitChild(result, opId, allocator, toOffset, mode); + return result; + } + } + + private boolean checkSplitChild(Interval result, int opId, LinearScan allocator, int toOffset, LIRInstruction.OperandMode mode) { + if (result == null) { + // this is an error + StringBuilder msg = new StringBuilder(this.toString()).append(" has no child at ").append(opId); + if (!splitChildren.isEmpty()) { + Interval first = splitChildren.get(0); + Interval last = splitChildren.get(splitChildren.size() - 1); + msg.append(" (first = ").append(first).append(", last = ").append(last).append(")"); + } + throw new CiBailout("Linear Scan Error: " + msg); + } + + if (!splitChildren.isEmpty()) { + for (Interval interval : splitChildren) { + if (interval != result && interval.from() <= opId && opId < interval.to() + toOffset) { + TTY.println(String.format("two valid result intervals found for opId %d: %d and %d", opId, result.operandNumber, interval.operandNumber)); + TTY.println(result.logString(allocator)); + TTY.println(interval.logString(allocator)); + throw new CiBailout("two valid result intervals found"); + } + } + } + assert result.covers(opId, mode) : "opId not covered by interval"; + return true; + } + + // returns the last split child that ends before the given opId + Interval getSplitChildBeforeOpId(int opId) { + assert opId >= 0 : "invalid opId"; + + Interval parent = splitParent(); + Interval result = null; + + assert !parent.splitChildren.isEmpty() : "no split children available"; + int len = parent.splitChildren.size(); + + for (int i = len - 1; i >= 0; 
i--) { + Interval cur = parent.splitChildren.get(i); + if (cur.to() <= opId && (result == null || result.to() < cur.to())) { + result = cur; + } + } + + assert result != null : "no split child found"; + return result; + } + + // checks if opId is covered by any split child + boolean splitChildCovers(int opId, LIRInstruction.OperandMode mode) { + assert isSplitParent() : "can only be called for split parents"; + assert opId >= 0 : "invalid opId (method can not be called for spill moves)"; + + if (splitChildren.isEmpty()) { + // simple case if interval was not split + return covers(opId, mode); + + } else { + // extended case: check all split children + int len = splitChildren.size(); + for (int i = 0; i < len; i++) { + Interval cur = splitChildren.get(i); + if (cur.covers(opId, mode)) { + return true; + } + } + return false; + } + } + + // Note: use positions are sorted descending . first use has highest index + int firstUsage(RegisterPriority minRegisterPriority) { + assert operand.isVariable() : "cannot access use positions for fixed intervals"; + + for (int i = usePosList.size() - 1; i >= 0; --i) { + RegisterPriority registerPriority = usePosList.registerPriority(i); + if (registerPriority.greaterEqual(minRegisterPriority)) { + return usePosList.usePos(i); + } + } + return Integer.MAX_VALUE; + } + + int nextUsage(RegisterPriority minRegisterPriority, int from) { + assert operand.isVariable() : "cannot access use positions for fixed intervals"; + + for (int i = usePosList.size() - 1; i >= 0; --i) { + int usePos = usePosList.usePos(i); + if (usePos >= from && usePosList.registerPriority(i).greaterEqual(minRegisterPriority)) { + return usePos; + } + } + return Integer.MAX_VALUE; + } + + int nextUsageExact(RegisterPriority exactRegisterPriority, int from) { + assert operand.isVariable() : "cannot access use positions for fixed intervals"; + + for (int i = usePosList.size() - 1; i >= 0; --i) { + int usePos = usePosList.usePos(i); + if (usePos >= from && 
usePosList.registerPriority(i) == exactRegisterPriority) { + return usePos; + } + } + return Integer.MAX_VALUE; + } + + int previousUsage(RegisterPriority minRegisterPriority, int from) { + assert operand.isVariable() : "cannot access use positions for fixed intervals"; + + int prev = 0; + for (int i = usePosList.size() - 1; i >= 0; --i) { + int usePos = usePosList.usePos(i); + if (usePos > from) { + return prev; + } + if (usePosList.registerPriority(i).greaterEqual(minRegisterPriority)) { + prev = usePos; + } + } + return prev; + } + + void addUsePos(int pos, RegisterPriority registerPriority) { + assert covers(pos, LIRInstruction.OperandMode.Input) : "use position not covered by live range"; + + // do not add use positions for precolored intervals because they are never used + if (registerPriority != RegisterPriority.None && operand.isVariable()) { + if (C1XOptions.DetailedAsserts) { + for (int i = 0; i < usePosList.size(); i++) { + assert pos <= usePosList.usePos(i) : "already added a use-position with lower position"; + if (i > 0) { + assert usePosList.usePos(i) < usePosList.usePos(i - 1) : "not sorted descending"; + } + } + } + + // Note: addUse is called in descending order, so list gets sorted + // automatically by just appending new use positions + int len = usePosList.size(); + if (len == 0 || usePosList.usePos(len - 1) > pos) { + usePosList.add(pos, registerPriority); + } else if (usePosList.registerPriority(len - 1).lessThan(registerPriority)) { + assert usePosList.usePos(len - 1) == pos : "list not sorted correctly"; + usePosList.setRegisterPriority(len - 1, registerPriority); + } + } + } + + void addRange(int from, int to) { + assert from < to : "invalid range"; + assert first() == Range.EndMarker || to < first().next.from : "not inserting at begin of interval"; + assert from <= first().to : "not inserting at begin of interval"; + + if (first.from <= to) { + assert first != Range.EndMarker; + // join intersecting ranges + first.from = Math.min(from, 
first().from); + first.to = Math.max(to, first().to); + } else { + // insert new range + first = new Range(from, to, first()); + } + } + + Interval newSplitChild(LinearScan allocator) { + // allocate new interval + Interval parent = splitParent(); + Interval result = allocator.createDerivedInterval(parent); + result.setKind(kind()); + + result.splitParent = parent; + result.setLocationHint(parent); + + // insert new interval in children-list of parent + if (parent.splitChildren.isEmpty()) { + assert isSplitParent() : "list must be initialized at first split"; + + // Create new non-shared list + parent.splitChildren = new ArrayList<Interval>(4); + parent.splitChildren.add(this); + } + parent.splitChildren.add(result); + + return result; + } + + /** + * Splits this interval at a specified position and returns the remainder as a new <i>child</i> interval + * of this interval's {@linkplain #splitParent() parent} interval. + * <p> + * When an interval is split, a bi-directional link is established between the original <i>parent</i> + * interval and the <i>children</i> intervals that are split off this interval. + * When a split child is split again, the new created interval is a direct child + * of the original parent. That is, there is no tree of split children stored, just a flat list. + * All split children are spilled to the same {@linkplain #spillSlot spill slot}. 
+ * + * @param splitPos the position at which to split this interval + * @param allocator the register allocator context + * @return the child interval split off from this interval + */ + Interval split(int splitPos, LinearScan allocator) { + assert operand.isVariable() : "cannot split fixed intervals"; + + // allocate new interval + Interval result = newSplitChild(allocator); + + // split the ranges + Range prev = null; + Range cur = first; + while (cur != Range.EndMarker && cur.to <= splitPos) { + prev = cur; + cur = cur.next; + } + assert cur != Range.EndMarker : "split interval after end of last range"; + + if (cur.from < splitPos) { + result.first = new Range(splitPos, cur.to, cur.next); + cur.to = splitPos; + cur.next = Range.EndMarker; + + } else { + assert prev != null : "split before start of first range"; + result.first = cur; + prev.next = Range.EndMarker; + } + result.current = result.first; + cachedTo = -1; // clear cached value + + // split list of use positions + result.usePosList = usePosList.splitAt(splitPos); + + if (C1XOptions.DetailedAsserts) { + for (int i = 0; i < usePosList.size(); i++) { + assert usePosList.usePos(i) < splitPos; + } + for (int i = 0; i < result.usePosList.size(); i++) { + assert result.usePosList.usePos(i) >= splitPos; + } + } + return result; + } + + /** + * Splits this interval at a specified position and returns + * the head as a new interval (this interval is the tail). 
+ * + * Currently, only the first range can be split, and the new interval must not have split positions + */ + Interval splitFromStart(int splitPos, LinearScan allocator) { + assert operand.isVariable() : "cannot split fixed intervals"; + assert splitPos > from() && splitPos < to() : "can only split inside interval"; + assert splitPos > first.from && splitPos <= first.to : "can only split inside first range"; + assert firstUsage(RegisterPriority.None) > splitPos : "can not split when use positions are present"; + + // allocate new interval + Interval result = newSplitChild(allocator); + + // the new interval has only one range (checked by assertion above, + // so the splitting of the ranges is very simple + result.addRange(first.from, splitPos); + + if (splitPos == first.to) { + assert first.next != Range.EndMarker : "must not be at end"; + first = first.next; + } else { + first.from = splitPos; + } + + return result; + } + + // returns true if the opId is inside the interval + boolean covers(int opId, LIRInstruction.OperandMode mode) { + Range cur = first; + + while (cur != Range.EndMarker && cur.to < opId) { + cur = cur.next; + } + if (cur != Range.EndMarker) { + assert cur.to != cur.next.from : "ranges not separated"; + + if (mode == LIRInstruction.OperandMode.Output) { + return cur.from <= opId && opId < cur.to; + } else { + return cur.from <= opId && opId <= cur.to; + } + } + return false; + } + + // returns true if the interval has any hole between holeFrom and holeTo + // (even if the hole has only the length 1) + boolean hasHoleBetween(int holeFrom, int holeTo) { + assert holeFrom < holeTo : "check"; + assert from() <= holeFrom && holeTo <= to() : "index out of interval"; + + Range cur = first; + while (cur != Range.EndMarker) { + assert cur.to < cur.next.from : "no space between ranges"; + + // hole-range starts before this range . hole + if (holeFrom < cur.from) { + return true; + + // hole-range completely inside this range . 
no hole + } else { + if (holeTo <= cur.to) { + return false; + + // overlapping of hole-range with this range . hole + } else { + if (holeFrom <= cur.to) { + return true; + } + } + } + + cur = cur.next; + } + + return false; + } + + @Override + public String toString() { + String from = "?"; + String to = "?"; + if (first != null && first != Range.EndMarker) { + from = String.valueOf(from()); + to = String.valueOf(to()); + } + String location = this.location == null ? "" : "@" + this.location.name(); + return operandNumber + ":" + operand + (operand.isRegister() ? "" : location) + "[" + from + "," + to + "]"; + } + + /** + * Gets the use position information for this interval. + */ + public UsePosList usePosList() { + return usePosList; + } + + /** + * Gets a single line string for logging the details of this interval to a log stream. + * + * @param allocator the register allocator context + */ + public String logString(LinearScan allocator) { + StringBuilder buf = new StringBuilder(100); + buf.append(operandNumber).append(':').append(operand).append(' '); + if (!operand.isRegister()) { + if (location != null) { + buf.append("location{").append(location).append("} "); + } + } + + buf.append("hints{").append(splitParent.operandNumber); + Interval hint = locationHint(false, allocator); + if (hint != null && hint.operandNumber != splitParent.operandNumber) { + buf.append(", ").append(hint.operandNumber); + } + buf.append("} ranges{"); + + // print ranges + Range cur = first; + while (cur != Range.EndMarker) { + if (cur != first) { + buf.append(", "); + } + buf.append(cur); + cur = cur.next; + assert cur != null : "range list not closed with range sentinel"; + } + buf.append("} uses{"); + + // print use positions + int prev = 0; + for (int i = usePosList.size() - 1; i >= 0; --i) { + assert prev < usePosList.usePos(i) : "use positions not sorted"; + if (i != usePosList.size() - 1) { + buf.append(", "); + } + 
buf.append(usePosList.usePos(i)).append(':').append(usePosList.registerPriority(i)); + prev = usePosList.usePos(i); + } + return buf.append("} spill-state{").append(spillState()).append("}").toString(); + } +}
/*
 * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package com.oracle.max.graal.compiler.alloc;

import com.oracle.max.graal.compiler.*;
import com.oracle.max.graal.compiler.alloc.Interval.*;
import com.oracle.max.graal.compiler.debug.*;

/**
 * Walks the sorted interval lists of a linear-scan allocation, advancing each
 * interval between the unhandled, active, inactive and handled states as the
 * current position moves forward through the LIR operation ids.
 *
 * @author Thomas Wuerthinger
 */
public class IntervalWalker {

    protected final C1XCompilation compilation;
    protected final LinearScan allocator;

    /**
     * Sorted list of intervals, not live before the current position.
     */
    RegisterBindingLists unhandledLists;

    /**
     * Sorted list of intervals, live at the current position.
     */
    RegisterBindingLists activeLists;

    /**
     * Sorted list of intervals in a life time hole at the current position.
     */
    RegisterBindingLists inactiveLists;

    /**
     * The current interval (taken from the unhandled list) being processed.
     */
    protected Interval current;

    /**
     * The current position (intercept point through the intervals).
     */
    protected int currentPosition;

    /**
     * The binding of the current interval being processed.
     */
    protected RegisterBinding currentBinding;

    /**
     * Processes the {@linkplain #current} interval in an attempt to allocate a physical
     * register to it and thus allow it to be moved to a list of {@linkplain #activeLists active} intervals.
     * This default implementation always succeeds; subclasses override it with the real allocation logic.
     *
     * @return {@code true} if a register was allocated to the {@linkplain #current} interval
     */
    boolean activateCurrent() {
        return true;
    }

    void walkBefore(int lirOpId) {
        walkTo(lirOpId - 1);
    }

    void walk() {
        walkTo(Integer.MAX_VALUE);
    }

    /**
     * Creates a new interval walker.
     *
     * @param allocator the register allocator context
     * @param unhandledFixed the list of unhandled {@linkplain RegisterBinding#Fixed fixed} intervals
     * @param unhandledAny the list of unhandled {@linkplain RegisterBinding#Any non-fixed} intervals
     */
    IntervalWalker(LinearScan allocator, Interval unhandledFixed, Interval unhandledAny) {
        this.compilation = allocator.compilation;
        this.allocator = allocator;

        unhandledLists = new RegisterBindingLists(unhandledFixed, unhandledAny);
        activeLists = new RegisterBindingLists(Interval.EndMarker, Interval.EndMarker);
        inactiveLists = new RegisterBindingLists(Interval.EndMarker, Interval.EndMarker);
        currentPosition = -1;
        current = null;
        nextInterval();
    }

    // removes an interval from whichever state list (active or inactive) it is currently on;
    // only the Any-binding lists are searched
    void removeFromList(Interval interval) {
        if (interval.state == State.Active) {
            activeLists.remove(RegisterBinding.Any, interval);
        } else {
            assert interval.state == State.Inactive : "invalid state";
            inactiveLists.remove(RegisterBinding.Any, interval);
        }
    }

    // Advances all intervals on the given state list (Active or Inactive) up to position
    // 'from', moving each interval whose current range has been passed to the correct new
    // list (active, inactive, or handled). The list is re-linked in place while iterating.
    void walkTo(State state, int from) {
        assert state == State.Active || state == State.Inactive : "wrong state";
        for (RegisterBinding binding : RegisterBinding.VALUES) {
            Interval prevprev = null;
            Interval prev = (state == State.Active) ? activeLists.get(binding) : inactiveLists.get(binding);
            Interval next = prev;
            while (next.currentFrom() <= from) {
                Interval cur = next;
                next = cur.next;

                boolean rangeHasChanged = false;
                while (cur.currentTo() <= from) {
                    // the current range ends at or before 'from': step to the next range
                    cur.nextRange();
                    rangeHasChanged = true;
                }

                // also handle move from inactive list to active list
                rangeHasChanged = rangeHasChanged || (state == State.Inactive && cur.currentFrom() <= from);

                if (rangeHasChanged) {
                    // remove cur from list
                    if (prevprev == null) {
                        if (state == State.Active) {
                            activeLists.set(binding, next);
                        } else {
                            inactiveLists.set(binding, next);
                        }
                    } else {
                        prevprev.next = next;
                    }
                    prev = next;
                    if (cur.currentAtEnd()) {
                        // move to handled state (not maintained as a list)
                        cur.state = State.Handled;
                        intervalMoved(cur, binding, state, State.Handled);
                    } else if (cur.currentFrom() <= from) {
                        // sort into active list
                        activeLists.addToListSortedByCurrentFromPositions(binding, cur);
                        cur.state = State.Active;
                        if (prev == cur) {
                            // cur was re-inserted at the position we are iterating; skip over it
                            assert state == State.Active : "check";
                            prevprev = prev;
                            prev = cur.next;
                        }
                        intervalMoved(cur, binding, state, State.Active);
                    } else {
                        // sort into inactive list
                        inactiveLists.addToListSortedByCurrentFromPositions(binding, cur);
                        cur.state = State.Inactive;
                        if (prev == cur) {
                            // cur was re-inserted at the position we are iterating; skip over it
                            assert state == State.Inactive : "check";
                            prevprev = prev;
                            prev = cur.next;
                        }
                        intervalMoved(cur, binding, state, State.Inactive);
                    }
                } else {
                    prevprev = prev;
                    prev = cur.next;
                }
            }
        }
    }

    // Pops the next interval from the unhandled lists into 'current'/'currentBinding',
    // or sets 'current' to null when both lists are exhausted.
    void nextInterval() {
        RegisterBinding binding;
        Interval any = unhandledLists.any;
        Interval fixed = unhandledLists.fixed;

        if (any != Interval.EndMarker) {
            // intervals may start at same position -> prefer fixed interval
            binding = fixed != Interval.EndMarker && fixed.from() <= any.from() ? RegisterBinding.Fixed : RegisterBinding.Any;

            assert binding == RegisterBinding.Fixed && fixed.from() <= any.from() || binding == RegisterBinding.Any && any.from() <= fixed.from() : "wrong interval!!!";
            assert any == Interval.EndMarker || fixed == Interval.EndMarker || any.from() != fixed.from() || binding == RegisterBinding.Fixed : "if fixed and any-Interval start at same position, fixed must be processed first";

        } else if (fixed != Interval.EndMarker) {
            binding = RegisterBinding.Fixed;
        } else {
            current = null;
            return;
        }
        currentBinding = binding;
        current = unhandledLists.get(binding);
        unhandledLists.set(binding, current.next);
        current.next = Interval.EndMarker;
        current.rewindRange();
    }

    // Walks forward to toOpId, processing each unhandled interval that starts before
    // that position: state lists are advanced and activateCurrent() is given a chance
    // to allocate a register for the interval.
    void walkTo(int toOpId) {
        assert currentPosition <= toOpId : "can not walk backwards";
        while (current != null) {
            boolean isActive = current.from() <= toOpId;
            int opId = isActive ? current.from() : toOpId;

            if (C1XOptions.TraceLinearScanLevel >= 2 && !TTY.isSuppressed()) {
                if (currentPosition < opId) {
                    TTY.println();
                    TTY.println("walkTo(%d) *", opId);
                }
            }

            // set currentPosition prior to call of walkTo
            currentPosition = opId;

            // call walkTo even if currentPosition == id
            walkTo(State.Active, opId);
            walkTo(State.Inactive, opId);

            if (isActive) {
                current.state = State.Active;
                if (activateCurrent()) {
                    activeLists.addToListSortedByCurrentFromPositions(currentBinding, current);
                    intervalMoved(current, currentBinding, State.Unhandled, State.Active);
                }

                nextInterval();
            } else {
                return;
            }
        }
    }

    private void intervalMoved(Interval interval, RegisterBinding kind, State from, State to) {
        // intervalMoved() is called whenever an interval moves from one interval list to another.
        // In the implementation of this method it is prohibited to move the interval to any list.
        if (C1XOptions.TraceLinearScanLevel >= 4 && !TTY.isSuppressed()) {
            TTY.print(from.toString() + " to " + to.toString());
            TTY.fillTo(23);
            TTY.out().println(interval.logString(allocator));
        }
    }
}
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package com.oracle.max.graal.compiler.alloc;

import java.util.*;

import com.oracle.max.graal.compiler.lir.*;
import com.oracle.max.graal.compiler.util.*;
import com.sun.cri.ci.*;

/**
 * A buffer that accumulates LIR instructions to be inserted at given indexes
 * of a {@link LIRList}. Insertion points must be appended in ascending index
 * order; the buffered ops are later spliced into the list in one pass.
 *
 * @author Thomas Wuerthinger
 */
public final class LIRInsertionBuffer {

    private LIRList lir; // the lir list where ops of this buffer should be inserted later (null when uninitialized)

    // list of insertion points. index and count are stored alternately:
    // indexAndCount[i * 2]: the index into lir list where "count" ops should be inserted
    // indexAndCount[i * 2 + 1]: the number of ops to be inserted at index
    private final IntList indexAndCount;

    // the LIROps to be inserted
    private final List<LIRInstruction> ops;

    // records a new insertion point at 'index' with an initial op count of 'count'
    private void appendNew(int index, int count) {
        indexAndCount.add(index);
        indexAndCount.add(count);
    }

    private void setCountAt(int i, int value) {
        indexAndCount.set((i << 1) + 1, value);
    }

    LIRInsertionBuffer() {
        ops = new ArrayList<LIRInstruction>(8);
        indexAndCount = new IntList(8);
    }

    // must be called before using the insertion buffer
    void init(LIRList lir) {
        assert !initialized() : "already initialized";
        this.lir = lir;
        indexAndCount.clear();
        ops.clear();
    }

    boolean initialized() {
        return lir != null;
    }

    // called automatically when the buffer is appended to the LIRList
    public void finish() {
        lir = null;
    }

    // accessors
    public LIRList lirList() {
        return lir;
    }

    public int numberOfInsertionPoints() {
        return indexAndCount.size() >> 1;
    }

    public int indexAt(int i) {
        return indexAndCount.get((i << 1));
    }

    public int countAt(int i) {
        return indexAndCount.get((i << 1) + 1);
    }

    public int numberOfOps() {
        return ops.size();
    }

    public LIRInstruction opAt(int i) {
        return ops.get(i);
    }

    /**
     * Buffers a move from {@code src} to {@code dst} for insertion at {@code index}.
     */
    void move(int index, CiValue src, CiValue dst, LIRDebugInfo info) {
        append(index, new LIROp1(LIROpcode.Move, src, dst, dst.kind, info));
    }

    // Implementation of LIRInsertionBuffer

    /**
     * Buffers {@code op} for insertion at {@code index}. Indexes must not
     * decrease between successive calls; ops for the same index share one
     * insertion point whose count is bumped.
     */
    private void append(int index, LIRInstruction op) {
        assert indexAndCount.size() % 2 == 0 : "must have a count for each index";

        int i = numberOfInsertionPoints() - 1;
        if (i < 0 || indexAt(i) < index) {
            appendNew(index, 1);
        } else {
            assert indexAt(i) == index : "can append LIROps in ascending order only";
            assert countAt(i) > 0 : "check";
            setCountAt(i, countAt(i) + 1);
        }
        ops.add(op);

        assert verify();
    }

    // consistency check: insertion indexes strictly ascending and counts summing to the op total
    private boolean verify() {
        int sum = 0;
        int prevIdx = -1;

        for (int i = 0; i < numberOfInsertionPoints(); i++) {
            assert prevIdx < indexAt(i) : "index must be ordered ascending";
            // FIX: prevIdx was never advanced, so the ascending-order assertion only
            // ever compared against the initial -1 and was effectively a no-op.
            prevIdx = indexAt(i);
            sum += countAt(i);
        }
        assert sum == numberOfOps() : "wrong total sum";
        return true;
    }
}
/*
 * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package com.oracle.max.graal.compiler.alloc;

import static com.sun.cri.ci.CiUtil.*;
import static java.lang.reflect.Modifier.*;

import java.util.*;

import com.oracle.max.graal.compiler.*;
import com.oracle.max.graal.compiler.alloc.Interval.*;
import com.oracle.max.graal.compiler.debug.*;
import com.oracle.max.graal.compiler.gen.*;
import com.oracle.max.graal.compiler.graph.*;
import com.oracle.max.graal.compiler.ir.*;
import com.oracle.max.graal.compiler.lir.*;
import com.oracle.max.graal.compiler.lir.LIRInstruction.*;
import com.oracle.max.graal.compiler.observer.*;
import com.oracle.max.graal.compiler.util.*;
import com.oracle.max.graal.compiler.value.*;
import com.oracle.max.graal.compiler.value.FrameState.*;
import com.oracle.max.graal.graph.*;
import com.sun.cri.ci.*;
import com.sun.cri.ri.*;

/**
 * An implementation of the linear scan register allocator algorithm described
 * in <a href="http://doi.acm.org/10.1145/1064979.1064998">"Optimized Interval Splitting in a Linear Scan Register Allocator"</a>
 * by Christian Wimmer and Hanspeter Moessenboeck.
 *
 * @author Christian Wimmer (original HotSpot implementation)
 * @author Thomas Wuerthinger
 * @author Doug Simon
 */
public final class LinearScan {

    final C1XCompilation compilation;
    final IR ir;
    final LIRGenerator gen;
    final FrameMap frameMap;
    final RiRegisterAttributes[] registerAttributes;
    final CiRegister[] registers;

    private static final int INITIAL_SPLIT_INTERVALS_CAPACITY = 32;

    /**
     * List of blocks in linear-scan order. This is only correct as long as the CFG does not change.
     */
    final LIRBlock[] sortedBlocks;

    final OperandPool operands;

    /**
     * Number of stack slots used for intervals allocated to memory.
     */
    int maxSpills;

    /**
     * Unused spill slot for a single-word value because of alignment of a double-word value.
     */
    CiStackSlot unusedSpillSlot;

    /**
     * Map from {@linkplain #operandNumber(CiValue) operand numbers} to intervals.
     */
    Interval[] intervals;

    /**
     * The number of valid entries in {@link #intervals}.
     */
    int intervalsSize;

    /**
     * The index of the first entry in {@link #intervals} for a {@linkplain #createDerivedInterval(Interval) derived interval}.
     */
    int firstDerivedIntervalIndex = -1;

    /**
     * Intervals sorted by {@link Interval#from()}.
     */
    Interval[] sortedIntervals;

    /**
     * Map from an instruction {@linkplain LIRInstruction#id id} to the instruction.
     * Entries should be retrieved with {@link #instructionForId(int)} as the id is
     * not simply an index into this array.
     */
    LIRInstruction[] opIdToInstructionMap;

    /**
     * Map from an instruction {@linkplain LIRInstruction#id id} to the {@linkplain
     * LIRBlock block} containing the instruction. Entries should be retrieved with
     * {@link #blockForId(int)} as the id is not simply an index into this array.
     */
    LIRBlock[] opIdToBlockMap;

    /**
     * Bit set for each variable that is contained in each loop.
     */
    BitMap2D intervalInLoop;

    /**
     * Creates a linear scan allocator for one compilation.
     *
     * @param compilation the compilation context
     * @param ir the IR (provides the linear-scan block order)
     * @param gen the LIR generator (provides the operand pool)
     * @param frameMap the frame map (provides the initial spill slot index)
     */
    public LinearScan(C1XCompilation compilation, IR ir, LIRGenerator gen, FrameMap frameMap) {
        this.compilation = compilation;
        this.ir = ir;
        this.gen = gen;
        this.frameMap = frameMap;
        this.maxSpills = frameMap.initialSpillSlot();
        this.unusedSpillSlot = null;
        this.sortedBlocks = ir.linearScanOrder().toArray(new LIRBlock[ir.linearScanOrder().size()]);
        CiRegister[] allocatableRegisters = compilation.registerConfig.getAllocatableRegisters();
        // index the allocatable registers by their register number for O(1) lookup
        this.registers = new CiRegister[CiRegister.maxRegisterNumber(allocatableRegisters) + 1];
        for (CiRegister reg : allocatableRegisters) {
            registers[reg.number] = reg;
        }
        this.registerAttributes = compilation.registerConfig.getAttributesMap();
        this.operands = gen.operands;
    }

    /**
     * Converts an operand (variable or register) to an index in a flat address space covering all the
     * {@linkplain CiVariable variables} and {@linkplain CiRegisterValue registers} being processed by this
     * allocator.
     */
    int operandNumber(CiValue operand) {
        return operands.operandNumber(operand);
    }

    // selects fixed (precolored register) intervals
    static final IntervalPredicate IS_PRECOLORED_INTERVAL = new IntervalPredicate() {
        @Override
        public boolean apply(Interval i) {
            return i.operand.isRegister();
        }
    };

    // selects variable (virtual register) intervals
    static final IntervalPredicate IS_VARIABLE_INTERVAL = new IntervalPredicate() {
        @Override
        public boolean apply(Interval i) {
            return i.operand.isVariable();
        }
    };

    // selects non-fixed intervals holding object references (for oop map construction)
    static final IntervalPredicate IS_OOP_INTERVAL = new IntervalPredicate() {
        @Override
        public boolean apply(Interval i) {
            return !i.operand.isRegister() && i.kind() == CiKind.Object;
        }
    };

    /**
     * Gets an object describing the attributes of a given register according to this register configuration.
     */
    RiRegisterAttributes attributes(CiRegister reg) {
        return registerAttributes[reg.number];
    }

    /**
     * Allocates the next available spill slot for a value of a given kind.
     * Double-word values are aligned to an even slot index; the single slot
     * skipped by such alignment is remembered in {@link #unusedSpillSlot} and
     * handed out to the next single-word request.
     */
    CiStackSlot allocateSpillSlot(CiKind kind) {
        CiStackSlot spillSlot;
        if (numberOfSpillSlots(kind) == 2) {
            if (isOdd(maxSpills)) {
                // alignment of double-slot values
                // the hole because of the alignment is filled with the next single-slot value
                assert unusedSpillSlot == null : "wasting a spill slot";
                unusedSpillSlot = CiStackSlot.get(kind, maxSpills);
                maxSpills++;
            }
            spillSlot = CiStackSlot.get(kind, maxSpills);
            maxSpills += 2;
        } else if (unusedSpillSlot != null) {
            // re-use hole that was the result of a previous double-word alignment
            spillSlot = unusedSpillSlot;
            unusedSpillSlot = null;
        } else {
            spillSlot = CiStackSlot.get(kind, maxSpills);
            maxSpills++;
        }

        return spillSlot;
    }

    void assignSpillSlot(Interval interval) {
        // assign the canonical spill slot of the parent (if a part of the interval
        // is already spilled) or allocate a new spill slot
        if (interval.spillSlot() != null) {
            interval.assignLocation(interval.spillSlot());
        } else {
            CiStackSlot slot = allocateSpillSlot(interval.kind());
            interval.setSpillSlot(slot);
            interval.assignLocation(slot);
        }
    }

    /**
     * Creates a new interval.
     *
     * @param operand the operand for the interval
     * @return the created interval
     */
    Interval createInterval(CiValue operand) {
        assert isProcessed(operand);
        assert operand.isLegal();
        int operandNumber = operandNumber(operand);
        Interval interval = new Interval(operand, operandNumber);
        assert operandNumber < intervalsSize;
        assert intervals[operandNumber] == null;
        intervals[operandNumber] = interval;
        return interval;
    }

    /**
     * Creates an interval as a result of splitting or spilling another interval.
     *
     * @param source an interval being split or spilled
     * @return a new interval derived from {@code source}
     */
    Interval createDerivedInterval(Interval source) {
        // Derived intervals are appended after all original operand-numbered
        // intervals; remember where that region starts (see liveSetSize()).
        if (firstDerivedIntervalIndex == -1) {
            firstDerivedIntervalIndex = intervalsSize;
        }
        if (intervalsSize == intervals.length) {
            intervals = Arrays.copyOf(intervals, intervals.length * 2);
        }
        // The new variable's operand number is the slot reserved by the increment below.
        intervalsSize++;
        Interval interval = createInterval(operands.newVariable(source.kind()));
        assert intervals[intervalsSize - 1] == interval;
        return interval;
    }

    // copy the variable flags if an interval is split
    void copyRegisterFlags(Interval from, Interval to) {
        if (operands.mustBeByteRegister(from.operand)) {
            operands.setMustBeByteRegister((CiVariable) to.operand);
        }

        // Note: do not copy the mustStartInMemory flag because it is not necessary for child
        // intervals (only the very beginning of the interval must be in memory)
    }

    // access to block list (sorted in linear scan order)
    int blockCount() {
        assert sortedBlocks.length == ir.linearScanOrder().size() : "invalid cached block list";
        return sortedBlocks.length;
    }

    LIRBlock blockAt(int index) {
        assert sortedBlocks[index] == ir.linearScanOrder().get(index) : "invalid cached block list";
        return sortedBlocks[index];
    }

    /**
     * Gets the size of the {@link LIRBlock#liveIn} and {@link LIRBlock#liveOut} sets for a basic block. These sets do
     * not include any operands allocated as a result of creating {@linkplain #createDerivedInterval(Interval) derived
     * intervals}.
     */
    int liveSetSize() {
        return firstDerivedIntervalIndex == -1 ? operands.size() : firstDerivedIntervalIndex;
    }

    int numLoops() {
        return ir.numLoops();
    }

    boolean isIntervalInLoop(int interval, int loop) {
        return intervalInLoop.at(interval, loop);
    }

    Interval intervalFor(CiValue operand) {
        int operandNumber = operandNumber(operand);
        assert operandNumber < intervalsSize;
        return intervals[operandNumber];
    }

    /**
     * Gets the highest instruction id allocated by this object.
     */
    int maxOpId() {
        assert opIdToInstructionMap.length > 0 : "no operations";
        return (opIdToInstructionMap.length - 1) << 1;
    }

    /**
     * Converts an {@linkplain LIRInstruction#id instruction id} to an instruction index.
     * All LIR instructions in a method have an index one greater than their linear-scan order predecessor
     * with the first instruction having an index of 0.
     */
    static int opIdToIndex(int opId) {
        return opId >> 1;
    }

    /**
     * Retrieves the {@link LIRInstruction} based on its {@linkplain LIRInstruction#id id}.
     *
     * @param opId an instruction {@linkplain LIRInstruction#id id}
     * @return the instruction whose {@linkplain LIRInstruction#id} {@code == id}
     */
    LIRInstruction instructionForId(int opId) {
        assert isEven(opId) : "opId not even";
        LIRInstruction instr = opIdToInstructionMap[opIdToIndex(opId)];
        assert instr.id == opId;
        return instr;
    }

    /**
     * Gets the block containing a given instruction.
+ * + * @param opId an instruction {@linkplain LIRInstruction#id id} + * @return the block containing the instruction denoted by {@code opId} + */ + LIRBlock blockForId(int opId) { + assert opIdToBlockMap.length > 0 && opId >= 0 && opId <= maxOpId() + 1 : "opId out of range"; + return opIdToBlockMap[opIdToIndex(opId)]; + } + + boolean isBlockBegin(int opId) { + return opId == 0 || blockForId(opId) != blockForId(opId - 1); + } + + boolean coversBlockBegin(int opId1, int opId2) { + return blockForId(opId1) != blockForId(opId2); + } + + /** + * Determines if an {@link LIRInstruction} destroys all caller saved registers. + * + * @param opId an instruction {@linkplain LIRInstruction#id id} + * @return {@code true} if the instruction denoted by {@code id} destroys all caller saved registers. + */ + boolean hasCall(int opId) { + assert isEven(opId) : "opId not even"; + return instructionForId(opId).hasCall; + } + + /** + * Eliminates moves from register to stack if the stack slot is known to be correct. 
     */
    // Tracks the spill state of a split parent as its definitions are encountered
    // (positions arrive in reverse order); feeds eliminateSpillMoves() below.
    void changeSpillDefinitionPos(Interval interval, int defPos) {
        assert interval.isSplitParent() : "can only be called for split parents";

        switch (interval.spillState()) {
            case NoDefinitionFound:
                // first definition seen for this interval: remember its position
                assert interval.spillDefinitionPos() == -1 : "must no be set before";
                interval.setSpillDefinitionPos(defPos);
                interval.setSpillState(SpillState.NoSpillStore);
                break;

            case NoSpillStore:
                assert defPos <= interval.spillDefinitionPos() : "positions are processed in reverse order when intervals are created";
                if (defPos < interval.spillDefinitionPos() - 2 || instructionForId(interval.spillDefinitionPos()).code == LIROpcode.Xir) {
                    // second definition found, so no spill optimization possible for this interval
                    interval.setSpillState(SpillState.NoOptimization);
                } else {
                    // two consecutive definitions (because of two-operand LIR form)
                    assert blockForId(defPos) == blockForId(interval.spillDefinitionPos()) : "block must be equal";
                }
                break;

            case NoOptimization:
                // nothing to do
                break;

            default:
                throw new CiBailout("other states not allowed at this time");
        }
    }

    // called during register allocation
    void changeSpillState(Interval interval, int spillPos) {
        switch (interval.spillState()) {
            case NoSpillStore: {
                int defLoopDepth = blockForId(interval.spillDefinitionPos()).loopDepth();
                int spillLoopDepth = blockForId(spillPos).loopDepth();

                if (defLoopDepth < spillLoopDepth) {
                    // the loop depth of the spilling position is higher than the loop depth
                    // at the definition of the interval; move the write to memory out of the
                    // loop by storing at the definition of the interval
                    interval.setSpillState(SpillState.StoreAtDefinition);
                } else {
                    // the interval is currently spilled only once, so for now there is no
                    // reason to store the interval at the definition
                    interval.setSpillState(SpillState.OneSpillStore);
                }
                break;
            }

            case OneSpillStore: {
                // the interval is spilled more than once, so it is better to store it to
                // memory at the definition
                interval.setSpillState(SpillState.StoreAtDefinition);
                break;
            }

            case StoreAtDefinition:
            case StartInMemory:
            case NoOptimization:
            case NoDefinitionFound:
                // nothing to do
                break;

            default:
                throw new CiBailout("other states not allowed at this time");
        }
    }

    // Predicate over intervals, used to select subsets of the interval list.
    abstract static class IntervalPredicate {
        abstract boolean apply(Interval i);
    }

    // Selects split-parent intervals whose spill store should happen at their definition.
    private static final IntervalPredicate mustStoreAtDefinition = new IntervalPredicate() {
        @Override
        public boolean apply(Interval i) {
            return i.isSplitParent() && i.spillState() == SpillState.StoreAtDefinition;
        }
    };

    // called once before assignment of register numbers
    void eliminateSpillMoves() {
        if (C1XOptions.TraceLinearScanLevel >= 3) {
            TTY.println(" Eliminating unnecessary spill moves");
        }

        // collect all intervals that must be stored after their definition.
        // the list is sorted by Interval.spillDefinitionPos
        Interval interval;
        interval = createUnhandledLists(mustStoreAtDefinition, null).first;
        if (C1XOptions.DetailedAsserts) {
            checkIntervals(interval);
        }

        LIRInsertionBuffer insertionBuffer = new LIRInsertionBuffer();
        int numBlocks = blockCount();
        for (int i = 0; i < numBlocks; i++) {
            LIRBlock block = blockAt(i);
            List<LIRInstruction> instructions = block.lir().instructionsList();
            int numInst = instructions.size();
            boolean hasNew = false;

            // iterate all instructions of the block. skip the first because it is always a label
            for (int j = 1; j < numInst; j++) {
                LIRInstruction op = instructions.get(j);
                int opId = op.id;

                if (opId == -1) {
                    CiValue resultOperand = op.result();
                    // remove move from register to stack if the stack slot is guaranteed to be correct.
                    // only moves that have been inserted by LinearScan can be removed.
                    assert op.code == LIROpcode.Move : "only moves can have a opId of -1";
                    assert resultOperand.isVariable() : "LinearScan inserts only moves to variables";

                    LIROp1 op1 = (LIROp1) op;
                    Interval curInterval = intervalFor(resultOperand);

                    if (!curInterval.location().isRegister() && curInterval.alwaysInMemory()) {
                        // move target is a stack slot that is always correct, so eliminate instruction
                        if (C1XOptions.TraceLinearScanLevel >= 4) {
                            TTY.println("eliminating move from interval %d to %d", operandNumber(op1.operand()), operandNumber(op1.result()));
                        }
                        instructions.set(j, null); // null-instructions are deleted by assignRegNum
                    }

                } else {
                    // insert move from register to stack just after the beginning of the interval
                    assert interval == Interval.EndMarker || interval.spillDefinitionPos() >= opId : "invalid order";
                    assert interval == Interval.EndMarker || (interval.isSplitParent() && interval.spillState() == SpillState.StoreAtDefinition) : "invalid interval";

                    while (interval != Interval.EndMarker && interval.spillDefinitionPos() == opId) {
                        if (!hasNew) {
                            // prepare insertion buffer (appended when all instructions of the block are processed)
                            insertionBuffer.init(block.lir());
                            hasNew = true;
                        }

                        CiValue fromLocation = interval.location();
                        CiValue toLocation = canonicalSpillOpr(interval);

                        assert fromLocation.isRegister() : "from operand must be a register but is: " + fromLocation + " toLocation=" + toLocation + " spillState=" + interval.spillState();
                        assert toLocation.isStackSlot() : "to operand must be a stack slot";

                        insertionBuffer.move(j, fromLocation, toLocation, null);

                        if (C1XOptions.TraceLinearScanLevel >= 4) {
                            CiStackSlot slot = interval.spillSlot();
                            TTY.println("inserting move after definition of interval %d to stack slot %d%s at opId %d",
                                            interval.operandNumber, slot.index(), slot.inCallerFrame() ? " in caller frame" : "", opId);
                        }

                        interval = interval.next;
                    }
                }
            } // end of instruction iteration

            if (hasNew) {
                block.lir().append(insertionBuffer);
            }
        } // end of block iteration

        assert interval == Interval.EndMarker : "missed an interval";
    }

    // Debug-only consistency check on the sorted list of store-at-definition intervals.
    private void checkIntervals(Interval interval) {
        Interval prev = null;
        Interval temp = interval;
        while (temp != Interval.EndMarker) {
            assert temp.spillDefinitionPos() > 0 : "invalid spill definition pos";
            if (prev != null) {
                assert temp.from() >= prev.from() : "intervals not sorted";
                assert temp.spillDefinitionPos() >= prev.spillDefinitionPos() : "when intervals are sorted by from : then they must also be sorted by spillDefinitionPos";
            }

            assert temp.spillSlot() != null : "interval has no spill slot assigned";
            assert temp.spillDefinitionPos() >= temp.from() : "invalid order";
            assert temp.spillDefinitionPos() <= temp.from() + 2 : "only intervals defined once at their start-pos can be optimized";

            if (C1XOptions.TraceLinearScanLevel >= 4) {
                TTY.println("interval %d (from %d to %d) must be stored at %d", temp.operandNumber, temp.from(), temp.to(), temp.spillDefinitionPos());
            }

            prev = temp;
            temp = temp.next;
        }
    }

    /**
     * Numbers all instructions in all blocks. The numbering follows the {@linkplain ComputeLinearScanOrder linear scan order}.
     */
    void numberInstructions() {
        // Assign IDs to LIR nodes and build a mapping, lirOps, from ID to LIRInstruction node.
+ int numBlocks = blockCount(); + int numInstructions = 0; + for (int i = 0; i < numBlocks; i++) { + numInstructions += blockAt(i).lir().instructionsList().size(); + } + + // initialize with correct length + opIdToInstructionMap = new LIRInstruction[numInstructions]; + opIdToBlockMap = new LIRBlock[numInstructions]; + + int opId = 0; + int index = 0; + + for (int i = 0; i < numBlocks; i++) { + LIRBlock block = blockAt(i); + block.setFirstLirInstructionId(opId); + List<LIRInstruction> instructions = block.lir().instructionsList(); + + int numInst = instructions.size(); + for (int j = 0; j < numInst; j++) { + LIRInstruction op = instructions.get(j); + op.id = opId; + + opIdToInstructionMap[index] = op; + opIdToBlockMap[index] = block; + assert instructionForId(opId) == op : "must match"; + + index++; + opId += 2; // numbering of lirOps by two + } + block.setLastLirInstructionId((opId - 2)); + } + assert index == numInstructions : "must match"; + assert (index << 1) == opId : "must match: " + (index << 1); + } + + /** + * Computes local live sets (i.e. {@link LIRBlock#liveGen} and {@link LIRBlock#liveKill}) separately for each block. + */ + void computeLocalLiveSets() { + int numBlocks = blockCount(); + int liveSize = liveSetSize(); + + BitMap2D localIntervalInLoop = new BitMap2D(operands.size(), numLoops()); + + // iterate all blocks + for (int i = 0; i < numBlocks; i++) { + LIRBlock block = blockAt(i); + final CiBitMap liveGen = new CiBitMap(liveSize); + final CiBitMap liveKill = new CiBitMap(liveSize); + + List<LIRInstruction> instructions = block.lir().instructionsList(); + int numInst = instructions.size(); + + // iterate all instructions of the block. 
skip the first because it is always a label + assert !instructions.get(0).hasOperands() : "first operation must always be a label"; + for (int j = 1; j < numInst; j++) { + final LIRInstruction op = instructions.get(j); + + // iterate input operands of instruction + int n = op.operandCount(LIRInstruction.OperandMode.Input); + for (int k = 0; k < n; k++) { + CiValue operand = op.operandAt(LIRInstruction.OperandMode.Input, k); + + if (operand.isVariable()) { + int operandNum = operandNumber(operand); + if (!liveKill.get(operandNum)) { + liveGen.set(operandNum); + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" Setting liveGen for operand %d at instruction %d", operandNum, op.id); + } + } + if (block.loopIndex() >= 0) { + localIntervalInLoop.setBit(operandNum, block.loopIndex()); + } + } + + if (C1XOptions.DetailedAsserts) { + assert operand.isVariableOrRegister() : "visitor should only return register operands"; + verifyInput(block, liveKill, operand); + } + } + + // Add uses of live locals from interpreter's point of view for proper debug information generation + LIRDebugInfo info = op.info; + if (info != null) { + info.state.forEachLiveStateValue(new ValueProcedure() { + public void doValue(Value value) { + CiValue operand = value.operand(); + if (operand.isVariable()) { + int operandNum = operandNumber(operand); + if (!liveKill.get(operandNum)) { + liveGen.set(operandNum); + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" Setting liveGen for value %s, LIR opId %d, operand %d because of state for " + op.toString(), Util.valueString(value), op.id, operandNum); + } + } + } else if (operand.isRegister()) { + assert !isProcessed(operand) && !operand.kind.isObject(); + } else { + assert operand.isConstant() || operand.isIllegal() : "invalid operand for deoptimization value: " + value; + } + } + }); + } + + // iterate temp operands of instruction + n = op.operandCount(LIRInstruction.OperandMode.Temp); + for (int k = 0; k < n; k++) { + CiValue 
operand = op.operandAt(LIRInstruction.OperandMode.Temp, k); + + if (operand.isVariable()) { + int varNum = operandNumber(operand); + liveKill.set(varNum); + if (block.loopIndex() >= 0) { + localIntervalInLoop.setBit(varNum, block.loopIndex()); + } + } + + if (C1XOptions.DetailedAsserts) { + assert operand.isVariableOrRegister() : "visitor should only return register operands"; + verifyTemp(liveKill, operand); + } + } + + // iterate output operands of instruction + n = op.operandCount(LIRInstruction.OperandMode.Output); + for (int k = 0; k < n; k++) { + CiValue operand = op.operandAt(LIRInstruction.OperandMode.Output, k); + + if (operand.isVariable()) { + int varNum = operandNumber(operand); + liveKill.set(varNum); + if (block.loopIndex() >= 0) { + localIntervalInLoop.setBit(varNum, block.loopIndex()); + } + } + + if (C1XOptions.DetailedAsserts) { + assert operand.isVariableOrRegister() : "visitor should only return register operands"; + // fixed intervals are never live at block boundaries, so + // they need not be processed in live sets + // process them only in debug mode so that this can be checked + verifyTemp(liveKill, operand); + } + } + } // end of instruction iteration + + block.liveGen = liveGen; + block.liveKill = liveKill; + block.liveIn = new CiBitMap(liveSize); + block.liveOut = new CiBitMap(liveSize); + + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println("liveGen B%d %s", block.blockID(), block.liveGen); + TTY.println("liveKill B%d %s", block.blockID(), block.liveKill); + } + } // end of block iteration + + intervalInLoop = localIntervalInLoop; + } + + private void verifyTemp(CiBitMap liveKill, CiValue operand) { + // fixed intervals are never live at block boundaries, so + // they need not be processed in live sets + // process them only in debug mode so that this can be checked + if (!operand.isVariable()) { + if (isProcessed(operand)) { + liveKill.set(operandNumber(operand)); + } + } + } + + private void verifyInput(LIRBlock block, CiBitMap 
liveKill, CiValue operand) { + // fixed intervals are never live at block boundaries, so + // they need not be processed in live sets. + // this is checked by these assertions to be sure about it. + // the entry block may have incoming + // values in registers, which is ok. + if (!operand.isVariable() && block != ir.startBlock) { + if (isProcessed(operand)) { + assert liveKill.get(operandNumber(operand)) : "using fixed register that is not defined in this block"; + } + } + } + + /** + * Performs a backward dataflow analysis to compute global live sets (i.e. {@link LIRBlock#liveIn} and + * {@link LIRBlock#liveOut}) for each block. + */ + void computeGlobalLiveSets() { + int numBlocks = blockCount(); + boolean changeOccurred; + boolean changeOccurredInBlock; + int iterationCount = 0; + CiBitMap liveOut = new CiBitMap(liveSetSize()); // scratch set for calculations + + // Perform a backward dataflow analysis to compute liveOut and liveIn for each block. + // The loop is executed until a fixpoint is reached (no changes in an iteration) + do { + changeOccurred = false; + + // iterate all blocks in reverse order + for (int i = numBlocks - 1; i >= 0; i--) { + LIRBlock block = blockAt(i); + + changeOccurredInBlock = false; + + // liveOut(block) is the union of liveIn(sux), for successors sux of block + int n = block.numberOfSux(); + if (n > 0) { + // block has successors + if (n > 0) { + liveOut.setFrom(block.suxAt(0).liveIn); + for (int j = 1; j < n; j++) { + liveOut.setUnion(block.suxAt(j).liveIn); + } + } else { + liveOut.clearAll(); + } + + if (!block.liveOut.isSame(liveOut)) { + // A change occurred. Swap the old and new live out sets to avoid copying. 
                        CiBitMap temp = block.liveOut;
                        block.liveOut = liveOut;
                        liveOut = temp;

                        changeOccurred = true;
                        changeOccurredInBlock = true;
                    }
                }

                if (iterationCount == 0 || changeOccurredInBlock) {
                    // liveIn(block) is the union of liveGen(block) with (liveOut(block) & !liveKill(block))
                    // note: liveIn has to be computed only in first iteration or if liveOut has changed!
                    CiBitMap liveIn = block.liveIn;
                    liveIn.setFrom(block.liveOut);
                    liveIn.setDifference(block.liveKill);
                    liveIn.setUnion(block.liveGen);
                }

                if (C1XOptions.TraceLinearScanLevel >= 4) {
                    traceLiveness(changeOccurredInBlock, iterationCount, block);
                }
            }
            iterationCount++;

            // safety net against a non-converging (buggy) dataflow
            if (changeOccurred && iterationCount > 50) {
                throw new CiBailout("too many iterations in computeGlobalLiveSets");
            }
        } while (changeOccurred);

        if (C1XOptions.DetailedAsserts) {
            verifyLiveness(numBlocks);
        }

        // check that the liveIn set of the first block is empty
        LIRBlock startBlock = ir.startBlock;
        CiBitMap liveInArgs = new CiBitMap(startBlock.liveIn.size());
        if (!startBlock.liveIn.isSame(liveInArgs)) {
            if (C1XOptions.DetailedAsserts) {
                reportFailure(numBlocks);
            }

            TTY.println("preds=" + startBlock.blockPredecessors().size() + ", succs=" + startBlock.blockSuccessors().size());
            TTY.println("startBlock-ID: " + startBlock.blockID());

            // bail out if this occurs in product mode
            throw new CiBailout("liveIn set of first block must be empty");
        }
    }

    // Prints diagnostic information when a variable is live into the start block
    // (i.e. appears to be used before it is defined).
    private void reportFailure(int numBlocks) {
        TTY.println(compilation.method.toString());
        TTY.println("Error: liveIn set of first block must be empty (when this fails, variables are used before they are defined)");
        TTY.print("affected registers:");
        TTY.println(ir.startBlock.liveIn.toString());

        // print some additional information to simplify debugging
        for (int operandNum = 0; operandNum < ir.startBlock.liveIn.size(); operandNum++) {
            if (ir.startBlock.liveIn.get(operandNum)) {
                CiValue operand = operands.operandFor(operandNum);
                Value instr = operand.isVariable() ? gen.operands.instructionForResult(((CiVariable) operand)) : null;
                TTY.println(" var %d (HIR instruction %s)", operandNum, instr == null ? " " : instr.toString());

                if (instr instanceof Phi) {
                    Phi phi = (Phi) instr;
                    TTY.println("phi block begin: " + phi.block());
                    TTY.println("pred count on blockbegin: " + phi.block().predecessors().size());
                    TTY.println("phi values: " + phi.valueCount());
                    TTY.println("phi block preds:");
                    for (Node n : phi.block().predecessors()) {
                        TTY.println(n.toString());
                    }
                }

                // list the blocks that use or define the offending operand
                for (int j = 0; j < numBlocks; j++) {
                    LIRBlock block = blockAt(j);
                    if (block.liveGen.get(operandNum)) {
                        TTY.println(" used in block B%d", block.blockID());
                        for (LIRInstruction ins : block.lir().instructionsList()) {
                            TTY.println(ins.id + ": " + ins.result() + " " + ins.toString());
                        }
                    }
                    if (block.liveKill.get(operandNum)) {
                        TTY.println(" defined in block B%d", block.blockID());
                        for (LIRInstruction ins : block.lir().instructionsList()) {
                            TTY.println(ins.id + ": " + ins.result() + " " + ins.toString());
                        }
                    }
                }
            }
        }
    }

    private void verifyLiveness(int numBlocks) {
        // check that fixed intervals are not live at block boundaries
        // (live set must be empty at fixed intervals)
        for (int i = 0; i < numBlocks; i++) {
            LIRBlock block = blockAt(i);
            for (int j = 0; j <= operands.maxRegisterNumber(); j++) {
                assert !block.liveIn.get(j) : "liveIn set of fixed register must be empty";
                assert !block.liveOut.get(j) : "liveOut set of fixed register must be empty";
                assert !block.liveGen.get(j) : "liveGen set of fixed register must be empty";
            }
        }
    }

    // Trace helper: prints liveIn/liveOut per block, flagging changed blocks with '*'.
    private void traceLiveness(boolean changeOccurredInBlock, int iterationCount, LIRBlock block) {
        char c = iterationCount == 0 || changeOccurredInBlock ? '*' : ' ';
        TTY.print("(%d) liveIn%c B%d ", iterationCount, c, block.blockID());
        TTY.println(block.liveIn.toString());
        TTY.print("(%d) liveOut%c B%d ", iterationCount, c, block.blockID());
        TTY.println(block.liveOut.toString());
    }

    // Records a use of an operand in [from, to), creating the interval on demand.
    // A null kind means "derive the kind from the operand".
    Interval addUse(CiValue operand, int from, int to, RegisterPriority registerPriority, CiKind kind) {
        if (!isProcessed(operand)) {
            return null;
        }
        // NOTE(review): this trace fires only when kind == null, which looks like an
        // inverted/accidental condition — confirm the intended guard.
        if (C1XOptions.TraceLinearScanLevel >= 2 && kind == null) {
            TTY.println(" use %s from %d to %d (%s)", operand, from, to, registerPriority.name());
        }

        if (kind == null) {
            kind = operand.kind.stackKind();
        }
        Interval interval = intervalFor(operand);
        if (interval == null) {
            interval = createInterval(operand);
        }

        if (kind != CiKind.Illegal) {
            interval.setKind(kind);
        }

        // operands pinned to memory stay live until the end of the method
        if (operand.isVariable() && gen.operands.mustStayInMemory((CiVariable) operand)) {
            interval.addRange(from, maxOpId());
        } else {
            interval.addRange(from, to);
        }

        interval.addUsePos(to, registerPriority);
        return interval;
    }

    // Records a temp operand: live only for the single position it occupies.
    void addTemp(CiValue operand, int tempPos, RegisterPriority registerPriority, CiKind kind) {
        if (!isProcessed(operand)) {
            return;
        }
        Interval interval = intervalFor(operand);
        if (interval == null) {
            interval = createInterval(operand);
        }

        if (kind != CiKind.Illegal) {
            interval.setKind(kind);
        }

        interval.addRange(tempPos, tempPos + 1);
        interval.addUsePos(tempPos, registerPriority);
    }

    // An operand is processed by this allocator unless it is a non-allocatable register.
    boolean isProcessed(CiValue operand) {
        return !operand.isRegister() || attributes(operand.asRegister()).isAllocatable;
    }

    // Records a definition of an operand at defPos, trimming or creating the interval accordingly.
    void addDef(CiValue operand, int defPos, RegisterPriority registerPriority, CiKind kind) {
        if (!isProcessed(operand)) {
            return;
        }
        if (C1XOptions.TraceLinearScanLevel >= 2) {
            TTY.println(" def %s defPos %d (%s)", operand, defPos, registerPriority.name());
        }
        Interval interval = intervalFor(operand);
        if (interval != null) {

            if (kind != CiKind.Illegal) {
                interval.setKind(kind);
            }

            Range r = interval.first();
            if (r.from <= defPos) {
                // Update the starting point (when a range is first created for a use, its
                // start is the beginning of the current block until a def is encountered.)
                r.from = defPos;
                interval.addUsePos(defPos, registerPriority);

            } else {
                // Dead value - make vacuous interval
                // also add register priority for dead intervals
                interval.addRange(defPos, defPos + 1);
                interval.addUsePos(defPos, registerPriority);
                if (C1XOptions.TraceLinearScanLevel >= 2) {
                    TTY.println("Warning: def of operand %s at %d occurs without use", operand, defPos);
                }
            }

        } else {
            // Dead value - make vacuous interval
            // also add register priority for dead intervals
            interval = createInterval(operand);
            if (kind != CiKind.Illegal) {
                interval.setKind(kind);
            }

            interval.addRange(defPos, defPos + 1);
            interval.addUsePos(defPos, registerPriority);
            if (C1XOptions.TraceLinearScanLevel >= 2) {
                TTY.println("Warning: dead value %s at %d in live intervals", operand, defPos);
            }
        }

        changeSpillDefinitionPos(interval, defPos);
        if (registerPriority == RegisterPriority.None && interval.spillState().ordinal() <= SpillState.StartInMemory.ordinal()) {
            // detection of method-parameters and roundfp-results
            // TODO: move this directly to position where use-kind is computed
            interval.setSpillState(SpillState.StartInMemory);
        }
    }

    /**
     * Determines the register priority for an instruction's output/result operand.
     */
    RegisterPriority registerPriorityOfOutputOperand(LIRInstruction op, CiValue operand) {
        if (op.code == LIROpcode.Move) {
            LIROp1 move = (LIROp1) op;
            CiValue res = move.result();
            boolean resultInMemory = res.isVariable() && operands.mustStartInMemory((CiVariable) res);

            if (resultInMemory) {
                // Begin of an interval with mustStartInMemory set.
                // This interval will always get a stack slot first, so return noUse.
                return RegisterPriority.None;

            } else if (move.operand().isStackSlot()) {
                // method argument (condition must be equal to handleMethodArguments)
                return RegisterPriority.None;

            }
        }

        if (operand.isVariable() && operands.mustStartInMemory((CiVariable) operand)) {
            // result is a stack-slot, so prevent immediate reloading
            return RegisterPriority.None;
        }

        // all other operands require a register
        return RegisterPriority.MustHaveRegister;
    }

    /**
     * Determines the priority with which an instruction's input operand will be allocated a register.
     */
    RegisterPriority registerPriorityOfInputOperand(LIRInstruction op, CiValue operand) {
        if (op.code == LIROpcode.Move) {
            LIROp1 move = (LIROp1) op;
            CiValue res = move.result();
            boolean resultInMemory = res.isVariable() && operands.mustStartInMemory((CiVariable) res);

            if (resultInMemory) {
                // Move to an interval with mustStartInMemory set.
                // To avoid moves from stack to stack (not allowed) force the input operand to a register
                return RegisterPriority.MustHaveRegister;

            } else if (move.operand().isVariableOrRegister() && move.result().isVariableOrRegister()) {
                // The input operand is not forced to a register (moves from stack to register are allowed),
                // but it is faster if the input operand is in a register
                return RegisterPriority.ShouldHaveRegister;
            }
        }

        if (compilation.target.arch.isX86()) {
            if (op.code == LIROpcode.Cmove) {
                // conditional moves can handle stack operands
                assert op.result().isVariableOrRegister();
                return RegisterPriority.ShouldHaveRegister;
            }

            // optimizations for second input operand of arithmetic operations on Intel
            // this operand is allowed to be on the stack in some cases
            CiKind kind = operand.kind.stackKind();
            if (kind == CiKind.Float || kind == CiKind.Double) {
                // SSE float instruction (CiKind.Double only supported with SSE2)
                switch (op.code) {
                    case Cmp:
                    case Add:
                    case Sub:
                    case Mul:
                    case Div: {
                        LIROp2 op2 = (LIROp2) op;
                        // only the second source operand of a two-operand form may live on the stack
                        if (op2.operand1() != op2.operand2() && op2.operand2() == operand) {
                            assert (op2.result().isVariableOrRegister() || op.code == LIROpcode.Cmp) && op2.operand1().isVariableOrRegister() : "cannot mark second operand as stack if others are not in register";
                            return RegisterPriority.ShouldHaveRegister;
                        }
                    }
                }
            } else if (kind != CiKind.Long) {
                // integer instruction (note: long operands must always be in register)
                switch (op.code) {
                    case Cmp:
                    case Add:
                    case Sub:
                    case LogicAnd:
                    case LogicOr:
                    case LogicXor: {
                        LIROp2 op2 = (LIROp2) op;
                        // only the second source operand of a two-operand form may live on the stack
                        if (op2.operand1() != op2.operand2() && op2.operand2() == operand) {
                            assert (op2.result().isVariableOrRegister() || op.code == LIROpcode.Cmp) && op2.operand1().isVariableOrRegister() : "cannot mark second operand as stack if others are not in register";
                            return RegisterPriority.ShouldHaveRegister;
                        }
                    }
                }
            }
        } // X86

        // all other operands require a register
        return RegisterPriority.MustHaveRegister;
    }

    /**
     * Optimizes moves related to incoming stack based arguments.
     * The interval for the destination of such moves is assigned
     * the stack slot (which is in the caller's frame) as its
     * spill slot.
     */
    void handleMethodArguments(LIRInstruction op) {
        if (op.code == LIROpcode.Move) {
            LIROp1 move = (LIROp1) op;

            if (move.operand().isStackSlot()) {
                CiStackSlot slot = (CiStackSlot) move.operand();
                if (C1XOptions.DetailedAsserts) {
                    int argSlots = compilation.method.signature().argumentSlots(!isStatic(compilation.method.accessFlags()));
                    assert slot.index() >= 0 && slot.index() < argSlots;
                    assert move.id > 0 : "invalid id";
                    assert blockForId(move.id).numberOfPreds() == 0 : "move from stack must be in first block";
                    assert move.result().isVariable() : "result of move must be a variable";

                    if (C1XOptions.TraceLinearScanLevel >= 4) {
                        TTY.println("found move from stack slot %s to %s", slot, move.result());
                    }
                }

                Interval interval = intervalFor(move.result());
                CiStackSlot copySlot = slot;
                // pointer arguments may need a fresh (copied) slot so the GC can track them
                if (C1XOptions.CopyPointerStackArguments && slot.kind == CiKind.Object) {
                    copySlot = allocateSpillSlot(slot.kind);
                }
                interval.setSpillSlot(copySlot);
                interval.assignLocation(copySlot);
            }
        }
    }

    // Records allocation hints for moves and conditional moves so that source and
    // destination intervals preferentially receive the same register.
    void addRegisterHints(LIRInstruction op) {
        switch (op.code) {
            case Move: // fall through
            case Convert: {
                LIROp1 move = (LIROp1) op;

                CiValue moveFrom = move.operand();
                CiValue moveTo = move.result();

                if (moveTo.isVariableOrRegister() && moveFrom.isVariableOrRegister()) {
                    Interval from = intervalFor(moveFrom);
                    Interval to = intervalFor(moveTo);
                    if (from != null && to != null) {
                        to.setLocationHint(from);
                        if (C1XOptions.TraceLinearScanLevel >= 4) {
                            TTY.println("operation at opId %d: added hint from interval %d to %d", move.id, from.operandNumber, to.operandNumber);
                        }
                    }
                }
                break;
            }
            case Cmove: {
                LIROp2 cmove = (LIROp2) op;
CiValue moveFrom = cmove.operand1(); + CiValue moveTo = cmove.result(); + + if (moveTo.isVariableOrRegister() && moveFrom.isVariableOrRegister()) { + Interval from = intervalFor(moveFrom); + Interval to = intervalFor(moveTo); + if (from != null && to != null) { + to.setLocationHint(from); + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println("operation at opId %d: added hint from interval %d to %d", cmove.id, from.operandNumber, to.operandNumber); + } + } + } + break; + } + } + } + + void buildIntervals() { + intervalsSize = operands.size(); + intervals = new Interval[intervalsSize + INITIAL_SPLIT_INTERVALS_CAPACITY]; + + // create a list with all caller-save registers (cpu, fpu, xmm) + RiRegisterConfig registerConfig = compilation.registerConfig; + CiRegister[] callerSaveRegs = registerConfig.getCallerSaveRegisters(); + + // iterate all blocks in reverse order + for (int i = blockCount() - 1; i >= 0; i--) { + LIRBlock block = blockAt(i); + List<LIRInstruction> instructions = block.lir().instructionsList(); + final int blockFrom = block.firstLirInstructionId(); + int blockTo = block.lastLirInstructionId(); + + assert blockFrom == instructions.get(0).id; + assert blockTo == instructions.get(instructions.size() - 1).id; + + // Update intervals for operands live at the end of this block; + CiBitMap live = block.liveOut; + for (int operandNum = live.nextSetBit(0); operandNum >= 0; operandNum = live.nextSetBit(operandNum + 1)) { + assert live.get(operandNum) : "should not stop here otherwise"; + CiValue operand = operands.operandFor(operandNum); + if (C1XOptions.TraceLinearScanLevel >= 2) { + TTY.println("live in %s to %d", operand, blockTo + 2); + } + + addUse(operand, blockFrom, blockTo + 2, RegisterPriority.None, CiKind.Illegal); + + // add special use positions for loop-end blocks when the + // interval is used anywhere inside this loop. It's possible + // that the block was part of a non-natural loop, so it might + // have an invalid loop index. 
+ if (block.isLinearScanLoopEnd() && block.loopIndex() != -1 && isIntervalInLoop(operandNum, block.loopIndex())) { + intervalFor(operand).addUsePos(blockTo + 1, RegisterPriority.LiveAtLoopEnd); + } + } + + // iterate all instructions of the block in reverse order. + // skip the first instruction because it is always a label + // definitions of intervals are processed before uses + assert !instructions.get(0).hasOperands() : "first operation must always be a label"; + for (int j = instructions.size() - 1; j >= 1; j--) { + LIRInstruction op = instructions.get(j); + final int opId = op.id; + + // add a temp range for each register if operation destroys caller-save registers + if (op.hasCall) { + for (CiRegister r : callerSaveRegs) { + if (attributes(r).isAllocatable) { + addTemp(r.asValue(), opId, RegisterPriority.None, CiKind.Illegal); + } + } + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println("operation destroys all caller-save registers"); + } + } + + // Add any platform dependent temps + pdAddTemps(op); + + // visit definitions (output and temp operands) + int k; + int n; + n = op.operandCount(LIRInstruction.OperandMode.Output); + for (k = 0; k < n; k++) { + CiValue operand = op.operandAt(LIRInstruction.OperandMode.Output, k); + assert operand.isVariableOrRegister(); + addDef(operand, opId, registerPriorityOfOutputOperand(op, operand), operand.kind.stackKind()); + } + + n = op.operandCount(LIRInstruction.OperandMode.Temp); + for (k = 0; k < n; k++) { + CiValue operand = op.operandAt(LIRInstruction.OperandMode.Temp, k); + assert operand.isVariableOrRegister(); + if (C1XOptions.TraceLinearScanLevel >= 2) { + TTY.println(" temp %s tempPos %d (%s)", operand, opId, RegisterPriority.MustHaveRegister.name()); + } + addTemp(operand, opId, RegisterPriority.MustHaveRegister, operand.kind.stackKind()); + } + + // visit uses (input operands) + n = op.operandCount(LIRInstruction.OperandMode.Input); + for (k = 0; k < n; k++) { + CiValue operand = 
op.operandAt(LIRInstruction.OperandMode.Input, k); + assert operand.isVariableOrRegister(); + RegisterPriority p = registerPriorityOfInputOperand(op, operand); + Interval interval = addUse(operand, blockFrom, opId, p, null); + if (interval != null && op instanceof LIRXirInstruction) { + Range range = interval.first(); + // (tw) Increase range by 1 in order to overlap the input with the temp and the output operand. + if (range.to == opId) { + range.to++; + } + } + } + + // Add uses of live locals from interpreter's point of view for proper + // debug information generation + // Treat these operands as temp values (if the live range is extended + // to a call site, the value would be in a register at the call otherwise) + LIRDebugInfo info = op.info; + if (info != null) { + info.state.forEachLiveStateValue(new ValueProcedure() { + public void doValue(Value value) { + CiValue operand = value.operand(); + if (operand.isVariableOrRegister()) { + addUse(operand, blockFrom, (opId + 1), RegisterPriority.None, null); + } + } + }); + } + + // special steps for some instructions (especially moves) + handleMethodArguments(op); + addRegisterHints(op); + + } // end of instruction iteration + } // end of block iteration + + // add the range [0, 1] to all fixed intervals. + // the register allocator need not handle unhandled fixed intervals + for (Interval interval : intervals) { + if (interval != null && interval.operand.isRegister()) { + interval.addRange(0, 1); + } + } + } + + // * Phase 5: actual register allocation + + private void pdAddTemps(LIRInstruction op) { + // TODO Platform dependent! + assert compilation.target.arch.isX86(); + + switch (op.code) { + case Tan: + case Sin: + case Cos: { + // The slow path for these functions may need to save and + // restore all live registers but we don't want to save and + // restore everything all the time, so mark the xmms as being + // killed. 
If the slow path were explicit or we could propagate + // live register masks down to the assembly we could do better + // but we don't have any easy way to do that right now. We + // could also consider not killing all xmm registers if we + // assume that slow paths are uncommon but it's not clear that + // would be a good idea. + if (C1XOptions.TraceLinearScanLevel >= 2) { + TTY.println("killing XMMs for trig"); + } + int opId = op.id; + + for (CiRegister r : compilation.registerConfig.getCallerSaveRegisters()) { + if (r.isFpu()) { + addTemp(r.asValue(), opId, RegisterPriority.None, CiKind.Illegal); + } + } + break; + } + } + + } + + boolean isSorted(Interval[] intervals) { + int from = -1; + for (Interval interval : intervals) { + assert interval != null; + assert from <= interval.from(); + from = interval.from(); + + // XXX: very slow! + assert Arrays.asList(this.intervals).contains(interval); + } + return true; + } + + Interval addToList(Interval first, Interval prev, Interval interval) { + Interval newFirst = first; + if (prev != null) { + prev.next = interval; + } else { + newFirst = interval; + } + return newFirst; + } + + Interval.Pair createUnhandledLists(IntervalPredicate isList1, IntervalPredicate isList2) { + assert isSorted(sortedIntervals) : "interval list is not sorted"; + + Interval list1 = Interval.EndMarker; + Interval list2 = Interval.EndMarker; + + Interval list1Prev = null; + Interval list2Prev = null; + Interval v; + + int n = sortedIntervals.length; + for (int i = 0; i < n; i++) { + v = sortedIntervals[i]; + if (v == null) { + continue; + } + + if (isList1.apply(v)) { + list1 = addToList(list1, list1Prev, v); + list1Prev = v; + } else if (isList2 == null || isList2.apply(v)) { + list2 = addToList(list2, list2Prev, v); + list2Prev = v; + } + } + + if (list1Prev != null) { + list1Prev.next = Interval.EndMarker; + } + if (list2Prev != null) { + list2Prev.next = Interval.EndMarker; + } + + assert list1Prev == null || list1Prev.next == 
Interval.EndMarker : "linear list ends not with sentinel"; + assert list2Prev == null || list2Prev.next == Interval.EndMarker : "linear list ends not with sentinel"; + + return new Interval.Pair(list1, list2); + } + + void sortIntervalsBeforeAllocation() { + int sortedLen = 0; + for (Interval interval : intervals) { + if (interval != null) { + sortedLen++; + } + } + + Interval[] sortedList = new Interval[sortedLen]; + int sortedIdx = 0; + int sortedFromMax = -1; + + // special sorting algorithm: the original interval-list is almost sorted, + // only some intervals are swapped. So this is much faster than a complete QuickSort + for (Interval interval : intervals) { + if (interval != null) { + int from = interval.from(); + + if (sortedFromMax <= from) { + sortedList[sortedIdx++] = interval; + sortedFromMax = interval.from(); + } else { + // the assumption that the intervals are already sorted failed, + // so this interval must be sorted in manually + int j; + for (j = sortedIdx - 1; j >= 0 && from < sortedList[j].from(); j--) { + sortedList[j + 1] = sortedList[j]; + } + sortedList[j + 1] = interval; + sortedIdx++; + } + } + } + sortedIntervals = sortedList; + } + + void sortIntervalsAfterAllocation() { + if (firstDerivedIntervalIndex == -1) { + // no intervals have been added during allocation, so sorted list is already up to date + return; + } + + Interval[] oldList = sortedIntervals; + Interval[] newList = Arrays.copyOfRange(intervals, firstDerivedIntervalIndex, intervalsSize); + int oldLen = oldList.length; + int newLen = newList.length; + + // conventional sort-algorithm for new intervals + Arrays.sort(newList, INTERVAL_COMPARATOR); + + // merge old and new list (both already sorted) into one combined list + Interval[] combinedList = new Interval[oldLen + newLen]; + int oldIdx = 0; + int newIdx = 0; + + while (oldIdx + newIdx < combinedList.length) { + if (newIdx >= newLen || (oldIdx < oldLen && oldList[oldIdx].from() <= newList[newIdx].from())) { + 
combinedList[oldIdx + newIdx] = oldList[oldIdx]; + oldIdx++; + } else { + combinedList[oldIdx + newIdx] = newList[newIdx]; + newIdx++; + } + } + + sortedIntervals = combinedList; + } + + private static final Comparator<Interval> INTERVAL_COMPARATOR = new Comparator<Interval>() { + + public int compare(Interval a, Interval b) { + if (a != null) { + if (b != null) { + return a.from() - b.from(); + } else { + return -1; + } + } else { + if (b != null) { + return 1; + } else { + return 0; + } + } + } + }; + + public void allocateRegisters() { + Interval precoloredIntervals; + Interval notPrecoloredIntervals; + + Interval.Pair result = createUnhandledLists(IS_PRECOLORED_INTERVAL, IS_VARIABLE_INTERVAL); + precoloredIntervals = result.first; + notPrecoloredIntervals = result.second; + + // allocate cpu registers + LinearScanWalker lsw = new LinearScanWalker(this, precoloredIntervals, notPrecoloredIntervals); + lsw.walk(); + lsw.finishAllocation(); + } + + // * Phase 6: resolve data flow + // (insert moves at edges between blocks if intervals have been split) + + // wrapper for Interval.splitChildAtOpId that performs a bailout in product mode + // instead of returning null + Interval splitChildAtOpId(Interval interval, int opId, LIRInstruction.OperandMode mode) { + Interval result = interval.getSplitChildAtOpId(opId, mode, this); + + if (result != null) { + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println("Split child at pos " + opId + " of interval " + interval.toString() + " is " + result.toString()); + } + return result; + } + + throw new CiBailout("LinearScan: interval is null"); + } + + Interval intervalAtBlockBegin(LIRBlock block, CiValue operand) { + assert operand.isVariable() : "register number out of bounds"; + assert intervalFor(operand) != null : "no interval found"; + + return splitChildAtOpId(intervalFor(operand), block.firstLirInstructionId(), LIRInstruction.OperandMode.Output); + } + + Interval intervalAtBlockEnd(LIRBlock block, CiValue operand) { 
+ assert operand.isVariable() : "register number out of bounds"; + assert intervalFor(operand) != null : "no interval found"; + + return splitChildAtOpId(intervalFor(operand), block.lastLirInstructionId() + 1, LIRInstruction.OperandMode.Output); + } + + Interval intervalAtOpId(CiValue operand, int opId) { + assert operand.isVariable() : "register number out of bounds"; + assert intervalFor(operand) != null : "no interval found"; + + return splitChildAtOpId(intervalFor(operand), opId, LIRInstruction.OperandMode.Input); + } + + void resolveCollectMappings(LIRBlock fromBlock, LIRBlock toBlock, MoveResolver moveResolver) { + assert moveResolver.checkEmpty(); + + int numOperands = operands.size(); + CiBitMap liveAtEdge = toBlock.liveIn; + + // visit all variables for which the liveAtEdge bit is set + for (int operandNum = liveAtEdge.nextSetBit(0); operandNum >= 0; operandNum = liveAtEdge.nextSetBit(operandNum + 1)) { + assert operandNum < numOperands : "live information set for not exisiting interval"; + assert fromBlock.liveOut.get(operandNum) && toBlock.liveIn.get(operandNum) : "interval not live at this edge"; + + CiValue liveOperand = operands.operandFor(operandNum); + Interval fromInterval = intervalAtBlockEnd(fromBlock, liveOperand); + Interval toInterval = intervalAtBlockBegin(toBlock, liveOperand); + + if (fromInterval != toInterval && (fromInterval.location() != toInterval.location())) { + // need to insert move instruction + moveResolver.addMapping(fromInterval, toInterval); + } + } + } + + void resolveFindInsertPos(LIRBlock fromBlock, LIRBlock toBlock, MoveResolver moveResolver) { + if (fromBlock.numberOfSux() <= 1) { + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println("inserting moves at end of fromBlock B%d", fromBlock.blockID()); + } + + List<LIRInstruction> instructions = fromBlock.lir().instructionsList(); + LIRInstruction instr = instructions.get(instructions.size() - 1); + if (instr instanceof LIRBranch) { + LIRBranch branch = (LIRBranch) 
instr; + // insert moves before branch + assert branch.cond() == Condition.TRUE : "block does not end with an unconditional jump"; + moveResolver.setInsertPosition(fromBlock.lir(), instructions.size() - 2); + } else { + moveResolver.setInsertPosition(fromBlock.lir(), instructions.size() - 1); + } + + } else { + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println("inserting moves at beginning of toBlock B%d", toBlock.blockID()); + } + + if (C1XOptions.DetailedAsserts) { + assert fromBlock.lir().instructionsList().get(0) instanceof LIRLabel : "block does not start with a label"; + + // because the number of predecessor edges matches the number of + // successor edges, blocks which are reached by switch statements + // may have be more than one predecessor but it will be guaranteed + // that all predecessors will be the same. + for (int i = 0; i < toBlock.numberOfPreds(); i++) { + assert fromBlock == toBlock.predAt(i) : "all critical edges must be broken"; + } + } + + moveResolver.setInsertPosition(toBlock.lir(), 0); + } + } + + /** + * Inserts necessary moves (spilling or reloading) at edges between blocks for intervals that + * have been split. 
+ */ + void resolveDataFlow() { + int numBlocks = blockCount(); + MoveResolver moveResolver = new MoveResolver(this); + CiBitMap blockCompleted = new CiBitMap(numBlocks); + CiBitMap alreadyResolved = new CiBitMap(numBlocks); + + int i; + for (i = 0; i < numBlocks; i++) { + LIRBlock block = blockAt(i); + + // check if block has only one predecessor and only one successor + if (block.numberOfPreds() == 1 && block.numberOfSux() == 1) { + List<LIRInstruction> instructions = block.lir().instructionsList(); + assert instructions.get(0).code == LIROpcode.Label : "block must start with label"; + assert instructions.get(instructions.size() - 1).code == LIROpcode.Branch : "block with successors must end with branch (" + block + "), " + instructions.get(instructions.size() - 1); + assert ((LIRBranch) instructions.get(instructions.size() - 1)).cond() == Condition.TRUE : "block with successor must end with unconditional branch"; + + // check if block is empty (only label and branch) + if (instructions.size() == 2) { + LIRBlock pred = block.predAt(0); + LIRBlock sux = block.suxAt(0); + + // prevent optimization of two consecutive blocks + if (!blockCompleted.get(pred.linearScanNumber()) && !blockCompleted.get(sux.linearScanNumber())) { + if (C1XOptions.TraceLinearScanLevel >= 3) { + TTY.println(" optimizing empty block B%d (pred: B%d, sux: B%d)", block.blockID(), pred.blockID(), sux.blockID()); + } + blockCompleted.set(block.linearScanNumber()); + + // directly resolve between pred and sux (without looking at the empty block between) + resolveCollectMappings(pred, sux, moveResolver); + if (moveResolver.hasMappings()) { + moveResolver.setInsertPosition(block.lir(), 0); + moveResolver.resolveAndAppendMoves(); + } + } + } + } + } + + for (i = 0; i < numBlocks; i++) { + if (!blockCompleted.get(i)) { + LIRBlock fromBlock = blockAt(i); + alreadyResolved.setFrom(blockCompleted); + + int numSux = fromBlock.numberOfSux(); + for (int s = 0; s < numSux; s++) { + LIRBlock toBlock = 
fromBlock.suxAt(s); + + // check for duplicate edges between the same blocks (can happen with switch blocks) + if (!alreadyResolved.get(toBlock.linearScanNumber())) { + if (C1XOptions.TraceLinearScanLevel >= 3) { + TTY.println(" processing edge between B%d and B%d", fromBlock.blockID(), toBlock.blockID()); + } + alreadyResolved.set(toBlock.linearScanNumber()); + + // collect all intervals that have been split between fromBlock and toBlock + resolveCollectMappings(fromBlock, toBlock, moveResolver); + if (moveResolver.hasMappings()) { + resolveFindInsertPos(fromBlock, toBlock, moveResolver); + moveResolver.resolveAndAppendMoves(); + } + } + } + } + } + } + + // * Phase 7: assign register numbers back to LIR + // (includes computation of debug information and oop maps) + + boolean verifyAssignedLocation(Interval interval, CiValue location) { + CiKind kind = interval.kind(); + + assert location.isRegister() || location.isStackSlot(); + + if (location.isRegister()) { + CiRegister reg = location.asRegister(); + + // register + switch (kind) { + case Byte: + case Char: + case Short: + case Jsr: + case Word: + case Object: + case Int: { + assert reg.isCpu() : "not cpu register"; + break; + } + + case Long: { + assert reg.isCpu() : "not cpu register"; + break; + } + + case Float: { + assert !compilation.target.arch.isX86() || reg.isFpu() : "not xmm register: " + reg; + break; + } + + case Double: { + assert !compilation.target.arch.isX86() || reg.isFpu() : "not xmm register: " + reg; + break; + } + + default: { + throw Util.shouldNotReachHere(); + } + } + } + return true; + } + + CiStackSlot canonicalSpillOpr(Interval interval) { + assert interval.spillSlot() != null : "canonical spill slot not set"; + return interval.spillSlot(); + } + + /** + * Assigns the allocated location for an LIR instruction operand back into the instruction. 
+ * + * @param operand an LIR instruction operand + * @param opId the id of the LIR instruction using {@code operand} + * @param mode the usage mode for {@code operand} by the instruction + * @return the location assigned for the operand + */ + private CiValue colorLirOperand(CiVariable operand, int opId, OperandMode mode) { + Interval interval = intervalFor(operand); + assert interval != null : "interval must exist"; + + if (opId != -1) { + if (C1XOptions.DetailedAsserts) { + LIRBlock block = blockForId(opId); + if (block.numberOfSux() <= 1 && opId == block.lastLirInstructionId()) { + // check if spill moves could have been appended at the end of this block, but + // before the branch instruction. So the split child information for this branch would + // be incorrect. + LIRInstruction instr = block.lir().instructionsList().get(block.lir().instructionsList().size() - 1); + if (instr instanceof LIRBranch) { + LIRBranch branch = (LIRBranch) instr; + if (block.liveOut.get(operandNumber(operand))) { + assert branch.cond() == Condition.TRUE : "block does not end with an unconditional jump"; + throw new CiBailout("can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolveDataFlow)"); + } + } + } + } + + // operands are not changed when an interval is split during allocation, + // so search the right interval here + interval = splitChildAtOpId(interval, opId, mode); + } + + return interval.location(); + } + + IntervalWalker initComputeOopMaps() { + // setup lists of potential oops for walking + Interval oopIntervals; + Interval nonOopIntervals; + + oopIntervals = createUnhandledLists(IS_OOP_INTERVAL, null).first; + + // intervals that have no oops inside need not to be processed. 
+ // to ensure a walking until the last instruction id, add a dummy interval + // with a high operation id + nonOopIntervals = new Interval(CiValue.IllegalValue, -1); + nonOopIntervals.addRange(Integer.MAX_VALUE - 2, Integer.MAX_VALUE - 1); + + return new IntervalWalker(this, oopIntervals, nonOopIntervals); + } + + void computeOopMap(IntervalWalker iw, LIRInstruction op, LIRDebugInfo info, boolean isCallSite, CiBitMap frameRefMap, CiBitMap regRefMap) { + if (C1XOptions.TraceLinearScanLevel >= 3) { + TTY.println("creating oop map at opId %d", op.id); + } + + // walk before the current operation . intervals that start at + // the operation (i.e. output operands of the operation) are not + // included in the oop map + iw.walkBefore(op.id); + + // Iterate through active intervals + for (Interval interval = iw.activeLists.get(RegisterBinding.Fixed); interval != Interval.EndMarker; interval = interval.next) { + CiValue operand = interval.operand; + + assert interval.currentFrom() <= op.id && op.id <= interval.currentTo() : "interval should not be active otherwise"; + assert interval.operand.isVariable() : "fixed interval found"; + + // Check if this range covers the instruction. Intervals that + // start or end at the current operation are not included in the + // oop map, except in the case of patching moves. For patching + // moves, any intervals which end at this instruction are included + // in the oop map since we may safepoint while doing the patch + // before we've consumed the inputs. + if (op.id < interval.currentTo()) { + // caller-save registers must not be included into oop-maps at calls + assert !isCallSite || !operand.isRegister() || !isCallerSave(operand) : "interval is in a caller-save register at a call . 
register will be overwritten"; + + CiValue location = interval.location(); + if (location.isStackSlot()) { + location = frameMap.toStackAddress((CiStackSlot) location); + } + info.setOop(location, compilation, frameRefMap, regRefMap); + + // Spill optimization: when the stack value is guaranteed to be always correct, + // then it must be added to the oop map even if the interval is currently in a register + if (interval.alwaysInMemory() && op.id > interval.spillDefinitionPos() && !interval.location().equals(interval.spillSlot())) { + assert interval.spillDefinitionPos() > 0 : "position not set correctly"; + assert interval.spillSlot() != null : "no spill slot assigned"; + assert !interval.operand.isRegister() : "interval is on stack : so stack slot is registered twice"; + info.setOop(frameMap.toStackAddress(interval.spillSlot()), compilation, frameRefMap, regRefMap); + } + } + } + } + + private boolean isCallerSave(CiValue operand) { + return attributes(operand.asRegister()).isCallerSave; + } + + void computeOopMap(IntervalWalker iw, LIRInstruction op, LIRDebugInfo info, CiBitMap frameRefMap, CiBitMap regRefMap) { + computeOopMap(iw, op, info, op.hasCall, frameRefMap, regRefMap); + if (op instanceof LIRCall) { + List<CiValue> pointerSlots = ((LIRCall) op).pointerSlots; + if (pointerSlots != null) { + for (CiValue v : pointerSlots) { + info.setOop(v, compilation, frameRefMap, regRefMap); + } + } + } else if (op instanceof LIRXirInstruction) { + List<CiValue> pointerSlots = ((LIRXirInstruction) op).pointerSlots; + if (pointerSlots != null) { + for (CiValue v : pointerSlots) { + info.setOop(v, compilation, frameRefMap, regRefMap); + } + } + } + } + + CiValue toCiValue(int opId, Value value) { + if (value != null && value.operand() != CiValue.IllegalValue) { + CiValue operand = value.operand(); + Constant con = null; + if (value instanceof Constant) { + con = (Constant) value; + } + + assert con == null || operand.isVariable() || operand.isConstant() || 
operand.isIllegal() : "Constant instructions have only constant operands (or illegal if constant is optimized away)"; + + if (operand.isVariable()) { + OperandMode mode = OperandMode.Input; + LIRBlock block = blockForId(opId); + if (block.numberOfSux() == 1 && opId == block.lastLirInstructionId()) { + // generating debug information for the last instruction of a block. + // if this instruction is a branch, spill moves are inserted before this branch + // and so the wrong operand would be returned (spill moves at block boundaries are not + // considered in the live ranges of intervals) + // Solution: use the first opId of the branch target block instead. + final LIRInstruction instr = block.lir().instructionsList().get(block.lir().instructionsList().size() - 1); + if (instr instanceof LIRBranch) { + if (block.liveOut.get(operandNumber(operand))) { + opId = block.suxAt(0).firstLirInstructionId(); + mode = OperandMode.Output; + } + } + } + + // Get current location of operand + // The operand must be live because debug information is considered when building the intervals + // if the interval is not live, colorLirOperand will cause an assert on failure + operand = colorLirOperand((CiVariable) operand, opId, mode); + assert !hasCall(opId) || operand.isStackSlot() || !isCallerSave(operand) : "cannot have caller-save register operands at calls"; + return operand; + } else if (operand.isRegister()) { + assert false : "must not reach here"; + return operand; + } else { + assert value instanceof Constant; + assert operand.isConstant() : "operand must be constant"; + return operand; + } + } else { + // return a dummy value because real value not needed + return CiValue.IllegalValue; + } + } + + CiFrame computeFrameForState(FrameState state, int opId, CiBitMap frameRefMap) { + CiValue[] values = new CiValue[state.valuesSize() + state.locksSize()]; + int valueIndex = 0; + + for (int i = 0; i < state.valuesSize(); i++) { + values[valueIndex++] = toCiValue(opId, 
state.valueAt(i)); + } + + for (int i = 0; i < state.locksSize(); i++) { + if (compilation.runtime.sizeOfBasicObjectLock() != 0) { + CiStackSlot monitorAddress = frameMap.toMonitorBaseStackAddress(i); + values[valueIndex++] = monitorAddress; + assert frameRefMap != null; + CiStackSlot objectAddress = frameMap.toMonitorObjectStackAddress(i); +// LIRDebugInfo.setBit(frameRefMap, objectAddress.index()); + frameRefMap.set(objectAddress.index()); + } else { + Value lock = state.lockAt(i); + if (lock.isConstant() && compilation.runtime.asJavaClass(lock.asConstant()) != null) { + // lock on class for synchronized static method + values[valueIndex++] = lock.asConstant(); + } else { + values[valueIndex++] = toCiValue(opId, lock); + } + } + } + CiFrame caller = null; + if (state.outerFrameState() != null) { + caller = computeFrameForState(state.outerFrameState(), opId, frameRefMap); + } + return new CiFrame(caller, state.method, state.bci, values, state.localsSize(), state.stackSize(), state.locksSize()); + } + + private void computeDebugInfo(IntervalWalker iw, LIRInstruction op) { + assert iw != null : "interval walker needed for debug information"; + computeDebugInfo(iw, op, op.info); + + if (op instanceof LIRXirInstruction) { + LIRXirInstruction xir = (LIRXirInstruction) op; + if (xir.infoAfter != null) { + computeDebugInfo(iw, op, xir.infoAfter); + } + } + } + + + private void computeDebugInfo(IntervalWalker iw, LIRInstruction op, LIRDebugInfo info) { + if (info != null) { + if (info.debugInfo == null) { + int frameSize = compilation.frameMap().frameSize(); + int frameWords = frameSize / compilation.target.spillSlotSize; + CiBitMap frameRefMap = new CiBitMap(frameWords); + CiBitMap regRefMap = !op.hasCall ? new CiBitMap(compilation.target.arch.registerReferenceMapBitCount) : null; + CiFrame frame = compilation.placeholderState != null ? 
null : computeFrame(info.state, op.id, frameRefMap); + computeOopMap(iw, op, info, frameRefMap, regRefMap); + info.debugInfo = new CiDebugInfo(frame, regRefMap, frameRefMap); + } else if (C1XOptions.DetailedAsserts) { + assert info.debugInfo.frame().equals(computeFrame(info.state, op.id, new CiBitMap(info.debugInfo.frameRefMap.size()))); + } + } + } + + CiFrame computeFrame(FrameState state, int opId, CiBitMap frameRefMap) { + if (C1XOptions.TraceLinearScanLevel >= 3) { + TTY.println("creating debug information at opId %d", opId); + } + return computeFrameForState(state, opId, frameRefMap); + } + + private void assignLocations(List<LIRInstruction> instructions, IntervalWalker iw) { + int numInst = instructions.size(); + boolean hasDead = false; + + for (int j = 0; j < numInst; j++) { + LIRInstruction op = instructions.get(j); + if (op == null) { // this can happen when spill-moves are removed in eliminateSpillMoves + hasDead = true; + continue; + } + + // iterate all modes of the visitor and process all virtual operands + for (LIRInstruction.OperandMode mode : LIRInstruction.OPERAND_MODES) { + int n = op.operandCount(mode); + for (int k = 0; k < n; k++) { + CiValue operand = op.operandAt(mode, k); + if (operand.isVariable()) { + op.setOperandAt(mode, k, colorLirOperand((CiVariable) operand, op.id, mode)); + } + } + } + + if (op.info != null) { + // compute reference map and debug information + computeDebugInfo(iw, op); + } + + // make sure we haven't made the op invalid. + assert op.verify(); + + // remove useless moves + if (op.code == LIROpcode.Move) { + CiValue src = op.operand(0); + CiValue dst = op.result(); + if (dst == src || src.equals(dst)) { + // TODO: what about o.f = o.f and exceptions? + instructions.set(j, null); + hasDead = true; + } + } + } + + if (hasDead) { + // iterate all instructions of the block and remove all null-values. 
+ int insertPoint = 0; + for (int j = 0; j < numInst; j++) { + LIRInstruction op = instructions.get(j); + if (op != null) { + if (insertPoint != j) { + instructions.set(insertPoint, op); + } + insertPoint++; + } + } + Util.truncate(instructions, insertPoint); + } + } + + private void assignLocations() { + IntervalWalker iw = initComputeOopMaps(); + for (LIRBlock block : sortedBlocks) { + assignLocations(block.lir().instructionsList(), iw); + } + } + + public void allocate() { + if (C1XOptions.PrintTimers) { + C1XTimers.LIFETIME_ANALYSIS.start(); + } + + numberInstructions(); + + printLir("Before register allocation", true); + + computeLocalLiveSets(); + computeGlobalLiveSets(); + + buildIntervals(); + sortIntervalsBeforeAllocation(); + + if (C1XOptions.PrintTimers) { + C1XTimers.LIFETIME_ANALYSIS.stop(); + C1XTimers.LINEAR_SCAN.start(); + } + + printIntervals("Before register allocation"); + + allocateRegisters(); + + if (C1XOptions.PrintTimers) { + C1XTimers.LINEAR_SCAN.stop(); + C1XTimers.RESOLUTION.start(); + } + + resolveDataFlow(); + + if (C1XOptions.PrintTimers) { + C1XTimers.RESOLUTION.stop(); + C1XTimers.DEBUG_INFO.start(); + } + + C1XMetrics.LSRASpills += (maxSpills - frameMap.initialSpillSlot()); + + // fill in number of spill slots into frameMap + frameMap.finalizeFrame(maxSpills); + + printIntervals("After register allocation"); + printLir("After register allocation", true); + + sortIntervalsAfterAllocation(); + + if (C1XOptions.DetailedAsserts) { + verify(); + } + + eliminateSpillMoves(); + assignLocations(); + + if (C1XOptions.DetailedAsserts) { + verifyIntervals(); + } + + if (C1XOptions.PrintTimers) { + C1XTimers.DEBUG_INFO.stop(); + C1XTimers.CODE_CREATE.start(); + } + + printLir("After register number assignment", true); + EdgeMoveOptimizer.optimize(ir.linearScanOrder()); + ControlFlowOptimizer.optimize(ir); + printLir("After control flow optimization", false); + } + + void printIntervals(String label) { + if (C1XOptions.TraceLinearScanLevel >= 1) 
{ + int i; + TTY.println(); + TTY.println(label); + + for (Interval interval : intervals) { + if (interval != null) { + TTY.out().println(interval.logString(this)); + } + } + + TTY.println(); + TTY.println("--- Basic Blocks ---"); + for (i = 0; i < blockCount(); i++) { + LIRBlock block = blockAt(i); + TTY.print("B%d [%d, %d, %d, %d] ", block.blockID(), block.firstLirInstructionId(), block.lastLirInstructionId(), block.loopIndex(), block.loopDepth()); + } + TTY.println(); + TTY.println(); + } + + if (compilation.compiler.isObserved()) { + compilation.compiler.fireCompilationEvent(new CompilationEvent(compilation, label, this, intervals, intervalsSize)); + } + } + + void printLir(String label, boolean hirValid) { + if (C1XOptions.TraceLinearScanLevel >= 1 && !TTY.isSuppressed()) { + TTY.println(); + TTY.println(label); + LIRList.printLIR(ir.linearScanOrder()); + TTY.println(); + } + + if (compilation.compiler.isObserved()) { + compilation.compiler.fireCompilationEvent(new CompilationEvent(compilation, label, compilation.graph, hirValid, true)); + } + } + + boolean verify() { + // (check that all intervals have a correct register and that no registers are overwritten) + if (C1XOptions.TraceLinearScanLevel >= 2) { + TTY.println(" verifying intervals *"); + } + verifyIntervals(); + + if (C1XOptions.TraceLinearScanLevel >= 2) { + TTY.println(" verifying that no oops are in fixed intervals *"); + } + //verifyNoOopsInFixedIntervals(); + + if (C1XOptions.TraceLinearScanLevel >= 2) { + TTY.println(" verifying that unpinned constants are not alive across block boundaries"); + } + verifyConstants(); + + if (C1XOptions.TraceLinearScanLevel >= 2) { + TTY.println(" verifying register allocation *"); + } + verifyRegisters(); + + if (C1XOptions.TraceLinearScanLevel >= 2) { + TTY.println(" no errors found *"); + } + + return true; + } + + private void verifyRegisters() { + RegisterVerifier verifier = new RegisterVerifier(this); + verifier.verify(blockAt(0)); + } + + void 
verifyIntervals() { + int len = intervalsSize; + + for (int i = 0; i < len; i++) { + Interval i1 = intervals[i]; + if (i1 == null) { + continue; + } + + i1.checkSplitChildren(); + + if (i1.operandNumber != i) { + TTY.println("Interval %d is on position %d in list", i1.operandNumber, i); + TTY.println(i1.logString(this)); + throw new CiBailout(""); + } + + if (i1.operand.isVariable() && i1.kind() == CiKind.Illegal) { + TTY.println("Interval %d has no type assigned", i1.operandNumber); + TTY.println(i1.logString(this)); + throw new CiBailout(""); + } + + if (i1.location() == null) { + TTY.println("Interval %d has no register assigned", i1.operandNumber); + TTY.println(i1.logString(this)); + throw new CiBailout(""); + } + + if (!isProcessed(i1.location())) { + TTY.println("Can not have an Interval for an ignored register " + i1.location()); + TTY.println(i1.logString(this)); + throw new CiBailout(""); + } + + if (i1.first() == Range.EndMarker) { + TTY.println("Interval %d has no Range", i1.operandNumber); + TTY.println(i1.logString(this)); + throw new CiBailout(""); + } + + for (Range r = i1.first(); r != Range.EndMarker; r = r.next) { + if (r.from >= r.to) { + TTY.println("Interval %d has zero length range", i1.operandNumber); + TTY.println(i1.logString(this)); + throw new CiBailout(""); + } + } + + for (int j = i + 1; j < len; j++) { + Interval i2 = intervals[j]; + if (i2 == null) { + continue; + } + + // special intervals that are created in MoveResolver + // . 
ignore them because the range information has no meaning there + if (i1.from() == 1 && i1.to() == 2) { + continue; + } + if (i2.from() == 1 && i2.to() == 2) { + continue; + } + CiValue l1 = i1.location(); + CiValue l2 = i2.location(); + if (i1.intersects(i2) && (l1.equals(l2))) { + if (C1XOptions.DetailedAsserts) { + TTY.println("Intervals %d and %d overlap and have the same register assigned", i1.operandNumber, i2.operandNumber); + TTY.println(i1.logString(this)); + TTY.println(i2.logString(this)); + } + throw new CiBailout(""); + } + } + } + } + + void verifyNoOopsInFixedIntervals() { + Interval fixedIntervals; + Interval otherIntervals; + fixedIntervals = createUnhandledLists(IS_PRECOLORED_INTERVAL, null).first; + // to ensure a walking until the last instruction id, add a dummy interval + // with a high operation id + otherIntervals = new Interval(CiValue.IllegalValue, -1); + otherIntervals.addRange(Integer.MAX_VALUE - 2, Integer.MAX_VALUE - 1); + IntervalWalker iw = new IntervalWalker(this, fixedIntervals, otherIntervals); + + for (int i = 0; i < blockCount(); i++) { + LIRBlock block = blockAt(i); + + List<LIRInstruction> instructions = block.lir().instructionsList(); + + for (int j = 0; j < instructions.size(); j++) { + LIRInstruction op = instructions.get(j); + + if (op.info != null) { + iw.walkBefore(op.id); + boolean checkLive = true; + + // Make sure none of the fixed registers is live across an + // oopmap since we can't handle that correctly. + if (checkLive) { + for (Interval interval = iw.activeLists.get(RegisterBinding.Fixed); interval != Interval.EndMarker; interval = interval.next) { + if (interval.currentTo() > op.id + 1) { + // This interval is live out of this op so make sure + // that this interval represents some value that's + // referenced by this op either as an input or output. 
+ boolean ok = false; + for (LIRInstruction.OperandMode mode : LIRInstruction.OPERAND_MODES) { + int n = op.operandCount(mode); + for (int k = 0; k < n; k++) { + CiValue operand = op.operandAt(mode, k); + if (operand.isRegister()) { + if (intervalFor(operand) == interval) { + ok = true; + break; + } + } + } + } + assert ok : "fixed intervals should never be live across an oopmap point"; + } + } + } + } + } + } + } + + void verifyConstants() { + int numBlocks = blockCount(); + + for (int i = 0; i < numBlocks; i++) { + LIRBlock block = blockAt(i); + CiBitMap liveAtEdge = block.liveIn; + + // visit all operands where the liveAtEdge bit is set + for (int operandNum = liveAtEdge.nextSetBit(0); operandNum >= 0; operandNum = liveAtEdge.nextSetBit(operandNum + 1)) { + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println("checking interval %d of block B%d", operandNum, block.blockID()); + } + CiValue operand = operands.operandFor(operandNum); + assert operand.isVariable() : "value must have variable operand"; + Value value = gen.operands.instructionForResult(((CiVariable) operand)); + assert value != null : "all intervals live across block boundaries must have Value"; + // TKR assert value.asConstant() == null || value.isPinned() : + // "only pinned constants can be alive accross block boundaries"; + } + } + } + + public int numberOfSpillSlots(CiKind kind) { + return compilation.target.spillSlots(kind); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/alloc/LinearScanWalker.java Wed Jun 08 08:59:54 2011 +0200 @@ -0,0 +1,980 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.max.graal.compiler.alloc; + +import static com.sun.cri.ci.CiUtil.*; + +import java.util.*; + +import com.oracle.max.graal.compiler.*; +import com.oracle.max.graal.compiler.alloc.Interval.*; +import com.oracle.max.graal.compiler.debug.*; +import com.oracle.max.graal.compiler.lir.*; +import com.oracle.max.graal.compiler.util.*; +import com.sun.cri.ci.*; +import com.sun.cri.ci.CiRegister.*; + +/** + * + * @author Thomas Wuerthinger + */ +final class LinearScanWalker extends IntervalWalker { + + private CiRegister[] availableRegs; + + private final int[] usePos; + private final int[] blockPos; + + private List<Interval>[] spillIntervals; + + private MoveResolver moveResolver; // for ordering spill moves + + // accessors mapped to same functions in class LinearScan + int blockCount() { + return allocator.blockCount(); + } + + LIRBlock blockAt(int idx) { + return allocator.blockAt(idx); + } + + LIRBlock blockOfOpWithId(int opId) { + return allocator.blockForId(opId); + } + + LinearScanWalker(LinearScan allocator, Interval unhandledFixedFirst, Interval unhandledAnyFirst) { + super(allocator, unhandledFixedFirst, unhandledAnyFirst); + moveResolver = new MoveResolver(allocator); + spillIntervals = Util.uncheckedCast(new List[allocator.registers.length]); + for (int i = 0; i < allocator.registers.length; i++) { + spillIntervals[i] = new ArrayList<Interval>(2); + } + usePos = new int[allocator.registers.length]; + blockPos = new int[allocator.registers.length]; + } + + void initUseLists(boolean onlyProcessUsePos) { + for (CiRegister register : availableRegs) { + int i = register.number; + usePos[i] = Integer.MAX_VALUE; + + if (!onlyProcessUsePos) { + blockPos[i] = Integer.MAX_VALUE; + spillIntervals[i].clear(); + } + } + } + + void excludeFromUse(Interval i) { + CiValue location = i.location(); + int i1 = location.asRegister().number; + if (i1 >= availableRegs[0].number && i1 <= availableRegs[availableRegs.length - 1].number) { + usePos[i1] = 0; + } + 
} + + void setUsePos(Interval interval, int usePos, boolean onlyProcessUsePos) { + if (usePos != -1) { + assert usePos != 0 : "must use excludeFromUse to set usePos to 0"; + int i = interval.location().asRegister().number; + if (i >= availableRegs[0].number && i <= availableRegs[availableRegs.length - 1].number) { + if (this.usePos[i] > usePos) { + this.usePos[i] = usePos; + } + if (!onlyProcessUsePos) { + spillIntervals[i].add(interval); + } + } + } + } + + void setBlockPos(Interval i, int blockPos) { + if (blockPos != -1) { + int reg = i.location().asRegister().number; + if (reg >= availableRegs[0].number && reg <= availableRegs[availableRegs.length - 1].number) { + if (this.blockPos[reg] > blockPos) { + this.blockPos[reg] = blockPos; + } + if (usePos[reg] > blockPos) { + usePos[reg] = blockPos; + } + } + } + } + + void freeExcludeActiveFixed() { + Interval interval = activeLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + assert interval.location().isRegister() : "active interval must have a register assigned"; + excludeFromUse(interval); + interval = interval.next; + } + } + + void freeExcludeActiveAny() { + Interval interval = activeLists.get(RegisterBinding.Any); + while (interval != Interval.EndMarker) { + assert interval.location().isRegister() : "active interval must have a register assigned"; + excludeFromUse(interval); + interval = interval.next; + } + } + + void freeCollectInactiveFixed(Interval current) { + Interval interval = inactiveLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + if (current.to() <= interval.currentFrom()) { + assert interval.currentIntersectsAt(current) == -1 : "must not intersect"; + setUsePos(interval, interval.currentFrom(), true); + } else { + setUsePos(interval, interval.currentIntersectsAt(current), true); + } + interval = interval.next; + } + } + + void freeCollectInactiveAny(Interval current) { + Interval interval = inactiveLists.get(RegisterBinding.Any); + while 
(interval != Interval.EndMarker) { + setUsePos(interval, interval.currentIntersectsAt(current), true); + interval = interval.next; + } + } + + void freeCollectUnhandled(RegisterBinding kind, Interval current) { + Interval interval = unhandledLists.get(kind); + while (interval != Interval.EndMarker) { + setUsePos(interval, interval.intersectsAt(current), true); + if (kind == RegisterBinding.Fixed && current.to() <= interval.from()) { + setUsePos(interval, interval.from(), true); + } + interval = interval.next; + } + } + + void spillExcludeActiveFixed() { + Interval interval = activeLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + excludeFromUse(interval); + interval = interval.next; + } + } + + void spillBlockUnhandledFixed(Interval current) { + Interval interval = unhandledLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + setBlockPos(interval, interval.intersectsAt(current)); + interval = interval.next; + } + } + + void spillBlockInactiveFixed(Interval current) { + Interval interval = inactiveLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + if (current.to() > interval.currentFrom()) { + setBlockPos(interval, interval.currentIntersectsAt(current)); + } else { + assert interval.currentIntersectsAt(current) == -1 : "invalid optimization: intervals intersect"; + } + + interval = interval.next; + } + } + + void spillCollectActiveAny() { + Interval interval = activeLists.get(RegisterBinding.Any); + while (interval != Interval.EndMarker) { + setUsePos(interval, Math.min(interval.nextUsage(RegisterPriority.LiveAtLoopEnd, currentPosition), interval.to()), false); + interval = interval.next; + } + } + + void spillCollectInactiveAny(Interval current) { + Interval interval = inactiveLists.get(RegisterBinding.Any); + while (interval != Interval.EndMarker) { + if (interval.currentIntersects(current)) { + setUsePos(interval, Math.min(interval.nextUsage(RegisterPriority.LiveAtLoopEnd, 
currentPosition), interval.to()), false); + } + interval = interval.next; + } + } + + void insertMove(int opId, Interval srcIt, Interval dstIt) { + // output all moves here. When source and target are equal, the move is + // optimized away later in assignRegNums + + opId = (opId + 1) & ~1; + LIRBlock opBlock = allocator.blockForId(opId); + assert opId > 0 && allocator.blockForId(opId - 2) == opBlock : "cannot insert move at block boundary"; + + // calculate index of instruction inside instruction list of current block + // the minimal index (for a block with no spill moves) can be calculated because the + // numbering of instructions is known. + // When the block already contains spill moves, the index must be increased until the + // correct index is reached. + List<LIRInstruction> list = opBlock.lir().instructionsList(); + int index = (opId - list.get(0).id) >> 1; + assert list.get(index).id <= opId : "error in calculation"; + + while (list.get(index).id != opId) { + index++; + assert 0 <= index && index < list.size() : "index out of bounds"; + } + assert 1 <= index && index < list.size() : "index out of bounds"; + assert list.get(index).id == opId : "error in calculation"; + + // insert new instruction before instruction at position index + moveResolver.moveInsertPosition(opBlock.lir(), index - 1); + moveResolver.addMapping(srcIt, dstIt); + } + + int findOptimalSplitPos(LIRBlock minBlock, LIRBlock maxBlock, int maxSplitPos) { + int fromBlockNr = minBlock.linearScanNumber(); + int toBlockNr = maxBlock.linearScanNumber(); + + assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range"; + assert 0 <= toBlockNr && toBlockNr < blockCount() : "out of range"; + assert fromBlockNr < toBlockNr : "must cross block boundary"; + + // Try to split at end of maxBlock. 
If this would be after + // maxSplitPos, then use the begin of maxBlock + int optimalSplitPos = maxBlock.lastLirInstructionId() + 2; + if (optimalSplitPos > maxSplitPos) { + optimalSplitPos = maxBlock.firstLirInstructionId(); + } + + int minLoopDepth = maxBlock.loopDepth(); + for (int i = toBlockNr - 1; i >= fromBlockNr; i--) { + LIRBlock cur = blockAt(i); + + if (cur.loopDepth() < minLoopDepth) { + // block with lower loop-depth found . split at the end of this block + minLoopDepth = cur.loopDepth(); + optimalSplitPos = cur.lastLirInstructionId() + 2; + } + } + assert optimalSplitPos > allocator.maxOpId() || allocator.isBlockBegin(optimalSplitPos) : "algorithm must move split pos to block boundary"; + + return optimalSplitPos; + } + + int findOptimalSplitPos(Interval interval, int minSplitPos, int maxSplitPos, boolean doLoopOptimization) { + int optimalSplitPos = -1; + if (minSplitPos == maxSplitPos) { + // trivial case, no optimization of split position possible + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" min-pos and max-pos are equal, no optimization possible"); + } + optimalSplitPos = minSplitPos; + + } else { + assert minSplitPos < maxSplitPos : "must be true then"; + assert minSplitPos > 0 : "cannot access minSplitPos - 1 otherwise"; + + // reason for using minSplitPos - 1: when the minimal split pos is exactly at the + // beginning of a block, then minSplitPos is also a possible split position. 
+ // Use the block before as minBlock, because then minBlock.lastLirInstructionId() + 2 == minSplitPos + LIRBlock minBlock = allocator.blockForId(minSplitPos - 1); + + // reason for using maxSplitPos - 1: otherwise there would be an assert on failure + // when an interval ends at the end of the last block of the method + // (in this case, maxSplitPos == allocator().maxLirOpId() + 2, and there is no + // block at this opId) + LIRBlock maxBlock = allocator.blockForId(maxSplitPos - 1); + + assert minBlock.linearScanNumber() <= maxBlock.linearScanNumber() : "invalid order"; + if (minBlock == maxBlock) { + // split position cannot be moved to block boundary : so split as late as possible + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" cannot move split pos to block boundary because minPos and maxPos are in same block"); + } + optimalSplitPos = maxSplitPos; + + } else { + if (interval.hasHoleBetween(maxSplitPos - 1, maxSplitPos) && !allocator.isBlockBegin(maxSplitPos)) { + // Do not move split position if the interval has a hole before maxSplitPos. + // Intervals resulting from Phi-Functions have more than one definition (marked + // as mustHaveRegister) with a hole before each definition. When the register is needed + // for the second definition : an earlier reloading is unnecessary. 
+ if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" interval has hole just before maxSplitPos, so splitting at maxSplitPos"); + } + optimalSplitPos = maxSplitPos; + + } else { + // seach optimal block boundary between minSplitPos and maxSplitPos + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" moving split pos to optimal block boundary between block B%d and B%d", minBlock.blockID(), maxBlock.blockID()); + } + + if (doLoopOptimization) { + // Loop optimization: if a loop-end marker is found between min- and max-position : + // then split before this loop + int loopEndPos = interval.nextUsageExact(RegisterPriority.LiveAtLoopEnd, minBlock.lastLirInstructionId() + 2); + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" loop optimization: loop end found at pos %d", loopEndPos); + } + + assert loopEndPos > minSplitPos : "invalid order"; + if (loopEndPos < maxSplitPos) { + // loop-end marker found between min- and max-position + // if it is not the end marker for the same loop as the min-position : then move + // the max-position to this loop block. 
+ // Desired result: uses tagged as shouldHaveRegister inside a loop cause a reloading + // of the interval (normally, only mustHaveRegister causes a reloading) + LIRBlock loopBlock = allocator.blockForId(loopEndPos); + + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" interval is used in loop that ends in block B%d, so trying to move maxBlock back from B%d to B%d", loopBlock.blockID(), maxBlock.blockID(), loopBlock.blockID()); + } + assert loopBlock != minBlock : "loopBlock and minBlock must be different because block boundary is needed between"; + + optimalSplitPos = findOptimalSplitPos(minBlock, loopBlock, loopBlock.lastLirInstructionId() + 2); + if (optimalSplitPos == loopBlock.lastLirInstructionId() + 2) { + optimalSplitPos = -1; + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" loop optimization not necessary"); + } + } else { + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" loop optimization successful"); + } + } + } + } + + if (optimalSplitPos == -1) { + // not calculated by loop optimization + optimalSplitPos = findOptimalSplitPos(minBlock, maxBlock, maxSplitPos); + } + } + } + } + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" optimal split position: %d", optimalSplitPos); + } + + return optimalSplitPos; + } + + // split an interval at the optimal position between minSplitPos and + // maxSplitPos in two parts: + // 1) the left part has already a location assigned + // 2) the right part is sorted into to the unhandled-list + void splitBeforeUsage(Interval interval, int minSplitPos, int maxSplitPos) { + if (C1XOptions.TraceLinearScanLevel >= 2) { + TTY.println("----- splitting interval: "); + } + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(interval.logString(allocator)); + } + if (C1XOptions.TraceLinearScanLevel >= 2) { + TTY.println(" between %d and %d", minSplitPos, maxSplitPos); + } + + assert interval.from() < minSplitPos : "cannot split at start of interval"; + assert currentPosition < 
minSplitPos : "cannot split before current position"; + assert minSplitPos <= maxSplitPos : "invalid order"; + assert maxSplitPos <= interval.to() : "cannot split after end of interval"; + + int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, true); + + assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range"; + assert optimalSplitPos <= interval.to() : "cannot split after end of interval"; + assert optimalSplitPos > interval.from() : "cannot split at start of interval"; + + if (optimalSplitPos == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) { + // the split position would be just before the end of the interval + // . no split at all necessary + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" no split necessary because optimal split position is at end of interval"); + } + return; + } + + // must calculate this before the actual split is performed and before split position is moved to odd opId + boolean moveNecessary = !allocator.isBlockBegin(optimalSplitPos) && !interval.hasHoleBetween(optimalSplitPos - 1, optimalSplitPos); + + if (!allocator.isBlockBegin(optimalSplitPos)) { + // move position before actual instruction (odd opId) + optimalSplitPos = (optimalSplitPos - 1) | 1; + } + + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" splitting at position %d", optimalSplitPos); + } + assert allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 1) : "split pos must be odd when not on block boundary"; + assert !allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 0) : "split pos must be even on block boundary"; + + Interval splitPart = interval.split(optimalSplitPos, allocator); + + allocator.copyRegisterFlags(interval, splitPart); + splitPart.setInsertMoveWhenActivated(moveNecessary); + + assert splitPart.from() >= current.currentFrom() : "cannot append new interval before current walk position"; + 
unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Any, splitPart); + + if (C1XOptions.TraceLinearScanLevel >= 2) { + TTY.println(" split interval in two parts (insertMoveWhenActivated: %b)", moveNecessary); + } + if (C1XOptions.TraceLinearScanLevel >= 2) { + TTY.print(" "); + TTY.println(interval.logString(allocator)); + TTY.print(" "); + TTY.println(splitPart.logString(allocator)); + } + } + +// split an interval at the optimal position between minSplitPos and +// maxSplitPos in two parts: +// 1) the left part has already a location assigned +// 2) the right part is always on the stack and therefore ignored in further processing + + void splitForSpilling(Interval interval) { + // calculate allowed range of splitting position + int maxSplitPos = currentPosition; + int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, maxSplitPos) + 1, interval.from()); + + if (C1XOptions.TraceLinearScanLevel >= 2) { + TTY.print("----- splitting and spilling interval: "); + TTY.println(interval.logString(allocator)); + TTY.println(" between %d and %d", minSplitPos, maxSplitPos); + } + + assert interval.state == State.Active : "why spill interval that is not active?"; + assert interval.from() <= minSplitPos : "cannot split before start of interval"; + assert minSplitPos <= maxSplitPos : "invalid order"; + assert maxSplitPos < interval.to() : "cannot split at end end of interval"; + assert currentPosition < interval.to() : "interval must not end before current position"; + + if (minSplitPos == interval.from()) { + // the whole interval is never used, so spill it entirely to memory + if (C1XOptions.TraceLinearScanLevel >= 2) { + TTY.println(" spilling entire interval because split pos is at beginning of interval"); + TTY.println(" use positions: " + interval.usePosList().size()); + } + assert interval.firstUsage(RegisterPriority.ShouldHaveRegister) > currentPosition : "interval must not have use position before currentPosition"; + + 
allocator.assignSpillSlot(interval); + allocator.changeSpillState(interval, minSplitPos); + + // Also kick parent intervals out of register to memory when they have no use + // position. This avoids short interval in register surrounded by intervals in + // memory . avoid useless moves from memory to register and back + Interval parent = interval; + while (parent != null && parent.isSplitChild()) { + parent = parent.getSplitChildBeforeOpId(parent.from()); + + if (parent.location().isRegister()) { + if (parent.firstUsage(RegisterPriority.ShouldHaveRegister) == Integer.MAX_VALUE) { + // parent is never used, so kick it out of its assigned register + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" kicking out interval %d out of its register because it is never used", parent.operandNumber); + } + allocator.assignSpillSlot(parent); + } else { + // do not go further back because the register is actually used by the interval + parent = null; + } + } + } + + } else { + // search optimal split pos, split interval and spill only the right hand part + int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, false); + + assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range"; + assert optimalSplitPos < interval.to() : "cannot split at end of interval"; + assert optimalSplitPos >= interval.from() : "cannot split before start of interval"; + + if (!allocator.isBlockBegin(optimalSplitPos)) { + // move position before actual instruction (odd opId) + optimalSplitPos = (optimalSplitPos - 1) | 1; + } + + if (C1XOptions.TraceLinearScanLevel >= 4) { + TTY.println(" splitting at position %d", optimalSplitPos); + } + assert allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 1) : "split pos must be odd when not on block boundary"; + assert !allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 0) : "split pos must be even on block boundary"; + + Interval spilledPart = 
// NOTE(review): the enclosing method begins before this excerpt; the result of this
// split is referred to below as 'spilledPart' — the assignment target is outside this view.
            interval.split(optimalSplitPos, allocator);
            allocator.assignSpillSlot(spilledPart);
            allocator.changeSpillState(spilledPart, optimalSplitPos);

            // a move is only needed inside a block; at a block boundary the resolver handles it
            if (!allocator.isBlockBegin(optimalSplitPos)) {
                if (C1XOptions.TraceLinearScanLevel >= 4) {
                    TTY.println(" inserting move from interval %d to %d", interval.operandNumber, spilledPart.operandNumber);
                }
                insertMove(optimalSplitPos, interval, spilledPart);
            }

            // the currentSplitChild is needed later when moves are inserted for reloading
            assert spilledPart.currentSplitChild() == interval : "overwriting wrong currentSplitChild";
            spilledPart.makeCurrentSplitChild();

            if (C1XOptions.TraceLinearScanLevel >= 2) {
                TTY.println(" split interval in two parts");
                TTY.print(" ");
                TTY.println(interval.logString(allocator));
                TTY.print(" ");
                TTY.println(spilledPart.logString(allocator));
            }
        }
    }

    // Splits an interval that already has a stack slot (e.g. a method parameter) before
    // its first use position that should have a register.
    void splitStackInterval(Interval interval) {
        int minSplitPos = currentPosition + 1;
        int maxSplitPos = Math.min(interval.firstUsage(RegisterPriority.ShouldHaveRegister), interval.to());

        splitBeforeUsage(interval, minSplitPos, maxSplitPos);
    }

    // Splits an interval whose register is only available up to registerAvailableUntil,
    // choosing a split position between its last should-have-register use and that limit.
    void splitWhenPartialRegisterAvailable(Interval interval, int registerAvailableUntil) {
        int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, registerAvailableUntil), interval.from() + 1);
        splitBeforeUsage(interval, minSplitPos, registerAvailableUntil);
    }

    // Splits an active or inactive interval so the part after the current position can be
    // spilled; the new unhandled part gets another chance at a register when activated.
    void splitAndSpillInterval(Interval interval) {
        assert interval.state == State.Active || interval.state == State.Inactive : "other states not allowed";

        int currentPos = currentPosition;
        if (interval.state == State.Inactive) {
            // the interval is currently inactive, so no spill slot is needed for now.
            // when the split part is activated, the interval has a new chance to get a register,
            // so in the best case no stack slot is necessary
            assert interval.hasHoleBetween(currentPos - 1, currentPos + 1) : "interval can not be inactive otherwise";
            splitBeforeUsage(interval, currentPos + 1, currentPos + 1);

        } else {
            // search the position where the interval must have a register and split
            // at the optimal position before.
            // The newly created part is added to the unhandled list and will get a register
            // when it is activated
            int minSplitPos = currentPos + 1;
            int maxSplitPos = Math.min(interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos), interval.to());

            splitBeforeUsage(interval, minSplitPos, maxSplitPos);

            assert interval.nextUsage(RegisterPriority.MustHaveRegister, currentPos) == Integer.MAX_VALUE : "the remaining part is spilled to stack and therefore has no register";
            splitForSpilling(interval);
        }
    }

    // Tries to assign a currently free register to the interval. Prefers a register that is
    // free for the whole interval (smallest sufficient use position), otherwise the register
    // that is free the longest, splitting the interval at the point the register is lost.
    // Returns false if no register is free long enough to be useful.
    boolean allocFreeRegister(Interval interval) {
        if (C1XOptions.TraceLinearScanLevel >= 2) {
            TTY.println("trying to find free register for " + interval.logString(allocator));
        }

        initUseLists(true);
        freeExcludeActiveFixed();
        freeExcludeActiveAny();
        freeCollectInactiveFixed(interval);
        freeCollectInactiveAny(interval);
        // freeCollectUnhandled(fixedKind, cur);
        assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0";

        // usePos contains the start of the next interval that has this register assigned
        // (either as a fixed register or a normal allocated register in the past)
        // only intervals overlapping with cur are processed, non-overlapping intervals can be ignored safely
        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.println(" state of registers:");
            for (CiRegister register : availableRegs) {
                int i = register.number;
                TTY.println(" reg %d: usePos: %d", register.number, usePos[i]);
            }
        }

        // honour the location hint (e.g. from a connected move) if its register is usable
        CiRegister hint = null;
        Interval locationHint = interval.locationHint(true, allocator);
        if (locationHint != null && locationHint.location() != null && locationHint.location().isRegister()) {
            hint = locationHint.location().asRegister();
            if (C1XOptions.TraceLinearScanLevel >= 4) {
                TTY.println(" hint register %d from interval %s", hint.number, locationHint.logString(allocator));
            }
        }
        assert interval.location() == null : "register already assigned to interval";

        // the register must be free at least until this position
        int regNeededUntil = interval.from() + 1;
        int intervalTo = interval.to();

        boolean needSplit = false;
        int splitPos = -1;

        CiRegister reg = null;
        CiRegister minFullReg = null;
        CiRegister maxPartialReg = null;

        for (int i = 0; i < availableRegs.length; ++i) {
            CiRegister availableReg = availableRegs[i];
            int number = availableReg.number;
            if (usePos[number] >= intervalTo) {
                // this register is free for the full interval
                if (minFullReg == null || availableReg == hint || (usePos[number] < usePos[minFullReg.number] && minFullReg != hint)) {
                    minFullReg = availableReg;
                }
            } else if (usePos[number] > regNeededUntil) {
                // this register is at least free until regNeededUntil
                if (maxPartialReg == null || availableReg == hint || (usePos[number] > usePos[maxPartialReg.number] && maxPartialReg != hint)) {
                    maxPartialReg = availableReg;
                }
            }
        }

        if (minFullReg != null) {
            reg = minFullReg;
        } else if (maxPartialReg != null) {
            needSplit = true;
            reg = maxPartialReg;
        } else {
            return false;
        }

        splitPos = usePos[reg.number];
        interval.assignLocation(reg.asValue(interval.kind()));
        if (C1XOptions.TraceLinearScanLevel >= 2) {
            TTY.println("selected register %d", reg.number);
        }

        assert splitPos > 0 : "invalid splitPos";
        if (needSplit) {
            // register not available for full interval, so split it
            splitWhenPartialRegisterAvailable(interval, splitPos);
        }

        // only return true if interval is completely assigned
        return true;
    }

    // Finds the register (other than ignoreReg) whose next use by another interval is the
    // furthest away, i.e. the best candidate to steal. Sets needSplit[0] if that register
    // is blocked by a fixed interval before intervalTo.
    // NOTE(review): usePos/blockPos are indexed by register number while availableRegs is
    // indexed by array position; returning availableRegs[maxReg] assumes the two coincide
    // for the current register set — TODO confirm against the register configuration.
    CiRegister findLockedRegister(int regNeededUntil, int intervalTo, CiValue ignoreReg, boolean[] needSplit) {
        int maxReg = -1;
        CiRegister ignore = ignoreReg.isRegister() ? ignoreReg.asRegister() : null;

        for (CiRegister reg : availableRegs) {
            int i = reg.number;
            if (reg == ignore) {
                // this register must be ignored

            } else if (usePos[i] > regNeededUntil) {
                if (maxReg == -1 || (usePos[i] > usePos[maxReg])) {
                    maxReg = i;
                }
            }
        }

        if (maxReg != -1) {
            if (blockPos[maxReg] <= intervalTo) {
                needSplit[0] = true;
            }
            return availableRegs[maxReg];
        }

        return null;
    }

    // Splits and spills every interval collected in spillIntervals for the given register,
    // freeing the register for the current interval.
    void splitAndSpillIntersectingIntervals(CiRegister reg) {
        assert reg != null : "no register assigned";

        for (int i = 0; i < spillIntervals[reg.number].size(); i++) {
            Interval interval = spillIntervals[reg.number].get(i);
            removeFromList(interval);
            splitAndSpillInterval(interval);
        }
    }

    // Split an Interval and spill it to memory so that cur can be placed in a register
    void allocLockedRegister(Interval interval) {
        if (C1XOptions.TraceLinearScanLevel >= 2) {
            TTY.println("need to split and spill to get register for " + interval.logString(allocator));
        }

        // collect current usage of registers
        initUseLists(false);
        spillExcludeActiveFixed();
        // spillBlockUnhandledFixed(cur);
        assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0";
        spillBlockInactiveFixed(interval);
        spillCollectActiveAny();
        spillCollectInactiveAny(interval);

        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.println(" state of registers:");
            for (CiRegister reg : availableRegs) {
                int i = reg.number;
                TTY.print(" reg %d: usePos: %d, blockPos: %d, intervals: ", i, usePos[i], blockPos[i]);
                for (int j = 0; j < spillIntervals[i].size(); j++) {
                    TTY.print("%d ", spillIntervals[i].get(j).operandNumber);
                }
                TTY.println();
            }
        }

        // the register must be free at least until this position
        int firstUsage = interval.firstUsage(RegisterPriority.MustHaveRegister);
        int regNeededUntil = Math.min(firstUsage, interval.from() + 1);
        int intervalTo = interval.to();
        assert regNeededUntil > 0 && regNeededUntil < Integer.MAX_VALUE : "interval has no use";

        // pick the register whose competing intervals have the most distant next use
        CiRegister reg = null;
        CiRegister ignore = interval.location() != null && interval.location().isRegister() ? interval.location().asRegister() : null;
        for (CiRegister availableReg : availableRegs) {
            int number = availableReg.number;
            if (availableReg == ignore) {
                // this register must be ignored
            } else if (usePos[number] > regNeededUntil) {
                if (reg == null || (usePos[number] > usePos[reg.number])) {
                    reg = availableReg;
                }
            }
        }

        if (reg == null || usePos[reg.number] <= firstUsage) {
            // the first use of cur is later than the spilling position -> spill cur
            if (C1XOptions.TraceLinearScanLevel >= 4) {
                TTY.println("able to spill current interval. firstUsage(register): %d, usePos: %d", firstUsage, reg == null ? 0 : usePos[reg.number]);
            }

            if (firstUsage <= interval.from() + 1) {
                assert false : "cannot spill interval that is used in first instruction (possible reason: no register found) firstUsage=" + firstUsage + ", interval.from()=" + interval.from();
                // assign a reasonable register and do a bailout in product mode to avoid errors
                allocator.assignSpillSlot(interval);
                throw new CiBailout("LinearScan: no register found");
            }

            splitAndSpillInterval(interval);
            return;
        }

        boolean needSplit = blockPos[reg.number] <= intervalTo;

        int splitPos = blockPos[reg.number];

        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.println("decided to use register %d", reg.number);
        }
        assert splitPos > 0 : "invalid splitPos";
        assert needSplit || splitPos > interval.from() : "splitting interval at from";

        interval.assignLocation(reg.asValue(interval.kind()));
        if (needSplit) {
            // register not available for full interval -> so split it
            splitWhenPartialRegisterAvailable(interval, splitPos);
        }

        // perform splitting and spilling for all affected intervals
        splitAndSpillIntersectingIntervals(reg);
    }

    // Fast check whether allocation can be skipped because the next instruction is a call
    // that blocks all registers (x86 only; odd positions follow an instruction boundary).
    boolean noAllocationPossible(Interval interval) {

        if (compilation.target.arch.isX86()) {
            // fast calculation of intervals that can never get a register because
            // the next instruction is a call that blocks all registers
            // Note: this does not work if callee-saved registers are available (e.g. on Sparc)

            // check if this interval is the result of a split operation
            // (an interval got a register until this position)
            int pos = interval.from();
            if (isOdd(pos)) {
                // the current instruction is a call that blocks all registers
                if (pos < allocator.maxOpId() && allocator.hasCall(pos + 1) && interval.to() > pos + 1) {
                    if (C1XOptions.TraceLinearScanLevel >= 4) {
                        TTY.println(" free register cannot be available because all registers blocked by following call");
                    }

                    // safety check that there is really no register available
                    assert !allocFreeRegister(interval) : "found a register for this interval";
                    return true;
                }

            }
        }
        return false;
    }

    // Selects the candidate register set (byte / FPU / CPU) for the interval based on its
    // kind and the must-be-byte-register constraint recorded in the operand pool.
    void initVarsForAlloc(Interval interval) {
        EnumMap<RegisterFlag, CiRegister[]> categorizedRegs = allocator.compilation.registerConfig.getCategorizedAllocatableRegisters();
        if (allocator.operands.mustBeByteRegister(interval.operand)) {
            assert interval.kind() != CiKind.Float && interval.kind() != CiKind.Double : "cpu regs only";
            availableRegs = categorizedRegs.get(RegisterFlag.Byte);
        } else if (interval.kind() == CiKind.Float || interval.kind() == CiKind.Double) {
            availableRegs = categorizedRegs.get(RegisterFlag.FPU);
        } else {
            availableRegs = categorizedRegs.get(RegisterFlag.CPU);
        }
    }

    // Returns true if op is a variable-to-variable move whose input is from.operand and
    // whose result is to.operand.
    boolean isMove(LIRInstruction op, Interval from, Interval to) {
        if (op.code != LIROpcode.Move) {
            return false;
        }
        assert op instanceof LIROp1 : "move must be LIROp1";

        CiValue input = ((LIROp1) op).operand();
        CiValue result = ((LIROp1) op).result();
        return input.isVariable() && result.isVariable() && input == from.operand && result == to.operand;
    }

    // optimization (especially for phi functions of nested loops):
    // assign same spill slot to non-intersecting intervals
    void combineSpilledIntervals(Interval interval) {
        if (interval.isSplitChild()) {
            // optimization is only suitable for split parents
            return;
        }

        Interval registerHint = interval.locationHint(false, allocator);
        if (registerHint == null) {
            // cur is not the target of a move -> otherwise registerHint would be set
            return;
        }
        assert registerHint.isSplitParent() : "register hint must be split parent";

        if (interval.spillState() != SpillState.NoOptimization || registerHint.spillState() != SpillState.NoOptimization) {
            // combining the stack slots for intervals where spill move optimization is applied
            // is not beneficial and would cause problems
            return;
        }

        int beginPos = interval.from();
        int endPos = interval.to();
        if (endPos > allocator.maxOpId() || isOdd(beginPos) || isOdd(endPos)) {
            // safety check that lirOpWithId is allowed
            return;
        }

        if (!isMove(allocator.instructionForId(beginPos), registerHint, interval) || !isMove(allocator.instructionForId(endPos), interval, registerHint)) {
            // cur and registerHint are not connected with two moves
            return;
        }

        Interval beginHint = registerHint.getSplitChildAtOpId(beginPos, LIRInstruction.OperandMode.Input, allocator);
        Interval endHint = registerHint.getSplitChildAtOpId(endPos, LIRInstruction.OperandMode.Output, allocator);
        if (beginHint == endHint || beginHint.to() != beginPos || endHint.from() != endPos) {
            // registerHint must be split -> otherwise the re-writing of use positions does not work
            return;
        }

        assert beginHint.location() != null : "must have register assigned";
        assert endHint.location() == null : "must not have register assigned";
        assert interval.firstUsage(RegisterPriority.MustHaveRegister) == beginPos : "must have use position at begin of interval because of move";
        assert endHint.firstUsage(RegisterPriority.MustHaveRegister) == endPos : "must have use position at begin of interval because of move";

        if (beginHint.location().isRegister()) {
            // registerHint is not spilled at beginPos -> so it would not be beneficial to immediately spill cur
            return;
        }
        assert registerHint.spillSlot() != null : "must be set when part of interval was spilled";

        // modify intervals such that cur gets the same stack slot as registerHint
        // delete use positions to prevent the intervals to get a register at beginning
        interval.setSpillSlot(registerHint.spillSlot());
        interval.removeFirstUsePos();
        endHint.removeFirstUsePos();
    }

    // allocate a physical register or memory location to an interval
    @Override
    boolean activateCurrent() {
        Interval interval = current;
        boolean result = true;

        if (C1XOptions.TraceLinearScanLevel >= 2) {
            TTY.println("+++++ activating interval " + interval.logString(allocator));
        }

        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.println(" splitParent: %s, insertMoveWhenActivated: %b", interval.splitParent().operandNumber, interval.insertMoveWhenActivated());
        }

        final CiValue operand = interval.operand;
        if (interval.location() != null && interval.location().isStackSlot()) {
            // activating an interval that has a stack slot assigned -> split it at first use position
            // used for method parameters
            if (C1XOptions.TraceLinearScanLevel >= 4) {
                TTY.println(" interval has spill slot assigned (method parameter) . split it before first use");
            }
            splitStackInterval(interval);
            result = false;

        } else {
            if (operand.isVariable() && allocator.operands.mustStartInMemory((CiVariable) operand)) {
                assert interval.location() == null : "register already assigned";
                allocator.assignSpillSlot(interval);

                if (!allocator.operands.mustStayInMemory((CiVariable) operand)) {
                    // activating an interval that must start in a stack slot but may get a register later
                    // used for lirRoundfp: rounding is done by store to stack and reload later
                    if (C1XOptions.TraceLinearScanLevel >= 4) {
                        TTY.println(" interval must start in stack slot . split it before first use");
                    }
                    splitStackInterval(interval);
                }

                result = false;
            } else if (interval.location() == null) {
                // interval has no assigned register -> normal allocation
                // (this is the normal case for most intervals)
                if (C1XOptions.TraceLinearScanLevel >= 4) {
                    TTY.println(" normal allocation of register");
                }

                // assign same spill slot to non-intersecting intervals
                combineSpilledIntervals(interval);

                initVarsForAlloc(interval);
                if (noAllocationPossible(interval) || !allocFreeRegister(interval)) {
                    // no empty register available.
                    // split and spill another interval so that this interval gets a register
                    allocLockedRegister(interval);
                }

                // spilled intervals need not be moved to active-list
                if (!interval.location().isRegister()) {
                    result = false;
                }
            }
        }

        // load spilled values that become active from stack slot to register
        if (interval.insertMoveWhenActivated()) {
            assert interval.isSplitChild();
            assert interval.currentSplitChild() != null;
            assert interval.currentSplitChild().operand != operand : "cannot insert move between same interval";
            if (C1XOptions.TraceLinearScanLevel >= 4) {
                TTY.println("Inserting move from interval %d to %d because insertMoveWhenActivated is set", interval.currentSplitChild().operandNumber, interval.operandNumber);
            }

            insertMove(interval.from(), interval.currentSplitChild(), interval);
        }
        interval.makeCurrentSplitChild();

        return result; // true = interval is moved to active list
    }

    public void finishAllocation() {
        // must be called when all intervals are allocated
        moveResolver.resolveAndAppendMoves();
    }
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/alloc/MoveResolver.java Wed Jun 08 08:59:54 2011 +0200 @@ -0,0 +1,367 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
 */
package com.oracle.max.graal.compiler.alloc;

import java.util.*;

import com.oracle.max.graal.compiler.*;
import com.oracle.max.graal.compiler.debug.*;
import com.oracle.max.graal.compiler.lir.*;
import com.oracle.max.graal.compiler.util.*;
import com.sun.cri.ci.*;

/**
 * Resolves a set of parallel data movements (register/stack-slot shuffles) required by the
 * linear scan allocator by emitting an ordered sequence of individual moves into an
 * {@link LIRInsertionBuffer}. Registers that are still pending as move sources are
 * "blocked" so that cycles in the move graph are detected; a cycle is broken by spilling
 * one source interval to a stack slot.
 *
 * @author Thomas Wuerthinger
 */
final class MoveResolver {

    private final LinearScan allocator;

    private LIRList insertList;
    private int insertIdx;
    private LIRInsertionBuffer insertionBuffer; // buffer where moves are inserted

    // parallel lists: mappingFrom.get(i) (or mappingFromOpr.get(i) when the source is a
    // constant and mappingFrom.get(i) is null) must be moved to mappingTo.get(i)
    private final List<Interval> mappingFrom;
    private final List<CiValue> mappingFromOpr;
    private final List<Interval> mappingTo;
    private boolean multipleReadsAllowed;
    private final int[] registerBlocked; // per-register count of pending reads

    // Returns how many pending moves still read the given register.
    private int registerBlocked(int reg) {
        return registerBlocked[reg];
    }

    // Adjusts the pending-read count of a register by +1 or -1.
    private void setRegisterBlocked(int reg, int direction) {
        assert direction == 1 || direction == -1 : "out of bounds";
        registerBlocked[reg] += direction;
    }

    // Permits the same source interval/register to appear in several mappings
    // (reset to false after each resolveMappings()).
    void setMultipleReadsAllowed() {
        multipleReadsAllowed = true;
    }

    boolean hasMappings() {
        return mappingFrom.size() > 0;
    }

    MoveResolver(LinearScan allocator) {

        this.allocator = allocator;
        this.multipleReadsAllowed = false;
        this.mappingFrom = new ArrayList<Interval>(8);
        this.mappingFromOpr = new ArrayList<CiValue>(8);
        this.mappingTo = new ArrayList<Interval>(8);
        this.insertIdx = -1;
        this.insertionBuffer = new LIRInsertionBuffer();
        this.registerBlocked = new int[allocator.registers.length];
        assert checkEmpty();
    }

    // Asserts that no mappings or blocked registers are left over; always returns true so
    // it can be used inside an assert and compiled out in product mode.
    boolean checkEmpty() {
        assert mappingFrom.size() == 0 && mappingFromOpr.size() == 0 && mappingTo.size() == 0 : "list must be empty before and after processing";
        for (int i = 0; i < allocator.registers.length; i++) {
            assert registerBlocked(i) == 0 : "register map must be empty before and after processing";
        }
        assert !multipleReadsAllowed : "must have default value";
        return true;
    }

    // Consistency checks on the mapping lists before resolution: equal lengths, no
    // duplicate sources (unless allowed), no duplicate targets, and stack-slot sources
    // disjoint from targets. Always returns true (assert-only helper).
    private boolean verifyBeforeResolve() {
        assert mappingFrom.size() == mappingFromOpr.size() : "length must be equal";
        assert mappingFrom.size() == mappingTo.size() : "length must be equal";
        assert insertList != null && insertIdx != -1 : "insert position not set";

        int i;
        int j;
        if (!multipleReadsAllowed) {
            for (i = 0; i < mappingFrom.size(); i++) {
                for (j = i + 1; j < mappingFrom.size(); j++) {
                    assert mappingFrom.get(i) == null || mappingFrom.get(i) != mappingFrom.get(j) : "cannot read from same interval twice";
                }
            }
        }

        for (i = 0; i < mappingTo.size(); i++) {
            for (j = i + 1; j < mappingTo.size(); j++) {
                assert mappingTo.get(i) != mappingTo.get(j) : "cannot write to same interval twice";
            }
        }

        HashSet<CiValue> usedRegs = new HashSet<CiValue>();
        if (!multipleReadsAllowed) {
            for (i = 0; i < mappingFrom.size(); i++) {
                Interval interval = mappingFrom.get(i);
                if (interval != null) {
                    boolean unique = usedRegs.add(interval.location());
                    assert unique : "cannot read from same register twice";
                }
            }
        }

        usedRegs.clear();
        for (i = 0; i < mappingTo.size(); i++) {
            Interval interval = mappingTo.get(i);
            boolean unique = usedRegs.add(interval.location());
            assert unique : "cannot write to same register twice";
        }

        usedRegs.clear();
        for (i = 0; i < mappingFrom.size(); i++) {
            Interval interval = mappingFrom.get(i);
            if (interval != null && !interval.location().isRegister()) {
                usedRegs.add(interval.location());
            }
        }
        for (i = 0; i < mappingTo.size(); i++) {
            Interval interval = mappingTo.get(i);
            assert !usedRegs.contains(interval.location()) || interval.location() == mappingFrom.get(i).location() : "stack slots used in mappingFrom must be disjoint to mappingTo";
        }

        return true;
    }

    // mark assignedReg and assignedRegHi of the interval as blocked
    private void blockRegisters(Interval interval) {
        CiValue location = interval.location();
        if (location.isRegister()) {
            int reg = location.asRegister().number;
            assert multipleReadsAllowed || registerBlocked(reg) == 0 : "register already marked as used";
            setRegisterBlocked(reg, 1);
        }
    }

    // mark assignedReg and assignedRegHi of the interval as unblocked
    private void unblockRegisters(Interval interval) {
        CiValue location = interval.location();
        if (location.isRegister()) {
            int reg = location.asRegister().number;
            assert registerBlocked(reg) > 0 : "register already marked as unused";
            setRegisterBlocked(reg, -1);
        }
    }

    /**
     * Checks if the {@linkplain Interval#location() location} of {@code to} is not blocked
     * or is only blocked by {@code from}.
     */
    private boolean safeToProcessMove(Interval from, Interval to) {
        CiValue fromReg = from != null ? from.location() : null;

        CiValue reg = to.location();
        if (reg.isRegister()) {
            if (registerBlocked(reg.asRegister().number) > 1 || (registerBlocked(reg.asRegister().number) == 1 && reg != fromReg)) {
                return false;
            }
        }

        return true;
    }

    private void createInsertionBuffer(LIRList list) {
        assert !insertionBuffer.initialized() : "overwriting existing buffer";
        insertionBuffer.init(list);
    }

    // Flushes the insertion buffer into its LIR list (if anything was inserted) and
    // clears the insert position.
    private void appendInsertionBuffer() {
        if (insertionBuffer.initialized()) {
            insertionBuffer.lirList().append(insertionBuffer);
        }
        assert !insertionBuffer.initialized() : "must be uninitialized now";

        insertList = null;
        insertIdx = -1;
    }

    // Emits an interval-to-interval move at the current insert position.
    private void insertMove(Interval fromInterval, Interval toInterval) {
        assert fromInterval.operand != toInterval.operand : "from and to interval equal: " + fromInterval;
        assert Util.archKindsEqual(fromInterval.kind(), toInterval.kind()) : "move between different types";
        assert insertList != null && insertIdx != -1 : "must setup insert position first";
        assert insertionBuffer.lirList() == insertList : "wrong insertion buffer";

        CiValue fromOpr = fromInterval.operand;
        CiValue toOpr = toInterval.operand;

        insertionBuffer.move(insertIdx, fromOpr, toOpr, null);

        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.println("MoveResolver: inserted move from %d (%s) to %d (%s)", fromInterval.operandNumber, fromInterval.location(), toInterval.operandNumber, toInterval.location());
        }
    }

    // Emits a constant-to-interval move at the current insert position.
    private void insertMove(CiValue fromOpr, Interval toInterval) {
        assert Util.archKindsEqual(fromOpr.kind, toInterval.kind()) : "move between different types";
        assert insertList != null && insertIdx != -1 : "must setup insert position first";
        assert insertionBuffer.lirList() == insertList : "wrong insertion buffer";

        CiValue toOpr = toInterval.operand;
        insertionBuffer.move(insertIdx, fromOpr, toOpr, null);

        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.print("MoveResolver: inserted move from constant %s to %d (%s)", fromOpr, toInterval.operandNumber, toInterval.location());
        }
    }

    // Orders and emits all pending mappings. A mapping is emitted only when its target is
    // not blocked by a pending read; if no mapping can be emitted, a cycle exists and is
    // broken by spilling one register source to a (shared or fresh) stack slot.
    private void resolveMappings() {
        //if (C1XOptions.TraceLinearScanLevel >= 4) TTY.println("MoveResolver: resolving mappings for Block B%d, index %d", insertList.block() != null ? insertList.block().blockID : -1, insertIdx);
        assert verifyBeforeResolve();

        // Block all registers that are used as input operands of a move.
        // When a register is blocked, no move to this register is emitted.
        // This is necessary for detecting cycles in moves.
        int i;
        for (i = mappingFrom.size() - 1; i >= 0; i--) {
            Interval fromInterval = mappingFrom.get(i);
            if (fromInterval != null) {
                blockRegisters(fromInterval);
            }
        }

        int spillCandidate = -1;
        while (mappingFrom.size() > 0) {
            boolean processedInterval = false;

            for (i = mappingFrom.size() - 1; i >= 0; i--) {
                Interval fromInterval = mappingFrom.get(i);
                Interval toInterval = mappingTo.get(i);

                if (safeToProcessMove(fromInterval, toInterval)) {
                    // this interval can be processed because target is free
                    if (fromInterval != null) {
                        insertMove(fromInterval, toInterval);
                        unblockRegisters(fromInterval);
                    } else {
                        insertMove(mappingFromOpr.get(i), toInterval);
                    }
                    mappingFrom.remove(i);
                    mappingFromOpr.remove(i);
                    mappingTo.remove(i);

                    processedInterval = true;
                } else if (fromInterval != null && fromInterval.location().isRegister()) {
                    // this interval cannot be processed now because target is not free
                    // it starts in a register, so it is a possible candidate for spilling
                    spillCandidate = i;
                }
            }

            if (!processedInterval) {
                // no move could be processed because there is a cycle in the move list
                // (e.g. r1 -> r2, r2 -> r1), so one interval must be spilled to memory
                assert spillCandidate != -1 : "no interval in register for spilling found";

                // create a new spill interval and assign a stack slot to it
                Interval fromInterval = mappingFrom.get(spillCandidate);
                Interval spillInterval = allocator.createDerivedInterval(fromInterval);
                spillInterval.setKind(fromInterval.kind());

                // add a dummy range because real position is difficult to calculate
                // Note: this range is a special case when the integrity of the allocation is checked
                spillInterval.addRange(1, 2);

                // do not allocate a new spill slot for temporary interval, but
                // use spill slot assigned to fromInterval. Otherwise moves from
                // one stack slot to another can happen (not allowed by LIRAssembler)
                CiStackSlot spillSlot = fromInterval.spillSlot();
                if (spillSlot == null) {
                    spillSlot = allocator.allocateSpillSlot(spillInterval.kind());
                    fromInterval.setSpillSlot(spillSlot);
                }
                spillInterval.assignLocation(spillSlot);

                if (C1XOptions.TraceLinearScanLevel >= 4) {
                    TTY.println("created new Interval %s for spilling", spillInterval.operand);
                }

                // insert a move from register to stack and update the mapping
                insertMove(fromInterval, spillInterval);
                mappingFrom.set(spillCandidate, spillInterval);
                unblockRegisters(fromInterval);
            }
        }

        // reset to default value
        multipleReadsAllowed = false;

        // check that all intervals have been processed
        assert checkEmpty();
    }

    // Sets the initial insert position; must only be called when no position is set yet.
    void setInsertPosition(LIRList insertList, int insertIdx) {
        //if (C1XOptions.TraceLinearScanLevel >= 4) TTY.println("MoveResolver: setting insert position to Block B%d, index %d", insertList.block() != null ? insertList.block().blockID : -1, insertIdx);
        assert this.insertList == null && this.insertIdx == -1 : "use moveInsertPosition instead of setInsertPosition when data already set";

        createInsertionBuffer(insertList);
        this.insertList = insertList;
        this.insertIdx = insertIdx;
    }

    // Moves the insert position, first resolving any pending mappings at the old position
    // and flushing the insertion buffer when the target LIR list changes.
    void moveInsertPosition(LIRList insertList, int insertIdx) {
        //if (C1XOptions.TraceLinearScanLevel >= 4) TTY.println("MoveResolver: moving insert position to Block B%d, index %d", (insertList != null && insertList.block() != null) ? insertList.block().blockID : -1, insertIdx);

        if (this.insertList != null && (this.insertList != insertList || this.insertIdx != insertIdx)) {
            // insert position changed -> resolve current mappings
            resolveMappings();
        }

        if (this.insertList != insertList) {
            // block changed -> append insertionBuffer because it is
            // bound to a specific block and create a new insertionBuffer
            appendInsertionBuffer();
            createInsertionBuffer(insertList);
        }

        this.insertList = insertList;
        this.insertIdx = insertIdx;
    }

    // Queues an interval-to-interval move for later resolution.
    void addMapping(Interval fromInterval, Interval toInterval) {
        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.println("MoveResolver: adding mapping from interval %d (%s) to interval %d (%s)", fromInterval.operandNumber, fromInterval.location(), toInterval.operandNumber, toInterval.location());
        }

        assert fromInterval.operand != toInterval.operand : "from and to interval equal: " + fromInterval;
        assert Util.archKindsEqual(fromInterval.kind(), toInterval.kind());
        mappingFrom.add(fromInterval);
        mappingFromOpr.add(CiValue.IllegalValue);
        mappingTo.add(toInterval);
    }

    // Queues a constant-to-interval move for later resolution (fromInterval slot is null).
    void addMapping(CiValue fromOpr, Interval toInterval) {
        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.println("MoveResolver: adding mapping from %s to %d (%s)", fromOpr, toInterval.operandNumber, toInterval.location());
        }
        assert fromOpr.isConstant() : "only for constants";

        mappingFrom.add(null);
        mappingFromOpr.add(fromOpr);
        mappingTo.add(toInterval);
    }

    // Resolves any remaining mappings and flushes the insertion buffer.
    void resolveAndAppendMoves() {
        if (hasMappings()) {
            resolveMappings();
        }
        appendInsertionBuffer();
    }
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/alloc/OperandPool.java Wed Jun 08 08:59:54 2011 +0200 @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.max.graal.compiler.alloc; + +import java.util.*; + +import com.oracle.max.graal.compiler.*; +import com.oracle.max.graal.compiler.ir.*; +import com.sun.cri.ci.*; + +/** + * An ordered, 0-based indexable pool of instruction operands for a method being compiled. + * The physical {@linkplain CiRegister registers} of the platform occupy the front of the + * pool (starting at index 0) followed by {@linkplain CiVariable variable} operands. + * The index of an operand in the pool is its {@linkplain #operandNumber(CiValue) operand number}. + * + * In the original HotSpot C1 source code, this pool corresponds to the + * "flat register file" mentioned in c1_LinearScan.cpp. 
+ * + * @author Doug Simon + */ +public final class OperandPool { + + public static final int INITIAL_VARIABLE_CAPACITY = 20; + + /** + * The physical registers occupying the head of the operand pool. This is the complete + * {@linkplain CiArchitecture#registers register set} of the target architecture, not + * just the allocatable registers. + */ + private final CiRegister[] registers; + + /** + * The variable operands allocated from this pool. The {@linkplain #operandNumber(CiValue) number} + * of the first variable operand in this pool is one greater than the number of the last + * register operand in the pool. + */ + private final ArrayList<CiVariable> variables; + + /** + * Map from a {@linkplain CiVariable#index variable index} to the instruction whose result is stored in the denoted variable. + * This map is only populated and used if {@link C1XOptions#DetailedAsserts} is {@code true}. + */ + private final ArrayList<Value> variableDefs; + + /** + * The {@linkplain #operandNumber(CiValue) number} of the first variable operand + * {@linkplain #newVariable(CiKind) allocated} from this pool. + */ + private final int firstVariableNumber; + + /** + * Records which variable operands have the {@link VariableFlag#MustBeByteRegister} flag set. + */ + private CiBitMap mustBeByteRegister; + + /** + * Records which variable operands have the {@link VariableFlag#MustStartInMemory} flag set. + */ + private CiBitMap mustStartInMemory; + + /** + * Records which variable operands have the {@link VariableFlag#MustStayInMemory} flag set. + */ + private CiBitMap mustStayInMemory; + + /** + * Flags that can be set for {@linkplain CiValue#isVariable() variable} operands. + */ + public enum VariableFlag { + /** + * Denotes a variable that needs to be assigned a memory location + * at the beginning, but may then be loaded in a register. 
+ */ + MustStartInMemory, + + /** + * Denotes a variable that needs to be assigned a memory location + * at the beginning and never subsequently loaded in a register. + */ + MustStayInMemory, + + /** + * Denotes a variable that must be assigned to a byte-sized register. + */ + MustBeByteRegister; + + public static final VariableFlag[] VALUES = values(); + } + + private static CiBitMap set(CiBitMap map, CiVariable variable) { + if (map == null) { + int length = CiBitMap.roundUpLength(variable.index + 1); + map = new CiBitMap(length); + } else if (map.size() <= variable.index) { + int length = CiBitMap.roundUpLength(variable.index + 1); + map.grow(length); + } + map.set(variable.index); + return map; + } + + private static boolean get(CiBitMap map, CiVariable variable) { + if (map == null || map.size() <= variable.index) { + return false; + } + return map.get(variable.index); + } + + /** + * Creates a new operand pool. + * + * @param target description of the target architecture for a compilation + */ + public OperandPool(CiTarget target) { + CiRegister[] registers = target.arch.registers; + this.firstVariableNumber = registers.length; + this.registers = registers; + variables = new ArrayList<CiVariable>(INITIAL_VARIABLE_CAPACITY); + variableDefs = C1XOptions.DetailedAsserts ? new ArrayList<Value>(INITIAL_VARIABLE_CAPACITY) : null; + } + + /** + * Creates a new {@linkplain CiVariable variable} operand. + * + * @param kind the kind of the variable + * @return a new variable + */ + public CiVariable newVariable(CiKind kind) { + return newVariable(kind, kind == CiKind.Boolean || kind == CiKind.Byte ? VariableFlag.MustBeByteRegister : null); + } + + /** + * Creates a new {@linkplain CiVariable variable} operand. 
+ * + * @param kind the kind of the variable + * @param flag a flag that is set for the new variable operand (ignored if {@code null}) + * @return a new variable operand + */ + public CiVariable newVariable(CiKind kind, VariableFlag flag) { + assert kind != CiKind.Void; + int varIndex = variables.size(); + CiVariable var = CiVariable.get(kind, varIndex); + if (flag == VariableFlag.MustBeByteRegister) { + mustBeByteRegister = set(mustBeByteRegister, var); + } else if (flag == VariableFlag.MustStartInMemory) { + mustStartInMemory = set(mustStartInMemory, var); + } else if (flag == VariableFlag.MustStayInMemory) { + mustStayInMemory = set(mustStayInMemory, var); + } else { + assert flag == null; + } + variables.add(var); + return var; + } + + /** + * Gets the unique number for an operand contained in this pool. + * + * + * @param operand an operand + * @return the unique number for {@code operand} in the range {@code [0 .. size())} + */ + public int operandNumber(CiValue operand) { + if (operand.isRegister()) { + int number = operand.asRegister().number; + assert number < firstVariableNumber; + return number; + } + assert operand.isVariable(); + return firstVariableNumber + ((CiVariable) operand).index; + } + + /** + * Gets the operand in this pool denoted by a given operand number. + * + * @param operandNumber a value that must be in the range {@code [0 .. size())} + * @return the operand in this pool denoted by {@code operandNumber} + */ + public CiValue operandFor(int operandNumber) { + if (operandNumber < firstVariableNumber) { + assert operandNumber >= 0; + return registers[operandNumber].asValue(); + } + int index = operandNumber - firstVariableNumber; + CiVariable variable = variables.get(index); + assert variable.index == index; + return variable; + } + + /** + * Records that the result of {@code instruction} is stored in {@code result}. 
+ * + * @param result the variable storing the result of {@code instruction} + * @param instruction an instruction that produces a result (i.e. pushes a value to the stack) + */ + public void recordResult(CiVariable result, Value instruction) { + while (variableDefs.size() <= result.index) { + variableDefs.add(null); + } + variableDefs.set(result.index, instruction); + } + + /** + * Gets the instruction whose result is recorded in a given variable. + * + * @param result the variable storing the result of an instruction + * @return the instruction that stores its result in {@code result} + */ + public Value instructionForResult(CiVariable result) { + if (variableDefs.size() > result.index) { + return variableDefs.get(result.index); + } + return null; + } + + public boolean mustStartInMemory(CiVariable operand) { + return get(mustStartInMemory, operand) || get(mustStayInMemory, operand); + } + + public boolean mustStayInMemory(CiVariable operand) { + return get(mustStayInMemory, operand); + } + + public boolean mustBeByteRegister(CiValue operand) { + return get(mustBeByteRegister, (CiVariable) operand); + } + + public void setMustBeByteRegister(CiVariable operand) { + mustBeByteRegister = set(mustBeByteRegister, operand); + } + + /** + * Gets the number of operands in this pool. This value will increase by 1 for + * each new variable operand {@linkplain #newVariable(CiKind) allocated} from this pool. + */ + public int size() { + return firstVariableNumber + variables.size(); + } + + /** + * Gets the highest operand number for a register operand in this pool. This value will + * never change for the lifetime of this pool. + */ + public int maxRegisterNumber() { + return firstVariableNumber - 1; + } +}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/alloc/Range.java	Wed Jun 08 08:59:54 2011 +0200
@@ -0,0 +1,119 @@
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package com.oracle.max.graal.compiler.alloc;


/**
 * Represents a range of integers from a start (inclusive) to an end (exclusive).
 * Ranges form singly linked, {@link #EndMarker}-terminated lists via {@link #next};
 * the list walks in {@link #intersectsAt(Range)} rely on this structure.
 *
 * @author Thomas Wuerthinger
 */
public final class Range {

    /**
     * Sentinel terminating every range list. It is an empty range at
     * {@code Integer.MAX_VALUE} and must never appear as a payload element.
     */
    public static final Range EndMarker = new Range(Integer.MAX_VALUE, Integer.MAX_VALUE, null);

    /**
     * The start of the range, inclusive.
     */
    public int from;

    /**
     * The end of the range, exclusive.
     */
    public int to;

    /**
     * A link to allow the range to be put into a singly linked list.
     */
    public Range next;

    /**
     * Determines if this range list intersects the given range list.
     *
     * @param r the head of the other range list (must not be {@code null} or {@link #EndMarker})
     * @return {@code true} iff some position is covered by both lists
     */
    boolean intersects(Range r) {
        return intersectsAt(r) != -1;
    }


    /**
     * Creates a new range.
     *
     * @param from the start of the range, inclusive
     * @param to the end of the range, exclusive
     * @param next link to the next range in a linked list
     */
    Range(int from, int to, Range next) {
        this.from = from;
        this.to = to;
        this.next = next;
    }

    /**
     * Finds the first position at which this range list intersects the list headed by
     * {@code r2}. Walks both lists in parallel, always advancing the list whose current
     * range ends first; empty ranges ({@code from == to}) are skipped.
     *
     * @param r2 the head of the other range list
     * @return the lowest intersecting position, or -1 if the lists do not intersect
     */
    int intersectsAt(Range r2) {
        Range r1 = this;

        assert r2 != null : "null ranges not allowed";
        assert r1 != EndMarker && r2 != EndMarker : "empty ranges not allowed";

        do {
            if (r1.from < r2.from) {
                // r1 starts first: if it ends before r2 starts, advance r1, else they overlap at r2.from.
                if (r1.to <= r2.from) {
                    r1 = r1.next;
                    if (r1 == EndMarker) {
                        return -1;
                    }
                } else {
                    return r2.from;
                }
            } else {
                if (r2.from < r1.from) {
                    // symmetric case: r2 starts first.
                    if (r2.to <= r1.from) {
                        r2 = r2.next;
                        if (r2 == EndMarker) {
                            return -1;
                        }
                    } else {
                        return r1.from;
                    }
                } else { // r1.from() == r2.from()
                    // equal starts: skip whichever range is empty; otherwise they intersect here.
                    if (r1.from == r1.to) {
                        r1 = r1.next;
                        if (r1 == EndMarker) {
                            return -1;
                        }
                    } else {
                        if (r2.from == r2.to) {
                            r2 = r2.next;
                            if (r2 == EndMarker) {
                                return -1;
                            }
                        } else {
                            return r1.from;
                        }
                    }
                }
            }
        } while (true);
    }

    @Override
    public String toString() {
        return "[" + from + ", " + to + "]";
    }
}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/alloc/RegisterVerifier.java	Wed Jun 08 08:59:54 2011 +0200
@@ -0,0 +1,277 @@
/*
 * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package com.oracle.max.graal.compiler.alloc;

import java.util.*;

import com.oracle.max.graal.compiler.*;
import com.oracle.max.graal.compiler.debug.*;
import com.oracle.max.graal.compiler.lir.*;
import com.oracle.max.graal.compiler.util.*;
import com.sun.cri.ci.*;

/**
 * Sanity-checks the result of linear scan register allocation by simulating, block by
 * block, which interval each physical register holds. A per-block register state
 * (register number -> interval) is propagated along control-flow edges to a fixed
 * point; a mismatch between an instruction's expected and simulated register contents
 * raises a {@link CiBailout}.
 *
 * @author Thomas Wuerthinger
 */
final class RegisterVerifier {

    LinearScan allocator;
    List<LIRBlock> workList; // all blocks that must be processed
    ArrayMap<Interval[]> savedStates; // saved information of previous check

    // simplified access to methods of LinearScan
    C1XCompilation compilation() {
        return allocator.compilation;
    }

    Interval intervalAt(CiValue operand) {
        return allocator.intervalFor(operand);
    }

    // currently, only registers are processed
    int stateSize() {
        return allocator.operands.maxRegisterNumber() + 1;
    }

    // accessors
    Interval[] stateForBlock(LIRBlock block) {
        return savedStates.get(block.blockID());
    }

    void setStateForBlock(LIRBlock block, Interval[] savedState) {
        savedStates.put(block.blockID(), savedState);
    }

    // enqueue a block for (re-)processing unless it is already pending
    void addToWorkList(LIRBlock block) {
        if (!workList.contains(block)) {
            workList.add(block);
        }
    }

    RegisterVerifier(LinearScan allocator) {
        this.allocator = allocator;
        workList = new ArrayList<LIRBlock>(16);
        this.savedStates = new ArrayMap<Interval[]>();

    }

    /**
     * Verifies the allocation starting at {@code start}. Seeds the initial state from the
     * method's incoming register arguments, then iterates the work list until no block's
     * input state changes.
     *
     * @param start the entry block of the method
     */
    void verify(LIRBlock start) {
        // setup input registers (method arguments) for first block
        Interval[] inputState = new Interval[stateSize()];
        CiCallingConvention args = compilation().frameMap().incomingArguments();
        for (int n = 0; n < args.locations.length; n++) {
            CiValue operand = args.locations[n];
            if (operand.isRegister()) {
                CiValue reg = operand;
                Interval interval = intervalAt(reg);
                inputState[reg.asRegister().number] = interval;
            }
        }

        setStateForBlock(start, inputState);
        addToWorkList(start);

        // main loop for verification
        do {
            LIRBlock block = workList.get(0);
            workList.remove(0);

            processBlock(block);
        } while (!workList.isEmpty());
    }

    /**
     * Simulates one block: copies its saved input state, replays the block's LIR
     * operations on it, and propagates the resulting state to all successors.
     */
    private void processBlock(LIRBlock block) {
        if (C1XOptions.TraceLinearScanLevel >= 2) {
            TTY.println();
            TTY.println("processBlock B%d", block.blockID());
        }

        // must copy state because it is modified
        Interval[] inputState = copy(stateForBlock(block));

        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.println("Input-State of intervals:");
            TTY.print("    ");
            for (int i = 0; i < stateSize(); i++) {
                if (inputState[i] != null) {
                    TTY.print(" %4d", inputState[i].operandNumber);
                } else {
                    TTY.print("   __");
                }
            }
            TTY.println();
            TTY.println();
        }

        // process all operations of the block
        processOperations(block.lir(), inputState);

        // iterate all successors
        for (LIRBlock succ : block.blockSuccessors()) {
            processSuccessor(succ, inputState);
        }
    }

    /**
     * Merges {@code inputState} into the saved state of successor {@code block}. Registers
     * whose contents disagree are invalidated ({@code null}); if that changes the saved
     * state, the successor is re-queued for processing (fixed-point iteration).
     */
    private void processSuccessor(LIRBlock block, Interval[] inputState) {
        Interval[] savedState = stateForBlock(block);

        if (savedState != null) {
            // this block was already processed before.
            // check if new inputState is consistent with savedState
            boolean savedStateCorrect = true;
            for (int i = 0; i < stateSize(); i++) {
                if (inputState[i] != savedState[i]) {
                    // current inputState and previous savedState assume a different
                    // interval in this register -> assume that this register is invalid
                    if (savedState[i] != null) {
                        // invalidate old calculation only if it assumed that
                        // register was valid. when the register was already invalid,
                        // then the old calculation was correct.
                        savedStateCorrect = false;
                        savedState[i] = null;

                        if (C1XOptions.TraceLinearScanLevel >= 4) {
                            TTY.println("processSuccessor B%d: invalidating slot %d", block.blockID(), i);
                        }
                    }
                }
            }

            if (savedStateCorrect) {
                // already processed block with correct inputState
                if (C1XOptions.TraceLinearScanLevel >= 2) {
                    TTY.println("processSuccessor B%d: previous visit already correct", block.blockID());
                }
            } else {
                // must re-visit this block
                if (C1XOptions.TraceLinearScanLevel >= 2) {
                    TTY.println("processSuccessor B%d: must re-visit because input state changed", block.blockID());
                }
                addToWorkList(block);
            }

        } else {
            // block was not processed before, so set initial inputState
            if (C1XOptions.TraceLinearScanLevel >= 2) {
                TTY.println("processSuccessor B%d: initial visit", block.blockID());
            }

            setStateForBlock(block, copy(inputState));
            addToWorkList(block);
        }
    }

    Interval[] copy(Interval[] inputState) {
        return inputState.clone();
    }

    /**
     * Records in the simulated state that {@code location} (if it is a register) now holds
     * {@code interval}; {@code null} marks the register contents as unknown/invalid.
     */
    void statePut(Interval[] inputState, CiValue location, Interval interval) {
        if (location != null && location.isRegister()) {
            CiRegister reg = location.asRegister();
            int regNum = reg.number;
            if (interval != null) {
                if (C1XOptions.TraceLinearScanLevel >= 4) {
                    TTY.println("        %s = %s", reg, interval.operand);
                }
            } else if (inputState[regNum] != null) {
                if (C1XOptions.TraceLinearScanLevel >= 4) {
                    TTY.println("        %s = null", reg);
                }
            }

            inputState[regNum] = interval;
        }
    }

    /**
     * Checks that the simulated state agrees that register {@code reg} holds
     * {@code interval}; bails out of the compilation otherwise. Always returns
     * {@code true} so it can be used inside an {@code assert}.
     */
    boolean checkState(Interval[] inputState, CiValue reg, Interval interval) {
        if (reg != null && reg.isRegister()) {
            if (inputState[reg.asRegister().number] != interval) {
                throw new CiBailout("!! Error in register allocation: register " + reg + " does not contain interval " + interval.operand + " but interval " + inputState[reg.asRegister().number]);
            }
        }
        return true;
    }

    /**
     * Replays all operations of a block on {@code inputState}: checks input operands
     * against the state, kills caller-saved registers at calls, and records temp and
     * output operand locations.
     */
    void processOperations(LIRList ops, Interval[] inputState) {
        // visit all instructions of the block
        for (int i = 0; i < ops.length(); i++) {
            LIRInstruction op = ops.at(i);

            if (C1XOptions.TraceLinearScanLevel >= 4) {
                TTY.println(op.toStringWithIdPrefix());
            }

            // check if input operands are correct
            int n = op.operandCount(LIRInstruction.OperandMode.Input);
            for (int j = 0; j < n; j++) {
                CiValue operand = op.operandAt(LIRInstruction.OperandMode.Input, j);
                if (allocator.isProcessed(operand)) {
                    Interval interval = intervalAt(operand);
                    if (op.id != -1) {
                        interval = interval.getSplitChildAtOpId(op.id, LIRInstruction.OperandMode.Input, allocator);
                    }

                    assert checkState(inputState, interval.location(), interval.splitParent());
                }
            }

            // invalidate all caller save registers at calls
            if (op.hasCall) {
                for (CiRegister r : allocator.compilation.registerConfig.getCallerSaveRegisters()) {
                    statePut(inputState, r.asValue(), null);
                }
            }

            // set temp operands (some operations use temp operands also as output operands, so can't set them null)
            n = op.operandCount(LIRInstruction.OperandMode.Temp);
            for (int j = 0; j < n; j++) {
                CiValue operand = op.operandAt(LIRInstruction.OperandMode.Temp, j);
                if (allocator.isProcessed(operand)) {
                    Interval interval = intervalAt(operand);
                    assert interval != null : "Could not find interval for operand " + operand;
                    if (op.id != -1) {
                        interval = interval.getSplitChildAtOpId(op.id, LIRInstruction.OperandMode.Temp, allocator);
                    }

                    statePut(inputState, interval.location(), interval.splitParent());
                }
            }

            // set output operands
            n = op.operandCount(LIRInstruction.OperandMode.Output);
            for (int j = 0; j < n; j++) {
                CiValue operand = op.operandAt(LIRInstruction.OperandMode.Output, j);
                if (allocator.isProcessed(operand)) {
                    Interval interval = intervalAt(operand);
                    if (op.id != -1) {
                        interval = interval.getSplitChildAtOpId(op.id, LIRInstruction.OperandMode.Output, allocator);
                    }

                    statePut(inputState, interval.location(), interval.splitParent());
                }
            }
        }
    }
}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/asm/ExceptionInfo.java	Wed Jun 08 08:59:54 2011 +0200
@@ -0,0 +1,38 @@
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package com.oracle.max.graal.compiler.asm;

import com.oracle.max.graal.compiler.lir.*;

/**
 * Immutable triple associating a machine-code offset with the exception edge
 * and bytecode index of the instruction emitted there. Consumed when exception
 * handler tables are recorded for a target method.
 */
public class ExceptionInfo {

    /** Offset in the emitted code of the instruction this entry describes. */
    public final int codeOffset;

    /** Block targeted when an exception is raised at {@link #codeOffset}. */
    public final LIRBlock exceptionEdge;

    /** Bytecode index of the originating instruction (semantics per caller — not validated here). */
    public final int bci;

    /**
     * Creates an exception-site record.
     *
     * @param pcOffset offset in the emitted code of the instruction
     * @param exceptionEdge block handling the exceptional control flow
     * @param bci bytecode index of the instruction
     */
    public ExceptionInfo(int pcOffset, LIRBlock exceptionEdge, int bci) {
        this.bci = bci;
        this.exceptionEdge = exceptionEdge;
        this.codeOffset = pcOffset;
    }
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.max.graal.compiler/src/com/oracle/max/graal/compiler/asm/TargetMethodAssembler.java Wed Jun 08 08:59:54 2011 +0200 @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2011, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.max.graal.compiler.asm; + +import java.util.*; + +import com.oracle.max.asm.*; +import com.oracle.max.graal.compiler.*; +import com.oracle.max.graal.compiler.debug.*; +import com.oracle.max.graal.compiler.lir.*; +import com.oracle.max.graal.compiler.util.*; +import com.sun.cri.ci.*; +import com.sun.cri.ri.*; + +public class TargetMethodAssembler { + public final AbstractAssembler asm; + public final CiTargetMethod targetMethod; + public List<ExceptionInfo> exceptionInfoList; + protected int lastSafepointPos; + + public TargetMethodAssembler(AbstractAssembler asm) { + this.asm = asm; + this.targetMethod = new CiTargetMethod(); + } + + public void setFrameSize(int frameSize) { + targetMethod.setFrameSize(frameSize); + } + + public CiTargetMethod.Mark recordMark(Object id, CiTargetMethod.Mark[] references) { + return targetMethod.recordMark(asm.codeBuffer.position(), id, references); + } + + public void blockComment(String s) { + targetMethod.addAnnotation(new CiTargetMethod.CodeComment(asm.codeBuffer.position(), s)); + } + + public CiTargetMethod finishTargetMethod(Object name, RiRuntime runtime, int registerRestoreEpilogueOffset, boolean isStub) { + // Install code, data and frame size + targetMethod.setTargetCode(asm.codeBuffer.close(false), asm.codeBuffer.position()); + targetMethod.setRegisterRestoreEpilogueOffset(registerRestoreEpilogueOffset); + + // Record exception handlers if they exist + if (exceptionInfoList != null) { + for (ExceptionInfo ei : exceptionInfoList) { + int codeOffset = ei.codeOffset; + targetMethod.recordExceptionHandler(codeOffset, -1, 0, ei.exceptionEdge.blockEntryPco, -1, null); + } + } + + if (C1XOptions.PrintMetrics) { + C1XMetrics.TargetMethods++; + C1XMetrics.CodeBytesEmitted += targetMethod.targetCodeSize(); + C1XMetrics.SafepointsEmitted += targetMethod.safepoints.size(); + C1XMetrics.DirectCallSitesEmitted += targetMethod.directCalls.size(); + C1XMetrics.IndirectCallSitesEmitted += 
targetMethod.indirectCalls.size(); + C1XMetrics.DataPatches += targetMethod.dataReferences.size(); + C1XMetrics.ExceptionHandlersEmitted += targetMethod.exceptionHandlers.size(); + } + + if (C1XOptions.PrintAssembly && !TTY.isSuppressed() && !isStub) { + Util.printSection("Target Method", Util.SECTION_CHARACTER); + TTY.println("Name: " + name); + TTY.println("Frame size: " + targetMethod.frameSize()); + TTY.println("Register size: " + asm.target.arch.registerReferenceMapBitCount); + + if (C1XOptions.PrintCodeBytes) { + Util.printSection("Code", Util.SUB_SECTION_CHARACTER); + TTY.println("Code: %d bytes", targetMethod.targetCodeSize()); + Util.printBytes(0L, targetMethod.targetCode(), 0, targetMethod.targetCodeSize(), C1XOptions.PrintAssemblyBytesPerLine); + } + + Util.printSection("Disassembly", Util.SUB_SECTION_CHARACTER); + String disassembly = runtime.disassemble(targetMethod); + TTY.println(disassembly); + boolean noDis = disassembly == null || disassembly.length() == 0; + + Util.printSection("Safepoints", Util.SUB_SECTION_CHARACTER); + for (CiTargetMethod.Safepoint x : targetMethod.safepoints) { + TTY.println(x.toString()); + if (noDis && x.debugInfo != null) { + TTY.println(CiUtil.indent(x.debugInfo.toString(), " ")); + } + } + + Util.printSection("Direct Call Sites", Util.SUB_SECTION_CHARACTER); + for (CiTargetMethod.Call x : targetMethod.directCalls)