oldCodeGenerator.cpp

00001 /* Copyright 1994 - 1996 LongView Technologies L.L.C. $Revision: 1.27 $ */
00002 /* Copyright (c) 2006, Sun Microsystems, Inc.
00003 All rights reserved.
00004 
00005 Redistribution and use in source and binary forms, with or without modification, are permitted provided that the 
00006 following conditions are met:
00007 
00008     * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
00009     * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following 
00010           disclaimer in the documentation and/or other materials provided with the distribution.
00011     * Neither the name of Sun Microsystems nor the names of its contributors may be used to endorse or promote products derived 
00012           from this software without specific prior written permission.
00013 
00014 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT 
00015 NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 
00016 THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 
00017 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
00018 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 
00019 OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
00020 
00021 
00022 */
00023 
00024 # include "incls/_precompiled.incl"
00025 # ifdef DELTA_COMPILER
00026 # include "incls/_oldCodeGenerator.cpp.incl"
00027 
00028 
00029 static bool bb_needs_jump;
00030 // true if basic block needs a jump at the end to its successor, false otherwise
00031 // Note: most gen() nodes with more than one successor are implemented such that
00032 //       next() is the fall-through case. If that's not the case, an extra jump
00033 //       has to be generated (via endOfBasicBlock()). However, some of the nodes
00034 //       do explicit jumps to all successors to accommodate arbitrary node
00035 //       reordering, in which case they may set the flag to false (it is auto-
00036 //       matically set to true for each node).
00037 // This flag should go away as soon as all nodes with more than one exit are
00038 // implemented correctly (i.e., do all the jumping themselves).
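// Illustrative sketch (hypothetical node, not part of this file): a gen() method
// that emits explicit jumps to all of its successors clears the flag so that
// endOfBasicBlock() does not add a redundant jump (compare the commented-out
// BranchNode::gen() variant further below):
//
//   void SomeTwoWayNode::gen() {
//     BasicNode::gen();
//     theMacroAssm->jcc(Assembler::equal, next(1)->label); // explicit jump to the side exit
//     theMacroAssm->jmp(next()->label);                    // explicit jump to the fall-through successor
//     bb_needs_jump = false;                               // suppress the extra jump from endOfBasicBlock()
//   }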
00039 
00040 
00041 void OldCodeGenerator::beginOfBasicBlock(Node* node) {
00042   theMacroAssm->bind(node->label);
00043 }
00044 
00045 
00046 void OldCodeGenerator::endOfBasicBlock(Node* node) {
00047   if (bb_needs_jump && node->next() != NULL) theMacroAssm->jmp(node->next()->label);
00048 }
00049 
00050 
00051 void OldCodeGenerator::beginOfNode(Node* node) {
00052   // assume that all nodes that may terminate a basic block need a jump at the end
00053   // (turned off for individual nodes by their gen() methods if no jump is needed
00054   // because they generate code patterns that generate the jumps already)
00055   bb_needs_jump = true;
00056 }
00057 
00058 
00059 void OldCodeGenerator::endOfNode(Node* node) {
00060   // nothing to do
00061 }
00062 
00063 
00064 void OldCodeGenerator::aPrologueNode(PrologueNode* node)                { node->gen(); }
00065 void OldCodeGenerator::aLoadIntNode(LoadIntNode* node)                  { node->gen(); }
00066 void OldCodeGenerator::aLoadOffsetNode(LoadOffsetNode* node)            { node->gen(); }
00067 void OldCodeGenerator::aLoadUplevelNode(LoadUplevelNode* node)          { node->gen(); }
00068 void OldCodeGenerator::anAssignNode(AssignNode* node)                   { node->gen(); }
00069 void OldCodeGenerator::aStoreOffsetNode(StoreOffsetNode* node)          { node->gen(); }
00070 void OldCodeGenerator::aStoreUplevelNode(StoreUplevelNode* node)        { node->gen(); }
00071 void OldCodeGenerator::anArithRRNode(ArithRRNode* node)                 { node->gen(); }
00072 void OldCodeGenerator::aFloatArithRRNode(FloatArithRRNode* node)        { node->gen(); }
00073 void OldCodeGenerator::aFloatUnaryArithNode(FloatUnaryArithNode* node)  { node->gen(); }
00074 void OldCodeGenerator::anArithRCNode(ArithRCNode* node)                 { node->gen(); }
00075 void OldCodeGenerator::aTArithRRNode(TArithRRNode* node)                { node->gen(); }
00076 void OldCodeGenerator::aContextCreateNode(ContextCreateNode* node)      { node->gen(); }
00077 void OldCodeGenerator::aContextInitNode(ContextInitNode* node)          { node->gen(); }
00078 void OldCodeGenerator::aContextZapNode(ContextZapNode* node)            { node->gen(); }
00079 void OldCodeGenerator::aBlockCreateNode(BlockCreateNode* node)          { node->gen(); }
00080 void OldCodeGenerator::aBlockMaterializeNode(BlockMaterializeNode* node){ node->gen(); }
00081 void OldCodeGenerator::aSendNode(SendNode* node)                        { node->gen(); }
00082 void OldCodeGenerator::aPrimNode(PrimNode* node)                        { node->gen(); }
00083 void OldCodeGenerator::aDLLNode(DLLNode* node)                          { node->gen(); }
00084 void OldCodeGenerator::aLoopHeaderNode(LoopHeaderNode* node)            { node->gen(); }
00085 void OldCodeGenerator::aReturnNode(ReturnNode* node)                    { node->gen(); }
00086 void OldCodeGenerator::aNLRSetupNode(NLRSetupNode* node)                { node->gen(); }
00087 void OldCodeGenerator::anInlinedReturnNode(InlinedReturnNode* node)     { node->gen(); }
00088 void OldCodeGenerator::aNLRContinuationNode(NLRContinuationNode* node)  { node->gen(); }
00089 void OldCodeGenerator::aBranchNode(BranchNode* node)                    { node->gen(); }
00090 void OldCodeGenerator::aTypeTestNode(TypeTestNode* node)                { node->gen(); }
00091 void OldCodeGenerator::aNLRTestNode(NLRTestNode* node)                  { node->gen(); }
00092 void OldCodeGenerator::aMergeNode(MergeNode* node)                      { node->gen(); }
00093 void OldCodeGenerator::anArrayAtNode(ArrayAtNode* node)                 { node->gen(); }
00094 void OldCodeGenerator::anArrayAtPutNode(ArrayAtPutNode* node)           { node->gen(); }
00095 void OldCodeGenerator::anInlinedPrimitiveNode(InlinedPrimitiveNode* node){ node->gen(); }
00096 void OldCodeGenerator::anUncommonNode(UncommonNode* node)               { node->gen(); }
00097 void OldCodeGenerator::aFixedCodeNode(FixedCodeNode* node)              { node->gen(); }
00098 void OldCodeGenerator::aNopNode(NopNode* node)                          { node->gen(); }
00099 void OldCodeGenerator::aCommentNode(CommentNode* node)                  { node->gen(); }
00100 
00101 
00102 //----------------------------------------------------------------------------------------------------
00103 //
00104 // Implementation of gen() nodes
00105 
00106 
00107 // Inline caches
00108 //
00109 // An inline cache is implemented via a dummy instruction that
00110 // follows the call immediately. The instruction's 32bit immediate
00111 // value provides the inline cache information. The instruction itself
00112 // does not modify the CPU state except the flags, which are in
00113 // an undefined state after a call anyway.
00114 
00115 static void inlineCache(Label& nlrTestPoint, SendInfo* info, bool super) {
00116   // generates the inline cache information (must follow a call instruction immediately)
00117   char flags = 0;
00118   if (super)                            setNth(flags, super_send_bit_no);
00119   if (info && info->uninlinable)        setNth(flags, uninlinable_bit_no);
00120   if (info && info->receiverStatic)     setNth(flags, receiver_static_bit_no);
00121   theMacroAssm->ic_info(nlrTestPoint, flags);
00122 }
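// Sketch of the resulting code pattern (the exact encoding is defined by
// MacroAssembler::ic_info, not here; the dummy-instruction form shown is an
// assumption consistent with the comment above):
//
//   call <lookup routine or target>      ; the send itself
//   <dummy instr> imm32                  ; e.g. a test-eax-style instruction whose
//                                        ; 32bit immediate encodes the distance to
//                                        ; nlrTestPoint plus the three flag bits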
00123 
00124 
00125 // Calls to C land
00126 //
00127 // When entering C land, the ebp & esp of the last Delta frame have to be recorded.
00128 // When leaving C land, last_Delta_fp has to be reset to 0. This is required to
00129 // allow proper stack traversal.
00130 
00131 static void call_C(char* dest, relocInfo::relocType relocType, bool needsDeltaFPCode) {
00132   if (needsDeltaFPCode) theMacroAssm->set_last_Delta_frame_before_call();
00133   theMacroAssm->call(dest, relocType);
00134   if (needsDeltaFPCode) theMacroAssm->reset_last_Delta_frame();
00135 }
00136 
00137 
00138 static void call_C(char* dest, relocInfo::relocType relocType, bool needsDeltaFPCode, Label& nlrTestPoint) {
00139   if (needsDeltaFPCode) theMacroAssm->set_last_Delta_frame_before_call();
00140   theMacroAssm->call(dest, relocType);
00141   inlineCache(nlrTestPoint, NULL, false);
00142   if (needsDeltaFPCode) theMacroAssm->reset_last_Delta_frame();
00143 }
00144 
00145 
00146 // Routines for debugging
00147 //
00148 // The verifyXXX routines are called from within compiled code if the
00149 // VerifyCode flag is set. The routines do plausibility checks on objects
00150 // and trap in case of an error. The verifyXXXCode routines are used to
00151 // generate the transparent call stubs for the verifyXXX's.
00152 
00153 static int callDepth = 0;       // to indent tracing messages
00154 static int numberOfCalls = 0;   // # of traced calls since start
00155 
00156 static void indent() { 
00157   const int maxIndent = 30;
00158   if (callDepth < maxIndent) {
00159     lprintf("%*s", callDepth, " ");
00160   } else {
00161     lprintf("%*s <%5d>", maxIndent - 9, " ", callDepth);
00162   }
00163 }
00164 
00165 
00166 static char* nmethodName() {
00167   deltaVFrame* f = DeltaProcess::active()->last_delta_vframe();
00168   return f->method()->selector()->as_string();
00169 }
00170 
00171 
00172 static void breakpointCode() {
00173   // generates a transparent call to a breakpoint routine where
00174   // a breakpoint can be set - for debugging purposes only
00175   if (!VerifyCode) warning(": breakpoint should not be called");
00176   theMacroAssm->pushad();
00177   call_C((char*)breakpoint, relocInfo::runtime_call_type, true);
00178   theMacroAssm->popad();
00179 }
00180 
00181 
00182 static void verifyOopCode(Register reg) {
00183   // generates transparent check code which tests the contents of
00184   // reg for the mark bit and halts if set - for debugging purposes only
00185   if (!VerifyCode) warning(": verifyOop should not be called");
00186   Label L;
00187   theMacroAssm->test(reg, Mark_Tag_Bit);
00188   theMacroAssm->jcc(Assembler::zero, L);
00189   theMacroAssm->hlt();
00190   theMacroAssm->bind(L);
00191 }
00192 
00193 
00194 extern "C" void verifyContext(oop obj) {
00195   // verify entire context chain
00196   contextOop ctx = contextOop(obj); 
00197   while (1) {
00198     if (ctx->is_mark()) error("context should never be mark");
00199     if (!Universe::is_heap((oop*)ctx)) error("context outside of heap");
00200     if (!ctx->is_context()) error("should be a context");
00201     if (ctx->unoptimized_context() != NULL) {
00202       error("context has been deoptimized -- shouldn't use in compiled code");
00203     }
00204     if (!ctx->has_outer_context()) break; 
00205     ctx = ctx->outer_context();
00206   }
00207 }
00208 
00209 
00210 static void verifyContextCode(Register reg) {
00211   // generates transparent check code which verifies that reg contains
00212   // a legal context and halts if not - for debugging purposes only
00213   if (!VerifyCode) warning(": verifyContext should not be called");
00214   theMacroAssm->pushad();
00215   theMacroAssm->pushl(reg);     // pass argument (C calling convention)
00216   call_C((char*)verifyContext, relocInfo::runtime_call_type, true);
00217   theMacroAssm->addl(esp, oopSize);   // get rid of argument
00218   theMacroAssm->popad();
00219 }
00220 
00221 
00222 extern "C" void verifyNilOrContext(oop obj) {
00223   if (obj != nilObj) verifyContext(obj);
00224 }
00225 
00226 
00227 static void verifyNilOrContextCode(Register reg) {
00228   // generates transparent check code which verifies that reg contains
00229   // nil or a legal context and halts if not - for debugging purposes only
00230   if (!VerifyCode) warning(": verifyNilOrContext should not be called");
00231   theMacroAssm->pushad();
00232   theMacroAssm->pushl(reg);     // pass argument (C calling convention)
00233   call_C((char*)verifyNilOrContext, relocInfo::runtime_call_type, true);
00234   theMacroAssm->addl(esp, oopSize);   // get rid of argument
00235   theMacroAssm->popad();
00236 }
00237 
00238 
00239 extern "C" void verifyBlock(blockClosureOop blk) {
00240   blk->verify();
00241 }
00242 
00243 
00244 static void verifyBlockCode(Register reg) {
00245   // generates transparent check code which verifies that reg contains
00246   // a legal block closure and halts if not - for debugging purposes only
00247   if (!VerifyCode) warning(": verifyBlockCode should not be called");
00248   theMacroAssm->pushad();
00249   theMacroAssm->pushl(reg);     // pass argument (C calling convention)
00250   call_C((char*)verifyBlock, relocInfo::runtime_call_type, true);
00251   theMacroAssm->addl(esp, oopSize);   // get rid of argument
00252   theMacroAssm->popad();
00253 }
00254 
00255 
00256 static int NumberOfReturns = 0;   // for debugging (conditional breakpoints)
00257 
00258 extern "C" void verifyReturn(oop obj) {
00259   NumberOfReturns++;
00260   obj->verify();
00261   if (TraceCalls) {
00262     ResourceMark rm;
00263     callDepth--;
00264     indent(); lprintf("return %s from %s\n", obj->print_value_string(), nmethodName()); 
00265   }
00266 }
00267 
00268 
00269 static void verifyReturnCode(Register reg) {
00270   // generates transparent check code which verifies that reg contains
00271   // a legal oop (the return value) and halts if not - for debugging purposes only
00272   if (!VerifyCode && !GenTraceCalls) warning(": verifyReturn should not be called");
00273   theMacroAssm->pushad();
00274   theMacroAssm->pushl(reg);     // pass argument (C calling convention)
00275   call_C((char*)verifyReturn, relocInfo::runtime_call_type, true);
00276   theMacroAssm->addl(esp, oopSize);   // get rid of argument
00277   theMacroAssm->popad();
00278 }
00279 
00280 
00281 extern "C" void verifyNLR(char* fp, char* nlrFrame, int nlrScopeID, oop nlrResult) {
00282   LOG_EVENT3("verifyNLR(%#x, %#x, %#x)", fp, nlrFrame, nlrResult);
00283   if (nlrFrame <= fp) error("NLR went too far: %#x <= %#x", nlrFrame, fp);
00284   // treat >99 scopes as likely error -- might actually be ok
00285   if (nlrScopeID < 0 || nlrScopeID > 99) error("illegal NLR scope ID %#x", nlrScopeID);
00286   if (nlrResult->is_mark()) error("NLR result is a markOop");
00287   if (TraceCalls) {
00288     ResourceMark rm;
00289     callDepth--;
00290     indent(); lprintf("NLR %s from/thru %s\n", nlrResult->print_value_string(), nmethodName()); 
00291   }
00292 }
00293 
00294 
00295 static void verifyNLRCode() {
00296   // generates transparent check code which verifies the NLR frame, scope id & result - for debugging purposes only
00297   if (!VerifyCode) warning(": verifyNLRCode should not be called");
00298   theMacroAssm->pushad();
00299   theMacroAssm->pushl(Mapping::asRegister(NLRResultLoc));       // pass argument (C calling convention)
00300   theMacroAssm->pushl(Mapping::asRegister(NLRHomeIdLoc));
00301   theMacroAssm->pushl(Mapping::asRegister(NLRHomeLoc));
00302   theMacroAssm->pushl(ebp);
00303   call_C((char*)verifyNLR, relocInfo::runtime_call_type, true);
00304   theMacroAssm->addl(esp, 4*oopSize);   // get rid of arguments
00305   theMacroAssm->popad();
00306 }
00307 
00308 
00309 extern "C" void verifySmi(oop obj) {
00310   if (!obj->is_smi()) fatal("should be a smi");
00311 }
00312 
00313 
00314 static void verifySmiCode(Register reg) {
00315   // generates transparent check code which verifies that reg contains
00316   // a legal smi and halts if not - for debugging purposes only
00317   if (!VerifyCode) warning(": verifySmi should not be called");
00318   theMacroAssm->pushad();
00319   theMacroAssm->pushl(reg);     // pass argument (C calling convention)
00320   call_C((char*)verifySmi, relocInfo::runtime_call_type, true);
00321   theMacroAssm->addl(esp, oopSize);   // get rid of argument
00322   theMacroAssm->popad();
00323 }
00324 
00325 
00326 extern "C" void verifyObj(oop obj) {
00327   if (!obj->is_smi() && !obj->is_mem()) fatal("should be an ordinary oop");
00328   klassOop klass = obj->klass();
00329   if (klass == NULL || !klass->is_mem()) fatal("should be an ordinary memOop");
00330   if (obj->is_block()) blockClosureOop(obj)->verify();
00331 }
00332 
00333 
00334 static void verifyObjCode(Register reg) {
00335   // generates transparent check code which verifies that reg contains
00336   // a legal oop and halts if not - for debugging purposes only
00337   if (!VerifyCode) warning(": verifyObj should not be called");
00338   theMacroAssm->pushad();
00339   theMacroAssm->pushl(reg);     // pass argument (C calling convention)
00340   call_C((char*)verifyObj, relocInfo::runtime_call_type, true);
00341   theMacroAssm->addl(esp, oopSize);   // get rid of argument
00342   theMacroAssm->popad();
00343 }
00344 
00345 
00346 extern "C" void verifyArguments(oop recv, int ebp, int nofArgs) {
00347   ResourceMark rm;
00348   numberOfCalls++;
00349   if (TraceCalls) { 
00350     callDepth++;
00351     indent(); 
00352     lprintf("calling %s %s ", nmethodName(), recv->print_value_string());
00353   }
00354   verifyObj(recv);
00355   int i = nofArgs;
00356   oop* arg = (oop*)(ebp + (nofArgs + 2)*oopSize);
00357   while (i-- > 0) {
00358     arg--;
00359     verifyObj(*arg);
00360     if (TraceCalls) {
00361       ResourceMark rm;
00362       lprintf("%s, ", (*arg)->print_value_string());
00363     }
00364   }
00365   if (VerifyDebugInfo) { 
00366     deltaVFrame* f = DeltaProcess::active()->last_delta_vframe();
00367     while (f != NULL) {
00368       f->verify_debug_info();
00369       f = f->sender_delta_frame();
00370     }
00371   }
00372 }
00373 
00374 
00375 static void verifyArgumentsCode(Register recv, int nofArgs) {
00376   // generates transparent check code which verifies that all arguments
00377   // are legal oops and halts if not - for debugging purposes only
00378   assert(VerifyCode || GenTraceCalls || VerifyDebugInfo, "performance bug: verifyArguments should not be called");
00379   theMacroAssm->pushad();
00380   theMacroAssm->pushl(nofArgs); // pass arguments (C calling convention)
00381   theMacroAssm->pushl(ebp);
00382   theMacroAssm->pushl(recv);
00383   call_C((char*)verifyArguments, relocInfo::runtime_call_type, true);
00384   theMacroAssm->addl(esp, 3*oopSize);   // get rid of arguments
00385   theMacroAssm->popad();
00386 }
00387 
00388 
00389 static int result_counter = 0;
00390 
00391 static void trace_result(int compilation, methodOop method, oop result) {
00392   ResourceMark rm;
00393   std->print("%6d: 0x%08x (compilation %4d, ", result_counter++, int(result), compilation);
00394   method->selector()->print_value();
00395   std->print(")\n");
00396 }
00397 
00398 
00399 static void call_trace_result(Register result) {
00400   theMacroAssm->pushad();
00401   theMacroAssm->pushl(result);
00402   theMacroAssm->pushl(theCompiler->method);
00403   theMacroAssm->pushl(nofCompilations);
00404   call_C((char*)trace_result, relocInfo::runtime_call_type, true);
00405   theMacroAssm->addl(esp, 3*oopSize);   // get rid of arguments
00406   theMacroAssm->popad();
00407 }
00408 
00409 
00410 // helper functions for loading/storing
00411 
00412 static Register moveLocToReg(Location src, Register temp) {
00413   // Returns the register corresponding to src if src is a register location,
00414   // otherwise loads the value at src into register temp and returns temp.
00415   if (src.isRegisterLocation()) {
00416     return Mapping::asRegister(src);
00417   } else {
00418     Mapping::load(src, temp);
00419     return temp;
00420   }
00421 }
00422 
00423 
00424 static Register movePRegToReg(PReg* src, Register temp) {
00425   // Returns the src register if a register has been assigned to src,
00426   // otherwise loads the value of src into register temp and returns temp.
00427   if (src->isConstPReg()) {
00428     theMacroAssm->movl(temp, ((ConstPReg*)src)->constant);
00429     return temp;
00430   } else {
00431     return moveLocToReg(src->loc, temp);
00432   }
00433 }
00434 
00435 
00436 static inline Register answerLocReg(Location src, Register temp) {
00437   // Returns the register corresponding to src if src is a register location,
00438   // otherwise returns temp.
00439   return src.isRegisterLocation() ? Mapping::asRegister(src) : temp;
00440 }
00441 
00442 
00443 static inline Register answerPRegReg(PReg* src, Register temp) {
00444   // Returns the src register if a register has been assigned to src,
00445   // otherwise returns temp.
00446   return answerLocReg(src->loc, temp);
00447 }
00448 
00449 
00450 static void load(PReg* src, Register dst) {
00451   // Loads src into register dst.
00452   if (src->isConstPReg()) {
00453     theMacroAssm->movl(dst, ((ConstPReg*)src)->constant);
00454   } else {
00455     Mapping::load(src->loc, dst);
00456   }
00457 }
00458 
00459 
00460 static void fload(PReg* src, Register base, Register temp) {
00461   assert(base != temp, "registers must be different");
00462   // Loads src into FPU ST
00463   if (src->isConstPReg()) {
00464     theMacroAssm->movl(temp, ((ConstPReg*)src)->constant);
00465     theMacroAssm->fld_d(Address(temp, byteOffset(doubleOopDesc::value_offset()))); // unbox float
00466   } else {
00467     Mapping::fload(src->loc, base);
00468   }
00469 }
00470 
00471 
00472 static void store(Register src, PReg* dst, Register temp1, Register temp2, bool needsStoreCheck = true) {
00473   // Stores register src to dst.
00474   assert(!dst->isConstPReg(), "destination cannot be a constant");
00475   Mapping::store(src, dst->loc, temp1, temp2, needsStoreCheck);
00476 }
00477 
00478 
00479 static void fstore(PReg* dst, Register base) {
00480   // Stores FPU ST to dst and pops ST
00481   assert(!dst->isConstPReg(), "destination cannot be a constant");
00482   Mapping::fstore(dst->loc, base);
00483 }
00484 
00485 
00486 static void storeO(ConstPReg* src, PReg* dst, Register temp1, Register temp2, bool needsStoreCheck = true) {
00487   // Stores constant src to dst.
00488   assert(!dst->isConstPReg(), "destination cannot be a constant");
00489   Mapping::storeO(src->constant, dst->loc, temp1, temp2, needsStoreCheck);
00490 }
00491 
00492 
00493 // The float section on the stack is 8-byte aligned. In order to access it, a base register
00494 // instead of ebp is used. This base register holds the 8-byte aligned ebp value. For now,
00495 // all nodes accessing floats use the same base register (temp3); this allows us to
00496 // get rid of unnecessary base register setup code if the previous node set it up already.
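// Worked example (floatSize == 8, as asserted in set_floats_base below):
// if ebp == 0x0012FF74, then
//
//   movl base, ebp    // base = 0x0012FF74
//   andl base, -8     // base = 0x0012FF70, i.e. ebp rounded down to an 8-byte boundary
//
// and all float temporaries are addressed at 8-byte aligned offsets from base.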
00497 
00498 static void set_floats_base(Node* node, Register base, bool enforce = false) {
00499   // Stores aligned ebp value into base
00500   assert(floatSize == 8, "check this code");
00501   assert(node->isAccessingFloats(), "must be a node accessing floats");
00502   assert(base == temp3, "check this code");
00503   if (node->hasSinglePredecessor() && node->firstPrev()->isAccessingFloats() && !enforce) {
00504     // previous node is also accessing floats => base register is
00505     // already set => no extra code necessary
00506   } else {
00507     theMacroAssm->movl(base, ebp);
00508     theMacroAssm->andl(base, -floatSize); 
00509   }
00510 }
00511 
00512 
00513 static void assign(Node* node, PReg* src, PReg* dst, Register temp1, Register temp2, Register temp3, bool needsStoreCheck = true) {
00514   // General assignment
00515   assert(temp1 != temp2 && temp1 != temp3 && temp2 != temp3, "registers must be different");
00516   if (src->loc != dst->loc) {
00517     if (node->isAccessingFloats()) {
00518       Register base = temp3;
00519       set_floats_base(node, base);
00520       fload(src, base, temp1);
00521       fstore(dst, base);
00522     } else if (src->isConstPReg()) {
00523       // assign constants directly without loading into temporary register first
00524       storeO((ConstPReg*)src, dst, temp1, temp2, needsStoreCheck);
00525     } else {
00526       Register t = movePRegToReg(src, answerPRegReg(dst, temp1));
00527       store(t, dst, temp2, temp3, needsStoreCheck);
00528     }
00529   }
00530 }
00531 
00532 
00533 static Register uplevelBase(PReg* startContext, int nofLevels, Register temp) {
00534   // Compute uplevel base; nofLevels is number of indirections (0 = in this context)
00535   Register b = nofLevels > 0 ? temp : answerPRegReg(startContext, temp);
00536   load(startContext, b);
00537   while (nofLevels-- > 0) { 
00538     if (VerifyCode) verifyContextCode(b);
00539     theMacroAssm->Load(b, contextOopDesc::parent_byte_offset(), b);
00540   }
00541   return b;
00542 }
00543 
00544 
00545 // Code generation for statistical information on nmethods
00546 
00547 static char* nmethodAddr() {
00548   // hack to compute hypothetical nmethod address
00549   // should be fixed at some point
00550   return (char*)(((nmethod*) (theMacroAssm->code()->code_begin())) - 1);
00551 }
00552 
00553 
00554 static void incCounter() {
00555   // Generates code to increment the nmethod execution counter
00556   char* addr = nmethodAddr() + nmethod::invocationCountOffset();
00557   theMacroAssm->incl(Address(int(addr), relocInfo::internal_word_type));
00558 }
00559 
00560 
00561 // Helper functions for general code generation
00562 
00563 static Assembler::Condition mapToCC(BranchOpCode op) {
00564   switch (op) {
00565     case EQBranchOp : return Assembler::equal;
00566     case NEBranchOp : return Assembler::notEqual;
00567     case LTBranchOp : return Assembler::less;
00568     case LEBranchOp : return Assembler::lessEqual;
00569     case GTBranchOp : return Assembler::greater;
00570     case GEBranchOp : return Assembler::greaterEqual;
00571     case LTUBranchOp: return Assembler::below;
00572     case LEUBranchOp: return Assembler::belowEqual;
00573     case GTUBranchOp: return Assembler::above;
00574     case GEUBranchOp: return Assembler::aboveEqual;
00575     case VSBranchOp : return Assembler::overflow;
00576     case VCBranchOp : return Assembler::noOverflow;
00577     default: ShouldNotReachHere(); return Assembler::zero;
00578   }
00579 }
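// Typical use (see BranchNode::gen() further below): the branch opcode stored in a
// BranchNode is mapped to an assembler condition code for the conditional jump to
// the node's second successor:
//
//   theMacroAssm->jcc(mapToCC(_op), next(1)->label);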
00580 
00581 
00582 static void primitiveCall(InlinedScope* scope, primitive_desc* pdesc) {
00583   if (pdesc->can_perform_NLR()) {
00584     call_C((char*)(&pdesc->fn()), relocInfo::prim_type, pdesc->needs_delta_fp_code(), scope->nlrTestPoint()->label);
00585   } else {
00586     call_C((char*)(&pdesc->fn()), relocInfo::prim_type, pdesc->needs_delta_fp_code());
00587   }
00588 }
00589 
00590 
00591 static void zapContext(PReg* context, Register temp) {
00592   Register c = movePRegToReg(context, temp);
00593   theMacroAssm->movl(Address(c, contextOopDesc::parent_byte_offset()), 0);
00594 }
00595 
00596 
00597 static void continueNLR(Register temp1, Register temp2) {
00598   assert(temp1 == ::temp1 && temp2 == ::temp2, "different register usage than stub routine - check this");
00599   if (VerifyCode) verifyNLRCode();
00600   theMacroAssm->jmp(StubRoutines::continue_NLR_entry(), relocInfo::runtime_call_type);
00601 }
00602 
00603 
00604 // Code generation for individual nodes
00605 
00606 void BasicNode::gen() {
00607   ScopeDescRecorder* rec = theCompiler->scopeDescRecorder();
00608   rec->addPcDesc(theMacroAssm->offset(), _scope->scopeInfo(), _bci);
00609 }
00610 
00611 
00612 static void checkRecompilation(Label& recompile_stub_call, Register t) {
00613   if (RecompilationPolicy::needRecompileCounter(theCompiler)) {
00614     // increment the nmethod execution counter and check limit
00615     char* addr = nmethodAddr() + nmethod::invocationCountOffset();
00616     theMacroAssm->movl(t, Address(int(addr), relocInfo::internal_word_type));
00617     theMacroAssm->incl(t);
00618     theMacroAssm->cmpl(t, theCompiler->get_invocation_counter_limit());
00619     theMacroAssm->movl(Address(int(addr), relocInfo::internal_word_type), t);
00620     theMacroAssm->jcc(Assembler::greaterEqual, recompile_stub_call);
00621   }
00622 }
00623 
00624 
00625 static void verify_context_chain(Register closure, int chain_length, Register temp1, Register temp2) {
00626   // Generates code to verify the context chain of a block closure. If the chain
00627   // contains deoptimized contextOops, the block has to be deoptimized as well.
00628   // Method: A bit in the mark field of each context indicates whether it has
00629   //         been deoptimized or not. All mark fields of the contexts in the
00630   //         context chain are or-ed together and the bit is checked at the end.
00631   assert(closure != temp1 && closure != temp2 && temp1 != temp2, "registers must be different");
00632   assert(chain_length >= 1, "must have at least one context in the chain");
00633   const Register context = temp1;
00634   const Register sum     = temp2;
00635   // initialize sum with mark of first context
00636   theMacroAssm->movl(context, Address(closure, blockClosureOopDesc::context_byte_offset()));
00637   theMacroAssm->movl(sum, Address(context, memOopDesc::mark_byte_offset()));
00638   // 'or' the mark fields of the remaining contexts in the chain to sum
00639   for (int i = chain_length - 1; i-- > 0; ) {
00640     theMacroAssm->movl(context, Address(context, contextOopDesc::parent_byte_offset()));
00641     theMacroAssm->orl(sum, Address(context, memOopDesc::mark_byte_offset()));
00642   }
00643   // check if there was any context that has been deoptimized
00644   theMacroAssm->testl(sum, markOopDesc::context_forward_bit_mask());
00645   theMacroAssm->jcc(Assembler::notZero, StubRoutines::deoptimize_block_entry(), relocInfo::runtime_call_type);
00646   // otherwise continue
00647 }
00648 
00649 
00650 void PrologueNode::gen() {
00651   BasicNode::gen();
00652 
00653   // call to recompiler - if the nmethod turns zombie, this will be overwritten by a call to the zombie handler
00654   // (see also comment in nmethod)
00655   Label recompile_stub_call;
00656   theMacroAssm->bind(recompile_stub_call);
00657   theCompiler->set_special_handler_call_offset(theMacroAssm->offset());
00658   theMacroAssm->call(StubRoutines::recompile_stub_entry(), relocInfo::runtime_call_type);
00659 
00660   // entry point for callers who need to verify receiver klass or if block
00661   theMacroAssm->align(oopSize);
00662   theCompiler->set_entry_point_offset(theMacroAssm->offset());
00663   Register recv = moveLocToReg(selfLoc, temp1);
00664   if (scope()->isMethodScope()) {
00665     // check class
00666     klassOop klass = _scope->selfKlass();
00667     if (klass == smiKlassObj) {
00668       // receiver must be a smi, check smi tag only
00669       theMacroAssm->test(recv, Mem_Tag);
00670       theMacroAssm->jcc(Assembler::notZero, CompiledIC::normalLookupRoutine());
00671     } else {
00672       // receiver could be a smi, check smi tag before loading class
00673       theMacroAssm->test(recv, Mem_Tag);
00674       theMacroAssm->jcc(Assembler::zero, CompiledIC::normalLookupRoutine());
00675       theMacroAssm->cmpl(Address(recv, memOopDesc::klass_byte_offset()), klass);
00676       theMacroAssm->jcc(Assembler::notEqual, CompiledIC::normalLookupRoutine());
00677     }
00678   } else {
00679     // If this is a block method and we expect a context
00680     // then the incoming context chain must be checked.
00681     // The context chain may contain a deoptimized contextOop.
00682     // (see StubRoutines::verify_context_chain for details)
00683     if (scope()->method()->block_info() == methodOopDesc::expects_context) {
00684       const bool use_fast_check = false;                        // turn this off if it doesn't work
00685       if (use_fast_check) {
00686         // What happens if the context chain is not anchored in a method?
00687         // Probably doesn't work correctly - think about this - gri 6/26/96
00688         // Turned off for now - because of problems. Should fix this.
00689         int length = _scope->homeContext() + 1;                 // includes context created within this scope
00690         if (scope()->allocatesCompiledContext()) length--;      // context has not been created yet -> adjust length
00691         verify_context_chain(recv, length, temp2, temp3);
00692       } else {
00693         theMacroAssm->call(StubRoutines::verify_context_chain(), relocInfo::runtime_call_type);
00694       }
00695     }
00696   }
00697 
00698   // callers who know the receiver class (e.g., PICs) should enter here
00699   theMacroAssm->align(oopSize);
00700   theCompiler->set_verified_entry_point_offset(theMacroAssm->offset());
00701   // build stack frame
00702   int frame_size = 2;   // return address & old ebp
00703   theMacroAssm->enter();
00704   // allocate float temporaries
00705   int nofFloats = theCompiler->totalNofFloatTemporaries();
00706   if (nofFloats > 0) {
00707     assert(floatSize == oopSize*2, "check this code");
00708     assert(first_float_offset == -4, "check this code");
00709     int float_section_size = nofFloats*(floatSize/oopSize) + 2; // 2 additional words for filler & float alignment
00710     frame_size += 1 + float_section_size;                       // magic word & floats
00711     theMacroAssm->pushl(Floats::magic);                         // magic word
00712     theMacroAssm->subl(esp, float_section_size * oopSize);      // add one word for float alignment
00713     theCompiler->set_float_section_size(float_section_size);
00714     theCompiler->set_float_section_start_offset(-2);            // float_section after ebp & magic word
00715   }
00716   // allocate normal temporaries
00717   int nofTemps = theAllocator->nofStackTemps();
00718   if (nofTemps > 0) {
00719     assert(first_temp_offset == -1, "check this code");
00720     frame_size += nofTemps;
00721     theMacroAssm->movl(temp2, nilObj);
00722     for (int i = 0; i < nofTemps; i++) theMacroAssm->pushl(temp2);
00723   }
00724   // make sure frame is big enough for deoptimization
00725   if (frame_size < minimum_size_for_deoptimized_frame) {
00726     if (nofTemps == 0) theMacroAssm->movl(temp2, nilObj);       // make sure temp2 holds nil
00727     while (frame_size < minimum_size_for_deoptimized_frame) {
00728       frame_size++;
00729       theMacroAssm->pushl(temp2);
00730     }
00731   }
00732 
00733   if (VerifyCode || VerifyDebugInfo || GenTraceCalls) verifyArgumentsCode(recv, scope()->method()->number_of_arguments());
00734 
00735   // initialize self and context (for blocks)
00736   // recv has already been loaded (possibly into temp1)
00737   if (scope()->isMethodScope()) {
00738     store(recv, scope()->self()->preg(), temp2, temp3);
00739   } else {
00740     // recv contains block closure -> get context out of it
00741     Register c = answerPRegReg(scope()->self()->preg(), temp2);
00742     theMacroAssm->Load(recv, blockClosureOopDesc::context_byte_offset(), c);
00743     store(c, scope()->self()->preg(), temp1, temp3);
00744     store(c, scope()->context(), temp1, temp3);
00745 
00746     if (VerifyCode) {
00747       switch (scope()->method()->block_info()) {
00748         case methodOopDesc::expects_nil      : verifyNilOrContextCode(c); break;
00749         case methodOopDesc::expects_self     : // fall through
00750         case methodOopDesc::expects_parameter: verifyOopCode(c);          break;
00751         case methodOopDesc::expects_context  : verifyContextCode(c);      break;
00752         default: ShouldNotReachHere();
00753       }
00754     }
00755   }
00756   // check for recompilation (do this last so stack frame is initialized properly)
00757   checkRecompilation(recompile_stub_call, temp2);
00758 }
00759 
00760 
00761 void LoadIntNode::gen() {
00762   BasicNode::gen();
00763   Register t = answerPRegReg(_dest, temp1);
00764   theMacroAssm->movl(t, _value);
00765   store(t, _dest, temp2, temp3);
00766 }
00767 
00768 
00769 void LoadOffsetNode::gen() {
00770   BasicNode::gen();
00771   Register b = movePRegToReg(_src, temp1);
00772   Register t = answerPRegReg(_dest, temp2);
00773   theMacroAssm->Load(b, byteOffset(offset), t);
00774   store(t, _dest, temp1, temp3);
00775 }
00776 
00777 
00778 void LoadUplevelNode::gen() {
00779   BasicNode::gen();
00780   Register b = uplevelBase(_context0, _nofLevels, temp1);
00781   Register t = answerPRegReg(_dest, temp2);
00782   theMacroAssm->Load(b, byteOffset(_offset), t);
00783   if (VerifyCode) verifyObjCode(t);
00784   store(t, _dest, temp1, temp3);
00785 }
00786 
00787 
00788 void StoreOffsetNode::gen() {
00789   BasicNode::gen();
00790   Register b = movePRegToReg(_base, temp1);
00791   Register t = movePRegToReg(_src, temp2);
00792   theMacroAssm->Store(t, b, byteOffset(_offset));
00793   if (_needsStoreCheck) {
00794     // NB: make sure b is a copy of base because storeCheck overwrites it (was bug 3/9/96 -Urs)
00795     if (b != temp1) load(_base, temp1);
00796     theMacroAssm->store_check(temp1, temp2);
00797   }
00798 }
00799 
00800 
00801 void StoreUplevelNode::gen() {
00802   StoreNode::gen();
00803   Register b = uplevelBase(_context0, _nofLevels, temp1);
00804   Register t = movePRegToReg(_src, temp2);
00805   theMacroAssm->Store(t, b, byteOffset(_offset));
00806   if (_needsStoreCheck) {
00807     // NB: make sure b is a copy of _context0 because storeCheck overwrites it
00808     if (b != temp1) load(_context0, temp1);
00809     theMacroAssm->store_check(temp1, temp2);
00810   }
00811 }
00812 
00813 
00814 void MergeNode::gen() {
00815   BasicNode::gen();
00816   // nothing to do
00817 }
00818 
00819 
00820 void SendNode::gen() {
00821   BasicNode::gen();
00822   if (isCounting()) incCounter();
00823   char* entry = _superSend ? CompiledIC::superLookupRoutine() : CompiledIC::normalLookupRoutine();
00824   theMacroAssm->call(entry, relocInfo::ic_type);
00825   inlineCache(scope()->nlrTestPoint()->label, _info, _superSend);
00826   assert(_dest->loc == resultLoc, "assignment missing");
00827 }
00828 
00829 
00830 void PrimNode::gen() {
00831   BasicNode::gen();
00832   primitiveCall(scope(), _pdesc);
00833   // assign result
00834   Register t = moveLocToReg(resultLoc, answerPRegReg(_dest, temp1));
00835   store(t, _dest, temp2, temp3);
00836 }
00837 
00838 
00839 void DLLNode::gen() {
00840   BasicNode::gen();
00841   assert(temp1 == ebx && temp2 == ecx && temp3 == edx, "registers are no temps anymore -> fix parameter passing");
00842   // determine entry point depending on whether a run-time lookup is needed or not
00843   // Note: do not do a DLL lookup at compile time since this may cause a call back.
00844   char* entry = (function() == NULL) ? StubRoutines::lookup_DLL_entry(async()) : StubRoutines::call_DLL_entry(async());
00845   // pass arguments for DLL_C_frame in registers
00846   // adjust this code if DLL_C_frame changes:
00847   // ebx: no. of arguments
00848   // ecx: address of last argument
00849   // edx: dll function entry point
00850   theMacroAssm->movl(ebx, nofArguments());      // setup no. of arguments
00851   theMacroAssm->movl(ecx, esp);                 // setup address of last argument
00852   // CompiledDLL_Cache
00853   // This code pattern must correspond to the CompiledDLL_Cache layout
00854   // (make sure assembler is not optimizing mov reg, 0 into xor reg, reg!)
00855   theMacroAssm->movl(edx, int(function()));     // part of CompiledDLL_Cache
00856   theMacroAssm->inline_oop(dll_name());         // part of CompiledDLL_Cache
00857   theMacroAssm->inline_oop(function_name());    // part of CompiledDLL_Cache
00858   theMacroAssm->call(entry, relocInfo::runtime_call_type);
00859   // Note: should also pop arguments in case of a NLR, could become a problem
00860   //       if DLL is called within a loop - fix this at some point.
00861   inlineCache(scope()->nlrTestPoint()->label, NULL, false);
00862   // assign result
00863   theMacroAssm->addl(esp, nofArguments()*oopSize);      // get rid of arguments
00864   Register t = moveLocToReg(resultLoc, answerPRegReg(_dest, temp1));
00865   store(t, _dest, temp2, temp3);
00866 }
00867 
00868 
00869 static bool producesResult(ArithOpCode op) {
00870   return (op != TestArithOp) && (op != CmpArithOp) && (op != tCmpArithOp);
00871 }
00872 
00873 
00874 static bool setupRegister(PReg* dst, PReg* arg, ArithOpCode op, Register& x, Register t) {
00875   // Sets up register x such that x := x op <some constant> corresponds to dst := arg op <some constant>.
00876   // If the temporary register t is used at all, x will be in t.
00877   // Returns true if op generated a result in x; returns false otherwise.
00878   bool result = producesResult(op);
00879   if (result) {
00880     // operation generates result, try to use as few registers as possible
00881     if ((dst->loc == arg->loc) /* || lastUsageOf(arg) */) {
00882       // arg is not used anymore afterwards, can be overwritten
00883       x = movePRegToReg(arg, t);
00884     } else {
00885       // have to load arg into a temporary register
00886       x = t; load(arg, t);
00887     }
00888   } else {
00889     // operation generates no result, use argument register directly
00890     assert(dst->isNoPReg(), "dst should be a noPReg");
00891     x = movePRegToReg(arg, t);
00892   }
00893   return result;
00894 }
00895 
00896 
00897 static bool setupRegisters(PReg* dst, PReg* arg1, ArithOpCode op, PReg* arg2, Register& x, Register& y, Register t1, Register t2) {
00898   // Sets up registers x & y such that x := x op y corresponds to dst := arg1 op arg2.
00899   // If the temporary registers t1 & t2 are used at all, x will be in t1 and y in t2.
00900   // Returns true if op generated a result in x; returns false otherwise.
00901   assert(t1 != t2, "registers should be different");
00902   bool result = producesResult(op);
00903   if (result) {
00904     // operation generates result, try to use as few registers as possible
00905     if ((dst->loc == arg1->loc) /* || lastUsageOf(arg1) */) {
00906       // arg1 is not used anymore afterwards, can be overwritten
00907       x = movePRegToReg(arg1, t1);
00908       y = movePRegToReg(arg2, t2);
00909     } else {
00910       // have to load arg1 into a temporary register
00911       x = t1; load(arg1, t1);
00912       y = movePRegToReg(arg2, t2);
00913     }
00914   } else {
00915     // operation generates no result, use argument registers directly
00916     assert(dst->isNoPReg(), "dst should be a noPReg");
00917     x = movePRegToReg(arg1, t1);
00918     y = movePRegToReg(arg2, t2);
00919   }
00920   return result;
00921 }
00922 
00923 
00924 static void arithRROp(ArithOpCode op, Register x, Register y) {
00925   assert(Int_Tag == 0, "check this code");
00926   switch (op) {
00927     case TestArithOp  : theMacroAssm->testl(x, y);      break;
00928     case tAddArithOp  : // fall through
00929     case  AddArithOp  : theMacroAssm->addl(x, y);       break;
00930     case tSubArithOp  : // fall through
00931     case  SubArithOp  : theMacroAssm->subl(x, y);       break;
00932     case tMulArithOp  : theMacroAssm->sarl(x, Tag_Size);
00933     case  MulArithOp  : theMacroAssm->imull(x, y);      break;
00934     case tDivArithOp  : // fall through
00935     case  DivArithOp  : Unimplemented();                break;
00936     case tModArithOp  : // fall through
00937     case  ModArithOp  : Unimplemented();                break;
00938     case tAndArithOp  : // fall through
00939     case  AndArithOp  : theMacroAssm->andl(x, y);       break;
00940     case tOrArithOp   : // fall through
00941     case  OrArithOp   : theMacroAssm->orl(x, y);        break;
00942     case tXOrArithOp  : // fall through
00943     case  XOrArithOp  : theMacroAssm->xorl(x, y);       break;
00944     case tShiftArithOp: Unimplemented();
00945     case  ShiftArithOp: Unimplemented();
00946     case tCmpArithOp  : // fall through
00947     case  CmpArithOp  : theMacroAssm->cmpl(x, y);       break;
00948     default           : ShouldNotReachHere();
00949   }
00950 }
00951 
00952 
00953 static void arithRCOp(ArithOpCode op, Register x, int y) {
00954   assert(Int_Tag == 0, "check this code");
00955   switch (op) {
00956     case TestArithOp  : theMacroAssm->testl(x, y);      break;
00957     case tAddArithOp  : // fall through
00958     case  AddArithOp  : theMacroAssm->addl(x, y);       break;
00959     case tSubArithOp  : // fall through
00960     case  SubArithOp  : theMacroAssm->subl(x, y);       break;
00961     case tMulArithOp  : y = arithmetic_shift_right(y, Tag_Size);
00962     case  MulArithOp  : theMacroAssm->imull(x, x, y);   break;
00963     case tDivArithOp  : // fall through
00964     case  DivArithOp  : Unimplemented();                break;
00965     case tModArithOp  : // fall through
00966     case  ModArithOp  : Unimplemented();                break;
00967     case tAndArithOp  : // fall through
00968     case  AndArithOp  : theMacroAssm->andl(x, y);       break;
00969     case tOrArithOp   : // fall through
00970     case  OrArithOp   : theMacroAssm->orl(x, y);        break;
00971     case tXOrArithOp  : // fall through
00972     case  XOrArithOp  : theMacroAssm->xorl(x, y);       break;
00973     case tShiftArithOp:
00974       if (y < 0) {
00975         // shift right
00976         int shift_count = ((-y) >> Tag_Size) % 32;
00977         theMacroAssm->sarl(x, shift_count);
00978         theMacroAssm->andl(x, -1 << Tag_Size);          // clear Tag bits
00979       } else if (y > 0) {
00980         // shift left
00981         int shift_count = ((+y) >> Tag_Size) % 32;
00982         theMacroAssm->shll(x, shift_count);
00983       }
00984       break;
00985     case  ShiftArithOp: Unimplemented();
00986     case tCmpArithOp  : // fall through
00987     case  CmpArithOp  : theMacroAssm->cmpl(x, y);       break;
00988     default           : ShouldNotReachHere();
00989   }
00990 }
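// Worked example for the tShiftArithOp case above (assuming the usual smi tagging
// with Int_Tag == 0 and Tag_Size == 2, i.e. a smi with value v is encoded as
// v << Tag_Size): the constant y is itself a tagged smi, so the untagged shift
// count is |y| >> Tag_Size. With y == as_smiOop(3) == 12:
//
//   y > 0 (shift left) : shll x, 3                          // tag bits of x remain 0
//   y < 0 (shift right): sarl x, 3; andl x, -1 << Tag_Size  // re-clear the tag bits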
00991 
00992 
00993 static void arithROOp(ArithOpCode op, Register x, oop y) {
00994   assert(!y->is_smi(), "check this code");
00995   switch (op) {
00996     case  CmpArithOp  : theMacroAssm->cmpl(x, y);       break;
00997     default           : ShouldNotReachHere();
00998   }
00999 }
01000 
01001 
01002 void TArithRRNode::gen() {
01003   BasicNode::gen();
01004   PReg* arg1 = _src;
01005   PReg* arg2 = _oper;
01006   if (arg2->isConstPReg()) {
01007     oop y = ((ConstPReg*)arg2)->constant;
01008     assert(y->is_smi() == _arg2IsInt, "flag value inconsistent");
01009     if (_arg2IsInt) {
01010       // perform operation
01011       Register x;
01012       bool result = setupRegister(_dest, arg1, _op, x, temp1);
01013       if (!_arg1IsInt) {
01014         // tag check necessary for arg1
01015         theMacroAssm->test(x, Mem_Tag);
01016         theMacroAssm->jcc(Assembler::notZero, next(1)->label);
01017       }
01018       arithRCOp(_op, x, int(y));                        // y is smiOop -> needs no relocation info
01019       if (result) store(x, _dest, temp2, temp3);
01020     } else {
01021       // operation fails always
01022       theMacroAssm->jmp(next(1)->label);
01023     }
01024   } else {
01025     Register x, y;
01026     bool result = setupRegisters(_dest, arg1, _op, arg2, x, y, temp1, temp2);
01027     // check argument tags
01028     Register tags = noreg;
01029     if (_arg1IsInt) {
01030       if (_arg2IsInt) {
01031         // both x & y are smis => no tag check necessary
01032       } else {
01033         // x is smi => check y
01034         tags = y;
01035       }
01036     } else {
01037       if (_arg2IsInt) {
01038         // y is smi => check x
01039         tags = x;
01040       } else {
01041         // check both x & y
01042         tags = temp3;
01043         theMacroAssm->movl(tags, x);
01044         theMacroAssm->orl (tags, y);
01045       }
01046     }
01047     if (tags != noreg) {
01048       // check tags
01049       theMacroAssm->test(tags, Mem_Tag);
01050       theMacroAssm->jcc(Assembler::notZero, next(1)->label);
01051     }
01052     // perform operation
01053     arithRROp(_op, x, y);
01054     if (result) {
01055       Register t = (x == temp1) ? temp2 : temp1;
01056       store(x, _dest, t, temp3);
01057     }
01058   }
01059 }
01060 
01061 
01062 void ArithRRNode::gen() {
01063   BasicNode::gen();
01064   PReg* arg1 = _src;
01065   PReg* arg2 = _oper;
01066   if (arg2->isConstPReg()) {
01067     oop y = ((ConstPReg*)arg2)->constant;
01068     Register x;
01069     bool result = setupRegister(_dest, arg1, _op, x, temp1);
01070     if (y->is_smi()) {
01071       arithRCOp(_op, x, int(y));                // y is smiOop -> needs no relocation info
01072     } else {
01073       arithROOp(_op, x, y);
01074     }
01075     if (result) store(x, _dest, temp2, temp3);
01076   } else {
01077     Register x, y;
01078     bool result = setupRegisters(_dest, arg1, _op, arg2, x, y, temp1, temp2);
01079     arithRROp(_op, x, y);
01080     if (result) {
01081       Register t = (x == temp1) ? temp2 : temp1;
01082       store(x, _dest, t, temp3);
01083     }
01084   }
01085 }
01086 
01087 
01088 void ArithRCNode::gen() {
01089   BasicNode::gen();
01090   PReg* arg1 = _src;
01091   int y = _oper;
01092   Register x;
01093   bool result = setupRegister(_dest, arg1, _op, x, temp1);
01094   arithRCOp(_op, x, y);
01095   if (result) store(x, _dest, temp2, temp3);
01096 }
01097 
01098 
01099 static void floatArithRROp(ArithOpCode op) {
01100   switch (op) {
01101     case fAddArithOp: theMacroAssm->faddp();    break;
01102     case fSubArithOp: theMacroAssm->fsubp();    break;
01103     case fMulArithOp: theMacroAssm->fmulp();    break;
01104     case fDivArithOp: theMacroAssm->fdivp();    break;
01105     case fModArithOp: theMacroAssm->fprem();    break;
01106     case fCmpArithOp: theMacroAssm->fcompp();   break;
01107     default         : ShouldNotReachHere();
01108   }
01109 }
01110 
01111 
01112 void FloatArithRRNode::gen() {
01113   BasicNode::gen();
01114   bool noResult = (_op == fCmpArithOp);
01115   bool exchange = (_op == fModArithOp || _op == fCmpArithOp);
01116   Register base = temp3;
01117   set_floats_base(this, base);
01118   fload(_src, base, temp1);
01119   fload(_oper, base, temp2);
01120   if (exchange) theMacroAssm->fxch();   // is paired with next instruction => no extra cycles
01121   floatArithRROp(_op);
01122   if (_op == fCmpArithOp) {
01123     // operation set FPU condition codes -> result is FPU status word
01124     assert(!Mapping::isFloatTemporary(_dest->loc), "fCmpArithOp doesn't produce a float");
01125     if (_dest->loc.isRegisterLocation() && _dest->loc.number() == eax.number()) {
01126       // store FPU status word in eax
01127       theMacroAssm->fwait();
01128       theMacroAssm->fnstsw_ax();
01129     } else {
01130       // store FPU status word not in eax
01131       Unimplemented();
01132     }
01133   } else {
01134     // store result (must be a float)
01135     fstore(_dest, base);
01136   }
01137 }
01138 
01139 
01140 static Address doubleKlass_addr() {
01141   return Address((int)&doubleKlassObj, relocInfo::external_word_type);
01142 }
01143 
01144 
01145 static oop oopify_float() {
01146   double x;
01147   __asm fstp x                                                  // get top of FPU stack
01148   BlockScavenge bs;                                             // because all registers are saved on the stack
01149   return oopFactory::new_double(x);                             // box the FloatValue
01150 }
01151 
01152 
01153 static void floatArithROp(ArithOpCode op, Register reg, Register temp) {
01154   assert(reg != temp, "registers must be different");
01155   switch (op) {
01156     case fNegArithOp   : theMacroAssm->fchs();  break;
01157     case fAbsArithOp   : theMacroAssm->fabs();  break;
01158     case fSqrArithOp   : theMacroAssm->fmul(0); break;
01159     case f2OopArithOp  :
01160       { theMacroAssm->pushl(reg);                               // reserve space for the result
01161         theMacroAssm->pushad();                                 // make sure no register is destroyed (no scavenge)
01162         theMacroAssm->call((char*)oopify_float, relocInfo::runtime_call_type);
01163         theMacroAssm->movl(Address(esp, nofRegisters * oopSize), eax);  // store result at reserved stack location
01164         theMacroAssm->popad();                                  // restore register contents
01165         theMacroAssm->popl(reg);                                // get result
01166       }
01167       break;
01168     case f2FloatArithOp:
01169       { Label is_smi, is_float, done;
01170         theMacroAssm->test(reg, Mem_Tag);                       // check if smi
01171         theMacroAssm->jcc(Assembler::zero, is_smi);
01172         theMacroAssm->movl(temp, Address(reg, memOopDesc::klass_byte_offset()));        // get object klass
01173         theMacroAssm->cmpl(temp, doubleKlass_addr());           // check if floatOop
01174         theMacroAssm->jcc(Assembler::equal, is_float);
01175         theMacroAssm->hlt(); // not yet implemented             // cannot be converted
01176 
01177         // convert smi
01178         theMacroAssm->bind(is_smi);
01179         theMacroAssm->sarl(reg, Tag_Size);                      // convert smi into int
01180         theMacroAssm->movl(Address(esp, -oopSize), reg);        // store it at end of stack
01181         theMacroAssm->fild_s(Address(esp, -oopSize));           // load & convert into FloatValue
01182         theMacroAssm->jmp(done);
01183 
01184         // unbox doubleOop
01185         theMacroAssm->bind(is_float);
01186         theMacroAssm->fld_d(Address(reg, byteOffset(doubleOopDesc::value_offset()))); // unbox float
01187 
01188         theMacroAssm->bind(done);
01189       }
01190       break;
01191     default            : ShouldNotReachHere();
01192   }
01193 }
01194 
01195 
01196 void FloatUnaryArithNode::gen() {
01197   BasicNode::gen();
01198   Register reg;
01199   Register base = temp3;
01200   set_floats_base(this, base);
01201   if (Mapping::isFloatTemporary(_src->loc) || _src->loc == topOfFloatStack) {
01202     // load argument on FPU stack & setup reg if result is an oop
01203     fload(_src, base, temp1);
01204     reg = temp1;
01205   } else {
01206     // load argument into reg
01207     reg = movePRegToReg(_src, temp1);
01208   }
01209   floatArithROp(_op, reg, temp2);
01210   if (Mapping::isFloatTemporary(_dest->loc) || _dest->loc == topOfFloatStack) {
01211     // result is on FPU stack
01212     fstore(_dest, base);
01213   } else {
01214     // result is in reg
01215     store(reg, _dest, temp2, temp3);
01216     set_floats_base(this, base, true);  // store may overwrite base -> make sure it is set again
01217   }
01218 }
01219 
01220 
01221 void AssignNode::gen() {
01222   StoreNode::gen();
01223   assign(this, _src, _dest, temp1, temp2, temp3);
01224 }
01225 
01226 
01227 void BranchNode::gen() {
01228   BasicNode::gen();
01229   theMacroAssm->jcc(mapToCC(_op), next(1)->label);
01230 }
01231 
01232 
01233 /*
01234 void BranchNode::gen() {
01235   BasicNode::gen();
01236   theMacroAssm->jcc(mapToCC(_op), next(1)->label);
01237   theMacroAssm->jmp(next()->label);     // this jump will be eliminated since this is the likely successor
01238   bb_needs_jump = false;                // no jump necessary at end of basic block
01239 }
01240 */
01241 
01242 
01243 void ContextCreateNode::gen() {
01244   BasicNode::gen();
01245 
01246   switch (_contextSize) {
01247     case 0:  primitiveCall(scope(), primitives::context_allocate0()); break;
01248     case 1:  primitiveCall(scope(), primitives::context_allocate1()); break;
01249     case 2:  primitiveCall(scope(), primitives::context_allocate2()); break;
01250     default: assert(_pdesc == primitives::context_allocate(), "bad context create prim");
01251              theMacroAssm->pushl((int)as_smiOop(_contextSize));
01252              primitiveCall(scope(), _pdesc);
01253              theMacroAssm->addl(esp, oopSize);  // pop argument, this is not a Pascal call - should fix this
01254   }
01255   Register context = Mapping::asRegister(resultLoc);
01256   if (_src == NULL) {
01257     assert(scope()->isMethodScope() || scope()->method()->block_info() == methodOopDesc::expects_nil, "inconsistency");
01258     theMacroAssm->movl(Address(context, contextOopDesc::parent_byte_offset()), NULL);
01259     // NULL for now; the interpreter uses nil. However, some of the
01260     // context verification code called from compiled code checks for
01261     // parents that are either a frame pointer, NULL or a context.
01262     // This should be unified at some point. (gri 5/9/96)
01263   } else {
01264     // parent is in _src (incoming)
01265     Register parent = movePRegToReg(_src, temp1);
01266     theMacroAssm->movl(Address(context, contextOopDesc::parent_byte_offset()), parent);
01267   }
01268   store(context, _dest, temp2, temp3);
01269   theMacroAssm->store_check(context, temp2);
01270 }
01271 
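// [Editorial sketch - not part of the original source] Approximate instruction sequence
// emitted above for _contextSize == 2 with a non-NULL parent (register names are
// illustrative; the real choices come from Mapping::asRegister and the temp registers):
//
//   call  <context_allocate2 primitive>            ; primitiveCall(...)
//   movl  parentReg, <_src>                        ; movePRegToReg(_src, temp1)
//   movl  [contextReg + parent_byte_offset], parentReg
//   movl  <_dest>, contextReg                      ; store(context, _dest, temp2, temp3)
//   <store check on contextReg>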
01272 
01273 void ContextInitNode::gen() {
01274   BasicNode::gen();
01275   // initialize context fields
01276   for (int i = nofTemps() - 1; i >= 0; i--) {
01277     PReg* src = _initializers->at(i)->preg();
01278     PReg* dest;
01279     if (src->isBlockPReg()) {
01280       // Blocks aren't actually assigned (at the PReg level) so that the inlining info isn't lost.
01281       if (wasEliminated()) {
01282         continue;                               // there's no assignment (context was eliminated)
01283       } else {
01284         dest = contents()->at(i)->preg();       // fake destination created by compiler
01285       }
01286     } else {
01287       dest = contents()->at(i)->preg();
01288     }
01289     assign(this, src, dest, temp1, temp2, temp3, false);
01290   }
01291   // NB: no store check necessary (done in ContextCreateNode)
01292   // init node must follow create immediately (since fields are uninitialized)
01293 }
01294 
01295 
01296 void ContextZapNode::gen() {
01297   // Only generated by the new backend so far
01298   ShouldNotReachHere();
01299 }
01300 
01301 
01302 void FixedCodeNode::gen() {
01303   BasicNode::gen();
01304   switch(_kind) {
01305     case dead_end:    theMacroAssm->hlt(); break;
01306     case inc_counter: incCounter(); break;
01307     default:          fatal1("unexpected FixedCodeNode kind %d", _kind);
01308   }
01309 }
01310 
01311 
01312 void InlinedReturnNode::gen() {
01313   BasicNode::gen();
01314   assign(this, _src, _dest, temp1, temp2, temp3);
01315   if (scope()->needsContextZapping()) {
01316     zapContext(scope()->context(), temp1);
01317   }
01318 }
01319 
01320 
01321 void ReturnNode::gen() {
01322   BasicNode::gen();
01323   assert(_src->loc == resultLoc, "result in wrong location");
01324   if (scope()->needsContextZapping()) {
01325     zapContext(scope()->context(), temp1);
01326   }
01327   // remove stack frame
01328   if (VerifyCode || GenTraceCalls) verifyReturnCode(Mapping::asRegister(resultLoc));
01329   if (TraceResults) call_trace_result(result_reg);
01330   int no_of_args_to_pop = scope()->nofArguments();
01331   if (scope()->method()->is_blockMethod()) {
01332     // blocks are called via primitiveValue => need to pop first argument
01333     // of primitiveValue (= block closure) as well since return happens
01334     // directly (and not through primitiveValue).
01335     no_of_args_to_pop++;
01336   }
01337   theMacroAssm->leave();
01338   theMacroAssm->ret(no_of_args_to_pop * oopSize);
01339 }
01340 
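// [Editorial sketch - not part of the original source] For a plain method with two
// arguments (no context zapping, verification, or tracing) the epilogue above is just:
//
//   leave                  ; restore the caller's ebp/esp
//   ret   2 * oopSize      ; pop both arguments; a block method pops 3 * oopSize,
//                          ; because primitiveValue also pushed the block closure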
01341 
01342 void NLRSetupNode::gen() {
01343   BasicNode::gen();
01344   assert(_src->loc == NLRResultLoc, "result in wrong location");
01345   // get, test if not zapped & assign home
01346   Label NLR_error;
01347   Register home = uplevelBase(scope()->context(), scope()->homeContext() + 1, temp1);
01348   theMacroAssm->testl(home, home);
01349   theMacroAssm->jcc(Assembler::zero, NLR_error); // zero -> home has been zapped
01350   if (home != Mapping::asRegister(NLRHomeLoc)) {
01351     theMacroAssm->movl(Mapping::asRegister(NLRHomeLoc), home);
01352   }
01353   // assign home id
01354   theMacroAssm->movl(NLR_homeId_reg, scope()->home()->scopeID());
01355   if (TraceResults) call_trace_result(NLR_result_reg);
01356   continueNLR(temp1, temp2);
01357   // call run-time routine in failure case
01358   theMacroAssm->bind(NLR_error);
01359   call_C((char*)suspend_on_NLR_error, relocInfo::runtime_call_type, true);
01360   theMacroAssm->hlt();
01361 }
01362 
01363 
01364 void NLRContinuationNode::gen() {
01365   BasicNode::gen();
01366   if (scope()->needsContextZapping()) {
01367     zapContext(scope()->context(), temp1);
01368   }
01369   continueNLR(temp1, temp2);
01370 }
01371 
01372 
01373 void NLRTestNode::gen() {
01374   BasicNode::gen();
01375   //theMacroAssm->bind(scope()->_nlrTestPointLabel);
01376   // arrived at the right frame?
01377   Label L;
01378   theMacroAssm->cmpl(Mapping::asRegister(NLRHomeLoc), ebp);
01379   theMacroAssm->jcc(Assembler::notEqual, L);
01380   // arrived at the right scope within a frame?
01381   int id = scope()->scopeID();
01382   if (id == 0) {
01383     // use x86 test to compare with 0 (smaller code than with cmp)
01384     theMacroAssm->testl(Mapping::asRegister(NLRHomeIdLoc), Mapping::asRegister(NLRHomeIdLoc));
01385   } else {
01386     theMacroAssm->cmpl(Mapping::asRegister(NLRHomeIdLoc), id);
01387   }
01388   theMacroAssm->jcc(Assembler::equal, next1()->label);
01389   // continue non-local return
01390   theMacroAssm->bind(L);
01391 }
01392 
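// [Editorial sketch - not part of the original source] For a scope with a non-zero
// scope id the NLR test above emits roughly:
//
//   cmpl  <NLRHomeLoc reg>, ebp        ; arrived at the home frame?
//   jne   L                            ; no -> keep propagating the NLR
//   cmpl  <NLRHomeIdLoc reg>, <id>     ; arrived at the home scope within the frame?
//   je    <next1()->label>             ; yes -> the non-local return ends here
// L:                                   ; fall through: continue the NLR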
01393 
01394 void InterruptCheckNode::gen() {
01395   BasicNode::gen();
01396   Unimplemented();
01397 }
01398 
01399 
01400 static void testForSingleKlass(Register obj, klassOop klass, Register klassReg, Label& success, Label& failure) {
01401   if (klass == Universe::smiKlassObj()) {
01402     // check tag
01403     theMacroAssm->test(obj, Mem_Tag);
01404   } else if (klass == Universe::trueObj()->klass()) {
01405     // only one instance: compare with trueObj
01406     theMacroAssm->cmpl(obj, Universe::trueObj());
01407   } else if (klass == Universe::falseObj()->klass()) {
01408     // only one instance: compare with falseObj
01409     theMacroAssm->cmpl(obj, Universe::falseObj());
01410   } else if (klass == Universe::nilObj()->klass()) {
01411     // only one instance: compare with nilObj
01412     theMacroAssm->cmpl(obj, Universe::nilObj());
01413   } else {
01414     // compare against obj's klass - must check if smi first
01415     theMacroAssm->test(obj, Mem_Tag);
01416     theMacroAssm->jcc(Assembler::zero, failure);
01417     theMacroAssm->movl(klassReg, Address(obj, memOopDesc::klass_byte_offset()));
01418     theMacroAssm->cmpl(klassReg, klass);
01419   }
01420   theMacroAssm->jcc(Assembler::notEqual, failure);
01421   theMacroAssm->jmp(success);   // this jump will be eliminated since this is the likely successor
01422 }
01423 
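// [Editorial sketch - not part of the original source] For an ordinary memOop klass
// (i.e. none of the special-cased smi/true/false/nil klasses) the helper above emits:
//
//   test  obj, Mem_Tag                        ; a smi cannot be an instance of a memOop klass
//   jz    failure
//   movl  klassReg, [obj + klass_byte_offset]
//   cmpl  klassReg, <klass>
//   jne   failure
//   jmp   success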
01424 
01425 static bool testForBoolKlasses(Register obj, klassOop klass1, klassOop klass2, Register klassReg, 
01426                                bool hasUnknown, Label& success1, Label& success2, Label& failure) {
01427   oop bool1  = Universe::trueObj();
01428   oop bool2  = Universe::falseObj();
01429   if (klass1 == bool2->klass() && klass2 == bool1->klass()) {
01430     oop t = bool1; bool1 = bool2; bool2 = t;
01431   }
01432   if (klass1 == bool1->klass() && klass2 == bool2->klass()) {
01433     if (hasUnknown) {
01434       theMacroAssm->cmpl(obj, bool1);
01435       theMacroAssm->jcc(Assembler::equal, success1);
01436       theMacroAssm->cmpl(obj, bool2);
01437       theMacroAssm->jcc(Assembler::notEqual, failure);
01438       theMacroAssm->jmp(success2);      // this jump will be eliminated since this is the likely successor
01439     } else {
01440       theMacroAssm->cmpl(obj, bool2);
01441       theMacroAssm->jcc(Assembler::equal, success2);
01442       theMacroAssm->jmp(success1);      // this jump will be eliminated since this is the likely successor
01443     }
01444     return true;
01445   }
01446   return false;
01447 }
01448 
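// [Editorial sketch - not part of the original source] With an unknown case present, the
// boolean fast path above emits (after the possible swap that lines bool1/bool2 up with
// klass1/klass2):
//
//   cmpl  obj, bool1              ; bool1/bool2 are trueObj/falseObj
//   je    success1
//   cmpl  obj, bool2
//   jne   failure                 ; neither boolean -> unknown case
//   jmp   success2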
01449 
01450 static void generalTypeTest(Register obj, Register klassReg, bool hasUnknown,
01451                             GrowableArray<klassOop>* classes, GrowableArray<Label*>* next) {
01452   // handle general case: N klasses, N+1 labels (first label = unknown case)
01453   int smi_case = -1;                    // index of smi case in next array (if there)
01454   const int len = classes->length();
01455   GrowableArray<klassOop> klasses(len); // list of classes excluding smi case
01456   GrowableArray<Label*>    labels(len); // list of labels excluding smi case
01457 
01458   // compute klasses & nodes list without smi case
01459   for (int i = 0; i < len; i++) {
01460     const klassOop klass = classes->at(i);
01461     if (klass == Universe::smiKlassObj()) {
01462       smi_case = i + 1;
01463     } else {
01464       klasses.append(klass);
01465       labels.append(next->at(i+1));
01466     }
01467   }
01468 
01469   if (smi_case == -1 && hasUnknown) {
01470     // smi case is also unknown case
01471     smi_case = 0;
01472   }
01473 
01474   // generate code
01475   if (smi_case >= 0) {
01476     theMacroAssm->test(obj, Mem_Tag);
01477     theMacroAssm->jcc(Assembler::zero, *next->at(smi_case));
01478   }
01479 
01480   bool klassHasBeenLoaded = false;
01481   const int nof_cmps = hasUnknown ? klasses.length() : klasses.length() - 1;
01482   for (i = 0; i < nof_cmps; i++) {
01483     const klassOop klass = klasses.at(i);
01484     if (klass == Universe::trueObj()->klass()) {
01485       // only one instance: compare with trueObj
01486       theMacroAssm->cmpl(obj, Universe::trueObj());
01487     } else if (klass == Universe::falseObj()->klass()) {
01488       // only one instance: compare with falseObj
01489       theMacroAssm->cmpl(obj, Universe::falseObj());
01490     } else if (klass == Universe::nilObj()->klass()) {
01491       // only one instance: compare with nilObj
01492       theMacroAssm->cmpl(obj, Universe::nilObj());
01493     } else {
01494       // compare with class
01495       assert(klass != Universe::smiKlassObj(), "should have been excluded");
01496       if (!klassHasBeenLoaded) {
01497         theMacroAssm->movl(klassReg, Address(obj, memOopDesc::klass_byte_offset()));
01498         klassHasBeenLoaded = true;
01499       }
01500       theMacroAssm->cmpl(klassReg, klass);
01501     }
01502     theMacroAssm->jcc(Assembler::equal, *labels.at(i));
01503   }
01504   if (hasUnknown) {
01505     theMacroAssm->jmp(*(next->first()));
01506   } else {
01507     // must be last case, no test required
01508     theMacroAssm->jmp(*labels.at(i));
01509   }
01510 }
01511 
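// [Editorial sketch - not part of the original source] Worked example: for
// classes = {smi, True, False} with an unknown case, the routine above emits
//
//   test  obj, Mem_Tag
//   jz    <next[1]>               ; smi case
//   cmpl  obj, trueObj
//   je    <next[2]>
//   cmpl  obj, falseObj
//   je    <next[3]>
//   jmp   <next[0]>               ; unknown case
//
// The receiver's klass is never loaded here because true, false and nil each have
// exactly one instance and can be compared against directly.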
01512 
01513 void TypeTestNode::gen() {
01514   BasicNode::gen();
01515   const int len = classes()->length();
01516   const Register obj = movePRegToReg(_src, temp1);
01517   const Register klassReg = temp2;
01518   bb_needs_jump = false;  // we generate all jumps explicitly
01519 
01520   if (ReorderBBs) {
01521     // generate better code for some of the frequent cases
01522 
01523     if (len == 1) {
01524       // handle all cases where only one klass is involved
01525       assert(hasUnknown(), "should be eliminated if there's no unknown case");
01526       assert(likelySuccessor() == next(1), "code pattern is not optimal");
01527       klassOop klass = classes()->at(0);
01528       testForSingleKlass(obj, klass, klassReg, next(1)->label, next()->label);
01529       return;
01530     }
01531 
01532     if (len == 2) {
01533       // handle pure boolean cases (ifTrue:/ifFalse:)
01534       klassOop klass1 = classes()->at(0);
01535       klassOop klass2 = classes()->at(1);
01536       if (testForBoolKlasses(obj, klass1, klass2, klassReg, hasUnknown(), next(1)->label, next(2)->label, next()->label)) {
01537         return;
01538       }
01539     }
01540   }
01541 
01542   // handle general case
01543   GrowableArray<Label*> labels(len + 1);
01544   for (int i = 0; i <= len; i++) labels.append(&next(i)->label);
01545   generalTypeTest(obj, klassReg, hasUnknown(), classes(), &labels);
01546 }
01547 
01548 /* old code
01549 void TypeTestNode::gen() {
01550   BasicNode::gen();
01551   const int len = classes()->length();
01552   const Register obj = movePRegToReg(_src, temp1);
01553   const Register klassReg = temp2;
01554 
01555   if (ReorderBBs) {
01556     // generate better code for some of the frequent cases
01557 
01558     if (len == 1) {
01559       // handle all cases where only one klass is involved
01560       assert(hasUnknown(), "should be eliminated if there's no unknown case");
01561       assert(likelySuccessor() == next(1), "code pattern is not optimal");
01562       klassOop klass = classes()->at(0);
01563       if (klass == Universe::smiKlassObj()) {
01564         // check tag
01565         theMacroAssm->test(obj, Mem_Tag);
01566       } else if (klass == Universe::trueObj()->klass()) {
01567         // only one instance: compare with trueObj
01568         theMacroAssm->cmpl(obj, Universe::trueObj());
01569       } else if (klass == Universe::falseObj()->klass()) {
01570         // only one instance: compare with falseObj
01571         theMacroAssm->cmpl(obj, Universe::falseObj());
01572       } else if (klass == Universe::nilObj()->klass()) {
01573         // only one instance: compare with nilObj
01574         theMacroAssm->cmpl(obj, Universe::nilObj());
01575       } else {
01576         // compare against obj's klass - must check if smi first
01577         theMacroAssm->test(obj, Mem_Tag);
01578         theMacroAssm->jcc(Assembler::zero, next()->label);
01579         theMacroAssm->movl(klassReg, Address(obj, memOopDesc::klass_byte_offset()));
01580         theMacroAssm->cmpl(klassReg, klass);
01581       }
01582       theMacroAssm->jcc(Assembler::notEqual, next()->label);
01583       theMacroAssm->jmp(next(1)->label);        // this jump will be eliminated since this is the likely successor
01584       bb_needs_jump = false;                    // no jump necessary at end of basic block
01585       return;
01586     }
01587 
01588     if (len == 2) {
01589       // handle pure boolean cases (ifTrue:/ifFalse:)
01590       klassOop klass1 = classes()->at(0);
01591       klassOop klass2 = classes()->at(1);
01592       oop      bool1  = Universe::trueObj();
01593       oop      bool2  = Universe::falseObj();
01594       if (klass1 == bool2->klass() && klass2 == bool1->klass()) {
01595         oop t = bool1; bool1 = bool2; bool2 = t;
01596       }
01597       if (klass1 == bool1->klass() && klass2 == bool2->klass()) {
01598         if (hasUnknown()) {
01599           assert(likelySuccessor() == next(2), "code pattern is not optimal");
01600           theMacroAssm->cmpl(obj, bool1);
01601           theMacroAssm->jcc(Assembler::equal, next(1)->label);
01602           theMacroAssm->cmpl(obj, bool2);
01603           theMacroAssm->jcc(Assembler::notEqual, next()->label);
01604           theMacroAssm->jmp(next(2)->label);    // this jump will be eliminated since this is the likely successor
01605         } else {
01606           assert(likelySuccessor() == next(1), "code pattern is not optimal");
01607           theMacroAssm->cmpl(obj, bool2);
01608           theMacroAssm->jcc(Assembler::equal, next(2)->label);
01609           theMacroAssm->jmp(next(1)->label);    // this jump will be eliminated since this is the likely successor
01610         }
01611         bb_needs_jump = false;                  // no jump necessary at end of basic block
01612         return;
01613       }
01614     } 
01615 
01616   // handle general case
01617   Node* smi_case = NULL;                // smi case if there
01618   GrowableArray<klassOop> klasses(len); // list of classes excluding smi case
01619   GrowableArray<Node*>    nodes(len);   // list of nodes   excluding smi case
01620 
01621   // compute klasses & nodes list without smi case
01622   for (int i = 0; i < len; i++) {
01623     const klassOop klass = classes()->at(i);
01624     if (klass == Universe::smiKlassObj()) {
01625       smi_case = next(i+1);
01626     } else {
01627       klasses.append(klass);
01628       nodes.append(next(i+1));
01629     }
01630   }
01631 
01632   if (smi_case == NULL && hasUnknown()) {
01633     // smi case is also unknown case
01634     smi_case = next();
01635   }
01636 
01637   // generate code
01638   if (smi_case != NULL) {
01639     theMacroAssm->test(obj, Mem_Tag);
01640     theMacroAssm->jcc(Assembler::zero, smi_case->label);
01641   }
01642 
01643   bool klassHasBeenLoaded = false;
01644   const int nof_cmps = hasUnknown() ? klasses.length() : klasses.length() - 1;
01645   for (i = 0; i < nof_cmps; i++) {
01646     const klassOop klass = klasses.at(i);
01647     if (klass == Universe::trueObj()->klass()) {
01648       // only one instance: compare with trueObj
01649       theMacroAssm->cmpl(obj, Universe::trueObj());
01650     } else if (klass == Universe::falseObj()->klass()) {
01651       // only one instance: compare with falseObj
01652       theMacroAssm->cmpl(obj, Universe::falseObj());
01653     } else if (klass == Universe::nilObj()->klass()) {
01654       // only one instance: compare with nilObj
01655       theMacroAssm->cmpl(obj, Universe::nilObj());
01656     } else {
01657       // compare with class
01658       assert(klass != Universe::smiKlassObj(), "should have been excluded");
01659       if (!klassHasBeenLoaded) {
01660         theMacroAssm->movl(klassReg, Address(obj, memOopDesc::klass_byte_offset()));
01661         klassHasBeenLoaded = true;
01662       }
01663       theMacroAssm->cmpl(klassReg, klass);
01664     }
01665     theMacroAssm->jcc(Assembler::equal, nodes.at(i)->label);
01666   }
01667   if (hasUnknown()) {
01668     theMacroAssm->jmp(next()->label);
01669   } else {
01670     // must be last case, no test required
01671     theMacroAssm->jmp(nodes.at(i)->label);
01672   }
01673   bb_needs_jump = false;
01674 }
01675 */
01676 
01677 
01678 void UncommonNode::gen() {
01679   BasicNode::gen();
01680   theMacroAssm->call(StubRoutines::unused_uncommon_trap_entry(), relocInfo::uncommon_type);
01681 }
01682 
01683 
01684 void BlockCreateNode::copyIntoContexts(Register val, Register t1, Register t2) {
01685   // Copy newly created block (in val) into all contexts that have a copy;
01686   // registers t1 and t2 can be used as scratch registers.
01687   // The BlockPReg has a list of all contexts containing the block.  It should
01688   // be stored into those that are allocated (weren't eliminated) and are in
01689   // a sender scope.  
01690   // Why not copy into contexts in a sibling scope?  There are two cases:
01691   //   (1) The sibling scope never created the block(s) that uplevel-access this
01692   //       block.  The context location still contains 0 but that doesn't matter
01693   //       because that context location is now inaccessible.
01694   //   (2) The sibling scope did create these block(s).  In this case, the receiver
01695   //       must already exist since it was materialized when the first uplevel-
01696   //       accessing block was created.
01697   // Urs 4/96
01698   BlockPReg* blk = block();
01699   GrowableArray<Location*>* copies = blk->contextCopies();
01700   if (copies == NULL) return;
01701   for (int i = copies->length() - 1; i >= 0; i--) {
01702     Location* l = copies->at(i);
01703     InlinedScope* scopeWithContext = theCompiler->scopes->at(l->scopeID());
01704     PReg* r = scopeWithContext->contextTemporaries()->at(l->tempNo())->preg();
01705     if (r->loc == unAllocated) continue;          // not uplevel-accessed (eliminated)
01706     if (r->isBlockPReg()) continue;               // ditto
01707     if (!r->loc.isContextLocation()) fatal("expected context location");
01708     if (scopeWithContext->isSenderOrSame(_scope)) {
01709       store(val, r, t1, t2);
01710     }
01711   }
01712 }
01713 
01714 
01715 void BlockCreateNode::materialize() {
01716   CompileTimeClosure* closure = block()->closure();
01717   if (closure->context()->loc.isRegisterLocation()) {
01718     // should not be allocated to a register, since the register will
01719     // be destroyed after the call - push it on the stack as temporary
01720     // fix - take this out after register allocation has been fixed
01721     theMacroAssm->pushl(Mapping::asRegister(closure->context()->loc));
01722   }
01723   int nofArgs = closure->nofArgs();
01724   switch (nofArgs) {
01725     case 0:   primitiveCall(scope(), primitives::block_allocate0()); break;
01726     case 1:   primitiveCall(scope(), primitives::block_allocate1()); break;
01727     case 2:   primitiveCall(scope(), primitives::block_allocate2()); break;
01728     default:  assert(_pdesc == primitives::block_allocate(), "bad block clone prim");
01729               theMacroAssm->pushl((int)as_smiOop(nofArgs));
01730               primitiveCall(scope(), _pdesc);
01731               theMacroAssm->addl(esp, oopSize); // pop argument, this is not a Pascal call - should fix this
01732   }
01733   // assign result
01734   Register t = moveLocToReg(resultLoc, answerPRegReg(_dest, temp1));
01735   store(t, _dest, temp2, temp3);
01736   // copy into all contexts that have a copy
01737   if (block()->isMemoized()) copyIntoContexts(t, temp2, temp3);
01738   // initialize block closure fields
01739   Register closureReg = Mapping::asRegister(resultLoc); // fix this: should refer to _dest->loc
01740   Register contextReg;
01741   if (closure->context()->loc.isRegisterLocation()) {
01742     // context value is on the stack -- see also comment above
01743     // take this out after register allocation has been fixed
01744     contextReg = temp1;
01745     theMacroAssm->popl(contextReg);
01746   } else {
01747     contextReg = movePRegToReg(closure->context(), temp1);
01748   }
01749   if (VerifyCode) {
01750     switch (closure->method()->block_info()) {
01751       case methodOopDesc::expects_nil      : verifyNilOrContextCode(contextReg); break;
01752       case methodOopDesc::expects_self     : // fall through
01753       case methodOopDesc::expects_parameter: verifyOopCode(contextReg); break;
01754       case methodOopDesc::expects_context  : verifyContextCode(contextReg); break;
01755       default: ShouldNotReachHere();
01756     }
01757   }
01758   theMacroAssm->Store(contextReg, closureReg, blockClosureOopDesc::context_byte_offset());
01759   // assert(theCompiler->jumpTableID == closure->parent_id(), "nmethod id must be the same");
01760   // fix this: RELOCATION INFORMATION IS NEEDED WHEN MOVING THE JUMPTABLE (Snapshot reading etc.)
01761   theMacroAssm->movl(Address(closureReg, blockClosureOopDesc::method_or_entry_byte_offset()), int(closure->jump_table_entry()));
01762   if (VerifyCode) verifyBlockCode(closureReg);
01763   theMacroAssm->store_check(closureReg, temp1);
01764 }
01765 
01766 
01767 void BlockCreateNode::gen() {
01768   BasicNode::gen();
01769   if (block()->closure()->method()->is_clean_block()) {
01770     // create the block now (doesn't need to be copied at run-time)
01771     CompileTimeClosure* closure = block()->closure();
01772     blockClosureOop blk = blockClosureOopDesc::create_clean_block(closure->nofArgs(), closure->jump_table_entry());
01773     Mapping::storeO(blk, _dest->loc, temp1, temp3, true);
01774   } else if (block()->isMemoized()) {
01775     // initialize block variable
01776     Mapping::storeO(MemoizedBlockNameDesc::uncreatedBlockValue(), _dest->loc, temp1, temp3, true);
01777   } else {
01778     // actually create block
01779     materialize();
01780   }
01781 }
01782 
01783 
01784 void BlockMaterializeNode::gen() {
01785   BasicNode::gen();
01786   if (block()->isMemoized() && !block()->closure()->method()->is_clean_block()) {
01787     // materialize block if it is not already materialized
01788     // (nothing to do in case of non-memoized or clean blocks)
01789     Label L;
01790     Register t = movePRegToReg(block(), temp1);
01791     assert(MemoizedBlockNameDesc::uncreatedBlockValue() == oop(0), "change the code generation here");
01792     theMacroAssm->testl(t, t);
01793     theMacroAssm->jcc(Assembler::notZero, L);
01794     materialize();
01795     theMacroAssm->bind(L);
01796   }
01797 }
01798 
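// [Editorial sketch - not part of the original source] Since the "not yet created"
// marker is the 0 oop (asserted above), the memoization check reduces to:
//
//   movl  t, <block location>
//   testl t, t
//   jnz   L                       ; already materialized -> nothing to do
//   <materialize the block>
// L:
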
01799 void LoopHeaderNode::gen() {
01800   if (!_activated) return;    // loop wasn't optimized
01801   // the loop header node performs all checks hoisted out of the loop:
01802   // for general loops:
01803   //   - do all type tests in the list, uncommon branch if they fail
01804   //     (common case: true/false tests, single-klass tests) 
01805   // additionally for integer loops:
01806   //   - test lowerBound (may be NULL), upperBound, loopVar for smi-ness (the first two may be ConstPRegs)
01807   //   - if upperBound is NULL, upperLoad is load of the array size
01808   //   - if loopArray is non-NULL, check lowerBound (if non-NULL) or initial value of loopVar against 1
01809   TrivialNode::gen();
01810   Label ok;
01811   Label failure;
01812   generateTypeTests(ok, failure);
01813   generateIntegerLoopTests(ok, failure);
01814   generateArrayLoopTests(ok, failure);
01815   if (ok.is_unbound()) theMacroAssm->bind(ok);
01816   theMacroAssm->jmp(next()->label);
01817   // above 2 lines could be eliminated with if (ok.is_unbound()) ok.redirectTo(next()->label)
01818   bb_needs_jump = false;  // we generate all jumps explicitly
01819   theMacroAssm->bind(failure);
01820   theMacroAssm->call(StubRoutines::unused_uncommon_trap_entry(), relocInfo::uncommon_type);
01821 }
01822 
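// [Editorial sketch - not part of the original source] The overall shape of the code
// emitted by LoopHeaderNode::gen() is:
//
//            <hoisted type tests>             ; any failing test jumps to "failure"
//            <integer loop bound tests>
//            <array bounds tests>
//   ok:      jmp   <next()->label>            ; all checks passed -> enter the loop
//   failure: call  <uncommon trap stub>       ; give up on the optimized loop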
01823 
01824 void LoopHeaderNode::generateTypeTests(Label& cont, Label& failure) {
01825   // test all values against expected classes
01826   Label* ok;
01827   const Register klassReg = temp2;
01828   const int len = _tests->length() - 1;
01829   int last;                                             // last case that generates a test
01830   for (last = len; last >= 0 && _tests->at(last)->testedPR->loc == unAllocated; last--) ;
01831   if (last < 0) return;                                 // no tests at all   
01832   for (int i = 0; i <= last; i++) {
01833     HoistedTypeTest* t = _tests->at(i);
01834     if (t->testedPR->loc == unAllocated) continue;      // optimized away, or ConstPReg
01835     if (t->testedPR->isConstPReg()) { 
01836       guarantee(t->testedPR->loc == unAllocated, "code assumes ConstPRegs are unallocated");
01837       handleConstantTypeTest((ConstPReg*)t->testedPR, t->klasses);
01838     } else {
01839       const Register obj = movePRegToReg(t->testedPR, temp1);
01840       Label okLabel;
01841       ok = (i == last) ? &cont : &okLabel;
01842       if (t->klasses->length() == 1) {
01843         testForSingleKlass(obj, t->klasses->at(0), klassReg, *ok, failure);
01844       } else if (t->klasses->length() == 2 &&
01845                  testForBoolKlasses(obj, t->klasses->at(0), t->klasses->at(1), klassReg, true, 
01846                  *ok, *ok, failure)) {
01847         // ok, was a bool test
01848       } else {
01849         const int len = t->klasses->length();
01850         GrowableArray<Label*> labels(len + 1);
01851         labels.append(&failure);
01852         for (int i = 0; i < len; i++) labels.append(ok);
01853         generalTypeTest(obj, klassReg, true, t->klasses, &labels);
01854       }
01855       if (i != last) theMacroAssm->bind(*ok);
01856     }
01857   }
01858 }
01859 
01860 void LoopHeaderNode::generateIntegerLoopTest(PReg* p, Label& prev, Label& failure) {
01861   const Register klassReg = temp2;
01862   if (p != NULL) {
01863     if (p->isConstPReg()) {
01864       // no run-time test necessary
01865       handleConstantTypeTest((ConstPReg*)p, NULL);
01866     } else if (p->loc == unAllocated) {
01867       // p is never used in loop, so no test needed
01868       guarantee(p->cpReg() == p, "should use cpReg()");
01869     } else {
01870       // generate run-time test
01871       if (prev.is_unbound()) theMacroAssm->bind(prev);
01872       Label ok;
01873       const Register obj = movePRegToReg(p, temp1);
01874       testForSingleKlass(obj, Universe::smiKlassObj(), klassReg, ok, failure);
01875       theMacroAssm->bind(ok);
01876     }
01877   }
01878 }
01879 
01880 void LoopHeaderNode::generateIntegerLoopTests(Label& prev, Label& failure) {
01881   if (!_integerLoop) return;
01882   generateIntegerLoopTest(_lowerBound, prev, failure);
01883   generateIntegerLoopTest(_upperBound, prev, failure);
01884   generateIntegerLoopTest(_loopVar   , prev, failure);
01885 }
01886 
01887 void LoopHeaderNode::handleConstantTypeTest(ConstPReg* r, GrowableArray<klassOop>* klasses) {
01888   // constant r is tested against klasses (efficiency hack: klasses == NULL means {smi})
01889   if ((klasses == NULL && r->constant->is_smi()) || (klasses && klasses->contains(r->constant->klass()))) {
01890     // always ok, no need to test
01891   } else {
01892     compiler_warning("loop header type test will always fail!");
01893     // don't jump to failure because that would make subsequent LoopHeader code unreachable (--> breaks back end)
01894     theMacroAssm->call(StubRoutines::unused_uncommon_trap_entry(), relocInfo::uncommon_type);
01895   }
01896 }
01897 
01898 void LoopHeaderNode::generateArrayLoopTests(Label& prev, Label& failure) {
01899   if (!_integerLoop) return;
01900   Register boundReg = temp1;
01901   const Register tempReg  = temp2;
01902   if (_upperLoad != NULL) {
01903     // The loop variable iterates from lowerBound...array size; if any of the array accesses use the loop variable
01904     // without an index range check, we need to check it here.
01905     PReg* loopArray = _upperLoad->src();
01906     AbstractArrayAtNode* atNode;
01907     for (int i = _arrayAccesses->length() - 1; i >= 0; i--) {
01908       atNode = _arrayAccesses->at(i);
01909       if (atNode->src() == loopArray && !atNode->needsBoundsCheck()) break;
01910     }
01911     if (i >= 0) {
01912       // loopVar is used to index into array; make sure lower & upper bound is within array range
01913       if (_lowerBound != NULL && _lowerBound->isConstPReg() && ((ConstPReg*)_lowerBound)->constant->is_smi() && ((ConstPReg*)_lowerBound)->constant >= as_smiOop(1)) {
01914         // loopVar iterates from smi_const to array size, so no test necessary
01915       } else {
01916         // test lower bound
01917        if (prev.is_unbound()) theMacroAssm->bind(prev);
01918        if (_lowerBound->loc == unAllocated) {
01919          guarantee(_lowerBound->cpReg() == _lowerBound, "should use cpReg()");
01920        } else {
01921          const Register t = movePRegToReg(_lowerBound ? _lowerBound : _loopVar, tempReg);
01922          theMacroAssm->cmpl(t, as_smiOop(1));    // compare the loaded lower bound (boundReg is not set up at this point)
01923          theMacroAssm->jcc(Assembler::less, failure);
01924        }
01925       }
01926 
01927       // test upper bound
01928       boundReg = movePRegToReg(_upperBound, boundReg);
01929       const Register t = movePRegToReg(atNode->src(), tempReg);
01930       theMacroAssm->movl(t, Address(t, byteOffset(atNode->sizeOffset())));
01931       theMacroAssm->cmpl(boundReg, t);
01932       theMacroAssm->jcc(Assembler::above, failure);
01933     }
01934   }
01935 }
01936 
01937 
01938 static void jcc_error(Node* node, Assembler::Condition cond, Label& label) {
01939 // Used in code pattern generators that also generate code to setup error messages.
01940 // If an uncommon trap is issued in the error situation anyway, the error message
01941 // setup code is not needed and we can jump to the uncommon node directly => saves
01942 // code & a jump in the common case.
01943   Node* failure_start = node->next(1);
01944   if (failure_start->isUncommonNode()) {
01945     // error handling causes uncommon trap anyway, jump to uncommon node directly
01946     theMacroAssm->jcc(cond, failure_start->label);
01947   } else {
01948     // failure case is not uncommon, jump to label
01949     theMacroAssm->jcc(cond, label);
01950   }
01951 }
01952 
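// [Editorial sketch - not part of the original source] The two code shapes produced via
// jcc_error() for a failing check are roughly:
//
//   ; failure successor is an UncommonNode - no error message is needed:
//   jcc   cond, <uncommon node label>
//
//   ; failure successor is ordinary failure code - branch to a local label that is
//   ; bound later, where the error symbol is stored into _error before jumping on:
//   jcc   cond, errorLabel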
01953 
01954 void ArrayAtNode::gen() {
01955   BasicNode::gen();
01956   // load registers in an order that reduces load delays
01957   Register size  = temp3;
01958   Register index = temp2; load(_arg, index);    // index is modified -> load always into register
01959   Register array = movePRegToReg(_src, temp1);  // array is read_only
01960   // first element is at index 1 => subtract smi(1) (doesn't change smi/oop property)
01961   theMacroAssm->subl(index, int(smiOop_one));
01962   // preload size for bounds check if necessary
01963   if (_needBoundsCheck) {
01964     theMacroAssm->movl(size, Address(array, byteOffset(_sizeOffset)));
01965   }
01966   // do index smi check if necessary (still possible, even after subtracting smi(1))
01967   Label indexNotSmi;
01968   if (!_intArg) {
01969     theMacroAssm->test(index, Mem_Tag);
01970     jcc_error(this, Assembler::notZero, indexNotSmi);
01971   }
01972   // do bounds check if necessary
01973   Label indexOutOfBounds;
01974   if (_needBoundsCheck) {
01975     theMacroAssm->cmpl(index, size);
01976     jcc_error(this, Assembler::aboveEqual, indexOutOfBounds);
01977   }
01978   // load element
01979   Register t = answerPRegReg(_dest, temp3);
01980   assert(Tag_Size == 2, "check this code");
01981   switch (_access_type) {
01982     case byte_at:
01983       theMacroAssm->sarl(index, Tag_Size);      // adjust index
01984       theMacroAssm->xorl(t, t);                 // clear destination register
01985       theMacroAssm->movb(t, Address(array, index, Address::times_1, byteOffset(_dataOffset)));
01986       theMacroAssm->shll(t, Tag_Size);          // make result a smi
01987       break;
01988     case double_byte_at:
01989       theMacroAssm->sarl(index, Tag_Size - 1);  // adjust index
01990       theMacroAssm->movl(t, Address(array, index, Address::times_1, byteOffset(_dataOffset)));
01991       theMacroAssm->andl(t, 0x0000FFFF);        // clear upper 2 bytes
01992       theMacroAssm->shll(t, Tag_Size);          // make result a smi
01993       break;
01994     case character_at:
01995       { theMacroAssm->sarl(index, Tag_Size - 1);// adjust index
01996         theMacroAssm->movl(t, Address(array, index, Address::times_1, byteOffset(_dataOffset)));
01997         theMacroAssm->andl(t, 0x0000FFFF);      // clear upper 2 bytes
01998         // use t as index into asciiCharacters()
01999         // check index first, must be 0 <= t < asciiCharacters()->length()
02000         objArrayOop chars = Universe::asciiCharacters();
02001         theMacroAssm->cmpl(t, chars->length());
02002         jcc_error(this, Assembler::aboveEqual, indexOutOfBounds);
02003         // get character out of chars array
02004         theMacroAssm->movl(temp1, chars);
02005         theMacroAssm->movl(t, Address(temp1, t, Address::times_4, byteOffset(chars->klass()->klass_part()->non_indexable_size() + 1)));
02006       }
02007       break;
02008     case object_at:
02009       // smi index is already shifted the right way => no index adjustment necessary
02010       theMacroAssm->movl(t, Address(array, index, Address::times_1, byteOffset(_dataOffset)));
02011       break;
02012     default:
02013       ShouldNotReachHere();
02014       break;
02015   }
02016   assert(t != temp1 && t != temp2, "just checking");
02017   store(t, _dest, temp1, temp2);
02018   // handle error cases if not uncommon
02019   if (canFail() && !next(1)->isUncommonNode()) {
02020     Label exit;
02021     theMacroAssm->jmp(exit);
02022     // error messages
02023     if (!_intArg) {
02024       theMacroAssm->bind(indexNotSmi);
02025       theMacroAssm->movl(temp1, vmSymbols::first_argument_has_wrong_type());
02026       store(temp1, _error, temp2, temp3);
02027       theMacroAssm->jmp(next(1)->label);
02028     }
02029     if (_needBoundsCheck) {
02030       theMacroAssm->bind(indexOutOfBounds);
02031       theMacroAssm->movl(temp1, vmSymbols::out_of_bounds());
02032       store(temp1, _error, temp2, temp3);
02033       theMacroAssm->jmp(next(1)->label);
02034     }
02035     theMacroAssm->bind(exit);
02036   }
02037 }
02038 
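// [Editorial sketch - not part of the original source] A minimal, self-contained C++
// illustration of the smi tag arithmetic the byte_at case above relies on, assuming
// Tag_Size == 2 and an all-zero smi tag; the sketch_* names are invented for this
// example and do not exist in the VM sources.

#include <cassert>

static inline int sketch_int_to_smi(int n)      { return n << 2; }      // like shll reg, Tag_Size
static inline int sketch_smi_to_int(int tagged) { return tagged >> 2; } // like sarl reg, Tag_Size

static inline void sketch_smi_demo() {
  int index = sketch_int_to_smi(3);              // Smalltalk index 3, tagged representation: 12
  index -= sketch_int_to_smi(1);                 // subl index, smiOop_one: 1-origin -> 0-origin
  assert(sketch_smi_to_int(index) == 2);         // raw index used after "sarl index, Tag_Size"
  int byte = 65;                                 // value loaded by movb
  assert(sketch_smi_to_int(sketch_int_to_smi(byte)) == byte);  // "shll t, Tag_Size" re-tags the result
}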
02039 
02040 void ArrayAtPutNode::gen() {
02041   BasicNode::gen();
02042   // load registers in an order that reduces load delays
02043   Register size  = temp3;
02044   Register index = temp2; load(_arg, index);    // index is modified -> load always into register
02045   Register array = temp1; load(_src, array);    // array may be modified -> load always into register
02046   // first element is at index 1 => subtract smi(1) (doesn't change smi/oop property)
02047   theMacroAssm->subl(index, int(smiOop_one));
02048   // preload size for bounds check if necessary
02049   if (_needBoundsCheck) {
02050     theMacroAssm->movl(size, Address(array, byteOffset(_sizeOffset)));
02051   }
02052   // do index smi check if necessary (still possible, even after subtracting smi(1))
02053   Label indexNotSmi;
02054   if (!_intArg) {
02055     theMacroAssm->test(index, Mem_Tag);
02056     jcc_error(this, Assembler::notZero, indexNotSmi);
02057   }
02058   // do bounds check if necessary
02059   Label indexOutOfBounds;
02060   if (_needBoundsCheck) {
02061     theMacroAssm->cmpl(index, size);
02062     jcc_error(this, Assembler::aboveEqual, indexOutOfBounds);
02063   }
02064   // store element
02065   Label elementNotSmi, elementOutOfRange;
02066   Register element = temp3; load(elem, element);// element may be modified -> load always into register
02067   assert(Tag_Size == 2, "check this code");
02068   switch (_access_type) {
02069     case byte_at_put:
02070       if (!_smi_element) {
02071         theMacroAssm->test(element, Mem_Tag);
02072         jcc_error(this, Assembler::notZero, elementNotSmi);
02073       }
02074       theMacroAssm->sarl(element, Tag_Size);    // convert element into (int) byte
02075       if (_needs_element_range_check) {
02076         theMacroAssm->cmpl(element, 0x100);
02077         jcc_error(this, Assembler::aboveEqual, elementOutOfRange);
02078       }
02079       theMacroAssm->sarl(index, Tag_Size);      // adjust index
02080       theMacroAssm->movb(Address(array, index, Address::times_1, byteOffset(_dataOffset)), element);
02081       assert(!_needs_store_check, "just checking");
02082       break;
02083     case double_byte_at_put:
02084       if (!_smi_element) {
02085         theMacroAssm->test(element, Mem_Tag);
02086         jcc_error(this, Assembler::notZero, elementNotSmi);
02087       }
02088       theMacroAssm->sarl(element, Tag_Size);    // convert element into (int) double byte
02089       if (_needs_element_range_check) {
02090         theMacroAssm->cmpl(element, 0x10000);
02091         jcc_error(this, Assembler::aboveEqual, elementOutOfRange);
02092       }
02093       theMacroAssm->sarl(index, Tag_Size - 1);  // adjust index
02094       theMacroAssm->leal(array, Address(array, index, Address::times_1, byteOffset(_dataOffset)));
02095       assert(temp2 != array && temp2 != element, "check this code");
02096       theMacroAssm->movl(temp2, element);       // copy element (since element might be used afterwards)
02097       theMacroAssm->shrl(temp2, 8);             // shift 2nd byte into low-byte position
02098       theMacroAssm->movb(Address(array, 0), element);
02099       theMacroAssm->movb(Address(array, 1), temp2);
02100       assert(!_needs_store_check, "just checking");
02101       // Note: could use a better code sequence without introducing the extra movl & shrl
02102       //       instruction here - however, currently the assembler doesn't support addressing
02103       //       of the 2nd byte in a register (otherwise two movb instructions would do).
02104       break;
02105     case object_at_put:
02106       // smi index is already shifted the right way => no index adjustment necessary
02107       if (_needs_store_check) {
02108         theMacroAssm->leal(array, Address(array, index, Address::times_1, byteOffset(_dataOffset)));
02109         theMacroAssm->movl(Address(array), element);
02110         theMacroAssm->store_check(array, temp3);
02111       } else {
02112         theMacroAssm->movl(Address(array, index, Address::times_1, byteOffset(_dataOffset)), element);
02113       }
02114       break;
02115     default:
02116       ShouldNotReachHere();
02117       break;
02118   }
02119   // handle error cases if not uncommon
02120   if (canFail() && !next(1)->isUncommonNode()) {
02121     Label exit;
02122     theMacroAssm->jmp(exit);
02123     // error messages
02124     if (!_intArg) {
02125       theMacroAssm->bind(indexNotSmi);
02126       theMacroAssm->movl(temp1, vmSymbols::first_argument_has_wrong_type());
02127       store(temp1, _error, temp2, temp3);
02128       theMacroAssm->jmp(next(1)->label);
02129     }
02130     if (_needBoundsCheck) {
02131       theMacroAssm->bind(indexOutOfBounds);
02132       theMacroAssm->movl(temp1, vmSymbols::out_of_bounds());
02133       store(temp1, _error, temp2, temp3);
02134       theMacroAssm->jmp(next(1)->label);
02135     }
02136     if (!_smi_element) {
02137       theMacroAssm->bind(elementNotSmi);
02138       theMacroAssm->movl(temp1, vmSymbols::second_argument_has_wrong_type());
02139       store(temp1, _error, temp2, temp3);
02140       theMacroAssm->jmp(next(1)->label);
02141     }
02142     if (_needs_element_range_check) {
02143       theMacroAssm->bind(elementOutOfRange);
02144       theMacroAssm->movl(temp1, vmSymbols::value_out_of_range());
02145       store(temp1, _error, temp2, temp3);
02146       theMacroAssm->jmp(next(1)->label);
02147     }
02148     theMacroAssm->bind(exit);
02149   }
02150 }
02151 
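// [Editorial sketch - not part of the original source] The double_byte_at_put case above
// stores a 16-bit value as two single-byte stores in little-endian order, because the
// assembler used here cannot address the second byte of a register directly. In plain
// C++ the same split looks like this (sketch only; names invented for the example):

#include <cstdint>

static inline void sketch_store_double_byte(uint8_t* p, uint32_t value) {
  p[0] = static_cast<uint8_t>(value & 0xFF);          // movb [array + 0], element
  p[1] = static_cast<uint8_t>((value >> 8) & 0xFF);   // shrl temp2, 8; movb [array + 1], temp2
}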
02152 
02153 void InlinedPrimitiveNode::gen() {
02154   BasicNode::gen();
02155   switch (_op) {
02156     case InlinedPrimitiveNode::obj_klass:
02157       { Register obj   = movePRegToReg(_src, temp1);                    // obj is read_only
02158         Register klass = temp2;
02159         Label is_smi;
02160         theMacroAssm->movl(klass, Universe::smiKlassObj());
02161         theMacroAssm->test(obj, Mem_Tag);
02162         theMacroAssm->jcc(Assembler::zero, is_smi);
02163         theMacroAssm->movl(klass, Address(obj, memOopDesc::klass_byte_offset()));
02164         theMacroAssm->bind(is_smi);
02165         store(klass, _dest, temp1, temp3);
02166       }
02167       break;
02168     case InlinedPrimitiveNode::obj_hash:
02169       { Unimplemented();
02170         // Implemented for the smi klass only so far - can be resolved in
02171         // the PrimInliner for that case without using an InlinedPrimitiveNode.
02172       };
02173       break;
02174     case InlinedPrimitiveNode::proxy_byte_at:
02175       { Register proxy  = temp1; load(_src,  proxy);                    // proxy is modified
02176         Register index  = temp2; load(_arg1, index);                    // index is modified
02177         Register result = answerPRegReg(_dest, temp3);
02178         Label indexNotSmi;
02179         // do index smi check if necessary
02180         if (!_arg1_is_smi) {
02181           theMacroAssm->test(index, Mem_Tag);
02182           jcc_error(this, Assembler::notZero, indexNotSmi);
02183         }
02184         // load element
02185         theMacroAssm->movl(proxy, Address(proxy, pointer_offset));      // unbox proxy
02186         theMacroAssm->sarl(index, Tag_Size);                            // adjust index
02187         theMacroAssm->xorl(result, result);                             // clear destination register
02188         theMacroAssm->movb(result, Address(proxy, index, Address::times_1, 0));
02189         theMacroAssm->shll(result, Tag_Size);                           // make result a smi
02190         // continue
02191         assert(result != temp1 && result != temp2, "just checking");
02192         store(result, _dest, temp1, temp2);
02193         // handle error cases if not uncommon
02194         if (canFail() && !next(1)->isUncommonNode()) {
02195           Label exit;
02196           theMacroAssm->jmp(exit);
02197           // error messages
02198           if (!_arg1_is_smi) {
02199             theMacroAssm->bind(indexNotSmi);
02200             theMacroAssm->movl(temp1, vmSymbols::first_argument_has_wrong_type());
02201             store(temp1, _error, temp2, temp3);
02202             theMacroAssm->jmp(next(1)->label);
02203           }
02204           theMacroAssm->bind(exit);
02205         }
02206       }
02207       break;
02208     case InlinedPrimitiveNode::proxy_byte_at_put:
02209       { bool const_val = _arg2->isConstPReg();
02210         Register proxy = temp1; load(_src,  proxy);                     // proxy is modified
02211         Register index = temp2; load(_arg1, index);                     // index is modified
02212         Register value; 
02213         if (const_val) {
02214           // value doesn't have to be loaded -> do nothing here
02215           if (!_arg2_is_smi) fatal("proxy_byte_at_put: should not happen - internal error");
02216           //if (!_arg2_is_smi) fatal("proxy_byte_at_put: should not happen - tell Robert");
02217         } else {
02218           value = temp3; load(_arg2, value);                            // value is modified
02219         }
02220         Label indexNotSmi, valueNotSmi;
02221         // do index smi check if necessary
02222         if (!_arg1_is_smi) {
02223           theMacroAssm->test(index, Mem_Tag);
02224           jcc_error(this, Assembler::notZero, indexNotSmi);
02225         }
02226         // do value smi check if necessary
02227         if (!_arg2_is_smi) {
02228           assert(!const_val, "constant shouldn't need a smi check");
02229           theMacroAssm->test(value, Mem_Tag);
02230           jcc_error(this, Assembler::notZero, valueNotSmi);
02231         }
02232         // store element
02233         theMacroAssm->movl(proxy, Address(proxy, pointer_offset));      // unbox proxy
02234         theMacroAssm->sarl(index, Tag_Size);                            // adjust index
02235         if (const_val) {
02236           smiOop constant = smiOop(((ConstPReg*)_arg2)->constant);
02237           assert(constant->is_smi(), "should be a smi");
02238           theMacroAssm->movb(Address(proxy, index, Address::times_1, 0), constant->value() & 0xFF);
02239         } else {
02240           theMacroAssm->sarl(value, Tag_Size);                          // adjust value
02241           theMacroAssm->movb(Address(proxy, index, Address::times_1, 0), value);
02242         }
02243         // handle error cases if not uncommon
02244         if (canFail() && !next(1)->isUncommonNode()) {
02245           Label exit;
02246           theMacroAssm->jmp(exit);
02247           // error messages
02248           if (!_arg1_is_smi) {
02249             theMacroAssm->bind(indexNotSmi);
02250             theMacroAssm->movl(temp1, vmSymbols::first_argument_has_wrong_type());
02251             store(temp1, _error, temp2, temp3);
02252             theMacroAssm->jmp(next(1)->label);
02253           }
02254           if (!_arg2_is_smi) {
02255             theMacroAssm->bind(valueNotSmi);
02256             theMacroAssm->movl(temp1, vmSymbols::second_argument_has_wrong_type());
02257             store(temp1, _error, temp2, temp3);
02258             theMacroAssm->jmp(next(1)->label);
02259           }
02260           theMacroAssm->bind(exit);
02261         }
02262       };
02263       break;
02264     default: ShouldNotReachHere();
02265   }
02266 }
02267 
02268 
02269 #endif
