codeGenerator.cpp

00001 /* Copyright 1994 - 1996 LongView Technologies L.L.C. $Revision: 1.78 $ */
00002 /* Copyright (c) 2006, Sun Microsystems, Inc.
00003 All rights reserved.
00004 
00005 Redistribution and use in source and binary forms, with or without modification, are permitted provided that the 
00006 following conditions are met:
00007 
00008     * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
00009     * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following 
00010           disclaimer in the documentation and/or other materials provided with the distribution.
00011     * Neither the name of Sun Microsystems nor the names of its contributors may be used to endorse or promote products derived 
00012           from this software without specific prior written permission.
00013 
00014 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT 
00015 NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 
00016 THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 
00017 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
00018 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 
00019 OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
00020 
00021 
00022 */
00023 
00024 # include "incls/_precompiled.incl"
00025 
00026 # ifdef DELTA_COMPILER
00027 # include "incls/_codeGenerator.cpp.incl"
00028 
00029 
00030 // Sometimes a little stub has to be generated if a merge between two execution
00031 // paths requires that the PRegMapping of one path is made conformant with the
00032 // mapping of the other path. If this code cannot be emitted in place (because
00033 // it would destroy the mapping in use) a conditional jump to the stub instructions
00034 // is generated instead (note that in case of an absolute jump, merge code can
00035 // always be emitted in place). A Stub holds the necessary information to generate
00036 // the merge instructions.
00037 
00038 // Stub routines should eventually be canonicalized if possible -> saves space. FIX THIS
00039 
00040 class Stub: public ResourceObj {
00041  private:
00042   PRegMapping*  _mapping;
00043   Node*         _dst;
00044   Label         _stub_code;
00045   
00046   Stub(PRegMapping* mapping, Node* dst) {
00047     assert(dst->hasMapping() && !mapping->isConformant(dst->mapping()), "no stub required");
00048     _mapping = mapping;
00049     _dst     = dst;
00050   }
00051 
00052  public:
00053   static Stub* new_jcc_stub(PRegMapping* mapping, Node* dst, Assembler::Condition cc) {
00054     Stub* s = new Stub(mapping, dst);
00055     // generate conditional jump to stub code
00056     mapping->assembler()->jcc(cc, s->_stub_code);
00057     return s;
00058   }
00059 
00060   static Stub* new_NLR_stub(PRegMapping* mapping, Node* dst, int flags) {
00061     Stub* s = new Stub(mapping, dst);
00062     // generate inline cache with NLR jumping to stub code
00063     mapping->assembler()->ic_info(s->_stub_code, flags);
00064     return s;
00065   }
00066 
00067   void generateMergeStub() {
00068     _mapping->assembler()->bind(_stub_code);
00069     _mapping->makeConformant(_dst->mapping());
00070     _mapping->assembler()->jmp(_dst->label);
00071   }
00072 };
00073 
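// Usage sketch (condensed from CodeGenerator::jcc() and generateMergeStubs() further
// down in this file): when a conditional branch targets a node whose mapping is not
// conformant with a copy of the current mapping, a stub is queued instead of merging
// in place:
//
//   PRegMapping* copy = new PRegMapping(_currentMapping);
//   if (isLiveRangeBoundary(from, to)) copy->killDeadsAt(to);
//   if (copy->isConformant(to->mapping())) {
//     _masm->jcc(cc, to->label);                            // no merge code needed
//   } else {
//     _mergeStubs.push(Stub::new_jcc_stub(copy, to, cc));   // emits jcc to the stub label
//   }
//
// Once the regular code has been emitted, generateMergeStubs() pops each stub and calls
// generateMergeStub(), which binds the stub label, makes the copied mapping conformant
// with the destination mapping, and jumps to the destination node's label.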
00074 
00075 class DebugInfoWriter: public PRegClosure {
00076  private:
00077   GrowableArray<PReg*>* _pregs;                 // maps index -> preg
00078   GrowableArray<int>*   _locations;             // previous preg location or illegalLocation
00079   GrowableArray<bool>*  _present;               // true if preg is currently present
00080 
00081   Location location_at(int i)                   { return Location(_locations->at(i)); }
00082   void location_at_put(int i, Location loc)     { _locations->at_put(i, loc._loc); }
00083 
00084  public:
00085   DebugInfoWriter(int number_of_pregs) {
00086     _pregs     = new GrowableArray<PReg*>(number_of_pregs, number_of_pregs, NULL                );
00087     _locations = new GrowableArray<int  >(number_of_pregs, number_of_pregs, illegalLocation._loc);
00088     _present   = new GrowableArray<bool >(number_of_pregs, number_of_pregs, false               );
00089   }
00090 
00091 
00092   void preg_do(PReg* preg) {
00093     if (preg->logicalAddress() != NULL && !preg->loc.isContextLocation()) {
00094       // record only debug-visible PRegs & ignore context PRegs
00095       // Note: ContextPRegs appear in the mapping only because
00096       //       their values might also be cached in a register.
00097       int i = preg->id();
00098       _pregs  ->at_put(i, preg);                // make sure preg is available
00099       _present->at_put(i, true);                // mark it as present
00100     }
00101   }
00102 
00103   
00104   void write_debug_info(PRegMapping* mapping, int pc_offset) {
00105     // record current pregs in mapping
00106     mapping->iterate(this);
00107     // determine changes & notify ScopeDescRecorder if necessary
00108     ScopeDescRecorder* rec = theCompiler->scopeDescRecorder();
00109     for (int i = _locations->length(); i-- > 0; ) {
00110       PReg*    preg    = _pregs->at(i);
00111       bool     present = _present->at(i);
00112       Location old_loc = location_at(i);
00113       Location new_loc = present ? mapping->locationFor(preg) : illegalLocation;
00114       if ((!present && old_loc != illegalLocation) ||   // preg not present anymore but has been there before
00115           ( present && old_loc == illegalLocation) ||   // preg present but has not been there before
00116           ( present && old_loc != new_loc)) {           // preg present but has changed location
00117         // preg location has changed => notify ScopeDescRecorder
00118         NameNode* nameNode;
00119         if (new_loc == illegalLocation) {
00120           nameNode = new IllegalName();
00121         } else {
00122           nameNode = new LocationName(new_loc);
00123         }
00124         // debugging
00125         if (PrintDebugInfoGeneration) {
00126           std->print_cr("%5d: %-20s @ %s", pc_offset, preg->name(), new_loc.name());
00127         }
00128         rec->changeLogicalAddress(preg->logicalAddress(), nameNode, pc_offset);
00129       }
00130       location_at_put(i, new_loc);              // record current location
00131       _present->at_put(i, false);               // mark as not present for next round
00132     }
00133   }
00134 
00135 
00136   void print() {
00137     std->print_cr("a DebugInfoWriter");
00138   }
00139 };
00140 
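// Usage sketch (pieced together from code further down in this file): the CodeGenerator
// constructor allocates one DebugInfoWriter sized by the PReg table, and
// updateDebuggingInfo() drives it for a node at a given pc offset:
//
//   ScopeDescRecorder* rec = theCompiler->scopeDescRecorder();
//   int pc_offset = assembler()->offset();
//   rec->addPcDesc(pc_offset, node->scope()->scopeInfo(), node->bci());
//   _debugInfoWriter->write_debug_info(_currentMapping, pc_offset);
//
// write_debug_info() then notifies the recorder only for those PRegs whose location
// changed since the previous pc offset.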
00141 
00142 // Implementation of CodeGenerator
00143 
00144 CodeGenerator::CodeGenerator(MacroAssembler* masm, PRegMapping* mapping) : _mergeStubs(16) {
00145   assert(masm == mapping->assembler(), "should be the same");
00146   PRegLocker::initialize();
00147   _masm            = masm;
00148   _currentMapping  = mapping;
00149   _debugInfoWriter = new DebugInfoWriter(bbIterator->pregTable->length());
00150   _maxNofStackTmps = 0;
00151   _previousNode    = NULL;
00152   _nilReg          = noreg;
00153   _pushCode        = NULL;
00154 }
00155 
00156 
00157 void CodeGenerator::setMapping(PRegMapping* mapping) {
00158   maxNofStackTmps(); // enforce adjustment of _maxNofStackTmps
00159   _currentMapping = mapping;
00160 }
00161 
00162 
00163 int CodeGenerator::maxNofStackTmps() {
00164   if (_currentMapping != NULL) {
00165     _maxNofStackTmps = max(_maxNofStackTmps, _currentMapping->maxNofStackTmps());
00166   }
00167   return _maxNofStackTmps;
00168 }
00169 
00170 
00171 Register CodeGenerator::def(PReg* preg) const {
00172   assert(!preg->isConstPReg(), "cannot assign to ConstPReg");
00173   assert(!preg->loc.isContextLocation(), "cannot assign into context yet");
00174   return _currentMapping->def(preg);
00175 }
00176 
00177 
00178 bool CodeGenerator::isLiveRangeBoundary(Node* a, Node* b) const {
00179   return a->scope() != b->scope() || a->bci() != b->bci();
00180 }
00181 
00182 
00183 void CodeGenerator::jmp(Node* from, Node* to, bool to_maybe_nontrivial) {
00184   // keep only PRegs that are still alive at dst
00185   if (from != NULL && isLiveRangeBoundary(from, to)) _currentMapping->killDeadsAt(to);
00186   // make mappings conformant if necessary
00187   if (to_maybe_nontrivial || (to->isMergeNode() && !to->isTrivial())) {
00188     // dst has more than one predecessor
00189     if (!to->hasMapping()) {
00190       // first jump to dst, use current mapping, must be injective
00191       _currentMapping->makeInjective();
00192       to->setMapping(_currentMapping);
00193     } else {
00194       // not the first mapping => make mapping conformant
00195       _currentMapping->makeConformant(to->mapping());
00196     }
00197   } else {
00198     // dst has exactly one predecessor => use current mapping
00199     assert(!to->hasMapping(), "more than one predecessor?");
00200     to->setMapping(_currentMapping);
00201   }
00202   _masm->jmp(to->label);
00203   setMapping(NULL);
00204 }
00205 
00206 
00207 void CodeGenerator::jcc(Assembler::Condition cc, Node* from, Node* to, bool to_maybe_nontrivial) {
00208   // make mappings conformant if necessary
00209   if (to_maybe_nontrivial || (to->isMergeNode() && !to->isTrivial())) {
00210     // dst has more than one predecessor
00211     if (!to->hasMapping()) {
00212       // first jump to dst, use current mapping, must be injective
00213       _currentMapping->makeInjective(); // may generate code => must be applied to current mapping
00214       PRegMapping* copy = new PRegMapping(_currentMapping);
00215       // eliminate PRegs that are not alive anymore at dst
00216       if (isLiveRangeBoundary(from, to)) copy->killDeadsAt(to);
00217       to->setMapping(copy);
00218       _masm->jcc(cc, to->label);
00219     } else {
00220       // not the first mapping
00221       PRegMapping* copy = new PRegMapping(_currentMapping);
00222       if (isLiveRangeBoundary(from, to)) copy->killDeadsAt(to);
00223       if (copy->isConformant(to->mapping())) {
00224         // everything ok, simply jump (must use a copy with dead PRegs removed for comparison)
00225         _masm->jcc(cc, to->label);
00226       } else {
00227         // must make mappings conformant, use stub routine
00228         _mergeStubs.push(Stub::new_jcc_stub(copy, to, cc));
00229       }
00230     }
00231   } else {
00232     // dst has exactly one predecessor
00233     assert(!to->hasMapping(), "more than one predecessor?");
00234     PRegMapping* copy = new PRegMapping(_currentMapping);
00235     if (isLiveRangeBoundary(from, to)) copy->killDeadsAt(to);
00236     to->setMapping(copy);
00237     _masm->jcc(cc, to->label);
00238   }
00239 }
00240 
00241 
00242 void CodeGenerator::bindLabel(Node* node) {
00243   if (_currentMapping == NULL) {
00244     // continue with mapping at node, live ranges already adjusted
00245     assert(node->hasMapping(), "must have a mapping");
00246     setMapping(node->mapping());
00247   } else {
00248     // current mapping exists
00249     if (!node->hasMapping()) {
00250       // node is reached for the first time
00251       if (node->isMergeNode() && !node->isTrivial()) {
00252         // more than one predecessor => store injective version of current mapping at node
00253         // (if only one predecessor => simply continue to use the current mapping)
00254         if (_previousNode != NULL && isLiveRangeBoundary(_previousNode, node)) _currentMapping->killDeadsAt(node);
00255         _currentMapping->makeInjective();
00256         node->setMapping(_currentMapping);
00257       }
00258     } else {
00259       // merge current mapping with node mapping
00260       if (_previousNode != NULL && isLiveRangeBoundary(_previousNode, node)) _currentMapping->killDeadsAt(node);
00261       _currentMapping->makeConformant(node->mapping());
00262       setMapping(node->mapping());
00263     }
00264   }
00265   assert(_currentMapping != NULL, "must have a mapping");
00266   _masm->bind(node->label);
00267 }
00268 
00269 
00270 void CodeGenerator::inlineCache(Node* call, MergeNode* nlrTestPoint, int flags) {
00271   assert(_currentMapping != NULL, "mapping must exist");
00272   assert(call->scope() == nlrTestPoint->scope(), "should be in the same scope");
00273   // make mappings conformant if necessary
00274   if (nlrTestPoint->isMergeNode() && !nlrTestPoint->isTrivial()) {
00275     // dst has more than one predecessor
00276     if (!nlrTestPoint->hasMapping()) {
00277       // first jump to dst, use current mapping, must be injective
00278       PRegMapping* copy = new PRegMapping(_currentMapping);
00279       if (isLiveRangeBoundary(call, nlrTestPoint)) copy->killDeadsAt(nlrTestPoint);
00280       assert(_currentMapping->isInjective(), "must be injective");
00281       copy->acquireNLRRegisters();
00282       nlrTestPoint->setMapping(copy);
00283       _masm->ic_info(nlrTestPoint->label, flags);
00284     } else {
00285       // not the first mapping
00286       PRegMapping* copy = new PRegMapping(_currentMapping);
00287       if (isLiveRangeBoundary(call, nlrTestPoint)) copy->killDeadsAt(nlrTestPoint);
00288       copy->acquireNLRRegisters();
00289       if (copy->isConformant(nlrTestPoint->mapping())) {
00290         // everything ok, simply jump (must use a copy with dead PRegs removed for comparison)
00291         _masm->ic_info(nlrTestPoint->label, flags);
00292       } else {
00293         // must make mappings conformant, use stub routine
00294         _mergeStubs.push(Stub::new_NLR_stub(copy, nlrTestPoint, flags));
00295       }
00296     }
00297   } else {
00298     // dst has exactly one predecessor
00299     assert(!nlrTestPoint->hasMapping(), "more than one predecessor?");
00300     PRegMapping* copy = new PRegMapping(_currentMapping);
00301     if (isLiveRangeBoundary(call, nlrTestPoint)) copy->killDeadsAt(nlrTestPoint);
00302     copy->acquireNLRRegisters();
00303     nlrTestPoint->setMapping(copy);
00304     _masm->ic_info(nlrTestPoint->label, flags);
00305   }
00306 }
00307 
00308 
00309 void CodeGenerator::generateMergeStubs() {
00310   char* start_pc = _masm->pc();
00311   while (_mergeStubs.nonEmpty()) _mergeStubs.pop()->generateMergeStub();
00312   if (PrintCodeGeneration && _masm->pc() > start_pc) {
00313     std->print("---\n");
00314     std->print("fixup merge stubs\n");
00315     _masm->code()->decode();
00316   }
00317 }
00318 
00319 
00320 // Code generation for statistical information on nmethods
00321 
00322 char* CodeGenerator::nmethodAddress() const {
00323   // hack to compute hypothetical nmethod address
00324   // should be fixed at some point
00325   return (char*)(((nmethod*) (_masm->code()->code_begin())) - 1);
00326 }
00327 
00328 
00329 void CodeGenerator::incrementInvocationCounter() {
00330   // Generates code to increment the nmethod execution counter
00331   char* addr = nmethodAddress() + nmethod::invocationCountOffset();
00332   _masm->incl(Address(int(addr), relocInfo::internal_word_type));
00333 }
00334 
00335 
00336 // Initialization / Finalization
00337 
00338 void CodeGenerator::initialize(InlinedScope* scope) {
00339   // This routine is called at the very beginning of code generation
00340   // for one nmethod (after creation of the CodeGenerator of course).
00341   // It sets up the initial mapping and thus makes sure that the correct
00342   // debugging information is written out before the code generation for
00343   // the PrologueNode.
00344 
00345   // setup arguments
00346   int i;
00347   for (i = 0; i < scope->nofArguments(); i++) {
00348     _currentMapping->mapToArgument(scope->argument(i)->preg(), i);
00349   }
00350   // setup temporaries (finalize() generates initialization code)
00351   for (i = 0; i < scope->nofTemporaries(); i++) {
00352     _currentMapping->mapToTemporary(scope->temporary(i)->preg(), i);
00353   }
00354   // setup receiver
00355   _currentMapping->mapToRegister(scope->self()->preg(), self_reg);
00356 }
00357 
00358 
00359 void CodeGenerator::finalize(InlinedScope* scope) {
00360   // first generate stubs if there are any
00361   generateMergeStubs();
00362 
00363   // patch 'initialize locals' code
00364   int n = maxNofStackTmps();
00365   int frame_size = 2 + n;       // return address and old ebp, plus stack temps
00366   // make sure frame is big enough for deoptimization
00367   if (frame_size < minimum_size_for_deoptimized_frame) {
00368     // add the difference to n
00369     n += minimum_size_for_deoptimized_frame - frame_size;
00370   }
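  // Worked example (with an illustrative value of 4 for minimum_size_for_deoptimized_frame):
  // a single stack temp gives n == 1 and frame_size == 3, so one extra slot is added and
  // n becomes 2, making the frame large enough to be converted into a deoptimized frame.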
00371 
00372   Assembler masm(_pushCode);
00373   if (_pushCode->code_begin() + n <= _pushCode->code_limit()) {
00374     while (n-- > 0) masm.pushl(_nilReg);
00375   } else {
00376     masm.jmp(_masm->pc(), relocInfo::none);
00377     while (n-- > 0) _masm->pushl(_nilReg);
00378     _masm->jmp(_pushCode->code_limit(), relocInfo::none);
00379   }
00380 
00381   // store nofCompilations at end of code for easier debugging
00382   if (CompilerDebug) _masm->movl(eax, nofCompilations);
00383 
00384   if (PrintCodeGeneration) {
00385     std->print("---\n");
00386     std->print("merge stubs\n");
00387     _masm->code()->decode();
00388     std->print("---\n");
00389   }
00390 }
00391 
00392 
00393 /*
00394 void CodeGenerator::finalize(InlinedScope* scope) {
00395   // This routine is called at the very end of code generation for one
00396   // nmethod; it provides the entry points & sets up the stack frame
00397   // (i.e., this is the first code executed when entering a nmethod).
00398   // Note: _currentMapping is not used since this is one fixed code pattern.
00399 
00400   // first generate stubs if there are any
00401   generateMergeStubs();
00402 
00403   // set unverified entry point
00404   _masm->align(oopSize);
00405   theCompiler->set_entry_point_offset(_masm->offset());
00406 
00407   // verify receiver
00408   if (scope->isMethodScope()) {
00409     // check class
00410     klassOop klass = scope->selfKlass();
00411     if (klass == smiKlassObj) {
00412       // receiver must be a smi, check smi tag only
00413       _masm->testl(self_reg, Mem_Tag);                  // testl instead of test => no alignment nop's needed later
00414       _masm->jcc(Assembler::notZero, CompiledIC::normalLookupRoutine());
00415     } else {
00416       assert(self_reg != temp1, "choose another register");
00417       _masm->testl(self_reg, Mem_Tag);                  // testl instead of test => no alignment nop's needed later
00418       _masm->jcc(Assembler::zero, CompiledIC::normalLookupRoutine());
00419       _masm->cmpl(Address(self_reg, memOopDesc::klass_byte_offset()), klass);
00420       _masm->jcc(Assembler::notEqual, CompiledIC::normalLookupRoutine());
00421     }
00422   } else {
00423     // If this is a block method and we expect a context
00424     // then the incoming context chain must be checked.
00425     // The context chain may contain a deoptimized contextOop.
00426     // (see StubRoutines::verify_context_chain for details)
00427     if (scope->method()->block_info() == methodOopDesc::expects_context) {
00428       const bool use_fast_check = false;
00429       if (use_fast_check) {
00430         // look in old backend for this code
00431         Unimplemented();
00432       } else {
00433         _masm->call(StubRoutines::verify_context_chain(), relocInfo::runtime_call_type);
00434       }
00435     }
00436   }
00437 
00438   // set verified entry point (for callers who know the receiver is correct)
00439   _masm->align(oopSize);
00440   theCompiler->set_verified_entry_point_offset(_masm->offset());
00441 
00442   // build stack frame & initialize locals
00443   _masm->enter();
00444   int n = maxNofStackTmps();
00445   int frame_size = 2 + n;       // return address, old ebp + stack temps
00446   // make sure frame is big enough for deoptimization
00447   if (frame_size < minimum_size_for_deoptimized_frame) {
00448     // add the difference to
00449     n += minimum_size_for_deoptimized_frame - frame_size;
00450   }
00451   if (n == 1) {
00452     _masm->pushl(nilObj);
00453   } else if (n > 1) {
00454     _masm->movl(temp1, nilObj);
00455     while (n-- > 0) _masm->pushl(temp1);
00456   }
00457 
00458   // increment invocation counter & check for overflow (trigger recompilation)
00459   Label recompile_stub_call;
00460   if (RecompilationPolicy::needRecompileCounter(theCompiler)) {
00461     char* addr = nmethodAddress() + nmethod::invocationCountOffset();
00462     _masm->movl(temp1, Address(int(addr), relocInfo::internal_word_type));
00463     _masm->incl(temp1);
00464     _masm->cmpl(temp1, theCompiler->get_invocation_counter_limit());
00465     _masm->movl(Address(int(addr), relocInfo::internal_word_type), temp1);
00466     _masm->jcc(Assembler::greaterEqual, recompile_stub_call);
00467     //
00468     // need to fix this:
00469     // 1. put call to recompiler at end (otherwise we cannot provide debugging info easily)
00470     // 2. check if everything is still ok (e.g. does the recompiler call ever return? if not, no jump needed)
00471     // 3. check recompiler call stub routine (should not setup stack frame because registers cannot be seen!) - seems to be fixed
00472   }
00473 
00474   // jump to start of code
00475 
00476   // call to recompiler - if the nmethod turns zombie, this will be overwritten by a call to the zombie handler
00477   // (see also comment in nmethod)
00478   _masm->bind(recompile_stub_call);
00479   // write debug info
00480   theCompiler->set_special_handler_call_offset(theMacroAssm->offset());
00481   _masm->call(StubRoutines::recompile_stub_entry(), relocInfo::runtime_call_type);
00482 
00483   
00484   // store nofCompilations at end of code for easier debugging
00485   if (CompilerDebug) _masm->movl(eax, nofCompilations);
00486 
00487   if (PrintCodeGeneration) {
00488     std->print("---\n");
00489     std->print("entry point\n");
00490     _masm->code()->decode();
00491     std->print("---\n");
00492   }
00493 }
00494 */
00495 
00496 
00497 void CodeGenerator::zapContext(PReg* context) {
00498   _masm->movl(Address(use(context), contextOopDesc::parent_byte_offset()), 0);
00499 }
00500 
00501 
00502 void CodeGenerator::storeCheck(Register obj) {
00503   // Does a store check for the oop in register obj.
00504   //
00505   // Note: Could be optimized by hardwiring the byte map base address
00506   // in the code - however relocation would be necessary whenever the
00507   // base changes. Advantage: only one instead of two instructions.
00508   Temporary base(_currentMapping);
00509   Temporary indx(_currentMapping);
00510   _masm->movl(base.reg(), Address(int(&byte_map_base), relocInfo::external_word_type));
00511   _masm->movl(indx.reg(), obj);                                         // do not destroy obj (a preg may be mapped to it)
00512   _masm->shrl(indx.reg(), card_shift);                                  // divide obj by card_size
00513   _masm->movb(Address(base.reg(), indx.reg(), Address::times_1), 0);    // clear entry
00514 }
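// Sketch of the one-instruction variant mentioned in the note above (hypothetical code;
// the exact Address form for a register-plus-constant-displacement operand is assumed,
// not taken from this file): the card table base would be folded into the store as a
// constant displacement,
//
//   _masm->shrl(indx.reg(), card_shift);
//   _masm->movb(Address(indx.reg(), (int)byte_map_base), 0);   // hypothetical addressing form
//
// saving the load of base.reg() at the cost of relocating the embedded base address
// whenever byte_map_base changes.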
00515 
00516 
00517 void CodeGenerator::assign(PReg* dst, PReg* src, bool needsStoreCheck) {
00518   PRegLocker lock(src);         // make sure src stays in register if it's in a register
00519   enum { is_const, is_loaded, is_mapped, is_undefined } state = is_undefined;
00520   oop value;                    // valid if state == is_const
00521   Register reg;                 // valid if state == is_loaded
00522   PReg* preg;                   // valid if state == is_mapped
00523 
00524   { Temporary t1(_currentMapping, NLR_result_reg);
00525     if (t1.reg() != NLR_result_reg) {
00526       reg = t1.reg();
00527     } else {
00528       Temporary t2(_currentMapping);
00529       reg = t2.reg();
00530     }
00531   }
00532   Temporary t(_currentMapping, reg);
00533   assert(reg != NLR_result_reg, "fix this");
00534   assert(t.reg() == reg, "should be the same");
00535 
00536   // get/load source
00537   if (src->isConstPReg()) {
00538     value = ((ConstPReg*)src)->constant;
00539     state = is_const;
00540   } else if (src->loc == resultOfNLR) {
00541     _currentMapping->mapToRegister(src, NLR_result_reg);
00542     preg = src;
00543     state = is_mapped;
00544   } else if (src->loc.isContextLocation()) {
00545     PReg* context = theCompiler->contextList->at(src->loc.contextNo())->context();
00546     Address addr = Address(use(context), Mapping::contextOffset(src->loc.tempNo()));
00547     _masm->movl(reg, addr);
00548     state = is_loaded;
00549   } else {
00550     assert(!src->loc.isSpecialLocation(), "what's this?");
00551     preg = src;
00552     state = is_mapped;
00553   }
00554 
00555   // map/store to dest
00556   if (dst->loc == topOfStack) {
00557     switch (state) {
00558       case is_const : _masm->pushl(value);              break;
00559       case is_loaded: _masm->pushl(reg);                break;
00560       case is_mapped: _masm->pushl(use(preg));          break;
00561       default       : ShouldNotReachHere();
00562     }
00563   } else if (dst->loc.isContextLocation()) {
00564     PReg* context = theCompiler->contextList->at(dst->loc.contextNo())->context();
00565     PRegLocker lock(context);
00566     Address addr = Address(use(context), Mapping::contextOffset(dst->loc.tempNo()));
00567     switch (state) {
00568       case is_const : _masm->movl(addr, value);         break;
00569       case is_loaded: _masm->movl(addr, reg);           break;
00570       case is_mapped: _masm->movl(addr, use(preg));     break;
00571       default       : ShouldNotReachHere();
00572     }
00573     if (needsStoreCheck) storeCheck(use(context));
00574   } else {
00575     assert(!dst->loc.isSpecialLocation(), "what's this?");
00576     switch (state) {
00577       case is_const : _masm->movl(def(dst), value);     break;
00578       case is_loaded: _masm->movl(def(dst), reg);       break;
00579       case is_mapped: _currentMapping->move(dst, preg); break;
00580       default       : ShouldNotReachHere();
00581     }
00582   }
00583 }
00584 
00585 
00586 // Debugging
00587 
00588 static int _callDepth           = 0;
00589 static int _numberOfCalls       = 0;
00590 static int _numberOfReturns     = 0;
00591 static int _numberOfNLRs        = 0;
00592 
00593 
00594 void CodeGenerator::indent() {
00595   const int maxIndent = 40;
00596   if (_callDepth <= maxIndent) {
00597     std->print("%*s", _callDepth, "");
00598   } else {
00599     std->print("%*d: ", maxIndent-2, _callDepth);
00600   }
00601 }
00602 
00603 
00604 char* CodeGenerator::nmethodName() {
00605   deltaVFrame* f = DeltaProcess::active()->last_delta_vframe();
00606   return f->method()->selector()->as_string();
00607 }
00608 
00609 
00610 void CodeGenerator::verifyObj(oop obj) {
00611   if (!obj->is_smi() && !obj->is_mem()) fatal("should be an ordinary oop");
00612   klassOop klass = obj->klass();
00613   if (klass == NULL || !klass->is_mem()) fatal("should be an ordinary memOop");
00614   if (obj->is_block()) blockClosureOop(obj)->verify();
00615 }
00616 
00617 
00618 void CodeGenerator::verifyContext(oop obj) {
00619   if (obj->is_mark()) error("context should never be mark");
00620   if (!Universe::is_heap((oop*)obj)) error("context outside of heap");
00621   if (!obj->is_context()) error("should be a context");
00622   oop c = (oop)(contextOop(obj)->parent());
00623   if (c->is_mem()) verifyContext(c);
00624 }
00625 
00626 
00627 void CodeGenerator::verifyArguments(oop recv, oop* ebp, int nofArgs) {
00628   bool print_args_long = true;
00629   ResourceMark rm;
00630   _numberOfCalls++;
00631   _callDepth++;
00632   if (TraceCalls) { 
00633     ResourceMark rm;
00634     indent(); std->print("( %s %s ", recv->print_value_string(), nmethodName());
00635   }
00636   verifyObj(recv);
00637   int i = nofArgs;
00638   oop* arg = ebp + (nofArgs + 2);
00639   while (i-- > 0) {
00640     arg--;
00641     verifyObj(*arg);
00642     if (TraceCalls) {
00643       ResourceMark rm;
00644       if (print_args_long || (*arg)->is_smi()) {
00645         std->print("%s ", (*arg)->print_value_string());
00646       } else {
00647         std->print("0x%x ", *arg);
00648       }
00649     }
00650   }
00651   if (TraceCalls) std->cr();
00652   if (VerifyDebugInfo) { 
00653     deltaVFrame* f = DeltaProcess::active()->last_delta_vframe();
00654     while (f != NULL) {
00655       f->verify_debug_info();
00656       f = f->sender_delta_frame();
00657     }
00658   }
00659 }
00660 
00661 
00662 void CodeGenerator::verifyReturn(oop result) {
00663   _numberOfReturns++;
00664   result->verify();
00665   if (TraceCalls) {
00666     ResourceMark rm;
00667     indent(); std->print(") %s -> %s\n", nmethodName(), result->print_value_string());
00668   }
00669   _callDepth--;
00670 }
00671 
00672 
00673 void CodeGenerator::verifyNLR(char* fp, char* nlrFrame, int nlrScopeID, oop result) {
00674   _numberOfNLRs++;
00675   LOG_EVENT3("verifyNLR(%#x, %#x, %#x)", fp, nlrFrame, result);
00676   if (nlrFrame <= fp) error("NLR went too far: %#x <= %#x", nlrFrame, fp);
00677   // treat >99 scopes as likely error -- might actually be ok
00678   if (nlrScopeID < 0 || nlrScopeID > 99) error("illegal NLR scope ID %#x", nlrScopeID);
00679   if (result->is_mark()) error("NLR result is a markOop");
00680   result->verify();
00681   if (TraceCalls) {
00682     ResourceMark rm;
00683     indent(); std->print(") %s  ^ %s\n", nmethodName(), result->print_value_string());
00684   }
00685   _callDepth--;
00686 }
00687 
00688 
00689 void CodeGenerator::callVerifyObj(Register obj) {
00690   // generates transparent check code which verifies that obj is
00691   // a legal oop and halts if not - for debugging purposes only
00692   if (!VerifyCode) warning(": verifyObj should not be called");
00693   _masm->pushad();
00694   _masm->call_C((char*)CodeGenerator::verifyObj, obj);
00695   _masm->popad();
00696 }
00697 
00698 
00699 void CodeGenerator::callVerifyContext(Register context) {
00700   // generates transparent check code which verifies that context is
00701   // a legal context and halts if not - for debugging purposes only
00702   if (!VerifyCode) warning(": verifyContext should not be called");
00703   _masm->pushad();
00704   _masm->call_C((char*)CodeGenerator::verifyContext, context);
00705   _masm->popad();
00706 }
00707 
00708 
00709 void CodeGenerator::callVerifyArguments(Register recv, int nofArgs) {
00710   // generates transparent check code which verifies that all arguments
00711   // are legal oops and halts if not - for debugging purposes only
00712   if (!VerifyCode && !TraceCalls && !TraceResults) warning(": performance bug: verifyArguments should not be called");
00713   assert(recv != temp1, "use another temporary register");
00714   _masm->pushad();
00715   _masm->movl(temp1, nofArgs);
00716   _masm->call_C((char*)CodeGenerator::verifyArguments, recv, ebp, temp1);
00717   _masm->popad();
00718 }
00719 
00720 
00721 void CodeGenerator::callVerifyReturn() {
00722   // generates transparent check code which verifies that result contains
00723   // a legal oop and halts if not - for debugging purposes only
00724   if (!VerifyCode && !TraceCalls && !TraceResults) warning(": verifyReturn should not be called");
00725   _masm->pushad();
00726   _masm->call_C((char*)CodeGenerator::verifyReturn, result_reg);
00727   _masm->popad();
00728 }
00729 
00730 
00731 void CodeGenerator::callVerifyNLR() {
00732   // generates transparent check code which verifies NLR check & continuation
00733   if (!VerifyCode && !TraceCalls && !TraceResults) warning(": verifyNLR should not be called");
00734   _masm->pushad();
00735   _masm->call_C((char*)CodeGenerator::verifyNLR, ebp, NLR_home_reg, NLR_homeId_reg, NLR_result_reg);
00736   _masm->popad();
00737 }
00738 
00739 
00740 // Basic blocks
00741 
00742 static bool bb_needs_jump;
00743 // true if basic block needs a jump at the end to its successor, false otherwise
00744 // Note: most gen() nodes with more than one successor are implemented such that
00745 //       next() is the fall-through case. If that's not the case, an extra jump
00746 //       has to be generated (via endOfBasicBlock()). However, some of the nodes
00747 //       do explicit jumps to all successors to accommodate arbitrary node
00748 //       reordering, in which case they may set the flag to false (it is auto-
00749 //       matically set to true for each node).
00750 //
00751 // This flag should go away as soon as all nodes with more than one exit are
00752 // implemented correctly (i.e., do all the jumping themselves).
00753 
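// Illustrative sketch (simplified, not code from this file) of how the flag travels
// through the code generation of the last node in a basic block:
//
//   beginOfNode(node);             // sets bb_needs_jump = true (see below)
//   /* node-specific code */       // a node that emits explicit jumps to all of its
//                                  // successors may reset bb_needs_jump = false
//   endOfBasicBlock(node);         // if the flag is still set and node->next() != NULL,
//                                  // a fall-through jmp(node, node->next()) is emitted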
00754 
00755 void CodeGenerator::beginOfBasicBlock(Node* node) {
00756   if (PrintCodeGeneration && WizardMode) std->print("--- begin of basic block (N%d) ---\n", node->id());
00757   bindLabel(node);
00758 }
00759 
00760 
00761 void CodeGenerator::endOfBasicBlock(Node* node) {
00762   if (bb_needs_jump && node->next() != NULL) {
00763     Node* from = node;
00764     Node* to   = node->next();
00765     if (PrintCodeGeneration) {
00766       std->print("branch from N%d to N%d\n", from->id(), to->id());
00767       if (PrintPRegMapping) _currentMapping->print();
00768     }
00769     jmp(from, to);
00770     _previousNode = NULL;
00771     if (PrintCodeGeneration) _masm->code()->decode();
00772   }
00773 
00774   if (PrintCodeGeneration && WizardMode) std->print("--- end of basic block (N%d) ---\n", node->id());
00775 }
00776 
00777 
00778 void CodeGenerator::updateDebuggingInfo(Node* node) {
00779   ScopeDescRecorder* rec = theCompiler->scopeDescRecorder();
00780   int pc_offset = assembler()->offset();
00781   rec->addPcDesc(pc_offset, node->scope()->scopeInfo(), node->bci());
00782   _debugInfoWriter->write_debug_info(_currentMapping, pc_offset);
00783 }
00784 
00785 
00786 // For all nodes
00787 void CodeGenerator::beginOfNode(Node* node) {
00788   assert(_currentMapping != NULL, "must have a valid mapping");
00789   // adjust mapping to liveness of PRegs
00790   if (_previousNode != NULL && isLiveRangeBoundary(_previousNode, node)) _currentMapping->killDeadsAt(node);
00791   _currentMapping->cleanupContextReferences();
00792   // adjust debugging information if desired (e.g., when using disassembler with full symbolic support)
00793   if (GenerateFullDebugInfo) updateDebuggingInfo(node);
00794   // debugging
00795   if (PrintCodeGeneration) {
00796     std->print("---\n");
00797     std->print("N%d: ", node->id());
00798     node->print();
00799     std->print(" (bci = %d)\n", node->bci());
00800     if (PrintPRegMapping) _currentMapping->print();
00801   }
00802   bb_needs_jump = true;
00803 };
00804 
00805 
00806 void CodeGenerator::endOfNode(Node* node) {
00807   if (PrintCodeGeneration) _masm->code()->decode();
00808   // if _currentMapping == NULL there's no previous node & the next node will be
00809   // reached via a jump and its mapping is already set up the right way
00810   // (i.e., no PReg killing required => set _previousNode to NULL)
00811   _previousNode = _currentMapping == NULL ? NULL : node;
00812 };
00813 
00814 
00815 // Individual nodes
00816 
00817 void CodeGenerator::aPrologueNode(PrologueNode* node) {
00818   // set unverified entry point
00819   _masm->align(oopSize);
00820   theCompiler->set_entry_point_offset(_masm->offset());
00821 
00822   // verify receiver
00823   InlinedScope* scope = node->scope();
00824   PReg* recv = scope->self()->preg();
00825   PRegLocker lock(recv);
00826   if (scope->isMethodScope()) {
00827     // check class
00828     klassOop klass = scope->selfKlass();
00829     if (klass == smiKlassObj) {
00830       // receiver must be a smi, check smi tag only
00831       _masm->test(use(recv), Mem_Tag);
00832       _masm->jcc(Assembler::notZero, CompiledIC::normalLookupRoutine());
00833     } else {
00834       _masm->test(use(recv), Mem_Tag);
00835       _masm->jcc(Assembler::zero, CompiledIC::normalLookupRoutine());
00836       _masm->cmpl(Address(use(recv), memOopDesc::klass_byte_offset()), klass);
00837       _masm->jcc(Assembler::notEqual, CompiledIC::normalLookupRoutine());
00838     }
00839   } else {
00840     // If this is a block method and we expect a context
00841     // then the incoming context chain must be checked.
00842     // The context chain may contain a deoptimized contextOop.
00843     // (see StubRoutines::verify_context_chain for details)
00844     if (scope->method()->block_info() == methodOopDesc::expects_context) {
00845       const bool use_fast_check = false;
00846       if (use_fast_check) {
00847         // look in old backend for this code
00848         Unimplemented();
00849       } else {
00850         _masm->call(StubRoutines::verify_context_chain(), relocInfo::runtime_call_type);
00851       }
00852     }
00853   }
00854 
00855   // set verified entry point (for callers who know the receiver is correct)
00856   _masm->align(oopSize);
00857   theCompiler->set_verified_entry_point_offset(_masm->offset());
00858 
00859   // build stack frame & initialize locals
00860   _masm->enter();
00861   { Temporary t(_currentMapping);
00862     _masm->movl(t.reg(), Universe::nilObj());
00863     _nilReg = t.reg();
00864     char* beg = _masm->pc();
00865     int i = 10;
00866     while (i-- > 0) _masm->nop();
00867     char* end = _masm->pc();
00868     _pushCode = new CodeBuffer(beg, end - beg);
00869   }
00870 
00871   if (scope->isBlockScope()) {
00872     // initialize context for blocks; recv is block closure => get context out of it
00873     // and store it in self & temp0 (which holds the context in the interpreter model).
00874     // Note: temp0 has been mapped to the stack when setting up temporaries.
00875     assert(scope->context() == scope->temporary(0)->preg(), "should be the same");
00876     Register reg = use(recv);
00877     _masm->movl(def(recv), Address(reg, blockClosureOopDesc::context_byte_offset()));
00878     assign(scope->context(), recv);
00879   }
00880   // debugging
00881   if (VerifyCode || VerifyDebugInfo || TraceCalls) callVerifyArguments(use(recv), scope->method()->number_of_arguments());
00882 
00883   // increment invocation counter & check for overflow (trigger recompilation)
00884   Label recompile_stub_call;
00885   if (RecompilationPolicy::needRecompileCounter(theCompiler)) {
00886     char* addr = nmethodAddress() + nmethod::invocationCountOffset();
00887     _masm->movl(temp1, Address(int(addr), relocInfo::internal_word_type));
00888     _masm->incl(temp1);
00889     _masm->cmpl(temp1, theCompiler->get_invocation_counter_limit());
00890     _masm->movl(Address(int(addr), relocInfo::internal_word_type), temp1);
00891     _masm->jcc(Assembler::greaterEqual, recompile_stub_call);
00892     //
00893     // need to fix this:
00894     // 1. put call to recompiler at end (otherwise we cannot provide debugging info easily)
00895     // 2. check if everything is still ok (e.g. does the recompiler call ever return? if not, no jump needed)
00896     // 3. check recompiler call stub routine (should not setup stack frame because registers cannot be seen!) - seems to be fixed
00897   }
00898   Label start;
00899   _masm->jmp(start);
00900 
00901   // call to recompiler - if the nmethod turns zombie, this will be overwritten
00902   // by a call to the zombie handler (see also comment in nmethod)
00903   _masm->bind(recompile_stub_call);
00904   updateDebuggingInfo(node);
00905   theCompiler->set_special_handler_call_offset(theMacroAssm->offset());
00906   _masm->call(StubRoutines::recompile_stub_entry(), relocInfo::runtime_call_type);
00907 
00908   _masm->bind(start);
00909 }
00910 
00911 
00912 void CodeGenerator::aLoadIntNode(LoadIntNode* node) {
00913   _masm->movl(def(node->dst()), node->value());
00914 }
00915 
00916 
00917 void CodeGenerator::aLoadOffsetNode(LoadOffsetNode* node) {
00918   PRegLocker lock(node->base(), node->dst());
00919   _masm->movl(def(node->dst()), Address(use(node->base()), byteOffset(node->offset)));
00920 }
00921 
00922 
00923 int CodeGenerator::byteOffset(int offset) {
00924   // Computes the byte offset from the beginning of an oop
00925   assert(offset >= 0, "wrong offset");
00926   return offset*oopSize - Mem_Tag;
00927 }
00928 
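// Example: assuming oopSize == 4 and Mem_Tag == 1 (the usual constants of this 32-bit
// tagged-pointer scheme), byteOffset(2) == 2*4 - 1 == 7, i.e. the displacement of the
// third word of an object relative to its tagged oop.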
00929 
00930 void CodeGenerator::uplevelBase(PReg* startContext, int nofLevels, Register base) {
00931   // Compute uplevel base into register base; nofLevels is number of indirections (0 = in this context).
00932   _masm->movl(base, use(startContext));
00933   if (VerifyCode) callVerifyContext(base);
00934   while (nofLevels-- > 0) _masm->movl(base, Address(base, contextOopDesc::parent_byte_offset()));
00935 }
00936 
00937 
00938 void CodeGenerator::aLoadUplevelNode(LoadUplevelNode* node) {
00939   PRegLocker lock(node->context0());
00940   Temporary base(_currentMapping);
00941   uplevelBase(node->context0(), node->nofLevels(), base.reg());
00942   Register dst = def(node->dst());
00943   _masm->movl(dst, Address(base.reg(), byteOffset(node->offset())));
00944   if (VerifyCode) callVerifyObj(dst);
00945 }
00946 
00947 
00948 void CodeGenerator::anAssignNode(AssignNode* node) {
00949   assign(node->dst(), node->src());
00950 }
00951 
00952 
00953 void CodeGenerator::aStoreOffsetNode(StoreOffsetNode* node) {
00954   PRegLocker lock(node->base(), node->src());
00955   Register base = use(node->base());
00956   _masm->movl(Address(base, byteOffset(node->offset())), use(node->src()));
00957   if (node->needsStoreCheck()) storeCheck(base);
00958 }
00959 
00960 
00961 void CodeGenerator::aStoreUplevelNode(StoreUplevelNode* node) {
00962   PRegLocker lock(node->context0(), node->src());
00963   Temporary base(_currentMapping);
00964   uplevelBase(node->context0(), node->nofLevels(), base.reg());
00965   _masm->movl(Address(base.reg(), byteOffset(node->offset())), use(node->src()));
00966   if (node->needsStoreCheck()) storeCheck(base.reg());
00967 }
00968 
00969 
00970 void CodeGenerator::moveConstant(ArithOpCode op, PReg*& x, PReg*& y, bool& x_attr, bool& y_attr) {
00971   if (x->isConstPReg() && ArithOpIsCommutative[op]) {
00972     PReg* t1 = x     ; x      = y     ; y      = t1;
00973     bool  t2 = x_attr; x_attr = y_attr; y_attr = t2;
00974   }
00975 }
00976 
00977 
00978 void CodeGenerator::arithRROp(ArithOpCode op, Register x, Register y) { // x := x op y
00979   assert(Int_Tag == 0, "check this code");
00980   switch (op) {
00981     case TestArithOp  : _masm->testl(x, y);             break;
00982     case tAddArithOp  : // fall through
00983     case  AddArithOp  : _masm->addl(x, y);              break;
00984     case tSubArithOp  : // fall through
00985     case  SubArithOp  : _masm->subl(x, y);              break;
00986     case tMulArithOp  : _masm->sarl(x, Tag_Size);       // untag x, then fall through
00987     case  MulArithOp  : _masm->imull(x, y);             break;
00988     case tDivArithOp  : // fall through
00989     case  DivArithOp  : Unimplemented();                break;
00990     case tModArithOp  : // fall through
00991     case  ModArithOp  : Unimplemented();                break;
00992     case tAndArithOp  : // fall through
00993     case  AndArithOp  : _masm->andl(x, y);              break;
00994     case tOrArithOp   : // fall through
00995     case  OrArithOp   : _masm->orl(x, y);               break;
00996     case tXOrArithOp  : // fall through
00997     case  XOrArithOp  : _masm->xorl(x, y);              break;
00998     case tShiftArithOp: Unimplemented();
00999     case  ShiftArithOp: Unimplemented();
01000     case tCmpArithOp  : // fall through
01001     case  CmpArithOp  : _masm->cmpl(x, y);              break;
01002     default: ShouldNotReachHere();
01003   }
01004 }
01005 
01006 
01007 void CodeGenerator::arithRCOp(ArithOpCode op, Register x, int y) { // x := x op y
01008   assert(Int_Tag == 0, "check this code");
01009   switch (op) {
01010     case TestArithOp  : _masm->testl(x, y);             break;
01011     case tAddArithOp  : // fall through
01012     case  AddArithOp  :
01013       if (y == 0) {
01014         warning("code generated to add 0 (no load required)");
01015       } else {
01016         _masm->addl(x, y);
01017       }
01018       break;
01019     case tSubArithOp  : // fall through
01020     case  SubArithOp  :
01021       if (y == 0) {
01022         warning("code generated to subtract 0 (no load required)");
01023       } else {
01024         _masm->subl(x, y);
01025       }
01026       break;
01027     case tMulArithOp  : y = arithmetic_shift_right(y, Tag_Size);   // untag y, then fall through
01028     case  MulArithOp  :
01029       // catch a few trivial cases (since certain optimizations happen
01030       // after inlining of primitives, these cases cannot be handled in
01031       // the primitive inliner alone => phase ordering problem).
01032       // Note that overflow check must still remain possible (i.e.,
01033       // cannot easily substitute *4 with 2 adds without saving CC).
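      // For example, x*4 cannot simply be rewritten as two addl(x, x) instructions:
      // the condition codes would then reflect only the second add, so an overflow in
      // the first add could go unnoticed by a later overflow check; imull keeps such a
      // check valid except in the trivial cases handled below.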
01034       switch (y) {
01035         case -1:
01036           _masm->negl(x);
01037           break;
01038         case  0:
01039           warning("code generated to multiply with 0 (no load required)");
01040           _masm->xorl(x, x);
01041           break;
01042         case  1:
01043           warning("code generated to multiply with 1 (no load required)");
01044           // do nothing
01045           break;
01046         case  2:
01047           _masm->addl(x, x);
01048           break;
01049         default:
01050           _masm->imull(x, x, y);
01051           break;
01052       }
01053       break;
01054     case tDivArithOp  : // fall through
01055     case  DivArithOp  : Unimplemented();                break;
01056     case tModArithOp  : // fall through
01057     case  ModArithOp  : Unimplemented();                break;
01058     case tAndArithOp  : // fall through
01059     case  AndArithOp  : _masm->andl(x, y);              break;
01060     case tOrArithOp   : // fall through
01061     case  OrArithOp   : _masm->orl(x, y);               break;
01062     case tXOrArithOp  : // fall through
01063     case  XOrArithOp  : _masm->xorl(x, y);              break;
01064     case tShiftArithOp:
01065       if (y < 0) {
01066         // shift right
01067         int shift_count = ((-y) >> Tag_Size) % 32;
01068         _masm->sarl(x, shift_count);
01069         _masm->andl(x, -1 << Tag_Size);                 // clear Tag bits
01070       } else if (y > 0) {
01071         // shift left
01072         int shift_count = ((+y) >> Tag_Size) % 32;
01073         _masm->shll(x, shift_count);
01074       }
01075       break;
01076     case  ShiftArithOp: Unimplemented();
01077     case tCmpArithOp  : // fall through
01078     case  CmpArithOp  : _masm->cmpl(x, y);              break;
01079     default: ShouldNotReachHere();
01080   }
01081 }
01082 
01083 
01084 void CodeGenerator::arithROOp(ArithOpCode op, Register x, oop y) { // x := x op y
01085   assert(!y->is_smi(), "check this code");
01086   switch (op) {
01087     case  CmpArithOp  : _masm->cmpl(x, y);              break;
01088     default           : ShouldNotReachHere();
01089   }
01090 }
01091 
01092 
01093 void CodeGenerator::arithRXOp(ArithOpCode op, Register x, oop y) { // x := x op y
01094   if (y->is_smi()) {
01095     arithRCOp(op, x, int(y));                           // y is smiOop -> needs no relocation info
01096   } else {
01097     arithROOp(op, x, y);
01098   }
01099 }
01100 
01101 
01102 bool CodeGenerator::producesResult(ArithOpCode op) {
01103   return (op != TestArithOp) && (op != CmpArithOp) && (op != tCmpArithOp);
01104 }
01105 
01106 
01107 Register CodeGenerator::targetRegister(ArithOpCode op, PReg* z, PReg* x) {
01108   assert(PRegLocker::locks(z) && PRegLocker::locks(x), "should be locked");
01109   Register reg = noreg;
01110   if (producesResult(op)) {
01111     // result produced -> use a copy of x as register for z
01112     Register x_reg = use(x);
01113     // x is guaranteed to be in a register
01114     if (_currentMapping->onStack(x)) {
01115       // x is also on stack -> release register location from mapping and use it as copy
01116       _currentMapping->killRegister(x);
01117       reg = _currentMapping->def(z, x_reg);
01118     } else {
01119       // preg is only in register -> need to allocate a new register & copy it explicitly
01120       reg = def(z);
01121       _masm->movl(reg, x_reg);
01122     }
01123   } else {
01124     // no result produced -> can use x directly as register for z
01125     reg = use(x);
01126   }
01127   return reg;
01128 }
01129 
01130 
01131 void CodeGenerator::anArithRRNode(ArithRRNode* node) {
01132   ArithOpCode op = node->op();
01133   PReg* z = node->dst();
01134   PReg* x = node->src();
01135   PReg* y = node->operand();
01136   bool dummy;
01137   moveConstant(op, x, y, dummy, dummy);
01138   PRegLocker lock(z, x, y);
01139   Register reg = targetRegister(op, z, x);
01140   if (y->isConstPReg()) {
01141     arithRXOp(op, reg, ((ConstPReg*)y)->constant);
01142   } else {
01143     arithRROp(op, reg, use(y));
01144   }
01145 }
01146 
01147 
01148 void CodeGenerator::anArithRCNode(ArithRCNode* node) {
01149   ArithOpCode op = node->op();
01150   PReg* z = node->dst();
01151   PReg* x = node->src();
01152   int   y = node->operand();
01153   PRegLocker lock(z, x);
01154   Register reg = targetRegister(op, z, x);
01155   arithRCOp(op, reg, y);
01156 }
01157 
01158 
01159 void CodeGenerator::aTArithRRNode(TArithRRNode* node) {
01160   ArithOpCode op = node->op();
01161   PReg* z = node->dst();
01162   PReg* x = node->src();
01163   PReg* y = node->operand();
01164   bool x_is_int = node->arg1IsInt();
01165   bool y_is_int = node->arg2IsInt();
01166   moveConstant(op, x, y, x_is_int, y_is_int);
01167   PRegLocker lock(z, x, y);
01168   Register tags = noreg;
01169   if (x_is_int) {
01170     if (y_is_int) {
01171       // both x & y are smis => no tag check necessary
01172     } else {
01173       // x is smi => check y
01174       tags = use(y);
01175     }
01176   } else {
01177     if (y_is_int) {
01178       // y is smi => check x
01179       tags = use(x);
01180     } else {
01181       // check both x & y
01182       Temporary t(_currentMapping);
01183       tags = t.reg();
01184       _masm->movl(tags, use(x));
01185       _masm->orl (tags, use(y));
01186     }
01187   }
01188   if (tags != noreg) {
01189     // check tags
01190     _masm->test(tags, Mem_Tag);
01191     jcc(Assembler::notZero, node, node->next(1));
01192   }
01193   Register reg = targetRegister(op, z, x);
01194   if (y->isConstPReg()) {
01195     arithRXOp(op, reg, ((ConstPReg*)y)->constant);
01196   } else {
01197     arithRROp(op, reg, use(y));
01198   }
01199 }
01200 
01201 
01202 void CodeGenerator::aFloatArithRRNode(FloatArithRRNode* node) {
01203   Unimplemented();
01204 }
01205 
01206 
01207 void CodeGenerator::aFloatUnaryArithNode(FloatUnaryArithNode* node) {
01208   Unimplemented();
01209 }
01210 
01211 
01212 void CodeGenerator::aContextCreateNode(ContextCreateNode* node) {
01213   // node->dst() has been pre-allocated (temp0) in the prologue node -> remove it from
01214   // mapping. Note that in cases where there's an incoming context (which serves as parent (node->src())),
01215   // node->src() and node->dst() differ because the NodeBuilder allocates a new SAPReg in this case.
01216   assert(node->src() != node->dst(), "should not be the same");
01217   assert(node->dst() == node->scope()->context(), "should assign to scope context");
01218   _currentMapping->kill(node->dst());           // kill it so that aPrimNode(node) can map the result to it
01219   switch (node->sizeOfContext()) {
01220     case 0 : // fall through for now - fix this
01221     case 1 : // fall through for now - fix this
01222     case 2 : // fall through for now - fix this
01223     default:
01224       _masm->pushl(int(as_smiOop(node->nofTemps())));
01225       aPrimNode(node);
01226       _masm->addl(esp, oopSize);        // pop argument, this is not a Pascal call - change this at some point?
01227       break;
01228   }
01229   PRegLocker lock(node->dst());         // once loaded, make sure context stays in register
01230   Register context_reg = use(node->dst());
01231   if (node->src() == NULL) {
01232     assert(node->scope()->isMethodScope() || node->scope()->method()->block_info() == methodOopDesc::expects_nil, "inconsistency");
01233     _masm->movl(Address(context_reg, contextOopDesc::parent_byte_offset()), NULL);
01234     // NULL for now; the interpreter uses nil. However, some of the
01235     // context verification code called from compiled code checks for
01236     // parents that are either a frame pointer, NULL or a context.
01237     // This should be unified at some point. (gri 5/9/96)
01238   } else if (_currentMapping->isDefined(node->src())) {
01239     // node->src() holds incoming context as parent and has been defined before
01240     _masm->movl(Address(context_reg, contextOopDesc::parent_byte_offset()), use(node->src()));
01241   } else {
01242     // node->src()->loc is pointing to the current stack frame (method context) and has not been explicitly defined
01243     assert(node->src()->loc == frameLoc, "parent should point to current stack frame");
01244     _masm->movl(Address(context_reg, contextOopDesc::parent_byte_offset()), frame_reg);
01245   }
01246   storeCheck(context_reg);
01247 }
01248 
01249 
01250 void CodeGenerator::aContextInitNode(ContextInitNode* node) {
01251   // initialize context temporaries (parent has been initialized in the ContextCreateNode)
01252   for (int i = node->nofTemps(); i-- > 0; ) {
01253     PReg* src = node->initialValue(i)->preg();
01254     PReg* dst;
01255     if (src->isBlockPReg()) {
01256       // Blocks aren't actually assigned (at the PReg level) so that the inlining info isn't lost.
01257       if (node->wasEliminated()) {
01258         continue;                               // there's no assignment (context was eliminated)
01259       } else {
01260         dst = node->contextTemp(i)->preg();     // fake destination created by compiler
01261       }
01262     } else {
01263       dst = node->contextTemp(i)->preg();
01264     }
01265     assign(dst, src, false);
01266   }
01267   // NB: no store check necessary (done in ContextCreateNode)
01268   // init node must follow create immediately (since fields are uninitialized)
01269   assert(node->firstPrev()->isContextCreateNode(), "should be immediately after a ContextCreateNode");
01270 }
01271 
01272 
01273 void CodeGenerator::aContextZapNode(ContextZapNode* node) {
01274   if (!node->isActive()) return;
01275   assert(node->scope()->needsContextZapping() && node->src() == node->scope()->context(), "no zapping needed or wrong context");
01276   // Make sure these registers are not used within zapContext
01277   // because they are used for return/non-local return
01278   // -> allocate them as temporaries for now
01279   Temporary t1(_currentMapping, NLR_result_reg);
01280   Temporary t2(_currentMapping, NLR_home_reg);
01281   Temporary t3(_currentMapping, NLR_homeId_reg);
01282   // zap context
01283   _masm->movl(Address(use(node->src()), contextOopDesc::parent_byte_offset()), 0);
01284   // A better solution should be found here: There should be a mechanism
01285   // to exclude certain registers from being taken.
01286 }
01287 
01288 
01289 void CodeGenerator::copyIntoContexts(BlockCreateNode* node) {
01290   // Copy newly created block into all contexts that have a copy.
01291   // The BlockPReg has a list of all contexts containing the block.  It should
01292   // be stored into those that are allocated (weren't eliminated) and are in
01293   // a sender scope.  
01294   // Why not copy into contexts in a sibling scope?  There are two cases:
01295   //   (1) The sibling scope never created the block(s) that uplevel-access this
01296   //       block.  The context location still contains 0 but that doesn't matter
01297   //       because that context location is now inaccessible.
01298   //   (2) The sibling scope did create these block(s).  In this case, the receiver
01299   //       must already exist since it was materialized when the first uplevel-
01300   //       accessing block was created.
01301   // Urs 4/96
01302   BlockPReg* blk = node->block();
01303   GrowableArray<Location*>* copies = blk->contextCopies();
01304   if (copies != NULL) {
01305     for (int i = copies->length(); i-- > 0; ) {
01306       Location* l = copies->at(i);
01307       InlinedScope* scopeWithContext = theCompiler->scopes->at(l->scopeID());
01308       PReg* r = scopeWithContext->contextTemporaries()->at(l->tempNo())->preg();
01309       if (r->loc == unAllocated) continue;        // not uplevel-accessed (eliminated)
01310       if (r->isBlockPReg()) continue;             // ditto
01311       if (!r->loc.isContextLocation()) fatal("expected context location");
01312       if (scopeWithContext->isSenderOrSame(node->scope())) {
01313         assign(r, node->block());
01314       }
01315     }
01316   }
01317 }
01318 
01319 
01320 void CodeGenerator::materializeBlock(BlockCreateNode* node) {
01321   CompileTimeClosure* closure = node->block()->closure();
01322   // allocate closure
01323   _currentMapping->kill(node->dst());   // kill it so that aPrimNode(node) can map the result to it
01324   int nofArgs = closure->nofArgs();
01325   switch (nofArgs) {
01326     case 0 : // fall through for now - fix this
01327     case 1 : // fall through for now - fix this
01328     case 2 : // fall through for now - fix this
01329     default:
01330       _masm->pushl(int(as_smiOop(nofArgs)));
01331       aPrimNode(node);                  // Note: primitive calls are called via call_C - also necessary for primitiveValue calls?
01332       _masm->addl(esp, oopSize);        // pop argument, this is not a Pascal call - change this at some point?
01333       break;
01334   }
01335   // copy into all contexts that have a copy
01336   if (node->block()->isMemoized()) copyIntoContexts(node);
01337   // initialize closure fields
01338   PRegLocker lock(node->block());       // once loaded, make sure closure stays in register
01339   Register closure_reg = use(node->block());
01340   // assert(theCompiler->jumpTableID == closure->parent_id(), "nmethod id must be the same");
01341   // fix this: RELOCATION INFORMATION IS NEEDED WHEN MOVING THE JUMPTABLE (Snapshot reading etc.)
01342   _masm->movl(Address(closure_reg, blockClosureOopDesc::context_byte_offset()        ), use(closure->context()));
01343   _masm->movl(Address(closure_reg, blockClosureOopDesc::method_or_entry_byte_offset()), (int)closure->jump_table_entry());
01344   storeCheck(closure_reg);
01345 }
01346 
01347 
01348 void CodeGenerator::aBlockCreateNode(BlockCreateNode* node) {
01349   if (node->block()->closure()->method()->is_clean_block()) {
01350     // create the block now (doesn't need to be copied at run-time)
01351     CompileTimeClosure* closure = node->block()->closure();
01352     blockClosureOop blk = blockClosureOopDesc::create_clean_block(closure->nofArgs(), closure->jump_table_entry());
01353     _masm->movl(def(node->dst()), blk);
01354   } else if (node->block()->isMemoized()) {
01355     // initialize block variable
01356     _masm->movl(def(node->dst()), MemoizedBlockNameDesc::uncreatedBlockValue());
01357   } else {
01358     // actually create block
01359     materializeBlock(node);
01360   }
01361 }
01362 
01363 
01364 void CodeGenerator::aBlockMaterializeNode(BlockMaterializeNode* node) {
01365   assert(node->next() == node->likelySuccessor(), "code pattern is not optimal");
01366   if (node->block()->closure()->method()->is_clean_block()) {
01367     // no need to create the block (already exists)
01368   } else if (node->block()->isMemoized()) {
01369     // materialize block if it is not already materialized
01370     // (nothing to do in case of non-memoized blocks)
01371     Register closure_reg = use(node->block());
01372     assert(MemoizedBlockNameDesc::uncreatedBlockValue() == oop(0), "change the code generation here");
01373     _masm->testl(closure_reg, closure_reg);
01374     jcc(Assembler::notZero, node, node->next(), true);
01375     materializeBlock(node);
01376     jmp(node, node->next(), true);                      // will be eliminated since next() is the likely successor
01377     bb_needs_jump = false;
01378   }
01379 }
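// The memoization test above is easier to see as straight-line pseudo code; a minimal
// reading sketch (not additional generated code; 'closure' stands for the value held in
// closure_reg):
//
//   if (closure == MemoizedBlockNameDesc::uncreatedBlockValue()) {   // i.e. closure == 0
//     materializeBlock(node);        // create the closure lazily, on first use only
//   }
//   // continue at node->next() with the (now existing) closure
//
// The testl/jcc pair implements the comparison against 0 without loading a constant,
// which is why the assert insists that uncreatedBlockValue() is oop(0).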
01380 
01381 
01382 void CodeGenerator::aSendNode(SendNode* node) {
01383   // Question concerning saveRegisters() below: is it really needed to also save the
01384   // recv (it is a parameter passed in)? If it also happens to be a visible value of
01385   // the caller and has not been stored before, we would get an "intermediate"
01386   // frame with an unsaved register value => we should save the recv as well. However,
01387   // this is only true if the recv value is not explicitly assigned (and the assignment
01388   // has not been eliminated). Otherwise this is an unnecessary save instr.
01389   // For now: be conservative & save it always.
01390   if (node->isCounting()) incrementInvocationCounter();
01391   char* entry = node->isSuperSend() ? CompiledIC::superLookupRoutine() : CompiledIC::normalLookupRoutine();
01392   PReg* recv = node->recv();
01393   _currentMapping->killDeadsAt(node->next(), recv);     // free mapping of unused pregs
01394   _currentMapping->makeInjective();                     // make injective because NLR cannot deal with non-injective mappings yet
01395   _currentMapping->saveRegisters();                     // make sure none of the remaining preg values are lost
01396   _currentMapping->killRegisters(recv);                 // so PRegMapping::use can safely allocate receiverLoc if necessary
01397   _currentMapping->use(recv, receiver_reg);             // make sure recv is in the right register
01398   updateDebuggingInfo(node);
01399   _masm->call(entry, relocInfo::ic_type);
01400   _currentMapping->killRegisters();
01401   // compute flag settings
01402   int flags = 0;
01403   if (node->isSuperSend())      setNth(flags, super_send_bit_no);
01404   if (node->isUninlinable())    setNth(flags, uninlinable_bit_no);
01405   if (node->staticReceiver())   setNth(flags, receiver_static_bit_no);
01406   // inline cache
01407   inlineCache(node, node->scope()->nlrTestPoint(), flags);
01408   _currentMapping->mapToRegister(node->dst(), result_reg);      // NLR mapping of node->dst is handled in NLRTestNode
01409 }
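// Reading aid for the send sequence above (a summary of the existing code, not extra
// code): the mapping has to be brought into a canonical state before the call and
// invalidated afterwards.
//
//   1. killDeadsAt(next, recv)   - drop mappings of PRegs that are dead after the send
//   2. makeInjective()           - one register per live PReg (NLR code cannot handle
//                                  non-injective mappings yet)
//   3. saveRegisters()           - no remaining preg value may live only in a register
//   4. killRegisters(recv)       - free registers, except the one holding the receiver
//   5. use(recv, receiver_reg)   - receiver into its calling-convention register
//   6. call(lookup routine)      - normal or super lookup, with ic relocation
//   7. killRegisters()           - register contents are invalid after the call
//   8. inlineCache(..., flags), then mapToRegister(dst, result_reg)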
01410 
01411 
01412 void CodeGenerator::aPrimNode(PrimNode* node) {
01413   MergeNode* nlr = node->pdesc()->can_perform_NLR() ? node->scope()->nlrTestPoint() : NULL;
01414   _currentMapping->killDeadsAt(node->next());           // free mapping of unused pregs
01415   _currentMapping->makeInjective();                     // make injective because NLR cannot deal with non-injective mappings yet
01416   _currentMapping->saveRegisters();                     // make sure none of the remaining preg values are lost
01417   _currentMapping->killRegisters();
01418   updateDebuggingInfo(node);
01419   // Note: cannot use call_C because inline cache code has to come immediately after call instruction!
01420   _masm->set_last_Delta_frame_before_call();
01421   _masm->call((char*)(node->pdesc()->fn()), relocInfo::prim_type);
01422   _currentMapping->killRegisters();
01423   if (nlr != NULL) inlineCache(node, nlr);
01424   _masm->reset_last_Delta_frame();
01425   _currentMapping->mapToRegister(node->dst(), result_reg);      // NLR mapping of node->dst is handled in NLRTestNode
01426 }
01427 
01428 
01429 void CodeGenerator::aDLLNode(DLLNode* node) {
01430   // determine entry point depending on whether a run-time lookup is needed or not.
01431   // Note: do not do a DLL lookup at compile time since this may cause a call back.
01432   char* entry = (node->function() == NULL)
01433                 ? StubRoutines::lookup_DLL_entry(node->async())
01434                 : StubRoutines::call_DLL_entry(node->async());
01435   // pass arguments for DLL lookup/parameter conversion routine in registers
01436   // (change this code if the corresponding routines change (StubRoutines))
01437   // ebx: no. of arguments
01438   // ecx: address of last argument
01439   // edx: dll function entry point (backpatched, belongs to CompiledDLL_Cache)
01440   _currentMapping->saveRegisters();
01441   _currentMapping->killRegisters();
01442   updateDebuggingInfo(node);
01443   _masm->movl(ebx, node->nofArguments());
01444   _masm->movl(ecx, esp);
01445   // CompiledDLL_Cache
01446   // This code pattern must correspond to the CompiledDLL_Cache layout
01447   // (make sure assembler is not optimizing mov reg, 0 into xor reg, reg!)
01448   _masm->movl(edx, int(node->function()));              // part of CompiledDLL_Cache
01449   _masm->inline_oop(node->dll_name());                  // part of CompiledDLL_Cache
01450   _masm->inline_oop(node->function_name());             // part of CompiledDLL_Cache
01451   _masm->call(entry, relocInfo::runtime_call_type);     // call lookup/parameter conversion routine
01452   _currentMapping->killRegisters();
01453   // For now: ordinary inline cache even though NLRs through DLLs are not allowed yet
01454   // (make sure somebody is popping arguments if NLRs are used).
01455   inlineCache(node, node->scope()->nlrTestPoint());
01456   _currentMapping->mapToRegister(node->dst(), result_reg);
01457   _masm->addl(esp, node->nofArguments()*oopSize);       // get rid of arguments
01458 }
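// The emitted pattern above doubles as a data structure: the CompiledDLL_Cache is laid
// over the generated code. A rough sketch derived from the code above (the exact byte
// layout is defined by CompiledDLL_Cache itself):
//
//   movl edx, <function>        ; 32-bit immediate slot, NULL until backpatched by the lookup
//   <dll_name oop>              ; inline oop
//   <function_name oop>         ; inline oop
//   call <lookup_DLL / call_DLL stub>
//
// This is also why the mov must not be peephole-optimized into xor edx, edx: the
// immediate slot would disappear and could no longer be patched in place.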
01459 
01460 
01461 /*
01462 static void testForSingleKlass(Register obj, klassOop klass, Register klassReg, Label& success, Label& failure) {
01463   if (klass == Universe::smiKlassObj()) {
01464     // check tag
01465     theMacroAssm->test(obj, Mem_Tag);
01466   } else if (klass == Universe::trueObj()->klass()) {
01467     // only one instance: compare with trueObj
01468     theMacroAssm->cmpl(obj, Universe::trueObj());
01469   } else if (klass == Universe::falseObj()->klass()) {
01470     // only one instance: compare with falseObj
01471     theMacroAssm->cmpl(obj, Universe::falseObj());
01472   } else if (klass == Universe::nilObj()->klass()) {
01473     // only one instance: compare with nilObj
01474     theMacroAssm->cmpl(obj, Universe::nilObj());
01475   } else {
01476     // compare against obj's klass - must check if smi first
01477     theMacroAssm->test(obj, Mem_Tag);
01478     theMacroAssm->jcc(Assembler::zero, failure);
01479     theMacroAssm->movl(klassReg, Address(obj, memOopDesc::klass_byte_offset()));
01480     theMacroAssm->cmpl(klassReg, klass);
01481   }
01482   theMacroAssm->jcc(Assembler::notEqual, failure);
01483   theMacroAssm->jmp(success);   // this jump will be eliminated since this is the likely successor
01484 }
01485 */
01486 void CodeGenerator::testForSingleKlass(Register obj, klassOop klass, Register klassReg, Label& success, Label& failure) {
01487   if (klass == Universe::smiKlassObj()) {
01488     // check tag
01489     _masm->test(obj, Mem_Tag);
01490   } else if (klass == Universe::trueObj()->klass()) {
01491     // only one instance: compare with trueObj
01492     _masm->cmpl(obj, Universe::trueObj());
01493   } else if (klass == Universe::falseObj()->klass()) {
01494     // only one instance: compare with falseObj
01495     _masm->cmpl(obj, Universe::falseObj());
01496   } else if (klass == Universe::nilObj()->klass()) {
01497     // only one instance: compare with nilObj
01498     _masm->cmpl(obj, Universe::nilObj());
01499   } else {
01500     // compare against obj's klass - must check if smi first
01501     _masm->test(obj, Mem_Tag);
01502     _masm->jcc(Assembler::zero, failure);
01503     _masm->movl(klassReg, Address(obj, memOopDesc::klass_byte_offset()));
01504     _masm->cmpl(klassReg, klass);
01505   }
01506   _masm->jcc(Assembler::notEqual, failure);
01507   _masm->jmp(success);  // this jump will be eliminated since this is the likely successor
01508 }
01509 
01510 
01511 void CodeGenerator::generateTypeTests(LoopHeaderNode* node, Label& failure) {
01512   Unimplemented();
01513 
01514   int last = 0;
01515   for (int i = 0; i <= last; i++) {
01516     HoistedTypeTest* t = node->tests()->at(i);
01517     if (t->testedPR->loc == unAllocated) continue;      // optimized away, or ConstPReg
01518     if (t->testedPR->isConstPReg()) {
01519       guarantee(t->testedPR->loc == unAllocated, "code assumes ConstPRegs are unallocated");
01520       //handleConstantTypeTest((ConstPReg*)t->testedPR, t->klasses);
01521     } else {
01522 
01523 
01524     }
01525   }
01526 }
01527 
01528 
01529 /*
01530 void LoopHeaderNode::generateTypeTests(Label& cont, Label& failure) {
01531   // test all values against expected classes
01532   Label* ok;
01533   const Register klassReg = temp2;
01534   const int len = _tests->length() - 1;
01535   int last;                                             // last case that generates a test
01536   for (last = len; last >= 0 && _tests->at(last)->testedPR->loc == unAllocated; last--) ;
01537   if (last < 0) return;                                 // no tests at all   
01538   for (int i = 0; i <= last; i++) {
01539     HoistedTypeTest* t = _tests->at(i);
01540     if (t->testedPR->loc == unAllocated) continue;      // optimized away, or ConstPReg
01541     if (t->testedPR->isConstPReg()) { 
01542       guarantee(t->testedPR->loc == unAllocated, "code assumes ConstPRegs are unallocated");
01543       handleConstantTypeTest((ConstPReg*)t->testedPR, t->klasses);
01544     } else {
01545       const Register obj = movePRegToReg(t->testedPR, temp1);
01546       Label okLabel;
01547       ok = (i == last) ? &cont : &okLabel;
01548       if (t->klasses->length() == 1) {
01549         testForSingleKlass(obj, t->klasses->at(0), klassReg, *ok, failure);
01550       } else if (t->klasses->length() == 2 &&
01551                  testForBoolKlasses(obj, t->klasses->at(0), t->klasses->at(1), klassReg, true, 
01552                  *ok, *ok, failure)) {
01553         // ok, was a bool test
01554       } else {
01555         const int len = t->klasses->length();
01556         GrowableArray<Label*> labels(len + 1);
01557         labels.append(&failure);
01558         for (int i = 0; i < len; i++) labels.append(ok);
01559         generalTypeTest(obj, klassReg, true, t->klasses, &labels);
01560       }
01561       if (i != last) theMacroAssm->bind(*ok);
01562     }
01563   }
01564 }
01565 */
01566 
01567 
01568 /*
01569 void CodeGenerator::handleConstantTypeTest(ConstPReg* r, GrowableArray<klassOop>* klasses) {
01570   // constant r is tested against klasses (efficiency hack: klasses == NULL means {smi})
01571   if ((klasses == NULL && r->constant->is_smi()) || (klasses && klasses->contains(r->constant->klass()))) {
01572     // always ok, no need to test
01573   } else {
01574     compiler_warning("loop header type test will always fail!");
01575     // don't jump to failure because that would make subsequent LoopHeader code unreachable (--> breaks back end)
01576     theMacroAssm->call(StubRoutines::unused_uncommon_trap_entry(), relocInfo::uncommon_type);
01577   }
01578 }
01579 */
01580 
01581 
01582 void CodeGenerator::generateIntegerLoopTest(PReg* preg, LoopHeaderNode* node, Label& failure) {
01583   if (preg != NULL) {
01584     if (preg->isConstPReg()) {
01585       // no run-time test necessary
01586       //handleConstantTypeTest((ConstPReg*)preg, NULL);
01587     } else if (preg->loc == unAllocated) {
01588       // preg is never used in loop => no test needed
01589       guarantee(preg->cpReg() == preg, "should use cpReg()");
01590     } else {
01591       // generate run-time test
01592       /*
01593       if (prev.is_unbound()) theMacroAssm->bind(prev);
01594       Label ok;
01595       const Register obj = movePRegToReg(p, temp1);
01596       testForSingleKlass(obj, Universe::smiKlassObj(), klassReg, ok, failure);
01597       theMacroAssm->bind(ok);
01598       */
01599     }
01600   }
01601 }
01602 
01603 
01604 /*
01605 void LoopHeaderNode::generateIntegerLoopTest(PReg* p, Label& prev, Label& failure) {
01606   const Register klassReg = temp2;
01607   if (p != NULL) {
01608     if (p->isConstPReg()) {
01609       // no run-time test necessary
01610       handleConstantTypeTest((ConstPReg*)p, NULL);
01611     } else if (p->loc == unAllocated) {
01612       // p is never used in loop, so no test needed
01613       guarantee(p->cpReg() == p, "should use cpReg()");
01614     } else {
01615       // generate run-time test
01616       if (prev.is_unbound()) theMacroAssm->bind(prev);
01617       Label ok;
01618       const Register obj = movePRegToReg(p, temp1);
01619       testForSingleKlass(obj, Universe::smiKlassObj(), klassReg, ok, failure);
01620       theMacroAssm->bind(ok);
01621     }
01622   }
01623 }
01624 */
01625 
01626 
01627 void CodeGenerator::generateIntegerLoopTests(LoopHeaderNode* node, Label& failure) {
01628   assert(node->isIntegerLoop(), "must be integer loop");
01629   generateIntegerLoopTest(node->lowerBound(), node, failure);
01630   generateIntegerLoopTest(node->upperBound(), node, failure);
01631   generateIntegerLoopTest(node->loopVar   (), node, failure);
01632 }
01633 
01634 
01635 /*
01636 void LoopHeaderNode::generateIntegerLoopTests(Label& prev, Label& failure) {
01637   if (!_integerLoop) return;
01638   generateIntegerLoopTest(_lowerBound, prev, failure);
01639   generateIntegerLoopTest(_upperBound, prev, failure);
01640   generateIntegerLoopTest(_loopVar   , prev, failure);
01641 }
01642 */
01643 
01644 
01645 void CodeGenerator::generateArrayLoopTests(LoopHeaderNode* node, Label& failure) {
01646   assert(node->isIntegerLoop(), "must be integer loop");
01647   if (node->upperLoad() != NULL) {
01648     // The loop variable iterates from lowerBound...array size; if any of the array accesses
01649     // use the loop variable without an index range check, we need to check it here.
01650     PReg* loopArray = node->upperLoad()->src();
01651     AbstractArrayAtNode* atNode;
01652     int i = node->arrayAccesses()->length();
01653     while (i-- > 0) {
01654       atNode = node->arrayAccesses()->at(i);
01655       if (atNode->src() == loopArray && !atNode->needsBoundsCheck()) break;
01656     }
01657     if (i >= 0) {
01658       // loopVar is used to index into array; make sure lower & upper bound is within array range
01659       PReg* lo = node->lowerBound();
01660       PReg* hi = node->upperBound();
01661       if (lo != NULL && lo->isConstPReg() && ((ConstPReg*)lo)->constant->is_smi() && ((ConstPReg*)lo)->constant >= as_smiOop(1)) {
01662 
01663       } else {
01664         // test lower bound
01665         //
01666         if (lo->loc == unAllocated) {
01667           
01668         } else {
01669           //
01670         }
01671       }
01672       // test upper bound
01673       
01674     }
01675   }
01676 }
01677 
01678 
01679 /*
01680 void LoopHeaderNode::generateArrayLoopTests(Label& prev, Label& failure) {
01681   if (!_integerLoop) return;
01682   Register boundReg = temp1;
01683   const Register tempReg  = temp2;
01684   if (_upperLoad != NULL) {
01685     // The loop variable iterates from lowerBound...array size; if any of the array accesses use the loop variable
01686     // without an index range check, we need to check it here.
01687     PReg* loopArray = _upperLoad->src();
01688     AbstractArrayAtNode* atNode;
01689     for (int i = _arrayAccesses->length() - 1; i >= 0; i--) {
01690       atNode = _arrayAccesses->at(i);
01691       if (atNode->src() == loopArray && !atNode->needsBoundsCheck()) break;
01692     }
01693     if (i >= 0) {
01694       // loopVar is used to index into array; make sure lower & upper bound is within array range
01695       if (_lowerBound != NULL && _lowerBound->isConstPReg() && ((ConstPReg*)_lowerBound)->constant->is_smi() && ((ConstPReg*)_lowerBound)->constant >= as_smiOop(1)) {
01696         // loopVar iterates from smi_const to array size, so no test necessary
01697       } else {
01698         // test lower bound
01699        if (prev.is_unbound()) theMacroAssm->bind(prev);
01700        if (_lowerBound->loc == unAllocated) {
01701          guarantee(_lowerBound->cpReg() == _lowerBound, "should use cpReg()");
01702        } else {
01703          const Register t = movePRegToReg(_lowerBound ? _lowerBound : _loopVar, tempReg);
01704          theMacroAssm->cmpl(boundReg, as_smiOop(1));
01705          theMacroAssm->jcc(Assembler::less, failure);
01706        }
01707       }
01708 
01709       // test upper bound
01710       boundReg = movePRegToReg(_upperBound, boundReg);
01711       const Register t = movePRegToReg(atNode->src(), tempReg);
01712       theMacroAssm->movl(t, Address(t, byteOffset(atNode->sizeOffset())));
01713       theMacroAssm->cmpl(boundReg, t);
01714       theMacroAssm->jcc(Assembler::above, failure);
01715     }
01716   }
01717 }
01718 */
01719 
01720 
01721 void CodeGenerator::aLoopHeaderNode(LoopHeaderNode* node) {
01722   if (node->isActivated()) {
01723     warning("loop header node not yet implemented");
01724     return;
01725 
01726     // the loop header node performs all checks hoisted out of the loop:
01727     // for general loops:
01728     //   - do all type tests in the list, uncommon branch if they fail
01729     //     (common case: true/false tests, single-klass tests) 
01730     // additionally for integer loops:
01731     //   - test lowerBound (may be NULL), upperBound, loopVar for smi-ness (the first two may be ConstPRegs)
01732     //   - if upperBound is NULL, upperLoad is load of the array size
01733     //   - if loopArray is non-NULL, check lowerBound (if non-NULL) or initial value of loopVar against 1
01734     Label failure;
01735     generateTypeTests(node, failure);
01736     if (node->isIntegerLoop()) {
01737       generateIntegerLoopTests(node, failure);
01738       generateArrayLoopTests(node, failure);
01739     }
01740     _masm->bind(failure);
01741     updateDebuggingInfo(node);
01742     _masm->call(StubRoutines::unused_uncommon_trap_entry(), relocInfo::uncommon_type);
01743     bb_needs_jump = false;
01744     setMapping(NULL);
01745   }
01746 }
01747 
01748 
01749 /*
01750 void LoopHeaderNode::gen() {
01751   if (!_activated) return;    // loop wasn't optimized
01752   // the loop header node performs all checks hoisted out of the loop:
01753   // for general loops:
01754   //   - do all type tests in the list, uncommon branch if they fail
01755   //     (common case: true/false tests, single-klass tests) 
01756   // additionally for integer loops:
01757   //   - test lowerBound (may be NULL), upperBound, loopVar for smi-ness (the first two may be ConstPRegs)
01758   //   - if upperBound is NULL, upperLoad is load of the array size
01759   //   - if loopArray is non-NULL, check lowerBound (if non-NULL) or initial value of loopVar against 1
01760   TrivialNode::gen();
01761   Label ok;
01762   Label failure;
01763   generateTypeTests(ok, failure);
01764   generateIntegerLoopTests(ok, failure);
01765   generateArrayLoopTests(ok, failure);
01766   if (ok.is_unbound()) theMacroAssm->bind(ok);
01767   theMacroAssm->jmp(next()->label);
01768   // above 2 lines could be eliminated with: if (ok.is_unbound()) ok.redirectTo(next()->label)
01769   bb_needs_jump = false;  // we generate all jumps explicitly
01770   theMacroAssm->bind(failure);
01771   theMacroAssm->call(StubRoutines::unused_uncommon_trap_entry(), relocInfo::uncommon_type);
01772 }
01773 */
01774 
01775 
01776 void CodeGenerator::aReturnNode(ReturnNode* node) {
01777   InlinedScope* scope = node->scope();
01778   if (scope->needsContextZapping()) zapContext(scope->context());       // <<< still needed? What about ContextZapNode?
01779   // make sure result is in result_reg, no other pregs are used anymore
01780   PReg* result = scope->resultPR;
01781   _currentMapping->killRegisters(result);
01782   _currentMapping->use(result, result_reg);
01783   // remove stack frame & return
01784   if (VerifyCode || TraceCalls || TraceResults) callVerifyReturn();
01785   int no_of_args_to_pop = scope->nofArguments();
01786   if (scope->method()->is_blockMethod()) {
01787     // blocks are called via primitiveValue => need to pop first argument
01788     // of primitiveValue (= block closure) as well since return happens
01789     // directly (and not through primitiveValue).
01790     no_of_args_to_pop++;
01791   }
01792   _masm->leave();
01793   _masm->ret(no_of_args_to_pop * oopSize);
01794   // no pregs accessible anymore
01795   setMapping(NULL);
01796 }
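// Worked example for the ret above (assuming oopSize == 4 on this 32-bit backend):
// a normal method with 2 arguments returns with ret 8, whereas a block method with
// 2 declared arguments returns with ret 12 - its own 2 arguments plus the block
// closure that primitiveValue pushed as its first argument, since the return bypasses
// primitiveValue.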
01797 
01798 
01799 void CodeGenerator::aNLRSetupNode(NLRSetupNode* node) {
01800   InlinedScope* scope = node->scope();
01801   // compute home into a temporary register (NLR_home_reg might still be in use - but try to use it if possible)
01802   // and check if context has been zapped
01803   //
01804   // QUESTION: Who is popping the arguments (for an ordinary return this happens automatically in the return)?
01805   // Couldn't that be a problem in loops? How's this done in the old backend? In the interpreter?
01806   Label NLR_error;                                                      // for NLRs to non-existing frames
01807   Temporary home(_currentMapping, NLR_home_reg);                        // try to allocate temporary home into right register
01808   uplevelBase(scope->context(), scope->homeContext() + 1, home.reg());  // compute home
01809   _masm->testl(home.reg(), home.reg());                                 // check if zapped
01810   _masm->jcc(Assembler::zero, NLR_error);                               // zero -> home has been zapped
01811   // load result into temporary register (NLR_result_reg might still be in use - but try to use it if possible)
01812   PReg* resultPReg = scope->resultPR;
01813   _currentMapping->killRegisters(resultPReg);                           // no PRegs are used anymore except result
01814   Register result;                                                      // temporary result register
01815   { Temporary t(_currentMapping, NLR_result_reg); result = t.reg(); }   // try to allocate temporary result into right register
01816   _currentMapping->use(resultPReg, result);                             // load result into temporary result register
01817   // finally assign result and home to the right registers, make sure that they
01818   // don't overwrite each other (home could be in the result register & vice versa).
01819   // For now push them and pop them back into the right registers.
01820   if (result != NLR_result_reg || home.reg() != NLR_home_reg) {
01821     _masm->pushl(result);
01822     _masm->pushl(home.reg());
01823     _masm->popl(NLR_home_reg);
01824     _masm->popl(NLR_result_reg);
01825   }
01826   // assign home id
01827   _masm->movl(NLR_homeId_reg, scope->home()->scopeID());
01828   // issue NLR
01829   if (VerifyCode || TraceCalls || TraceResults) callVerifyNLR();
01830   _masm->jmp(StubRoutines::continue_NLR_entry(), relocInfo::runtime_call_type);
01831   // call run-time routine in failure case
01832   // what about the debugging information? FIX THIS
01833   _masm->bind(NLR_error);
01834   _masm->call_C((char*)suspend_on_NLR_error, relocInfo::runtime_call_type);
01835   // no pregs accessible anymore
01836   setMapping(NULL);
01837 }
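// The push/pop sequence above performs a register permutation that is safe even when
// sources and destinations overlap. Sketch of the stack contents (values, not registers):
//
//   pushl result          ; stack: [result]
//   pushl home            ; stack: [result, home]
//   popl  NLR_home_reg    ; NLR_home_reg   := home
//   popl  NLR_result_reg  ; NLR_result_reg := result
//
// If result happened to live in NLR_home_reg (or home in NLR_result_reg), a pair of plain
// moves could clobber one value before it is read; going through the stack avoids that at
// the cost of four instructions, which is what the comment above alludes to.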
01838 
01839 
01840 void CodeGenerator::anInlinedReturnNode(InlinedReturnNode* node) {
01841   // Not generated anymore for new backend
01842   ShouldNotReachHere();
01843 }
01844 
01845 
01846 void CodeGenerator::aNLRContinuationNode(NLRContinuationNode* node) {
01847   guarantee(_currentMapping->NLRinProgress(), "NLR must be in progress");
01848   InlinedScope* scope = node->scope();
01849   if (scope->needsContextZapping()) zapContext(scope->context());
01850   // continue with NLR
01851   if (VerifyCode || TraceCalls || TraceResults) callVerifyNLR();
01852   _masm->jmp(StubRoutines::continue_NLR_entry(), relocInfo::runtime_call_type);
01853   // no pregs accessible anymore
01854   setMapping(NULL);
01855 }
01856 
01857 
01858 Assembler::Condition CodeGenerator::mapToCC(BranchOpCode op) {
01859   switch (op) {
01860     case EQBranchOp : return Assembler::equal;
01861     case NEBranchOp : return Assembler::notEqual;
01862     case LTBranchOp : return Assembler::less;
01863     case LEBranchOp : return Assembler::lessEqual;
01864     case GTBranchOp : return Assembler::greater;
01865     case GEBranchOp : return Assembler::greaterEqual;
01866     case LTUBranchOp: return Assembler::below;
01867     case LEUBranchOp: return Assembler::belowEqual;
01868     case GTUBranchOp: return Assembler::above;
01869     case GEUBranchOp: return Assembler::aboveEqual;
01870     case VSBranchOp : return Assembler::overflow;
01871     case VCBranchOp : return Assembler::noOverflow;
01872   }
01873   ShouldNotReachHere();
01874   return Assembler::zero;
01875 }
01876 
01877 
01878 void CodeGenerator::aBranchNode(BranchNode* node) {
01879   jcc(mapToCC(node->op()), node, node->next(1));
01880 }
01881 
01882 
01883 void CodeGenerator::aTypeTestNode(TypeTestNode* node) {
01884   // Note 1: This code pattern requires *no* particular order
01885   //         of the classes of the TypeTestNode.
01886   //
01887   // Note 2: In case of a TypeTestNode without unknown case,
01888   //         the last case would not have to be conditional.
01889   //         However, for debugging purposes right now all
01890   //         cases are always explicitly checked.
01891   const int len = node->classes()->length();
01892   
01893   if (ReorderBBs) {
01894     PRegLocker lock(node->src());
01895     Register obj = use(node->src());
01896       
01897     if (len == 1) {
01898       // handle all cases where only one klass is involved
01899       assert(node->hasUnknown(), "should be eliminated if there's no unknown case");
01900       assert(node->likelySuccessor() == node->next(1), "code pattern is not optimal");
01901       klassOop klass = node->classes()->at(0);
01902       if (klass == Universe::smiKlassObj()) {
01903         // check tag
01904         _masm->test(obj, Mem_Tag);
01905       } else if (klass == Universe::trueObj()->klass()) {
01906         // only one instance: compare with trueObj
01907         _masm->cmpl(obj, Universe::trueObj());
01908       } else if (klass == Universe::falseObj()->klass()) {
01909         // only one instance: compare with falseObj
01910         _masm->cmpl(obj, Universe::falseObj());
01911       } else if (klass == Universe::nilObj()->klass()) {
01912         // only one instance: compare with nilObj
01913         _masm->cmpl(obj, Universe::nilObj());
01914       } else {
01915         // compare against obj's klass - must check if smi first
01916         Temporary objKlass(_currentMapping);
01917         _masm->test(obj, Mem_Tag);
01918         _masm->jcc(Assembler::zero, node->next()->label);
01919         _masm->movl(objKlass.reg(), Address(obj, memOopDesc::klass_byte_offset()));
01920         _masm->cmpl(objKlass.reg(), klass);
01921       }
01922       jcc(Assembler::notEqual, node, node->next());
01923       jmp(node, node->next(1));                 // this jump will be eliminated since this is the likely successor
01924       bb_needs_jump = false;                    // no jump necessary at end of basic block
01925       return;
01926     }
01927 
01928     if (len == 2) {
01929       // handle pure boolean cases (ifTrue:/ifFalse:)
01930       klassOop klass1 = node->classes()->at(0);
01931       klassOop klass2 = node->classes()->at(1);
01932       oop      bool1  = Universe::trueObj();
01933       oop      bool2  = Universe::falseObj();
01934       if (klass1 == bool2->klass() && klass2 == bool1->klass()) {
01935         oop t = bool1; bool1 = bool2; bool2 = t;
01936       }
01937       if (klass1 == bool1->klass() && klass2 == bool2->klass()) {
01938         const bool ignoreNoUnknownForNow = true;
01939         // Note: Uncommon case: A TypeTestNode with no uncommon case has a successor
01940         //       at next(0) anyhow (because there are no "holes" (= NULLs) in the
01941         //       successor list of a node). That is, for now we have to jump to that
01942         //       point somehow (even though it can never happen), because otherwise
01943         //       the PRegMapping is not set for that node. (Maybe one should detect
01944         //       this case and then set a "dummy" PRegMapping, since it is not used
01945         //       anyhow but needs to be there only for assertion checking).
01946         if (ignoreNoUnknownForNow || node->hasUnknown()) {
01947           assert(ignoreNoUnknownForNow || node->likelySuccessor() == node->next(2), "code pattern is not optimal");
01948           _masm->cmpl(obj, bool1);
01949           jcc(Assembler::equal, node, node->next(1));
01950           _masm->cmpl(obj, bool2);
01951           jcc(Assembler::notEqual, node, node->next());
01952           jmp(node, node->next(2));             // this jump will be eliminated since this is the likely successor
01953         } else {
01954           assert(node->likelySuccessor() == node->next(1), "code pattern is not optimal");
01955           _masm->cmpl(obj, bool2);
01956           jcc(Assembler::equal, node, node->next(2));
01957           jmp(node, node->next(1));             // this jump will be eliminated since this is the likely successor
01958         }
01959         bb_needs_jump = false;                  // no jump necessary at end of basic block
01960         return;
01961       }
01962     }
01963   }
01964 
01965   // general case
01966   Label unknownCase;
01967   Temporary objKlass(_currentMapping);
01968   bool klassHasBeenLoaded = false;
01969   bool smiHasBeenChecked = false;
01970   PRegLocker lock(node->src());
01971   Register obj = use(node->src());
01972   for (int i = 0; i < len; i++) {
01973     klassOop klass = node->classes()->at(i);
01974     if (klass == trueObj->klass()) {
01975       // only one instance: compare with trueObj
01976       _masm->cmpl(obj, trueObj);
01977       jcc(Assembler::equal, node, node->next(i+1));
01978     } else if (klass == falseObj->klass()) {
01979       // only one instance: compare with falseObj
01980       _masm->cmpl(obj, falseObj);
01981       jcc(Assembler::equal, node, node->next(i+1));
01982     } else if (klass == nilObj->klass()) {
01983       // only one instance: compare with nilObj
01984       _masm->cmpl(obj, nilObj);
01985       jcc(Assembler::equal, node, node->next(i+1));
01986     } else if (klass == smiKlassObj) {
01987       // check smi tag only if not checked already, otherwise ignore
01988       if (!smiHasBeenChecked) {
01989         _masm->test(obj, Mem_Tag);
01990         jcc(Assembler::zero, node, node->next(i+1));
01991         smiHasBeenChecked = true;
01992       }
01993     } else {
01994       // compare with klass
01995       if (!klassHasBeenLoaded) {
01996         if (!smiHasBeenChecked) {
01997           Node* smiCase = node->smiCase();
01998           if (smiCase != NULL || node->hasUnknown()) {
01999             // smi can actually appear => check for it
02000             _masm->test(obj, Mem_Tag);
02001             if (smiCase != NULL) {
02002               // jump to smiCase if there's one
02003               jcc(Assembler::zero, node, smiCase);
02004             } else {
02005               // node hasUnknown & smiCase cannot happen => jump to unknown case (end of typetest)
02006               _masm->jcc(Assembler::zero, unknownCase);
02007             }
02008           }
02009           smiHasBeenChecked = true;
02010         }
02011         _masm->movl(objKlass.reg(), Address(obj, memOopDesc::klass_byte_offset()));
02012         klassHasBeenLoaded = true;
02013       }
02014       _masm->cmpl(objKlass.reg(), klass);
02015       jcc(Assembler::equal, node, node->next(i+1));
02016     }
02017   }
02018   // bind label in any case to avoid unbound label assertion bug
02019   _masm->bind(unknownCase);
02020 
02021   // Note: Possible problem: if the smi case is checked before the class
02022   //       is loaded, there's possibly a jump to the end of the TypeTestNode
02023   //       from the smi case. However, then the klass register isn't defined.
02024   //       If the uncommon case is reached later, the klass register is assumed to be defined.
02025   //       What if one refers to that register? Or is it not possible because
02026   //       it's not a regular PReg but a temporary? Think about this!
02027   //
02028   // >>>>> IS A PROBLEM! The temporary is likely to throw out another PReg from a register! FIX THIS!
02029 }
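// Worked example for the boolean swap in the len == 2 case above: if classes() is
// { falseObj klass, trueObj klass }, bool1/bool2 start out as trueObj/falseObj and get
// swapped so that bool1 always corresponds to classes()->at(0) and bool2 to
// classes()->at(1). The equal-branch to next(1) and the fall-through/jump to next(2)
// then line up with the class list no matter in which order the two boolean classes
// were collected.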
02030 
02031 
02032 // Note: Maybe should reorganize the way NLRs are treated in the intermediate representation;
02033 // may be able to avoid some jumps. For example, continuing the NLR is done via a stub routine,
02034 // maybe one can jump to that routine conditionally and thereby save a jump around a jump (that
02035 // stub routine could also do the zapping if necessary (could come in two versions)).
02036 
02037 void CodeGenerator::aNLRTestNode(NLRTestNode* node) {
02038   assert(_currentMapping->NLRinProgress(), "NLR must be in progress");
02039   InlinedScope* scope = node->scope();
02040   // check if arrived at the right frame
02041   Label L;
02042   _masm->cmpl(NLR_home_reg, frame_reg);
02043   _masm->jcc(Assembler::notEqual, L);
02044   // check if arrived at the right scope within the frame
02045   int id = scope->scopeID();
02046   if (id == 0) {
02047     // use test instruction to compare against 0 (smaller code than with cmp)
02048     _masm->testl(NLR_homeId_reg, NLR_homeId_reg);
02049   } else {
02050     _masm->cmpl(NLR_homeId_reg, id);
02051   }
02052   _currentMapping->releaseNLRRegisters();
02053   jcc(Assembler::equal, node, node->next(1));
02054   _currentMapping->acquireNLRRegisters();
02055   // otherwise continue NLR
02056   _masm->bind(L);
02057 }
02058 
02059 
02060 void CodeGenerator::aMergeNode(MergeNode* node) {
02061   assert(node->isTrivial() || _currentMapping->isInjective(), "must be injective if more than one predecessor");
02062 }
02063 
02064 
02065 void CodeGenerator::jcc_error(Assembler::Condition cc, AbstractBranchNode* node, Label& label) {
02066   assert(node->canFail(), "should not be called if node cannot fail");
02067   Node* failure_start = node->next(1);
02068   if (failure_start->isUncommonNode()) {
02069     jcc(cc, node, failure_start, true);
02070   } else {
02071     _masm->jcc(cc, label);
02072   }
02073 }
02074 
02075 
02076 void CodeGenerator::anArrayAtNode(ArrayAtNode* node) {
02077   PReg* array  = node->array();
02078   PReg* index  = node->index();
02079   PReg* result = node->dst();
02080   PReg* error  = node->error();
02081   PRegLocker lock(array, index);
02082   Register array_reg = use(array);
02083   // use temporary register for index - will be modified
02084   Temporary offset(_currentMapping, index);
02085   // first element is at index 1 => subtract smi(1) (doesn't change smi/oop property)
02086   theMacroAssm->subl(offset.reg(), int(smiOop_one));
02087   // do index smi check if necessary (still possible, even after subtracting smi(1))
02088   Label indexNotSmi;
02089   if (!node->index_is_smi()) {
02090     _masm->test(offset.reg(), Mem_Tag);
02091     jcc_error(Assembler::notZero, node, indexNotSmi);
02092   }
02093   // do bounds check if necessary
02094   Label indexOutOfBounds;
02095   if (node->index_needs_bounds_check()) {
02096     const int size_offset = byteOffset(node->size_word_offset());
02097     _masm->cmpl(offset.reg(), Address(array_reg, size_offset));
02098     jcc_error(Assembler::aboveEqual, node, indexOutOfBounds);
02099   }
02100   // load element
02101   assert(Tag_Size == 2, "check this code");
02102   const int data_offset = byteOffset(node->data_word_offset());
02103   switch (node->access_type()) {
02104     case ArrayAtNode::byte_at:
02105       { Register result_reg = def(result);
02106         _masm->sarl(offset.reg(), Tag_Size);    // adjust index
02107         if (result_reg.hasByteRegister()) {
02108           // result_reg has byte register -> can use byte load instruction
02109           _masm->xorl(result_reg, result_reg);  // clear destination register
02110           _masm->movb(result_reg, Address(array_reg, offset.reg(), Address::times_1, data_offset));
02111         } else {
02112           // result_reg has no byte register -> cannot use byte load instruction
02113           // instead of doing better register selection use word load & mask for now
02114           _masm->movl(result_reg, Address(array_reg, offset.reg(), Address::times_1, data_offset));
02115           _masm->andl(result_reg, 0x000000FF);  // clear upper 3 bytes
02116         }
02117         _masm->shll(result_reg, Tag_Size);      // make result a smi
02118       }
02119       break;
02120     case ArrayAtNode::double_byte_at:
02121       { Register result_reg = def(result);
02122         _masm->sarl(offset.reg(), Tag_Size - 1);// adjust index
02123         _masm->movl(result_reg, Address(array_reg, offset.reg(), Address::times_1, data_offset));
02124         _masm->andl(result_reg, 0x0000FFFF);    // clear upper 2 bytes
02125         _masm->shll(result_reg, Tag_Size);      // make result a smi
02126       }
02127       break;
02128     case ArrayAtNode::character_at:
02129       { Register result_reg = def(result);
02130         _masm->sarl(offset.reg(), Tag_Size - 1);// adjust index
02131         _masm->movl(result_reg, Address(array_reg, offset.reg(), Address::times_1, data_offset));
02132         _masm->andl(result_reg, 0x0000FFFF);    // clear upper 2 bytes
02133         // use result_reg as index into asciiCharacters()
02134         // check index first, must be 0 <= result_reg < asciiCharacters()->length()
02135         objArrayOop chars = Universe::asciiCharacters();
02136         _masm->cmpl(result_reg, chars->length());
02137         jcc_error(Assembler::aboveEqual, node, indexOutOfBounds);
02138         // get character out of chars array
02139         _masm->movl(offset.reg(), chars);
02140         _masm->movl(result_reg, Address(offset.reg(), result_reg, Address::times_4, byteOffset(chars->klass()->klass_part()->non_indexable_size() + 1)));
02141       }
02142       break;
02143     case ArrayAtNode::object_at:
02144       // offset already shifted => no scaling necessary
02145       _masm->movl(def(result), Address(array_reg, offset.reg(), Address::times_1, data_offset));
02146       break;
02147     default:
02148       ShouldNotReachHere();
02149       break;
02150   }
02151   // handle error cases if not uncommon
02152   if (node->canFail() && !node->next(1)->isUncommonNode()) {
02153     Label exit;
02154     _masm->jmp(exit);
02155     // error messages
02156     if (!node->index_is_smi()) {
02157       _masm->bind(indexNotSmi);
02158       _masm->hlt();
02159     }
02160     if (node->index_needs_bounds_check()) {
02161       _masm->bind(indexOutOfBounds);
02162       _masm->hlt();
02163     }
02164     // hack for now - jcc so mapping stays alive
02165     // must do all the mapping in the program path taken - otherwise
02166     // mappings are inconsistent
02167     _masm->bind(exit);
02168     Register r = def(error);
02169     _masm->test(r, 0);
02170     jcc(Assembler::notZero, node, node->next(1));
02171   }
02172 }
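// Worked example of the index arithmetic above (Tag_Size == 2, so a smi n is encoded
// as n << 2, and assuming oopSize == 4): for byte_at with index 3, offset starts out as
// smi(3) = 12; subtracting smiOop_one (= 4) gives smi(2) = 8, and sarl by Tag_Size yields
// the zero-based byte index 2. The loaded byte b is re-tagged with shll Tag_Size,
// producing smi(b). For object_at no extra shift is needed because smi(index - 1) =
// (index - 1) << 2 is already the byte offset of a 4-byte element with zero-based
// index (index - 1).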
02173 
02174 
02175 void CodeGenerator::anArrayAtPutNode(ArrayAtPutNode* node) {
02176   PReg* array   = node->array();
02177   PReg* index   = node->index();
02178   PReg* element = node->element();
02179   PReg* error   = node->error();
02180   PRegLocker lock(array, index, element);
02181   Register array_reg = use(array);
02182   // use temporary register for index - will be modified
02183   Temporary offset(_currentMapping, index);
02184   // first element is at index 1 => subtract smi(1) (doesn't change smi/oop property)
02185   theMacroAssm->subl(offset.reg(), int(smiOop_one));
02186   // do index smi check if necessary (still possible, even after subtracting smi(1))
02187   Label indexNotSmi;
02188   if (!node->index_is_smi()) {
02189     _masm->test(offset.reg(), Mem_Tag);
02190     jcc_error(Assembler::notZero, node, indexNotSmi);
02191   }
02192   // do bounds check if necessary
02193   Label indexOutOfBounds;
02194   if (node->index_needs_bounds_check()) {
02195     const int size_offset = byteOffset(node->size_word_offset());
02196     _masm->cmpl(offset.reg(), Address(array_reg, size_offset));
02197     jcc_error(Assembler::aboveEqual, node, indexOutOfBounds);
02198   }
02199   // store element
02200   assert(Tag_Size == 2, "check this code");
02201   const int data_offset = byteOffset(node->data_word_offset());
02202   Label elementNotSmi, elementOutOfRange;
02203   switch (node->access_type()) {
02204     case ArrayAtPutNode::byte_at_put:
02205       { // use temporary register for element - will be modified
02206         Temporary elt(_currentMapping, element);
02207         _masm->sarl(offset.reg(), Tag_Size);    // adjust index
02208         // do element smi check if necessary
02209         if (!node->element_is_smi()) {
02210           _masm->test(elt.reg(), Mem_Tag);
02211           jcc_error(Assembler::notZero, node, elementNotSmi);
02212         }
02213         _masm->sarl(elt.reg(), Tag_Size);       // convert element into (int) byte
02214         // do element range check if necessary
02215         if (node->element_needs_range_check()) {
02216           _masm->cmpl(elt.reg(), 0x100);
02217           jcc_error(Assembler::aboveEqual, node, elementOutOfRange);
02218         }
02219         // store the element
02220         if (elt.reg().hasByteRegister()) {
02221           // elt.reg() has byte register -> can use byte store instruction
02222           _masm->movb(Address(array_reg, offset.reg(), Address::times_1, data_offset), elt.reg());
02223         } else {
02224           // elt.reg() has no byte register -> cannot use byte store instruction
02225           // instead of doing a better register selection use word load/store & mask for now
02226           Temporary field(_currentMapping);
02227           _masm->movl(field.reg(), Address(array_reg, offset.reg(), Address::times_1, data_offset));
02228           _masm->andl(field.reg(), 0xFFFFFF00); // mask out lower byte
02229           _masm->orl(field.reg(), elt.reg());   // move elt (elt < 0x100 => no masking of elt needed)
02230           _masm->movl(Address(array_reg, offset.reg(), Address::times_1, data_offset), field.reg());
02231         }
02232         assert(!node->needs_store_check(), "just checking");
02233       }
02234       break;
02235     case ArrayAtPutNode::double_byte_at_put:
02236       { // use temporary register for element - will be modified
02237         Temporary elt(_currentMapping, element);
02238         _masm->sarl(offset.reg(), Tag_Size - 1);// adjust index
02239         // do element smi check if necessary
02240         if (!node->element_is_smi()) {
02241           _masm->test(elt.reg(), Mem_Tag);
02242           jcc_error(Assembler::notZero, node, elementNotSmi);
02243         }
02244         _masm->sarl(elt.reg(), Tag_Size);       // convert element into (int) double byte
02245         // do element range check if necessary
02246         if (node->element_needs_range_check()) {
02247           _masm->cmpl(elt.reg(), 0x10000);
02248           jcc_error(Assembler::aboveEqual, node, elementOutOfRange);
02249         }
02250         // store the element
02251         if (elt.reg().hasByteRegister()) {
02252           // elt.reg() has byte register -> can use byte store instructions
02253           _masm->movb(Address(array_reg, offset.reg(), Address::times_1, data_offset + 0), elt.reg());
02254           _masm->shrl(elt.reg(), 8);            // shift 2nd byte into low-byte position
02255           _masm->movb(Address(array_reg, offset.reg(), Address::times_1, data_offset + 1), elt.reg());
02256         } else {
02257           // elt.reg() has no byte register -> cannot use byte store instructions
02258           // instead of doing a better register selection use word load/store & mask for now
02259           Temporary field(_currentMapping);
02260           _masm->movl(field.reg(), Address(array_reg, offset.reg(), Address::times_1, data_offset));
02261           _masm->andl(field.reg(), 0xFFFF0000); // mask out lower two bytes
02262           _masm->orl(field.reg(), elt.reg());   // move elt (elt < 0x10000 => no masking of elt needed)
02263           _masm->movl(Address(array_reg, offset.reg(), Address::times_1, data_offset), field.reg());
02264         }
02265         assert(!node->needs_store_check(), "just checking");
02266       }
02267       break;
02268     case ArrayAtPutNode::object_at_put:
02269       // offset already shifted => no scaling necessary
02270       if (node->needs_store_check()) {
02271         _masm->leal(offset.reg(), Address(array_reg, offset.reg(), Address::times_1, data_offset));
02272         _masm->movl(Address(offset.reg()), use(element));
02273         storeCheck(offset.reg());
02274       } else {
02275         _masm->movl(Address(array_reg, offset.reg(), Address::times_1, data_offset), use(element));
02276       }
02277       break;
02278     default:
02279       ShouldNotReachHere();
02280       break;
02281   }
02282   // handle error cases if not uncommon
02283   if (node->canFail() && !node->next(1)->isUncommonNode()) {
02284     Label exit;
02285     _masm->jmp(exit);
02286     // error messages
02287     if (!node->index_is_smi()) {
02288       _masm->bind(indexNotSmi);
02289       _masm->hlt();
02290     }
02291     if (node->index_needs_bounds_check()) {
02292       _masm->bind(indexOutOfBounds);
02293       _masm->hlt();
02294     }
02295     if (!node->element_is_smi()) {
02296       _masm->bind(elementNotSmi);
02297       _masm->hlt();
02298     }
02299     if (node->element_needs_range_check()) {
02300       _masm->bind(elementOutOfRange);
02301       _masm->hlt();
02302     }
02303     // hack for now - jcc so mapping stays alive
02304     // must do all the mapping in the program path taken - otherwise
02305     // mappings are inconsistent
02306     _masm->bind(exit);
02307     Register r = def(error);
02308     _masm->test(r, 0);
02309     jcc(Assembler::notZero, node, node->next(1));
02310   }
02311 }
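// Worked example of the read-modify-write fallback above (element register has no byte
// register; hypothetical values): to store the byte 0x41 into a slot whose surrounding
// word currently reads 0x11223344, the code loads 0x11223344, masks with 0xFFFFFF00
// giving 0x11223300, ors in 0x41 giving 0x11223341, and stores the word back. The other
// three bytes are rewritten with their old values, and since the element is known (or
// range checked) to be < 0x100, the or cannot disturb them.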
02312 
02313 
02314 void CodeGenerator::anInlinedPrimitiveNode(InlinedPrimitiveNode* node) {
02315   switch (node->op()) {
02316     case InlinedPrimitiveNode::obj_klass:
02317       { Label is_smi;
02318         PRegLocker lock(node->src());
02319         Register obj_reg   = use(node->src());
02320         Register klass_reg = def(node->dst());
02321         _masm->movl(klass_reg, Universe::smiKlassObj());
02322         _masm->test(obj_reg, Mem_Tag);
02323         _masm->jcc(Assembler::zero, is_smi);
02324         _masm->movl(klass_reg, Address(obj_reg, memOopDesc::klass_byte_offset()));
02325         _masm->bind(is_smi);
02326       };
02327       break;
02328     case InlinedPrimitiveNode::obj_hash:
02329       { Unimplemented();
02330         // So far implemented only for the smi klass - that case can be resolved in
02331         // the PrimInliner without using an InlinedPrimitiveNode.
02332       };
02333       break;
02334     case InlinedPrimitiveNode::proxy_byte_at:
02335       { PReg* proxy  = node->src();
02336         PReg* index  = node->arg1();
02337         PReg* result = node->dst();
02338         PReg* error  = node->error();
02339         PRegLocker lock(proxy, index);
02340         // use Temporary register for proxy & index - will be modified
02341         Temporary base  (_currentMapping, proxy);
02342         Temporary offset(_currentMapping, index);
02343         // do index smi check if necessary
02344         Label indexNotSmi;
02345         if (!node->arg1_is_smi()) {
02346           _masm->test(offset.reg(), Mem_Tag);
02347           jcc_error(Assembler::notZero, node, indexNotSmi);
02348         }
02349         // load element
02350         assert(Tag_Size == 2, "check this code");
02351         Register result_reg = def(result);
02352         _masm->movl(base.reg(), Address(base.reg(), pointer_offset));   // unbox proxy
02353         _masm->sarl(offset.reg(), Tag_Size);                            // adjust index
02354         if (result_reg.hasByteRegister()) {
02355           // result_reg has byte register -> can use byte load instruction
02356           _masm->xorl(result_reg, result_reg);                          // clear destination register
02357           _masm->movb(result_reg, Address(base.reg(), offset.reg(), Address::times_1, 0));
02358         } else {
02359           // result_reg has no byte register -> cannot use byte load instruction
02360           // instead of doing better register selection use word load & mask for now
02361           _masm->movl(result_reg, Address(base.reg(), offset.reg(), Address::times_1, 0));
02362           _masm->andl(result_reg, 0x000000FF);                          // clear upper 3 bytes
02363         }
02364         _masm->shll(result_reg, Tag_Size);                              // make result a smi
02365         // handle error cases if not uncommon
02366         if (node->canFail() && !node->next(1)->isUncommonNode()) {
02367           Label exit;
02368           _masm->jmp(exit);
02369           // error messages
02370           if (!node->arg1_is_smi()) {
02371             _masm->bind(indexNotSmi);
02372             _masm->hlt();
02373           }
02374           // hack for now - jcc so mapping stays alive
02375           // must do all the mapping in the program path taken - otherwise
02376           // mappings are inconsistent
02377           _masm->bind(exit);
02378           Register r = def(error);
02379           _masm->test(r, 0);
02380           jcc(Assembler::notZero, node, node->next(1));
02381         }
02382       }
02383       break;
02384     case InlinedPrimitiveNode::proxy_byte_at_put:
02385       { bool const_val = node->arg2()->isConstPReg();
02386         PReg* proxy = node->src();
02387         PReg* index = node->arg1();
02388         PReg* value = node->arg2();
02389         PReg* error = node->error();
02390         // Locking turned off for now -> blocks too many registers for
02391         // this code (however may add unnecessary moves) -> find a better
02392         // solution for this
02393         //
02394         // PRegLocker lock(proxy, index, value);
02395         // use Temporary register for proxy & index - will be modified
02396         Temporary base  (_currentMapping, proxy);
02397         Temporary offset(_currentMapping, index);
02398         // use temporary register for value - will be modified
02399         // (actually only needed if not const_val - however right now
02400         // we can only allocate temps via constructors (i.e., they have
02401         // to be allocated/deallocated in a nested manner)).
02402         Temporary val(_currentMapping);
02403         if (const_val) {
02404           // value doesn't have to be loaded -> do nothing here
02405           if (!node->arg2_is_smi()) fatal("proxy_byte_at_put: should not happen - internal error");
02406           //if (!node->arg2_is_smi()) fatal("proxy_byte_at_put: should not happen - tell Robert");
02407         } else {
02408           _masm->movl(val.reg(), use(value));
02409         }
02410         // do index smi check if necessary
02411         Label indexNotSmi;
02412         if (!node->arg1_is_smi()) {
02413           _masm->test(offset.reg(), Mem_Tag);
02414           jcc_error(Assembler::notZero, node, indexNotSmi);
02415         }
02416         // do value smi check if necessary
02417         Label valueNotSmi;
02418         if (!node->arg2_is_smi()) {
02419           assert(!const_val, "constant shouldn't need a smi check");
02420           _masm->test(val.reg(), Mem_Tag);
02421           jcc_error(Assembler::notZero, node, valueNotSmi);
02422         }
02423         // store element
02424         assert(Tag_Size == 2, "check this code");
02425         _masm->movl(base.reg(), Address(base.reg(), pointer_offset));   // unbox proxy
02426         _masm->sarl(offset.reg(), Tag_Size);                            // adjust index
02427         if (const_val) {
02428           smiOop constant = smiOop(((ConstPReg*)value)->constant);
02429           assert(constant->is_smi(), "should be a smi");
02430           _masm->movb(Address(base.reg(), offset.reg(), Address::times_1, 0), constant->value() & 0xFF);
02431         } else {
02432           _masm->sarl(val.reg(), Tag_Size);                             // convert (smi)value into int
02433           if (val.reg().hasByteRegister()) {
02434             // val.reg() has byte register -> can use byte store instruction
02435             _masm->movb(Address(base.reg(), offset.reg(), Address::times_1, 0), val.reg());
02436           } else {
02437             // val.reg() has no byte register -> cannot use byte store instruction
02438             // instead of doing a better register selection use word load/store & mask for now
02439             Temporary field(_currentMapping);
02440             _masm->movl(field.reg(), Address(base.reg(), offset.reg(), Address::times_1, 0));
02441             _masm->andl(val  .reg(), 0x000000FF);                       // make sure value is one byte only
02442             _masm->andl(field.reg(), 0xFFFFFF00);                       // mask out lower byte of target field
02443             _masm->orl (field.reg(), val.reg());                        // move value byte into target field
02444             _masm->movl(Address(base.reg(), offset.reg(), Address::times_1, 0), field.reg());
02445           }
02446         }
02447         // handle error cases if not uncommon
02448         if (node->canFail() && !node->next(1)->isUncommonNode()) {
02449           Label exit;
02450           _masm->jmp(exit);
02451           // error messages
02452           if (!node->arg1_is_smi()) {
02453             _masm->bind(indexNotSmi);
02454             _masm->hlt();
02455           }
02456           if (!node->arg2_is_smi()) {
02457             _masm->bind(valueNotSmi);
02458             _masm->hlt();
02459           }
02460           // hack for now - jcc so mapping stays alive
02461           // must do all the mapping in the program path taken - otherwise
02462           // mappings are inconsistent
02463           _masm->bind(exit);
02464           Register r = def(error);
02465           _masm->test(r, 0);
02466           jcc(Assembler::notZero, node, node->next(1));
02467         }
02468       }
02469       break;
02470     default: ShouldNotReachHere();
02471   }
02472 }
02473 
02474 
02475 void CodeGenerator::anUncommonNode(UncommonNode* node) {
02476   //_currentMapping->saveRegisters();
02477   //_currentMapping->killRegisters();
02478   updateDebuggingInfo(node);
02479   _masm->call(StubRoutines::unused_uncommon_trap_entry(), relocInfo::uncommon_type);
02480   setMapping(NULL);
02481 }
02482 
02483 
02484 void CodeGenerator::aFixedCodeNode(FixedCodeNode* node) {
02485   switch(node->kind()) {
02486     case FixedCodeNode::dead_end:     _masm->hlt(); setMapping(NULL); break;
02487     case FixedCodeNode::inc_counter:  incrementInvocationCounter(); break;
02488     default:                          fatal1("unexpected FixedCodeNode kind %d", node->kind());
02489   }
02490 }
02491 
02492 
02493 # endif
02494 
02495 
