Lines Matching +defs:val +defs:match (HotSpot C2 memory-node source; the number at the start of each line below is its line number in that file)
206 assert(consistent, "adr_check must match alias idx");
482 // (b) Observe that the next memory state makes an exact match
1044 // A load from an initialization barrier can match a captured store.
1495 // Steps (a), (b): Walk past independent stores to find an exact match.
1533 // No match.
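The comments above describe the store-walk used when locating a previous store: starting from a memory input, the optimizer steps past stores it can prove independent (step a) until it either finds a store to exactly the same slot (step b) or gives up with no match. A standalone toy model of that walk follows; the Store struct, integer address ids, and the rule that distinct ids never alias are all simplifications, not the real C2 node and alias-analysis types:

    #include <cstdio>

    struct Store {
        int    addr;   // stand-in for a C2 address; equal ints mean "same slot"
        int    value;
        Store* mem;    // prior memory state (the store we bypass or match)
    };

    // Returns the stored value if an exact prior store to 'addr' is visible,
    // walking past stores to provably different slots; -1 means "no match".
    int find_previous_store_value(Store* mem, int addr) {
        for (Store* st = mem; st != nullptr; st = st->mem) {
            if (st->addr == addr) return st->value;  // (b) exact match
            // (a) provably independent store: keep walking the chain
        }
        return -1;  // reached the end of the chain with no match
    }

    int main() {
        Store s1 = { /*addr*/ 8, /*value*/ 42, nullptr };
        Store s2 = { /*addr*/ 16, /*value*/ 7, &s1 };   // independent store
        std::printf("%d\n", find_previous_store_value(&s2, 8));  // prints 42
        return 0;
    }

Here s2 writes a provably different slot, so the search steps past it and recovers the value written by s1.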
2235 StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
2242 case T_BYTE: return new (C) StoreBNode(ctl, mem, adr, adr_type, val);
2243 case T_INT: return new (C) StoreINode(ctl, mem, adr, adr_type, val);
2245 case T_SHORT: return new (C) StoreCNode(ctl, mem, adr, adr_type, val);
2246 case T_LONG: return new (C) StoreLNode(ctl, mem, adr, adr_type, val);
2247 case T_FLOAT: return new (C) StoreFNode(ctl, mem, adr, adr_type, val);
2248 case T_DOUBLE: return new (C) StoreDNode(ctl, mem, adr, adr_type, val);
2253 (UseCompressedOops && val->bottom_type()->isa_klassptr() &&
2255 val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
2256 return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
2260 return new (C) StorePNode(ctl, mem, adr, adr_type, val);
2267 StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val) {
2269 return new (C) StoreLNode(ctl, mem, adr, adr_type, val, require_atomic);
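StoreNode::make, excerpted above, is a factory that picks the concrete store node class from the BasicType of the value; note that T_SHORT shares StoreCNode with T_CHAR (both are 16-bit stores), and on 64-bit compressed-oops builds the value is first wrapped in an EncodePNode so a narrow StoreNNode can be emitted, with StorePNode as the pointer fallback. A freestanding sketch of the same dispatch shape; the node classes here are placeholders, not the real C2 hierarchy:

    #include <memory>

    enum BasicType { T_BOOLEAN, T_BYTE, T_CHAR, T_SHORT, T_INT,
                     T_LONG, T_FLOAT, T_DOUBLE, T_OBJECT };

    struct StoreNode  { virtual ~StoreNode() = default; };
    struct StoreBNode : StoreNode {};   // byte stores (also T_BOOLEAN)
    struct StoreCNode : StoreNode {};   // 16-bit stores (T_CHAR and T_SHORT)
    struct StoreINode : StoreNode {};
    struct StoreLNode : StoreNode {};
    struct StoreFNode : StoreNode {};
    struct StoreDNode : StoreNode {};
    struct StorePNode : StoreNode {};   // pointer store (StoreN when compressed)

    std::unique_ptr<StoreNode> make_store(BasicType bt) {
        switch (bt) {
        case T_BOOLEAN:
        case T_BYTE:   return std::make_unique<StoreBNode>();
        case T_CHAR:
        case T_SHORT:  return std::make_unique<StoreCNode>();  // note: C, not S
        case T_INT:    return std::make_unique<StoreINode>();
        case T_LONG:   return std::make_unique<StoreLNode>();
        case T_FLOAT:  return std::make_unique<StoreFNode>();
        case T_DOUBLE: return std::make_unique<StoreDNode>();
        default:       return std::make_unique<StorePNode>();
        }
    }

    int main() {
        auto st = make_store(T_SHORT);   // yields a StoreCNode, as at line 2245
        return st != nullptr ? 0 : 1;
    }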
2371 Node* val = in(MemNode::ValueIn);
2374 if (val->is_Load() &&
2375 val->in(MemNode::Address)->eqv_uncast(adr) &&
2376 val->in(MemNode::Memory )->eqv_uncast(mem) &&
2377 val->as_Load()->store_Opcode() == Opcode()) {
2384 mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
2392 if (ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
2400 // Steps (a), (b): Walk past independent stores to find an exact match.
2403 if (prev_val != NULL && phase->eqv(prev_val, val)) {
2404 // prev_val and val might differ by a cast; it would be good
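This fragment of StoreNode::Ideal applies two redundancy rules: a store whose value was just loaded from the same address over the same memory state is useless, and a store that repeats the immediately preceding store's value to the same slot (or, under ReduceFieldZeroing, re-zeroes freshly zeroed memory) can be dropped. A toy model of the first rule, with Node, Load, and Store as simplified stand-ins for the C2 classes:

    #include <cassert>

    struct Node  { virtual ~Node() = default; };
    struct Load  : Node { Node* mem; Node* adr; };
    struct Store : Node { Node* mem; Node* adr; Node* val; };

    // Returns the node the store should be replaced by, or the store itself.
    Node* idealize(Store* st) {
        if (auto* ld = dynamic_cast<Load*>(st->val)) {
            if (ld->adr == st->adr && ld->mem == st->mem)
                return st->mem;   // useless store: memory already holds val
        }
        return st;
    }

    int main() {
        Node mem, adr;
        Load ld;  ld.mem = &mem; ld.adr = &adr;
        Store st; st.mem = &mem; st.adr = &adr; st.val = &ld;
        assert(idealize(&st) == &mem);   // the load/store round-trip folds away
        return 0;
    }

The real check also compares the load's store_Opcode() against the store's own opcode (line 2377), so a mismatched-width load cannot cancel the store.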
2433 Node *val = in(MemNode::ValueIn);
2434 if( val->Opcode() == Op_AndI ) {
2435 const TypeInt *t = phase->type( val->in(2) )->isa_int();
2437 set_req(MemNode::ValueIn, val->in(1));
2451 Node *val = in(MemNode::ValueIn);
2452 if( val->Opcode() == Op_RShiftI ) {
2453 const TypeInt *t = phase->type( val->in(2) )->isa_int();
2455 Node *shl = val->in(1);
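These StoreBNode::Ideal / StoreCNode::Ideal fragments drop a redundant AndI mask (and, for 16-bit stores, a sign-manipulating shift pair) in front of a narrow store: the store only writes the low 8 or 16 bits, so pre-clearing the high bits cannot change what lands in memory. The isa_int() check on val->in(2) is what verifies the mask or shift count is a suitable constant. A standalone demonstration of why the transforms are sound:

    #include <cassert>
    #include <cstdint>

    int main() {
        int32_t x = 0x00345678;

        // StoreB(AndI(x, 0xFF)) stores the same byte as StoreB(x).
        uint8_t with_mask    = static_cast<uint8_t>(x & 0xFF);
        uint8_t without_mask = static_cast<uint8_t>(x);
        assert(with_mask == without_mask);   // the AndI mask was redundant

        // Same idea for a 16-bit store: (x << 16) >> 16 only adjusts the
        // high bits, which the char-width store discards anyway.
        uint16_t shifted = static_cast<uint16_t>((x << 16) >> 16);
        uint16_t plain   = static_cast<uint16_t>(x);
        assert(shifted == plain);
        return 0;
    }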
2577 LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required )
2585 init_req(MemNode::ValueIn, val);
2606 LoadStoreConditionalNode::LoadStoreConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex ) : LoadStoreNode(c, mem, adr, val, NULL, TypeInt::BOOL, 5) {
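LoadStoreNode is the base for atomic read-modify-write nodes, and the conditional subclass above adds an expected-value input ('ex') and produces TypeInt::BOOL: the shape of a compare-and-swap. The same behavior at the source level, sketched here with std::atomic rather than C2 nodes:

    #include <atomic>
    #include <cstdio>

    int main() {
        std::atomic<int> slot{5};
        int expected = 5;   // plays the role of the 'ex' input
        // Succeeds (true) only if slot still holds 'expected'.
        bool ok = slot.compare_exchange_strong(expected, 9);
        std::printf("ok=%d slot=%d\n", ok, slot.load());  // ok=1 slot=9
        return 0;
    }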
2612 // Do we Match on this edge index or not? Do not match memory
2619 // Do we Match on this edge index or not? Do not match memory
2642 // (see jck test stmt114.stmt11402.val).
2781 // Do not match memory edge.
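The repeated "do not match memory edge" comments concern instruction selection: match_edge tells the matcher which inputs may be folded into a machine instruction's operand patterns, and the memory edge is only an ordering input, never a matchable operand. A sketch of that predicate, assuming MemNode's conventional input layout (Control=0, Memory=1, Address=2, ValueIn=3):

    #include <cassert>

    enum { Control = 0, Memory = 1, Address = 2, ValueIn = 3 };

    // Nonzero = the matcher may fold this input into an instruction pattern.
    unsigned match_edge_for_store(unsigned idx) {
        return (idx == Address || idx == ValueIn) ? 1u : 0u;
    }

    int main() {
        assert(match_edge_for_store(Memory)  == 0);  // never match memory
        assert(match_edge_for_store(Address) == 1);
        return 0;
    }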
2905 //------------------------------match------------------------------------------
2907 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
2990 // produced by it are optimizable if they match the control edge and
3165 Node* val = st->in(MemNode::ValueIn);
3167 if (!detect_init_independence(val, true, complexity_count))
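Here a captured initializing store is legal only if its value is independent of the allocation being initialized; detect_init_independence walks the value's inputs and, as the complexity_count argument suggests, gives up conservatively once the walk exceeds a budget. A heavily simplified standalone version of such a budgeted walk (the graph type and budget handling are assumptions, not C2's):

    #include <vector>

    struct Node { std::vector<Node*> inputs; };

    // False if 'val' can reach 'alloc', or if the walk exhausts its budget.
    bool value_is_independent(const Node* val, const Node* alloc, int& budget) {
        if (val == alloc) return false;   // value depends on the allocation
        if (--budget < 0) return false;   // too complex: be conservative
        for (const Node* in : val->inputs)
            if (in && !value_is_independent(in, alloc, budget))
                return false;
        return true;
    }

    int main() {
        Node alloc, c;                  // 'c' does not reach 'alloc'
        Node v; v.inputs = { &c };
        int budget = 10;
        return value_is_independent(&v, &alloc, budget) ? 0 : 1;  // independent
    }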
3493 const Type* val = phase->type(st->in(MemNode::ValueIn));
3494 if (!val->singleton()) continue; //skip (non-con store)
3495 BasicType type = val->basic_type();
3499 case T_INT: con = val->is_int()->get_con(); break;
3500 case T_LONG: con = val->is_long()->get_con(); break;
3501 case T_FLOAT: con = jint_cast(val->getf()); break;
3502 case T_DOUBLE: con = jlong_cast(val->getd()); break;
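This switch flattens each constant store value to raw bits, with jint_cast/jlong_cast bit-casting float and double constants to integers, so that neighboring subword constants can later be coalesced into one wider store. A minimal model of that coalescing step, merging two 16-bit constants into one 32-bit little-endian tile:

    #include <cassert>
    #include <cstdint>

    uint32_t merge16(uint16_t lo, uint16_t hi) {
        return static_cast<uint32_t>(lo) | (static_cast<uint32_t>(hi) << 16);
    }

    int main() {
        // Two subword stores, mem[0..1] = 0x1234 and mem[2..3] = 0x5678,
        // become a single 32-bit store of one combined constant.
        uint32_t tile = merge16(0x1234, 0x5678);
        assert(tile == 0x56781234u);   // little-endian layout
        return 0;
    }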
3828 const Type* val = NULL;
3830 (val = phase->type(st->in(MemNode::ValueIn)))->singleton() &&
3831 (int)val->basic_type() < (int)T_OBJECT) {
4295 assert(C->must_alias(n_adr_type, alias_idx), "new memory must match selected slice");