Unify load/store operand accessors

Load and store instructions, as well as intrinsics which access memory,
can now share the same methods for accessing the memory address and
data operands.

Note that while this change introduces the potential for non-load/store
instructions to have their operands accessed through getLoadAddress(),
getStoreAddress(), or getData(), that risk isn't any greater than using
the wrong getSrc() index, and would stick out as a mistake much more
clearly. The advantage this change brings is that we no longer have to
remember where the address and data operands are stored in sub-vector
load/store intrinsics. In addition, there are no more overly verbose
casts, and their cost is eliminated.

Bug: b/179497998
Change-Id: I0d9208555e00b0d3053f7d3baca241fef2b8cbeb
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/52531
Presubmit-Ready: Nicolas Capens <nicolascapens@google.com>
Tested-by: Nicolas Capens <nicolascapens@google.com>
Reviewed-by: Antonio Maiorano <amaiorano@google.com>
diff --git a/src/Reactor/Optimizer.cpp b/src/Reactor/Optimizer.cpp
index 27b14e3..4ac06de 100644
--- a/src/Reactor/Optimizer.cpp
+++ b/src/Reactor/Optimizer.cpp
@@ -41,9 +41,6 @@
 	static const Ice::InstIntrinsic *asStoreSubVector(const Ice::Inst *instruction);
 	static bool isLoad(const Ice::Inst &instruction);
 	static bool isStore(const Ice::Inst &instruction);
-	static Ice::Operand *storeAddress(const Ice::Inst *instruction);
-	static Ice::Operand *loadAddress(const Ice::Inst *instruction);
-	static Ice::Operand *storeData(const Ice::Inst *instruction);
 	static std::size_t storeSize(const Ice::Inst *instruction);
 	static bool loadTypeMatchesStore(const Ice::Inst *load, const Ice::Inst *store);
 
@@ -64,7 +61,7 @@
 	{
 		LoadStoreInst(Ice::Inst *inst, bool isStore)
 		    : inst(inst)
-		    , address(isStore ? storeAddress(inst) : loadAddress(inst))
+		    , address(isStore ? inst->getStoreAddress() : inst->getLoadAddress())
 		    , isStore(isStore)
 		{
 		}
@@ -233,7 +230,7 @@
 		if(addressUses.stores.size() == 1)
 		{
 			Ice::Inst *store = addressUses.stores[0];
-			Ice::Operand *storeValue = storeData(store);
+			Ice::Operand *storeValue = store->getData();
 
 			for(Ice::Inst *load = &*++store->getIterator(), *next = nullptr; load != next; next = load, load = &*++store->getIterator())
 			{
@@ -242,7 +239,7 @@
 					continue;
 				}
 
-				if(loadAddress(load) != address)
+				if(load->getLoadAddress() != address)
 				{
 					continue;
 				}
@@ -412,7 +409,7 @@
 					}
 
 					store = inst;
-					storeValue = storeData(store);
+					storeValue = store->getData();
 					unmatchedLoads = false;
 				}
 				else
@@ -556,7 +553,7 @@
 	}
 	else if(isStore(*instruction))
 	{
-		if(Ice::Variable *address = llvm::dyn_cast<Ice::Variable>(storeAddress(instruction)))
+		if(Ice::Variable *address = llvm::dyn_cast<Ice::Variable>(instruction->getStoreAddress()))
 		{
 			Ice::Inst *def = getDefinition(address);
 
@@ -582,7 +579,7 @@
 {
 	if(auto *instrinsic = llvm::dyn_cast<Ice::InstIntrinsic>(instruction))
 	{
-		if(instrinsic->getIntrinsicInfo().ID == Ice::Intrinsics::LoadSubVector)
+		if(instrinsic->getIntrinsicID() == Ice::Intrinsics::LoadSubVector)
 		{
 			return instrinsic;
 		}
@@ -595,7 +592,7 @@
 {
 	if(auto *instrinsic = llvm::dyn_cast<Ice::InstIntrinsic>(instruction))
 	{
-		if(instrinsic->getIntrinsicInfo().ID == Ice::Intrinsics::StoreSubVector)
+		if(instrinsic->getIntrinsicID() == Ice::Intrinsics::StoreSubVector)
 		{
 			return instrinsic;
 		}
@@ -624,57 +621,6 @@
 	return asStoreSubVector(&instruction) != nullptr;
 }
 
-Ice::Operand *Optimizer::storeAddress(const Ice::Inst *instruction)
-{
-	assert(isStore(*instruction));
-
-	if(auto *store = llvm::dyn_cast<Ice::InstStore>(instruction))
-	{
-		return store->getStoreAddress();
-	}
-
-	if(auto *storeSubVector = asStoreSubVector(instruction))
-	{
-		return storeSubVector->getSrc(1);
-	}
-
-	return nullptr;
-}
-
-Ice::Operand *Optimizer::loadAddress(const Ice::Inst *instruction)
-{
-	assert(isLoad(*instruction));
-
-	if(auto *load = llvm::dyn_cast<Ice::InstLoad>(instruction))
-	{
-		return load->getLoadAddress();
-	}
-
-	if(auto *loadSubVector = asLoadSubVector(instruction))
-	{
-		return loadSubVector->getSrc(0);
-	}
-
-	return nullptr;
-}
-
-Ice::Operand *Optimizer::storeData(const Ice::Inst *instruction)
-{
-	assert(isStore(*instruction));
-
-	if(auto *store = llvm::dyn_cast<Ice::InstStore>(instruction))
-	{
-		return store->getData();
-	}
-
-	if(auto *storeSubVector = asStoreSubVector(instruction))
-	{
-		return storeSubVector->getSrc(0);
-	}
-
-	return nullptr;
-}
-
 std::size_t Optimizer::storeSize(const Ice::Inst *store)
 {
 	assert(isStore(*store));
@@ -700,28 +646,24 @@
 	}
 
 	assert(isLoad(*load) && isStore(*store));
-	assert(loadAddress(load) == storeAddress(store));
+	assert(load->getLoadAddress() == store->getStoreAddress());
 
-	if(auto *instStore = llvm::dyn_cast<Ice::InstStore>(store))
+	if(store->getData()->getType() != load->getDest()->getType())
 	{
-		if(auto *instLoad = llvm::dyn_cast<Ice::InstLoad>(load))
-		{
-			return instStore->getData()->getType() == instLoad->getDest()->getType();
-		}
+		return false;
 	}
 
 	if(auto *storeSubVector = asStoreSubVector(store))
 	{
 		if(auto *loadSubVector = asLoadSubVector(load))
 		{
-			// Check for matching type and sub-vector width.
-			return storeSubVector->getSrc(0)->getType() == loadSubVector->getDest()->getType() &&
-			       llvm::cast<Ice::ConstantInteger32>(storeSubVector->getSrc(2))->getValue() ==
-			           llvm::cast<Ice::ConstantInteger32>(loadSubVector->getSrc(1))->getValue();
+			// Check for matching sub-vector width.
+			return llvm::cast<Ice::ConstantInteger32>(storeSubVector->getSrc(2))->getValue() ==
+			       llvm::cast<Ice::ConstantInteger32>(loadSubVector->getSrc(1))->getValue();
 		}
 	}
 
-	return false;
+	return true;
 }
 
 Optimizer::Uses *Optimizer::getUses(Ice::Operand *operand)
@@ -797,14 +739,14 @@
 
 	if(isLoad(*instruction))
 	{
-		if(value == loadAddress(instruction))
+		if(value == instruction->getLoadAddress())
 		{
 			loads.push_back(instruction);
 		}
 	}
 	else if(isStore(*instruction))
 	{
-		if(value == storeAddress(instruction))
+		if(value == instruction->getStoreAddress())
 		{
 			stores.push_back(instruction);
 		}
diff --git a/third_party/subzero/src/IceInst.h b/third_party/subzero/src/IceInst.h
index ac61f66..0e2c039 100644
--- a/third_party/subzero/src/IceInst.h
+++ b/third_party/subzero/src/IceInst.h
@@ -113,6 +113,11 @@
     assert(!isDeleted());
     Srcs[Index] = Replacement;
   }
+  // Instructions which load data take their address in Src[0], while
+  // store instructions use Src[1] for the address and Src[0] for the data.
+  Operand *getLoadAddress() const { return getSrc(0); }
+  Operand *getStoreAddress() const { return getSrc(1); }
+  Operand *getData() const { return getSrc(0); }
 
   bool isLastUse(const Operand *Src) const;
   void spliceLivenessInfo(Inst *OrigInst, Inst *SpliceAssn);
@@ -630,6 +635,7 @@
   }
 
   Intrinsics::IntrinsicInfo getIntrinsicInfo() const { return Info; }
+  Intrinsics::IntrinsicID getIntrinsicID() const { return Info.ID; }
   bool isMemoryWrite() const override {
     return getIntrinsicInfo().IsMemoryWrite;
   }
@@ -655,7 +661,6 @@
     (void)Align;
     return new (Func->allocate<InstLoad>()) InstLoad(Func, Dest, SourceAddr);
   }
-  Operand *getLoadAddress() const { return getSrc(0); }
   bool isMemoryWrite() const override { return false; }
   void dump(const Cfg *Func) const override;
   static bool classof(const Inst *Instr) { return Instr->getKind() == Load; }
@@ -761,8 +766,6 @@
     (void)Align;
     return new (Func->allocate<InstStore>()) InstStore(Func, Data, Addr);
   }
-  Operand *getStoreAddress() const { return getSrc(1); }
-  Operand *getData() const { return getSrc(0); }
   Variable *getRmwBeacon() const;
   void setRmwBeacon(Variable *Beacon);
   bool isMemoryWrite() const override { return true; }
diff --git a/third_party/subzero/src/IceTargetLowering.cpp b/third_party/subzero/src/IceTargetLowering.cpp
index 179d760..1381f03 100644
--- a/third_party/subzero/src/IceTargetLowering.cpp
+++ b/third_party/subzero/src/IceTargetLowering.cpp
@@ -366,9 +366,9 @@
     doAddressOptStore();
   else if (auto *Intrinsic =
                llvm::dyn_cast<InstIntrinsic>(&*Context.getCur())) {
-    if (Intrinsic->getIntrinsicInfo().ID == Intrinsics::LoadSubVector)
+    if (Intrinsic->getIntrinsicID() == Intrinsics::LoadSubVector)
       doAddressOptLoadSubVector();
-    else if (Intrinsic->getIntrinsicInfo().ID == Intrinsics::StoreSubVector)
+    else if (Intrinsic->getIntrinsicID() == Intrinsics::StoreSubVector)
       doAddressOptStoreSubVector();
   }
   Context.advanceCur();
diff --git a/third_party/subzero/src/IceTargetLoweringARM32.cpp b/third_party/subzero/src/IceTargetLoweringARM32.cpp
index aa75815..3795c04 100644
--- a/third_party/subzero/src/IceTargetLoweringARM32.cpp
+++ b/third_party/subzero/src/IceTargetLoweringARM32.cpp
@@ -723,7 +723,7 @@
   case Inst::Intrinsic: {
     Variable *Dest = Instr->getDest();
     auto *Intrinsic = llvm::cast<InstIntrinsic>(Instr);
-    Intrinsics::IntrinsicID ID = Intrinsic->getIntrinsicInfo().ID;
+    Intrinsics::IntrinsicID ID = Intrinsic->getIntrinsicID();
     switch (ID) {
     default:
       return;
@@ -5016,7 +5016,7 @@
 void TargetARM32::lowerIntrinsic(const InstIntrinsic *Instr) {
   Variable *Dest = Instr->getDest();
   Type DestTy = (Dest != nullptr) ? Dest->getType() : IceType_void;
-  Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID;
+  Intrinsics::IntrinsicID ID = Instr->getIntrinsicID();
   switch (ID) {
   case Intrinsics::AtomicFence:
   case Intrinsics::AtomicFenceAll:
diff --git a/third_party/subzero/src/IceTargetLoweringMIPS32.cpp b/third_party/subzero/src/IceTargetLoweringMIPS32.cpp
index 9876fe7..eb9367c 100644
--- a/third_party/subzero/src/IceTargetLoweringMIPS32.cpp
+++ b/third_party/subzero/src/IceTargetLoweringMIPS32.cpp
@@ -582,7 +582,7 @@
   }
   case Inst::Intrinsic: {
     auto *Intrinsic = llvm::cast<InstIntrinsic>(Instr);
-    Intrinsics::IntrinsicID ID = Intrinsic->getIntrinsicInfo().ID;
+    Intrinsics::IntrinsicID ID = Intrinsic->getIntrinsicID();
     if (isVectorType(DestTy) && ID == Intrinsics::Fabs) {
       Operand *Src0 = Intrinsic->getArg(0);
       GlobalString FabsFloat = Ctx->getGlobalString("llvm.fabs.f32");
@@ -605,7 +605,8 @@
         Context.insert<InstExtractElement>(Op, Src0, Index);
         auto *Res = Func->makeVariable(IceType_f32);
         Variable *DestT = Func->makeVariable(IceType_v4f32);
-        auto *Intrinsic = Context.insert<InstIntrinsic>(1, Res, CallTarget, Info);
+        auto *Intrinsic =
+            Context.insert<InstIntrinsic>(1, Res, CallTarget, Info);
         Intrinsic->addArg(Op);
         Context.insert<InstInsertElement>(DestT, T, Res, Index);
         T = DestT;
@@ -4566,7 +4567,7 @@
   Variable *Dest = Instr->getDest();
   Type DestTy = (Dest == nullptr) ? IceType_void : Dest->getType();
 
-  Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID;
+  Intrinsics::IntrinsicID ID = Instr->getIntrinsicID();
   switch (ID) {
   case Intrinsics::AtomicLoad: {
     assert(isScalarIntegerType(DestTy));
diff --git a/third_party/subzero/src/IceTargetLoweringX86BaseImpl.h b/third_party/subzero/src/IceTargetLoweringX86BaseImpl.h
index 98d9fba..fa37f05 100644
--- a/third_party/subzero/src/IceTargetLoweringX86BaseImpl.h
+++ b/third_party/subzero/src/IceTargetLoweringX86BaseImpl.h
@@ -850,7 +850,7 @@
         // An AtomicLoad intrinsic qualifies as long as it has a valid memory
         // ordering, and can be implemented in a single instruction (i.e., not
         // i64 on x86-32).
-        Intrinsics::IntrinsicID ID = Intrin->getIntrinsicInfo().ID;
+        Intrinsics::IntrinsicID ID = Intrin->getIntrinsicID();
         if (ID == Intrinsics::AtomicLoad &&
             (Traits::Is64Bit || Intrin->getDest()->getType() != IceType_i64) &&
             Intrinsics::isMemoryOrderValid(
@@ -4127,7 +4127,7 @@
 
 template <typename TraitsType>
 void TargetX86Base<TraitsType>::lowerIntrinsic(const InstIntrinsic *Instr) {
-  switch (Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID) {
+  switch (Intrinsics::IntrinsicID ID = Instr->getIntrinsicID()) {
   case Intrinsics::AtomicCmpxchg: {
     if (!Intrinsics::isMemoryOrderValid(
             ID, getConstantMemoryOrder(Instr->getArg(3)),
@@ -7612,7 +7612,7 @@
   } else if (auto *Intrinsic = llvm::dyn_cast<InstIntrinsic>(Instr)) {
     CfgVector<Type> ArgTypes;
     Type ReturnType = IceType_void;
-    switch (Intrinsics::IntrinsicID ID = Intrinsic->getIntrinsicInfo().ID) {
+    switch (Intrinsics::IntrinsicID ID = Intrinsic->getIntrinsicID()) {
     default:
       return;
     case Intrinsics::Ctpop: {