diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt
index 9b2240931..e89a0ee16 100644
--- a/src/shader_recompiler/CMakeLists.txt
+++ b/src/shader_recompiler/CMakeLists.txt
@@ -48,8 +48,8 @@ add_library(shader_recompiler STATIC
     backend/glsl/emit_glsl_special.cpp
     backend/glsl/emit_glsl_undefined.cpp
     backend/glsl/emit_glsl_warp.cpp
-    backend/glsl/reg_alloc.cpp
-    backend/glsl/reg_alloc.h
+    backend/glsl/var_alloc.cpp
+    backend/glsl/var_alloc.h
     backend/spirv/emit_context.cpp
     backend/spirv/emit_context.h
     backend/spirv/emit_spirv.cpp
diff --git a/src/shader_recompiler/backend/glsl/emit_context.h b/src/shader_recompiler/backend/glsl/emit_context.h
index 7ae7c8766..2f1062954 100644
--- a/src/shader_recompiler/backend/glsl/emit_context.h
+++ b/src/shader_recompiler/backend/glsl/emit_context.h
@@ -10,7 +10,7 @@
 
 #include <fmt/format.h>
 
-#include "shader_recompiler/backend/glsl/reg_alloc.h"
+#include "shader_recompiler/backend/glsl/var_alloc.h"
 #include "shader_recompiler/stage.h"
 
 namespace Shader {
@@ -35,81 +35,81 @@ public:
     explicit EmitContext(IR::Program& program, Bindings& bindings, const Profile& profile_,
                          const RuntimeInfo& runtime_info_);
 
-    template <Type type, typename... Args>
+    template <GlslVarType type, typename... Args>
     void Add(const char* format_str, IR::Inst& inst, Args&&... args) {
-        code += fmt::format(format_str, reg_alloc.Define(inst, type), std::forward<Args>(args)...);
+        code += fmt::format(format_str, var_alloc.Define(inst, type), std::forward<Args>(args)...);
         // TODO: Remove this
         code += '\n';
     }
 
     template <typename... Args>
     void AddU1(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::U1>(format_str, inst, args...);
+        Add<GlslVarType::U1>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddF16x2(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::F16x2>(format_str, inst, args...);
+        Add<GlslVarType::F16x2>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddU32(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::U32>(format_str, inst, args...);
+        Add<GlslVarType::U32>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddS32(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::S32>(format_str, inst, args...);
+        Add<GlslVarType::S32>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddF32(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::F32>(format_str, inst, args...);
+        Add<GlslVarType::F32>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddS64(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::S64>(format_str, inst, args...);
+        Add<GlslVarType::S64>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddU64(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::U64>(format_str, inst, args...);
+        Add<GlslVarType::U64>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddF64(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::F64>(format_str, inst, args...);
+        Add<GlslVarType::F64>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddU32x2(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::U32x2>(format_str, inst, args...);
+        Add<GlslVarType::U32x2>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddF32x2(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::F32x2>(format_str, inst, args...);
+        Add<GlslVarType::F32x2>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddU32x3(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::U32x3>(format_str, inst, args...);
+        Add<GlslVarType::U32x3>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddF32x3(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::F32x3>(format_str, inst, args...);
+        Add<GlslVarType::F32x3>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddU32x4(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::U32x4>(format_str, inst, args...);
+        Add<GlslVarType::U32x4>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddF32x4(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::F32x4>(format_str, inst, args...);
+        Add<GlslVarType::F32x4>(format_str, inst, args...);
     }
 
     template <typename... Args>
@@ -121,7 +121,7 @@ public:
 
     std::string header;
     std::string code;
-    RegAlloc reg_alloc;
+    VarAlloc var_alloc;
     const Info& info;
     const Profile& profile;
     const RuntimeInfo& runtime_info;
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl.cpp b/src/shader_recompiler/backend/glsl/emit_glsl.cpp
index bac4b830d..4304ee4d5 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl.cpp
@@ -33,7 +33,7 @@ void SetDefinition(EmitContext& ctx, IR::Inst* inst, Args... args) {
 template <typename ArgType>
 auto Arg(EmitContext& ctx, const IR::Value& arg) {
     if constexpr (std::is_same_v<ArgType, std::string_view>) {
-        return ctx.reg_alloc.Consume(arg);
+        return ctx.var_alloc.Consume(arg);
     } else if constexpr (std::is_same_v<ArgType, const IR::Value&>) {
         return arg;
     } else if constexpr (std::is_same_v<ArgType, u32>) {
@@ -131,7 +131,7 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
             }
             break;
         case IR::AbstractSyntaxNode::Type::If:
-            ctx.Add("if ({}){{", ctx.reg_alloc.Consume(node.data.if_node.cond));
+            ctx.Add("if ({}){{", ctx.var_alloc.Consume(node.data.if_node.cond));
             break;
         case IR::AbstractSyntaxNode::Type::EndIf:
             ctx.Add("}}");
@@ -142,7 +142,7 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
                     ctx.Add("break;");
                 }
             } else {
-                ctx.Add("if({}){{break;}}", ctx.reg_alloc.Consume(node.data.break_node.cond));
+                ctx.Add("if({}){{break;}}", ctx.var_alloc.Consume(node.data.break_node.cond));
             }
             break;
         case IR::AbstractSyntaxNode::Type::Return:
@@ -153,7 +153,7 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
             ctx.Add("for(;;){{");
             break;
         case IR::AbstractSyntaxNode::Type::Repeat:
-            ctx.Add("if({}){{", ctx.reg_alloc.Consume(node.data.repeat.cond));
+            ctx.Add("if({}){{", ctx.var_alloc.Consume(node.data.repeat.cond));
             ctx.Add("continue;\n}}else{{");
             ctx.Add("break;\n}}\n}}");
             break;
@@ -171,6 +171,23 @@ std::string GlslVersionSpecifier(const EmitContext& ctx) {
     }
     return "";
 }
+
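+// Declares every variable the allocator handed out, zero-initialized, in the shader header.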
+void DefineVariables(const EmitContext& ctx, std::string& header) {
+    for (u32 i = 0; i < static_cast<u32>(GlslVarType::Void); ++i) {
+        const auto type{static_cast<GlslVarType>(i)};
+        const auto& tracker{ctx.var_alloc.GetUseTracker(type)};
+        const auto type_name{ctx.var_alloc.GetGlslType(type)};
+        // Temps/return types that are never used are stored at index 0
+        if (tracker.uses_temp) {
+            header += fmt::format("{}{}={}(0);", type_name, ctx.var_alloc.Representation(0, type),
+                                  type_name);
+        }
+        for (u32 index = 1; index <= tracker.num_used; ++index) {
+            header += fmt::format("{}{}={}(0);", type_name,
+                                  ctx.var_alloc.Representation(index, type), type_name);
+        }
+    }
+}
 } // Anonymous namespace
 
 std::string EmitGLSL(const Profile& profile, const RuntimeInfo& runtime_info, IR::Program& program,
@@ -190,9 +207,7 @@ std::string EmitGLSL(const Profile& profile, const RuntimeInfo& runtime_info, IR
     if (program.stage == Stage::VertexA || program.stage == Stage::VertexB) {
         ctx.header += "gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);";
     }
-    for (size_t index = 0; index < ctx.reg_alloc.num_used_registers; ++index) {
-        ctx.header += fmt::format("{} R{};", ctx.reg_alloc.reg_types[index], index);
-    }
+    DefineVariables(ctx, ctx.header);
     if (ctx.uses_cc_carry) {
         ctx.header += "uint carry;";
     }
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
index 16791be84..918f90058 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
@@ -20,14 +20,14 @@ for (;;){{
 
 void SharedCasFunction(EmitContext& ctx, IR::Inst& inst, std::string_view offset,
                        std::string_view value, std::string_view function) {
-    const auto ret{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     const std::string smem{fmt::format("smem[{}/4]", offset)};
     ctx.Add(cas_loop.data(), ret, smem, ret, smem, function, smem, value, ret);
 }
 
 void SsboCasFunction(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                      const IR::Value& offset, std::string_view value, std::string_view function) {
-    const auto ret{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     const std::string ssbo{fmt::format("ssbo{}[{}]", binding.U32(), offset.U32())};
     ctx.Add(cas_loop.data(), ret, ssbo, ret, ssbo, function, ssbo, value, ret);
 }
@@ -36,7 +36,7 @@ void SsboCasFunctionF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& bindi
                         const IR::Value& offset, std::string_view value,
                         std::string_view function) {
     const std::string ssbo{fmt::format("ssbo{}[{}]", binding.U32(), offset.U32())};
-    const auto ret{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     ctx.Add(cas_loop.data(), ret, ssbo, ret, ssbo, function, ssbo, value, ret);
     ctx.AddF32("{}=uintBitsToFloat({});", inst, ret);
 }
@@ -102,9 +102,8 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, std::string_vi
 void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
                                 std::string_view value) {
     // LOG_WARNING("Int64 Atomics not supported, fallback to non-atomic");
-    const auto ret{ctx.reg_alloc.Define(inst, Type::U64)};
-    ctx.Add("{}=packUint2x32(uvec2(smem[{}/4],smem[({}+4)/4]));", ret, pointer_offset,
-            pointer_offset);
+    ctx.AddU64("{}=packUint2x32(uvec2(smem[{}/4],smem[({}+4)/4]));", inst, pointer_offset,
+               pointer_offset);
     ctx.Add("smem[{}/4]=unpackUint2x32({}).x;smem[({}+4)/4]=unpackUint2x32({}).y;", pointer_offset,
             value, pointer_offset, value);
 }
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp
index 3e8c648b1..2b08aa593 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp
@@ -26,7 +26,7 @@ void EmitIdentity(EmitContext&, IR::Inst& inst, const IR::Value& value) {
 }
 
 void EmitConditionRef(EmitContext& ctx, IR::Inst& inst, const IR::Value& value) {
-    ctx.AddU1("{}={};", inst, ctx.reg_alloc.Consume(value));
+    ctx.AddU1("{}={};", inst, ctx.var_alloc.Consume(value));
 }
 
 void EmitBitCastU16F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst) {
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
index 46ce413bf..8688686e8 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
@@ -29,7 +29,7 @@ void EmitGetCbufU8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst&
                    ctx.stage_name, binding.U32(), offset.U32() / 16, OffsetSwizzle(offset.U32()),
                    (offset.U32() % 4) * 8);
     } else {
-        const auto offset_var{ctx.reg_alloc.Consume(offset)};
+        const auto offset_var{ctx.var_alloc.Consume(offset)};
         ctx.AddU32(
             "{}=bitfieldExtract(floatBitsToUint({}_cbuf{}[{}/16][({}/4)%4]),int(({}%4)*8),8);",
             inst, ctx.stage_name, binding.U32(), offset_var, offset_var, offset_var);
@@ -44,7 +44,7 @@ void EmitGetCbufS8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst&
                    ctx.stage_name, binding.U32(), offset.U32() / 16, OffsetSwizzle(offset.U32()),
                    (offset.U32() % 4) * 8);
     } else {
-        const auto offset_var{ctx.reg_alloc.Consume(offset)};
+        const auto offset_var{ctx.var_alloc.Consume(offset)};
         ctx.AddU32(
             "{}=bitfieldExtract(floatBitsToInt({}_cbuf{}[{}/16][({}/4)%4]),int(({}%4)*8),8);", inst,
             ctx.stage_name, binding.U32(), offset_var, offset_var, offset_var);
@@ -59,7 +59,7 @@ void EmitGetCbufU16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst
                    ctx.stage_name, binding.U32(), offset.U32() / 16, OffsetSwizzle(offset.U32()),
                    ((offset.U32() / 2) % 2) * 16);
     } else {
-        const auto offset_var{ctx.reg_alloc.Consume(offset)};
+        const auto offset_var{ctx.var_alloc.Consume(offset)};
         ctx.AddU32("{}=bitfieldExtract(floatBitsToUint({}_cbuf{}[{}/16][({}/4)%4]),int((({}/"
                    "2)%2)*16),16);",
                    inst, ctx.stage_name, binding.U32(), offset_var, offset_var, offset_var);
@@ -74,7 +74,7 @@ void EmitGetCbufS16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst
                    ctx.stage_name, binding.U32(), offset.U32() / 16, OffsetSwizzle(offset.U32()),
                    ((offset.U32() / 2) % 2) * 16);
     } else {
-        const auto offset_var{ctx.reg_alloc.Consume(offset)};
+        const auto offset_var{ctx.var_alloc.Consume(offset)};
         ctx.AddU32(
             "{}=bitfieldExtract(floatBitsToInt({}_cbuf{}[{}/16][({}/4)%4]),int((({}/2)%2)*16),16);",
             inst, ctx.stage_name, binding.U32(), offset_var, offset_var, offset_var);
@@ -87,7 +87,7 @@ void EmitGetCbufU32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
         ctx.AddU32("{}=floatBitsToUint({}_cbuf{}[{}].{});", inst, ctx.stage_name, binding.U32(),
                    offset.U32() / 16, OffsetSwizzle(offset.U32()));
     } else {
-        const auto offset_var{ctx.reg_alloc.Consume(offset)};
+        const auto offset_var{ctx.var_alloc.Consume(offset)};
         ctx.AddU32("{}=floatBitsToUint({}_cbuf{}[{}/16][({}/4)%4]);", inst, ctx.stage_name,
                    binding.U32(), offset_var, offset_var);
     }
@@ -99,7 +99,7 @@ void EmitGetCbufF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
         ctx.AddF32("{}={}_cbuf{}[{}].{};", inst, ctx.stage_name, binding.U32(), offset.U32() / 16,
                    OffsetSwizzle(offset.U32()));
     } else {
-        const auto offset_var{ctx.reg_alloc.Consume(offset)};
+        const auto offset_var{ctx.var_alloc.Consume(offset)};
         ctx.AddF32("{}={}_cbuf{}[{}/16][({}/4)%4];", inst, ctx.stage_name, binding.U32(),
                    offset_var, offset_var);
     }
@@ -114,7 +114,7 @@ void EmitGetCbufU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding
             ctx.stage_name, binding.U32(), (offset.U32() + 4) / 16,
             OffsetSwizzle(offset.U32() + 4));
     } else {
-        const auto offset_var{ctx.reg_alloc.Consume(offset)};
+        const auto offset_var{ctx.var_alloc.Consume(offset)};
         ctx.AddU32x2("{}=uvec2(floatBitsToUint({}_cbuf{}[{}/16][({}/"
                      "4)%4]),floatBitsToUint({}_cbuf{}[({}+4)/16][(({}+4)/4)%4]));",
                      inst, ctx.stage_name, binding.U32(), offset_var, offset_var, ctx.stage_name,
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
index e63e3f2bd..eb427d8b5 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
@@ -104,12 +104,12 @@ void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse
     }
     const auto texture{Texture(ctx, info, index)};
     const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""};
-    const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)};
+    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const auto sparse_inst{PrepareSparse(inst)};
     if (!sparse_inst) {
         if (!offset.IsEmpty()) {
             ctx.Add("{}=textureOffset({},{},{}{});", texel, texture, coords,
-                    CastToIntVec(ctx.reg_alloc.Consume(offset), info), bias);
+                    CastToIntVec(ctx.var_alloc.Consume(offset), info), bias);
         } else {
             if (ctx.stage == Stage::Fragment) {
                 ctx.Add("{}=texture({},{}{});", texel, texture, coords, bias);
@@ -122,7 +122,7 @@ void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse
     // TODO: Query sparseTexels extension support
     if (!offset.IsEmpty()) {
         ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureOffsetARB({},{},{},{}{}));",
-                  *sparse_inst, texture, coords, CastToIntVec(ctx.reg_alloc.Consume(offset), info),
+                  *sparse_inst, texture, coords, CastToIntVec(ctx.var_alloc.Consume(offset), info),
                   texel, bias);
     } else {
         ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureARB({},{},{}{}));", *sparse_inst,
@@ -143,12 +143,12 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse
         throw NotImplementedException("Lod clamp samples");
     }
     const auto texture{Texture(ctx, info, index)};
-    const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)};
+    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const auto sparse_inst{PrepareSparse(inst)};
     if (!sparse_inst) {
         if (!offset.IsEmpty()) {
             ctx.Add("{}=textureLodOffset({},{},{},{});", texel, texture, coords, lod_lc,
-                    CastToIntVec(ctx.reg_alloc.Consume(offset), info));
+                    CastToIntVec(ctx.var_alloc.Consume(offset), info));
         } else {
             ctx.Add("{}=textureLod({},{},{});", texel, texture, coords, lod_lc);
         }
@@ -158,7 +158,7 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse
     if (!offset.IsEmpty()) {
         ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchOffsetARB({},{},int({}),{},{}));",
                   *sparse_inst, texture, CastToIntVec(coords, info), lod_lc,
-                  CastToIntVec(ctx.reg_alloc.Consume(offset), info), texel);
+                  CastToIntVec(ctx.var_alloc.Consume(offset), info), texel);
     } else {
         ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureLodARB({},{},{},{}));", *sparse_inst,
                   texture, coords, lod_lc, texel);
@@ -232,7 +232,7 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins
                      [[maybe_unused]] const IR::Value& offset2) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
     const auto texture{Texture(ctx, info, index)};
-    const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)};
+    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const auto sparse_inst{PrepareSparse(inst)};
     if (!sparse_inst) {
         if (offset.IsEmpty()) {
@@ -242,7 +242,7 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins
         }
         if (offset2.IsEmpty()) {
             ctx.Add("{}=textureGatherOffset({},{},{},int({}));", texel, texture, coords,
-                    CastToIntVec(ctx.reg_alloc.Consume(offset), info), info.gather_component);
+                    CastToIntVec(ctx.var_alloc.Consume(offset), info), info.gather_component);
             return;
         }
         // PTP
@@ -259,7 +259,7 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins
     if (offset2.IsEmpty()) {
         ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},{},int({})));",
                   *sparse_inst, texture, CastToIntVec(coords, info),
-                  CastToIntVec(ctx.reg_alloc.Consume(offset), info), texel, info.gather_component);
+                  CastToIntVec(ctx.var_alloc.Consume(offset), info), texel, info.gather_component);
     }
     // PTP
     const auto offsets{PtpOffsets(offset, offset2)};
@@ -276,7 +276,7 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR:
                          [[maybe_unused]] std::string_view dref) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
     const auto texture{Texture(ctx, info, index)};
-    const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)};
+    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const auto sparse_inst{PrepareSparse(inst)};
     if (!sparse_inst) {
         if (offset.IsEmpty()) {
@@ -285,7 +285,7 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR:
         }
         if (offset2.IsEmpty()) {
             ctx.Add("{}=textureGatherOffset({},{},{},{});", texel, texture, coords, dref,
-                    CastToIntVec(ctx.reg_alloc.Consume(offset), info));
+                    CastToIntVec(ctx.var_alloc.Consume(offset), info));
             return;
         }
         // PTP
@@ -301,7 +301,7 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR:
     if (offset2.IsEmpty()) {
         ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},,{},{}));",
                   *sparse_inst, texture, CastToIntVec(coords, info), dref,
-                  CastToIntVec(ctx.reg_alloc.Consume(offset), info), texel);
+                  CastToIntVec(ctx.var_alloc.Consume(offset), info), texel);
     }
     // PTP
     const auto offsets{PtpOffsets(offset, offset2)};
@@ -323,7 +323,7 @@ void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst
     }
     const auto texture{Texture(ctx, info, index)};
     const auto sparse_inst{PrepareSparse(inst)};
-    const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)};
+    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     if (!sparse_inst) {
         if (!offset.empty()) {
             ctx.Add("{}=texelFetchOffset({},{},int({}),{});", texel, texture,
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp
index 4a3d66c90..1c7413cd4 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp
@@ -29,7 +29,7 @@ void SetSignFlag(EmitContext& ctx, IR::Inst& inst, std::string_view result) {
 }
 } // Anonymous namespace
 void EmitIAdd32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
-    const auto result{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     if (IR::Inst* const carry{inst.GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp)}) {
         ctx.uses_cc_carry = true;
         ctx.Add("{}=uaddCarry({},{},carry);", result, a, b);
@@ -130,7 +130,7 @@ void EmitBitFieldInsert(EmitContext& ctx, IR::Inst& inst, std::string_view base,
 
 void EmitBitFieldSExtract(EmitContext& ctx, IR::Inst& inst, std::string_view base,
                           std::string_view offset, std::string_view count) {
-    const auto result{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     ctx.Add("{}=uint(bitfieldExtract(int({}),int({}),int({})));", result, base, offset, count);
     SetZeroFlag(ctx, inst, result);
     SetSignFlag(ctx, inst, result);
@@ -138,7 +138,7 @@ void EmitBitFieldSExtract(EmitContext& ctx, IR::Inst& inst, std::string_view bas
 
 void EmitBitFieldUExtract(EmitContext& ctx, IR::Inst& inst, std::string_view base,
                           std::string_view offset, std::string_view count) {
-    const auto result{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     ctx.Add("{}=uint(bitfieldExtract(uint({}),int({}),int({})));", result, base, offset, count);
     SetZeroFlag(ctx, inst, result);
     SetSignFlag(ctx, inst, result);
@@ -184,7 +184,7 @@ void EmitUMax32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::strin
 
 void EmitSClamp32(EmitContext& ctx, IR::Inst& inst, std::string_view value, std::string_view min,
                   std::string_view max) {
-    const auto result{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     ctx.Add("{}=clamp(int({}),int({}),int({}));", result, value, min, max);
     SetZeroFlag(ctx, inst, result);
     SetSignFlag(ctx, inst, result);
@@ -192,7 +192,7 @@ void EmitSClamp32(EmitContext& ctx, IR::Inst& inst, std::string_view value, std:
 
 void EmitUClamp32(EmitContext& ctx, IR::Inst& inst, std::string_view value, std::string_view min,
                   std::string_view max) {
-    const auto result{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     ctx.Add("{}=clamp(uint({}),uint({}),uint({}));", result, value, min, max);
     SetZeroFlag(ctx, inst, result);
     SetSignFlag(ctx, inst, result);
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_memory.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_memory.cpp
index c66b7b788..8ce186733 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_memory.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_memory.cpp
@@ -12,7 +12,7 @@ namespace Shader::Backend::GLSL {
 void EmitLoadStorageU8([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
                        [[maybe_unused]] const IR::Value& binding,
                        [[maybe_unused]] const IR::Value& offset) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.AddU32("{}=bitfieldExtract(ssbo{}[{}/4],int({}%4)*8,8);", inst, binding.U32(), offset_var,
                offset_var);
 }
@@ -20,7 +20,7 @@ void EmitLoadStorageU8([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
 void EmitLoadStorageS8([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
                        [[maybe_unused]] const IR::Value& binding,
                        [[maybe_unused]] const IR::Value& offset) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.AddS32("{}=bitfieldExtract(int(ssbo{}[{}/4]),int({}%4)*8,8);", inst, binding.U32(),
                offset_var, offset_var);
 }
@@ -28,7 +28,7 @@ void EmitLoadStorageS8([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
 void EmitLoadStorageU16([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
                         [[maybe_unused]] const IR::Value& binding,
                         [[maybe_unused]] const IR::Value& offset) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.AddU32("{}=bitfieldExtract(ssbo{}[{}/4],int(({}/2)%2)*16,16);", inst, binding.U32(),
                offset_var, offset_var);
 }
@@ -36,27 +36,27 @@ void EmitLoadStorageU16([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
 void EmitLoadStorageS16([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
                         [[maybe_unused]] const IR::Value& binding,
                         [[maybe_unused]] const IR::Value& offset) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.AddS32("{}=bitfieldExtract(int(ssbo{}[{}/4]),int(({}/2)%2)*16,16);", inst, binding.U32(),
                offset_var, offset_var);
 }
 
 void EmitLoadStorage32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                        const IR::Value& offset) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.AddU32("{}=ssbo{}[{}/4];", inst, binding.U32(), offset_var);
 }
 
 void EmitLoadStorage64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                        const IR::Value& offset) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.AddU32x2("{}=uvec2(ssbo{}[{}/4],ssbo{}[({}+4)/4]);", inst, binding.U32(), offset_var,
                  binding.U32(), offset_var);
 }
 
 void EmitLoadStorage128(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                         const IR::Value& offset) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.AddU32x4("{}=uvec4(ssbo{}[{}/4],ssbo{}[({}+4)/4],ssbo{}[({}+8)/4],ssbo{}[({}+12)/4]);",
                  inst, binding.U32(), offset_var, binding.U32(), offset_var, binding.U32(),
                  offset_var, binding.U32(), offset_var);
@@ -66,7 +66,7 @@ void EmitWriteStorageU8([[maybe_unused]] EmitContext& ctx,
                         [[maybe_unused]] const IR::Value& binding,
                         [[maybe_unused]] const IR::Value& offset,
                         [[maybe_unused]] std::string_view value) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.Add("ssbo{}[{}/4]=bitfieldInsert(ssbo{}[{}/4],{},int({}%4)*8,8);", binding.U32(),
             offset_var, binding.U32(), offset_var, value, offset_var);
 }
@@ -75,7 +75,7 @@ void EmitWriteStorageS8([[maybe_unused]] EmitContext& ctx,
                         [[maybe_unused]] const IR::Value& binding,
                         [[maybe_unused]] const IR::Value& offset,
                         [[maybe_unused]] std::string_view value) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.Add("ssbo{}[{}/4]=bitfieldInsert(ssbo{}[{}/4],{},int({}%4)*8,8);", binding.U32(),
             offset_var, binding.U32(), offset_var, value, offset_var);
 }
@@ -84,7 +84,7 @@ void EmitWriteStorageU16([[maybe_unused]] EmitContext& ctx,
                          [[maybe_unused]] const IR::Value& binding,
                          [[maybe_unused]] const IR::Value& offset,
                          [[maybe_unused]] std::string_view value) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.Add("ssbo{}[{}/4]=bitfieldInsert(ssbo{}[{}/4],{},int(({}/2)%2)*16,16);", binding.U32(),
             offset_var, binding.U32(), offset_var, value, offset_var);
 }
@@ -93,20 +93,20 @@ void EmitWriteStorageS16([[maybe_unused]] EmitContext& ctx,
                          [[maybe_unused]] const IR::Value& binding,
                          [[maybe_unused]] const IR::Value& offset,
                          [[maybe_unused]] std::string_view value) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.Add("ssbo{}[{}/4]=bitfieldInsert(ssbo{}[{}/4],{},int(({}/2)%2)*16,16);", binding.U32(),
             offset_var, binding.U32(), offset_var, value, offset_var);
 }
 
 void EmitWriteStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                         std::string_view value) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.Add("ssbo{}[{}/4]={};", binding.U32(), offset_var, value);
 }
 
 void EmitWriteStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                         std::string_view value) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.Add("ssbo{}[{}/4]={}.x;", binding.U32(), offset_var, value);
     ctx.Add("ssbo{}[({}+4)/4]={}.y;", binding.U32(), offset_var, value);
 }
@@ -115,7 +115,7 @@ void EmitWriteStorage128([[maybe_unused]] EmitContext& ctx,
                          [[maybe_unused]] const IR::Value& binding,
                          [[maybe_unused]] const IR::Value& offset,
                          [[maybe_unused]] std::string_view value) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.Add("ssbo{}[{}/4]={}.x;", binding.U32(), offset_var, value);
     ctx.Add("ssbo{}[({}+4)/4]={}.y;", binding.U32(), offset_var, value);
     ctx.Add("ssbo{}[({}+8)/4]={}.z;", binding.U32(), offset_var, value);
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp
index 22ea9c9b1..806c4777b 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp
@@ -21,11 +21,11 @@ static void NotImplemented() {
 void EmitPhi(EmitContext& ctx, IR::Inst& phi) {
     const size_t num_args{phi.NumArgs()};
     for (size_t i = 0; i < num_args; ++i) {
-        ctx.reg_alloc.Consume(phi.Arg(i));
+        ctx.var_alloc.Consume(phi.Arg(i));
     }
     if (!phi.Definition<Id>().is_valid) {
         // The phi node wasn't forward defined
-        ctx.Add("{};", ctx.reg_alloc.Define(phi, phi.Arg(0).Type()));
+        ctx.Add("{};", ctx.var_alloc.Define(phi, phi.Arg(0).Type()));
     }
 }
 
@@ -42,10 +42,10 @@ void EmitPhiMove(EmitContext& ctx, const IR::Value& phi_value, const IR::Value&
     const auto phi_type{phi.Arg(0).Type()};
     if (!phi.Definition<Id>().is_valid) {
         // The phi node wasn't forward defined
-        ctx.Add("{};", ctx.reg_alloc.Define(phi, phi_type));
+        ctx.Add("{};", ctx.var_alloc.Define(phi, phi_type));
     }
-    const auto phi_reg{ctx.reg_alloc.Consume(IR::Value{&phi})};
-    const auto val_reg{ctx.reg_alloc.Consume(value)};
+    const auto phi_reg{ctx.var_alloc.Consume(IR::Value{&phi})};
+    const auto val_reg{ctx.var_alloc.Consume(value)};
     if (phi_reg == val_reg) {
         return;
     }
diff --git a/src/shader_recompiler/backend/glsl/reg_alloc.cpp b/src/shader_recompiler/backend/glsl/reg_alloc.cpp
deleted file mode 100644
index b1de022d4..000000000
--- a/src/shader_recompiler/backend/glsl/reg_alloc.cpp
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <string>
-#include <string_view>
-
-#include <fmt/format.h>
-
-#include "shader_recompiler/backend/glsl/reg_alloc.h"
-#include "shader_recompiler/exception.h"
-#include "shader_recompiler/frontend/ir/value.h"
-
-namespace Shader::Backend::GLSL {
-namespace {
-std::string Representation(Id id) {
-    if (id.is_condition_code != 0) {
-        throw NotImplementedException("Condition code");
-    }
-    if (id.is_spill != 0) {
-        throw NotImplementedException("Spilling");
-    }
-    const u32 index{static_cast<u32>(id.index)};
-    return fmt::format("R{}", index);
-}
-
-std::string FormatFloat(std::string_view value, IR::Type type) {
-    // TODO: Confirm FP64 nan/inf
-    if (type == IR::Type::F32) {
-        if (value == "nan") {
-            return "uintBitsToFloat(0x7fc00000)";
-        }
-        if (value == "inf") {
-            return "uintBitsToFloat(0x7f800000)";
-        }
-        if (value == "-inf") {
-            return "uintBitsToFloat(0xff800000)";
-        }
-    }
-    if (value.find_first_of('e') != std::string_view::npos) {
-        // scientific notation
-        const auto cast{type == IR::Type::F32 ? "float" : "double"};
-        return fmt::format("{}({})", cast, value);
-    }
-    const bool needs_dot{value.find_first_of('.') == std::string_view::npos};
-    const bool needs_suffix{!value.ends_with('f')};
-    const auto suffix{type == IR::Type::F32 ? "f" : "lf"};
-    return fmt::format("{}{}{}", value, needs_dot ? "." : "", needs_suffix ? suffix : "");
-}
-
-std::string MakeImm(const IR::Value& value) {
-    switch (value.Type()) {
-    case IR::Type::U1:
-        return fmt::format("{}", value.U1() ? "true" : "false");
-    case IR::Type::U32:
-        return fmt::format("{}u", value.U32());
-    case IR::Type::F32:
-        return FormatFloat(fmt::format("{}", value.F32()), IR::Type::F32);
-    case IR::Type::U64:
-        return fmt::format("{}ul", value.U64());
-    case IR::Type::F64:
-        return FormatFloat(fmt::format("{}", value.F64()), IR::Type::F64);
-    case IR::Type::Void:
-        return "";
-    default:
-        throw NotImplementedException("Immediate type {}", value.Type());
-    }
-}
-} // Anonymous namespace
-
-std::string RegAlloc::Define(IR::Inst& inst) {
-    const Id id{Alloc()};
-    inst.SetDefinition<Id>(id);
-    return Representation(id);
-}
-
-std::string RegAlloc::Define(IR::Inst& inst, Type type) {
-    const Id id{Alloc()};
-    std::string type_str = "";
-    if (!register_defined[id.index]) {
-        register_defined[id.index] = true;
-        // type_str = GetGlslType(type);
-        reg_types.push_back(GetGlslType(type));
-        ++num_used_registers;
-    }
-    inst.SetDefinition<Id>(id);
-    return type_str + Representation(id);
-}
-
-std::string RegAlloc::Define(IR::Inst& inst, IR::Type type) {
-    return Define(inst, RegType(type));
-}
-
-std::string RegAlloc::Consume(const IR::Value& value) {
-    return value.IsImmediate() ? MakeImm(value) : Consume(*value.InstRecursive());
-}
-
-std::string RegAlloc::Consume(IR::Inst& inst) {
-    inst.DestructiveRemoveUsage();
-    // TODO: reuse variables of same type if possible
-    // if (!inst.HasUses()) {
-    //     Free(id);
-    // }
-    return Representation(inst.Definition<Id>());
-}
-
-Type RegAlloc::RegType(IR::Type type) {
-    switch (type) {
-    case IR::Type::U1:
-        return Type::U1;
-    case IR::Type::U32:
-        return Type::U32;
-    case IR::Type::F32:
-        return Type::F32;
-    case IR::Type::U64:
-        return Type::U64;
-    case IR::Type::F64:
-        return Type::F64;
-    default:
-        throw NotImplementedException("IR type {}", type);
-    }
-}
-
-std::string RegAlloc::GetGlslType(Type type) {
-    switch (type) {
-    case Type::U1:
-        return "bool ";
-    case Type::F16x2:
-        return "f16vec2 ";
-    case Type::U32:
-        return "uint ";
-    case Type::S32:
-        return "int ";
-    case Type::F32:
-        return "float ";
-    case Type::S64:
-        return "int64_t ";
-    case Type::U64:
-        return "uint64_t ";
-    case Type::F64:
-        return "double ";
-    case Type::U32x2:
-        return "uvec2 ";
-    case Type::F32x2:
-        return "vec2 ";
-    case Type::U32x3:
-        return "uvec3 ";
-    case Type::F32x3:
-        return "vec3 ";
-    case Type::U32x4:
-        return "uvec4 ";
-    case Type::F32x4:
-        return "vec4 ";
-    case Type::Void:
-        return "";
-    default:
-        throw NotImplementedException("Type {}", type);
-    }
-}
-
-std::string RegAlloc::GetGlslType(IR::Type type) {
-    return GetGlslType(RegType(type));
-}
-
-Id RegAlloc::Alloc() {
-    if (num_used_registers < NUM_REGS) {
-        for (size_t reg = 0; reg < NUM_REGS; ++reg) {
-            if (register_use[reg]) {
-                continue;
-            }
-            register_use[reg] = true;
-            Id ret{};
-            ret.is_valid.Assign(1);
-            ret.is_long.Assign(0);
-            ret.is_spill.Assign(0);
-            ret.is_condition_code.Assign(0);
-            ret.index.Assign(static_cast<u32>(reg));
-            return ret;
-        }
-    }
-    throw NotImplementedException("Register spilling");
-}
-
-void RegAlloc::Free(Id id) {
-    if (id.is_spill != 0) {
-        throw NotImplementedException("Free spill");
-    }
-    register_use[id.index] = false;
-}
-
-} // namespace Shader::Backend::GLSL
diff --git a/src/shader_recompiler/backend/glsl/reg_alloc.h b/src/shader_recompiler/backend/glsl/reg_alloc.h
deleted file mode 100644
index 6c293f9d1..000000000
--- a/src/shader_recompiler/backend/glsl/reg_alloc.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <bitset>
-#include <vector>
-
-#include "common/bit_field.h"
-#include "common/common_types.h"
-
-namespace Shader::IR {
-class Inst;
-class Value;
-enum class Type;
-} // namespace Shader::IR
-
-namespace Shader::Backend::GLSL {
-enum class Type : u32 {
-    U1,
-    F16x2,
-    S32,
-    U32,
-    F32,
-    S64,
-    U64,
-    F64,
-    U32x2,
-    F32x2,
-    U32x3,
-    F32x3,
-    U32x4,
-    F32x4,
-    Void,
-};
-
-struct Id {
-    union {
-        u32 raw;
-        BitField<0, 1, u32> is_valid;
-        BitField<1, 1, u32> is_long;
-        BitField<2, 1, u32> is_spill;
-        BitField<3, 1, u32> is_condition_code;
-        BitField<4, 1, u32> is_null;
-        BitField<5, 27, u32> index;
-    };
-
-    bool operator==(Id rhs) const noexcept {
-        return raw == rhs.raw;
-    }
-    bool operator!=(Id rhs) const noexcept {
-        return !operator==(rhs);
-    }
-};
-static_assert(sizeof(Id) == sizeof(u32));
-
-class RegAlloc {
-public:
-    std::string Define(IR::Inst& inst);
-    std::string Define(IR::Inst& inst, Type type);
-    std::string Define(IR::Inst& inst, IR::Type type);
-
-    std::string Consume(const IR::Value& value);
-    std::string Consume(IR::Inst& inst);
-
-    std::string GetGlslType(Type type);
-    std::string GetGlslType(IR::Type type);
-
-    size_t num_used_registers{};
-    std::vector<std::string> reg_types;
-
-private:
-    static constexpr size_t NUM_REGS = 4096;
-
-    Type RegType(IR::Type type);
-    Id Alloc();
-    void Free(Id id);
-
-    std::bitset<NUM_REGS> register_use{};
-    std::bitset<NUM_REGS> register_defined{};
-};
-
-} // namespace Shader::Backend::GLSL
diff --git a/src/shader_recompiler/backend/glsl/var_alloc.cpp b/src/shader_recompiler/backend/glsl/var_alloc.cpp
new file mode 100644
index 000000000..8c6944f07
--- /dev/null
+++ b/src/shader_recompiler/backend/glsl/var_alloc.cpp
@@ -0,0 +1,290 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <string>
+#include <string_view>
+
+#include <fmt/format.h>
+
+#include "shader_recompiler/backend/glsl/var_alloc.h"
+#include "shader_recompiler/exception.h"
+#include "shader_recompiler/frontend/ir/value.h"
+
+namespace Shader::Backend::GLSL {
+namespace {
+std::string TypePrefix(GlslVarType type) {
+    switch (type) {
+    case GlslVarType::U1:
+        return "b_";
+    case GlslVarType::F16x2:
+        return "f16x2_";
+    case GlslVarType::U32:
+        return "u_";
+    case GlslVarType::S32:
+        return "s_";
+    case GlslVarType::F32:
+        return "f_";
+    case GlslVarType::S64:
+        return "s64_";
+    case GlslVarType::U64:
+        return "u64_";
+    case GlslVarType::F64:
+        return "d_";
+    case GlslVarType::U32x2:
+        return "u2_";
+    case GlslVarType::F32x2:
+        return "f2_";
+    case GlslVarType::U32x3:
+        return "u3_";
+    case GlslVarType::F32x3:
+        return "f3_";
+    case GlslVarType::U32x4:
+        return "u4_";
+    case GlslVarType::F32x4:
+        return "f4_";
+    case GlslVarType::Void:
+        return "";
+    default:
+        throw NotImplementedException("Type {}", type);
+    }
+}
+
+std::string FormatFloat(std::string_view value, IR::Type type) {
+    // TODO: Confirm FP64 nan/inf
+    if (type == IR::Type::F32) {
+        if (value == "nan") {
+            return "uintBitsToFloat(0x7fc00000)";
+        }
+        if (value == "inf") {
+            return "uintBitsToFloat(0x7f800000)";
+        }
+        if (value == "-inf") {
+            return "uintBitsToFloat(0xff800000)";
+        }
+    }
+    if (value.find_first_of('e') != std::string_view::npos) {
+        // scientific notation
+        const auto cast{type == IR::Type::F32 ? "float" : "double"};
+        return fmt::format("{}({})", cast, value);
+    }
+    const bool needs_dot{value.find_first_of('.') == std::string_view::npos};
+    const bool needs_suffix{!value.ends_with('f')};
+    const auto suffix{type == IR::Type::F32 ? "f" : "lf"};
+    return fmt::format("{}{}{}", value, needs_dot ? "." : "", needs_suffix ? suffix : "");
+}
+
+std::string MakeImm(const IR::Value& value) {
+    switch (value.Type()) {
+    case IR::Type::U1:
+        return fmt::format("{}", value.U1() ? "true" : "false");
+    case IR::Type::U32:
+        return fmt::format("{}u", value.U32());
+    case IR::Type::F32:
+        return FormatFloat(fmt::format("{}", value.F32()), IR::Type::F32);
+    case IR::Type::U64:
+        return fmt::format("{}ul", value.U64());
+    case IR::Type::F64:
+        return FormatFloat(fmt::format("{}", value.F64()), IR::Type::F64);
+    case IR::Type::Void:
+        return "";
+    default:
+        throw NotImplementedException("Immediate type {}", value.Type());
+    }
+}
+} // Anonymous namespace
+
+std::string VarAlloc::Representation(u32 index, GlslVarType type) const {
+    const auto prefix{TypePrefix(type)};
+    return fmt::format("{}{}", prefix, index);
+}
+
+std::string VarAlloc::Representation(Id id) const {
+    return Representation(id.index, id.type);
+}
+
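+// Binds a result variable to the instruction: results with uses get a fresh index from Alloc,
+// unused results share the per-type temporary at index 0.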
+std::string VarAlloc::Define(IR::Inst& inst, GlslVarType type) {
+    if (inst.HasUses()) {
+        inst.SetDefinition<Id>(Alloc(type));
+        return Representation(inst.Definition<Id>());
+    } else {
+        Id id{};
+        id.type.Assign(type);
+        GetUseTracker(type).uses_temp = true;
+        inst.SetDefinition<Id>(id);
+    }
+    return Representation(inst.Definition<Id>());
+}
+
+std::string VarAlloc::Define(IR::Inst& inst, IR::Type type) {
+    return Define(inst, RegType(type));
+}
+
+std::string VarAlloc::Consume(const IR::Value& value) {
+    return value.IsImmediate() ? MakeImm(value) : ConsumeInst(*value.InstRecursive());
+}
+
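+// Consumes one use of the instruction's result and releases the variable once no uses remain.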
+std::string VarAlloc::ConsumeInst(IR::Inst& inst) {
+    inst.DestructiveRemoveUsage();
+    if (!inst.HasUses()) {
+        Free(inst.Definition<Id>());
+    }
+    return Representation(inst.Definition<Id>());
+}
+
+std::string VarAlloc::GetGlslType(IR::Type type) const {
+    return GetGlslType(RegType(type));
+}
+
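+// Finds the lowest free index of the requested type; index 0 is reserved for the shared temporary,
+// so the search starts at 1.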
+Id VarAlloc::Alloc(GlslVarType type) {
+    auto& use_tracker{GetUseTracker(type)};
+    if (use_tracker.num_used < NUM_VARS) {
+        for (size_t var = 1; var < NUM_VARS; ++var) {
+            if (use_tracker.var_use[var]) {
+                continue;
+            }
+            use_tracker.num_used = std::max(use_tracker.num_used, var + 1);
+            use_tracker.var_use[var] = true;
+            Id ret{};
+            ret.is_valid.Assign(1);
+            ret.type.Assign(type);
+            ret.index.Assign(static_cast<u32>(var));
+            return ret;
+        }
+    }
+    throw NotImplementedException("Variable spilling");
+}
+
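+// Returns the index to the free pool. Ids that never got a real allocation (is_valid == 0) are skipped.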
+void VarAlloc::Free(Id id) {
+    if (id.is_valid == 0) {
+        // throw LogicError("Freeing invalid variable");
+        return;
+    }
+    auto& use_tracker{GetUseTracker(id.type)};
+    use_tracker.var_use[id.index] = false;
+}
+
+GlslVarType VarAlloc::RegType(IR::Type type) const {
+    switch (type) {
+    case IR::Type::U1:
+        return GlslVarType::U1;
+    case IR::Type::U32:
+        return GlslVarType::U32;
+    case IR::Type::F32:
+        return GlslVarType::F32;
+    case IR::Type::U64:
+        return GlslVarType::U64;
+    case IR::Type::F64:
+        return GlslVarType::F64;
+    default:
+        throw NotImplementedException("IR type {}", type);
+    }
+}
+
+std::string VarAlloc::GetGlslType(GlslVarType type) const {
+    switch (type) {
+    case GlslVarType::U1:
+        return "bool ";
+    case GlslVarType::F16x2:
+        return "f16vec2 ";
+    case GlslVarType::U32:
+        return "uint ";
+    case GlslVarType::S32:
+        return "int ";
+    case GlslVarType::F32:
+        return "float ";
+    case GlslVarType::S64:
+        return "int64_t ";
+    case GlslVarType::U64:
+        return "uint64_t ";
+    case GlslVarType::F64:
+        return "double ";
+    case GlslVarType::U32x2:
+        return "uvec2 ";
+    case GlslVarType::F32x2:
+        return "vec2 ";
+    case GlslVarType::U32x3:
+        return "uvec3 ";
+    case GlslVarType::F32x3:
+        return "vec3 ";
+    case GlslVarType::U32x4:
+        return "uvec4 ";
+    case GlslVarType::F32x4:
+        return "vec4 ";
+    case GlslVarType::Void:
+        return "";
+    default:
+        throw NotImplementedException("Type {}", type);
+    }
+}
+
+VarAlloc::UseTracker& VarAlloc::GetUseTracker(GlslVarType type) {
+    switch (type) {
+    case GlslVarType::U1:
+        return var_bool;
+    case GlslVarType::F16x2:
+        return var_f16x2;
+    case GlslVarType::U32:
+        return var_u32;
+    case GlslVarType::S32:
+        return var_s32;
+    case GlslVarType::F32:
+        return var_f32;
+    case GlslVarType::S64:
+        return var_s64;
+    case GlslVarType::U64:
+        return var_u64;
+    case GlslVarType::F64:
+        return var_f64;
+    case GlslVarType::U32x2:
+        return var_u32x2;
+    case GlslVarType::F32x2:
+        return var_f32x2;
+    case GlslVarType::U32x3:
+        return var_u32x3;
+    case GlslVarType::F32x3:
+        return var_f32x3;
+    case GlslVarType::U32x4:
+        return var_u32x4;
+    case GlslVarType::F32x4:
+        return var_f32x4;
+    default:
+        throw NotImplementedException("Type {}", type);
+    }
+}
+
+const VarAlloc::UseTracker& VarAlloc::GetUseTracker(GlslVarType type) const {
+    switch (type) {
+    case GlslVarType::U1:
+        return var_bool;
+    case GlslVarType::F16x2:
+        return var_f16x2;
+    case GlslVarType::U32:
+        return var_u32;
+    case GlslVarType::S32:
+        return var_s32;
+    case GlslVarType::F32:
+        return var_f32;
+    case GlslVarType::S64:
+        return var_s64;
+    case GlslVarType::U64:
+        return var_u64;
+    case GlslVarType::F64:
+        return var_f64;
+    case GlslVarType::U32x2:
+        return var_u32x2;
+    case GlslVarType::F32x2:
+        return var_f32x2;
+    case GlslVarType::U32x3:
+        return var_u32x3;
+    case GlslVarType::F32x3:
+        return var_f32x3;
+    case GlslVarType::U32x4:
+        return var_u32x4;
+    case GlslVarType::F32x4:
+        return var_f32x4;
+    default:
+        throw NotImplementedException("Type {}", type);
+    }
+}
+
+} // namespace Shader::Backend::GLSL
diff --git a/src/shader_recompiler/backend/glsl/var_alloc.h b/src/shader_recompiler/backend/glsl/var_alloc.h
new file mode 100644
index 000000000..29d78a571
--- /dev/null
+++ b/src/shader_recompiler/backend/glsl/var_alloc.h
@@ -0,0 +1,100 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <bitset>
+#include <string>
+#include <vector>
+
+#include "common/bit_field.h"
+#include "common/common_types.h"
+
+namespace Shader::IR {
+class Inst;
+class Value;
+enum class Type;
+} // namespace Shader::IR
+
+namespace Shader::Backend::GLSL {
+enum class GlslVarType : u32 {
+    U1,
+    F16x2,
+    S32,
+    U32,
+    F32,
+    S64,
+    U64,
+    F64,
+    U32x2,
+    F32x2,
+    U32x3,
+    F32x3,
+    U32x4,
+    F32x4,
+    Void,
+};
+
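+// Packed handle stored in each IR instruction's definition: a validity bit, the variable type,
+// and the variable index within that type.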
+struct Id {
+    union {
+        u32 raw;
+        BitField<0, 1, u32> is_valid;
+        BitField<1, 4, GlslVarType> type;
+        BitField<5, 27, u32> index;
+    };
+
+    bool operator==(Id rhs) const noexcept {
+        return raw == rhs.raw;
+    }
+    bool operator!=(Id rhs) const noexcept {
+        return !operator==(rhs);
+    }
+};
+static_assert(sizeof(Id) == sizeof(u32));
+
+class VarAlloc {
+public:
+    static constexpr size_t NUM_VARS = 511;
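+    // Per-type bookkeeping: highest index handed out, a free/used bitmap, and whether the
+    // shared index-0 temporary was requested.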
+    struct UseTracker {
+        size_t num_used{};
+        std::bitset<NUM_VARS> var_use{};
+        bool uses_temp{};
+    };
+
+    std::string Define(IR::Inst& inst, GlslVarType type);
+    std::string Define(IR::Inst& inst, IR::Type type);
+
+    std::string Consume(const IR::Value& value);
+    std::string ConsumeInst(IR::Inst& inst);
+
+    std::string GetGlslType(GlslVarType type) const;
+    std::string GetGlslType(IR::Type type) const;
+
+    const UseTracker& GetUseTracker(GlslVarType type) const;
+    std::string Representation(u32 index, GlslVarType type) const;
+
+private:
+    GlslVarType RegType(IR::Type type) const;
+    Id Alloc(GlslVarType type);
+    void Free(Id id);
+    UseTracker& GetUseTracker(GlslVarType type);
+    std::string Representation(Id id) const;
+
+    UseTracker var_bool{};
+    UseTracker var_f16x2{};
+    UseTracker var_s32{};
+    UseTracker var_u32{};
+    UseTracker var_u32x2{};
+    UseTracker var_u32x3{};
+    UseTracker var_u32x4{};
+    UseTracker var_f32{};
+    UseTracker var_f32x2{};
+    UseTracker var_f32x3{};
+    UseTracker var_f32x4{};
+    UseTracker var_u64{};
+    UseTracker var_s64{};
+    UseTracker var_f64{};
+};
+
+} // namespace Shader::Backend::GLSL