@@ -586,7 +586,9 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
                     cpu_addr_base += u64(std::max<s64>(difference2, 0));
                     const u64 new_size = cpu_addr_end2 - cpu_addr_base;
                     const u64 new_offset = cpu_addr_base - buffer.CpuAddr();
-                    ASSERT(!IsRegionCpuModified(cpu_addr_base, new_size));
+                    if (IsRegionCpuModified(cpu_addr_base, new_size)) {
+                        return;
+                    }
                     downloads.push_back({
                         BufferCopy{
                             .src_offset = new_offset,
@@ -596,8 +598,15 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
                         buffer_id,
                     });
                     total_size_bytes += new_size;
-                    buffer.UnmarkRegionAsGpuModified(cpu_addr_base, new_size);
                     largest_copy = std::max(largest_copy, new_size);
+                    constexpr u64 align_mask = ~(32ULL - 1);
+                    const VAddr align_up_address = (cpu_addr_base + 31) & align_mask;
+                    const u64 difference = align_up_address - cpu_addr_base;
+                    if (difference > new_size) {
+                        return;
+                    }
+                    const u64 fixed_size = new_size - difference;
+                    buffer.UnmarkRegionAsGpuModified(align_up_address, fixed_size & align_mask);
                 });
             });
         }
@@ -1380,7 +1389,8 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
     // Binding the whole map range would be technically correct, but games have large maps that make
     // this approach unaffordable for now.
     static constexpr u32 arbitrary_extra_bytes = 0xc000;
-    const u32 bytes_to_map_end = static_cast<u32>(gpu_memory.BytesToMapEnd(gpu_addr));
+    const u32 bytes_to_map_end =
+        std::max(size, static_cast<u32>(gpu_memory.BytesToMapEnd(gpu_addr)));
     const Binding binding{
         .cpu_addr = *cpu_addr,
         .size = std::min(size + arbitrary_extra_bytes, bytes_to_map_end),