
Measure Protection

Users who want to measure the efficacy of RunSafe Protect's randomization can use the script below to verify that randomization occurred and to see how many bytes are shared between runs. As the number of runs increases, the number of shared bytes should rapidly approach 0. In many cases, a protected program reaches 0 shared bytes in fewer than 10 runs.
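For rough intuition about how quickly the count should fall, consider an idealized model (an illustration only, assuming every byte position were independently and uniformly re-randomized on each run, which is stronger than what layout randomization actually guarantees): the chance that a given position holds the same byte across n dumps is (1/256)^(n-1), so the expected common-byte count shrinks geometrically with each additional run.

# Idealized estimate only; real instruction bytes are far from uniform,
# so actual counts decay more slowly. The 1 MB section size is hypothetical.
exp_fs = 1_000_000
for n in range(2, 6):
    print(f"{n} runs: ~{exp_fs * (1 / 256) ** (n - 1):.4f} expected common bytes")

In practice the measured trend, not this formula, is what matters; the comparison script below reports the actual counts.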

Gather Memory Dumps For Analysis

  1. Ensure that the yama ptrace_scope is off so you can attach to a running process

cat /proc/sys/kernel/yama/ptrace_scope # Save this value for restoring after analysis
echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope

  2. Attach gdb to the process

gdb -p $(pidof <binary>)

  3. Get the bounds of the .text section

(gdb) info files

  4. Dump the .text section to a file

(gdb) dump binary memory <file_path> <start_addr> <end_addr>

  5. Exit gdb

(gdb) quit

  6. Restart the protected binary and repeat steps 2 through 5, saving each memory dump to a different file in the same directory. (A helper that automates steps 2 through 5 is sketched after this list.)

  7. Run the script from the Comparison Script section below, passing the directory containing your dumps:

python3 mem_compare.py --commonbytes /path/to/memory/dumps

  8. Set ptrace_scope back to its original value (usually 1) after you are done dumping from gdb

echo 1 | sudo tee /proc/sys/kernel/yama/ptrace_scope
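
If you need to repeat the dump many times, the sketch below (a hypothetical helper written for this guide, not a RunSafe Security tool) automates steps 2 through 5 by locating the binary's executable mapping in /proc/<pid>/maps and copying it out of /proc/<pid>/mem. It needs the same ptrace permissions as gdb, and it dumps the whole executable segment rather than the exact .text bounds reported by info files, which is fine for comparison as long as every run is captured the same way.

dump_text.py
#!/usr/bin/env python3
# dump_text.py -- hypothetical helper for this guide, not a RunSafe tool.
import os
import sys


def dump_exec_segment(pid: int, out_path: str) -> None:
    # /proc/<pid>/exe names the file the process was started from, so we
    # can select its mapping rather than a shared library's.
    exe = os.readlink(f"/proc/{pid}/exe")
    with open(f"/proc/{pid}/maps") as maps:
        for line in maps:
            fields = line.split()
            # Fields: address-range perms offset dev inode pathname
            if len(fields) >= 6 and fields[1].startswith("r-x") and fields[5] == exe:
                start, end = (int(a, 16) for a in fields[0].split("-"))
                with open(f"/proc/{pid}/mem", "rb") as mem, open(out_path, "wb") as out:
                    mem.seek(start)
                    out.write(mem.read(end - start))
                print(f"wrote {end - start} bytes from {fields[0]} to {out_path}")
                return
    sys.exit(f"no executable mapping of {exe} found for pid {pid}")


if __name__ == "__main__":
    dump_exec_segment(int(sys.argv[1]), sys.argv[2])

Invoke it once per run, e.g. python3 dump_text.py $(pidof <binary>) /path/to/memory/dumps/run1.bin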

Comparison Script

mem_compare.py
#!/usr/bin/env python3
# Copyright (c) 2025 RunSafe Security Inc.

import argparse
import itertools
import os

import numpy as np


def padding_bytes(f1data, f2data):
    # Padding is defined as consecutive matching bytes that are
    # common in both sets of data
    pad_count = 0
    previous = -1
    first = True
    for a, b in zip(reversed(f1data), reversed(f2data)):
        if a == b or a == previous:
            if first:
                # Remember the fill byte value from the end of the file
                previous = a
                first = False
            pad_count += 1
        else:
            break
    return pad_count


def single_padding_bytes(f1data):
    # Padding is defined as consecutive matching bytes that are
    # common at the end of the file
    pad_count = 0
    previous = -1
    first = True
    for a in reversed(f1data):
        if first or a == previous:
            if first:
                # Remember the fill byte value from the end of the file
                previous = a
                first = False
            pad_count += 1
        else:
            break
    return pad_count
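
# Why padding matters: dumps of the same binary typically end with a run of
# identical fill bytes (e.g. section-alignment padding), which would count
# as "common" even under perfect randomization. The helpers above measure
# that trailing run so main() can subtract it from the match totals.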


def main():
    # Handle the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("mypath")
    parser.add_argument(
        "-o",
        "--onetoone",
        dest="one",
        action="store_true",
        help="compare all the files vs each other file",
    )
    parser.add_argument(
        "-c",
        "--commonbytes",
        dest="common",
        action="store_true",
        help="find common bytes across all the files",
    )
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true")
    parser.set_defaults(verbose=False)
    args = parser.parse_args()
    mypath = args.mypath

    if not os.path.isdir(mypath):
        print("Input argument should be a directory containing all the files to compare")
        quit()
    verbose = args.verbose
    onetoone = args.one
    common = args.common

    # Get a list of absolute file paths to test from the provided directory
    files_to_test = []
    for dirpath, _, filenames in os.walk(mypath):
        for f in filenames:
            if "lfrdiff.csv" not in f:
                files_to_test.append(os.path.abspath(os.path.join(dirpath, f)))
    if not files_to_test:
        print("No memory dumps found in the provided directory")
        quit()
    if verbose:
        print(files_to_test)

    # Check that all the files to compare are the same size
    exp_fs = 0
    for f in files_to_test:
        if not exp_fs:
            exp_fs = os.path.getsize(f)
        elif os.path.getsize(f) != exp_fs:
            print("Memory dumps should all be the same size")
            quit()

    # Compare each pair of files and record the results in a CSV
    if onetoone:
        with open("lfrdiff.csv", "w") as outfile:
            # itertools.combinations never pairs a file with itself
            for file1, file2 in itertools.combinations(files_to_test, 2):
                f1data = np.fromfile(file1, dtype="uint8")
                f2data = np.fromfile(file2, dtype="uint8")

                # Count the trailing padding so it can be excluded
                pad_count = padding_bytes(f1data, f2data)

                mask = f1data == f2data
                bytes_common = mask.sum() - pad_count

                # Calculate the % of bytes that are common
                perc_cov = bytes_common / exp_fs * 100
                if verbose:
                    print("--------------------------------")
                    print(f"{file1}: {exp_fs} bytes")
                    print(f"{file2}: {exp_fs} bytes")
                    print(f"bytes common: {bytes_common}\n")
                    print(f"% common: {perc_cov:.2f}\n")

                outfile.write(
                    f"{file1}, {file2}, {exp_fs}, {bytes_common}, {perc_cov:.2f}\n"
                )

    # Compare all the files and get the total common bytes
    if common:
        mask = np.ones(exp_fs, dtype=bool)
        lpadding = []
        for index, f in enumerate(files_to_test):
            data = np.fromfile(f, dtype="uint8")
            lpadding.append(single_padding_bytes(data))
            if index == 0:
                # Initialize the reference values from the first file
                values = data.copy()
            else:
                # Keep only the positions that still match every file seen
                mask = np.logical_and(mask, values == data)

        common_bytes = mask.sum() - min(lpadding)
        com_percent = (common_bytes / exp_fs) * 100
        print(f"Common bytes {common_bytes} of {exp_fs} bytes: {com_percent:.2f} %")

        # Print the common byte ranges of the binaries
        print("Common byte ranges")
        start = 0
        last = False
        last_index = 0
        for index, x in enumerate(mask):
            if x and not last:
                start = index
            elif not x and last:
                print(f"{start}:{index}")
            last = x
            last_index = index
        if last:
            print(f"{start}:{last_index}")


if __name__ == "__main__":
    main()
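
Both modes can be combined in a single invocation. For example, to print the surviving common bytes and their ranges, write the pairwise results to lfrdiff.csv in the current directory, and show per-pair details:

python3 mem_compare.py -c -o -v /path/to/memory/dumps

Each row of lfrdiff.csv records the two files compared, the dump size, the number of shared bytes (trailing padding excluded), and the percentage in common.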