correlation_graph.py

import matplotlib.pyplot as plt
import math
import numpy as np
import json

# Hamming-weight lookup table for every byte value
HW = [bin(n).count("1") for n in range(0, 256)]

def popcount(x):
    # 64-bit SWAR popcount: number of set bits in x
    x -= (x >> 1) & 0x5555555555555555
    x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)
    x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f
    return ((x * 0x0101010101010101) & 0xffffffffffffffff) >> 56
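# Illustrative sanity check (not part of the original script): for byte values the
# SWAR popcount agrees with the HW lookup table, e.g.
#   popcount(0xAB) == HW[0xAB] == 5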
# Helper Functions
def mean(X):
    return np.sum(X, axis=0) / len(X)

def std_dev(X, X_bar):
    return np.sqrt(np.sum((X - X_bar) ** 2, axis=0))

def cov(X, X_bar, Y, Y_bar):
    return np.sum((X - X_bar) * (Y - Y_bar), axis=0)

def to16(byte1, byte2):
    return int((byte1 << 8) + byte2)
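# Note on the helpers above: std_dev and cov deliberately omit the 1/N factor,
# since it cancels when the Pearson correlation is formed in calc_corr below.
# Illustrative check (assuming 1-D numpy arrays t and h of equal length):
#   r = cov(t, mean(t), h, mean(h)) / (std_dev(t, mean(t)) * std_dev(h, mean(h)))
#   # r should match np.corrcoef(t, h)[0, 1] up to floating-point error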
# Speck Model
NUM_ROUNDS = 22
BLOCK_SIZE = 32
KEY_SIZE = 64
WORD_SIZE = 16

# Rotation (shift) amounts for Speck
ALPHA = 7
BETA = 2

mod_mask = (2 ** WORD_SIZE) - 1
mod_mask_sub = (2 ** WORD_SIZE)
def ER16(x, y, k):
    # One Speck32/64 encryption round on the 16-bit words (x, y) with round key k
    rs_x = ((x << (16 - ALPHA)) + (x >> ALPHA)) & mod_mask  # rotate x right by ALPHA
    add_sxy = (rs_x + y) & mod_mask                         # modular addition
    new_x = k ^ add_sxy                                     # key mixing
    ls_y = ((y >> (16 - BETA)) + (y << BETA)) & mod_mask    # rotate y left by BETA
    new_y = new_x ^ ls_y
    return new_x, new_y
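# Hand-worked example (illustrative): with x = 0x0001, y = 0x0000, k = 0x0000,
# rotating x right by 7 gives 0x0200, the addition and key mixing leave it
# unchanged, and y stays zero, so
#   ER16(0x0001, 0x0000, 0x0000) == (0x0200, 0x0200)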
def simple_speck(plaintext, key):
    # Leakage model: pack the plaintext bytes (little-endian) into two 16-bit words,
    # run one Speck round under the key guess, and return the Hamming weight of the state
    Ct_0 = (int(plaintext[1]) << 8) + int(plaintext[0])
    Ct_1 = (int(plaintext[3]) << 8) + int(plaintext[2])
    Ct_1, Ct_0 = ER16(Ct_1, Ct_0, key)
    return popcount((Ct_1 << 8) + Ct_0)
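# Illustrative example (hand-computed, not from the original script): for an all-zero
# plaintext and key guess 0x2200, the round output words are both 0x2200, so the
# predicted leakage is
#   simple_speck([0x00, 0x00, 0x00, 0x00], 0x2200) == 4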
def calc_corr(traces, plaintexts):
    maxcpa = [0] * 256  # maximum absolute correlation per key-byte guess
    # Calculate mean and standard deviation of the traces
    t_bar = mean(traces)
    o_t = std_dev(traces, t_bar)
    for key in range(0, 256):
        # Hypothetical Hamming weights for this guess of the upper key byte
        hws = np.array([[simple_speck(pt, (key << 8) + 0x00) for pt in plaintexts]]).transpose()
        hws_bar = mean(hws)
        o_hws = std_dev(hws, hws_bar)
        correlation = cov(traces, t_bar, hws, hws_bar)
        cpaoutput = correlation / (o_t * o_hws)
        maxcpa[key] = float(max(abs(cpaoutput)))  # plain float so the result stays JSON-serializable
    # Return the best guess and the correlation for every guess
    best_guess = int(np.argmax(maxcpa))
    return best_guess, maxcpa
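# Illustrative standalone use of calc_corr (assuming trace_array and textin_array
# are loaded as at the bottom of this script):
#   best, corrs = calc_corr(trace_array[:500], textin_array[:500])
#   print("best key-byte guess: 0x%02x (|r| = %.3f)" % (best, corrs[best]))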
def analyze_correlations(traces, plaintexts):
    steps = 200
    max_traces = 1000
    allkeys = {}
    for j in range(256):
        allkeys[j] = []
    stats = []
    for i in range(steps, max_traces, steps):
        best, corrs = calc_corr(traces[:i], plaintexts[:i])
        for j in range(256):
            allkeys[j].append(corrs[j])
        stats.append(i)
    plt.figure()
    for keybyte, correlations in allkeys.items():
        # Highlight the guess 0x22 against all other key-byte guesses
        if keybyte == 0x22:
            plt.plot(stats, correlations, color='gray')
        else:
            plt.plot(stats, correlations, color='lightgray')
    plt.ylabel('Correlation')
    plt.xlabel('Number of Traces')
    #plt.legend(loc="upper left")
    plt.savefig("plot_hiding.png")
    return allkeys
# Load Inputs
print("[+] Loading Hiding Data")
trace_array = np.load("../sample_traces/5000_encryption_traces_with_hiding_random.npy")
textin_array = np.load("../sample_traces/5000_plaintext_traces_with_hiding_random.npy")

print("[+] Calculating Correlations")
allkeys = analyze_correlations(trace_array, textin_array)

with open("2000k_correlations.json", "w") as out:
    out.write(json.dumps(allkeys))
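# Illustrative sketch for reading the results back later (JSON turns the integer
# key-guess indices into strings, so they are converted back here):
#   with open("2000k_correlations.json") as f:
#       allkeys = {int(k): v for k, v in json.load(f).items()}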