Oracular 백작
이 봇은 다른 봇들이 시험으로 낼 내용을 예측하기 위해, 라운드 수와 몇 가지 엉성한 휴리스틱을 바탕으로 다른 모든 참가 봇의 예상 행동을 평균하는 알고리즘을 사용합니다.
백작은 md5 해시를 사용해 시험 문제를 만들기 때문에 질문과 답변이 모두 결정론적입니다. 대부분의 입력을 무시하며, Jade Emporer를 상대할 때를 포함해 언제나 똑같은 불리언 수열을 묻고 답합니다.
import numpy as np
import hashlib
class CountOracular:
    '''Uses very little external data to make heuristical statistical
    deterministic predictions about the average exam.
    (Assonance not intended.)
    To generate its own exams, uses a deterministic hash.

    Interface (fixed by the game controller):
        ask(n, id)    -> list[bool] of length n: the exam this bot sets.
        answer(n, id) -> list[bool] of length n: this bot's answers.
        update(...)   -> per-round feedback; deliberately ignored.
    '''
    def __init__(self, id, number_of_bots):
        # Answers produced last round; consumed by _plagiarise() to model
        # bots that copy the exams they received.
        self.last_round = []
        # One predictor per known opponent.  Each takes the exam length n
        # and returns (exam, confidence): a guessed exam (possibly short or
        # empty) and a weight in [0, 1] used when averaging the guesses.
        self.bots_calculators = [
            self._jad, #Jade Emporer
            self._alp, #Alpha
            self._bet, #Beta
            self._gam, #Gamma
            self._wiq, #Wi Qe Lu
            self._stu, #StudiousBot
            self._pla, #Plagiarist
            self._san, #Santayana
            self._tho, #Thomas
            self._dru, #Drunkard
            self._yin, #YinYang
            self._con, #Contrary
            self._tit, #TitForTat
            self._equ, #Equalizer
            self._mar, #Marx
        ]
        self.bot_types = len(self.bots_calculators)

    def ask(self, n, id):
        '''Set a deterministic exam of length n (id is ignored).'''
        #if we can, show that hardcoding is no match for the power of heuristics:
        if n == 2:
            return [False, True]
        #otherwise, refer to the wisdom of Mayor Prentiss in order to command The Ask
        #i.e. hashes a quote, and uses that as the exam.
        salt = b"I AM THE CIRCLE AND THE CIRCLE IS ME " * n
        return self._md5_from(salt, n)

    def answer(self, n, id):
        '''Predict the average exam of length n by weighted vote.

        Uses the power of heuristics to predict what the average bot will
        do.  Ignores all inputs except the length of the output; very
        approximate, and deterministic -- i.e. every game, Count Oracular
        will send the same lists of answers, in the same order.
        '''
        best_guess_totals = [0.5] * n #halfway between T and F
        for bot in self.bots_calculators:
            exam, confidence = bot(n)
            if not exam:
                #predictor declined to guess (e.g. no history yet)
                continue
            while len(exam) < n:
                #ensure exam is long enough by cycling its first element
                exam += exam[:1]
            exam = exam[:n] #ensure exam is short enough
            #map T and F to floats [0,1] based on confidence
            weighted_exam = [0.5+confidence*(0.5 if q else -0.5) for q in exam]
            best_guess_totals = [current+new for current,new in zip(best_guess_totals, weighted_exam)]
        #NOTE(review): the 0.5 baseline is included in the totals but not in
        #the divisor, so averages lean slightly towards True -- presumably a
        #deliberate tie-break; left as-is.
        best_guess_averages = [total/self.bot_types
                               for total
                               in best_guess_totals
                               ]
        best_guess = [avg > 0.5 for avg in best_guess_averages]
        self.last_round = best_guess
        return best_guess

    def update(self, ranks, own, others):
        #deliberately stateless between rounds (beyond last_round)
        pass

    def _md5_from(self, data, n):
        '''Deterministically expand `data` into n booleans via iterated md5.

        A hash digest bit of "0" maps to True.
        '''
        md5 = hashlib.md5(data)
        for i in range(n):
            md5.update(data)
        exam = []
        while len(exam) < n:
            exam += [x == "0"
                     for x
                     in bin(int(md5.hexdigest(), 16))[2:].zfill(128)
                     ]
            md5.update(data)
        return exam[:n]

    def _invert(self, exam):
        '''Element-wise boolean negation of an exam.'''
        return [not val for val in exam]

    def _digits_to_bools(self, iterable):
        '''Map each character to True iff it is the digit "1".'''
        return [char=="1" for char in iterable]

    def _plagiarise(self, n):
        '''Repeat last round's answers out to length n (empty if no history).'''
        copy = (self.last_round * n)[:n]
        return copy

    '''functions to calculate expected exams for each other bot:
    (these values, weighted with corresponding confidence ratings,
    are summed to calculate the most likely exam.)'''

    def _jad(self, n):
        '''Calculate the mean of _jad's distribution, then
        use that as the guess.

        Bug fix: np.power(2, n) with integer arguments overflows int64 for
        n >= 63, so np.sqrt returned NaN and int() raised ValueError,
        crashing answer() for long exams.  Computing the power in floating
        point gives identical results for every n <= 62 and never overflows.
        NOTE(review): max() always selects (2<<n)-1 here, leaving the sqrt
        term dead -- min() may have been intended (the mean of a capped
        exponential is near its scale, sqrt(2**n)); left unchanged to
        preserve the bot's established behaviour.
        '''
        mean = max(int(np.sqrt(np.power(2.0, n))), (2<<n)-1)
        string_mean = f"{mean}".zfill(n)
        exam = self._invert(self._digits_to_bools(string_mean))
        return exam, 0.5

    def _alp(self, n):
        '''Alpha uses a predictable hash,
        until it figures out we aren't Beta,
        modelled by the probability of giving or solving
        Alpha's exam'''
        #probability that Alpha thinks we're Beta
        #assuming we fail to pretend to be Beta if we meet Alpha
        chance_beta = ((1 - 1/self.bot_types) ** n) ** 2
        #Alpha's exam is seeded with its partner's name -- intentional swap
        return self._md5_from(b"Beta", n), chance_beta

    def _gam(self, n):
        '''Gamma is like Beta, except after realising,
        switches to 50-50 random choice of inverse
        either Beta or Alpha's hash'''
        #probability that Gamma thinks we're Alpha still
        #(Unlikely that Gamma will think we're Beta;
        #we'd need to fail Alpha but pass Beta,
        #therefore, not accounted for)
        chance_unknown = ((1 - 1/self.bot_types) ** n) ** 2
        #default exam that assumes that Gamma thinks we're Alpha
        exam = self._md5_from(b"Beta", n)
        if chance_unknown > 0.5:#there exists a better heuristic here
            #assume Gamma will consider us Alpha
            confidence = chance_unknown
        else:
            #assume Gamma considers us neither Alpha nor Beta
            alpha = self._invert(self._md5_from(b"Beta", n))
            beta = self._invert(self._md5_from(b"Alpha", n))
            #check for bools where both possible exams match
            and_comp = [a and b for a, b in zip(alpha, beta)]
            nor_comp = [not (a or b) for a, b in zip(alpha, beta)]
            #count up matches vs times when fell back on default
            #to calculate ratio of default
            #to bools where hashes agree
            confidence_vs_default = (sum(and_comp)+sum(nor_comp)) / n
            confidence = confidence_vs_default * chance_unknown + (1 - confidence_vs_default) * (1 - chance_unknown)
            #overwrite the default wherever the two candidate hashes agree
            for i in range(n):
                if and_comp[i]:
                    exam[i] = True
                if nor_comp[i]:
                    exam[i] = False
        return exam, confidence

    def _bet(self, n):
        '''Beta is like Alpha, but with a different hash'''
        #probability we haven't matched with Beta yet
        #i.e. probability that Beta still thinks we're Alpha
        chance_alpha = ((1 - 1/self.bot_types) ** n) ** 2
        return self._md5_from(b"Alpha", n), chance_alpha

    def _wiq(self, n):
        '''Wi Qe Lu is hard to model, so we pretend
        that it mimicks Plagiarist for the most part'''
        if n == 1:
            #first round is random
            return [False], 0
        #other rounds are based on exams it met
        #leaning towards same as the previous exam
        return self._plagiarise(n), 0.1

    def _stu(self, n):
        '''StudiousBot is random'''
        return [False] * n, 0

    def _pla(self, n):
        '''Plagiarist copies the exams it received,
        which can be modelled with the standard prediction
        calculated for the previous round, padded with its first
        element.'''
        if n == 1:
            return [True], 1
        return self._plagiarise(n), 0.3

    def _san(self, n):
        '''Santayana is based on answers, which we don't predict.
        Modelled as random.'''
        #mostly random, slight leaning towards default False
        return [False] * n, 0.1

    def _tho(self, n):
        '''Thomas has an unpredictable threshold.'''
        #for all intents, random
        return [False] * n, 0

    def _dru(self, n):
        '''Drunkard is utterly random.'''
        return [False] * n, 0

    def _yin(self, n):
        '''YinYang inverts itself randomly, but not unpredictably.
        We can model it to find the probability. Also notably,
        one index is inverted, which factors into the confidence
        especially for lower n.'''
        if n == 1:
            #one element is inverted, so whole list must be False
            return [False], 1
        if n == 2:
            #split half and half randomly; can't predict
            return [True] * n, 0
        #cumulative chance of mostly ones or mostly zeros
        truthy = 1
        for _ in range(n):
            #simulate repeated flipping
            truthy = truthy * 0.44 + (1-truthy) * 0.56
        falsey = 1 - truthy
        #the 1/n term accounts for the single guaranteed-inverted index
        if falsey > truthy:
            return [False] * n, falsey - 1/n
        return [True] * n, truthy - 1/n

    def _con(self, n):
        '''Contrary is like Jade Emporer, but inverts itself
        so much that modelling the probability of inversion
        is not worth the effort.'''
        #there are some clever ways you could do statistics on this,
        #but I'm content to call it uniform for now
        return [False] * n, 0

    def _tit(self, n):
        '''TitForTat is most likely to give us False
        but the confidence drops as the chance of having
        met TitForTat increases.
        The square root of the probability we calculate for
        Alpha, Beta and Gamma, because those also care about what
        we answer, whereas TitForTat only cares about what we ask'''
        #probability that we've not given TitForTat an exam
        chance_friends = (1 - 1/self.bot_types) ** n
        return [False] * n, chance_friends

    def _equ(self, n):
        '''Equalizer always asks True'''
        #certain that Equalizer's exam is all True
        return [True] * n, 1

    def _mar(self, n):
        '''Marx returns mostly True, randomised based on our rank.
        We don't predict our rank.
        There's ~50% chance an answer is random'''
        #75% chance we guess right (= 50% + 50%*50%)
        return [True] * n, 0.75