commit 50638fbae09c717ef02ca3820921da76a5379da7
Author: Simon Klüttermann
Date:   Sat Jan 29 13:04:08 2022 +0100

    initial push

diff --git a/README b/README
new file mode 100644
index 0000000..150c06f
--- /dev/null
+++ b/README
@@ -0,0 +1,9 @@
+Instead of trying to find a single model that is perfect at finding anomalies, ensembles try to combine multiple (maybe bad) models into one.
+To do this, we need an algorithm to combine the predictions of different models. One way (that I commonly use) is to just average them in some way (score=sqrt(score_1**2+score_2**2)). This only works well if you have a huge number of mostly uncorrelated models.
+If you only have a few models, or correlated ones, you can introduce bias this way. Assume we have three models: an isolation forest (iforest), an SVM and a kNN algorithm. The iforest has low correlation to the other models (it finds different things anomalous compared to the SVM and kNN), but the SVM and the kNN find basically the same anomalies. If we just average all models, the SVM and kNN have a much bigger influence on the result than the iforest. There is no real reason why this should be the case.
+To solve this, you can add models to the ensemble depending on the correlations between them. But instead of relying on the correlation that exists between the models themselves, this repository uses a special kind of neural network to find uncorrelated parts of the model predictions.
+
+n2ulayer.py and mu.py define this special kind of neural network. loss.py defines the correlation we want to minimize, for use in tensorflow.
+onemodel.py generates a quick (and quite random) anomaly detection model for the data defined in data.py (just a 2d gaussian). 20 models are generated and their predictions (sorted from most normal (green) to most anomalous (red)) are drawn in the numbered images in imgs.
+If you use all 20 models and simply average them, this results in imgs/recombine.png. Notice how the green points are much more centered.
+choosenext.py then picks, from a set of candidate model predictions, the combination that is least correlated to a given score; main.py demonstrates this on merged.npz and draws before.png, suggestion.png and updated.png.
diff --git a/__pycache__/choosenext.cpython-39.pyc b/__pycache__/choosenext.cpython-39.pyc
new file mode 100644
index 0000000..5038ec8
Binary files /dev/null and b/__pycache__/choosenext.cpython-39.pyc differ
diff --git a/__pycache__/data.cpython-39.pyc b/__pycache__/data.cpython-39.pyc
new file mode 100644
index 0000000..e76acd1
Binary files /dev/null and b/__pycache__/data.cpython-39.pyc differ
diff --git a/__pycache__/loss.cpython-39.pyc b/__pycache__/loss.cpython-39.pyc
new file mode 100644
index 0000000..ea5564b
Binary files /dev/null and b/__pycache__/loss.cpython-39.pyc differ
diff --git a/__pycache__/mu.cpython-39.pyc b/__pycache__/mu.cpython-39.pyc
new file mode 100644
index 0000000..f73b67c
Binary files /dev/null and b/__pycache__/mu.cpython-39.pyc differ
diff --git a/__pycache__/n2ulayer.cpython-39.pyc b/__pycache__/n2ulayer.cpython-39.pyc
new file mode 100644
index 0000000..8ad7161
Binary files /dev/null and b/__pycache__/n2ulayer.cpython-39.pyc differ
diff --git a/before.png b/before.png
new file mode 100644
index 0000000..53d53f5
Binary files /dev/null and b/before.png differ
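(Aside, not part of the commit: the quadratic-mean combination the README describes is also what combine() in main.py implements. A minimal, hedged sketch of it on two standardized score vectors, with illustrative names only:)

import numpy as np

def combine_scores(score_1, score_2):
    # standardize both score vectors so neither model dominates purely because of its scale
    a = 1 + (score_1 - np.mean(score_1)) / np.std(score_1)
    b = 1 + (score_2 - np.mean(score_2)) / np.std(score_2)
    # quadratic mean: score = sqrt(mean(a**2, b**2))
    return np.sqrt((a**2 + b**2) / 2)

scores_1 = np.random.normal(size=1000)
scores_2 = np.random.normal(size=1000)
print(combine_scores(scores_1, scores_2)[:5])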
diff --git a/choosenext.py b/choosenext.py
new file mode 100644
index 0000000..9c99045
--- /dev/null
+++ b/choosenext.py
@@ -0,0 +1,54 @@
+import numpy as np
+
+from tensorflow import keras
+from mu import *
+from n2ulayer import ulayer
+
+from loss import loss
+
+
+
+def choosenext(given,possble):
+    """given is a list of scores. possble is a list of lists of scores.
+    We want to find the combination of elements in possble that has the lowest correlation to given"""
+    opt=len(possble)
+    np.random.shuffle(possble)
+    possble=np.transpose(possble)
+    given=np.expand_dims(given,axis=1)
+
+
+    #print("given",given.shape)
+    #print("possble",possble.shape)
+    #print(loss(given,possble,K=np))
+    #exit()
+    inp=keras.layers.Input(shape=possble.shape[1:])
+    q=inp
+    #q=ulayer(opt,0,1)(q)
+    q=partr(q,1,opt,ulayer)  #mix the candidate scores with a chain of 2x2 rotations (see mu.py); only the first output column enters the loss
+
+    model=keras.models.Model(inputs=inp,outputs=q)
+    model.compile(loss=loss,optimizer=keras.optimizers.Adam(learning_rate=0.001))
+    model.summary()
+
+    model.fit(possble,given,
+              batch_size=32,
+              epochs=100,
+              verbose=1,
+              validation_split=0.0,#this cannot overfit, so no validation data is needed
+              shuffle=True,
+              callbacks=[keras.callbacks.EarlyStopping(monitor='loss',patience=10,restore_best_weights=True)])
+
+
+
+
+    return model.predict(possble)
+
+
+if __name__=="__main__":
+    f=np.load("merged.npz")
+    x=f["ps"]
+    given=x[0]
+    possble=x[1:5]
+    choosenext(given,possble)
+
+
diff --git a/data.py b/data.py
new file mode 100644
index 0000000..806de76
--- /dev/null
+++ b/data.py
@@ -0,0 +1,20 @@
+import numpy as np
+
+def data(n=1000):
+    """
+    Generate 2d gaussian data. Each model only gets a few points, and every model sees a slightly different sample.
+    A bigger, fixed dataset is then used for evaluation. Basically subsampling instead of feature bagging.
+    """
+    return np.random.normal(1.0,0.5,(n,2))
+
+
+
+if __name__ == '__main__':
+    x=data()
+
+    import matplotlib.pyplot as plt  #the original "from plt import plt" wrapper module is not part of this commit
+
+    plt.plot(x[:,0],x[:,1],'.')
+    plt.show()
+    #print(x,y,z)
+
diff --git a/imgs/0.png b/imgs/0.png
new file mode 100644
index 0000000..53d53f5
Binary files /dev/null and b/imgs/0.png differ
diff --git a/imgs/1.png b/imgs/1.png
new file mode 100644
index 0000000..3357c51
Binary files /dev/null and b/imgs/1.png differ
diff --git a/imgs/10.png b/imgs/10.png
new file mode 100644
index 0000000..86614d9
Binary files /dev/null and b/imgs/10.png differ
diff --git a/imgs/11.png b/imgs/11.png
new file mode 100644
index 0000000..08698f0
Binary files /dev/null and b/imgs/11.png differ
diff --git a/imgs/12.png b/imgs/12.png
new file mode 100644
index 0000000..07dd3e3
Binary files /dev/null and b/imgs/12.png differ
diff --git a/imgs/13.png b/imgs/13.png
new file mode 100644
index 0000000..e2bb09e
Binary files /dev/null and b/imgs/13.png differ
diff --git a/imgs/14.png b/imgs/14.png
new file mode 100644
index 0000000..2e41f9a
Binary files /dev/null and b/imgs/14.png differ
diff --git a/imgs/15.png b/imgs/15.png
new file mode 100644
index 0000000..29e50ce
Binary files /dev/null and b/imgs/15.png differ
diff --git a/imgs/16.png b/imgs/16.png
new file mode 100644
index 0000000..b04cf7f
Binary files /dev/null and b/imgs/16.png differ
diff --git a/imgs/17.png b/imgs/17.png
new file mode 100644
index 0000000..405533e
Binary files /dev/null and b/imgs/17.png differ
diff --git a/imgs/18.png b/imgs/18.png
new file mode 100644
index 0000000..c344fcd
Binary files /dev/null and b/imgs/18.png differ
diff --git a/imgs/19.png b/imgs/19.png
new file mode 100644
index 0000000..d299b20
Binary files /dev/null and b/imgs/19.png differ
diff --git a/imgs/2.png b/imgs/2.png
new file mode 100644
index 0000000..d4d0713
Binary files /dev/null and b/imgs/2.png differ
diff --git a/imgs/3.png b/imgs/3.png
new file mode 100644
index 0000000..459c253
Binary files /dev/null and b/imgs/3.png differ
diff --git a/imgs/4.png b/imgs/4.png
new file mode 100644
index 0000000..8346f98
Binary files /dev/null and b/imgs/4.png differ
diff --git a/imgs/5.png b/imgs/5.png
new file mode 100644
index 0000000..233cb10
Binary files /dev/null and b/imgs/5.png differ
diff --git a/imgs/6.png b/imgs/6.png
new file mode 100644
index 0000000..8b03261
Binary files /dev/null and b/imgs/6.png differ
diff --git a/imgs/7.png b/imgs/7.png
new file mode 100644
index 0000000..871cc73
Binary files /dev/null and b/imgs/7.png differ
diff --git a/imgs/8.png b/imgs/8.png
new file mode 100644
index 0000000..833fce1
Binary files /dev/null and b/imgs/8.png differ
diff --git a/imgs/9.png b/imgs/9.png
new file mode 100644
index 0000000..10f2722
Binary files /dev/null and b/imgs/9.png differ
diff --git a/imgs/recombine.png b/imgs/recombine.png
new file mode 100644
index 0000000..0ebf731
Binary files /dev/null and b/imgs/recombine.png differ
diff --git a/loss.py b/loss.py
new file mode 100644
index 0000000..4a8636e
--- /dev/null
+++ b/loss.py
@@ -0,0 +1,23 @@
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import backend as K
+
+
+def loss(a,b,K=K):
+    """The correlation between a and the first column of b should be zero. Correlations are hard to optimize directly, so minimize the covariance instead and rely on the metric-preserving ulayers to keep the scale from collapsing."""
+    if len(b.shape)>1:
+        b=b[:,0]
+    if len(a.shape)>1:
+        a=a[:,0]
+
+    return K.abs(K.mean((a-K.mean(a))*(b-K.mean(b))))
+
+
+if __name__=='__main__':
+    import numpy as np
+    x=np.random.uniform(-1,1,size=(1000,2))
+    print(loss(x[:,0],x[:,1],K=np))  #was numpyloss2d(...), which is not defined anywhere in this commit
+
+
+
+
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..409663f
--- /dev/null
+++ b/main.py
@@ -0,0 +1,63 @@
+import numpy as np
+
+from choosenext import choosenext
+
+import matplotlib.pyplot as plt  #the original "from plt import plt" wrapper module is not part of this commit
+
+
+def draw(p,dat):
+    mp=np.mean(p)
+    d=np.square(p-mp)
+    sx=[(xx,dd) for xx,dd in zip(dat,d)]
+    sx.sort(key=lambda x:x[1])
+    sx=[zw[0] for zw in sx]
+    sx=np.array(sx)
+
+    col1=[1.0,0.0,0.0]
+    col2=[0.0,1.0,0.0]
+    col1,col2=np.array(col1),np.array(col2)
+    ln=len(sx)
+    cols=[col1*(i/ln)+col2*(1-i/ln) for i in range(ln)]
+
+    plt.scatter(sx[:,0],sx[:,1],c=cols)
+
+
+def combine(a,b):
+    a=1+(a-np.mean(a))/np.std(a)
+    b=1+(b-np.mean(b))/np.std(b)
+    toc=np.concatenate((np.expand_dims(a,axis=1),np.expand_dims(b,axis=1)),axis=1)
+    toc=np.sqrt(np.mean(toc**2,axis=1))
+    return toc
+
+if __name__=="__main__":
+    f=np.load("merged.npz")
+    dat=f["x"]
+    x=f["ps"]
+    np.random.shuffle(x)
+    given=x[0]
+    possble=x[1:5]
+
+    ac=choosenext(given,possble)
+    nextbest=ac[:,0]
+    remainder=ac[:,1:]
+
+    for row in np.transpose(ac):
+        print(np.corrcoef(given,row)[0,1])
+    #as you see: the correlation is lowest for the first row,
+    #so let's combine it with the given scores
+    updated=combine(given,nextbest)
+
+
+    draw(given,dat)
+    plt.savefig("before.png")
+    plt.show()
+    draw(nextbest,dat)
+    plt.savefig("suggestion.png")
+    plt.show()
+    draw(updated,dat)
+    plt.savefig("updated.png")
+    plt.show()
+
+
+
+
diff --git a/merged.npz b/merged.npz
new file mode 100644
index 0000000..41b4ca2
Binary files /dev/null and b/merged.npz differ
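(Aside, not part of the commit: loss.py minimizes the absolute covariance rather than the Pearson correlation itself. Since corr(a,b) = cov(a,b) / (std(a)*std(b)), driving the covariance to zero also drives the correlation to zero, as long as the transform cannot shrink the scores to a constant. A small, hedged numpy check of that reasoning:)

import numpy as np

def numpy_loss(a, b):
    # the same quantity as loss.py with K=np: |cov(a, b)|
    return abs(np.mean((a - np.mean(a)) * (b - np.mean(b))))

a = np.random.normal(size=1000)
b = 0.5 * a + np.random.normal(size=1000)                          # correlated with a
c = b - np.mean((a - a.mean()) * b) / np.var(a) * (a - a.mean())   # b with its a-component projected out
print(numpy_loss(a, b), np.corrcoef(a, b)[0, 1])   # both clearly nonzero
print(numpy_loss(a, c), np.corrcoef(a, c)[0, 1])   # both close to zero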
diff --git a/mu.py b/mu.py
new file mode 100644
index 0000000..5b18472
--- /dev/null
+++ b/mu.py
@@ -0,0 +1,36 @@
+import numpy as np
+
+
+def determu(q,dim,ulayer):
+    for i in range(dim):
+        for j in range(i+1,dim):
+            q=ulayer(dim,i,j)(q)
+    return q
+def determr(q,dim,ulayer):
+    dex=[]
+    for i in range(dim):
+        for j in range(i+1,dim):
+            dex.append([i,j])
+    np.random.shuffle(dex)
+    for i,j in dex:
+        q=ulayer(dim,i,j)(q)
+    return q
+def partu(q,pdim,dim,ulayer):
+    for i in range(pdim):
+        for j in range(i+1,dim):
+            q=ulayer(dim,i,j)(q)
+    return q
+def partr(q,pdim,dim,ulayer):
+    #this is often just an approximation of a full rotation, but it only needs about pdim*dim ulayers instead of dim*(dim-1)/2 (the "sqrt runtime")
+    dex=[]
+    for i in range(pdim):
+        for j in range(i+1,dim):
+            dex.append([i,j])
+    np.random.shuffle(dex)
+    for i,j in dex:
+        q=ulayer(dim,i,j)(q)
+    return q
+def cutdown(q,pdim):
+    return q[:,:pdim]
+
+
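(Aside, not part of the commit: quick, hedged arithmetic to make the "sqrt runtime" remark in partr concrete; the values are illustrative only.)

dim, pdim = 20, 1
full = dim * (dim - 1) // 2                    # determu / determr: one ulayer per pair (i, j)
part = sum(dim - 1 - i for i in range(pdim))   # partu / partr: only pairs with i < pdim
print(full, part)                              # 190 vs 19 layers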
diff --git a/multimodel.py b/multimodel.py
new file mode 100644
index 0000000..82fd314
--- /dev/null
+++ b/multimodel.py
@@ -0,0 +1,6 @@
+import os
+
+#yes I know, not the best way to do this...
+for i in range(20):
+    os.system(f"python3 onemodel.py {i}")
+
diff --git a/n2ulayer.py b/n2ulayer.py
new file mode 100644
index 0000000..5724809
--- /dev/null
+++ b/n2ulayer.py
@@ -0,0 +1,72 @@
+#use sin/cos to get better gradients (than nulayer)
+#might have better gradients? (seems that way, but not sure yet)
+
+#should rename it, but who cares
+#now also able to export the given matrix
+from tensorflow.keras.layers import Layer
+from tensorflow.keras import backend as K
+from tensorflow import keras
+import tensorflow as tf
+
+import numpy as np
+
+
+
+class ulayer(Layer):
+    def __init__(self,siz,dex1,dex2, **kwargs):
+        self.siz = siz
+        self.dex1 = dex1
+        self.dex2 = dex2
+        super(ulayer, self).__init__(**kwargs)
+
+    def build(self, input_shape):
+        # Create a trainable weight variable for this layer.
+        self.kernel = self.add_weight(name='kernel',
+                                      shape=(1,),
+                                      initializer=keras.initializers.RandomUniform(-0.5, 0.5),
+                                      trainable=True)
+        super(ulayer, self).build(input_shape)  # Be sure to call this at the end
+
+    def numpify(self):
+        #export the learned rotation as a plain numpy matrix
+        mat=np.eye(self.siz)
+        val=self.weights[0].numpy()[0]
+        sin,cos=np.sin(val),np.cos(val)
+        mat[self.dex1,self.dex2]=sin
+        mat[self.dex2,self.dex1]=-sin
+        mat[self.dex1,self.dex1]=cos
+        mat[self.dex2,self.dex2]=cos
+        return mat
+
+
+
+    def call(self, x):
+        kernel=self.kernel
+        sin=K.sin(kernel)
+        cos=K.cos(kernel)
+        tan=sin/cos  #can diverge when cos approaches zero
+        rows=[tf.expand_dims(x[:,i],1) for i in range(self.siz)]
+        #instead of the shear ((1,a),(-a,1)), I want the proper rotation
+        #((1,a),(-a,1))/sqrt(1+a**2)
+        #with trigonometry I can get the same result:
+        #choose a=tan(kernel)
+        #and multiply both rows by cos(kernel),
+        #which turns the diagonal 1s into cos(kernel) and a*cos(kernel) into sin(kernel)
+        z1=rows[self.dex2]*tan
+        z2=rows[self.dex1]*tan
+        rows[self.dex1]+=z1
+        rows[self.dex2]-=z2
+        rows[self.dex1]*=cos
+        rows[self.dex2]*=cos
+        rows=K.concatenate(rows,axis=1)
+        return rows
+
+        #unreachable old matrix-based version, kept only for reference
+        #(tf.assign does not exist in tf2 and tensor slices cannot be assigned to):
+        #mat=tf.eye(self.siz)
+        #tf.assign(mat[self.dex1,self.dex2],self.kernel)
+        #mat[self.dex2,self.dex1]=-self.kernel
+        #return K.dot(x, mat)
+
+    def compute_output_shape(self, input_shape):
+        return input_shape
+
+
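(Aside, not part of the commit: each ulayer is a 2d (Givens) rotation in the plane of its two selected components. A hedged numpy check, mirroring the matrix that ulayer.numpify() builds, that such a matrix is orthogonal and therefore preserves the length of every input vector:)

import numpy as np

def givens(siz, dex1, dex2, val):
    # same matrix that ulayer.numpify() returns for a trained kernel value `val`
    mat = np.eye(siz)
    sin, cos = np.sin(val), np.cos(val)
    mat[dex1, dex2] = sin
    mat[dex2, dex1] = -sin
    mat[dex1, dex1] = cos
    mat[dex2, dex2] = cos
    return mat

m = givens(4, 0, 2, 0.3)
x = np.random.normal(size=(5, 4))
print(np.allclose(m @ m.T, np.eye(4)))                # orthogonal
print(np.allclose(np.linalg.norm(x @ m, axis=1),
                  np.linalg.norm(x, axis=1)))         # lengths preserved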
diff --git a/old/imgs/0.png b/old/imgs/0.png
new file mode 100644
index 0000000..1138c1b
Binary files /dev/null and b/old/imgs/0.png differ
diff --git a/old/imgs/1.png b/old/imgs/1.png
new file mode 100644
index 0000000..f9fa874
Binary files /dev/null and b/old/imgs/1.png differ
diff --git a/old/imgs/2.png b/old/imgs/2.png
new file mode 100644
index 0000000..fd04739
Binary files /dev/null and b/old/imgs/2.png differ
diff --git a/old/imgs/3.png b/old/imgs/3.png
new file mode 100644
index 0000000..c84e5e8
Binary files /dev/null and b/old/imgs/3.png differ
diff --git a/old/imgs/4.png b/old/imgs/4.png
new file mode 100644
index 0000000..522c03d
Binary files /dev/null and b/old/imgs/4.png differ
diff --git a/old/imgs/5.png b/old/imgs/5.png
new file mode 100644
index 0000000..a483be1
Binary files /dev/null and b/old/imgs/5.png differ
diff --git a/old/imgs/6.png b/old/imgs/6.png
new file mode 100644
index 0000000..fe8c86c
Binary files /dev/null and b/old/imgs/6.png differ
diff --git a/old/imgs/7.png b/old/imgs/7.png
new file mode 100644
index 0000000..d836dba
Binary files /dev/null and b/old/imgs/7.png differ
diff --git a/old/imgs/8.png b/old/imgs/8.png
new file mode 100644
index 0000000..1a8a731
Binary files /dev/null and b/old/imgs/8.png differ
diff --git a/old/runs/0.npz b/old/runs/0.npz
new file mode 100644
index 0000000..02c9fba
Binary files /dev/null and b/old/runs/0.npz differ
diff --git a/old/runs/1.npz b/old/runs/1.npz
new file mode 100644
index 0000000..a848c57
Binary files /dev/null and b/old/runs/1.npz differ
diff --git a/old/runs/2.npz b/old/runs/2.npz
new file mode 100644
index 0000000..e209666
Binary files /dev/null and b/old/runs/2.npz differ
diff --git a/old/runs/3.npz b/old/runs/3.npz
new file mode 100644
index 0000000..c2eb22f
Binary files /dev/null and b/old/runs/3.npz differ
diff --git a/old/runs/4.npz b/old/runs/4.npz
new file mode 100644
index 0000000..b01db9d
Binary files /dev/null and b/old/runs/4.npz differ
diff --git a/old/runs/5.npz b/old/runs/5.npz
new file mode 100644
index 0000000..c4ebde0
Binary files /dev/null and b/old/runs/5.npz differ
diff --git a/old/runs/6.npz b/old/runs/6.npz
new file mode 100644
index 0000000..6b4a971
Binary files /dev/null and b/old/runs/6.npz differ
diff --git a/old/runs/7.npz b/old/runs/7.npz
new file mode 100644
index 0000000..879582e
Binary files /dev/null and b/old/runs/7.npz differ
diff --git a/old/runs/8.npz b/old/runs/8.npz
new file mode 100644
index 0000000..b27a4af
Binary files /dev/null and b/old/runs/8.npz differ
diff --git a/onemodel.py b/onemodel.py
new file mode 100644
index 0000000..078fec4
--- /dev/null
+++ b/onemodel.py
@@ -0,0 +1,105 @@
+import tensorflow as tf
+from tensorflow import keras
+
+import numpy as np
+
+import os
+import sys
+
+
+from data import data
+
+os.makedirs('./runs', exist_ok=True)
+os.makedirs('./imgs', exist_ok=True)
+
+dex=0
+if len(sys.argv)>1:
+    dex=int(sys.argv[1])
+
+
+seed=np.random.randint(100000)
+x=data(1000)
+np.random.seed(12)  #fixed seed, so every model is evaluated on the same data
+X=data(10000)
+
+np.random.seed(seed)  #back to a per-model seed
+
+
+
+inp=keras.layers.Input(shape=x.shape[1:])
+q=inp
+q=keras.layers.Dense(5,activation='relu')(q)
+q=keras.layers.Dense(5,activation='relu')(q)
+q=keras.layers.Dense(1,activation='linear')(q)
+
+model=keras.models.Model(inputs=inp,outputs=q)
+
+model.compile(optimizer='adam',loss='mse')
+
+model.fit(x,np.ones(len(x)),
+          epochs=500,
+          batch_size=25,
+          validation_split=0.2,
+          verbose=1,
+          shuffle=True,
+          callbacks=[keras.callbacks.EarlyStopping(monitor='val_loss',patience=10)])
+
+
+#Evaluation phase
+
+
+x=X
+
+p=model.predict(x)
+mp=np.mean(p)
+d=(p-mp)**2
+d=np.sqrt(np.mean(d,axis=-1))
+
+np.savez_compressed(f"runs/{dex}",d=d,x=x,p=p,mp=mp)
+
+
+sx=[(xx,dd) for xx,dd in zip(x,d)]
+sx.sort(key=lambda x:x[1])
+print(sx[0],sx[-1])
+
+sx=[xx for xx,dd in sx]
+sx=np.array(sx)
+
+import matplotlib.pyplot as plt  #the original "from plt import plt" wrapper module is not part of this commit
+
+
+col1=[1.0,0.0,0.0]
+col2=[0.0,1.0,0.0]
+
+col1,col2=np.array(col1),np.array(col2)
+
+ln=len(sx)
+
+cols=[col1*(i/ln)+col2*(1-i/ln) for i in range(ln)]
+
+
+
+plt.scatter(sx[:,0],sx[:,1],c=cols)
+
+plt.savefig(f"imgs/{dex}.png")
+
+#plt.plot(sx[:,0],sx[:,1],'.')
+
+plt.show()
+
+
diff --git a/recombine.py b/recombine.py
new file mode 100644
index 0000000..f36ee8d
--- /dev/null
+++ b/recombine.py
@@ -0,0 +1,77 @@
+import tensorflow as tf
+from tensorflow import keras
+
+import numpy as np
+
+import os
+import sys
+
+
+from data import data
+
+fns=[f"runs/{zw}" for zw in os.listdir("runs")]
+
+
+fs=[np.load(fn) for fn in fns if os.path.isfile(fn)]
+
+x=fs[0]["x"]
+ds=[f["d"] for f in fs]
+ps=[f["p"][:,0] for f in fs]
+
+
+ds=np.array(ds)
+ps=np.array(ps)
+
+d=np.sqrt(np.mean(np.square(ds),axis=0))
+
+np.savez_compressed("merged",x=x,ds=ds,ps=ps,d=d)
+
+
+print(np.corrcoef(ps))
+
+
+
+sx=[(xx,dd) for xx,dd in zip(x,d)]
+sx.sort(key=lambda x:x[1])
+
+sx=[xx for xx,dd in sx]
+sx=np.array(sx)
+
+import matplotlib.pyplot as plt  #the original "from plt import plt" wrapper module is not part of this commit
+
+
+col1=[1.0,0.0,0.0]
+col2=[0.0,1.0,0.0]
+
+col1,col2=np.array(col1),np.array(col2)
+
+ln=len(sx)
+
+cols=[col1*(i/ln)+col2*(1-i/ln) for i in range(ln)]
+
+
+
+plt.scatter(sx[:,0],sx[:,1],c=cols)
+
+plt.savefig("imgs/recombine.png")
+
+#plt.plot(sx[:,0],sx[:,1],'.')
+
+plt.show()
+
+
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..5816112
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,4 @@
+numpy
+tensorflow
+keras
+matplotlib
diff --git a/runs/0.npz b/runs/0.npz
new file mode 100644
index 0000000..30a747b
Binary files /dev/null and b/runs/0.npz differ
diff --git a/runs/1.npz b/runs/1.npz
new file mode 100644
index 0000000..ab5a1a7
Binary files /dev/null and b/runs/1.npz differ
diff --git a/runs/10.npz b/runs/10.npz
new file mode 100644
index 0000000..ce36c39
Binary files /dev/null and b/runs/10.npz differ
diff --git a/runs/11.npz b/runs/11.npz
new file mode 100644
index 0000000..61ef931
Binary files /dev/null and b/runs/11.npz differ
diff --git a/runs/12.npz b/runs/12.npz
new file mode 100644
index 0000000..db34073
Binary files /dev/null and b/runs/12.npz differ
diff --git a/runs/13.npz b/runs/13.npz
new file mode 100644
index 0000000..ce2337d
Binary files /dev/null and b/runs/13.npz differ
diff --git a/runs/14.npz b/runs/14.npz
new file mode 100644
index 0000000..5aa733b
Binary files /dev/null and b/runs/14.npz differ
diff --git a/runs/15.npz b/runs/15.npz
new file mode 100644
index 0000000..f2a3c81
Binary files /dev/null and b/runs/15.npz differ
diff --git a/runs/16.npz b/runs/16.npz
new file mode 100644
index 0000000..1935eef
Binary files /dev/null and b/runs/16.npz differ
diff --git a/runs/17.npz b/runs/17.npz
new file mode 100644
index 0000000..ca14e42
Binary files /dev/null and b/runs/17.npz differ
diff --git a/runs/18.npz b/runs/18.npz
new file mode 100644
index 0000000..4474b89
Binary files /dev/null and b/runs/18.npz differ
diff --git a/runs/19.npz b/runs/19.npz
new file mode 100644
index 0000000..0bd5d9e
Binary files /dev/null and b/runs/19.npz differ
diff --git a/runs/2.npz b/runs/2.npz
new file mode 100644
index 0000000..6a788e8
Binary files /dev/null and b/runs/2.npz differ
diff --git a/runs/3.npz b/runs/3.npz
new file mode 100644
index 0000000..f4da47c
Binary files /dev/null and b/runs/3.npz differ
diff --git a/runs/4.npz b/runs/4.npz
new file mode 100644
index 0000000..280dc1c
Binary files /dev/null and b/runs/4.npz differ
diff --git a/runs/5.npz b/runs/5.npz
new file mode 100644
index 0000000..c5bff29
Binary files /dev/null and b/runs/5.npz differ
diff --git a/runs/6.npz b/runs/6.npz
new file mode 100644
index 0000000..ae300c4
Binary files /dev/null and b/runs/6.npz differ
diff --git a/runs/7.npz b/runs/7.npz
new file mode 100644
index 0000000..a19df7e
Binary files /dev/null and b/runs/7.npz differ
diff --git a/runs/8.npz b/runs/8.npz
new file mode 100644
index 0000000..d1e27c0
Binary files /dev/null and b/runs/8.npz differ
diff --git a/runs/9.npz b/runs/9.npz
new file mode 100644
index 0000000..d86a46c
Binary files /dev/null and b/runs/9.npz differ
diff --git a/suggestion.png b/suggestion.png
new file mode 100644
index 0000000..82e13a4
Binary files /dev/null and b/suggestion.png differ
diff --git a/updated.png b/updated.png
new file mode 100644
index 0000000..ed0558a
Binary files /dev/null and b/updated.png differ
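(Aside, not part of the commit: a hedged sketch of the assumed order in which the scripts are meant to be run, derived from what each one reads and writes.)

import os

os.system("python3 multimodel.py")  # trains 20 models via onemodel.py -> runs/*.npz and imgs/*.png
os.system("python3 recombine.py")   # quadratic-mean merge of all runs -> merged.npz and imgs/recombine.png
os.system("python3 main.py")        # picks the least correlated candidate -> before.png, suggestion.png, updated.png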