-
Notifications
You must be signed in to change notification settings - Fork 32
Expand file tree
/
Copy pathtinder_dog.ts
More file actions
89 lines (79 loc) · 3.2 KB
/
tinder_dog.ts
File metadata and controls
89 lines (79 loc) · 3.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import * as tf from '@tensorflow/tfjs'
import type { Model, TaskProvider } from '../index.js'
import { models } from '../index.js'
/**
 * GDHF 2024 "TinderDog" demo task: a federated binary image classifier that
 * labels dog pictures as cute or less cute. Supplies both the task metadata
 * (`getTask`) and the seeded CNN architecture (`getModel`).
 */
export const tinderDog: TaskProvider<'image', 'federated'> = {
  getTask() {
    // Not async by design: the task object is statically known, so it is
    // wrapped in an already-resolved promise.
    return Promise.resolve({
      id: 'tinder_dog',
      dataType: 'image',
      displayInformation: {
        title: 'GDHF 2024 | TinderDog',
        summary: {
          preview: 'Which dog is the cutest....or not?',
          overview: 'Binary classification model for dog cuteness.'
        },
        // NOTE(review): this description mentions max pooling after the conv
        // layers (plural), but getModel() below adds only a single
        // maxPooling2d — confirm which of the two is intended.
        model: 'The model is a simple Convolutional Neural Network composed of two convolutional layers with ReLU activations and max pooling layers, followed by a fully connected output layer. The data preprocessing reshapes images into 64x64 pixels and normalizes values between 0 and 1',
        dataFormatInformation: 'Accepted image formats are .png .jpg and .jpeg.',
        dataExample:
          'https://storage.googleapis.com/deai-313515.appspot.com/tinder_dog_preview.png',
        sampleDataset: {
          link: 'https://storage.googleapis.com/deai-313515.appspot.com/tinder_dog.zip',
          instructions:
            'Opening the link should start downloading a zip file which you can unzip. To connect the data, pick one of the data splits (the folder 0 for example) and use the CSV option below to select the file named "labels.csv". You can now connect the images located in the same folder.',
        },
      },
      trainingInformation: {
        epochs: 10,
        roundDuration: 2,
        validationSplit: 0, // nicer plot for GDHF demo
        batchSize: 10,
        IMAGE_H: 64,
        IMAGE_W: 64,
        LABEL_LIST: ['Cute dogs', 'Less cute dogs'],
        scheme: 'federated',
        aggregationStrategy: 'mean',
        minNbOfParticipants: 3,
        tensorBackend: 'tfjs'
      }
    });
  },

  /**
   * Builds the demo CNN: conv(8) → conv(16) → maxPool → dropout →
   * flatten → dense(32) → dropout → softmax(2), compiled with Adam
   * (lr 0.0005) and categorical cross-entropy. Every initializer and
   * dropout layer is seeded so demo runs are reproducible.
   *
   * @returns the wrapped TFJS model for image data.
   */
  async getModel(): Promise<Model<'image'>> {
    const task = await this.getTask();
    const seed = 42; // fixed seed to ensure reproducibility during the GDHF demo
    const imageHeight = task.trainingInformation.IMAGE_H;
    const imageWidth = task.trainingInformation.IMAGE_W;
    const imageChannels = 3; // RGB input

    const model = tf.sequential();
    model.add(
      tf.layers.conv2d({
        inputShape: [imageHeight, imageWidth, imageChannels],
        kernelSize: 5,
        filters: 8,
        activation: 'relu',
        kernelInitializer: tf.initializers.heNormal({ seed })
      })
    );
    model.add(
      tf.layers.conv2d({
        kernelSize: 5,
        filters: 16,
        activation: 'relu',
        kernelInitializer: tf.initializers.heNormal({ seed })
      })
    );
    model.add(tf.layers.maxPooling2d({ poolSize: 2, strides: 2 }));
    model.add(tf.layers.dropout({ rate: 0.25, seed }));
    model.add(tf.layers.flatten());
    model.add(
      tf.layers.dense({
        units: 32,
        activation: 'relu',
        kernelInitializer: tf.initializers.heNormal({ seed })
      })
    );
    model.add(tf.layers.dropout({ rate: 0.25, seed }));
    model.add(
      tf.layers.dense({
        units: 2, // one output per LABEL_LIST entry
        activation: 'softmax',
        kernelInitializer: tf.initializers.heNormal({ seed })
      })
    );
    model.compile({
      optimizer: tf.train.adam(0.0005),
      loss: 'categoricalCrossentropy',
      metrics: ['accuracy']
    });

    // An async function already wraps its return value in a promise;
    // the previous Promise.resolve(...) wrapper was redundant.
    return new models.TFJS('image', model);
  }
};