Commit 4cec915

Updated: used hybrid and corrected based on comments
1 parent a0e2c74 commit 4cec915

File tree

2 files changed: 17 additions & 13 deletions


introduction_to_applying_machine_learning/gluon_recommender_system/gluon_recommender_system.ipynb

Lines changed: 12 additions & 8 deletions
@@ -119,7 +119,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "!mkdir /tmp/recsys/\n",
@@ -379,7 +381,7 @@
    },
    "outputs": [],
    "source": [
-    "class MFBlock(gluon.Block):\n",
+    "class MFBlock(gluon.HybridBlock):\n",
     "    def __init__(self, max_users, max_items, num_emb, dropout_p=0.5):\n",
     "        super(MFBlock, self).__init__()\n",
     "        \n",
@@ -394,15 +396,15 @@
     "        self.dropout = gluon.nn.Dropout(dropout_p)\n",
     "        self.dense = gluon.nn.Dense(num_emb, activation='relu')\n",
     "    \n",
-    "    def forward(self, users, items):\n",
+    "    def hybrid_forward(self, F, users, items):\n",
     "        a = self.user_embeddings(users)\n",
     "        a = self.dense(a)\n",
     "        \n",
     "        b = self.item_embeddings(items)\n",
     "        b = self.dense(b)\n",
     "\n",
     "        predictions = self.dropout(a) * self.dropout(b)\n",
-    "        predictions = nd.sum(predictions, axis=1)\n",
+    "        predictions = F.sum(predictions, axis=1)\n",
     "        return predictions"
    ]
   },
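Context for the change from forward to hybrid_forward above: inside hybrid_forward, F is the mxnet.ndarray module on imperative calls and the mxnet.symbol module once the block has been hybridized, which is why the hard-coded nd.sum becomes F.sum. A minimal illustration of that dispatch, using a hypothetical ShowNamespace block that is not part of this commit:

import mxnet as mx
from mxnet import gluon, nd

class ShowNamespace(gluon.HybridBlock):
    # Hypothetical block; it only reports which namespace F is bound to.
    def hybrid_forward(self, F, x):
        print('F is', F.__name__)   # 'mxnet.ndarray' or 'mxnet.symbol'
        return F.sum(x, axis=1)     # defined in both namespaces, unlike a hard-coded nd.sum

block = ShowNamespace()
block.initialize()
block(nd.ones((2, 3)))    # imperative call: F is mxnet.ndarray
block.hybridize()
block(nd.ones((2, 3)))    # first call after hybridize(): traced with mxnet.symbol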
@@ -443,6 +445,7 @@
    "net.collect_params().initialize(mx.init.Xavier(magnitude=2.24),\n",
    "                                ctx=ctx,\n",
    "                                force_reinit=True)\n",
+   "net.hybridize()\n",
    "\n",
    "# Set optimization parameters\n",
    "opt = 'sgd'\n",
@@ -486,8 +489,7 @@
    "        with mx.autograd.record():\n",
    "            output = net(user, item)\n",
    "            loss = loss_function(output, label)\n",
-   "            loss.backward()\n",
-   "            net.collect_params().values()\n",
+   "            loss.backward()\n",
    "        trainer.step(batch_size)\n",
    "    except:\n",
    "        pass\n",
@@ -825,7 +827,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "We can see that our neural network and embedding model produces substantially better results (1.28 vs 1.65 on mean square error).\n",
+    "We can see that our neural network and embedding model produces substantially better results (~1.27 vs 1.65 on mean square error).\n",
     "\n",
     "For recommender systems, subjective accuracy also matters. Let's get some recommendations for a random user to see if they make intuitive sense."
    ]
@@ -931,7 +933,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "sagemaker.Session().delete_endpoint(predictor.endpoint)"

introduction_to_applying_machine_learning/gluon_recommender_system/recommender.py

Lines changed: 5 additions & 5 deletions
@@ -50,6 +50,7 @@ def train(channel_input_dirs, hyperparameters, hosts, num_gpus, **kwargs):
     net.collect_params().initialize(mx.init.Xavier(magnitude=2.24),
                                     ctx=ctx,
                                     force_reinit=True)
+    net.hybridize()

     trainer = gluon.Trainer(net.collect_params(),
                             opt,
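Putting the hybridization pieces of this commit together: the model subclasses gluon.HybridBlock, the forward pass is written as hybrid_forward(self, F, ...), and net.hybridize() is called after initialization so later calls run through the cached symbolic graph. A self-contained sketch of that pattern; the toy sizes and inputs are assumptions for illustration, not values from this repository:

import mxnet as mx
from mxnet import gluon, nd

class ToyMFBlock(gluon.HybridBlock):
    # Illustrative matrix-factorization block written in hybridizable form.
    def __init__(self, max_users, max_items, num_emb, dropout_p=0.5):
        super(ToyMFBlock, self).__init__()
        with self.name_scope():
            self.user_embeddings = gluon.nn.Embedding(max_users, num_emb)
            self.item_embeddings = gluon.nn.Embedding(max_items, num_emb)
            self.dropout = gluon.nn.Dropout(dropout_p)
            self.dense = gluon.nn.Dense(num_emb, activation='relu')

    def hybrid_forward(self, F, users, items):
        a = self.dense(self.user_embeddings(users))
        b = self.dense(self.item_embeddings(items))
        return F.sum(self.dropout(a) * self.dropout(b), axis=1)

# Toy sizes and inputs, assumptions for illustration only.
net = ToyMFBlock(max_users=100, max_items=50, num_emb=8)
net.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
net.hybridize()   # subsequent calls use the compiled symbolic graph
print(net(nd.array([1, 2]), nd.array([3, 4])))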
@@ -63,7 +64,7 @@ def train(channel_input_dirs, hyperparameters, hosts, num_gpus, **kwargs):
     return trained_net, customer_index, product_index


-class MFBlock(gluon.Block):
+class MFBlock(gluon.HybridBlock):
     def __init__(self, max_users, max_items, num_emb, dropout_p=0.5):
         super(MFBlock, self).__init__()

@@ -78,15 +79,15 @@ def __init__(self, max_users, max_items, num_emb, dropout_p=0.5):
         self.dropout = gluon.nn.Dropout(dropout_p)
         self.dense = gluon.nn.Dense(num_emb, activation='relu')

-    def forward(self, users, items):
+    def hybrid_forward(self, F, users, items):
         a = self.user_embeddings(users)
         a = self.dense(a)

         b = self.item_embeddings(items)
         b = self.dense(b)

         predictions = self.dropout(a) * self.dropout(b)
-        predictions = nd.sum(predictions, axis=1)
+        predictions = F.sum(predictions, axis=1)
         return predictions


@@ -102,8 +103,7 @@ def execute(train_iter, test_iter, net, trainer, epochs, ctx):
             with mx.autograd.record():
                 output = net(user, item)
                 loss = loss_function(output, label)
-                loss.backward()
-                net.collect_params().values()
+                loss.backward()
             trainer.step(batch_size)
         except:
             pass
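For reference on the cleanup in this last hunk (the stray net.collect_params().values() call is dropped), the standard Gluon training step is: run the forward pass and loss under mx.autograd.record(), call loss.backward(), then trainer.step(batch_size); no extra parameter bookkeeping is required. A minimal sketch with made-up data and an assumed L2 loss, not code from recommender.py:

import mxnet as mx
from mxnet import autograd, gluon, nd

# Assumed toy setup: one dense layer regressed onto random targets.
net = gluon.nn.Dense(1)
net.initialize(mx.init.Xavier())
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})
loss_function = gluon.loss.L2Loss()

batch_size = 4
data = nd.random.uniform(shape=(batch_size, 3))
label = nd.random.uniform(shape=(batch_size, 1))

with autograd.record():        # record the forward pass for autodiff
    output = net(data)
    loss = loss_function(output, label)
loss.backward()                # compute gradients
trainer.step(batch_size)       # update parameters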

0 commit comments
