
Commit 0b9df9d

frozenbugs and Steve authored
[Misc] Black auto fix. (dmlc#4652)
Co-authored-by: Steve <[email protected]>
1 parent f19f05c commit 0b9df9d


99 files changed: +8585 -4228 lines
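All of the changes below are mechanical reformatting. For reference, a minimal sketch of driving Black programmatically to produce this style of change; the exact Black version and configuration behind dmlc#4652 are not recorded in this commit, so the defaults here (88-column lines, double-quote normalization) are an assumption.

import black

# A minimal sketch, assuming Black's default Mode; the exact version/flags
# used for this commit are not recorded here.
src = "device = 'cuda:{}'.format(args.gpu)\n"
formatted = black.format_str(src, mode=black.FileMode())
print(formatted)  # device = "cuda:{}".format(args.gpu)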

examples/pytorch/NGCF/NGCF/main.py

+76-39
@@ -1,23 +1,27 @@
+import os
+from time import time
+
 import torch
 import torch.optim as optim
 from model import NGCF
 from utility.batch_test import *
 from utility.helper import early_stopping
-from time import time
-import os
+

 def main(args):
     # Step 1: Prepare graph data and device ================================================================= #
     if args.gpu >= 0 and torch.cuda.is_available():
-        device = 'cuda:{}'.format(args.gpu)
+        device = "cuda:{}".format(args.gpu)
     else:
-        device = 'cpu'
+        device = "cpu"

-    g=data_generator.g
-    g=g.to(device)
+    g = data_generator.g
+    g = g.to(device)

     # Step 2: Create model and training components=========================================================== #
-    model = NGCF(g, args.embed_size, args.layer_size, args.mess_dropout, args.regs[0]).to(device)
+    model = NGCF(
+        g, args.embed_size, args.layer_size, args.mess_dropout, args.regs[0]
+    ).to(device)
     optimizer = optim.Adam(model.parameters(), lr=args.lr)

     # Step 3: training epoches ============================================================================== #
@@ -27,62 +31,89 @@ def main(args):
     loss_loger, pre_loger, rec_loger, ndcg_loger, hit_loger = [], [], [], [], []
     for epoch in range(args.epoch):
         t1 = time()
-        loss, mf_loss, emb_loss = 0., 0., 0.
+        loss, mf_loss, emb_loss = 0.0, 0.0, 0.0
         for idx in range(n_batch):
             users, pos_items, neg_items = data_generator.sample()
-            u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings = model(g, 'user', 'item', users,
-                                                                           pos_items,
-                                                                           neg_items)
+            u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings = model(
+                g, "user", "item", users, pos_items, neg_items
+            )

-            batch_loss, batch_mf_loss, batch_emb_loss = model.create_bpr_loss(u_g_embeddings,
-                                                                              pos_i_g_embeddings,
-                                                                              neg_i_g_embeddings)
+            batch_loss, batch_mf_loss, batch_emb_loss = model.create_bpr_loss(
+                u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings
+            )
             optimizer.zero_grad()
             batch_loss.backward()
             optimizer.step()

             loss += batch_loss
             mf_loss += batch_mf_loss
             emb_loss += batch_emb_loss
-

         if (epoch + 1) % 10 != 0:
             if args.verbose > 0 and epoch % args.verbose == 0:
-                perf_str = 'Epoch %d [%.1fs]: train==[%.5f=%.5f + %.5f]' % (
-                    epoch, time() - t1, loss, mf_loss, emb_loss)
+                perf_str = "Epoch %d [%.1fs]: train==[%.5f=%.5f + %.5f]" % (
+                    epoch,
+                    time() - t1,
+                    loss,
+                    mf_loss,
+                    emb_loss,
+                )
                 print(perf_str)
-            continue #end the current epoch and move to the next epoch, let the following evaluation run every 10 epoches
+            continue  # end the current epoch and move to the next epoch, let the following evaluation run every 10 epoches

-        #evaluate the model every 10 epoches
+        # evaluate the model every 10 epoches
         t2 = time()
         users_to_test = list(data_generator.test_set.keys())
         ret = test(model, g, users_to_test)
         t3 = time()

         loss_loger.append(loss)
-        rec_loger.append(ret['recall'])
-        pre_loger.append(ret['precision'])
-        ndcg_loger.append(ret['ndcg'])
-        hit_loger.append(ret['hit_ratio'])
+        rec_loger.append(ret["recall"])
+        pre_loger.append(ret["precision"])
+        ndcg_loger.append(ret["ndcg"])
+        hit_loger.append(ret["hit_ratio"])

         if args.verbose > 0:
-            perf_str = 'Epoch %d [%.1fs + %.1fs]: train==[%.5f=%.5f + %.5f], recall=[%.5f, %.5f], ' \
-                       'precision=[%.5f, %.5f], hit=[%.5f, %.5f], ndcg=[%.5f, %.5f]' % \
-                       (epoch, t2 - t1, t3 - t2, loss, mf_loss, emb_loss, ret['recall'][0], ret['recall'][-1],
-                        ret['precision'][0], ret['precision'][-1], ret['hit_ratio'][0], ret['hit_ratio'][-1],
-                        ret['ndcg'][0], ret['ndcg'][-1])
+            perf_str = (
+                "Epoch %d [%.1fs + %.1fs]: train==[%.5f=%.5f + %.5f], recall=[%.5f, %.5f], "
+                "precision=[%.5f, %.5f], hit=[%.5f, %.5f], ndcg=[%.5f, %.5f]"
+                % (
+                    epoch,
+                    t2 - t1,
+                    t3 - t2,
+                    loss,
+                    mf_loss,
+                    emb_loss,
+                    ret["recall"][0],
+                    ret["recall"][-1],
+                    ret["precision"][0],
+                    ret["precision"][-1],
+                    ret["hit_ratio"][0],
+                    ret["hit_ratio"][-1],
+                    ret["ndcg"][0],
+                    ret["ndcg"][-1],
+                )
+            )
             print(perf_str)

-        cur_best_pre_0, stopping_step, should_stop = early_stopping(ret['recall'][0], cur_best_pre_0,
-                                                                    stopping_step, expected_order='acc', flag_step=5)
+        cur_best_pre_0, stopping_step, should_stop = early_stopping(
+            ret["recall"][0],
+            cur_best_pre_0,
+            stopping_step,
+            expected_order="acc",
+            flag_step=5,
+        )

         # early stop
         if should_stop == True:
             break

-        if ret['recall'][0] == cur_best_pre_0 and args.save_flag == 1:
+        if ret["recall"][0] == cur_best_pre_0 and args.save_flag == 1:
             torch.save(model.state_dict(), args.weights_path + args.model_name)
-            print('save the weights in path: ', args.weights_path + args.model_name)
+            print(
+                "save the weights in path: ",
+                args.weights_path + args.model_name,
+            )

     recs = np.array(rec_loger)
     pres = np.array(pre_loger)
@@ -92,19 +123,25 @@ def main(args):
     best_rec_0 = max(recs[:, 0])
     idx = list(recs[:, 0]).index(best_rec_0)

-    final_perf = "Best Iter=[%d]@[%.1f]\trecall=[%s], precision=[%s], hit=[%s], ndcg=[%s]" % \
-                 (idx, time() - t0, '\t'.join(['%.5f' % r for r in recs[idx]]),
-                  '\t'.join(['%.5f' % r for r in pres[idx]]),
-                  '\t'.join(['%.5f' % r for r in hit[idx]]),
-                  '\t'.join(['%.5f' % r for r in ndcgs[idx]]))
+    final_perf = (
+        "Best Iter=[%d]@[%.1f]\trecall=[%s], precision=[%s], hit=[%s], ndcg=[%s]"
+        % (
+            idx,
+            time() - t0,
+            "\t".join(["%.5f" % r for r in recs[idx]]),
+            "\t".join(["%.5f" % r for r in pres[idx]]),
+            "\t".join(["%.5f" % r for r in hit[idx]]),
+            "\t".join(["%.5f" % r for r in ndcgs[idx]]),
+        )
+    )
     print(final_perf)

-if __name__ == '__main__':
+
+if __name__ == "__main__":
     if not os.path.exists(args.weights_path):
         os.mkdir(args.weights_path)
     args.mess_dropout = eval(args.mess_dropout)
     args.layer_size = eval(args.layer_size)
     args.regs = eval(args.regs)
     print(args)
     main(args)
-
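The training loop in main.py relies on utility.helper.early_stopping, which this commit only re-indents at the call site. Below is a hedged sketch of the contract implied by that call; the real helper lives in utility/helper.py and may differ in detail.

# Sketch of the early_stopping contract inferred from the call above;
# the actual implementation in utility/helper.py may differ.
def early_stopping(metric, best, stopping_step, expected_order="acc", flag_step=5):
    # "acc": larger metric is better; "dec": smaller is better.
    improved = metric >= best if expected_order == "acc" else metric <= best
    if improved:
        return metric, 0, False  # new best value, reset counter, keep training
    stopping_step += 1
    # stop once the metric has failed to improve for flag_step evaluations
    return best, stopping_step, stopping_step >= flag_step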
examples/pytorch/NGCF/NGCF/model.py

+70-34
@@ -1,85 +1,117 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+
 import dgl.function as fn

+
 class NGCFLayer(nn.Module):
     def __init__(self, in_size, out_size, norm_dict, dropout):
         super(NGCFLayer, self).__init__()
         self.in_size = in_size
         self.out_size = out_size

-        #weights for different types of messages
-        self.W1 = nn.Linear(in_size, out_size, bias = True)
-        self.W2 = nn.Linear(in_size, out_size, bias = True)
+        # weights for different types of messages
+        self.W1 = nn.Linear(in_size, out_size, bias=True)
+        self.W2 = nn.Linear(in_size, out_size, bias=True)

-        #leaky relu
+        # leaky relu
         self.leaky_relu = nn.LeakyReLU(0.2)

-        #dropout layer
+        # dropout layer
         self.dropout = nn.Dropout(dropout)

-        #initialization
+        # initialization
         torch.nn.init.xavier_uniform_(self.W1.weight)
         torch.nn.init.constant_(self.W1.bias, 0)
         torch.nn.init.xavier_uniform_(self.W2.weight)
         torch.nn.init.constant_(self.W2.bias, 0)

-        #norm
+        # norm
         self.norm_dict = norm_dict

     def forward(self, g, feat_dict):

-        funcs = {} #message and reduce functions dict
-        #for each type of edges, compute messages and reduce them all
+        funcs = {}  # message and reduce functions dict
+        # for each type of edges, compute messages and reduce them all
         for srctype, etype, dsttype in g.canonical_etypes:
-            if srctype == dsttype: #for self loops
+            if srctype == dsttype:  # for self loops
                 messages = self.W1(feat_dict[srctype])
-                g.nodes[srctype].data[etype] = messages #store in ndata
-                funcs[(srctype, etype, dsttype)] = (fn.copy_u(etype, 'm'), fn.sum('m', 'h')) #define message and reduce functions
+                g.nodes[srctype].data[etype] = messages  # store in ndata
+                funcs[(srctype, etype, dsttype)] = (
+                    fn.copy_u(etype, "m"),
+                    fn.sum("m", "h"),
+                )  # define message and reduce functions
             else:
                 src, dst = g.edges(etype=(srctype, etype, dsttype))
                 norm = self.norm_dict[(srctype, etype, dsttype)]
-                messages = norm * (self.W1(feat_dict[srctype][src]) + self.W2(feat_dict[srctype][src]*feat_dict[dsttype][dst])) #compute messages
-                g.edges[(srctype, etype, dsttype)].data[etype] = messages #store in edata
-                funcs[(srctype, etype, dsttype)] = (fn.copy_e(etype, 'm'), fn.sum('m', 'h')) #define message and reduce functions
-
-        g.multi_update_all(funcs, 'sum') #update all, reduce by first type-wisely then across different types
-        feature_dict={}
+                messages = norm * (
+                    self.W1(feat_dict[srctype][src])
+                    + self.W2(feat_dict[srctype][src] * feat_dict[dsttype][dst])
+                )  # compute messages
+                g.edges[(srctype, etype, dsttype)].data[
+                    etype
+                ] = messages  # store in edata
+                funcs[(srctype, etype, dsttype)] = (
+                    fn.copy_e(etype, "m"),
+                    fn.sum("m", "h"),
+                )  # define message and reduce functions
+
+        g.multi_update_all(
+            funcs, "sum"
+        )  # update all, reduce by first type-wisely then across different types
+        feature_dict = {}
         for ntype in g.ntypes:
-            h = self.leaky_relu(g.nodes[ntype].data['h']) #leaky relu
-            h = self.dropout(h) #dropout
-            h = F.normalize(h,dim=1,p=2) #l2 normalize
+            h = self.leaky_relu(g.nodes[ntype].data["h"])  # leaky relu
+            h = self.dropout(h)  # dropout
+            h = F.normalize(h, dim=1, p=2)  # l2 normalize
             feature_dict[ntype] = h
         return feature_dict

+
 class NGCF(nn.Module):
     def __init__(self, g, in_size, layer_size, dropout, lmbd=1e-5):
         super(NGCF, self).__init__()
         self.lmbd = lmbd
         self.norm_dict = dict()
         for srctype, etype, dsttype in g.canonical_etypes:
             src, dst = g.edges(etype=(srctype, etype, dsttype))
-            dst_degree = g.in_degrees(dst, etype=(srctype, etype, dsttype)).float() #obtain degrees
-            src_degree = g.out_degrees(src, etype=(srctype, etype, dsttype)).float()
-            norm = torch.pow(src_degree * dst_degree, -0.5).unsqueeze(1) #compute norm
+            dst_degree = g.in_degrees(
+                dst, etype=(srctype, etype, dsttype)
+            ).float()  # obtain degrees
+            src_degree = g.out_degrees(
+                src, etype=(srctype, etype, dsttype)
+            ).float()
+            norm = torch.pow(src_degree * dst_degree, -0.5).unsqueeze(
+                1
+            )  # compute norm
             self.norm_dict[(srctype, etype, dsttype)] = norm

         self.layers = nn.ModuleList()
         self.layers.append(
             NGCFLayer(in_size, layer_size[0], self.norm_dict, dropout[0])
         )
         self.num_layers = len(layer_size)
-        for i in range(self.num_layers-1):
+        for i in range(self.num_layers - 1):
             self.layers.append(
-                NGCFLayer(layer_size[i], layer_size[i+1], self.norm_dict, dropout[i+1])
+                NGCFLayer(
+                    layer_size[i],
+                    layer_size[i + 1],
+                    self.norm_dict,
+                    dropout[i + 1],
+                )
             )
         self.initializer = nn.init.xavier_uniform_

-        #embeddings for different types of nodes
-        self.feature_dict = nn.ParameterDict({
-            ntype: nn.Parameter(self.initializer(torch.empty(g.num_nodes(ntype), in_size))) for ntype in g.ntypes
-        })
+        # embeddings for different types of nodes
+        self.feature_dict = nn.ParameterDict(
+            {
+                ntype: nn.Parameter(
+                    self.initializer(torch.empty(g.num_nodes(ntype), in_size))
+                )
+                for ntype in g.ntypes
+            }
+        )

     def create_bpr_loss(self, users, pos_items, neg_items):
         pos_scores = (users * pos_items).sum(1)
@@ -88,17 +120,21 @@ def create_bpr_loss(self, users, pos_items, neg_items):
         mf_loss = nn.LogSigmoid()(pos_scores - neg_scores).mean()
         mf_loss = -1 * mf_loss

-        regularizer = (torch.norm(users) ** 2 + torch.norm(pos_items) ** 2 + torch.norm(neg_items) ** 2) / 2
+        regularizer = (
+            torch.norm(users) ** 2
+            + torch.norm(pos_items) ** 2
+            + torch.norm(neg_items) ** 2
+        ) / 2
         emb_loss = self.lmbd * regularizer / users.shape[0]

         return mf_loss + emb_loss, mf_loss, emb_loss

     def rating(self, u_g_embeddings, pos_i_g_embeddings):
         return torch.matmul(u_g_embeddings, pos_i_g_embeddings.t())

-    def forward(self, g,user_key, item_key, users, pos_items, neg_items):
-        h_dict = {ntype : self.feature_dict[ntype] for ntype in g.ntypes}
-        #obtain features of each layer and concatenate them all
+    def forward(self, g, user_key, item_key, users, pos_items, neg_items):
+        h_dict = {ntype: self.feature_dict[ntype] for ntype in g.ntypes}
+        # obtain features of each layer and concatenate them all
         user_embeds = []
         item_embeds = []
         user_embeds.append(h_dict[user_key])
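To make the reformatted create_bpr_loss concrete, here is a self-contained sketch of the same computation on random embeddings. The batch size and embedding width are illustrative assumptions, not values from the commit.

import torch
import torch.nn as nn

# Illustrative shapes (not from the commit): 256 users, 64-dim embeddings.
users, pos_items, neg_items = (torch.randn(256, 64) for _ in range(3))
lmbd = 1e-5  # corresponds to NGCF's lmbd regularization weight

pos_scores = (users * pos_items).sum(1)
neg_scores = (users * neg_items).sum(1)
mf_loss = -nn.LogSigmoid()(pos_scores - neg_scores).mean()  # BPR term
regularizer = (
    torch.norm(users) ** 2 + torch.norm(pos_items) ** 2 + torch.norm(neg_items) ** 2
) / 2
emb_loss = lmbd * regularizer / users.shape[0]  # L2 embedding penalty
loss = mf_loss + emb_loss
print(loss.item())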
