import torch
import torch.nn as nn
import torch.nn.functional as F

import dgl.function as fn


class NGCFLayer(nn.Module):
    def __init__(self, in_size, out_size, norm_dict, dropout):
        super(NGCFLayer, self).__init__()
        self.in_size = in_size
        self.out_size = out_size

        # weights for different types of messages
        self.W1 = nn.Linear(in_size, out_size, bias=True)
        self.W2 = nn.Linear(in_size, out_size, bias=True)

        # leaky relu
        self.leaky_relu = nn.LeakyReLU(0.2)

        # dropout layer
        self.dropout = nn.Dropout(dropout)

        # initialization
        torch.nn.init.xavier_uniform_(self.W1.weight)
        torch.nn.init.constant_(self.W1.bias, 0)
        torch.nn.init.xavier_uniform_(self.W2.weight)
        torch.nn.init.constant_(self.W2.bias, 0)

        # norm
        self.norm_dict = norm_dict

    def forward(self, g, feat_dict):

        funcs = {}  # message and reduce functions dict
        # for each type of edges, compute messages and reduce them all
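        # A brief note on what the loop implements (per the NGCF formulation):
        # for a cross-type edge i -> u the message is
        #     m_{u<-i} = 1/sqrt(|N_u||N_i|) * (W1 e_i + W2 (e_i * e_u)),
        # while for a self-loop edge it is simply m_{u<-u} = W1 e_u;
        # destination nodes then sum all incoming messages into feature 'h'.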
        for srctype, etype, dsttype in g.canonical_etypes:
            if srctype == dsttype:  # for self loops
                messages = self.W1(feat_dict[srctype])
                g.nodes[srctype].data[etype] = messages  # store in ndata
                funcs[(srctype, etype, dsttype)] = (
                    fn.copy_u(etype, "m"),
                    fn.sum("m", "h"),
                )  # define message and reduce functions
            else:
                src, dst = g.edges(etype=(srctype, etype, dsttype))
                norm = self.norm_dict[(srctype, etype, dsttype)]
                messages = norm * (
                    self.W1(feat_dict[srctype][src])
                    + self.W2(feat_dict[srctype][src] * feat_dict[dsttype][dst])
                )  # compute messages
                g.edges[(srctype, etype, dsttype)].data[
                    etype
                ] = messages  # store in edata
                funcs[(srctype, etype, dsttype)] = (
                    fn.copy_e(etype, "m"),
                    fn.sum("m", "h"),
                )  # define message and reduce functions

        g.multi_update_all(
            funcs, "sum"
        )  # update all, reduce by first type-wisely then across different types
        feature_dict = {}
        for ntype in g.ntypes:
            h = self.leaky_relu(g.nodes[ntype].data["h"])  # leaky relu
            h = self.dropout(h)  # dropout
            h = F.normalize(h, dim=1, p=2)  # l2 normalize
            feature_dict[ntype] = h
        return feature_dict
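
# A minimal usage sketch (illustrative, not from the original file): the
# edge-type names, edge lists, and sizes below are assumptions.  NGCFLayer
# expects a heterograph that also carries self-loop edge types, plus a
# norm_dict keyed by canonical edge type:
#
#   import dgl
#   g = dgl.heterograph({
#       ("user", "ui", "item"): ([0, 0, 1], [0, 1, 1]),
#       ("item", "iu", "user"): ([0, 1, 1], [0, 0, 1]),
#       ("user", "user_self", "user"): ([0, 1], [0, 1]),
#       ("item", "item_self", "item"): ([0, 1], [0, 1]),
#   })
#   norm_dict = {et: ... for et in g.canonical_etypes}  # per-edge 1/sqrt(d_src * d_dst)
#   layer = NGCFLayer(8, 8, norm_dict, dropout=0.1)
#   out = layer(g, {"user": torch.rand(2, 8), "item": torch.rand(2, 8)})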


class NGCF(nn.Module):
    def __init__(self, g, in_size, layer_size, dropout, lmbd=1e-5):
        super(NGCF, self).__init__()
        self.lmbd = lmbd
        self.norm_dict = dict()
        for srctype, etype, dsttype in g.canonical_etypes:
            src, dst = g.edges(etype=(srctype, etype, dsttype))
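            # Symmetric per-edge normalization (the NGCF decay factor):
            #     norm = 1 / sqrt(d_out(src) * d_in(dst)).
            # For example, a source node of degree 4 and a destination node of
            # degree 9 give 1 / sqrt(36) = 1/6 for that edge.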
            dst_degree = g.in_degrees(
                dst, etype=(srctype, etype, dsttype)
            ).float()  # obtain degrees
            src_degree = g.out_degrees(
                src, etype=(srctype, etype, dsttype)
            ).float()
            norm = torch.pow(src_degree * dst_degree, -0.5).unsqueeze(
                1
            )  # compute norm
            self.norm_dict[(srctype, etype, dsttype)] = norm

        self.layers = nn.ModuleList()
        self.layers.append(
            NGCFLayer(in_size, layer_size[0], self.norm_dict, dropout[0])
        )
        self.num_layers = len(layer_size)
        for i in range(self.num_layers - 1):
            self.layers.append(
                NGCFLayer(
                    layer_size[i],
                    layer_size[i + 1],
                    self.norm_dict,
                    dropout[i + 1],
                )
            )
        self.initializer = nn.init.xavier_uniform_

        # embeddings for different types of nodes
        self.feature_dict = nn.ParameterDict(
            {
                ntype: nn.Parameter(
                    self.initializer(torch.empty(g.num_nodes(ntype), in_size))
                )
                for ntype in g.ntypes
            }
        )
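        # One free, Xavier-initialized embedding table per node type; these
        # parameters serve as the layer-0 features consumed by forward().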

    def create_bpr_loss(self, users, pos_items, neg_items):
        pos_scores = (users * pos_items).sum(1)
        neg_scores = (users * neg_items).sum(1)

        mf_loss = nn.LogSigmoid()(pos_scores - neg_scores).mean()
        mf_loss = -1 * mf_loss

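        # Together with mf_loss above, the value returned below is the standard
        # BPR objective with L2 regularization:
        #     L = -mean(log sigmoid(s_pos - s_neg))
        #         + lmbd * (||u||^2 + ||p||^2 + ||n||^2) / (2 * batch_size)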
        regularizer = (
            torch.norm(users) ** 2
            + torch.norm(pos_items) ** 2
            + torch.norm(neg_items) ** 2
        ) / 2
        emb_loss = self.lmbd * regularizer / users.shape[0]

        return mf_loss + emb_loss, mf_loss, emb_loss

    def rating(self, u_g_embeddings, pos_i_g_embeddings):
        return torch.matmul(u_g_embeddings, pos_i_g_embeddings.t())

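    # A hedged construction sketch (sizes, dropout values, and key names below
    # are illustrative assumptions, not taken from the original training script):
    #   model = NGCF(g, in_size=64, layer_size=[64, 64, 64], dropout=[0.1, 0.1, 0.1])
    #   u_emb, pos_emb, neg_emb = model(g, "user", "item", users, pos_items, neg_items)
    #   loss, mf_loss, emb_loss = model.create_bpr_loss(u_emb, pos_emb, neg_emb)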
    def forward(self, g, user_key, item_key, users, pos_items, neg_items):
        h_dict = {ntype: self.feature_dict[ntype] for ntype in g.ntypes}
        # obtain features of each layer and concatenate them all
        user_embeds = []
        item_embeds = []
        user_embeds.append(h_dict[user_key])