报错的代码位置:
1.
File "F:\recommend experience\doing\modeldemo.py" 中的:
self.user_dgi_feat = self.dgi.encoder(self.user_feat_sp_tensor).detach()
2.
File "F:\recommend experience\doing\DGI\dgi.py",
self.encoder = Encoder(g, in_feats, n_hidden, activation)
3.
File "F:\recommend experience\doing\DGI\dgi.py",
class Encoder(nn.Module):
    """DGI encoder: applies a GCN over graph ``g`` to produce node embeddings.

    Args:
        g: the DGL graph the convolution operates on.
        in_feats: input feature dimension.
        n_hidden: output (hidden) embedding dimension.
        activation: activation function passed through to the GCN.
    """

    # FIX: the original class body contained two identical copies of
    # __init__ and forward (a paste duplication); the second definition
    # silently shadowed the first. Kept a single copy of each.
    def __init__(self, g, in_feats, n_hidden, activation):
        super(Encoder, self).__init__()
        self.g = g
        self.conv = GCN(g, in_feats, n_hidden, activation)

    def forward(self, features, corrupt=False):
        # NOTE(review): `corrupt` is accepted but ignored here. Standard DGI
        # implementations shuffle node features when corrupt=True to build the
        # negative sample — confirm whether the caller relies on that.
        features = self.conv(features)
        return features
4.
class GraphConv(nn.Module):
    r"""Graph convolution layer (Kipf & Welling GCN).

    Computes ``rst = activation(D^{-1/2} A D^{-1/2} X W + b)`` for
    ``norm='both'`` (left/right degree normalization), ``D^{-1} A X W`` for
    ``norm='right'``, or the unnormalized aggregation for ``norm='none'``.

    Args:
        in_feats: input feature size.
        out_feats: output feature size.
        norm: one of ``'none'``, ``'both'``, ``'right'``.
        weight: if True, allocate a learnable ``(in_feats, out_feats)`` weight;
            otherwise an external weight must be supplied to ``forward``.
        bias: if True, add a learnable additive bias of size ``out_feats``.
        activation: optional activation applied to the output.

    Raises:
        DGLError: if ``norm`` is not one of the accepted values.
    """

    # FIX: the original source contained the entire class body pasted twice
    # (duplicate __init__/reset_parameters/forward); kept a single copy.
    def __init__(self,
                 in_feats,
                 out_feats,
                 norm='both',
                 weight=True,
                 bias=True,
                 activation=None):
        super(GraphConv, self).__init__()
        if norm not in ('none', 'both', 'right'):
            raise DGLError('Invalid norm value. Must be either "none", "both" or "right".'
                           ' But got "{}".'.format(norm))
        self._in_feats = in_feats
        self._out_feats = out_feats
        self._norm = norm
        if weight:
            self.weight = nn.Parameter(th.Tensor(in_feats, out_feats))
        else:
            self.register_parameter('weight', None)
        if bias:
            self.bias = nn.Parameter(th.Tensor(out_feats))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
        self._activation = activation

    def reset_parameters(self):
        """Reinitialize learnable parameters (Xavier weight, zero bias)."""
        if self.weight is not None:
            init.xavier_uniform_(self.weight)
        if self.bias is not None:
            init.zeros_(self.bias)

    def forward(self, graph, feat, weight=None):
        """Run the convolution on ``graph`` over node features ``feat``.

        Args:
            graph: the DGL graph.
            feat: node feature tensor; first dimension indexes nodes.
            weight: optional external weight; only allowed when the module was
                created with ``weight=False``.

        Returns:
            The convolved node feature tensor.
        """
        # Work on a local copy so mutations to ndata don't leak to the caller.
        graph = graph.local_var()

        # FIX: honor self._norm instead of normalizing unconditionally.
        if self._norm == 'both':
            # Left normalization D^{-1/2}, built from out-degrees of source
            # nodes; clamp(min=1) guards isolated nodes against division by 0.
            degs = graph.out_degrees().to(feat.device).float().clamp(min=1)
            norm = th.pow(degs, -0.5)
            shp = norm.shape + (1,) * (feat.dim() - 1)
            norm = th.reshape(norm, shp)
            feat = feat * norm

        # FIX: the original ignored the `weight` argument (it assigned
        # `weight = self.weight` and then used self.weight anyway), and
        # crashed when constructed with weight=False.
        if weight is not None:
            if self.weight is not None:
                raise DGLError('External weight is provided while at the same time the'
                               ' module has defined its own weight parameter. Please'
                               ' create the module with flag weight=False.')
        else:
            weight = self.weight
        if weight is not None:
            feat = th.matmul(feat, weight)

        # Message passing: copy source features, sum into destinations.
        graph.srcdata['h'] = feat
        graph.update_all(fn.copy_u(u='h', out='m'),
                         fn.sum(msg='m', out='h'))
        rst = graph.dstdata['h']

        # FIX: destination-side normalization must use IN-degrees (sized by
        # destination nodes). The original reused the source-side norm built
        # from out_degrees, which mismatches rst's first dimension whenever
        # src/dst node sets differ — the reported "non-singleton dimension"
        # RuntimeError.
        if self._norm != 'none':
            degs = graph.in_degrees().to(feat.device).float().clamp(min=1)
            if self._norm == 'both':
                norm = th.pow(degs, -0.5)
            else:  # 'right': mean over incoming messages
                norm = 1.0 / degs
            shp = norm.shape + (1,) * (feat.dim() - 1)
            norm = th.reshape(norm, shp)
            rst = rst * norm

        if self.bias is not None:
            rst = rst + self.bias
        if self._activation is not None:
            rst = self._activation(rst)
        return rst
- 报错类型: RuntimeError — tensor size mismatch at a non-singleton dimension（张量在非单一维度上尺寸不匹配，无法广播）。