|
- import numpy as np
- import scipy.sparse as sp
- import torch
-
# for GNNGuard
def accuracy(output, labels):
    """Return accuracy of output compared to labels.

    Parameters
    ----------
    output : torch.Tensor
        output from model, shape [n_samples, n_classes]
    labels : torch.Tensor or numpy.ndarray or scalar
        node labels

    Returns
    -------
    torch.Tensor
        0-dim double tensor holding the fraction of correct predictions
    """
    # Allow a bare scalar label so single-sample evaluation works.
    if not hasattr(labels, '__len__'):
        labels = [labels]
    # isinstance (not `type(...) is`) so torch.Tensor subclasses pass through.
    if not isinstance(labels, torch.Tensor):
        labels = torch.LongTensor(labels)
    preds = output.max(1)[1].type_as(labels)  # argmax over class dimension
    correct = preds.eq(labels).double().sum()
    return correct / len(labels)
-
# for GCNSVD
def normalize_adj(mx):
    """Normalize sparse adjacency matrix,
    A' = (D + I)^-1/2 * ( A + I ) * (D + I)^-1/2

    Parameters
    ----------
    mx : scipy.sparse.csr_matrix
        matrix to be normalized

    Returns
    -------
    scipy.sparse.lil_matrix
        normalized matrix
    """
    # TODO: maybe using coo format would be better?
    # Check the public `format` attribute instead of `sp.lil.lil_matrix`:
    # `sp.lil` is a private submodule path and not a stable scipy name.
    if mx.format != 'lil':
        mx = mx.tolil()
    # Heuristic self-loop check: add I only when the first diagonal entry
    # is zero (assumes self-loops are either all present or all absent).
    if mx[0, 0] == 0:
        mx = mx + sp.eye(mx.shape[0])
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1/2).flatten()
    r_inv[np.isinf(r_inv)] = 0.  # isolated nodes: 1/sqrt(0) -> 0, not inf
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    mx = mx.dot(r_mat_inv)
    return mx
-
-
def to_tensor(adj, features, labels=None, device='cpu'):
    """Convert adj, features, labels from array or sparse matrix to
    torch Tensor.

    Parameters
    ----------
    adj : scipy.sparse.csr_matrix
        the adjacency matrix.
    features : scipy.sparse.csr_matrix
        node features
    labels : numpy.array
        node labels
    device : str
        'cpu' or 'cuda'
    """
    def _convert(mat):
        # Scipy-sparse input keeps its sparsity as a torch sparse tensor;
        # anything else becomes a dense float tensor.
        if sp.issparse(mat):
            return sparse_mx_to_torch_sparse_tensor(mat)
        return torch.FloatTensor(np.array(mat))

    adj_t = _convert(adj).to(device)
    feat_t = _convert(features).to(device)
    if labels is None:
        return adj_t, feat_t
    return adj_t, feat_t, torch.LongTensor(labels).to(device)
-
def normalize_adj_tensor(adj, sparse=False):
    """Normalize adjacency tensor matrix.

    Dense path computes (D + I)^-1/2 (A + I) (D + I)^-1/2 in torch;
    sparse path round-trips through scipy.
    """
    device = torch.device("cuda" if adj.is_cuda else "cpu")
    if sparse:
        # warnings.warn('If you find the training process is too slow, you can uncomment line 207 in deeprobust/graph/utils.py. Note that you need to install torch_sparse')
        # TODO if this is too slow, uncomment the following code,
        # but you need to install torch_scatter
        # return normalize_sparse_tensor(adj)
        normalized = normalize_adj(to_scipy(adj))
        return sparse_mx_to_torch_sparse_tensor(normalized).to(device)
    mx = adj + torch.eye(adj.shape[0]).to(device)
    degree = mx.sum(1)
    d_inv_sqrt = degree.pow(-1/2).flatten()
    d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.  # guard isolated nodes
    d_mat = torch.diag(d_inv_sqrt)
    return d_mat @ mx @ d_mat
-
def to_scipy(tensor):
    """Convert a dense/sparse tensor to scipy matrix"""
    # Inline layout check (same test is_sparse_tensor performs).
    if tensor.layout == torch.sparse_coo:
        idx = tensor._indices().cpu().numpy()
        val = tensor._values().cpu().numpy()
    else:
        nonzero = tensor.nonzero().t()
        idx = nonzero.cpu().numpy()
        val = tensor[nonzero[0], nonzero[1]].cpu().numpy()
    # (data, ij) coordinate form: idx is a (2, nnz) row/col array.
    return sp.csr_matrix((val, idx), shape=tensor.shape)
def is_sparse_tensor(tensor):
    """Check if a tensor is sparse tensor.

    Parameters
    ----------
    tensor : torch.Tensor
        given tensor

    Returns
    -------
    bool
        whether a tensor is sparse tensor
    """
    # if hasattr(tensor, 'nnz'):
    # Only the COO layout counts as "sparse" here, matching the
    # original branch structure.
    return tensor.layout == torch.sparse_coo
-
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor.

    Parameters
    ----------
    sparse_mx : scipy.sparse matrix
        matrix to convert; any scipy sparse format is accepted

    Returns
    -------
    torch.Tensor
        float32 sparse COO tensor with the same shape and entries
    """
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    row = torch.LongTensor(sparse_mx.row)
    col = torch.LongTensor(sparse_mx.col)
    indices = torch.stack((row, col))  # shape (2, nnz) as COO expects
    values = torch.FloatTensor(sparse_mx.data)
    # torch.sparse_coo_tensor is the supported replacement for the
    # deprecated legacy torch.sparse.FloatTensor constructor.
    return torch.sparse_coo_tensor(indices, values,
                                   torch.Size(sparse_mx.shape))
|