@@ -3,6 +3,9 @@ from icosagon.bulkdec import BulkDecodeLayer
from icosagon.input import OneHotInputLayer
from icosagon.convlayer import DecagonLayer
import torch
import pytest
import time
import sys


def test_bulk_decode_layer_01():
@@ -111,3 +114,127 @@ def test_bulk_decode_layer_03_big():
    assert len(pred[0]) == len(data.relation_families[0].relation_types)
    assert pred[0].shape[1] == data.node_types[0].count
    assert pred[0].shape[2] == data.node_types[1].count


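# Runs the full input -> convolution -> bulk decode stack on a single CUDA
# device with 20000 x 21000 nodes; skipped when no CUDA device is available.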
def test_bulk_decode_layer_03_huge_gpu():
    if torch.cuda.device_count() == 0:
        pytest.skip('test_bulk_decode_layer_03_huge_gpu() requires CUDA support')

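    # Build the test graph directly on the GPU: two node types, one relation
    # family, and random 0/1 adjacency matrices stored as sparse tensors.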
    device = torch.device('cuda:0')

    data = Data()
    data.add_node_type('Foo', 20000)
    data.add_node_type('Bar', 21000)
    fam = data.add_relation_family('Foo-Bar', 0, 1, False)

    print('Adding Foobar Relation 1...')
    fam.add_relation_type('Foobar Relation 1',
        torch.rand((20000, 21000), dtype=torch.float32).round().to_sparse().to(device),
        torch.rand((21000, 20000), dtype=torch.float32).round().to_sparse().to(device))

    print('Adding Foobar Relation 2...')
    fam.add_relation_type('Foobar Relation 2',
        torch.rand((20000, 21000), dtype=torch.float32).round().to_sparse().to(device),
        torch.rand((21000, 20000), dtype=torch.float32).round().to_sparse().to(device))

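    # Model under test: one-hot input features, a DecagonLayer producing
    # 32-dimensional embeddings, and a BulkDecodeLayer with no dropout
    # (keep_prob=1.) and identity activation.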
    in_layer = OneHotInputLayer(data)
    d_layer = DecagonLayer(in_layer.output_dim, 32, data)
    dec_layer = BulkDecodeLayer(input_dim=d_layer.output_dim, data=data,
        keep_prob=1., activation=lambda x: x)
    seq = torch.nn.Sequential(in_layer, d_layer, dec_layer)
    seq = seq.to(device)

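    # Time a single forward pass; the one-hot input layer produces its own
    # features, so the sequence is invoked with None.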
    print('Starting forward pass...')
    t = time.time()
    pred = seq(None)
    print('Elapsed:', time.time() - t)

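    # Expect one prediction tensor per relation family, stacking a
    # (Foo x Bar) matrix for every relation type in the family.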
    assert isinstance(pred, list)
    assert len(pred) == len(data.relation_families)
    assert isinstance(pred[0], torch.Tensor)
    assert len(pred[0].shape) == 3
    assert len(pred[0]) == len(data.relation_families[0].relation_types)
    assert pred[0].shape[1] == data.node_types[0].count
    assert pred[0].shape[2] == data.node_types[1].count


def test_bulk_decode_layer_04_huge_multirel_gpu():
    if torch.cuda.device_count() == 0:
        pytest.skip('test_bulk_decode_layer_04_huge_multirel_gpu() requires CUDA support')

    if torch.cuda.get_device_properties(0).total_memory < 64000000000:
        pytest.skip('test_bulk_decode_layer_04_huge_multirel_gpu() requires GPU with 64GB of memory')

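    # Same node counts as above, but a single adjacency matrix pair is shared
    # by 1300 relation types, hence the 64 GB memory check above.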
    device = torch.device('cuda:0')

    data = Data()
    data.add_node_type('Foo', 20000)
    data.add_node_type('Bar', 21000)
    fam = data.add_relation_family('Foo-Bar', 0, 1, False)

    print('Generating adj_mat ...')
    adj_mat = torch.rand((20000, 21000), dtype=torch.float32).round().to_sparse().to(device)
    print('Generating adj_mat_back ...')
    adj_mat_back = torch.rand((21000, 20000), dtype=torch.float32).round().to_sparse().to(device)

    print('Adding relations ...')
    for i in range(1300):
        sys.stdout.write('.')
        sys.stdout.flush()
        fam.add_relation_type(f'Foobar Relation {i}', adj_mat, adj_mat_back)
    print()

    in_layer = OneHotInputLayer(data)
    d_layer = DecagonLayer(in_layer.output_dim, 32, data)
    dec_layer = BulkDecodeLayer(input_dim=d_layer.output_dim, data=data,
        keep_prob=1., activation=lambda x: x)
    seq = torch.nn.Sequential(in_layer, d_layer, dec_layer)
    seq = seq.to(device)

    print('Starting forward pass...')
    t = time.time()
    pred = seq(None)
    print('Elapsed:', time.time() - t)

    assert isinstance(pred, list)
    assert len(pred) == len(data.relation_families)
    assert isinstance(pred[0], torch.Tensor)
    assert len(pred[0].shape) == 3
    assert len(pred[0]) == len(data.relation_families[0].relation_types)
    assert pred[0].shape[1] == data.node_types[0].count
    assert pred[0].shape[2] == data.node_types[1].count


def test_bulk_decode_layer_04_big_multirel_gpu():
    if torch.cuda.device_count() == 0:
        pytest.skip('test_bulk_decode_layer_04_big_multirel_gpu() requires CUDA support')

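    # Scaled-down variant of the multi-relation test: 2000 x 2100 nodes and
    # 1300 relation types sharing one adjacency matrix pair, so it runs
    # without the 64 GB memory check.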
    device = torch.device('cuda:0')

    data = Data()
    data.add_node_type('Foo', 2000)
    data.add_node_type('Bar', 2100)
    fam = data.add_relation_family('Foo-Bar', 0, 1, False)

    print('Generating adj_mat ...')
    adj_mat = torch.rand((2000, 2100), dtype=torch.float32).round().to_sparse().to(device)
    print('Generating adj_mat_back ...')
    adj_mat_back = torch.rand((2100, 2000), dtype=torch.float32).round().to_sparse().to(device)

    print('Adding relations ...')
    for i in range(1300):
        sys.stdout.write('.')
        sys.stdout.flush()
        fam.add_relation_type(f'Foobar Relation {i}', adj_mat, adj_mat_back)
    print()

    in_layer = OneHotInputLayer(data)
    d_layer = DecagonLayer(in_layer.output_dim, 32, data)
    dec_layer = BulkDecodeLayer(input_dim=d_layer.output_dim, data=data,
        keep_prob=1., activation=lambda x: x)
    seq = torch.nn.Sequential(in_layer, d_layer, dec_layer)
    seq = seq.to(device)

    print('Starting forward pass...')
    t = time.time()
    pred = seq(None)
    print('Elapsed:', time.time() - t)

    assert isinstance(pred, list)
    assert len(pred) == len(data.relation_families)
    assert isinstance(pred[0], torch.Tensor)
    assert len(pred[0].shape) == 3
    assert len(pred[0]) == len(data.relation_families[0].relation_types)
    assert pred[0].shape[1] == data.node_types[0].count
    assert pred[0].shape[2] == data.node_types[1].count