Add DGL support #3987

Open
wants to merge 8 commits into base: master
Empty file added models/public/gcn/README.md
Empty file.
24 changes: 24 additions & 0 deletions models/public/gcn/accuracy-check.yml
@@ -0,0 +1,24 @@
models:
  - name: GCN
    launchers:
      - framework: DGL
        adapter: node_classification
        device: CPU
        model: gcn_model.pt
        module: GCN.py
        module_name: GCN
        batch: 32
        output_names:
          - logits

    datasets:
      - name: Cora
        reader: graph(dgl)_reader
        data_source: graph.bin
        annotation_conversion:
          converter: DGL_converter
          graph_path: graph.bin
        metrics:
          - name: node_accuracy_name
            type: node_accuracy
            reference: 0.778
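For reference, the node_accuracy metric requested above reduces to a per-node argmax comparison against the ground-truth labels stored in the graph. A minimal sketch of that computation (plain NumPy, hypothetical helper, not the accuracy checker's actual metric code):

import numpy as np

def node_accuracy(logits, labels):
    # logits: (num_nodes, num_classes) model output, labels: (num_nodes,) ground truth
    predicted = np.argmax(logits, axis=1)
    return float((predicted == labels).mean())

The reference: 0.778 entry is the value this accuracy is expected to reach for the packaged GCN model on this graph (which node subset is scored, e.g. a test mask, is up to the metric implementation).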
22 changes: 22 additions & 0 deletions models/public/gcn/model.yml
@@ -0,0 +1,22 @@
description: >-
  GCN graph convolutional network for node classification on the Cora citation dataset,
  packaged in DGL (PyTorch backend) format.
task_type: node_classification
files:
  - name: gcn_model.pt
    size: 94635
    source: https://raw.githubusercontent.com/itlab-vision/itlab-vision-dl-benchmark-models/main/dgl/models/classification/GCN/gcn_model.pt
    checksum: abceacb966cf92ce225e6b7e9b29b1a165e6283f0a780a1617344405d5661bd74ff4e3f7c6d7d0c14fbb44f486d24c2f
  - name: GCN.py
    size: 736
    source: https://raw.githubusercontent.com/itlab-vision/itlab-vision-dl-benchmark-models/main/dgl/models/classification/GCN/GCN.py
    checksum: a8cf92d876d5c4f495c8fc9c0354a1c337e60038c4c35b11959d5e56105c2f85d60378a41ba2436c7176dd9e708f761c
  - name: graph.bin
    size: 50908
    source: https://raw.githubusercontent.com/itlab-vision/dl-benchmark/master/tests/smoke_test/test_graph/dgl/default_graph.bin
    checksum: 7cf6911b0bd1a7dfd1aa5dc03193f3d0b805774ffd5aff6289763902568cbfd742bae87f6110c320e90ded6344850c9d
model_optimizer_args:
  - --input=$dl_dir/graph.bin
  - --input_model=$dl_dir/gcn_model.pt
  - --model_class=$dl_dir/GCN.py
framework: dgl_pytorch
license: https://raw.githubusercontent.com/itlab-vision/itlab-vision-dl-benchmark-models/main/LICENSE
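The checksum values are 96 hexadecimal characters, i.e. SHA-384 digests. A minimal sketch of verifying a downloaded file against such an entry (hypothetical helper, not the model downloader's actual code):

import hashlib
from pathlib import Path

def matches_checksum(path, expected_hex):
    # compare the file's SHA-384 digest with the checksum string from model.yml
    digest = hashlib.sha384(Path(path).read_bytes()).hexdigest()
    return digest == expected_hex.lower()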
6 changes: 5 additions & 1 deletion tools/accuracy_checker/accuracy_checker/adapters/__init__.py
@@ -146,6 +146,8 @@

from .palm_detection import PalmDetectionAdapter

from .graphs import GraphNodeClassificationAdapter

__all__ = [
'Adapter',
'AdapterField',
@@ -298,5 +300,7 @@

'ImageBackgroundMattingAdapter',

'PalmDetectionAdapter'
'PalmDetectionAdapter',

'GraphNodeClassificationAdapter'
]
84 changes: 84 additions & 0 deletions tools/accuracy_checker/accuracy_checker/adapters/graphs.py
@@ -0,0 +1,84 @@
import numpy as np

from ..adapters import Adapter
from ..config import BoolField, StringField, NumberField
from ..representation import ClassificationPrediction


class GraphNodeClassificationAdapter(Adapter):
"""
Class for converting output of node classification model to ClassificationPrediction representation
"""
__provider__ = 'node_classification'
prediction_types = (ClassificationPrediction, )

    @classmethod
    def parameters(cls):
        parameters = super().parameters()
        parameters.update({
            'classification_output': StringField(optional=True, description='Name of the output layer with node logits.'),
            'block': BoolField(optional=True, default=False, description='Process the whole graph output as a single prediction.'),
            'label_as_array': BoolField(optional=True, default=False, description='Produce ClassificationPrediction with labels as array.'),
            'multi_label_threshold': NumberField(optional=True, value_type=float, description='Threshold for multi-label classification.')
        })
        return parameters

def configure(self):
self.label_as_array = self.get_value_from_config('label_as_array')
self.block = self.get_value_from_config('block')
self.classification_out = self.get_value_from_config('classification_output')
self.multilabel_thresh = self.get_value_from_config('multi_label_threshold')
self.output_verified = False

def select_output_blob(self, outputs):
self.output_verified = True
if self.classification_out:
self.classification_out = self.check_output_name(self.classification_out, outputs)
return
super().select_output_blob(outputs)
self.classification_out = self.output_blob
return

def process(self, raw, identifiers, frame_meta):
"""
Args:
identifiers: list of input data identifiers
raw: output of model
frame_meta: list of meta information about each frame
Returns:
list of ClassificationPrediction objects
"""
if not self.output_verified:
self.select_output_blob(raw)
multi_infer = frame_meta[-1].get('multi_infer', False) if frame_meta else False
        raw_prediction = self._extract_predictions(raw, frame_meta)
        prediction = raw_prediction[self.classification_out]  # per-node prediction tensor
if multi_infer:
prediction = np.mean(prediction, axis=0)
if len(np.shape(prediction)) == 1:
prediction = np.expand_dims(prediction, axis=0)
prediction = np.reshape(prediction, (prediction.shape[0], -1))

result = []
if self.block:
result.append(self.prepare_representation(identifiers[0], prediction))
else:
for identifier, output in zip(identifiers, prediction):
result.append(self.prepare_representation(identifier, output))

return result

def prepare_representation(self, identifier, prediction):
single_prediction = ClassificationPrediction(
identifier, prediction, self.label_as_array,
multilabel_threshold=self.multilabel_thresh)
return single_prediction

@staticmethod
def _extract_predictions(outputs_list, meta):
is_multi_infer = meta[-1].get('multi_infer', False) if meta else False
if not is_multi_infer:
return outputs_list[0] if not isinstance(outputs_list, dict) else outputs_list

output_map = {}
for output_key in outputs_list[0].keys():
output_data = np.asarray([output[output_key] for output in outputs_list])
output_map[output_key] = output_data

return output_map
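For a node classification model the adapter receives a single logits tensor covering every node of the input graph, so block=True yields one prediction object for the whole graph while block=False yields one per node. A small NumPy illustration of the shape handling in process() above (hypothetical sizes; Cora has 2708 nodes and 7 classes):

import numpy as np

num_nodes, num_classes = 2708, 7
raw = {'logits': np.random.rand(num_nodes, num_classes)}   # what the launcher would hand over

prediction = raw['logits']
if prediction.ndim == 1:                                    # single-node corner case
    prediction = np.expand_dims(prediction, axis=0)
prediction = prediction.reshape(prediction.shape[0], -1)    # (num_nodes, num_classes)

# block=True  -> one ClassificationPrediction holding the whole (num_nodes, num_classes) array
# block=False -> one ClassificationPrediction per row, zipped with the identifiers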
tools/accuracy_checker/accuracy_checker/annotation_converters/__init__.py
@@ -19,6 +19,7 @@
from .market1501 import Market1501Converter
from .veri776 import VeRi776Converter
from .mars import MARSConverter
from .dgl import DGLConverter
from .pascal_voc import PascalVOCDetectionConverter, SYGDetectionConverter
from .sample_converter import SampleConverter
from .wider import WiderFormatConverter
@@ -150,6 +151,7 @@
'SYGDetectionConverter',
'WiderFormatConverter',
'MARSConverter',
'DGLConverter',
'DetectionOpenCVStorageFormatConverter',
'LFWConverter',
'FaceRecognitionBinary',
tools/accuracy_checker/accuracy_checker/annotation_converters/dgl.py
@@ -0,0 +1,27 @@
import dgl

from .format_converter import GraphFileBasedAnnotationConverter, ConverterReturn
from ..representation import ClassificationAnnotation


class DGLConverter(GraphFileBasedAnnotationConverter):
    __provider__ = 'DGL_converter'
    annotation_types = (ClassificationAnnotation, )

    def convert(self, check_content=False, **kwargs):
        # load_graphs returns (graph_list, label_dict); the packaged file holds a single graph
        graphs, _ = dgl.data.utils.load_graphs(str(self.graph_path))
        g = graphs[0]

        # per-node ground-truth labels stored on the graph itself
        labels = g.ndata["label"]

        annotation = [
            ClassificationAnnotation(identifier='', label=labels)
        ]

        return ConverterReturn(annotation, {'labels': labels}, None)
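For context, dgl.data.utils.load_graphs returns a (graph_list, label_dict) pair, which is why the converter only needs the first graph of the list. A minimal sketch of producing and inspecting a compatible graph.bin from DGL's built-in Cora dataset (an assumption for illustration; the file shipped with this model may have been generated differently):

import dgl

# one way to build a graph.bin carrying per-node 'label' data
dataset = dgl.data.CoraGraphDataset()
dgl.save_graphs('graph.bin', [dataset[0]])

# what DGLConverter reads back
graphs, _ = dgl.data.utils.load_graphs('graph.bin')
g = graphs[0]
print(g.num_nodes(), g.ndata['label'].shape)   # 2708 nodes, one class label per node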
tools/accuracy_checker/accuracy_checker/annotation_converters/format_converter.py
@@ -145,6 +145,22 @@ def convert(self, check_content=False, **kwargs):
pass


class GraphFileBasedAnnotationConverter(BaseFormatConverter):
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'graph_path': PathField(is_directory=False, description="Path to graph data.")
})
return parameters

def configure(self):
self.graph_path = self.get_value_from_config('graph_path')

def convert(self, check_content=False, **kwargs):
pass


def verify_label_map(label_map):
valid_label_map = {}
for class_id, class_name in label_map.items():
tools/accuracy_checker/accuracy_checker/data_readers/__init__.py
@@ -53,6 +53,7 @@
RawpyReader
)
from .text_readers import JSONReader
from .dgl_graph_reader import DGLGraphReader

__all__ = [
'BaseReader',
@@ -93,6 +94,7 @@
'LMDBReader',
'KaldiARKReader',
'JSONReader',
    'DGLGraphReader',

'create_reader',
'REQUIRES_ANNOTATIONS',
tools/accuracy_checker/accuracy_checker/data_readers/data_reader.py
@@ -18,6 +18,7 @@
from collections import OrderedDict, namedtuple
from functools import singledispatch
from pathlib import Path
import dgl

import numpy as np

@@ -42,6 +43,8 @@ def __init__(self, data, meta=None, identifier=''):

if self.metadata.get('input_is_dict_type'):
return
if isinstance(data, dgl.DGLGraph):
return
if np.isscalar(data):
self.metadata['image_size'] = 1
elif isinstance(data, list) and np.isscalar(data[0]):
tools/accuracy_checker/accuracy_checker/data_readers/dgl_graph_reader.py
@@ -0,0 +1,27 @@
import dgl

from ..config import ConfigError
from .data_reader import BaseReader
from ..utils import get_path


class DGLGraphReader(BaseReader):
    __provider__ = 'graph(dgl)_reader'

    def configure(self):
        if not self.data_source:
            if not self._postpone_data_source:
                raise ConfigError('data_source parameter is required to create "{}" '
                                  'data reader and read data'.format(self.__provider__))
        else:
            self.data_source = get_path(self.data_source, is_directory=False)

    def read(self, data_id):
        data_path = self.data_source / data_id if self.data_source is not None else data_id

        # load_graphs returns (graph_list, label_dict); the binary file holds a single graph
        graphs, _ = dgl.data.utils.load_graphs(str(data_path))

        return graphs[0]
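Downstream, the DGL launcher is expected to run the GCN forward pass on the graph this reader returns. Since the packaged GCN.py is not shown in this diff, the sketch below uses a stand-in two-layer GCN built from dgl.nn.GraphConv and assumes node features live under g.ndata['feat'], as in DGL's Cora dataset:

import dgl
import torch
from dgl.nn import GraphConv


class TwoLayerGCN(torch.nn.Module):
    # stand-in for the GCN class shipped in GCN.py (its exact definition is not part of this diff)
    def __init__(self, in_feats, hidden_size, num_classes):
        super().__init__()
        self.conv1 = GraphConv(in_feats, hidden_size)
        self.conv2 = GraphConv(hidden_size, num_classes)

    def forward(self, g, features):
        h = torch.relu(self.conv1(g, features))
        return self.conv2(g, h)


graphs, _ = dgl.data.utils.load_graphs('graph.bin')
g = dgl.add_self_loop(graphs[0])                 # GraphConv rejects zero in-degree nodes by default
feats = g.ndata['feat']
model = TwoLayerGCN(feats.shape[1], 16, int(g.ndata['label'].max()) + 1)
logits = model(g, feats)                         # (num_nodes, num_classes) node logits (untrained weights here)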
@@ -0,0 +1,38 @@
from .base_custom_evaluator import BaseCustomEvaluator
from .base_models import BaseCascadeModel


class DGLEvaluator(BaseCustomEvaluator):
    def __init__(self, dataset_config, launcher, model, orig_config):
        super().__init__(dataset_config, launcher, orig_config)
        self.model = model
        # if hasattr(self.model.decoder, 'adapter'):
        #     self.adapter_type = self.model.decoder.adapter.__provider__
@classmethod
def from_configs(cls, config, delayed_model_loading=False, orig_config=None):
dataset_config, launcher, _ = cls.get_dataset_and_launcher_info(config)
model = DGLGraphModel(
config.get('network_info', {}), launcher, config.get('_models', []), config.get('_model_is_blob'),
delayed_model_loading
)
return cls(dataset_config, launcher, model, orig_config)

def _process(self, output_callback, calculate_metrics, progress_reporter, metric_config, csv_file):
pass

class DGLGraphModel(BaseCascadeModel):
def __init__(self, network_info, launcher, models_args, is_blob, delayed_model_loading=False):
super().__init__(network_info, launcher)

def predict(self, identifiers, input_data, encoder_callback=None):
pass

def reset(self):
pass

def save_encoder_predictions(self):
pass

def _add_raw_encoder_predictions(self, encoder_prediction):
pass
10 changes: 9 additions & 1 deletion tools/accuracy_checker/accuracy_checker/launcher/__init__.py
@@ -91,6 +91,13 @@
'paddle_paddle', "PaddlePaddle isn't installed. Please, install it before using. \n{}".format(import_error.msg)
)

try:
from .dgl_launcher import DGLLauncher
except ImportError as import_error:
DGLLauncher = unsupported_launcher(
'dgl', "DGL isn't installed. Please, install it before using. \n{}".format(import_error.msg)
)

from .pytorch_launcher import PyTorchLauncher

__all__ = [
@@ -107,5 +114,6 @@
'PyTorchLauncher',
'PaddlePaddleLauncher',
'DummyLauncher',
'InputFeeder'
'InputFeeder',
'DGLLauncher'
]