Skip to content
This repository was archived by the owner on Aug 16, 2022. It is now read-only.

Commit be3b889

Browse files
author
tefenet
committed
Frame_annotation class
1 parent d6d55ff commit be3b889

8 files changed

+75
-62
lines changed

examples/make_positions_file.py

+10-10
Original file line number · Diff line number · Diff line change
@@ -6,18 +6,18 @@
66
example_path = osp.join(Path.home(), '.sldatasets')
77
# must provide the path of 'lsa64_positions.mat', in this example
88
# # ~/.sldatasets/LSA64_pre
9-
a_path = osp.join(example_path, 'LSA64_pre')
10-
positions.positions_mat_to_npz(osp.join(example_path, 'LSA64_pre'))
9+
# a_path = osp.join(example_path, 'LSA64_pre')
10+
# positions.positions_mat_to_npz(osp.join(example_path, 'LSA64_pre'))
1111

12-
# # can provide the destiny path of the npz else current dir will be used, in this example
13-
# # ~/.sldatasets/LSA64_raw
14-
a_path = osp.join(example_path, 'LSA64_raw')
15-
positions.get_humans_from_dataset(
16-
sld.get('lsa64', version='raw'), a_path)
12+
# # # can provide the destiny path of the npz else current dir will be used, in this example
13+
# # # ~/.sldatasets/LSA64_raw
14+
# a_path = osp.join(example_path, 'LSA64_raw')
15+
# positions.get_humans_from_dataset(
16+
# sld.get('lsa64', version='raw'), a_path)
1717

18-
a_path = osp.join(example_path, 'LSA64_cut')
19-
positions.get_humans_from_dataset(
20-
sld.get('lsa64', version='cut'), a_path)
18+
# a_path = osp.join(example_path, 'LSA64_cut')
19+
# positions.get_humans_from_dataset(
20+
# sld.get('lsa64', version='cut'), a_path)
2121

2222
a_path = osp.join(example_path, 'ASLLVD_pre')
2323
positions.get_humans_from_dataset(sld.get('asllvd').data, a_path)

examples/test_basics.py

+15-9
Original file line number · Diff line number · Diff line change
@@ -1,16 +1,22 @@
11
import sldatasets as sld
22

33
dataset = sld.get("asllvd")
4+
print("dataset summary:")
45
dataset.summary()
56
for video, description in dataset:
7+
print('video description:')
68
print(description)
79
frame, frame_annot = video[3]
8-
print(frame.shape)
9-
print(frame_annot)
10-
dataset = sld.get("lsa64", version='pre')
11-
dataset.summary()
12-
for video, description in dataset:
13-
print(description)
14-
frame, frame_annot = video[3]
15-
print(frame.shape)
16-
print(frame_annot)
10+
print('frame shape: ', frame.shape)
11+
print('estimated body parts (x,y,score): ')
12+
frame_annot.pretty()
13+
face = frame_annot.h.get_face_box(640, 480)
14+
print('face box: ', face)
15+
print(frame_annot.h.get_upper_body_box(640, 480))
16+
# dataset = sld.get("lsa64", version='pre')
17+
# dataset.summary()
18+
# for video, description in dataset:
19+
# print(description)
20+
# frame, frame_annot = video[3]
21+
# print(frame.shape)
22+
# print(frame_annot)

positions/__init__.py

+15-29
Original file line number · Diff line number · Diff line change
@@ -1,7 +1,8 @@
11

2+
from sldatasets.annotations import Human
23
from os import path as osp
34
import numpy as np
4-
from sldatasets.body import Human
5+
from sldatasets.annotations import Human as H, Frame_annotation as fa, BodyPart
56

67

78
def positions_mat_to_npz(path=None):
@@ -94,42 +95,27 @@ def process_video(video, e):
9495
for j in range(0, n):
9596
img = video[j, :]
9697
humans = e.inference(img, True, 4.0)
98+
h = H([])
9799
if len(humans) != 1:
98100
b = True
99101
else:
100-
h = Human([])
101-
h.body_parts = humans[0].body_parts
102-
h.score = humans[0].score
103-
frames.append(humans)
102+
h = translate(humans, h)
103+
frames.append(fa(h))
104104
if b:
105105
raise InferenceError(frames)
106106
return frames
107107

108108

109+
def translate(humans, h):
110+
body = {}
111+
for key, part in humans[0].body_parts.items():
112+
body[key] = BodyPart(part.uidx, part.part_idx,
113+
part.x, part.y, part.score)
114+
h.body_parts = body
115+
h.score = humans[0].score
116+
return h
117+
118+
109119
class InferenceError(Exception):
110120
def __init__(self, arg):
111121
self.args = arg
112-
113-
114-
def translate_tf_pose_humans(npz_file):
115-
from sldatasets.body import Human
116-
npz = np.load(npz_file)
117-
data = {}
118-
for video in npz.files:
119-
video_annotation = npz[video]
120-
n = video_annotation.size
121-
result = np.empty((n,), dtype=object)
122-
for i, frame_annotation in enumerate(video_annotation):
123-
h = Human([])
124-
try:
125-
human = frame_annotation[0]
126-
h.body_parts = human.body_parts
127-
h.score = human.score
128-
except:
129-
print('frame', str(i), ' of video ', video,
130-
" has no annotations. processing videos wait...")
131-
result[i] = h
132-
data[video] = result
133-
outfile = osp.join(osp.dirname(npz_file), 'positions.npz')
134-
np.savez(outfile, **data)
135-
return outfile

requirements.txt

+4-3
Original file line number · Diff line number · Diff line change
@@ -1,6 +1,7 @@
11
scikit-video
2-
matplotlib
2+
pandas
33
gdown
44
pathlib
5-
opencv-python
6-
tensorflow
5+
numpy
6+
openpyxl
7+
xlrd

setup.py

+10-8
Original file line number · Diff line number · Diff line change
@@ -1,23 +1,24 @@
11
from setuptools import find_packages, setup
22

3-
REQUIRED_PACKAGES =[
3+
REQUIRED_PACKAGES = [
44
'scikit-video',
5-
'matplotlib',
65
'gdown',
76
'pathlib',
8-
'opencv-python',
9-
'tensorflow',
7+
'openpyxl',
8+
'xlrd',
9+
'pandas',
10+
'numpy',
1011
]
1112

12-
PROJECT_URLS={
13+
PROJECT_URLS = {
1314
'Tracker': 'https://github.com/midusi/sign_language_datasets/issues',
1415
'Documentation': 'https://github.com/midusi/sign_language_datasets/wiki',
1516
'Source': 'https://github.com/midusi/sign_language_datasets',
1617
}
1718

1819
setup(
1920
name='sldatasets',
20-
version='0.0.1',
21+
version='0.0.2',
2122
author='Pablo Kepes and Facundo Quiroga',
2223
author_email="[email protected]",
2324
description=(
@@ -28,9 +29,10 @@
2829
project_urls=PROJECT_URLS,
2930
packages=find_packages(),
3031
install_requires=REQUIRED_PACKAGES,
31-
#zip_safe=False,
32+
# zip_safe=False,
3233
classifiers=[
33-
'Development Status :: 2 - Pre-Alpha',
34+
'Development Status :: 3 - Alpha',
35+
'Intended Audience :: Science/Research',
3436
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
3537
'Programming Language :: Python :: 3',
3638
'Programming Language :: Python :: 3.7',

sldatasets/Datasetloader.py

+1-1
Original file line number · Diff line number · Diff line change
@@ -69,7 +69,7 @@ def load_annotations(self):
6969
from gdown import download
7070
logging.info(f"Dowloading Positions to {outfile}")
7171
download(self.x.get_pos_url(), outfile, quiet=False)
72-
return np.load(outfile)
72+
return np.load(outfile, allow_pickle=True)
7373

7474

7575
class LSA64(Datasetloader):

sldatasets/body.py sldatasets/annotations.py

+16
Original file line number · Diff line number · Diff line change
@@ -13,6 +13,22 @@ def _include_part(part_list, part_idx):
1313
return False, None
1414

1515

16+
class Frame_annotation(object):
17+
18+
def __init__(self, hum):
19+
self.h = hum
20+
21+
def get(self):
22+
dic = {}
23+
for part in self.h.body_parts.values():
24+
dic[part.get_part_name().name] = (part.x, part.y, part.score)
25+
return dic
26+
27+
def pretty(self):
28+
import pprint
29+
pprint.pprint(self.get())
30+
31+
1632
class Human:
1733
"""
1834
body_parts: list of BodyPart

sldatasets/video_dataset.py

+4-2
Original file line number · Diff line number · Diff line change
@@ -18,13 +18,15 @@ def send(self, ignored_arg):
1818
anotation = self.loader.load_annotations()
1919
anot_index = specs['filename'].split('.')[0]
2020
try:
21-
for i, human in enumerate(anotation[anot_index]):
22-
l.append((frames[i], human))
21+
for i, man in enumerate(anotation[anot_index]):
22+
l.append((frames[i], man))
2323
return (l, specs)
2424
except TypeError:
2525
for f in frames:
2626
l.append((f, anotation[anot_index]))
2727
return (l, specs)
28+
except ModuleNotFoundError:
29+
print(anotation[anot_index].__class__)
2830

2931
def throw(self, type=None, value=None, traceback=None):
3032
raise StopIteration

0 commit comments

Comments
 (0)