jonigata committed
Commit 554b11d
1 Parent(s): 89c9fa5

initial commit
Dockerfile ADDED
@@ -0,0 +1,31 @@
+ # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+ # you will also find guides on how best to write your Dockerfile
+
+ FROM python:3.9
+
+ WORKDIR /code
+
+ COPY ./requirements.txt /code/requirements.txt
+
+ RUN apt-get update && apt-get upgrade -y && apt-get install -y libgl1-mesa-dev
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+ RUN mim install mmcv-full==1.7.0
+ RUN pip install mmdet mmpose
+
+ # Set up a new user named "user" with user ID 1000
+ RUN useradd -m -u 1000 user
+
+ # Switch to the "user" user
+ USER user
+
+ # Set home to the user's home directory
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+
+ # Set the working directory to the user's home directory
+ WORKDIR $HOME/app
+
+ # Copy the current directory contents into the container at $HOME/app setting the owner to the user
+ COPY --chown=user . $HOME/app
+
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -1,11 +1,12 @@
  ---
- title: PoseMaker2
- emoji: 📉
- colorFrom: green
- colorTo: purple
+ title: Demo Docker Gradio
+ emoji: 📈
+ colorFrom: indigo
+ colorTo: indigo
  sdk: docker
  pinned: false
- license: creativeml-openrail-m
+ license: apache-2.0
+ duplicated_from: sayakpaul/demo-docker-gradio
  ---
  
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
external/coco.py ADDED
@@ -0,0 +1,181 @@
+ dataset_info = dict(
+     dataset_name='coco',
+     paper_info=dict(
+         author='Lin, Tsung-Yi and Maire, Michael and '
+         'Belongie, Serge and Hays, James and '
+         'Perona, Pietro and Ramanan, Deva and '
+         r'Doll{\'a}r, Piotr and Zitnick, C Lawrence',
+         title='Microsoft coco: Common objects in context',
+         container='European conference on computer vision',
+         year='2014',
+         homepage='http://cocodataset.org/',
+     ),
+     keypoint_info={
+         0:
+         dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''),
+         1:
+         dict(
+             name='left_eye',
+             id=1,
+             color=[51, 153, 255],
+             type='upper',
+             swap='right_eye'),
+         2:
+         dict(
+             name='right_eye',
+             id=2,
+             color=[51, 153, 255],
+             type='upper',
+             swap='left_eye'),
+         3:
+         dict(
+             name='left_ear',
+             id=3,
+             color=[51, 153, 255],
+             type='upper',
+             swap='right_ear'),
+         4:
+         dict(
+             name='right_ear',
+             id=4,
+             color=[51, 153, 255],
+             type='upper',
+             swap='left_ear'),
+         5:
+         dict(
+             name='left_shoulder',
+             id=5,
+             color=[0, 255, 0],
+             type='upper',
+             swap='right_shoulder'),
+         6:
+         dict(
+             name='right_shoulder',
+             id=6,
+             color=[255, 128, 0],
+             type='upper',
+             swap='left_shoulder'),
+         7:
+         dict(
+             name='left_elbow',
+             id=7,
+             color=[0, 255, 0],
+             type='upper',
+             swap='right_elbow'),
+         8:
+         dict(
+             name='right_elbow',
+             id=8,
+             color=[255, 128, 0],
+             type='upper',
+             swap='left_elbow'),
+         9:
+         dict(
+             name='left_wrist',
+             id=9,
+             color=[0, 255, 0],
+             type='upper',
+             swap='right_wrist'),
+         10:
+         dict(
+             name='right_wrist',
+             id=10,
+             color=[255, 128, 0],
+             type='upper',
+             swap='left_wrist'),
+         11:
+         dict(
+             name='left_hip',
+             id=11,
+             color=[0, 255, 0],
+             type='lower',
+             swap='right_hip'),
+         12:
+         dict(
+             name='right_hip',
+             id=12,
+             color=[255, 128, 0],
+             type='lower',
+             swap='left_hip'),
+         13:
+         dict(
+             name='left_knee',
+             id=13,
+             color=[0, 255, 0],
+             type='lower',
+             swap='right_knee'),
+         14:
+         dict(
+             name='right_knee',
+             id=14,
+             color=[255, 128, 0],
+             type='lower',
+             swap='left_knee'),
+         15:
+         dict(
+             name='left_ankle',
+             id=15,
+             color=[0, 255, 0],
+             type='lower',
+             swap='right_ankle'),
+         16:
+         dict(
+             name='right_ankle',
+             id=16,
+             color=[255, 128, 0],
+             type='lower',
+             swap='left_ankle')
+     },
+     skeleton_info={
+         0:
+         dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]),
+         1:
+         dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]),
+         2:
+         dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]),
+         3:
+         dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]),
+         4:
+         dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]),
+         5:
+         dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]),
+         6:
+         dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]),
+         7:
+         dict(
+             link=('left_shoulder', 'right_shoulder'),
+             id=7,
+             color=[51, 153, 255]),
+         8:
+         dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]),
+         9:
+         dict(
+             link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]),
+         10:
+         dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]),
+         11:
+         dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]),
+         12:
+         dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]),
+         13:
+         dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]),
+         14:
+         dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]),
+         15:
+         dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]),
+         16:
+         dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]),
+         17:
+         dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]),
+         18:
+         dict(
+             link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255])
+     },
+     joint_weights=[
+         1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5,
+         1.5
+     ],
+     sigmas=[
+         0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062,
+         0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089
+     ])
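A minimal sketch (not part of this commit) of how the `swap` fields in `keypoint_info` above can be turned into the left/right flip pairs used for horizontal-flip augmentation; it assumes `dataset_info` is the dict defined in external/coco.py:

```python
# Map keypoint names back to their integer ids.
name_to_id = {v['name']: k for k, v in dataset_info['keypoint_info'].items()}

# Every keypoint with a non-empty `swap` forms an unordered flip pair.
flip_pairs = sorted({
    tuple(sorted((k, name_to_id[v['swap']])))
    for k, v in dataset_info['keypoint_info'].items() if v['swap']
})
print(flip_pairs)
# [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16)]
```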
external/default_runtime.py ADDED
@@ -0,0 +1,20 @@
+ checkpoint_config = dict(interval=10)
+
+ log_config = dict(
+     interval=50,
+     hooks=[
+         dict(type='TextLoggerHook'),
+         # dict(type='TensorboardLoggerHook')
+         # dict(type='PaviLoggerHook') # for internal services
+     ])
+
+ log_level = 'INFO'
+ load_from = None
+ resume_from = None
+ dist_params = dict(backend='nccl')
+ workflow = [('train', 1)]
+
+ # disable opencv multithreading to avoid system being overloaded
+ opencv_num_threads = 0
+ # set multi-process start method as `fork` to speed up the training
+ mp_start_method = 'fork'
external/faster_rcnn_r50_fpn_coco.py ADDED
@@ -0,0 +1,182 @@
+ checkpoint_config = dict(interval=1)
+ # yapf:disable
+ log_config = dict(
+     interval=50,
+     hooks=[
+         dict(type='TextLoggerHook'),
+         # dict(type='TensorboardLoggerHook')
+     ])
+ # yapf:enable
+ dist_params = dict(backend='nccl')
+ log_level = 'INFO'
+ load_from = None
+ resume_from = None
+ workflow = [('train', 1)]
+ # optimizer
+ optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
+ optimizer_config = dict(grad_clip=None)
+ # learning policy
+ lr_config = dict(
+     policy='step',
+     warmup='linear',
+     warmup_iters=500,
+     warmup_ratio=0.001,
+     step=[8, 11])
+ total_epochs = 12
+
+ model = dict(
+     type='FasterRCNN',
+     pretrained='torchvision://resnet50',
+     backbone=dict(
+         type='ResNet',
+         depth=50,
+         num_stages=4,
+         out_indices=(0, 1, 2, 3),
+         frozen_stages=1,
+         norm_cfg=dict(type='BN', requires_grad=True),
+         norm_eval=True,
+         style='pytorch'),
+     neck=dict(
+         type='FPN',
+         in_channels=[256, 512, 1024, 2048],
+         out_channels=256,
+         num_outs=5),
+     rpn_head=dict(
+         type='RPNHead',
+         in_channels=256,
+         feat_channels=256,
+         anchor_generator=dict(
+             type='AnchorGenerator',
+             scales=[8],
+             ratios=[0.5, 1.0, 2.0],
+             strides=[4, 8, 16, 32, 64]),
+         bbox_coder=dict(
+             type='DeltaXYWHBBoxCoder',
+             target_means=[.0, .0, .0, .0],
+             target_stds=[1.0, 1.0, 1.0, 1.0]),
+         loss_cls=dict(
+             type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
+         loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
+     roi_head=dict(
+         type='StandardRoIHead',
+         bbox_roi_extractor=dict(
+             type='SingleRoIExtractor',
+             roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
+             out_channels=256,
+             featmap_strides=[4, 8, 16, 32]),
+         bbox_head=dict(
+             type='Shared2FCBBoxHead',
+             in_channels=256,
+             fc_out_channels=1024,
+             roi_feat_size=7,
+             num_classes=80,
+             bbox_coder=dict(
+                 type='DeltaXYWHBBoxCoder',
+                 target_means=[0., 0., 0., 0.],
+                 target_stds=[0.1, 0.1, 0.2, 0.2]),
+             reg_class_agnostic=False,
+             loss_cls=dict(
+                 type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+             loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
+     # model training and testing settings
+     train_cfg=dict(
+         rpn=dict(
+             assigner=dict(
+                 type='MaxIoUAssigner',
+                 pos_iou_thr=0.7,
+                 neg_iou_thr=0.3,
+                 min_pos_iou=0.3,
+                 match_low_quality=True,
+                 ignore_iof_thr=-1),
+             sampler=dict(
+                 type='RandomSampler',
+                 num=256,
+                 pos_fraction=0.5,
+                 neg_pos_ub=-1,
+                 add_gt_as_proposals=False),
+             allowed_border=-1,
+             pos_weight=-1,
+             debug=False),
+         rpn_proposal=dict(
+             nms_pre=2000,
+             max_per_img=1000,
+             nms=dict(type='nms', iou_threshold=0.7),
+             min_bbox_size=0),
+         rcnn=dict(
+             assigner=dict(
+                 type='MaxIoUAssigner',
+                 pos_iou_thr=0.5,
+                 neg_iou_thr=0.5,
+                 min_pos_iou=0.5,
+                 match_low_quality=False,
+                 ignore_iof_thr=-1),
+             sampler=dict(
+                 type='RandomSampler',
+                 num=512,
+                 pos_fraction=0.25,
+                 neg_pos_ub=-1,
+                 add_gt_as_proposals=True),
+             pos_weight=-1,
+             debug=False)),
+     test_cfg=dict(
+         rpn=dict(
+             nms_pre=1000,
+             max_per_img=1000,
+             nms=dict(type='nms', iou_threshold=0.7),
+             min_bbox_size=0),
+         rcnn=dict(
+             score_thr=0.05,
+             nms=dict(type='nms', iou_threshold=0.5),
+             max_per_img=100)
+         # soft-nms is also supported for rcnn testing
+         # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
+     ))
+
+ dataset_type = 'CocoDataset'
+ data_root = 'data/coco'
+ img_norm_cfg = dict(
+     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ train_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='LoadAnnotations', with_bbox=True),
+     dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+     dict(type='RandomFlip', flip_ratio=0.5),
+     dict(type='Normalize', **img_norm_cfg),
+     dict(type='Pad', size_divisor=32),
+     dict(type='DefaultFormatBundle'),
+     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+ ]
+ test_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(
+         type='MultiScaleFlipAug',
+         img_scale=(1333, 800),
+         flip=False,
+         transforms=[
+             dict(type='Resize', keep_ratio=True),
+             dict(type='RandomFlip'),
+             dict(type='Normalize', **img_norm_cfg),
+             dict(type='Pad', size_divisor=32),
+             dict(type='DefaultFormatBundle'),
+             dict(type='Collect', keys=['img']),
+         ])
+ ]
+ data = dict(
+     samples_per_gpu=2,
+     workers_per_gpu=2,
+     train=dict(
+         type=dataset_type,
+         ann_file=f'{data_root}/annotations/instances_train2017.json',
+         img_prefix=f'{data_root}/train2017/',
+         pipeline=train_pipeline),
+     val=dict(
+         type=dataset_type,
+         ann_file=f'{data_root}/annotations/instances_val2017.json',
+         img_prefix=f'{data_root}/val2017/',
+         pipeline=test_pipeline),
+     test=dict(
+         type=dataset_type,
+         ann_file=f'{data_root}/annotations/instances_val2017.json',
+         img_prefix=f'{data_root}/val2017/',
+         pipeline=test_pipeline))
+ evaluation = dict(interval=1, metric='bbox')
external/hrnet_w48_coco_256x192.py ADDED
@@ -0,0 +1,169 @@
+ _base_ = [
+     'default_runtime.py',
+     'coco.py'
+ ]
+ evaluation = dict(interval=10, metric='mAP', save_best='AP')
+
+ optimizer = dict(
+     type='Adam',
+     lr=5e-4,
+ )
+ optimizer_config = dict(grad_clip=None)
+ # learning policy
+ lr_config = dict(
+     policy='step',
+     warmup='linear',
+     warmup_iters=500,
+     warmup_ratio=0.001,
+     step=[170, 200])
+ total_epochs = 210
+ channel_cfg = dict(
+     num_output_channels=17,
+     dataset_joints=17,
+     dataset_channel=[
+         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+     ],
+     inference_channel=[
+         0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
+     ])
+
+ # model settings
+ model = dict(
+     type='TopDown',
+     pretrained='https://download.openmmlab.com/mmpose/'
+     'pretrain_models/hrnet_w48-8ef0771d.pth',
+     backbone=dict(
+         type='HRNet',
+         in_channels=3,
+         extra=dict(
+             stage1=dict(
+                 num_modules=1,
+                 num_branches=1,
+                 block='BOTTLENECK',
+                 num_blocks=(4, ),
+                 num_channels=(64, )),
+             stage2=dict(
+                 num_modules=1,
+                 num_branches=2,
+                 block='BASIC',
+                 num_blocks=(4, 4),
+                 num_channels=(48, 96)),
+             stage3=dict(
+                 num_modules=4,
+                 num_branches=3,
+                 block='BASIC',
+                 num_blocks=(4, 4, 4),
+                 num_channels=(48, 96, 192)),
+             stage4=dict(
+                 num_modules=3,
+                 num_branches=4,
+                 block='BASIC',
+                 num_blocks=(4, 4, 4, 4),
+                 num_channels=(48, 96, 192, 384))),
+     ),
+     keypoint_head=dict(
+         type='TopdownHeatmapSimpleHead',
+         in_channels=48,
+         out_channels=channel_cfg['num_output_channels'],
+         num_deconv_layers=0,
+         extra=dict(final_conv_kernel=1, ),
+         loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
+     train_cfg=dict(),
+     test_cfg=dict(
+         flip_test=True,
+         post_process='default',
+         shift_heatmap=True,
+         modulate_kernel=11))
+
+ data_cfg = dict(
+     image_size=[192, 256],
+     heatmap_size=[48, 64],
+     num_output_channels=channel_cfg['num_output_channels'],
+     num_joints=channel_cfg['dataset_joints'],
+     dataset_channel=channel_cfg['dataset_channel'],
+     inference_channel=channel_cfg['inference_channel'],
+     soft_nms=False,
+     nms_thr=1.0,
+     oks_thr=0.9,
+     vis_thr=0.2,
+     use_gt_bbox=False,
+     det_bbox_thr=0.0,
+     bbox_file='data/coco/person_detection_results/'
+     'COCO_val2017_detections_AP_H_56_person.json',
+ )
+
+ train_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='TopDownGetBboxCenterScale', padding=1.25),
+     dict(type='TopDownRandomShiftBboxCenter', shift_factor=0.16, prob=0.3),
+     dict(type='TopDownRandomFlip', flip_prob=0.5),
+     dict(
+         type='TopDownHalfBodyTransform',
+         num_joints_half_body=8,
+         prob_half_body=0.3),
+     dict(
+         type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
+     dict(type='TopDownAffine'),
+     dict(type='ToTensor'),
+     dict(
+         type='NormalizeTensor',
+         mean=[0.485, 0.456, 0.406],
+         std=[0.229, 0.224, 0.225]),
+     dict(type='TopDownGenerateTarget', sigma=2),
+     dict(
+         type='Collect',
+         keys=['img', 'target', 'target_weight'],
+         meta_keys=[
+             'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
+             'rotation', 'bbox_score', 'flip_pairs'
+         ]),
+ ]
+
+ val_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='TopDownGetBboxCenterScale', padding=1.25),
+     dict(type='TopDownAffine'),
+     dict(type='ToTensor'),
+     dict(
+         type='NormalizeTensor',
+         mean=[0.485, 0.456, 0.406],
+         std=[0.229, 0.224, 0.225]),
+     dict(
+         type='Collect',
+         keys=['img'],
+         meta_keys=[
+             'image_file', 'center', 'scale', 'rotation', 'bbox_score',
+             'flip_pairs'
+         ]),
+ ]
+
+ test_pipeline = val_pipeline
+
+ data_root = 'data/coco'
+ data = dict(
+     samples_per_gpu=32,
+     workers_per_gpu=2,
+     val_dataloader=dict(samples_per_gpu=32),
+     test_dataloader=dict(samples_per_gpu=32),
+     train=dict(
+         type='TopDownCocoDataset',
+         ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
+         img_prefix=f'{data_root}/train2017/',
+         data_cfg=data_cfg,
+         pipeline=train_pipeline,
+         dataset_info={{_base_.dataset_info}}),
+     val=dict(
+         type='TopDownCocoDataset',
+         ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
+         img_prefix=f'{data_root}/val2017/',
+         data_cfg=data_cfg,
+         pipeline=val_pipeline,
+         dataset_info={{_base_.dataset_info}}),
+     test=dict(
+         type='TopDownCocoDataset',
+         ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
+         img_prefix=f'{data_root}/val2017/',
+         data_cfg=data_cfg,
+         pipeline=test_pipeline,
+         dataset_info={{_base_.dataset_info}}),
+ )
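The two config files above follow the mmcv config format (including the `{{_base_.dataset_info}}` base-variable substitution), so they can be loaded and inspected directly — a quick sketch, assuming the mmcv-full 1.7.0 installed by the Dockerfile:

```python
from mmcv import Config

# Resolves _base_ includes (default_runtime.py, coco.py) and substitutions.
cfg = Config.fromfile('external/hrnet_w48_coco_256x192.py')
print(cfg.model.backbone.type)  # 'HRNet'
print(cfg.data.test.type)       # 'TopDownCocoDataset'
```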
faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:047c8118fc5ca88ba5ae1fab72f2cd6b070501fe3af2f3cba5cfa9a89b44b03e
+ size 167287506
fileservice.py ADDED
@@ -0,0 +1,35 @@
+ from fastapi import FastAPI, Request, Response
+
+ filenames = ["js/poseMaker.js"]
+ contents = '\n'.join([open(x).read() for x in filenames])
+
+ app = FastAPI()
+
+ @app.middleware("http")
+ async def insert_js(request: Request, call_next):
+     path = request.scope['path']  # get the request route
+     response = await call_next(request)
+
+     if path == "/":
+         response_body = ""
+         async for chunk in response.body_iterator:
+             response_body += chunk.decode()
+
+         some_javascript = f"""
+         <script type="text/javascript" defer>
+             {contents}
+         </script>
+         """
+
+         response_body = response_body.replace("</body>", some_javascript + "</body>")
+
+         del response.headers["content-length"]
+
+         return Response(
+             content=response_body,
+             status_code=response.status_code,
+             headers=dict(response.headers),
+             media_type=response.media_type
+         )
+
+     return response
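A small self-contained sketch (assuming `fastapi` and its test client are installed; the route and injected script are hypothetical stand-ins for the Gradio app and poseMaker.js) showing the rewrite that the middleware above performs on the root page:

```python
from fastapi import FastAPI, Request, Response
from fastapi.responses import HTMLResponse
from fastapi.testclient import TestClient

demo_app = FastAPI()

@demo_app.get("/")
def index():
    # Stand-in for the Gradio page that main.py mounts at "/".
    return HTMLResponse("<html><body><h1>demo</h1></body></html>")

@demo_app.middleware("http")
async def insert_js(request: Request, call_next):
    response = await call_next(request)
    if request.scope["path"] != "/":
        return response
    # Buffer the streamed body, splice the script in before </body>,
    # and drop the now-stale content-length header.
    body = b"".join([chunk async for chunk in response.body_iterator]).decode()
    body = body.replace("</body>", "<script>/* injected */</script></body>")
    del response.headers["content-length"]
    return Response(content=body, status_code=response.status_code,
                    headers=dict(response.headers), media_type=response.media_type)

client = TestClient(demo_app)
assert "<script>/* injected */</script>" in client.get("/").text
```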
hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9e0b3ab0439cb68e166c7543e59d2587cd8d7e9acf5ea62a8378eeb82fb50e5
+ size 255011654
js/poseMaker.js ADDED
@@ -0,0 +1,793 @@
+ console.log("hello from poseEditor.js")
+ var canvas = null;
+ var ctx = null;
+
+ const wheelDisplayTime = 500;
+
+ const limbSeq = [
+   [1, 2], [2, 3], [3, 4],      // right arm
+   [1, 5], [5, 6], [6, 7],      // left arm
+   [1, 8], [8, 9], [9, 10],     // right torso -> right leg
+   [1, 11], [11, 12], [12, 13], // left torso -> left leg
+   [1, 0],                      // neck
+   [0, 14], [14, 16],           // right eye
+   [0, 15], [15, 17]            // left eye
+ ];
+
+ function findParentNodeIndex(nodeIndex) {
+   // If the second element of a limbSeq entry equals nodeIndex, return that entry's first element.
+   // Returns -1 if no parent is found.
+   const limbIndex = limbSeq.findIndex((limb) => limb[1] === nodeIndex);
+   return limbIndex === -1 ? -1 : limbSeq[limbIndex][0];
+ }
+
+ function cutOffLimb(pose, cutOffIndex) {
+   console.log(`cutOffLimb: ${cutOffIndex}`);
+   // Delete the coordinates of every node beyond the cut-off joint.
+   var newPose = deepCopy(pose);
+   for (let i = 0; i < 18; i++) {
+     if (newPose[i] == null) {continue;}
+     // Walk up toward the root; if cutOffIndex lies on the path, delete this node.
+     var curr = i;
+     while (curr !== 1) {
+       console.log(`checking: ${i} -> ${curr}`);
+       let parent = findParentNodeIndex(curr);
+       if (parent === cutOffIndex) {
+         console.log(`cutOffLimb: ${i} -> ${cutOffIndex}`);
+         newPose[i] = null;
+         break;
+       }
+       curr = parent;
+     }
+   }
+   return newPose;
+ }
+
+ function repairPose(sourcePose) {
+   // TODO: may not handle loops
+   var pose = sourcePose;
+   var newPose = new Array(18);
+   for (var k = 0; k < 3; k++) {
+     var processed = 0; // nodes repaired in this iteration
+     for (let i = 0; i < 18; i++) {
+       if (pose[i] == null) {
+         let parent = findParentNodeIndex(i);
+         if (parent === -1) {continue;} // should not happen
+         if (pose[parent] == null) {
+           console.log(`repair failed(A): ${i} -> parent loss`);
+           continue;
+         }
+
+         // Borrow the joint offset from the sample data.
+         var v = sampleCandidateSource[i].map((x, j) => x - sampleCandidateSource[parent][j]);
+         newPose[i] = pose[parent].map((x, j) => x + v[j]);
+         console.log(`repaired: ${i} -> ${newPose[i]}`);
+         processed++;
+       } else {
+         newPose[i] = pose[i].map(x => x);
+       }
+     }
+     if (processed === 0) {break;}
+     pose = newPose;
+   }
+   return newPose;
+ }
+
+ function deepCopy(arr) {
+   return JSON.parse(JSON.stringify(arr));
+ }
+
+ function distSq(p0, p1) {
+   return (p0[0] - p1[0]) ** 2 + (p0[1] - p1[1]) ** 2;
+ }
+
+ // poseData format: [[[x1, y1], [x2, y2], ...], [[x3, y3], [x4, y4], ...], ...]
+ // Each element is a person;
+ // each element of a person is a joint.
+
+ function poseDataToCandidateAndSubset(poseData) {
+   let candidate = [];
+   let subset = [];
+   for (let i = 0; i < poseData.length; i++) {
+     let person = poseData[i];
+     let subsetElement = [];
+     for (let j = 0; j < person.length; j++) {
+       candidate.push(person[j]);
+       subsetElement.push(candidate.length - 1);
+     }
+     subset.push(subsetElement);
+   }
+   return [candidate, subset];
+ }
+
+ // Sample data
+ const sampleCandidateSource = [[235, 158],[234, 220],[193, 222],[138, 263],[89, 308],[276, 220],[325, 264],[375, 309],[207, 347],[203, 433],[199, 523],[261, 347],[262, 430],[261, 522],[227, 148],[245, 148],[208, 158],[258, 154]].map((p) => [p[0], p[1] - 70]);
+ const sampleSubsetElementSource = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17];
+
+ // const sampleCandidateSource = [[618.00, 0.00], [618.00, 44.00], [304.00, 81.00], [482.00, 96.00], [66.00, 270.00], [171.00, 280.00], [618.00, 82.00], [307.00, 112.00], [460.00, 143.00], [0.00, 301.00], [65.00, 301.00], [172.00, 303.00], [584.00, 86.00], [275.00, 119.00], [420.00, 139.00], [0.00, 301.00], [41.00, 301.00], [144.00, 303.00], [544.00, 131.00], [348.00, 139.00], [262.00, 160.00], [0.00, 337.00], [52.00, 339.00], [130.00, 348.00], [570.00, 175.00], [283.00, 177.00], [78.00, 338.00], [172.00, 380.00], [651.00, 78.00], [338.00, 111.00], [505.00, 144.00], [92.00, 301.00], [198.00, 305.00], [661.00, 132.00], [349.00, 156.00], [541.00, 179.00], [106.00, 336.00], [203.00, 348.00], [305.00, 159.00], [665.00, 160.00], [563.00, 192.00], [80.00, 343.00], [181.00, 385.00], [614.00, 205.00], [291.00, 220.00], [432.00, 320.00], [152.00, 372.00], [43.00, 380.00], [0.00, 386.00], [623.00, 281.00], [306.00, 290.00], [92.00, 357.00], [509.00, 434.00], [304.00, 357.00], [622.00, 368.00], [47.00, 394.00], [0.00, 395.00], [142.00, 405.00], [535.00, 565.00], [655.00, 200.00], [337.00, 217.00], [467.00, 322.00], [191.00, 372.00], [83.00, 375.00], [344.00, 282.00], [655.00, 282.00], [103.00, 343.00], [237.00, 368.00], [22.00, 377.00], [0.00, 379.00], [460.00, 459.00], [305.00, 352.00], [638.00, 355.00], [0.00, 401.00], [110.00, 412.00], [411.00, 570.00], [608.00, 0.00], [608.00, 40.00], [297.00, 75.00], [469.00, 84.00], [0.00, 261.00], [58.00, 263.00], [165.00, 275.00], [625.00, 0.00], [625.00, 39.00], [309.00, 74.00], [486.00, 83.00], [71.00, 264.00], [180.00, 276.00], [599.00, 0.00], [599.00, 44.00], [284.00, 80.00], [440.00, 93.00], [48.00, 271.00], [0.00, 272.00], [157.00, 277.00], [634.00, 0.00], [633.00, 41.00], [319.00, 77.00], [79.00, 269.00], [190.00, 277.00]];
+ // const sampleSubsetElementSource = [1.00,6.00,12.00,18.00,24.00,28.00,33.00,39.00,43.00,49.00,54.00,59.00,65.00,72.00,77.00,84.00,90.00,97.00,32.98,18.00],[5.00,11.00,17.00,23.00,27.00,32.00,37.00,42.00,46.00,-1.00,-1.00,62.00,67.00,-1.00,82.00,88.00,95.00,100.00,25.45,15.00],[4.00,10.00,16.00,22.00,26.00,31.00,36.00,41.00,47.00,51.00,57.00,63.00,66.00,74.00,81.00,87.00,93.00,99.00,26.97,18.00],[3.00,8.00,14.00,19.00,25.00,30.00,35.00,40.00,45.00,52.00,58.00,61.00,70.00,75.00,79.00,86.00,92.00,-1.00,30.45,17.00],[2.00,7.00,13.00,20.00,-1.00,29.00,34.00,38.00,44.00,50.00,53.00,60.00,64.00,71.00,78.00,85.00,91.00,98.00,27.89,17.00],[0.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,76.00,83.00,-1.00,96.00,3.33,4.00];
+
+ function makePoseFromCandidateAndSubsetElement(candidate, subsetElement) {
+   var pose = [];
+   for (let j = 0 ; j < 18; j++) {
+     let i = subsetElement[j];
+     pose.push(i < 0 || candidate[i] == null ? null : candidate[i].map((x)=>x));
+   }
+   return pose;
+ }
+
+ function makePoseDataFromCandidateAndSubset(candidate, subset) {
+   return subset.map(subsetElement => makePoseFromCandidateAndSubsetElement(candidate, subsetElement));
+ }
+
+ function addPerson() {
+   var dx = Math.random() * 100;
+   var dy = Math.random() * 100;
+
+   poseData.push(
+     makePoseFromCandidateAndSubsetElement(
+       sampleCandidateSource.map(point => [point[0] + dx, point[1] + dy]),
+       sampleSubsetElementSource));
+
+   addHistory();
+   Redraw();
+ }
+
+ function removePerson(personIndex) {
+   poseData.splice(personIndex, 1);
+   addHistory();
+   Redraw();
+ }
+
+ function repairPerson(personIndex) {
+   poseData[personIndex] = repairPose(poseData[personIndex]);
+   addHistory();
+   Redraw();
+ }
+
+ function cutOffPersonLimb(personIndex, limbIndex) {
+   poseData[personIndex] = cutOffLimb(poseData[personIndex], limbIndex);
+   console.log(poseData[personIndex]);
+   console.log(poseData);
+   addHistory();
+   Redraw();
+ }
+
+ // Flags for keys held down during a drag
+ var keyDownFlags = {};
+ // Mouse cursor position
+ var mouseCursor = [-1, -1];
+
+ function cross(lhs, rhs) {return lhs[0] * rhs[1] - lhs[1] * rhs[0];}
+ function dot(lhs, rhs) {return lhs[0] * rhs[0] + lhs[1] * rhs[1];}
+ function directedAngleTo(lhs, rhs) {return Math.atan2(cross(lhs, rhs), dot(lhs, rhs));}
+
+ function isMouseOnCanvas() {
+   // Check whether mouseCursor is inside the canvas bounds.
+   var rect = canvas.getBoundingClientRect();
+   var f = 0 <= mouseCursor[0] && mouseCursor[0] <= rect.width && 0 <= mouseCursor[1] && mouseCursor[1] <= rect.height;
+   return f;
+ }
+
+ function clearCanvas() {
+   var w = canvas.width;
+   var h = canvas.height;
+   ctx.fillStyle = 'black';
+   ctx.fillRect(0, 0, w, h);
+ }
+
+ function resizeCanvas(width, height) {
+   canvas.width = width ? width : canvas.width;
+   canvas.height = height ? height : canvas.height;
+   Redraw();
+ }
+
+ function calculateCenter(shape) {
+   var center = shape.reduce(function(acc, point) {
+     if (point !== null) {
+       acc[0] += point[0];
+       acc[1] += point[1];
+     }
+     return acc;
+   }, [0, 0]);
+   center[0] /= shape.length;
+   center[1] /= shape.length;
+   return center;
+ }
+
+ // v2d -> v3d
+ function rotateX(vector, angle) {
+   var x = vector[0];
+   var y = vector[1];
+   var z = 0;
+
+   // Rotate around the X axis
+   var x1 = x;
+   var y1 = y * Math.cos(angle) - z * Math.sin(angle);
+   var z1 = y * Math.sin(angle) + z * Math.cos(angle);
+
+   return [x1, y1, z1];
+ }
+
+ // v2d -> v3d
+ function rotateY(vector, angle) {
+   var x = vector[0];
+   var y = vector[1];
+   var z = 0;
+
+   // Rotate around the Y axis
+   var x1 = x * Math.cos(angle) + z * Math.sin(angle);
+   var y1 = y;
+   var z1 = -x * Math.sin(angle) + z * Math.cos(angle);
+
+   return [x1, y1, z1];
+ }
+
+ // v3d -> v2d
+ function perspectiveProjection(vector, cameraDistance) {
+   var x = vector[0];
+   var y = vector[1];
+   var z = vector[2];
+
+   if (z === 0) {
+     return [x, y];
+   }
+
+   var scale = cameraDistance / (cameraDistance - z);
+   var x1 = x * scale;
+   var y1 = y * scale;
+
+   return [x1, y1];
+ }
+
+ // v2d -> v2d
+ function rotateAndProject(f, p, c, angle) {
+   var v = [p[0] - c[0], p[1] - c[1]];
+   var v1 = f(v, angle);
+   var v2 = perspectiveProjection(v1, 500);
+   return [v2[0] + c[0], v2[1] + c[1]];
+ }
+
+ function drawBodyPose() {
+   let stickWidth = 4;
+   let imageSize = Math.min(canvas.width, canvas.height);
+   stickWidth *= imageSize / 512;
+
+   const colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
+                   [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
+                   [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]];
+
+   ctx.globalAlpha = 0.6;
+
+   // edges
+   for (let i = 0; i < poseData.length; i++) {
+     const pose = poseData[i];
+
+     for (let j = 0; j < 17; j++) {
+       const p = pose[limbSeq[j][0]];
+       const q = pose[limbSeq[j][1]];
+       if (p == null || q == null) continue;
+       const [X0, Y0] = p;
+       const [X1, Y1] = q;
+       let angle = Math.atan2(Y1 - Y0, X1 - X0);
+       let magnitude = ((X0 - X1) ** 2 + (Y0 - Y1) ** 2) ** 0.5;
+       let polygon = new Path2D();
+       polygon.ellipse((X0+X1)/2, (Y0+Y1)/2, magnitude / 2, stickWidth, angle, 0, 2 * Math.PI);
+       ctx.fillStyle = `rgb(${colors[j].join(',')})`;
+       ctx.fill(polygon);
+     }
+   }
+
+   ctx.globalAlpha = 1.0;
+
+   // nodes
+   for (let i = 0; i < poseData.length; i++) {
+     const pose = poseData[i];
+
+     ctx.font = '12px serif';
+     for (let j = 0; j < 18; j++) {
+       const p = pose[j];
+       if (p == null) continue;
+       const [x, y] = p;
+       ctx.beginPath();
+       ctx.arc(x, y, stickWidth, 0, 2 * Math.PI);
+       ctx.fillStyle = `rgb(${colors[j].join(',')})`;
+       ctx.fill();
+       // ctx.fillStyle = 'rgb(255,255,255)'
+       // ctx.fillText(j, x-3, y+4);
+     }
+   }
+ }
+
+ let lastWheeling = 0;
+
+ function drawUI() {
+   if (keyDownFlags['Space'] || keyDownFlags['BracketLeft'] || keyDownFlags['BracketRight'] ||
+       new Date().getTime() - lastWheeling < wheelDisplayTime) {
+     ctx.beginPath();
+     ctx.lineWidth = 4;
+     ctx.arc(mouseCursor[0], mouseCursor[1], dragRange, 0, 2 * Math.PI);
+     ctx.strokeStyle = 'rgb(255,255,255)';
+     ctx.stroke();
+   }
+
+   if (isDragging && (dragMode == "rotate" || dragMode == "rotate2")) {
+     ctx.beginPath();
+     ctx.lineWidth = 1;
+     ctx.strokeStyle = 'rgb(255,255,255)';
+     ctx.moveTo(dragStart[0], dragStart[1]);
+     ctx.lineTo(dragStart[0]+rotateBaseVector[0], dragStart[1]+rotateBaseVector[1]);
+     ctx.stroke();
+   }
+
+   let operationTextFlags = {
+     "Space": "Range Move",
+     "AltLeft": "Body Move",
+     "AltRight": "Body Move",
+     "ControlLeft": "Scale",
+     "ControlRight": "Scale",
+     "ShiftLeft": "Rotate",
+     "ShiftRight": "Rotate",
+     "KeyQ": "CutOff",
+     "KeyD": "Delete",
+     "KeyX": "X-Axis",
+     "KeyC": "Y-Axis",
+     "KeyR": "Repair",
+   }
+
+   // Collect the labels of the operation keys currently held down.
+   let activeOperations = Object.keys(operationTextFlags).filter(key => keyDownFlags[key]);
+   if (activeOperations.length > 0) {
+     // Show the first one in the top-left corner.
+     ctx.font = '20px serif';
+     ctx.fillStyle = 'rgb(255,255,255)';
+     ctx.fillText(operationTextFlags[activeOperations[0]], 10, 30);
+   }
+ }
+
+ function Redraw() {
+   clearCanvas();
+   drawBodyPose();
+   drawUI();
+ }
+
+ function getNearestNode(p) {
+   let minDistSq = Infinity;
+   let personIndex = -1;
+   let nodeIndex = -1;
+   for (let i = 0; i < poseData.length; i++) {
+     const pose = poseData[i];
+     for (let j = 0; j < pose.length; j++) {
+       const q = pose[j];
+       if (q == null) continue;
+       const d = distSq(p, q);
+       if (d < minDistSq) {
+         minDistSq = d;
+         personIndex = i;
+         nodeIndex = j;
+       }
+     }
+   }
+   return [personIndex, nodeIndex, Math.sqrt(minDistSq)];
+ }
+
+ let dragRange = 64;
+ let dragRangeDelta = 16;
+
+ // State kept while dragging
+ let isDragging = false;
+ let dragStart = [0, 0];
+ let dragPersonIndex = -1;
+ let dragMarks = [];
+ let dragMode = "";
+ let rotateBaseVector = null;
+ let history = [];
+ let historyIndex = 0;
+
+ function clearHistory() {
+   history = [];
+   historyIndex = 0;
+ }
+
+ function addHistory() {
+   history = history.slice(0, historyIndex);
+   history.push(JSON.parse(JSON.stringify(poseData)));
+   historyIndex = history.length;
+ }
+
+ function undo() {
+   if (1 < historyIndex) {
+     historyIndex--;
+     poseData = deepCopy(history[historyIndex-1]);
+     Redraw();
+   }
+ }
+
+ function redo() {
+   if (historyIndex < history.length) {
+     historyIndex++;
+     poseData = deepCopy(history[historyIndex-1]);
+     Redraw();
+   }
+ }
+
+ function fetchLatestPoseData() {
+   return history[historyIndex-1];
+ }
+
+ function getCanvasPosition(event) {
+   const rect = canvas.getBoundingClientRect();
+   const x = event.clientX - rect.left;
+   const y = event.clientY - rect.top;
+   return [x, y];
+ }
+
+ function forEachMarkedNodes(fn) {
+   for (let i = 0; i < dragMarks.length; i++) {
+     for (let j = 0; j < dragMarks[i].length; j++) {
+       if (dragMarks[i][j]) {
+         fn(i, j, poseData[i][j]);
+       }
+     }
+   }
+ }
+
+ // Called when the mouse is pressed on the canvas
+ function handleMouseDown(event) {
+   const p = getCanvasPosition(event);
+   const [personIndex, nodeIndex, minDist] = getNearestNode(p);
+
+   if (keyDownFlags["KeyD"]) {removePerson(personIndex);return;}
+   if (keyDownFlags["KeyR"]) {repairPerson(personIndex);return;}
+
+   if (keyDownFlags["KeyQ"] && minDist < 16) {
+     console.log("pressed KeyQ");
+     cutOffPersonLimb(personIndex, nodeIndex);
+     return;
+   }
+
+   // Start a drag operation
+   dragStart = p;
+   dragMarks = poseData.map(pose => pose.map(node => false));
+
+   if (event.altKey || event.ctrlKey || event.shiftKey ||
+       keyDownFlags["KeyX"] || keyDownFlags["KeyC"]) {
+     // Mark every node of the person
+     dragMarks[personIndex] =
+       poseData[personIndex].map((node) => node != null);
+     isDragging = true;
+     if (event.altKey) {
+       dragMode = "move";
+     } else if (event.ctrlKey) {
+       dragMode = "scale";
+     } else if (event.shiftKey) {
+       dragMode = "rotate";
+       rotateBaseVector = [0, 0];
+     } else if (keyDownFlags["KeyX"]) {
+       dragMode = "rotateX";
+     } else if (keyDownFlags["KeyC"]) {
+       dragMode = "rotateY";
+     }
+   } else if (keyDownFlags["Space"]) {
+     dragMarks[personIndex] =
+       poseData[personIndex].map(
+         (node) => node != null && distSq(p, node) < dragRange ** 2);
+     isDragging = dragMarks[personIndex].some((mark) => mark);
+     dragMode = "move";
+   } else if (minDist < 16) {
+     dragMarks[personIndex][nodeIndex] = true;
+     isDragging = true;
+     dragMode = "move";
+   }
+ }
+
+ // Called when the mouse moves over the canvas
+ function handleMouseMove(event) {
+   mouseCursor = getCanvasPosition(event);
+   if (isDragging) {
+     const p = getCanvasPosition(event);
+     const dragOffset = [p[0] - dragStart[0], p[1] - dragStart[1]];
+     const latestPoseData = fetchLatestPoseData();
+
+     if (dragMode == "scale") {
+       // Scale
+       let xScale = 1 + dragOffset[0] / canvas.width;
+       let yScale = 1 + dragOffset[0] / canvas.height;
+       forEachMarkedNodes((i, j, node) => {
+         const lp = latestPoseData[i][j];
+         node[0] = (lp[0] - dragStart[0]) * xScale + dragStart[0];
+         node[1] = (lp[1] - dragStart[1]) * yScale + dragStart[1];
+       });
+     } else if (dragMode == "rotate") {
+       rotateBaseVector = dragOffset;
+       if (!event.shiftKey) {
+         dragMode = "rotate2";
+       }
+     } else if (dragMode == "rotate2") {
+       // Rotate
+       let angle = directedAngleTo(rotateBaseVector, dragOffset);
+       forEachMarkedNodes((i, j, node) => {
+         const lp = latestPoseData[i][j];
+         let x = lp[0] - dragStart[0];
+         let y = lp[1] - dragStart[1];
+         let sin = Math.sin(angle);
+         let cos = Math.cos(angle);
+         node[0] = x * cos - y * sin + dragStart[0];
+         node[1] = x * sin + y * cos + dragStart[1];
+       });
+     } else if (dragMode == "rotateX") {
+       const center = dragStart;
+       const angle = dragOffset[1] / -40;
+       forEachMarkedNodes((i, j, node) => {
+         const lp = latestPoseData[i][j];
+         const np = rotateAndProject(rotateX, lp, center, angle);
+         node[0] = np[0];
+         node[1] = np[1];
+       });
+     } else if (dragMode == "rotateY") {
+       const center = dragStart;
+       const angle = dragOffset[0] / 40;
+       forEachMarkedNodes((i, j, node) => {
+         const lp = latestPoseData[i][j];
+         const np = rotateAndProject(rotateY, lp, center, angle);
+         node[0] = np[0];
+         node[1] = np[1];
+       });
+     } else if (dragMode == "move") {
+       // Move
+       forEachMarkedNodes((i, j, node) => {
+         const lp = latestPoseData[i][j];
+         node[0] = lp[0] + dragOffset[0];
+         node[1] = lp[1] + dragOffset[1];
+       });
+     }
+   }
+
+   Redraw();
+ }
+
+ function handleMouseUp(event) {
+   isDragging = false;
+   addHistory();
+   Redraw();
+ }
+
+ function handleMouseLeave(event) {
+   mouseCursor = [-1,-1];
+ }
+
+ function ModifyDragRange(delta) { dragRange = Math.max(dragRangeDelta, Math.min(512, dragRange + delta)); }
+
+ document.addEventListener('wheel', function(event) {
+   if (!isMouseOnCanvas()) {return;}
+   if (!event.altKey && !keyDownFlags['Space']) {return;}
+
+   event.preventDefault();
+   const deltaY = event.deltaY;
+   if (deltaY < 0) {ModifyDragRange(-dragRangeDelta);}
+   if (0 < deltaY) {ModifyDragRange(dragRangeDelta);}
+   lastWheeling = new Date().getTime();
+   Redraw();
+   window.setTimeout(function() { Redraw(); }, wheelDisplayTime+10);
+ }, {passive: false});
+
+ document.addEventListener("keydown", (event) => {
+   if (!isMouseOnCanvas()) {return;}
+
+   if (event.code == "BracketLeft") { ModifyDragRange(-dragRangeDelta); }
+   if (event.code == "BracketRight") { ModifyDragRange(dragRangeDelta); }
+   keyDownFlags[event.code] = true;
+   Redraw();
+   event.preventDefault();
+ });
+ document.addEventListener("keyup", (event) => {
+   if (!isMouseOnCanvas()) {return;}
+
+   keyDownFlags[event.code] = false;
+   if (event.ctrlKey && event.code == "KeyE") {
+     addPerson();
+   } else if (event.ctrlKey && event.code == "KeyZ") {
+     if (event.shiftKey) {
+       redo();
+     } else {
+       undo();
+     }
+   }
+   Redraw();
+   event.preventDefault();
+ });
+
+ function initializeEditor() {
+   console.log("initializeEditor");
+
+   canvas = document.getElementById('canvas');
+   ctx = canvas.getContext('2d');
+
+   canvas.addEventListener('mousedown', handleMouseDown);
+   canvas.addEventListener('mousemove', handleMouseMove);
+   canvas.addEventListener('mouseup', handleMouseUp);
+   canvas.addEventListener('mouseleave', handleMouseLeave);
+   poseData = [];
+   clearHistory();
+ }
+
+ function importPose(jsonData) {
+   if (jsonData != null) {
+     var newPoseData = makePoseDataFromCandidateAndSubset(jsonData.candidate, jsonData.subset);
+   } else {
+     var newPoseData = makePoseDataFromCandidateAndSubset(sampleCandidateSource, [sampleSubsetElementSource]);
+   }
+   poseData = poseData.concat(newPoseData);
+   addHistory();
+   Redraw();
+ }
+
+ /*
+ function savePose() {
+   const canvasUrl = canvas.toDataURL();
+
+   const createEl = document.createElement('a');
+   createEl.href = canvasUrl;
+
+   // This is the name of our downloaded file
+   createEl.download = "pose.png";
+
+   createEl.click();
+   createEl.remove();
+
+   var [candidate, subset] = poseDataToCandidateAndSubset(poseData);
+   return {candidate: candidate, subset: subset};
+ }
+ */
+
+ // crc32
+ // Build the CRC32 lookup table
+ function initCrc32Table() {
+   const crcTable = new Uint32Array(256);
+   for (let i = 0; i < 256; i++) {
+     let c = i;
+     for (let j = 0; j < 8; j++) {
+       c = (c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1);
+     }
+     crcTable[i] = c;
+   }
+   return crcTable;
+ }
+
+ // Compute the CRC32 of data
+ function getCrc32(data, crc=0) {
+   const crcTable = initCrc32Table();
+   crc = (crc ^ 0xFFFFFFFF) >>> 0;
+   for (let i = 0; i < data.length; i++) {
+     crc = crcTable[(crc ^ data[i]) & 0xFF] ^ (crc >>> 8);
+   }
+   return (crc ^ 0xFFFFFFFF) >>> 0;
+ }
+
+ function stringToUint8Array(str) {
+   var arr = new Uint8Array(str.length);
+   for (var i = 0; i < str.length; i++) {
+     arr[i] = str.charCodeAt(i);
+   }
+   return arr;
+ }
+
+ function base64ToUint8Array(base64Str) {
+   return stringToUint8Array(atob(base64Str));
+ }
+
+ function visitPng(png, type) {
+   var dataLength;
+   var chunkType;
+   var nextChunkPos;
+   var Signature = String.fromCharCode(137, 80, 78, 71, 13, 10, 26, 10);
+   var rpos = 0;
+
+   // Verify the PNG signature
+   if (String.fromCharCode.apply(null, png.subarray(rpos, rpos += 8)) !== Signature) {
+     throw new Error('invalid signature');
+   }
+
+   // Scan the chunks
+   while (rpos < png.length) {
+     dataLength = (
+       (png[rpos++] << 24) |
+       (png[rpos++] << 16) |
+       (png[rpos++] << 8) |
+       (png[rpos++] )
+     ) >>> 0;
+
+     nextChunkPos = rpos + dataLength + 8;
+
+     chunkType = String.fromCharCode.apply(null, png.subarray(rpos, rpos += 4));
+
+     if (chunkType === type) {
+       return [rpos - 8, dataLength, nextChunkPos];
+     }
+
+     rpos = nextChunkPos;
+   }
+ }
+
+ function createChunk(type, data) {
+   var dataLength = data.length;
+   var chunk = new Uint8Array(4 + 4 + dataLength + 4);
+   var typeBytes = stringToUint8Array(type);
+   var pos = 0;
+
+   // length
+   chunk[pos++] = (dataLength >> 24) & 0xff;
+   chunk[pos++] = (dataLength >> 16) & 0xff;
+   chunk[pos++] = (dataLength >> 8) & 0xff;
+   chunk[pos++] = (dataLength ) & 0xff;
+
+   // type
+   chunk[pos++] = typeBytes[0];
+   chunk[pos++] = typeBytes[1];
+   chunk[pos++] = typeBytes[2];
+   chunk[pos++] = typeBytes[3];
+
+   // data
+   for (let i = 0; i < dataLength; ++i) {
+     chunk[pos++] = data[i];
+   }
+
+   // crc (over type + data)
+   let crc = getCrc32(typeBytes);
+   crc = getCrc32(data, crc);
+   chunk[pos++] = (crc >> 24) & 0xff;
+   chunk[pos++] = (crc >> 16) & 0xff;
+   chunk[pos++] = (crc >> 8) & 0xff;
+   chunk[pos++] = (crc ) & 0xff;
+
+   return chunk;
+ }
+
+ function insertChunk(destBuffer, sourceBuffer, rpos, chunk) {
+   var pos = 0;
+
+   // Copy everything up to the insertion point
+   destBuffer.set(sourceBuffer.subarray(0, rpos), pos);
+   pos += rpos;
+
+   // Copy the new chunk
+   destBuffer.set(chunk, pos);
+   pos += chunk.length;
+
+   // Copy the rest of the file
+   destBuffer.set(sourceBuffer.subarray(rpos), pos);
+ }
+
+ function mergeCanvasWithPose(keyword, content) {
+   const canvasUrl = canvas.toDataURL();
+
+   var insertion = stringToUint8Array(`${keyword}\0${content}`);
+   var chunk = createChunk("tEXt", insertion);
+   var sourceBuffer = base64ToUint8Array(canvasUrl.split(',')[1]);
+   var destBuffer = new Uint8Array(sourceBuffer.length + insertion.length + 12);
+
+   var [rpos, dataLength, nextChunkPos] = visitPng(sourceBuffer, "IHDR");
+   insertChunk(destBuffer, sourceBuffer, nextChunkPos, chunk);
+
+   var blob = new Blob([destBuffer], {type: "image/png"});
+   var url = URL.createObjectURL(blob);
+   return url;
+ }
+
+ function savePose() {
+   var [candidate, subset] = poseDataToCandidateAndSubset(poseData);
+   let jsonData = {candidate: candidate, subset: subset};
+
+   var url = mergeCanvasWithPose("openpose", JSON.stringify(jsonData));
+
+   const createEl = document.createElement('a');
+   createEl.href = url;
+
+   // This is the name of our downloaded file
+   createEl.download = "pose.png";
+
+   createEl.click();
+   createEl.remove();
+
+   return jsonData;
+ }
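savePose() above embeds the pose JSON as a PNG `tEXt` chunk keyed `"openpose"`. A minimal sketch (assuming Pillow; the file name is hypothetical) of reading it back in Python — the same lookup that `image_changed()` in main.py performs:

```python
import json
from PIL import Image

image = Image.open("pose.png")  # a file produced by the editor's Save button
# Pillow exposes PNG tEXt chunks through the info dict.
pose = json.loads(image.info["openpose"])
print(len(pose["candidate"]), "keypoints,", len(pose["subset"]), "individual(s)")
```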
main.py ADDED
@@ -0,0 +1,157 @@
+ import gradio as gr
+ import json as js
+ import util
+ from fileservice import app
+ from pose import infer, draw
+
+
+ def image_changed(image):
+     if image is None:
+         return "estimation", {}
+
+     if 'openpose' in image.info:
+         print("pose found")
+         jsonText = image.info['openpose']
+         jsonObj = js.loads(jsonText)
+         subset = jsonObj['subset']
+         return f"""{image.width}px x {image.height}px, {len(subset)} individual(s)""", jsonText
+     else:
+         print("pose not found")
+         pose_result, returned_outputs = infer(util.pil2cv(image))
+         print(len(pose_result))
+
+         candidate = []
+         subset = []
+         for d in pose_result:
+             n = len(candidate)
+             if d['bbox'][4] < 0.9:
+                 continue
+             keypoints = d['keypoints'][:, :2].tolist()
+             midpoint = [(keypoints[5][0] + keypoints[6][0]) / 2, (keypoints[5][1] + keypoints[6][1]) / 2]
+             keypoints.append(midpoint)
+             candidate.extend(util.convert_keypoints(keypoints))
+             m = len(candidate)
+             subset.append([j for j in range(n, m)])
+         print("=====")
+         print(candidate)
+         print(subset)
+
+         jsonText = "{ \"candidate\": " + util.candidate_to_json_string(candidate) + ", \"subset\": " + util.subset_to_json_string(subset) + " }"
+         print(jsonText)
+         return f"""{image.width}px x {image.height}px, {len(subset)} individual(s)""", jsonText
+
+
+ html_text = f"""
+ <canvas id="canvas" width="512" height="512"></canvas>
+ """
+
+ with gr.Blocks(css="""button { min-width: 80px; }""") as demo:
+     with gr.Row():
+         with gr.Column(scale=1):
+             width = gr.Slider(label="Width", minimum=512, maximum=1024, step=64, value=512, interactive=True)
+             height = gr.Slider(label="Height", minimum=512, maximum=1024, step=64, value=512, interactive=True)
+             with gr.Accordion(label="Pose estimation", open=False):
+                 source = gr.Image(type="pil")
+                 estimationResult = gr.Markdown("""estimation""")
+             with gr.Row():
+                 with gr.Column(min_width=80):
+                     applySizeBtn = gr.Button(value="Apply size")
+                 with gr.Column(min_width=80):
+                     replaceBtn = gr.Button(value="Replace")
+                 with gr.Column(min_width=80):
+                     importBtn = gr.Button(value="Import")
+             with gr.Accordion(label="Json", open=False):
+                 with gr.Row():
+                     with gr.Column(min_width=80):
+                         replaceWithJsonBtn = gr.Button(value="Replace")
+                     with gr.Column(min_width=80):
+                         importJsonBtn = gr.Button(value="Import")
+                 gr.Markdown("""
+ | In/Out | How to |
+ | ------ | ------------------------------------------------------------------------------------------------------ |
+ | Import | Paste json to "Json source" and click "Read", edit the width/height, then click "Replace" or "Import". |
+ | Export | Click "Save", then "Copy to clipboard" in the "Json" section. |
+ """)
+                 json = gr.JSON(label="Json")
+                 jsonSource = gr.Textbox(label="Json source", lines=10)
+             with gr.Accordion(label="Notes", open=False):
+                 gr.Markdown("""
+ #### How to bring a pose to ControlNet
+ 1. Press the **Save** button
+ 2. **Drag** the file placed at the bottom-left corner of the browser
+ 3. **Drop** the file into ControlNet
+
+ #### Reuse a pose image
+ A pose image generated by this tool carries the pose data inside the image itself. You can reuse the pose information by loading such an image as the image source instead of a regular image.
+
+ #### Points to note for pseudo-3D rotation
+ When performing pseudo-3D rotation around the X or Y axis, the projection is converted to 2D and the Z-axis information is lost when the mouse button is released. This means that if you finish dragging while the shape is collapsed, you may not be able to restore it to its original state. In such a case, please use the "undo" function.
+ """)
+         with gr.Column(scale=2):
+             html = gr.HTML(html_text)
+             with gr.Row():
+                 with gr.Column(scale=1, min_width=60):
+                     saveBtn = gr.Button(value="Save")
+                 with gr.Column(scale=7):
+                     gr.Markdown("""
+ - "ctrl + drag" to **scale**
+ - "alt + drag" to **move**
+ - "shift + drag" to **rotate** (move right first, release shift, then up or down)
+ - "space + drag" to **range-move**
+ - "[", "]" or "Alt + wheel" or "Space + wheel" to shrink or expand the **range**
+ - "ctrl + Z", "shift + ctrl + Z" to **undo**, **redo**
+ - "ctrl + E" to **add** a new person
+ - "D + click" to **delete** a person
+ - "Q + click" to **cut off** a limb
+ - "X + drag" for **x-axis** pseudo-3D rotation
+ - "C + drag" for **y-axis** pseudo-3D rotation
+ - "R + click" to **repair**
+
+ When using Q, X, C, or R, keep the key pressed until the operation is complete.
+
+ [Contact us for feature requests or bug reports (anonymous)](https://t.co/UC3jJOJJtS)
+ """)
+
+     width.change(fn=None, inputs=[width], _js="(w) => { resizeCanvas(w,null); }")
+     height.change(fn=None, inputs=[height], _js="(h) => { resizeCanvas(null,h); }")
+
+     source.change(
+         fn = image_changed,
+         inputs = [source],
+         outputs = [estimationResult, json])
+     applySizeBtn.click(
+         fn = lambda x: (x.width, x.height),
+         inputs = [source],
+         outputs = [width, height])
+     replaceBtn.click(
+         fn = None,
+         inputs = [json],
+         outputs = [],
+         _js="(json) => { initializeEditor(); importPose(json); return []; }")
+     importBtn.click(
+         fn = None,
+         inputs = [json],
+         outputs = [],
+         _js="(json) => { importPose(json); return []; }")
+
+     saveBtn.click(
+         fn = None,
+         inputs = [], outputs = [json],
+         _js="() => { return [savePose()]; }")
+     jsonSource.change(
+         fn = lambda x: x,
+         inputs = [jsonSource], outputs = [json])
+     replaceWithJsonBtn.click(
+         fn = None,
+         inputs = [json],
+         outputs = [],
+         _js="(json) => { initializeEditor(); importPose(json); return []; }")
+     importJsonBtn.click(
+         fn = None,
+         inputs = [json],
+         outputs = [],
+         _js="(json) => { importPose(json); return []; }")
+     demo.load(fn=None, inputs=[], outputs=[], _js="() => { initializeEditor(); importPose(); return []; }")
+
+ print("mount")
+ gr.mount_gradio_app(app, demo, path="/")
pose.py ADDED
@@ -0,0 +1,50 @@
+ from mmpose.apis import (inference_top_down_pose_model, init_pose_model,
+                          process_mmdet_results, vis_pose_result)
+ from mmpose.datasets import DatasetInfo
+ from mmdet.apis import inference_detector, init_detector
+
+ det_model = init_detector(
+     "./external/faster_rcnn_r50_fpn_coco.py",
+     "./faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth",
+     device="cpu")
+ pose_model = init_pose_model(
+     "./external/hrnet_w48_coco_256x192.py",
+     "./hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth",
+     device="cpu")
+
+ dataset = pose_model.cfg.data['test']['type']
+ dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
+
+ dataset_info = DatasetInfo(dataset_info)
+
+ def infer(image):
+     mmdet_results = inference_detector(det_model, image)
+     person_results = process_mmdet_results(mmdet_results, 1)
+
+     pose_results, returned_outputs = inference_top_down_pose_model(
+         pose_model,
+         image,
+         person_results,
+         bbox_thr=0.3,
+         format='xyxy',
+         dataset=dataset,
+         dataset_info=dataset_info,
+         return_heatmap=False,
+         outputs=None)
+     print(pose_results)
+     print(returned_outputs)
+
+     return pose_results, returned_outputs
+
+ def draw(image, results):
+     return vis_pose_result(
+         pose_model,
+         image,
+         results,
+         dataset=dataset,
+         dataset_info=dataset_info,
+         kpt_score_thr=0.3,
+         radius=4,
+         thickness=3,
+         show=False,
+         out_file=None)
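A minimal usage sketch for the two helpers above (the input file name is hypothetical):

```python
import cv2
from pose import infer, draw

image = cv2.imread("person.jpg")   # BGR image, as mmdet/mmpose expect
results, _ = infer(image)          # detect people, then estimate keypoints
annotated = draw(image, results)   # render skeletons onto the image
cv2.imwrite("person_pose.png", annotated)
```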
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ fastapi==0.92.0
+ gradio==3.18.0
+ numpy==1.23.5
+ opencv_python
+ scipy
+ torch
+ torchvision
+ openmim
util.py ADDED
@@ -0,0 +1,46 @@
+ import numpy as np
+ import cv2
+
+ def pil2cv(image):
+     ''' Convert a PIL image to an OpenCV (BGR) image '''
+     new_image = np.array(image, dtype=np.uint8)
+     if new_image.ndim == 2:  # grayscale
+         pass
+     elif new_image.shape[2] == 3:  # color
+         new_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR)
+     elif new_image.shape[2] == 4:  # with alpha channel
+         new_image = cv2.cvtColor(new_image, cv2.COLOR_RGBA2BGRA)
+     return new_image
+
+ def candidate_to_json_string(arr):
+     a = [f'[{x:.2f}, {y:.2f}]' for x, y, *_ in arr]
+     return '[' + ', '.join(a) + ']'
+
+ # serialize a subset to a JSON string
+ def subset_to_json_string(arr):
+     arr_str = ','.join(['[' + ','.join([f'{num:.2f}' for num in row]) + ']' for row in arr])
+     return '[' + arr_str + ']'
+
+ keypoint_index_mapping = [
+     0,
+     17,
+     6,
+     8,
+     10,
+     5,
+     7,
+     9,
+     12,
+     14,
+     16,
+     11,
+     13,
+     15,
+     2,
+     1,
+     4,
+     3,
+ ]
+
+ def convert_keypoints(keypoints):
+     return [keypoints[i] for i in keypoint_index_mapping]
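A quick sketch making `keypoint_index_mapping` concrete: the input is the 17-joint COCO keypoint order plus the neck midpoint that main.py appends at index 17, and the output is the 18-joint OpenPose order used by the editor:

```python
coco_names = [
    'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear',
    'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',
    'left_wrist', 'right_wrist', 'left_hip', 'right_hip',
    'left_knee', 'right_knee', 'left_ankle', 'right_ankle', 'neck']
print(convert_keypoints(coco_names))
# ['nose', 'neck', 'right_shoulder', 'right_elbow', 'right_wrist',
#  'left_shoulder', 'left_elbow', 'left_wrist', 'right_hip', 'right_knee',
#  'right_ankle', 'left_hip', 'left_knee', 'left_ankle', 'right_eye',
#  'left_eye', 'right_ear', 'left_ear']
```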