-
Notifications
You must be signed in to change notification settings - Fork 3
/
Copy pathImageProcessor.py
155 lines (142 loc) · 5.54 KB
/
ImageProcessor.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
from datetime import datetime
from os import path, remove
import os
import re
try:
from PIL.Image import ANTIALIAS, fromarray, new
from PIL import ImageFile, Image
except ModuleNotFoundError:
print("Could not find PIL")
from numpy import array, uint8
class ImageProcessor(object):
    """
    A class to handle all the imageprocessing done on the screenshots.
    Deals with blending (finding transparency), cropping, and stitching.

    Workflow: call blend() once per screenshot (it records per-image crop
    bounds and keeps the cropped image), then stitch_and_upload() to paste
    everything into one horizontal strip and write it plus an offset map.
    """

    def __init__(self, y_rotations, x_rotations):
        # Longest side (px) that the largest cropped frame is scaled to.
        self.target_dimension = 280
        # NOTE(review): target_size is never read anywhere in this class —
        # presumably a leftover upload-size budget; confirm before removing.
        self.target_size = 512 * 1024 # 512 KB
        # Per-image crop bounds collected by blend(); parallel lists, one
        # entry per processed image, indexed alongside self.images.
        self.cropping = {'left': [], 'top': [], 'right': [], 'bottom': []}
        # Cropped PIL images, in the order blend() was called.
        self.images = []
        self.y_rotations = y_rotations
        self.x_rotations = 2*x_rotations+1 # Total vertical rotations

    def blend(self, file):
        """
        Blends the two images into an alpha image using percieved luminescence.
        https://en.wikipedia.org/wiki/Luma_(video)#Use_of_relative_luminance
        Then, finds the closest-cropped lines that are all white.
        Uses numpy because traversing python arrays is very slow.

        NOTE(review): the summary above predates this implementation — the
        code now just opens a single, already-blended file and assumes it has
        an alpha channel (RGBA); `[:, :, 3]` raises IndexError on RGB input.
        Confirm upstream always produces RGBA.
        """
        print(file)
        blended_arr = Image.open(file)
        blended_arr = array(blended_arr)
        # Calculate crop lines: indices of columns/rows containing at least
        # one non-transparent (alpha != 0) pixel.
        horizontal = blended_arr[:, :, 3].any(axis=0).nonzero()[0]
        vertical = blended_arr[:, :, 3].any(axis=1).nonzero()[0]
        # Record this image's tight bounding box for the shared-crop pass
        # in stitch_and_upload(). Raises IndexError if the image is fully
        # transparent (empty nonzero result) — not handled here.
        self.cropping['left'].append(horizontal[0])
        self.cropping['top'].append(vertical[0])
        self.cropping['right'].append(horizontal[-1])
        self.cropping['bottom'].append(vertical[-1])
        # This needs to be a uint8 to render correctly.
        blended_image = fromarray(blended_arr.astype(uint8), mode='RGBA')
        # NOTE(review): PIL's crop box is right/bottom-EXCLUSIVE, so using
        # horizontal[-1]/vertical[-1] drops the last non-transparent column
        # and row — looks like an off-by-one; confirm whether intended.
        blended_image = blended_image.crop((
            horizontal[0],
            vertical[0],
            horizontal[-1],
            vertical[-1]
        ))
        self.images.append(blended_image)

    def stitch_and_upload(self, directory, file_format="PNG"):
        """
        Crops the images to a shared size, then pastes them together.
        Prompts for login and uploads to the wiki when done.

        :param directory: output directory for the stitched image and the
            'weaponoffsets.txt' description file.
        :param file_format: PIL format name; "JPEG" triggers an RGB convert
            since JPEG has no alpha channel.

        NOTE(review): despite the name, nothing here uploads — it only writes
        the stitched file and a wiki template snippet to disk.
        """
        # Determining crop bounds: the union of every per-image bounding box,
        # so all frames share one coordinate system.
        min_cropping = (
            min(self.cropping['left']),
            min(self.cropping['top']),
            max(self.cropping['right']),
            max(self.cropping['bottom'])
        )
        print('Min cropping: ' + str(min_cropping))
        max_frame_size = (
            min_cropping[2] - min_cropping[0],
            min_cropping[3] - min_cropping[1]
        )
        print('Max frame size: ' + str(max_frame_size))
        # Scale so the largest frame dimension becomes target_dimension.
        target_ratio = self.target_dimension / max(max_frame_size)
        # target_ratio = 1
        print('Target scaling ratio: %f' % target_ratio)
        max_frame_size = (
            int(target_ratio * max_frame_size[0]),
            int(target_ratio * max_frame_size[1])
        )
        print('Scaled max frame size: ' + str(max_frame_size))
        # Pasting together: one transparent strip wide enough for every
        # rotation frame plus a 1px gap after each.
        full_image = new(mode='RGBA', color=(255, 255, 255, 0), size=((
            (max_frame_size[0]+1)*self.y_rotations*self.x_rotations,
            max_frame_size[1]
        )))
        curr_offset = 0
        # Flat triples per frame: (x offset in strip, scaled height,
        # horizontal crop shift) — consumed by the wiki's 3D viewer.
        offset_map = []
        for i, image in enumerate(self.images):
            # NOTE(review): ANTIALIAS was removed in Pillow 10 (it was an
            # alias of LANCZOS); on modern Pillow this import/use fails —
            # migrate to Image.Resampling.LANCZOS when upgrading.
            image = image.resize((
                int(image.width*target_ratio),
                int(image.height*target_ratio),
            ), ANTIALIAS)
            left_crop = int(
                target_ratio*(self.cropping['left'][i]-min_cropping[0]))
            top_crop = int(
                target_ratio*(self.cropping['top'][i]-min_cropping[1]))
            # Third arg = alpha mask so transparency is preserved on paste.
            full_image.paste(image, (curr_offset, top_crop), image)
            # Offset map adds 1 manually for some reason
            offset_map += [curr_offset-i, image.height, left_crop]
            # Increase by 1 each time to add a 1px gap
            curr_offset += image.width+1
        # Trim the unused tail of the pre-allocated strip.
        full_image = full_image.crop((
            0,
            0,
            curr_offset,
            max_frame_size[1],
        ))
        output_file = 'weapon.' + file_format.lower()
        output_file = os.path.join(directory, output_file)
        if path.exists(output_file):
            remove(output_file)
        # Ensure there is enough allocated space to save the image as progressive
        ImageFile.MAXBLOCK = full_image.height * full_image.width * 16
        if file_format == "JPEG":
            # JPEG cannot store alpha; drop it before saving.
            full_image = full_image.convert("RGB")
        full_image.save(output_file, format=file_format)
        # Wiki {{#switch}} template: URL cache-buster timestamp, frame map,
        # and viewer height. 8 format args ↔ 8 % specifiers below.
        # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
        # prefer datetime.now(timezone.utc) when touching this next.
        description = '''{{#switch: {{{1|}}}
 | url = <nowiki>%s?%s</nowiki>
 | map = \n%d, %d, %d, %d, %s
 | height = %d
 | startframe = 16
}}<noinclude>{{3D viewer}}[[Category:3D model images]]''' % (
            "url",
            datetime.strftime(datetime.utcnow(), '%Y%m%d%H%M%S'),
            curr_offset,
            max_frame_size[0],
            max_frame_size[1],
            self.x_rotations,
            ', '.join([str(o) for o in offset_map]),
            self.target_dimension
        )
        with open(os.path.join(directory, "weaponoffsets.txt"), "w+") as f:
            f.write(description)
if __name__ == "__main__":
    # Stitch every screenshot found in the sibling 'Booth' directory.
    directory = os.path.join(os.path.dirname(__file__), 'Booth')
    files = os.listdir(directory)
    # Sort numerically by the digits embedded in each filename; r'\D' (raw
    # string) fixes the invalid '\D' escape-sequence warning.
    files.sort(key=lambda f: int(re.sub(r'\D', '', f)))
    NUM = len(files)
    # One horizontal rotation per 3 files (3 vertical frames each).
    Y_ROTATIONS = int(NUM / 3)
    X_ROTATIONS = 3
    print(Y_ROTATIONS, X_ROTATIONS)
    p = ImageProcessor(Y_ROTATIONS, 1)
    for i, filename in enumerate(files):
        print(filename)
        p.blend(os.path.join(directory, filename))
    # Fix: stitch_and_upload requires the output directory — calling it
    # with no argument raised TypeError after all the blending work.
    p.stitch_and_upload(directory)