diff --git a/blender_bg_scripts/model_validation_bg_render.py b/blender_bg_scripts/model_validation_bg_render.py index 8de589b..b567c5f 100644 --- a/blender_bg_scripts/model_validation_bg_render.py +++ b/blender_bg_scripts/model_validation_bg_render.py @@ -21,14 +21,14 @@ def getNode(mat, type): def link_collection( - file_name, obnames=[], location=(0, 0, 0), link=False, parent=None, **kwargs + file_name, obnames=[], location=(0, 0, 0), link=False, parent=None, **kwargs ): """link an instanced group - model type asset""" sel = utils.selection_get() with bpy.data.libraries.load(file_name, link=link, relative=True) as ( - data_from, - data_to, + data_from, + data_to, ): scols = [] for col in data_from.collections: @@ -76,13 +76,15 @@ def link_collection( def add_text_line(strip, text): - bpy.data.scenes["Composite"].sequence_editor.sequences_all[strip].text += text + 10 * ' ' + bpy.data.scenes["Composite"].sequence_editor.sequences_all[strip].text += ( + text + 10 * " " + ) def writeout_param(asset_data, param_name): pl = utils.get_param(asset_data, param_name) if pl is not None: - add_text_line('asset', f'{param_name}:{pl}') + add_text_line("asset", f"{param_name}:{pl}") def set_text(strip, text): @@ -90,28 +92,28 @@ def set_text(strip, text): def scale_cameras(asset_data): - params = asset_data['dictParameters'] - minx = params['boundBoxMinX'] - miny = params['boundBoxMinY'] - minz = params['boundBoxMinZ'] - maxx = params['boundBoxMaxX'] - maxy = params['boundBoxMaxY'] - maxz = params['boundBoxMaxZ'] - - dx = (maxx - minx) - dy = (maxy - miny) - dz = (maxz - minz) + params = asset_data["dictParameters"] + minx = params["boundBoxMinX"] + miny = params["boundBoxMinY"] + minz = params["boundBoxMinZ"] + maxx = params["boundBoxMaxX"] + maxy = params["boundBoxMaxY"] + maxz = params["boundBoxMaxZ"] + + dx = maxx - minx + dy = maxy - miny + dz = maxz - minz print(dx, dy, dz) r = math.sqrt(dx * dx + dy * dy + dz * dz) r *= 1.2 - scaler = bpy.data.objects['scaler'] + scaler = bpy.data.objects["scaler"] scaler.scale = (r, r, r) scaler.location.z = (maxz + minz) / 2 # get scene camera - cam = bpy.data.objects['Camera'] + cam = bpy.data.objects["Camera"] # Set ortho scale to max of dimensions cam.data.ortho_scale = max(dx, dy, dz) * 1.1 @@ -136,7 +138,7 @@ def scale_cameras(asset_data): def check_for_flat_faces(): for ob in bpy.context.scene.objects: - if ob.type == 'MESH': + if ob.type == "MESH": for f in ob.data.polygons: if not f.use_smooth: return True @@ -150,49 +152,49 @@ def mark_freestyle_edges(): def set_asset_data_texts(asset_data): - set_text('asset', '') - add_text_line('asset', asset_data['name']) - dx = utils.get_param(asset_data, 'dimensionX') - dy = utils.get_param(asset_data, 'dimensionY') - dz = utils.get_param(asset_data, 'dimensionZ') + set_text("asset", "") + add_text_line("asset", asset_data["name"]) + dx = utils.get_param(asset_data, "dimensionX") + dy = utils.get_param(asset_data, "dimensionY") + dz = utils.get_param(asset_data, "dimensionZ") dim_text = f"Dimensions:{dx}x{dy}x{dz}m" - add_text_line('asset', dim_text) - fc = utils.get_param(asset_data, 'faceCount', 1) - fcr = utils.get_param(asset_data, 'faceCountRender', 1) + add_text_line("asset", dim_text) + fc = utils.get_param(asset_data, "faceCount", 1) + fcr = utils.get_param(asset_data, "faceCountRender", 1) - add_text_line('asset', f"fcount {fc} render {fcr}") + add_text_line("asset", f"fcount {fc} render {fcr}") if check_for_flat_faces(): - add_text_line('asset', 'Flat faces detected') + add_text_line("asset", "Flat 
faces detected") - writeout_param(asset_data, 'productionLevel') - writeout_param(asset_data, 'shaders') - writeout_param(asset_data, 'modifiers') - writeout_param(asset_data, 'meshPolyType') - writeout_param(asset_data, 'manifold') - writeout_param(asset_data, 'objectCount') - writeout_param(asset_data, 'nodeCount') - writeout_param(asset_data, 'textureCount') - writeout_param(asset_data, 'textureResolutionMax') + writeout_param(asset_data, "productionLevel") + writeout_param(asset_data, "shaders") + writeout_param(asset_data, "modifiers") + writeout_param(asset_data, "meshPolyType") + writeout_param(asset_data, "manifold") + writeout_param(asset_data, "objectCount") + writeout_param(asset_data, "nodeCount") + writeout_param(asset_data, "textureCount") + writeout_param(asset_data, "textureResolutionMax") -def set_scene(name=''): - print(f'setting scene {name}') +def set_scene(name=""): + print(f"setting scene {name}") bpy.context.window.scene = bpy.data.scenes[name] - c = bpy.context.scene.objects.get('Camera') + c = bpy.context.scene.objects.get("Camera") if c is not None: bpy.context.scene.camera = c bpy.context.view_layer.update() # bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1) -def set_view_shading(shading_type='RENDERED', face_orientation=False, wireframe=False): +def set_view_shading(shading_type="RENDERED", face_orientation=False, wireframe=False): # bpy.data.workspaces['Layout'].screens['Layout'].areas[4].spaces[0].shading for w in bpy.data.workspaces: for a in w.screens[0].areas: - if a.type == 'VIEW_3D': + if a.type == "VIEW_3D": for s in a.spaces: - if s.type == 'VIEW_3D': + if s.type == "VIEW_3D": s.shading.type = shading_type s.overlay.show_wireframes = wireframe s.overlay.show_face_orientation = face_orientation @@ -200,67 +202,77 @@ def set_view_shading(shading_type='RENDERED', face_orientation=False, wireframe= # bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1) -def set_workspace(name='Layout'): +def set_workspace(name="Layout"): for a in range(0, 2): bpy.context.window.workspace = bpy.data.workspaces[name] bpy.context.workspace.update_tag() bpy.context.view_layer.update() # bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1) + def switch_off_all_modifiers(): - #switches off all modifiers for render in the scene and stores and returns them in a list with original state. + # switches off all modifiers for render in the scene and stores and returns them in a list with original state. original_states = [] for ob in bpy.context.scene.objects: - if ob.type == 'MESH': + if ob.type == "MESH": for m in ob.modifiers: original_states.append((ob, m, m.show_render)) m.show_render = False return original_states + def switch_on_all_modifiers(original_states): - #switches on all modifiers for render in the scene and restores them to the original state. + # switches on all modifiers for render in the scene and restores them to the original state. for ob, m, state in original_states: m.show_render = state -def add_geometry_nodes_to_all_objects(group = 'wireNodes', dimensions = 1): - #takes all visible objects in the scene and adds geometry nodes modifier with the group to them. - #avoids objects with more than 300k face. + +def add_geometry_nodes_to_all_objects(group="wireNodes", dimensions=1): + # takes all visible objects in the scene and adds geometry nodes modifier with the group to them. + # avoids objects with more than 300k face. 
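    # The loop below drives the pre-made "wireNodes" node group through a
    # Geometry Nodes modifier and feeds the asset's largest dimension into the
    # group input addressed as "Socket_0" (whatever input identifier the template
    # .blend actually exposes), so the wireframe thickness scales with the asset.
    # Rough data-API equivalent of the operator calls below (a sketch only,
    # assuming the same node group and input identifier exist):
    #   m = ob.modifiers.new(name="wireNodes", type="NODES")
    #   m.node_group = bpy.data.node_groups["wireNodes"]
    #   m["Socket_0"] = float(dimensions)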
for ob in bpy.context.scene.objects: - if ob.type == 'MESH' and ob.visible_get() and len(ob.data.polygons) < 300000: + if ob.type == "MESH" and ob.visible_get() and len(ob.data.polygons) < 300000: bpy.context.view_layer.objects.active = ob - bpy.ops.object.modifier_add(type='NODES') + bpy.ops.object.modifier_add(type="NODES") m = bpy.context.object.modifiers[-1] m.node_group = bpy.data.node_groups[group] - #asset dimensions needed + # asset dimensions needed m["Socket_0"] = float(dimensions) -def remove_geometry_nodes_from_all_objects(group = 'wireNodes'): - #takes all visible objects in the scene and removes geometry nodes modifier with the group to them. + +def remove_geometry_nodes_from_all_objects(group="wireNodes"): + # takes all visible objects in the scene and removes geometry nodes modifier with the group to them. for ob in bpy.context.scene.objects: - if ob.type == 'MESH' and ob.visible_get() and len(ob.data.polygons) < 300000: + if ob.type == "MESH" and ob.visible_get() and len(ob.data.polygons) < 300000: bpy.context.view_layer.objects.active = ob # check if the modifier is there for m in ob.modifiers: - if m.type == 'NODES' and m.node_group.name == group: + if m.type == "NODES" and m.node_group.name == group: bpy.context.object.modifiers.remove(m) -def render_model_validation( asset_data, filepath): + + +def render_model_validation(asset_data, filepath): # bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1) # render basic render - set_scene('Render') + set_scene("Render") # set_view_shading(shading_type='RENDERED') # set_workspace('Render') + # set samples to just 1 for speed + # bpy.context.scene.cycles.samples = 1 + bpy.ops.render.render(animation=True) # bpy.ops.render.opengl(animation=True, view_context=True) # render the Mesh checker # now in render - set_scene('Mesh_checker') + set_scene("Mesh_checker") # freestyle is crazy slow. 
Need better edge render :( # mark_freestyle_edges() # set_view_shading(shading_type='MATERIAL', wireframe = True, face_orientation=True) # set_workspace('Mesh_checker') + bpy.ops.render.render(animation=True) # bpy.ops.render.opengl(animation=True, view_context=False) @@ -272,20 +284,20 @@ def render_model_validation( asset_data, filepath): # set_scene('UV_checker') # bpy.ops.render.render(animation=True, write_still=True) - #switch off modifiers for this one - set_scene('Mesh_checker_no_modif') + # switch off modifiers for this one + set_scene("Mesh_checker_no_modif") original_states = switch_off_all_modifiers() - dimensionX = utils.get_param(asset_data, 'dimensionX') - dimensionY = utils.get_param(asset_data, 'dimensionY') - dimensionZ = utils.get_param(asset_data, 'dimensionZ') + dimensionX = utils.get_param(asset_data, "dimensionX") + dimensionY = utils.get_param(asset_data, "dimensionY") + dimensionZ = utils.get_param(asset_data, "dimensionZ") # Max length is taken as the dimension of the asset dimensions = max(dimensionX, dimensionY, dimensionZ) - add_geometry_nodes_to_all_objects(group='wireNodes', dimensions=dimensions) + add_geometry_nodes_to_all_objects(group="wireNodes", dimensions=dimensions) bpy.ops.render.render(animation=True) - remove_geometry_nodes_from_all_objects(group='wireNodes') + remove_geometry_nodes_from_all_objects(group="wireNodes") switch_on_all_modifiers(original_states) # switch to composite and render video - #No video, in this one we render only large stills + # No video, in this one we render only large stills # set_scene('Composite') # # bpy.context.scene.render.filepath = filepath @@ -296,42 +308,88 @@ def render_model_validation( asset_data, filepath): # print(f'rendering validation preview for {asset_data["name"]}') # bpy.ops.render.render(animation=True, write_still=True) -def export_gltf(filepath=''): + +def export_gltf(filepath=""): # print all selected objects names first for ob in bpy.context.selected_objects: print(ob.name) - bpy.ops.export_scene.gltf(filepath=filepath, export_format='GLB', export_copyright="", - export_image_format='WEBP', export_image_add_webp=True, export_image_webp_fallback=False, - export_texture_dir="", export_jpeg_quality=50, export_image_quality=50, - export_keep_originals=False, export_texcoords=True, export_normals=True, - export_draco_mesh_compression_enable=True, export_draco_mesh_compression_level=6, - export_draco_position_quantization=14, export_draco_normal_quantization=10, - export_draco_texcoord_quantization=12, export_draco_color_quantization=10, - export_draco_generic_quantization=12, export_tangents=False, export_materials='EXPORT', - export_colors=True, export_attributes=False, use_mesh_edges=False, - use_mesh_vertices=False, - export_cameras=False, use_selection=True, use_visible=False, use_renderable=False, - use_active_collection_with_nested=True, use_active_collection=False, - use_active_scene=False, export_extras=False, export_yup=True, export_apply=False, - export_animations=True, export_frame_range=False, export_frame_step=1, - export_force_sampling=True, export_animation_mode='ACTIONS', - export_nla_strips_merged_animation_name="Animation", export_def_bones=False, - export_hierarchy_flatten_bones=False, export_optimize_animation_size=True, - export_optimize_animation_keep_anim_armature=True, - export_optimize_animation_keep_anim_object=False, export_negative_frame='SLIDE', - export_anim_slide_to_zero=False, export_bake_animation=False, - export_anim_single_armature=True, export_reset_pose_bones=True, 
- export_current_frame=False, - export_rest_position_armature=True, export_anim_scene_split_object=True, - export_skins=True, - export_influence_nb=4, export_all_influences=False, export_morph=True, - export_morph_normal=True, export_morph_tangent=False, export_morph_animation=True, - export_morph_reset_sk_data=True, export_lights=False, export_try_sparse_sk=True, - export_try_omit_sparse_sk=False, export_gpu_instances=False, export_nla_strips=True, - export_original_specular=False, will_save_settings=False, filter_glob="*.glb") + bpy.ops.export_scene.gltf( + filepath=filepath, + export_format="GLB", + export_copyright="", + export_image_format="WEBP", + export_image_add_webp=True, + export_image_webp_fallback=False, + export_texture_dir="", + export_jpeg_quality=50, + export_image_quality=50, + export_keep_originals=False, + export_texcoords=True, + export_normals=True, + export_draco_mesh_compression_enable=True, + export_draco_mesh_compression_level=6, + export_draco_position_quantization=14, + export_draco_normal_quantization=10, + export_draco_texcoord_quantization=12, + export_draco_color_quantization=10, + export_draco_generic_quantization=12, + export_tangents=False, + export_materials="EXPORT", + export_colors=True, + export_attributes=False, + use_mesh_edges=False, + use_mesh_vertices=False, + export_cameras=False, + use_selection=True, + use_visible=False, + use_renderable=False, + use_active_collection_with_nested=True, + use_active_collection=False, + use_active_scene=False, + export_extras=False, + export_yup=True, + export_apply=False, + export_animations=True, + export_frame_range=False, + export_frame_step=1, + export_force_sampling=True, + export_animation_mode="ACTIONS", + export_nla_strips_merged_animation_name="Animation", + export_def_bones=False, + export_hierarchy_flatten_bones=False, + export_optimize_animation_size=True, + export_optimize_animation_keep_anim_armature=True, + export_optimize_animation_keep_anim_object=False, + export_negative_frame="SLIDE", + export_anim_slide_to_zero=False, + export_bake_animation=False, + export_anim_single_armature=True, + export_reset_pose_bones=True, + export_current_frame=False, + export_rest_position_armature=True, + export_anim_scene_split_object=True, + export_skins=True, + export_influence_nb=4, + export_all_influences=False, + export_morph=True, + export_morph_normal=True, + export_morph_tangent=False, + export_morph_animation=True, + export_morph_reset_sk_data=True, + export_lights=False, + export_try_sparse_sk=True, + export_try_omit_sparse_sk=False, + export_gpu_instances=False, + export_nla_strips=True, + export_original_specular=False, + will_save_settings=False, + filter_glob="*.glb", + ) + + def render_asset_bg(data): - asset_data = data['asset_data'] - set_scene('Empty_start') + asset_data = data["asset_data"] + set_scene("Empty_start") # first lets build the filepath and find out if its already rendered? s = bpy.context.scene @@ -356,28 +414,32 @@ def render_asset_bg(data): fpath = data["file_path"] if fpath: try: - parent, new_obs = link_collection(fpath, - location=(0, 0, 0), - rotation=(0, 0, 0), - link=True, - name=asset_data['name'], - parent=None) + parent, new_obs = link_collection( + fpath, + location=(0, 0, 0), + rotation=(0, 0, 0), + link=True, + name=asset_data["name"], + parent=None, + ) # we need to realize for UV , texture, and nodegraph exports here.. 
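            # The collection was linked (not appended), so it arrives as a single
            # instancer empty pointing at library data.  duplicates_make_real()
            # below expands that instance into real objects and make_local()
            # breaks the library references, which the later UV, texture and
            # node-graph export steps need in order to read the mesh data.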
utils.activate_object(parent) - bpy.ops.object.duplicates_make_real(use_base_parent=True, use_hierarchy=True) + bpy.ops.object.duplicates_make_real( + use_base_parent=True, use_hierarchy=True + ) all_obs = bpy.context.selected_objects[:] - bpy.ops.object.make_local(type='ALL') + bpy.ops.object.make_local(type="ALL") except Exception as e: print(e) - print('failed to append asset') + print("failed to append asset") return for s in bpy.data.scenes: if s != bpy.context.scene: # s.collection.objects.link(parent) - #try link all already realized. + # try link all already realized. for ob in all_obs: s.collection.objects.link(ob) @@ -385,19 +447,25 @@ def render_asset_bg(data): scale_cameras(asset_data) - #save the file to temp folder, so all files go there. - blend_file_path = os.path.join((data['temp_folder']), f"{asset_data['name']}.blend") - bpy.ops.wm.save_as_mainfile(filepath=blend_file_path, compress=False, copy=False, relative_remap=False) + # save the file to temp folder, so all files go there. + blend_file_path = os.path.join( + (data["temp_folder"]), f"{asset_data['name']}.blend" + ) + bpy.ops.wm.save_as_mainfile( + filepath=blend_file_path, compress=False, copy=False, relative_remap=False + ) - #first render the video - render_model_validation( asset_data, data['result_filepath']) - #then render the rest, since that makes total mess in the file... - render_nodes_graph.visualize_and_save_all(tempfolder=data['result_folder'], objects=all_obs) + # first render the video + render_model_validation(asset_data, data["result_filepath"]) + # then render the rest, since that makes total mess in the file... + render_nodes_graph.visualize_and_save_all( + tempfolder=data["result_folder"], objects=all_obs + ) if __name__ == "__main__": - print('background resolution generator') + print("background resolution generator") datafile = sys.argv[-1] - with open(datafile, 'r', encoding='utf-8') as f: + with open(datafile, "r", encoding="utf-8") as f: data = json.load(f) render_asset_bg(data) diff --git a/blender_bg_scripts/pack_twinbru_material.py b/blender_bg_scripts/pack_twinbru_material.py new file mode 100644 index 0000000..3c7a605 --- /dev/null +++ b/blender_bg_scripts/pack_twinbru_material.py @@ -0,0 +1,106 @@ +""" +This script is used to pack a material from TwinBru to a blenderkit asset. +It imports textures from the unzipped folder , creates a node tree and assigns the textures to the material. 
+""" + +import sys +import os +import bpy +import json + +# import utils- add path +dir_path = os.path.dirname(os.path.realpath(__file__)) +parent_path = os.path.join(dir_path, os.path.pardir) +sys.path.append(parent_path) +from blenderkit_server_utils import paths + + +if __name__ == "__main__": + datafile = sys.argv[-1] + print(f"datafile: {datafile}") + with open(datafile, "r", encoding="utf-8") as f: + data = json.load(f) + twinbru_asset = data["asset_data"] + temp_folder = data["temp_folder"] + result_filepath = data["result_filepath"] + print(f"temp_folder: {temp_folder}") + + # convert name - remove _ and remove the number that comes last in name + readable_name = twinbru_asset["name"].split("_") + # capitalize the first letter of each word + readable_name = " ".join(word.capitalize() for word in readable_name[:-1]) + + # create a new material + material = bpy.data.materials.new(name=readable_name) + material.use_nodes = True + material.blend_method = "BLEND" + material.shadow_method = "HASHED" + material.diffuse_color = (1, 1, 1, 1) + # ensure the material is saved + material.use_fake_user = True + # create the node tree + nodes = material.node_tree.nodes + links = material.node_tree.links + + # set nodes spacing + node_gap_x = 400 + node_gap_y = 300 + # find the output node + output_node = nodes.get("Material Output") + if not output_node: + output_node = nodes.new(type="ShaderNodeOutputMaterial") + output_node.location = (node_gap_x, 0) + + # find Principled BSDF node + principled_bsdf = nodes.get("Principled BSDF") + if not principled_bsdf: + principled_bsdf = nodes.new(type="ShaderNodeBsdfPrincipled") + principled_bsdf.location = (0, 0) + + # Link the Principled BSDF to the Output Material node + links.new(principled_bsdf.outputs[0], output_node.inputs[0]) + + # Get the texture file names + texture_directory = os.path.join(temp_folder, "pbr-pol") + texture_files = os.listdir(texture_directory) + mapping_substrings = { + "BASE": "Base Color", + "MTL": "Metallic", + "ROUGH": "Roughness", + "ALPHA": "Alpha", + "NRM": "Normal", + } + index = 0 + texture_nodes = [] + for substring, mapping in mapping_substrings.items(): + for texture_file in texture_files: + if substring + "." 
in texture_file: + print(f"texture_file: {texture_file}") + texture_path = os.path.join(texture_directory, texture_file) + texture_node = nodes.new(type="ShaderNodeTexImage") + texture_node.location = ( + -2 * node_gap_x, + node_gap_y * 2 - index * node_gap_y, + ) + texture_node.image = bpy.data.images.load(texture_path) + # set anything besides color to non color + if mapping != "Base Color": + texture_node.image.colorspace_settings.name = "Non-Color" + # normal maps need a normal map node + if mapping == "Normal": + normal_map = nodes.new(type="ShaderNodeNormalMap") + normal_map.location = ( + -node_gap_x, + texture_node.location[1], + ) + links.new(texture_node.outputs[0], normal_map.inputs["Color"]) + links.new(normal_map.outputs[0], principled_bsdf.inputs[mapping]) + else: + links.new(texture_node.outputs[0], principled_bsdf.inputs[mapping]) + index += 1 + texture_nodes.append(texture_node) + + # Pack all .blend textures + bpy.ops.file.pack_all() + # save the material + bpy.ops.wm.save_as_mainfile(filepath=result_filepath) diff --git a/blenderkit_server_utils/cloudflare_storage.py b/blenderkit_server_utils/cloudflare_storage.py index 6dc5681..3ff42e5 100644 --- a/blenderkit_server_utils/cloudflare_storage.py +++ b/blenderkit_server_utils/cloudflare_storage.py @@ -5,8 +5,9 @@ import boto3 from botocore.exceptions import NoCredentialsError + class CloudflareStorage: - def __init__(self, access_key, secret_key, endpoint_url, region_name='auto'): + def __init__(self, access_key, secret_key, endpoint_url, region_name="auto"): """ Initializes the connection to Cloudflare's S3-compatible storage. @@ -16,11 +17,13 @@ def __init__(self, access_key, secret_key, endpoint_url, region_name='auto'): :param region_name: Region name, default is 'auto' for Cloudflare. """ self.session = boto3.session.Session() - self.client = self.session.client('s3', - region_name=region_name, - endpoint_url=endpoint_url, - aws_access_key_id=access_key, - aws_secret_access_key=secret_key) + self.client = self.session.client( + "s3", + region_name=region_name, + endpoint_url=endpoint_url, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + ) def upload_file(self, file_name, bucket_name, object_name=None): """ @@ -52,15 +55,16 @@ def list_all_folders(self, bucket_name): :param bucket_name: Name of the Cloudflare R2 bucket. :return: A set of all folder prefixes. """ - paginator = self.client.get_paginator('list_objects_v2') + paginator = self.client.get_paginator("list_objects_v2") folders = set() # Use a paginator to fetch all objects - for page in paginator.paginate(Bucket=bucket_name, Delimiter='/'): - for prefix in page.get('CommonPrefixes', []): - folders.add(prefix['Prefix']) + for page in paginator.paginate(Bucket=bucket_name, Delimiter="/"): + for prefix in page.get("CommonPrefixes", []): + folders.add(prefix["Prefix"]) return folders + def folder_exists(self, bucket_name, folder_name): """ Check if a folder exists in a specified bucket. @@ -70,17 +74,19 @@ def folder_exists(self, bucket_name, folder_name): :return: True if the folder exists, False otherwise. 
""" # Ensure the folder name ends with a '/' to accurately match the folder structure - if not folder_name.endswith('/'): - folder_name += '/' + if not folder_name.endswith("/"): + folder_name += "/" response = self.client.list_objects_v2( Bucket=bucket_name, Prefix=folder_name, - MaxKeys=1 # We only need to find one object to confirm the folder exists + MaxKeys=1, # We only need to find one object to confirm the folder exists ) - return 'Contents' in response and len(response['Contents']) > 0 + return "Contents" in response and len(response["Contents"]) > 0 - def upload_folder(self, local_folder_path, bucket_name, cloudflare_folder_prefix=''): + def upload_folder( + self, local_folder_path, bucket_name, cloudflare_folder_prefix="" + ): """ Recursively uploads a folder and its contents to Cloudflare R2, maintaining the folder structure, and creates an index file in the top-level directory listing all uploaded files. @@ -95,24 +101,38 @@ def upload_folder(self, local_folder_path, bucket_name, cloudflare_folder_prefix for filename in files: local_path = os.path.join(root, filename) relative_path = os.path.relpath(local_path, start=local_folder_path) - cloudflare_object_name = os.path.join(cloudflare_folder_prefix, relative_path) - cloudflare_object_name = cloudflare_object_name.replace('\\', '/') + cloudflare_object_name = os.path.join( + cloudflare_folder_prefix, relative_path + ) + cloudflare_object_name = cloudflare_object_name.replace("\\", "/") # Upload the file if self.upload_file(local_path, bucket_name, cloudflare_object_name): - uploaded_files.append(cloudflare_object_name) # Add successful uploads to the list + uploaded_files.append( + cloudflare_object_name + ) # Add successful uploads to the list # After all files are uploaded, create and upload the index.json file - index_file_path = '/tmp/index.json' if cloudflare_folder_prefix else cloudflare_folder_prefix + 'index.json' - with open(index_file_path, 'w') as index_file: + # only do this if there are files to upload + if not uploaded_files: + print("No files found to upload.") + return + index_file_path = ( + "/tmp/index.json" + if cloudflare_folder_prefix + else cloudflare_folder_prefix + "index.json" + ) + with open(index_file_path, "w") as index_file: json.dump(uploaded_files, index_file) # Upload the index file - cloudflare_object_name = os.path.join(cloudflare_folder_prefix, 'index.json') - cloudflare_object_name = cloudflare_object_name.replace('\\', '/') + cloudflare_object_name = os.path.join(cloudflare_folder_prefix, "index.json") + cloudflare_object_name = cloudflare_object_name.replace("\\", "/") self.upload_file(index_file_path, bucket_name, cloudflare_object_name) - print(f"Uploaded index file to Cloudflare R2 storage at {cloudflare_folder_prefix}index.json") + print( + f"Uploaded index file to Cloudflare R2 storage at {cloudflare_folder_prefix}index.json" + ) def delete_folder_contents(self, bucket_name, folder_prefix): """ @@ -122,17 +142,19 @@ def delete_folder_contents(self, bucket_name, folder_prefix): :param folder_prefix: The prefix of the folder to delete contents from. Must end with '/'. 
""" # Ensure the folder prefix ends with '/' to avoid accidentally deleting unintended objects - if not folder_prefix.endswith('/'): - folder_prefix += '/' + if not folder_prefix.endswith("/"): + folder_prefix += "/" # List all objects in the folder response = self.client.list_objects_v2(Bucket=bucket_name, Prefix=folder_prefix) - objects = response.get('Contents', []) + objects = response.get("Contents", []) # If there are objects to delete, prepare and execute the deletion if objects: - delete_keys = {'Objects': [{'Key': obj['Key']} for obj in objects]} - delete_response = self.client.delete_objects(Bucket=bucket_name, Delete=delete_keys) + delete_keys = {"Objects": [{"Key": obj["Key"]} for obj in objects]} + delete_response = self.client.delete_objects( + Bucket=bucket_name, Delete=delete_keys + ) print(f"Deleted objects: {delete_response}") else: print("No objects found to delete.") @@ -144,26 +166,28 @@ def delete_old_files(self, bucket_name, x_days): :param bucket_name: The name of the Cloudflare R2 bucket. :param x_days: The age threshold in days for deleting files. """ - paginator = self.client.get_paginator('list_objects_v2') + paginator = self.client.get_paginator("list_objects_v2") delete_before_date = datetime.now(timezone.utc) - timedelta(days=x_days) # Prepare a batch delete operation - delete_batch = {'Objects': []} + delete_batch = {"Objects": []} # Iterate through all objects in the bucket for page in paginator.paginate(Bucket=bucket_name): - for obj in page.get('Contents', []): + for obj in page.get("Contents", []): # If the object is older than the specified days, mark it for deletion - if obj['LastModified'] < delete_before_date: - delete_batch['Objects'].append({'Key': obj['Key']}) + if obj["LastModified"] < delete_before_date: + delete_batch["Objects"].append({"Key": obj["Key"]}) # Perform the deletion in batches of 1000 (S3 limit) - if len(delete_batch['Objects']) >= 1000: - self.client.delete_objects(Bucket=bucket_name, Delete=delete_batch) - delete_batch = {'Objects': []} # Reset batch + if len(delete_batch["Objects"]) >= 1000: + self.client.delete_objects( + Bucket=bucket_name, Delete=delete_batch + ) + delete_batch = {"Objects": []} # Reset batch # Delete any remaining objects in the last batch - if delete_batch['Objects']: + if delete_batch["Objects"]: self.client.delete_objects(Bucket=bucket_name, Delete=delete_batch) - print("Old files deleted.") \ No newline at end of file + print("Old files deleted.") diff --git a/blenderkit_server_utils/download.py b/blenderkit_server_utils/download.py index cc3449f..f85a0ce 100644 --- a/blenderkit_server_utils/download.py +++ b/blenderkit_server_utils/download.py @@ -4,233 +4,242 @@ from . import utils from . import paths -SCENE_UUID = '5d22a2ce-7d4e-4500-9b1a-e5e79f8732c0' - +SCENE_UUID = "5d22a2ce-7d4e-4500-9b1a-e5e79f8732c0" def server_2_local_filename(asset_data, filename): - """Convert file name on server to file name local. This should get replaced.""" + """Convert file name on server to file name local. 
This should get replaced.""" - fn = filename.replace('blend_', '') - fn = fn.replace('resolution_', '') - n = paths.slugify(asset_data['name']) + '_' + fn - return n + fn = filename.replace("blend_", "") + fn = fn.replace("resolution_", "") + n = paths.slugify(asset_data["name"]) + "_" + fn + return n def files_size_to_text(size): - fsmb = size / (1024 * 1024) - fskb = size % 1024 - if fsmb == 0: - return f'{round(fskb)}KB' - else: - return f'{round(fsmb, 1)}MB' + fsmb = size / (1024 * 1024) + fskb = size % 1024 + if fsmb == 0: + return f"{round(fskb)}KB" + else: + return f"{round(fsmb, 1)}MB" def get_core_file(asset_data, resolution, find_closest_with_url=False): - ''' - Returns core blend file. - ''' - for f in asset_data['files']: - if f['fileType'] == 'blend': - orig = f - return orig, 'blend' - - -def get_download_url(asset_data, scene_id, api_key, tcom=None, resolution='blend'): - ''''retrieves the download url. The server checks if user can download the item.''' - print('getting download url') - - headers = utils.get_headers(api_key) - - data = { - 'scene_uuid': scene_id - } - r = None - - res_file_info, resolution = get_core_file(asset_data, resolution) - print(res_file_info) - try: - r = requests.get(res_file_info['downloadUrl'], params=data, headers=headers) - except Exception as e: - print(e) - if tcom is not None: - tcom.error = True - if r == None: - if tcom is not None: - tcom.report = 'Connection Error' - tcom.error = True - return 'Connection Error' - print(r.status_code, r.text) - - if r.status_code < 400: - data = r.json() - url = data['filePath'] - - res_file_info['url'] = url - res_file_info['file_name'] = paths.extract_filename_from_url(url) - - # print(res_file_info, url) - print("URL:", url) - return True - - - - -def get_download_filepath(asset_data, resolution='blend', can_return_others=False, directory=None): - '''Get all possible paths of the asset and resolution. Usually global and local directory.''' - windows_path_limit = 250 - if directory is None: - directory = paths.get_download_dir(asset_data['assetType']) - - res_file, resolution = get_core_file(asset_data, resolution, find_closest_with_url=can_return_others) - name_slug = paths.slugify(asset_data['name']) - if len(name_slug) > 16: - name_slug = name_slug[:16] - asset_folder_name = f"{name_slug}_{asset_data['id']}" - - file_names = [] - - if not res_file: - return file_names - if res_file.get('url') is not None: - # Tweak the names a bit: - # remove resolution and blend words in names - # - fn = paths.extract_filename_from_url(res_file['url']) - n = server_2_local_filename(asset_data, fn) + """ + Returns core blend file. + """ + for f in asset_data["files"]: + if f["fileType"] == "blend": + orig = f + return orig, "blend" - asset_folder_path = os.path.join(directory, asset_folder_name) - if not os.path.exists(asset_folder_path): - os.makedirs(asset_folder_path) +def get_download_url(asset_data, scene_id, api_key, tcom=None, resolution="blend"): + """'retrieves the download url. 
The server checks if user can download the item.""" + print("getting download url") - file_name = os.path.join(asset_folder_path, n) - file_names.append(file_name) + headers = utils.get_headers(api_key) + + data = {"scene_uuid": scene_id} + r = None - print('file paths', file_names) + res_file_info, resolution = get_core_file(asset_data, resolution) + print(res_file_info) + try: + r = requests.get(res_file_info["downloadUrl"], params=data, headers=headers) + except Exception as e: + print(e) + if tcom is not None: + tcom.error = True + if r == None: + if tcom is not None: + tcom.report = "Connection Error" + tcom.error = True + return "Connection Error" + print(r.status_code, r.text) + + if r.status_code < 400: + data = r.json() + url = data["filePath"] + + res_file_info["url"] = url + res_file_info["file_name"] = paths.extract_filename_from_url(url) + + # print(res_file_info, url) + print("URL:", url) + return True + + +def get_download_filepath( + asset_data, resolution="blend", can_return_others=False, directory=None +): + """Get all possible paths of the asset and resolution. Usually global and local directory.""" + windows_path_limit = 250 + if directory is None: + directory = paths.get_download_dir(asset_data["assetType"]) + + res_file, resolution = get_core_file( + asset_data, resolution, find_closest_with_url=can_return_others + ) + name_slug = paths.slugify(asset_data["name"]) + if len(name_slug) > 16: + name_slug = name_slug[:16] + asset_folder_name = f"{name_slug}_{asset_data['id']}" + + file_names = [] + + if not res_file: + return file_names + if res_file.get("url") is not None: + # Tweak the names a bit: + # remove resolution and blend words in names + # + fn = paths.extract_filename_from_url(res_file["url"]) + n = server_2_local_filename(asset_data, fn) + + asset_folder_path = os.path.join(directory, asset_folder_name) + + if not os.path.exists(asset_folder_path): + os.makedirs(asset_folder_path) + + file_name = os.path.join(asset_folder_path, n) + file_names.append(file_name) + + print("file paths", file_names) - return file_names + return file_names -def check_existing(asset_data, resolution='blend', can_return_others=False, directory=None): - ''' check if the object exists on the hard drive''' - fexists = False +def check_existing( + asset_data, resolution="blend", can_return_others=False, directory=None +): + """check if the object exists on the hard drive""" + fexists = False - if asset_data.get('files') == None: - # this is because of some very odl files where asset data had no files structure. - return False + if asset_data.get("files") == None: + # this is because of some very odl files where asset data had no files structure. + return False - file_names = get_download_filepath(asset_data, resolution, can_return_others=can_return_others, directory=directory) + file_names = get_download_filepath( + asset_data, resolution, can_return_others=can_return_others, directory=directory + ) - print('check if file already exists' + str(file_names)) - if len(file_names) == 2: - # TODO this should check also for failed or running downloads. - # If download is running, assign just the running thread. if download isn't running but the file is wrong size, - # delete file and restart download (or continue downoad? if possible.) 
- if os.path.isfile(file_names[0]): # and not os.path.isfile(file_names[1]) - utils.copy_asset(file_names[0], file_names[1]) - elif not os.path.isfile(file_names[0]) and os.path.isfile( - file_names[1]): # only in case of changed settings or deleted/moved global dict. - utils.copy_asset(file_names[1], file_names[0]) + print("check if file already exists" + str(file_names)) + if len(file_names) == 2: + # TODO this should check also for failed or running downloads. + # If download is running, assign just the running thread. if download isn't running but the file is wrong size, + # delete file and restart download (or continue downoad? if possible.) + if os.path.isfile(file_names[0]): # and not os.path.isfile(file_names[1]) + utils.copy_asset(file_names[0], file_names[1]) + elif not os.path.isfile(file_names[0]) and os.path.isfile( + file_names[1] + ): # only in case of changed settings or deleted/moved global dict. + utils.copy_asset(file_names[1], file_names[0]) - if len(file_names) > 0 and os.path.isfile(file_names[0]): - fexists = True - return fexists + if len(file_names) > 0 and os.path.isfile(file_names[0]): + fexists = True + return fexists def delete_unfinished_file(file_name): - ''' - Deletes download if it wasn't finished. If the folder it's containing is empty, it also removes the directory - Parameters - ---------- - file_name - - Returns - ------- - None - ''' - try: - os.remove(file_name) - except Exception as e: - print(f'{e}') - asset_dir = os.path.dirname(file_name) - if len(os.listdir(asset_dir)) == 0: - os.rmdir(asset_dir) - return - - -def download_asset_file(asset_data, resolution='blend', api_key='', directory=None): - # this is a simple non-threaded way to download files for background resolution genenration tool - file_names = get_download_filepath(asset_data, resolution, directory=directory) # prefer global dir if possible. - if len(file_names) == 0: - return None - - file_name = file_names[0] - - if check_existing(asset_data, resolution=resolution, directory=directory): - # this sends the thread for processing, where another check should occur, since the file might be corrupted. - # print('not downloading, already in db') + """ + Deletes download if it wasn't finished. If the folder it's containing is empty, it also removes the directory + Parameters + ---------- + file_name + + Returns + ------- + None + """ + try: + os.remove(file_name) + except Exception as e: + print(f"{e}") + asset_dir = os.path.dirname(file_name) + if len(os.listdir(asset_dir)) == 0: + os.rmdir(asset_dir) + return + + +def download_asset_file(asset_data, resolution="blend", api_key="", directory=None): + # this is a simple non-threaded way to download files for background resolution genenration tool + file_names = get_download_filepath( + asset_data, resolution, directory=directory + ) # prefer global dir if possible. + if len(file_names) == 0: + return None + + file_name = file_names[0] + + if check_existing(asset_data, resolution=resolution, directory=directory): + # this sends the thread for processing, where another check should occur, since the file might be corrupted. 
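        # A local copy already exists, so the streamed download below is skipped
        # and the cached path is returned.  Only the file's presence is checked
        # here, not its size or checksum, hence the note about possible corruption.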
+ # print('not downloading, already in db') + return file_name + + download_canceled = False + + with open(file_name, "wb") as f: + print("Downloading %s" % file_name) + headers = utils.get_headers(api_key) + res_file_info, resolution = get_core_file(asset_data, resolution) + session = requests.Session() + + response = session.get(res_file_info["url"], stream=True) + total_length = response.headers.get("Content-Length") + + if total_length is None or int(total_length) < 1000: # no content length header + download_canceled = True + print(response.content) + else: + total_length = int(total_length) + dl = 0 + last_percent = 0 + percent = 0 + for data in response.iter_content(chunk_size=4096 * 10): + dl += len(data) + + # the exact output you're looking for: + fs_str = files_size_to_text(total_length) + + percent = int(dl * 100 / total_length) + if percent > last_percent: + last_percent = percent + # sys.stdout.write('\r') + # sys.stdout.write(f'Downloading {asset_data['name']} {fs_str} {percent}% ') # + int(dl * 50 / total_length) * 'x') + print( + f'Downloading {asset_data["name"]} {fs_str} {percent}% ' + ) # + int(dl * 50 / total_length) * 'x') + # sys.stdout.flush() + + # print(int(dl*50/total_length)*'x'+'\r') + f.write(data) + if download_canceled: + delete_unfinished_file(file_name) + return None + return file_name - download_canceled = False - with open(file_name, "wb") as f: - print("Downloading %s" % file_name) - headers = utils.get_headers(api_key) - res_file_info, resolution = get_core_file(asset_data, resolution) - session = requests.Session() +def download_asset(asset_data, resolution="blend", api_key="", directory=None): + """ + Download an asset non-threaded way. + Parameters + ---------- + asset_data - search result from elastic or assets endpoints from API - response = session.get(res_file_info['url'], stream=True) - total_length = response.headers.get('Content-Length') + Returns + ------- + path to the resulting asset file or None if asset isn't accessible + """ - if total_length is None or int(total_length) < 1000: # no content length header - download_canceled = True - print(response.content) - else: - total_length = int(total_length) - dl = 0 - last_percent = 0 - percent = 0 - for data in response.iter_content(chunk_size=4096 * 10): - dl += len(data) - - # the exact output you're looking for: - fs_str = files_size_to_text(total_length) - - percent = int(dl * 100 / total_length) - if percent > last_percent: - last_percent = percent - # sys.stdout.write('\r') - # sys.stdout.write(f'Downloading {asset_data['name']} {fs_str} {percent}% ') # + int(dl * 50 / total_length) * 'x') - print( - f'Downloading {asset_data["name"]} {fs_str} {percent}% ') # + int(dl * 50 / total_length) * 'x') - # sys.stdout.flush() - - # print(int(dl*50/total_length)*'x'+'\r') - f.write(data) - if download_canceled: - delete_unfinished_file(file_name) - return None - - return file_name - - -def download_asset(asset_data, resolution='blend', api_key='', directory=None): - ''' - Download an asset non-threaded way. 
- Parameters - ---------- - asset_data - search result from elastic or assets endpoints from API - - Returns - ------- - path to the resulting asset file or None if asset isn't accessible - ''' - - has_url = get_download_url(asset_data, SCENE_UUID, api_key, tcom=None, resolution='blend') # Resolution does not have any effect - if not has_url: - print("Could not get URL for the asset") - return None - - fpath = download_asset_file(asset_data, api_key=api_key, directory=directory) - return fpath + has_url = get_download_url( + asset_data, SCENE_UUID, api_key, tcom=None, resolution="blend" + ) # Resolution does not have any effect + if not has_url: + print("Could not get URL for the asset") + return None + + fpath = download_asset_file(asset_data, api_key=api_key, directory=directory) + return fpath diff --git a/blenderkit_server_utils/render_UVs.py b/blenderkit_server_utils/render_UVs.py index 703cd6b..c8c9e94 100644 --- a/blenderkit_server_utils/render_UVs.py +++ b/blenderkit_server_utils/render_UVs.py @@ -1,5 +1,4 @@ import bpy -import numpy as np # Sets up the camera within the given scene for rendering the UV layout. @@ -12,7 +11,7 @@ def setup_scene_camera(scene): # Configure the camera to use orthographic projection, # making it suitable for 2D UV layout rendering. - camera_data.type = 'ORTHO' + camera_data.type = "ORTHO" camera_data.ortho_scale = 1 # Adjust based on the size of your UV meshes. camera_object.location = (0.5, 0.5, 1) # Position the camera to capture all UVs. @@ -22,12 +21,12 @@ def set_render_settings(scene, filepath): # Enable transparency in the final render to accommodate for transparent materials. scene.render.film_transparent = True # Use the Cycles render engine for high-quality rendering. - scene.render.engine = 'CYCLES' + scene.render.engine = "CYCLES" scene.cycles.samples = 5 # Reduce samples for faster rendering of simple scenes. # Set output format to WEBP, resolution, and file path for saving the render. - scene.render.image_settings.file_format = 'WEBP' - scene.render.image_settings.color_mode = 'RGB' + scene.render.image_settings.file_format = "WEBP" + scene.render.image_settings.color_mode = "RGB" scene.render.image_settings.quality = 60 scene.render.resolution_x = 1024 @@ -43,7 +42,7 @@ def render_and_save(scene): # Cleans up by removing the temporary scene and its objects after rendering. def cleanup_scene(scene): - bpy.ops.object.select_all(action='DESELECT') + bpy.ops.object.select_all(action="DESELECT") for obj in scene.objects: obj.select_set(True) bpy.ops.object.delete() # Delete all objects in the scene. @@ -51,10 +50,10 @@ def cleanup_scene(scene): # Utility function to set the active scene and camera, ensuring correct rendering settings. -def set_scene(name=''): - print(f'setting scene {name}') +def set_scene(name=""): + print(f"setting scene {name}") bpy.context.window.scene = bpy.data.scenes[name] - c = bpy.context.scene.objects.get('Camera') + c = bpy.context.scene.objects.get("Camera") if c is not None: bpy.context.scene.camera = c bpy.context.view_layer.update() @@ -64,7 +63,7 @@ def set_scene(name=''): def export_uvs_as_webps(obs, filepath): original_scene = bpy.context.scene uv_scene = bpy.data.scenes.new("UVScene") # Create a new scene for UV rendering. - set_scene(name='UVScene') + set_scene(name="UVScene") setup_scene_camera(uv_scene) build_uv_meshes(obs, uv_scene) # Generate mesh representations of UVs. 
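    # set_render_settings() below points the output at `filepath` and switches the
    # format to WEBP with a transparent film, so the saved image is a single flat
    # render of every object's UV layout framed in the 0..1 UV square.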
set_render_settings(uv_scene, filepath) @@ -75,30 +74,39 @@ def export_uvs_as_webps(obs, filepath): # Retrieves or creates a material designed for rendering UV layouts. def get_UV_material(): - m = bpy.data.materials.get('UV_RENDER_MATERIAL') + m = bpy.data.materials.get("UV_RENDER_MATERIAL") if m is None: - m = bpy.data.materials.new('UV_RENDER_MATERIAL') + m = bpy.data.materials.new("UV_RENDER_MATERIAL") m.use_nodes = True nodes = m.node_tree.nodes links = m.node_tree.links nodes.clear() # Start with a fresh node setup. # Set up nodes for a material that's partially transparent and emissive. - emission_node = nodes.new(type='ShaderNodeEmission') - emission_node.inputs['Color'].default_value = (1, 1, 1, 1) # White color for emission. - emission_node.inputs['Strength'].default_value = 1.0 # Emission strength. + emission_node = nodes.new(type="ShaderNodeEmission") + emission_node.inputs["Color"].default_value = ( + 1, + 1, + 1, + 1, + ) # White color for emission. + emission_node.inputs["Strength"].default_value = 1.0 # Emission strength. - transparent_node = nodes.new(type='ShaderNodeBsdfTransparent') + transparent_node = nodes.new(type="ShaderNodeBsdfTransparent") - mix_shader_node = nodes.new(type='ShaderNodeMixShader') - mix_shader_node.inputs['Fac'].default_value = 0.05 # Control the mix between transparent and emission. + mix_shader_node = nodes.new(type="ShaderNodeMixShader") + mix_shader_node.inputs["Fac"].default_value = ( + 0.05 # Control the mix between transparent and emission. + ) - material_output_node = nodes.new('ShaderNodeOutputMaterial') + material_output_node = nodes.new("ShaderNodeOutputMaterial") # Connect the nodes to set up the material. - links.new(emission_node.outputs['Emission'], mix_shader_node.inputs[2]) - links.new(transparent_node.outputs['BSDF'], mix_shader_node.inputs[1]) - links.new(mix_shader_node.outputs['Shader'], material_output_node.inputs['Surface']) + links.new(emission_node.outputs["Emission"], mix_shader_node.inputs[2]) + links.new(transparent_node.outputs["BSDF"], mix_shader_node.inputs[1]) + links.new( + mix_shader_node.outputs["Shader"], material_output_node.inputs["Surface"] + ) return m @@ -111,21 +119,30 @@ def build_uv_meshes(obs, scene): me = ob.data # The mesh data of the object. # Skip objects without UV layers. - if len(ob.data.uv_layers) == 0 or ob.data.uv_layers.active is None or len(ob.data.uv_layers.active.data) == 0: + if ( + len(ob.data.uv_layers) == 0 + or ob.data.uv_layers.active is None + or len(ob.data.uv_layers.active.data) == 0 + ): continue uv_layer = me.uv_layers.active # The active UV layer of the mesh. # Retrieve UV coordinates. - uvs = np.empty((2 * len(me.loops), 1)) + uvs = [0] * (2 * len(me.loops)) uv_layer.data.foreach_get("uv", uvs) - x, y = uvs.reshape((-1, 2)).T - z = np.zeros(len(x)) # Create a Z-axis array filled with zeros for 2D UV layout. + x = uvs[0::2] + y = uvs[1::2] + z = [0] * len(x) # Create a Z-axis array filled with zeros for 2D UV layout. # Create a new mesh for the UV layout. uvme = bpy.data.meshes.new("UVMesh_" + ob.name) - verts = np.array((x, y, z)).T # Combine x, y, z coordinates into vertices. - faces = [p.loop_indices for p in me.polygons] # Create faces from the polygons of the original mesh. + verts = [ + (x[i], y[i], z[i]) for i in range(len(x)) + ] # Combine x, y, z coordinates into vertices. + faces = [ + p.loop_indices for p in me.polygons + ] # Create faces from the polygons of the original mesh. # Convert UV data to mesh data. 
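        # foreach_get("uv", uvs) fills the flat list as [u0, v0, u1, v1, ...];
        # the step slicing above de-interleaves it into x/y without numpy.  Each
        # UV loop becomes a vertex and each polygon's loop_indices become a face,
        # so from_pydata() below rebuilds the UV layout as flat geometry at z=0.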
uvme.from_pydata(verts, [], faces) @@ -149,8 +166,7 @@ def build_uv_meshes(obs, scene): # Duplicate the object to apply a wireframe modifier for visual distinction of edges. # only do this for smaller objects. bpy.ops.object.duplicate() - bpy.ops.object.modifier_add(type='WIREFRAME') + bpy.ops.object.modifier_add(type="WIREFRAME") # Adjust the wireframe modifier to make the lines very thin. bpy.context.object.modifiers["Wireframe"].thickness = 0.001 - diff --git a/blenderkit_server_utils/send_to_bg.py b/blenderkit_server_utils/send_to_bg.py index fcf6f49..2988989 100644 --- a/blenderkit_server_utils/send_to_bg.py +++ b/blenderkit_server_utils/send_to_bg.py @@ -8,63 +8,67 @@ def version_to_float(version): - vars = version.split('.') - version = int(vars[0]) + .01 * int(vars[1]) + vars = version.split(".") + version = int(vars[0]) + 0.01 * int(vars[1]) if len(vars) > 2: - version += .0001 * int(vars[2]) + version += 0.0001 * int(vars[2]) return version def get_blender_version_from_blend(blend_file_path): # get blender version from blend file, works only for 2.8+ - with open(blend_file_path, 'rb') as blend_file: + with open(blend_file_path, "rb") as blend_file: # Read the first 12 bytes header = blend_file.read(24) # Check for compression - if header[0:7] == b'BLENDER': + if header[0:7] == b"BLENDER": # If the file is uncompressed, the version is in bytes 9-11 version_bytes = header[9:12] version = (chr(version_bytes[0]), chr(version_bytes[2])) - elif header[12:19] == b'BLENDER': + elif header[12:19] == b"BLENDER": # If the file is compressed, the version is in bytes 8-10 version_bytes = header[21:24] version = (chr(version_bytes[0]), chr(version_bytes[2])) else: version_bytes = None - version = ('2', '93') # last supported version by now + version = ("2", "93") # last supported version by now print(version) - return '.'.join(version) + return ".".join(version) -def get_blender_binary(asset_data, file_path='', binary_type='CLOSEST'): +def get_blender_binary(asset_data, file_path="", binary_type="CLOSEST"): # pick the right blender version for asset processing blenders_path = paths.BLENDERS_PATH blenders = [] # Get available blender versions for fn in os.listdir(blenders_path): blenders.append((version_to_float(fn), fn)) - if binary_type == 'CLOSEST': + if binary_type == "CLOSEST": # get asset's blender upload version - asset_blender_version = version_to_float(asset_data['sourceAppVersion']) - print('asset blender version', asset_blender_version) + asset_blender_version = version_to_float(asset_data["sourceAppVersion"]) + print("asset blender version", asset_blender_version) asset_blender_version_from_blend = get_blender_version_from_blend(file_path) - print('asset blender version from blend', asset_blender_version_from_blend) + print("asset blender version from blend", asset_blender_version_from_blend) - asset_blender_version_from_blend = version_to_float(asset_blender_version_from_blend) - asset_blender_version = max(asset_blender_version, asset_blender_version_from_blend) - print('asset blender version picked', asset_blender_version) + asset_blender_version_from_blend = version_to_float( + asset_blender_version_from_blend + ) + asset_blender_version = max( + asset_blender_version, asset_blender_version_from_blend + ) + print("asset blender version picked", asset_blender_version) blender_target = min(blenders, key=lambda x: abs(x[0] - asset_blender_version)) - if binary_type == 'NEWEST': + if binary_type == "NEWEST": blender_target = max(blenders, key=lambda x: x[0]) # use latest blender 
version for hdrs - if asset_data['assetType'] == 'hdr': + if asset_data["assetType"] == "hdr": blender_target = blenders[-1] print(blender_target) - ext = '.exe' if sys.platform == 'win32' else '' - binary = os.path.join(blenders_path, blender_target[1], f'blender{ext}') + ext = ".exe" if sys.platform == "win32" else "" + binary = os.path.join(blenders_path, blender_target[1], f"blender{ext}") print(binary) return binary @@ -80,21 +84,27 @@ def get_process_flags(): REALTIME_PRIORITY_CLASS = 0x00000100 flags = BELOW_NORMAL_PRIORITY_CLASS - if sys.platform != 'win32': # TODO test this on windows + if sys.platform != "win32": # TODO test this on windows flags = 0 return flags -def send_to_bg(asset_data: object, asset_file_path: object = '', template_file_path: object = '', temp_folder: object = '', result_path: object = '', - result_folder: object = '', - api_key: object = '', - script: object = '', - addons: object = '', - binary_type: object = 'CLOSEST', - verbosity_level: object = 2, - binary_path: str = "", - ) -> object: - ''' + +def send_to_bg( + asset_data: object, + asset_file_path: object = "", + template_file_path: object = "", + temp_folder: object = "", + result_path: object = "", + result_folder: object = "", + api_key: object = "", + script: object = "", + addons: object = "", + binary_type: object = "CLOSEST", + verbosity_level: object = 2, + binary_path: str = "", +) -> object: + """ Send varioust task to a new blender instance that runs and closes after finishing the task. This function waits until the process finishes. The function tries to set the same bpy.app.debug_value in the instance of Blender that is run. @@ -115,76 +125,89 @@ def send_to_bg(asset_data: object, asset_file_path: object = '', template_file_p Returns ------- None - ''' + """ def reader_thread(pipe, func): - for line in iter(pipe.readline, b''): + for line in iter(pipe.readline, b""): func(line.decode().strip()) pipe.close() if binary_path != "": print(f"Blender binary path: {binary_path}") else: - binary_path = get_blender_binary(asset_data, file_path=asset_file_path, binary_type=binary_type) + binary_path = get_blender_binary( + asset_data, file_path=asset_file_path, binary_type=binary_type + ) own_temp_folder = False - if temp_folder == '': + if temp_folder == "": temp_folder = tempfile.mkdtemp() own_temp_folder = True data = { - 'file_path': asset_file_path, - 'result_filepath': result_path, - 'result_folder': result_folder, - 'asset_data': asset_data, - 'api_key': api_key, - 'temp_folder': temp_folder, + "file_path": asset_file_path, + "result_filepath": result_path, + "result_folder": result_folder, + "asset_data": asset_data, + "api_key": api_key, + "temp_folder": temp_folder, } - datafile = os.path.join(temp_folder, 'resdata.json').replace('\\', '\\\\') + datafile = os.path.join(temp_folder, "resdata.json").replace("\\", "\\\\") script_path = os.path.dirname(os.path.realpath(__file__)) - with open(datafile, 'w', encoding='utf-8') as s: + with open(datafile, "w", encoding="utf-8") as s: json.dump(data, s, ensure_ascii=False, indent=4) - print('opening Blender instance to do processing - ', script) + print("opening Blender instance to do processing - ", script) # exclude hdrs from reading as .blend - if template_file_path == '': + if template_file_path == "": template_file_path = asset_file_path command = [ binary_path, "--background", - # "--factory-startup", + "--factory-startup", "-noaudio", template_file_path, - "--python", os.path.join(paths.BG_SCRIPTS_PATH, script), - "--", datafile + 
"--python", + os.path.join(paths.BG_SCRIPTS_PATH, script), + "--", + datafile, ] - if addons != '': - addons = f'--addons {addons}' + if addons != "": + addons = f"--addons {addons}" command.insert(3, addons) - - # Other code remains the same ... - stdout_val, stderr_val = subprocess.PIPE, subprocess.PIPE - - with subprocess.Popen(command, stdout=stdout_val, stderr=stderr_val, creationflags=get_process_flags()) as proc: + with subprocess.Popen( + command, stdout=stdout_val, stderr=stderr_val, creationflags=get_process_flags() + ) as proc: if verbosity_level == 2: - stdout_thread = threading.Thread(target=reader_thread, - args=(proc.stdout, lambda line: print('STDOUT:', line))) - stderr_thread = threading.Thread(target=reader_thread, - args=(proc.stderr, lambda line: print('STDERR:', line))) + stdout_thread = threading.Thread( + target=reader_thread, + args=(proc.stdout, lambda line: print("STDOUT:", line)), + ) + stderr_thread = threading.Thread( + target=reader_thread, + args=(proc.stderr, lambda line: print("STDERR:", line)), + ) elif verbosity_level == 1: - stdout_thread = threading.Thread(target=reader_thread, - args=(proc.stdout, lambda _: None)) - stderr_thread = threading.Thread(target=reader_thread, - args=(proc.stderr, lambda line: print('STDERR:', line))) + stdout_thread = threading.Thread( + target=reader_thread, args=(proc.stdout, lambda _: None) + ) + stderr_thread = threading.Thread( + target=reader_thread, + args=(proc.stderr, lambda line: print("STDERR:", line)), + ) else: - stdout_thread = threading.Thread(target=reader_thread, args=(proc.stdout, lambda _: None)) - stderr_thread = threading.Thread(target=reader_thread, args=(proc.stderr, lambda _: None)) + stdout_thread = threading.Thread( + target=reader_thread, args=(proc.stdout, lambda _: None) + ) + stderr_thread = threading.Thread( + target=reader_thread, args=(proc.stderr, lambda _: None) + ) stdout_thread.start() stderr_thread.start() diff --git a/blenderkit_server_utils/upload.py b/blenderkit_server_utils/upload.py index 8db819c..82ad23e 100644 --- a/blenderkit_server_utils/upload.py +++ b/blenderkit_server_utils/upload.py @@ -3,8 +3,9 @@ import requests from . 
import utils, paths + class upload_in_chunks(object): - def __init__(self, filename, chunksize=1 << 13, report_name='file'): + def __init__(self, filename, chunksize=1 << 13, report_name="file"): self.filename = filename self.chunksize = chunksize self.totalsize = os.path.getsize(filename) @@ -12,7 +13,7 @@ def __init__(self, filename, chunksize=1 << 13, report_name='file'): self.report_name = report_name def __iter__(self): - with open(self.filename, 'rb') as file: + with open(self.filename, "rb") as file: while True: data = file.read(self.chunksize) if not data: @@ -20,7 +21,9 @@ def __iter__(self): break self.readsofar += len(data) percent = self.readsofar * 1e2 / self.totalsize - print(f"Uploading {self.report_name} {percent}%",) + print( + f"Uploading {self.report_name} {percent}%", + ) # bg_blender.progress('uploading %s' % self.report_name, percent) # sys.stderr.write("\r{percent:3.0f}%".format(percent=percent)) @@ -29,21 +32,24 @@ def __iter__(self): def __len__(self): return self.totalsize + def upload_file(upload_data, f): - headers = utils.get_headers(upload_data['token']) - version_id = upload_data['id'] + headers = utils.get_headers(upload_data["token"]) + version_id = upload_data["id"] message = f"uploading {f['type']} {os.path.basename(f['file_path'])}" print(message) upload_info = { - 'assetId': version_id, - 'fileType': f['type'], - 'fileIndex': f['index'], - 'originalFilename': os.path.basename(f['file_path']) + "assetId": version_id, + "fileType": f["type"], + "fileIndex": f["index"], + "originalFilename": os.path.basename(f["file_path"]), } - upload_create_url = paths.get_api_url() + '/uploads/' - upload = requests.post(upload_create_url, json=upload_info, headers=headers, verify=True) + upload_create_url = paths.get_api_url() + "/uploads/" + upload = requests.post( + upload_create_url, json=upload_info, headers=headers, verify=True + ) upload = upload.json() chunk_size = 1024 * 1024 * 2 @@ -54,16 +60,28 @@ def upload_file(upload_data, f): try: session = requests.Session() session.trust_env = True - upload_response = session.put(upload['s3UploadUrl'], - data=upload_in_chunks(f['file_path'], chunk_size, f['type']), - stream=True, verify=True) + upload_response = session.put( + upload["s3UploadUrl"], + data=upload_in_chunks(f["file_path"], chunk_size, f["type"]), + stream=True, + verify=True, + ) if 250 > upload_response.status_code > 199: - upload_done_url = paths.get_api_url() + '/uploads_s3/' + upload['id'] + '/upload-file/' - upload_response = requests.post(upload_done_url, headers=headers, verify=True) + upload_done_url = ( + paths.get_api_url() + + "/uploads_s3/" + + upload["id"] + + "/upload-file/" + ) + upload_response = requests.post( + upload_done_url, headers=headers, verify=True + ) # print(upload_response) # print(upload_response.text) - print(f"Finished file upload: {os.path.basename(f['file_path'])}",) + print( + f"Finished file upload: {os.path.basename(f['file_path'])}", + ) return True else: message = f"Upload failed, retry. File : {f['type']} {os.path.basename(f['file_path'])}" @@ -74,13 +92,15 @@ def upload_file(upload_data, f): message = f"Upload failed, retry. 
File : {f['type']} {os.path.basename(f['file_path'])}" print(message) import time + time.sleep(1) # confirm single file upload to bkit server return False + def upload_files(upload_data, files): - '''uploads several files in one run''' + """uploads several files in one run""" uploaded_all = True for f in files: uploaded = upload_file(upload_data, f) @@ -89,21 +109,23 @@ def upload_files(upload_data, files): print(f"Uploaded all files for asset {upload_data['displayName']}") return uploaded_all -def upload_resolutions(files, asset_data, api_key = ''): + +def upload_resolutions(files, asset_data, api_key=""): upload_data = { - "name": asset_data['name'], - "displayName": asset_data['displayName'], + "name": asset_data["name"], + "displayName": asset_data["displayName"], "token": api_key, - "id": asset_data['id'] + "id": asset_data["id"], } uploaded = upload_files(upload_data, files) if uploaded: - print('upload finished successfully') + print("upload finished successfully") else: - print('upload failed.') + print("upload failed.") + -def get_individual_parameter(asset_id='', param_name='', api_key = ''): +def get_individual_parameter(asset_id="", param_name="", api_key=""): url = f"{paths.get_api_url()}/assets/{asset_id}/parameter/{param_name}/" headers = utils.get_headers(api_key) r = requests.get(url, headers=headers) # files = files, @@ -111,44 +133,80 @@ def get_individual_parameter(asset_id='', param_name='', api_key = ''): print(url) return parameter -def patch_individual_parameter(asset_id='', param_name='', param_value='', api_key = ''): + +def patch_individual_parameter(asset_id="", param_name="", param_value="", api_key=""): # changes individual parameter in the parameters dictionary of the assets url = f"{paths.get_api_url()}/assets/{asset_id}/parameter/{param_name}/" headers = utils.get_headers(api_key) metadata_dict = {"value": param_value} print(url) - r = requests.put(url, json=metadata_dict, headers=headers, verify=True) # files = files, + r = requests.put( + url, json=metadata_dict, headers=headers, verify=True + ) # files = files, print(r.text) print(r.status_code) -def delete_individual_parameter(asset_id='', param_name='', param_value='', api_key = ''): +def delete_individual_parameter(asset_id="", param_name="", param_value="", api_key=""): # changes individual parameter in the parameters dictionary of the assets url = f"{paths.get_api_url()}/assets/{asset_id}/parameter/{param_name}/" headers = utils.get_headers(api_key) metadata_dict = {"value": param_value} print(url) - r = requests.delete(url, json=metadata_dict, headers=headers, verify=True) # files = files, + r = requests.delete( + url, json=metadata_dict, headers=headers, verify=True + ) # files = files, print(r.text) print(r.status_code) + def patch_asset_empty(asset_id, api_key): - ''' - This function patches the asset for the purpose of it getting a reindex. - Should be removed once this is fixed on the server and - the server is able to reindex after uploads of resolutions - Returns - ------- - ''' - upload_data = { - } - url = f'{paths.get_api_url()}/assets/{asset_id}/' + """ + This function patches the asset for the purpose of it getting a reindex. 
+ Should be removed once this is fixed on the server and + the server is able to reindex after uploads of resolutions + Returns + ------- + """ + upload_data = {} + url = f"{paths.get_api_url()}/assets/{asset_id}/" headers = utils.get_headers(api_key) - print('patching asset with empty data') + print("patching asset with empty data") try: - r = requests.patch(url, json=upload_data, headers=headers, verify=True) # files = files, + r = requests.patch( + url, json=upload_data, headers=headers, verify=True + ) # files = files, except requests.exceptions.RequestException as e: print(e) - return {'CANCELLED'} - print('patched asset with empty data') - return {'FINISHED'} + return {"CANCELLED"} + print("patched asset with empty data") + return {"FINISHED"} + + +def upload_asset_metadata(upload_data, api_key): + url = f"{paths.get_api_url()}/assets/" + headers = utils.get_headers(api_key) + print("uploading new asset metadata") + try: + r = requests.post( + url, json=upload_data, headers=headers, verify=True + ) # files = files, + print(r.text) + # result should be json + result = r.json() + print(result) + return result + except requests.exceptions.RequestException as e: + print(e) + return {"CANCELLED"} + + +def patch_asset_metadata(asset_id, api_key, data={}): + print("patching asset metadata") + + headers = utils.get_headers(api_key) + + url = f"{paths.get_api_url()}/assets/{asset_id}/" + print(url) + r = requests.patch(url, json=data, headers=headers, verify=True) # files = files, + print(r.text) diff --git a/generate_model_validations.py b/generate_model_validations.py index 78c6f16..1f0dfc7 100644 --- a/generate_model_validations.py +++ b/generate_model_validations.py @@ -11,6 +11,7 @@ import pathlib from blenderkit_server_utils import download, search, paths, upload, send_to_bg, utils + # Assuming necessary imports are done at the top of the script from blenderkit_server_utils.cloudflare_storage import CloudflareStorage @@ -18,14 +19,16 @@ results = [] page_size = 100 -MAX_ASSETS = int(os.environ.get('MAX_ASSET_COUNT', '100')) +MAX_ASSETS = int(os.environ.get("MAX_ASSET_COUNT", "100")) DONE_ASSETS_COUNT = 0 -DO_ASSETS=200 +DO_ASSETS = 200 ALL_FOLDERS = set() + + def render_model_validation_thread(asset_data, api_key): - ''' + """ A thread that: 1.downloads file 2.starts an instance of Blender that renders the validation @@ -39,20 +42,20 @@ def render_model_validation_thread(asset_data, api_key): Returns ------- - ''' + """ global DONE_ASSETS_COUNT, ALL_FOLDERS destination_directory = tempfile.gettempdir() - if len(asset_data['files']) == 0: - print('no files for asset %s' % asset_data['name']) + if len(asset_data["files"]) == 0: + print("no files for asset %s" % asset_data["name"]) return - upload_id = asset_data['files'][0]['downloadUrl'].split('/')[-2] + upload_id = asset_data["files"][0]["downloadUrl"].split("/")[-2] # Check if the asset has already been processed # stop using author folder result_file_name = f"{upload_id}" - predicted_filename = f'{result_file_name}.mkv'#let's try to super simplify now. + predicted_filename = f"{result_file_name}.mkv" # let's try to super simplify now. 
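To make the naming scheme just above concrete, here is a small worked example of how the per-asset result name is derived from the first file's download URL; the URL and id are invented for illustration, real values come from the search API:

    # illustrative values only; real asset_data comes from the BlenderKit search API
    asset_data = {
        "files": [
            {"downloadUrl": "https://www.blenderkit.com/download/0a1b2c3d-e4f5/asset.blend"}
        ]
    }
    upload_id = asset_data["files"][0]["downloadUrl"].split("/")[-2]  # "0a1b2c3d-e4f5"
    result_file_name = f"{upload_id}"                                 # "0a1b2c3d-e4f5"
    predicted_filename = f"{result_file_name}.mkv"                    # "0a1b2c3d-e4f5.mkv"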
- #print('all validation folders', all_validation_folders) + # print('all validation folders', all_validation_folders) # check if the directory exists on the drive # we check file by file, since the comparison with folder contents is not reliable and would potentially @@ -60,31 +63,42 @@ def render_model_validation_thread(asset_data, api_key): # Initialize Cloudflare Storage with your credentials # f_exists = result_file_name in ALL_FOLDERS cloudflare_storage = CloudflareStorage( - access_key=os.getenv('CF_ACCESS_KEY'), - secret_key=os.getenv('CF_ACCESS_SECRET'), - endpoint_url=os.getenv('CF_ENDPOINT_URL') + access_key=os.getenv("CF_ACCESS_KEY"), + secret_key=os.getenv("CF_ACCESS_SECRET"), + endpoint_url=os.getenv("CF_ENDPOINT_URL"), ) - f_exists = cloudflare_storage.folder_exists('validation-renders', upload_id) - #let's not skip now. + f_exists = cloudflare_storage.folder_exists("validation-renders", upload_id) + # let's not skip now. if f_exists: - # purge the folder - # cloudflare_storage.delete_folder_contents('validation-renders', upload_id) - print(f'directory {upload_id} exists, skipping') - return + # check if the result folder is empty only with index.json, if yes, purge it and continue. Otherwise skip + files = cloudflare_storage.list_folder_contents("validation-renders", upload_id) + if len(files) == 1 and files[0] == "index.json": + # purge the folder + cloudflare_storage.delete_folder_contents("validation-renders", upload_id) + print(f"Purged the folder: {upload_id}") + else: + print(f"directory {upload_id} exists, skipping") + return # Download asset - asset_file_path = download.download_asset(asset_data, api_key=api_key, directory=destination_directory) + asset_file_path = download.download_asset( + asset_data, api_key=api_key, directory=destination_directory + ) # Unpack asset - send_to_bg.send_to_bg(asset_data, asset_file_path=asset_file_path, script='unpack_asset_bg.py') + send_to_bg.send_to_bg( + asset_data, asset_file_path=asset_file_path, script="unpack_asset_bg.py" + ) # find template file current_dir = pathlib.Path(__file__).parent.resolve() - template_file_path = os.path.join(current_dir, 'blend_files', 'model_validation_static_renders.blend') + template_file_path = os.path.join( + current_dir, "blend_files", "model_validation_static_renders.blend" + ) # Send to background to generate resolutions - #generated temp folder - #.blend gets resaved there and also /tmp renders of images + # generated temp folder + # .blend gets resaved there and also /tmp renders of images temp_folder = tempfile.mkdtemp() # result folder where the stuff for upload to drive goes @@ -92,87 +106,99 @@ def render_model_validation_thread(asset_data, api_key): os.makedirs(result_folder, exist_ok=True) # local file path of rendered image - result_path = os.path.join(temp_folder, - result_folder, - predicted_filename) + result_path = os.path.join(temp_folder, result_folder, predicted_filename) # send to background to render - send_to_bg.send_to_bg(asset_data, - asset_file_path=asset_file_path, - template_file_path=template_file_path, - result_path=result_path, - result_folder=result_folder, - temp_folder=temp_folder, - script='model_validation_bg_render.py', - binary_type='NEWEST', - verbosity_level=2) + send_to_bg.send_to_bg( + asset_data, + asset_file_path=asset_file_path, + template_file_path=template_file_path, + result_path=result_path, + result_folder=result_folder, + temp_folder=temp_folder, + script="model_validation_bg_render.py", + binary_type="NEWEST", + verbosity_level=2, + ) # 
generate gltf: # result is a json... - result_path = os.path.join(temp_folder, asset_data['assetBaseId'] + '_resdata.json') + result_path = os.path.join(temp_folder, asset_data["assetBaseId"] + "_resdata.json") - send_to_bg.send_to_bg(asset_data, asset_file_path=asset_file_path, - result_path=result_path, - script='gltf_bg_blender.py') + send_to_bg.send_to_bg( + asset_data, + asset_file_path=asset_file_path, + result_path=result_path, + script="gltf_bg_blender.py", + ) # gltf is a .glb in the same dir as the .blend asset file - gltf_path = asset_file_path.replace('.blend', '.glb') + gltf_path = asset_file_path.replace(".blend", ".glb") # move gltf to result folder try: shutil.move(gltf_path, result_folder) except Exception as e: - print(f'Error while moving {gltf_path} to {result_folder}: {e}') + print(f"Error while moving {gltf_path} to {result_folder}: {e}") DONE_ASSETS_COUNT += 1 # part of the results is in temfolder/tmp/Render, so let's move all of it's files to the result folder, # so that there are no subdirectories and everything is in one folder. # and then upload the result folder to drive - render_folder = os.path.join(temp_folder, 'tmp', 'Render') + render_folder = os.path.join(temp_folder, "tmp", "Render") try: file_names = os.listdir(render_folder) for file_name in file_names: - shutil.move(os.path.join(render_folder, file_name), result_folder) + shutil.move(os.path.join(render_folder, file_name), result_folder) except Exception as e: - print(f'Error while moving files from {render_folder} to {result_folder}: {e}') + print(f"Error while moving files from {render_folder} to {result_folder}: {e}") # Upload result # # Instead of using Google Drive for upload, use Cloudflare Storage # Initialize the CloudFlare service cloudflare_storage = CloudflareStorage( - access_key=os.getenv('CF_ACCESS_KEY'), - secret_key=os.getenv('CF_ACCESS_SECRET'), - endpoint_url=os.getenv('CF_ENDPOINT_URL') + access_key=os.getenv("CF_ACCESS_KEY"), + secret_key=os.getenv("CF_ACCESS_SECRET"), + endpoint_url=os.getenv("CF_ENDPOINT_URL"), + ) + cloudflare_storage.upload_folder( + result_folder, + bucket_name="validation-renders", + cloudflare_folder_prefix=result_file_name, ) - cloudflare_storage.upload_folder(result_folder, bucket_name='validation-renders', cloudflare_folder_prefix=result_file_name) - #cleanup + # cleanup try: shutil.rmtree(temp_folder) except Exception as e: - print(f'Error while deleting temp folder {temp_folder}: {e}') + print(f"Error while deleting temp folder {temp_folder}: {e}") return -def iterate_assets(filepath, thread_function=None, process_count=12, api_key=''): - ''' iterate through all assigned assets, check for those which need generation and send them to res gen''' +def iterate_assets(filepath, thread_function=None, process_count=12, api_key=""): + """iterate through all assigned assets, check for those which need generation and send them to res gen""" assets = search.load_assets_list(filepath) threads = [] for asset_data in assets: # if DONE_ASSETS_COUNT >= DO_ASSETS: # break if asset_data is not None: - print('downloading and generating validation render for %s' % asset_data['name']) - thread = threading.Thread(target=thread_function, args=(asset_data, api_key)) + print( + "downloading and generating validation render for %s" + % asset_data["name"] + ) + thread = threading.Thread( + target=thread_function, args=(asset_data, api_key) + ) thread.start() threads.append(thread) while len(threads) > process_count - 1: for t in threads: if not t.is_alive(): threads.remove(t) - 
break; + break time.sleep(0.1) # wait for a bit to finish all threads @@ -180,9 +206,9 @@ def main(): # cleanup the drive folder # get all folders from cloudflare to faster check if the folder exists cloudflare_storage = CloudflareStorage( - access_key=os.getenv('CF_ACCESS_KEY'), - secret_key=os.getenv('CF_ACCESS_SECRET'), - endpoint_url=os.getenv('CF_ENDPOINT_URL') + access_key=os.getenv("CF_ACCESS_KEY"), + secret_key=os.getenv("CF_ACCESS_SECRET"), + endpoint_url=os.getenv("CF_ENDPOINT_URL"), ) # ALL_FOLDERS = cloudflare_storage.list_all_folders(bucket_name='validation-renders') # print('deleting old files') @@ -191,22 +217,32 @@ def main(): # Get os temp directory dpath = tempfile.gettempdir() - filepath = os.path.join(dpath, 'assets_for_validation.json') + filepath = os.path.join(dpath, "assets_for_validation.json") params = { - 'order': 'last_blend_upload', - 'asset_type': 'model', - 'verification_status': 'uploaded' + "order": "last_blend_upload", + "asset_type": "model", + "verification_status": "uploaded", } - search.get_search_simple(params, filepath=filepath, page_size=min(MAX_ASSETS, 100), max_results=MAX_ASSETS, - api_key=paths.API_KEY) + search.get_search_simple( + params, + filepath=filepath, + page_size=min(MAX_ASSETS, 100), + max_results=MAX_ASSETS, + api_key=paths.API_KEY, + ) assets = search.load_assets_list(filepath) - print('ASSETS TO BE PROCESSED') + print("ASSETS TO BE PROCESSED") for i, a in enumerate(assets): - print(a['name'], a['assetType']) + print(a["name"], a["assetType"]) - iterate_assets(filepath, process_count=1, api_key=paths.API_KEY, thread_function=render_model_validation_thread) + iterate_assets( + filepath, + process_count=1, + api_key=paths.API_KEY, + thread_function=render_model_validation_thread, + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/sync_TwinBru_library.py b/sync_TwinBru_library.py new file mode 100644 index 0000000..1177095 --- /dev/null +++ b/sync_TwinBru_library.py @@ -0,0 +1,345 @@ +"""Script to sync twinbru library to blenderkit. +Required environment variables: +BLENDERKIT_API_KEY - API key to be used +BLENDERS_PATH - path to the folder with blender versions + +""" + +import csv +import json +import requests +import os +import tempfile +import time +from datetime import datetime +import pathlib +import re +import threading +import zipfile +from blenderkit_server_utils import download, search, paths, upload, send_to_bg + +results = [] +page_size = 100 + +MAX_ASSETS = int(os.environ.get("MAX_ASSET_COUNT", "100")) +SKIP_UPLOAD = os.environ.get("SKIP_UPLOAD", False) == "True" + + +def read_csv_file(file_path): + """ + Read a CSV file and return a list of dictionaries. + """ + try: + with open(file_path, "r", encoding="utf-8-sig") as file: + reader = csv.DictReader(file) + return [row for row in reader] + except UnicodeDecodeError: + # If UTF-8 fails, try with ISO-8859-1 encoding + with open(file_path, "r", encoding="iso-8859-1") as file: + reader = csv.DictReader(file) + return [row for row in reader] + except Exception as e: + print(f"Error reading CSV file: {e}") + return [] + + +def download_file(url, filepath): + """ + Download a file from a URL to a filepath. + Write progress to console. 
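For reference, read_csv_file (defined above) is expected to yield one dict per fabric; the column names below are the ones accessed later in this file, while the values are invented examples:

    # minimal illustrative row as returned by read_csv_file(); all values are made up
    example_row = {
        "name": "velvet_royal_blue_1234",
        "reference": "TB-1234",
        "brand": "TwinBru",
        "collection_name": "Velvet Classics",
        "weight_g_per_m_squared": "350",
        "selvedge_useable_width_cm": "140",
        "texture_width_cm": "100",
        "cat_end_use": "Upholstery, Curtains",
        "cat_design_type": "Plain",
        "cat_colour": "Blue",
        "cat_characteristics": "Soft, Durable",
        "total_composition": "100% Polyester",
        "url_info": "https://example.com/fabrics/1234",
        "url_texture": "https://example.com/textures/1234.zip",
    }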
+    """
+    response = requests.get(url, stream=True)
+    total_length = int(response.headers.get("content-length"))
+    with open(filepath, "wb") as file:
+        for chunk in response.iter_content(chunk_size=8192):
+            file.write(chunk)
+            progress = int(file.tell() / total_length * 100)
+            print(f"Downloading: {progress}%", end="\r")
+    print()
+
+
+def build_description_text(twinbru_asset):
+    """
+    Build a description text for the asset.
+    """
+    description = "Physical material that renders exactly as in real life.\n"
+    description += f"Brand: {twinbru_asset['brand']}\n"
+    description += f"Weight: {twinbru_asset['weight_g_per_m_squared']}\n"
+    description += f"End Use: {twinbru_asset['cat_end_use']}\n"
+    description += f"Usable Width: {twinbru_asset['selvedge_useable_width_cm']}\n"
+    description += f"Design Type: {twinbru_asset['cat_design_type']}\n"
+    description += f"Colour Type: {twinbru_asset['cat_colour']}\n"
+    description += f"Characteristics: {twinbru_asset['cat_characteristics']}\n"
+    description += f"Composition: {twinbru_asset['total_composition']}\n"
+    return description
+
+
+def slugify_text(text):
+    """
+    Slugify a text.
+    Remove special characters, replace whitespace with underscores and make it lowercase.
+    """
+    text = re.sub(r"[()/#-]", "", text)
+    text = re.sub(r"\s", "_", text)
+    text = re.sub(r"_+", "_", text)
+    return text.lower()
+
+
+def build_tags_list(twinbru_asset):
+    """
+    Create a list of tags for the asset.
+    """
+    tags = []
+    tags.extend(twinbru_asset["cat_end_use"].split(","))
+    tags.extend(twinbru_asset["cat_design_type"].split(","))
+    # tags.append(twinbru_asset["cat_colour"])
+    tags.extend(twinbru_asset["cat_characteristics"].split(","))
+    # remove duplicates
+    tags = list(set(tags))
+    # shorten to max 5 tags
+    tags = tags[:5]
+    # make tags contain only alphanumeric characters and underscores
+    # there are these characters to be replaced: ()/#- and gaps
+    tags = [slugify_text(tag) for tag in tags]
+
+    return tags
+
+
+def dict_to_params(inputs):
+    parameters = []
+    for k, v in inputs.items():
+        value = ""
+        if isinstance(v, list):
+            value = ",".join(str(item) for item in v)
+        elif isinstance(v, bool):
+            value = str(v).lower()
+        elif isinstance(v, (int, float)):
+            value = f"{v:f}".rstrip("0").rstrip(".")
+        else:
+            value = str(v)
+
+        param = {"parameterType": k, "value": value}
+        parameters.append(param)
+    return parameters
+
+
+def get_thumbnail_path(temp_folder, twinbru_asset):
+    """
+    Get the thumbnail path for the asset.
+    Thumbnails are stored in the /renders directory of the asset.
+    """
+    # Get the path to the renders directory
+    renders_dir = os.path.join(temp_folder, "renders")
+
+    # Check if the renders directory exists
+    if not os.path.exists(renders_dir):
+        print(f"Renders directory not found for asset {twinbru_asset['name']}")
+        return None
+
+    # List all files in the renders directory
+    render_files = os.listdir(renders_dir)
+
+    # Filter for image files (assuming they are jpg or png)
+    image_files = [
+        f for f in render_files if f.lower().endswith((".jpg", ".jpeg", ".png"))
+    ]
+
+    # If no image files found, return None
+    if not image_files:
+        print(f"No thumbnail images found for asset {twinbru_asset['name']}")
+        return None
+
+    # Sort the image files to get the first one (assuming it's the main thumbnail)
+    image_files.sort()
+    thumbnail_file = image_files[0]
+
+    # Return the full path to the thumbnail
+    return os.path.join(renders_dir, thumbnail_file)
+
+
+def generate_upload_data(twinbru_asset):
+    """
+    Generate the upload data for the asset.
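generate_upload_data relies on dict_to_params (defined above) to flatten its nested parameters dict into the list-of-parameter format the upload expects; a brief worked example with invented values:

    dict_to_params({"uv": True, "textureResolutionMax": 4096, "shaders": ["principled"]})
    # returns:
    # [
    #     {"parameterType": "uv", "value": "true"},
    #     {"parameterType": "textureResolutionMax", "value": "4096"},
    #     {"parameterType": "shaders", "value": "principled"},
    # ]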
+    """
+    # convert name - remove _ and remove the number that comes last in the name
+    readable_name = twinbru_asset["name"].split("_")
+    # capitalize the first letter of each word
+    readable_name = " ".join(word.capitalize() for word in readable_name[:-1])
+
+    upload_data = {
+        "assetType": "material",
+        "sourceAppName": "blender",
+        "sourceAppVersion": "4.2.0",
+        "addonVersion": "3.12.3",
+        "name": readable_name,
+        "displayName": readable_name,
+        "description": build_description_text(twinbru_asset),
+        "tags": build_tags_list(twinbru_asset),
+        "category": "fabric",
+        "license": "royalty_free",
+        "isFree": True,
+        "isPrivate": False,
+        "parameters": {
+            # twinBru specific parameters
+            "twinbruReference": twinbru_asset["reference"],
+            "twinBruCatEndUse": twinbru_asset["cat_end_use"],
+            "twinBruColourType": twinbru_asset["cat_colour"],
+            "twinBruCharacteristics": twinbru_asset["cat_characteristics"],
+            "twinBruDesignType": twinbru_asset["cat_design_type"],
+            "productLink": twinbru_asset["url_info"],
+            # blenderkit specific parameters
+            "material_style": "realistic",
+            "engine": "cycles",
+            "shaders": ["principled"],
+            "uv": True,
+            "animated": False,
+            "purePbr": True,
+            "textureSizeMeters": float(twinbru_asset["texture_width_cm"]) * 0.01,
+            "procedural": False,
+            "nodeCount": 7,
+            "textureCount": 5,
+            "megapixels": 5 * 4 * 4,
+            "pbrType": "metallic",
+            "textureResolutionMax": 4096,
+            "textureResolutionMin": 4096,
+            "manufacturer": twinbru_asset["brand"],
+            "designCollection": twinbru_asset["collection_name"],
+        },
+    }
+    upload_data["parameters"] = dict_to_params(upload_data["parameters"])
+    return upload_data
+
+
+def sync_TwinBru_library(file_path):
+    """
+    Sync the TwinBru library to blenderkit.
+    1. Read the CSV file
+    2. For each asset:
+    2.1. Search for the asset on blenderkit; if it exists, skip it, if it doesn't, upload it.
+    2.2. Download the asset
+    2.3. Unpack the asset
+    2.4. Create blenderkit upload metadata
+    2.5. Make an upload request to the blenderkit API, to upload metadata and to get asset_base_id.
+    2.6. Run a pack_twinbru_material.py script to create a material in Blender 3D,
+    write the asset_base_id and other blenderkit props on the material.
+    2.7. Upload the material to blenderkit
+    2.8. Patch the asset data with a new parameter.
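A condensed sketch of steps 2.4 to 2.8, mirroring the implementation that follows; the template path and other literals are placeholders taken from the code below:

    upload_data = generate_upload_data(twinbru_asset)                       # 2.4.
    asset_data = upload.upload_asset_metadata(upload_data, paths.API_KEY)   # 2.5.
    send_to_bg.send_to_bg(                                                   # 2.6.
        asset_data=asset_data,
        template_file_path="blend_files/empty.blend",
        result_path=os.path.join(temp_folder, "material.blend"),
        script="pack_twinbru_material.py",
        binary_type="NEWEST",
        temp_folder=temp_folder,
    )
    files = [
        {"type": "thumbnail", "index": 0, "file_path": thumbnail_path},
        {"type": "blend", "index": 0, "file_path": os.path.join(temp_folder, "material.blend")},
    ]
    uploaded = upload.upload_files(                                          # 2.7.
        {
            "name": asset_data["name"],
            "displayName": upload_data["displayName"],
            "token": paths.API_KEY,
            "id": asset_data["id"],
        },
        files,
    )
    upload.patch_asset_metadata(                                             # 2.8.
        asset_data["id"], paths.API_KEY, data={"verificationStatus": "uploaded"}
    )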
+ """ + + assets = read_csv_file(file_path) + current_dir = pathlib.Path(__file__).parent.resolve() + for twinbru_asset in assets: + bk_assets = search.get_search_simple( + parameters={ + "twinbruReference": twinbru_asset["reference"], + "verification_status": "uploaded,validated", + }, + filepath=None, + page_size=10, + max_results=1, + api_key=paths.API_KEY, + ) + if len(bk_assets) > 0: + print(f"Asset {twinbru_asset['name']} already exists on blenderkit") + continue + else: + print(f"Asset {twinbru_asset['name']} does not exist on blenderkit") + # Download the asset into temp folder + temp_folder = os.path.join(tempfile.gettempdir(), twinbru_asset["name"]) + # create the folder if it doesn't exist + if not os.path.exists(temp_folder): + os.makedirs(temp_folder) + + # check if the file exists + asset_file_name = twinbru_asset["url_texture"].split("/")[-1] + asset_file_path = os.path.join(temp_folder, asset_file_name) + if not os.path.exists(asset_file_path): + download_file(twinbru_asset["url_texture"], asset_file_path) + # Unzip the asset file + with zipfile.ZipFile(asset_file_path, "r") as zip_ref: + zip_ref.extractall(temp_folder) + + # Create blenderkit upload metadata + upload_data = generate_upload_data(twinbru_asset) + + # upload metadata and get result + print("uploading metadata") + # print json structured + print(json.dumps(upload_data, indent=4)) + asset_data = upload.upload_asset_metadata(upload_data, paths.API_KEY) + if asset_data.get("statusCode") == 400: + print(asset_data) + return + + # Run the _bg.py script to create a material in Blender 3D + send_to_bg.send_to_bg( + asset_data=asset_data, + template_file_path=os.path.join( + current_dir, "blend_files", "empty.blend" + ), + result_path=os.path.join(temp_folder, "material.blend"), + script="pack_twinbru_material.py", + binary_type="NEWEST", + temp_folder=temp_folder, + verbosity_level=2, + ) + # Upload the asset to blenderkit + thumbnail_path = get_thumbnail_path(temp_folder, twinbru_asset) + files = [ + { + "type": "thumbnail", + "index": 0, + "file_path": thumbnail_path, + }, + { + "type": "blend", + "index": 0, + "file_path": os.path.join(temp_folder, "material.blend"), + }, + ] + upload_data = { + "name": asset_data["name"], + "displayName": upload_data["displayName"], + "token": paths.API_KEY, + "id": asset_data["id"], + } + uploaded = upload.upload_files(upload_data, files) + + if uploaded: + print(f"Successfully uploaded asset: {asset_data['name']}") + else: + print(f"Failed to upload asset: {asset_data['name']}") + # mark asset as uploaded + upload.patch_asset_metadata( + asset_data["id"], paths.API_KEY, data={"verificationStatus": "uploaded"} + ) + + +def iterate_assets(filepath, thread_function=None, process_count=12, api_key=""): + """iterate through all assigned assets, check for those which need generation and send them to res gen""" + assets = search.load_assets_list(filepath) + threads = [] + for asset_data in assets: + if asset_data is not None: + print("downloading and generating resolution for %s" % asset_data["name"]) + thread = threading.Thread( + target=thread_function, args=(asset_data, api_key) + ) + thread.start() + threads.append(thread) + while len(threads) > process_count - 1: + for t in threads: + if not t.is_alive(): + threads.remove(t) + break + time.sleep(0.1) # wait for a bit to finish all threads + + +def main(): + dpath = tempfile.gettempdir() + + sync_TwinBru_library("twinBru_test_library.csv") + + +if __name__ == "__main__": + main()
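Finally, a minimal sketch of how this sync script might be launched with the environment variables documented in its module docstring; the key and path values are placeholders:

    import os
    import subprocess

    env = dict(
        os.environ,
        BLENDERKIT_API_KEY="<your-api-key>",    # placeholder
        BLENDERS_PATH="/opt/blender-builds",    # placeholder folder with Blender versions
    )
    subprocess.run(["python", "sync_TwinBru_library.py"], env=env, check=True)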