diff --git a/blend_files/empty.blend b/blend_files/empty.blend index fa85193..4ad5f57 100644 Binary files a/blend_files/empty.blend and b/blend_files/empty.blend differ diff --git a/blend_files/material_thumbnailer_cycles.blend b/blend_files/material_thumbnailer_cycles.blend new file mode 100644 index 0000000..1faa580 Binary files /dev/null and b/blend_files/material_thumbnailer_cycles.blend differ diff --git a/blend_files/model_thumbnailer.blend b/blend_files/model_thumbnailer.blend new file mode 100644 index 0000000..6903130 Binary files /dev/null and b/blend_files/model_thumbnailer.blend differ diff --git a/blend_files/model_validation_static_renders.blend b/blend_files/model_validation_static_renders.blend index f55d03a..f9159f2 100644 Binary files a/blend_files/model_validation_static_renders.blend and b/blend_files/model_validation_static_renders.blend differ diff --git a/blender_bg_scripts/autothumb_material_bg.py b/blender_bg_scripts/autothumb_material_bg.py new file mode 100644 index 0000000..bf4ac99 --- /dev/null +++ b/blender_bg_scripts/autothumb_material_bg.py @@ -0,0 +1,169 @@ +""" +Background script for generating material thumbnails. +This script is called by render_thumbnail.py and expects: +- material file path +- template blend file (material_thumbnailer_cycles.blend) +- result filepath for the JSON output +- thumbnail parameters in the asset_data +""" + +import bpy +import os +import sys +import json +import traceback +from pathlib import Path +# Add parent directory to Python path so we can import blenderkit_server_utils +dir_path = os.path.dirname(os.path.realpath(__file__)) +parent_path = os.path.join(dir_path, os.path.pardir) +sys.path.append(parent_path) + +from blenderkit_server_utils import append_link, paths, utils + + +def render_thumbnails(): + bpy.ops.render.render(write_still=True, animation=False) + + +def unhide_collection(cname): + collection = bpy.context.scene.collection.children[cname] + collection.hide_viewport = False + collection.hide_render = False + collection.hide_select = False + + +if __name__ == "__main__": + try: + # args order must match the order in blenderkit/autothumb.py:get_thumbnailer_args()! 
+ BLENDERKIT_EXPORT_DATA = sys.argv[-1] + + print("preparing thumbnail scene") + print(BLENDERKIT_EXPORT_DATA) + with open(BLENDERKIT_EXPORT_DATA, "r", encoding="utf-8") as s: + data = json.load(s) + # append_material(file_name, matname = None, link = False, fake_user = True) + + thumbnail_use_gpu = data.get("thumbnail_use_gpu") + + + mat = append_link.append_material( + file_name=data["file_path"], + matname=data["asset_data"]["name"], + link=True, + fake_user=False, + ) + + s = bpy.context.scene + + colmapdict = { + "BALL": "Ball", + "BALL_COMPLEX": "Ball complex", + "FLUID": "Fluid", + "CLOTH": "Cloth", + "HAIR": "Hair", + } + unhide_collection(colmapdict[data["asset_data"]["thumbnail_type"]]) + if data["asset_data"]["thumbnail_background"]: + unhide_collection("Background") + bpy.data.materials["bg checker colorable"].node_tree.nodes[ + "input_level" + ].outputs["Value"].default_value = data["asset_data"]["thumbnail_background_lightness"] + tscale = data["asset_data"]["thumbnail_scale"] + scaler = bpy.context.view_layer.objects["scaler"] + scaler.scale = (tscale, tscale, tscale) + utils.activate_object(scaler) + bpy.ops.object.transform_apply(location=False, rotation=False, scale=True) + + # find any object with solidify and scale the thickness accordingly + # this currently involves only cloth preview, but might also others or other scale depended modifiers + for ob in bpy.context.visible_objects: + if ob.name[:15] == "MaterialPreview": + for m in ob.modifiers: + if m.type == "SOLIDIFY": + m.thickness *= tscale + + bpy.context.view_layer.update() + + for ob in bpy.context.visible_objects: + if ob.name[:15] == "MaterialPreview": + utils.activate_object(ob) + if bpy.app.version >= (3, 3, 0): + bpy.ops.object.transform_apply( + location=False, rotation=False, scale=True, isolate_users=True + ) + else: + bpy.ops.object.transform_apply( + location=False, rotation=False, scale=True + ) + bpy.ops.object.transform_apply( + location=False, rotation=False, scale=True + ) + + ob.material_slots[0].material = mat + ob.data.use_auto_texspace = False + ob.data.texspace_size.x = 1 # / tscale + ob.data.texspace_size.y = 1 # / tscale + ob.data.texspace_size.z = 1 # / tscale + if data["asset_data"]["thumbnail_adaptive_subdivision"] == True: + ob.cycles.use_adaptive_subdivision = True + + else: + ob.cycles.use_adaptive_subdivision = False + # Get texture size from dictParameters + ts = data["asset_data"]["dictParameters"].get("textureSizeMeters", 1.0) + if data["asset_data"]["thumbnail_type"] in ["BALL", "BALL_COMPLEX", "CLOTH"]: + utils.automap( + ob.name, + tex_size=ts / tscale, + just_scale=True, + bg_exception=True, + ) + bpy.context.view_layer.update() + + s.cycles.volume_step_size = tscale * 0.1 + + if thumbnail_use_gpu is True: + bpy.context.scene.cycles.device = "GPU" + compute_device_type = data.get("cycles_compute_device_type") + if compute_device_type is not None: + # DOCS:https://github.com/dfelinto/blender/blob/master/intern/cycles/blender/addon/properties.py + bpy.context.preferences.addons[ + "cycles" + ].preferences.compute_device_type = compute_device_type + bpy.context.preferences.addons["cycles"].preferences.refresh_devices() + + s.cycles.samples = data["asset_data"]["thumbnail_samples"] + bpy.context.view_layer.cycles.use_denoising = data["asset_data"]["thumbnail_denoising"] + + # import blender's HDR here + hdr_path = Path("datafiles/studiolights/world/interior.exr") + bpath = Path(bpy.utils.resource_path("LOCAL")) + ipath = bpath / hdr_path + ipath = str(ipath) + + # this stuff is 
for mac and possibly linux. For blender // means relative path. + # for Mac, // means start of absolute path + if ipath.startswith("//"): + ipath = ipath[1:] + + img = bpy.data.images["interior.exr"] + img.filepath = ipath + img.reload() + + bpy.context.scene.render.resolution_x = int(data["asset_data"]["thumbnail_resolution"]) + bpy.context.scene.render.resolution_y = int(data["asset_data"]["thumbnail_resolution"]) + + bpy.context.scene.render.filepath = data["result_filepath"] + print("rendering thumbnail") + # bpy.ops.wm.save_as_mainfile(filepath='C:/tmp/test.blend') + render_thumbnails() + print( + "background autothumbnailer finished successfully (no upload)" + ) + sys.exit(0) + + + except Exception as e: + print(f"background autothumbnailer failed: {e}") + print(traceback.format_exc()) + sys.exit(1) diff --git a/blender_bg_scripts/autothumb_model_bg.py b/blender_bg_scripts/autothumb_model_bg.py new file mode 100644 index 0000000..c60ac6d --- /dev/null +++ b/blender_bg_scripts/autothumb_model_bg.py @@ -0,0 +1,192 @@ +""" +Background script for generating model thumbnails. +This script is called by render_thumbnail.py and runs within Blender's Python environment. +It handles the setup and rendering of 3D model thumbnails with the following workflow: +1. Imports the model into a pre-configured scene +2. Positions the model and camera for optimal framing +3. Configures render settings based on provided parameters +4. Renders the final thumbnail + +Required inputs (passed via JSON): +- model file path: Path to the 3D model file to render +- template blend file: Pre-configured Blender scene (model_thumbnailer.blend) +- result filepath: Where to save the rendered thumbnail +- thumbnail parameters: Various rendering settings in asset_data +""" + +import bpy +import os +import sys +import json +import math +import traceback +from pathlib import Path +# Add parent directory to Python path so we can import blenderkit_server_utils +# This is necessary because this script runs inside Blender's Python environment +dir_path = os.path.dirname(os.path.realpath(__file__)) +parent_path = os.path.join(dir_path, os.path.pardir) +sys.path.append(parent_path) + +from blenderkit_server_utils import append_link, paths, utils + + +def center_obs_for_thumbnail(obs): + """Center and scale objects for optimal thumbnail framing. + + This function: + 1. Centers the objects in world space + 2. Handles nested object hierarchies (parent-child relationships) + 3. Adjusts camera distance based on object bounds + 4. 
Scales the scene to ensure the object fits in frame + + Args: + obs (list): List of Blender objects to center and frame + """ + s = bpy.context.scene + parent = obs[0] + + # Handle instanced collections (linked objects) + if parent.type == "EMPTY" and parent.instance_collection is not None: + obs = parent.instance_collection.objects[:] + + # Get top-level parent + while parent.parent is not None: + parent = parent.parent + # Reset parent rotation for accurate snapping + parent.rotation_euler = (0, 0, 0) + parent.location = (0, 0, 0) + bpy.context.view_layer.update() + + # Calculate bounding box in world space + minx, miny, minz, maxx, maxy, maxz = utils.get_bounds_worldspace(obs) + + # Center object at world origin + cx = (maxx - minx) / 2 + minx + cy = (maxy - miny) / 2 + miny + for ob in s.collection.objects: + ob.select_set(False) + + bpy.context.view_layer.objects.active = parent + parent.location = (-cx, -cy, 0) + + # Adjust camera position and scale based on object size + camZ = s.camera.parent.parent + camZ.location.z = (maxz) / 2 + + # Calculate diagonal size of object for scaling + dx = maxx - minx + dy = maxy - miny + dz = maxz - minz + r = math.sqrt(dx * dx + dy * dy + dz * dz) + + # Scale scene elements to fit object + scaler = bpy.context.view_layer.objects["scaler"] + scaler.scale = (r, r, r) + coef = 0.7 # Camera distance coefficient + r *= coef + camZ.scale = (r, r, r) + bpy.context.view_layer.update() + + +def render_thumbnails(): + """Trigger Blender's render operation and save the result. + The output path and render settings should be configured before calling this.""" + bpy.ops.render.render(write_still=True, animation=False) + + +if __name__ == "__main__": + try: + # Load thumbnail configuration from JSON + # args order must match the order in blenderkit/autothumb.py:get_thumbnailer_args()! 
+ BLENDERKIT_EXPORT_DATA = sys.argv[-1] + + print("preparing thumbnail scene") + print(BLENDERKIT_EXPORT_DATA) + with open(BLENDERKIT_EXPORT_DATA, "r", encoding="utf-8") as s: + data = json.load(s) + + thumbnail_use_gpu = data.get("thumbnail_use_gpu") + + # Import the 3D model into the scene + # The model is linked rather than appended to save memory + main_object, all_objects = append_link.link_collection( + file_name=data["file_path"], + location=(0, 0, 0), + rotation=(0, 0, 0), + link=True, + name=data["asset_data"]["name"], + parent=None, + ) + + # Position the model in the scene + center_obs_for_thumbnail(all_objects) + + # Select appropriate camera based on object placement type + # Each camera is pre-configured in the template file for different angles + camdict = { + "GROUND": "camera ground", # Looking down at object on ground + "WALL": "camera wall", # Looking at object mounted on wall + "CEILING": "camera ceiling", # Looking up at ceiling-mounted object + "FLOAT": "camera float", # Looking at floating object + } + bpy.context.scene.camera = bpy.data.objects[camdict[data["asset_data"]["thumbnail_snap_to"]]] + + + # Set the frame number to get different pre-configured angles + # The template file uses keyframes to store different viewpoints + fdict = { + "DEFAULT": 1, # Best angle for object type + "FRONT": 2, # Direct front view + "SIDE": 3, # Direct side view + "TOP": 4, # Top-down view + } + s = bpy.context.scene + s.frame_set(fdict[data["asset_data"]["thumbnail_angle"]]) + + # Enable the appropriate scene collection based on object placement + # Each collection has specific lighting and environment setup + snapdict = { + "GROUND": "Ground", # Floor-based lighting setup + "WALL": "Wall", # Wall-mounted lighting setup + "CEILING": "Ceiling", # Ceiling-mounted lighting setup + "FLOAT": "Float", # 360-degree lighting setup + } + collection = bpy.context.scene.collection.children[snapdict[data["asset_data"]["thumbnail_snap_to"]]] + collection.hide_viewport = False + collection.hide_render = False + collection.hide_select = False + + # Reset object rotation to ensure consistent orientation + main_object.rotation_euler = (0, 0, 0) + + # Configure render device (GPU/CPU) and settings + if thumbnail_use_gpu is True: + bpy.context.scene.cycles.device = "GPU" + compute_device_type = data.get("cycles_compute_device_type") + if compute_device_type is not None: + bpy.context.preferences.addons["cycles"].preferences.compute_device_type = compute_device_type + bpy.context.preferences.addons["cycles"].preferences.refresh_devices() + + # Set render quality parameters + s.cycles.samples = data["asset_data"]["thumbnail_samples"] + bpy.context.view_layer.cycles.use_denoising = data["asset_data"]["thumbnail_denoising"] + + # Configure background color brightness + bpy.data.materials["bkit background"].node_tree.nodes["Value"].outputs["Value"].default_value = data["asset_data"]["thumbnail_background_lightness"] + + # Set output resolution + bpy.context.scene.render.resolution_x = int(data["asset_data"]["thumbnail_resolution"]) + bpy.context.scene.render.resolution_y = int(data["asset_data"]["thumbnail_resolution"]) + + # Configure output path and start render + bpy.context.scene.render.filepath = data["result_filepath"] + print("rendering thumbnail") + render_thumbnails() + + print("background autothumbnailer finished successfully") + sys.exit(0) + + except Exception as e: + print(f"background autothumbnailer failed: {e}") + print(traceback.format_exc()) + sys.exit(1) diff --git 
a/blender_bg_scripts/model_validation_bg_render.py b/blender_bg_scripts/model_validation_bg_render.py index 8de589b..b567c5f 100644 --- a/blender_bg_scripts/model_validation_bg_render.py +++ b/blender_bg_scripts/model_validation_bg_render.py @@ -21,14 +21,14 @@ def getNode(mat, type): def link_collection( - file_name, obnames=[], location=(0, 0, 0), link=False, parent=None, **kwargs + file_name, obnames=[], location=(0, 0, 0), link=False, parent=None, **kwargs ): """link an instanced group - model type asset""" sel = utils.selection_get() with bpy.data.libraries.load(file_name, link=link, relative=True) as ( - data_from, - data_to, + data_from, + data_to, ): scols = [] for col in data_from.collections: @@ -76,13 +76,15 @@ def link_collection( def add_text_line(strip, text): - bpy.data.scenes["Composite"].sequence_editor.sequences_all[strip].text += text + 10 * ' ' + bpy.data.scenes["Composite"].sequence_editor.sequences_all[strip].text += ( + text + 10 * " " + ) def writeout_param(asset_data, param_name): pl = utils.get_param(asset_data, param_name) if pl is not None: - add_text_line('asset', f'{param_name}:{pl}') + add_text_line("asset", f"{param_name}:{pl}") def set_text(strip, text): @@ -90,28 +92,28 @@ def set_text(strip, text): def scale_cameras(asset_data): - params = asset_data['dictParameters'] - minx = params['boundBoxMinX'] - miny = params['boundBoxMinY'] - minz = params['boundBoxMinZ'] - maxx = params['boundBoxMaxX'] - maxy = params['boundBoxMaxY'] - maxz = params['boundBoxMaxZ'] - - dx = (maxx - minx) - dy = (maxy - miny) - dz = (maxz - minz) + params = asset_data["dictParameters"] + minx = params["boundBoxMinX"] + miny = params["boundBoxMinY"] + minz = params["boundBoxMinZ"] + maxx = params["boundBoxMaxX"] + maxy = params["boundBoxMaxY"] + maxz = params["boundBoxMaxZ"] + + dx = maxx - minx + dy = maxy - miny + dz = maxz - minz print(dx, dy, dz) r = math.sqrt(dx * dx + dy * dy + dz * dz) r *= 1.2 - scaler = bpy.data.objects['scaler'] + scaler = bpy.data.objects["scaler"] scaler.scale = (r, r, r) scaler.location.z = (maxz + minz) / 2 # get scene camera - cam = bpy.data.objects['Camera'] + cam = bpy.data.objects["Camera"] # Set ortho scale to max of dimensions cam.data.ortho_scale = max(dx, dy, dz) * 1.1 @@ -136,7 +138,7 @@ def scale_cameras(asset_data): def check_for_flat_faces(): for ob in bpy.context.scene.objects: - if ob.type == 'MESH': + if ob.type == "MESH": for f in ob.data.polygons: if not f.use_smooth: return True @@ -150,49 +152,49 @@ def mark_freestyle_edges(): def set_asset_data_texts(asset_data): - set_text('asset', '') - add_text_line('asset', asset_data['name']) - dx = utils.get_param(asset_data, 'dimensionX') - dy = utils.get_param(asset_data, 'dimensionY') - dz = utils.get_param(asset_data, 'dimensionZ') + set_text("asset", "") + add_text_line("asset", asset_data["name"]) + dx = utils.get_param(asset_data, "dimensionX") + dy = utils.get_param(asset_data, "dimensionY") + dz = utils.get_param(asset_data, "dimensionZ") dim_text = f"Dimensions:{dx}x{dy}x{dz}m" - add_text_line('asset', dim_text) - fc = utils.get_param(asset_data, 'faceCount', 1) - fcr = utils.get_param(asset_data, 'faceCountRender', 1) + add_text_line("asset", dim_text) + fc = utils.get_param(asset_data, "faceCount", 1) + fcr = utils.get_param(asset_data, "faceCountRender", 1) - add_text_line('asset', f"fcount {fc} render {fcr}") + add_text_line("asset", f"fcount {fc} render {fcr}") if check_for_flat_faces(): - add_text_line('asset', 'Flat faces detected') + add_text_line("asset", "Flat faces 
detected") - writeout_param(asset_data, 'productionLevel') - writeout_param(asset_data, 'shaders') - writeout_param(asset_data, 'modifiers') - writeout_param(asset_data, 'meshPolyType') - writeout_param(asset_data, 'manifold') - writeout_param(asset_data, 'objectCount') - writeout_param(asset_data, 'nodeCount') - writeout_param(asset_data, 'textureCount') - writeout_param(asset_data, 'textureResolutionMax') + writeout_param(asset_data, "productionLevel") + writeout_param(asset_data, "shaders") + writeout_param(asset_data, "modifiers") + writeout_param(asset_data, "meshPolyType") + writeout_param(asset_data, "manifold") + writeout_param(asset_data, "objectCount") + writeout_param(asset_data, "nodeCount") + writeout_param(asset_data, "textureCount") + writeout_param(asset_data, "textureResolutionMax") -def set_scene(name=''): - print(f'setting scene {name}') +def set_scene(name=""): + print(f"setting scene {name}") bpy.context.window.scene = bpy.data.scenes[name] - c = bpy.context.scene.objects.get('Camera') + c = bpy.context.scene.objects.get("Camera") if c is not None: bpy.context.scene.camera = c bpy.context.view_layer.update() # bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1) -def set_view_shading(shading_type='RENDERED', face_orientation=False, wireframe=False): +def set_view_shading(shading_type="RENDERED", face_orientation=False, wireframe=False): # bpy.data.workspaces['Layout'].screens['Layout'].areas[4].spaces[0].shading for w in bpy.data.workspaces: for a in w.screens[0].areas: - if a.type == 'VIEW_3D': + if a.type == "VIEW_3D": for s in a.spaces: - if s.type == 'VIEW_3D': + if s.type == "VIEW_3D": s.shading.type = shading_type s.overlay.show_wireframes = wireframe s.overlay.show_face_orientation = face_orientation @@ -200,67 +202,77 @@ def set_view_shading(shading_type='RENDERED', face_orientation=False, wireframe= # bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1) -def set_workspace(name='Layout'): +def set_workspace(name="Layout"): for a in range(0, 2): bpy.context.window.workspace = bpy.data.workspaces[name] bpy.context.workspace.update_tag() bpy.context.view_layer.update() # bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1) + def switch_off_all_modifiers(): - #switches off all modifiers for render in the scene and stores and returns them in a list with original state. + # switches off all modifiers for render in the scene and stores and returns them in a list with original state. original_states = [] for ob in bpy.context.scene.objects: - if ob.type == 'MESH': + if ob.type == "MESH": for m in ob.modifiers: original_states.append((ob, m, m.show_render)) m.show_render = False return original_states + def switch_on_all_modifiers(original_states): - #switches on all modifiers for render in the scene and restores them to the original state. + # switches on all modifiers for render in the scene and restores them to the original state. for ob, m, state in original_states: m.show_render = state -def add_geometry_nodes_to_all_objects(group = 'wireNodes', dimensions = 1): - #takes all visible objects in the scene and adds geometry nodes modifier with the group to them. - #avoids objects with more than 300k face. + +def add_geometry_nodes_to_all_objects(group="wireNodes", dimensions=1): + # takes all visible objects in the scene and adds geometry nodes modifier with the group to them. + # avoids objects with more than 300k face. 
for ob in bpy.context.scene.objects: - if ob.type == 'MESH' and ob.visible_get() and len(ob.data.polygons) < 300000: + if ob.type == "MESH" and ob.visible_get() and len(ob.data.polygons) < 300000: bpy.context.view_layer.objects.active = ob - bpy.ops.object.modifier_add(type='NODES') + bpy.ops.object.modifier_add(type="NODES") m = bpy.context.object.modifiers[-1] m.node_group = bpy.data.node_groups[group] - #asset dimensions needed + # asset dimensions needed m["Socket_0"] = float(dimensions) -def remove_geometry_nodes_from_all_objects(group = 'wireNodes'): - #takes all visible objects in the scene and removes geometry nodes modifier with the group to them. + +def remove_geometry_nodes_from_all_objects(group="wireNodes"): + # takes all visible objects in the scene and removes geometry nodes modifier with the group to them. for ob in bpy.context.scene.objects: - if ob.type == 'MESH' and ob.visible_get() and len(ob.data.polygons) < 300000: + if ob.type == "MESH" and ob.visible_get() and len(ob.data.polygons) < 300000: bpy.context.view_layer.objects.active = ob # check if the modifier is there for m in ob.modifiers: - if m.type == 'NODES' and m.node_group.name == group: + if m.type == "NODES" and m.node_group.name == group: bpy.context.object.modifiers.remove(m) -def render_model_validation( asset_data, filepath): + + +def render_model_validation(asset_data, filepath): # bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1) # render basic render - set_scene('Render') + set_scene("Render") # set_view_shading(shading_type='RENDERED') # set_workspace('Render') + # set samples to just 1 for speed + # bpy.context.scene.cycles.samples = 1 + bpy.ops.render.render(animation=True) # bpy.ops.render.opengl(animation=True, view_context=True) # render the Mesh checker # now in render - set_scene('Mesh_checker') + set_scene("Mesh_checker") # freestyle is crazy slow. 
Need better edge render :( # mark_freestyle_edges() # set_view_shading(shading_type='MATERIAL', wireframe = True, face_orientation=True) # set_workspace('Mesh_checker') + bpy.ops.render.render(animation=True) # bpy.ops.render.opengl(animation=True, view_context=False) @@ -272,20 +284,20 @@ def render_model_validation( asset_data, filepath): # set_scene('UV_checker') # bpy.ops.render.render(animation=True, write_still=True) - #switch off modifiers for this one - set_scene('Mesh_checker_no_modif') + # switch off modifiers for this one + set_scene("Mesh_checker_no_modif") original_states = switch_off_all_modifiers() - dimensionX = utils.get_param(asset_data, 'dimensionX') - dimensionY = utils.get_param(asset_data, 'dimensionY') - dimensionZ = utils.get_param(asset_data, 'dimensionZ') + dimensionX = utils.get_param(asset_data, "dimensionX") + dimensionY = utils.get_param(asset_data, "dimensionY") + dimensionZ = utils.get_param(asset_data, "dimensionZ") # Max length is taken as the dimension of the asset dimensions = max(dimensionX, dimensionY, dimensionZ) - add_geometry_nodes_to_all_objects(group='wireNodes', dimensions=dimensions) + add_geometry_nodes_to_all_objects(group="wireNodes", dimensions=dimensions) bpy.ops.render.render(animation=True) - remove_geometry_nodes_from_all_objects(group='wireNodes') + remove_geometry_nodes_from_all_objects(group="wireNodes") switch_on_all_modifiers(original_states) # switch to composite and render video - #No video, in this one we render only large stills + # No video, in this one we render only large stills # set_scene('Composite') # # bpy.context.scene.render.filepath = filepath @@ -296,42 +308,88 @@ def render_model_validation( asset_data, filepath): # print(f'rendering validation preview for {asset_data["name"]}') # bpy.ops.render.render(animation=True, write_still=True) -def export_gltf(filepath=''): + +def export_gltf(filepath=""): # print all selected objects names first for ob in bpy.context.selected_objects: print(ob.name) - bpy.ops.export_scene.gltf(filepath=filepath, export_format='GLB', export_copyright="", - export_image_format='WEBP', export_image_add_webp=True, export_image_webp_fallback=False, - export_texture_dir="", export_jpeg_quality=50, export_image_quality=50, - export_keep_originals=False, export_texcoords=True, export_normals=True, - export_draco_mesh_compression_enable=True, export_draco_mesh_compression_level=6, - export_draco_position_quantization=14, export_draco_normal_quantization=10, - export_draco_texcoord_quantization=12, export_draco_color_quantization=10, - export_draco_generic_quantization=12, export_tangents=False, export_materials='EXPORT', - export_colors=True, export_attributes=False, use_mesh_edges=False, - use_mesh_vertices=False, - export_cameras=False, use_selection=True, use_visible=False, use_renderable=False, - use_active_collection_with_nested=True, use_active_collection=False, - use_active_scene=False, export_extras=False, export_yup=True, export_apply=False, - export_animations=True, export_frame_range=False, export_frame_step=1, - export_force_sampling=True, export_animation_mode='ACTIONS', - export_nla_strips_merged_animation_name="Animation", export_def_bones=False, - export_hierarchy_flatten_bones=False, export_optimize_animation_size=True, - export_optimize_animation_keep_anim_armature=True, - export_optimize_animation_keep_anim_object=False, export_negative_frame='SLIDE', - export_anim_slide_to_zero=False, export_bake_animation=False, - export_anim_single_armature=True, export_reset_pose_bones=True, 
- export_current_frame=False, - export_rest_position_armature=True, export_anim_scene_split_object=True, - export_skins=True, - export_influence_nb=4, export_all_influences=False, export_morph=True, - export_morph_normal=True, export_morph_tangent=False, export_morph_animation=True, - export_morph_reset_sk_data=True, export_lights=False, export_try_sparse_sk=True, - export_try_omit_sparse_sk=False, export_gpu_instances=False, export_nla_strips=True, - export_original_specular=False, will_save_settings=False, filter_glob="*.glb") + bpy.ops.export_scene.gltf( + filepath=filepath, + export_format="GLB", + export_copyright="", + export_image_format="WEBP", + export_image_add_webp=True, + export_image_webp_fallback=False, + export_texture_dir="", + export_jpeg_quality=50, + export_image_quality=50, + export_keep_originals=False, + export_texcoords=True, + export_normals=True, + export_draco_mesh_compression_enable=True, + export_draco_mesh_compression_level=6, + export_draco_position_quantization=14, + export_draco_normal_quantization=10, + export_draco_texcoord_quantization=12, + export_draco_color_quantization=10, + export_draco_generic_quantization=12, + export_tangents=False, + export_materials="EXPORT", + export_colors=True, + export_attributes=False, + use_mesh_edges=False, + use_mesh_vertices=False, + export_cameras=False, + use_selection=True, + use_visible=False, + use_renderable=False, + use_active_collection_with_nested=True, + use_active_collection=False, + use_active_scene=False, + export_extras=False, + export_yup=True, + export_apply=False, + export_animations=True, + export_frame_range=False, + export_frame_step=1, + export_force_sampling=True, + export_animation_mode="ACTIONS", + export_nla_strips_merged_animation_name="Animation", + export_def_bones=False, + export_hierarchy_flatten_bones=False, + export_optimize_animation_size=True, + export_optimize_animation_keep_anim_armature=True, + export_optimize_animation_keep_anim_object=False, + export_negative_frame="SLIDE", + export_anim_slide_to_zero=False, + export_bake_animation=False, + export_anim_single_armature=True, + export_reset_pose_bones=True, + export_current_frame=False, + export_rest_position_armature=True, + export_anim_scene_split_object=True, + export_skins=True, + export_influence_nb=4, + export_all_influences=False, + export_morph=True, + export_morph_normal=True, + export_morph_tangent=False, + export_morph_animation=True, + export_morph_reset_sk_data=True, + export_lights=False, + export_try_sparse_sk=True, + export_try_omit_sparse_sk=False, + export_gpu_instances=False, + export_nla_strips=True, + export_original_specular=False, + will_save_settings=False, + filter_glob="*.glb", + ) + + def render_asset_bg(data): - asset_data = data['asset_data'] - set_scene('Empty_start') + asset_data = data["asset_data"] + set_scene("Empty_start") # first lets build the filepath and find out if its already rendered? s = bpy.context.scene @@ -356,28 +414,32 @@ def render_asset_bg(data): fpath = data["file_path"] if fpath: try: - parent, new_obs = link_collection(fpath, - location=(0, 0, 0), - rotation=(0, 0, 0), - link=True, - name=asset_data['name'], - parent=None) + parent, new_obs = link_collection( + fpath, + location=(0, 0, 0), + rotation=(0, 0, 0), + link=True, + name=asset_data["name"], + parent=None, + ) # we need to realize for UV , texture, and nodegraph exports here.. 
utils.activate_object(parent) - bpy.ops.object.duplicates_make_real(use_base_parent=True, use_hierarchy=True) + bpy.ops.object.duplicates_make_real( + use_base_parent=True, use_hierarchy=True + ) all_obs = bpy.context.selected_objects[:] - bpy.ops.object.make_local(type='ALL') + bpy.ops.object.make_local(type="ALL") except Exception as e: print(e) - print('failed to append asset') + print("failed to append asset") return for s in bpy.data.scenes: if s != bpy.context.scene: # s.collection.objects.link(parent) - #try link all already realized. + # try link all already realized. for ob in all_obs: s.collection.objects.link(ob) @@ -385,19 +447,25 @@ def render_asset_bg(data): scale_cameras(asset_data) - #save the file to temp folder, so all files go there. - blend_file_path = os.path.join((data['temp_folder']), f"{asset_data['name']}.blend") - bpy.ops.wm.save_as_mainfile(filepath=blend_file_path, compress=False, copy=False, relative_remap=False) + # save the file to temp folder, so all files go there. + blend_file_path = os.path.join( + (data["temp_folder"]), f"{asset_data['name']}.blend" + ) + bpy.ops.wm.save_as_mainfile( + filepath=blend_file_path, compress=False, copy=False, relative_remap=False + ) - #first render the video - render_model_validation( asset_data, data['result_filepath']) - #then render the rest, since that makes total mess in the file... - render_nodes_graph.visualize_and_save_all(tempfolder=data['result_folder'], objects=all_obs) + # first render the video + render_model_validation(asset_data, data["result_filepath"]) + # then render the rest, since that makes total mess in the file... + render_nodes_graph.visualize_and_save_all( + tempfolder=data["result_folder"], objects=all_obs + ) if __name__ == "__main__": - print('background resolution generator') + print("background resolution generator") datafile = sys.argv[-1] - with open(datafile, 'r', encoding='utf-8') as f: + with open(datafile, "r", encoding="utf-8") as f: data = json.load(f) render_asset_bg(data) diff --git a/blender_bg_scripts/pack_twinbru_material.py b/blender_bg_scripts/pack_twinbru_material.py new file mode 100644 index 0000000..2990bab --- /dev/null +++ b/blender_bg_scripts/pack_twinbru_material.py @@ -0,0 +1,143 @@ +""" +This script is used to pack a material from TwinBru to a blenderkit asset. +It imports textures from the unzipped folder , creates a node tree and assigns the textures to the material. 
+""" + +import sys +import os +import bpy +import json + +# import utils- add path +dir_path = os.path.dirname(os.path.realpath(__file__)) +parent_path = os.path.join(dir_path, os.path.pardir) +sys.path.append(parent_path) +from blenderkit_server_utils import paths + + +if __name__ == "__main__": + datafile = sys.argv[-1] + print(f"datafile: {datafile}") + with open(datafile, "r", encoding="utf-8") as f: + data = json.load(f) + twinbru_asset = data["asset_data"] + temp_folder = data["temp_folder"] + result_filepath = data["result_filepath"] + print(f"temp_folder: {temp_folder}") + + # convert name - remove _ and remove the number that comes last in name + # readable_name = twinbru_asset["name"].split("_") + # capitalize the first letter of each word + # readable_name = " ".join(word.capitalize() for word in readable_name[:-1]) + readable_name = twinbru_asset["name"] + + # create a new material + material = bpy.data.materials.new(name=readable_name) + material.name = readable_name + material.use_nodes = True + material.blend_method = "BLEND" + # material.shadow_method = "HASHED" + material.diffuse_color = (1, 1, 1, 1) + # ensure the material is saved + material.use_fake_user = True + # create the node tree + nodes = material.node_tree.nodes + links = material.node_tree.links + + # set nodes spacing + node_gap_x = 400 + node_gap_y = 300 + # find the output node + output_node = nodes.get("Material Output") + if not output_node: + output_node = nodes.new(type="ShaderNodeOutputMaterial") + output_node.location = (node_gap_x, 0) + + # find Principled BSDF node + principled_bsdf = nodes.get("Principled BSDF") + if not principled_bsdf: + principled_bsdf = nodes.new(type="ShaderNodeBsdfPrincipled") + principled_bsdf.location = (0, 0) + + # Link the Principled BSDF to the Output Material node + links.new(principled_bsdf.outputs[0], output_node.inputs[0]) + + # Get the texture file names + # texture_directory = os.path.join(temp_folder, "pbr-pol") + + # the final file TwinBru sent doesn't have subfolders + texture_directory = temp_folder + texture_files = os.listdir(texture_directory) + mapping_substrings = { + "BASE": "Base Color", + "MTL": "Metallic", + "ROUGH": "Roughness", + "ALPHA": "Alpha", + "NRM": "Normal", + } + mapping_substrings = { + "col": "Base Color", + "met": "Metallic", + "rough": "Roughness", + "alpha": "Alpha", + "nrm": "Normal", + } + index = 0 + texture_nodes = [] + for substring, mapping in mapping_substrings.items(): + for texture_file in texture_files: + if substring + "." 
in texture_file: + print(f"texture_file: {texture_file}") + texture_path = os.path.join(texture_directory, texture_file) + texture_node = nodes.new(type="ShaderNodeTexImage") + texture_node.location = ( + -4 * node_gap_x, + node_gap_y * 2 - index * node_gap_y, + ) + texture_node.image = bpy.data.images.load(texture_path) + # set anything besides color to non color + if mapping != "Base Color": + texture_node.image.colorspace_settings.name = "Non-Color" + # normal maps need a normal map node + if mapping == "Normal": + normal_map = nodes.new(type="ShaderNodeNormalMap") + normal_map.location = ( + -1 *node_gap_x, + texture_node.location[1], + ) + # Convert DX normal map to OpenGL + separate_xyz = nodes.new(type="ShaderNodeSeparateXYZ") + separate_xyz.location = (-2.5 * node_gap_x, texture_node.location[1]) + + # Invert Y channel + invert_y = nodes.new(type="ShaderNodeMath") + invert_y.operation = 'SUBTRACT' + invert_y.inputs[0].default_value = 1.0 + invert_y.location = (-2 * node_gap_x, texture_node.location[1] - 50) + + # Recombine channels + combine_xyz = nodes.new(type="ShaderNodeCombineXYZ") + combine_xyz.location = (-1.5 * node_gap_x, texture_node.location[1] - 100) + + # Link nodes + links.new(texture_node.outputs[0], separate_xyz.inputs[0]) + links.new(separate_xyz.outputs[0], combine_xyz.inputs[0]) # X + links.new(separate_xyz.outputs[1], invert_y.inputs[1]) # Y + links.new(invert_y.outputs[0], combine_xyz.inputs[1]) # Inverted Y + links.new(separate_xyz.outputs[2], combine_xyz.inputs[2]) # Z + links.new(combine_xyz.outputs[0], normal_map.inputs["Color"]) + links.new(normal_map.outputs[0], principled_bsdf.inputs[mapping]) + else: + links.new(texture_node.outputs[0], principled_bsdf.inputs[mapping]) + index += 1 + texture_nodes.append(texture_node) + + # Mark the material as asset for Belnder's asset manager + material.asset_mark() + material.asset_generate_preview() + material.name = twinbru_asset["name"] # not sure why but this works here but not before. + print(f"processed material {material.name}") + # Pack all .blend textures + bpy.ops.file.pack_all() + # save the material + bpy.ops.wm.save_as_mainfile(filepath=result_filepath) diff --git a/blenderkit_server_utils/append_link.py b/blenderkit_server_utils/append_link.py new file mode 100644 index 0000000..531934f --- /dev/null +++ b/blenderkit_server_utils/append_link.py @@ -0,0 +1,120 @@ +def append_material(file_name, matname=None, link=False, fake_user=True): + """Append a material type asset + + Args: + file_name (str): Path to the .blend file containing the material + matname (str, optional): Name of material to append. If None, appends first found. + link (bool, optional): Link the material instead of appending. Defaults to False. + fake_user (bool, optional): Set fake user on appended material. Defaults to True. 
+ + Returns: + bpy.types.Material: The appended/linked material or None if failed + """ + import bpy + + mats_before = bpy.data.materials[:] + try: + with bpy.data.libraries.load(file_name, link=link, relative=True) as ( + data_from, + data_to, + ): + found = False + for m in data_from.materials: + if matname is None or m == matname: + data_to.materials = [m] + found = True + break + + if not found: + return None + + # Get the newly added material + mats_after = bpy.data.materials[:] + new_mats = [m for m in mats_after if m not in mats_before] + + if not new_mats: + return None + + mat = new_mats[0] + if fake_user: + mat.use_fake_user = True + + return mat + + except Exception as e: + print(f"Failed to append material: {e}") + return None + +def link_collection( + file_name: str, + location=(0, 0, 0), + rotation=(0, 0, 0), + link=False, + name=None, + parent=None, +) -> tuple: + """Link/append a collection from a blend file. + + Args: + file_name: Path to the blend file + location: Location for the collection (default: origin) + rotation: Rotation for the collection (default: no rotation) + link: True to link, False to append + name: Name of collection to find (if None, uses first) + parent: Parent object to parent collection to + + Returns: + tuple: (main_object, all_objects) + - main_object: The parent/main object of the collection + - all_objects: List of all objects in the collection + """ + import bpy + + # Store existing collections to find new ones + collections_before = bpy.data.collections[:] + objects_before = bpy.data.objects[:] + + # Link/append the collection + with bpy.data.libraries.load(file_name, link=link) as (data_from, data_to): + found = False + for cname in data_from.collections: + if name is None or cname == name: + data_to.collections = [cname] + found = True + break + + if not found: + print(f"Collection {name} not found in file {file_name}") + return None, [] + + # Find the newly added collection + collections_after = bpy.data.collections[:] + new_collections = [c for c in collections_after if c not in collections_before] + if not new_collections: + print("No new collections found after linking/appending") + return None, [] + + new_collection = new_collections[0] + + # Link the collection to the scene + if new_collection.name not in bpy.context.scene.collection.children: + bpy.context.scene.collection.children.link(new_collection) + + # Get all objects from the collection + all_objects = [] + for obj in new_collection.all_objects: + all_objects.append(obj) + if obj.parent is None: + obj.location = location + obj.rotation_euler = rotation + if parent is not None: + obj.parent = parent + + # Find main/parent object (first object without parent) + main_object = None + for obj in all_objects: + if obj.parent is None: + main_object = obj + break + + return main_object, all_objects \ No newline at end of file diff --git a/blenderkit_server_utils/cloudflare_storage.py b/blenderkit_server_utils/cloudflare_storage.py index 6dc5681..6ff98de 100644 --- a/blenderkit_server_utils/cloudflare_storage.py +++ b/blenderkit_server_utils/cloudflare_storage.py @@ -5,8 +5,9 @@ import boto3 from botocore.exceptions import NoCredentialsError + class CloudflareStorage: - def __init__(self, access_key, secret_key, endpoint_url, region_name='auto'): + def __init__(self, access_key, secret_key, endpoint_url, region_name="auto"): """ Initializes the connection to Cloudflare's S3-compatible storage. 
@@ -16,11 +17,13 @@ def __init__(self, access_key, secret_key, endpoint_url, region_name='auto'): :param region_name: Region name, default is 'auto' for Cloudflare. """ self.session = boto3.session.Session() - self.client = self.session.client('s3', - region_name=region_name, - endpoint_url=endpoint_url, - aws_access_key_id=access_key, - aws_secret_access_key=secret_key) + self.client = self.session.client( + "s3", + region_name=region_name, + endpoint_url=endpoint_url, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + ) def upload_file(self, file_name, bucket_name, object_name=None): """ @@ -52,15 +55,31 @@ def list_all_folders(self, bucket_name): :param bucket_name: Name of the Cloudflare R2 bucket. :return: A set of all folder prefixes. """ - paginator = self.client.get_paginator('list_objects_v2') + paginator = self.client.get_paginator("list_objects_v2") folders = set() # Use a paginator to fetch all objects - for page in paginator.paginate(Bucket=bucket_name, Delimiter='/'): - for prefix in page.get('CommonPrefixes', []): - folders.add(prefix['Prefix']) + for page in paginator.paginate(Bucket=bucket_name, Delimiter="/"): + for prefix in page.get("CommonPrefixes", []): + folders.add(prefix["Prefix"]) return folders + + def list_folder_contents(self, bucket_name, folder_name): + """ + List all objects in a specified folder within the Cloudflare R2 bucket. + + :param bucket_name: The name of the Cloudflare R2 bucket. + :param folder_name: The prefix of the folder to list contents from. Must end with '/'. + :return: A list of objects in the folder. + """ + # Ensure the folder name ends with '/' to accurately match the folder structure + if not folder_name.endswith("/"): + folder_name += "/" + + response = self.client.list_objects_v2(Bucket=bucket_name, Prefix=folder_name) + return response.get("Contents", []) + def folder_exists(self, bucket_name, folder_name): """ Check if a folder exists in a specified bucket. @@ -70,17 +89,19 @@ def folder_exists(self, bucket_name, folder_name): :return: True if the folder exists, False otherwise. """ # Ensure the folder name ends with a '/' to accurately match the folder structure - if not folder_name.endswith('/'): - folder_name += '/' + if not folder_name.endswith("/"): + folder_name += "/" response = self.client.list_objects_v2( Bucket=bucket_name, Prefix=folder_name, - MaxKeys=1 # We only need to find one object to confirm the folder exists + MaxKeys=1, # We only need to find one object to confirm the folder exists ) - return 'Contents' in response and len(response['Contents']) > 0 + return "Contents" in response and len(response["Contents"]) > 0 - def upload_folder(self, local_folder_path, bucket_name, cloudflare_folder_prefix=''): + def upload_folder( + self, local_folder_path, bucket_name, cloudflare_folder_prefix="" + ): """ Recursively uploads a folder and its contents to Cloudflare R2, maintaining the folder structure, and creates an index file in the top-level directory listing all uploaded files. 
@@ -95,24 +116,38 @@ def upload_folder(self, local_folder_path, bucket_name, cloudflare_folder_prefix for filename in files: local_path = os.path.join(root, filename) relative_path = os.path.relpath(local_path, start=local_folder_path) - cloudflare_object_name = os.path.join(cloudflare_folder_prefix, relative_path) - cloudflare_object_name = cloudflare_object_name.replace('\\', '/') + cloudflare_object_name = os.path.join( + cloudflare_folder_prefix, relative_path + ) + cloudflare_object_name = cloudflare_object_name.replace("\\", "/") # Upload the file if self.upload_file(local_path, bucket_name, cloudflare_object_name): - uploaded_files.append(cloudflare_object_name) # Add successful uploads to the list + uploaded_files.append( + cloudflare_object_name + ) # Add successful uploads to the list # After all files are uploaded, create and upload the index.json file - index_file_path = '/tmp/index.json' if cloudflare_folder_prefix else cloudflare_folder_prefix + 'index.json' - with open(index_file_path, 'w') as index_file: + # only do this if there are files to upload + if not uploaded_files: + print("No files found to upload.") + return + index_file_path = ( + "/tmp/index.json" + if cloudflare_folder_prefix + else cloudflare_folder_prefix + "index.json" + ) + with open(index_file_path, "w") as index_file: json.dump(uploaded_files, index_file) # Upload the index file - cloudflare_object_name = os.path.join(cloudflare_folder_prefix, 'index.json') - cloudflare_object_name = cloudflare_object_name.replace('\\', '/') + cloudflare_object_name = os.path.join(cloudflare_folder_prefix, "index.json") + cloudflare_object_name = cloudflare_object_name.replace("\\", "/") self.upload_file(index_file_path, bucket_name, cloudflare_object_name) - print(f"Uploaded index file to Cloudflare R2 storage at {cloudflare_folder_prefix}index.json") + print( + f"Uploaded index file to Cloudflare R2 storage at {cloudflare_folder_prefix}index.json" + ) def delete_folder_contents(self, bucket_name, folder_prefix): """ @@ -122,17 +157,19 @@ def delete_folder_contents(self, bucket_name, folder_prefix): :param folder_prefix: The prefix of the folder to delete contents from. Must end with '/'. """ # Ensure the folder prefix ends with '/' to avoid accidentally deleting unintended objects - if not folder_prefix.endswith('/'): - folder_prefix += '/' + if not folder_prefix.endswith("/"): + folder_prefix += "/" # List all objects in the folder response = self.client.list_objects_v2(Bucket=bucket_name, Prefix=folder_prefix) - objects = response.get('Contents', []) + objects = response.get("Contents", []) # If there are objects to delete, prepare and execute the deletion if objects: - delete_keys = {'Objects': [{'Key': obj['Key']} for obj in objects]} - delete_response = self.client.delete_objects(Bucket=bucket_name, Delete=delete_keys) + delete_keys = {"Objects": [{"Key": obj["Key"]} for obj in objects]} + delete_response = self.client.delete_objects( + Bucket=bucket_name, Delete=delete_keys + ) print(f"Deleted objects: {delete_response}") else: print("No objects found to delete.") @@ -144,26 +181,61 @@ def delete_old_files(self, bucket_name, x_days): :param bucket_name: The name of the Cloudflare R2 bucket. :param x_days: The age threshold in days for deleting files. 
""" - paginator = self.client.get_paginator('list_objects_v2') + paginator = self.client.get_paginator("list_objects_v2") delete_before_date = datetime.now(timezone.utc) - timedelta(days=x_days) # Prepare a batch delete operation - delete_batch = {'Objects': []} + delete_batch = {"Objects": []} + + # Iterate through all objects in the bucket + for page in paginator.paginate(Bucket=bucket_name): + for obj in page.get("Contents", []): + # If the object is older than the specified days, mark it for deletion + if obj["LastModified"] < delete_before_date: + delete_batch["Objects"].append({"Key": obj["Key"]}) + + # Perform the deletion in batches of 1000 (S3 limit) + if len(delete_batch["Objects"]) >= 1000: + self.client.delete_objects( + Bucket=bucket_name, Delete=delete_batch + ) + delete_batch = {"Objects": []} # Reset batch + + # Delete any remaining objects in the last batch + if delete_batch["Objects"]: + self.client.delete_objects(Bucket=bucket_name, Delete=delete_batch) + + print("Old files deleted.") + + def delete_new_files(self, bucket_name, x_days): + """ + Deletes files that are younger than x_days in the specified bucket. + + :param bucket_name: The name of the Cloudflare R2 bucket. + :param x_days: The age threshold in days for deleting files. + """ + paginator = self.client.get_paginator("list_objects_v2") + delete_after_date = datetime.now(timezone.utc) - timedelta(days=x_days) + + # Prepare a batch delete operation + delete_batch = {"Objects": []} # Iterate through all objects in the bucket for page in paginator.paginate(Bucket=bucket_name): - for obj in page.get('Contents', []): + for obj in page.get("Contents", []): # If the object is older than the specified days, mark it for deletion - if obj['LastModified'] < delete_before_date: - delete_batch['Objects'].append({'Key': obj['Key']}) + if obj["LastModified"] < delete_after_date: + delete_batch["Objects"].append({"Key": obj["Key"]}) # Perform the deletion in batches of 1000 (S3 limit) - if len(delete_batch['Objects']) >= 1000: - self.client.delete_objects(Bucket=bucket_name, Delete=delete_batch) - delete_batch = {'Objects': []} # Reset batch + if len(delete_batch["Objects"]) >= 1000: + self.client.delete_objects( + Bucket=bucket_name, Delete=delete_batch + ) + delete_batch = {"Objects": []} # Reset batch # Delete any remaining objects in the last batch - if delete_batch['Objects']: + if delete_batch["Objects"]: self.client.delete_objects(Bucket=bucket_name, Delete=delete_batch) - print("Old files deleted.") \ No newline at end of file + print("New files deleted.") diff --git a/blenderkit_server_utils/download.py b/blenderkit_server_utils/download.py index cc3449f..f85a0ce 100644 --- a/blenderkit_server_utils/download.py +++ b/blenderkit_server_utils/download.py @@ -4,233 +4,242 @@ from . import utils from . import paths -SCENE_UUID = '5d22a2ce-7d4e-4500-9b1a-e5e79f8732c0' - +SCENE_UUID = "5d22a2ce-7d4e-4500-9b1a-e5e79f8732c0" def server_2_local_filename(asset_data, filename): - """Convert file name on server to file name local. This should get replaced.""" + """Convert file name on server to file name local. 
This should get replaced.""" - fn = filename.replace('blend_', '') - fn = fn.replace('resolution_', '') - n = paths.slugify(asset_data['name']) + '_' + fn - return n + fn = filename.replace("blend_", "") + fn = fn.replace("resolution_", "") + n = paths.slugify(asset_data["name"]) + "_" + fn + return n def files_size_to_text(size): - fsmb = size / (1024 * 1024) - fskb = size % 1024 - if fsmb == 0: - return f'{round(fskb)}KB' - else: - return f'{round(fsmb, 1)}MB' + fsmb = size / (1024 * 1024) + fskb = size % 1024 + if fsmb == 0: + return f"{round(fskb)}KB" + else: + return f"{round(fsmb, 1)}MB" def get_core_file(asset_data, resolution, find_closest_with_url=False): - ''' - Returns core blend file. - ''' - for f in asset_data['files']: - if f['fileType'] == 'blend': - orig = f - return orig, 'blend' - - -def get_download_url(asset_data, scene_id, api_key, tcom=None, resolution='blend'): - ''''retrieves the download url. The server checks if user can download the item.''' - print('getting download url') - - headers = utils.get_headers(api_key) - - data = { - 'scene_uuid': scene_id - } - r = None - - res_file_info, resolution = get_core_file(asset_data, resolution) - print(res_file_info) - try: - r = requests.get(res_file_info['downloadUrl'], params=data, headers=headers) - except Exception as e: - print(e) - if tcom is not None: - tcom.error = True - if r == None: - if tcom is not None: - tcom.report = 'Connection Error' - tcom.error = True - return 'Connection Error' - print(r.status_code, r.text) - - if r.status_code < 400: - data = r.json() - url = data['filePath'] - - res_file_info['url'] = url - res_file_info['file_name'] = paths.extract_filename_from_url(url) - - # print(res_file_info, url) - print("URL:", url) - return True - - - - -def get_download_filepath(asset_data, resolution='blend', can_return_others=False, directory=None): - '''Get all possible paths of the asset and resolution. Usually global and local directory.''' - windows_path_limit = 250 - if directory is None: - directory = paths.get_download_dir(asset_data['assetType']) - - res_file, resolution = get_core_file(asset_data, resolution, find_closest_with_url=can_return_others) - name_slug = paths.slugify(asset_data['name']) - if len(name_slug) > 16: - name_slug = name_slug[:16] - asset_folder_name = f"{name_slug}_{asset_data['id']}" - - file_names = [] - - if not res_file: - return file_names - if res_file.get('url') is not None: - # Tweak the names a bit: - # remove resolution and blend words in names - # - fn = paths.extract_filename_from_url(res_file['url']) - n = server_2_local_filename(asset_data, fn) + """ + Returns core blend file. + """ + for f in asset_data["files"]: + if f["fileType"] == "blend": + orig = f + return orig, "blend" - asset_folder_path = os.path.join(directory, asset_folder_name) - if not os.path.exists(asset_folder_path): - os.makedirs(asset_folder_path) +def get_download_url(asset_data, scene_id, api_key, tcom=None, resolution="blend"): + """'retrieves the download url. 
The server checks if user can download the item.""" + print("getting download url") - file_name = os.path.join(asset_folder_path, n) - file_names.append(file_name) + headers = utils.get_headers(api_key) + + data = {"scene_uuid": scene_id} + r = None - print('file paths', file_names) + res_file_info, resolution = get_core_file(asset_data, resolution) + print(res_file_info) + try: + r = requests.get(res_file_info["downloadUrl"], params=data, headers=headers) + except Exception as e: + print(e) + if tcom is not None: + tcom.error = True + if r == None: + if tcom is not None: + tcom.report = "Connection Error" + tcom.error = True + return "Connection Error" + print(r.status_code, r.text) + + if r.status_code < 400: + data = r.json() + url = data["filePath"] + + res_file_info["url"] = url + res_file_info["file_name"] = paths.extract_filename_from_url(url) + + # print(res_file_info, url) + print("URL:", url) + return True + + +def get_download_filepath( + asset_data, resolution="blend", can_return_others=False, directory=None +): + """Get all possible paths of the asset and resolution. Usually global and local directory.""" + windows_path_limit = 250 + if directory is None: + directory = paths.get_download_dir(asset_data["assetType"]) + + res_file, resolution = get_core_file( + asset_data, resolution, find_closest_with_url=can_return_others + ) + name_slug = paths.slugify(asset_data["name"]) + if len(name_slug) > 16: + name_slug = name_slug[:16] + asset_folder_name = f"{name_slug}_{asset_data['id']}" + + file_names = [] + + if not res_file: + return file_names + if res_file.get("url") is not None: + # Tweak the names a bit: + # remove resolution and blend words in names + # + fn = paths.extract_filename_from_url(res_file["url"]) + n = server_2_local_filename(asset_data, fn) + + asset_folder_path = os.path.join(directory, asset_folder_name) + + if not os.path.exists(asset_folder_path): + os.makedirs(asset_folder_path) + + file_name = os.path.join(asset_folder_path, n) + file_names.append(file_name) + + print("file paths", file_names) - return file_names + return file_names -def check_existing(asset_data, resolution='blend', can_return_others=False, directory=None): - ''' check if the object exists on the hard drive''' - fexists = False +def check_existing( + asset_data, resolution="blend", can_return_others=False, directory=None +): + """check if the object exists on the hard drive""" + fexists = False - if asset_data.get('files') == None: - # this is because of some very odl files where asset data had no files structure. - return False + if asset_data.get("files") == None: + # this is because of some very odl files where asset data had no files structure. + return False - file_names = get_download_filepath(asset_data, resolution, can_return_others=can_return_others, directory=directory) + file_names = get_download_filepath( + asset_data, resolution, can_return_others=can_return_others, directory=directory + ) - print('check if file already exists' + str(file_names)) - if len(file_names) == 2: - # TODO this should check also for failed or running downloads. - # If download is running, assign just the running thread. if download isn't running but the file is wrong size, - # delete file and restart download (or continue downoad? if possible.) 
- if os.path.isfile(file_names[0]): # and not os.path.isfile(file_names[1]) - utils.copy_asset(file_names[0], file_names[1]) - elif not os.path.isfile(file_names[0]) and os.path.isfile( - file_names[1]): # only in case of changed settings or deleted/moved global dict. - utils.copy_asset(file_names[1], file_names[0]) + print("check if file already exists" + str(file_names)) + if len(file_names) == 2: + # TODO this should check also for failed or running downloads. + # If download is running, assign just the running thread. if download isn't running but the file is wrong size, + # delete file and restart download (or continue downoad? if possible.) + if os.path.isfile(file_names[0]): # and not os.path.isfile(file_names[1]) + utils.copy_asset(file_names[0], file_names[1]) + elif not os.path.isfile(file_names[0]) and os.path.isfile( + file_names[1] + ): # only in case of changed settings or deleted/moved global dict. + utils.copy_asset(file_names[1], file_names[0]) - if len(file_names) > 0 and os.path.isfile(file_names[0]): - fexists = True - return fexists + if len(file_names) > 0 and os.path.isfile(file_names[0]): + fexists = True + return fexists def delete_unfinished_file(file_name): - ''' - Deletes download if it wasn't finished. If the folder it's containing is empty, it also removes the directory - Parameters - ---------- - file_name - - Returns - ------- - None - ''' - try: - os.remove(file_name) - except Exception as e: - print(f'{e}') - asset_dir = os.path.dirname(file_name) - if len(os.listdir(asset_dir)) == 0: - os.rmdir(asset_dir) - return - - -def download_asset_file(asset_data, resolution='blend', api_key='', directory=None): - # this is a simple non-threaded way to download files for background resolution genenration tool - file_names = get_download_filepath(asset_data, resolution, directory=directory) # prefer global dir if possible. - if len(file_names) == 0: - return None - - file_name = file_names[0] - - if check_existing(asset_data, resolution=resolution, directory=directory): - # this sends the thread for processing, where another check should occur, since the file might be corrupted. - # print('not downloading, already in db') + """ + Deletes download if it wasn't finished. If the folder it's containing is empty, it also removes the directory + Parameters + ---------- + file_name + + Returns + ------- + None + """ + try: + os.remove(file_name) + except Exception as e: + print(f"{e}") + asset_dir = os.path.dirname(file_name) + if len(os.listdir(asset_dir)) == 0: + os.rmdir(asset_dir) + return + + +def download_asset_file(asset_data, resolution="blend", api_key="", directory=None): + # this is a simple non-threaded way to download files for background resolution genenration tool + file_names = get_download_filepath( + asset_data, resolution, directory=directory + ) # prefer global dir if possible. + if len(file_names) == 0: + return None + + file_name = file_names[0] + + if check_existing(asset_data, resolution=resolution, directory=directory): + # this sends the thread for processing, where another check should occur, since the file might be corrupted. 
+ # print('not downloading, already in db') + return file_name + + download_canceled = False + + with open(file_name, "wb") as f: + print("Downloading %s" % file_name) + headers = utils.get_headers(api_key) + res_file_info, resolution = get_core_file(asset_data, resolution) + session = requests.Session() + + response = session.get(res_file_info["url"], stream=True) + total_length = response.headers.get("Content-Length") + + if total_length is None or int(total_length) < 1000: # no content length header + download_canceled = True + print(response.content) + else: + total_length = int(total_length) + dl = 0 + last_percent = 0 + percent = 0 + for data in response.iter_content(chunk_size=4096 * 10): + dl += len(data) + + # the exact output you're looking for: + fs_str = files_size_to_text(total_length) + + percent = int(dl * 100 / total_length) + if percent > last_percent: + last_percent = percent + # sys.stdout.write('\r') + # sys.stdout.write(f'Downloading {asset_data['name']} {fs_str} {percent}% ') # + int(dl * 50 / total_length) * 'x') + print( + f'Downloading {asset_data["name"]} {fs_str} {percent}% ' + ) # + int(dl * 50 / total_length) * 'x') + # sys.stdout.flush() + + # print(int(dl*50/total_length)*'x'+'\r') + f.write(data) + if download_canceled: + delete_unfinished_file(file_name) + return None + return file_name - download_canceled = False - with open(file_name, "wb") as f: - print("Downloading %s" % file_name) - headers = utils.get_headers(api_key) - res_file_info, resolution = get_core_file(asset_data, resolution) - session = requests.Session() +def download_asset(asset_data, resolution="blend", api_key="", directory=None): + """ + Download an asset non-threaded way. + Parameters + ---------- + asset_data - search result from elastic or assets endpoints from API - response = session.get(res_file_info['url'], stream=True) - total_length = response.headers.get('Content-Length') + Returns + ------- + path to the resulting asset file or None if asset isn't accessible + """ - if total_length is None or int(total_length) < 1000: # no content length header - download_canceled = True - print(response.content) - else: - total_length = int(total_length) - dl = 0 - last_percent = 0 - percent = 0 - for data in response.iter_content(chunk_size=4096 * 10): - dl += len(data) - - # the exact output you're looking for: - fs_str = files_size_to_text(total_length) - - percent = int(dl * 100 / total_length) - if percent > last_percent: - last_percent = percent - # sys.stdout.write('\r') - # sys.stdout.write(f'Downloading {asset_data['name']} {fs_str} {percent}% ') # + int(dl * 50 / total_length) * 'x') - print( - f'Downloading {asset_data["name"]} {fs_str} {percent}% ') # + int(dl * 50 / total_length) * 'x') - # sys.stdout.flush() - - # print(int(dl*50/total_length)*'x'+'\r') - f.write(data) - if download_canceled: - delete_unfinished_file(file_name) - return None - - return file_name - - -def download_asset(asset_data, resolution='blend', api_key='', directory=None): - ''' - Download an asset non-threaded way. 
- Parameters - ---------- - asset_data - search result from elastic or assets endpoints from API - - Returns - ------- - path to the resulting asset file or None if asset isn't accessible - ''' - - has_url = get_download_url(asset_data, SCENE_UUID, api_key, tcom=None, resolution='blend') # Resolution does not have any effect - if not has_url: - print("Could not get URL for the asset") - return None - - fpath = download_asset_file(asset_data, api_key=api_key, directory=directory) - return fpath + has_url = get_download_url( + asset_data, SCENE_UUID, api_key, tcom=None, resolution="blend" + ) # Resolution does not have any effect + if not has_url: + print("Could not get URL for the asset") + return None + + fpath = download_asset_file(asset_data, api_key=api_key, directory=directory) + return fpath diff --git a/blenderkit_server_utils/render_UVs.py b/blenderkit_server_utils/render_UVs.py index 703cd6b..c8c9e94 100644 --- a/blenderkit_server_utils/render_UVs.py +++ b/blenderkit_server_utils/render_UVs.py @@ -1,5 +1,4 @@ import bpy -import numpy as np # Sets up the camera within the given scene for rendering the UV layout. @@ -12,7 +11,7 @@ def setup_scene_camera(scene): # Configure the camera to use orthographic projection, # making it suitable for 2D UV layout rendering. - camera_data.type = 'ORTHO' + camera_data.type = "ORTHO" camera_data.ortho_scale = 1 # Adjust based on the size of your UV meshes. camera_object.location = (0.5, 0.5, 1) # Position the camera to capture all UVs. @@ -22,12 +21,12 @@ def set_render_settings(scene, filepath): # Enable transparency in the final render to accommodate for transparent materials. scene.render.film_transparent = True # Use the Cycles render engine for high-quality rendering. - scene.render.engine = 'CYCLES' + scene.render.engine = "CYCLES" scene.cycles.samples = 5 # Reduce samples for faster rendering of simple scenes. # Set output format to WEBP, resolution, and file path for saving the render. - scene.render.image_settings.file_format = 'WEBP' - scene.render.image_settings.color_mode = 'RGB' + scene.render.image_settings.file_format = "WEBP" + scene.render.image_settings.color_mode = "RGB" scene.render.image_settings.quality = 60 scene.render.resolution_x = 1024 @@ -43,7 +42,7 @@ def render_and_save(scene): # Cleans up by removing the temporary scene and its objects after rendering. def cleanup_scene(scene): - bpy.ops.object.select_all(action='DESELECT') + bpy.ops.object.select_all(action="DESELECT") for obj in scene.objects: obj.select_set(True) bpy.ops.object.delete() # Delete all objects in the scene. @@ -51,10 +50,10 @@ def cleanup_scene(scene): # Utility function to set the active scene and camera, ensuring correct rendering settings. -def set_scene(name=''): - print(f'setting scene {name}') +def set_scene(name=""): + print(f"setting scene {name}") bpy.context.window.scene = bpy.data.scenes[name] - c = bpy.context.scene.objects.get('Camera') + c = bpy.context.scene.objects.get("Camera") if c is not None: bpy.context.scene.camera = c bpy.context.view_layer.update() @@ -64,7 +63,7 @@ def set_scene(name=''): def export_uvs_as_webps(obs, filepath): original_scene = bpy.context.scene uv_scene = bpy.data.scenes.new("UVScene") # Create a new scene for UV rendering. - set_scene(name='UVScene') + set_scene(name="UVScene") setup_scene_camera(uv_scene) build_uv_meshes(obs, uv_scene) # Generate mesh representations of UVs. 
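setup_scene_camera in the hunk above frames the 0..1 UV square exactly: an orthographic camera with ortho_scale = 1 covers one Blender unit, so placing it at (0.5, 0.5, 1) centres it over UV space. A minimal sketch of that camera setup, assuming it runs inside Blender's Python as these background scripts do:

import bpy


def add_uv_camera(scene):
    # ortho_scale = 1 makes the orthographic camera see a 1 x 1 area,
    # which matches the 0..1 UV square when the camera sits at (0.5, 0.5).
    camera_data = bpy.data.cameras.new("UVCamera")
    camera_data.type = "ORTHO"
    camera_data.ortho_scale = 1.0
    camera_object = bpy.data.objects.new("UVCamera", camera_data)
    camera_object.location = (0.5, 0.5, 1.0)  # hover one unit above the UV plane
    scene.collection.objects.link(camera_object)
    scene.camera = camera_object
    return camera_object

With a square render resolution the frame then maps 1:1 onto UV space.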
set_render_settings(uv_scene, filepath) @@ -75,30 +74,39 @@ def export_uvs_as_webps(obs, filepath): # Retrieves or creates a material designed for rendering UV layouts. def get_UV_material(): - m = bpy.data.materials.get('UV_RENDER_MATERIAL') + m = bpy.data.materials.get("UV_RENDER_MATERIAL") if m is None: - m = bpy.data.materials.new('UV_RENDER_MATERIAL') + m = bpy.data.materials.new("UV_RENDER_MATERIAL") m.use_nodes = True nodes = m.node_tree.nodes links = m.node_tree.links nodes.clear() # Start with a fresh node setup. # Set up nodes for a material that's partially transparent and emissive. - emission_node = nodes.new(type='ShaderNodeEmission') - emission_node.inputs['Color'].default_value = (1, 1, 1, 1) # White color for emission. - emission_node.inputs['Strength'].default_value = 1.0 # Emission strength. + emission_node = nodes.new(type="ShaderNodeEmission") + emission_node.inputs["Color"].default_value = ( + 1, + 1, + 1, + 1, + ) # White color for emission. + emission_node.inputs["Strength"].default_value = 1.0 # Emission strength. - transparent_node = nodes.new(type='ShaderNodeBsdfTransparent') + transparent_node = nodes.new(type="ShaderNodeBsdfTransparent") - mix_shader_node = nodes.new(type='ShaderNodeMixShader') - mix_shader_node.inputs['Fac'].default_value = 0.05 # Control the mix between transparent and emission. + mix_shader_node = nodes.new(type="ShaderNodeMixShader") + mix_shader_node.inputs["Fac"].default_value = ( + 0.05 # Control the mix between transparent and emission. + ) - material_output_node = nodes.new('ShaderNodeOutputMaterial') + material_output_node = nodes.new("ShaderNodeOutputMaterial") # Connect the nodes to set up the material. - links.new(emission_node.outputs['Emission'], mix_shader_node.inputs[2]) - links.new(transparent_node.outputs['BSDF'], mix_shader_node.inputs[1]) - links.new(mix_shader_node.outputs['Shader'], material_output_node.inputs['Surface']) + links.new(emission_node.outputs["Emission"], mix_shader_node.inputs[2]) + links.new(transparent_node.outputs["BSDF"], mix_shader_node.inputs[1]) + links.new( + mix_shader_node.outputs["Shader"], material_output_node.inputs["Surface"] + ) return m @@ -111,21 +119,30 @@ def build_uv_meshes(obs, scene): me = ob.data # The mesh data of the object. # Skip objects without UV layers. - if len(ob.data.uv_layers) == 0 or ob.data.uv_layers.active is None or len(ob.data.uv_layers.active.data) == 0: + if ( + len(ob.data.uv_layers) == 0 + or ob.data.uv_layers.active is None + or len(ob.data.uv_layers.active.data) == 0 + ): continue uv_layer = me.uv_layers.active # The active UV layer of the mesh. # Retrieve UV coordinates. - uvs = np.empty((2 * len(me.loops), 1)) + uvs = [0] * (2 * len(me.loops)) uv_layer.data.foreach_get("uv", uvs) - x, y = uvs.reshape((-1, 2)).T - z = np.zeros(len(x)) # Create a Z-axis array filled with zeros for 2D UV layout. + x = uvs[0::2] + y = uvs[1::2] + z = [0] * len(x) # Create a Z-axis array filled with zeros for 2D UV layout. # Create a new mesh for the UV layout. uvme = bpy.data.meshes.new("UVMesh_" + ob.name) - verts = np.array((x, y, z)).T # Combine x, y, z coordinates into vertices. - faces = [p.loop_indices for p in me.polygons] # Create faces from the polygons of the original mesh. + verts = [ + (x[i], y[i], z[i]) for i in range(len(x)) + ] # Combine x, y, z coordinates into vertices. + faces = [ + p.loop_indices for p in me.polygons + ] # Create faces from the polygons of the original mesh. # Convert UV data to mesh data. 
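The numpy import was dropped, so the UV extraction above fills a flat Python list via foreach_get("uv", ...) and splits it with plain slicing: u values sit at even indices, v values at odd ones. A tiny illustration of that de-interleaving step on its own:

# Flat buffer in the layout foreach_get("uv", ...) produces: u0, v0, u1, v1, ...
uvs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]

x = uvs[0::2]        # u coordinates (even indices)
y = uvs[1::2]        # v coordinates (odd indices)
z = [0.0] * len(x)   # flat on Z for a 2D UV layout

verts = list(zip(x, y, z))
assert verts == [(0.1, 0.2, 0.0), (0.3, 0.4, 0.0), (0.5, 0.6, 0.0)]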
uvme.from_pydata(verts, [], faces) @@ -149,8 +166,7 @@ def build_uv_meshes(obs, scene): # Duplicate the object to apply a wireframe modifier for visual distinction of edges. # only do this for smaller objects. bpy.ops.object.duplicate() - bpy.ops.object.modifier_add(type='WIREFRAME') + bpy.ops.object.modifier_add(type="WIREFRAME") # Adjust the wireframe modifier to make the lines very thin. bpy.context.object.modifiers["Wireframe"].thickness = 0.001 - diff --git a/blenderkit_server_utils/search.py b/blenderkit_server_utils/search.py index 605a00e..76dbdf7 100644 --- a/blenderkit_server_utils/search.py +++ b/blenderkit_server_utils/search.py @@ -6,123 +6,129 @@ from . import utils, paths -def get_search_simple(parameters, filepath=None, page_size=100, max_results=100000000, api_key=''): - ''' - Searches and returns the - - - Parameters - ---------- - parameters - dict of blenderkit elastic parameters - filepath - a file to save the results. If None, results are returned - page_size - page size for retrieved results - max_results - max results of the search - api_key - BlenderKit api key - - Returns - ------- - Returns search results as a list, and optionally saves to filepath - - ''' - results = get_search_without_bullshit(parameters, page_size=page_size, max_results=max_results, api_key=api_key) - if not filepath: + +def get_search_simple( + parameters, filepath=None, page_size=100, max_results=100000000, api_key="" +): + """ + Searches and returns the + + + Parameters + ---------- + parameters - dict of blenderkit elastic parameters + filepath - a file to save the results. If None, results are returned + page_size - page size for retrieved results + max_results - max results of the search + api_key - BlenderKit api key + + Returns + ------- + Returns search results as a list, and optionally saves to filepath + + """ + results = get_search_without_bullshit( + parameters, page_size=page_size, max_results=max_results, api_key=api_key + ) + if not filepath: + return results + + with open(filepath, "w", encoding="utf-8") as s: + json.dump(results, s, ensure_ascii=False, indent=4) + print(f"retrieved {len(results)} assets from elastic search") return results - with open(filepath, 'w', encoding='utf-8') as s: - json.dump(results, s, ensure_ascii=False, indent=4) - print(f'retrieved {len(results)} assets from elastic search') - return results - - -def get_search_without_bullshit(parameters, page_size=100, max_results=100000000, api_key='') -> list: - headers = utils.get_headers(api_key) - url = paths.get_api_url() + '/search/' - requeststring = url + '?query=' - for p in parameters.keys(): - requeststring += f'+{p}:{parameters[p]}' - - requeststring += '&page_size=' + str(page_size) - requeststring += '&dict_parameters=1' - - print(requeststring) - for count in range(1,6): # retry 5 times - try: - response = requests.get(requeststring, headers=headers) # , params = rparameters) - response.raise_for_status() - search_results = response.json() - break # success, lets continue after the for loop - except requests.exceptions.HTTPError as e: - print(f"HTTP error occurred: {e} \nStatus Code: {response.status_code}, Response Content: {response.text}") - except requests.exceptions.ConnectionError: - print("Connection error occurred. Check network connection.") - except requests.exceptions.Timeout: - print("Request timed out. The server might be busy or unresponsive.") - except requests.exceptions.JSONDecodeError as e: - print(f"Failed to decode JSON. 
Response content is not valid JSON.\nResponse Content: {response.text}") - except requests.exceptions.RequestException as e: - print(f"Unexpected request exception: {e}") - if count == 5: - raise RuntimeError("Could not get search results 5 times, retry depleted, probably broken connection.") - - delay = count**2 # retry fast, then slowdown - print(f"retrying no. {count} in {delay} seconds") - time.sleep(delay) - - results = [] - results.extend(search_results['results']) - page_index = 2 - page_count = math.ceil(search_results['count'] / page_size) - while search_results.get('next') and len(results) < max_results: - print(f'getting page {page_index} , total pages {page_count}') - response = requests.get(search_results['next'], headers=headers) # , params = rparameters) - search_results = response.json() +def get_search_without_bullshit( + parameters, page_size=100, max_results=100000000, api_key="" +) -> list: + headers = utils.get_headers(api_key) + url = paths.get_api_url() + "/search/" + requeststring = url + "?query=" + for p in parameters.keys(): + requeststring += f"+{p}:{parameters[p]}" + + requeststring += '&page_size=' + str(page_size) + requeststring += '&dict_parameters=1' + + print(requeststring) + for count in range(1,6): # retry 5 times + try: + response = requests.get(requeststring, headers=headers) # , params = rparameters) + response.raise_for_status() + search_results = response.json() + break # success, lets continue after the for loop + except requests.exceptions.HTTPError as e: + print(f"HTTP error occurred: {e} \nStatus Code: {response.status_code}, Response Content: {response.text}") + except requests.exceptions.ConnectionError: + print("Connection error occurred. Check network connection.") + except requests.exceptions.Timeout: + print("Request timed out. The server might be busy or unresponsive.") + except requests.exceptions.JSONDecodeError as e: + print(f"Failed to decode JSON. Response content is not valid JSON.\nResponse Content: {response.text}") + except requests.exceptions.RequestException as e: + print(f"Unexpected request exception: {e}") + if count == 5: + raise RuntimeError("Could not get search results 5 times, retry depleted, probably broken connection.") + + delay = count**2 # retry fast, then slowdown + print(f"retrying no. 
{count} in {delay} seconds") + time.sleep(delay) + + results = [] results.extend(search_results['results']) - page_index += 1 - return results + page_index = 2 + page_count = math.ceil(search_results['count'] / page_size) + while search_results.get('next') and len(results) < max_results: + print(f'getting page {page_index} , total pages {page_count}') + response = requests.get(search_results['next'], headers=headers) # , params = rparameters) + search_results = response.json() + results.extend(search_results["results"]) + page_index += 1 + return results def load_assets_list(filepath): - if os.path.exists(filepath): - with open(filepath, 'r', encoding='utf-8') as s: - assets = json.load(s) - return assets + if os.path.exists(filepath): + with open(filepath, "r", encoding="utf-8") as s: + assets = json.load(s) + return assets def filter_assets(filepath_source, filepath_target, name_strings): - # Filter assets by will: - assets = load_assets_list(filepath_source) - nassets = [] - last_asset_with_resolution_index = -1 # should help toskip failed assets - for a in assets: - # print(a['name']) - for filter in name_strings: - if a['name'].find(filter) > -1: - print(a['name']) - nassets.append(a) - with open(filepath_target, 'w') as s: - json.dump(nassets, s) + # Filter assets by will: + assets = load_assets_list(filepath_source) + nassets = [] + last_asset_with_resolution_index = -1 # should help toskip failed assets + for a in assets: + # print(a['name']) + for filter in name_strings: + if a["name"].find(filter) > -1: + print(a["name"]) + nassets.append(a) + with open(filepath_target, "w") as s: + json.dump(nassets, s) def reduce_assets(filepath_source, filepath_target, count=20): - # Filter assets by will: - assets = load_assets_list(filepath_source) - nassets = assets[:count] - with open(filepath_target, 'w') as s: - json.dump(nassets, s) + # Filter assets by will: + assets = load_assets_list(filepath_source) + nassets = assets[:count] + with open(filepath_target, "w") as s: + json.dump(nassets, s) def assets_from_last_generated(filepath_source, filepath_target, count=20): - # Enables to skip all fails. - assets = load_assets_list(filepath_source) - nassets = [] - max_index = 0 - for i, a in enumerate(assets): - print(a['name']) - for f in a['files']: - if f['fileType'].find('resolution') > -1: - max_index = i - nassets = assets[max_index:] - - with open(filepath_target, 'w') as s: - json.dump(nassets, s) + # Enables to skip all fails. 
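Once the first page is fetched, get_search_without_bullshit keeps following the "next" URL of each response and appending its "results" until there is no next page or max_results is reached. The same accumulation pattern in isolation, against a generic paginated endpoint rather than the BlenderKit API:

import requests


def fetch_all_pages(first_url, headers=None, max_results=1000):
    """Collect 'results' across paginated JSON responses that expose a 'next' URL."""
    results = []
    page = requests.get(first_url, headers=headers).json()
    results.extend(page["results"])
    while page.get("next") and len(results) < max_results:
        page = requests.get(page["next"], headers=headers).json()
        results.extend(page["results"])
    return results[:max_results]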
+ assets = load_assets_list(filepath_source) + nassets = [] + max_index = 0 + for i, a in enumerate(assets): + print(a["name"]) + for f in a["files"]: + if f["fileType"].find("resolution") > -1: + max_index = i + nassets = assets[max_index:] + + with open(filepath_target, "w") as s: + json.dump(nassets, s) diff --git a/blenderkit_server_utils/send_to_bg.py b/blenderkit_server_utils/send_to_bg.py index 08e9875..3d880c6 100644 --- a/blenderkit_server_utils/send_to_bg.py +++ b/blenderkit_server_utils/send_to_bg.py @@ -8,64 +8,94 @@ def version_to_float(version): - vars = version.split('.') - version = int(vars[0]) + .01 * int(vars[1]) + vars = version.split(".") + version = int(vars[0]) + 0.01 * int(vars[1]) if len(vars) > 2: - version += .0001 * int(vars[2]) + version += 0.0001 * int(vars[2]) return version def get_blender_version_from_blend(blend_file_path): # get blender version from blend file, works only for 2.8+ - with open(blend_file_path, 'rb') as blend_file: + with open(blend_file_path, "rb") as blend_file: # Read the first 12 bytes header = blend_file.read(24) # Check for compression - if header[0:7] == b'BLENDER': + if header[0:7] == b"BLENDER": # If the file is uncompressed, the version is in bytes 9-11 version_bytes = header[9:12] version = (chr(version_bytes[0]), chr(version_bytes[2])) - elif header[12:19] == b'BLENDER': + elif header[12:19] == b"BLENDER": # If the file is compressed, the version is in bytes 8-10 version_bytes = header[21:24] version = (chr(version_bytes[0]), chr(version_bytes[2])) else: version_bytes = None - version = ('2', '93') # last supported version by now + version = ("2", "93") # last supported version by now print(version) - return '.'.join(version) + return ".".join(version) -def get_blender_binary(asset_data, file_path='', binary_type='CLOSEST'): +def get_blender_binary(asset_data, file_path="", binary_type="CLOSEST"): # pick the right blender version for asset processing blenders_path = paths.BLENDERS_PATH blenders = [] # Get available blender versions for fn in os.listdir(blenders_path): - blenders.append((version_to_float(fn), fn)) - if binary_type == 'CLOSEST': + # Skip hidden files and non-version directories + if fn.startswith('.') or not any(c.isdigit() for c in fn): + continue + try: + version = version_to_float(fn) + blenders.append((version, fn)) + except ValueError: + continue + + if len(blenders) == 0: + raise RuntimeError(f"No valid Blender versions found in {blenders_path}") + + if binary_type == "CLOSEST": # get asset's blender upload version - asset_blender_version = version_to_float(asset_data['sourceAppVersion']) - print('asset blender version', asset_blender_version) + asset_blender_version = version_to_float(asset_data["sourceAppVersion"]) + print("asset blender version", asset_blender_version) asset_blender_version_from_blend = get_blender_version_from_blend(file_path) - print('asset blender version from blend', asset_blender_version_from_blend) + print("asset blender version from blend", asset_blender_version_from_blend) - asset_blender_version_from_blend = version_to_float(asset_blender_version_from_blend) - asset_blender_version = max(asset_blender_version, asset_blender_version_from_blend) - print('asset blender version picked', asset_blender_version) + asset_blender_version_from_blend = version_to_float( + asset_blender_version_from_blend + ) + asset_blender_version = max( + asset_blender_version, asset_blender_version_from_blend + ) + print("asset blender version picked", asset_blender_version) blender_target = 
min(blenders, key=lambda x: abs(x[0] - asset_blender_version)) - if binary_type == 'NEWEST': + if binary_type == "NEWEST": blender_target = max(blenders, key=lambda x: x[0]) # use latest blender version for hdrs - if asset_data['assetType'] == 'hdr': + if asset_data["assetType"] == "hdr": blender_target = blenders[-1] print(blender_target) - ext = '.exe' if sys.platform == 'win32' else '' - binary = os.path.join(blenders_path, blender_target[1], f'blender{ext}') - print(binary) + + # Handle different OS paths + if sys.platform == "darwin": # macOS + binary = os.path.join( + blenders_path, + blender_target[1], + "Contents", + "MacOS", + "Blender" + ) + else: # Windows and Linux + ext = ".exe" if sys.platform == "win32" else "" + binary = os.path.join(blenders_path, blender_target[1], f"blender{ext}") + + print(f"Using Blender binary: {binary}") + if not os.path.exists(binary): + raise RuntimeError(f"Blender binary not found at {binary}") + return binary @@ -80,11 +110,12 @@ def get_process_flags(): REALTIME_PRIORITY_CLASS = 0x00000100 flags = BELOW_NORMAL_PRIORITY_CLASS - if sys.platform != 'win32': # TODO test this on windows + if sys.platform != "win32": # TODO test this on windows flags = 0 return flags + def send_to_bg( asset_data: dict, asset_file_path: str = '', @@ -116,26 +147,22 @@ def send_to_bg( script - script that should be run in background addons - addons that should be enabled in the background instance target_format - which file format we want to export, e.g.: gltf, gltf_godot - - command - command which should be run in background. - verbosity_level - level of verbosity: 0 for silent mode, 1 to only print errors, 2 to print everything - Returns - ------- - None ''' def reader_thread(pipe, func): - for line in iter(pipe.readline, b''): + for line in iter(pipe.readline, b""): func(line.decode().strip()) pipe.close() if binary_path != "": print(f"Blender binary path: {binary_path}") else: - binary_path = get_blender_binary(asset_data, file_path=asset_file_path, binary_type=binary_type) + binary_path = get_blender_binary( + asset_data, file_path=asset_file_path, binary_type=binary_type + ) own_temp_folder = False - if temp_folder == '': + if temp_folder == "": temp_folder = tempfile.mkdtemp() own_temp_folder = True data = { @@ -151,23 +178,25 @@ def reader_thread(pipe, func): with open(datafile, 'w', encoding='utf-8') as s: json.dump(data, s, ensure_ascii=False, indent=4) - print('opening Blender instance to do processing - ', script) + print("opening Blender instance to do processing - ", script) # exclude hdrs from reading as .blend - if template_file_path == '': + if template_file_path == "": template_file_path = asset_file_path command = [ binary_path, "--background", - # "--factory-startup", + "--factory-startup", "-noaudio", template_file_path, - "--python", os.path.join(paths.BG_SCRIPTS_PATH, script), - "--", datafile + "--python", + os.path.join(paths.BG_SCRIPTS_PATH, script), + "--", + datafile, ] - if addons != '': - addons = f'--addons {addons}' + if addons != "": + addons = f"--addons {addons}" command.insert(3, addons) # Other code remains the same ... 
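The command assembled above launches a headless Blender: the selected binary, --background and --factory-startup, -noaudio, the template (or asset) .blend, the background script via --python, and finally the JSON job file after the -- separator, which the script reads back from sys.argv[-1]. A hedged sketch of the same invocation with illustrative paths and names:

import json
import os
import subprocess
import tempfile


def run_blender_job(binary_path, blend_file, script_path, job_data):
    # Persist the job description; everything after "--" is ignored by Blender
    # itself and handed to the --python script via sys.argv.
    datafile = os.path.join(tempfile.mkdtemp(), "resdata.json")
    with open(datafile, "w", encoding="utf-8") as s:
        json.dump(job_data, s, ensure_ascii=False, indent=4)

    command = [
        binary_path,
        "--background",
        "--factory-startup",  # ignore user config so runs are reproducible
        "-noaudio",
        blend_file,
        "--python", script_path,
        "--", datafile,
    ]
    return subprocess.run(command)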
@@ -175,18 +204,29 @@ def reader_thread(pipe, func): with subprocess.Popen(command, stdout=stdout_val, stderr=stderr_val, creationflags=get_process_flags()) as proc: if verbosity_level == 2: - stdout_thread = threading.Thread(target=reader_thread, - args=(proc.stdout, lambda line: print('STDOUT:', line))) - stderr_thread = threading.Thread(target=reader_thread, - args=(proc.stderr, lambda line: print('STDERR:', line))) + stdout_thread = threading.Thread( + target=reader_thread, + args=(proc.stdout, lambda line: print("STDOUT:", line)), + ) + stderr_thread = threading.Thread( + target=reader_thread, + args=(proc.stderr, lambda line: print("STDERR:", line)), + ) elif verbosity_level == 1: - stdout_thread = threading.Thread(target=reader_thread, - args=(proc.stdout, lambda _: None)) - stderr_thread = threading.Thread(target=reader_thread, - args=(proc.stderr, lambda line: print('STDERR:', line))) + stdout_thread = threading.Thread( + target=reader_thread, args=(proc.stdout, lambda _: None) + ) + stderr_thread = threading.Thread( + target=reader_thread, + args=(proc.stderr, lambda line: print("STDERR:", line)), + ) else: - stdout_thread = threading.Thread(target=reader_thread, args=(proc.stdout, lambda _: None)) - stderr_thread = threading.Thread(target=reader_thread, args=(proc.stderr, lambda _: None)) + stdout_thread = threading.Thread( + target=reader_thread, args=(proc.stdout, lambda _: None) + ) + stderr_thread = threading.Thread( + target=reader_thread, args=(proc.stderr, lambda _: None) + ) stdout_thread.start() stderr_thread.start() diff --git a/blenderkit_server_utils/upload.py b/blenderkit_server_utils/upload.py index 3d97122..fdb42ff 100644 --- a/blenderkit_server_utils/upload.py +++ b/blenderkit_server_utils/upload.py @@ -2,9 +2,11 @@ import sys import requests from . 
import utils, paths +import json + class upload_in_chunks(object): - def __init__(self, filename, chunksize=1 << 13, report_name='file'): + def __init__(self, filename, chunksize=1 << 13, report_name="file"): self.filename = filename self.chunksize = chunksize self.totalsize = os.path.getsize(filename) @@ -12,7 +14,7 @@ def __init__(self, filename, chunksize=1 << 13, report_name='file'): self.report_name = report_name def __iter__(self): - with open(self.filename, 'rb') as file: + with open(self.filename, "rb") as file: while True: data = file.read(self.chunksize) if not data: @@ -20,7 +22,9 @@ def __iter__(self): break self.readsofar += len(data) percent = self.readsofar * 1e2 / self.totalsize - print(f"Uploading {self.report_name} {percent}%",) + print( + f"Uploading {self.report_name} {percent}%", + ) # bg_blender.progress('uploading %s' % self.report_name, percent) # sys.stderr.write("\r{percent:3.0f}%".format(percent=percent)) @@ -29,20 +33,23 @@ def __iter__(self): def __len__(self): return self.totalsize + def upload_file(upload_data, f): headers = utils.get_headers(upload_data['token']) version_id = upload_data['id'] print(f"\n----> UPLOADING {f['type']} {os.path.basename(f['file_path'])}") + upload_info = { - 'assetId': version_id, - 'fileType': f['type'], - 'fileIndex': f['index'], - 'originalFilename': os.path.basename(f['file_path']) + "assetId": version_id, + "fileType": f["type"], + "fileIndex": f["index"], + "originalFilename": os.path.basename(f["file_path"]), } print(f" - data:{upload_info}") upload_create_url = paths.get_api_url() + '/uploads/' upload = requests.post(upload_create_url, json=upload_info, headers=headers, verify=True) + upload = upload.json() chunk_size = 1024 * 1024 * 2 @@ -62,11 +69,20 @@ def upload_file(upload_data, f): ) if 250 > upload_response.status_code > 199: - upload_done_url = paths.get_api_url() + '/uploads_s3/' + upload['id'] + '/upload-file/' - upload_response = requests.post(upload_done_url, headers=headers, verify=True) + upload_done_url = ( + paths.get_api_url() + + "/uploads_s3/" + + upload["id"] + + "/upload-file/" + ) + upload_response = requests.post( + upload_done_url, headers=headers, verify=True + ) # print(upload_response) # print(upload_response.text) - print(f"Finished file upload: {os.path.basename(f['file_path'])}",) + print( + f"Finished file upload: {os.path.basename(f['file_path'])}", + ) return True else: message = f"Upload failed, retry. File : {f['type']} {os.path.basename(f['file_path'])}" @@ -77,13 +93,15 @@ def upload_file(upload_data, f): message = f"Upload failed, retry. 
File : {f['type']} {os.path.basename(f['file_path'])}" print(message) import time + time.sleep(1) # confirm single file upload to bkit server return False + def upload_files(upload_data, files): - '''uploads several files in one run''' + """uploads several files in one run""" uploaded_all = True for f in files: uploaded = upload_file(upload_data, f) @@ -92,21 +110,23 @@ def upload_files(upload_data, files): print(f"Uploaded all files for asset {upload_data['displayName']}") return uploaded_all -def upload_resolutions(files, asset_data, api_key = ''): + +def upload_resolutions(files, asset_data, api_key=""): upload_data = { - "name": asset_data['name'], - "displayName": asset_data['displayName'], + "name": asset_data["name"], + "displayName": asset_data["displayName"], "token": api_key, - "id": asset_data['id'] + "id": asset_data["id"], } uploaded = upload_files(upload_data, files) if uploaded: - print('upload finished successfully') + print("upload finished successfully") else: - print('upload failed.') + print("upload failed.") + -def get_individual_parameter(asset_id='', param_name='', api_key = ''): +def get_individual_parameter(asset_id="", param_name="", api_key=""): url = f"{paths.get_api_url()}/assets/{asset_id}/parameter/{param_name}/" headers = utils.get_headers(api_key) r = requests.get(url, headers=headers) # files = files, @@ -114,44 +134,175 @@ def get_individual_parameter(asset_id='', param_name='', api_key = ''): print(url) return parameter -def patch_individual_parameter(asset_id='', param_name='', param_value='', api_key = ''): + +def patch_individual_parameter(asset_id="", param_name="", param_value="", api_key=""): # changes individual parameter in the parameters dictionary of the assets url = f"{paths.get_api_url()}/assets/{asset_id}/parameter/{param_name}/" headers = utils.get_headers(api_key) metadata_dict = {"value": param_value} print(url) - r = requests.put(url, json=metadata_dict, headers=headers, verify=True) # files = files, + r = requests.put( + url, json=metadata_dict, headers=headers, verify=True + ) # files = files, print(r.text) print(r.status_code) + if r.status_code == 200 or r.status_code == 201: + return True + else: + return False - -def delete_individual_parameter(asset_id='', param_name='', param_value='', api_key = ''): - # changes individual parameter in the parameters dictionary of the assets +def delete_individual_parameter(asset_id="", param_name="", param_value="", api_key=""): + # delete the parameter from the asset url = f"{paths.get_api_url()}/assets/{asset_id}/parameter/{param_name}/" headers = utils.get_headers(api_key) metadata_dict = {"value": param_value} print(url) - r = requests.delete(url, json=metadata_dict, headers=headers, verify=True) # files = files, + r = requests.delete( + url, json=metadata_dict, headers=headers, verify=True + ) # files = files, print(r.text) print(r.status_code) + if r.status_code == 200 or r.status_code == 201 or r.status_code == 204: + return True + else: + return False + def patch_asset_empty(asset_id, api_key): - ''' - This function patches the asset for the purpose of it getting a reindex. - Should be removed once this is fixed on the server and - the server is able to reindex after uploads of resolutions - Returns - ------- - ''' - upload_data = { - } - url = f'{paths.get_api_url()}/assets/{asset_id}/' + """ + This function patches the asset for the purpose of it getting a reindex. 
+ Should be removed once this is fixed on the server and + the server is able to reindex after uploads of resolutions + Returns + ------- + """ + upload_data = {} + url = f"{paths.get_api_url()}/assets/{asset_id}/" headers = utils.get_headers(api_key) - print('patching asset with empty data') + print("patching asset with empty data") try: - r = requests.patch(url, json=upload_data, headers=headers, verify=True) # files = files, + r = requests.patch( + url, json=upload_data, headers=headers, verify=True + ) # files = files, except requests.exceptions.RequestException as e: print(e) - return {'CANCELLED'} - print('patched asset with empty data') - return {'FINISHED'} + return {"CANCELLED"} + print("patched asset with empty data") + return {"FINISHED"} + + +def upload_asset_metadata(upload_data, api_key): + url = f"{paths.get_api_url()}/assets/" + headers = utils.get_headers(api_key) + print("uploading new asset metadata") + try: + r = requests.post( + url, json=upload_data, headers=headers, verify=True + ) # files = files, + print(r.text) + # result should be json + result = r.json() + print(result) + return result + except requests.exceptions.RequestException as e: + print(e) + return {"CANCELLED"} + + +def patch_asset_metadata(asset_id, api_key, data={}): + print("patching asset metadata") + + headers = utils.get_headers(api_key) + + url = f"{paths.get_api_url()}/assets/{asset_id}/" + print(url) + r = requests.patch(url, json=data, headers=headers, verify=True) # files = files, + print(r.text) + + +def mark_for_thumbnail( + asset_id: str, + api_key: str, + # Common parameters + use_gpu: bool = None, + samples: int = None, + resolution: int = None, + denoising: bool = None, + background_lightness: float = None, + # Model-specific parameters + angle: str = None, # DEFAULT, FRONT, SIDE, TOP + snap_to: str = None, # GROUND, WALL, CEILING, FLOAT + # Material-specific parameters + thumbnail_type: str = None, # BALL, BALL_COMPLEX, FLUID, CLOTH, HAIR + scale: float = None, + background: bool = None, + adaptive_subdivision: bool = None, +) -> bool: + """Mark an asset for thumbnail regeneration. + + This function creates a JSON with thumbnail parameters and stores it in the + markThumbnailRender parameter of the asset. Only non-None parameters will be included. 
+ + Args: + asset_id (str): The ID of the asset to update + api_key (str): BlenderKit API key + use_gpu (bool, optional): Use GPU for rendering + samples (int, optional): Number of render samples + resolution (int, optional): Resolution of render + denoising (bool, optional): Use denoising + background_lightness (float, optional): Background lightness (0-1) + angle (str, optional): Camera angle for models (DEFAULT, FRONT, SIDE, TOP) + snap_to (str, optional): Object placement for models (GROUND, WALL, CEILING, FLOAT) + thumbnail_type (str, optional): Type of material preview (BALL, BALL_COMPLEX, FLUID, CLOTH, HAIR) + scale (float, optional): Scale of preview object for materials + background (bool, optional): Use background for transparent materials + adaptive_subdivision (bool, optional): Use adaptive subdivision for materials + + Returns: + bool: True if successful, False otherwise + """ + # Build parameters dict with only non-None values + params = {} + + # Common parameters + if use_gpu is not None: + params['thumbnail_use_gpu'] = use_gpu + if samples is not None: + params['thumbnail_samples'] = samples + if resolution is not None: + params['thumbnail_resolution'] = resolution + if denoising is not None: + params['thumbnail_denoising'] = denoising + if background_lightness is not None: + params['thumbnail_background_lightness'] = background_lightness + + # Model-specific parameters + if angle is not None: + params['thumbnail_angle'] = angle + if snap_to is not None: + params['thumbnail_snap_to'] = snap_to + + # Material-specific parameters + if thumbnail_type is not None: + params['thumbnail_type'] = thumbnail_type + if scale is not None: + params['thumbnail_scale'] = scale + if background is not None: + params['thumbnail_background'] = background + if adaptive_subdivision is not None: + params['thumbnail_adaptive_subdivision'] = adaptive_subdivision + + try: + # Convert parameters to JSON string + params_json = json.dumps(params) + + # Update the asset's markThumbnailRender parameter + return patch_individual_parameter( + asset_id=asset_id, + param_name='markThumbnailRender', + param_value=params_json, + api_key=api_key + ) + except Exception as e: + print(f"Error marking asset for thumbnail: {e}") + return False diff --git a/blenderkit_server_utils/utils.py b/blenderkit_server_utils/utils.py index e60fe97..4219662 100644 --- a/blenderkit_server_utils/utils.py +++ b/blenderkit_server_utils/utils.py @@ -1,7 +1,10 @@ import platform +bpy = None try: import bpy + from mathutils import Vector + except: print('bpy not present') @@ -88,4 +91,140 @@ def enable_cycles_CUDA(): # Additional code for GPU rendering else: print("GPU rendering is not enabled.") - # Additional code for CPU rendering \ No newline at end of file + # Additional code for CPU rendering + +### moved all from blenderkit/utils.py to here +# only if bpy is present +if bpy: + def scale_2d(v, s, p): + """scale a 2d vector with a pivot""" + return (p[0] + s[0] * (v[0] - p[0]), p[1] + s[1] * (v[1] - p[1])) + + + def scale_uvs(ob, scale=1.0, pivot=Vector((0.5, 0.5))): + mesh = ob.data + if len(mesh.uv_layers) > 0: + uv = mesh.uv_layers[mesh.uv_layers.active_index] + + # Scale a UV map iterating over its coordinates to a given scale and with a pivot point + for uvindex in range(len(uv.data)): + uv.data[uvindex].uv = scale_2d(uv.data[uvindex].uv, scale, pivot) + + + # map uv cubic and switch of auto tex space and set it to 1,1,1 + def automap( + target_object=None, + target_slot=None, + tex_size=1, + bg_exception=False, + 
just_scale=False, + ): + tob = bpy.data.objects[target_object] + # only automap mesh models + if tob.type == "MESH" and len(tob.data.polygons) > 0: + # check polycount for a rare case where no polys are in editmesh + actob = bpy.context.active_object + bpy.context.view_layer.objects.active = tob + + # auto tex space + if tob.data.use_auto_texspace: + tob.data.use_auto_texspace = False + + if not just_scale: + tob.data.texspace_size = (1, 1, 1) + + if "automap" not in tob.data.uv_layers: + bpy.ops.mesh.uv_texture_add() + uvl = tob.data.uv_layers[-1] + uvl.name = "automap" + + tob.data.uv_layers.active = tob.data.uv_layers["automap"] + tob.data.uv_layers["automap"].active_render = True + + # TODO limit this to active material + # tob.data.uv_textures['automap'].active = True + + scale = tob.scale.copy() + + if target_slot is not None: + tob.active_material_index = target_slot + bpy.ops.object.mode_set(mode="EDIT") + bpy.ops.mesh.select_all(action="DESELECT") + + # this exception is just for a 2.8 background thunmbnailer crash, can be removed when material slot select works... + if bg_exception or len(tob.material_slots) == 0: + bpy.ops.mesh.select_all(action="SELECT") + else: + bpy.ops.object.material_slot_select() + + scale = (scale.x + scale.y + scale.z) / 3.0 + + if ( + tex_size == 0 + ): # prevent division by zero, it's possible to have 0 in tex size by unskilled uploaders + tex_size = 1 + + if not just_scale: + # compensate for the undocumented operator change in blender 3.2 + if bpy.app.version >= (3, 2, 0): + cube_size = (tex_size) / scale + else: + cube_size = ( + scale * 2.0 / (tex_size) + ) # it's * 2.0 because blender can't tell size of a unit cube :) + + bpy.ops.uv.cube_project(cube_size=cube_size, correct_aspect=False) + + bpy.ops.object.editmode_toggle() + # this by now works only for thumbnail preview, but should be extended to work on arbitrary objects. + # by now, it takes the basic uv map = 1 meter. also, it now doeasn't respect more materials on one object, + # it just scales whole UV. + if just_scale: + scale_uvs(tob, scale=Vector((1 / tex_size, 1 / tex_size))) + bpy.context.view_layer.objects.active = actob + +def get_bounds_worldspace(objects): + """Get the bounding box of objects in world space. 
+ + Args: + objects: List of Blender objects + + Returns: + tuple: (minx, miny, minz, maxx, maxy, maxz) + """ + minx = miny = minz = float('inf') + maxx = maxy = maxz = -float('inf') + + for obj in objects: + # Skip objects that shouldn't be included in bounds + if obj.type == 'EMPTY' and not obj.instance_collection: + continue + + # Get object's world matrix + matrix_world = obj.matrix_world + + if obj.type == 'MESH': + # For mesh objects, use all vertices + for v in obj.data.vertices: + world_coord = matrix_world @ v.co + minx = min(minx, world_coord.x) + miny = min(miny, world_coord.y) + minz = min(minz, world_coord.z) + maxx = max(maxx, world_coord.x) + maxy = max(maxy, world_coord.y) + maxz = max(maxz, world_coord.z) + else: + # For non-mesh objects, use object location + world_coord = matrix_world.translation + minx = min(minx, world_coord.x) + miny = min(miny, world_coord.y) + minz = min(minz, world_coord.z) + maxx = max(maxx, world_coord.x) + maxy = max(maxy, world_coord.y) + maxz = max(maxz, world_coord.z) + + if minx == float('inf'): + # No valid objects found, return zero bounds + return 0, 0, 0, 0, 0, 0 + + return minx, miny, minz, maxx, maxy, maxz diff --git a/generate_model_validations.py b/generate_model_validations.py index 78c6f16..1d3080d 100644 --- a/generate_model_validations.py +++ b/generate_model_validations.py @@ -11,6 +11,7 @@ import pathlib from blenderkit_server_utils import download, search, paths, upload, send_to_bg, utils + # Assuming necessary imports are done at the top of the script from blenderkit_server_utils.cloudflare_storage import CloudflareStorage @@ -18,14 +19,16 @@ results = [] page_size = 100 -MAX_ASSETS = int(os.environ.get('MAX_ASSET_COUNT', '100')) +MAX_ASSETS = int(os.environ.get("MAX_ASSET_COUNT", "100")) DONE_ASSETS_COUNT = 0 -DO_ASSETS=200 +DO_ASSETS = 200 ALL_FOLDERS = set() + + def render_model_validation_thread(asset_data, api_key): - ''' + """ A thread that: 1.downloads file 2.starts an instance of Blender that renders the validation @@ -39,20 +42,20 @@ def render_model_validation_thread(asset_data, api_key): Returns ------- - ''' + """ global DONE_ASSETS_COUNT, ALL_FOLDERS destination_directory = tempfile.gettempdir() - if len(asset_data['files']) == 0: - print('no files for asset %s' % asset_data['name']) + if len(asset_data["files"]) == 0: + print("no files for asset %s" % asset_data["name"]) return - upload_id = asset_data['files'][0]['downloadUrl'].split('/')[-2] + upload_id = asset_data["files"][0]["downloadUrl"].split("/")[-2] # Check if the asset has already been processed # stop using author folder result_file_name = f"{upload_id}" - predicted_filename = f'{result_file_name}.mkv'#let's try to super simplify now. + predicted_filename = f"{result_file_name}.mkv" # let's try to super simplify now. 
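The result folder for each validation render is keyed on the upload id, taken from the second-to-last path segment of the first file's downloadUrl, and the expected output is a single .mkv named after it. A small illustration with a made-up URL:

asset_data = {
    "files": [
        {"downloadUrl": "https://example.com/download/8f3c2a1b-0000/asset.blend"}
    ]
}

upload_id = asset_data["files"][0]["downloadUrl"].split("/")[-2]
predicted_filename = f"{upload_id}.mkv"

assert upload_id == "8f3c2a1b-0000"
assert predicted_filename == "8f3c2a1b-0000.mkv"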
- #print('all validation folders', all_validation_folders) + # print('all validation folders', all_validation_folders) # check if the directory exists on the drive # we check file by file, since the comparison with folder contents is not reliable and would potentially @@ -60,31 +63,43 @@ def render_model_validation_thread(asset_data, api_key): # Initialize Cloudflare Storage with your credentials # f_exists = result_file_name in ALL_FOLDERS cloudflare_storage = CloudflareStorage( - access_key=os.getenv('CF_ACCESS_KEY'), - secret_key=os.getenv('CF_ACCESS_SECRET'), - endpoint_url=os.getenv('CF_ENDPOINT_URL') + access_key=os.getenv("CF_ACCESS_KEY"), + secret_key=os.getenv("CF_ACCESS_SECRET"), + endpoint_url=os.getenv("CF_ENDPOINT_URL"), ) - f_exists = cloudflare_storage.folder_exists('validation-renders', upload_id) - #let's not skip now. + f_exists = cloudflare_storage.folder_exists("validation-renders", upload_id) + # let's not skip now. if f_exists: - # purge the folder - # cloudflare_storage.delete_folder_contents('validation-renders', upload_id) - print(f'directory {upload_id} exists, skipping') - return + # check if the result folder is empty only with index.json, if yes, purge it and continue. Otherwise skip + files = cloudflare_storage.list_folder_contents("validation-renders", upload_id) + + if len(files) == 1 and files[0] == "index.json": + # purge the folder + cloudflare_storage.delete_folder_contents("validation-renders", upload_id) + print(f"Purged the folder: {upload_id}") + else: + print(f"directory {upload_id} exists, skipping") + return # Download asset - asset_file_path = download.download_asset(asset_data, api_key=api_key, directory=destination_directory) + asset_file_path = download.download_asset( + asset_data, api_key=api_key, directory=destination_directory + ) # Unpack asset - send_to_bg.send_to_bg(asset_data, asset_file_path=asset_file_path, script='unpack_asset_bg.py') + send_to_bg.send_to_bg( + asset_data, asset_file_path=asset_file_path, script="unpack_asset_bg.py" + ) # find template file current_dir = pathlib.Path(__file__).parent.resolve() - template_file_path = os.path.join(current_dir, 'blend_files', 'model_validation_static_renders.blend') + template_file_path = os.path.join( + current_dir, "blend_files", "model_validation_static_renders.blend" + ) # Send to background to generate resolutions - #generated temp folder - #.blend gets resaved there and also /tmp renders of images + # generated temp folder + # .blend gets resaved there and also /tmp renders of images temp_folder = tempfile.mkdtemp() # result folder where the stuff for upload to drive goes @@ -92,87 +107,99 @@ def render_model_validation_thread(asset_data, api_key): os.makedirs(result_folder, exist_ok=True) # local file path of rendered image - result_path = os.path.join(temp_folder, - result_folder, - predicted_filename) + result_path = os.path.join(temp_folder, result_folder, predicted_filename) # send to background to render - send_to_bg.send_to_bg(asset_data, - asset_file_path=asset_file_path, - template_file_path=template_file_path, - result_path=result_path, - result_folder=result_folder, - temp_folder=temp_folder, - script='model_validation_bg_render.py', - binary_type='NEWEST', - verbosity_level=2) + send_to_bg.send_to_bg( + asset_data, + asset_file_path=asset_file_path, + template_file_path=template_file_path, + result_path=result_path, + result_folder=result_folder, + temp_folder=temp_folder, + script="model_validation_bg_render.py", + binary_type="NEWEST", + verbosity_level=2, + ) # 
generate gltf: # result is a json... - result_path = os.path.join(temp_folder, asset_data['assetBaseId'] + '_resdata.json') + result_path = os.path.join(temp_folder, asset_data["assetBaseId"] + "_resdata.json") - send_to_bg.send_to_bg(asset_data, asset_file_path=asset_file_path, - result_path=result_path, - script='gltf_bg_blender.py') + send_to_bg.send_to_bg( + asset_data, + asset_file_path=asset_file_path, + result_path=result_path, + script="gltf_bg_blender.py", + ) # gltf is a .glb in the same dir as the .blend asset file - gltf_path = asset_file_path.replace('.blend', '.glb') + gltf_path = asset_file_path.replace(".blend", ".glb") # move gltf to result folder try: shutil.move(gltf_path, result_folder) except Exception as e: - print(f'Error while moving {gltf_path} to {result_folder}: {e}') + print(f"Error while moving {gltf_path} to {result_folder}: {e}") DONE_ASSETS_COUNT += 1 # part of the results is in temfolder/tmp/Render, so let's move all of it's files to the result folder, # so that there are no subdirectories and everything is in one folder. # and then upload the result folder to drive - render_folder = os.path.join(temp_folder, 'tmp', 'Render') + render_folder = os.path.join(temp_folder, "tmp", "Render") try: file_names = os.listdir(render_folder) for file_name in file_names: - shutil.move(os.path.join(render_folder, file_name), result_folder) + shutil.move(os.path.join(render_folder, file_name), result_folder) except Exception as e: - print(f'Error while moving files from {render_folder} to {result_folder}: {e}') + print(f"Error while moving files from {render_folder} to {result_folder}: {e}") # Upload result # # Instead of using Google Drive for upload, use Cloudflare Storage # Initialize the CloudFlare service cloudflare_storage = CloudflareStorage( - access_key=os.getenv('CF_ACCESS_KEY'), - secret_key=os.getenv('CF_ACCESS_SECRET'), - endpoint_url=os.getenv('CF_ENDPOINT_URL') + access_key=os.getenv("CF_ACCESS_KEY"), + secret_key=os.getenv("CF_ACCESS_SECRET"), + endpoint_url=os.getenv("CF_ENDPOINT_URL"), + ) + cloudflare_storage.upload_folder( + result_folder, + bucket_name="validation-renders", + cloudflare_folder_prefix=result_file_name, ) - cloudflare_storage.upload_folder(result_folder, bucket_name='validation-renders', cloudflare_folder_prefix=result_file_name) - #cleanup + # cleanup try: shutil.rmtree(temp_folder) except Exception as e: - print(f'Error while deleting temp folder {temp_folder}: {e}') + print(f"Error while deleting temp folder {temp_folder}: {e}") return -def iterate_assets(filepath, thread_function=None, process_count=12, api_key=''): - ''' iterate through all assigned assets, check for those which need generation and send them to res gen''' +def iterate_assets(filepath, thread_function=None, process_count=12, api_key=""): + """iterate through all assigned assets, check for those which need generation and send them to res gen""" assets = search.load_assets_list(filepath) threads = [] for asset_data in assets: # if DONE_ASSETS_COUNT >= DO_ASSETS: # break if asset_data is not None: - print('downloading and generating validation render for %s' % asset_data['name']) - thread = threading.Thread(target=thread_function, args=(asset_data, api_key)) + print( + "downloading and generating validation render for %s" + % asset_data["name"] + ) + thread = threading.Thread( + target=thread_function, args=(asset_data, api_key) + ) thread.start() threads.append(thread) while len(threads) > process_count - 1: for t in threads: if not t.is_alive(): threads.remove(t) - 
break; + break time.sleep(0.1) # wait for a bit to finish all threads @@ -180,33 +207,45 @@ def main(): # cleanup the drive folder # get all folders from cloudflare to faster check if the folder exists cloudflare_storage = CloudflareStorage( - access_key=os.getenv('CF_ACCESS_KEY'), - secret_key=os.getenv('CF_ACCESS_SECRET'), - endpoint_url=os.getenv('CF_ENDPOINT_URL') + access_key=os.getenv("CF_ACCESS_KEY"), + secret_key=os.getenv("CF_ACCESS_SECRET"), + endpoint_url=os.getenv("CF_ENDPOINT_URL"), ) # ALL_FOLDERS = cloudflare_storage.list_all_folders(bucket_name='validation-renders') - # print('deleting old files') - # cloudflare_storage.delete_old_files(bucket_name='validation-renders', x_days=2) + # print("deleting old files") + # cloudflare_storage.delete_old_files(bucket_name="validation-renders", x_days=30) + # cloudflare_storage.delete_new_files(bucket_name="validation-renders", x_days=30) + # return # Get os temp directory dpath = tempfile.gettempdir() - filepath = os.path.join(dpath, 'assets_for_validation.json') + filepath = os.path.join(dpath, "assets_for_validation.json") params = { - 'order': 'last_blend_upload', - 'asset_type': 'model', - 'verification_status': 'uploaded' + "order": "-last_blend_upload", + "asset_type": "model", + "verification_status": "uploaded", } - search.get_search_simple(params, filepath=filepath, page_size=min(MAX_ASSETS, 100), max_results=MAX_ASSETS, - api_key=paths.API_KEY) + search.get_search_simple( + params, + filepath=filepath, + page_size=min(MAX_ASSETS, 100), + max_results=MAX_ASSETS, + api_key=paths.API_KEY, + ) assets = search.load_assets_list(filepath) - print('ASSETS TO BE PROCESSED') + print("ASSETS TO BE PROCESSED") for i, a in enumerate(assets): - print(a['name'], a['assetType']) + print(a["name"], a["assetType"]) - iterate_assets(filepath, process_count=1, api_key=paths.API_KEY, thread_function=render_model_validation_thread) + iterate_assets( + filepath, + process_count=1, + api_key=paths.API_KEY, + thread_function=render_model_validation_thread, + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/render_thumbnail.py b/render_thumbnail.py new file mode 100644 index 0000000..bdee5a9 --- /dev/null +++ b/render_thumbnail.py @@ -0,0 +1,372 @@ +""" +Script to rerender of thumbnail for materials and models. +This script handles the automated process of generating new thumbnails for BlenderKit assets. +It supports both materials and models, with configurable rendering parameters. + +Required environment variables: +BLENDERKIT_API_KEY - API key to be used +BLENDERS_PATH - path to the folder with blender versions + +Optional environment variables for thumbnail parameters: +THUMBNAIL_USE_GPU - (bool) Use GPU for rendering +THUMBNAIL_SAMPLES - (int) Number of render samples +THUMBNAIL_RESOLUTION - (int) Resolution of render +THUMBNAIL_DENOISING - (bool) Use denoising +THUMBNAIL_BACKGROUND_LIGHTNESS - (float) Background lightness (0-1) + +For materials: +THUMBNAIL_TYPE - Type of material preview (BALL, BALL_COMPLEX, FLUID, CLOTH, HAIR) +THUMBNAIL_SCALE - (float) Scale of preview object +THUMBNAIL_BACKGROUND - (bool) Use background for transparent materials +THUMBNAIL_ADAPTIVE_SUBDIVISION - (bool) Use adaptive subdivision + +For models: +THUMBNAIL_ANGLE - Camera angle (DEFAULT, FRONT, SIDE, TOP) +THUMBNAIL_SNAP_TO - Object placement (GROUND, WALL, CEILING, FLOAT) + +The script workflow: +1. Fetches assets that need thumbnail regeneration +2. 
For each asset: + - Downloads the asset file + - Renders a new thumbnail using Blender + - Uploads the new thumbnail + - Updates the asset metadata +3. Handles multiple assets concurrently using threading +""" + +import json +import os +import tempfile +import time +import threading +from datetime import datetime +from pathlib import Path + +from blenderkit_server_utils import download, search, paths, upload, send_to_bg + +# Required environment variables +ASSET_BASE_ID = os.environ.get('ASSET_BASE_ID', None) +MAX_ASSETS = int(os.environ.get('MAX_ASSET_COUNT', '100')) +SKIP_UPLOAD = os.environ.get('SKIP_UPLOAD', False) == "True" + +# Thumbnail default parameters +DEFAULT_THUMBNAIL_PARAMS = { + 'thumbnail_use_gpu': True, + 'thumbnail_samples': 100, + 'thumbnail_resolution': 2048, + 'thumbnail_denoising': True, + 'thumbnail_background_lightness': 0.9, +} + +# Material-specific defaults +DEFAULT_MATERIAL_PARAMS = { + 'thumbnail_type': 'BALL', + 'thumbnail_scale': 1.0, + 'thumbnail_background': False, + 'thumbnail_adaptive_subdivision': False, +} + +# Model-specific defaults +DEFAULT_MODEL_PARAMS = { + 'thumbnail_angle': 'DEFAULT', + 'thumbnail_snap_to': 'GROUND', +} + +def parse_json_params(json_str): + """Parse the markThumbnailRender JSON parameter. + + Args: + json_str: JSON string containing thumbnail parameters + + Returns: + dict: Parsed parameters or empty dict if invalid JSON + """ + if not json_str: + return {} + + if 1: + params = json.loads(json_str) + # String params + string_params = [ + 'thumbnail_type', + 'thumbnail_angle', + 'thumbnail_snap_to', + ] + for param in string_params: + if param in params and isinstance(params[param], str): + params[param] = params[param] + + # Convert string boolean values to actual booleans + bool_params = [ + 'thumbnail_use_gpu', + 'thumbnail_denoising', + 'thumbnail_background', + 'thumbnail_adaptive_subdivision' + ] + for param in bool_params: + if param in params and isinstance(params[param], str): + params[param] = params[param].lower() == 'true' + + # Convert numeric values + numeric_params = [ + 'thumbnail_samples', + 'thumbnail_resolution', + 'thumbnail_background_lightness', + 'thumbnail_scale' + ] + for param in numeric_params: + if param in params: + try: + if '.' in str(params[param]): # Convert to float if decimal point present + params[param] = float(params[param]) + else: + params[param] = int(params[param]) + except (ValueError, TypeError): + del params[param] # Remove invalid numeric values + print(params) + return params + # except json.JSONDecodeError: + # print(f"Warning: Invalid JSON in markThumbnailRender parameter") + # return {} + +def get_thumbnail_params(asset_type, mark_thumbnail_render=None): + """Get thumbnail parameters from environment variables or defaults. + + This function consolidates all thumbnail rendering parameters, combining values + from different sources in order of priority: + 1. Environment variables (highest priority) + 2. markThumbnailRender JSON parameter + 3. 
Default values (lowest priority)
+
+    Args:
+        asset_type (str): Type of asset ('material' or 'model')
+        mark_thumbnail_render (str, optional): JSON string from markThumbnailRender parameter
+
+    Returns:
+        dict: Combined dictionary of all thumbnail parameters
+    """
+    # Start with default parameters
+    params = DEFAULT_THUMBNAIL_PARAMS.copy()
+
+    # Add type-specific defaults
+    if asset_type == 'material':
+        params.update(DEFAULT_MATERIAL_PARAMS)
+    elif asset_type == 'model':
+        params.update(DEFAULT_MODEL_PARAMS)
+
+    # Update with markThumbnailRender parameters if available
+    json_params = parse_json_params(mark_thumbnail_render)
+    if json_params:
+        params.update(json_params)
+
+    # Update with environment variables (highest priority).
+    # Only variables that are actually set override the values above;
+    # unset variables keep the defaults / markThumbnailRender values.
+    env_updates = {}
+    if 'THUMBNAIL_USE_GPU' in os.environ:
+        env_updates['thumbnail_use_gpu'] = os.environ['THUMBNAIL_USE_GPU'] == "True"
+    if 'THUMBNAIL_SAMPLES' in os.environ:
+        env_updates['thumbnail_samples'] = int(os.environ['THUMBNAIL_SAMPLES'])
+    if 'THUMBNAIL_RESOLUTION' in os.environ:
+        env_updates['thumbnail_resolution'] = int(os.environ['THUMBNAIL_RESOLUTION'])
+    if 'THUMBNAIL_DENOISING' in os.environ:
+        env_updates['thumbnail_denoising'] = os.environ['THUMBNAIL_DENOISING'] == "True"
+    if 'THUMBNAIL_BACKGROUND_LIGHTNESS' in os.environ:
+        env_updates['thumbnail_background_lightness'] = float(os.environ['THUMBNAIL_BACKGROUND_LIGHTNESS'])
+
+    # Add type-specific environment variables
+    if asset_type == 'material':
+        if 'THUMBNAIL_TYPE' in os.environ:
+            env_updates['thumbnail_type'] = os.environ['THUMBNAIL_TYPE']
+        if 'THUMBNAIL_SCALE' in os.environ:
+            env_updates['thumbnail_scale'] = float(os.environ['THUMBNAIL_SCALE'])
+        if 'THUMBNAIL_BACKGROUND' in os.environ:
+            env_updates['thumbnail_background'] = os.environ['THUMBNAIL_BACKGROUND'] == "True"
+        if 'THUMBNAIL_ADAPTIVE_SUBDIVISION' in os.environ:
+            env_updates['thumbnail_adaptive_subdivision'] = os.environ['THUMBNAIL_ADAPTIVE_SUBDIVISION'] == "True"
+    elif asset_type == 'model':
+        if 'THUMBNAIL_ANGLE' in os.environ:
+            env_updates['thumbnail_angle'] = os.environ['THUMBNAIL_ANGLE']
+        if 'THUMBNAIL_SNAP_TO' in os.environ:
+            env_updates['thumbnail_snap_to'] = os.environ['THUMBNAIL_SNAP_TO']
+
+    # Only apply overrides for parameters this asset type actually uses
+    params.update({k: v for k, v in env_updates.items() if k in params})
+
+    return params
+
+def render_thumbnail_thread(asset_data, api_key):
+    """Process a single asset's thumbnail in a separate thread.
+
+    This function handles the complete thumbnail generation workflow for a single asset:
+    1. Downloads the asset file to a temporary directory
+    2. Sets up the thumbnail parameters based on asset type
+    3. Launches Blender in background mode to render the thumbnail
+    4. Uploads the resulting thumbnail
+    5. Updates the asset metadata with new thumbnail information
+    6.
Cleans up temporary files + + Args: + asset_data (dict): Asset metadata including ID, type, and other properties + api_key (str): BlenderKit API key for authentication + """ + destination_directory = tempfile.gettempdir() + + # Get thumbnail parameters based on asset type and markThumbnailRender + thumbnail_params = get_thumbnail_params( + asset_data['assetType'].lower(), + mark_thumbnail_render=asset_data['dictParameters'].get('markThumbnailRender') + ) + # Download asset + asset_file_path = download.download_asset(asset_data, api_key=api_key, directory=destination_directory) + + if not asset_file_path: + print(f"Failed to download asset {asset_data['name']}") + return + + # Create temp folder for results + temp_folder = tempfile.mkdtemp() + result_filepath = os.path.join(temp_folder, f"{asset_data['assetBaseId']}_thumb.{'jpg' if asset_data['assetType'] == 'model' else 'png'}") + + + # Update asset_data with thumbnail parameters + asset_data.update(thumbnail_params) + + # Select appropriate script and template based on asset type + if asset_data['assetType'] == 'material': + script_name = 'autothumb_material_bg.py' + template_path = Path(__file__).parent / 'blend_files' / 'material_thumbnailer_cycles.blend' + elif asset_data['assetType'] == 'model': + script_name = 'autothumb_model_bg.py' + template_path = Path(__file__).parent / 'blend_files' / 'model_thumbnailer.blend' + else: + print(f"Unsupported asset type: {asset_data['assetType']}") + return + + # Send to background Blender for thumbnail generation + send_to_bg.send_to_bg( + asset_data, + asset_file_path=asset_file_path, + template_file_path=str(template_path), + result_path=result_filepath, + script=script_name, + ) + + # Check results and upload + try: + + if SKIP_UPLOAD: + print('----- SKIP_UPLOAD==True -> skipping upload -----') + return + + files = [ + { + "type": "thumbnail", + "index": 0, + "file_path": result_filepath, + } + ] + upload_data = { + "name": asset_data["name"], + "displayName": asset_data["displayName"], + "token": api_key, + "id":asset_data["id"], + } + # Upload the new thumbnail + print(f"Uploading thumbnail for {asset_data['name']}") + ok = upload.upload_files(upload_data, files) + + + if ok: + print(f"Successfully uploaded new thumbnail for {asset_data['name']}") + # Clear the markThumbnailRender parameter + clear_ok = upload.delete_individual_parameter( + asset_id=asset_data['id'], + param_name='markThumbnailRender', + param_value='', + api_key=api_key + ) + if clear_ok: + print(f"Successfully cleared markThumbnailRender for {asset_data['name']}") + else: + print(f"Failed to clear markThumbnailRender for {asset_data['name']}") + else: + print(f"Failed to upload thumbnail for {asset_data['name']}") + except Exception as e: + print(f"Error processing thumbnail results: {e}") + finally: + # Cleanup + try: + os.remove(asset_file_path) + os.remove(result_filepath) + os.rmdir(temp_folder) + except: + pass + +def iterate_assets(filepath, api_key, process_count=1): + """Process multiple assets concurrently using threading. + + Manages a pool of worker threads to process multiple assets simultaneously. + Limits the number of concurrent processes to avoid system overload. 
+ + Args: + filepath (str): Path to the JSON file containing asset data + api_key (str): BlenderKit API key for authentication + process_count (int): Maximum number of concurrent thumbnail generations + """ + assets = search.load_assets_list(filepath) + threads = [] + + for asset_data in assets: + if asset_data is not None: + print(f"Processing thumbnail for {asset_data['name']}") + thread = threading.Thread(target=render_thumbnail_thread, args=(asset_data, api_key)) + thread.start() + threads.append(thread) + + while len(threads) > process_count - 1: + for t in threads[:]: + if not t.is_alive(): + threads.remove(t) + break + time.sleep(0.1) + + # Wait for remaining threads + for thread in threads: + thread.join() + +def main(): + """Main entry point for the thumbnail generation script. + + Sets up the initial conditions for thumbnail generation: + 1. Creates a temporary directory for asset processing + 2. Configures search parameters to find assets needing thumbnails + 3. Fetches the list of assets to process + 4. Initiates the thumbnail generation process + + The script can either process a specific asset (if ASSET_BASE_ID is set) + or process multiple assets based on search criteria. + """ + dpath = tempfile.gettempdir() + filepath = os.path.join(dpath, 'assets_for_thumbnails.json') + + # Set up search parameters + if ASSET_BASE_ID: + params = {'asset_base_id': ASSET_BASE_ID} + else: + params = { + 'asset_type': 'model,material', + 'order': 'created', + 'markThumbnailRender_isnull': False, + } + + # Get assets to process + assets = search.get_search_simple( + params, + filepath, + page_size=min(MAX_ASSETS, 100), + max_results=MAX_ASSETS, + api_key=paths.API_KEY + ) + + print(f'Found {len(assets)} assets to process:') + for asset in assets: + print(f"{asset['name']} ({asset['assetType']})") + + iterate_assets(filepath, api_key=paths.API_KEY) + +if __name__ == '__main__': + main() diff --git a/sync_TwinBru_library.py b/sync_TwinBru_library.py new file mode 100644 index 0000000..a5cd1e4 --- /dev/null +++ b/sync_TwinBru_library.py @@ -0,0 +1,417 @@ +"""Script to sync twinbru library to blenderkit. +Required environment variables: +BLENDERKIT_API_KEY - API key to be used +BLENDERS_PATH - path to the folder with blender versions + +""" + +import csv +import json +import requests +import os +import tempfile +import time +from datetime import datetime +import pathlib +import re +import threading +import zipfile +from blenderkit_server_utils import download, search, paths, upload, send_to_bg + +results = [] +page_size = 100 + +MAX_ASSETS = int(os.environ.get("MAX_ASSET_COUNT", "100")) +SKIP_UPLOAD = os.environ.get("SKIP_UPLOAD", False) == "True" + + +def read_csv_file(file_path): + """ + Read a CSV file and return a list of dictionaries. + """ + try: + with open(file_path, "r", encoding="utf-8-sig") as file: + reader = csv.DictReader(file) + return [row for row in reader] + except UnicodeDecodeError: + # If UTF-8 fails, try with ISO-8859-1 encoding + with open(file_path, "r", encoding="iso-8859-1") as file: + reader = csv.DictReader(file) + return [row for row in reader] + except Exception as e: + print(f"Error reading CSV file: {e}") + return [] + + +def download_file(url, filepath): + """ + Download a file from a URL to a filepath. + Write progress to console. 
+ """ + response = requests.get(url, stream=True) + total_length = int(response.headers.get("content-length")) + with open(filepath, "wb") as file: + for chunk in response.iter_content(chunk_size=8192): + file.write(chunk) + progress = int(file.tell() / total_length * 100) + print(f"Downloading: {progress}%", end="\r") + print() + + +def build_description_text(twinbru_asset): + """ + Build a description text for the asset. + """ + description = f"Physical material that renders exactly as in real life." + description += f"Brand: {twinbru_asset['brand']}\n" + description += f"Weight: {twinbru_asset['weight_g_per_m_squared']}\n" + description += f"End Use: {twinbru_asset['cat_end_use']}\n" + description += f"Usable Width: {twinbru_asset['selvedge_useable_width_cm']}\n" + description += f"Design Type: {twinbru_asset['cat_design_type']}\n" + description += f"Colour Type: {twinbru_asset['cat_colour']}\n" + description += f"Characteristics: {twinbru_asset['cat_characteristics']}\n" + description += f"Composition: {twinbru_asset['total_composition']}\n" + return description + + +def slugify_text(text): + """ + Slugify a text. + Remove special characters, replace spaces with underscores and make it lowercase. + """ + text = re.sub(r"[()/#-]", "", text) + text = re.sub(r"\s", "_", text) + text = re.sub(r"_+", "_", text) + return text.lower() + + +def build_tags_list(twinbru_asset): + """ + Create a list of tags for the asset. + """ + tags = [] + tags.extend(twinbru_asset["cat_end_use"].split(",")) + tags.extend(twinbru_asset["cat_design_type"].split(",")) + # tags.append(twinbru_asset["cat_colour"]) + tags.extend(twinbru_asset["cat_characteristics"].split(",")) + # remove duplicates + tags = list(set(tags)) + # shorten to max 5 tags + tags = tags[:5] + # make tags contain only alphanumeric characters and underscores + # there are these characters to be replaced: ()/#- and gaps + tags = [slugify_text(tag) for tag in tags] + + return tags + + +def dict_to_params(inputs): + parameters = [] + for k, v in inputs.items(): + value = "" + if isinstance(v, list): + value = ",".join(str(item) for item in v) + elif isinstance(v, bool): + value = str(v).lower() + elif isinstance(v, (int, float)): + value = f"{v:f}".rstrip("0").rstrip(".") + else: + value = str(v) + + param = {"parameterType": k, "value": value} + parameters.append(param) + return parameters + + +def get_thumbnail_path(temp_folder, twinbru_asset): + """ + Get the thumbnail path for the asset. 
+ Thumbnails are stored in the /renders directory of the asset + """ + # Get the path to the renders directory + renders_dir = os.path.join(temp_folder, "Samples") + + # Check if the renders directory exists + if not os.path.exists(renders_dir): + print(f"Renders directory not found for asset {twinbru_asset['name']}") + return None + + # List all files in the renders directory + render_files = os.listdir(renders_dir) + + # Filter for image files (assuming they are jpg or png) + image_files = [ + f for f in render_files if f.lower().endswith((".jpg", ".jpeg", ".png")) + ] + + # If no image files found, return None + if not image_files: + print(f"No thumbnail images found for asset {twinbru_asset['name']}") + return None + + # get the largest image file assuming it's the best quality thumbnail + image_files.sort(key=lambda f: os.path.getsize(os.path.join(renders_dir, f))) + + thumbnail_file = image_files[-1] + + # If there's a thumbnail ending with _CU.jpg, use that one, since that seems to be the nicest + for image_file in image_files: + if image_file.endswith("_CU.jpg"): + thumbnail_file = image_file + break + + # Return the full path to the thumbnail + return os.path.join(renders_dir, thumbnail_file) + + +def generate_upload_data(twinbru_asset): + """ + Generate the upload data for the asset. + """ + # convert name - remove _ and remove the number that comes last in name + readable_name = twinbru_asset["name"].split("_") + # capitalize the first letter of each word + readable_name = " ".join(word.capitalize() for word in readable_name[:-1]) + + match_category = { + "Blackout": "blackout", + "Chenille": "chenille", + "Dimout": "dimout", + "Embroidery": "embroidery", + "Flat weave": "flat-weave", + "Jacquard": "jacquard", + "Print": "print", + "Sheer": "sheer", + "Suede": "suede", + "Texture": "texture", + "Velvet": "velvet", + "Vinyl / Imitation leather": "vinyl-imitation-leather", + } + + upload_data = { + "assetType": "material", + "sourceAppName": "blender", + "sourceAppVersion": "4.2.0", + "addonVersion": "3.12.3", + "name": readable_name, + "displayName": readable_name, + "description": build_description_text(twinbru_asset), + "tags": build_tags_list(twinbru_asset), + "category": match_category.get(twinbru_asset["cat_characteristics"], "fabric"), + "license": "royalty_free", + "isFree": True, + "isPrivate": False, + "parameters": { + # twinBru specific parameters + "twinbruReference": int(twinbru_asset["reference"]), + "twinBruCatEndUse": twinbru_asset["cat_end_use"], + "twinBruColourType": twinbru_asset["cat_colour"], + "twinBruCharacteristics": twinbru_asset["cat_characteristics"], + "twinBruDesignType": twinbru_asset["cat_design_type"], + "productLink": twinbru_asset["url_info"], + # blenderkit specific parameters + "material_style": "realistic", + "engine": "cycles", + "shaders": ["principled"], + "uv": True, + "animated": False, + "purePbr": True, + "textureSizeMeters": float(twinbru_asset["texture_width_cm"]) * 0.01, + "procedural": False, + "nodeCount": 7, + "textureCount": 5, + "megapixels": 5 * 4 * 4, + "pbrType": "metallic", + "textureResolutionMax": 4096, + "textureResolutionMin": 4096, + "manufacturer": twinbru_asset["brand"], + "designCollection": twinbru_asset["collection_name"], + }, + } + upload_data["parameters"] = dict_to_params(upload_data["parameters"]) + return upload_data + + +import tempfile +import os +from blenderkit_server_utils import paths + +def sync_TwinBru_library(file_path): + """ + Sync the TwinBru library to blenderkit. + 1. Read the CSV file + 2. 
For each asset: + 2.1. Search for the asset on blenderkit, if it exists, skip it, if it doesn't, upload it. + 2.2. Download the asset + 2.3. Unpack the asset + 2.4. Create blenderkit upload metadata + 2.5. Make an upload request to the blenderkit API, to uplod metadata and to get asset_base_id. + 2.6. run a pack_twinbru_material.py script to create a material in Blender 3D, + write the asset_base_id and other blenderkit props on the material. + 2.7. Upload the material to blenderkit + 2.8. Mark the asset for thumbnail generation + """ + + assets = read_csv_file(file_path) + current_dir = pathlib.Path(__file__).parent.resolve() + i = 0 + for twinbru_asset in assets: + if ( + i >= MAX_ASSETS + ): # this actually counts only the assets that are not already on blenderkit + break + bk_assets = search.get_search_simple( + parameters={ + "twinbruReference": twinbru_asset["reference"], + "verification_status": "uploaded,validated", + }, + filepath=None, + page_size=10, + max_results=1, + api_key=paths.API_KEY, + ) + if len(bk_assets) > 0: + print(f"Asset {twinbru_asset['name']} already exists on blenderkit") + continue + else: + i += 1 + print(f"Asset {twinbru_asset['name']} does not exist on blenderkit") + # Download the asset into temp folder + temp_folder = os.path.join(tempfile.gettempdir(), twinbru_asset["name"]) + # create the folder if it doesn't exist + if not os.path.exists(temp_folder): + os.makedirs(temp_folder) + + # check if the file exists + asset_file_name = twinbru_asset["url_texture_source"].split("/")[-1] + # crop any data behind first ? in the string + asset_file_name = asset_file_name.split("?")[0] + asset_file_path = os.path.join(temp_folder, asset_file_name) + if not os.path.exists(asset_file_path): + download_file(twinbru_asset["url_texture_source"], asset_file_path) + # Unzip the asset file + with zipfile.ZipFile(asset_file_path, "r") as zip_ref: + zip_ref.extractall(temp_folder) + + # skip assets that don't have the same suffix as originally + # let's assume all have at least texture with "_NRM." in the folder + # switched this to lower case, as the files are not always consistent + if not any("_nrm." 
in f.lower() for f in os.listdir(temp_folder)):
+            print(f"Asset {twinbru_asset['name']} does not match the expected configuration")
+            continue
+
+        # Create blenderkit upload metadata
+        upload_data = generate_upload_data(twinbru_asset)
+
+        # Upload metadata and get the result; print the payload JSON for debugging
+        print("uploading metadata")
+        print(json.dumps(upload_data, indent=4))
+        asset_data = upload.upload_asset_metadata(upload_data, paths.API_KEY)
+        if asset_data.get("statusCode") == 400:
+            print(asset_data)
+            return
+        # Run the _bg.py script to create a material in Blender 3D
+        send_to_bg.send_to_bg(
+            asset_data=asset_data,
+            template_file_path=os.path.join(
+                current_dir, "blend_files", "empty.blend"
+            ),
+            result_path=os.path.join(temp_folder, "material.blend"),
+            script="pack_twinbru_material.py",
+            binary_type="NEWEST",
+            temp_folder=temp_folder,
+            verbosity_level=2,
+        )
+        # Upload the asset to blenderkit
+        files = [
+            {
+                "type": "blend",
+                "index": 0,
+                "file_path": os.path.join(temp_folder, "material.blend"),
+            },
+        ]
+        upload_data = {
+            "name": asset_data["name"],
+            "displayName": upload_data["name"],
+            "token": paths.API_KEY,
+            "id": asset_data["id"],
+        }
+        uploaded = upload.upload_files(upload_data, files)
+
+        if uploaded:
+            print(f"Successfully uploaded asset: {asset_data['name']}")
+            # Mark the asset for thumbnail generation with material-specific settings
+            ok = upload.mark_for_thumbnail(
+                asset_id=asset_data["id"],
+                api_key=paths.API_KEY,
+                # Common parameters
+                use_gpu=True,
+                samples=100,
+                resolution=2048,
+                denoising=True,
+                background_lightness=0.5,
+                # Material-specific parameters
+                thumbnail_type='CLOTH',  # CLOTH preview type suits fabric materials
+                scale=2 * float(twinbru_asset["texture_width_cm"]) * 0.01,  # preview scene at 2x the texture width (cm -> m)
+                background=False,  # no background needed for these opaque fabrics
+                adaptive_subdivision=False,  # adaptive subdivision not needed here
+            )
+            if ok:
+                print(f"Successfully marked asset for thumbnail generation: {asset_data['name']}")
+            else:
+                print(f"Failed to mark asset for thumbnail generation: {asset_data['name']}")
+        else:
+            print(f"Failed to upload asset: {asset_data['name']}")
+
+        # Mark the asset as uploaded; the server response may report an error here
+        # because the thumbnail has not been generated yet.
+        upload.patch_asset_metadata(
+            asset_data["id"], paths.API_KEY, data={"verificationStatus": "uploaded"}
+        )
+
+        # Add a delay so we do not overwhelm the server
+        time.sleep(10)
+
+
+def iterate_assets(filepath, thread_function=None, process_count=12, api_key=""):
+    """Iterate through all assigned assets and send each one to the given thread function,
+    keeping at most process_count worker threads running at a time."""
+    assets = search.load_assets_list(filepath)
+    threads = []
+    for asset_data in assets:
+        if asset_data is not None:
+            print("processing %s" % asset_data["name"])
+            thread = threading.Thread(
+                target=thread_function, args=(asset_data, api_key)
+            )
+            thread.start()
+            threads.append(thread)
+            while len(threads) > process_count - 1:
+                for t in threads:
+                    if not t.is_alive():
+                        threads.remove(t)
+                        break
+                time.sleep(0.1)  # wait for a bit to finish all threads
+
+
+def main():
+    """Main entry point for the script.
+    Reads the CSV file path from the TWINBRU_CSV_PATH environment variable.
+    If it is not set, prints an error message and exits.
+ """ + csv_path = os.environ.get('TWINBRU_CSV_PATH') + if not csv_path: + print("Error: TWINBRU_CSV_PATH environment variable not set") + return + + if not os.path.exists(csv_path): + print(f"Error: CSV file not found at path: {csv_path}") + return + + print(f"Processing TwinBru CSV file: {csv_path}") + sync_TwinBru_library(csv_path) + + +if __name__ == "__main__": + main()