Skip to content

Commit 8a689bc

Browse files
authored
Merge pull request #294 from Exabyte-io/feature/SOF-7777
feature/SOF-7777
2 parents 65db86e + ea8a046 commit 8a689bc

2 files changed

Lines changed: 211 additions & 8 deletions

File tree

other/materials_designer/workflows/valence_band_offset.ipynb

Lines changed: 169 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -9,10 +9,17 @@
99
"\n",
1010
"Calculate the valence band offset for a labeled interface using a DFT workflow on the Mat3ra platform.\n",
1111
"\n",
12+
"The notebook supports two modes controlled by `IS_POLAR` in cell 1.2:\n",
13+
"\n",
14+
"- `IS_POLAR = False`: run the standard VBO workflow and report the workflow VBO together with the average ESP profiles.\n",
15+
"- `IS_POLAR = True`: keep the same workflow VBO calculation, then run an additional notebook-side post-process at the end that fits the interface and bulk ESP profiles over the slab regions and plots the polar fit.\n",
16+
"\n",
17+
"When the polar option is enabled, the workflow itself is unchanged. The extra polar analysis is performed in the final notebook cells using the saved interface, left-slab, and right-slab materials plus the ESP results returned by the job.\n",
18+
"\n",
1219
"<h2 style=\"color:green\">Usage</h2>\n",
1320
"\n",
1421
"1. Create and save an interface with labels (for example via `create_interface_with_min_strain_zsl.ipynb`).\n",
15-
"1. Set the interface and calculation parameters in cells 1.2 and 1.3 below.\n",
22+
"1. Set the interface and calculation parameters in cells 1.2 and 1.3 below, including `IS_POLAR` if the interface is polar.\n",
1623
"1. Click \"Run\" > \"Run All\" to run all cells.\n",
1724
"1. Wait for the job to complete.\n",
1825
"1. Scroll down to view the VBO result.\n",
@@ -26,7 +33,8 @@
2633
"1. Configure compute: get list of clusters and create compute configuration with selected cluster, queue, and number of processors.\n",
2734
"1. Create the job with materials and workflow configuration: assemble the job from materials, workflow, project, and compute configuration.\n",
2835
"1. Submit the job and monitor the status: submit the job and wait for completion.\n",
29-
"1. Retrieve results: get and display the valence band offset.\n"
36+
"1. Retrieve results: get and display the workflow valence band offset and average ESP profiles.\n",
37+
"1. If `IS_POLAR = True`, run the final polar post-process cells to fit the ESP profile and generate the polar-fit plot in the notebook.\n"
3038
]
3139
},
3240
{
@@ -87,6 +95,8 @@
8795
"RIGHT_SIDE_PART = InterfacePartsEnum.FILM\n",
8896
"INTERFACE_SYSTEM_NAME = None # Used as tag to group the materials. Defaults to shorthand from the loaded interface name\n",
8997
"\n",
98+
"IS_POLAR = False # Whether the interface is polar, to adjust the VBO calculation method accordingly.\n",
99+
"\n",
90100
"# 4. Workflow parameters\n",
91101
"APPLICATION_NAME = \"espresso\"\n",
92102
"WORKFLOW_SEARCH_TERM = \"valence_band_offset.json\"\n",
@@ -370,6 +380,7 @@
370380
"\n",
371381
"workflow_config = WorkflowStandata.filter_by_application(app.name).get_by_name_first_match(WORKFLOW_SEARCH_TERM)\n",
372382
"workflow = Workflow.create(workflow_config)\n",
383+
"\n",
373384
"workflow.name = MY_WORKFLOW_NAME\n",
374385
"\n",
375386
"visualize_workflow(workflow)\n"
@@ -630,11 +641,11 @@
630641
"job_data = client.jobs.get(job_id)\n",
631642
"workflow = job_data[\"workflow\"]\n",
632643
"\n",
633-
"vbo_value = client.properties.get_for_job(\n",
644+
"vbo_value_non_polar = client.properties.get_for_job(\n",
634645
" job_id,\n",
635646
" property_name=PropertyName.scalar.valence_band_offset.value,\n",
636647
")[0]\n",
637-
"print(f\"Valence Band Offset (VBO) value: {vbo_value['value']:.3f} eV\")\n",
648+
"print(f\"Workflow Valence Band Offset (VBO) value: {float(vbo_value_non_polar['value']):.3f} eV\")\n",
638649
"\n",
639650
"avg_esp_unit_ids = {}\n",
640651
"for subworkflow in workflow[\"subworkflows\"]:\n",
@@ -643,31 +654,181 @@
643654
" result_names = [result[\"name\"] for result in unit.get(\"results\", [])]\n",
644655
" if \"average_potential_profile\" in result_names:\n",
645656
" avg_esp_unit_ids[subworkflow_name] = unit[\"flowchartId\"]\n",
646-
" break\n",
647657
"\n",
648658
"ordered_names = [\n",
649659
" \"BS + Avg ESP (Interface)\",\n",
650660
" \"BS + Avg ESP (interface left)\",\n",
651661
" \"BS + Avg ESP (interface right)\",\n",
652662
"]\n",
653663
"\n",
664+
"avg_esp_results = {}\n",
654665
"for subworkflow_name in ordered_names:\n",
655666
" unit_id = avg_esp_unit_ids[subworkflow_name]\n",
656667
" avg_esp_data = client.properties.get_for_job(\n",
657668
" job_id,\n",
658669
" property_name=\"average_potential_profile\",\n",
659670
" unit_id=unit_id,\n",
660671
" )[0]\n",
661-
" visualize_properties(avg_esp_data, title=subworkflow_name)\n"
672+
" avg_esp_results[subworkflow_name] = avg_esp_data\n",
673+
" visualize_properties(avg_esp_data, title=subworkflow_name)\n",
674+
"\n"
675+
]
676+
},
677+
{
678+
"cell_type": "markdown",
679+
"id": "43",
680+
"metadata": {},
681+
"source": [
682+
"### 8.2. Polar-only notebook post-process\n",
683+
"If `IS_POLAR = True`, use the slab boundaries from the notebook materials and fit the ESP profiles directly in the notebook.\n"
662684
]
663685
},
664686
{
665687
"cell_type": "code",
666688
"execution_count": null,
667-
"id": "43",
689+
"id": "44",
690+
"metadata": {},
691+
"outputs": [],
692+
"source": [
693+
"import matplotlib.pyplot as plt\n",
694+
"import numpy as np\n",
695+
"from scipy.stats import linregress\n",
696+
"from mat3ra.prode import PropertyName\n",
697+
"from utils.api import get_property_holder_for_job, update_property_holder_value\n",
698+
"\n",
699+
"\n",
700+
"def get_region_indices(x_data, x_min, x_max):\n",
701+
" mask = (x_data >= x_min) & (x_data <= x_max)\n",
702+
" indices = np.where(mask)[0]\n",
703+
" if len(indices) == 0:\n",
704+
" return 0, len(x_data)\n",
705+
" return indices[0], indices[-1] + 1\n",
706+
"\n",
707+
"\n",
708+
"def fit_and_average(x_data, y_data, start_idx, end_idx):\n",
709+
" x_region = x_data[start_idx:end_idx]\n",
710+
" y_region = y_data[start_idx:end_idx]\n",
711+
" if len(x_region) < 2:\n",
712+
" avg = float(np.mean(y_region)) if len(y_region) > 0 else 0.0\n",
713+
" return avg, 0.0, avg\n",
714+
" slope, intercept, _, _, _ = linregress(x_region, y_region)\n",
715+
" x_mid = (x_region[0] + x_region[-1]) / 2.0\n",
716+
" avg_value = slope * x_mid + intercept\n",
717+
" return float(avg_value), float(slope), float(intercept)\n",
718+
"\n",
719+
"\n",
720+
"interface_profile = avg_esp_results[\"BS + Avg ESP (Interface)\"]\n",
721+
"left_profile = avg_esp_results[\"BS + Avg ESP (interface left)\"]\n",
722+
"right_profile = avg_esp_results[\"BS + Avg ESP (interface right)\"]\n",
723+
"\n",
724+
"interface_material.to_cartesian()\n",
725+
"left_material.to_cartesian()\n",
726+
"right_material.to_cartesian()\n",
727+
"\n",
728+
"interface_z = sorted(coord[2] for coord in interface_material.basis.coordinates.values)\n",
729+
"n_left = len(left_material.basis.elements.values)\n",
730+
"z_min_1 = interface_z[0]\n",
731+
"z_max_1 = interface_z[n_left - 1]\n",
732+
"z_min_2 = interface_z[n_left]\n",
733+
"z_max_2 = interface_z[-1]\n",
734+
"\n",
735+
"print(f\"Detected slab 1 boundaries: z = {z_min_1:.3f} to {z_max_1:.3f} A\")\n",
736+
"print(f\"Detected slab 2 boundaries: z = {z_min_2:.3f} to {z_max_2:.3f} A\")\n",
737+
"\n",
738+
"X = np.array(interface_profile[\"xDataArray\"])\n",
739+
"Y = np.array(interface_profile[\"yDataSeries\"][1])\n",
740+
"X_left = np.array(left_profile[\"xDataArray\"])\n",
741+
"Y_left = np.array(left_profile[\"yDataSeries\"][1])\n",
742+
"X_right = np.array(right_profile[\"xDataArray\"])\n",
743+
"Y_right = np.array(right_profile[\"yDataSeries\"][1])\n",
744+
"\n",
745+
"slab1_start, slab1_end = get_region_indices(X, z_min_1, z_max_1)\n",
746+
"slab2_start, slab2_end = get_region_indices(X, z_min_2, z_max_2)\n",
747+
"slab1_start_left, slab1_end_left = get_region_indices(X_left, z_min_1, z_max_1)\n",
748+
"slab2_start_right, slab2_end_right = get_region_indices(X_right, z_min_2, z_max_2)\n",
749+
"\n",
750+
"Va_interface, slope_a, intercept_a = fit_and_average(X, Y, slab1_start, slab1_end)\n",
751+
"Vb_interface, slope_b, intercept_b = fit_and_average(X, Y, slab2_start, slab2_end)\n",
752+
"Va_bulk_left, _, _ = fit_and_average(X_left, Y_left, slab1_start_left, slab1_end_left)\n",
753+
"Vb_bulk_right, _, _ = fit_and_average(X_right, Y_right, slab2_start_right, slab2_end_right)\n",
754+
"\n",
755+
"vbo_value_polar = (Vb_interface - Va_interface) - (Vb_bulk_right - Va_bulk_left)\n",
756+
"\n",
757+
"print(f\"Interface ESP slab 1: {Va_interface:.3f} eV\")\n",
758+
"print(f\"Interface ESP slab 2: {Vb_interface:.3f} eV\")\n",
759+
"print(f\"Bulk ESP left: {Va_bulk_left:.3f} eV\")\n",
760+
"print(f\"Bulk ESP right: {Vb_bulk_right:.3f} eV\")\n",
761+
"print(f\"Fit slope slab 1: {slope_a:.6f} eV/A\")\n",
762+
"print(f\"Fit slope slab 2: {slope_b:.6f} eV/A\")\n",
763+
"print(f\"Polar post-process VBO: {vbo_value_polar:.3f} eV\")"
764+
]
765+
},
766+
{
767+
"cell_type": "markdown",
768+
"id": "45",
769+
"metadata": {},
770+
"source": [
771+
"### 8.3. Plot the ESP profiles with slab regions and fits"
772+
]
773+
},
774+
{
775+
"cell_type": "code",
776+
"execution_count": null,
777+
"id": "46",
668778
"metadata": {},
669779
"outputs": [],
670-
"source": []
780+
"source": [
781+
"plt.figure(figsize=(10, 6))\n",
782+
"plt.plot(X, Y, label=\"Macroscopic Average Potential\", linewidth=2)\n",
783+
"plt.axvspan(z_min_1, z_max_1, color=\"red\", alpha=0.2, label=\"Slab 1 Region\")\n",
784+
"plt.axvspan(z_min_2, z_max_2, color=\"blue\", alpha=0.2, label=\"Slab 2 Region\")\n",
785+
"\n",
786+
"if slab1_end > slab1_start:\n",
787+
" x_fit1 = X[slab1_start:slab1_end]\n",
788+
" y_fit1 = slope_a * x_fit1 + intercept_a\n",
789+
" plt.plot(x_fit1, y_fit1, color=\"darkred\", linestyle=\"--\", linewidth=2, label=\"Fit Slab 1\")\n",
790+
"\n",
791+
"if slab2_end > slab2_start:\n",
792+
" x_fit2 = X[slab2_start:slab2_end]\n",
793+
" y_fit2 = slope_b * x_fit2 + intercept_b\n",
794+
" plt.plot(x_fit2, y_fit2, color=\"darkblue\", linestyle=\"--\", linewidth=2, label=\"Fit Slab 2\")\n",
795+
"\n",
796+
"plt.axhline(Va_interface, color=\"red\", linestyle=\":\", linewidth=2, label=f\"Avg ESP Slab 1 = {Va_interface:.3f} eV\")\n",
797+
"plt.axhline(Vb_interface, color=\"blue\", linestyle=\":\", linewidth=2, label=f\"Avg ESP Slab 2 = {Vb_interface:.3f} eV\")\n",
798+
"plt.xlabel(\"z-coordinate (A)\", fontsize=12)\n",
799+
"plt.ylabel(\"Macroscopic Average Potential (eV)\", fontsize=12)\n",
800+
"plt.title(f\"Polar Interface VBO = {vbo_value_polar:.3f} eV\", fontsize=14, fontweight=\"bold\")\n",
801+
"plt.legend(loc=\"best\", fontsize=10)\n",
802+
"plt.grid(True, alpha=0.3)\n",
803+
"plt.tight_layout()\n",
804+
"plt.show()"
805+
]
806+
},
807+
{
808+
"cell_type": "markdown",
809+
"id": "47",
810+
"metadata": {},
811+
"source": [
812+
"### 8.4. Update the VBO property with the polar post-process result"
813+
]
814+
},
815+
{
816+
"cell_type": "code",
817+
"execution_count": null,
818+
"id": "48",
819+
"metadata": {},
820+
"outputs": [],
821+
"source": [
822+
"if IS_POLAR:\n",
823+
" vbo_property_holder = get_property_holder_for_job(\n",
824+
" client,\n",
825+
" job_id,\n",
826+
" PropertyName.scalar.valence_band_offset.value,\n",
827+
" )\n",
828+
" update_property_holder_value(client, vbo_property_holder[\"_id\"], vbo_value_polar)\n",
829+
" print(f\"Persisted displayed Valence Band Offset (VBO) value: {vbo_value_polar:.3f} eV\")\n",
830+
"\n"
831+
]
671832
}
672833
],
673834
"metadata": {

utils/api.py

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -200,6 +200,48 @@ def get_properties_for_job(client: APIClient, job_id: str, property_name: Option
200200
return [{**prop, "fermiEnergy": fermi_energy} for prop in properties]
201201

202202

203+
def get_property_holder_for_job(
    client: APIClient, job_id: str, property_name: str, unit_id: Optional[str] = None
) -> dict:
    """
    Fetch the first full property holder for a job/property pair.

    Args:
        client (APIClient): API client instance.
        job_id (str): Job ID.
        property_name (str): Property name.
        unit_id (str, optional): Unit flowchart ID; narrows the match to one unit.

    Returns:
        dict: Full property holder document.

    Raises:
        ValueError: If no property holder matches the job/property pair.
    """
    selector = {
        "source.info.jobId": job_id,
        "data.name": property_name,
    }
    # Only constrain by unit when a flowchart ID was actually provided.
    if unit_id:
        selector["source.info.unitId"] = unit_id
    matches = client.properties.list(query=selector)
    if not matches:
        raise ValueError(f"Property '{property_name}' not found for job '{job_id}'")
    return matches[0]
228+
229+
230+
def update_property_holder_value(client: APIClient, property_holder_id: str, value: float) -> dict:
    """
    Update a scalar property's data.value.

    Args:
        client (APIClient): API client instance.
        property_holder_id (str): Property holder ID.
        value (float): New scalar value.

    Returns:
        dict: Server response payload.
    """
    # Mongo-style "$set" patch so only the scalar value field is replaced.
    patch = {"$set": {"data.value": value}}
    return client.properties.update(property_holder_id, patch)
243+
244+
203245
def create_job(
204246
api_client: APIClient,
205247
materials: List[Union[dict, Material]],

0 commit comments

Comments (0)