-
Notifications
You must be signed in to change notification settings - Fork 122
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Minor stylistic changes. - Use latest Tensorlake version. - Add Function Executor lifecycle test. - Move test_function_concurrency from SDK package because it depends on Executor logic.
- Loading branch information
Showing
8 changed files
with
304 additions
and
17 deletions.
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,151 @@ | ||
import multiprocessing | ||
import time | ||
import unittest | ||
|
||
from tensorlake import Graph, tensorlake_function | ||
from tensorlake.remote_graph import RemoteGraph | ||
from testing import test_graph_name | ||
|
||
|
||
@tensorlake_function()
def sleep_a(secs: float) -> str:
    """Sleep for `secs` seconds, then report "success".

    Annotated as float (was int): callers pass fractional durations
    such as 0.01 and 0.51.
    """
    time.sleep(secs)
    return "success"
|
||
|
||
@tensorlake_function()
def sleep_b(secs: float) -> str:
    """Sleep for `secs` seconds, then report "success".

    Annotated as float (was int): callers pass fractional durations
    such as 0.01 and 0.51.
    """
    time.sleep(secs)
    return "success"
|
||
|
||
def invoke_sleep_graph(graph_name: str, func_name: str, func_arg_secs: float):
    """Invoke a deployed sleep graph and verify it produced ["success"].

    Intended to be used as a multiprocessing.Process target: graph.run
    blocks until done, so achieving real concurrency requires separate
    processes rather than threads.

    Args:
        graph_name: Name of an already-deployed RemoteGraph.
        func_name: Function whose output to check (e.g. "sleep_a").
        func_arg_secs: Sleep duration forwarded as the `secs` argument;
            float (was annotated int) because callers pass 0.01 / 0.51.

    Raises:
        Exception: if the function output is not ["success"].
    """
    graph = RemoteGraph.by_name(graph_name)
    invocation_id = graph.run(
        block_until_done=True,
        secs=func_arg_secs,
    )
    output = graph.output(invocation_id, func_name)
    if output != ["success"]:
        raise Exception(f"Expected output to be ['success'], got {output}")
|
||
|
||
class TestRemoteGraphFunctionConcurrency(unittest.TestCase):
    """Timing-based checks of Function Executor concurrency semantics."""

    def test_two_same_functions_run_with_concurrency_of_one(self):
        # A single Executor keeps at most one Function Executor per function
        # version, and each Function Executor runs a single task at a time,
        # so two invocations of the same function must execute sequentially.
        RemoteGraph.deploy(
            Graph(
                name=test_graph_name(self),
                description="test",
                start_node=sleep_a,
            )
        )

        # Pre-warm Executor so Executor delays in the next invokes are very low.
        invoke_sleep_graph(
            graph_name=test_graph_name(self), func_name="sleep_a", func_arg_secs=0.01
        )

        workers = [
            multiprocessing.Process(
                target=invoke_sleep_graph,
                kwargs={
                    "graph_name": test_graph_name(self),
                    "func_name": "sleep_a",
                    "func_arg_secs": 0.51,
                },
            )
            for _ in range(2)
        ]

        for worker in workers:
            worker.start()

        started_at = time.time()
        for worker in workers:
            worker.join()
            self.assertEqual(worker.exitcode, 0)

        elapsed = time.time() - started_at
        # Two sequential 0.51 s sleeps must take at least ~1 s in total.
        self.assertGreaterEqual(
            elapsed,
            1.0,
            "The two invocations of the same function should run sequentially",
        )

    def test_two_different_functions_run_with_concurrency_of_two(self):
        # A single Executor can host one Function Executor per distinct
        # function, so invocations of different functions run concurrently.
        graph_a_name = test_graph_name(self) + "_a"
        RemoteGraph.deploy(
            Graph(
                name=graph_a_name,
                description="test",
                start_node=sleep_a,
            )
        )

        graph_b_name = test_graph_name(self) + "_b"
        RemoteGraph.deploy(
            Graph(
                name=graph_b_name,
                description="test",
                start_node=sleep_b,
            )
        )

        # Pre-warm Executor so Executor delays in the next invokes are very low.
        invoke_sleep_graph(
            graph_name=graph_a_name, func_name="sleep_a", func_arg_secs=0.01
        )
        invoke_sleep_graph(
            graph_name=graph_b_name, func_name="sleep_b", func_arg_secs=0.01
        )

        workers = [
            multiprocessing.Process(
                target=invoke_sleep_graph,
                kwargs={
                    "graph_name": name,
                    "func_name": func,
                    "func_arg_secs": 0.51,
                },
            )
            for name, func in (
                (graph_a_name, "sleep_a"),
                (graph_b_name, "sleep_b"),
            )
        ]

        for worker in workers:
            worker.start()

        started_at = time.time()
        for worker in workers:
            worker.join()
            self.assertEqual(worker.exitcode, 0)

        elapsed = time.time() - started_at
        # Two overlapping 0.51 s sleeps should finish well under 1 s total.
        self.assertLessEqual(
            elapsed,
            1.0,
            "The two invocations of different functions should run concurrently",
        )
|
||
|
||
# Allow running this test module directly (e.g. `python <file>.py`).
if __name__ == "__main__":
    unittest.main()
120 changes: 120 additions & 0 deletions
120
indexify/tests/executor/test_function_executor_routing.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,120 @@ | ||
import unittest | ||
|
||
from tensorlake import Graph, RemoteGraph, tensorlake_function | ||
from testing import test_graph_name | ||
|
||
|
||
def function_executor_id() -> str:
    """Return an identifier unique to the current Function Executor process.

    PIDs are reused after a process exits, so the memory address of this
    function object (stable for the life of the process) is used instead.
    """
    return f"{id(function_executor_id)}"
|
||
|
||
@tensorlake_function()
def get_function_executor_id_1() -> str:
    """Report the id of the Function Executor this task ran in."""
    return function_executor_id()
|
||
|
||
@tensorlake_function()
def get_function_executor_id_2(id_from_1: str) -> str:
    """Report the Function Executor id; the upstream value is deliberately ignored."""
    return function_executor_id()
|
||
|
||
class TestFunctionExecutorRouting(unittest.TestCase):
    """Verifies which Function Executor each task is routed to."""

    def _single_output(self, graph, invocation_id, func_name):
        # Fetch the function's outputs and assert exactly one was produced.
        outputs = graph.output(invocation_id, func_name)
        self.assertEqual(len(outputs), 1)
        return outputs[0]

    def test_functions_of_same_version_run_in_same_function_executor(self):
        deployed = RemoteGraph.deploy(
            Graph(
                name=test_graph_name(self),
                description="test",
                start_node=get_function_executor_id_1,
            )
        )

        first_id = self._single_output(
            deployed,
            deployed.run(block_until_done=True),
            "get_function_executor_id_1",
        )
        second_id = self._single_output(
            deployed,
            deployed.run(block_until_done=True),
            "get_function_executor_id_1",
        )

        self.assertEqual(first_id, second_id)

    def test_functions_of_different_versions_run_in_different_function_executors(self):
        graph = Graph(
            name=test_graph_name(self),
            description="test",
            start_node=get_function_executor_id_1,
            version="1.0",
        )
        graph1 = RemoteGraph.deploy(graph)

        first_id = self._single_output(
            graph1,
            graph1.run(block_until_done=True),
            "get_function_executor_id_1",
        )

        # Redeploy the same graph under a bumped version.
        graph.version = "2.0"
        graph2 = RemoteGraph.deploy(graph)
        second_id = self._single_output(
            graph2,
            graph2.run(block_until_done=True),
            "get_function_executor_id_1",
        )

        self.assertNotEqual(first_id, second_id)

    def test_different_functions_of_same_graph_run_in_different_function_executors(
        self,
    ):
        graph = Graph(
            name=test_graph_name(self),
            description="test",
            start_node=get_function_executor_id_1,
        )
        graph.add_edge(get_function_executor_id_1, get_function_executor_id_2)
        deployed = RemoteGraph.deploy(graph)

        invocation_id = deployed.run(block_until_done=True)
        first_id = self._single_output(
            deployed, invocation_id, "get_function_executor_id_1"
        )
        second_id = self._single_output(
            deployed, invocation_id, "get_function_executor_id_2"
        )

        self.assertNotEqual(first_id, second_id)

    def test_same_functions_of_different_graphs_run_in_different_function_executors(
        self,
    ):
        graph1 = RemoteGraph.deploy(
            Graph(
                name=test_graph_name(self) + "_1",
                description="test",
                start_node=get_function_executor_id_1,
            )
        )
        graph2 = RemoteGraph.deploy(
            Graph(
                name=test_graph_name(self) + "_2",
                description="test",
                start_node=get_function_executor_id_1,
            )
        )

        first_id = self._single_output(
            graph1,
            graph1.run(block_until_done=True),
            "get_function_executor_id_1",
        )
        second_id = self._single_output(
            graph2,
            graph2.run(block_until_done=True),
            "get_function_executor_id_1",
        )

        self.assertNotEqual(first_id, second_id)
|
||
|
||
# Allow running this test module directly (e.g. `python <file>.py`).
if __name__ == "__main__":
    unittest.main()
Oops, something went wrong.