Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add new cci command group for hashing configs, flows, and dependencies and for freezing flows #3833

Draft
wants to merge 1 commit into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions cumulusci/cli/cci.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@

from .error import error
from .flow import flow
from .hash import hash_group
from .logger import get_tempfile_logger, init_logger
from .org import org
from .plan import plan
Expand Down Expand Up @@ -242,3 +243,4 @@ def shell(runtime, script=None, python=None):
cli.add_command(flow)
cli.add_command(plan)
cli.add_command(robot)
cli.add_command(hash_group)
78 changes: 75 additions & 3 deletions cumulusci/cli/flow.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,17 @@
import json
import os
import yaml
from collections import defaultdict
from datetime import datetime
from pathlib import Path

import click

from cumulusci.core.github import set_github_output
from cumulusci.core.exceptions import FlowNotFoundError
from cumulusci.core.utils import format_duration
from cumulusci.utils import document_flow, flow_ref_title_and_intro
from cumulusci.utils.hashing import hash_dict
from cumulusci.utils.yaml.safer_loader import load_yaml_data

from .runtime import pass_runtime
Expand Down Expand Up @@ -44,9 +48,9 @@ def flow_doc(runtime, project=False):
flows_by_group = group_items(flows)
flow_groups = sorted(
flows_by_group.keys(),
key=lambda group: flow_info_groups.index(group)
if group in flow_info_groups
else 100,
key=lambda group: (
flow_info_groups.index(group) if group in flow_info_groups else 100
),
)

for group in flow_groups:
Expand Down Expand Up @@ -183,3 +187,71 @@ def flow_run(runtime, flow_name, org, delete_org, debug, o, no_prompt):
click.echo(str(e))

runtime.alert(f"Flow Complete: {flow_name}")


@flow.command(name="freeze", help="Freeze a flow into a flattened list of static steps")
@click.argument("flow_name")
@click.option(
    "--org",
    help="Specify the target org. By default, runs against the current default org",
)
@click.option(
    "--debug", is_flag=True, help="Drops into pdb, the Python debugger, on an exception"
)
@click.option(
    "-o",
    nargs=2,
    multiple=True,
    help="Pass task specific options for the task as '-o taskname__option value'. You can specify more than one option by using -o more than once.",
)
@click.option(
    "--no-prompt",
    is_flag=True,
    help="Disables all prompts. Set for non-interactive mode use such as calling from scripts or CI systems",
)
@pass_runtime(require_keychain=True)
def flow_freeze(runtime, flow_name, org, debug, o, no_prompt=True):
    """Freeze a flow into a flat list of static steps and write it to a YAML file.

    The output file is named ``<flow_name>__<hash>.yml`` where the hash is an
    8-character digest of the frozen steps; the filename is also exported as
    the ``FLOW_FILENAME`` GitHub Actions output.
    """
    # Get necessary configs
    org, org_config = runtime.get_org(org)

    # Parse command line options into {task_name: {option_name: value}}
    options = defaultdict(dict)
    if o:
        for key, value in o:
            if "__" in key:
                # maxsplit=1 so option names containing "__" are preserved
                task_name, option_name = key.split("__", 1)
                options[task_name][option_name] = value
            else:
                raise click.UsageError(
                    "-o option for flows should contain __ to split task name from option name."
                )

    # Create the flow and handle initialization exceptions
    try:
        coordinator = runtime.get_flow(flow_name, options=options)
        start_time = datetime.now()
        # Number the frozen steps sequentially: {0: step, 1: step, ...}
        steps = {}
        for step in coordinator.freeze(org_config):
            stepnum = len(steps)
            steps[stepnum] = step

        # FIX: make_md5_hash was undefined; use the imported hash_dict helper
        steps_hash = hash_dict(steps)
        duration = datetime.now() - start_time
        click.echo(f"Froze {flow_name} in {format_duration(duration)}")
        frozen_name = f"{flow_name}__{steps_hash}"
        filename = f"{frozen_name}.yml"
        frozen_flow = coordinator.flow_config.config
        frozen_flow["description"] = (
            f"Frozen version of {flow_name} with hash {steps_hash}"
        )
        frozen_flow["steps"] = steps
        with open(filename, "w") as f:
            yaml.dump({"flows": {frozen_name: frozen_flow}}, f)
        set_github_output("FLOW_FILENAME", filename)
        # FIX: the f-string had no placeholder; report the actual output path
        click.echo(f"Frozen flow saved to {filename}")
    except Exception:
        runtime.alert(f"Flow error: {flow_name}")
        raise

    runtime.alert(f"Flow Complete: {flow_name}")
104 changes: 104 additions & 0 deletions cumulusci/cli/hash.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
import click
import hashlib
import json
import os
from cumulusci.core.dependencies.resolvers import get_static_dependencies
from cumulusci.core.utils import process_list_arg
from cumulusci.core.github import set_github_output
from cumulusci.utils.hashing import hash_dict
from pydantic import BaseModel
from .runtime import pass_runtime


@click.group(
    "hash",
    help="Commands for hashing parts of the project's CumulusCI configuration and state",
)
def hash_group():
    """Top-level `cci hash` command group; subcommands are registered on it below."""
    pass


# Commands for group: hash


@hash_group.command(
    name="config",
    help="Hashes all or part of the project's merged CumulusCI configuration",
)
@pass_runtime(require_project=True, require_keychain=False)  # maybe not needed...
@click.option(
    "--locators",
    "locators",
    help="A comma separated list of CumulusCI config locators to specify the top level of config key(s) to hash. Example: project__package,flows__ci_beta",
)
def hash_config(
    runtime,
    locators,
):
    """Echo a hash of the merged project config (optionally narrowed to locators).

    The hash is also exported as a GitHub Actions output named HASH_CONFIG,
    suffixed with the locators when any were given.
    """
    # FIX: leading space so the message reads "...Config for X:", not "...Configfor X:"
    locators_str = " for {}".format(locators) if locators else ""
    locators = process_list_arg(locators)
    config = runtime.project_config.config
    if locators:
        # Narrow the hashed payload to just the requested config subtrees
        config = {loc: runtime.project_config.lookup(loc) for loc in locators}
    config_hash = hash_dict(config)
    click.echo(f"Hash of CumulusCI Config{locators_str}:")
    click.echo(config_hash)
    output_name = "HASH_CONFIG"
    if locators:
        # FIX: the concatenation result was discarded; the suffix was never applied
        output_name += "__" + "__AND__".join(locators)
    set_github_output(output_name, config_hash)


@hash_group.command(
    name="flow",
    help="Hashes a flow's configuration, either dynamic or frozen as a flat list of static steps",
)
@pass_runtime(require_project=True, require_keychain=False)  # maybe not needed...
@click.argument("flow_name")
@click.option(
    "--freeze",
    is_flag=True,
    help="Freeze the flow configuration as a flat list of static steps",
)
def hash_flow(
    runtime,
    flow_name,
    freeze,
):
    """Echo a hash of a flow's steps (optionally frozen into static steps).

    The hash is also exported as a GitHub Actions output named
    HASH_FLOW__<flow_name>, with a __FROZEN suffix when --freeze is used.
    """
    flow = runtime.get_flow(flow_name)

    steps = flow.steps
    if freeze:
        # Flatten the flow into static steps before hashing
        steps = flow.freeze(org_config=None)
    config_hash = hash_dict(steps)
    click.echo(f"Hash of flow {flow_name}:")
    click.echo(config_hash)
    output_name = "HASH_FLOW__" + flow_name
    if freeze:
        # FIX: the concatenation result was discarded; the suffix was never applied
        output_name += "__FROZEN"
    set_github_output(output_name, config_hash)


@hash_group.command(
    name="dependencies",
    help="Resolve and hash the project's current dependencies",
)
@click.option(
    "--resolution-strategy",
    help="The resolution strategy to use. Defaults to production.",
    default="production",
)
@pass_runtime(require_keychain=True)
def hash_dependencies(runtime, resolution_strategy):
    """Resolve the project's dependencies statically, echo them, and hash them."""
    resolved = get_static_dependencies(
        runtime.project_config,
        resolution_strategy=resolution_strategy,
    )
    dependencies = []
    for dependency in resolved:
        click.echo(dependency)
        dependencies.append(dependency.dict())

    deps_hash = hash_dict(dependencies)
    click.echo(f"Hash of CumulusCI Dependencies for {resolution_strategy}:")
    click.echo(deps_hash)
    # Consistency with the other `cci hash` subcommands: export the hash as a
    # GitHub Actions output as well.
    set_github_output("HASH_DEPENDENCIES__" + resolution_strategy, deps_hash)
23 changes: 23 additions & 0 deletions cumulusci/core/flowrunner.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,7 @@
FlowInfiniteLoopError,
TaskImportError,
)
from cumulusci.utils import cd
from cumulusci.utils.version_strings import LooseVersion

if TYPE_CHECKING:
Expand Down Expand Up @@ -459,6 +460,28 @@ def get_flow_steps(

return lines

def freeze(self, org_config) -> List[StepSpec]:
self.org_config = org_config
line = f"Initializing flow for freezing: {self.__class__.__name__}"
if self.name:
line = f"{line} ({self.name})"
self._rule()
self.logger.info(line)
self.logger.info(self.flow_config.description)
self._rule(new_line=True)
steps = []
for step in self.steps:
if step.skip:
continue
with cd(step.project_config.repo_root):
task = step.task_class(
step.project_config,
TaskConfig(step.task_config),
name=step.task_name,
)
steps.extend(task.freeze(step))
return steps

def run(self, org_config: OrgConfig):
self.org_config = org_config
line = f"Initializing flow: {self.__class__.__name__}"
Expand Down
12 changes: 11 additions & 1 deletion cumulusci/core/github.py
Original file line number Diff line number Diff line change
Expand Up @@ -603,7 +603,7 @@ def catch_common_github_auth_errors(func: Callable) -> Callable:
def inner(*args, **kwargs):
try:
return func(*args, **kwargs)
except (ConnectionError) as exc:
except ConnectionError as exc:
if error_msg := format_github3_exception(exc):
raise GithubApiError(error_msg) from exc
else:
Expand Down Expand Up @@ -663,3 +663,13 @@ def create_gist(github, description, files):
files - A dict of files in the form of {filename:{'content': content},...}
"""
return github.create_gist(description, files, public=False)


# Utils for GitHub Actions worker environments
def set_github_output(name: str, value: str):
    """Append a ``name=value`` output line for the GitHub Actions runner.

    Writes to the file named by the GITHUB_OUTPUT environment variable;
    silently does nothing when the variable is unset (i.e. when not running
    inside a GitHub Actions job).
    """
    output_path = os.environ.get("GITHUB_OUTPUT")
    if output_path:
        with open(output_path, "a") as out:
            out.write(f"{name}={value}\n")
31 changes: 31 additions & 0 deletions cumulusci/utils/hashing.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
import hashlib
import json
from pydantic import BaseModel


def cci_json_encoder(obj):
    """`default` hook for json.dumps that serializes CumulusCI objects.

    Pydantic models serialize via .dict(); objects carrying a task_config
    (frozen flow steps) serialize as that config, or None when skipped.
    Raises TypeError for anything else, as json.dumps expects.
    """
    if isinstance(obj, BaseModel):
        return obj.dict()
    if hasattr(obj, "task_config"):
        # Skipped steps hash as None so they do not perturb the digest
        if obj.skip:
            return None
        return obj.task_config
    # FIX: the old try/except around json.JSONEncoder().default(obj) was dead
    # code — the base encoder's default() unconditionally raises TypeError, so
    # the except branch always fired. Raise directly instead.
    raise TypeError(
        f"Object of type {obj.__class__.__name__} is not JSON serializable"
    )


def hash_dict(dictionary):
    """Return a short (8 hex chars) MD5 digest of a JSON-serializable structure.

    Keys are sorted during serialization so logically-equal dicts hash the
    same regardless of insertion order; cci_json_encoder handles CumulusCI
    objects embedded in the structure.
    """
    canonical = json.dumps(dictionary, sort_keys=True, default=cci_json_encoder)
    digest = hashlib.md5(canonical.encode("utf-8"))
    return digest.hexdigest()[:8]
Loading