diff --git a/docs/getting_started/04_mdp.ipynb b/docs/getting_started/04_mdp.ipynb
index ba4443c..f543f69 100644
--- a/docs/getting_started/04_mdp.ipynb
+++ b/docs/getting_started/04_mdp.ipynb
@@ -9,38 +9,26 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 2,
    "metadata": {},
-   "outputs": [
-    {
-     "ename": "IndentationError",
-     "evalue": "expected an indented block after 'else' statement on line 65 (property_builder.py, line 67)",
-     "output_type": "error",
-     "traceback": [
-      "Traceback \u001b[0;36m(most recent call last)\u001b[0m:\n",
-      "\u001b[0m  File \u001b[1;32m~/repositories/stormvogel/.venv/lib/python3.12/site-packages/IPython/core/interactiveshell.py:3577\u001b[0m in \u001b[1;35mrun_code\u001b[0m\n    exec(code_obj, self.user_global_ns, self.user_ns)\u001b[0m\n",
-      "\u001b[0;36m  Cell \u001b[0;32mIn[1], line 1\u001b[0;36m\n\u001b[0;31m    from stormvogel import visualization, mapping, result, show, property_builder\u001b[0;36m\n",
-      "\u001b[0;36m  File \u001b[0;32m~/repositories/stormvogel/stormvogel/property_builder.py:67\u001b[0;36m\u001b[0m\n\u001b[0;31m    else:\u001b[0m\n\u001b[0m    ^\u001b[0m\n\u001b[0;31mIndentationError\u001b[0m\u001b[0;31m:\u001b[0m expected an indented block after 'else' statement on line 65\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "from stormvogel import visualization, mapping, result, show, property_builder\n",
+    "from stormvogel import visualization, mapping, result, show\n",
     "import stormpy"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 3,
    "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       ""
+       ""
       ]
      },
-     "execution_count": 2,
+     "execution_count": 3,
      "metadata": {},
      "output_type": "execute_result"
     }
    ]
@@ -144,7 +132,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 4,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -154,7 +142,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [
     {
@@ -174,7 +162,7 @@
      "Choice Labels: \tnone\n",
      "-------------------------------------------------------------- \n",
      "\n",
-     "['__class__', '__delattr__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getstate__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '_as_sparse_ctmc', '_as_sparse_dtmc', '_as_sparse_exact_dtmc', '_as_sparse_exact_mdp', '_as_sparse_imdp', '_as_sparse_ma', '_as_sparse_mdp', '_as_sparse_pctmc', '_as_sparse_pdtmc', '_as_sparse_pma', '_as_sparse_pmdp', '_as_sparse_pomdp', '_as_sparse_ppomdp', '_as_sparse_smg', '_as_symbolic_ctmc', '_as_symbolic_dtmc', '_as_symbolic_ma', '_as_symbolic_mdp', '_as_symbolic_pctmc', '_as_symbolic_pdtmc', '_as_symbolic_pma', '_as_symbolic_pmdp', 'add_reward_model', 'apply_scheduler', 'backward_transition_matrix', 'choice_labeling', 'choice_origins', 'get_choice_index', 'get_nr_available_actions', 'get_reward_model', 'has_choice_labeling', 'has_choice_origins', 'has_parameters', 'has_reward_model', 'has_state_valuations', 'initial_states', 'initial_states_as_bitvector', 'is_discrete_time_model', 'is_exact', 'is_nondeterministic_model', 'is_partially_observable', 'is_sink_state', 'is_sparse_model', 'is_symbolic_model', 'labeling', 'labels_state', 'model_type', 'nondeterministic_choice_indices', 'nr_choices', 'nr_states', 'nr_transitions', 'reduce_to_state_based_rewards', 'reward_models', 'set_initial_states', 'state_valuations', 'states', 'supports_parameters', 'supports_uncertainty', 'to_dot', 'transition_matrix']\n"
+     "['__class__', '__delattr__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getstate__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '_as_sparse_ctmc', '_as_sparse_dtmc', '_as_sparse_exact_dtmc', '_as_sparse_exact_mdp', '_as_sparse_imdp', '_as_sparse_ma', '_as_sparse_mdp', '_as_sparse_pctmc', '_as_sparse_pdtmc', '_as_sparse_pma', '_as_sparse_pmdp', '_as_sparse_pomdp', '_as_sparse_ppomdp', '_as_symbolic_ctmc', '_as_symbolic_dtmc', '_as_symbolic_ma', '_as_symbolic_mdp', '_as_symbolic_pctmc', '_as_symbolic_pdtmc', '_as_symbolic_pma', '_as_symbolic_pmdp', 'add_reward_model', 'apply_scheduler', 'backward_transition_matrix', 'choice_labeling', 'choice_origins', 'get_choice_index', 'get_nr_available_actions', 'get_reward_model', 'has_choice_labeling', 'has_choice_origins', 'has_parameters', 'has_reward_model', 'has_state_valuations', 'initial_states', 'initial_states_as_bitvector', 'is_discrete_time_model', 'is_exact', 'is_nondeterministic_model', 'is_partially_observable', 'is_sink_state', 'is_sparse_model', 'is_symbolic_model', 'labeling', 'labels_state', 'model_type', 'nondeterministic_choice_indices', 'nr_choices', 'nr_states', 'nr_transitions', 'reduce_to_state_based_rewards', 'reward_models', 'set_initial_states', 'state_valuations', 'states', 'supports_parameters', 'supports_uncertainty', 'to_dot', 'transition_matrix']\n"
      ]
     }
    ],
@@ -186,7 +174,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -195,7 +183,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 7,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -204,7 +192,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 8,
    "metadata": {},
    "outputs": [
     {
@@ -221,7 +209,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 9,
    "metadata": {},
    "outputs": [
     {
@@ -234,9 +222,9 @@
      "Transitions: \t56\n",
      "Reward Models: rounds\n",
      "State Labels: \t3 labels\n",
-     " * deadlock -> 3 item(s)\n",
-     " * init -> 1 item(s)\n",
      " * elected -> 0 item(s)\n",
+     " * init -> 1 item(s)\n",
+     " * deadlock -> 3 item(s)\n",
      "Choice Labels: \tnone\n",
      "-------------------------------------------------------------- \n",
      "\n"
@@ -251,7 +239,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 10,
    "metadata": {},
    "outputs": [
     {
@@ -273,7 +261,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 11,
    "metadata": {
     "scrolled": true
    },
@@ -286,7 +274,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": 12,
    "metadata": {
     "scrolled": true
    },
diff --git a/stormvogel/property_builder.py b/stormvogel/property_builder.py
--- a/stormvogel/property_builder.py
+++ b/stormvogel/property_builder.py
@@ -10,11 +10,14 @@ def build_property_string(model: Model) -> str:
     def probability_or_reward() -> str:
         while True:
-            choice = input("\nCheck probabilities (p) or rewards (r): ").lower()
+            if model.rewards == []:
+                print(
+                    "\nThis model does not have reward models, therefore we can only do model checking for probabilities."
+                )
+                return "P"
+            choice = input("\nCheck for probabilities (p) or rewards (r): ").lower()
             if choice in {"p", "r"}:
                 if choice == "r":
-                    if model.rewards == []:
-                        print("This model does not have a reward model.")
-                    elif len(model.rewards) > 1:
+                    if len(model.rewards) > 1:
                         print("\nThis model has multiple reward models.")
                         print([r.name for r in model.rewards])
                         rewardmodel = input("\nChoose one of the above: ")
@@ -26,7 +29,7 @@ def probability_or_reward() -> str:
     def compare_or_obtain() -> str:
         while True:
             choice = input(
-                "\nDo you want to compare values (c) or obtain one (o): "
+                "\nDo you want to check if a certain property holds (c) or obtain a value (o): "
             ).lower()
             if choice in {"c", "o"}:
                 return choice
@@ -34,7 +37,7 @@ def compare_or_obtain() -> str:
 
     def max_or_min() -> str:
         print(
-            "\nThe model you provided supports actions, hence the values depends on the scheduler."
+            "\nThe model you provided supports actions, hence the values will depend on the scheduler, \ntherefore you must choose between the minimum and maximum value over all schedulers."
         )
         while True:
             choice = input(
@@ -65,16 +68,23 @@ def value() -> str:
             else:
                 return str(choice)
 
-    def label() -> str:
+    def labels() -> str:
         labels = model.get_all_state_labels()
         print("\nThese are all the state labels in the model:\n", labels)
+        s = ""
         while True:
-            choice = input(
-                "\nFor what state label do you want to know the reachability probability? "
-            )
+            choice = input("\nChoose a label to append to the path: ")
             if choice in labels:
-                return choice
-            print("Invalid input. Please choose a label from the list.")
+                s += choice
+            else:
+                print("Invalid input. Please choose a label from the list.")
+            if (
+                input("\nDo you want to append more labels to the path? (y) or (n): ")
+                == "n"
+            ):
+                return s
+            else:
+                s += '" & "'
 
     print("Welcome to the stormvogel property string builder.")
     prop = probability_or_reward()
@@ -85,7 +95,7 @@ def label() -> str:
     else:
         prop += operator()
     prop += value()
-    prop += f' [F "{label()}"]'
+    prop += f' [F "{labels()}"]'
 
     print("\nThe resulting property string is: \n", prop)
     return prop
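
Editor's sketch (not part of the patch): the builder assembles PRISM-style property strings such as P=? [F "elected"], and with the new labels() loop a conjunction like Pmax=? [F "elected" & "done"]. Below is a minimal, hedged example of checking a string of that shape with stormpy, using the Knuth-Yao die model that ships with stormpy's example files; the model path, the label "one", and the expected value 1/6 come from that stock example, not from this change.

    import stormpy
    import stormpy.examples
    import stormpy.examples.files

    # Parse the Knuth-Yao die model bundled with stormpy's examples.
    program = stormpy.parse_prism_program(stormpy.examples.files.prism_dtmc_die)

    # A property string of the shape the builder emits: "P" from
    # probability_or_reward(), "=?" from compare_or_obtain(), and a
    # reachability path over one label from labels().
    prop = 'P=? [F "one"]'
    properties = stormpy.parse_properties(prop, program)

    # Build the model and compute the reachability probability.
    model = stormpy.build_model(program, properties)
    result = stormpy.model_checking(model, properties[0])
    print(result.at(model.initial_states[0]))  # 0.1666... = 1/6

The `" & "` fragment appended between labels makes sense once wrapped by the caller: prop += f' [F "{labels()}"]' surrounds the accumulated string with quotes, so two labels come out as [F "a" & "b"].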