LCOV - code coverage report
Current view: top level - pg/source - PriorityPromotionSolver.cpp
Test: mcrl2_coverage.info.cleaned
Date: 2024-04-19 03:43:27
Coverage: Lines: 0 / 157 (0.0 %), Functions: 0 / 9 (0.0 %)

// Author(s): Maurice Laveaux
// Copyright (c) 2016-2016 Eindhoven University of Technology
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#include "mcrl2/pg/PriorityPromotionSolver.h"

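// Region value used to mark vertices that already belong to a computed
// dominion; such vertices are skipped by all further searches.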
const priority_t COMPUTED_REGION = -1;

PriorityPromotionSolver::PriorityPromotionSolver(const ParityGame &game) :
    ParityGameSolver(game)
{}

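// Entry point of the priority promotion algorithm. It repeatedly computes the
// attractor-closed region of the current priority (query) and then either moves
// on to the next priority when that region is open in the subgame, extracts it
// as a dominion when it is closed in the whole game, or promotes it
// (promoteSubDominion) when it is closed in the subgame only. The loop ends
// once every vertex belongs to some dominion, and the computed strategy is
// returned.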
ParityGame::Strategy PriorityPromotionSolver::solve()
{
    // Important note: instead of actually repeatedly removing dominions from
    // the game, the game is kept the same and the substrategy is used to
    // determine which vertices are still unsolved. This is done because
    // make_subgame repeatedly allocates new memory and parity games can be
    // huge.
    StaticGraph& graph = const_cast<StaticGraph&>(game_.graph());

    // Initialize the state to top = (priority_function, [NO_VERTEX], prio)
    std::vector<priority_t> region_function(graph.V(), 0);
    ParityGame::Strategy strategy(graph.V(), NO_VERTEX);

    // Due to cache locality this approach will be slightly slower. A magic
    // region value (COMPUTED_REGION) is used in region_function to indicate
    // that a vertex has already been computed.
    m_unsolved = std::vector<verti>(graph.V(), 0);

    // The lowest priority in the game (the highest number).
    priority_t lowestRegion = 0;

    // Set region_function to the original priorities and initialize the mapping
    for (verti v = 0; v < graph.V(); ++v) {
        region_function[v] = game().priority(v);
        m_unsolved[v] = v;

        lowestRegion = std::max(lowestRegion, game().priority(v));
    }

    // Count for every region the number of vertices it currently contains.
    m_regions = std::vector<verti>(lowestRegion + 1, 0);
    for (priority_t region : region_function) {
        ++m_regions[region];
    }

    // Find the first non-empty priority in the game.
    priority_t prio = nextPriority(region_function, 0);

    // The searcher: the algorithm is tail recursive, so it can also be written as an iteration.
    while (true) {
        query(region_function, strategy, prio);

        if (isOpen(region_function, prio, true)) {
            mCRL2log(mcrl2::log::debug) << "Newly computed region is open in the subgame, with p = " << prio << std::endl;
            printRegion(region_function, prio);

            // Keep the new region_function and substrategy, but go to the next priority
            prio = nextPriority(region_function, prio + 1);
        }
        else {
            if (!isOpen(region_function, prio, false)) {

                // Make sure the todo queue is empty before we use it.
                assert(m_todo.empty());

                // This is a dominion D in the whole game; compute the attractor
                // for this region.
                for (verti v : m_unsolved) {
                    if (region_function[v] == prio) {
                        m_todo.push_back(v);
                    }
                }

                computeAttractor(region_function, strategy, prio, m_todo, false);

                // Remove the dominion from the game: keep the unsolved vertices,
                // reset the lower priorities and mark the region of prio as
                // COMPUTED_REGION.
                mCRL2log(mcrl2::log::debug) << "Found the dominion D, with p = " << prio << std::endl;
                printRegion(region_function, prio);

                // Reset the unsolved set, clear all regions and count one dominion in the statistics.
                m_unsolved.clear();
                m_regions.assign(m_regions.size(), 0);
                ++m_dominions;

                for (verti v = 0; v < game().graph().V(); ++v) {
                    if (region_function[v] == prio) {
                        // Assign a special region indicating that it is solved.
                        region_function[v] = COMPUTED_REGION;
                    }
                    else if (region_function[v] != COMPUTED_REGION) {
                        region_function[v] = game().priority(v);
                        strategy[v] = NO_VERTEX;

                        // Add the remaining vertices to the unsolved set and count them in their region.
                        m_unsolved.push_back(v);
                        ++m_regions[game().priority(v)];
                    }
                }

                if (m_unsolved.empty()) {
                    break; // Stop the algorithm, as all the vertices were solved.
                }

                // Reset the game and find the first non-empty priority again.
                prio = nextPriority(region_function, 0);
            }
            else {
                // The region is a dominion, but only in the subgame, so promote its priority.
                mCRL2log(mcrl2::log::debug) << "Promoted dominion D, with p = " << prio << " to ";
                prio = promoteSubDominion(region_function, strategy, prio);
                mCRL2log(mcrl2::log::debug) << prio << std::endl;
                printRegion(region_function, prio);
            }
        }
    }

    mCRL2log(mcrl2::log::verbose) << m_dominions << " dominions found, and " << m_promotions << " promotions required" << std::endl;

    return strategy;
}

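// Computes R := attr_alpha(region_function^-1(prio)), the alpha-attractor of
// the current priority's region within the subgame of vertices whose region
// value is at least prio, where alpha = prio mod 2. Both region_function and
// strategy are updated in place.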
void PriorityPromotionSolver::query(std::vector<priority_t>& region_function,
    ParityGame::Strategy& strategy,
    priority_t prio)
{
    // Make sure nothing else is stored in the todo queue.
    assert(m_todo.empty());

    // R* = region_function^-1(prio); this is the initial set (todo) for the
    // attractor computation.
    for (verti v : m_unsolved)
    {
        if (region_function[v] == prio) {
            m_todo.push_back(v);
        }
    }

    // (region_function[R -> prio], strategy*) <- computeAttractor_G(todo, strategy
    // restricted to todo)
    computeAttractor(region_function, strategy, prio, m_todo, true);
}

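// Backward attractor computation for player alpha = prio mod 2. The todo queue
// initially contains the target region; a predecessor is added when alpha owns
// it, or when all of its relevant successors already lie inside the region.
// When inSubgraph is true the computation is restricted to the subgame of
// vertices whose region value is at least prio; vertices marked
// COMPUTED_REGION are always skipped.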
void PriorityPromotionSolver::computeAttractor(std::vector<priority_t>& region_function,
    ParityGame::Strategy& strategy,
    priority_t prio,
    std::deque<verti>& todo,
    bool inSubgraph)
{
    // Name some variables for easier access
    const StaticGraph &graph = game().graph();
    const ParityGame::Player alpha = (ParityGame::Player)(prio % 2);

    // O(V): Compute the attractor set to the alpha-region:
    while (!todo.empty()) {
        const verti w = todo.front();
        todo.pop_front();

        // Check all predecessors v of w:
        for (StaticGraph::const_iterator it = graph.pred_begin(w);
            it != graph.pred_end(w); ++it) {
            const verti v = *it;

            // Skip predecessors that are already in the attractor set, that were
            // already computed, or that lie outside the subgame G >= prio.
            if (region_function[v] == prio || region_function[v] == COMPUTED_REGION || (inSubgraph && region_function[v] < prio)) continue;

            if (game().player(v) == alpha) {
                // sigma(v) = w, a valid strategy for alpha is to pick a successor in A
                strategy[v] = w;
            }
            else {

                // Check whether all successors x of v are in A, i.e. whether every
                // edge (v, x) ends up in the region with priority prio.
                bool isSubset = true;
                for (StaticGraph::const_iterator it = graph.succ_begin(v);
                    it != graph.succ_end(v); ++it) {
                    verti x = *it;

                    // Successors already in the region, or already solved
                    // (COMPUTED_REGION), do not provide an escape; skip them.
                    if (region_function[x] == prio || region_function[x] == COMPUTED_REGION) {
                        continue;
                    }

                    // The successor escapes the region: it lies in the subgame
                    // G >= prio, or anywhere outside the region when inSubgraph is false.
                    if (region_function[x] > prio || !inSubgraph) {
                        isSubset = false; break;
                    }
                }

                // opponent controls vertex
                if (isSubset) {
                    // For opponent controlled vertices no strategy exists, so
                    // every possible outgoing edge is losing.
                    strategy[v] = NO_VERTEX;
                } else {
                    continue;  // not in the attractor set yet!
                }
            }

            // Move the vertex from its old region count to the new one.
            --m_regions[region_function[v]];
            ++m_regions[prio];

            // When this part is reached, all liberties of v are gone or v belongs
            // to alpha, so add vertex v to the attractor set:
            region_function[v] = prio;
            todo.push_back(v);

        }
    }

    // R \ domain(tau restricted to R*): vertices in R belonging to alpha for
    // which no strategy is defined yet. These can pick an arbitrary successor
    // inside R; vertices that were attracted into R already have an attraction
    // strategy, so they are always fine.
    for (verti v : m_unsolved)
    {
        if (region_function[v] == prio && game().player(v) == alpha && strategy[v] == NO_VERTEX) {
            for (StaticGraph::const_iterator it = graph.succ_begin(v); it != graph.succ_end(v); ++it) {
                const verti w = *it;

                if (region_function[w] == prio) {
                    // There exists some (v, w) in E such that w belongs to R (has r[w] == prio).
                    strategy[v] = w;
                }
            }
        }
    }

    // TODO: Check conflict in the paper.
    // In the lemma: by associating every position v in Stay(R) \ domain(strategy)
    //      with an arbitrary successor in R. So a vertex without a strategy can
    //      pick a successor in R.
    //
    // Later on: R \ domain(strategy restricted to R*), where R* is the region
    //      without the attractor. That is still the same, but then it should take
    //      a vertex in R \ R*, i.e. outside of the original region R*. This cannot
    //      be checked easily at this point because that information (in todo) is lost.
}

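// Returns true when the region of priority prio is still open: some opponent
// vertex in it can escape to another region, or some alpha vertex in it has no
// successor inside the region. When inSubgraph is true only escapes within the
// subgame (region value > prio) count; otherwise any escape counts.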
bool PriorityPromotionSolver::isOpen(std::vector<priority_t>& region_function,
    priority_t prio,
    bool inSubgraph)
{
    // Name some variables for easier access
    const StaticGraph &graph = game().graph();
    ParityGame::Player alpha = (ParityGame::Player)(prio % 2);

    // O(V): Loop over unsolved vertices and find vertices belonging to the region with prio.
    for (verti v : m_unsolved) {
        if (region_function[v] == prio) {
            // If the vertex belongs to the opponent
            if (game().player(v) != alpha) {
                // For all (v, u) in E, u should belong to R
                for (StaticGraph::const_iterator it = graph.succ_begin(v);
                    it != graph.succ_end(v); ++it)
                {
                    const verti u = *it;

                    // The opponent has an edge leaving the region, either within the
                    // subgame or anywhere in the whole game.
                    if (region_function[u] != COMPUTED_REGION
                        && ((inSubgraph && region_function[u] > prio)
                        || (!inSubgraph && region_function[u] != prio))) {
                        return true;
                    }
                }
            }
            else {
                // If there exists an edge (v, u) into R, the vertex is closed.
                bool isOpen = true;

                for (StaticGraph::const_iterator it = graph.succ_begin(v);
                    it != graph.succ_end(v); ++it)
                {
                    const verti u = *it;

                    // There is an edge from the alpha vertex into the region.
                    if (region_function[u] == prio) {
                        isOpen = false; break;
                    }
                }

                if (isOpen) {
                    return true;
                }
            }
        }
    }

    return false;
}

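// The region of priority prio is a dominion in the subgame only. Compute the
// best escape priority, i.e. the highest region value below prio that the
// opponent can still escape to, merge the current region into that region, and
// reset every other region value above the promotion target to the vertex's
// original priority. Returns the promotion target.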
priority_t PriorityPromotionSolver::promoteSubDominion(std::vector<priority_t>& region_function,
    ParityGame::Strategy& strategy,
    priority_t prio)
{
    const StaticGraph &graph = game().graph();
    ParityGame::Player alpha = (ParityGame::Player)(prio % 2);

    // O(V): It is only a dominion in the subgame, so determine the highest p < prio
    // that the opponent can escape to.
    priority_t promotion = 0;

    // This is referred to as r* = bep(R, r) in the paper (best escape priority):
    // the lowest priority (highest value in min-prio games) below prio that the
    // opponent can escape to from the region. The region it escapes to belongs to
    // alpha, otherwise it would have been attracted in some earlier state.
    for (verti v : m_unsolved) {
        if (region_function[v] == prio && game().player(v) != alpha) {
            // For all (v, u) in E collect the highest priority smaller than prio that the opponent can flee to.
            for (StaticGraph::const_iterator it = graph.succ_begin(v);
                it != graph.succ_end(v); ++it) {
                const verti u = *it;

                if (region_function[u] < prio) {
                    promotion = std::max(promotion, region_function[u]);
                }
            }
        }
    }

    ++m_promotions;

    // Here the prio region is promoted to the new priority and all lower positions
    // are reset.
    for (verti v : m_unsolved) {
        // Promote the current region to the promotion priority
        if (region_function[v] == prio) {
            // Remove the vertex from the old region
            --m_regions[region_function[v]];

            region_function[v] = promotion;

            // Add the vertex to the new region.
            ++m_regions[region_function[v]];
        }
        else if (region_function[v] > promotion) {
            // Remove the vertex from the old region
            --m_regions[region_function[v]];

            // Reset the vertex to its original priority and remove its strategy.
            region_function[v] = game().priority(v);
            strategy[v] = NO_VERTEX;

            // Add the vertex to the new region.
            ++m_regions[region_function[v]];
        }
    }

    return promotion;
}

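// Debug helper that prints all vertices currently assigned to the region with
// priority prio.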
void PriorityPromotionSolver::printRegion(std::vector<priority_t>& region_function,
    priority_t prio)
{
    // This costs O(V), so it is only enabled in debug; it prints all vertices v where region_function[v] == prio.
    if (mCRL2logEnabled(mcrl2::log::debug)) {
        mCRL2log(mcrl2::log::debug) << "alpha-region[" << prio << "] = { ";
        bool first = true;
        for (verti v : m_unsolved)
        {
            if (region_function[v] == prio) {
                if (!first) {
                    mCRL2log(mcrl2::log::debug) << ",";
                }

                mCRL2log(mcrl2::log::debug) << v;
                first = false;
            }

        }
        mCRL2log(mcrl2::log::debug) << " }" << std::endl;
    }
}

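// Returns the first priority greater than or equal to prio whose region still
// contains vertices.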
priority_t PriorityPromotionSolver::nextPriority(std::vector<priority_t>& /* region_function */,
    priority_t prio)
{
    // Starting from the current priority, find the next non-empty region greater
    // than or equal to prio. This should never go out of bounds, as the lowest
    // region will always be a dominion.
    while (m_regions[prio] == 0)
    {
        ++prio;
        assert(prio < m_regions.size());
    }

    return prio;
}

ParityGameSolver* PriorityPromotionSolverFactory::create(const ParityGame &game,
    const verti* /* vertex_map */,
    verti /* vertex_map_size */)
{
    return new PriorityPromotionSolver(game);
}

Generated by: LCOV version 1.14