# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A dimod sampler that uses the steepest gradient descent."""
from numbers import Integral
import dimod
import numpy as np
from greedy.descent import steepest_gradient_descent
__all__ = ["SteepestDescentSolver", "SteepestDescentSampler"]
class SteepestDescentSolver(dimod.Sampler, dimod.Initialized):
    """Steepest descent solver/sampler for binary quadratic models.

    Steepest descent is the discrete analogue of gradient descent, but the
    best move is computed using a local minimization rather than by computing
    a gradient. At each step, the dimension along which to descend is the one
    whose variable flip yields the largest energy drop.

    Solves convex problems to optimality.

Number of downhill runs (samples produced) is determined by ``num_reads``,
number of ``initial_states``, or a combination of the two, depending on
the ``initial_states_generator``.

    For a given input model's graph :math:`G = (V, E)`, :math:`V` being a set
    of graph vertices and :math:`E` a set of edges, runtime complexity of the
    underlying C++ implementation is :math:`O(|E|)` for the initialization
    phase and :math:`O(|V|)` per downhill step.

    In the ``large_sparse_opt`` mode, runtime complexity on sparse graphs is
    :math:`O(|V| \log |V|)` for initialization and
    :math:`O(max\_degree \cdot \log |V|)` per downhill step.
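
    For illustration, the descent rule can be sketched in pure Python. This
    is a simplified sketch, not the library's implementation: it recomputes
    all energy deltas on every step (the C++ code updates them incrementally,
    which is what gives the :math:`O(|V|)` per-step cost), and the function
    name and signature here are illustrative only.

    .. code-block:: python

        def steepest_descent(h, J, state):
            # One downhill run over an Ising model given as h/J dicts:
            # repeatedly flip the spin whose flip yields the largest
            # energy drop, until no flip lowers the energy.
            def delta(v):
                # Energy change of flipping spin v:
                #   delta(v) = -2 * s_v * (h_v + sum_u J_uv * s_u)
                field = h.get(v, 0.0)
                for (a, b), w in J.items():
                    if v == a:
                        field += w * state[b]
                    elif v == b:
                        field += w * state[a]
                return -2 * state[v] * field

            while True:
                best = min(state, key=delta)
                if delta(best) >= 0:
                    return state    # local minimum reached
                state[best] *= -1   # flip the best spin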

    Aliased as :class:`~greedy.sampler.SteepestDescentSampler`.

    Examples:
        Solve a simple Ising problem:

>>> import greedy
...
>>> sampler = greedy.SteepestDescentSampler()
>>> samples = sampler.sample_ising({0: 2, 1: 2}, {(0, 1): -1})
...
>>> print(samples) # doctest: +SKIP
           0  1 energy num_oc. num_st.
        0 -1 -1   -5.0       1       2
        ['SPIN', 1 rows, 1 samples, 2 variables]

        Post-process samples generated by another sampler (simulated
        annealing in this example):

        .. code-block:: python

            import neal
            import dimod
            import greedy
            import networkx

            bqm = dimod.generators.ran_r(5, networkx.complete_graph('abc'))

            samples = neal.SimulatedAnnealingSampler().sample(bqm)

            postprocessed = greedy.SteepestDescentSampler().sample(
                bqm, initial_states=samples)
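
        Because every move in a descent is strictly downhill, this kind of
        post-processing can only lower (or preserve) sample energies. An
        illustrative check, continuing the snippet above (not part of the
        library):

        .. code-block:: python

            # steepest descent never raises a sample's energy, so the best
            # post-processed energy is at most the best input energy
            assert postprocessed.first.energy <= samples.first.energy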

    For additional examples, see :meth:`.sample`.
    """

parameters = None
"""dict: Keyword arguments accepted by the sampling methods.
Exactly equal to::
{
'num_reads': [],
'initial_states': [],
'initial_states_generator': ['initial_states_generators'],
'seed': [],
'large_sparse_opt': ['large_sparse_opt_values'],
}
"""
properties = None
"""dict: Values for parameters accepted by the sampling methods.
Exactly equal to::
{
'initial_states_generators': ('none', 'tile', 'random'),
'large_sparse_opt_values': (True, False),
}
"""

    def __init__(self):
# create a copy (isolate from subclass)
self.parameters = {
'num_reads': [],
'initial_states': [],
'initial_states_generator': ['initial_states_generators'],
'seed': [],
'large_sparse_opt': ['large_sparse_opt_values'],
}
self.properties = {
'initial_states_generators': ('none', 'tile', 'random'),
'large_sparse_opt_values': (True, False),
}

    def sample(self, bqm, num_reads=None, initial_states=None,
initial_states_generator="random", seed=None,
large_sparse_opt=False, **kwargs):
"""Sample from a binary quadratic model.
Starts from ``initial_states``, and converges to local minima using
discrete steepest-descent method.
        Args:
            bqm (:class:`~dimod.BinaryQuadraticModel`):
                The binary quadratic model to be sampled.

            num_reads (int, optional, default=len(initial_states) or 1):
                Number of reads. Each read is generated by one run of the
                steepest descent algorithm. If ``num_reads`` is not
                explicitly given, it is selected to match the number of
                initial states given. If initial states are not provided,
                only one read is performed.

            initial_states (samples-like, optional, default=None):
                One or more samples, each defining an initial state for all
                the problem variables. Initial states are given one per read,
                but if fewer than ``num_reads`` initial states are defined,
                additional values are generated as specified by
                ``initial_states_generator``. See :func:`dimod.as_samples`
                for a description of "samples-like".

            initial_states_generator ({'none', 'tile', 'random'}, optional, default='random'):
                Defines the expansion of ``initial_states`` if fewer than
                ``num_reads`` are specified:

                * "none":
                    If the number of initial states specified is smaller than
                    ``num_reads``, raises ``ValueError``.

                * "tile":
                    Reuses the specified initial states if fewer than
                    ``num_reads`` or truncates if greater.

                * "random":
                    Expands the specified initial states with randomly
                    generated states if fewer than ``num_reads`` or truncates
                    if greater.

            seed (int (32-bit unsigned integer), optional):
                Seed to use for the PRNG. Specifying a particular seed with a
                constant set of parameters produces identical results. If not
                provided, a random seed is chosen.

            large_sparse_opt (bool, optional, default=False):
                Use optimizations for large and sparse problems (search tree
                for the next descent variable instead of a linear array).

        Returns:
            :class:`dimod.SampleSet`: A `dimod` :class:`~dimod.SampleSet`
            object. The number of descent steps (single-variable flips) taken
            to reach the local minimum for each sample is stored in a data
            vector called ``num_steps``.

        Examples:
            This example samples a simple two-variable Ising model.

            >>> import dimod
            >>> bqm = dimod.BQM.from_ising({}, {'ab': 1})
            ...
            >>> import greedy
            >>> sampler = greedy.SteepestDescentSampler()
            ...
            >>> samples = sampler.sample(bqm)
            >>> samples.first.energy
            -1.0

            Run steepest descent one million times (takes ~150ms on an
            average laptop), converging to local minima, each time starting
            from a random state:

            >>> bqm = dimod.BQM.from_ising({}, {'ab': 1})
            >>> sampler = greedy.SteepestDescentSampler()
            ...
            >>> samples = sampler.sample(bqm, num_reads=10**6)
            >>> print(samples.aggregate())      # doctest: +SKIP
               a  b energy num_oc. num_st.
            0 -1 +1   -1.0  500115       1
            1 +1 -1   -1.0  499885       1
            ['SPIN', 2 rows, 1000000 samples, 2 variables]

            Use a combination of one fixed initial state and two randomly
            generated ones to sample from a simple convex Ising problem with
            the global minimum at ``(-1, -1)``:

            >>> bqm = dimod.BQM.from_ising({'x': 2, 'y': 2}, {'xy': -1})
            >>> sampler = greedy.SteepestDescentSampler()
            ...
            >>> samples = sampler.sample(bqm, initial_states=([1, 1], 'xy'), num_reads=3)
            >>> print(samples)      # doctest: +SKIP
               x  y energy num_oc. num_st.
            0 -1 -1   -5.0       1       2
            1 -1 -1   -5.0       1       1
            2 -1 -1   -5.0       1       2
            ['SPIN', 3 rows, 3 samples, 2 variables]

            Notice it required 2 variable flips (``num_steps`` field in the
            last column) to reach the minimum state, ``(-1, -1)``, from the
            initial state, ``(1, 1)``.
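
            The number of steps per read is also exposed as a field of the
            sample record. For instance, continuing the example above (a
            minimal sketch; the printed values assume the same fixed initial
            state tiled over all reads, and the exact array repr may differ):

            >>> samples = sampler.sample(bqm, initial_states=([1, 1], 'xy'),
            ...                          initial_states_generator='tile',
            ...                          num_reads=3)
            >>> samples.record.num_steps      # doctest: +SKIP
            array([2, 2, 2])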
"""
# get the original vartype so we can return consistently
original_vartype = bqm.vartype
# convert to spin
if bqm.vartype is not dimod.SPIN:
bqm = bqm.change_vartype(dimod.SPIN, inplace=False)
# validate seed
if not (seed is None or isinstance(seed, Integral)):
raise TypeError("'seed' should be None or a positive 32-bit integer")
if isinstance(seed, Integral) and not 0 <= seed <= 2**32 - 1:
raise ValueError("'seed' should be an integer between 0 and 2**32 - 1 inclusive")
# parse initial_states et al
parsed_initial_states = self.parse_initial_states(
bqm,
num_reads=num_reads,
initial_states=initial_states,
initial_states_generator=initial_states_generator,
seed=seed)
num_reads = parsed_initial_states.num_reads
initial_states = parsed_initial_states.initial_states
# get linear/quadratic data
linear, (coupler_starts, coupler_ends, coupler_weights), offset = \
bqm.to_numpy_vectors(variable_order=initial_states.variables)
# we need initial states as contiguous numpy array
initial_states_array = \
np.ascontiguousarray(initial_states.record.sample, dtype=np.int8)
# run the steepest descent
samples, energies, num_steps = steepest_gradient_descent(
num_reads,
linear, coupler_starts, coupler_ends, coupler_weights,
initial_states_array, large_sparse_opt)
# resulting sampleset
result = dimod.SampleSet.from_samples(
(samples, initial_states.variables),
energy=energies + offset,
vartype=dimod.SPIN,
num_steps=num_steps,
)
result.change_vartype(original_vartype, inplace=True)
return result


SteepestDescentSampler = SteepestDescentSolver