Browse Source

Initial release

jarjonam 6 years ago
commit
cb05d543cf
90 changed files with 9844 additions and 0 deletions
  1. 2 0
      .gitignore
  2. 4 0
      .idea/encodings.xml
  3. 4 0
      .idea/misc.xml
  4. 8 0
      .idea/modules.xml
  5. 11 0
      .idea/project.iml
  6. 168 0
      .idea/workspace.xml
  7. 114 0
      .ropeproject/config.py
  8. 0 0
      __init__.py
  9. 150 0
      acceleration.py
  10. 247 0
      analysis/.ipynb_checkpoints/interactive-checkpoint.ipynb
  11. 89 0
      analysis/H1line.py
  12. 0 0
      analysis/__init__.py
  13. 54 0
      analysis/angularMomentum.py
  14. 49 0
      analysis/antennaeKDE.py
  15. 34 0
      analysis/bridgeEvolution.py
  16. 48 0
      analysis/colormaps.py
  17. 56 0
      analysis/deVacouleurs.py
  18. 45 0
      analysis/eccentricity.py
  19. 14 0
      analysis/ellipsoidal.py
  20. 73 0
      analysis/energyConservation.py
  21. 51 0
      analysis/halo.py
  22. 100 0
      analysis/inclination.py
  23. 278 0
      analysis/interactive.ipynb
  24. 46 0
      analysis/isolation.py
  25. 36 0
      analysis/massFractions.py
  26. 39 0
      analysis/orbit.py
  27. 61 0
      analysis/orbitalDecay.py
  28. 86 0
      analysis/performance.py
  29. 48 0
      analysis/radialDensity.py
  30. 57 0
      analysis/ringPlot.py
  31. 156 0
      analysis/segmentation.py
  32. 112 0
      analysis/simulatedAnnealing.py
  33. 38 0
      analysis/tailEvolution.py
  34. 56 0
      analysis/tailShapePlot.py
  35. 48 0
      analysis/timestepAnalysis.py
  36. 100 0
      analysis/utils.py
  37. 16 0
      config/antennae.yml
  38. 48 0
      config/companion_mass_analysis.yml
  39. 60 0
      config/default.yml
  40. 70 0
      config/halo_analysis.yml
  41. 30 0
      config/inclination_analysis.yml
  42. 6 0
      config/isolated.yml
  43. 7 0
      config/orbit.yml
  44. 7 0
      config/prograde_equalmass.yml
  45. 7 0
      config/retrograde_equalmass.yml
  46. 51 0
      config/tail_shape_analysis.yml
  47. 26 0
      config/timestep_analysis.yaml
  48. 13 0
      config/toomre1972.yml
  49. 2 0
      cpp/Makefile
  50. 103 0
      cpp/Node.cpp
  51. 25 0
      cpp/Node.h
  52. 106 0
      cpp/acclib.cpp
  53. BIN
      cpp/acclib.so
  54. 36 0
      distributions.py
  55. 432 0
      docs/acceleration.html
  56. 166 0
      docs/analysis/antennaeKDE.html
  57. 88 0
      docs/analysis/bridgeEvolution.html
  58. 97 0
      docs/analysis/eccentricity.html
  59. 272 0
      docs/analysis/inclination.html
  60. 90 0
      docs/analysis/massFractions.html
  61. 167 0
      docs/analysis/ringPlot.html
  62. 394 0
      docs/analysis/segmentation.html
  63. 247 0
      docs/analysis/simulatedAnnealing.html
  64. 92 0
      docs/analysis/tailEvolution.html
  65. 154 0
      docs/analysis/tailShapePlot.html
  66. 354 0
      docs/analysis/utils.html
  67. 896 0
      docs/distributions.html
  68. 739 0
      docs/run_simulated_annealing.html
  69. 1562 0
      docs/simulation.html
  70. 142 0
      docs/utils.html
  71. BIN
      literature/figs/barnes2001.png
  72. BIN
      literature/figs/barnes2001.psd
  73. BIN
      literature/figs/barnes2001.tif
  74. BIN
      literature/figs/g1.tif
  75. BIN
      literature/figs/g1b.tif
  76. BIN
      literature/figs/g1c.tif
  77. BIN
      literature/figs/g2.tif
  78. BIN
      literature/figs/g2b.tif
  79. BIN
      literature/figs/g2c.tif
  80. BIN
      literature/figs/gt.bmp
  81. BIN
      literature/figs/gt.png
  82. BIN
      literature/figs/gt.tif
  83. BIN
      literature/figs/hibbard1.png
  84. BIN
      literature/figs/hibbard2.png
  85. BIN
      literature/figs/hibbard3.png
  86. 306 0
      run_simulated_annealing.py
  87. 45 0
      run_simulation.py
  88. 420 0
      simulation.py
  89. 58 0
      unittests.py
  90. 28 0
      utils.py

+ 2 - 0
.gitignore

@@ -0,0 +1,2 @@
+data/
+__pycache__

+ 4 - 0
.idea/encodings.xml

@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="Encoding" addBOMForNewFiles="with NO BOM" />
+</project>

+ 4 - 0
.idea/misc.xml

@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6" project-jdk-type="Python SDK" />
+</project>

+ 8 - 0
.idea/modules.xml

@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/project.iml" filepath="$PROJECT_DIR$/.idea/project.iml" />
+    </modules>
+  </component>
+</project>

+ 11 - 0
.idea/project.iml

@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+  <component name="TestRunnerService">
+    <option name="PROJECT_TEST_RUNNER" value="Unittests" />
+  </component>
+</module>

+ 168 - 0
.idea/workspace.xml

@@ -0,0 +1,168 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ChangeListManager">
+    <list default="true" id="9b8b83e1-48b9-4a21-aa48-1ac85a885cf5" name="Default Changelist" comment="" />
+    <option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
+    <option name="SHOW_DIALOG" value="false" />
+    <option name="HIGHLIGHT_CONFLICTS" value="true" />
+    <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
+    <option name="LAST_RESOLUTION" value="IGNORE" />
+  </component>
+  <component name="FileEditorManager">
+    <leaf SIDE_TABS_SIZE_LIMIT_KEY="300">
+      <file pinned="false" current-in-tab="false">
+        <entry file="file://$PROJECT_DIR$/acceleration.py">
+          <provider selected="true" editor-type-id="text-editor">
+            <state relative-caret-position="285">
+              <caret line="19" column="15" selection-start-line="19" selection-start-column="15" selection-end-line="19" selection-end-column="15" />
+              <folding>
+                <element signature="e#220#233#0" expanded="true" />
+              </folding>
+            </state>
+          </provider>
+        </entry>
+      </file>
+      <file pinned="false" current-in-tab="true">
+        <entry file="file://$PROJECT_DIR$/run_simulated_annealing.py">
+          <provider selected="true" editor-type-id="text-editor">
+            <state relative-caret-position="294">
+              <caret line="276" column="39" lean-forward="true" selection-start-line="276" selection-start-column="39" selection-end-line="276" selection-end-column="39" />
+            </state>
+          </provider>
+        </entry>
+      </file>
+    </leaf>
+  </component>
+  <component name="IdeDocumentHistory">
+    <option name="CHANGED_PATHS">
+      <list>
+        <option value="$PROJECT_DIR$/acceleration.py" />
+        <option value="$PROJECT_DIR$/simulation.py" />
+        <option value="$PROJECT_DIR$/distributions.py" />
+        <option value="$PROJECT_DIR$/run_simulated_annealing.py" />
+      </list>
+    </option>
+  </component>
+  <component name="ProjectFrameBounds">
+    <option name="x" value="10" />
+    <option name="y" value="43" />
+    <option name="width" value="1260" />
+    <option name="height" value="700" />
+  </component>
+  <component name="ProjectView">
+    <navigator proportions="" version="1">
+      <foldersAlwaysOnTop value="true" />
+    </navigator>
+    <panes>
+      <pane id="ProjectPane">
+        <subPane>
+          <expand>
+            <path>
+              <item name="project" type="b2602c69:ProjectViewProjectNode" />
+              <item name="project" type="462c0819:PsiDirectoryNode" />
+            </path>
+          </expand>
+          <select />
+        </subPane>
+      </pane>
+      <pane id="Scope" />
+    </panes>
+  </component>
+  <component name="PropertiesComponent">
+    <property name="last_opened_file_path" value="$PROJECT_DIR$" />
+    <property name="run.code.analysis.last.selected.profile" value="pProject Default" />
+  </component>
+  <component name="RunDashboard">
+    <option name="ruleStates">
+      <list>
+        <RuleState>
+          <option name="name" value="ConfigurationTypeDashboardGroupingRule" />
+        </RuleState>
+        <RuleState>
+          <option name="name" value="StatusDashboardGroupingRule" />
+        </RuleState>
+      </list>
+    </option>
+  </component>
+  <component name="SvnConfiguration">
+    <configuration />
+  </component>
+  <component name="TaskManager">
+    <task active="true" id="Default" summary="Default task">
+      <changelist id="9b8b83e1-48b9-4a21-aa48-1ac85a885cf5" name="Default Changelist" comment="" />
+      <created>1550793708607</created>
+      <option name="number" value="Default" />
+      <option name="presentableId" value="Default" />
+      <updated>1550793708607</updated>
+    </task>
+    <servers />
+  </component>
+  <component name="ToolWindowManager">
+    <frame x="10" y="43" width="1260" height="700" extended-state="0" />
+    <layout>
+      <window_info active="true" content_ui="combo" id="Project" order="0" visible="true" weight="0.24939467" />
+      <window_info id="Structure" order="1" side_tool="true" weight="0.25" />
+      <window_info id="Favorites" order="2" side_tool="true" />
+      <window_info anchor="bottom" id="Message" order="0" />
+      <window_info anchor="bottom" id="Find" order="1" weight="0.32894737" />
+      <window_info anchor="bottom" id="Run" order="2" />
+      <window_info anchor="bottom" id="Debug" order="3" weight="0.4" />
+      <window_info anchor="bottom" id="Cvs" order="4" weight="0.25" />
+      <window_info anchor="bottom" id="Inspection" order="5" weight="0.4" />
+      <window_info anchor="bottom" id="TODO" order="6" />
+      <window_info anchor="bottom" id="Version Control" order="7" />
+      <window_info anchor="bottom" id="Terminal" order="8" />
+      <window_info anchor="bottom" id="Event Log" order="9" side_tool="true" />
+      <window_info anchor="bottom" id="Python Console" order="10" />
+      <window_info anchor="right" id="Commander" internal_type="SLIDING" order="0" type="SLIDING" weight="0.4" />
+      <window_info anchor="right" id="Ant Build" order="1" weight="0.25" />
+      <window_info anchor="right" content_ui="combo" id="Hierarchy" order="2" weight="0.25" />
+    </layout>
+  </component>
+  <component name="editorHistoryManager">
+    <entry file="file://$PROJECT_DIR$/run_simulation.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="442">
+          <caret line="42" column="16" lean-forward="true" selection-start-line="42" selection-start-column="16" selection-end-line="42" selection-end-column="16" />
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/simulation.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="243">
+          <caret line="40" column="7" lean-forward="true" selection-start-line="40" selection-start-column="7" selection-end-line="40" selection-end-column="7" />
+          <folding>
+            <element signature="e#70#79#0" expanded="true" />
+          </folding>
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/distributions.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="165">
+          <caret line="14" column="10" selection-start-line="14" selection-start-column="10" selection-end-line="14" selection-end-column="10" />
+          <folding>
+            <element signature="e#0#18#0" expanded="true" />
+          </folding>
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/acceleration.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="285">
+          <caret line="19" column="15" selection-start-line="19" selection-start-column="15" selection-end-line="19" selection-end-column="15" />
+          <folding>
+            <element signature="e#220#233#0" expanded="true" />
+          </folding>
+        </state>
+      </provider>
+    </entry>
+    <entry file="file://$PROJECT_DIR$/run_simulated_annealing.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state relative-caret-position="294">
+          <caret line="276" column="39" lean-forward="true" selection-start-line="276" selection-start-column="39" selection-end-line="276" selection-end-column="39" />
+        </state>
+      </provider>
+    </entry>
+  </component>
+</project>

+ 114 - 0
.ropeproject/config.py

@@ -0,0 +1,114 @@
+# The default ``config.py``
+# flake8: noqa
+
+
def set_prefs(prefs):
    """This function is called before opening the project.

    Configures rope's project preferences.  All values below are the
    stock defaults shipped in rope's generated ``config.py``.
    """
    settings = {
        # Files and folders whose changes rope should not track or list
        # in Project.get_files().  '?' and '*' match anything but '/';
        # 'dir//*.o' also matches nested files.
        'ignored_resources': ['*.pyc', '*~', '.ropeproject',
                              '.hg', '.svn', '_svn', '.git', '.tox'],
        # Persist object information between sessions, uncompressed.
        'save_objectdb': True,
        'compress_objectdb': False,
        # Analyze each module on save; follow no calls during static
        # object analysis.
        'automatic_soa': True,
        'soa_followed_calls': 0,
        # Keep dynamic object analysis on when running modules/tests
        # (slower but more informative).
        'perform_doa': True,
        # Check the validity of the object DB while running.
        'validate_objectdb': True,
        # Undo history: depth and cross-session persistence.
        'max_history_items': 32,
        'save_history': True,
        'compress_history': False,
        # PEP 8 indentation.
        'indent_size': 4,
        # Builtin / c-extension modules rope may import and inspect;
        # also add all standard c-extensions automatically.
        'extension_modules': [],
        'import_dynload_stdmods': True,
        # Modules with syntax errors raise ModuleSyntaxError rather
        # than being treated as empty.
        'ignore_syntax_errors': False,
        # Unresolvable imports remain visible in the importing namespace.
        'ignore_bad_imports': False,
        # Import insertion and organization style.
        'prefer_module_from_imports': False,
        'split_imports': False,
        'pull_imports_to_top': True,
        'sort_imports_alphabetically': False,
        # Implementation of ITypeHintingFactory; only change this if you
        # are injecting custom type-hinting providers.
        'type_hinting_factory':
            'rope.base.oi.type_hinting.factory.default_type_hinting_factory',
    }
    for key, value in settings.items():
        prefs[key] = value
+
+
def project_opened(project):
    """This function is called after opening the project.

    Hook invoked by rope once the project has been loaded; intentionally
    left as a no-op.
    """
    # Do whatever you like here!

+ 0 - 0
__init__.py


+ 150 - 0
acceleration.py

@@ -0,0 +1,150 @@
+"""Defines the possible routines for computing the gravitational forces in the
+simulation.
+
+All the methods in this file require a position (n, 3) vector, a mass (n, )
+vector and an optional softening scale float."""
+
+import ctypes
+import numpy.ctypeslib as ctl
+import numpy as np
+from numba import jit
+
+
def bruteForce(r_vec, mass, soft=0.):
    """Calculates the acceleration generated by a set of masses on themselves.
       Complexity O(n*m) where n is the total number of masses and m is the
       number of massive particles.

    Parameters:
        r_vec (array): list of particles positions.
            Shape (n, 3) where n is the number of particles
        mass (array): list of particles masses.
            Shape (n,)
        soft (float): characteristic plummer softening length scale
    Returns:
        forces (array): list of forces acting on each particle.
            Shape (n, 3)
    """
    # Massless particles feel gravity but exert none: keep only the
    # massive particles as sources.
    massive = mass != 0
    sources = r_vec[massive]
    # (1, m, 1) mass column for broadcasting against the displacement
    # tensor below.
    m_col = mass[massive].reshape(1, -1, 1)
    # disp[i, j] is the displacement from particle i towards source j,
    # shape (n, m, 3); dist[i, j] is its norm, kept as (n, m, 1).
    disp = sources.reshape(1, -1, 3) - r_vec.reshape(-1, 1, 3)
    dist = np.linalg.norm(disp, axis=-1, keepdims=True)
    # A zero distance marks a self-pair (or coincident particles); the
    # numerator is zero there as well, so substituting 1 in the
    # denominator yields exactly 0 contribution while avoiding 0/0.
    safe = np.where(dist == 0., 1., dist)
    # Plummer-softened acceleration: a = M / (r + eps)^2 along disp/r
    accel = disp * m_col / (safe + soft)**2 / safe
    # Superpose the contributions of every source on each particle
    return accel.sum(axis=1)
+
@jit(nopython=True) # Numba annotation: compile in nopython mode
def bruteForceNumba(r_vec, mass, soft=0.):
    """Calculates the acceleration generated by a set of masses on themselves.
    It is done in the same way as in bruteForce, but this
    method is run through Numba.

    Parameters:
        r_vec (array): particle positions, shape (n, 3).
        mass (array): particle masses, shape (n,).
        soft (float): characteristic plummer softening length scale.
    Returns:
        accel (array): acceleration acting on each particle, shape (n, 3).
    """
    # Only massive particles act as sources of gravity
    mask = mass!=0
    massMassive = mass[mask]
    rMassive_vec = r_vec[mask]
    # (1, m, 1) mass matrix for broadcasting
    mass_mat = massMassive.reshape(1, -1, 1)
    # Pairwise displacements from each particle to each massive source
    r_ten = rMassive_vec.reshape(1, -1, 3) - r_vec.reshape(-1, 1, 3)
    # Avoid np.linalg.norm to allow Numba optimizations
    r_mat = np.sqrt(r_ten[:,:,0:1]**2 + r_ten[:,:,1:2]**2 + r_ten[:,:,2:3]**2)
    # Replace zero distances (self-pairs) by 1: the numerator r_ten is
    # zero there too, so the contribution becomes exactly 0 (not NaN)
    r_mat = np.where(r_mat == 0, np.ones_like(r_mat), r_mat)
    # Plummer-softened acceleration along the unit vector r_ten/r_mat
    accel = r_ten/r_mat * mass_mat/(r_mat+soft)**2
    return accel.sum(axis=1) # Add all forces in each particle
+
@jit(nopython=True) # Numba annotation: compile in nopython mode
def bruteForceNumbaOptimized(r_vec, mass, soft=0.):
    """Calculates the acceleration generated by a set of masses on themselves.
    This is optimized for high performance with Numba. All massive particles
    must appear first in r_vec/mass.

    Parameters:
        r_vec (array): particle positions, shape (n, 3). Massive first.
        mass (array): particle masses, shape (n,).
        soft (float): characteristic plummer softening length scale.
    Returns:
        accel (array): acceleration acting on each particle, shape (n, 3).
    """
    accel = np.zeros_like(r_vec)
    # Use superposition to add all the pairwise contributions
    n = r_vec.shape[0] # Number of particles
    delta = np.zeros((3,)) # Only allocate this buffer once
    for i in range(n):
        # Particles are sorted massive-first: once i is massless, every
        # remaining pair is massless-massless and exerts no force.
        if mass[i] == 0: break
        for j in range(i+1, n):
            # Explicitly separate components for high performance
            # i.e. do not do delta = r_vec[j] - r_vec[i]
            # (The effect of this is VERY relevant (x10) and has to do with
            # memory reallocation) Numba will vectorize the loops.
            for k in range(3): delta[k] = r_vec[j,k] - r_vec[i,k]
            r = np.sqrt(delta[0]*delta[0] + delta[1]*delta[1] + delta[2]*delta[2])
            tripler = (r+soft)**2 * r

            # Compute acceleration on particle j due to massive particle i
            mr3inv = mass[i]/(tripler)
            # Again, do NOT do accel[j] -= mr3inv * delta
            for k in range(3): accel[j,k] -= mr3inv * delta[k]

            # Massless particles exert no reaction force on i.
            # BUG FIX: this previously used `break`, which aborted the whole
            # inner loop, so every massless particle after the first one never
            # received the force from massive particle i. `continue` keeps
            # iterating so all test particles are accelerated.
            if mass[j] == 0: continue
            # Otherwise, opposite direction (+)
            mr3inv = mass[j]/(tripler)
            for k in range(3): accel[i,k] += mr3inv * delta[k]
    return accel
+
# C++ interface: handle to the shared library, populated lazily
ACCLIB = None
def loadCPPLib():
    """Loads the C++ shared library into the global ACCLIB handle.

    Must be called before using the library; the *CPP acceleration
    routines call it automatically on first use.
    """
    global ACCLIB
    ACCLIB = ctypes.CDLL('cpp/acclib.so')
    # Argument types mirroring the signatures in cpp/acclib.cpp
    row_ptrs = np.ctypeslib.ndpointer(dtype=np.uintp)  # double** (row addresses)
    flat = ctl.ndpointer(np.float64, flags='aligned, c_contiguous')  # double*
    ACCLIB.bruteForceCPP.argtypes = [
        row_ptrs, flat, ctypes.c_int, ctypes.c_double]
    ACCLIB.barnesHutCPP.argtypes = [
        row_ptrs, flat, ctypes.c_int, ctypes.c_double, ctypes.c_double,
        ctypes.c_double, ctypes.c_double, ctypes.c_double]
+
def bruteForceCPP(r_vec, m_vec, soft=0.):
    """Calculates the acceleration generated by a set of masses on themselves.
    This is ran in a shared C++ library through Brute Force (pairwise sums)
    Massive particles must appear first.

    Parameters:
        r_vec (array): particle positions, shape (n, 3).
            NOTE(review): the row-pointer construction below assumes the
            array is C-contiguous — confirm callers guarantee this.
        m_vec (array): particle masses, shape (n,).
        soft (float): characteristic plummer softening length scale.
    Returns:
        accel (array): acceleration acting on each particle, shape (n, 3).
    """
    # Convert array to data required by C++ library
    # Lazily load the shared library on first call
    if ACCLIB is None: loadCPPLib() # Singleton pattern
    # Build a double** view: an array of row addresses
    # (base address + i * row stride), one per particle
    r_vec_c = (r_vec.ctypes.data + np.arange(r_vec.shape[0]) 
        * r_vec.strides[0]).astype(np.uintp)
    # Set return type as double* (flat array of length 3n)
    ACCLIB.bruteForceCPP.restype = np.ctypeslib.ndpointer(dtype=np.float64, 
        shape=(r_vec.shape[0]*3,))
    # Call the C++ function: double* bruteForceCPP
    accel =  ACCLIB.bruteForceCPP(r_vec_c, m_vec, r_vec.shape[0], soft)
    # Change shape to get the expected Numpy array (n, 3)
    accel.shape = (-1, 3)
    return accel
+
def barnesHutCPP(r_vec, m_vec, soft=0.):
    """Calculates the acceleration generated by a set of masses on themselves.
    This is ran in a shared C++ library using a BarnesHut tree.

    Parameters:
        r_vec (array): particle positions, shape (n, 3).
            NOTE(review): the row-pointer construction below assumes the
            array is C-contiguous — confirm callers guarantee this.
        m_vec (array): particle masses, shape (n,).
        soft (float): characteristic plummer softening length scale.
    Returns:
        accel (array): acceleration acting on each particle, shape (n, 3).
    """
    # Convert array to data required by C++ library
    # Lazily load the shared library on first call
    if ACCLIB is None: loadCPPLib() # Singleton pattern
    # Build a double** view: an array of row addresses
    # (base address + i * row stride), one per particle
    r_vec_c = (r_vec.ctypes.data + np.arange(r_vec.shape[0]) 
        * r_vec.strides[0]).astype(np.uintp)
    # Set return type as double* (flat array of length 3n)
    ACCLIB.barnesHutCPP.restype = np.ctypeslib.ndpointer(dtype=np.float64, 
        shape=(r_vec.shape[0]*3,))
    # Explicitly pass the corner (px, py, pz) and size of the bounding
    # box used as the top node of the tree
    px, py, pz = np.min(r_vec, axis=0)
    size = np.max(np.max(r_vec, axis=0) - np.min(r_vec, axis=0))
    # Call the C++ function: double* barnesHutCPP
    accel =  ACCLIB.barnesHutCPP(r_vec_c, m_vec, r_vec.shape[0], 
        size, px, py, pz, soft)
    # Change shape to get the expected Numpy array (n, 3)
    accel.shape = (-1, 3)
    return accel

File diff suppressed because it is too large
+ 247 - 0
analysis/.ipynb_checkpoints/interactive-checkpoint.ipynb


+ 89 - 0
analysis/H1line.py

@@ -0,0 +1,89 @@
+"""Compare the H1 observations of the Antennae to the simulation"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from analysis import utils
+
# These parameters were checked by running an interactive version of this function
def plot2d(i=135, theta=1.86, phi=4.10, view=5.10, scaling=12, v_scaling=150, v_offset=43, x_offset=-4, y_offset=2):
    """Plot an encounter against Hibbard's H1 observations of the Antennae.

    Parameters:
        i (int): timestep (snapshot index)
        theta (float): polar angle
        phi (float): azimuthal angle
        view (float): rotation along line of sight
        scaling (float): scaling of the position vectors
        v_scaling (float): scaling of the line of sight velocity
        v_offset (float): offset of the line of sight velocity
        x_offset (float): position offset in the x direction
        y_offset (float): position offset in the y direction
    """
    # BUG FIX: `i` was previously ignored and the snapshot hard-coded to
    # 13500.  Snapshots appear to be stored every 100 steps (default
    # i=135 -> 13500, so default behaviour is unchanged).
    # NOTE(review): confirm the i -> i*100 mapping against the
    # simulation's save interval.
    data = utils.loadData('antennae', int(i * 100))

    # Scale velocities into km/s and keep only the disk particles
    r_vec, v_vec = data['r_vec'], data['v_vec']*v_scaling
    mask = data['type'][:,0] == 'disk'
    r_vec, v_vec = r_vec[mask], v_vec[mask]

    # Viewing angles: rotation about x (theta), about z (phi), and a
    # final rotation about the line of sight (view)
    M1 = np.array([[1, 0, 0],
        [0, np.cos(theta), np.sin(theta)],
        [0, -np.sin(theta), np.cos(theta)]])
    M2 = np.array([[np.cos(phi), np.sin(phi), 0],
        [-np.sin(phi), np.cos(phi), 0],
        [0, 0, 1]])
    M3 = np.array([[np.cos(view), np.sin(view), 0],
        [-np.sin(view), np.cos(view), 0],
        [0, 0, 1]])

    M = np.matmul(M2, np.matmul(M1, M3))
    r_vec = np.tensordot(r_vec, M, axes=[1,0])
    v_vec = np.tensordot(v_vec, M, axes=[1,0])

    # Offsets to align the simulation with the observation frame
    r_vec[:,0] = r_vec[:,0]*scaling + x_offset
    r_vec[:,1] = r_vec[:,1]*scaling + y_offset
    v_vec[:,2] += v_offset

    # 2x2 grid: sky plane (top-left), x vs v_los (bottom-left),
    # v_los vs y (top-right); bottom-right is unused
    f, axs = plt.subplots(2, 2,  figsize=((11+14/1.97)/2, (14+11/1.54)/2), 
        gridspec_kw = {'width_ratios':[11, 14/1.97], 'height_ratios':[14, 11/1.54]})

    # Plot background images
    # We manually derive the region that must be plotted based on
    # the data from Hibbard
    axs[0,0].imshow(plt.imread('literature/figs/hibbard1.png'), 
        extent=[-58.2, 88.6, -86.5, 78.7])
    axs[1,0].imshow(plt.imread('literature/figs/hibbard3.png'), 
        extent=[-58.2, 88.6, -270, 310])
    axs[0,1].imshow(plt.imread('literature/figs/hibbard2.png'), 
        extent=[-270, 310, -86.5, 78.7])

    # Overlay the simulated disk particles on each projection
    axs[0,0].scatter(r_vec[:,0], r_vec[:,1], s=3, edgecolor='', alpha=3E-1)
    axs[1,0].scatter(r_vec[:,0], v_vec[:,2], s=3, edgecolor='', alpha=3E-1)
    axs[0,1].scatter(v_vec[:,2], r_vec[:,1], s=3, edgecolor='', alpha=3E-1)
    f.subplots_adjust(hspace=0, wspace=0)

    # Get the correct scales on all axes (matching the image extents)
    axs[0,0].set_xlim((-58.2, 88.6))
    axs[0,0].set_ylim((-86.5, 78.7))
    axs[1,0].set_xlim((-58.2, 88.6))
    axs[1,0].set_ylim((-270, 310))
    axs[1,0].set_aspect((88.6 + 58.2)/(270+310) / 1.54)
    axs[0,1].set_ylim((-86.5, 78.7))
    axs[0,1].set_xlim((-270, 310))
    axs[0,1].set_aspect((270+310)/(86.5 + 78.7) * 1.97)
    axs[1,1].axis('off')

    # Styling
    utils.stylizePlot(axs.flatten())
    axs[0,1].set_xlabel(r'$v_{los}$ / km s$^{-1}$', fontsize=14)
    axs[1,0].set_ylabel(r'$v_{los}$ / km s$^{-1}$', fontsize=14)
    axs[1,0].set_xlabel(r'$x$ / kpc', fontsize=14)
    axs[0,0].set_ylabel(r'$y$ / kpc', fontsize=14)

    plt.show()

plot2d()

+ 0 - 0
analysis/__init__.py


+ 54 - 0
analysis/angularMomentum.py

@@ -0,0 +1,54 @@
+"""Analysis of the transfer of angular momentum from luminous components
+for encounters with a dark matter halo"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from analysis import utils
+
def luminousAngularMomentum(data):
    """Calculate the total angular momentum of the luminous matter.

    Parameters:
        data: a data dictionary in the format saved by the simulation

    Returns:
        The norm of the total angular momentum vector summed over all
        disk and bulge (i.e. luminous) particles.
    """
    labels = data['type'][:, 0]
    # Luminous matter = disk + bulge particles (the halo is dark)
    luminous = (labels == 'disk') | (labels == 'bulge')
    m = data['mass'][luminous][:, np.newaxis]
    # L = sum_i m_i (r_i x v_i)
    L_vec = np.sum(m * np.cross(data['r_vec'][luminous],
                                data['v_vec'][luminous]), axis=0)
    return np.linalg.norm(L_vec)
+
# Plotting: compare angular momentum conservation with/without a halo
f, ax = plt.subplots(1, 1)

# Encounter without a dark matter halo (bulge:disk:halo = 1:1:0)
j1 = []
ts = np.linspace(1, 50001, 251)
for t in ts:
    data = utils.loadData('hyperbolic_nohalo', int(t))
    j1.append(luminousAngularMomentum(data))
# Normalize by the initial value. np.asarray makes the array division
# explicit (previously this relied on numpy-scalar broadcasting over a
# Python list, which is fragile).
ax.plot(ts, np.asarray(j1)/j1[0], c='black', linestyle='dashed', label='1:1:0')

# Encounter with a dark matter halo (bulge:disk:halo = 1:3:16)
j2 = []
ts = np.linspace(1, 150001, 751)
for t in ts:
    data = utils.loadData('hyperbolic_halo', int(t))
    j2.append(luminousAngularMomentum(data))
ax.plot(ts, np.asarray(j2)/j2[0], c='black', linestyle='solid', label='1:3:16')

# Plotting styling
utils.setSize(ax, y=(0, 1.1))
utils.setAxes(ax, x=r'$t$', y=r'Angular momentum (luminous)', 
    ycoords=(.9,-0.08))
ax.legend(title=r'bulge:disk:halo')
utils.stylizePlot([ax])
plt.show()

# Quantitative analysis: fractional spread as a conservation measure
print('Angular momentum is conserved in encounter 1 to within', 
    (np.max(j1)-np.min(j1))/j1[0])

+ 49 - 0
analysis/antennaeKDE.py

@@ -0,0 +1,49 @@
+"""Plots the Antennae using Kernel Density Estimation to map the density"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+from scipy.stats import gaussian_kde
+
+from analysis.colormaps import red_map
+from analysis import utils
+
def plotKDE(i, theta, phi, view):
    """Plot the Antennae galaxies using kernel density estimation

    Parameters:
        i (int): Timestep at which to plot them
        theta (float): polar angle
        phi (float): azimuthal angle
        view (float): rotation along the line of sight
    """
    # BUG FIX: `i` was previously ignored and the snapshot hard-coded to
    # 13500.  Snapshots appear to be stored every 100 steps (the call
    # below uses i=135 -> 13500, so behaviour there is unchanged).
    # NOTE(review): confirm the i -> i*100 mapping against the
    # simulation's save interval.
    data = utils.loadData('antennae', int(i * 100))
    r_vec = data['r_vec'][::]

    # Rotate to view from (theta, phi, view) viewpoint: rotation about
    # x (theta), about z (phi), and about the line of sight (view)
    M1 = np.array([[1, 0, 0],
        [0, np.cos(theta), np.sin(theta)],
        [0, -np.sin(theta), np.cos(theta)]])
    M2 = np.array([[np.cos(phi), np.sin(phi), 0],
        [-np.sin(phi), np.cos(phi), 0],
        [0, 0, 1]])
    M3 = np.array([[np.cos(view), np.sin(view), 0],
        [-np.sin(view), np.cos(view), 0],
        [0, 0, 1]])
    M = np.matmul(M2, np.matmul(M1, M3))
    r_vec = np.tensordot(r_vec, M, axes=[1,0])

    # Plotting
    plt.figure(figsize=(4,5), dpi=400)
    # Perform Kernel Density Estimation to colour by density
    xy = np.vstack([r_vec[:,0],r_vec[:,1]])
    c = gaussian_kde(xy)(xy)
    # Densest points are plotted last so they stay visible
    idx = c.argsort()
    x, y, c = r_vec[:,0][idx], r_vec[:,1][idx], c[idx]

    plt.scatter(x, y, c=c, s=3, edgecolor='', cmap=red_map)
    plt.axis('square')
    plt.show()


plotKDE(135, 1.86, 4.10, 5.10)

+ 34 - 0
analysis/bridgeEvolution.py

@@ -0,0 +1,34 @@
+"""Plot the evolution of the amount of matter in the bridge as 
+a function of time for varying companion masses
+"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from analysis import utils
+from analysis.segmentation import segmentEncounter
+
+
# Masses are given relative to 100, with respect to the main mass
masses = [20, 50, 100, 200]
# Bug fix: the label list did not match the masses above -- the first entry
# read '1:0.25' for a 1:0.2 encounter and there was a spurious fifth entry.
labels = ['1:0.2', '1:0.5', '1:1', '1:2']  # main:companion

f, ax = plt.subplots(1, 1, sharey=True)

for mass, label, style in zip(masses, labels, [':', '-.', '-', '--']):
    ts = np.linspace(4000, 10000, 61)  # time range
    fractions = []  # fractional mass in bridge

    for t in ts:
        data = utils.loadData('{}mass'.format(mass), int(t))
        masks, _, _ = segmentEncounter(data, mode='bridge')
        fractions.append(np.sum(masks[0])/len(data['r_vec']))

    ax.plot(ts, fractions, label=label, c='black', linestyle=style)

ax.legend(title='main:companion', fontsize=14)
utils.setAxes(ax, x='Time', y='Fractional mass in bridge',
    xcoords=(.9, -0.08), ycoords=(-0.08, .6))
utils.setSize(ax, x=(5000, 10000), y=(0, 0.1))
utils.stylizePlot([ax])
plt.show()

+ 48 - 0
analysis/colormaps.py

@@ -0,0 +1,48 @@
"""Custom colormaps shared by the analysis scripts."""

from matplotlib.colors import LinearSegmentedColormap

# 64-entry RGB lookup table reproducing MATLAB's 'parula' colormap.
cm_data = [[0.2081, 0.1663, 0.5292], [0.2116238095, 0.1897809524, 0.5776761905], 
 [0.212252381, 0.2137714286, 0.6269714286], [0.2081, 0.2386, 0.6770857143], 
 [0.1959047619, 0.2644571429, 0.7279], [0.1707285714, 0.2919380952, 
  0.779247619], [0.1252714286, 0.3242428571, 0.8302714286], 
 [0.0591333333, 0.3598333333, 0.8683333333], [0.0116952381, 0.3875095238, 
  0.8819571429], [0.0059571429, 0.4086142857, 0.8828428571], 
 [0.0165142857, 0.4266, 0.8786333333], [0.032852381, 0.4430428571, 
  0.8719571429], [0.0498142857, 0.4585714286, 0.8640571429], 
 [0.0629333333, 0.4736904762, 0.8554380952], [0.0722666667, 0.4886666667, 
  0.8467], [0.0779428571, 0.5039857143, 0.8383714286], 
 [0.079347619, 0.5200238095, 0.8311809524], [0.0749428571, 0.5375428571, 
  0.8262714286], [0.0640571429, 0.5569857143, 0.8239571429], 
 [0.0487714286, 0.5772238095, 0.8228285714], [0.0343428571, 0.5965809524, 
  0.819852381], [0.0265, 0.6137, 0.8135], [0.0238904762, 0.6286619048, 
  0.8037619048], [0.0230904762, 0.6417857143, 0.7912666667], 
 [0.0227714286, 0.6534857143, 0.7767571429], [0.0266619048, 0.6641952381, 
  0.7607190476], [0.0383714286, 0.6742714286, 0.743552381], 
 [0.0589714286, 0.6837571429, 0.7253857143], 
 [0.0843, 0.6928333333, 0.7061666667], [0.1132952381, 0.7015, 0.6858571429], 
 [0.1452714286, 0.7097571429, 0.6646285714], [0.1801333333, 0.7176571429, 
  0.6424333333], [0.2178285714, 0.7250428571, 0.6192619048], 
 [0.2586428571, 0.7317142857, 0.5954285714], [0.3021714286, 0.7376047619, 
  0.5711857143], [0.3481666667, 0.7424333333, 0.5472666667], 
 [0.3952571429, 0.7459, 0.5244428571], [0.4420095238, 0.7480809524, 
  0.5033142857], [0.4871238095, 0.7490619048, 0.4839761905], 
 [0.5300285714, 0.7491142857, 0.4661142857], [0.5708571429, 0.7485190476, 
  0.4493904762], [0.609852381, 0.7473142857, 0.4336857143], 
 [0.6473, 0.7456, 0.4188], [0.6834190476, 0.7434761905, 0.4044333333], 
 [0.7184095238, 0.7411333333, 0.3904761905], 
 [0.7524857143, 0.7384, 0.3768142857], [0.7858428571, 0.7355666667, 
  0.3632714286], [0.8185047619, 0.7327333333, 0.3497904762], 
 [0.8506571429, 0.7299, 0.3360285714], [0.8824333333, 0.7274333333, 0.3217], 
 [0.9139333333, 0.7257857143, 0.3062761905], [0.9449571429, 0.7261142857, 
  0.2886428571], [0.9738952381, 0.7313952381, 0.266647619], 
 [0.9937714286, 0.7454571429, 0.240347619], [0.9990428571, 0.7653142857, 
  0.2164142857], [0.9955333333, 0.7860571429, 0.196652381], 
 [0.988, 0.8066, 0.1793666667], [0.9788571429, 0.8271428571, 0.1633142857], 
 [0.9697, 0.8481380952, 0.147452381], [0.9625857143, 0.8705142857, 0.1309], 
 [0.9588714286, 0.8949, 0.1132428571], [0.9598238095, 0.9218333333, 
  0.0948380952], [0.9661, 0.9514428571, 0.0755333333], 
 [0.9763, 0.9831, 0.0538]]

# Continuous colormap built from the table above.
parula_map = LinearSegmentedColormap.from_list('parula', cm_data)

# Short red-to-green ramp; used e.g. by the KDE density plots.
cm_data= ['#CB3366','#E37575','#F7C792','#9EFF88']
red_map = LinearSegmentedColormap.from_list('red_map', cm_data)

+ 56 - 0
analysis/deVacouleurs.py

@@ -0,0 +1,56 @@
+"""Compares the matter distribution of the merger to the de Vacouleurs' law"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+from scipy.stats import linregress
+
+from analysis import utils
+
+#https://stackoverflow.com/questions/39418380/histogram-with-equal-number-of-points-in-each-bin
def equalNHistogramBins(x, nbin):
    """Return histogram bin edges chosen so that each of the `nbin` bins
    contains (approximately) the same number of the points in `x`.
    """
    ordered = np.sort(x)
    edgePositions = np.linspace(0, len(x), nbin + 1)
    return np.interp(edgePositions, np.arange(len(x)), ordered)
+
def plotRadialMagnitude(data, ax):
    """Plots the magnitude (log(areal density)) as a function of radius.

    Parameters:
        data: a data dictionary in the format saved by the simulation
        ax: the matplotlib axis where the data should be plotted
    """
    xs, ys = [], []

    # The COM does not depend on the projection, so compute it once
    # (it was previously recomputed inside the loop).
    # NOTE(review): positions were read from data['r'], but every other
    # script uses data['r_vec'] -- confirm against the save format.
    COM = np.sum(data['r_vec'] * data['mass'][:,np.newaxis], axis=0) / np.sum(data['mass'])

    # Project onto all three planes, calculate the areal density and plot
    for dim, style, label in zip([[1,2],[0,2],[0,1]], ['o','s','x'],
            ['Onto y-z plane', 'Onto x-z plane', 'Onto x-y plane']):
        # Measure radial distance with respect to COM
        r = np.linalg.norm((data['r_vec'] - COM)[:,dim], axis=-1)
        # NOTE(review): other scripts index a column of data['types']
        # (e.g. [:,1]); confirm this comparison selects rows as intended.
        diskR = r[data['types']=='disk']
        # Bug fix: the helper is named equalNHistogramBins; the original
        # called a nonexistent histedges_equalN (NameError).
        hist, bin_edges = np.histogram(diskR,
            equalNHistogramBins(diskR, 30), density=True)
        bin_edges = (bin_edges[:-1] + bin_edges[1:])/2
        M = np.log(hist)  # Magnitude

        if style == 'x':
            plt.scatter((bin_edges**(1/4)), M, marker=style,
                c='black', label=label)
        else:
            plt.scatter((bin_edges**(1/4)), M, marker=style,
                facecolors='none', edgecolors='black', label=label)

        # Omit points on both ends for the fit, where the law does not hold
        xs.extend((bin_edges**(1/4))[5:-2])
        ys.extend(M[5:-2])

    utils.setSize(ax, x=(.5, 1.4), y=(-3.5, 1))
    utils.setAxes(ax, x=r'$r^{1/4}$', y=r'$M$', xcoords=(.9,-0.08), ycoords=(-0.08,.9))
    utils.stylizePlot([ax])

    # Fit a line to the data
    slope, intercept, r_value, _, _ = linregress(xs, ys)
    x = np.linspace(.55, 1.35, 10)
    fit = np.poly1d((slope, intercept))(x)
    plt.plot(x, fit, linewidth=.5, label='Global fit', c='black')
    plt.text(.6, -3, r'$R^2 = {:.2f}$'.format(r_value**2), fontsize=14)

    plt.legend()


data = utils.loadData('hyperbolic_halo', 30000)
f, ax = plt.subplots(1, 1)
# Bug fix: the function takes (data, ax); a stray None argument was passed.
plotRadialMagnitude(data, ax)
plt.show()

+ 45 - 0
analysis/eccentricity.py

@@ -0,0 +1,45 @@
+"""Plot the eccentricity of the orbits of the particles in an encounter"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from analysis import utils
+from analysis.segmentation import segmentEncounter
+
# Segment the encounter
data = utils.loadData('200mass', 15000)
(bridgeMask, stolenMask, orbittingMask, tailMask), _, _ = segmentEncounter(data)
# Avoid central masses
# NOTE(review): slicing [1:] only skips the central mass if these are index
# arrays with the center first -- confirm against segmentEncounter's output.
orbittingMask, stolenMask = orbittingMask[1:], stolenMask[1:]

# Calculate the eccentricities of each group around the relevant center
tailE = utils.calculateEccentricity(1.0, data['r_vec'][0], data['v_vec'][0],
    data['r_vec'][tailMask], data['v_vec'][tailMask])
stolenE = utils.calculateEccentricity(2.0, data['r_vec'][1], data['v_vec'][1],
    data['r_vec'][stolenMask], data['v_vec'][stolenMask])
orbittingE = utils.calculateEccentricity(1.0, data['r_vec'][0], data['v_vec'][0],
    data['r_vec'][orbittingMask], data['v_vec'][orbittingMask])

# Plotting
f, ax = plt.subplots(1, 1)
bins = np.linspace(0, 10, 301)
# Bug fix: the 'normed' keyword was deprecated and later removed from
# matplotlib; 'density' is the drop-in replacement.
style = {'linestyle':'solid', 'density':True, 'color':'black'}
ax.hist(tailE, bins=bins, histtype='step', label='tail', **style)
ax.hist(stolenE, bins=bins, histtype='step', label='stolen', **style)
ax.hist(orbittingE, bins=bins, histtype='step', label='orbitting', **style)

# Plotting styling
utils.setSize(ax, x=(0,2))
utils.setAxes(ax, x='Eccentricity', y='Density of particles',
    xcoords=(.85,-0.08), ycoords=(-0.1,.65))
ax.legend(loc='upper right')
utils.stylizePlot([ax])
ax.axvline(x=1, c='black')  # e=1 separates bound from unbound orbits
plt.show()

# Summarize the results
print('Not shown, fraction of particles in tail with e>2.0:',
    np.sum(tailE > 2)/len(tailE))
print('Tail, higher than e>1:', np.sum(tailE > 1)/len(tailE))
print('Stolen, higher than e>1:', np.sum(stolenE > 1)/len(stolenE))
print('Orbitting, higher than e>1:', np.sum(orbittingE > 1)/len(orbittingE))

+ 14 - 0
analysis/ellipsoidal.py

@@ -0,0 +1,14 @@
+"""Calculate the axes of the ellipsoidal merger remnant"""
+
+import numpy as np
+
+from analysis import utils
+
# Calculate the ellipsoidal axes of the merger remnant from the second
# moments of the luminous matter: diagonalize r_i r_j and compare the
# square roots of its eigenvalues.
data = utils.loadData('hyperbolic_halo', 30000)
luminous = data['types'][:,1] != 'dark'  # luminous components only
r_vec = data['r_vec'][luminous]
I = np.einsum('ni,nj->ij', r_vec, r_vec)
eig = np.linalg.eigvalsh(I)
print(np.sqrt(eig/eig[1]))
# we obtain 10:11:13

+ 73 - 0
analysis/energyConservation.py

@@ -0,0 +1,73 @@
+"""Basic testing to ensure the chosen timestep is sensible and to probe
+the symplectic character of the integrator."""
+
+import matplotlib.pyplot as plt
+import numpy as np
+from scipy.signal import savgol_filter
+
+from analysis import utils
+
+
def calculatePotentialEnergy(r_vec, v_vec, mass, soft=0.):
    """Calculates the total (potential + kinetic) energy of a set of masses.
       It is an adaptation of the acceleration routines.

    Note: despite the name, the returned value includes the kinetic term;
    this is what the energy-conservation analysis below relies on.

    Parameters:
        r_vec (array): (N,3) particle positions
        v_vec (array): (N,3) particle velocities
        mass (array): (N,) particle masses (massless test particles allowed)
        soft (float): softening added to the pair separation

    Returns:
        float: total energy of the system
    """
    # Only massive particles source the potential
    mask = mass!=0
    massMassive = mass[mask]
    rMassive_vec = r_vec[mask]
    # Pairwise separations between all particles and the massive ones
    mass_mat = massMassive.reshape(1, -1, 1)
    r_ten = rMassive_vec.reshape(1, -1, 3) - r_vec.reshape(-1, 1, 3)
    r_mat = np.linalg.norm(r_ten, axis=-1, keepdims=True)
    # Avoid self interactions and divisions by 0
    mass_mat = np.where(r_mat == 0, np.zeros_like(mass_mat), mass_mat)
    r_mat = np.where(r_mat == 0, np.ones_like(r_mat), r_mat)
    # NOTE(review): softening enters as 1/(r + soft) here; confirm this
    # matches the softening form used by the acceleration routines.
    PE = -mass_mat/(r_mat + soft)
    # Add all contributions but do not sum them twice (each pair is seen
    # from both ends, hence the factor of 1/2)
    PE = np.sum(PE, axis=1)[:,0]/2 * mass
    energy = PE + 1/2 * np.linalg.norm(v_vec, axis=-1)**2 * mass
    return np.sum(energy)
+
# Compare the relative energy drift for three different timesteps,
# using an early snapshot of each run as its ground truth.
f, ax = plt.subplots(1, 1)
for name, label, factor, color in zip(['dt1E-2', 'dt1E-3', 'dt1E-4'],
        [r'$dt=10^{-2}$', r'$dt=10^{-3}$', r'$dt=10^{-4}$'],
        [1, 10, 100],
        ['cornflowerblue', 'darkorange', '#75d2aa']):
    time = []
    errors = []

    # Use start of the simulation as ground truth
    truth = utils.loadData(name, 10*factor)
    truthEnergy = calculatePotentialEnergy(truth['r_vec'], truth['v_vec'],
        truth['mass'], soft=0.1)

    for n in np.linspace(10, 1000, 100):
        # Load the data
        data = utils.loadData(name, int(n*factor))
        time.append(data['t'])
        # Relative deviation of the total energy from the ground truth
        energy = calculatePotentialEnergy(data['r_vec'], data['v_vec'],
            data['mass'], soft=0.1)
        errors.append(np.abs(energy - truthEnergy)/np.abs(truthEnergy))

    # Smooth and plot
    errors = savgol_filter(errors, 11, 3)
    ax.plot(time, errors, color=color, label=label)

# Styling (hoisted out of the loop: it only needs to run once)
utils.stylizePlot([ax])
utils.setAxes(ax, x='Time', y='Relative energy error',
    xcoords=(.85,-0.08), ycoords=(-0.1,.75))
ax.set_yscale('log')
ax.set_xlim(0, time[-1])
ax.set_ylim(1E-4, None)
ax.axvline(x=3.55, linestyle='--', linewidth=1, color='black')

plt.legend(loc='upper left')
plt.show()
+	
+

+ 51 - 0
analysis/halo.py

@@ -0,0 +1,51 @@
+"""Plot the encounters for the halo analysis."""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from analysis import utils
+
def plotDisk(data, ax, axisOn, zoomOut=False):
    """Plot the disk of an encounter

    Parameters:
        ax: the matplotlib axis where the data should be plotted
        data: a data dictionary in the format saved by the simulation
        axisOn (bool): whether to include the y axis in this subplot.
            It can be omitted when it is shared among subplots.
        zoomOut (bool): scale of the plotting
    """
    r_vec = data['r_vec']

    utils.plotTracks(ax, data['tracks'])
    utils.plotCenterMasses(ax, data)
    mask = data['types'][:,0]=='disk'
    # Bug fix: the positions array is r_vec; 'r' was undefined (NameError)
    ax.scatter(r_vec[mask][:,0], r_vec[mask][:,1], c='#c40f4c', s=2, marker='.')
    # Bug fix: setSize takes the target axis as its first argument
    # (as in every other call to it in this codebase)
    if zoomOut: utils.setSize(ax, x=(-10, 10), y=(-10, 10), mode='square')
    else: utils.setSize(ax, x=(-5, 5), y=(-5, 5), mode='square')

    utils.setAxes(ax, x=r'x\' / $r_{min}$', y=r'y\' / $r_{min}$',
        ycoords=(-0.2, .8), mode=('bottomleft' if axisOn else 'bottom'))
    utils.stylizePlot([ax])
+
# Plot both encounters described in the report at 3 different times
f, axs = plt.subplots(2, 3,  figsize=(15, 10), sharey=False, sharex=False)

# (run name, snapshot time, zoomed out?) for each panel, row by row
panels = [('parabolic_nohalo', 6800, False),
          ('parabolic_nohalo', 8800, False),
          ('parabolic_nohalo', 24800, True),
          ('parabolic_halo', 9400, False),
          ('parabolic_halo', 12800, False),
          ('parabolic_halo', 20800, True)]

for k, (name, t, zoom) in enumerate(panels):
    data = utils.loadData(name, t)
    # The y axis is hidden only on the middle column of each row
    plotDisk(data, ax=axs[k // 3, k % 3], axisOn=(k % 3 != 1), zoomOut=zoom)

f.subplots_adjust(hspace=0, wspace=0)
plt.tight_layout()
plt.show()

+ 100 - 0
analysis/inclination.py

@@ -0,0 +1,100 @@
+"""Plot a survey of the inclination angle"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from analysis import utils
+
def plotEdgeView(data, ax, axisOn):
    """Plot the encounter as viewed from the edge of the galaxy

    Parameters:
        ax: the matplotlib axis where the data should be plotted
        data: a data dictionary in the format saved by the simulation.
            NOTE: data['tracks'] is rotated/shifted IN PLACE, so the
            dictionary should be reloaded before being reused.
        axisOn (bool): whether to include the y axis in this subplot.
            It can be omitted when it is shared among subplots.
    """
    r_vec = data['r_vec']
    theta, phi = data['CONFIG']['galaxy1']['orientation']

    # Obtain the edge view, rotate 90º around y axis
    # Unit spin vector of galaxy 1 from its (theta, phi) orientation
    spin = np.array([np.sin(theta)*np.cos(phi), 
            np.sin(theta)*np.sin(phi), 
            np.cos(theta)])
    # phi is reused here as the rotation angle of the view (90º about y)
    phi = np.pi/2
    M = np.array([[np.cos(-phi), 0,np.sin(-phi)], 
        [0, 1, 0], 
        [-np.sin(-phi), 0, np.cos(-phi)]])
    r_vec = np.einsum('ai,ij->aj', r_vec, M)
    # NOTE(review): 'spin' is rotated here but never used afterwards
    spin = np.einsum('i,ij->j', spin, M)
    data['tracks'] = np.einsum('abi,ij->abj', data['tracks'], M)
    data['tracks'] -= r_vec[0,:] #Relative to center of galaxy
    r_vec[:,:] -= r_vec[0,:] 

    # Flip y to be consistent with Toomre and Toomre
    r_vec[:,1] = -r_vec[:,1]
    data['tracks'][:,:,1] *= -1

    # Plotting
    utils.plotCenterMasses(ax, {'r_vec': r_vec, 'type':data['type']})
    # Track of the companion's central mass only
    ax.plot(data['tracks'][1,:,0], data['tracks'][1,:,1], 
        c='black', alpha=1.0, linewidth=1)
    ax.scatter(r_vec[:,0], r_vec[:,1], s=0.01, marker='.', 
        c='#c40f4c', alpha=.6)
    utils.setSize(ax, x=(-1.5, 1.5), y=(-1.5,3.5), mode='square')
    utils.stylizePlot([ax])
    utils.setAxes(ax, x=r'x / $r_{min}$', y=r'y / $r_{min}$', 
        ycoords=(-0.2, .8), mode=('bottomleft' if axisOn else 'bottom'))
+    
def plotFrontalView(data, ax, axisOn):
    """Plot the encounter as viewed normal to the galactic plane

    Parameters:
        ax: the matplotlib axis where the data should be plotted
        data: a data dictionary in the format saved by the simulation.
            NOTE: data['tracks'] is rotated/shifted IN PLACE, so the
            dictionary should be reloaded before being reused.
        axisOn (bool): whether to include the y axis in this subplot.
            It can be omitted when it is shared among subplots.
    """
    r_vec = data['r_vec']
    theta, phi = data['CONFIG']['galaxy1']['orientation']
    
    # Rotate the position vectors into the appropiate view:
    # undo the azimuthal (M2) and then the polar (M1) orientation
    M2 = np.array([[np.cos(-phi), np.sin(-phi), 0],
        [-np.sin(-phi), np.cos(-phi), 0],
        [0, 0, 1]])
    M1 = np.array([[1, 0, 0],
        [0, np.cos(-theta), np.sin(-theta)],
        [0, -np.sin(-theta), np.cos(-theta)]])
    r_vec = np.einsum('ai,ij->aj', np.einsum('ai,ij->aj', r_vec, M2), M1)
    data['tracks'] = np.einsum('abi,ij->abj', data['tracks'], M2)
    data['tracks'] = np.einsum('abi,ij->abj', data['tracks'], M1)
    # Tracks relative to the first center's track; positions relative to
    # the first center's current position
    data['tracks'] -= data['tracks'][0]
    r_vec[:,:] -= r_vec[0,:]

    #Plotting
    utils.plotCenterMasses(ax, {'r_vec': r_vec, 'type':data['type']})
    utils.plotTracks(ax, data['tracks'])
    ax.scatter(r_vec[:,0], r_vec[:,1], c='#c40f4c', s=0.01, marker='.')
    utils.setSize(ax, x=(-3, 2), y=(-1.5, 1.5), mode='square')
    utils.stylizePlot([ax])
    utils.setAxes(ax, x=r'x / $r_{min}$', y=r'y / $r_{min}$', 
        ycoords=(-0.2, .8), mode=('bottomleft' if axisOn else 'bottom'))
+
#########################################################

# Select the inclinations to survey
inclinations = [15, 45, 60, 90, 135]
f, axs = plt.subplots(2, len(inclinations),  figsize=(11, 6),
    sharey=False, sharex=False, gridspec_kw = {'height_ratios':[5/3, 1]})
t = 5000  # time of plots

# Plot both views for each encounter. The data is loaded once per view
# because the plotting functions modify the arrays in place.
for i, inclination in enumerate(inclinations):
    showAxis = (i == 0)  # only the leftmost column carries the y axis
    name = 'i{}'.format(inclination)
    plotEdgeView(utils.loadData(name, t), ax=axs[0, i], axisOn=showAxis)
    plotFrontalView(utils.loadData(name, t), ax=axs[1, i], axisOn=showAxis)

f.subplots_adjust(hspace=0, wspace=0)
plt.tight_layout()
plt.show()

File diff suppressed because it is too large
+ 278 - 0
analysis/interactive.ipynb


+ 46 - 0
analysis/isolation.py

@@ -0,0 +1,46 @@
+"""Basic testing to ensure a galaxy evolves unperturbed
+in isolation."""
+
+import matplotlib.pyplot as plt
+import numpy as np
+from scipy.signal import savgol_filter
+
+from analysis import utils
+
f, ax = plt.subplots(1, 1)

time, median, areas = [], [], []

# The earliest snapshot acts as the unperturbed reference
truth = utils.loadData('isolated', 100)
percentiles = [6, 16, 31, 100-31, 100-16, 100-6]

for n in np.linspace(100, 10000, 100):
    data = utils.loadData('isolated', int(n))
    time.append(data['t'])
    # Per-particle deviation of the radius from the reference snapshot
    deviations = np.abs(np.linalg.norm(data['r_vec'], axis=-1)
        - np.linalg.norm(truth['r_vec'], axis=-1))
    median.append(np.median(deviations))
    areas.append(np.percentile(deviations, percentiles))

# Smooth the median and the percentile bands, then plot them
areas = savgol_filter(np.array(areas), 11, 3, axis=0)
median = savgol_filter(median, 11, 3)
for band in range(3):
    ax.fill_between(time, areas[:, band], areas[:, -1-band],
        alpha=.25, color='darkorange')
ax.plot(time, median, color='darkorange')

utils.stylizePlot([ax])
utils.setAxes(ax, x='Time', y='Error in radius',
    xcoords=(.85, -0.08), ycoords=(-0.1, .75))
ax.set_yscale('log')
ax.set_xlim(0, time[-1])
ax.set_ylim(1E-5, None)

plt.show()

+ 36 - 0
analysis/massFractions.py

@@ -0,0 +1,36 @@
+"""Plot the mass that, at large times, is stolen, in the tail
+or still orbitting the main mass for different perturbing masses
+"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from analysis import utils
+from analysis.segmentation import segmentEncounter
+
# Masses are given relative to 100, with respect to the main mass
masses = np.array([20, 33, 50, 75, 100, 150, 200, 300, 400, 500, 600])
tailFractions, stolenFractions = [], []

f, ax = plt.subplots(1, 1, sharey=True)

# For each encounter calculate the fraction of stolen and tail mass
for mass in masses:
    data = utils.loadData('{}mass'.format(mass), 10000)

    masks, _, _ = segmentEncounter(data)
    # Bug fix: for boolean masks len() is the TOTAL particle count;
    # np.sum() counts the selected particles (as done in bridgeEvolution.py)
    tailFractions.append(np.sum(masks[3])/len(data['r_vec']))
    stolenFractions.append(np.sum(masks[1])/len(data['r_vec']))

# Plotting
ax.plot(masses/100, tailFractions, marker='+', color='black',
    markersize=10, label='Tail')
ax.plot(masses/100, stolenFractions, marker='o', color='black',
    markersize=8, linestyle='dashed', label='Stolen', markerfacecolor='none')

ax.legend()
utils.setAxes(ax, x='Companion mass / main mass', y='Mass fraction',
    xcoords=(.7,-0.08) ,ycoords=(-0.08,.8))
utils.stylizePlot([ax])
ax.set_ylim(0,.45)
plt.show()

+ 39 - 0
analysis/orbit.py

@@ -0,0 +1,39 @@
+"""Basic testing to ensure a pair of galaxies follows the correct orbit."""
+
+import matplotlib.pyplot as plt
+import numpy as np
+from scipy.signal import savgol_filter
+
+from analysis import utils
+
f, ax = plt.subplots(1, 1)

time = []
deviations = []

for n in np.linspace(100, 10000, 100):
    # Load the data
    data = utils.loadData('orbit', int(n))
    # Find the deviation
    # The analytical orbit is r = 1/(1 + cos(phi))
    time.append(data['t'])
    phi = np.arctan2(data['r_vec'][0,1], data['r_vec'][0,0])
    truthr = 1/(1 + np.cos(phi))
    # (leftover debug print removed)
    deviation = np.abs(np.linalg.norm(data['r_vec'][0,:2]) - truthr)
    deviations.append(deviation)

# Plot (smoothed)
deviations = savgol_filter(deviations, 11, 3)
ax.plot(time, deviations, color='darkorange')
utils.stylizePlot([ax])
utils.setAxes(ax, x='Time', y='Error in radius',
    xcoords=(.85,-0.08), ycoords=(-0.1,.75))
ax.set_yscale('log')
ax.set_xlim(0, time[-1])
ax.set_ylim(1E-5, None)

plt.show()

+ 61 - 0
analysis/orbitalDecay.py

@@ -0,0 +1,61 @@
+"""Plot the trajectories followed by the central masses
+to illustrate orbital decay due to dark matter halos"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from analysis import utils
+
def plotTrajectory(data, ax, axisOn, trajectory):
    """Plot the trajectory followed by the central masses

    Parameters:
        ax: the matplotlib axis where the data should be plotted
        data: a data dictionary in the format saved by the simulation
        axisOn (bool): whether to include the y axis in this subplot.
            It can be omitted when it is shared among subplots.
        trajectory (string): type of theoretical trajectory 
            ('ellipse' or 'hyperbola') to superpose
    """
    # Bug fix: 'patches' was referenced below but never imported
    import matplotlib.patches as patches

    # Plot trajectory
    ax.plot(np.array(data['tracks'])[0,:,0], np.array(data['tracks'])[0,:,1],
        c='black', alpha=1.0, linewidth=1)
    ax.plot(np.array(data['tracks'])[1,:,0], np.array(data['tracks'])[1,:,1],
        c='black', alpha=0.6, linewidth=1)
    ax.axis('square')

    # Superpose theoretical trajectory
    if trajectory=='ellipse':
        traj = patches.Arc((-.5,0), width=2, height=np.sqrt(3),
            theta1=-180, theta2=100, linewidth=2, edgecolor='black',
            facecolor='none', alpha=.7, linestyle='dashed')
        ax.add_patch(traj)
        utils.setSize(ax, x=(-1.5, 1.5), y=(-1.5, 1.5))
    # Bug fix: this branch tested 'ellipse' twice, making the hyperbolic
    # trajectory unreachable (and 'hyperbola' raised the exception below)
    elif trajectory=='hyperbola': #x = 1/2 - y^2/2
        y = np.linspace(-2.5, 3, 1000)
        x = 1/2 - y**2/2
        ax.plot(x, y, linewidth=2, color='black',
            alpha=.7, linestyle='dashed')
        utils.setSize(ax, x=(-1.5, 1.5), y=(-1.5, 1.5))
    else: raise Exception('Unknown trajectory')

    # Styling
    utils.setAxes(ax, x=r'x\' / $r_{min}$', y=r'y\' / $r_{min}$',
        ycoords=(-0.2, .8), mode=('bottomleft' if axisOn else 'bottom'))
    utils.stylizePlot([ax])


f, axs = plt.subplots(1, 4,  figsize=(15, 4), sharey=False, sharex=False)

# Plot the four encounters described in the report
data = utils.loadData('elliptical_nohalo', 30000)
plotTrajectory(data, ax=axs[0], axisOn=True, trajectory='ellipse')
data = utils.loadData('elliptical_halo', 30000)
plotTrajectory(data, ax=axs[1], axisOn=True, trajectory='ellipse')
# Bug fix: the hyperbolic encounters are compared against the
# hyperbolic theoretical trajectory, not the elliptical one
data = utils.loadData('hyperbolic_nohalo', 50000)
plotTrajectory(data, ax=axs[2], axisOn=True, trajectory='hyperbola')
data = utils.loadData('hyperbolic_halo', 100000)
plotTrajectory(data, ax=axs[3], axisOn=True, trajectory='hyperbola')

f.subplots_adjust(hspace=0, wspace=0)
plt.tight_layout()
plt.show()

+ 86 - 0
analysis/performance.py

@@ -0,0 +1,86 @@
+import pickle
+import numpy as np
+from timeit import default_timer as timer
+import matplotlib.pyplot as plt
+
+from acceleration import *
+import analysis.utils as utils
+
+
timescale = 10  # ms, approximate time per attempt
massive = True  # whether all particles are massive

functions = [bruteForce, bruteForceNumba, bruteForceNumbaOptimized,
    bruteForceCPP, barnesHutCPP]
labels = ['Brute force', 'Brute force Numba', 'Brute force Numba opt.',
    'Brute force [C++]', 'Barnes-Hut [C++]']
styles = ['solid', 'dashed', 'solid', 'dashed', 'solid']
colours = ['cornflowerblue', 'cornflowerblue', 'darkorange', 'darkorange', 'black']
if not massive:  # do not evaluate barnes hut
    functions, labels = functions[:-1], labels[:-1]
    styles, colours = styles[:-1], colours[:-1]


timess = []

for function in functions:
    n = 10  # first data point
    # Run attempts with multiple iterations each
    iterations = 1000
    attempts = 6
    times = []
    timess.append(times)
    while True:
        # Evaluate the performance
        localTimes = []
        for _ in range(attempts):
            # Initialize random data
            r_vec = np.random.rand(n, 3).astype(np.float64)
            if massive:
                m_vec = np.random.rand(n).astype(np.float64)
            else:
                m_vec = np.zeros(n).astype(np.float64)
                m_vec[0] = 1
            start = timer()
            for _ in range(iterations):
                function(r_vec, m_vec)
            stop = timer()
            localTimes.append((stop - start)*1000/iterations)  # in ms
        # Calculate mean and error
        # Discard high times since these are likely due to interferences
        localTimes = np.sort(localTimes)[:attempts-2]
        print(localTimes, iterations)
        error = np.std(localTimes)/np.sqrt(attempts-1)
        times.append([n, np.mean(localTimes), error])

        # Recalculate how many iterations would have been sensible
        iterations = min(int(timescale/np.mean(localTimes)), 1000)
        if iterations < 1:
            break

        # Increase the number of points
        n *= 2

# Plot the results
f, ax = plt.subplots(1, 1,  figsize=(6, 4), sharey=True)

for times, label, style, colour in zip(timess, labels, styles, colours):
    times = np.array(times)
    ax.errorbar(times[:,0], times[:,1], yerr=times[:,2],
        label=label, marker='+', linestyle=style, c=colour)

ax.loglog()
if massive:
    utils.setAxes(ax, x='Number of massive particles', y='Time / ms',
    xcoords=(.7, -0.1), ycoords=(-0.1,.7))
else:
    utils.setAxes(ax, x='Number of massless particles', y='Time / ms',
    xcoords=(.7, -0.1), ycoords=(-0.1,.7))
utils.stylizePlot([ax])
ax.legend()
ax.set_xlim((10, None))
plt.show()


# Bug fix: the pickle file was never closed; a context manager
# guarantees the buffer is flushed to disk
with open('data/performance.pickle', "wb") as file:
    pickle.dump(timess, file)

+ 48 - 0
analysis/radialDensity.py

@@ -0,0 +1,48 @@
+"""Plot a profile of the radial density"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from analysis import utils
+
def plotRadialDensity(data):
    """Plots a profile of the radial density, divided into components.

    Parameters:
        data: a data dictionary in the format saved by the simulation
    """
    f, ax = plt.subplots(1, 1)

    # We measure the radial distance from the Center Of Mass
    COM = np.sum(data['r_vec'] * data['mass'][:,np.newaxis], axis=0) / np.sum(data['mass'])
    # Bug fix: the positions are stored under 'r_vec' (as used for COM above)
    r = np.linalg.norm((data['r_vec'] - COM), axis=-1)

    # Divide the particles into groups
    diskMask = data['types'][:,1] == 'disk'
    bulgeMask = data['types'][:,1] == 'bulge'
    luminousMask = np.logical_or(diskMask, bulgeMask)
    darkMask = data['types'][:,1] == 'dark'

    # Plot each group separately
    # Bug fix: 'label' appeared both in the shared style and explicitly in
    # every call, raising a duplicate-keyword TypeError; it is per-call only.
    style = {'histtype':'step', 'cumulative':True, 'bins':np.logspace(-2, 2, 1000),
        'color':'black', 'linestyle':'dashed'}
    plt.hist(r[diskMask], weights=data['mass'][diskMask],
        label='disk', **style)
    plt.hist(r[bulgeMask], weights=data['mass'][bulgeMask],
        label='bulge', **style)
    plt.hist(r[luminousMask], weights=data['mass'][luminousMask],
        label='luminous', **style)
    plt.hist(r[darkMask], weights=data['mass'][darkMask],
        label='dark', **style)

    # Plotting styling
    plt.legend(loc='upper left')
    plt.yscale('log'); plt.xscale('log')
    # Bug fix: axis limits go through setSize; setAxes expects axis labels
    utils.setSize(ax, x=(3E-2, 1E2), y=(1E-2, 2))
    utils.setAxes(ax, x=r'$r$', y=r'Cumulative mass',
        xcoords=(.9,-0.08), ycoords=(-0.1,.7))
    utils.stylizePlot([ax])
    plt.show()


data = utils.loadData('hyperbolic_halo', 30000)
plotRadialDensity(data)

+ 57 - 0
analysis/ringPlot.py

@@ -0,0 +1,57 @@
+"""Make a plot of an encountering with rings (fig 1, fig 3) and 
+colour each ring according to the initial distance. It plots a
+prograde equalmass encounter by default.
+"""
+
+import matplotlib.pyplot as plt
+from scipy.interpolate import splprep, splev, interp1d
+from matplotlib import markers
+import numpy as np
+
+from analysis.colormaps import parula_map
+from analysis import utils
+
def plotRings(ax, data, n):
    """Make a ring plot of the data.

    Parameters:
        ax: the matplotlib axis where the data should be plotted
        data: a data dictionary in the format saved by the simulation
        n (int): the number of particles to plot. These will be interpolated
            and can be much higher than those originally in the simulation.
    """
    # Identify all the rings ('0' marks particles not on any ring)
    rings = np.unique(data['type'][:,1])
    rings = rings[rings!='0']
    # Plot each ring, coloured by its position in the sequence
    for ring, color, in zip(rings, 
                     parula_map(np.linspace(1.0, 0., len(rings)))):
        xy = data['r_vec'][data['type'][:,1]==ring] #data for this ring
        # Interpolate data (to use opacity to plot local density):
        # fit a periodic spline (per=1: the ring is a closed curve) through
        # the ring's x-y points, then resample it at n points.
        tck, u = splprep(xy[:,:2].T, u=None, s=0.0, per=1) 
        f = interp1d(np.linspace(0,1,len(u)), u, kind='cubic') 
        x_new, y_new = splev(f(np.linspace(0,1,n)), tck, der=0)
        # Plot data; the low alpha makes overlapping points appear denser
        ax.scatter(x_new, y_new, s=2, c=[color], alpha=0.01)
    utils.plotCOM(ax)
    utils.plotCenterMasses(ax, data)
    utils.plotTracks(ax, data['tracks']) 
    # Styling
    utils.setAxes(ax, mode='hide')
+
####################################
####################################

# Snapshot times at which to draw the panels
ts = [2000, 3000, 3500, 4000, 4900]
figsize = (12, 4)
fileName = 'prograde_equalmass'

f, axs = plt.subplots(1, len(ts),  figsize=figsize, sharey=False)
for ax, t in zip(axs, ts):
    plotRings(ax, utils.loadData(fileName, t), n=10000)
    # Fixed height; the width keeps each panel square given the figure aspect
    height = 3.0
    width = 3 * 1/len(ts) * figsize[0]/figsize[1]
    utils.setSize(ax, x=(-width, width), y=(-height, height), mode='square')
f.subplots_adjust(hspace=0, wspace=0)
plt.show()

+ 156 - 0
analysis/segmentation.py

@@ -0,0 +1,156 @@
+"""Segmentation algorithm used to identify the different structures
+that are formed in the encounter. This file can be called from the
+command line to make an illustrative plot of the algorithm.
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.patches as patches
+
+import utils
+
+
def segmentEncounter(data, plot=False, mode='all'):
    """Segment the encounter into tail, bridge, orbitting and
    stolen particles as described in the report.

    Parameters:
        data: a data instance as saved by the simulation to a pickle file
        plot: if True, each segmentation step is plotted and shown.
            Highly useful for debugging.
        mode (string): if mode is 'all', all parts of the encounter are
            identified. If mode is 'bridge' only the bridge is identified
            and the return value is ((bridgeMask, None, None, None),
            None, None). This is useful when there may be no tail.

    Returns:
        masks (tuple): tuple of arrays (bridge, stolen, orbitting, tail).
            Note that the bridge entry is a boolean mask over all
            particles, while the other three are integer index arrays;
            all four can be used directly as e.g. data['r_vec'][bridgeMask].
        shape (tuple): tuple of (distances, angles) as measured from the
            center of the main galaxy and with respect to the x axis.
            They define the shape of the tail.
        length (float): total length of the tail.

    Raises:
        Exception: if no tail can be identified (step 4 fails).
    """
    nRings = 100 # number of rings to use when segmenting the data

    # Localize the central masses
    r_vec = data['r_vec']
    centers = r_vec[data['type'][:,0]=='center']
    rCenters_vec = centers[1] - centers[0]
    rCenters = np.linalg.norm(rCenters_vec)
    rCenters_unit = rCenters_vec/np.linalg.norm(rCenters_vec)
    # Take particles to be on the tail a priori and
    # remove them as they are found in other structures
    particlesLeft = np.arange(0, len(r_vec))


    if plot:
        colour = '#c40f4c'
        f, axs = plt.subplots(1, 3,  figsize=(9, 4), sharey=False)
        f.subplots_adjust(hspace=0, wspace=0)
        axs[0].scatter(r_vec[:,0], r_vec[:,1], c=colour, alpha=0.1, s=0.1)
        axs[0].axis('equal')
        utils.plotCenterMasses(axs[0], data)
        axs[0].axis('off')

    # Step 1: project points to see if they are part of the bridge,
    # i.e. between the two centers (30%-70% along the line joining them)
    # and within distance 2 of that line
    parallelProjection = np.dot(r_vec - centers[0], rCenters_unit)
    perpendicularProjection = np.linalg.norm(r_vec - centers[0][np.newaxis] 
        - parallelProjection[:,np.newaxis] * rCenters_unit[np.newaxis], axis=-1)
    bridgeMask = np.logical_and(np.logical_and(0.3*rCenters < parallelProjection,
        parallelProjection < .7*rCenters), perpendicularProjection < 2)

    # Remove the bridge
    notInBridge = np.logical_not(bridgeMask)
    r_vec = r_vec[notInBridge]
    particlesLeft = particlesLeft[notInBridge]

    if mode == 'bridge':
        return (bridgeMask, None, None, None), None, None

    # Step 2: select stolen particles by checking distance to centers
    stolenMask = (np.linalg.norm(r_vec - centers[0][np.newaxis], axis=-1) 
        > np.linalg.norm(r_vec - centers[1][np.newaxis], axis=-1))
    # Remove the stolen part
    notStolen = np.logical_not(stolenMask)
    r_vec = r_vec[notStolen]
    particlesLeft, stolenMask = particlesLeft[notStolen], particlesLeft[stolenMask]

    # Step 3: segment data into concentric rings (spherical shells really)
    # centered on the main galaxy
    r_vec = r_vec - centers[0]
    r = np.linalg.norm(r_vec, axis=-1)
    edges = np.linspace(0, 30, nRings) # nRings concentric spheres 
    indices = np.digitize(r, edges) # Classify particles into shells

    if plot:
        axs[1].scatter(r_vec[:,0], r_vec[:,1], c=colour, alpha=.1, s=.1)
        axs[1].axis('equal')
        axs[1].scatter(0, 0, s=100, marker="*", c='black', alpha=.7)
        axs[1].axis('off')
    
    # Step 4: find start of tail. The tail starts at the first shell whose
    # particles cluster in one direction: |mean position| / mean radius
    # approaches 1 for a clustered shell, 0 for a uniformly filled one.
    start = None
    for i in range(1, nRings+1):
        rMean = np.mean(r[indices==i])
        rMean_vec = np.mean(r_vec[indices==i], axis=0)
        parameter = np.linalg.norm(rMean_vec)/rMean

        if plot:
            # Annotate each shell with its clustering parameter
            circ = patches.Circle((0,0), edges[i-1], linewidth=0.5,edgecolor='black',facecolor='none', alpha=.7)
            axs[1].add_patch(circ)
            txtxy = edges[i-1] * np.array([np.sin(i/13), np.cos(i/13)])
            axs[1].annotate("{:.2f}".format(parameter), xy=txtxy, backgroundcolor='#ffffff55')
        
        if start is None and parameter>.8 : 
            start = i #Here starts the tail
            startDirection = rMean_vec/np.linalg.norm(rMean_vec)
            if not plot: break;

    if start is None: #abort if nothing found
        raise Exception('Could not identify tail')

    # Step 5: remove all circles before start
    inInnerRings = indices < start
    # Remove particles on the opposite direction to startDirection. 
    # in the now innermost 5 rings. Likely traces of the bridge.
    oppositeDirection = np.dot(r_vec, startDirection) < 0
    in5InnermostRings = indices <= start+5
    orbitting = np.logical_or(inInnerRings, 
        np.logical_and(oppositeDirection, in5InnermostRings))
    orbittingMask = particlesLeft[orbitting]
    r_vec = r_vec[np.logical_not(orbitting)]
    tailMask = particlesLeft[np.logical_not(orbitting)]

    if plot:
        axs[2].scatter(r_vec[:,0], r_vec[:,1], c=colour, alpha=0.1, s=0.1)
        axs[2].axis('equal')
        axs[2].scatter(0, 0, s=100, marker="*", c='black', alpha=.7)
        axs[2].axis('off')

    # Step 6: measure tail length and shape
    r = np.linalg.norm(r_vec, axis=-1)
    indices = np.digitize(r, edges)
    # Make list of barycenters (one per non-empty shell)
    points = [list(np.mean(r_vec[indices==i], axis=0))
        for i in range(1, nRings) if len(r_vec[indices==i])!=0]
    points = np.array(points)
    # Calculate total length as the sum of the segments between barycenters
    lengths = np.sqrt(np.sum(np.diff(points, axis=0)**2, axis=1))
    length = np.sum(lengths)
    # Shape (for 2D only): polar coordinates of the barycenters
    angles = np.arctan2(points[:,1], points[:,0])
    distances = np.linalg.norm(points, axis=-1)
    shape = (distances, angles)

    if plot:
        axs[2].plot(points[:,0], points[:,1], c='black', linewidth=0.5, marker='+')

    if plot:
        plt.show()

    return (bridgeMask, stolenMask, orbittingMask, tailMask), shape, length
+
+
+if __name__ == "__main__":
+    data = utils.loadData('200mass', 10400)
+    segmentEncounter(data, plot=True)

+ 112 - 0
analysis/simulatedAnnealing.py

@@ -0,0 +1,112 @@
+"""Plots the evolution of the simulated annealing algorithm from a log file"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+import pickle
+import pandas
+from scipy.signal import savgol_filter
+
+from analysis import utils
+
def deroll(arr, limits, start=0):
    """Undo modular wrapping in a logged parameter series.

    Returns a likely guess of what the series looked like before a mod
    operation folded it into [limits[0], limits[1]]: whenever two
    consecutive samples jump by more than half the allowed range, the rest
    of the series is shifted by one full period to keep it continuous.
    For limits = [0, 1] the series [0.5, 0.7, 0.9, 0.1] becomes
    [0.5, 0.7, 0.9, 1.1]. Note that arr is modified in place (and also
    returned).

    Parameters:
        arr (array): series to deroll
        limits (tuple): (lower limit, upper limit)
        start (int): samples before this index are left untouched

    Returns:
        The derolled array (the same object as arr).
    """
    period = limits[1] - limits[0]
    for i in range(start, len(arr) - 1):  # Do not deroll before start
        jump = arr[i+1] - arr[i]
        if np.abs(jump) > period / 2:
            # Shift the tail of the series to the closest equivalent branch
            arr[i+1:] += -period if jump > 0 else period
    return arr
+
def returnBars(arr, n):
    """Running 10th-90th percentile confidence band of a series, smoothed.

    Parameters:
        arr (arr): series to compute the band for
        n (int): rolling-window size (smoothing) for the percentiles

    Returns:
        Tuple (upper, lower): the smoothed running 90th and 10th
        percentiles; the first n samples are dropped.
    """
    rolling = pandas.Series(arr).rolling(window=n, center=False)
    upper = rolling.quantile(.90)
    lower = rolling.quantile(.1)
    # Savitzky-Golay smoothing; skip the first n (NaN-dominated) samples
    return savgol_filter(upper[n:], 101, 3), savgol_filter(lower[n:], 101, 3)
+
# Load the log saved by the simulated annealing algorithm
# (a list of (score, parameters) entries, truncated to 1400 iterations)
log = pickle.load(open('data/logs/log.pickle', "rb" ) )[:1400]

# Define the parameters we wish to plot
mask = [0,1,2,3,5,7,8] #Non-fixed parameters
# Plotting limits for each of the 7 masked parameters
limits = np.array([[0, np.pi], [-np.pi, np.pi], 
    [0, np.pi], [-np.pi, np.pi], 
    [.5,1.0], [.55,.8], [.55,.8]])
# NOTE(review): labels has 8 entries but only 7 parameters are plotted,
# so the last label ($\mu$) is never used — confirm this is intentional.
labels = [r'$i_1$ / rad', r'$\omega_1$ / rad', 
    r'$i_2$ / rad', r'$\omega_2$ / rad', 
    'e', r'$R_1$', r'$R_2$', r'$\mu$']
ticks = [[0, np.pi], [-np.pi,0, np.pi],
    [0, np.pi],[-np.pi, 0, np.pi],
    [.5, 1.0], [.55, .8], [.55, .8]]
ticklabels = [[0,r'$\pi$'],[r'$-\pi$',0,r'$\pi$'],
    [0,r'$\pi$'],[r'$-\pi$',0,r'$\pi$'],
    ['.5','1.0'], ['.55','.8'], ['.55','.8']]

# Mask away the parameters we don't want to plot
scores = np.array([l[0] for l in log])
paramss = np.array([l[1] for l in log])[:,mask]
# Fix conventions for inclination
paramss[:,0] = paramss[:,0] - np.pi
paramss[:,2] = np.pi - paramss[:,2]

# Start plotting: one (taller) row for the metric, one row per parameter
i = np.arange(len(log))
f, axs = plt.subplots(1+len(paramss[0]), 1,  figsize=(10, 10), 
    sharex=True, gridspec_kw = {'height_ratios':[2., 1., 1, 1, 1, 1, 1, 1]})
plt.tight_layout()
utils.stylizePlot(axs)

# Plot metric (score) per iteration with a running confidence band
axs[0].scatter(i, scores, marker='x', c='black', s=5, linewidth=.5)
axs[0].fill_between(i[20:], *returnBars(scores, 20), color='r', alpha=.2)
utils.setSize(axs[0], x=(0, None), y=(0.8, None))
utils.setAxes(axs[0], y='Metric')
# Get twin axis to mark temperature in it
ax2 = axs[0].twiny()
ax2.set_xscale('log')
ax2.set_xlabel('Temperature', fontsize=14)
ax2.invert_xaxis()
# Hard-coded temperature range; presumably matches the annealing
# schedule over these iterations — TODO confirm against the optimizer
ax2.set_xlim((.25, 0.015129))
ax2.set_xticks([.2, .1, .09, .08, .07, .06, .05, .04, .03, .02, .01])
ax2.set_xticklabels([str(i) 
    for i in [.2, .1, .09, .08, .07, .06, .05, .04, .03, .02, .01]])

# Plot parameters one by one
for j in range(0, len(paramss[0])):
    utils.setSize(axs[j+1], x=(0, len(log)), y=limits[j])
    axs[j+1].set_ylabel(labels[j], fontsize=14)
    axs[j+1].set_yticks(ticks[j])
    axs[j+1].set_yticklabels(ticklabels[j])
    # Be careful plotting cyclic parameters
    # (deroll shifts paramss[:,j] in place, so the bars below are
    # computed on the derolled values, not the raw log)
    derolled = deroll(paramss[:,j], limits[j], start=500)
    bar1, bar2 = returnBars(paramss[:,j], 20)
    for k in range(-3, 3): #
        # Plot the confidence intervals an data multiple times
        # to deal with cyclic parameters
        axs[j+1].scatter(i, derolled + k*(limits[j][1]-limits[j][0]), 
            marker='x', c='black', s=5, linewidth=.5)
        axs[j+1].fill_between(i[20:], bar1 + k*(limits[j][1]-limits[j][0]),
            bar2 + k*(limits[j][1]-limits[j][0]), color='r', alpha=.2)

f.align_ylabels(axs[:])
axs[-1].set_xlabel('Iteration', fontsize=14)
plt.show()

+ 38 - 0
analysis/tailEvolution.py

@@ -0,0 +1,38 @@
+"""Plot the evolution of the length of the tail as a function
+of time for different companion masses
+"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+from scipy.signal import savgol_filter
+
+from analysis import utils
+from analysis.segmentation import segmentEncounter
+
# Companion masses, relative to 100 (the mass of the main galaxy)
masses = [50, 100, 200, 400]
labels = ['1:0.5', '1:1', '1:2', '1:4'] # main:companion

f, ax = plt.subplots(1, 1, sharey=True)

for mass, label, style in zip(masses, labels, [':', '-.', '-', '--']):
    sampleTimes = np.linspace(4000, 10000, 61)  # time range
    tailLengths = []
    for t in sampleTimes:
        snapshot = utils.loadData('{}mass'.format(mass), int(t))
        _, _, tailLength = segmentEncounter(snapshot)
        tailLengths.append(tailLength)

    # For some masses we manually omit some misbehaved early points
    if mass == 50:
        sampleTimes, tailLengths = sampleTimes[20:], tailLengths[20:]
    ax.plot(sampleTimes, savgol_filter(tailLengths, 21, 3),
            label=label, c='black', linestyle=style)

ax.legend(title='main:companion', fontsize=14)
utils.setAxes(ax, x='Time', y='Tail length',
              xcoords=(.9, -0.08), ycoords=(-0.08, .7))
utils.setSize(ax, x=(5000, 10000))
utils.stylizePlot([ax])
plt.show()

+ 56 - 0
analysis/tailShapePlot.py

@@ -0,0 +1,56 @@
+"""Make a plot of the shape of the tails as a function of
+mass of the perturbing mass, time and ring size
+"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from analysis import utils
+from analysis.segmentation import segmentEncounter
+
def plotShape(ax, distances, angle, style, label):
    """Plot a tail shape curve, normalized to maximum radius = 1.

    The curve is drawn as radius vs. angle, with the angle axis negated
    and shifted so it starts at zero.

    Parameters:
        ax: the matplotlib axis where the data should be plotted
        distances (arr): the distances of each point in the shape of the tail
        angle (arr): the angle of each point in the shape curve of the tail
        style (string): style (dotted, dashed...) for the line, passed to
            matplotlib.
        label (string): label of the encounter. Used for the legend.
    """
    # Fix: the body previously referenced the global name 'angles' instead
    # of the 'angle' parameter; it only worked because every call site
    # happened to pass a module-level variable with that exact name.
    ax.plot(-angle + np.min(angle), distances/np.max(distances),
        label=label, linestyle=style, c='k', alpha=0.7)
+
+
f, axs = plt.subplots(1, 3,  figsize=(12, 3), sharey=True)

# Varying the mass of the companion
for m, st in zip([50, 100, 200], [':', '-', '--']):
    data = utils.loadData('{}mass70radius'.format(m), 14000)
    _, (distances, angles), _ = segmentEncounter(data, plot=False)
    plotShape(axs[0], distances, angles, st, m/100)
    axs[0].legend(title=r'$m_{companion}$')
utils.setAxes(axs[0], x=r'Angle / rad', y=r'Radius / $r_{max}$', 
    ycoords=(-0.1,.7))

# Varying the time since the start of the encounter
for t, st in zip([4400, 7400, 10400], [':', '-', '--']):
    data = utils.loadData('200mass70radius', t)
    _, (distances, angles), _ = segmentEncounter(data, plot=False)
    plotShape(axs[1], distances, angles, st, t)
    axs[1].legend(title=r'$t$')
utils.setAxes(axs[1], x=r'Angle / rad')

# Varying the radius of the ring.
# Fix: there are five radii but the style list only had four entries, so
# zip silently dropped the 90-radius curve. A fifth (dash-dot-dotted)
# matplotlib dash-tuple style is added so all five curves are drawn.
for r, st in zip([50, 60, 70, 80, 90],
                 [':', '-.', '-', '--', (0, (3, 1, 1, 1))]):
    data = utils.loadData('200mass{}radius'.format(r), 10000)
    _, (distances, angles), _ = segmentEncounter(data, plot=False)
    plotShape(axs[2], distances, angles, st, r)
    axs[2].legend(title=r'Disk radius')
utils.setAxes(axs[2], x=r'Angle / rad')

# Some styling
utils.stylizePlot(axs)
f.subplots_adjust(hspace=0, wspace=0)
plt.show()

+ 48 - 0
analysis/timestepAnalysis.py

@@ -0,0 +1,48 @@
+"""Basic testing to ensure the chosen timestep is sensible and to probe
+the symplectic character of the integrator."""
+
+import matplotlib.pyplot as plt
+import numpy as np
+from scipy.signal import savgol_filter
+
+from analysis import utils
+
f, ax = plt.subplots(1, 1)
runs = [('dt1E-2', r'$dt=10^{-2}$', 1, 'cornflowerblue'),
        ('dt1E-3', r'$dt=10^{-3}$', 10, 'darkorange')]
for name, label, factor, color in runs:
    times, medians, bands = [], [], []
    # Compare each run against the dt=1E-4 run, taken as ground truth
    for n in np.linspace(10, 1000, 100):
        truth = utils.loadData('dt1E-4', int(n*100))
        run = utils.loadData(name, int(n*factor))
        times.append(truth['t'])
        # Per-particle displacement from the ground-truth positions
        displacement = np.linalg.norm(run['r_vec'] - truth['r_vec'], axis=-1)
        medians.append(np.median(displacement))
        bands.append(np.percentile(displacement,
            [6, 16, 31, 100-31, 100-16, 100-6]))

    # Smooth the band edges and the median before plotting
    bands = savgol_filter(np.array(bands), 11, 3, axis=0)
    medians = savgol_filter(medians, 11, 3)
    # Shade nested percentile bands: (6,94), (16,84), (31,69)
    for k in range(3):
        ax.fill_between(times, bands[:,k], bands[:,-1-k], alpha=.25, color=color)
    ax.plot(times, medians, color=color, label=label)
    utils.stylizePlot([ax])
    utils.setAxes(ax, x='Time', y='Displacement',
        xcoords=(.85,-0.08), ycoords=(-0.1,.75))
    ax.set_yscale('log')
    ax.set_xlim(0, times[-1])
    # Hard-coded reference time marker — presumably a key event of the
    # encounter; confirm against the simulation
    ax.axvline(x=3.55, linestyle='--', linewidth=1, color='black')

plt.legend(loc='upper left')
plt.show()
+	
+

+ 100 - 0
analysis/utils.py

@@ -0,0 +1,100 @@
+import pickle
+import numpy as np
+
def loadData(fileName, timestep, absolute=False):
    """Load the data in data/fileName/ at the given timestep.

    Parameters:
        fileName (string): data is loaded from data/fileName/
        timestep (integer): timestep at which to load the data
        absolute (bool): True to load relative to the parent folder.
            Used for the interactive jupyter notebook.

    Returns:
        The data instance that was saved by Simulation.save
    """
    prepend = './..' if absolute else './'
    path = prepend + '/data/{}/data{}.pickle'.format(fileName, timestep)
    # Fix: use a context manager so the file handle is closed promptly
    # (previously it was left for the garbage collector to close)
    with open(path, "rb") as file:
        return pickle.load(file)
+
def calculateEccentricity(M, ref_r, ref_v, r, v):
    """Calculates the orbital eccentricity of a list of particles.

    Parameters:
        M (float): mass of the central object (in units where G = 1,
            as implied by the eccentricity-vector formula below)
        ref_r (array): reference position of the central object
        ref_v (array): reference velocity of the central object
        r (array): list of positions of all particles (n particles, 3)
        v (array): list of velocities of all particles (n particles, 3)

    Returns:
        Array of shape (n particles,) with the eccentricity of each orbit.
        (Note: the previous docstring incorrectly stated (n particles, 3);
        the norm over the last axis reduces it to one value per particle.)
    """
    # Positions/velocities relative to the central object
    v1 = v - ref_v
    r1 = r - ref_r
    # Specific angular momentum h = r x v
    h_vec = np.cross(r1, v1)
    # Make use of $\vec{e} = (v \times (r \times v)) / M - \hat{r}$,
    # and e = |\vec{e}|. (The unused norm of h_vec was removed.)
    e_vec = np.cross(v1, h_vec)/M - r1/np.linalg.norm(r1, axis=-1, keepdims=True)
    return np.linalg.norm(e_vec, axis=-1)
+
+#############################  PLOTTING UTILS ########################
+# Matplotlib can be tedious at times. These short functions make the
+# repetitive parts simpler and ensure consistency.
+
def plotCOM(ax):
    """Mark the center of mass of the system with a cross at (0, 0)."""
    marker_style = dict(marker='+', c='black', s=500,
                        alpha=.5, linewidth=1, zorder=-1)
    ax.scatter([0], [0], **marker_style)
+
def plotCenterMasses(ax, data):
    """Mark the position of each central mass with a black star."""
    centers = data['r_vec'][data['type'][:,0] == 'center']
    ax.scatter(centers[:,0], centers[:,1],
               s=100, marker="*", c='black', alpha=.7)
+
def plotTracks(ax, tracks):
    """Draw the recorded trajectory of each central mass as a thin line."""
    for path in tracks:
        ax.plot(path[:,0], path[:,1], c='black', alpha=1.0, linewidth=1)
+
def setSize(ax, x=None, y=None, mode=None):
    """Set the plotted range of the axis.

    x and y are (min, max) limit tuples; either may be None to leave the
    corresponding limit untouched. With mode='square' the x and y axes
    use the same scale.
    """
    if mode == 'square':
        ax.axis('square')
    if x is not None:
        ax.set_xlim(x)
    if y is not None:
        ax.set_ylim(y)
+
def setAxes(ax, x=None, y=None, xcoords=None, ycoords=None, mode=None):
    """Set the axis labels (x, y) and, optionally, their positions.

    mode='hide' removes all spines and ticks; mode='bottomleft' keeps only
    the bottom and left spines; mode='bottom' keeps only the bottom spine
    and removes the y axis entirely.
    """
    if x is not None:
        ax.set_xlabel(x)
    if y is not None:
        ax.set_ylabel(y)
    if mode == 'hide':
        for side in ('right', 'top', 'left', 'bottom'):
            ax.spines[side].set_visible(False)
        ax.set_xticklabels([])
        ax.set_xticks([])
        ax.set_yticklabels([])
        ax.set_yticks([])
    elif mode == 'bottomleft':
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
    elif mode == 'bottom':
        ax.get_yaxis().set_ticks([])
        ax.set_ylabel('')
        ax.spines['left'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
    if xcoords is not None:
        ax.xaxis.set_label_coords(*xcoords)
    if ycoords is not None:
        ax.yaxis.set_label_coords(*ycoords)
+
def stylizePlot(axs):
    """Add inward ticks on all four sides (a la root style) to each axis."""
    for ax in axs:
        ax.tick_params(axis='both', which='both', direction='in',
                       top=True, right=True)
        ax.minorticks_on()

+ 16 - 0
config/antennae.yml

@@ -0,0 +1,16 @@
+name: antennae
+simulation:
+  tmax: 30
+orbit:
+  e: 0.5
+  R0: 2.2
+galaxy1:
+  orientation: [206.5, -153.2]
+  disk:
+    particles: 10000
+    l: .659
+galaxy2:
+  orientation: [103.2, 145.0]
+  disk:
+    particles: 10000
+    l: .636

+ 48 - 0
config/companion_mass_analysis.yml

@@ -0,0 +1,48 @@
+---
+name: 20mass
+galaxy1:
+  orientation: [180, 0]
+  disk:
+    particles: 10000
+galaxy2:
+  centralMass: .2
+---
+name: 33mass
+galaxy2:
+  centralMass: .33
+---
+name: 50mass
+galaxy2:
+  centralMass: .50
+---
+name: 75mass
+galaxy2:
+  centralMass: .75
+---
+name: 100mass
+galaxy2:
+  centralMass: 1
+---
+name: 150mass
+galaxy2:
+  centralMass: 1.5
+---
+name: 200mass
+galaxy2:
+  centralMass: 2.0
+---
+name: 300mass
+galaxy2:
+  centralMass: 3.0
+---
+name: 400mass
+galaxy2:
+  centralMass: 4.0
+---
+name: 500mass
+galaxy2:
+  centralMass: 5.0
+---
+name: 600mass
+galaxy2:
+  centralMass: 6.0

+ 60 - 0
config/default.yml

@@ -0,0 +1,60 @@
+---
+# When calling > python simulation.py filename.yml --output_folder
+# the simulation will use the default parameters here unless specified
+name: default
+
+simulation:
+  dt: 0.001 #timestep of the simulation
+  tmax: 15 #total runtime of the simulation
+  soft: 0.1 #plummer softening characteristic length
+  saveEvery: 100 #the state of the simulation is saved every saveEvery steps
+  method: bruteForce #method for computing gravitational forces
  # One of 'bruteForce', 'bruteForceNumba',
  # 'bruteForceNumbaOptimized', 'bruteForceCPP', 'barnesHutCPP'.
+
+orbit:
+  e: 1 #eccentricity
+  rmin: 1 #separation at pericenter
+  R0: 4 #separation at t=0
+
+galaxy1:
+  orientation: [0, 0] #[theta, phi] in degrees
+  # These are related to i, $\omega$ through theta = i + 180 and $\omega$ = phi
+  centralMass: 1 #mass of the central point object
+  bulge:
+    model: plummer #alternatively: hernquist
+    totalMass: 0
+    particles: 0 #number of particles
+    l: .04 #characteristic length scale both for plummer and Hernquist models
+  disk:
+    model: uniform #alternatively: rings, exp
+    totalMass: 0
+    particles: 2000 #number of particles
+    l: 0.8 #for uniform: single number for maximum radius
+    #l: [0., .7, 100] #for rings: [closest ring, furthest ring, number of rings]
+    #l: .2 #for exp: characteristic decay length
+  halo:
+    model: NFW #Navarro-Frenk-White profile only
+    totalMass: 0
+    particles: 0 #number of particles
+    rs: 1 #characteristic length scale of NFW. Cutoff is 5*rs
+
+# The same options are available for the second galaxy
+galaxy2:
+  orientation: [0, 0]
+  centralMass: 1
+  bulge:
+    model: plummer
+    totalMass: 0
+    particles: 0
+    l: .04
+  disk: 
+    model: uniform
+    totalMass: 0
+    particles: 0 #the second galaxy does not possess a ring by default
+    l: 0.7
+  halo:
+    model: NFW
+    totalMass: 0
+    particles: 0
+    rs: 1

+ 70 - 0
config/halo_analysis.yml

@@ -0,0 +1,70 @@
+---
+name: elliptical_nohalo
+orbit:
+  e: 0.5
+  R0: 3
+galaxy1:
+  orientation: [240, 0]
+  centralMass: 0
+  bulge:
+    totalMass: 0.5
+    particles: 500
+  disk:
+    model: 'exp'
+    totalMass: 0.5
+galaxy2:
+  orientation: [180, 0]
+---
+name: hyperbolic_nohalo
+orbit:
+  e: 1.0
+  R0: 10
+galaxy1:
+  orientation: [240, 0]
+  centralMass: 0
+  bulge:
+    totalMass: 0.5
+    particles: 500
+  disk:
+    model: 'exp'
+    totalMass: 0.5
+galaxy2:
+  orientation: [180, 0]
+---
+name: elliptical_halo
+orbit:
+  e: 0.5
+  R0: 3
+galaxy1:
+  orientation: [240, 0]
+  centralMass: 0
+  bulge:
+    totalMass: 0.05
+    particles: 500
+  disk:
+    totalMass: 0.15
+    particles: 2000
+  halo:
+    totalMass: 0.8
+    particles: 2000
+galaxy2:
+  orientation: [180, 0]
+---
+name: hyperbolic_halo
+orbit:
+  e: 1.0
+  R0: 10
+galaxy1:
+  orientation: [240, 0]
+  centralMass: 0
+  bulge:
+    totalMass: 0.05
+    particles: 500
+  disk:
+    totalMass: 0.15
+    particles: 2000
+  halo:
+    totalMass: 0.8
+    particles: 2000
+galaxy2:
+  orientation: [180, 0]

+ 30 - 0
config/inclination_analysis.yml

@@ -0,0 +1,30 @@
+---
+name: i0
+galaxy1:
+ orientation: [180, 0]
+ disk:
+    particles: 10000
+---
+name: i15
+galaxy1:
+ orientation: [195, 0]
+---
+name: i30
+galaxy1:
+ orientation: [210, 0]
+---
+name: i45
+galaxy1:
+ orientation: [225, 0]
+---
+name: i60
+galaxy1:
+ orientation: [240, 0]
+---
+name: i90
+galaxy1:
+ orientation: [270, 0]
+---
+name: i135
+galaxy1:
+ orientation: [315, 0]

+ 6 - 0
config/isolated.yml

@@ -0,0 +1,6 @@
+---
+name: isolated
+simulation:
+  tmax: 30
+galaxy2:
+  centralMass: .0

+ 7 - 0
config/orbit.yml

@@ -0,0 +1,7 @@
+---
+name: orbit
+simulation:
+  soft: 0
+galaxy1:
+  disk:
+    particles: 0

+ 7 - 0
config/prograde_equalmass.yml

@@ -0,0 +1,7 @@
+---
+name: prograde_equalmass
+galaxy1:
+  orientation: [180, 0] #prograde
+  disk:
+    model: rings
+    l: [.2, .8, 7]

+ 7 - 0
config/retrograde_equalmass.yml

@@ -0,0 +1,7 @@
+---
+name: retrograde_equalmass
+galaxy1:
+  orientation: [0, 0] #retrograde
+  disk:
+    model: rings
+    l: [.2, .8, 7]

+ 51 - 0
config/tail_shape_analysis.yml

@@ -0,0 +1,51 @@
+---
+name: 200mass50radius
+galaxy1:
+  orientation: [180, 0] #prograde
+  disk:
+    particles: 10000
+    l: 0.5
+galaxy2:
+  centralMass: 2.0
+---
+name: 200mass60radius
+galaxy1:
+  disk:
+    l: 0.6
+galaxy2:
+  centralMass: 2.0
+---
+name: 200mass70radius
+galaxy1:
+  disk:
+    l: 0.7
+galaxy2:
+  centralMass: 2.0
+---
+name: 200mass80radius
+galaxy1:
+  disk:
+    l: 0.8
+galaxy2:
+  centralMass: 2.0
+---
+name: 200mass90radius
+galaxy1:
+  disk:
+    l: 0.9
+galaxy2:
+  centralMass: 2.0
+---
+name: 100mass70radius
+galaxy1:
+  disk:
+    l: 0.7
+galaxy2:
+  centralMass: 1.0
+---
+name: 50mass70radius
+galaxy1:
+  disk:
+    l: 0.7
+galaxy2:
+  centralMass: 0.5

+ 26 - 0
config/timestep_analysis.yaml

@@ -0,0 +1,26 @@
+---
+name: dt1E-2
+simulation:
+  dt: 0.01
+  tmax: 10
+  saveEvery: 10
+galaxy1:
+  orientation: [180, 0]
+  disk:
+    model: rings
+    particles: 1000
+    l: [.2, .7, 50]
+---
+name: dt1E-1
+simulation:
+  dt: 0.1
+---
+name: dt1E-3
+simulation:
+  dt: 0.001
+  saveEvery: 100
+---
+name: dt1E-4
+simulation:
+  dt: 0.0001
+  saveEvery: 1000

+ 13 - 0
config/toomre1972.yml

@@ -0,0 +1,13 @@
+name: toomre1972
+orbit:
+  e: 0.5
+  R0: 2.2
+galaxy1:
+  orientation: [240, -30]
+  disk:
+    l: .7
+galaxy2:
+  orientation: [120, -30]
+  disk:
+    particles: 2000
+    l: .7

+ 2 - 0
cpp/Makefile

@@ -0,0 +1,2 @@
+acclib.so: acclib.cpp Node.cpp Node.h
+	g++ -shared -o acclib.so -fPIC acclib.cpp Node.cpp -O3

+ 103 - 0
cpp/Node.cpp

@@ -0,0 +1,103 @@
+#include "Node.h"
+#include <vector>
+#include <iostream>
+#include <math.h>
+using namespace std;
+
/*
Node constructor. Used recursively to build the Barnes-Hut octree. px, py, pz
denote the corner (lowest value for each dimension) of the cubic box of size
pSize. pBodies is a vector containing all the nodes that must be placed in
this box. NOTE(review): internal nodes are heap-allocated here and never
freed by this class — confirm the intended ownership (see acclib.cpp).
*/
Node::Node(const vector<Node*> &pBodies, const double pSize, 
    const double px, const double py, const double pz){
    size = pSize; // Required later for treeWalk

    // Divide into subnodes (octants)
    vector<Node*> subBodies[2][2][2];

    for (int i = 0; i < pBodies.size(); i++){
        int xIndex, yIndex, zIndex;

        // Classify each body against the midpoint of the box in x, y, z
        if (pBodies[i]->COM[0] < (px + (size / 2))) xIndex = 0;
        else xIndex = 1;

        if (pBodies[i]->COM[1] < (py + (size / 2))) yIndex = 0;
        else yIndex = 1;

        if (pBodies[i]->COM[2] < (pz + (size / 2))) zIndex = 0;
        else zIndex = 1;

        subBodies[xIndex][yIndex][zIndex].push_back(pBodies[i]);
    }


    // Recursively place the nodes
    // g++ will unroll these loops
    for (int i = 0; i < 2; i++){
        for (int j = 0; j < 2; j++){
            for (int k = 0; k < 2; k++){
                switch(subBodies[i][j][k].size()){
                    case 0: continue; // empty octant: no child
                    case 1: // single body: it becomes the child directly
                        subBodies[i][j][k][0]->size = size/2;
                        children.push_back(subBodies[i][j][k][0]);
                        break;
                    default: // several bodies: build a subtree for the octant
                        children.push_back(new Node(subBodies[i][j][k], size/2,
                            px + size/2*i, py + size/2*j, pz + size/2*k));
                }
            }
        }
    }

    // Recursively calculate the COM (mass-weighted mean of the children)
    memset(COM, 0, sizeof(COM)); // Set COM to 0s
    m = 0.; // mass
    for (int i = 0; i < children.size(); i++){
        m += children[i]->m;
        for (int j = 0; j < 3; j++) 
            COM[j] += children[i]->m * children[i]->COM[j];
    }
    // COM only relevant if there is mass in the octant
    if (m > 0) for (int i = 0; i < 3; i++) COM[i] /= m;

}
+
+/*
+Node constructor. Used to build the leaf nodes directly from the data passed
+from Python using ctypes.
+*/
+Node::Node(const double* pr_vec, const double pm){
+    // Initialize a node (leaf)
+    for (int i = 0; i < 3; i++) COM[i] = pr_vec[i];
+    memset(g, 0, sizeof(g)); // Set g to 0s
+    m = pm; // mass
+}
+
/*
Calculate the acceleration at the this node. Used recursively calling
treeWalk(topNode, thetamax). This is O(log n) and will be called for
each node in the tree: O(n log n). Soft defines the characteristic
plummer softening scale. The result accumulates into this->g.
*/
void Node::treeWalk(const Node &node, 
    const double thetamax, const double soft){
    // Calculate distance to node
    double delta[3];
    for (int i = 0; i < 3; i++) delta[i] = node.COM[i] - COM[i];
    double r = sqrt(delta[0]*delta[0] + delta[1]*delta[1] + delta[2]*delta[2]);

    if (r==0) return; // Do not interact with self

    // If it satisfies the size/r < thetamax criterion, add the g contribution
    // (softened force: g += m * delta / ((r+soft)^2 * r), so the
    // acceleration stays finite as r -> 0)
    if (node.children.size() == 0 || node.size/r < thetamax){
        double tripler = (r+soft) * (r+soft) * r;
        for(int i = 0; i < 3; i++) g[i] += node.m * delta[i] / tripler;
    }
    else{ // Otherwise recurse into its children
        for (int i = 0; i < node.children.size(); i++){
            treeWalk((*node.children[i]), thetamax, soft);
        }
    }
}

+ 25 - 0
cpp/Node.h

@@ -0,0 +1,25 @@
+#include <vector>
+
/*
Node class for the Barnes-Hut tree. A node is either a leaf (one particle)
or an internal node owning up to eight children. The choice of pointers as
opposed to references is driven by the necessity to interact with Numpy
arrays using ctypes.
*/
class Node{
private:
    double COM[3]; // Center of mass
    double m; // Mass of the node
    double size; // Size of box, equal for all dimensions
    std::vector<Node*> children; // Subtrees/leaves contained in this box

public:
    double g[3]; // Gravitational acceleration accumulated on the node

    // Constructors
    // Internal node: recursively subdivides pBodies within the cube of
    // size pSize whose lowest corner is (px, py, pz).
    Node(const std::vector<Node*> &pBodies, const double pSize, 
        const double px, const double py, const double pz);
    // Leaf node: position pr_vec (3 doubles) and mass pm.
    Node(const double* pr_vec, const double pm);

    // Methods
    // Accumulates into g the acceleration from the tree rooted at `node`.
    void treeWalk(const Node &node, const double thetamax, const double soft);
};

+ 106 - 0
cpp/acclib.cpp

@@ -0,0 +1,106 @@
+#include <vector>
+#include <iostream>
+#include <math.h> 
+#include <chrono>
+
+using namespace std;
+using namespace std::chrono;
+#include "Node.h"
+
+
/*
Calculates the self gravitational acceleration for a set of particles located
at r_vec (n, 3) with masses m_vec (n,) using a Barnes-Hut tree with theta = 0.7

Parameters:
    r_vec: the position of the particles.
    m_vec: the masses of the particles.
    n: the number of particles. This cannot be inferred by C++ and must be
        passed directly.
    size: size of the top node in the octtree.
    px, py, pz: coordinates of the lowest corner of the top node.
    soft: characteristic plummer softening scale.

Returns:
    The accelerations computed for each particle, flattened into a
    heap-allocated (3n,) array. The caller takes ownership of this buffer.

NOTE(review): the leaf nodes and the tree allocated below are never freed
(Node has no destructor), so every call leaks the whole tree -- confirm
whether this is acceptable for the ctypes use case.
*/
extern "C" double* barnesHutCPP(double** r_vec, double* m_vec, int n, 
    double size, double px, double py, double pz, double soft){
    // Create a leaf node for every particle
    std::vector<Node*> nodes;
    for (int i = 0; i < n; i++){
        nodes.push_back(new Node(r_vec[i], m_vec[i]));
    }

    // Create the tree
    Node* tree = new Node(nodes, size, px, py, pz);

    // Calculate the accelerations for each node. We want to return the
    // result as an array and use a 1D array for simplicity since this will
    // be allocated continously in the heap and can be reshaped in Numpy.
    double* accel = new double[3*n];
    for (int i = 0; i < nodes.size(); i++){
        nodes[i]->treeWalk(*tree, 0.7, soft); // thetamax = 0.7
        accel[3*i+0] = nodes[i]->g[0];
        accel[3*i+1] = nodes[i]->g[1];
        accel[3*i+2] = nodes[i]->g[2];
    }
    
    // Return as a flattened (3n,) array
    return accel;
}
+
/*
Calculates the self gravitational acceleration for a set of particles located
at r_vec (n, 3) with masses m_vec (n,) using brute-force pairwise summation.
Massive particles must appear first: the loops rely on this ordering to skip
massless-massless pairs, which exert no forces on each other.

Parameters:
    r_vec: the position of the particles.
    m_vec: the masses of the particles.
    n: the number of particles. This cannot be inferred by C++ and must be
        passed directly.
    soft: characteristic plummer softening scale.

Returns:
    The accelerations computed for each particle, flattened into a
    heap-allocated (3n,) array. The caller takes ownership of this buffer.
*/
extern "C" double* bruteForceCPP(double** r_vec, double* m_vec, 
    int n, double soft){

    // Initialize result and fill with 0s
    // Use a 1D array so as not to have to convert back in Numpy
    double* accel = new double[3*n];
    for (int i=0; i<3*n; i++){
        accel[i] = 0;
    }

    // Compute the acceleration over all pairs (i, j) with i < j
    for (int i=0; i<n; i++){
        // Massive particles come first, so once i is massless every
        // remaining pair is massless-massless and exerts no force: stop.
        if (m_vec[i] == 0.) break;
        for (int j=i+1; j<n; j++){
            // Distance between particles
            double delta[3];
            for (int k = 0; k < 3; k++) delta[k] = r_vec[j][k] - r_vec[i][k];
            double r = sqrt(delta[0]*delta[0] 
                + delta[1]*delta[1] 
                + delta[2]*delta[2]);
            // Plummer-softened denominator: (r+soft)^2 * r ~ r^3 for r >> soft
            double tripler = (r+soft) * (r+soft) * r;

            // Acceleration of particle j due to massive particle i
            double mr3inv = m_vec[i]/tripler;
            for (int k = 0; k < 3; k++) accel[3*j+k] -= mr3inv * delta[k];

            // A massless j exerts no reaction force on i.
            // BUGFIX: this used `break`, which skipped the pull of massive
            // particle i on every remaining massless particle j+1..n-1;
            // `continue` keeps iterating over them.
            if (m_vec[j] == 0.) continue;
            // Otherwise, equal and opposite contribution on i (+)
            mr3inv = m_vec[j]/tripler;
            for (int k = 0; k < 3; k++) accel[3*i+k] += mr3inv * delta[k];
        }
    }

    // Return as a flattened (3n,) array
    return accel;
}

BIN
cpp/acclib.so


+ 36 - 0
distributions.py

@@ -0,0 +1,36 @@
+import numpy as np
+import scipy.stats as st
+
+
# Bulge distributions
class radialPlummer(st.rv_continuous):
    """Radial distribution of a Plummer sphere with scale length a = 1.

    The density is rho(r) = (3M/4pi) (1 + r^2)^(-5/2); multiplying by the
    shell area 4 pi r^2 gives the pdf per unit radius, 3 r^2 (1+r^2)^(-5/2)
    (the 4 pi factors cancel). The pdf is renormalized over the truncated
    (0, 5) support used for sampling, as rv_continuous does not normalize.
    """
    def _pdf(self, x):
        # BUGFIX: the prefactor read 3/(4*np.pi**3); for a = 1 it is
        # 3/(4 pi), and the 4 pi shell factor cancels it entirely.
        y = 3*x**2 * (1 + x**2)**(-5/2)
        # Normalize over (0, 5): the integral of y is b^3 (1+b^2)^(-3/2)
        return y / (5**3 * (1 + 5**2)**(-1.5))
PLUMMER = radialPlummer(a=0, b=5, name='rPlummer')
+
# Hernquist bulge profile (previously flagged TODO and not in use: the old
# expression 1/(2 pi x^4) * 4 pi x^2 = 2/x^2 is only the large-r limit and
# is not normalizable at x -> 0).
class radialHernquist(st.rv_continuous):
    """Radial distribution of a Hernquist sphere with scale length a = 1.

    The density is rho(r) = (M/2pi) 1/(r (1 + r)^3), so the pdf per unit
    radius is rho * 4 pi r^2 = 2r/(1 + r)^3, renormalized over the
    truncated (0, 5) support used for sampling.
    """
    def _pdf(self, x):
        y = 2*x / (1 + x)**3
        # Normalize over (0, b): integral of y is 1 - (2b + 1)/(1 + b)^2,
        # which for b = 5 equals 25/36
        return y / (25/36)
HERNQUIST = radialHernquist(a=0, b=5, name='rHernquist')
+
+
# Disk distributions
class radialUniform(st.rv_continuous):
    """Radius of a point drawn uniformly from the unit disk: pdf(x) = 2x
    on (0, 1), zero beyond."""
    def _pdf(self, x):
        # Outside the unit disk the density vanishes
        if x >= 1:
            return 0
        return 2*x
UNIFORM = radialUniform(a=0, b=10, name='rUniform')
+
class radialExp(st.rv_continuous):
    """Radial distribution of an exponential disk with scale length 1:
    pdf(x) = x exp(-x) (surface density exp(-x) times circumference,
    normalization constants cancelling)."""
    def _pdf(self, x):
        return np.exp(-x) * x
EXP = radialExp(a=0, b=10, name='rExp')
+
+
# Halo distributions
class radialNFW(st.rv_continuous):
    """Radial distribution of an NFW halo with scale radius 1.

    Density rho(r) ~ 1/(r (1 + r)^2); multiplying by the shell area r^2
    gives pdf ~ r/(1 + r)^2, normalized over the (0, 5) support.
    """
    def _pdf(self, x):
        y = 1 / (x * (1 + x)**2) * x**2
        # Normalize pdf in (0, 5) range: the integral is log(6) - 5/6
        y /= (-5/6 + np.log(6))
        # BUGFIX: the pdf was computed but never returned (implicit None),
        # which broke every rv_continuous operation on NFW
        return y
NFW = radialNFW(a=0, b=5, name='rNFW')

File diff suppressed because it is too large
+ 432 - 0
docs/acceleration.html


File diff suppressed because it is too large
+ 166 - 0
docs/analysis/antennaeKDE.html


File diff suppressed because it is too large
+ 88 - 0
docs/analysis/bridgeEvolution.html


File diff suppressed because it is too large
+ 97 - 0
docs/analysis/eccentricity.html


File diff suppressed because it is too large
+ 272 - 0
docs/analysis/inclination.html


File diff suppressed because it is too large
+ 90 - 0
docs/analysis/massFractions.html


File diff suppressed because it is too large
+ 167 - 0
docs/analysis/ringPlot.html


File diff suppressed because it is too large
+ 394 - 0
docs/analysis/segmentation.html


File diff suppressed because it is too large
+ 247 - 0
docs/analysis/simulatedAnnealing.html


File diff suppressed because it is too large
+ 92 - 0
docs/analysis/tailEvolution.html


File diff suppressed because it is too large
+ 154 - 0
docs/analysis/tailShapePlot.html


File diff suppressed because it is too large
+ 354 - 0
docs/analysis/utils.html


File diff suppressed because it is too large
+ 896 - 0
docs/distributions.html


File diff suppressed because it is too large
+ 739 - 0
docs/run_simulated_annealing.html


File diff suppressed because it is too large
+ 1562 - 0
docs/simulation.html


File diff suppressed because it is too large
+ 142 - 0
docs/utils.html


BIN
literature/figs/barnes2001.png


BIN
literature/figs/barnes2001.psd


BIN
literature/figs/barnes2001.tif


BIN
literature/figs/g1.tif


BIN
literature/figs/g1b.tif


BIN
literature/figs/g1c.tif


BIN
literature/figs/g2.tif


BIN
literature/figs/g2b.tif


BIN
literature/figs/g2c.tif


BIN
literature/figs/gt.bmp


BIN
literature/figs/gt.png


BIN
literature/figs/gt.tif


BIN
literature/figs/hibbard1.png


BIN
literature/figs/hibbard2.png


BIN
literature/figs/hibbard3.png


+ 306 - 0
run_simulated_annealing.py

@@ -0,0 +1,306 @@
+"""Simulated annealing algorithm used to match the simulation of the
+Antennae to the observations by comparing binarized images."""
+
+import numpy as np
+import scipy
+import pickle
+from scipy import ndimage
+from fast_histogram import histogram2d
+from scipy.signal import convolve2d
+from numba import jit
+from matplotlib.image import imread
+import datetime
+
+from simulation import Simulation, Galaxy
+
T = .25 # Initial temperature of the simulated annealing
STEPS = 0 # Number of annealing iterations (0: only evaluate the random start)
DECAY = .998 # Exponential decay factor applied to T every iteration
+
def simToBitmap(sim, theta, phi, view, scale, x, y, galaxy):
    """Obtain a 50x50 binarized bitmap of one galaxy as viewed from a given
       direction. The binning has been chosen so that the scale and the
       offset (x,y) are expected to be approximately 1 and (0, 0)
       respectively.

        Parameters:
            sim (Simulation): Simulation to project.
            theta (float): polar angle of viewing direction.
            phi (float): azimuthal angle of viewing direction.
            view (float): rotation angle along viewing direction.
            scale (float): scaling factor.
            x (float): x offset
            y (float): y offset
            galaxy (int): galaxy to plot. Either 0, or 1. They are assumed    
                to have the same number of particles.

        Returns:
            (50, 50) array of 0s and 1s.
    """
    # Obtain components in new x',y' plane.
    # The constant (.12, 1.3) offset recenters the projection on the
    # observed images (appears hand-tuned -- TODO confirm).
    r_vec = (sim.project(theta, phi, view) - [[x+.12,y+1.3]]) * scale
    # Select a single galaxy. We match them separately in the algorithm.
    # NOTE(review): assumes the two central masses occupy the first rows
    # (mass-ordered by Simulation.add) and each galaxy holds half of the
    # remaining particles -- confirm against the particle ordering.
    if galaxy==0: r_vec = r_vec[2:len(r_vec)//2-1] #omit central masses
    if galaxy==1: r_vec = r_vec[len(r_vec)//2-1:]
    # Use a fast histogram, much faster than numpy ()
    H = histogram2d(r_vec[:,0], r_vec[:,1], 
        range=[[-5,5], [-5,5]], bins=(50, 50))
    im = np.zeros((50, 50))
    H = convolve2d(H, np.ones((2,2)), mode='same') # Smooth the image
    im[H>=1] = 1 # Binarize the image
    
    return im
+
@jit(nopython=True) # Numba annotation
def bitmapScoreAlgo(im1, im2):
    """Computes the f1 score between two binarized images.

    Parameters
        im1 (arr): nxm ground truth image
        im2 (arr): nxm candidate image
    
    Returns
        f1 score (0 when there are no true positives)
    """
    # Confusion-matrix counts; true negatives are not needed for f1
    # (the unused TN computation was removed)
    TP = np.sum((im1==1.0) & (im2==1.0))
    FP = np.sum((im1==0.0) & (im2==1.0))
    FN = np.sum((im1==1.0) & (im2==0.0))
    if TP==0: return 0 # f1 is undefined without true positives; score 0
    precision = TP/(TP+FP) 
    recall = TP/(TP+FN)
    return 2*precision*recall / (precision + recall)
+
# The matching algorithm attempts to improve the match by shifting the
# image by one pixel in each direction. If none improves the score, the
# f1-score of said local maximum is returned. To make this highly efficient
# (as this is run millions of times) we explicitly write functions to
# shift an image by 1 pixel in each direction, as these can then be compiled
# using Numba (jit annotation) to low-level code.
# Performance is crucial here and must sadly be prioritized over conciseness.
@jit(nopython=True)
def shiftBottom(im, im2):
    """Shifts an image by one pixel downwards (toward higher row indices).
    
    Parameters:
        im (arr): the nxm image to shift by one pixel.
        im2 (arr): an nxm array where the new image will be placed.

    Returns:
        A reference to im2

    Note:
        bitmapScore's recursion can pass the same array as im and im2, so
        the copy must happen before row 0 is zeroed -- do not reorder.
    """
    im2[1:] = im[:-1]
    im2[0] = 0
    return im2
+
@jit(nopython=True)
def shiftTop(im, im2):
    """Shifts an image by one pixel upwards, writing into im2 and returning
    it. See shiftBottom for the argument contract; the copy must precede
    zeroing the last row because im and im2 may be the same array."""
    im2[:-1] = im[1:]
    im2[-1] = 0
    return im2
+
@jit(nopython=True)
def shiftLeft(im, im2):
    """Shifts an image by one pixel 'to the left', writing into im2 and
    returning it. NOTE(review): column j of the result is column j-1 of the
    input (content moves to higher column indices) -- confirm the intended
    left/right display convention. Counterpart of shiftRight; the copy must
    precede zeroing because im and im2 may be the same array."""
    im2[:,1:] = im[:,:-1]
    im2[:,0] = 0
    return im2
+
@jit(nopython=True)
def shiftRight(im, im2):
    """Shifts an image by one pixel 'to the right', writing into im2 and
    returning it. Inverse direction of shiftLeft: column j of the result is
    column j+1 of the input; the last column is zeroed after the copy."""
    im2[:,:-1] = im[:,1:]
    im2[:,-1] = 0
    return im2
+
@jit
def bitmapScore(im1, im2, _prev=None, _bestscore=None, _zeros=None):
    """Computes the bitmap score between two images. This is the f1-score
       but we allow the algorithm to attempt to improve the score by
       shifting the images. The algorithm is implemented recursively.

    Parameters:
        im1 (array): The ground truth nxm image.
        im2 (array): The candidate nxm image.
        _prev: Used internally for recursion (direction we came from).
        _bestscore: Used internally for recursion.
        _zeros: Used internally for recursion.
    
    Returns:
        f1-score for the two images.
    """
    # When the function is called externally, initialize an array of zeros
    # and compute the score for no shifting. The zeros array is used for
    # performance to only create a new array once.
    if _bestscore is None:
        _bestscore = bitmapScoreAlgo(im1, im2)
        _zeros = np.zeros_like(im2)
    # Attempt to improve the score by shifting the image in a direction.
    # Keeping track of _prev allows us to not 'go back' and undo a shift
    # needlessly.
    # BUGFIX: these comparisons used `is not 0` etc., i.e. identity against
    # int literals, which relies on CPython small-int caching and raises a
    # SyntaxWarning on Python >= 3.8; value inequality is the correct test.
    if _prev != 0: # try left
        shifted = shiftLeft(im2, _zeros)
        score = bitmapScoreAlgo(im1, shifted)
        if score > _bestscore: return bitmapScore(im1, shifted, 
            _prev=1, _bestscore=score, _zeros=_zeros)
    if _prev != 1: # try right
        shifted = shiftRight(im2, _zeros)
        score = bitmapScoreAlgo(im1, shifted)
        if score > _bestscore: return bitmapScore(im1, shifted, 
            _prev=0, _bestscore=score, _zeros=_zeros)
    if _prev != 2: # try top
        shifted = shiftTop(im2, _zeros) 
        score = bitmapScoreAlgo(im1, shifted)
        if score > _bestscore: return bitmapScore(im1, shifted, 
            _prev=3, _bestscore=score, _zeros=_zeros)
    if _prev != 3: # try bottom
        shifted = shiftBottom(im2, _zeros)
        score = bitmapScoreAlgo(im1, shifted)
        if score > _bestscore: return bitmapScore(im1, shifted, 
            _prev=2, _bestscore=score, _zeros=_zeros)
    # Return _bestscore if shifting did not improve (local maximum).
    return _bestscore
+
+
def attemptSimulation(theta1, phi1, theta2, phi2, rmin=1, e=.5, R=2, 
        disk1=.75, disk2=.65, mu=1, plot=False, steps=2000):
    """Runs a simulation with the given parameters and compares it 
    to observations of the antennae to return a score out of 2.

    Relies on the global ground-truth bitmaps GT1 and GT2 defined at the
    bottom of this module.

    Parameters:
        theta1 (float): polar angle for the spin of the first galaxy.
        phi1 (float): azimuthal angle for the spin of the first galaxy.
        theta2 (float): polar angle for the spin of the second galaxy.
        phi2 (float): azimuthal angle for the spin of the second galaxy.
        rmin (float): closest distance of approach of orbit.
        e (float): eccentricity of the orbit.
        R (float): initial separation
        disk1 (float): radius of the uniform disk of the first galaxy
        disk2 (float): radius of the uniform disk of the second galaxy
        mu (float): ratio of masses of the two galaxies
        plot (bool): if true the simulation will be saved to data/annealing/
        steps (int): number of times the f1 score will be computed, along
            random viewing directions per 100 timesteps.

    Returns:
        f1-score: score obtained by the simulation (one term per galaxy,
        so the maximum attainable value is 2).
    """

    # Initialize the simulation. Only the central masses and the massless
    # uniform disks are populated; bulge and halo get 0 particles for speed.
    sim = Simulation(dt=1E-3, soft=0.1, verbose=False, CONFIG=None, method='bruteForceNumbaOptimized')
    galaxy1 = Galaxy(orientation = (theta1, phi1), centralMass=2/(1+mu),
        sim=sim, bulge={'model':'plummer', 'particles':0, 'totalMass':0, 'l':0},
        disk={'model':'uniform', 'particles':2000, 'l':disk1, 'totalMass':0},
        halo={'model':'NFW', 'particles':0, 'rs':1, 'totalMass':0})
    galaxy2 = Galaxy(orientation = (theta2, phi2), centralMass=2*mu/(1+mu),
        sim=sim, bulge={'model':'plummer', 'particles':0, 'totalMass':0, 'l':0},
        disk={'model':'uniform', 'particles':2000, 'l':disk2, 'totalMass':0},
        halo={'model':'NFW', 'particles':0, 'rs':1, 'totalMass':0})
    sim.setOrbit(galaxy1, galaxy2, e=e, R0=R, rmin=rmin) # define the orbit

    # Run the simulation manually (Simulation.run is bypassed so that
    # intermediate states can be scored while stepping)
    # Initialize the parameters consistently with the velocities
    sim.rprev_vec = sim.r_vec - sim.v_vec * sim.dt
    # Keep track of the best score
    bestScore = 0
    # and its corresponding binarized image and parameters
    bestImage, bestParams = 0, 0
    hasReachedPericenter = False

    # Run until tmax = 25 (25001 steps at dt = 1E-3)
    for i in range(25001):
        sim.step()
        if i%100==0: # Every $\Delta t = 0.1$
            # Check if we are close to pericenter
            centers = sim.r_vec[sim.type[:,0] == 'center']
            if np.linalg.norm(centers[0] - centers[1]) < 1.3*rmin: 
                hasReachedPericenter = True
            # Do not evaluate the f1-score until pericenter.
            if not hasReachedPericenter: continue

            # Check multiple (steps) viewing directions at random
            localBestScore = 0
            localBestImage, localBestParams = 0, 0
            for j in range(steps):
                # The viewing directions are isotropically distributed
                theta = np.arccos(np.random.uniform(-1, 1))
                phi = np.random.uniform(0, 2*np.pi)
                # Rotation along line of sight
                view = np.random.uniform(0, 2*np.pi)
                scale = np.random.uniform(0.6, 1.0)
                x = np.random.uniform(-1.0, 1.0) # Offsets
                y = np.random.uniform(-1.0, 1.0)
                # Get images for each galaxy and compute their score separately
                # against the global ground-truth bitmaps
                im1 = simToBitmap(sim, theta, phi, view, scale, x, y, galaxy=0)
                im2 = simToBitmap(sim, theta, phi, view, scale, x, y, galaxy=1)
                score = bitmapScore(GT1, im1) + bitmapScore(GT2, im2)
                if score > localBestScore: 
                    localBestScore = score
                    localBestImage = [im1,im2]
                    localBestParams = [i, theta, phi, view, scale, x, y]

            if bestScore < localBestScore: 
                bestScore = localBestScore
                bestImage = localBestImage
                bestParams = localBestParams
            if plot:
                # NOTE(review): Simulation.save is defined as save(outputFolder)
                # -- confirm it tolerates the extra type keyword.
                sim.save('annealing', type='2D')
    
    print('Best score for this attempt', bestScore)
    print('using viewing parameters', bestParams)
    
    return bestScore
+
+
+################################################################################
+################################################################################
+
# Generate a (50, 50) ground-truth bitmap for each galaxy
# They are stored globally in GT1 and GT2 (Ground Truth)
im = imread('literature/figs/g1c.tif')
im = np.mean(im, axis=-1) # collapse RGB channels to grayscale
# NOTE(review): scipy.misc.imresize was removed in SciPy 1.3; this script
# requires an older SciPy (with Pillow installed) to run.
im = scipy.misc.imresize(im, (50,50))
GT1 = np.zeros((50, 50))
GT1[im > 50] = 1 # binarize

im = imread('literature/figs/g2c.tif')
im = np.mean(im, axis=-1)
im = scipy.misc.imresize(im, (50,50))
GT2 = np.zeros((50, 50))
# BUGFIX: this thresholding line was missing, leaving GT2 all zeros so the
# second galaxy always contributed a score of 0.
GT2[im > 50] = 1

# Define the limits and relative scale of the variations in each parameter
# In the same order as attemptSimulation:
# theta1, phi1, theta2, phi2, rmin (fixed), e, R (fixed), disk1, disk2
LIMITS = np.array([[np.pi, 2 * np.pi], [-np.pi, np.pi],
                   [0, np.pi], [-np.pi, np.pi],
                   [1,1], [.5,1.0], [2.2,2.2], [.5,.8], [.5,.8]])
VARIATIONS = np.array([.08, .15, .08, .15, 0, .01, 0, .01, .01])

# Choose a random starting point and evaluate it
bestparams = [np.random.uniform(*l) for l in LIMITS]
log = []
bestscore = attemptSimulation(*bestparams, steps=500)
print('Starting with score', bestscore, 'with parameters', bestparams)
log.append([bestscore, bestparams, True])

# Main simulated-annealing loop (STEPS iterations)
for i in range(STEPS):
    T = T * DECAY # exponential temperature decay
    # Perturb the parameters; step size shrinks together with T
    params = bestparams + np.random.normal(scale=VARIATIONS) * 2 * T / 0.04
    # Allow the angles from $-\pi$ to $\pi$ to wrap around
    for j in [1,3]:
        params[j] = np.mod(params[j] - LIMITS[j][0], LIMITS[j][1] - LIMITS[j][0])
        params[j] += LIMITS[j][0]
    # Clip parameters outside their allowed range
    params = np.clip(params, LIMITS[:, 0], LIMITS[:, 1])
    # Evaluate the score for this attempt, use more steps as i progresses
    # so as to reduce the noise in the evaluation of the score
    score = attemptSimulation(*params, steps=500 + i)
    # Accept with the standard Metropolis/annealing criterion
    if score > bestscore or np.exp(-(bestscore-score)/T) > np.random.rand():
        # Move to this new point
        print('NEW BEST ____', i, T, score, params)
        bestscore = score
        bestparams = params
        log.append([score, params, True])
    else: # Remain in the old point
        log.append([score, params, False])
    # Save the progress for future plotting
    pickle.dump(log, open('data/logs/logb.pickle', "wb" ) )   

+ 45 - 0
run_simulation.py

@@ -0,0 +1,45 @@
"""Command line tool to run a YAML simulation configuration file."""

import yaml
import argparse

from utils import update_config
from simulation import Simulation, Galaxy


# Parse command line arguments: 
# > python run_simulation.py config_file.yml --output_folder name
parser = argparse.ArgumentParser(description='''Run a galactic 
	collision simulation.''')
parser.add_argument('config_file', type=argparse.FileType('r'),
	help='''Path to configuration file for the simulation, in YAML format. 
	See the config folder for examples.''')
parser.add_argument('--output_folder', default=None,
	help='''Name of the output folder in data/ where the results will be 
	saved. The directory will be created if necessary. If none is provided, 
	the name attribute in the configuration file will be used instead.''')
parser.add_argument('--verbose', action='store_true', default=False,
	help='''In verbose mode the simulation will print its progress.''')

args = parser.parse_args()

# Load the configuration for this simulation.
# safe_load is used instead of the deprecated bare yaml.load: the config
# only needs plain YAML and this avoids executing arbitrary tags.
CONFIG = yaml.safe_load(open("config/default.yml", "r")) # default configuration
updates = list(yaml.safe_load_all(args.config_file))

for update in updates:
	# For multiple configurations in one file, 
	# the updates are with respect to the first one.
	# NOTE(review): CONFIG is mutated in place across iterations, so keys set
	# by an earlier document and absent from updates[0] carry over to later
	# runs -- confirm this cascading behavior is intended.
	update_config(CONFIG, updates[0])
	update_config(CONFIG, update)
	# If no output folder is provided, the name in CONFIG is used instead
	outputFolder = (CONFIG['name'] if args.output_folder is None 
		else args.output_folder)

	# Run the simulation
	sim = Simulation(**CONFIG['simulation'], verbose=args.verbose, 
		CONFIG=CONFIG)
	galaxy1 = Galaxy(**CONFIG['galaxy1'], sim=sim) # create the galaxies
	galaxy2 = Galaxy(**CONFIG['galaxy2'], sim=sim)
	sim.setOrbit(galaxy1, galaxy2, **CONFIG['orbit']) # define the orbit
	sim.run(**CONFIG['simulation'], outputFolder=outputFolder)

+ 420 - 0
simulation.py

@@ -0,0 +1,420 @@
+"""Definition of the Simulation class and the Galaxy constructor."""
+
+import os
+import pickle
+import numpy as np
+import matplotlib.pyplot as plt
+
+from utils import random_unit_vectors, cascade_round
+from distributions import PLUMMER, HERNQUIST, UNIFORM, EXP, NFW
+import acceleration
+
+
+##############################################################################
+##############################################################################
class Simulation:
    """Main class for the gravitational simulation.

    Attributes:
        r_vec (array): position of the particles in the current timestep.
            Shape: (number of particles, 3)
        rprev_vec (array): position of the particles in the previous timestep.
            Shape: (number of particles, 3)
        v_vec (array): velocity in the current timestep.
            Shape: (number of particles, 3)
        a_vec (array): acceleration in the current timestep.
            Shape: (number of particles, 3)
        mass (array): mass of each particle in the simulation.
            Shape: (number of particles,)
        type (array): non-unique identifier for each particle.
            Shape: (number of particles,)
        tracks (array): list of positions through the simulation for central
            masses. Shape: (tracked particles, n+1, 3).
        CONFIG (dict): configuration used to create the simulation.
            It will be saved along the state of the simulation.

        dt (float): timestep of the simulation
        n (int): current timestep. Initialized as n=0.
        t (float): current simulation time (kept equal to n * dt).
        soft (float): softening length used by the simulation.
        verbose (boolean): When True progress statements will be printed.
    """

    def __init__(self, dt, soft, verbose, CONFIG, method, **kwargs):
        """Constructor for the Simulation class.

        Arguments:
            dt (float): timestep of the simulation
            soft (float): softening length used by the simulation.
            verbose (bool): When True progress statements will be printed.
            CONFIG (dict): configuration file used to create the simulation.
            method (string): Algorithm to use when computing the 
                gravitational forces. One of 'bruteForce', 'bruteForceNumba',
                'bruteForceNumbaOptimized', 'bruteForceCPP', 'barnesHutCPP'.

        Raises:
            Exception: when method is not one of the known algorithms.
        """
        self.n = 0
        self.t = 0
        self.dt = dt
        self.soft = soft
        self.verbose = verbose
        self.CONFIG = CONFIG
        # Initialize empty arrays for all necessary properties
        self.r_vec = np.empty((0, 3))
        self.v_vec = np.empty((0, 3))
        self.a_vec = np.empty((0, 3))
        self.mass = np.empty((0,))
        self.type = np.empty((0, 2))
        algorithms = {
            'bruteForce': acceleration.bruteForce,
            'bruteForceNumba': acceleration.bruteForceNumba,
            'bruteForceNumbaOptimized': acceleration.bruteForceNumbaOptimized,
            'bruteForceCPP': acceleration.bruteForceCPP,
            'barnesHutCPP': acceleration.barnesHutCPP
        }
        try:
            self.acceleration = algorithms[method]
        # Only a missing key means an unknown method; a bare except would
        # have masked unrelated errors as well
        except KeyError:
            raise Exception("Method '{}' unknown".format(method)) from None

    def add(self, body):
        """Add a body to the simulation. It must expose the public attributes
           body.r_vec, body.v_vec, body.a_vec, body.type, body.mass.

        Arguments:
            body: Object to be added to the simulation (e.g. a Galaxy object)
        """
        # Extend all relevant attributes by concatenating the body
        for name in ['r_vec', 'v_vec', 'a_vec', 'type', 'mass']:
            simattr, bodyattr = getattr(self, name), getattr(body, name)
            setattr(self, name, np.concatenate([simattr, bodyattr], axis=0))
        # Order based on mass (heaviest first); some acceleration algorithms
        # rely on massive particles appearing first
        order = np.argsort(-self.mass)
        for name in ['r_vec', 'v_vec', 'a_vec', 'type', 'mass']: 
            setattr(self, name, getattr(self, name)[order])

        # (Re)initialize the track history for the central masses.
        # All bodies must therefore be added before stepping.
        self.tracks = np.empty((np.sum(self.type[:,0]=='center'), 0, 3))

    def step(self):
        """Perform a single step of the simulation using a Verlet integrator.
           Updates r_vec/rprev_vec, n, t and appends to the tracks.
        """
        # Calculate the acceleration
        self.a_vec = self.acceleration(self.r_vec, self.mass, soft=self.soft)
        # Update the state using the Verlet algorithm
        # (A custom algorithm is written mainly for learning purposes)
        self.r_vec, self.rprev_vec = (2*self.r_vec - self.rprev_vec
            + self.a_vec * self.dt**2, self.r_vec)
        self.n += 1
        # BUGFIX: keep the simulation time in sync on every step. It was
        # previously refreshed only inside save(), so run() compared a
        # stale t against tmax between saves.
        self.t = self.n * self.dt
        # Update tracks
        self.tracks = np.concatenate([self.tracks,
            self.r_vec[self.type[:,0]=='center'][:,np.newaxis]], axis=1)

    def run(self, tmax, saveEvery, outputFolder, **kwargs):
        """Run the galactic simulation.

        Arguments:
            tmax (float): Time to which the simulation will run to.
                This is measured here since the start of the simulation,
                not since pericenter.
            saveEvery (int): The state is saved every saveEvery steps.
            outputFolder (string): It will be saved to /data/outputFolder/
        """
        # When the simulation starts, initialize self.rprev_vec consistently
        # with the initial velocities
        self.rprev_vec = self.r_vec - self.v_vec * self.dt
        if self.verbose: print('Simulation starting. Bon voyage!')
        while(self.t < tmax):
            self.step()
            if(self.n % saveEvery == 0):
                self.save('data/{}'.format(outputFolder))

        print('Simulation complete.')

    def save(self, outputFolder, **kwargs):
        """Save the state of the simulation to the outputFolder.
           Two files are saved:
                data{self.n}.pickle: serializing the state.
                sim{self.n}.png: a simplified 2D plot of x, y.

           Extra keyword arguments (e.g. the type='2D' hint passed by the
           simulated annealing script) are accepted and ignored for
           compatibility.
        """
        # Create the output folder if it doesn't exist
        if not os.path.exists(outputFolder): os.makedirs(outputFolder)

        # Compute some useful quantities
        # v_vec is not required by the integrator, but useful
        self.v_vec = (self.r_vec - self.rprev_vec)/self.dt
        self.t = self.n * self.dt # prevents numerical rounding errors

        # Serialize state; the with-block guarantees the file is closed
        # (it was previously left open)
        with open(outputFolder+'/data{}.pickle'.format(self.n), "wb") as file:
            pickle.dump({'r_vec': self.r_vec, 'v_vec': self.v_vec,
                         'type': self.type, 'mass': self.mass,
                         'CONFIG': self.CONFIG, 't': self.t,
                         'tracks': self.tracks}, file)

        # Save simplified plot of the current state.
        # Its main use is to detect ill-behaved situations early on.
        fig = plt.figure()
        plt.xlim(-5, 5); plt.ylim(-5, 5); plt.axis('equal')
        # Dark halo is plotted in red, disk in blue, bulge in green
        PLTCON = [('dark', 'r', 0.3), ('disk', 'b', 1.0), ('bulge', 'g', 0.5)]
        for type_, c, a in PLTCON: 
            plt.scatter(self.r_vec[self.type[:,0]==type_][:,0],
                self.r_vec[self.type[:,0]==type_][:,1], s=0.1, c=c, alpha=a)
        # Central mass as a magenta star 
        plt.scatter(self.r_vec[self.type[:,0]=='center'][:,0],
            self.r_vec[self.type[:,0]=='center'][:,1], s=100, marker="*", c='m')
        # Save to png file
        fig.savefig(outputFolder+'/sim{}.png'.format(self.n), dpi=150)
        plt.close(fig)

    def project(self, theta, phi, view=0):
        """Projects the 3D simulation onto a plane as viewed from the
           direction described by the (theta, phi, view). Angles in radians.
           (This is used by the simulated annealing algorithm)
        
        Parameters:
            theta (float): polar angle.
            phi (float): azimuthal angle.
            view (float): rotation along line of sight.

        Returns:
            (number of particles, 2) array with the projected coordinates.
        """
        M1 = np.array([[np.cos(phi), np.sin(phi), 0],
                       [-np.sin(phi), np.cos(phi), 0],
                       [0, 0, 1]])
        M2 = np.array([[1, 0, 0],
                       [0, np.cos(theta), np.sin(theta)],
                       [0, -np.sin(theta), np.cos(theta)]])
        M3 = np.array([[np.cos(view), np.sin(view), 0],
                       [-np.sin(view), np.cos(view), 0],
                       [0, 0, 1]])

        M = np.matmul(M1, np.matmul(M2, M3)) # combine rotations
        r = np.tensordot(self.r_vec, M, axes=[1, 0])

        return r[:,0:2]

    def setOrbit(self, galaxy1, galaxy2, e, rmin, R0):
        """Sets the two galaxies galaxy1, galaxy2 in an orbit.

        Parameters:
            galaxy1 (Galaxy): 1st galaxy (main)
            galaxy2 (Galaxy): 2nd galaxy (companion)
            e: eccentricity of the orbit
            rmin: distance of closest approach
            R0: initial separation

        Raises:
            Exception: when the orbital parameters cannot be satisfied.
        """
        m1, m2 = np.sum(galaxy1.mass), np.sum(galaxy2.mass)
        # Relevant formulae:
        # $r_0 = r (1 + e) \cos(\phi)$, where $r_0$ ($\neq R_0$) is the semi-latus rectum
        # $r_0 = r_\textup{min} (1 + e)$
        # $v^2 = G M (2/r - 1/a)$, where a is the semimajor axis

        # Solve the reduced two-body problem with reduced mass $\mu$ (mu)
        M = m1 + m2
        r0 = rmin * (1 + e)
        # BUGFIX: invalid parameters produced silent NaNs (np.arccos and
        # np.sqrt return nan and only warn), so the original try/except was
        # never triggered; errstate turns those warnings into exceptions.
        with np.errstate(invalid='raise', divide='raise'):
            try:
                phi = np.arccos((r0/R0 - 1) / e) # inverting the orbit equation
                phi = -np.abs(phi) # Choose negative (incoming) angle
                ainv = (1 - e) / rmin # ainv = $1/a$, as a may be infinite
                v = np.sqrt(M * (2/R0 - ainv))
                # Finally, calculate the angle of motion. angle = tan(dy/dx)
                # $dy/dx = ((dr/d\phi) sin(\phi) + r \cos(\phi))/((dr/d\phi) cos(\phi) - r \sin(\phi))$
                dy = R0/r0 * e * np.sin(phi)**2 + np.cos(phi)
                dx = R0/r0 * e * np.sin(phi) * np.cos(phi) - np.sin(phi)
                vangle = np.arctan2(dy, dx)
            except (FloatingPointError, ZeroDivisionError):
                raise Exception('''The orbital parameters cannot be satisfied.
            For elliptical orbits check that R0 is possible (<rmax).''') from None

        # We now need the actual motion of each body
        R_vec = np.array([[R0*np.cos(phi), R0*np.sin(phi), 0.]])
        V_vec = np.array([[v*np.cos(vangle), v*np.sin(vangle), 0.]])

        # Place the galaxies on opposite sides of the center of mass
        galaxy1.r_vec += m2/M * R_vec
        galaxy1.v_vec += m2/M * V_vec
        galaxy2.r_vec += -m1/M * R_vec
        galaxy2.v_vec += -m1/M * V_vec

        # Explicitely add the galaxies to the simulation
        self.add(galaxy1)
        self.add(galaxy2)

        if self.verbose: print('Galaxies set in orbit.')
+
+
+##############################################################################
+##############################################################################
class Galaxy():
    """Helper class for creating galaxies.

    Attributes:
        r_vec (array): position of the particles in the current timestep.
            Shape: (number of particles, 3)
        v_vec (array): velocity in the current timestep.
            Shape: (number of particles, 3)
        a_vec (array): acceleration in the current timestep.
            Shape: (number of particles, 3)
        mass (array): mass of each particle in the simulation.
            Shape: (number of particles,)
        type (array): non-unique identifier for each particle.
            Shape: (number of particles, 2)
    """
    def __init__(self, orientation, centralMass, bulge, disk, halo, sim):
        """Constructor for the Galaxy class.

           Parameters:
                orientation (tuple): (inclination i, argument of pericenter w)
                    in degrees
                centralMass (float): mass at the center of the galaxy
                bulge (dict): passed to the addBulge method.
                disk (dict): passed to the addDisk method.
                halo (dict): passed to the addHalo method.
                sim (Simulation): simulation object
        """
        if sim.verbose: print('Initializing galaxy')
        # Build the central mass
        self.r_vec = np.zeros((1, 3))
        self.v_vec = np.zeros((1, 3))
        self.a_vec = np.zeros((1, 3))
        self.mass = np.array([centralMass])
        self.type = np.array([['center', 0]])
        # Build the other components
        self.addBulge(**bulge)
        if sim.verbose: print('Bulge created.')
        self.addDisk(**disk)
        if sim.verbose: print('Disk created.')
        self.addHalo(**halo)
        if sim.verbose: print('Halo created.')
        # Correct particle velocities to generate circular orbits
        # $a_\textup{centripetal} = v^2/r$
        a_vec = sim.acceleration(self.r_vec, self.mass, soft=sim.soft)
        a = np.linalg.norm(a_vec, axis=-1, keepdims=True)
        r = np.linalg.norm(self.r_vec, axis=-1, keepdims=True)
        v = np.linalg.norm(self.v_vec[1:], axis=-1, keepdims=True)
        direction_unit = self.v_vec[1:] / v
        # Set orbital velocities (except for the central mass)
        self.v_vec[1:] = np.sqrt(a[1:] * r[1:]) * direction_unit
        self.a_vec = np.zeros_like(self.r_vec)
        # Rotate the galaxy into its correct orientation (degrees -> radians)
        self.rotate(*(np.array(orientation) / 360 * 2 * np.pi))
        if sim.verbose: print('Galaxy set in rotation and oriented.')

    def addBulge(self, model, totalMass, particles, l):
        """Adds a bulge to the galaxy.

            Parameters:
                model (string): parametrization of the bulge.
                    'plummer' and 'hernquist' are supported.
                totalMass (float): total mass of the bulge
                particles (int): number of particles in the bulge
                l (float): characteristic length scale of the model.
        """
        if particles == 0: return None
        # Divide the mass equally among all particles
        mass = np.ones(particles) * totalMass / particles
        self.mass = np.concatenate([self.mass, mass], axis=0)
        # Create particles according to the radial distribution from model
        if model == 'plummer':
            r = PLUMMER.ppf(np.random.rand(particles), scale=l)
        elif model == 'hernquist':
            r = HERNQUIST.ppf(np.random.rand(particles), scale=l)
        else: raise Exception("""Bulge distribution not allowed.
                    'plummer' and 'hernquist' are the supported values""")
        r_vec = r[:, np.newaxis] * random_unit_vectors(size=particles)
        self.r_vec = np.concatenate([self.r_vec, r_vec], axis=0)
        # Set them orbiting along random directions normal to r_vec
        v_vec = np.cross(r_vec, random_unit_vectors(size=particles))
        self.v_vec = np.concatenate([self.v_vec, v_vec], axis=0)
        # Label the particles
        type_ = [['bulge', 0]] * particles
        self.type = np.concatenate([self.type, type_], axis=0)

    def addDisk(self, model, totalMass, particles, l):
        """Adds a disk to the galaxy.

            Parameters:
                model (string): parametrization of the disk.
                    'rings', 'uniform' and 'exp' are supported.
                totalMass (float): total mass of the disk
                particles (int): number of particles in the disk
                l: for 'exp' and 'uniform' characteristic length of the
                    model. For 'rings' tuple of the form (inner radius,
                    outer radius, number of rings)
        """
        if particles == 0: return None
        # Create particles according to the radial distribution from model
        if model == 'uniform':
            r = UNIFORM.ppf(np.random.rand(particles), scale=l)
            type_ = [['disk', 0]] * particles
            r_vec = r[:, np.newaxis] * random_unit_vectors(particles, '2D')
            self.type = np.concatenate([self.type, type_], axis=0)
        elif model == 'rings':
            # l = [inner radius, outer radius, number of rings]
            distances = np.linspace(*l)
            # Aim for roughly constant areal density
            # Cascade rounding preserves the total number of particles
            perRing = cascade_round(particles * distances / np.sum(distances))
            particles = int(np.sum(perRing))  # prevents numerical errors
            r_vec = np.empty((0, 3))
            for d, n, i in zip(distances, perRing, range(l[2])):
                type_ = [['disk', i + 1]] * int(n)
                self.type = np.concatenate([self.type, type_], axis=0)
                # np.linspace requires an integer sample count; perRing
                # holds rounded floats, so cast explicitly
                phi = np.linspace(0, 2 * np.pi, int(n), endpoint=False)
                ringr = d * np.array([[np.cos(i), np.sin(i), 0] for i in phi])
                r_vec = np.concatenate([r_vec, ringr], axis=0)
        elif model == 'exp':
            r = EXP.ppf(np.random.rand(particles), scale=l)
            r_vec = r[:, np.newaxis] * random_unit_vectors(particles, '2D')
            type_ = [['disk', 0]] * particles
            self.type = np.concatenate([self.type, type_], axis=0)
        else:
            raise Exception("""Disk distribution not allowed.
                    'uniform', 'rings' and 'exp' are the supported values""")
        self.r_vec = np.concatenate([self.r_vec, r_vec], axis=0)
        # Divide the mass equally among all particles
        mass = np.ones(particles) * totalMass / particles
        self.mass = np.concatenate([self.mass, mass], axis=0)
        # Set them orbiting along the spin plane
        v_vec = np.cross(r_vec, [0, 0, 1])
        self.v_vec = np.concatenate([self.v_vec, v_vec], axis=0)

    def addHalo(self, model, totalMass, particles, rs):
        """Adds a halo to the galaxy.

            Parameters:
                model (string): parametrization of the halo.
                    Only 'NFW' is supported.
                totalMass (float): total mass of the halo
                particles (int): number of particles in the halo
                rs (float): characteristic length scale of the NFW profile.
        """
        if particles == 0: return None
        # Divide the mass equally among all particles
        mass = np.ones(particles) * totalMass / particles
        self.mass = np.concatenate([self.mass, mass], axis=0)
        # Create particles according to the radial distribution from model
        if model == 'NFW':
            r = NFW.ppf(np.random.rand(particles), scale=rs)
        else: raise Exception("""Halo distribution not allowed.
                    'NFW' is the only supported value""")
        r_vec = r[:, np.newaxis] * random_unit_vectors(size=particles)
        self.r_vec = np.concatenate([self.r_vec, r_vec], axis=0)
        # Orbit along random directions normal to the radial vector
        v_vec = np.cross(r_vec, random_unit_vectors(size=particles))
        self.v_vec = np.concatenate([self.v_vec, v_vec], axis=0)
        # Label the particles. Each label must be a ['dark', 0] pair so the
        # array matches the (N, 2) shape of self.type (the original
        # [['dark'], 0]*particles built a ragged list and crashed here).
        type_ = [['dark', 0]] * particles
        self.type = np.concatenate([self.type, type_], axis=0)

    def rotate(self, theta, phi):
        """Rotates the galaxy so that its spin is along the (theta, phi)
           direction.

        Parameters:
            theta (float): polar angle.
            phi (float): azimuthal angle.
        """
        # Rotation about the x axis by theta...
        M1 = np.array([[1, 0, 0],
                       [0, np.cos(theta), np.sin(theta)],
                       [0, -np.sin(theta), np.cos(theta)]])
        # ...followed by a rotation about the z axis by phi
        M2 = np.array([[np.cos(phi), np.sin(phi), 0],
                       [-np.sin(phi), np.cos(phi), 0],
                       [0, 0, 1]])
        M = np.matmul(M1, M2)  # combine rotations
        self.r_vec = np.tensordot(self.r_vec, M, axes=[1, 0])
        self.v_vec = np.tensordot(self.v_vec, M, axes=[1, 0])

+ 58 - 0
unittests.py

@@ -0,0 +1,58 @@
+import unittest
+from acceleration import *
+import numpy as np
+from numpy.testing import assert_almost_equal
+
# All interchangeable implementations of the pairwise gravitational
# acceleration; every test is run against each of them.
accelerationFunctions = [bruteForce, bruteForceNumba,
                         bruteForceNumbaOptimized, bruteForceCPP]

class MyTest(unittest.TestCase):
    """Cross-checks every acceleration implementation on configurations
    with known analytic answers, plus a randomized consistency check."""

    # Massive particles: two unit masses at unit separation, a = 1 each
    def test1(self):
        r_vec = np.array([[0., 0, 0], [1, 0, 0]])
        m_vec = np.array([1., 1])
        result = np.array([[1., 0, 0], [-1, 0, 0]])
        for fun in accelerationFunctions:
            assert_almost_equal(fun(r_vec, m_vec, 0), result)

    # Massless particles: test particle feels gravity but exerts none
    def test2(self):
        r_vec = np.array([[0., 0, 0], [1, 0, 0]])
        m_vec = np.array([1., 0])
        result = np.array([[0, 0, 0], [-1, 0, 0]])
        for fun in accelerationFunctions:
            assert_almost_equal(fun(r_vec, m_vec, 0), result)

    # Softening: a = 1/(r + soft)^2 with soft = 0.1
    def test3(self):
        r_vec = np.array([[0., 0, 0], [1, 0, 0]])
        m_vec = np.array([1., 1])
        result = np.array([[1/(1+.1)**2., 0, 0], [-1/(1+.1)**2, 0, 0]])
        for fun in accelerationFunctions:
            assert_almost_equal(fun(r_vec, m_vec, 0.1), result)

    # 3 dimensions: direction (-1,-1,-1)/sqrt(3), magnitude 1/3
    def test4(self):
        r_vec = np.array([[0., 0, 0], [1, 1, 1]])
        m_vec = np.array([1., 0])
        result = np.array([[0, 0, 0], [-1, -1, -1]]) / np.sqrt(3)**3
        for fun in accelerationFunctions:
            assert_almost_equal(fun(r_vec, m_vec, 0.), result)

    # More particles: single massive source, several test particles
    def test5(self):
        r_vec = np.array([[1., 0, 0], [0, 0, 0], [2, 0, 0], [3, 0, 0]])
        m_vec = np.array([1., 0, 0, 0])
        result = np.array([[0, 0, 0], [1, 0, 0], [-1, 0, 0], [-1/4, 0, 0]])
        for fun in accelerationFunctions:
            assert_almost_equal(fun(r_vec, m_vec, 0.), result)

    # Many particles: self consistency between implementations.
    # NOTE: renamed from a duplicate 'test5' — the second definition used
    # to shadow the 4-particle test above, which therefore never ran.
    def test6(self):
        np.random.seed(0)  # deterministic fixture
        r_vec = np.random.rand(100, 3)
        m_vec = np.random.rand(100)
        result = bruteForce(r_vec, m_vec, 0.1)
        for fun in [bruteForceNumba, bruteForceNumbaOptimized, bruteForceCPP]:
            assert_almost_equal(fun(r_vec, m_vec, 0.1), result)

if __name__ == '__main__':
    unittest.main()

+ 28 - 0
utils.py

@@ -0,0 +1,28 @@
+"""Short utility functions for use elsewhere"""
+
+import numpy as np
+
def update_config(default, update):
    """Recursively updates a Python (configuration) dictionary in place.

    Values from `update` override those in `default`. Nested dictionaries
    are merged key by key rather than replaced wholesale. Keys present
    only in `update` are added to `default` (the original implementation
    raised KeyError on them), and a non-dict value in `update` simply
    replaces a dict in `default` instead of being recursed into.
    """
    for key, value in update.items():
        if isinstance(value, dict) and isinstance(default.get(key), dict):
            # Both sides are dictionaries: merge recursively
            update_config(default[key], value)
        else:
            # Copy (or add) the leaf value otherwise
            default[key] = value
+
def random_unit_vectors(size, mode='3D'):
    """Draws `size` isotropically distributed random unit vectors,
    returned as an array of shape (size, 3). When mode is '2D' the
    vectors lie in (and are isotropic within) the xy plane.

    Normalizing samples from an isotropic Gaussian yields a uniform
    distribution on the sphere (or circle, once z is zeroed out).
    """
    vectors = np.random.normal(size=(size, 3))
    if mode == '2D':
        vectors[:, -1] = 0
    lengths = np.linalg.norm(vectors, axis=-1, keepdims=True)
    return vectors / lengths
+
def cascade_round(arr):
    """Rounds the floats in an array to integers while preserving their
    total sum (as closely as possible), modifying `arr` in place.

    Implements cascade rounding: each element is rounded after adding the
    accumulated rounding error of all previous elements, so errors never
    build up beyond half a unit.
    """
    surplus = 0.0
    for idx, value in enumerate(arr):
        target = value + surplus          # carry the error forward
        rounded = int(np.round(target))
        arr[idx] = rounded
        surplus = target - rounded        # error left over after rounding
    return arr