From fb851af39050ea62d29944a14fbd69db129fe3c6 Mon Sep 17 00:00:00 2001
From: Mikyung Kang <mkkang@isi.edu>
Date: Mon, 7 Nov 2011 14:32:22 -0500
Subject: [PATCH] Implements blueprint
 heterogeneous-tilera-architecture-support

Change-Id: Iad8f66af18eb396f4737cd4ea168edcc77481ee6
---
 nova/tests/baremetal/__init__.py              |   0
 nova/tests/baremetal/test_proxy_bare_metal.py | 292 +++++++
 nova/tests/baremetal/test_tilera.py           |  88 ++
 nova/virt/baremetal/__init__.py               |  15 +
 nova/virt/baremetal/dom.py                    | 268 ++++++
 nova/virt/baremetal/fake.py                   | 157 ++++
 nova/virt/baremetal/nodes.py                  |  42 +
 nova/virt/baremetal/proxy.py                  | 799 ++++++++++++++++++
 nova/virt/baremetal/tilera.py                 | 368 ++++++++
 9 files changed, 2029 insertions(+)
 create mode 100644 nova/tests/baremetal/__init__.py
 create mode 100644 nova/tests/baremetal/test_proxy_bare_metal.py
 create mode 100644 nova/tests/baremetal/test_tilera.py
 create mode 100644 nova/virt/baremetal/__init__.py
 create mode 100644 nova/virt/baremetal/dom.py
 create mode 100644 nova/virt/baremetal/fake.py
 create mode 100644 nova/virt/baremetal/nodes.py
 create mode 100644 nova/virt/baremetal/proxy.py
 create mode 100644 nova/virt/baremetal/tilera.py

diff --git a/nova/tests/baremetal/__init__.py b/nova/tests/baremetal/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nova/tests/baremetal/test_proxy_bare_metal.py b/nova/tests/baremetal/test_proxy_bare_metal.py
new file mode 100644
index 0000000000..ad41a85c30
--- /dev/null
+++ b/nova/tests/baremetal/test_proxy_bare_metal.py
@@ -0,0 +1,292 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 University of Southern California
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import __builtin__
+
+import functools
+import mox
+import StringIO
+import stubout
+
+from nova import flags
+from nova import utils
+from nova import test
+from nova.compute import power_state
+from nova import context
+from nova.tests import fake_utils
+from nova import exception
+
+from nova.virt.baremetal import proxy
+from nova.virt.baremetal import dom
+
+FLAGS = flags.FLAGS
+
+
+# Same fake_domains is used by different classes,
+# but different fake_file is used by different classes for unit test.
+fake_domains = [{'status': 1, 'name': 'instance-00000001',
+                 'memory_kb': 16777216, 'kernel_id': '1896115634',
+                 'ramdisk_id': '', 'image_id': '1552326678',
+                 'vcpus': 1, 'node_id': 6,
+                 'mac_address': '02:16:3e:01:4e:c9',
+                 'ip_address': '10.5.1.2'}]
+
+
+class DomainReadWriteTestCase(test.TestCase):
+
+    def setUp(self):
+        self.flags(baremetal_driver='fake')
+        super(DomainReadWriteTestCase, self).setUp()
+
+    def test_read_domain_with_empty_list(self):
+        """Read a file that contains no domains"""
+
+        self.mox.StubOutWithMock(__builtin__, 'open')
+        try:
+            fake_file = StringIO.StringIO('[]')
+            open('/tftpboot/test_fake_dom_file', 'r').AndReturn(fake_file)
+
+            self.mox.ReplayAll()
+
+            domains = dom.read_domains('/tftpboot/test_fake_dom_file')
+
+            self.assertEqual(domains, [])
+
+        finally:
+            self.mox.UnsetStubs()
+
+    def test_read_domain(self):
+        """Read a file that contains at least one domain"""
+        fake_file = StringIO.StringIO('''[{"status": 1,
+         "image_id": "1552326678", "vcpus": 1, "node_id": 6,
+         "name": "instance-00000001", "memory_kb": 16777216,
+         "mac_address": "02:16:3e:01:4e:c9", "kernel_id": "1896115634",
+         "ramdisk_id": "", "ip_address": "10.5.1.2"}]''')
+
+        self.mox.StubOutWithMock(__builtin__, 'open')
+        try:
+            open('/tftpboot/test_fake_dom_file', 'r').AndReturn(fake_file)
+
+            self.mox.ReplayAll()
+
+            domains = dom.read_domains('/tftpboot/test_fake_dom_file')
+
+            self.assertEqual(domains, fake_domains)
+
+        finally:
+            self.mox.UnsetStubs()
+
+    def test_read_no_file(self):
+        """Try to read when the file does not exist
+
+        This should raise an IO exception"""
+
+        self.mox.StubOutWithMock(__builtin__, 'open')
+        try:
+            open('/tftpboot/test_fake_dom_file', 'r').AndRaise(\
+            IOError(2, 'No such file or directory',
+                       '/tftpboot/test_fake_dom_file'))
+
+            self.mox.ReplayAll()
+
+            self.assertRaises(exception.NotFound, dom.read_domains,
+                       '/tftpboot/test_fake_dom_file')
+
+        finally:
+            self.mox.UnsetStubs()
+
+    def assertJSONEquals(self, x, y):
+        """Check if two json strings represent the equivalent Python object"""
+        self.assertEquals(utils.loads(x), utils.loads(y))
+        return utils.loads(x) == utils.loads(y)
+
+    def test_write_domain(self):
+        """Write the domain to file"""
+        self.mox.StubOutWithMock(__builtin__, 'open')
+        mock_file = self.mox.CreateMock(file)
+        expected_json = '''[{"status": 1,
+               "image_id": "1552326678", "vcpus": 1, "node_id": 6,
+               "name": "instance-00000001", "memory_kb": 16777216,
+               "mac_address": "02:16:3e:01:4e:c9", "kernel_id": "1896115634",
+               "ramdisk_id": "", "ip_address": "10.5.1.2"}]'''
+        try:
+            open('/tftpboot/test_fake_dom_file', 'w').AndReturn(mock_file)
+
+            # Check if the argument to file.write() represents the same
+            # Python object as expected_json
+            # We can't do an exact string comparison
+            # because of ordering and whitespace
+            mock_file.write(mox.Func(functools.partial(self.assertJSONEquals,\
+                expected_json)))
+            mock_file.close()
+
+            self.mox.ReplayAll()
+
+            dom.write_domains('/tftpboot/test_fake_dom_file', fake_domains)
+
+        finally:
+            self.mox.UnsetStubs()
+
+
+class BareMetalDomTestCase(test.TestCase):
+
+    def setUp(self):
+        self.flags(baremetal_driver='fake')
+        super(BareMetalDomTestCase, self).setUp()
+        # Stub out utils.execute
+        self.stubs = stubout.StubOutForTesting()
+        fake_utils.stub_out_utils_execute(self.stubs)
+
+    def tearDown(self):
+        self.stubs.UnsetAll()
+        super(BareMetalDomTestCase, self).tearDown()
+
+        # Reset the singleton state
+        dom.BareMetalDom._instance = None
+        dom.BareMetalDom._is_init = False
+
+    def test_read_domain_only_once(self):
+        """Confirm that the domain is read from a file only once,
+        even if the object is instantiated multiple times"""
+        self.mox.StubOutWithMock(dom, 'read_domains')
+        self.mox.StubOutWithMock(dom, 'write_domains')
+
+        dom.read_domains('/tftpboot/test_fake_dom_file').AndReturn([])
+        dom.write_domains('/tftpboot/test_fake_dom_file', [])
+
+        self.mox.ReplayAll()
+
+        # Instantiate multiple instances
+        x = dom.BareMetalDom()
+        x = dom.BareMetalDom()
+        x = dom.BareMetalDom()
+
+    def test_init_no_domains(self):
+
+        # Create the mock objects
+        self.mox.StubOutWithMock(dom, 'read_domains')
+        self.mox.StubOutWithMock(dom, 'write_domains')
+
+        dom.read_domains('/tftpboot/test_fake_dom_file').AndReturn([])
+        dom.write_domains('/tftpboot/test_fake_dom_file', [])
+
+        self.mox.ReplayAll()
+
+        # Code under test
+        bmdom = dom.BareMetalDom()
+
+        # Expected values
+        self.assertEqual(bmdom.fake_dom_nums, 0)
+
+    def test_init_remove_non_running_domain(self):
+        """Check to see that all entries in the domain list are removed
+        except for the one that is in the running state"""
+
+        fake_file = StringIO.StringIO()
+
+        domains = [dict(node_id=1, name='i-00000001',
+                        status=power_state.NOSTATE),
+              dict(node_id=2, name='i-00000002', status=power_state.RUNNING),
+              dict(node_id=3, name='i-00000003', status=power_state.BLOCKED),
+              dict(node_id=4, name='i-00000004', status=power_state.PAUSED),
+              dict(node_id=5, name='i-00000005', status=power_state.SHUTDOWN),
+              dict(node_id=6, name='i-00000006', status=power_state.SHUTOFF),
+              dict(node_id=7, name='i-00000007', status=power_state.CRASHED),
+              dict(node_id=8, name='i-00000008', status=power_state.SUSPENDED),
+              dict(node_id=9, name='i-00000009', status=power_state.FAILED)]
+
+        # Create the mock objects
+        self.mox.StubOutWithMock(dom, 'read_domains')
+        self.mox.StubOutWithMock(dom, 'write_domains')
+        dom.read_domains('/tftpboot/test_fake_dom_file').AndReturn(domains)
+        dom.write_domains('/tftpboot/test_fake_dom_file', domains)
+
+        self.mox.ReplayAll()
+
+        # Code under test
+        bmdom = dom.BareMetalDom()
+
+        self.assertEqual(bmdom.domains, [{'node_id': 2,
+                                          'name': 'i-00000002',
+                                          'status': power_state.RUNNING}])
+        self.assertEqual(bmdom.fake_dom_nums, 1)
+
+    def test_find_domain(self):
+        domain = {'status': 1, 'name': 'instance-00000001',
+                    'memory_kb': 16777216, 'kernel_id': '1896115634',
+                    'ramdisk_id': '', 'image_id': '1552326678',
+                    'vcpus': 1, 'node_id': 6,
+                    'mac_address': '02:16:3e:01:4e:c9',
+                    'ip_address': '10.5.1.2'}
+
+        # Create the mock objects
+        self.mox.StubOutWithMock(dom, 'read_domains')
+        self.mox.StubOutWithMock(dom, 'write_domains')
+
+        # Expected calls
+        dom.read_domains('/tftpboot/test_fake_dom_file')\
+            .AndReturn(fake_domains)
+        dom.write_domains('/tftpboot/test_fake_dom_file', fake_domains)
+
+        self.mox.ReplayAll()
+
+        # Code under test
+        bmdom = dom.BareMetalDom()
+
+        # Expected values
+        self.assertEquals(bmdom.find_domain('instance-00000001'), domain)
+
+
+class ProxyBareMetalTestCase(test.TestCase):
+
+    test_ip = '10.11.12.13'
+    test_instance = {'memory_kb': '1024000',
+                     'basepath': '/some/path',
+                     'bridge_name': 'br100',
+                     'mac_address': '02:12:34:46:56:67',
+                     'vcpus': 2,
+                     'project_id': 'fake',
+                     'bridge': 'br101',
+                     'image_ref': '123456',
+                     'instance_type_id': '5'}  # m1.small
+
+    def setUp(self):
+        self.flags(baremetal_driver='fake')
+        super(ProxyBareMetalTestCase, self).setUp()
+        self.context = context.get_admin_context()
+        fake_utils.stub_out_utils_execute(self.stubs)
+
+    def test_get_info(self):
+        # Create the mock objects
+        self.mox.StubOutWithMock(dom, 'read_domains')
+        self.mox.StubOutWithMock(dom, 'write_domains')
+
+        # Expected calls
+        dom.read_domains('/tftpboot/test_fake_dom_file')\
+            .AndReturn(fake_domains)
+        dom.write_domains('/tftpboot/test_fake_dom_file', fake_domains)
+
+        self.mox.ReplayAll()
+
+        # Code under test
+        conn = proxy.get_connection(True)
+        info = conn.get_info('instance-00000001')
+
+        # Expected values
+        self.assertEquals(info['mem'], 16777216)
+        self.assertEquals(info['state'], 1)
+        self.assertEquals(info['num_cpu'], 1)
+        self.assertEquals(info['cpu_time'], 100)
+        self.assertEquals(info['max_mem'], 16777216)
diff --git a/nova/tests/baremetal/test_tilera.py b/nova/tests/baremetal/test_tilera.py
new file mode 100644
index 0000000000..7b58282e18
--- /dev/null
+++ b/nova/tests/baremetal/test_tilera.py
@@ -0,0 +1,88 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 University of Southern California
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+import __builtin__
+import StringIO
+
+from nova import test
+from nova.virt.baremetal import tilera
+
+
+class TileraBareMetalNodesTestCase(test.TestCase):
+
+    def setUp(self):
+        super(TileraBareMetalNodesTestCase, self).setUp()
+        self.board_info = """\
+# board_id  ip_address mac_address 00:1A:CA:00:57:90 \
+00:1A:CA:00:58:98 00:1A:CA:00:58:50
+6            10.0.2.7   00:1A:CA:00:58:5C 10 16218 917 476 1 tilera_hv 1 \
+{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64","features":\
+["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh","700MHz-866MHz",\
+"4DDR2","2XAUIMAC/PHY","2GbEMAC"],"topology":{"cores":"64"}}
+7            10.0.2.8   00:1A:CA:00:58:A4 10 16218 917 476 1 tilera_hv 1 \
+{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64","features":\
+["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh","700MHz-866MHz",\
+"4DDR2","2XAUIMAC/PHY","2GbEMAC"],"topology":{"cores":"64"}}
+8            10.0.2.9   00:1A:CA:00:58:1A 10 16218 917 476 1 tilera_hv 1 \
+{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64","features":\
+["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh","700MHz-866MHz",\
+"4DDR2","2XAUIMAC/PHY","2GbEMAC"],"topology":{"cores":"64"}}
+9            10.0.2.10  00:1A:CA:00:58:38 10 16385 1000 0 0 tilera_hv 1 \
+{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64","features":\
+["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh","700MHz-866MHz",\
+"4DDR2","2XAUIMAC/PHY","2GbEMAC"],"topology":{"cores":"64"}}
+"""
+
+    def tearDown(self):
+        super(TileraBareMetalNodesTestCase, self).tearDown()
+
+        # Reset the singleton state
+        tilera.BareMetalNodes._instance = None
+        tilera.BareMetalNodes._is_init = False
+
+    def test_singleton(self):
+        """Confirm that the object acts like a singleton.
+
+        In this case, we check that it only loads the config file once,
+        even though it has been instantiated multiple times"""
+
+        try:
+            self.mox.StubOutWithMock(__builtin__, 'open')
+
+            open("/tftpboot/tilera_boards", "r").AndReturn(\
+                 StringIO.StringIO(self.board_info))
+
+            self.mox.ReplayAll()
+
+            nodes = tilera.BareMetalNodes("/tftpboot/tilera_boards")
+            nodes = tilera.BareMetalNodes("/tftpboot/tilera_boards")
+        finally:
+            self.mox.UnsetStubs()
+
+    def test_get_hw_info(self):
+        try:
+
+            self.mox.StubOutWithMock(__builtin__, 'open')
+
+            open("/tftpboot/tilera_boards", "r").AndReturn(\
+                StringIO.StringIO(self.board_info))
+
+            self.mox.ReplayAll()
+            nodes = tilera.BareMetalNodes()
+            self.assertEqual(nodes.get_hw_info('vcpus'), 10)
+        finally:
+            self.mox.UnsetStubs()
diff --git a/nova/virt/baremetal/__init__.py b/nova/virt/baremetal/__init__.py
new file mode 100644
index 0000000000..efe07d0df2
--- /dev/null
+++ b/nova/virt/baremetal/__init__.py
@@ -0,0 +1,15 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 University of Southern California
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
diff --git a/nova/virt/baremetal/dom.py b/nova/virt/baremetal/dom.py
new file mode 100644
index 0000000000..c2f07e72f7
--- /dev/null
+++ b/nova/virt/baremetal/dom.py
@@ -0,0 +1,268 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 University of Southern California
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from nova.compute import power_state
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.virt.baremetal import nodes
+
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger('nova.virt.baremetal.dom')
+
+
+def read_domains(fname):
+    try:
+        f = open(fname, 'r')
+        json = f.read()
+        f.close()
+        domains = utils.loads(json)
+        return domains
+    except IOError:
+        raise exception.NotFound()
+
+
+def write_domains(fname, domains):
+    json = utils.dumps(domains)
+    f = open(fname, 'w')
+    f.write(json)
+    f.close()
+
+
+class BareMetalDom(object):
+    """
+    BareMetalDom class handles fake domain for bare metal back ends.
+
+    This implements the singleton pattern.
+    """
+
+    _instance = None
+    _is_init = False
+
+    def __new__(cls, *args, **kwargs):
+        """
+        Returns the BareMetalDom singleton.
+        """
+        if not cls._instance or ('new' in kwargs and kwargs['new']):
+            cls._instance = super(BareMetalDom, cls).__new__(cls)
+        return cls._instance
+
+    def __init__(self,
+                 fake_dom_file="/tftpboot/test_fake_dom_file"):
+        """
+        Only call __init__ the first time object is instantiated.
+
+        Sets and Opens domain file: /tftpboot/test_fake_dom_file. Even though
+        nova-compute service is rebooted, this file should retain the
+        existing domains.
+        """
+        if self._is_init:
+            return
+        self._is_init = True
+
+        self.fake_dom_file = fake_dom_file
+        self.domains = []
+        self.fake_dom_nums = 0
+        self.baremetal_nodes = nodes.get_baremetal_nodes()
+
+        self._read_domain_from_file()
+
+    def _read_domain_from_file(self):
+        """
+        Reads the domains from a file.
+        """
+        try:
+            self.domains = read_domains(self.fake_dom_file)
+        except IOError:
+            dom = []
+            LOG.debug(_("No domains exist."))
+            return
+        msg = _("============= initial domains =========== : %s")
+        LOG.debug(msg % (self.domains))
+        for dom in self.domains[:]:
+            if dom['status'] == power_state.BUILDING:
+                LOG.debug(_("Building domain: to be removed"))
+                self.destroy_domain(dom['name'])
+                continue
+            elif dom['status'] != power_state.RUNNING:
+                LOG.debug(_("Not running domain: remove"))
+                self.domains.remove(dom)
+                continue
+            res = self.baremetal_nodes.set_status(dom['node_id'],
+                                    dom['status'])
+            if res > 0:
+                self.fake_dom_nums = self.fake_dom_nums + 1
+            else:
+                LOG.debug(_("domain running on an unknown node: discarded"))
+                self.domains.remove(dom)
+                continue
+
+        LOG.debug(_(self.domains))
+        self.store_domain()
+
+    def reboot_domain(self, name):
+        """
+        Finds domain and deactivates (power down) bare-metal node.
+
+        Activates the node again. In case of fail,
+        destroys the domain from domains list.
+        """
+        fd = self.find_domain(name)
+        if fd == []:
+            msg = _("No such domain (%s)")
+            raise exception.NotFound(msg % name)
+        node_ip = self.baremetal_nodes.get_ip_by_id(fd['node_id'])
+
+        try:
+            self.baremetal_nodes.deactivate_node(fd['node_id'])
+        except:
+            msg = _("Failed power down Bare-metal node %s")
+            raise exception.NotFound(msg % fd['node_id'])
+        self.change_domain_state(name, power_state.BUILDING)
+        try:
+            state = self.baremetal_nodes.activate_node(fd['node_id'],
+                node_ip, name, fd['mac_address'], fd['ip_address'])
+            self.change_domain_state(name, state)
+            return state
+        except:
+            LOG.debug(_("deactivate -> activate fails"))
+            self.destroy_domain(name)
+            raise
+
+    def destroy_domain(self, name):
+        """
+        Removes domain from domains list and deactivates node.
+        """
+        fd = self.find_domain(name)
+        if fd == []:
+            LOG.debug(_("destroy_domain: no such domain"))
+            msg = _("No such domain %s")
+            raise exception.NotFound(msg % name)
+
+        try:
+            self.baremetal_nodes.deactivate_node(fd['node_id'])
+
+            self.domains.remove(fd)
+            msg = _("Domains: %s")
+            LOG.debug(msg % (self.domains))
+            msg = _("Nodes: %s")
+            LOG.debug(msg % (self.baremetal_nodes.nodes))
+            self.store_domain()
+            msg = _("After storing domains: %s")
+            LOG.debug(msg % (self.domains))
+        except:
+            LOG.debug(_("deactivation/removing domain failed"))
+            raise
+
+    def create_domain(self, xml_dict, bpath):
+        """
+        Adds a domain to domains list and activates an idle bare-metal node.
+        """
+        LOG.debug(_("===== Domain is being created ====="))
+        fd = self.find_domain(xml_dict['name'])
+        if fd != []:
+            msg = _("Same domain name already exists")
+            raise exception.NotFound(msg)
+        LOG.debug(_("create_domain: before get_idle_node"))
+
+        node_id = self.baremetal_nodes.get_idle_node()
+        node_ip = self.baremetal_nodes.get_ip_by_id(node_id)
+
+        new_dom = {'node_id': node_id,
+                    'name': xml_dict['name'],
+                    'memory_kb': xml_dict['memory_kb'],
+                    'vcpus': xml_dict['vcpus'],
+                    'mac_address': xml_dict['mac_address'],
+                    'user_data': xml_dict['user_data'],
+                    'ip_address': xml_dict['ip_address'],
+                    'image_id': xml_dict['image_id'],
+                    'kernel_id': xml_dict['kernel_id'],
+                    'ramdisk_id': xml_dict['ramdisk_id'],
+                     'status': power_state.BUILDING}
+        self.domains.append(new_dom)
+        msg = _("Created new domain: %s")
+        LOG.debug(msg % (new_dom))
+        self.change_domain_state(new_dom['name'], power_state.BUILDING)
+
+        self.baremetal_nodes.set_image(bpath, node_id)
+
+        state = power_state.NOSTATE
+        try:
+            state = self.baremetal_nodes.activate_node(node_id,
+                node_ip, new_dom['name'], new_dom['mac_address'],
+                new_dom['ip_address'], new_dom['user_data'])
+            self.change_domain_state(new_dom['name'], state)
+        except:
+            self.domains.remove(new_dom)
+            self.baremetal_nodes.free_node(node_id)
+            LOG.debug(_("Failed to boot Bare-metal node %s"), node_id)
+        return state
+
+    def change_domain_state(self, name, state):
+        """
+        Changes domain state by the given state and updates domain file.
+        """
+        l = self.find_domain(name)
+        if l == []:
+            msg = _("No such domain exists")
+            raise exception.NotFound(msg)
+        i = self.domains.index(l)
+        self.domains[i]['status'] = state
+        LOG.debug(_("change_domain_state: to new state %s"), str(state))
+        self.store_domain()
+
+    def store_domain(self):
+        """
+        Stores fake domains to the file.
+        """
+        msg = _("Stored fake domains to the file: %s")
+        LOG.debug(msg % (self.domains))
+        write_domains(self.fake_dom_file, self.domains)
+
+    def find_domain(self, name):
+        """
+        Finds domain by the given name and returns the domain.
+        """
+        for item in self.domains:
+            if item['name'] == name:
+                return item
+        LOG.debug(_("domain does not exist"))
+        return []
+
+    def list_domains(self):
+        """
+        Returns the instance name from domains list.
+        """
+        if self.domains == []:
+            return []
+        return [x['name'] for x in self.domains]
+
+    def get_domain_info(self, instance_name):
+        """
+        Finds domain by the given instance_name and returns information.
+
+        For example, status, memory_kb, vcpus, etc.
+        """
+        domain = self.find_domain(instance_name)
+        if domain != []:
+            return [domain['status'], domain['memory_kb'],
+                    domain['memory_kb'],
+                    domain['vcpus'],
+                    100]
+        else:
+            return [power_state.NOSTATE, '', '', '', '']
diff --git a/nova/virt/baremetal/fake.py b/nova/virt/baremetal/fake.py
new file mode 100644
index 0000000000..c849104b3b
--- /dev/null
+++ b/nova/virt/baremetal/fake.py
@@ -0,0 +1,157 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 University of Southern California
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+
+def get_baremetal_nodes():
+    return BareMetalNodes()
+
+
+class BareMetalNodes(object):
+    """
+    This manages node information and implements singleton.
+
+    BareMetalNodes class handles machine architectures of interest to
+    technical computing users have either poor or non-existent support
+    for virtualization.
+    """
+
+    def get_hw_info(self, field):
+        """
+        Returns hardware information of bare-metal node by the given field.
+
+        Given field can be vcpus, memory_mb, local_gb, memory_mb_used,
+        local_gb_used, hypervisor_type, hypervisor_version, and cpu_info.
+        """
+        return "fake"
+
+    def set_status(self, node_id, status):
+        """
+        Sets status of the given node by the given status.
+
+        Returns 1 if the node is in the nodes list.
+        """
+        return True
+
+    def get_status(self):
+        """
+        Gets status of the given node.
+        """
+        pass
+
+    def get_idle_node(self):
+        """
+        Gets an idle node, sets the status as 1 (RUNNING) and Returns node ID.
+        """
+        return False
+
+    def get_ip_by_id(self, id):
+        """
+        Returns default IP address of the given node.
+        """
+        return "127.0.0.1"
+
+    def free_node(self, node_id):
+        """
+        Sets/frees status of the given node as 0 (IDLE).
+        """
+        return False
+
+    def power_mgr(self, node_id, mode):
+        """
+        Changes power state of the given node.
+
+        According to the mode (1-ON, 2-OFF, 3-REBOOT), power state can be
+        changed. /tftpboot/pdu_mgr script handles power management of
+        PDU (Power Distribution Unit).
+        """
+        pass
+
+    def deactivate_node(self, node_id):
+        """
+        Deactivates the given node by turning it off.
+        """
+        pass
+
+    def network_set(self, node_ip, mac_address, ip_address):
+        """
+        Sets network configuration based on the given ip and mac address.
+
+        User can access the bare-metal node using ssh.
+        """
+        pass
+
+    def iptables_set(self, node_ip, user_data):
+        """
+        Sets security setting (iptables:port) if needed.
+        """
+        pass
+
+    def check_activated(self, node_id, node_ip):
+        """
+        Checks whether the given node is activated or not.
+        """
+        pass
+
+    def vmlinux_set(self, node_id, mode):
+        """
+        Sets kernel into default path (/tftpboot) if needed.
+
+        From basepath to /tftpboot, kernel is set based on the given mode
+        such as 0-NoSet, 1-SetVmlinux, or 9-RemoveVmlinux.
+        """
+        pass
+
+    def sleep_mgr(self, time):
+        """
+        Sleeps until the node is activated.
+        """
+        pass
+
+    def ssh_set(self, node_ip):
+        """
+        Sets and Runs sshd in the node.
+        """
+        pass
+
+    def activate_node(self, node_id, node_ip, name, mac_address, \
+                      ip_address):
+        """
+        Activates the given node using ID, IP, and MAC address.
+        """
+        pass
+
+    def get_console_output(self, console_log):
+        """
+        Gets console output of the given node.
+        """
+        pass
+
+    def get_image(self, bp):
+        """
+        Gets the bare-metal file system image into the instance path.
+
+        Nothing to do for tilera nodes: actual image is used.
+        """
+        pass
+
+    def set_image(self, bpath, node_id):
+        """
+        Sets the PXE bare-metal file system from the instance path.
+
+        This should be done after ssh key is injected.
+        """
+        pass
diff --git a/nova/virt/baremetal/nodes.py b/nova/virt/baremetal/nodes.py
new file mode 100644
index 0000000000..5d1519f069
--- /dev/null
+++ b/nova/virt/baremetal/nodes.py
@@ -0,0 +1,42 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 University of Southern California
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+from nova.virt.baremetal import tilera
+from nova.virt.baremetal import fake
+from nova.openstack.common import cfg
+from nova import flags
+from nova import exception
+
FLAGS = flags.FLAGS

# Flag choosing which backend get_baremetal_nodes() returns
# ('tilera' or 'fake').
global_opts = [
    cfg.StrOpt('baremetal_driver',
               default='tilera',
               help='Bare-metal driver runs on')
    ]

FLAGS.add_options(global_opts)
+
+
def get_baremetal_nodes():
    """Return the BareMetalNodes backend named by the baremetal_driver flag.

    :returns: the tilera or fake backend's node manager.
    :raises: exception.Error if the flag names an unknown driver.
    """
    d = FLAGS.baremetal_driver
    if d == 'tilera':
        return tilera.get_baremetal_nodes()
    elif d == 'fake':
        return fake.get_baremetal_nodes()
    else:
        # Fix: the message was never interpolated, so the literal text
        # "%(d)s" appeared in the error instead of the driver name.
        raise exception.Error(_("Unknown baremetal driver %(d)s") % {'d': d})
diff --git a/nova/virt/baremetal/proxy.py b/nova/virt/baremetal/proxy.py
new file mode 100644
index 0000000000..78b5de7df0
--- /dev/null
+++ b/nova/virt/baremetal/proxy.py
@@ -0,0 +1,799 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 University of Southern California
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+"""
+A connection to a hypervisor through baremetal.
+
+**Related Flags**
+
+:baremetal_type:  Baremetal domain type.
+:baremetal_uri:  Override for the default baremetal URI (baremetal_type).
+:rescue_image_id:  Rescue ami image (default: ami-rescue).
+:rescue_kernel_id:  Rescue aki image (default: aki-rescue).
+:rescue_ramdisk_id:  Rescue ari image (default: ari-rescue).
+:injected_network_template:  Template file for injected network
+:allow_project_net_traffic:  Whether to allow in project network traffic
+
+"""
+
+import hashlib
+import os
+import shutil
+import time
+
+from nova import context as nova_context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.openstack.common import cfg
+from nova.compute import instance_types
+from nova.compute import power_state
+from nova.compute import vm_states
+from nova.virt import disk
+from nova.virt.disk import api as disk
+from nova.virt import driver
+from nova.virt.baremetal import nodes
+from nova.virt.baremetal import dom
+from nova.virt.libvirt import utils as libvirt_utils
+
+
# Cheetah's Template class, loaded lazily by _late_load_cheetah() so the
# library is only required when the baremetal driver is actually used.
Template = None

LOG = logging.getLogger('nova.virt.baremetal.proxy')

FLAGS = flags.FLAGS

# NOTE(review): this registers 'baremetal_injected_network_template', but
# _create_image() reads FLAGS.injected_network_template — confirm the
# un-prefixed flag is registered elsewhere, or the names are out of sync.
global_opts = [
    cfg.StrOpt('baremetal_injected_network_template',
                default=utils.abspath('virt/interfaces.template'),
                help='Template file for injected network'),
    cfg.StrOpt('baremetal_type',
                default='baremetal',
                help='baremetal domain type'),
    cfg.StrOpt('baremetal_uri',
                default='',
                help='Override the default baremetal URI'),
    cfg.BoolOpt('baremetal_allow_project_net_traffic',
                 default=True,
                 help='Whether to allow in project network traffic')
    ]

FLAGS.add_options(global_opts)
+
+
def get_connection(read_only):
    """Return a bare-metal ProxyConnection.

    Cheetah is imported lazily here (rather than at module import time)
    so environments that never use the baremetal driver — such as the
    unit tests — need not have it installed.
    """
    _late_load_cheetah()
    return ProxyConnection(read_only)
+
+
def _late_load_cheetah():
    """Import Cheetah's Template class on first use and cache it globally."""
    global Template
    if Template is None:
        # level=-1 selects the Python 2 implicit relative-then-absolute
        # import search; this call is not Python 3 compatible.
        t = __import__('Cheetah.Template', globals(), locals(),
                       ['Template'], -1)
        Template = t.Template
+
+
+class ProxyConnection(driver.ComputeDriver):
+
    def __init__(self, read_only):
        """Initialize the driver and resolve the bare-metal backend."""
        super(ProxyConnection, self).__init__()
        # Backend (tilera or fake) selected by the baremetal_driver flag.
        self.baremetal_nodes = nodes.get_baremetal_nodes()
        self._wrapped_conn = None
        self.read_only = read_only
        # Created lazily by the HostState property.
        self._host_state = None
+
    @property
    def HostState(self):
        """Lazily create and cache the HostState for this driver."""
        # NOTE(review): non-PEP8 property name kept — callers use it.
        if not self._host_state:
            self._host_state = HostState(self.read_only)
        return self._host_state
+
    def init_host(self, host):
        """No host initialization is required for bare metal."""
        pass
+
    def _get_connection(self):
        """Return the BareMetalDom used to manage domains."""
        # NOTE(review): a new BareMetalDom is constructed on *every*
        # access of self._conn; _wrapped_conn is overwritten rather than
        # reused — confirm this is intentional.
        self._wrapped_conn = dom.BareMetalDom()
        return self._wrapped_conn
    _conn = property(_get_connection)
+
    def get_pty_for_instance(self, instance_name):
        """Not supported by the bare-metal driver."""
        raise NotImplementedError()
+
    def list_instances(self):
        """Return the names of all known bare-metal domains."""
        return self._conn.list_domains()
+
+    def _map_to_instance_info(self, domain_name):
+        """Gets info from a virsh domain object into an InstanceInfo"""
+        (state, _max_mem, _mem, _num_cpu, _cpu_time) \
+            = self._conn.get_domain_info(domain_name)
+        name = domain_name
+        return driver.InstanceInfo(name, state)
+
+    def list_instances_detail(self):
+        infos = []
+        for domain_name in self._conn.list_domains():
+            info = self._map_to_instance_info(domain_name)
+            infos.append(info)
+        return infos
+
+    def destroy(self, instance, network_info, block_device_info=None,
+                cleanup=True):
+        timer = utils.LoopingCall(f=None)
+
+        while True:
+            try:
+                self._conn.destroy_domain(instance['name'])
+                break
+            except Exception as ex:
+                msg = (_("Error encountered when destroying instance "
+                        "'%(name)s': %(ex)s") %
+                        {"name": instance["name"], "ex": ex})
+                LOG.debug(msg)
+                break
+
+        if cleanup:
+            self._cleanup(instance)
+
+        return True
+
    def _cleanup(self, instance):
        """Remove the instance's working directory under instances_path."""
        target = os.path.join(FLAGS.instances_path, instance['name'])
        instance_name = instance['name']
        LOG.info(_('instance %(instance_name)s: deleting instance files'
                ' %(target)s') % locals())
        if FLAGS.baremetal_type == 'lxc':
            # NOTE(review): self.container is never assigned in this
            # class — the lxc path would raise AttributeError; confirm.
            disk.destroy_container(self.container)
        if os.path.exists(target):
            shutil.rmtree(target)
+
    @exception.wrap_exception
    def attach_volume(self, instance_name, device_path, mountpoint):
        """Volumes are not supported for bare metal; always raises."""
        raise exception.APIError("attach_volume not supported for baremetal.")
+
    @exception.wrap_exception
    def detach_volume(self, instance_name, mountpoint):
        """Volumes are not supported for bare metal; always raises."""
        raise exception.APIError("detach_volume not supported for baremetal.")
+
    @exception.wrap_exception
    def snapshot(self, instance, image_id):
        """Snapshots are not supported for bare metal; always raises."""
        raise exception.APIError("snapshot not supported for baremetal.")
+
    @exception.wrap_exception
    def reboot(self, instance):
        """Reboot the instance's domain, polling until it is RUNNING."""
        timer = utils.LoopingCall(f=None)

        def _wait_for_reboot():
            try:
                state = self._conn.reboot_domain(instance['name'])
                if state == power_state.RUNNING:
                    LOG.debug(_('instance %s: rebooted'), instance['name'])
                    timer.stop()
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit; `except Exception` would be safer.
            except:
                LOG.exception(_('_wait_for_reboot failed'))
                timer.stop()
        timer.f = _wait_for_reboot
        return timer.start(interval=0.5, now=True)
+
+    @exception.wrap_exception
+    def rescue(self, context, instance, network_info):
+        """Loads a VM using rescue images.
+
+        A rescue is normally performed when something goes wrong with the
+        primary images and data needs to be corrected/recovered. Rescuing
+        should not edit or over-ride the original image, only allow for
+        data recovery.
+
+        """
+        self.destroy(instance, False)
+
+        xml_dict = self.to_xml_dict(instance, rescue=True)
+        rescue_images = {'image_id': FLAGS.baremetal_rescue_image_id,
+                         'kernel_id': FLAGS.baremetal_rescue_kernel_id,
+                         'ramdisk_id': FLAGS.baremetal_rescue_ramdisk_id}
+        self._create_image(instance, '.rescue', rescue_images,
+                           network_info=network_info)
+
+        timer = utils.LoopingCall(f=None)
+
+        def _wait_for_rescue():
+            try:
+                state = self._conn.reboot_domain(instance['name'])
+                if state == power_state.RUNNING:
+                    LOG.debug(_('instance %s: rescued'), instance['name'])
+                    timer.stop()
+            except:
+                LOG.exception(_('_wait_for_rescue failed'))
+                timer.stop()
+        timer.f = _wait_for_reboot
+        return timer.start(interval=0.5, now=True)
+
    @exception.wrap_exception
    def unrescue(self, instance, network_info):
        """Reboot the VM which is being rescued back into primary images.

        Because reboot destroys and re-creates instances, unrescue should
        simply call reboot.

        """
        self.reboot(instance)
+
    def spawn(self, context, instance, image_meta, network_info,
              block_device_info=None):
        """Create images for *instance*, then boot its bare-metal domain."""
        LOG.debug(_("<============= spawn of baremetal =============>"))

        def basepath(fname='', suffix=''):
            # Path inside this instance's working directory.
            return os.path.join(FLAGS.instances_path,
                                instance['name'],
                                fname + suffix)
        bpath = basepath(suffix='')
        timer = utils.LoopingCall(f=None)

        # NOTE(review): network_info is passed positionally into the
        # `rescue` parameter of to_xml_dict().  This only works because
        # to_xml_dict() transposes the arguments *again* when calling
        # _prepare_xml_info() — the two transpositions cancel out.  Any
        # fix must change both call sites together.
        xml_dict = self.to_xml_dict(instance, network_info)
        self._create_image(context, instance, xml_dict,
            network_info=network_info,
            block_device_info=block_device_info)
        LOG.debug(_("instance %s: is building"), instance['name'])
        LOG.debug(_(xml_dict))

        def _wait_for_boot():
            try:
                LOG.debug(_("Key is injected but instance is not running yet"))
                db.instance_update(context, instance['id'],
                    {'vm_state': vm_states.BUILDING})
                state = self._conn.create_domain(xml_dict, bpath)
                if state == power_state.RUNNING:
                    LOG.debug(_('instance %s: booted'), instance['name'])
                    db.instance_update(context, instance['id'],
                            {'vm_state': vm_states.ACTIVE})
                    LOG.debug(_('~~~~~~ current state = %s ~~~~~~'), state)
                    LOG.debug(_("instance %s spawned successfully"),
                            instance['name'])
                else:
                    LOG.debug(_('instance %s:not booted'), instance['name'])
            # NOTE(review): `Exn` is unused and *every* exception is
            # reported as overcommitment — genuine failures are hidden.
            except Exception as Exn:
                LOG.debug(_("Bremetal assignment is overcommitted."))
                db.instance_update(context, instance['id'],
                           {'vm_state': vm_states.OVERCOMMIT,
                            'power_state': power_state.SUSPENDED})
            timer.stop()
        timer.f = _wait_for_boot

        return timer.start(interval=0.5, now=True)
+
+    def get_console_output(self, instance):
+        console_log = os.path.join(FLAGS.instances_path, instance['name'],
+                                   'console.log')
+
+        libvirt_utils.chown(console_log, os.getuid())
+
+        fd = self._conn.find_domain(instance['name'])
+
+        self.baremetal_nodes.get_console_output(console_log, fd['node_id'])
+
+        fpath = console_log
+
+        return libvirt_utils.load_file(fpath)
+
    @exception.wrap_exception
    def get_ajax_console(self, instance):
        """Not supported by the bare-metal driver."""
        raise NotImplementedError()
+
    @exception.wrap_exception
    def get_vnc_console(self, instance):
        """Not supported by the bare-metal driver."""
        raise NotImplementedError()
+
    @staticmethod
    def _cache_image(fn, target, fname, cow=False, *args, **kwargs):
        """Wrapper for a method that creates an image that caches the image.

        This wrapper will save the image into a common store and create a
        copy for use by the hypervisor.

        The underlying method should specify a kwarg of target representing
        where the image will be saved.

        fname is used as the filename of the base image.  The filename needs
        to be unique to a given image.

        If cow is True, it will make a CoW image instead of a copy.
        """
        if not os.path.exists(target):
            base_dir = os.path.join(FLAGS.instances_path, '_base')
            if not os.path.exists(base_dir):
                libvirt_utils.ensure_tree(base_dir)
            base = os.path.join(base_dir, fname)

            # Serialize fetches of the same base image across greenthreads
            # so the image is downloaded at most once.
            @utils.synchronized(fname)
            def call_if_not_exists(base, fn, *args, **kwargs):
                if not os.path.exists(base):
                    fn(target=base, *args, **kwargs)

            call_if_not_exists(base, fn, *args, **kwargs)

            if cow:
                libvirt_utils.create_cow_image(base, target)
            else:
                libvirt_utils.copy_image(base, target)
+
+    def _fetch_image(self, context, target, image_id, user_id, project_id,
+                     size=None):
+        """Grab image and optionally attempt to resize it"""
+        images.fetch_to_raw(context, image_id, target, user_id, project_id)
+
    def _create_image(self, context, inst, xml, suffix='',
                      disk_images=None, network_info=None,
                      block_device_info=None):
        """Fetch kernel/ramdisk/root images for *inst* and inject data.

        Creates the instance's working directory, downloads (or reuses a
        cached copy of) the kernel, ramdisk and root images, renders the
        injected-network template, and injects key/net/metadata into the
        root image.

        :param xml: the prepared xml_info dict (unused here beyond being
            part of the call contract — TODO confirm).
        :param suffix: appended to image file names; '.rescue' selects the
            small rescue sizing below.
        """
        if not suffix:
            suffix = ''

        # syntactic nicety
        def basepath(fname='', suffix=suffix):
            return os.path.join(FLAGS.instances_path,
                                inst['name'],
                                fname + suffix)

        # ensure directories exist and are writable
        # NOTE(review): 0777 is world-writable — confirm this is required
        # by the PXE/tftp workflow rather than a debugging leftover.
        libvirt_utils.ensure_tree(basepath(suffix=''))
        utils.execute('chmod', '0777', basepath(suffix=''))

        LOG.info(_('instance %s: Creating image'), inst['name'])

        if FLAGS.baremetal_type == 'lxc':
            container_dir = '%s/rootfs' % basepath(suffix='')
            libvirt_utils.ensure_tree(container_dir)

        # NOTE(vish): No need add the suffix to console.log
        # NOTE(review): 007 is an octal *permission* argument (----rwx);
        # confirm these bits are intended.
        libvirt_utils.write_to_file(basepath('console.log', ''), '', 007)

        if not disk_images:
            disk_images = {'image_id': inst['image_ref'],
                           'kernel_id': inst['kernel_id'],
                           'ramdisk_id': inst['ramdisk_id']}

        if disk_images['kernel_id']:
            fname = disk_images['kernel_id']
            self._cache_image(fn=libvirt_utils.fetch_image,
                              context=context,
                              target=basepath('kernel'),
                              fname=fname,
                              cow=False,
                              image_id=disk_images['kernel_id'],
                              user_id=inst['user_id'],
                              project_id=inst['project_id'])
            # A ramdisk is only fetched when a kernel is present.
            if disk_images['ramdisk_id']:
                fname = disk_images['ramdisk_id']
                self._cache_image(fn=libvirt_utils.fetch_image,
                                  context=context,
                                  target=basepath('ramdisk'),
                                  fname=fname,
                                  cow=False,
                                  image_id=disk_images['ramdisk_id'],
                                  user_id=inst['user_id'],
                                  project_id=inst['project_id'])

        # Cache key for the root image: image id hash plus a size suffix.
        root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
        size = inst['root_gb'] * 1024 * 1024 * 1024

        inst_type_id = inst['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)
        if inst_type['name'] == 'm1.tiny' or suffix == '.rescue':
            size = None
            root_fname += "_sm"
        else:
            root_fname += "_%d" % inst['root_gb']

        self._cache_image(fn=libvirt_utils.fetch_image,
                          context=context,
                          target=basepath('root'),
                          fname=root_fname,
                          cow=False,  # FLAGS.use_cow_images,
                          image_id=disk_images['image_id'],
                          user_id=inst['user_id'],
                          project_id=inst['project_id'],
                          size=size)

        # For now, we assume that if we're not using a kernel, we're using a
        # partitioned disk image where the target partition is the first
        # partition
        target_partition = None
        if not inst['kernel_id']:
            target_partition = "1"

        if FLAGS.baremetal_type == 'lxc':
            target_partition = None

        if inst['key_data']:
            key = str(inst['key_data'])
        else:
            key = None
        net = None

        nets = []
        # NOTE(review): reads FLAGS.injected_network_template although
        # this module registers 'baremetal_injected_network_template' —
        # confirm which flag is meant.  The file handle is also never
        # closed explicitly.
        ifc_template = open(FLAGS.injected_network_template).read()
        ifc_num = -1
        have_injected_networks = False
        admin_context = nova_context.get_admin_context()
        for (network_ref, mapping) in network_info:
            ifc_num += 1

            if not network_ref['injected']:
                continue

            have_injected_networks = True
            address = mapping['ips'][0]['ip']
            netmask = mapping['ips'][0]['netmask']
            address_v6 = None
            gateway_v6 = None
            netmask_v6 = None
            if FLAGS.use_ipv6:
                address_v6 = mapping['ip6s'][0]['ip']
                netmask_v6 = mapping['ip6s'][0]['netmask']
                gateway_v6 = mapping['gateway_v6']
            net_info = {'name': 'eth%d' % ifc_num,
                   'address': address,
                   'netmask': netmask,
                   'gateway': mapping['gateway'],
                   'broadcast': mapping['broadcast'],
                   'dns': ' '.join(mapping['dns']),
                   'address_v6': address_v6,
                   'gateway_v6': gateway_v6,
                   'netmask_v6': netmask_v6}
            nets.append(net_info)

        if have_injected_networks:
            # Render /etc/network/interfaces-style content via Cheetah.
            net = str(Template(ifc_template,
                               searchList=[{'interfaces': nets,
                                            'use_ipv6': FLAGS.use_ipv6}]))

        metadata = inst.get('metadata')
        if any((key, net, metadata)):
            inst_name = inst['name']

            injection_path = basepath('root')
            # NOTE(review): attribute access here vs. dict-style
            # inst['image_ref'] everywhere else — confirm inst supports
            # both.
            img_id = inst.image_ref
            disable_auto_fsck = True

            # Log which of key/net/metadata will be injected.  Looking
            # the names up via locals() is fragile; the loop only logs —
            # the single inject_data call below does the actual work.
            for injection in ('metadata', 'key', 'net'):
                if locals()[injection]:
                    LOG.info(_('instance %(inst_name)s: injecting '
                               '%(injection)s into image %(img_id)s'
                               % locals()))
            try:
                disk.inject_data(injection_path, key, net, metadata,
                                 partition=target_partition,
                                 use_cow=False,  # FLAGS.use_cow_images,
                                 disable_auto_fsck=disable_auto_fsck)

            except Exception as e:
                # This could be a windows image, or a vmdk format disk
                LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
                        ' data into image %(img_id)s (%(e)s)') % locals())
+
    def _prepare_xml_info(self, instance, network_info, rescue,
                          block_device_info=None):
        """Assemble the dict of template values describing *instance*.

        NOTE(review): the only caller, to_xml_dict(), passes arguments in
        the order (instance, rescue, network_info) — transposed relative
        to this signature.  See the note in spawn(): the two
        transpositions currently cancel out; fix both together.
        """
        # block_device_mapping = driver.block_device_info_get_mapping(
        #    block_device_info)
        # NOTE(review): `map` shadows the builtin and its count is never
        # used; the loop's only effect is leaving `mapping` bound to the
        # *last* network, which is read below.  If network_info is empty,
        # `mapping` is unbound and a NameError follows.
        map = 0
        for (network, mapping) in network_info:
            map += 1

        nics = []
        # FIXME(vish): stick this in db
        inst_type_id = instance['instance_type_id']
        inst_type = instance_types.get_instance_type(inst_type_id)

        driver_type = 'raw'

        xml_info = {'type': FLAGS.baremetal_type,
                    'name': instance['name'],
                    'basepath': os.path.join(FLAGS.instances_path,
                                             instance['name']),
                    'memory_kb': inst_type['memory_mb'] * 1024,
                    'vcpus': inst_type['vcpus'],
                    'rescue': rescue,
                    'driver_type': driver_type,
                    'nics': nics,
                    'ip_address': mapping['ips'][0]['ip'],
                    'mac_address': mapping['mac'],
                    'user_data': instance['user_data'],
                    'image_id': instance['image_ref'],
                    'kernel_id': instance['kernel_id'],
                    'ramdisk_id': instance['ramdisk_id']}

        if not rescue:
            if instance['kernel_id']:
                xml_info['kernel'] = xml_info['basepath'] + "/kernel"

            if instance['ramdisk_id']:
                xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"

            xml_info['disk'] = xml_info['basepath'] + "/disk"
        return xml_info
+
    def to_xml_dict(self, instance, rescue=False, network_info=None):
        """Return the dict of values describing *instance* for creation.

        NOTE(review): arguments are forwarded to _prepare_xml_info() as
        (instance, rescue, network_info), but that method is declared
        (instance, network_info, rescue) — transposed.  spawn()
        compensates by passing network_info in the rescue slot; any fix
        must change both call sites together.
        """
        LOG.debug(_('instance %s: starting toXML method'), instance['name'])
        xml_info = self._prepare_xml_info(instance, rescue, network_info)
        LOG.debug(_('instance %s: finished toXML method'), instance['name'])
        return xml_info
+
+    def get_info(self, instance_name):
+        """Retrieve information from baremetal for a specific instance name.
+
+        If a baremetal error is encountered during lookup, we might raise a
+        NotFound exception or Error exception depending on how severe the
+        baremetal error is.
+
+        """
+        (state, max_mem, mem, num_cpu, cpu_time) \
+                = self._conn.get_domain_info(instance_name)
+        return {'state': state,
+                'max_mem': max_mem,
+                'mem': mem,
+                'num_cpu': num_cpu,
+                'cpu_time': cpu_time}
+
    def _create_new_domain(self, persistent=True, launch_flags=0):
        """Not supported by the bare-metal driver."""
        raise NotImplementedError()
+
    def get_diagnostics(self, instance_name):
        """Diagnostics are not supported; always raises ApiError."""
        raise exception.ApiError(_("diagnostics are not supported "
                                   "for baremetal"))
+
    def get_disks(self, instance_name):
        """Not supported by the bare-metal driver."""
        raise NotImplementedError()
+
    def get_interfaces(self, instance_name):
        """Not supported by the bare-metal driver."""
        raise NotImplementedError()
+
    def get_vcpu_total(self):
        """Get vcpu number of physical computer.

        :returns: the number of cpu core.

        """

        # On certain platforms, this will raise a NotImplementedError.
        try:
            return self.baremetal_nodes.get_hw_info('vcpus')
        except NotImplementedError:
            LOG.warn(_("Cannot get the number of cpu, because this "
                       "function is not implemented for this platform. "
                       "This error can be safely ignored for now."))
            # NOTE(review): False where callers likely expect an int —
            # update_available_resource() stores this directly; confirm.
            return False
+
    def get_memory_mb_total(self):
        """Get the total memory size(MB) of physical computer.

        :returns: the total amount of memory(MB), as reported by the
            bare-metal backend's hardware info.

        """
        return self.baremetal_nodes.get_hw_info('memory_mb')
+
    def get_local_gb_total(self):
        """Get the total hdd size(GB) of physical computer.

        :returns:
            The total amount of HDD(GB).
            Note that this value shows a partition where
            NOVA-INST-DIR/instances mounts.

        """
        return self.baremetal_nodes.get_hw_info('local_gb')
+
+    def get_vcpu_used(self):
+        """ Get vcpu usage number of physical computer.
+
+        :returns: The total number of vcpu that currently used.
+
+        """
+
+        total = 0
+        for dom_id in self._conn.list_domains():
+            total += 1
+        return total
+
    def get_memory_mb_used(self):
        """Get the used memory size(MB) of physical computer.

        :returns: the total usage of memory(MB).

        """
        return self.baremetal_nodes.get_hw_info('memory_mb_used')
+
    def get_local_gb_used(self):
        """Get the used hdd size(GB) of physical computer.

        :returns:
           The total usage of HDD(GB).
           Note that this value shows a partition where
           NOVA-INST-DIR/instances mounts.

        """
        return self.baremetal_nodes.get_hw_info('local_gb_used')
+
    def get_hypervisor_type(self):
        """Get hypervisor type.

        :returns: hypervisor type reported by the bare-metal backend.

        """
        return self.baremetal_nodes.get_hw_info('hypervisor_type')
+
    def get_hypervisor_version(self):
        """Get hypervisor version.

        :returns: hypervisor version (ex. 12003)

        """
        return self.baremetal_nodes.get_hw_info('hypervisor_version')
+
    def get_cpu_info(self):
        """Get cpuinfo information.

        Returns the cpu information reported by the bare-metal backend's
        hardware info (the original docstring's mention of
        virConnect.getCapabilities does not match this implementation).

        :return: see above description

        """
        return self.baremetal_nodes.get_hw_info('cpu_info')
+
    def block_stats(self, instance_name, disk):
        """Not supported by the bare-metal driver."""
        raise NotImplementedError()
+
    def interface_stats(self, instance_name, interface):
        """Not supported by the bare-metal driver."""
        raise NotImplementedError()
+
+    def get_console_pool_info(self, console_type):
+        #TODO(mdragon): console proxy should be implemented for baremetal,
+        #               in case someone wants to use it.
+        #               For now return fake data.
+        return  {'address': '127.0.0.1',
+                 'username': 'fakeuser',
+                 'password': 'fakepassword'}
+
    def refresh_security_group_rules(self, security_group_id):
        """No-op: bare metal doesn't currently support security groups."""
        pass
+
    def refresh_security_group_members(self, security_group_id):
        """No-op: bare metal doesn't currently support security groups."""
        pass
+
+    def update_available_resource(self, ctxt, host):
+        """Updates compute manager resource info on ComputeNode table.
+
+        This method is called when nova-coompute launches, and
+        whenever admin executes "nova-manage service update_resource".
+
+        :param ctxt: security context
+        :param host: hostname that compute manager is currently running
+
+        """
+
+        try:
+            service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
+        except exception.NotFound:
+            raise exception.ComputeServiceUnavailable(host=host)
+
+        # Updating host information
+        dic = {'vcpus': self.get_vcpu_total(),
+               'memory_mb': self.get_memory_mb_total(),
+               'local_gb': self.get_local_gb_total(),
+               'vcpus_used': self.get_vcpu_used(),
+               'memory_mb_used': self.get_memory_mb_used(),
+               'local_gb_used': self.get_local_gb_used(),
+               'hypervisor_type': self.get_hypervisor_type(),
+               'hypervisor_version': self.get_hypervisor_version(),
+               'cpu_info': self.get_cpu_info(),
+               'cpu_arch': FLAGS.cpu_arch,
+               'xpu_arch': FLAGS.xpu_arch,
+               'xpus': FLAGS.xpus,
+               'xpu_info': FLAGS.xpu_info,
+               'net_arch': FLAGS.net_arch,
+               'net_info': FLAGS.net_info,
+               'net_mbps': FLAGS.net_mbps,
+               'service_id': service_ref['id']}
+
+        compute_node_ref = service_ref['compute_node']
+        LOG.info(_('#### RLK: cpu_arch = %s ') % FLAGS.cpu_arch)
+        if not compute_node_ref:
+            LOG.info(_('Compute_service record created for %s ') % host)
+            dic['service_id'] = service_ref['id']
+            db.compute_node_create(ctxt, dic)
+        else:
+            LOG.info(_('Compute_service record updated for %s ') % host)
+            db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
+
    def compare_cpu(self, cpu_info):
        """Not supported by the bare-metal driver."""
        raise NotImplementedError()
+
    def ensure_filtering_rules_for_instance(self, instance_ref,
                                            time=None):
        """Not supported by the bare-metal driver."""
        raise NotImplementedError()
+
    def live_migration(self, ctxt, instance_ref, dest,
                       post_method, recover_method):
        """Live migration is not supported by the bare-metal driver."""
        raise NotImplementedError()
+
    def unfilter_instance(self, instance_ref):
        """See comments of same method in firewall_driver."""
        # No firewall integration exists for bare metal; nothing to undo.
        pass
+
    def update_host_status(self):
        """Update the status info of the host, and return those values
            to the calling program."""
        return self.HostState.update_status()
+
    def get_host_stats(self, refresh=False):
        """Return the current state of the host. If 'refresh' is
           True, run the update first."""
        LOG.debug(_("Updating!"))
        return self.HostState.get_host_stats(refresh=refresh)
+
+
class HostState(object):
    """Manages information about the bare-metal host this compute
    node is running on.  (The original docstring said "XenServer host";
    apparently this class was adapted from another driver.)
    """

    def __init__(self, read_only):
        """Initialize and populate the stats cache immediately."""
        super(HostState, self).__init__()
        self.read_only = read_only
        # Cached stats dict, refreshed by update_status().
        self._stats = {}
        self.update_status()

    def get_host_stats(self, refresh=False):
        """Return the current state of the host. If 'refresh' is
        True, run the update first.
        """
        if refresh:
            self.update_status()
        return self._stats

    def update_status(self):
        """
        We can get host status information.
        """
        # NOTE(review): a fresh driver connection is built on every
        # refresh rather than reusing an existing one — confirm intended.
        LOG.debug(_("Updating host stats"))
        connection = get_connection(self.read_only)
        data = {}
        data["vcpus"] = connection.get_vcpu_total()
        data["vcpus_used"] = connection.get_vcpu_used()
        data["cpu_info"] = connection.get_cpu_info()
        data["cpu_arch"] = FLAGS.cpu_arch
        data["xpus"] = FLAGS.xpus
        data["xpu_arch"] = FLAGS.xpu_arch
        data["xpu_info"] = FLAGS.xpu_info
        data["net_arch"] = FLAGS.net_arch
        data["net_info"] = FLAGS.net_info
        data["net_mbps"] = FLAGS.net_mbps
        data["disk_total"] = connection.get_local_gb_total()
        data["disk_used"] = connection.get_local_gb_used()
        data["disk_available"] = data["disk_total"] - data["disk_used"]
        data["host_memory_total"] = connection.get_memory_mb_total()
        data["host_memory_free"] = data["host_memory_total"] - \
            connection.get_memory_mb_used()
        data["hypervisor_type"] = connection.get_hypervisor_type()
        data["hypervisor_version"] = connection.get_hypervisor_version()
        self._stats = data
diff --git a/nova/virt/baremetal/tilera.py b/nova/virt/baremetal/tilera.py
new file mode 100644
index 0000000000..7e31a1553b
--- /dev/null
+++ b/nova/virt/baremetal/tilera.py
@@ -0,0 +1,368 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 University of Southern California
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Tilera back-end for bare-metal compute node provisioning
+
+The details of this implementation are specific to ISI's testbed. This code
+is provided here as an example of how to implement a backend.
+"""
+
+import base64
+import os
+import subprocess
+import time
+
+from nova.compute import power_state
+from nova.openstack.common import cfg
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+
+FLAGS = flags.FLAGS
+
+global_opts = [
+    cfg.StrOpt('tile_monitor',
+               default='/usr/local/TileraMDE/bin/tile-monitor',
+               help='Tilera command line program for Bare-metal driver')
+    ]
+
+FLAGS.add_options(global_opts)
+
+LOG = logging.getLogger('nova.virt.tilera')
+
+
def get_baremetal_nodes():
    """Factory entry point for this back-end: return the
    BareMetalNodes singleton that manages the Tilera boards.
    """
    return BareMetalNodes()
+
+
class BareMetalNodes(object):
    """Singleton inventory of the Tilera bare-metal boards.

    Tracks per-node hardware information and power/provisioning state for
    machine architectures of interest to technical computing users that
    have either poor or non-existent support for virtualization, so they
    are managed as whole physical nodes instead.
    """

    _instance = None
    _is_init = False

    def __new__(cls, *args, **kwargs):
        """Return the BareMetalNodes singleton.

        Passing new=True forces a fresh instance to be created (replacing
        the cached singleton).
        """
        if not cls._instance or ('new' in kwargs and kwargs['new']):
            cls._instance = super(BareMetalNodes, cls).__new__(cls)
        return cls._instance

    def __init__(self, file_name="/tftpboot/tilera_boards"):
        """Load the bare-metal node list the first time only.

        Each non-comment line of the node list file describes one board:
        node ID, IP address, MAC address, vcpus, memory, hdd, hypervisor
        type/version, and cpu info; one dict per board is appended to
        self.nodes.

        :param file_name: path to the whitespace-separated board list
        """
        if self._is_init:
            return
        self._is_init = True

        self.nodes = []
        # Column indexes of the board-list file.
        self.BOARD_ID = 0
        self.IP_ADDR = 1
        self.MAC_ADDR = 2
        self.VCPUS = 3
        self.MEMORY_MB = 4
        self.LOCAL_GB = 5
        self.MEMORY_MB_USED = 6
        self.LOCAL_GB_USED = 7
        self.HYPERVISOR_TYPE = 8
        self.HYPERVISOR_VER = 9
        self.CPU_INFO = 10

        # 'with' guarantees the file is closed even on a malformed line.
        with open(file_name, "r") as fp:
            for line in fp:
                fields = line.split()
                # Skip blank lines (previously an IndexError) and comments.
                if not fields or fields[0].startswith('#'):
                    continue
                self.nodes.append(
                    {'node_id': int(fields[self.BOARD_ID]),
                     'ip_addr': fields[self.IP_ADDR],
                     'mac_addr': fields[self.MAC_ADDR],
                     'status': power_state.NOSTATE,
                     'vcpus': int(fields[self.VCPUS]),
                     'memory_mb': int(fields[self.MEMORY_MB]),
                     'local_gb': int(fields[self.LOCAL_GB]),
                     'memory_mb_used': int(fields[self.MEMORY_MB_USED]),
                     'local_gb_used': int(fields[self.LOCAL_GB_USED]),
                     'hypervisor_type': fields[self.HYPERVISOR_TYPE],
                     'hypervisor_version': int(fields[self.HYPERVISOR_VER]),
                     'cpu_info': fields[self.CPU_INFO]})

    def get_hw_info(self, field):
        """Return one hardware attribute of the representative node.

        Valid fields: vcpus, memory_mb, local_gb, memory_mb_used,
        local_gb_used, hypervisor_type, hypervisor_version, cpu_info.

        :returns: the value, or None for an unknown field or if the
            representative node is not listed.
        """
        # NOTE(review): node #9 is hard-coded as the board whose specs
        # represent the whole testbed -- confirm before reusing elsewhere.
        valid_fields = ('vcpus', 'memory_mb', 'local_gb', 'memory_mb_used',
                        'local_gb_used', 'hypervisor_type',
                        'hypervisor_version', 'cpu_info')
        for node in self.nodes:
            if node['node_id'] == 9 and field in valid_fields:
                return node[field]

    def set_status(self, node_id, status):
        """Set the status of the given node.

        :returns: True if the node is in the nodes list, else False.
        """
        for node in self.nodes:
            if node['node_id'] == node_id:
                node['status'] = status
                return True
        return False

    def get_status(self):
        """Get the status of a node (not implemented yet)."""
        pass

    def get_idle_node(self):
        """Pick an idle node, mark it 1 (RUNNING) and return its node ID.

        :raises: exception.NotFound when every node is busy.
        """
        for node in self.nodes:
            if node['status'] == 0:
                node['status'] = 1      # make status RUNNING
                return node['node_id']
        raise exception.NotFound("No free nodes available")

    def get_ip_by_id(self, id):
        """Return the default IP address of the given node, or None if
        the node is unknown.
        """
        for node in self.nodes:
            if node['node_id'] == id:
                return node['ip_addr']

    def free_node(self, node_id):
        """Mark the given node as 0 (IDLE) again."""
        LOG.debug(_("free_node...."))
        for node in self.nodes:
            # Node IDs are stored as ints but callers may pass a string;
            # normalize both sides.  (The original compared the int id
            # against str(node_id), which never matched, so nodes were
            # never actually freed.)
            if str(node['node_id']) == str(node_id):
                node['status'] = 0  # make status IDLE

    def power_mgr(self, node_id, mode):
        """Change the power state of the given node.

        According to the mode (1-ON, 2-OFF, 3-REBOOT), power state can be
        changed.  The /tftpboot/pdu_mgr script handles power management of
        the PDU (Power Distribution Unit).
        """
        # Boards below 5 hang off PDU 1 (outlets shifted by 5); the rest
        # are on PDU 2 with a 1:1 outlet mapping.
        if node_id < 5:
            pdu_num = 1
            pdu_outlet_num = node_id + 5
        else:
            pdu_num = 2
            pdu_outlet_num = node_id
        path1 = "10.0.100." + str(pdu_num)
        # NOTE(review): '>>' and 'pdu_output' are passed as plain argv
        # entries, not as a shell redirection -- pdu_mgr must interpret
        # them itself; confirm this is intended.
        utils.execute('/tftpboot/pdu_mgr', path1, str(pdu_outlet_num),
                      str(mode), '>>', 'pdu_output')

    def deactivate_node(self, node_id):
        """Deactivate the given node by turning it off.

        /tftpboot/fs_x directory is an NFS export of node#x and
        /tftpboot/root_x file is a file-system image of node#x.
        """
        node_ip = self.get_ip_by_id(node_id)
        LOG.debug(_("deactivate_node is called for "
                    "node_id = %(id)s node_ip = %(ip)s"),
                  {'id': str(node_id), 'ip': node_ip})
        for node in self.nodes:
            if node['node_id'] == node_id:
                LOG.debug(_("status of node is set to 0"))
                node['status'] = 0
        self.power_mgr(node_id, 2)
        self.sleep_mgr(5)
        pathx = "/tftpboot/root_" + str(node_id)
        utils.execute('sudo', '/usr/sbin/rpc.mountd')
        try:
            utils.execute('sudo', 'umount', '-f', pathx)
            utils.execute('sudo', 'rm', '-f', pathx)
        except Exception:
            # Best-effort teardown: a bare 'except:' also swallowed
            # KeyboardInterrupt/SystemExit here; narrow it.
            LOG.debug(_("rootfs is already removed"))

    def network_set(self, node_ip, mac_address, ip_address):
        """Configure the board's xgbe0 interface with the given MAC and
        IP address so the user can reach the node over ssh.
        """
        cmd = (FLAGS.tile_monitor +
               " --resume --net " + node_ip + " --run - " +
               "ifconfig xgbe0 hw ether " + mac_address +
               " - --wait --run - ifconfig xgbe0 " + ip_address +
               " - --wait --quit")
        # NOTE(review): shell=True with interpolated addresses is shell
        # injection prone if these values can ever come from users.
        subprocess.Popen(cmd, shell=True)
        self.sleep_mgr(5)

    def iptables_set(self, node_ip, user_data):
        """Apply the optional security rule (iptables:port) if needed.

        iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP
        The /tftpboot/iptables_rule script sets the iptables rule on the
        given node; user_data carries the base64-encoded allowed IP.
        """
        if user_data != '':
            open_ip = base64.b64decode(user_data)
            utils.execute('/tftpboot/iptables_rule', node_ip, open_ip)

    def check_activated(self, node_id, node_ip):
        """Ping the node and report whether it is activated.

        :returns: True if the node answers; False otherwise (the node is
            powered off again on failure).
        """
        LOG.debug(_("Before ping to the bare-metal node"))
        tile_output = "/tftpboot/tile_output_" + str(node_id)
        grep_cmd = ("ping -c1 " + node_ip + " | grep Unreachable > "
                    + tile_output)
        subprocess.Popen(grep_cmd, shell=True)
        self.sleep_mgr(5)

        # 'with' (instead of a never-closed handle shadowing the 'file'
        # builtin) guarantees the temp file is released before removal.
        with open(tile_output, "r") as fp:
            out_msg = fp.readline().find("Unreachable")
        utils.execute('sudo', 'rm', tile_output)
        # find() returned -1 -> "Unreachable" absent -> node answered.
        if out_msg == -1:
            LOG.debug("TILERA_BOARD_#" + str(node_id) + " " + node_ip
                      + " is ready")
            return True
        else:
            # str() is required: out_msg is an int, and the original
            # concatenation raised TypeError on this path.  _() is no
            # longer applied to these dynamically built messages --
            # gettext can never translate runtime-formatted strings.
            LOG.debug("TILERA_BOARD_#" + str(node_id) + " " + node_ip
                      + " is not ready, out_msg=" + str(out_msg))
            self.power_mgr(node_id, 2)
            return False

    def vmlinux_set(self, node_id, mode):
        """Set the kernel into the default path (/tftpboot) if needed.

        From basepath to /tftpboot, the kernel would be set based on the
        given mode (0-NoSet, 1-SetVmlinux, 9-RemoveVmlinux), but Tilera
        boards boot their kernel from CF, so this is a no-op.
        """
        LOG.debug("Nothing to do for tilera nodes: vmlinux is in CF")

    def sleep_mgr(self, time_in_seconds):
        """Block for time_in_seconds to give the node time to settle."""
        time.sleep(time_in_seconds)

    def ssh_set(self, node_ip):
        """Set up and run sshd on the node via tile-monitor."""
        cmd = (FLAGS.tile_monitor +
               " --resume --net " + node_ip + " --run - " +
               "/usr/sbin/sshd - --wait --quit")
        subprocess.Popen(cmd, shell=True)
        self.sleep_mgr(5)

    def activate_node(self, node_id, node_ip, name, mac_address,
                      ip_address, user_data):
        """Activate the given node using ID, IP, and MAC address.

        :returns: power_state.RUNNING on success
        :raises: exception.Error if the node fails to come up
        """
        LOG.debug(_("activate_node"))

        self.power_mgr(node_id, 2)
        self.power_mgr(node_id, 3)
        self.sleep_mgr(100)

        try:
            # The original discarded check_activated()'s boolean and kept
            # provisioning an unreachable (and just powered-off) board;
            # fail fast instead.
            if not self.check_activated(node_id, node_ip):
                raise exception.Error(_("Node is unknown error state."))
            self.network_set(node_ip, mac_address, ip_address)
            self.ssh_set(node_ip)
            self.iptables_set(node_ip, user_data)
            return power_state.RUNNING
        except Exception:
            self.deactivate_node(node_id)
            raise exception.Error(_("Node is unknown error state."))

    def get_console_output(self, console_log, node_id):
        """Copy the node's dmesg output into console_log."""
        node_ip = self.get_ip_by_id(node_id)
        log_path = "/tftpboot/log_" + str(node_id)
        kmsg_cmd = (FLAGS.tile_monitor +
                    " --resume --net " + node_ip +
                    " -- dmesg > " + log_path)
        subprocess.Popen(kmsg_cmd, shell=True)
        self.sleep_mgr(5)
        utils.execute('cp', log_path, console_log)

    def get_image(self, bp):
        """Get the bare-metal file-system image into the instance path.

        Nothing to build for tilera nodes: the actual image is used.
        """
        path_fs = "/tftpboot/tilera_fs"
        path_root = bp + "/root"
        utils.execute('cp', path_fs, path_root)

    def set_image(self, bpath, node_id):
        """Set the PXE bare-metal file system from the instance path.

        This should be done after the ssh key is injected.
        /tftpboot/fs_x directory is an NFS export of node#x;
        /tftpboot/root_x file is a file-system image of node#x.
        """
        path1 = bpath + "/root"
        pathx = "/tftpboot/root_" + str(node_id)
        path2 = "/tftpboot/fs_" + str(node_id)
        utils.execute('sudo', 'mv', path1, pathx)
        utils.execute('sudo', 'mount', '-o', 'loop', pathx, path2)