diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py
index 7df64ce13a0..1d58c73d3a4 100644
--- a/cinder/backup/manager.py
+++ b/cinder/backup/manager.py
@@ -1068,7 +1068,9 @@ class BackupManager(manager.SchedulerDependentManager):
         if not is_snapshot:
             rpcapi.terminate_connection(ctxt, device, properties,
                                         force=force)
-            rpcapi.remove_export(ctxt, device)
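+            # Wait for remove_export() to finish so the volume is fully
+            # detached before a subsequent operation can start (bug 1920237).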
+            rpcapi.remove_export(ctxt, device, sync=True)
         else:
             rpcapi.terminate_connection_snapshot(ctxt, device,
                                                  properties, force=force)
diff --git a/cinder/volume/rpcapi.py b/cinder/volume/rpcapi.py
index e43a0d447f1..9b72583bca9 100644
--- a/cinder/volume/rpcapi.py
+++ b/cinder/volume/rpcapi.py
@@ -231,9 +231,14 @@ class VolumeAPI(rpc.RPCAPI):
         return cctxt.call(ctxt, 'terminate_connection', volume_id=volume['id'],
                           connector=connector, force=force)
 
-    def remove_export(self, ctxt, volume):
+    def remove_export(self, ctxt, volume, sync=False):
         cctxt = self._get_cctxt(volume.service_topic_queue)
-        cctxt.cast(ctxt, 'remove_export', volume_id=volume['id'])
+        if sync:
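+            # call() blocks until the volume service completes remove_export.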
+            cctxt.call(ctxt, 'remove_export', volume_id=volume.id)
+        else:
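+            # cast() returns without waiting (the previous, async behavior).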
+            cctxt.cast(ctxt, 'remove_export', volume_id=volume.id)
 
     def publish_service_capabilities(self, ctxt):
         cctxt = self._get_cctxt(fanout=True)
diff --git a/releasenotes/notes/bug-1920237-backup-remove-export-race-941e2ab1f056e54c.yaml b/releasenotes/notes/bug-1920237-backup-remove-export-race-941e2ab1f056e54c.yaml
new file mode 100644
index 00000000000..91cd552afbc
--- /dev/null
+++ b/releasenotes/notes/bug-1920237-backup-remove-export-race-941e2ab1f056e54c.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+  - |
+    `Bug #1920237 <https://bugs.launchpad.net/cinder/+bug/1920237>`_: The
+    backup manager called the volume service's remove_export() but did not
+    wait for it to complete when detaching a volume after a backup, so a
+    subsequent operation could start on the volume before it had fully
+    detached.  The backup manager now waits for remove_export() to finish.