diff --git a/ceph-osd/templates/bin/osd/ceph-volume/_common.sh.tpl b/ceph-osd/templates/bin/osd/ceph-volume/_common.sh.tpl
index 4c5f72c50..cc8a51629 100644
--- a/ceph-osd/templates/bin/osd/ceph-volume/_common.sh.tpl
+++ b/ceph-osd/templates/bin/osd/ceph-volume/_common.sh.tpl
@@ -25,7 +25,16 @@ export PS4='+${BASH_SOURCE:+$(basename ${BASH_SOURCE}):${LINENO}:}${FUNCNAME:+${
 : "${OSD_JOURNAL_SIZE:=$(awk '/^osd_journal_size/{print $3}' ${CEPH_CONF}.template)}"
 : "${OSD_WEIGHT:=1.0}"
 
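+# Look up this node's "rack" label; "jq -r" prints "null" when the label is absent.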
+eval CRUSH_FAILURE_DOMAIN_NAME_FROM_NODE_LABEL=$(kubectl get node ${HOSTNAME} -o json | jq -r '.metadata.labels.rack')
 eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
+
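+# Fall back to failure_domain_name from storage.json when the node has no "rack" label.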
+if [ "${CRUSH_FAILURE_DOMAIN_NAME_FROM_NODE_LABEL}" == "null" ]; then
+  eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
+else
+  CRUSH_FAILURE_DOMAIN_NAME=${CRUSH_FAILURE_DOMAIN_NAME_FROM_NODE_LABEL}
+fi
+
-eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
 eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
 eval CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP=$(cat /etc/ceph/storage.json | jq '.failure_domain_by_hostname_map."'$HOSTNAME'"')
diff --git a/ceph-osd/templates/daemonset-osd.yaml b/ceph-osd/templates/daemonset-osd.yaml
index 5f1f221a6..2e3edd167 100644
--- a/ceph-osd/templates/daemonset-osd.yaml
+++ b/ceph-osd/templates/daemonset-osd.yaml
@@ -12,6 +12,42 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */}}
 
+{{- if .Values.manifests.daemonset_osd }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := (printf "%s" .Release.Name) }}
+{{ tuple . "osd" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+
+---
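+# The OSD init script reads the node's "rack" label via kubectl, so grant the
+# service account read access to node objects cluster-wide.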
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: {{ $serviceAccountName }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+      - list
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ $serviceAccountName }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ .Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: {{ $serviceAccountName }}
+  apiGroup: rbac.authorization.k8s.io
+{{- end }}
+
 {{- define "ceph.osd.daemonset" }}
 {{- $daemonset := index . 0 }}
 {{- $configMapName := index . 1 }}
@@ -460,7 +496,6 @@ spec:
 {{- $daemonset := .Values.daemonset.prefix_name }}
 {{- $configMapName := (printf "%s-%s" .Release.Name "etc") }}
 {{- $serviceAccountName := (printf "%s" .Release.Name) }}
-{{ tuple . "osd" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
 {{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include "ceph.osd.daemonset" | toString | fromYaml }}
 {{- $configmap_yaml := "ceph.osd.configmap.etc" }}
 {{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include "ceph.utils.osd_daemonset_overrides" }}
diff --git a/ceph-osd/values.yaml b/ceph-osd/values.yaml
index dd5cde5b1..a691d2ac3 100644
--- a/ceph-osd/values.yaml
+++ b/ceph-osd/values.yaml
@@ -209,6 +209,9 @@ conf:
     # rack_replicated_rule you would specify "rack" as the `failure_domain` to use.
     # `failure_domain`: Set the CRUSH bucket type for your OSD to reside in. See the supported CRUSH configuration
-    #  as listed here: Supported CRUSH configuration is listed here: http://docs.ceph.com/docs/nautilus/rados/operations/crush-map/
+    #  as listed here: http://docs.ceph.com/docs/nautilus/rados/operations/crush-map/
+    #  If `failure_domain` is set to "rack", the init script first checks the node for a "rack" label and uses its
+    #  value as the rack bucket name; if the node has no "rack" label, it falls back to the options below.
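+    #  Example (hypothetical node name): kubectl label node worker-1 rack=rack1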
     # `failure_domain_by_hostname`: Specify the portion of the hostname to use for your failure domain bucket name.
     # `failure_domain_by_hostname_map`: Explicit mapping of hostname to failure domain, as a simpler alternative to overrides.
     # `failure_domain_name`: Manually name the failure domain bucket name. This configuration option should only be used