diff --git a/cinder/locale/de/LC_MESSAGES/cinder.po b/cinder/locale/de/LC_MESSAGES/cinder.po index b9338217912..340842d253e 100644 --- a/cinder/locale/de/LC_MESSAGES/cinder.po +++ b/cinder/locale/de/LC_MESSAGES/cinder.po @@ -14,7 +14,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2020-01-07 21:20+0000\n" +"POT-Creation-Date: 2020-02-19 01:28+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -168,10 +168,6 @@ msgstr "%s ist kein Verzeichnis." msgid "%s is not installed" msgstr "%s ist nicht installiert." -#, python-format -msgid "%s is not installed." -msgstr "%s ist nicht installiert." - #, python-format msgid "%s is not set" msgstr "%s ist nicht festgelegt" @@ -2097,10 +2093,6 @@ msgstr "Fehler bei der Fibre Channel-Verbindungssteuerung: %(reason)s" msgid "File %(file_path)s could not be found." msgstr "Datei %(file_path)s wurde nicht gefunden." -#, python-format -msgid "File already exists at %s." -msgstr "Datei bereits vorhanden bei %s." - #, python-format msgid "File already exists at: %s" msgstr "Datei bereits vorhanden in: %s" @@ -2503,14 +2495,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "Ungültiges Replikationsziel: %(reason)s" -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Ungültige Spezifikation einer Virtuozzo-Speicherfreigabe: %r. Erforderlich: " -"[MDS1[,MDS2],...:/][:KENNWORT]." - #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "" @@ -2632,10 +2616,6 @@ msgstr "Ungültige Metadaten: %(reason)s" msgid "Invalid mount point base: %s" msgstr "Ungültige Mountpunktbasis: %s" -#, python-format -msgid "Invalid mount point base: %s." -msgstr "Ungültige Mountpunktbasis: %s." - #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "Ungültiger neuer snapCPG-Name für Typänderung. new_snap_cpg='%s'." @@ -3136,9 +3116,6 @@ msgstr "Keine eingehängten gemeinsam genutzten NFS-Laufwerke gefunden." msgid "No mounted SMBFS shares found." msgstr "Keine eingehängten gemeinsam genutzten SMBFS-Laufwerke gefunden." -msgid "No mounted Virtuozzo Storage shares found" -msgstr "Keine eingehängten gemeinsam genutzten Virtuozzo-Laufwerke gefunden." - msgid "No mounted shares found" msgstr "Keine eingehängten gemeinsam genutzten Laufwerke gefunden." @@ -3646,11 +3623,6 @@ msgstr "SMBFS-Konfigurationsdatei ist nicht in %(config)s vorhanden." msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "SMBFS-Konfigurationsdatei nicht definiert (smbfs_shares_config)." -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"SSH-Befehl fehlgeschlagen nach '%(total_attempts)r' Versuchen : '%(command)s'" - #, python-format msgid "SSH command injection detected: %(command)s" msgstr "SSH-Befehlsinjektion erkannt: %(command)s" @@ -4499,9 +4471,6 @@ msgstr "Unbekannte RemoteFS-Ausnahme" msgid "Unknown SMBFS exception." msgstr "Unbekannte SMBFS-Ausnahme" -msgid "Unknown Virtuozzo Storage exception" -msgstr "Unbekannte Virtuozzo-Speicherausnahmebedingung" - msgid "Unknown action" msgstr "Unbekannte Aktion" @@ -4765,11 +4734,6 @@ msgstr "Der Datenträger %s ist nicht in der Nexenta Store-Appliance vorhanden." msgid "Volume %s does not exist on the array." 
msgstr "Der Datenträger %s ist im Array nicht vorhanden. " -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "" -"Für Datenträger %s ist provider_location nicht angegeben, wird übersprungen." - #, python-format msgid "Volume %s doesn't exist on array." msgstr "Der Datenträger %s ist im Array nicht vorhanden. " @@ -4967,9 +4931,6 @@ msgstr "Datenträgergröße muss Vielfaches von 1 GB sein." msgid "Volume size must multiple of 1 GB." msgstr "Datenträgergröße muss Vielfaches von 1 GB sein." -msgid "Volume status must be 'available'." -msgstr "Datenträgerstatus muss 'available' sein." - msgid "Volume to Initiator Group mapping already exists" msgstr "" "Die Zuordnung von Datenträger zu Initiatorgruppe ist bereits vorhanden." @@ -5032,17 +4993,6 @@ msgstr "" "Anzahl Datenträger/Konto sowohl auf primären als auch auf sekundären " "SolidFire-Konten überschritten." -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"VzStorage-Konfiguration 'vzstorage_used_ratio' ist ungültig. Muss > 0 und <= " -"1,0 sein: %s." - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "VzStorage-Konfigurationsdatei ist nicht in %(config)s vorhanden." - #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "Warten auf Synchronisierung fehlgeschlagen. Ausführungsstatus: %s." diff --git a/cinder/locale/es/LC_MESSAGES/cinder.po b/cinder/locale/es/LC_MESSAGES/cinder.po index e33c666f89a..992d0006c67 100644 --- a/cinder/locale/es/LC_MESSAGES/cinder.po +++ b/cinder/locale/es/LC_MESSAGES/cinder.po @@ -12,7 +12,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2020-01-07 21:20+0000\n" +"POT-Creation-Date: 2020-02-19 01:28+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -163,10 +163,6 @@ msgstr "%s no es un directorio." msgid "%s is not installed" msgstr "%s no está instalado" -#, python-format -msgid "%s is not installed." -msgstr "%s no está instalado." - #, python-format msgid "%s is not set" msgstr "%s no está establecido" @@ -2057,10 +2053,6 @@ msgstr "Anomalía de control de conexión de canal de fibra: %(reason)s" msgid "File %(file_path)s could not be found." msgstr "No se ha podido encontrar el archivo %(file_path)s." -#, python-format -msgid "File already exists at %s." -msgstr "Ya existe el archivo en %s." - #, python-format msgid "File already exists at: %s" msgstr "El archivo ya existe en: %s" @@ -2423,14 +2415,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "Destino de replicación no válido: %(reason)s" -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Especificación de compartición de Virtuozzo Storage no válida: %r. Debe ser: " -"[MDS1[,MDS2],...:/][:PASSWORD]." - #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "" @@ -2542,10 +2526,6 @@ msgstr "Metadatos inválidos: %(reason)s" msgid "Invalid mount point base: %s" msgstr "Base de punto de montaje no válida: %s" -#, python-format -msgid "Invalid mount point base: %s." -msgstr "Base de punto de montaje no válida: %s." - #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." 
msgstr "" @@ -3048,9 +3028,6 @@ msgstr "No se han encontrado unidades compartidas NFS montadas" msgid "No mounted SMBFS shares found." msgstr "No se han encontrado unidades compartidas SMBFS montadas" -msgid "No mounted Virtuozzo Storage shares found" -msgstr "No se han encontrado unidades compartidas de Virtuozzo Storage" - msgid "No mounted shares found" msgstr "No se han encontrado unidades compartidas montadas" @@ -3544,12 +3521,6 @@ msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "" "El archivo de configuración SMBFS no se ha configurado (smbfs_shares_config)." -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"Se ha encontrado un error en el mandato SSH tras '%(total_attempts)r' " -"intentos: '%(command)s'" - #, python-format msgid "SSH command injection detected: %(command)s" msgstr "Se ha detectado inyección de mandato SSH: %(command)s" @@ -4383,9 +4354,6 @@ msgstr "Excepción de RemoteFS desconocida" msgid "Unknown SMBFS exception." msgstr "Excepción de SMBFS desconocida" -msgid "Unknown Virtuozzo Storage exception" -msgstr "Excepción desconocida de Virtuozzo Storage" - msgid "Unknown action" msgstr "Acción desconocida" @@ -4634,10 +4602,6 @@ msgstr "El volumen %s no existe en la aplicación Nexenta Store" msgid "Volume %s does not exist on the array." msgstr "El volumen %s no existe en la matriz." -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "El volumen %s no tiene especificado provider_location, se salta." - #, python-format msgid "Volume %s doesn't exist on array." msgstr "El volumen %s no existe en la matriz." @@ -4704,10 +4668,6 @@ msgstr "La vía de acceso de archivo de dispositivo de volumen %s no existe." msgid "Volume device not found at %(device)s." msgstr "Dispositivo de volumen no encontrado en: %(device)s" -#, python-format -msgid "Volume does not exists %s." -msgstr "El volumen %s no existe." - #, python-format msgid "Volume driver %s not initialized." msgstr "Controlador de volumen %s no inicializado." @@ -4829,9 +4789,6 @@ msgstr "El tamaño del volumen debe ser un múltiplo de 1 GB." msgid "Volume size must multiple of 1 GB." msgstr "El tamaño de volumen debe ser múltiplo de 1 GB." -msgid "Volume status must be 'available'." -msgstr "El estado de volumen debe ser 'disponible'." - msgid "Volume to Initiator Group mapping already exists" msgstr "El volumen para la correlación del grupo de iniciadores ya existe" @@ -4893,16 +4850,6 @@ msgstr "" "Se ha superado el número de volúmenes por cuenta en las cuentas de " "SolidFire, tanto primarias como secundarias." -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"VzStorage config 'vzstorage_used_ratio' no válido. Debe ser > 0 y <= 1.0: %s." - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "El archivo de config VzStorage en %(config)s no existe." - #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "Esperar sincronización ha fallado. Estado de ejecución: %s." 
diff --git a/cinder/locale/fr/LC_MESSAGES/cinder.po b/cinder/locale/fr/LC_MESSAGES/cinder.po index d69b32b8e87..26cc41a6ea9 100644 --- a/cinder/locale/fr/LC_MESSAGES/cinder.po +++ b/cinder/locale/fr/LC_MESSAGES/cinder.po @@ -13,7 +13,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2020-01-07 21:20+0000\n" +"POT-Creation-Date: 2020-02-19 01:28+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -160,10 +160,6 @@ msgstr "%s n'est pas un répertoire." msgid "%s is not installed" msgstr "%s n'est pas installé" -#, python-format -msgid "%s is not installed." -msgstr "%s n'est pas installé." - #, python-format msgid "%s is not set" msgstr "%s n'est pas défini" @@ -2046,10 +2042,6 @@ msgstr "Echec de contrôle de la connexion Fibre Channel : %(reason)s" msgid "File %(file_path)s could not be found." msgstr "Fichier %(file_path)s introuvable." -#, python-format -msgid "File already exists at %s." -msgstr "Le fichier existe déjà dans %s." - #, python-format msgid "File already exists at: %s" msgstr "Le fichier existe déjà dans : %s" @@ -2412,14 +2404,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "Cible de réplication non valide : %(reason)s" -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Spécification de partage Virtuozzo Storage non valide : %r. Doit être : " -"[MDS1[,MDS2],...:/][:MOT DE PASSE]." - #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "" @@ -2532,10 +2516,6 @@ msgstr "Métadonnée invalide : %(reason)s" msgid "Invalid mount point base: %s" msgstr "Base du point de montage non valide : %s" -#, python-format -msgid "Invalid mount point base: %s." -msgstr "Base de point de montage non valide : %s." - #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "Nouveau nom snapCPG non valide pour confirmation. new_snap_cpg='%s'." @@ -3012,9 +2992,6 @@ msgstr "Aucun partage NFS monté trouvé" msgid "No mounted SMBFS shares found." msgstr "Aucun partage SMBFS monté trouvé." -msgid "No mounted Virtuozzo Storage shares found" -msgstr "Aucun partage de stockage Virtuozzo monté n'a été trouvé" - msgid "No mounted shares found" msgstr "Aucun partage monté trouvé" @@ -3482,12 +3459,6 @@ msgstr "Le fichier de configuration SMBFS dans %(config)s n'existe pas." msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "Fichier de configuration SMBFS non défini (smbfs_shares_config)." -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"Echec de la commande SSH après '%(total_attempts)r' tentatives : " -"'%(command)s'" - #, python-format msgid "SSH command injection detected: %(command)s" msgstr "Injection de commande SSH détectée : %(command)s" @@ -4313,9 +4284,6 @@ msgstr "Exception RemoteFS inconnue" msgid "Unknown SMBFS exception." msgstr "Exception SMBFS inconnue." -msgid "Unknown Virtuozzo Storage exception" -msgstr "Exception Virtuozzo Storage inconnue" - msgid "Unknown action" msgstr "Action inconnu" @@ -4566,11 +4534,6 @@ msgstr "Le volume %s n'existe pas dans Nexenta Store Appliance" msgid "Volume %s does not exist on the array." msgstr "Le volume %s n'existe pas sur la matrice." -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." 
-msgstr "" -"provider_location n'a pas été spécifié pour le volume %s. Il sera ignoré." - #, python-format msgid "Volume %s doesn't exist on array." msgstr "Le volume %s n'existe pas sur la matrice." @@ -4760,9 +4723,6 @@ msgstr "La taille du volume doit être un multiple de 1 Go." msgid "Volume size must multiple of 1 GB." msgstr "La taille du volume doit être un multiple de 1 Go." -msgid "Volume status must be 'available'." -msgstr "L'état du volume doit être 'disponible'." - msgid "Volume to Initiator Group mapping already exists" msgstr "Le volume pour le mappage du groupe initiateur existe déjà" @@ -4823,17 +4783,6 @@ msgstr "" "Nombre de volumes/compte dépassé sur les comptes SolidFire principaux et " "secondaires." -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"La configuration VzStorage 'vzstorage_used_ratio' n'est pas valide. Doit " -"être > 0 et <= 1.0: %s." - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "Le fichier de configuration VzStorage %(config)s n'existe pas." - #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "Echec de l'attente de synchronisation. Statut d'exécution : %s." diff --git a/cinder/locale/it/LC_MESSAGES/cinder.po b/cinder/locale/it/LC_MESSAGES/cinder.po index 863283a1a1d..dd2e8c4818a 100644 --- a/cinder/locale/it/LC_MESSAGES/cinder.po +++ b/cinder/locale/it/LC_MESSAGES/cinder.po @@ -9,7 +9,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2020-01-07 21:20+0000\n" +"POT-Creation-Date: 2020-02-19 01:28+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -156,10 +156,6 @@ msgstr "%s non è una directory." msgid "%s is not installed" msgstr "%s non installato" -#, python-format -msgid "%s is not installed." -msgstr "%s non installato." - #, python-format msgid "%s is not set" msgstr "%s non è impostato" @@ -1997,10 +1993,6 @@ msgstr "Errore di controllo connessione di Fibre Channel: %(reason)s" msgid "File %(file_path)s could not be found." msgstr "Impossibile trovare il file %(file_path)s." -#, python-format -msgid "File already exists at %s." -msgstr "Il file già esiste in %s." - #, python-format msgid "File already exists at: %s" msgstr "Il file già esiste in: %s" @@ -2356,14 +2348,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "Destinazione di replica non valida: %(reason)s" -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Specifica di condivisione Virtuozzo Storage non valida: %r. Deve essere: " -"[MDS1[,MDS2],...:/][:PASSWORD]." - #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "" @@ -2475,10 +2459,6 @@ msgstr "Metadati non validi: %(reason)s" msgid "Invalid mount point base: %s" msgstr "Base del punto di montaggio non valida: %s" -#, python-format -msgid "Invalid mount point base: %s." -msgstr "Base del punto di montaggio non valida: %s" - #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "Nuovo nome snapCPG non valido per la riscrittura new_snap_cpg='%s'." @@ -2952,9 +2932,6 @@ msgstr "Non è stata trovata nessuna condivisione di NFS montata" msgid "No mounted SMBFS shares found." msgstr "Non è stata trovata nessuna condivisione SMBFS montata." 
-msgid "No mounted Virtuozzo Storage shares found" -msgstr "Non è stata trovata alcuna condivisione Virtuozzo Storage montata" - msgid "No mounted shares found" msgstr "Non è stata trovata nessuna condivisione montata" @@ -3418,11 +3395,6 @@ msgstr "Il file di configurazione SMBFS in %(config)s non esiste." msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "Il file di configurazione SMBFS non è impostato (smbfs_shares_config)." -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"Comando SSH non riuscito dopo '%(total_attempts)r' tentativi: '%(command)s'" - #, python-format msgid "SSH command injection detected: %(command)s" msgstr "Rilevato inserimento comando SSH: %(command)s" @@ -4233,9 +4205,6 @@ msgstr "Eccezione RemoteFS sconosciuta" msgid "Unknown SMBFS exception." msgstr "Eccezione SMBFS sconosciuta." -msgid "Unknown Virtuozzo Storage exception" -msgstr "Eccezione Virtuozzo Storage sconosciuta " - msgid "Unknown action" msgstr "Azione sconosciuta" @@ -4478,10 +4447,6 @@ msgstr "Il volume %s non esiste nell'applicazione Nexenta Store" msgid "Volume %s does not exist on the array." msgstr "Il volume %s non esiste su questo array. " -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "Sul volume %s non è specificato provider_location; ignorato." - #, python-format msgid "Volume %s doesn't exist on array." msgstr "Il volume %s non esiste nell'array." @@ -4673,9 +4638,6 @@ msgstr "La dimensione del volume deve essere un multiplo di 1 GB." msgid "Volume size must multiple of 1 GB." msgstr "La dimensione del volume deve essere un multiplo di 1 GB. " -msgid "Volume status must be 'available'." -msgstr "Lo stato del volume deve essere 'available'." - msgid "Volume to Initiator Group mapping already exists" msgstr "L'associazione del volume al gruppo iniziatori già esiste" @@ -4734,17 +4696,6 @@ msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "Volumi/account superati sugli account SolidFire primario e secondario." -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"Config VzStorage 'vzstorage_used_ratio' non valida, deve essere > 0 e <= " -"1.0: %s." - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "File config VzStorage in %(config)s non esiste." - #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "Sincronizzazione attesa non riuscita. Stato esecuzione: %s." diff --git a/cinder/locale/ja/LC_MESSAGES/cinder.po b/cinder/locale/ja/LC_MESSAGES/cinder.po index 58a928f13ae..9fde34b8199 100644 --- a/cinder/locale/ja/LC_MESSAGES/cinder.po +++ b/cinder/locale/ja/LC_MESSAGES/cinder.po @@ -13,7 +13,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2020-01-07 21:20+0000\n" +"POT-Creation-Date: 2020-02-19 01:28+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -187,10 +187,6 @@ msgstr "%s はディレクトリーではありません。" msgid "%s is not installed" msgstr "%s がインストールされていません。" -#, python-format -msgid "%s is not installed." 
-msgstr "%s がインストールされていません。" - #, python-format msgid "%s is not set" msgstr "%s が設定されていません" @@ -1659,11 +1655,6 @@ msgstr "ゾーン文字列の形成中に例外が発生しました: %s。" msgid "Exception: %s" msgstr "例外: %s" -#, python-format -msgid "Expected higher file exists for snapshot %s" -msgstr "" -"スナップショット %s には、上位のファイルが存在することが期待されています。" - #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "" @@ -2357,10 +2348,6 @@ msgstr "ファイバーチャネル接続制御障害: %(reason)s" msgid "File %(file_path)s could not be found." msgstr "ファイル %(file_path)s が見つかりませんでした。" -#, python-format -msgid "File already exists at %s." -msgstr "ファイルは %s に既に存在します。" - #, python-format msgid "File already exists at: %s" msgstr "ファイルは既に存在します: %s" @@ -2851,14 +2838,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "無効なレプリケーションターゲット: %(reason)s" -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Virtuozzo Storage のシェアの指定が無効です: %r。[MDS1[,MDS2],...:/][:PASSWORD] である必要があります。" - #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "" @@ -2978,10 +2957,6 @@ msgstr "メタデータが無効です: %(reason)s" msgid "Invalid mount point base: %s" msgstr "無効なマウントポイントベース: %s" -#, python-format -msgid "Invalid mount point base: %s." -msgstr "無効なマウントポイントベース: %s" - #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "新規 snapCPG 名がタイプ変更には無効です。new_snap_cpg='%s'。" @@ -3551,9 +3526,6 @@ msgstr "マウントされた NFS 共有が見つかりません" msgid "No mounted SMBFS shares found." msgstr "マウントされた SMBFS 共有が見つかりません。" -msgid "No mounted Virtuozzo Storage shares found" -msgstr "マウントされた Virtuozzo Storage 共有が見つかりません" - msgid "No mounted shares found" msgstr "マウントされた共有が見つかりません" @@ -4110,11 +4082,6 @@ msgstr "%(config)s の SMBFS 構成ファイルは存在しません。" msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "SMBFS 構成ファイルが設定されていません (smbfs_shares_config)。" -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"'%(total_attempts)r' 回の試行後に SSH コマンドが失敗しました: '%(command)s'" - #, python-format msgid "SSH command injection detected: %(command)s" msgstr "SSH コマンド注入が検出されました: %(command)s" @@ -5105,9 +5072,6 @@ msgstr "不明な RemoteFS 例外" msgid "Unknown SMBFS exception." msgstr "不明な SMBFS 例外。" -msgid "Unknown Virtuozzo Storage exception" -msgstr "Virtuozzo Storage で不明な例外が発生しました" - msgid "Unknown action" msgstr "不明なアクション" @@ -5175,10 +5139,6 @@ msgstr "" "スイッチ %s でサポートされないファームウェアです。スイッチでファームウェア " "v6.4 以上が実行されていることを確認してください" -#, python-format -msgid "Unsupported volume format %s" -msgstr "ボリューム形式はサポートされていません: %s " - #, python-format msgid "Unsupported volume format: %s " msgstr "ボリューム形式はサポートされていません: %s " @@ -5385,11 +5345,6 @@ msgstr "ボリューム %s は Nexenta Store アプライアンスに存在し msgid "Volume %s does not exist on the array." msgstr "ボリューム %s does はアレイに存在しません。" -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "" -"ボリューム %s で provider_location が指定されていません。スキップします。" - #, python-format msgid "Volume %s doesn't exist on array." msgstr "ボリューム %s がアレイに存在しません。" @@ -5458,10 +5413,6 @@ msgstr "ボリュームデバイスのファイルパス %s が存在しませ msgid "Volume device not found at %(device)s." msgstr "%(device)s でボリュームデバイスが見つかりません。" -#, python-format -msgid "Volume does not exists %s." -msgstr "ボリュームが存在しません: %s" - #, python-format msgid "Volume driver %s not initialized." 
msgstr "ボリュームドライバー %s が初期化されていません。" @@ -5606,15 +5557,6 @@ msgstr "ボリュームサイズは 1 GB の倍数である必要があります msgid "Volume size must multiple of 1 GB." msgstr "ボリュームサイズは 1 GB の倍数である必要があります" -msgid "Volume status must be 'available'." -msgstr "ボリュームの状態は「使用可能」でなければなりません。" - -#, python-format -msgid "Volume status must be available for snapshot %(id)s. (is %(status)s)" -msgstr "" -"スナップショット %(id)s に関しては、ボリュームの状態が「利用可能」でなければ" -"いけません。 (現在は %(status)s です)" - msgid "Volume to Initiator Group mapping already exists" msgstr "ボリュームからイニシエーターグループへのマッピングは既に存在します。" @@ -5674,17 +5616,6 @@ msgstr "" " プライマリーとセカンダリーの SolidFire アカウント上で、ボリュームとアカウン" "トの数量が超過しました。" -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"VzStorage の設定の 'vzstorage_used_ratio' が無効です。0 より大きく 1.0 以下で" -"ある必要があります: %s。" - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "%(config)s の VzStorage のコンフィグファイルが存在しません" - #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "同期の待機が失敗しました。実行状態: %s。" diff --git a/cinder/locale/ko_KR/LC_MESSAGES/cinder.po b/cinder/locale/ko_KR/LC_MESSAGES/cinder.po index c4b4b3a88fb..0405c06cec6 100644 --- a/cinder/locale/ko_KR/LC_MESSAGES/cinder.po +++ b/cinder/locale/ko_KR/LC_MESSAGES/cinder.po @@ -15,7 +15,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2020-01-07 21:20+0000\n" +"POT-Creation-Date: 2020-02-19 01:28+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -170,10 +170,6 @@ msgstr "%s는 유효한 로그 레벨이 아닙니다." msgid "%s is not installed" msgstr "%s이(가) 설치되지 않음" -#, python-format -msgid "%s is not installed." -msgstr "%s이(가) 설치되어 있지 않습니다. " - #, python-format msgid "%s is not set" msgstr "%s이(가) 설정되지 않았음" @@ -1977,10 +1973,6 @@ msgstr "파이버 채널 연결 제어 실패: %(reason)s" msgid "File %(file_path)s could not be found." msgstr "%(file_path)s 파일을 찾을 수 없습니다. " -#, python-format -msgid "File already exists at %s." -msgstr "%s에 파일이 이미 있습니다. " - #, python-format msgid "File already exists at: %s" msgstr "%s에 파일이 이미 있음" @@ -2378,14 +2370,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "올바르지 않은 복제 대상: %(reason)s" -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"올바르지 않은 Virtuozzo 스토리지 공유 스펙: %r. 다음이어야 함: [MDS1[," -"MDS2],...:/][:PASSWORD]." - #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "올바르지 않은 XtremIO 버전 %(cur)s, %(min)s 이상의 버전이 필요함" @@ -2503,10 +2487,6 @@ msgstr "잘못된 메타데이터: %(reason)s" msgid "Invalid mount point base: %s" msgstr "올바르지 않은 마운트 지점 기반: %s" -#, python-format -msgid "Invalid mount point base: %s." -msgstr "올바르지 않은 마운트 지점 기반: %s." - #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "" @@ -2969,9 +2949,6 @@ msgstr "마운트된 NFS 공유를 찾지 못함" msgid "No mounted SMBFS shares found." msgstr "마운트된 SMBFS 공유를 찾을 수 없습니다." -msgid "No mounted Virtuozzo Storage shares found" -msgstr "마운트된 Virtuozzo 스토리지 공유를 찾을 수 없습니다. " - msgid "No mounted shares found" msgstr "마운트된 공유를 찾을 수 없음" @@ -3438,12 +3415,6 @@ msgstr "SMBFS 구성 파일이 %(config)s에 없습니다." msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "SMBFS 구성 파일이 설정되지 않았습니다(smbfs_shares_config)." 
-#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"'%(command)s' 명령을 '%(total_attempts)r'번 시도한 후에 SSH 명령이 실패했습니" -"다. " - #, python-format msgid "SSH command injection detected: %(command)s" msgstr "SSH 명령 인젝션 발견됨: %(command)s" @@ -4221,9 +4192,6 @@ msgstr "알 수 없는 RemoteFS 예외" msgid "Unknown SMBFS exception." msgstr "알 수 없는 SMBFS 예외입니다." -msgid "Unknown Virtuozzo Storage exception" -msgstr "알 수 없는 Virtuozzo 스토리지 예외" - msgid "Unknown action" msgstr "알 수 없는 조치" @@ -4459,10 +4427,6 @@ msgstr "볼륨 %s이(가) Nexenta Store 어플라이언스에 없음" msgid "Volume %s does not exist on the array." msgstr "배열에 %s 볼륨이 없습니다." -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "%s 볼륨에 지정된 provider_location이 없습니다. 건너뜁니다." - #, python-format msgid "Volume %s doesn't exist on array." msgstr "배열에 %s 볼륨이 없습니다." @@ -4648,9 +4612,6 @@ msgstr "볼륨 크기는 1GB의 배수여야 합니다. " msgid "Volume size must multiple of 1 GB." msgstr "볼륨 크기는 1GB의 배수여야 합니다. " -msgid "Volume status must be 'available'." -msgstr "볼륨 상태가 '사용 가능'이어야 합니다. " - msgid "Volume to Initiator Group mapping already exists" msgstr "개시자 그룹에 대한 볼륨 맵핑이 이미 있음" @@ -4711,17 +4672,6 @@ msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "기본 및 보조 SolidFire 계정 모두에서 볼륨/계정이 초과되었습니다." -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"VzStorage 구성 'vzstorage_used_ratio'가 올바르지 않습니다. 0보다 크고 1.0 이" -"하여야 함: %s." - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "%(config)s의 VzStorage 구성 파일이 존재하지 않습니다. " - #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "동기화 대기 실패. 실행 상태: %s." diff --git a/cinder/locale/pt_BR/LC_MESSAGES/cinder.po b/cinder/locale/pt_BR/LC_MESSAGES/cinder.po index f2452f015e1..6684f43b1bf 100644 --- a/cinder/locale/pt_BR/LC_MESSAGES/cinder.po +++ b/cinder/locale/pt_BR/LC_MESSAGES/cinder.po @@ -12,7 +12,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2020-01-07 21:20+0000\n" +"POT-Creation-Date: 2020-02-19 01:28+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -157,10 +157,6 @@ msgstr "%s não é um diretório." msgid "%s is not installed" msgstr "%s não está instalado" -#, python-format -msgid "%s is not installed." -msgstr "%s não está instalado." - #, python-format msgid "%s is not set" msgstr "%s não está configurado" @@ -1971,10 +1967,6 @@ msgstr "Falha no controle de conexão Fibre Channel: %(reason)s" msgid "File %(file_path)s could not be found." msgstr "O arquivo %(file_path)s não pôde ser localizado." -#, python-format -msgid "File already exists at %s." -msgstr "O arquivo já existe em %s." - #, python-format msgid "File already exists at: %s" msgstr "O arquivo já existe em: %s" @@ -2326,14 +2318,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "Destino de Replicação Inválido: %(reason)s" -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Especificação de compartilhamento de armazenamento Virtuozzo inválido: %r. " -"Deve ser: [MDS1[,MDS2],...:/][:PASSWORD]." 
- #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "" @@ -2442,10 +2426,6 @@ msgstr "Metadados inválidos: %(reason)s" msgid "Invalid mount point base: %s" msgstr "Base de ponto de montagem inválido: %s" -#, python-format -msgid "Invalid mount point base: %s." -msgstr "Base de ponto de montagem inválida: %s." - #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "Novo nome do snapCPG inválido para nova digitação. new_snap_cpg='%s'." @@ -2920,10 +2900,6 @@ msgstr "Nenhum compartilhamento NFS montado foi localizado" msgid "No mounted SMBFS shares found." msgstr "Nenhum compartilhamento SMBFS montado foi localizado." -msgid "No mounted Virtuozzo Storage shares found" -msgstr "" -"Nenhum compartilhamento de armazenamento Virtuozzo montado foi localizado" - msgid "No mounted shares found" msgstr "Nenhum compartilhamento montado foi localizado" @@ -3384,10 +3360,6 @@ msgstr "O arquivo de configuração SMBFS em %(config)s não existe." msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "Arquivo de configuração SMBFS não definido (smbfs_shares_config)." -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "Comando SSH falhou após '%(total_attempts)r' tentativas: '%(command)s'" - #, python-format msgid "SSH command injection detected: %(command)s" msgstr "Injeção de comando SSH detectada: %(command)s" @@ -4195,9 +4167,6 @@ msgstr "Exceção RemoteFS desconhecida" msgid "Unknown SMBFS exception." msgstr "Exceção SMBFS desconhecida." -msgid "Unknown Virtuozzo Storage exception" -msgstr "Exceção de armazenamento Virtuozzo desconhecido" - msgid "Unknown action" msgstr "Ação desconhecida" @@ -4439,10 +4408,6 @@ msgstr "O volume %s não existe no dispositivo Nexenta Store." msgid "Volume %s does not exist on the array." msgstr "O volume %s não existe na matriz." -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "O volume %s não possui provider_location especificado, ignorando." - #, python-format msgid "Volume %s doesn't exist on array." msgstr "O volume %s não existe na matriz." @@ -4630,9 +4595,6 @@ msgstr "O tamanho do volume deve ser múltiplo de 1 GB." msgid "Volume size must multiple of 1 GB." msgstr "O tamanho do volume deve ser múltiplo de 1 GB." -msgid "Volume status must be 'available'." -msgstr "O status do volume deve ser 'disponível'." - msgid "Volume to Initiator Group mapping already exists" msgstr "Já existe um mapeamento de grupos de volume para inicializador" @@ -4692,17 +4654,6 @@ msgid "" msgstr "" "Os volumes/contas excederam nas contas SolidFire primárias e secundárias." -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"Configuração VzStorage 'vzstorage_used_ratio' inválida. Deve ser > 0 e <= " -"1.0: %s." - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "Arquivo de configuração VzStorage em %(config)s não existe." - #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "Falha ao aguardar sincronização. Status de execução: %s." 
diff --git a/cinder/locale/ru/LC_MESSAGES/cinder.po b/cinder/locale/ru/LC_MESSAGES/cinder.po index 4f8bb222ed9..d1c0b5007fd 100644 --- a/cinder/locale/ru/LC_MESSAGES/cinder.po +++ b/cinder/locale/ru/LC_MESSAGES/cinder.po @@ -11,7 +11,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2020-01-07 21:20+0000\n" +"POT-Creation-Date: 2020-02-19 01:28+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -159,10 +159,6 @@ msgstr "%s не является каталогом." msgid "%s is not installed" msgstr "%s не установлен" -#, python-format -msgid "%s is not installed." -msgstr "%s не установлен." - #, python-format msgid "%s is not set" msgstr "%s - не множество" @@ -1967,10 +1963,6 @@ msgstr "Сбой управления соединением Fibre Channel: %(re msgid "File %(file_path)s could not be found." msgstr "Файл %(file_path)s не может быть найден." -#, python-format -msgid "File already exists at %s." -msgstr "Файл уже существует в %s." - #, python-format msgid "File already exists at: %s" msgstr "Файл уже существует в %s" @@ -2321,14 +2313,6 @@ msgstr "" msgid "Invalid Replication Target: %(reason)s" msgstr "Недопустимый целевой объект репликации: %(reason)s" -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Недопустимая спецификация общего ресурса Virtuozzo Storage: %r. Должно быть: " -"[MDS1[,MDS2],...:/]<ИМЯ-КЛАСТЕРА>[:ПАРОЛЬ]." - #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "Недопустимая версия XtremIO %(cur)s, требуется версия не ниже %(min)s" @@ -2436,10 +2420,6 @@ msgstr "Недопустимые метаданные: %(reason)s" msgid "Invalid mount point base: %s" msgstr "Недопустимая база точки монтирования: %s" -#, python-format -msgid "Invalid mount point base: %s." -msgstr "Недопустимая база точки монтирования: %s." - #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "Недопустимое новое имя snapCPG для изменения типа. new_snap_cpg='%s'." @@ -2904,9 +2884,6 @@ msgstr "Не обнаружено смонтированных общих рес msgid "No mounted SMBFS shares found." msgstr "Не обнаружено смонтированных общих ресурсов SMBFS." -msgid "No mounted Virtuozzo Storage shares found" -msgstr "Не найдены смонтированные общие ресурсы Virtuozzo Storage" - msgid "No mounted shares found" msgstr "Не обнаружено смонтированных общих ресурсов" @@ -3360,11 +3337,6 @@ msgstr "Файл конфигурации SMBFS в %(config)s не сущест msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "Файл конфигурации SMBFS не указан (smbfs_shares_config)." -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"После '%(total_attempts)r' попыток не выполнена команда SSH: '%(command)s'" - #, python-format msgid "SSH command injection detected: %(command)s" msgstr "Обнаружено внедрение команды SSH: %(command)s" @@ -4153,9 +4125,6 @@ msgstr "Неизвестная исключительная ситуация в msgid "Unknown SMBFS exception." msgstr "Неизвестная исключительная ситуация в SMBFS." -msgid "Unknown Virtuozzo Storage exception" -msgstr "Неизвестная исключительная ситуация Virtuozzo Storage" - msgid "Unknown action" msgstr "Неизвестное действие" @@ -4393,10 +4362,6 @@ msgstr "Том %s не существует в устройстве Nexenta Stor msgid "Volume %s does not exist on the array." msgstr "Том %s не существует в массиве." 
-#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "Для тома %s не указан параметр provider_location - пропущено." - #, python-format msgid "Volume %s doesn't exist on array." msgstr "Том %s не существует в массиве." @@ -4581,9 +4546,6 @@ msgstr "Размер тома должен быть кратным 1 ГБ." msgid "Volume size must multiple of 1 GB." msgstr "Размер тома должен быть кратен 1 ГБ." -msgid "Volume status must be 'available'." -msgstr "Состояние тома должно быть available." - msgid "Volume to Initiator Group mapping already exists" msgstr "Связь тома с группой инициаторов уже существует" @@ -4643,17 +4605,6 @@ msgstr "" "Число томов превышено и для основной, и для вторичной учетной записи " "SolidFire." -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"Конфигурация VzStorage 'vzstorage_used_ratio' недопустима. Значение должно " -"быть больше 0 и не больше 1,0: %s." - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "Файл конфигурации VzStorage в %(config)s не существует." - #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "Ошибка ожидания синхронизации. Состояние выполнения: %s." diff --git a/cinder/locale/zh_CN/LC_MESSAGES/cinder.po b/cinder/locale/zh_CN/LC_MESSAGES/cinder.po index 23be39ca7a7..4b3cc62731a 100644 --- a/cinder/locale/zh_CN/LC_MESSAGES/cinder.po +++ b/cinder/locale/zh_CN/LC_MESSAGES/cinder.po @@ -19,7 +19,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2020-01-07 21:20+0000\n" +"POT-Creation-Date: 2020-02-19 01:28+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -177,10 +177,6 @@ msgstr "%s 不是一个目录。" msgid "%s is not installed" msgstr "未安装 %s" -#, python-format -msgid "%s is not installed." -msgstr "未安装 %s。" - #, python-format msgid "%s is not set" msgstr "未设置 %s " @@ -2068,10 +2064,6 @@ msgstr "光纤通道连接控制失败:%(reason)s" msgid "File %(file_path)s could not be found." msgstr "找不到文件 %(file_path)s。" -#, python-format -msgid "File already exists at %s." -msgstr "%s 处已存在文件。" - #, python-format msgid "File already exists at: %s" msgstr "在以下位置处,已存在文件:%s" @@ -2518,14 +2510,6 @@ msgstr "获取卷 %s 的 QoS 策略时,检测到无效 QoS 规范" msgid "Invalid Replication Target: %(reason)s" msgstr "无效复制目标:%(reason)s" -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"无效 Virtuozzo 存储器共享规范:%r。必须为 [MDS1[,MDS2],...:/][:" -"PASSWORD]。" - #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "XtremIO V%(cur)s 无效,需要 V%(min)s 或更高版本" @@ -2642,10 +2626,6 @@ msgstr "元数据无效: %(reason)s" msgid "Invalid mount point base: %s" msgstr "安装点基准无效:%s" -#, python-format -msgid "Invalid mount point base: %s." -msgstr "安装点基准无效:%s。" - #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "新 snapCPG 名称对执行 retype 操作无效。new_snap_cpg='%s'。" @@ -3143,9 +3123,6 @@ msgstr "找不到任何已安装的 NFS 共享项" msgid "No mounted SMBFS shares found." msgstr "找不到任何已安装的 SMBFS 共享项。" -msgid "No mounted Virtuozzo Storage shares found" -msgstr "找不到任何已安装的 Virtuozzo 存储器共享项" - msgid "No mounted shares found" msgstr "找不到任何已安装的共享项" @@ -3603,10 +3580,6 @@ msgstr "%(config)s 处不存在 SMBFS 配置文件。" msgid "SMBFS config file not set (smbfs_shares_config)." 
msgstr "SMBFS 配置文件未设置 (smbfs_shares_config)。" -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "SSH 命令在“%(total_attempts)r”之后失败,尝试次数:“%(command)s”" - #, python-format msgid "SSH command injection detected: %(command)s" msgstr "检测到 SSH 命令注入:%(command)s" @@ -4411,9 +4384,6 @@ msgstr "RemoteFS 异常未知" msgid "Unknown SMBFS exception." msgstr "SMBFS 异常未知。" -msgid "Unknown Virtuozzo Storage exception" -msgstr "未知 Virtuozzo 存储器异常" - msgid "Unknown action" msgstr "操作未知" @@ -4476,10 +4446,6 @@ msgid "" msgstr "" "在交换机 %s 上存在不受支持的固件。请确保交换机正在运行固件 V6.4 或更高版本" -#, python-format -msgid "Unsupported volume format %s" -msgstr "不支持的卷格式:%s" - #, python-format msgid "Unsupported volume format: %s " msgstr "以下卷格式不受支持:%s " @@ -4663,10 +4629,6 @@ msgstr "卷 %s 在 Nexenta 存储设备中不存在" msgid "Volume %s does not exist on the array." msgstr "卷 %s 在阵列上不存在。" -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "卷 %s 没有指定 provider_location,正在跳过。" - #, python-format msgid "Volume %s doesn't exist on array." msgstr "卷 %s 在阵列上不存在。" @@ -4850,13 +4812,6 @@ msgstr "卷大小必须为 1 GB 的倍数。" msgid "Volume size must multiple of 1 GB." msgstr "卷大小必须是 1 GB 的倍数。" -msgid "Volume status must be 'available'." -msgstr "卷状态必须为“可用”。" - -#, python-format -msgid "Volume status must be available for snapshot %(id)s. (is %(status)s)" -msgstr "对于快照 %(id)s,卷状态必须为“available”。(卷状态现在为 %(status)s)" - msgid "Volume to Initiator Group mapping already exists" msgstr "卷至发起方组的映射已存在" @@ -4908,16 +4863,6 @@ msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "卷/帐户同时超出主 SolidFire 帐户和辅助 SolidFire 帐户的限制。" -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"VzStorage 配置“vzstorage_used_ratio”无效。必须大于 0 并且小于或等于 1.0:%s。" - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "%(config)s 处不存在 VzStorage 配置文件。" - #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "等待同步失败。运行状态:%s。" diff --git a/cinder/locale/zh_TW/LC_MESSAGES/cinder.po b/cinder/locale/zh_TW/LC_MESSAGES/cinder.po index 97736499fdd..298bce302ab 100644 --- a/cinder/locale/zh_TW/LC_MESSAGES/cinder.po +++ b/cinder/locale/zh_TW/LC_MESSAGES/cinder.po @@ -9,7 +9,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2020-01-07 21:20+0000\n" +"POT-Creation-Date: 2020-02-19 01:28+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -147,10 +147,6 @@ msgstr "%s 不是目錄。" msgid "%s is not installed" msgstr "%s 未安裝" -#, python-format -msgid "%s is not installed." -msgstr "未安裝 %s。" - #, python-format msgid "%s is not set" msgstr "未設定 %s" @@ -1828,10 +1824,6 @@ msgstr "「光纖通道」連線控制失敗:%(reason)s" msgid "File %(file_path)s could not be found." msgstr "找不到檔案 %(file_path)s。" -#, python-format -msgid "File already exists at %s." -msgstr "%s 處已存在檔案。" - #, python-format msgid "File already exists at: %s" msgstr "%s 處已存在檔案" @@ -2167,14 +2159,6 @@ msgstr "取得磁區 %s 的服務品質原則時,偵測到無效的服務品 msgid "Invalid Replication Target: %(reason)s" msgstr "無效的抄寫目標:%(reason)s" -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." 
-msgstr "" -"Virtuozzo 儲存體共用項目規格無效:%r。必須是:[MDS1[,MDS2],...:/][:PASSWORD]。" - #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "XtremIO %(cur)s 版無效,需要 %(min)s 版或更高版本" @@ -2279,10 +2263,6 @@ msgstr "無效的 meta 資料:%(reason)s" msgid "Invalid mount point base: %s" msgstr "無效的裝載點基本程式:%s" -#, python-format -msgid "Invalid mount point base: %s." -msgstr "無效的裝載點基本程式:%s。" - #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "新 snapCPG 名稱無效,無法執行 Retype 動作。new_snap_cpg='%s'。" @@ -2711,9 +2691,6 @@ msgstr "找不到已裝載的 NFS 共用" msgid "No mounted SMBFS shares found." msgstr "找不到已裝載的 SMBFS 共用項目。" -msgid "No mounted Virtuozzo Storage shares found" -msgstr "找不到已裝載的 Virtuozzo 儲存體共用項目" - msgid "No mounted shares found" msgstr "找不到已裝載的共用項目" @@ -3149,10 +3126,6 @@ msgstr "%(config)s 處的 SMBFS 配置檔不存在。" msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "未設定 SMBFS 配置檔 (smbfs_shares_config)。" -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "嘗試 '%(total_attempts)r' 次之後 SSH 指令仍失敗:'%(command)s'" - #, python-format msgid "SSH command injection detected: %(command)s" msgstr "偵測到 SSH 指令注入:%(command)s" @@ -3875,9 +3848,6 @@ msgstr "不明的 RemoteFS 異常狀況" msgid "Unknown SMBFS exception." msgstr "不明的 SMBFS 異常狀況。" -msgid "Unknown Virtuozzo Storage exception" -msgstr "「不明 Virtuozzo 儲存體」異常狀況" - msgid "Unknown action" msgstr "不明動作" @@ -4098,10 +4068,6 @@ msgstr "磁區 %s 不存在於「Nexenta 儲存庫」軟體驅動裝置中" msgid "Volume %s does not exist on the array." msgstr "磁區 %s 不在陣列上。" -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "沒有為磁區 %s 指定 provider_location,正在跳過。" - #, python-format msgid "Volume %s doesn't exist on array." msgstr "磁區 %s 不在陣列上。" @@ -4274,9 +4240,6 @@ msgstr "磁區大小必須是 1 GB 的倍數。" msgid "Volume size must multiple of 1 GB." msgstr "磁區大小必須是 1 GB 的倍數。" -msgid "Volume status must be 'available'." -msgstr "磁區狀態必須為「可用」。" - msgid "Volume to Initiator Group mapping already exists" msgstr "磁區至起始器群組的對映已存在" @@ -4328,16 +4291,6 @@ msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "超過了主要及次要 SolidFire 帳戶上的磁區/帳戶。" -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"VzStorage 配置 'vzstorage_used_ratio' 無效。必須大於 0 且小於或等於 1.0:%s。" - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "%(config)s 處的 VzStorage 配置檔不存在。" - #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "等待同步失敗。執行中狀態:%s。" diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po new file mode 100644 index 00000000000..3951a726288 --- /dev/null +++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po @@ -0,0 +1,6570 @@ +# Andi Chandler , 2017. #zanata +# Andi Chandler , 2018. #zanata +# Andi Chandler , 2019. 
#zanata +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2020-02-19 01:24+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"PO-Revision-Date: 2019-12-22 08:11+0000\n" +"Last-Translator: Andi Chandler \n" +"Language-Team: English (United Kingdom)\n" +"Language: en_GB\n" +"X-Generator: Zanata 4.3.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" + +msgid "" +"\"volume_extension:types_extra_specs:create\": \"rule:admin or rule:" +"type_admin\", \"volume_extension:types_extra_specs:delete\": \"rule:admin or " +"rule:type_admin\", \"volume_extension:types_extra_specs:index\": \"\", " +"\"volume_extension:types_extra_specs:show\": \"rule:admin or rule:type_admin " +"or rule:type_viewer\", \"volume_extension:types_extra_specs:update\": \"rule:" +"admin or rule:type_admin\"" +msgstr "" +"\"volume_extension:types_extra_specs:create\": \"rule:admin or rule:" +"type_admin\", \"volume_extension:types_extra_specs:delete\": \"rule:admin or " +"rule:type_admin\", \"volume_extension:types_extra_specs:index\": \"\", " +"\"volume_extension:types_extra_specs:show\": \"rule:admin or rule:type_admin " +"or rule:type_viewer\", \"volume_extension:types_extra_specs:update\": \"rule:" +"admin or rule:type_admin\"" + +msgid "10.0.0" +msgstr "10.0.0" + +msgid "10.0.1" +msgstr "10.0.1" + +msgid "10.0.3" +msgstr "10.0.3" + +msgid "10.0.4" +msgstr "10.0.4" + +msgid "10.0.5" +msgstr "10.0.5" + +msgid "10.0.7" +msgstr "10.0.7" + +msgid "10.0.8" +msgstr "10.0.8" + +msgid "10.0.8-20" +msgstr "10.0.8-20" + +msgid "11.0.0" +msgstr "11.0.0" + +msgid "11.0.1" +msgstr "11.0.1" + +msgid "11.0.2" +msgstr "11.0.2" + +msgid "11.1.1" +msgstr "11.1.1" + +msgid "11.2.0" +msgstr "11.2.0" + +msgid "11.2.1" +msgstr "11.2.1" + +msgid "11.2.2" +msgstr "11.2.2" + +msgid "11.2.2-15" +msgstr "11.2.2-15" + +msgid "12.0.0" +msgstr "12.0.0" + +msgid "12.0.1" +msgstr "12.0.1" + +msgid "12.0.10" +msgstr "12.0.10" + +msgid "12.0.2" +msgstr "12.0.2" + +msgid "12.0.3" +msgstr "12.0.3" + +msgid "12.0.4" +msgstr "12.0.4" + +msgid "12.0.5" +msgstr "12.0.5" + +msgid "12.0.6" +msgstr "12.0.6" + +msgid "12.0.7" +msgstr "12.0.7" + +msgid "12.0.8" +msgstr "12.0.8" + +msgid "13.0.0" +msgstr "13.0.0" + +msgid "13.0.1" +msgstr "13.0.1" + +msgid "13.0.2" +msgstr "13.0.2" + +msgid "13.0.3" +msgstr "13.0.3" + +msgid "13.0.4" +msgstr "13.0.4" + +msgid "13.0.6" +msgstr "13.0.6" + +msgid "13.0.7" +msgstr "13.0.7" + +msgid "14.0.0" +msgstr "14.0.0" + +msgid "14.0.1" +msgstr "14.0.1" + +msgid "14.0.2" +msgstr "14.0.2" + +msgid "15.0.0" +msgstr "15.0.0" + +msgid "" +"3PAR driver creates FC VLUN of match-set type instead of host sees. With " +"match-set, the host will see the virtual volume on specified NSP (Node-Slot-" +"Port). This change in vlun type fixes bug 1577993." +msgstr "" +"3PAR driver creates FC VLUN of match-set type instead of host sees. With " +"match-set, the host will see the virtual volume on specified NSP (Node-Slot-" +"Port). This change in vlun type fixes bug 1577993." 
+ +msgid "7.0.1" +msgstr "7.0.1" + +msgid "7.0.2" +msgstr "7.0.2" + +msgid "7.0.3" +msgstr "7.0.3" + +msgid "8.0.0" +msgstr "8.0.0" + +msgid "8.1.0" +msgstr "8.1.0" + +msgid "8.1.1" +msgstr "8.1.1" + +msgid "8.1.1-11" +msgstr "8.1.1-11" + +msgid "9.0.0" +msgstr "9.0.0" + +msgid "9.1.0" +msgstr "9.1.0" + +msgid "9.1.1" +msgstr "9.1.1" + +msgid "9.1.2" +msgstr "9.1.2" + +msgid "" +"A bug in the Quobyte driver was fixed that prevented backing up volumes and " +"snapshots" +msgstr "" +"A bug in the Quobyte driver was fixed that prevented backing up volumes and " +"snapshots" + +msgid "" +"A general framework to accommodate hardware compression accelerators for " +"compression of volumes uploaded to the Image service (Glance) as images and " +"decompression of compressed images used to create volumes is introduced." +msgstr "" +"A general framework to accommodate hardware compression accelerators for " +"compression of volumes uploaded to the Image service (Glance) as images and " +"decompression of compressed images used to create volumes is introduced." + +msgid "" +"A lock in the volume manager flow generally prevents this on normal clone " +"volume operations, but this clone method in the driver is called for " +"operations such as cloning from the cinder image-volume cache or cloning " +"from a cinder backend used as a glance store." +msgstr "" +"A lock in the volume manager flow generally prevents this on normal clone " +"volume operations, but this clone method in the driver is called for " +"operations such as cloning from the Cinder image-volume cache or cloning " +"from a Cinder backend used as a Glance store." + +msgid "" +"A new API to display the volumes summary. This summary API displays the " +"total number of volumes and total volume's size in GB." +msgstr "" +"A new API to display the volumes summary. This summary API displays the " +"total number of volumes and total volume's size in GB." + +msgid "" +"A new check is added to the ``cinder-status upgrade check`` CLI to check for " +"the configuration of CoprHD, HGST or ITRI DISCO drivers. These drivers were " +"removed in the Stein release and should not be configured at the time of " +"upgrade." +msgstr "" +"A new check is added to the ``cinder-status upgrade check`` CLI to check for " +"the configuration of CoprHD, HGST or ITRI DISCO drivers. These drivers were " +"removed in the Stein release and should not be configured at the time of " +"upgrade." + +msgid "" +"A new check is added to the ``cinder-status upgrade check`` CLI to check for " +"the use of ``cinder.volume.drivers.windows.windows.WindowsDriver`` and a " +"message is reported that the user needs to update the setting to ``cinder." +"volume.drivers.windows.iscsi.WindowsISCSIDriver`` if it is encountered." +msgstr "" +"A new check is added to the ``cinder-status upgrade check`` CLI to check for " +"the use of ``cinder.volume.drivers.windows.windows.WindowsDriver`` and a " +"message is reported that the user needs to update the setting to ``cinder." +"volume.drivers.windows.iscsi.WindowsISCSIDriver`` if it is encountered." + +msgid "" +"A new check is added to the ``cinder-status upgrade check`` CLI to check for " +"the use of backup driver module path instead of full driver class path in " +"the ``backup_driver`` configuration setting." +msgstr "" +"A new check is added to the ``cinder-status upgrade check`` CLI to check for " +"the use of backup driver module path instead of full driver class path in " +"the ``backup_driver`` configuration setting." 
+ +msgid "" +"A new cinder-manage command, reset_active_backend, was added to promote a " +"failed-over backend participating in replication. This allows you to reset " +"a backend without manually editing the database. A backend undergoing " +"promotion using this command is expected to be in a disabled and frozen " +"state. Support for both standalone and clustered backend configurations are " +"supported." +msgstr "" +"A new cinder-manage command, reset_active_backend, was added to promote a " +"failed-over backend participating in replication. This allows you to reset " +"a backend without manually editing the database. A backend undergoing " +"promotion using this command is expected to be in a disabled and frozen " +"state. Support for both standalone and clustered backend configurations are " +"supported." + +msgid "" +"A new target, NVMET, is added for the LVM driver over RDMA, it allows cinder " +"to use nvmetcli in order to create/delete subsystems on attaching/detaching " +"an LVM volume to/from an instance." +msgstr "" +"A new target, NVMET, is added for the LVM driver over RDMA, it allows Cinder " +"to use nvmetcli in order to create/delete subsystems on attaching/detaching " +"an LVM volume to/from an instance." + +msgid "" +"A new target, spdk-nvmeof, is added for the SPDK driver over RDMA. It allows " +"cinder to use SPDK target in order to create/delete subsystems on attaching/" +"detaching an SPDK volume to/from an instance." +msgstr "" +"A new target, spdk-nvmeof, is added for the SPDK driver over RDMA. It allows " +"Cinder to use SPDK target in order to create/delete subsystems on attaching/" +"detaching an SPDK volume to/from an instance." + +msgid "" +"Add 'LUNType' configuration verification for Huawei driver when connecting " +"to Dorado array. Because Dorado array only supports 'Thin' lun type, so " +"'LUNType' only can be configured as 'Thin', any other type is invalid and if " +"'LUNType' not explicitly configured, by default use 'Thin' for Dorado array." +msgstr "" +"Add 'LUNType' configuration verification for Huawei driver when connecting " +"to Dorado array. Because Dorado array only supports 'Thin' LUN type, so " +"'LUNType' only can be configured as 'Thin', any other type is invalid and if " +"'LUNType' not explicitly configured, by default use 'Thin' for Dorado array." + +msgid "" +"Add 'display_name' and 'display_description' validation for creating/" +"updating snapshot and volume operations." +msgstr "" +"Add 'display_name' and 'display_description' validation for creating/" +"updating snapshot and volume operations." + +msgid "Add CG capability to generic volume groups in Huawei driver." +msgstr "Add CG capability to generic volume groups in Huawei driver." + +msgid "Add CG capability to generic volume groups in INFINIDAT driver." +msgstr "Add CG capability to generic volume groups in INFINIDAT driver." + +msgid "" +"Add Support for QoS in the Nimble Storage driver. QoS is available from " +"Nimble OS release 4.x and above." +msgstr "" +"Add Support for QoS in the Nimble Storage driver. QoS is available from " +"Nimble OS release 4.x and above." + +msgid "Add Support for deduplication of volumes in the Nimble Storage driver." +msgstr "" +"Add Support for de-duplication of volumes in the Nimble Storage driver." + +msgid "Add ``admin_or_storage_type_admin`` rule to ``policy.json``, e.g." +msgstr "Add ``admin_or_storage_type_admin`` rule to ``policy.json``, e.g." + +msgid "" +"Add ``all_tenants``, ``project_id`` support in attachment list&detail APIs." 
+msgstr "" +"Add ``all_tenants``, ``project_id`` support in attachment list&detail APIs." + +msgid "" +"Add ``all_tenants``, ``project_id`` support in the attachment list and " +"detail APIs." +msgstr "" +"Add ``all_tenants``, ``project_id`` support in the attachment list and " +"detail APIs." + +msgid "Add ``storage_type_admin`` role." +msgstr "Add ``storage_type_admin`` role." + +msgid "" +"Add ``user_id`` attribute to response body of list backup with detail and " +"show backup detail APIs." +msgstr "" +"Add ``user_id`` attribute to response body of list backup with detail and " +"show backup detail APIs." + +msgid "Add ``user_id`` field to snapshot list/detail and snapshot show." +msgstr "Add ``user_id`` field to snapshot list/detail and snapshot show." + +msgid "Add ``volume-type`` filter to API Get-Pools" +msgstr "Add ``volume-type`` filter to API Get-Pools" + +msgid "" +"Add ability to call failover-host on a replication enabled SF cluster a " +"second time with host id = default to initiate a failback to the default " +"configured SolidFire Cluster." +msgstr "" +"Add ability to call failover-host on a replication enabled SF cluster a " +"second time with host id = default to initiate a failback to the default " +"configured SolidFire Cluster." + +msgid "" +"Add ability to enable multi-initiator support to allow live migration in the " +"Nimble backend driver." +msgstr "" +"Add ability to enable multi-initiator support to allow live migration in the " +"Nimble backend driver." + +msgid "" +"Add ability to extend ``in-use`` volume. User should be aware of the whole " +"environment before using this feature because it's dependent on several " +"external factors below:" +msgstr "" +"Add ability to extend ``in-use`` volume. User should be aware of the whole " +"environment before using this feature because it's dependent on several " +"external factors below:" + +msgid "Add ability to specify backup driver via class name." +msgstr "Add ability to specify backup driver via class name." + +msgid "Add backup snapshots support for Storwize/SVC driver." +msgstr "Add backup snapshots support for Storwize/SVC driver." + +msgid "Add chap authentication support for the vmax backend." +msgstr "Add CHAP authentication support for the vmax backend." + +msgid "" +"Add consistency group capability to Generic Volume Groups in the Dell EMC SC " +"driver." +msgstr "" +"Add consistency group capability to Generic Volume Groups in the Dell EMC SC " +"driver." + +msgid "" +"Add consistency group capability to generic volume groups in Storwize " +"drivers." +msgstr "" +"Add consistency group capability to generic volume groups in Storwize " +"drivers." + +msgid "Add consistency group replication support in XIV\\A9000 Cinder driver." +msgstr "Add consistency group replication support in XIV\\A9000 Cinder driver." + +msgid "" +"Add consistent group capability to generic volume groups in CoprHD driver." +msgstr "" +"Add consistent group capability to generic volume groups in CoprHD driver." + +msgid "" +"Add consistent group capability to generic volume groups in Lefthand driver." +msgstr "" +"Add consistent group capability to generic volume groups in Lefthand driver." + +msgid "" +"Add consistent group capability to generic volume groups in Pure drivers." +msgstr "" +"Add consistent group capability to generic volume groups in Pure drivers." + +msgid "Add consistent group capability to generic volume groups in VNX driver." +msgstr "" +"Add consistent group capability to generic volume groups in VNX driver." 
+ +msgid "" +"Add consistent group capability to generic volume groups in XIV, Spectrum " +"Accelerate and A9000/R storage systems." +msgstr "" +"Add consistent group capability to generic volume groups in XIV, Spectrum " +"Accelerate and A9000/R storage systems." + +msgid "" +"Add consistent group capability to generic volume groups in the SolidFire " +"driver." +msgstr "" +"Add consistent group capability to generic volume groups in the SolidFire " +"driver." + +msgid "" +"Add consistent group capability to generic volume groups in the XtremIO " +"driver." +msgstr "" +"Add consistent group capability to generic volume groups in the XtremIO " +"driver." + +msgid "" +"Add consistent group snapshot support to generic volume groups in VMAX " +"driver version 3.0." +msgstr "" +"Add consistent group snapshot support to generic volume groups in VMAX " +"driver version 3.0." + +msgid "" +"Add consistent replication group support in Dell EMC VMAX cinder driver." +msgstr "" +"Add consistent replication group support in Dell EMC VMAX cinder driver." + +msgid "Add consistent replication group support in Storwize Cinder driver." +msgstr "Add consistent replication group support in Storwize Cinder driver." + +msgid "Add consistent replication group support in VNX cinder driver." +msgstr "Add consistent replication group support in VNX cinder driver." + +msgid "" +"Add enhanced support to the QNAP Cinder driver, including 'CHAP', 'Thin " +"Provision', 'SSD Cache', 'Dedup' and 'Compression'." +msgstr "" +"Add enhanced support to the QNAP Cinder driver, including 'CHAP', 'Thin " +"Provision', 'SSD Cache', 'Dedup' and 'Compression'." + +msgid "Add filter, sorter and pagination support in group snapshot listings." +msgstr "Add filter, sorter and pagination support in group snapshot listings." + +msgid "Add filters support to get_pools API v3.28." +msgstr "Add filters support to get_pools API v3.28." + +msgid "" +"Add get_manageable_volumes and get_manageable_snapshots implementations for " +"Pure Storage Volume Drivers." +msgstr "" +"Add get_manageable_volumes and get_manageable_snapshots implementations for " +"Pure Storage Volume Drivers." + +msgid "" +"Add global mirror with change volumes(gmcv) support and user can manage gmcv " +"replication volume by SVC driver. An example to set a gmcv replication " +"volume type, set property replication_type as \" gmcv\", property " +"replication_enabled as \" True\" and set property drivers:" +"cycle_period_seconds as 500." +msgstr "" +"Add global mirror with change volumes(gmcv) support and user can manage gmcv " +"replication volume by SVC driver. An example to set a gmcv replication " +"volume type, set property replication_type as \" gmcv\", property " +"replication_enabled as \" True\" and set property drivers:" +"cycle_period_seconds as 500." + +msgid "" +"Add granularity to the ``volume_extension:volume_type_encryption`` policy " +"with the addition of distinct actions for create, get, update, and delete:" +msgstr "" +"Add granularity to the ``volume_extension:volume_type_encryption`` policy " +"with the addition of distinct actions for create, get, update, and delete:" + +msgid "Add mirrored volume support in IBM SVC/Storwize driver." +msgstr "Add mirrored volume support in IBM SVC/Storwize driver." + +msgid "Add multipath enhancement to Storwize iSCSI driver." +msgstr "Add multipath enhancement to Storwize iSCSI driver." + +msgid "" +"Add option `max_luns_per_storage_group` back. The max LUNs per storage group " +"was set to 255 before. 
With the new option, admin can set it to a larger " +"number." +msgstr "" +"Add option `max_luns_per_storage_group` back. The max LUNs per storage group " +"was set to 255 before. With the new option, admin can set it to a larger " +"number." + +msgid "Add provider_id in the detailed view of a volume for admin." +msgstr "Add provider_id in the detailed view of a volume for admin." + +msgid "Add replication consistency group support in DS8K cinder driver." +msgstr "Add replication consistency group support in DS8K cinder driver." + +msgid "Add retype functionality to VMAX driver version 3.0." +msgstr "Add retype functionality to VMAX driver version 3.0." + +msgid "Add revert to snapshot API and support in LVM driver." +msgstr "Add revert to snapshot API and support in LVM driver." + +msgid "Add reverting to snapshot support in Storwize Cinder driver." +msgstr "Add reverting to snapshot support in Storwize Cinder driver." + +msgid "Add support for deferred deletion in the RBD volume driver." +msgstr "Add support for deferred deletion in the RBD volume driver." + +msgid "Add support for hybrid aggregates to the NetApp cDOT drivers." +msgstr "Add support for hybrid aggregates to the NetApp cDOT drivers." + +msgid "Add support for reporting pool disk type in Huawei driver." +msgstr "Add support for reporting pool disk type in Huawei driver." + +msgid "Add support for sorting backups by \"name\"." +msgstr "Add support for sorting backups by \"name\"." + +msgid "" +"Add support to backup volume using snapshot in the Unity driver, which " +"enables backing up of volumes that are in-use." +msgstr "" +"Add support to backup volume using snapshot in the Unity driver, which " +"enables backing up of volumes that are in-use." + +msgid "Add support to backup volume using snapshot in the Unity driver." +msgstr "Add support to backup volume using snapshot in the Unity driver." + +msgid "Add support to configure IO ports option in Dell EMC Unity driver." +msgstr "Add support to configure IO ports option in Dell EMC Unity driver." + +msgid "Add support to force detach a volume from all hosts on 3PAR." +msgstr "Add support to force detach a volume from all hosts on 3PAR." + +msgid "Add support to force detach a volume from all hosts on Unity." +msgstr "Add support to force detach a volume from all hosts on Unity." + +msgid "Add support to force detach a volume from all hosts on VNX." +msgstr "Add support to force detach a volume from all hosts on VNX." + +msgid "" +"Add thin clone support in the Unity driver. Unity storage supports the thin " +"clone of a LUN from OE version 4.2.0. It is more efficient than the dd " +"solution. However, there is a limit of thin clone inside each LUN family. " +"Every time the limit reaches, a new LUN family will be created by a dd-copy, " +"and then the volume clone afterward will use the thin clone of the new LUN " +"family." +msgstr "" +"Add thin clone support in the Unity driver. Unity storage supports the thin " +"clone of a LUN from OE version 4.2.0. It is more efficient than the dd " +"solution. However, there is a limit of thin clone inside each LUN family. " +"Every time the limit reaches, a new LUN family will be created by a dd-copy, " +"and then the volume clone afterwards will use the thin clone of the new LUN " +"family." + +msgid "Add v2.1 volume replication support in VMAX driver." +msgstr "Add v2.1 volume replication support in VMAX driver." + +msgid "" +"Added \"backend_state: up/down\" in response body of service list if context " +"is admin. 
This feature will help operators or cloud management system to get " +"the backend device state in every service. If device state is *down*, " +"specify that storage device has got some problems. Give more information to " +"locate bugs quickly." +msgstr "" +"Added \"backend_state: up/down\" in response body of service list if context " +"is admin. This feature will help operators or cloud management system to get " +"the backend device state in every service. If device state is *down*, " +"specify that storage device has got some problems. Give more information to " +"locate bugs quickly." + +msgid "" +"Added Cheesecake (v2.1) replication support to the Pure Storage Volume " +"drivers." +msgstr "" +"Added Cheesecake (v2.1) replication support to the Pure Storage Volume " +"drivers." + +msgid "Added Cinder consistency group for the NetApp NFS driver." +msgstr "Added Cinder consistency group for the NetApp NFS driver." + +msgid "Added Cinder fast-retype support to Datera EDF driver." +msgstr "Added Cinder fast-retype support to Datera EDF driver." + +msgid "Added Consistency Group support in ScaleIO driver." +msgstr "Added Consistency Group support in ScaleIO driver." + +msgid "Added Datera EDF API 2.1 support." +msgstr "Added Datera EDF API 2.1 support." + +msgid "Added Datera Multi-Tenancy Support." +msgstr "Added Datera Multi-Tenancy Support." + +msgid "Added Datera Template Support." +msgstr "Added Datera Template Support." + +msgid "Added HA support for NexentaEdge iSCSI driver" +msgstr "Added HA support for NexentaEdge iSCSI driver" + +msgid "Added ISCSI based driver for Veritas Access." +msgstr "Added iSCSI based driver for Veritas Access." + +msgid "Added Keystone v3 support for Swift backup driver in single user mode." +msgstr "Added Keystone v3 support for Swift backup driver in single user mode." + +msgid "Added Migrate and Extend for Nexenta NFS driver." +msgstr "Added Migrate and Extend for Nexenta NFS driver." + +msgid "Added NBD driver for NexentaEdge." +msgstr "Added NBD driver for NexentaEdge." + +msgid "Added NFS based driver for Veritas Access." +msgstr "Added NFS based driver for Veritas Access." + +msgid "Added Nimble Storage Fibre Channel backend driver." +msgstr "Added Nimble Storage Fibre Channel backend driver." + +msgid "Added QoS support in ScaleIO driver." +msgstr "Added QoS support in ScaleIO driver." + +msgid "" +"Added RBD keyring configuration parameter ``rbd_keyring_conf`` to define " +"custom path of Ceph keyring file." +msgstr "" +"Added RBD keyring configuration parameter ``rbd_keyring_conf`` to define " +"custom path of Ceph keyring file." + +msgid "Added REST API to update backup name and description." +msgstr "Added REST API to update backup name and description." + +msgid "" +"Added RPC backward compatibility layer similar to the one implemented in " +"Nova. This means that Cinder services can be upgraded one-by-one without " +"breakage. After all the services are upgraded SIGHUP signals should be " +"issued to all the services to signal them to reload cached minimum RPC " +"versions. Alternative is of course restart of them. Please note that cinder-" +"api service doesn't support SIGHUP yet. Please also take into account that " +"all the rolling upgrades capabilities are considered tech preview, as we " +"don't have a CI testing it yet." +msgstr "" +"Added RPC backward compatibility layer similar to the one implemented in " +"Nova. This means that Cinder services can be upgraded one-by-one without " +"breakage. 
After all the services are upgraded SIGHUP signals should be " +"issued to all the services to signal them to reload cached minimum RPC " +"versions. Alternative is of course restart of them. Please note that cinder-" +"api service doesn't support SIGHUP yet. Please also take into account that " +"all the rolling upgrades capabilities are considered tech preview, as we " +"don't have a CI testing it yet." + +msgid "Added Retype functionality to Nexenta iSCSI and NFS drivers." +msgstr "Added Retype functionality to Nexenta iSCSI and NFS drivers." + +msgid "Added Volume Placement extra-specs support to Datera EDF driver." +msgstr "Added Volume Placement extra-specs support to Datera EDF driver." + +msgid "Added ``datera_disable_profiler`` boolean config option." +msgstr "Added ``datera_disable_profiler`` boolean config option." + +msgid "Added ``resource_filters`` API to retrieve configured resource filters." +msgstr "" +"Added ``resource_filters`` API to retrieve configured resource filters." + +msgid "" +"Added a new config ``reinit_driver_count`` in volume driver, which indicates " +"the maximum retry limit for driver re-initialization when it fails to " +"initialize a volume driver. Its default value is 3. The interval of retry is " +"exponentially backoff, and will be 1s, 2s, 4s etc." +msgstr "" +"Added a new config ``reinit_driver_count`` in volume driver, which indicates " +"the maximum retry limit for driver re-initialisation when it fails to " +"initialise a volume driver. Its default value is 3. The interval of retry is " +"exponentially back-off, and will be 1s, 2s, 4s etc." + +msgid "" +"Added a new config option `scheduler_weight_handler`. This is a global " +"option which specifies how the scheduler should choose from a listed of " +"weighted pools. By default the existing weigher is used which always chooses " +"the highest weight." +msgstr "" +"Added a new config option `scheduler_weight_handler`. This is a global " +"option which specifies how the scheduler should choose from a listed of " +"weighted pools. By default the existing weigher is used which always chooses " +"the highest weight." + +msgid "" +"Added a new option ``quobyte_overlay_volumes`` for the Quobyte volume " +"driver. This option activates internal snapshots who allow to create volumes " +"from snapshots as overlay files based on the volume from snapshot cache. " +"This significantly speeds up the creation of volumes from large snapshots." +msgstr "" +"Added a new option ``quobyte_overlay_volumes`` for the Quobyte volume " +"driver. This option activates internal snapshots who allow to create volumes " +"from snapshots as overlay files based on the volume from snapshot cache. " +"This significantly speeds up the creation of volumes from large snapshots." + +msgid "" +"Added a new optional cache of volumes generated from snapshots for the " +"Quobyte backend. Enabling this cache speeds up creation of multiple volumes " +"from a single snapshot at the cost of a slight increase in creation time for " +"the first volume generated for this given snapshot. The " +"``quobyte_volume_from_snapshot_cache`` option is off by default." +msgstr "" +"Added a new optional cache of volumes generated from snapshots for the " +"Quobyte backend. Enabling this cache speeds up creation of multiple volumes " +"from a single snapshot at the cost of a slight increase in creation time for " +"the first volume generated for this given snapshot. The " +"``quobyte_volume_from_snapshot_cache`` option is off by default." 
+ +msgid "" +"Added a new weight handler `StochasticHostWeightHandler`. This weight " +"handler chooses pools randomly, where the random probabilities are " +"proportional to the weights, so higher weighted pools are chosen more " +"frequently, but not all the time. This weight handler spreads new shares " +"across available pools more fairly." +msgstr "" +"Added a new weight handler `StochasticHostWeightHandler`. This weight " +"handler chooses pools randomly, where the random probabilities are " +"proportional to the weights, so higher weighted pools are chosen more " +"frequently, but not all the time. This weight handler spreads new shares " +"across available pools more fairly." + +msgid "Added ability to backup snapshots." +msgstr "Added ability to backup snapshots." + +msgid "Added ability to list all manageable volumes within ScaleIO Driver." +msgstr "Added ability to list all manageable volumes within ScaleIO Driver." + +msgid "" +"Added ability to purge records less than 1 day old, using the cinder-manage " +"db_purge utility. This helps especially for those testing scenarios in which " +"a a large number of volumes are created and deleted. (bug" +msgstr "" +"Added ability to purge records less than 1 day old, using the cinder-manage " +"db_purge utility. This helps especially for those testing scenarios in which " +"a a large number of volumes are created and deleted. (bug" + +msgid "Added ability to query backups by project ID." +msgstr "Added ability to query backups by project ID." + +msgid "" +"Added ability to specify multiple storage pools in the FalconStor driver." +msgstr "" +"Added ability to specify multiple storage pools in the FalconStor driver." + +msgid "" +"Added additional metrics reported to the scheduler for Pure Volume Drivers " +"for better filtering and weighing functions." +msgstr "" +"Added additional metrics reported to the scheduler for Pure Volume Drivers " +"for better filtering and weighing functions." + +msgid "" +"Added an ``excluded_domain_ips`` option to the Dell EMC SC driver. This is " +"identical to the excluded_domain_ip option only comma separated rather than " +"multiple entry. This is concatenated with the ``excluded_domain_ip`` option." +msgstr "" +"Added an ``excluded_domain_ips`` option to the Dell EMC SC driver. This is " +"identical to the excluded_domain_ip option only comma separated rather than " +"multiple entry. This is concatenated with the ``excluded_domain_ip`` option." + +msgid "" +"Added asynchronous remote replication support in Dell EMC VMAX cinder driver." +msgstr "" +"Added asynchronous remote replication support in Dell EMC VMAX Cinder driver." + +msgid "Added attribute ``connection_info`` to attachment object." +msgstr "Added attribute ``connection_info`` to attachment object." + +msgid "" +"Added automatic configuration of SAN access control for the NEC volume " +"driver." +msgstr "" +"Added automatic configuration of SAN access control for the NEC volume " +"driver." + +msgid "Added availability_zone filter for snapshots list." +msgstr "Added availability_zone filter for snapshots list." + +msgid "Added backend FC and iSCSI drivers for NEC Storage." +msgstr "Added backend FC and iSCSI drivers for NEC Storage." + +msgid "Added backend ISCSI driver for Reduxio." +msgstr "Added backend ISCSI driver for Reduxio." + +msgid "Added backend driver for Coho Data storage." +msgstr "Added backend driver for Coho Data storage." + +msgid "Added backend driver for DISCO storage." +msgstr "Added backend driver for DISCO storage." 
+ +msgid "Added backend driver for Dell EMC Unity storage." +msgstr "Added backend driver for Dell EMC Unity storage." + +msgid "Added backend driver for FalconStor FreeStor." +msgstr "Added backend driver for FalconStor FreeStor." + +msgid "Added backend driver for Fujitsu ETERNUS DX (FC)." +msgstr "Added backend driver for Fujitsu ETERNUS DX (FC)." + +msgid "Added backend driver for Fujitsu ETERNUS DX (iSCSI)." +msgstr "Added backend driver for Fujitsu ETERNUS DX (iSCSI)." + +msgid "Added backend driver for Hedvig iSCSI storage." +msgstr "Added backend driver for Hedvig iSCSI storage." + +msgid "Added backend driver for Huawei FusionStorage." +msgstr "Added backend driver for Huawei FusionStorage." + +msgid "Added backend driver for Nexenta Edge iSCSI storage." +msgstr "Added backend driver for Nexenta Edge iSCSI storage." + +msgid "Added backend driver for NexentaStor5 NFS storage." +msgstr "Added backend driver for NexentaStor5 NFS storage." + +msgid "Added backend driver for NexentaStor5 iSCSI storage." +msgstr "Added backend driver for NexentaStor5 iSCSI storage." + +msgid "Added backend driver for Synology iSCSI-supported storage." +msgstr "Added backend driver for Synology iSCSI-supported storage." + +msgid "Added backend driver for VMware VStorageObject (First Class Disk)." +msgstr "Added backend driver for VMware VStorageObject (First Class Disk)." + +msgid "Added backend driver for Violin Memory 7000 iscsi storage." +msgstr "Added backend driver for Violin Memory 7000 iSCSI storage." + +msgid "Added backend driver for ZTE iSCSI storage." +msgstr "Added backend driver for ZTE iSCSI storage." + +msgid "" +"Added boolean conf option 'split_loggers' in [default] section of cinder." +"conf to `enable split logging`_ functionality. The default value of " +"split_loggers option is set to False. Operator can set it's value to True to " +"split HTTP content into subloggers to allow for fine-grained control of what " +"is logged and how. This new config option 'split_loggers' should be enabled " +"only when keystoneauth log level is set to DEBUG in 'default_log_levels' " +"config option." +msgstr "" +"Added boolean conf option 'split_loggers' in [default] section of cinder." +"conf to `enable split logging`_ functionality. The default value of " +"split_loggers option is set to False. Operator can set it's value to True to " +"split HTTP content into subloggers to allow for fine-grained control of what " +"is logged and how. This new config option 'split_loggers' should be enabled " +"only when keystoneauth log level is set to DEBUG in 'default_log_levels' " +"config option." + +msgid "Added cinder backup driver for Google Cloud Storage." +msgstr "Added Cinder backup driver for Google Cloud Storage." + +msgid "" +"Added config option ``vmware_adapter_type`` for the VMware VMDK driver to " +"specify the default adapter type for volumes in vCenter server." +msgstr "" +"Added config option ``vmware_adapter_type`` for the VMware VMDK driver to " +"specify the default adapter type for volumes in vCenter server." + +msgid "" +"Added config option ``vmware_connection_pool_size`` in the VMware VMDK " +"driver to specify the maximum number of connections (to vCenter) in the http " +"connection pool." +msgstr "" +"Added config option ``vmware_connection_pool_size`` in the VMware VMDK " +"driver to specify the maximum number of connections (to vCenter) in the HTTP " +"connection pool." 
+ +msgid "" +"Added config option to enable/disable automatically calculation an over-" +"subscription ratio max for Pure Volume Drivers. When disabled the drivers " +"will now respect the max_oversubscription_ratio config option." +msgstr "" +"Added config option to enable/disable automatically calculation an over-" +"subscription ratio max for Pure Volume Drivers. When disabled the drivers " +"will now respect the max_oversubscription_ratio config option." + +msgid "" +"Added consistency group capability to generic volume groups in the HPE 3PAR " +"driver." +msgstr "" +"Added consistency group capability to generic volume groups in the HPE 3PAR " +"driver." + +msgid "" +"Added consistency group support to generic volume groups in ScaleIO Driver." +msgstr "" +"Added consistency group support to generic volume groups in ScaleIO Driver." + +msgid "Added consistency group support to the Huawei driver." +msgstr "Added consistency group support to the Huawei driver." + +msgid "" +"Added consistent group capability to generic volume groups in GPFS driver." +msgstr "" +"Added consistent group capability to generic volume groups in GPFS driver." + +msgid "" +"Added consistent group capability to generic volume groups in ProphetStor " +"driver." +msgstr "" +"Added consistent group capability to generic volume groups in ProphetStor " +"driver." + +msgid "Added count info in volume, snapshot and backup's list APIs since 3.45." +msgstr "" +"Added count info in volume, snapshot and backup's list APIs since 3.45." + +msgid "" +"Added create/delete APIs for group snapshots and an API to create group from " +"source." +msgstr "" +"Added create/delete APIs for group snapshots and an API to create group from " +"source." + +msgid "" +"Added data reduction pool support for thin-provisoned and compressed volume " +"in Storwize cinder driver." +msgstr "" +"Added data reduction pool support for thin-provisoned and compressed volume " +"in Storwize cinder driver." + +msgid "" +"Added dell_api_async_rest_timeout option to the Dell EMC SC driver. This is " +"the timeout used for asynchronous REST calls to the Dell EMC SC REST API. " +"Default is 15 seconds." +msgstr "" +"Added dell_api_async_rest_timeout option to the Dell EMC SC driver. This is " +"the timeout used for asynchronous REST calls to the Dell EMC SC REST API. " +"Default is 15 seconds." + +msgid "" +"Added dell_api_sync_rest_timeout option to the Dell EMC SC driver. This is " +"the timeout used for synchronous REST calls to the Dell EMC SC REST API. " +"Default is 30 seconds." +msgstr "" +"Added dell_api_sync_rest_timeout option to the Dell EMC SC driver. This is " +"the timeout used for synchronous REST calls to the Dell EMC SC REST API. " +"Default is 30 seconds." + +msgid "Added driver for Tegile IntelliFlash arrays." +msgstr "Added driver for Tegile IntelliFlash arrays." + +msgid "Added driver for the InfiniBox storage array." +msgstr "Added driver for the InfiniBox storage array." + +msgid "" +"Added driver-assisted volume migration to RBD driver. This allows a volume " +"to be efficiently copied by Ceph from one pool to another within the same " +"cluster." +msgstr "" +"Added driver-assisted volume migration to RBD driver. This allows a volume " +"to be efficiently copied by Ceph from one pool to another within the same " +"cluster." + +msgid "Added extend method to NFS driver for NexentaStor 5." +msgstr "Added extend method to NFS driver for NexentaStor 5." 
+ +msgid "" +"Added flag 'backend_state' which will give backend state info in service " +"list." +msgstr "" +"Added flag 'backend_state' which will give backend state info in service " +"list." + +msgid "" +"Added flag 'backend_state: up/down' which will give backend state info in " +"service list." +msgstr "" +"Added flag 'backend_state: up/down' which will give backend state info in " +"service list." + +msgid "" +"Added generalized resource filter support in ``list volume``, ``list " +"backup``, ``list snapshot``, ``list group``, ``list group-snapshot``, ``list " +"attachment``, ``list message`` and ``list pools`` APIs." +msgstr "" +"Added generalised resource filter support in ``list volume``, ``list " +"backup``, ``list snapshot``, ``list group``, ``list group-snapshot``, ``list " +"attachment``, ``list message`` and ``list pools`` APIs." + +msgid "" +"Added generic volume group capability to NetApp cDot drivers with support " +"for write consistent group snapshots." +msgstr "" +"Added generic volume group capability to NetApp cDot drivers with support " +"for write consistent group snapshots." + +msgid "Added get capability feature for HPE-3PAR." +msgstr "Added get capability feature for HPE-3PAR." + +msgid "Added group type and group specs APIs." +msgstr "Added group type and group specs APIs." + +msgid "" +"Added host-level (whole back end replication - v2.1) replication support to " +"the NetApp cDOT drivers (iSCSI, FC, NFS)." +msgstr "" +"Added host-level (whole back end replication - v2.1) replication support to " +"the NetApp cDOT drivers (iSCSI, FC, NFS)." + +msgid "" +"Added hyperswap volume and group support in Storwize cinder driver. Storwize/" +"svc versions prior to 7.6 do not support this feature." +msgstr "" +"Added hyperswap volume and group support in Storwize Cinder driver. Storwize/" +"svc versions prior to 7.6 do not support this feature." + +msgid "Added iSCSI CHAP uni-directional authentication for NetApp drivers." +msgstr "Added iSCSI CHAP uni-directional authentication for NetApp drivers." + +msgid "" +"Added iSCSI and Fibre Channel volume drivers for DataCore's SANsymphony and " +"Hyper-converged Virtual SAN storage." +msgstr "" +"Added iSCSI and Fibre Channel volume drivers for DataCore's SANsymphony and " +"Hyper-converged Virtual SAN storage." + +msgid "" +"Added image signature verification support when creating volume from image. " +"This depends on signature metadata from glance. This feature is turned on by " +"default, administrators can change behaviour by updating option " +"``verify_glance_signatures``. Also, an additional image metadata " +"``signature_verified`` has been added to indicate whether signature " +"verification was performed during creating process." +msgstr "" +"Added image signature verification support when creating volume from image. " +"This depends on signature metadata from glance. This feature is turned on by " +"default, administrators can change behaviour by updating option " +"``verify_glance_signatures``. Also, an additional image metadata " +"``signature_verified`` has been added to indicate whether signature " +"verification was performed during creating process." + +msgid "" +"Added independent and shared types for qos classes in XIV & A9000. Shared " +"type enables to share bandwidth and IO rates between volumes of the same " +"class. Independent type gives each volume the same bandwidth and IO rates " +"without being affected by other volumes in the same qos class." 
+msgstr "" +"Added independent and shared types for QoS classes in XIV & A9000. Shared " +"type enables to share bandwidth and I/O rates between volumes of the same " +"class. Independent type gives each volume the same bandwidth and I/O rates " +"without being affected by other volumes in the same QoS class." + +msgid "Added like operator support to filters for the following resources::" +msgstr "Added like operator support to filters for the following resources::" + +msgid "Added manage/unmanage snapshot support for Huawei drivers." +msgstr "Added manage/unmanage snapshot support for Huawei drivers." + +msgid "Added manage/unmanage snapshot support to the HNAS NFS driver." +msgstr "Added manage/unmanage snapshot support to the HNAS NFS driver." + +msgid "Added manage/unmanage volume support for Dell Equallogic driver." +msgstr "Added manage/unmanage volume support for Dell Equallogic driver." + +msgid "Added manage/unmanage volume support for Huawei drivers." +msgstr "Added manage/unmanage volume support for Huawei drivers." + +msgid "" +"Added metadata support for backup source. Now users can create/update " +"metadata for a specified backup." +msgstr "" +"Added metadata support for backup source. Now users can create/update " +"metadata for a specified backup." + +msgid "Added multiple management IP support to Storwize SVC driver." +msgstr "Added multiple management IP support to Storwize SVC driver." + +msgid "Added multiple pools support to Storwize SVC driver." +msgstr "Added multiple pools support to Storwize SVC driver." + +msgid "" +"Added new APIs on microversion 3.32 to support dynamically changing log " +"levels in Cinder services without restart as well as retrieving current log " +"levels, which is an easy way to ping via the message broker a service." +msgstr "" +"Added new APIs on microversion 3.32 to support dynamically changing log " +"levels in Cinder services without restart as well as retrieving current log " +"levels, which is an easy way to ping via the message broker a service." + +msgid "" +"Added new BoolOpt ``backup_ceph_image_journals`` for enabling the Ceph image " +"features required to support RBD mirroring of Cinder backup pool." +msgstr "" +"Added new BoolOpt ``backup_ceph_image_journals`` for enabling the Ceph image " +"features required to support RBD mirroring of Cinder backup pool." + +msgid "" +"Added new Hitachi VSP FC Driver. The VSP driver supports all Hitachi VSP " +"Family and HUSVM." +msgstr "" +"Added new Hitachi VSP FC Driver. The VSP driver supports all Hitachi VSP " +"Family and HUSVM." + +msgid "" +"Added new option to delete XtremIO initiator groups after the last volume " +"was detached from them. Cleanup can be enabled by setting " +"``xtremio_clean_unused_ig`` to ``True`` under the backend settings in cinder." +"conf." +msgstr "" +"Added new option to delete XtremIO initiator groups after the last volume " +"was detached from them. Cleanup can be enabled by setting " +"``xtremio_clean_unused_ig`` to ``True`` under the backend settings in cinder." +"conf." + +msgid "Added oversubscription support in the VMAX driver" +msgstr "Added over-subscription support in the VMAX driver" + +msgid "" +"Added periodic task to clean expired messages in cinder scheduler, also " +"added a configuration option ``message_reap_interval`` to handle the " +"interval." +msgstr "" +"Added periodic task to clean expired messages in Cinder scheduler, also " +"added a configuration option ``message_reap_interval`` to handle the " +"interval." 
+ +msgid "" +"Added periodic task to clean expired reservation in cinder scheduler. Added " +"a configuration option ``reservation_clean_interval`` to handle the interval." +msgstr "" +"Added periodic task to clean expired reservation in Cinder scheduler. Added " +"a configuration option ``reservation_clean_interval`` to handle the interval." + +msgid "" +"Added policies to disallow multiattach operations. This includes two " +"policies, the first being a general policy to allow the creation or retyping " +"of multiattach volumes is a volume create policy with the name ``volume:" +"multiattach``. The second policy is specifically for disallowing the ability " +"to create multiple attachments on a volume that is marked as bootable, and " +"is an attachment policy with the name ``volume:" +"multiattach_bootable_volume``. The default for these new policies is ``rule:" +"admin_or_owner``; be aware that if you wish to disable either of these " +"policies for your users you will need to modify the default policy settings." +msgstr "" +"Added policies to disallow multiattach operations. This includes two " +"policies, the first being a general policy to allow the creation or retyping " +"of multiattach volumes is a volume create policy with the name ``volume:" +"multiattach``. The second policy is specifically for disallowing the ability " +"to create multiple attachments on a volume that is marked as bootable, and " +"is an attachment policy with the name ``volume:" +"multiattach_bootable_volume``. The default for these new policies is ``rule:" +"admin_or_owner``; be aware that if you wish to disable either of these " +"policies for your users you will need to modify the default policy settings." + +msgid "Added replication failback support for the Dell SC driver." +msgstr "Added replication failback support for the Dell SC driver." + +msgid "Added replication group support in HPE 3PAR cinder driver." +msgstr "Added replication group support in HPE 3PAR Cinder driver." + +msgid "Added replication v2.1 support to the Dell Storage Center drivers." +msgstr "Added replication v2.1 support to the Dell Storage Centre drivers." + +msgid "Added replication v2.1 support to the IBM Storwize driver." +msgstr "Added replication v2.1 support to the IBM Storwize driver." + +msgid "Added replication v2.1 support to the IBM XIV/DS8K driver." +msgstr "Added replication v2.1 support to the IBM XIV/DS8K driver." + +msgid "Added reset status API to generic volume group." +msgstr "Added reset status API to generic volume group." + +msgid "Added reset status API to group snapshot." +msgstr "Added reset status API to group snapshot." + +msgid "Added revert volume to snapshot in 3par driver." +msgstr "Added revert volume to snapshot in 3PAR driver." + +msgid "" +"Added schema validation support using jsonschema `[json-schema-validation]`_ " +"for all supported v3 APIs." +msgstr "" +"Added schema validation support using jsonschema `[json-schema-validation]`_ " +"for all supported v3 APIs." + +msgid "" +"Added secure HTTP support for REST API calls in the NexentaStor5 driver. Use " +"of HTTPS is set True by default with option ``nexenta_use_https``." +msgstr "" +"Added secure HTTP support for REST API calls in the NexentaStor5 driver. Use " +"of HTTPS is set True by default with option ``nexenta_use_https``." + +msgid "Added snapshot manage/unmanage support to the EMC XtremIO driver." +msgstr "Added snapshot manage/unmanage support to the EMC XtremIO driver." 
+ +msgid "Added snapshot manage/unmanage support to the HPE 3PAR driver." +msgstr "Added snapshot manage/unmanage support to the HPE 3PAR driver." + +msgid "Added snapshot manage/unmanage support to the HPE LeftHand driver." +msgstr "Added snapshot manage/unmanage support to the HPE LeftHand driver." + +msgid "Added support for API microversions, as well as /v3 API endpoint." +msgstr "Added support for API microversions, as well as /v3 API endpoint." + +msgid "" +"Added support for Keystone middleware feature to pass service token along " +"with the user token for Cinder to Nova and Glance services. This will help " +"get rid of user token expiration issues during long running tasks e.g. " +"creating volume snapshot (Cinder->Nova) and creating volume from image " +"(Cinder->Glance) etc. To use this functionality a service user needs to be " +"created first. Add the service user configurations in ``cinder.conf`` under " +"``service_user`` group and set ``send_service_user_token`` flag to ``True``." +msgstr "" +"Added support for Keystone middleware feature to pass service token along " +"with the user token for Cinder to Nova and Glance services. This will help " +"get rid of user token expiration issues during long running tasks e.g. " +"creating volume snapshot (Cinder->Nova) and creating volume from image " +"(Cinder->Glance) etc. To use this functionality a service user needs to be " +"created first. Add the service user configurations in ``cinder.conf`` under " +"``service_user`` group and set ``send_service_user_token`` flag to ``True``." + +msgid "" +"Added support for QoS in the INFINIDAT InfiniBox driver. QoS is available on " +"InfiniBox 4.0 onward." +msgstr "" +"Added support for QoS in the INFINIDAT InfiniBox driver. QoS is available on " +"InfiniBox 4.0 onward." + +msgid "Added support for ZMQ messaging layer in multibackend configuration." +msgstr "Added support for ZMQ messaging layer in multibackend configuration." + +msgid "" +"Added support for ZeroMQ messaging driver in cinder single backend config." +msgstr "" +"Added support for ZeroMQ messaging driver in Cinder single backend config." + +msgid "" +"Added support for active-active replication to the RBD driver. This allows " +"users to configure multiple volume backends that are all a member of the " +"same cluster participating in replication." +msgstr "" +"Added support for active-active replication to the RBD driver. This allows " +"users to configure multiple volume backends that are all a member of the " +"same cluster participating in replication." + +msgid "" +"Added support for cloning volume asynchronously, it can be enabled by option " +"async_clone set to true in parameter metadata when creating volume from " +"volume or snapshot." +msgstr "" +"Added support for cloning volume asynchronously, it can be enabled by option " +"async_clone set to true in parameter metadata when creating volume from " +"volume or snapshot." + +msgid "" +"Added support for creating a consistency group from a source consistency " +"group in the HPE 3PAR driver." +msgstr "" +"Added support for creating a consistency group from a source consistency " +"group in the HPE 3PAR driver." + +msgid "" +"Added support for creating, deleting, and updating consistency groups for " +"NetApp 7mode and CDOT backends." +msgstr "" +"Added support for creating, deleting, and updating consistency groups for " +"NetApp 7mode and CDOT backends." + +msgid "" +"Added support for get all distinct volumes' metadata from volume-summary API." 
+msgstr "" +"Added support for get all distinct volumes' metadata from volume-summary API." + +msgid "" +"Added support for images with vmware_adaptertype set to paraVirtual in the " +"VMDK driver." +msgstr "" +"Added support for images with vmware_adaptertype set to paraVirtual in the " +"VMDK driver." + +msgid "Added support for manage volume in the VMware VMDK driver." +msgstr "Added support for manage volume in the VMware VMDK driver." + +msgid "Added support for manage/unmanage snapshot in the ScaleIO driver." +msgstr "Added support for manage/unmanage snapshot in the ScaleIO driver." + +msgid "Added support for manage/unmanage volume in the ScaleIO driver." +msgstr "Added support for manage/unmanage volume in the ScaleIO driver." + +msgid "" +"Added support for oversubscription in thin provisioning in the INFINIDAT " +"InfiniBox driver. To use oversubscription, define " +"``max_over_subscription_ratio`` in the cinder configuration file." +msgstr "" +"Added support for over-subscription in thin provisioning in the INFINIDAT " +"InfiniBox driver. To use over-subscription, define " +"``max_over_subscription_ratio`` in the Cinder configuration file." + +msgid "" +"Added support for oversubscription in thin provisioning in the ScaleIO " +"driver. Volumes should have extra_specs with the key provisioning:type with " +"value equals to either 'thick' or 'thin'. max_oversubscription_ratio can be " +"defined by the global config or for ScaleIO specific with the config option " +"sio_max_over_subscription_ratio. The maximum oversubscription ratio " +"supported at the moment is 10.0." +msgstr "" +"Added support for over-subscription in thin provisioning in the ScaleIO " +"driver. Volumes should have extra_specs with the key provisioning:type with " +"value equals to either 'thick' or 'thin'. max_oversubscription_ratio can be " +"defined by the global config or for ScaleIO specific with the config option " +"sio_max_over_subscription_ratio. The maximum over-subscription ratio " +"supported at the moment is 10.0." + +msgid "" +"Added support for querying group details with volume ids which are in this " +"group. For example, \"groups/{group_id}?list_volume=True\"." +msgstr "" +"Added support for querying group details with volume ids which are in this " +"group. For example, \"groups/{group_id}?list_volume=True\"." + +msgid "" +"Added support for querying volumes filtered by glance metadata key/value " +"using 'glance_metadata' optional URL parameter. For example, \"volumes/" +"detail?glance_metadata={\"image_name\":\"xxx\"}\"." +msgstr "" +"Added support for querying volumes filtered by Glance metadata key/value " +"using 'glance_metadata' optional URL parameter. For example, \"volumes/" +"detail?glance_metadata={\"image_name\":\"xxx\"}\"." + +msgid "" +"Added support for querying volumes filtered by group_id using 'group_id' " +"optional URL parameter. For example, \"volumes/detail?" +"group_id={consistency_group_id}\"." +msgstr "" +"Added support for querying volumes filtered by group_id using 'group_id' " +"optional URL parameter. For example, \"volumes/detail?" +"group_id={consistency_group_id}\"." + +msgid "Added support for revert-to-snapshot in the VMware VMDK driver." +msgstr "Added support for revert-to-snapshot in the VMware VMDK driver." + +msgid "" +"Added support for scaling QoS in the ScaleIO driver. The new QoS keys are " +"maxIOPSperGB and maxBWSperGB." +msgstr "" +"Added support for scaling QoS in the ScaleIO driver. The new QoS keys are " +"maxIOPSperGB and maxBWSperGB." 
+ +msgid "" +"Added support for snapshots in the NFS driver. This functionality is only " +"enabled if ``nfs_snapshot_support`` is set to ``True`` in cinder.conf. " +"Cloning volumes is only supported if the source volume is not attached." +msgstr "" +"Added support for snapshots in the NFS driver. This functionality is only " +"enabled if ``nfs_snapshot_support`` is set to ``True`` in cinder.conf. " +"Cloning volumes is only supported if the source volume is not attached." + +msgid "" +"Added support for taking, deleting, and restoring a cgsnapshot for NetApp " +"7mode and CDOT backends." +msgstr "" +"Added support for taking, deleting, and restoring a cgsnapshot for NetApp " +"7mode and CDOT backends." + +msgid "" +"Added support for the use of live volume in place of standard replication in " +"the Dell SC driver." +msgstr "" +"Added support for the use of live volume in place of standard replication in " +"the Dell SC driver." + +msgid "Added support for vhd and vhdx disk-formats for volume upload-to-image." +msgstr "" +"Added support for vhd and vhdx disk-formats for volume upload-to-image." + +msgid "Added support for vhd disk-format for volume upload-to-image." +msgstr "Added support for vhd disk-format for volume upload-to-image." + +msgid "" +"Added support for volume compression in INFINIDAT driver. Compression is " +"available on InfiniBox 3.0 onward. To enable volume compression, set " +"``infinidat_use_compression`` to True in the backend section in the Cinder " +"configuration file." +msgstr "" +"Added support for volume compression in INFINIDAT driver. Compression is " +"available on InfiniBox 3.0 onward. To enable volume compression, set " +"``infinidat_use_compression`` to True in the backend section in the Cinder " +"configuration file." + +msgid "" +"Added support to Pure Storage Volume Drivers for Active Cluster using the " +"standard replication API's for the Block Storage Service." +msgstr "" +"Added support to Pure Storage Volume Drivers for Active Cluster using the " +"standard replication APIs for the Block Storage Service." + +msgid "" +"Added support to querying snapshots filtered by metadata key/value using " +"'metadata' optional URL parameter. For example, \"/v3/snapshots?" +"metadata=={'key1':'value1'}\"." +msgstr "" +"Added support to querying snapshots filtered by metadata key/value using " +"'metadata' optional URL parameter. For example, \"/v3/snapshots?" +"metadata=={'key1':'value1'}\"." + +msgid "" +"Added support to revert a volume to a snapshot with the Dell EMC VNX driver." +msgstr "" +"Added support to revert a volume to a snapshot with the Dell EMC VNX driver." + +msgid "Added supported driver checks on all drivers." +msgstr "Added supported driver checks on all drivers." + +msgid "Added the ability to create a CG from a source CG with the VMAX driver." +msgstr "" +"Added the ability to create a CG from a source CG with the VMAX driver." + +msgid "" +"Added the ability to list manageable volumes and snapshots to HNAS NFS " +"driver." +msgstr "" +"Added the ability to list manageable volumes and snapshots to HNAS NFS " +"driver." + +msgid "" +"Added the ability to list manageable volumes and snapshots via GET operation " +"on the /v2//os-volume-manage and /v2//os-snapshot-" +"manage URLs, respectively." +msgstr "" +"Added the ability to list manageable volumes and snapshots via GET operation " +"on the /v2//os-volume-manage and /v2//os-snapshot-" +"manage URLs, respectively." 
+ +msgid "" +"Added the options ``visibility`` and ``protected`` to the os-" +"volume_upload_image REST API call." +msgstr "" +"Added the options ``visibility`` and ``protected`` to the os-" +"volume_upload_image REST API call." + +msgid "Added update-host command for consistency groups in cinder-manage." +msgstr "Added update-host command for consistency groups in cinder-manage." + +msgid "" +"Added using etags in API calls to avoid the lost update problem during " +"deleting volume metadata." +msgstr "" +"Added using etags in API calls to avoid the lost update problem during " +"deleting volume metadata." + +msgid "Added v2.1 replication support in Huawei Cinder driver." +msgstr "Added v2.1 replication support in Huawei Cinder driver." + +msgid "Added v2.1 replication support to RBD driver." +msgstr "Added v2.1 replication support to RBD driver." + +msgid "Added v2.1 replication support to SolidFire driver." +msgstr "Added v2.1 replication support to SolidFire driver." + +msgid "Added v2.1 replication support to the HPE 3PAR driver." +msgstr "Added v2.1 replication support to the HPE 3PAR driver." + +msgid "Added v2.1 replication support to the HPE LeftHand driver." +msgstr "Added v2.1 replication support to the HPE LeftHand driver." + +msgid "Added volume backend driver for Veritas HyperScale storage." +msgstr "Added volume backend driver for Veritas HyperScale storage." + +msgid "Added volume backend drivers for CoprHD FC, iSCSI and Scaleio." +msgstr "Added volume backend drivers for CoprHD FC, iSCSI and Scaleio." + +msgid "Added volume driver for QNAP ES Storage Driver." +msgstr "Added volume driver for QNAP ES Storage Driver." + +msgid "Added volume driver for Zadara Storage VPSA." +msgstr "Added volume driver for Zadara Storage VPSA." + +msgid "Adding Live Migration functionality to VMAX driver version 3.0." +msgstr "Adding Live Migration functionality to VMAX driver version 3.0." + +msgid "Adding Qos functionality to VMAX driver version 3.0." +msgstr "Adding QoS functionality to VMAX driver version 3.0." + +msgid "Adding Replication V2.1 functionality to VMAX driver version 3.0." +msgstr "Adding Replication V2.1 functionality to VMAX driver version 3.0." + +msgid "Adding compression functionality to VMAX driver version 3.0." +msgstr "Adding compression functionality to VMAX driver version 3.0." + +msgid "" +"Adding or removing volume_type_access from any project during DB migration " +"62 must not be performed." +msgstr "" +"Adding or removing volume_type_access from any project during DB migration " +"62 must not be performed." + +msgid "Adds QoS support for VNX Cinder driver." +msgstr "Adds QoS support for VNX Cinder driver." + +msgid "Adds new Hitachi VSP iSCSI Driver." +msgstr "Adds new Hitachi VSP iSCSI Driver." + +msgid "" +"Adds support to configure the size of the native thread pool used by the " +"cinder volume and backup services. For the backup we use " +"`backup_native_threads_pool_size` in the `[DEFAULT]` section, and for the " +"backends we use `backend_native_threads_pool_size` in the driver section." +msgstr "" +"Adds support to configure the size of the native thread pool used by the " +"Cinder volume and backup services. For the backup we use " +"`backup_native_threads_pool_size` in the `[DEFAULT]` section, and for the " +"backends we use `backend_native_threads_pool_size` in the driver section." + +msgid "Adds v2.1 replication support in VNX Cinder driver." +msgstr "Adds v2.1 replication support in VNX Cinder driver." 
+ +msgid "" +"Administrator can disable this ability by updating the ``volume:" +"extend_attached_volume`` policy rule." +msgstr "" +"Administrator can disable this ability by updating the ``volume:" +"extend_attached_volume`` policy rule." + +msgid "" +"After CG tables are removed, we will allow default_cgsnapshot_type to be " +"used by group APIs." +msgstr "" +"After CG tables are removed, we will allow default_cgsnapshot_type to be " +"used by group APIs." + +msgid "" +"After an offline upgrade we had to restart all Cinder services twice, now " +"with the `cinder-manage db sync --bump-versions` command we can avoid the " +"second restart." +msgstr "" +"After an offline upgrade we had to restart all Cinder services twice, now " +"with the `cinder-manage db sync --bump-versions` command we can avoid the " +"second restart." + +msgid "" +"After being marked unsupported in the Rocky release the CoprHD driver is now " +"being removed in Stein. The vendor has indicated that this is desired as " +"the CoprHD driver has been deprecated." +msgstr "" +"After being marked unsupported in the Rocky release the CoprHD driver is now " +"being removed in Stein. The vendor has indicated that this is desired as " +"the CoprHD driver has been deprecated." + +msgid "" +"After running the migration script to migrate CGs to generic volume groups, " +"CG and group APIs work as follows." +msgstr "" +"After running the migration script to migrate CGs to generic volume groups, " +"CG and group APIs work as follows." + +msgid "" +"After transferring a volume without snapshots from one user project to " +"another user project, if the receiving user uses cascade deleting, it will " +"cause some exceptions in driver and volume will be error_deleting. Adding " +"additional check to ensure there are no snapshots left in other project when " +"cascade deleting a tranferred volume." +msgstr "" +"After transferring a volume without snapshots from one user project to " +"another user project, if the receiving user uses cascade deleting, it will " +"cause some exceptions in driver and volume will be error_deleting. Adding " +"additional check to ensure there are no snapshots left in other project when " +"cascade deleting a transferred volume." + +msgid "" +"All Datera DataFabric backed volume-types will now use API version 2 with " +"Datera DataFabric" +msgstr "" +"All Datera DataFabric backed volume-types will now use API version 2 with " +"Datera DataFabric" + +msgid "" +"All barbican and keymgr config options in Cinder are now deprecated. All of " +"these options are moved to the key_manager section for the Castellan library." +msgstr "" +"All Barbican and keymgr config options in Cinder are now deprecated. All of " +"these options are moved to the key_manager section for the Castellan library." + +msgid "" +"Allow API user to remove the consistency group name or description " +"information." +msgstr "" +"Allow API user to remove the consistency group name or description " +"information." + +msgid "" +"Allow for eradicating Pure Storage volumes, snapshots, and pgroups when " +"deleting their Cinder counterpart." +msgstr "" +"Allow for eradicating Pure Storage volumes, snapshots, and pgroups when " +"deleting their Cinder counterpart." + +msgid "Allow rbd driver to list manageable snapshots." +msgstr "Allow RBD driver to list manageable snapshots." + +msgid "Allow rbd driver to list manageable volumes." +msgstr "Allow RBD driver to list manageable volumes." + +msgid "Allow rbd driver to manage existing snapshot." 
+msgstr "Allow RBD driver to manage existing snapshot." + +msgid "Allow rbd driver to report backend state." +msgstr "Allow RBD driver to report backend state." + +msgid "Allow spaces when managing existing volumes with the HNAS iSCSI driver." +msgstr "" +"Allow spaces when managing existing volumes with the HNAS iSCSI driver." + +msgid "Allow the RBD driver to work with max_over_subscription_ratio." +msgstr "Allow the RBD driver to work with max_over_subscription_ratio." + +msgid "" +"Allow users to specify the copy speed while using Huawei driver to create " +"volume from snapshot or clone volume, by the new added metadata 'copyspeed'. " +"For example, user can add --metadata copyspeed=1 when creating volume from " +"source volume/snapshot. The valid optional range of copyspeed is [1, 2, 3, " +"4], respectively representing LOW, MEDIUM, HIGH and HIGHEST." +msgstr "" +"Allow users to specify the copy speed while using Huawei driver to create " +"volume from snapshot or clone volume, by the new added metadata 'copyspeed'. " +"For example, user can add --metadata copyspeed=1 when creating volume from " +"source volume/snapshot. The valid optional range of copyspeed is [1, 2, 3, " +"4], respectively representing LOW, MEDIUM, HIGH and HIGHEST." + +msgid "" +"Also some options are renamed (note that 3 of them were both moved and " +"renamed):" +msgstr "" +"Also some options are renamed (note that 3 of them were both moved and " +"renamed):" + +msgid "" +"An error has been corrected in the EMC ScaleIO driver that had caused all " +"volumes to be provisioned at 'thick' even if user had specificed 'thin'." +msgstr "" +"An error has been corrected in the EMC ScaleIO driver that had caused all " +"volumes to be provisioned at 'thick' even if user had specified 'thin'." + +msgid "" +"Any Volume Drivers configured in the DEFAULT config stanza should be moved " +"to their own stanza and enabled via the enabled_backends config option. The " +"older style of config with DEFAULT is deprecated and will be removed in " +"future releases." +msgstr "" +"Any Volume Drivers configured in the DEFAULT config stanza should be moved " +"to their own stanza and enabled via the enabled_backends config option. The " +"older style of config with DEFAULT is deprecated and will be removed in " +"future releases." + +msgid "" +"As an example one provider may have roles called viewer, admin, type_viewer, " +"and say type_admin. Admin and type_admin can create, delete, update types. " +"Everyone can list the storage types. Admin, type_viewer, and type_admin can " +"view the extra_specs." +msgstr "" +"As an example one provider may have roles called viewer, admin, type_viewer, " +"and say type_admin. Admin and type_admin can create, delete, update types. " +"Everyone can list the storage types. Admin, type_viewer, and type_admin can " +"view the extra_specs." + +msgid "" +"As cinder-backup was strongly reworked in this release, the recommended " +"upgrade order when executing live (rolling) upgrade is c-api->c-sch->c-vol-" +">c-bak." +msgstr "" +"As cinder-backup was strongly reworked in this release, the recommended " +"upgrade order when executing live (rolling) upgrade is c-api->c-sch->c-vol-" +">c-bak." + +msgid "" +"Availability zones may now be configured per backend in a multi-backend " +"configuration. Individual backend sections can now set the configuration " +"option ``backend_availability_zone``. If set, this value will override the " +"[DEFAULT] ``storage_availability_zone`` setting." 
+msgstr "" +"Availability zones may now be configured per backend in a multi-backend " +"configuration. Individual backend sections can now set the configuration " +"option ``backend_availability_zone``. If set, this value will override the " +"[DEFAULT] ``storage_availability_zone`` setting." + +msgid "Backend driver for Scality SRB has been removed." +msgstr "Backend driver for Scality SRB has been removed." + +msgid "Backup driver initialization using module name is deprecated." +msgstr "Backup driver initialisation using module name is deprecated." + +msgid "" +"Backup service to driver mapping is deprecated. If you use old values like " +"'cinder.backup.services.swift' or 'cinder.backup.services.ceph' it should be " +"changed to 'cinder.backup.drivers.swift' or 'cinder.backup.drivers.ceph' " +"accordingly to get your backup service working in the 'R' release." +msgstr "" +"Backup service to driver mapping is deprecated. If you use old values like " +"'cinder.backup.services.swift' or 'cinder.backup.services.ceph' it should be " +"changed to 'cinder.backup.drivers.swift' or 'cinder.backup.drivers.ceph' " +"accordingly to get your backup service working in the 'R' release." + +msgid "" +"Backup service to driver mapping is removed. If you use old values like " +"'cinder.backup.services.swift' or 'cinder.backup.services.ceph' it should be " +"changed to 'cinder.backup.drivers.swift' or 'cinder.backup.drivers.ceph' " +"accordingly to get your backup service working." +msgstr "" +"Backup service to driver mapping is removed. If you use old values like " +"'cinder.backup.services.swift' or 'cinder.backup.services.ceph' it should be " +"changed to 'cinder.backup.drivers.swift' or 'cinder.backup.drivers.ceph' " +"accordingly to get your backup service working." + +msgid "" +"Beginning with Cinder 12.0.0, you only need to specify policies in your " +"policy file that you want to **differ** from the default values. Unspecified " +"policies will use the default values *defined in the code*. Given that a " +"default value *must* be specified *in the code* when a new policy is " +"introduced, the ``default`` policy, which was formerly used as a catch-all " +"for policy targets that were not defined elsewhere in the policy file, has " +"no effect. We mention this because an old upgrade strategy was to use the " +"policy file from the previous release with ``\"default\": \"role:admin\"`` " +"(or ``\"default\": \"!\"``) so that newly introduced actions would be " +"blocked from end users until the operator had time to assess the " +"implications of exposing these actions. This strategy no longer works. " +"Hopefully this isn't a problem because we're defining sensible defaults in " +"the code. It would be a good idea, however, to generate the sample policy " +"file with each release (see instructions above) to verify this for yourself." +msgstr "" +"Beginning with Cinder 12.0.0, you only need to specify policies in your " +"policy file that you want to **differ** from the default values. Unspecified " +"policies will use the default values *defined in the code*. Given that a " +"default value *must* be specified *in the code* when a new policy is " +"introduced, the ``default`` policy, which was formerly used as a catch-all " +"for policy targets that were not defined elsewhere in the policy file, has " +"no effect. 
We mention this because an old upgrade strategy was to use the " +"policy file from the previous release with ``\"default\": \"role:admin\"`` " +"(or ``\"default\": \"!\"``) so that newly introduced actions would be " +"blocked from end users until the operator had time to assess the " +"implications of exposing these actions. This strategy no longer works. " +"Hopefully this isn't a problem because we're defining sensible defaults in " +"the code. It would be a good idea, however, to generate the sample policy " +"file with each release (see instructions above) to verify this for yourself." + +msgid "" +"Beginning with Cinder version 12.0.0, as part of the Queens release " +"\"policies in code\" community effort, Cinder has had the ability to run " +"without a policy file because sensible default values are specified in the " +"code. Customizing the policies in effect at your site, however, still " +"requires a policy file. The default location of this file has been ``/etc/" +"cinder/policy.json`` (although the documentation has indicated otherwise). " +"With this release, the default location of this file is changed to ``/etc/" +"cinder/policy.yaml``." +msgstr "" +"Beginning with Cinder version 12.0.0, as part of the Queens release " +"\"policies in code\" community effort, Cinder has had the ability to run " +"without a policy file because sensible default values are specified in the " +"code. Customizing the policies in effect at your site, however, still " +"requires a policy file. The default location of this file has been ``/etc/" +"cinder/policy.json`` (although the documentation has indicated otherwise). " +"With this release, the default location of this file is changed to ``/etc/" +"cinder/policy.yaml``." + +msgid "Better cleanup handling in the NetApp E-Series driver." +msgstr "Better clean-up handling in the NetApp E-Series driver." + +msgid "Block device driver" +msgstr "Block device driver" + +msgid "" +"BlockDeviceDriver was deprecated in Ocata release and marked as " +"'unsupported'. There is no CI for it too. If you used this driver before you " +"have to migrate your volumes to LVM with LIO target yourself before " +"upgrading to Queens release to get your volumes working." +msgstr "" +"BlockDeviceDriver was deprecated in Ocata release and marked as " +"'unsupported'. There is no CI for it too. If you used this driver before you " +"have to migrate your volumes to LVM with LIO target yourself before " +"upgrading to Queens release to get your volumes working." + +msgid "Blockbridge" +msgstr "Blockbridge" + +msgid "" +"BoolOpt ``datera_acl_allow_all`` is changed to a volume type extra spec " +"option-- ``DF:acl_allow_all``" +msgstr "" +"BoolOpt ``datera_acl_allow_all`` is changed to a volume type extra spec " +"option-- ``DF:acl_allow_all``" + +msgid "Broke Datera driver up into modules." +msgstr "Broke Datera driver up into modules." + +msgid "Bug Fixes" +msgstr "Bug Fixes" + +msgid "Capabilites List for Datera Volume Drivers" +msgstr "Capabilities List for Datera Volume Drivers" + +msgid "Capacity reporting fixed with Huawei backend drivers." +msgstr "Capacity reporting fixed with Huawei backend drivers." + +msgid "Changes config option default for datera_num_replicas from 1 to 3" +msgstr "Changes config option default for datera_num_replicas from 1 to 3" + +msgid "" +"Cinder FC Zone Manager Friendly Zone Names This feature adds support for " +"Fibre Channel user friendly zone names if implemented by the volume driver. 
" +"If the volume driver passes the host name and storage system to the Fibre " +"Channel Zone Manager in the conn_info structure, the zone manager will use " +"these names in structuring the zone name to provide a user friendly zone " +"name." +msgstr "" +"Cinder FC Zone Manager Friendly Zone Names This feature adds support for " +"Fibre Channel user friendly zone names if implemented by the volume driver. " +"If the volume driver passes the host name and storage system to the Fibre " +"Channel Zone Manager in the conn_info structure, the zone manager will use " +"these names in structuring the zone name to provide a user friendly zone " +"name." + +msgid "Cinder Release Notes" +msgstr "Cinder Release Notes" + +msgid "" +"Cinder backup creation can now (since microversion 3.51) receive the " +"availability zone where the backup should be stored." +msgstr "" +"Cinder backup creation can now (since microversion 3.51) receive the " +"availability zone where the backup should be stored." + +msgid "" +"Cinder backup now supports running multiple processes to make the most of " +"the available CPU cores. Performance gains will be significant when running " +"multiple concurrent backups/restores with compression. The number of " +"processes is set with `backup_workers` configuration option." +msgstr "" +"Cinder backup now supports running multiple processes to make the most of " +"the available CPU cores. Performance gains will be significant when running " +"multiple concurrent backups/restores with compression. The number of " +"processes is set with `backup_workers` configuration option." + +msgid "" +"Cinder is now collecting capacity data, including virtual free capacity etc " +"from the backends. A notification which includes that data is periodically " +"emitted." +msgstr "" +"Cinder is now collecting capacity data, including virtual free capacity etc " +"from the backends. A notification which includes that data is periodically " +"emitted." + +msgid "" +"Cinder now allows for a minimum value when using the capacity based QoS in " +"order to make sure small volumes can get a minimum allocation for them to be " +"usable. The newly added QoS specs are `read_iops_sec_per_gb_min`, " +"`write_iops_sec_per_gb_min`, `total_iops_sec_per_gb_min`, " +"`read_bytes_sec_per_gb_min`, `write_bytes_sec_per_gb_min` and " +"`total_bytes_sec_per_gb_min`" +msgstr "" +"Cinder now allows for a minimum value when using the capacity based QoS in " +"order to make sure small volumes can get a minimum allocation for them to be " +"usable. The newly added QoS specs are `read_iops_sec_per_gb_min`, " +"`write_iops_sec_per_gb_min`, `total_iops_sec_per_gb_min`, " +"`read_bytes_sec_per_gb_min`, `write_bytes_sec_per_gb_min` and " +"`total_bytes_sec_per_gb_min`" + +msgid "" +"Cinder now allows for capacity based QoS which can be useful in environments " +"where storage performance scales with consumption (such as RBD backed " +"storage). The newly added QoS specs are `read_iops_sec_per_gb`, " +"`write_iops_sec_per_gb`, `total_iops_sec_per_gb`, `read_bytes_sec_per_gb`, " +"`write_bytes_sec_per_gb` and `total_bytes_sec_per_gb`. These values will be " +"multiplied by the size of the volume and passed to the consumer. For " +"example, setting `total_iops_sec_per_gb` to 30 and setting " +"`total_bytes_sec_per_gb` to `1048576` (1MB) then creating a 100 GB volume " +"with that QoS will result in a volume with 3,000 total IOPs and 100MB/s " +"throughput limit." 
+msgstr "" +"Cinder now allows for capacity based QoS which can be useful in environments " +"where storage performance scales with consumption (such as RBD backed " +"storage). The newly added QoS specs are `read_iops_sec_per_gb`, " +"`write_iops_sec_per_gb`, `total_iops_sec_per_gb`, `read_bytes_sec_per_gb`, " +"`write_bytes_sec_per_gb` and `total_bytes_sec_per_gb`. These values will be " +"multiplied by the size of the volume and passed to the consumer. For " +"example, setting `total_iops_sec_per_gb` to 30 and setting " +"`total_bytes_sec_per_gb` to `1048576` (1MB) then creating a 100 GB volume " +"with that QoS will result in a volume with 3,000 total IOPs and 100MB/s " +"throughput limit." + +msgid "" +"Cinder now defaults to using the Glance v2 API. The ``glance_api_version`` " +"configuration option has been deprecated and will be removed in the 12.0.0 " +"Queens release." +msgstr "" +"Cinder now defaults to using the Glance v2 API. The ``glance_api_version`` " +"configuration option has been deprecated and will be removed in the 12.0.0 " +"Queens release." + +msgid "" +"Cinder now support policy in code, which means if users don't need to modify " +"any of the default policy rules, they do not need a policy file. Users can " +"modify/generate a `policy.yaml` file which will override specific policy " +"rules from their defaults." +msgstr "" +"Cinder now support policy in code, which means if users don't need to modify " +"any of the default policy rules, they do not need a policy file. Users can " +"modify/generate a `policy.yaml` file which will override specific policy " +"rules from their defaults." + +msgid "" +"Cinder now supports the use of 'max_over_subscription_ratio = auto' which " +"automatically calculates the value for max_over_subscription_ratio in the " +"scheduler." +msgstr "" +"Cinder now supports the use of 'max_over_subscription_ratio = auto' which " +"automatically calculates the value for max_over_subscription_ratio in the " +"scheduler." + +msgid "" +"Cinder now will return 415 (HTTPUnsupportedMediaType) when any unsupported " +"content type is specified in request header." +msgstr "" +"Cinder now will return 415 (HTTPUnsupportedMediaType) when any unsupported " +"content type is specified in request header." + +msgid "" +"Cinder services are now automatically downgrading RPC messages to be " +"understood by the oldest version of a service among all the deployment. " +"Disabled and dead services are also taken into account. It is important to " +"keep service list up to date, without old, unused records. This can be done " +"using ``cinder-manage service remove`` command. Once situation is cleaned up " +"services should be either restarted or ``SIGHUP`` signal should be issued to " +"their processes to force them to reload version pins. Please note that " +"cinder-api does not support ``SIGHUP`` signal." +msgstr "" +"Cinder services are now automatically downgrading RPC messages to be " +"understood by the oldest version of a service among all the deployment. " +"Disabled and dead services are also taken into account. It is important to " +"keep service list up to date without old unused records. This can be done " +"using ``cinder-manage service remove`` command. Once the situation is " +"cleaned up services should be either restarted or the ``SIGHUP`` signal " +"should be issued to their processes to force them to reload. Please note " +"that cinder-api does not support ``SIGHUP`` signal." 
+ +msgid "" +"Cinder stopped supporting single-backend configurations in Ocata. However, " +"sample ``cinder.conf`` was still generated with driver-related options in " +"``[DEFAULT]`` section, where those options had no effect at all. Now all of " +"driver options are listed in ``[backend_defaults]`` section, that indicates " +"that those options are effective only in this section and " +"``[]`` sections listed in ``enabled_backends``." +msgstr "" +"Cinder stopped supporting single-backend configurations in Ocata. However, " +"sample ``cinder.conf`` was still generated with driver-related options in " +"``[DEFAULT]`` section, where those options had no effect at all. Now all of " +"driver options are listed in ``[backend_defaults]`` section, that indicates " +"that those options are effective only in this section and " +"``[]`` sections listed in ``enabled_backends``." + +msgid "Cinder will now consume quota when importing new backup resource." +msgstr "Cinder will now consume quota when importing new backup resource." + +msgid "" +"Cinder will now correctly read Keystone's endpoint for quota calls from " +"keystone_authtoken.auth_uri instead of keymgr.encryption_auth_url config " +"option." +msgstr "" +"Cinder will now correctly read Keystone's endpoint for quota calls from " +"keystone_authtoken.auth_uri instead of keymgr.encryption_auth_url config " +"option." + +msgid "" +"Cinder's Google backup driver is now called gcs, so ``backup_driver`` " +"configuration for Google Cloud Storage should be updated from ``cinder." +"backup.drivers.google`` to ``cinder.backup.driver.gcs``." +msgstr "" +"Cinder's Google backup driver is now called gcs, so ``backup_driver`` " +"configuration for Google Cloud Storage should be updated from ``cinder." +"backup.drivers.google`` to ``cinder.backup.driver.gcs``." + +msgid "" +"Cinder-manage DB sync command can now bump the RPC and Objects versions of " +"the services to avoid a second restart when doing offline upgrades." +msgstr "" +"Cinder-manage DB sync command can now bump the RPC and Objects versions of " +"the services to avoid a second restart when doing offline upgrades." + +msgid "Cloning of consistency group added to EMC VNX backend driver." +msgstr "Cloning of consistency group added to EMC VNX backend driver." + +msgid "Coho" +msgstr "Coho" + +msgid "Configrable migration rate in VNX driver via metadata" +msgstr "Configurable migration rate in VNX driver via metadata" + +msgid "" +"Configuration options for the DRBD driver that will be applied to DRBD " +"resources; the default values should be okay for most installations." +msgstr "" +"Configuration options for the DRBD driver that will be applied to DRBD " +"resources; the default values should be okay for most installations." + +msgid "" +"Configurations that are setting backend config in ``[DEFAULT]`` section are " +"now not supported. You should use ``enabled_backends`` option to set up " +"backends." +msgstr "" +"Configurations that are setting backend config in ``[DEFAULT]`` section are " +"now not supported. You should use ``enabled_backends`` option to set up " +"backends." + +msgid "" +"Configuring Volume Drivers in the DEFAULT config stanza is not going to be " +"maintained and will be removed in the next release. All backends should use " +"the enabled_backends config option with separate stanza's for each." +msgstr "" +"Configuring Volume Drivers in the DEFAULT config stanza is not going to be " +"maintained and will be removed in the next release. 
All backends should use " +"the enabled_backends config option with separate stanzas for each." + +msgid "" +"Consistency group creation previously scheduled at the pool level. Now it is " +"fixed to schedule at the backend level as designed." +msgstr "" +"Consistency group creation previously scheduled at the pool level. Now it is " +"fixed to schedule at the backend level as designed." + +msgid "" +"Consistency group support has been added to the LeftHand backend driver." +msgstr "" +"Consistency group support has been added to the LeftHand backend driver." + +msgid "Corrected quota usage when transferring a volume between tenants." +msgstr "Corrected quota usage when transferring a volume between tenants." + +msgid "Corrected support to force detach a volume from all hosts on Unity." +msgstr "Corrected support to force detach a volume from all hosts on Unity." + +msgid "" +"Create CG Snapshot creates either in the CG or the groups table depending on " +"where the CG is." +msgstr "" +"Create CG Snapshot creates either in the CG or the groups table depending on " +"where the CG is." + +msgid "" +"Create CG from Source creates in either the CG or the groups table depending " +"on the source." +msgstr "" +"Create CG from Source creates in either the CG or the groups table depending " +"on the source." + +msgid "Create CG only creates in the groups table." +msgstr "Create CG only creates in the groups table." + +msgid "Create Volume adds the volume either to the CG or the group." +msgstr "Create Volume adds the volume either to the CG or the group." + +msgid "" +"Creating a new volume from an image that was created from an encrypted " +"Cinder volume now succeeds." +msgstr "" +"Creating a new volume from an image that was created from an encrypted " +"Cinder volume now succeeds." + +msgid "Current Series Release Notes" +msgstr "Current Series Release Notes" + +msgid "" +"DS8K driver adds two new properties into extra-specs so that user can " +"specify pool or lss or both of them to allocate volume in their expected " +"area." +msgstr "" +"DS8K driver adds two new properties into extra-specs so that user can " +"specify pool or lss or both of them to allocate volume in their expected " +"area." + +msgid "" +"Datera driver location has changed from cinder.volume.drivers .datera." +"DateraDriver to cinder.volume.drivers.datera.datera_iscsi .DateraDriver." +msgstr "" +"Datera driver location has changed from cinder.volume.drivers .datera." +"DateraDriver to cinder.volume.drivers.datera.datera_iscsi .DateraDriver." + +msgid "" +"Default `policy.json` file is now removed as Cinder now uses default " +"policies. A policy file is only needed if overriding one of the defaults." +msgstr "" +"Default `policy.json` file is now removed as Cinder now uses default " +"policies. A policy file is only needed if overriding one of the defaults." + +msgid "" +"Delete CG deletes from the CG or the groups table depending on where the CG " +"is." +msgstr "" +"Delete CG deletes from the CG or the groups table depending on where the CG " +"is." + +msgid "" +"Dell EMC PS Driver stats report has been fixed, now reports the " +"`provisioned_capacity_gb` properly. Fixes bug 1719659." +msgstr "" +"Dell EMC PS Driver stats report has been fixed, now reports the " +"`provisioned_capacity_gb` properly. Fixes bug 1719659." + +msgid "" +"Dell EMC PS Series Driver code reporting volume stats is now optimized to " +"return the information earlier and accelerate the process. This change fixes " +"bug 1661154." 
+msgstr "" +"Dell EMC PS Series Driver code reporting volume stats is now optimized to " +"return the information earlier and accelerate the process. This change fixes " +"bug 1661154." + +msgid "" +"Dell EMC PS Series Driver code was creating duplicate ACL records during " +"live migration. Fixes the initialize_connection code to not create access " +"record for a host if one exists previously. This change fixes bug 1726591." +msgstr "" +"Dell EMC PS Series Driver code was creating duplicate ACL records during " +"live migration. Fixes the initialize_connection code to not create access " +"record for a host if one exists previously. This change fixes bug 1726591." + +msgid "" +"Dell EMC PS Series Driver was creating unmanaged snapshots when extending " +"volumes. Fixed it by adding the missing no-snap parameter. This change fixes " +"bug 1720454." +msgstr "" +"Dell EMC PS Series Driver was creating unmanaged snapshots when extending " +"volumes. Fixed it by adding the missing no-snap parameter. This change fixes " +"bug 1720454." + +msgid "" +"Dell EMC PS Series Driver was creating unmanaged snapshots when extending " +"volumes. Fixed it by adding the missing no-snap parameter. This changes " +"fixes bug 1720454." +msgstr "" +"Dell EMC PS Series Driver was creating unmanaged snapshots when extending " +"volumes. Fixed it by adding the missing no-snap parameter. This changes " +"fixes bug 1720454." + +msgid "" +"Dell EMC PS volume driver reports the total number of volumes on the backend " +"in volume stats." +msgstr "" +"Dell EMC PS volume driver reports the total number of volumes on the backend " +"in volume stats." + +msgid "" +"Dell EMC SC driver correctly returns initialize_connection data when more " +"than one IQN is attached to a volume. This fixes some random Nova Live " +"Migration failures where the connection information being returned was for " +"an IQN other than the one for which it was being requested." +msgstr "" +"Dell EMC SC driver correctly returns initialise_connection data when more " +"than one IQN is attached to a volume. This fixes some random Nova Live " +"Migration failures where the connection information being returned was for " +"an IQN other than the one for which it was being requested." + +msgid "" +"Dell EMC Scale IO Driver: Fixes `bug 1560649 ` for creating volumes with sizes greater than that of " +"the original snapshot." +msgstr "" +"Dell EMC Scale IO Driver: Fixes `bug 1560649 ` for creating volumes with sizes greater than that of " +"the original snapshot." + +msgid "" +"Dell EMC ScaleIO has been renamed to Dell EMC VxFlex OS. Documentation for " +"the driver can be found under the new name. The driver maintains full " +"backwards compatability with prior ScaleIO releases and no configuration " +"changes are needed upon upgrade to the new version of the driver." +msgstr "" +"Dell EMC ScaleIO has been renamed to Dell EMC VxFlex OS. Documentation for " +"the driver can be found under the new name. The driver maintains full " +"backwards compatibility with prior ScaleIO releases and no configuration " +"changes are needed upon upgrade to the new version of the driver." + +msgid "" +"Dell EMC Unity Cinder driver allows enabling/disabling the SSL verification. " +"Admin can set `True` or `False` for `driver_ssl_cert_verify` to enable or " +"disable this function, alternatively set the `driver_ssl_cert_path=` " +"for customized CA path. Both above 2 options should go under the driver " +"section." 
+msgstr "" +"Dell EMC Unity Cinder driver allows enabling/disabling the SSL verification. " +"Admin can set `True` or `False` for `driver_ssl_cert_verify` to enable or " +"disable this function, alternatively set the `driver_ssl_cert_path=` " +"for customized CA path. Both above 2 options should go under the driver " +"section." + +msgid "" +"Dell EMC Unity Driver: Add thick volume support. Refer to `Unity Cinder " +"Configuration document `__ to create " +"a thick volume." +msgstr "" +"Dell EMC Unity Driver: Add thick volume support. Refer to `Unity Cinder " +"Configuration document `__ to create " +"a thick volume." + +msgid "" +"Dell EMC Unity Driver: Adds support for removing empty host. The new option " +"named `remove_empty_host` could be configured as `True` to notify Unity " +"driver to remove the host after the last LUN is detached from it." +msgstr "" +"Dell EMC Unity Driver: Adds support for removing empty host. The new option " +"named `remove_empty_host` could be configured as `True` to notify Unity " +"driver to remove the host after the last LUN is detached from it." + +msgid "" +"Dell EMC Unity Driver: Fixes `bug 1759175 `__ to detach the lun correctly when auto zone was enabled and " +"the lun was the last one attached to the host." +msgstr "" +"Dell EMC Unity Driver: Fixes `bug 1759175 `__ to detach the LUN correctly when auto zone was enabled and " +"the LUN was the last one attached to the host." + +msgid "" +"Dell EMC Unity Driver: Fixes `bug 1773305 `__ to return the targets which connect to the logged-out " +"initiators. Then the zone manager could clean up the FC zone based on the " +"correct target wwns." +msgstr "" +"Dell EMC Unity Driver: Fixes `bug 1773305 `__ to return the targets which connect to the logged-out " +"initiators. Then the zone manager could clean up the FC zone based on the " +"correct target WWNs." + +msgid "Dell EMC Unity driver: Add compressed volume support." +msgstr "Dell EMC Unity driver: Add compressed volume support." + +msgid "" +"Dell EMC Unity: Fixes bug 1775518 to make sure driver succeed to initialize " +"even though the value of unity_io_ports and unity_storage_pool_names are " +"empty" +msgstr "" +"Dell EMC Unity: Fixes bug 1775518 to make sure driver succeed to initialize " +"even though the value of unity_io_ports and unity_storage_pool_names are " +"empty" + +msgid "" +"Dell EMC Unity: Implements `bp unity-multiattach-support `__ to support " +"attaching a volume to multiple servers simultaneously." +msgstr "" +"Dell EMC Unity: Implements `bp unity-multiattach-support `__ to support " +"attaching a volume to multiple servers simultaneously." + +msgid "" +"Dell EMC VMAX driver has added list manageable volumes and snapshots support." +msgstr "" +"Dell EMC VMAX driver has added list manageable volumes and snapshots support." + +msgid "Dell EMC VMAX driver has added multiattach support." +msgstr "Dell EMC VMAX driver has added multiattach support." + +msgid "" +"Dell EMC VMAX driver has added support for failover to second instance of " +"Unisphere." +msgstr "" +"Dell EMC VMAX driver has added support for failover to second instance of " +"Unisphere." + +msgid "Dell EMC VNX driver: Enhances the performance of create/delete volume." +msgstr "Dell EMC VNX driver: Enhances the performance of create/delete volume." + +msgid "Dell EMC XtremIO driver has added multiattach support." +msgstr "Dell EMC XtremIO driver has added multiattach support." 
+ +msgid "" +"Dell SC - Compression and Dedupe support added for Storage Centers that " +"support the options." +msgstr "" +"Dell SC - Compression and Dedupe support added for Storage Centres that " +"support the options." + +msgid "" +"Dell SC - Volume and Group QOS support added for Storage Centers that " +"support and have enabled the option." +msgstr "" +"Dell SC - Volume and Group QOS support added for Storage Centres that " +"support and have enabled the option." + +msgid "" +"Dell SC Cinder driver has limited support in a failed over state so " +"thaw_backend has been implemented to reject the thaw call when in such a " +"state." +msgstr "" +"Dell SC Cinder driver has limited support in a failed over state so " +"thaw_backend has been implemented to reject the thaw call when in such a " +"state." + +msgid "" +"Deployments doing continuous live upgrades from master branch should not " +"upgrade into Ocata before doing an upgrade which includes all the Newton's " +"RPC API version bump commits (scheduler, volume). If you're upgrading " +"deployment in a release-to-release manner, then you can safely ignore this " +"note." +msgstr "" +"Deployments doing continuous live upgrades from master branch should not " +"upgrade into Ocata before doing an upgrade which includes all the Newton's " +"RPC API version bump commits (scheduler, volume). If you're upgrading " +"deployment in a release-to-release manner, then you can safely ignore this " +"note." + +msgid "" +"Deprecate option `check_max_pool_luns_threshold`. The VNX driver will always " +"check the threshold." +msgstr "" +"Deprecate option `check_max_pool_luns_threshold`. The VNX driver will always " +"check the threshold." + +msgid "" +"Deprecate the \"cinder-manage logs\" commands. These will be removed in a " +"later release." +msgstr "" +"Deprecate the \"cinder-manage logs\" commands. These will be removed in a " +"later release." + +msgid "Deprecated IBM driver _multipath_enabled config flags." +msgstr "Deprecated IBM driver _multipath_enabled config flags." + +msgid "" +"Deprecated config option `query_volume_filters` is removed now. Please, use " +"config file described in resource_query_filters_file to configure allowed " +"volume filters." +msgstr "" +"Deprecated config option `query_volume_filters` is removed now. Please, use " +"config file described in resource_query_filters_file to configure allowed " +"volume filters." + +msgid "Deprecated datera_api_version option." +msgstr "Deprecated datera_api_version option." + +msgid "" +"Deprecated the configuration option ``hnas_svcX_volume_type``. Use option " +"``hnas_svcX_pool_name`` to indicate the name of the services (pools)." +msgstr "" +"Deprecated the configuration option ``hnas_svcX_volume_type``. Use option " +"``hnas_svcX_pool_name`` to indicate the name of the services (pools)." + +msgid "" +"Deprecated the configuration option ``nas_ip``. Use option ``nas_host`` to " +"indicate the IP address or hostname of the NAS system." +msgstr "" +"Deprecated the configuration option ``nas_ip``. Use option ``nas_host`` to " +"indicate the IP address or hostname of the NAS system." + +msgid "Deprecation Notes" +msgstr "Deprecation Notes" + +msgid "" +"Disable creating volume with non cg_snapshot group_id in Storwize/SVC driver." +msgstr "" +"Disable creating volume with non cg_snapshot group_id in Storwize/SVC driver." + +msgid "Disable standard capabilities based on 3PAR licenses." +msgstr "Disable standard capabilities based on 3PAR licenses." 
+ +msgid "" +"Drivers supporting consistent group snapshot in generic volume groups " +"reports \"consistent_group_snapshot_enabled = True\" instead of " +"\"consistencygroup_support = True\". As a result, a spec such as " +"\"consistencygroup_support: ' True'\" in either group type or volume " +"type will cause the scheduler not to choose the backend that does not report " +"\"consistencygroup_support = True\". In order to create a generic volume " +"group that supports consistent group snapshot, " +"\"consistent_group_snapshot_enable: ' True'\" should be set in the group " +"type specs and volume type extra specs, and \"consistencygroup_support: " +"' True'\" should not be set in group type spec and volume type extra " +"specs." +msgstr "" +"Drivers supporting consistent group snapshot in generic volume groups " +"reports \"consistent_group_snapshot_enabled = True\" instead of " +"\"consistencygroup_support = True\". As a result, a spec such as " +"\"consistencygroup_support: ' True'\" in either group type or volume " +"type will cause the scheduler not to choose the backend that does not report " +"\"consistencygroup_support = True\". In order to create a generic volume " +"group that supports consistent group snapshot, " +"\"consistent_group_snapshot_enable: ' True'\" should be set in the group " +"type specs and volume type extra specs, and \"consistencygroup_support: " +"' True'\" should not be set in group type spec and volume type extra " +"specs." + +msgid "" +"Due to the ibmnas (SONAS) driver being rendered redundant by the addition of " +"NFS capabilities to the IBM GPFS driver, the ibmnas driver is being removed " +"in the Mitaka release." +msgstr "" +"Due to the ibmnas (SONAS) driver being rendered redundant by the addition of " +"NFS capabilities to the IBM GPFS driver, the ibmnas driver is being removed " +"in the Mitaka release." + +msgid "" +"EMC ScaleIO driver now uses the config option san_thin_provision to " +"determine the default provisioning type." +msgstr "" +"EMC ScaleIO driver now uses the config option san_thin_provision to " +"determine the default provisioning type." + +msgid "" +"EMC VNX driver have been rebranded to Dell EMC VNX driver. Existing " +"configurations will continue to work with the legacy name, but will need to " +"be updated by the next release. User needs update ``volume_driver`` to " +"``cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver``." +msgstr "" +"EMC VNX driver have been rebranded to Dell EMC VNX driver. Existing " +"configurations will continue to work with the legacy name, but will need to " +"be updated by the next release. User needs update ``volume_driver`` to " +"``cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver``." + +msgid "" +"Enable backup snapshot optimal path by implementing attach and detach " +"snapshot in the NEC driver." +msgstr "" +"Enable backup snapshot optimal path by implementing attach and detach " +"snapshot in the NEC driver." + +msgid "" +"Enable backup snapshot optimal path by implementing attach and detach " +"snapshot in the VMAX driver." +msgstr "" +"Enable backup snapshot optimal path by implementing attach and detach " +"snapshot in the VMAX driver." + +msgid "" +"Enabled Cinder Multi-Attach capability in the Dell EMC Storage Center Cinder " +"driver." +msgstr "" +"Enabled Cinder Multi-Attach capability in the Dell EMC Storage Centre Cinder " +"driver." + +msgid "" +"Enabled a cloud operator to correctly manage policy for volume type " +"operations. 
To permit volume type operations for specific user, you can for " +"example do as follows." +msgstr "" +"Enabled a cloud operator to correctly manage policy for volume type " +"operations. To permit volume type operations for specific user, you can for " +"example do as follows." + +msgid "" +"Everything in Cinder's release notes related to the High Availability Active-" +"Active effort -preluded with \"HA A-A:\"- is work in progress and should not " +"be used in production until it has been completed and the appropriate " +"release note has been issued stating its readiness for production." +msgstr "" +"Everything in Cinder's release notes related to the High Availability Active-" +"Active effort -preluded with \"HA A-A:\"- is work in progress and should not " +"be used in production until it has been completed and the appropriate " +"release note has been issued stating its readiness for production." + +msgid "Extended Volume-Type Support for Datera Volume Drivers" +msgstr "Extended Volume-Type Support for Datera Volume Drivers" + +msgid "" +"Extra spec ``RESKEY:availability_zones`` will only be used for filtering " +"backends when creating and retyping volumes." +msgstr "" +"Extra spec ``RESKEY:availability_zones`` will only be used for filtering " +"backends when creating and retyping volumes." + +msgid "FalconStor FSS" +msgstr "FalconStor FSS" + +msgid "" +"Filtering volumes by their display name now correctly handles display names " +"with single and double quotes." +msgstr "" +"Filtering volumes by their display name now correctly handles display names " +"with single and double quotes." + +msgid "" +"Fix NFS backup driver, we now support multiple backups on the same " +"container, they are no longer overwritten." +msgstr "" +"Fix NFS backup driver, we now support multiple backups on the same " +"container, they are no longer overwritten." + +msgid "" +"Fix a quota usage error triggered by a non-admin user backing up an in-use " +"volume. The forced backup uses a temporary volume, and quota usage was " +"incorrectly updated when the temporary volume was deleted after the backup " +"operation completed. Fixes `bug 1778774 `__." +msgstr "" +"Fix a quota usage error triggered by a non-admin user backing up an in-use " +"volume. The forced backup uses a temporary volume, and quota usage was " +"incorrectly updated when the temporary volume was deleted after the backup " +"operation completed. Fixes `bug 1778774 `__." + +msgid "" +"Fix for Tintri image direct clone feature. Fix for the bug 1400966 prevents " +"user from specifying image \"nfs share location\" as location value for an " +"image. Now, in order to use Tintri image direct clone, user can specify " +"\"provider_location\" in image metadata to specify image nfs share location. " +"NFS share which hosts images should be specified in a file using " +"tintri_image_shares_config config option." +msgstr "" +"Fix for Tintri image direct clone feature. Fix for the bug 1400966 prevents " +"user from specifying image \"nfs share location\" as location value for an " +"image. Now, in order to use Tintri image direct clone, user can specify " +"\"provider_location\" in image metadata to specify image NFS share location. " +"NFS share which hosts images should be specified in a file using " +"tintri_image_shares_config config option." + +msgid "" +"Fix issue with PureFCDriver where partially case sensitive comparison of " +"connector wwpn could cause initialize_connection to fail when attempting to " +"create duplicate Purity host." 
+msgstr "" +"Fix issue with PureFCDriver where partially case sensitive comparison of " +"connector wwpn could cause initialise_connection to fail when attempting to " +"create duplicate Purity host." + +msgid "" +"Fix the bug that Cinder can't support creating volume from Nova specific " +"image which only includes ``snapshot-id`` metadata (Bug" +msgstr "" +"Fix the bug that Cinder can't support creating volume from Nova specific " +"image which only includes ``snapshot-id`` metadata (Bug" + +msgid "" +"Fix the bug that Cinder would commit quota twice in a clean environment when " +"managing volume and snapshot resource (Bug" +msgstr "" +"Fix the bug that Cinder would commit quota twice in a clean environment when " +"managing volume and snapshot resource (Bug" + +msgid "" +"Fix the following volume image metadata endpoints returning None following " +"policy enforcement failure:" +msgstr "" +"Fix the following volume image metadata endpoints returning None following " +"policy enforcement failure:" + +msgid "" +"Fix the way encryption key IDs are managed for encrypted volume backups. " +"When creating a backup, the volume's encryption key is cloned and assigned a " +"new key ID. The backup's cloned key ID is now stored in the backup database " +"so that it can be deleted whenever the backup is deleted." +msgstr "" +"Fix the way encryption key IDs are managed for encrypted volume backups. " +"When creating a backup, the volume's encryption key is cloned and assigned a " +"new key ID. The backup's cloned key ID is now stored in the backup database " +"so that it can be deleted whenever the backup is deleted." + +msgid "" +"Fixed 'No Space left' error by dd command when users set the config option " +"``volume_clear_size`` to a value larger than the size of a volume." +msgstr "" +"Fixed 'No Space left' error by dd command when users set the config option " +"``volume_clear_size`` to a value larger than the size of a volume." + +msgid "Fixed ACL multi-attach bug in Datera EDF driver." +msgstr "Fixed ACL multi-attach bug in Datera EDF driver." + +msgid "" +"Fixed HNAS bug that placed a cloned volume in the same pool as its source, " +"even if the clone had a different pool specification. Driver will not allow " +"to make clones using a different volume type anymore." +msgstr "" +"Fixed HNAS bug that placed a cloned volume in the same pool as its source, " +"even if the clone had a different pool specification. Driver will not allow " +"to make clones using a different volume type any more." + +msgid "Fixed Non-WAN port filter issue in Kaminario iSCSI driver" +msgstr "Fixed Non-WAN port filter issue in Kaminario iSCSI driver" + +msgid "Fixed Non-WAN port filter issue in Kaminario iSCSI driver." +msgstr "Fixed Non-WAN port filter issue in Kaminario iSCSI driver." + +msgid "Fixed QNAP driver failures to create volume and snapshot in some cases." +msgstr "" +"Fixed QNAP driver failures to create volume and snapshot in some cases." + +msgid "" +"Fixed QNAP driver failures to detach iscsi device while uploading volume to " +"image." +msgstr "" +"Fixed QNAP driver failures to detach iSCSI device while uploading volume to " +"image." + +msgid "" +"Fixed StorWize/SVC error causing volume deletion to get stuck in the " +"'deleting' state when using FlashCopy." +msgstr "" +"Fixed StorWize/SVC error causing volume deletion to get stuck in the " +"'deleting' state when using FlashCopy." 
+ +msgid "" +"Fixed a bug which could create volumes with invalid content in case of " +"unhandled errors from glance client (Bug `#1799221 `_)." +msgstr "" +"Fixed a bug which could create volumes with invalid content in case of " +"unhandled errors from glance client (Bug `#1799221 `_)." + +msgid "Fixed a few scalability bugs in the Datera EDF driver." +msgstr "Fixed a few scalability bugs in the Datera EDF driver." + +msgid "" +"Fixed an error in quota handling that required the keystone " +"encryption_auth_url to be configured even if no encryption was being used." +msgstr "" +"Fixed an error in quota handling that required the keystone " +"encryption_auth_url to be configured even if no encryption was being used." + +msgid "" +"Fixed an issue when deleting a consistency group snapshot with the Dell SC " +"backend driver." +msgstr "" +"Fixed an issue when deleting a consistency group snapshot with the Dell SC " +"backend driver." + +msgid "" +"Fixed an issue where the NetApp cDOT NFS driver failed to clone new volumes " +"from the image cache." +msgstr "" +"Fixed an issue where the NetApp cDOT NFS driver failed to clone new volumes " +"from the image cache." + +msgid "Fixed an issue with live migration when using the EMC VMAX driver." +msgstr "Fixed an issue with live migration when using the EMC VMAX driver." + +msgid "Fixed backup and restore of volumes in VMware VMDK driver." +msgstr "Fixed backup and restore of volumes in VMware VMDK driver." + +msgid "" +"Fixed bug #1731474 on NetApp Data ONTAP driver that was causing LUNs to be " +"created with larger size than requested. This fix requires version 9.1 of " +"ONTAP or later." +msgstr "" +"Fixed bug #1731474 on NetApp Data ONTAP driver that was causing LUNs to be " +"created with larger size than requested. This fix requires version 9.1 of " +"ONTAP or later." + +msgid "" +"Fixed bug #1783582, where calls to os-force_detach were failing on NetApp " +"ONTAP iSCSI/FC drivers." +msgstr "" +"Fixed bug #1783582, where calls to os-force_detach were failing on NetApp " +"ONTAP iSCSI/FC drivers." + +msgid "" +"Fixed bug 1632333 with the NetApp ONTAP Driver. Now the copy offload method " +"is invoked early to avoid downloading Glance images twice." +msgstr "" +"Fixed bug 1632333 with the NetApp ONTAP Driver. Now the copy offload method " +"is invoked early to avoid downloading Glance images twice." + +msgid "" +"Fixed bug causing snapshot creation to fail on systems with LC_NUMERIC set " +"to locale using ',' as decimal separator." +msgstr "" +"Fixed bug causing snapshot creation to fail on systems with LC_NUMERIC set " +"to locale using ',' as decimal separator." + +msgid "" +"Fixed consistency groups API which was always returning groups scoped to " +"project ID from user context instead of given input project ID." +msgstr "" +"Fixed consistency groups API which was always returning groups scoped to " +"project ID from user context instead of given input project ID." + +msgid "" +"Fixed group availability zone-backend host mismatch [`Bug 1773446 `_]." +msgstr "" +"Fixed group availability zone-backend host mismatch [`Bug 1773446 `_]." + +msgid "" +"Fixed issue of managing a VG with more than one volume in Kaminario FC and " +"iSCSI Cinder drivers." +msgstr "" +"Fixed issue of managing a VG with more than one volume in Kaminario FC and " +"iSCSI Cinder drivers." + +msgid "" +"Fixed issue where Pure Volume Drivers would ignore reserved_percentage " +"config option." 
+msgstr "" +"Fixed issue where Pure Volume Drivers would ignore reserved_percentage " +"config option." + +msgid "" +"Fixed issue where ``create`` and ``update`` api's of ``volume-type`` and " +"``group_type`` were returning 500 error if boolean 'is_public' value passed " +"in the form of string. Now user can pass following valid boolean values to " +"these api's: '0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', " +"'y', 'yes'" +msgstr "" +"Fixed issue where ``create`` and ``update`` api's of ``volume-type`` and " +"``group_type`` were returning 500 error if boolean 'is_public' value passed " +"in the form of string. Now user can pass following valid boolean values to " +"these api's: '0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', " +"'y', 'yes'" + +msgid "" +"Fixed issue where the HNAS driver was not correctly reporting THIN " +"provisioning and related stats." +msgstr "" +"Fixed issue where the HNAS driver was not correctly reporting THIN " +"provisioning and related stats." + +msgid "" +"Fixed issue with error being raised when performing a delete quota operation " +"in a subproject." +msgstr "" +"Fixed issue with error being raised when performing a delete quota operation " +"in a subproject." + +msgid "Fixed issue with extra-specs not being applied when cloning a volume." +msgstr "Fixed issue with extra-specs not being applied when cloning a volume." + +msgid "" +"Fixed issue with the EMC ScaleIO driver not able to identify a volume after " +"a migration is performed." +msgstr "" +"Fixed issue with the EMC ScaleIO driver not able to identify a volume after " +"a migration is performed." + +msgid "Fixed live migration on EMC VMAX3 backends." +msgstr "Fixed live migration on EMC VMAX3 backends." + +msgid "" +"Fixed misleading error message when NetApp copyoffload tool is not in place " +"during image cloning." +msgstr "" +"Fixed misleading error message when NetApp copyoffload tool is not in place " +"during image cloning." + +msgid "" +"Fixed service state reporting when backup manager is unable to initialize " +"one of the backup drivers." +msgstr "" +"Fixed service state reporting when backup manager is unable to initialise " +"one of the backup drivers." + +msgid "" +"Fixed support for IPv6 on management and data paths for NFS, iSCSI and FCP " +"NetApp ONTAP drivers." +msgstr "" +"Fixed support for IPv6 on management and data paths for NFS, iSCSI and FCP " +"NetApp ONTAP drivers." + +msgid "" +"Fixed the VMware VMDK driver to create volume from image in ova container." +msgstr "" +"Fixed the VMware VMDK driver to create volume from image in ova container." + +msgid "" +"Fixed using of the user's token in the nova client (`bug #1686616 `_)" +msgstr "" +"Fixed using of the user's token in the nova client (`bug #1686616 `_)" + +msgid "" +"Fixed volume extend issue that allowed a tenant with enough quota to extend " +"the volume to limits greater than what the volume backend supported." +msgstr "" +"Fixed volume extend issue that allowed a tenant with enough quota to extend " +"the volume to limits greater than what the volume backend supported." + +msgid "" +"Fixes a bug in NetApp SolidFire where the deletion of group snapshots was " +"failing." +msgstr "" +"Fixes a bug in NetApp SolidFire where the deletion of group snapshots was " +"failing." + +msgid "" +"Fixes a bug that prevented the configuration of multiple redundant Quobyte " +"registries in the quobyte_volume_url config option." 
+msgstr "" +"Fixes a bug that prevented the configuration of multiple redundant Quobyte " +"registries in the quobyte_volume_url config option." + +msgid "" +"Fixes an issue where starting the Pure volume drivers with replication " +"enabled and default values for pure_replica_interval_default would cause an " +"error to be raised from the backend." +msgstr "" +"Fixes an issue where starting the Pure volume drivers with replication " +"enabled and default values for pure_replica_interval_default would cause an " +"error to be raised from the backend." + +msgid "" +"Fixes concurrency issue on backups, where only 20 native threads could be " +"concurrently be executed. Now default will be 60, and can be changed with " +"`backup_native_threads_pool_size`." +msgstr "" +"Fixes concurrency issue on backups, where only 20 native threads could be " +"concurrently be executed. Now the default will be 60, and can be changed " +"with `backup_native_threads_pool_size`." + +msgid "Fixes force_detach behavior for volumes in NetApp SolidFire driver." +msgstr "Fixes force_detach behaviour for volumes in NetApp SolidFire driver." + +msgid "" +"Following APIs were accepting boolean parameters with leading and trailing " +"white spaces (for e.g. \" true \"). But now with schema validation support, " +"all these boolean parameters henceforth will not accept leading and trailing " +"whitespaces to maintain consistency." +msgstr "" +"Following APIs were accepting boolean parameters with leading and trailing " +"white spaces (for e.g. \" true \"). But now with schema validation support, " +"all these boolean parameters henceforth will not accept leading and trailing " +"whitespaces to maintain consistency." + +msgid "" +"For EMC VNX backends, please upgrade to use ``cinder.volume.drivers.emc.vnx." +"driver.EMCVNXDriver``. Add config option ``storage_protocol = fc`` or " +"``storage_protocol = iscsi`` to the driver section to enable the FC or iSCSI " +"driver respectively." +msgstr "" +"For EMC VNX backends, please upgrade to use ``cinder.volume.drivers.emc.vnx." +"driver.EMCVNXDriver``. Add config option ``storage_protocol = fc`` or " +"``storage_protocol = iscsi`` to the driver section to enable the FC or iSCSI " +"driver respectively." + +msgid "" +"For SolidFire, QoS specs are now checked to make sure they fall within the " +"min and max constraints. If not the QoS specs are capped at the min or max " +"(i.e. if spec says 50 and minimum supported is 100, the driver will set it " +"to 100)." +msgstr "" +"For SolidFire, QoS specs are now checked to make sure they fall within the " +"min and max constraints. If not the QoS specs are capped at the min or max " +"(i.e. if spec says 50 and minimum supported is 100, the driver will set it " +"to 100)." + +msgid "Generic group is added into quota management." +msgstr "Generic group is added into quota management." + +msgid "Generic volume groups:" +msgstr "Generic volume groups:" + +msgid "" +"Google backup driver now supports ``google-auth`` library, and is the " +"preferred library if both ``google-auth`` (together with ``google-auth-" +"httplib2``) and ``oauth2client`` libraries are present in the system." +msgstr "" +"Google backup driver now supports ``google-auth`` library, and is the " +"preferred library if both ``google-auth`` (together with ``google-auth-" +"httplib2``) and ``oauth2client`` libraries are present in the system." + +msgid "" +"Google backup driver now works when using ``google-api-python-client`` " +"version 1.6.0 or higher." 
+msgstr "" +"Google backup driver now works when using ``google-api-python-client`` " +"version 1.6.0 or higher." + +msgid "Group APIs will not work on groups with default_cgsnapshot_type." +msgstr "Group APIs will not work on groups with default_cgsnapshot_type." + +msgid "Group APIs will only write/read in/from the groups table." +msgstr "Group APIs will only write/read in/from the groups table." + +msgid "Groups with default_cgsnapshot_type can only be operated by CG APIs." +msgstr "Groups with default_cgsnapshot_type can only be operated by CG APIs." + +msgid "" +"HA A-A: Add cluster configuration option to allow grouping hosts that share " +"the same backend configurations and should work in Active-Active fashion." +msgstr "" +"HA A-A: Add cluster configuration option to allow grouping hosts that share " +"the same backend configurations and should work in Active-Active fashion." + +msgid "" +"HA A-A: Added cluster subcommand in manage command to list, remove, and " +"rename clusters." +msgstr "" +"HA A-A: Added cluster subcommand in manage command to list, remove, and " +"rename clusters." + +msgid "" +"HA A-A: Added clusters API endpoints for cluster related operations (index, " +"detail, show, enable/disable). Index and detail accept filtering by `name`, " +"`binary`, `disabled`, `num_hosts`, `num_down_hosts`, and up/down status " +"(`is_up`) as URL parameters. Also added their respective policies." +msgstr "" +"HA A-A: Added clusters API endpoints for cluster related operations (index, " +"detail, show, enable/disable). Index and detail accept filtering by `name`, " +"`binary`, `disabled`, `num_hosts`, `num_down_hosts`, and up/down status " +"(`is_up`) as URL parameters. Also added their respective policies." + +msgid "" +"HA A-A: Updated manage command to display cluster information on service " +"listings." +msgstr "" +"HA A-A: Updated manage command to display cluster information on service " +"listings." + +msgid "" +"HNAS drivers have new configuration paths. Users should now use ``cinder." +"volume.drivers.hitachi.hnas_nfs.HNASNFSDriver`` for HNAS NFS driver and " +"``cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver`` for HNAS iSCSI " +"driver." +msgstr "" +"HNAS drivers have new configuration paths. Users should now use ``cinder." +"volume.drivers.hitachi.hnas_nfs.HNASNFSDriver`` for HNAS NFS driver and " +"``cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver`` for HNAS iSCSI " +"driver." + +msgid "HNAS drivers will now read configuration from cinder.conf." +msgstr "HNAS drivers will now read configuration from cinder.conf." + +msgid "" +"HP drivers have been rebranded to HPE. Existing configurations will continue " +"to work with the legacy name, but will need to be updated by the next " +"release." +msgstr "" +"HP drivers have been rebranded to HPE. Existing configurations will continue " +"to work with the legacy name, but will need to be updated by the next " +"release." + +msgid "" +"HPE 3PAR driver adds following functionalities Creating thin/dedup " +"compresssed volume. Retype for tpvv/tdvv volumes to be compressed. Migration " +"of compressed volumes. Create compressed volume from compressed volume/" +"snapshot source. Compression support to create cg from source." +msgstr "" +"HPE 3PAR driver adds following functionalities Creating thin/dedup " +"compressed volume. Retype for tpvv/tdvv volumes to be compressed. Migration " +"of compressed volumes. Create compressed volume from compressed volume/" +"snapshot source. 
Compression support to create cg from source." + +msgid "" +"HPE LeftHand config options ``hplefthand_api_url``, ``hplefthand_username``, " +"``hplefthand_password``, ``hplefthand_clustername``, " +"``hplefthand_iscsi_chap_enabled``, and ``hplefthand_debug`` were deprecated " +"in the Mitaka release and have now been removed. The corresponding " +"``hpelefthand_api_url``, ``hpelefthand_username``, ``hpelefthand_password``, " +"``hpelefthand_clustername``, ``hpelefthand_iscsi_chap_enabled``, and " +"``hpelefthand_debug`` should be used instead." +msgstr "" +"HPE LeftHand config options ``hplefthand_api_url``, ``hplefthand_username``, " +"``hplefthand_password``, ``hplefthand_clustername``, " +"``hplefthand_iscsi_chap_enabled``, and ``hplefthand_debug`` were deprecated " +"in the Mitaka release and have now been removed. The corresponding " +"``hpelefthand_api_url``, ``hpelefthand_username``, ``hpelefthand_password``, " +"``hpelefthand_clustername``, ``hpelefthand_iscsi_chap_enabled``, and " +"``hpelefthand_debug`` should be used instead." + +msgid "" +"HTTP connector for the Cinder Brocade FC Zone plugin. This connector allows " +"for communication between the Brocade FC zone plugin and the switch to be " +"over HTTP or HTTPs. To make use of this connector, the user would add a " +"configuration setting in the fabric block for a Brocade switch with the name " +"as 'fc_southbound_protocol' with a value as 'HTTP' or 'HTTPS'." +msgstr "" +"HTTP connector for the Cinder Brocade FC Zone plugin. This connector allows " +"for communication between the Brocade FC zone plugin and the switch to be " +"over HTTP or HTTPS. To make use of this connector, the user would add a " +"configuration setting in the fabric block for a Brocade switch with the name " +"as 'fc_southbound_protocol' with a value as 'HTTP' or 'HTTPS'." + +msgid "" +"Hitachi VSP drivers have a new config option ``vsp_compute_target_ports`` to " +"specify IDs of the storage ports used to attach volumes to compute nodes. " +"The default is the value specified for the existing ``vsp_target_ports`` " +"option. Either or both of ``vsp_compute_target_ports`` and " +"``vsp_target_ports`` must be specified." +msgstr "" +"Hitachi VSP drivers have a new config option ``vsp_compute_target_ports`` to " +"specify IDs of the storage ports used to attach volumes to compute nodes. " +"The default is the value specified for the existing ``vsp_target_ports`` " +"option. Either or both of ``vsp_compute_target_ports`` and " +"``vsp_target_ports`` must be specified." + +msgid "" +"Hitachi VSP drivers have a new config option ``vsp_horcm_pair_target_ports`` " +"to specify IDs of the storage ports used to copy volumes by Shadow Image or " +"Thin Image. The default is the value specified for the existing " +"``vsp_target_ports`` option. Either or both of " +"``vsp_horcm_pair_target_ports`` and ``vsp_target_ports`` must be specified." +msgstr "" +"Hitachi VSP drivers have a new config option ``vsp_horcm_pair_target_ports`` " +"to specify IDs of the storage ports used to copy volumes by Shadow Image or " +"Thin Image. The default is the value specified for the existing " +"``vsp_target_ports`` option. Either or both of " +"``vsp_horcm_pair_target_ports`` and ``vsp_target_ports`` must be specified." + +msgid "IBM DS8K driver has added multiattach support." +msgstr "IBM DS8K driver has added multiattach support." + +msgid "" +"INFINIDAT volume driver now requires the 'infinisdk' python module to be " +"installed." 
+msgstr "" +"INFINIDAT volume driver now requires the 'infinisdk' Python module to be " +"installed." + +msgid "IQN identification is now case-insensitive when using LIO." +msgstr "IQN identification is now case-insensitive when using LIO." + +msgid "" +"If RBD stats collection is taking too long in your environment maybe even " +"leading to the service appearing as down you'll want to use the " +"`rbd_exclusive_cinder_pool = true` configuration option if you are using the " +"pool exclusively for Cinder and maybe even if you are not and can live with " +"the innacuracy." +msgstr "" +"If RBD stats collection is taking too long in your environment maybe even " +"leading to the service appearing as down you'll want to use the " +"`rbd_exclusive_cinder_pool = true` configuration option if you are using the " +"pool exclusively for Cinder and maybe even if you are not and can live with " +"the inaccuracy." + +msgid "" +"If device attachment failed it could leave the volume partially attached. " +"Cinder now tries to clean up on failure." +msgstr "" +"If device attachment failed it could leave the volume partially attached. " +"Cinder now tries to clean up on failure." + +msgid "" +"If during a *live* upgrade from Liberty a backup service will be killed " +"while processing a restore request it may happen that such backup status " +"won't be automatically cleaned up on the service restart. Such orphaned " +"backups need to be cleaned up manually." +msgstr "" +"If during a *live* upgrade from Liberty a backup service will be killed " +"while processing a restore request it may happen that such backup status " +"won't be automatically cleaned up on the service restart. Such orphaned " +"backups need to be cleaned up manually." + +msgid "" +"If policy for update volume metadata is modified in a desired way it's " +"needed to add a desired rule for create volume metadata." +msgstr "" +"If policy for update volume metadata is modified in a desired way it's " +"needed to add a desired rule for create volume metadata." + +msgid "" +"If using the NetApp ONTAP drivers (7mode/cmode), the configuration value for " +"\"max_over_subscription_ratio\" may need to be increased to avoid scheduling " +"problems where storage pools that previously were valid to schedule new " +"volumes suddenly appear to be out of space to the Cinder scheduler. See " +"documentation `here `_." +msgstr "" +"If using the NetApp ONTAP drivers (7mode/cmode), the configuration value for " +"\"max_over_subscription_ratio\" may need to be increased to avoid scheduling " +"problems where storage pools that previously were valid to schedule new " +"volumes suddenly appear to be out of space to the Cinder scheduler. See " +"documentation `here `_." + +msgid "" +"If using the key manager, the configuration details should be updated to " +"reflect the Castellan-specific configuration options." +msgstr "" +"If using the key manager, the configuration details should be updated to " +"reflect the Castellan-specific configuration options." + +msgid "" +"In IBM Storwize_SVC driver, user could specify only one IO group per backend " +"definition. The user now may specify a comma separated list of IO groups, " +"and at the time of creating the volume, the driver will select an IO group " +"which has the least number of volumes associated with it. The change is " +"backward compatible, meaning single value is still supported." +msgstr "" +"In IBM Storwize_SVC driver, user could specify only one IO group per backend " +"definition. 
The user now may specify a comma separated list of IO groups, " +"and at the time of creating the volume, the driver will select an IO group " +"which has the least number of volumes associated with it. The change is " +"backward compatible, meaning single value is still supported." + +msgid "" +"In NEC driver, the deprecated configuration parameter " +"`ldset_controller_node_name` was deleted." +msgstr "" +"In NEC driver, the deprecated configuration parameter " +"`ldset_controller_node_name` was deleted." + +msgid "" +"In NEC driver, the number of volumes in a storage pool is no longer limited " +"to 1024. More volumes can be created with storage firmware revision 1015 or " +"later." +msgstr "" +"In the NEC driver, the number of volumes in a storage pool is no longer " +"limited to 1024. More volumes can be created with storage firmware revision " +"1015 or later." + +msgid "" +"In VNX Cinder driver, ``replication_device`` keys, ``backend_id`` and " +"``san_ip`` are mandatory now. If you prefer security file authentication, " +"please append ``storage_vnx_security_file_dir`` in ``replication_device``, " +"otherwise, append ``san_login``, ``san_password``, " +"``storage_vnx_authentication_type`` in ``replication_device``." +msgstr "" +"In VNX Cinder driver, ``replication_device`` keys, ``backend_id`` and " +"``san_ip`` are mandatory now. If you prefer security file authentication, " +"please append ``storage_vnx_security_file_dir`` in ``replication_device``, " +"otherwise, append ``san_login``, ``san_password``, " +"``storage_vnx_authentication_type`` in ``replication_device``." + +msgid "" +"In certain environments (Kubernetes for example) indirect calls to the LVM " +"commands result in file descriptor leak warning messages which in turn cause " +"the process_execution method to raise and exception." +msgstr "" +"In certain environments (Kubernetes for example) indirect calls to the LVM " +"commands result in file descriptor leak warning messages which in turn cause " +"the process_execution method to raise an exception." + +msgid "" +"In order to simplify initial setup for new installations the default " +"behaviour of the Quobyte driver for the options " +"``nas_secure_file_operations`` and ``nas_secure_file_permissions`` has " +"changed. The 'auto' values are no longer mapped to true but to false. " +"Therefore the old default behaviour to run with secure settings is changed " +"to run without secure settings as the new default behaviour. Installations " +"using the default values for these options should ensure to explicitly set " +"them to true with this new Cinder Quobyte driver version." +msgstr "" +"In order to simplify initial setup for new installations the default " +"behaviour of the Quobyte driver for the options " +"``nas_secure_file_operations`` and ``nas_secure_file_permissions`` has " +"changed. The 'auto' values are no longer mapped to true but to false. " +"Therefore the old default behaviour to run with secure settings is changed " +"to run without secure settings as the new default behaviour. Installations " +"using the default values for these options should ensure to explicitly set " +"them to true with this new Cinder Quobyte driver version." + +msgid "Infortrend" +msgstr "Infortrend" + +msgid "" +"Instead of ``api_class`` option ``cinder.keymgr.barbican." +"BarbicanKeyManager``, use ``backend`` option `barbican``" +msgstr "" +"Instead of ``api_class`` option ``cinder.keymgr.barbican."
+"BarbicanKeyManager``, use ``backend`` option `barbican``" + +msgid "" +"Instead of using osapi_volume_base_url use public_endpoint. Both do the same " +"thing." +msgstr "" +"Instead of using osapi_volume_base_url use public_endpoint. Both do the same " +"thing." + +msgid "" +"IntOpt ``datera_num_replicas`` is changed to a volume type extra spec " +"option-- ``DF:replica_count``" +msgstr "" +"IntOpt ``datera_num_replicas`` is changed to a volume type extra spec " +"option-- ``DF:replica_count``" + +msgid "" +"Introduced generic volume groups and added create/ delete/update/list/show " +"APIs for groups." +msgstr "" +"Introduced generic volume groups and added create/ delete/update/list/show " +"APIs for groups." + +msgid "" +"Introduced replication group support and added group action APIs " +"enable_replication, disable_replication, failover_replication and " +"list_replication_targets." +msgstr "" +"Introduced replication group support and added group action APIs " +"enable_replication, disable_replication, failover_replication and " +"list_replication_targets." + +msgid "" +"It is now possible to delete a volume and its snapshots by passing an " +"additional argument to volume delete, \"cascade=True\"." +msgstr "" +"It is now possible to delete a volume and its snapshots by passing an " +"additional argument to volume delete, \"cascade=True\"." + +msgid "" +"It is required to copy new rootwrap.d/volume.filters file into /etc/cinder/" +"rootwrap.d directory." +msgstr "" +"It is required to copy new rootwrap.d/volume.filters file into /etc/cinder/" +"rootwrap.d directory." + +msgid "" +"Kaminario K2 iSCSI driver now supports non discovery multipathing (Nova and " +"Cinder won't use iSCSI sendtargets) which can be enabled by setting " +"`disable_discovery` to `true` in the configuration." +msgstr "" +"Kaminario K2 iSCSI driver now supports non discovery multipathing (Nova and " +"Cinder won't use iSCSI sendtargets) which can be enabled by setting " +"`disable_discovery` to `true` in the configuration." + +msgid "" +"Kaminario K2 now supports networks with duplicated FQDNs via configuration " +"option `unique_fqdn_network` so attaching in these networks will work (bug " +"#1720147)." +msgstr "" +"Kaminario K2 now supports networks with duplicated FQDNs via configuration " +"option `unique_fqdn_network` so attaching in these networks will work (bug " +"#1720147)." + +msgid "" +"Key migration is initiated on service startup, and entries in the cinder-" +"volume log will indicate the migration status. Log entries will indicate " +"when a volume's encryption key ID has been migrated to Barbican, and a " +"summary log message will indicate when key migration has finished." +msgstr "" +"Key migration is initiated on service start-up, and entries in the cinder-" +"volume log will indicate the migration status. Log entries will indicate " +"when a volume's encryption key ID has been migrated to Barbican, and a " +"summary log message will indicate when key migration has finished." + +msgid "Known Issues" +msgstr "Known Issues" + +msgid "" +"LUKS Encrypted RBD volumes can now be created by cinder-volume. This " +"capability was previously blocked by the rbd volume driver due to the lack " +"of any encryptors capable of attaching to an encrypted RBD volume. These " +"volumes can also be seeded with RAW image data from Glance through the use " +"of QEMU 2.10 and the qemu-img convert command." +msgstr "" +"LUKS Encrypted RBD volumes can now be created by cinder-volume. 
This " +"capability was previously blocked by the rbd volume driver due to the lack " +"of any encryptors capable of attaching to an encrypted RBD volume. These " +"volumes can also be seeded with RAW image data from Glance through the use " +"of QEMU 2.10 and the qemu-img convert command." + +msgid "Liberty Series Release Notes" +msgstr "Liberty Series Release Notes" + +msgid "List CG Snapshots checks both the CG and the groups tables." +msgstr "List CG Snapshots checks both the CG and the groups tables." + +msgid "List CG checks both CG and groups tables." +msgstr "List CG checks both CG and groups tables." + +msgid "" +"Locks may use Tooz as abstraction layer now, to support distributed lock " +"managers and prepare Cinder to better support HA configurations." +msgstr "" +"Locks may use Tooz as abstraction layer now, to support distributed lock " +"managers and prepare Cinder to better support HA configurations." + +msgid "Log VMAX specific metadata of a volume if debug is enabled." +msgstr "Log VMAX specific metadata of a volume if debug is enabled." + +msgid "" +"Logging path can now be configured for vzstorage driver in shares config " +"file (specified by vzstorage_shares_config option). To set custom logging " +"path add `'-l', ''` to mount options array. Otherwise " +"default logging path `/var/log/vstorage//cinder.log.gz` will " +"be used." +msgstr "" +"Logging path can now be configured for vzstorage driver in shares config " +"file (specified by vzstorage_shares_config option). To set custom logging " +"path add `'-l', ''` to mount options array. Otherwise " +"default logging path `/var/log/vstorage//cinder.log.gz` will " +"be used." + +msgid "" +"Make Cinder scheduler check if backend reports `online_extend_support` " +"before performing an online extend operation." +msgstr "" +"Make Cinder scheduler check if backend reports `online_extend_support` " +"before performing an online extend operation." + +msgid "" +"Manage and unmanage support has been added to the Nimble backend driver." +msgstr "" +"Manage and unmanage support has been added to the Nimble backend driver." + +msgid "" +"Marked the ITRI DISCO driver option ``disco_wsdl_path`` as deprecated. The " +"new preferred protocol for array communication is REST and SOAP support will " +"be removed." +msgstr "" +"Marked the ITRI DISCO driver option ``disco_wsdl_path`` as deprecated. The " +"new preferred protocol for array communication is REST and SOAP support will " +"be removed." + +msgid "Mitaka Series Release Notes" +msgstr "Mitaka Series Release Notes" + +msgid "" +"Modify CG modifies in the CG table if the CG is in the CG table, otherwise " +"it modifies in the groups table." +msgstr "" +"Modify CG modifies in the CG table if the CG is in the CG table, otherwise " +"it modifies in the groups table." + +msgid "" +"Modify default lvm_type setting from thick to auto. This will result in " +"Cinder preferring thin on init, if there are no LV's in the VG it will " +"create a thin-pool and use thin. If there are LV's and no thin-pool it will " +"continue using thick." +msgstr "" +"Modify default lvm_type setting from thick to auto. This will result in " +"Cinder preferring thin on init, if there are no LV's in the VG it will " +"create a thin-pool and use thin. If there are LV's and no thin-pool it will " +"continue using thick." + +msgid "Modify rule for types_manage and volume_type_access, e.g." +msgstr "Modify rule for types_manage and volume_type_access, e.g." 
+ +msgid "" +"Modifying the extra-specs of an in use Volume Type was something that we've " +"unintentionally allowed. The result is unexpected or unknown volume " +"behaviors in cases where a type was modified while a volume was assigned " +"that type. This has been particularly annoying for folks that have assigned " +"the volume-type to a different/new backend device. In case there are " +"customers using this \"bug\" we add a config option to retain the bad " +"behavior \"allow_inuse_volume_type_modification\", with a default setting of " +"False (Don't allow). Note this config option is being introduced as " +"deprecated and will be removed in a future release. It's being provided as " +"a bridge to not break upgrades without notice." +msgstr "" +"Modifying the extra-specs of an in use Volume Type was something that we've " +"unintentionally allowed. The result is unexpected or unknown volume " +"behaviours in cases where a type was modified while a volume was assigned " +"that type. This has been particularly annoying for folks that have assigned " +"the volume-type to a different/new backend device. In case there are " +"customers using this \"bug\" we add a config option to retain the bad " +"behaviour \"allow_inuse_volume_type_modification\", with a default setting " +"of False (Don't allow). Note this config option is being introduced as " +"deprecated and will be removed in a future release. It's being provided as " +"a bridge to not break upgrades without notice." + +msgid "" +"Multiattach support is disabled for the LVM driver when using the LIO iSCSI " +"target. This functionality will be fixed in a later release." +msgstr "" +"Multiattach support is disabled for the LVM driver when using the LIO iSCSI " +"target. This functionality will be fixed in a later release." + +msgid "" +"Multiple backends may now be enabled within the same Cinder Volume service " +"on Windows by using the ``enabled_backends`` config option." +msgstr "" +"Multiple backends may now be enabled within the same Cinder Volume service " +"on Windows by using the ``enabled_backends`` config option." + +msgid "Naming convention change for Datera Volume Drivers" +msgstr "Naming convention change for Datera Volume Drivers" + +msgid "" +"Nested quotas will no longer be used by default, but can be configured by " +"setting ``quota_driver = cinder.quota.NestedDbQuotaDriver``" +msgstr "" +"Nested quotas will no longer be used by default, but can be configured by " +"setting ``quota_driver = cinder.quota.NestedDbQuotaDriver``" + +msgid "" +"NetApp E-series (bug 1718739):The NetApp E-series driver has been fixed to " +"correctly report the \"provisioned_capacity_gb\". Now it sums the capacity " +"of all the volumes in the configured backend to get the correct value. This " +"bug fix affects all the protocols supported by the driver (FC and iSCSI)." +msgstr "" +"NetApp E-series (bug 1718739):The NetApp E-series driver has been fixed to " +"correctly report the \"provisioned_capacity_gb\". Now it sums the capacity " +"of all the volumes in the configured backend to get the correct value. This " +"bug fix affects all the protocols supported by the driver (FC and iSCSI)." + +msgid "" +"NetApp ONTAP (bug 1762424): Fix ONTAP NetApp driver not being able to extend " +"a volume to a size greater than the corresponding LUN max geometry." +msgstr "" +"NetApp ONTAP (bug 1762424): Fix ONTAP NetApp driver not being able to extend " +"a volume to a size greater than the corresponding LUN max geometry." 
+ +msgid "" +"NetApp ONTAP (bug 1765182): Make ONTAP NetApp NFS driver report to the " +"Cinder scheduler that it doesn't support online volume extending." +msgstr "" +"NetApp ONTAP (bug 1765182): Make ONTAP NetApp NFS driver report to the " +"Cinder scheduler that it doesn't support online volume extending." + +msgid "" +"NetApp ONTAP (bug 1765182): Make ONTAP NetApp iSCSI driver and FC driver " +"report to the Cinder scheduler that they don't support online volume " +"extending." +msgstr "" +"NetApp ONTAP (bug 1765182): Make ONTAP NetApp iSCSI driver and FC driver " +"report to the Cinder scheduler that they don't support online volume " +"extending." + +msgid "" +"NetApp ONTAP NFS (bug 1690954): Fix wrong usage of export path as volume " +"name when deleting volumes and snapshots." +msgstr "" +"NetApp ONTAP NFS (bug 1690954): Fix wrong usage of export path as volume " +"name when deleting volumes and snapshots." + +msgid "NetApp ONTAP NFS multiattach capability enabled." +msgstr "NetApp ONTAP NFS multiattach capability enabled." + +msgid "" +"NetApp ONTAP iSCSI (bug 1712651): Fix ONTAP NetApp iSCSI driver not raising " +"a proper exception when trying to extend an attached volume beyond its max " +"geometry." +msgstr "" +"NetApp ONTAP iSCSI (bug 1712651): Fix ONTAP NetApp iSCSI driver not raising " +"a proper exception when trying to extend an attached volume beyond its max " +"geometry." + +msgid "NetApp ONTAP iSCSI and FCP drivers multiattach capability enabled." +msgstr "NetApp ONTAP iSCSI and FCP drivers multiattach capability enabled." + +msgid "" +"NetApp SolidFire driver now supports optimized revert to snapshot operations." +msgstr "" +"NetApp SolidFire driver now supports optimized revert to snapshot operations." + +msgid "" +"NetApp cDOT block and file drivers have improved support for SVM scoped user " +"accounts. Features not supported for SVM scoped users include QoS, aggregate " +"usage reporting, and dedupe usage reporting." +msgstr "" +"NetApp cDOT block and file drivers have improved support for SVM scoped user " +"accounts. Features not supported for SVM scoped users include QoS, aggregate " +"usage reporting, and dedupe usage reporting." + +msgid "" +"NetApp cDOT block and file drivers now report replication capability at the " +"pool level; and are hence compatible with using the ``replication_enabled`` " +"extra-spec in volume types." +msgstr "" +"NetApp cDOT block and file drivers now report replication capability at the " +"pool level; and are hence compatible with using the ``replication_enabled`` " +"extra-spec in volume types." + +msgid "" +"New BoolOpt ``datera_debug_override_num_replicas`` for Datera Volume Drivers" +msgstr "" +"New BoolOpt ``datera_debug_override_num_replicas`` for Datera Volume Drivers" + +msgid "" +"New Cinder driver based on storops library (available in pypi) for EMC VNX." +msgstr "" +"New Cinder driver based on storops library (available in pypi) for EMC VNX." + +msgid "New Cinder volume driver for Inspur AS13000 series." +msgstr "New Cinder volume driver for Inspur AS13000 series." + +msgid "" +"New Cinder volume driver for Inspur InStorage. The new driver supports iSCSI." +msgstr "" +"New Cinder volume driver for Inspur InStorage. The new driver supports iSCSI." + +msgid "New Cinder volume driver for LINBIT LINSTOR resources." +msgstr "New Cinder volume driver for LINBIT LINSTOR resources." + +msgid "New FC Cinder volume driver for Inspur Instorage." +msgstr "New FC Cinder volume driver for Inspur Instorage." 
+ +msgid "New FC Cinder volume driver for Kaminario K2 all-flash arrays." +msgstr "New FC Cinder volume driver for Kaminario K2 all-flash arrays." + +msgid "New Features" +msgstr "New Features" + +msgid "" +"New config format to allow for using shared Volume Driver configuration " +"defaults via the [backend_defaults] stanza. Config options defined there " +"will be used as defaults for each backend enabled via enabled_backends." +msgstr "" +"New config format to allow for using shared Volume Driver configuration " +"defaults via the [backend_defaults] stanza. Config options defined there " +"will be used as defaults for each backend enabled via enabled_backends." + +msgid "" +"New config option added. ``\"connection_string\"`` in [profiler] section is " +"used to specify OSProfiler driver connection string, for example, ``" +"\"connection_string = messaging://\"``, ``\"connection_string = mongodb://" +"localhost:27017\"``" +msgstr "" +"New config option added. ``\"connection_string\"`` in [profiler] section is " +"used to specify OSProfiler driver connection string, for example, ``" +"\"connection_string = messaging://\"``, ``\"connection_string = mongodb://" +"localhost:27017\"``" + +msgid "" +"New config option for Pure Storage volume drivers pure_eradicate_on_delete. " +"When enabled will permanantly eradicate data instead of placing into pending " +"eradication state." +msgstr "" +"New config option for Pure Storage volume drivers pure_eradicate_on_delete. " +"When enabled will permanently eradicate data instead of placing into pending " +"eradication state." + +msgid "" +"New config option to enable discard (trim/unmap) support for any backend." +msgstr "" +"New config option to enable discard (trim/unmap) support for any backend." + +msgid "New iSCSI Cinder volume driver for Kaminario K2 all-flash arrays." +msgstr "New iSCSI Cinder volume driver for Kaminario K2 all-flash arrays." + +msgid "New path - cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver" +msgstr "New path - cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver" + +msgid "New path - cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver" +msgstr "New path - cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver" + +msgid "" +"New path - cinder.volume.drivers.hpe.hpe_lefthand_iscsi." +"HPELeftHandISCSIDriver" +msgstr "" +"New path - cinder.volume.drivers.hpe.hpe_lefthand_iscsi." +"HPELeftHandISCSIDriver" + +msgid "New path - cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver" +msgstr "New path - cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver" + +msgid "New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver" +msgstr "New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver" + +msgid "New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver" +msgstr "" +"New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver" + +msgid "Newton Series Release Notes" +msgstr "Newton Series Release Notes" + +msgid "Now availability zone is supported in volume type as below." +msgstr "Now availability zone is supported in volume type as below." + +msgid "" +"Now cinder will keep track of 'multiattach' attribute when managing backend " +"volumes." +msgstr "" +"Now Cinder will keep track of 'multiattach' attribute when managing backend " +"volumes." + +msgid "" +"Now cinder will refresh the az cache immediately if previous create volume " +"task failed due to az not found." 
+msgstr "" +"Now Cinder will refresh the az cache immediately if previous create volume " +"task failed due to az not found." + +msgid "" +"Now extend won't work on disabled services because it's going through the " +"scheduler, unlike how it worked before." +msgstr "" +"Now extend won't work on disabled services because it's going through the " +"scheduler, unlike how it worked before." + +msgid "" +"Now scheduler plugins are aware of operation type via ``operation`` " +"attribute in RequestSpec dictionary, plugins can support backend filtering " +"according to backend status as well as operation type. Current possible " +"values for ``operation`` are:" +msgstr "" +"Now scheduler plugins are aware of operation type via ``operation`` " +"attribute in RequestSpec dictionary, plugins can support backend filtering " +"according to backend status as well as operation type. Current possible " +"values for ``operation`` are:" + +msgid "Now the ``os-host show`` API will count project's resource correctly." +msgstr "Now the ``os-host show`` API will count project's resource correctly." + +msgid "Ocata Series Release Notes" +msgstr "Ocata Series Release Notes" + +msgid "" +"Old VNX FC (``cinder.volume.drivers.emc.emc_cli_fc.EMCCLIFCDriver``)/ iSCSI " +"(``cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver``) drivers are " +"deprecated. Please refer to upgrade section for information about the new " +"driver." +msgstr "" +"Old VNX FC (``cinder.volume.drivers.emc.emc_cli_fc.EMCCLIFCDriver``)/ iSCSI " +"(``cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver``) drivers are " +"deprecated. Please refer to upgrade section for information about the new " +"driver." + +msgid "" +"Old driver paths have been removed since they have been through our alloted " +"deprecation period. Make sure if you have any of these paths being set in " +"your cinder.conf for the volume_driver option, to update to the new driver " +"path listed here." +msgstr "" +"Old driver paths have been removed since they have been through our allotted " +"deprecation period. Make sure if you have any of these paths being set in " +"your cinder.conf for the volume_driver option, to update to the new driver " +"path listed here." + +msgid "" +"Old names and locations are still supported but support will be removed in " +"the future." +msgstr "" +"Old names and locations are still supported but support will be removed in " +"the future." + +msgid "" +"Old path - cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver" +msgstr "" +"Old path - cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver" + +msgid "" +"Old path - cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver" +msgstr "" +"Old path - cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver" + +msgid "" +"Old path - cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver" +msgstr "" +"Old path - cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver" + +msgid "" +"Old path - cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver" +msgstr "" +"Old path - cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver" + +msgid "Old path - cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver" +msgstr "Old path - cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver" + +msgid "Old path - cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver" +msgstr "" +"Old path - cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver" + +msgid "" +"Old path - cinder.volume.drivers.san.hp.hp_lefthand_iscsi." 
+"HPLeftHandISCSIDriver" +msgstr "" +"Old path - cinder.volume.drivers.san.hp.hp_lefthand_iscsi." +"HPLeftHandISCSIDriver" + +msgid "Old path - cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver" +msgstr "Old path - cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver" + +msgid "" +"On offline upgrades, due to the rolling upgrade mechanism we need to restart " +"the cinder services twice to complete the installation just like in the " +"rolling upgrades case. First you stop the cinder services, then you upgrade " +"them, you sync your DB, then you start all the cinder services, and then you " +"restart them all. To avoid this last restart we can now instruct the DB " +"sync to bump the services after the migration is completed, the command to " +"do this is `cinder-manage db sync --bump-versions`" +msgstr "" +"On offline upgrades, due to the rolling upgrade mechanism we need to restart " +"the cinder services twice to complete the installation just like in the " +"rolling upgrades case. First you stop the cinder services, then you upgrade " +"them, you sync your DB, then you start all the cinder services, and then you " +"restart them all. To avoid this last restart we can now instruct the DB " +"sync to bump the services after the migration is completed, the command to " +"do this is `cinder-manage db sync --bump-versions`" + +msgid "" +"Operator needs to perform ``cinder-manage db online_data_migrations`` to " +"migrate existing consistency groups to generic volume groups." +msgstr "" +"Operator needs to perform ``cinder-manage db online_data_migrations`` to " +"migrate existing consistency groups to generic volume groups." + +msgid "" +"Operators should change backup driver configuration value to use class name " +"to get backup service working in a 'S' release." +msgstr "" +"Operators should change backup driver configuration value to use class name " +"to get backup service working in a 'S' release." + +msgid "Optimize backend reporting capabilities for Huawei drivers." +msgstr "Optimise backend reporting capabilities for Huawei drivers." + +msgid "" +"Oracle ZFSSA iSCSI - allows a volume to be connected to more than one " +"connector at the same time, which is required for live-migration to work. " +"ZFSSA software release 2013.1.3.x (or newer) is required for this to work." +msgstr "" +"Oracle ZFSSA iSCSI - allows a volume to be connected to more than one " +"connector at the same time, which is required for live-migration to work. " +"ZFSSA software release 2013.1.3.x (or newer) is required for this to work." + +msgid "" +"Oracle ZFSSA iSCSI volume driver implements ``get_manageable_volumes()``" +msgstr "" +"Oracle ZFSSA iSCSI volume driver implements ``get_manageable_volumes()``" + +msgid "Other Notes" +msgstr "Other Notes" + +msgid "Pike Series Release Notes" +msgstr "Pike Series Release Notes" + +msgid "" +"PowerMax driver - Workload support was dropped in ucode 5978. If a VMAX All " +"Flash array is upgraded to 5978 or greater and existing volume types " +"leveraged workload e.g. DSS, DSS_REP, OLTP and OLTP_REP, certain operations " +"will no longer work and the volume type will be unusable. This fix addresses " +"these issues and fixes problems with using old volume types with workloads " +"included in the volume type pool_name." +msgstr "" +"PowerMax driver - Workload support was dropped in ucode 5978. If a VMAX All " +"Flash array is upgraded to 5978 or greater and existing volume types " +"leveraged workload e.g. 
DSS, DSS_REP, OLTP and OLTP_REP, certain operations " +"will no longer work and the volume type will be unusable. This fix addresses " +"these issues and fixes problems with using old volume types with workloads " +"included in the volume type pool_name." + +msgid "Prelude" +msgstr "Prelude" + +msgid "" +"Previous installations of IBM Storage must be un-installed first and the new " +"driver should be installed on top. In addition the cinder.conf values should " +"be updated to reflect the new paths. For example the proxy setting of " +"``storage.proxy.IBMStorageProxy`` should be updated to ``cinder.volume." +"drivers.ibm.ibm_storage.proxy.IBMStorageProxy``." +msgstr "" +"Previous installations of IBM Storage must be uninstalled first and the new " +"driver should be installed on top. In addition the cinder.conf values should " +"be updated to reflect the new paths. For example the proxy setting of " +"``storage.proxy.IBMStorageProxy`` should be updated to ``cinder.volume." +"drivers.ibm.ibm_storage.proxy.IBMStorageProxy``." + +msgid "" +"Previously the only way to remove volumes in error states from a consistency-" +"group was to delete the consistency group and create it again. Now it is " +"possible to remove volumes in error and error_deleting states." +msgstr "" +"Previously the only way to remove volumes in error states from a consistency-" +"group was to delete the consistency group and create it again. Now it is " +"possible to remove volumes in error and error_deleting states." + +msgid "" +"Privsep daemons are now started by Cinder when required. These daemons can " +"be started via rootwrap if required. rootwrap configs therefore need to be " +"updated to include new privsep daemon invocations." +msgstr "" +"Privsep daemons are now started by Cinder when required. These daemons can " +"be started via rootwrap if required. rootwrap configs therefore need to be " +"updated to include new privsep daemon invocations." + +msgid "" +"Privsep transitions. Cinder is transitioning from using the older style " +"rootwrap privilege escalation path to the new style Oslo privsep path. This " +"should improve performance and security of Cinder in the long term." +msgstr "" +"Privsep transitions. Cinder is transitioning from using the older style " +"rootwrap privilege escalation path to the new style Oslo privsep path. This " +"should improve performance and security of Cinder in the long term." + +msgid "Prohibit the deletion of group if group snapshot exists." +msgstr "Prohibit the deletion of group if group snapshot exists." + +msgid "" +"Projects with the admin role are now allowed to operate on the quotas of all " +"other projects." +msgstr "" +"Projects with the admin role are now allowed to operate on the quotas of all " +"other projects." + +msgid "" +"Pure Storage FlashArray driver has added configuration options " +"``pure_replication_pg_name`` and ``pure_replication_pod_name`` for setting " +"the names for replication PGs and Pods." +msgstr "" +"Pure Storage FlashArray driver has added configuration options " +"``pure_replication_pg_name`` and ``pure_replication_pod_name`` for setting " +"the names for replication PGs and Pods." + +msgid "Pure Storage FlashArray driver has added multiatach support." +msgstr "Pure Storage FlashArray driver has added multiattach support." + +msgid "" +"Pure Storage Volume Drivers can now utilize driver_ssl_cert_verify and " +"driver_ssl_cert_path config options to allow for secure https requests to " +"the FlashArray." 
+msgstr "" +"Pure Storage Volume Drivers can now utilise driver_ssl_cert_verify and " +"driver_ssl_cert_path config options to allow for secure https requests to " +"the FlashArray." + +msgid "" +"Pure volume drivers will need 'purestorage' python module v1.6.0 or newer. " +"Support for 1.4.x has been removed." +msgstr "" +"Pure volume drivers will need 'purestorage' Python module v1.6.0 or newer. " +"Support for 1.4.x has been removed." + +msgid "QNAP" +msgstr "QNAP" + +msgid "QNAP Cinder driver added support for QES fw 2.0.0." +msgstr "QNAP Cinder driver added support for QES fw 2.0.0." + +msgid "QNAP Cinder driver added support for QES fw 2.1.0." +msgstr "QNAP Cinder driver added support for QES fw 2.1.0." + +msgid "QoS support in EMC VMAX iSCSI and FC drivers." +msgstr "QoS support in EMC VMAX iSCSI and FC drivers." + +msgid "Queens Series Release Notes" +msgstr "Queens Series Release Notes" + +msgid "" +"Quota validations are now forced for all APIs. skip_validation flag is now " +"removed from the request body for the quota-set update API." +msgstr "" +"Quota validations are now forced for all APIs. skip_validation flag is now " +"removed from the request body for the quota-set update API." + +msgid "" +"RBD driver can have bottlenecks if too many slow operations are happening at " +"the same time (for example many huge volume deletions), we can now use the " +"`backend_native_threads_pool_size` option in the RBD driver section to " +"resolve the issue." +msgstr "" +"RBD driver can have bottlenecks if too many slow operations are happening at " +"the same time (for example many huge volume deletions), we can now use the " +"`backend_native_threads_pool_size` option in the RBD driver section to " +"resolve the issue." + +msgid "" +"RBD driver supports returning a static total capacity value instead of a " +"dynamic value like it's been doing. Configurable with " +"`report_dynamic_total_capacity` configuration option." +msgstr "" +"RBD driver supports returning a static total capacity value instead of a " +"dynamic value like it's been doing. Configurable with " +"`report_dynamic_total_capacity` configuration option." + +msgid "" +"RBD stats report has been fixed, now properly reports " +"`allocated_capacity_gb` and `provisioned_capacity_gb` with the sum of the " +"sizes of the volumes (not physical sizes) for volumes created by Cinder and " +"all available in the pool respectively. Free capacity will now properly " +"handle quota size restrictions of the pool." +msgstr "" +"RBD stats report has been fixed, now properly reports " +"`allocated_capacity_gb` and `provisioned_capacity_gb` with the sum of the " +"sizes of the volumes (not physical sizes) for volumes created by Cinder and " +"all available in the pool respectively. Free capacity will now properly " +"handle quota size restrictions of the pool." + +msgid "" +"RBD/Ceph backends should adjust `max_over_subscription_ratio` to take into " +"account that the driver is no longer reporting volume's physical usage but " +"it's provisioned size." +msgstr "" +"RBD/Ceph backends should adjust `max_over_subscription_ratio` to take into " +"account that the driver is no longer reporting volume's physical usage but " +"it's provisioned size." + +msgid "Re-added QNAP Cinder volume driver." +msgstr "Re-added QNAP Cinder volume driver." + +msgid "Reduxio" +msgstr "Reduxio" + +msgid "Remove mirror policy parameter from huawei driver." +msgstr "Remove mirror policy parameter from Huawei driver." 
+ +msgid "Removed - ``eqlx_chap_login``" +msgstr "Removed - ``eqlx_chap_login``" + +msgid "Removed - ``eqlx_chap_password``" +msgstr "Removed - ``eqlx_chap_password``" + +msgid "Removed - ``eqlx_cli_timeout``" +msgstr "Removed - ``eqlx_cli_timeout``" + +msgid "Removed - ``eqlx_use_chap``" +msgstr "Removed - ``eqlx_use_chap``" + +msgid "Removed datera_acl_allow_all option." +msgstr "Removed datera_acl_allow_all option." + +msgid "Removed datera_num_replicas option." +msgstr "Removed datera_num_replicas option." + +msgid "" +"Removed deprecated LVMISCSIDriver and LVMISERDriver. These should be " +"switched to use the LVMVolumeDriver with the desired iscsi_helper " +"configuration set to the desired iSCSI helper." +msgstr "" +"Removed deprecated LVMISCSIDriver and LVMISERDriver. These should be " +"switched to use the LVMVolumeDriver with the desired iscsi_helper " +"configuration set to the desired iSCSI helper." + +msgid "" +"Removed deprecated option ``kaminario_nodedup_substring`` in Kaminario FC " +"and iSCSI Cinder drivers." +msgstr "" +"Removed deprecated option ``kaminario_nodedup_substring`` in Kaminario FC " +"and iSCSI Cinder drivers." + +msgid "Removed deprecated option ``osapi_max_request_body_size``." +msgstr "Removed deprecated option ``osapi_max_request_body_size``." + +msgid "Removed force_delete option from ScaleIO configuration." +msgstr "Removed force_delete option from ScaleIO configuration." + +msgid "" +"Removed restriction of hard coded iSCSI IP address to allow the use of " +"multiple iSCSI portgroups." +msgstr "" +"Removed restriction of hard coded iSCSI IP address to allow the use of " +"multiple iSCSI portgroups." + +msgid "" +"Removed storwize_svc_connection_protocol config setting. Users will now need " +"to set different values for volume_driver in cinder.conf. FC:volume_driver = " +"cinder.volume.drivers.ibm.storwize_svc.storwize_svc_fc.StorwizeSVCFCDriver " +"iSCSI:volume_driver = cinder.volume.drivers.ibm.storwize_svc." +"storwize_svc_iscsi.StorwizeSVCISCSIDriver" +msgstr "" +"Removed storwize_svc_connection_protocol config setting. Users will now need " +"to set different values for volume_driver in cinder.conf. FC:volume_driver = " +"cinder.volume.drivers.ibm.storwize_svc.storwize_svc_fc.StorwizeSVCFCDriver " +"iSCSI:volume_driver = cinder.volume.drivers.ibm.storwize_svc." +"storwize_svc_iscsi.StorwizeSVCISCSIDriver" + +msgid "" +"Removed the ability to create volumes in a ScaleIO Storage Pool that has " +"zero-padding disabled. A new configuration option " +"``sio_allow_non_padded_volumes`` has been added to override this new " +"behavior and allow unpadded volumes, but should not be enabled if multiple " +"tenants will utilize volumes from a shared Storage Pool." +msgstr "" +"Removed the ability to create volumes in a ScaleIO Storage Pool that has " +"zero-padding disabled. A new configuration option " +"``sio_allow_non_padded_volumes`` has been added to override this new " +"behaviour and allow unpadded volumes, but should not be enabled if multiple " +"tenants will utilise volumes from a shared Storage Pool." + +msgid "" +"Removed the ability to create volumes in a ScaleIO Storage Pool that has " +"zero-padding disabled. A new configuration option had been added to override " +"this new behavior and allow volume creation, but should not be enabled if " +"multiple tenants will utilize volumes from a shared Storage Pool." +msgstr "" +"Removed the ability to create volumes in a ScaleIO Storage Pool that has " +"zero-padding disabled. 
A new configuration option had been added to override " +"this new behaviour and allow volume creation, but should not be enabled if " +"multiple tenants will utilise volumes from a shared Storage Pool." + +msgid "Removed the deprecated NPIV options for the Storwize backend driver." +msgstr "Removed the deprecated NPIV options for the Storwize backend driver." + +msgid "" +"Removed the deprecated options for the Nova connection:> " +"os_privileged_user{name, password, tenant, auth_url}, nova_catalog_info, " +"nova_catalog_admin_info, nova_endpoint_template, " +"nova_endpoint_admin_template, nova_ca_certificates_file, nova_api_insecure. " +"From Pike, using the [nova] section is preferred to configure compute " +"connection for Guest Assisted Snapshost or the InstanceLocalityFilter." +msgstr "" +"Removed the deprecated options for the Nova connection:> " +"os_privileged_user{name, password, tenant, auth_url}, nova_catalog_info, " +"nova_catalog_admin_info, nova_endpoint_template, " +"nova_endpoint_admin_template, nova_ca_certificates_file, nova_api_insecure. " +"From Pike, using the [nova] section is preferred to configure compute " +"connection for Guest Assisted Snapshot or the InstanceLocalityFilter." + +msgid "" +"Removed the need for deployers to run tox for config reference generation." +msgstr "" +"Removed the need for deployers to run tox for config reference generation." + +msgid "" +"Removed the option ``allow_inuse_volume_type_modification`` which had been " +"deprecated in Ocata release." +msgstr "" +"Removed the option ``allow_inuse_volume_type_modification`` which had been " +"deprecated in Ocata release." + +msgid "" +"Removing cinder-all binary. Instead use the individual binaries like cinder-" +"api, cinder-backup, cinder-volume, cinder-scheduler." +msgstr "" +"Removing cinder-all binary. Instead use the individual binaries like cinder-" +"api, cinder-backup, cinder-volume, cinder-scheduler." + +msgid "" +"Removing deprecated file cinder.middleware.sizelimit. In your api-paste.ini, " +"replace cinder.middleware.sizelimit:RequestBodySizeLimiter.factory with " +"oslo_middleware.sizelimit:RequestBodySizeLimiter.factory" +msgstr "" +"Removing deprecated file cinder.middleware.sizelimit. In your api-paste.ini, " +"replace cinder.middleware.sizelimit:RequestBodySizeLimiter.factory with " +"oslo_middleware.sizelimit:RequestBodySizeLimiter.factory" + +msgid "" +"Removing the Dell EqualLogic driver's deprecated configuration options. " +"Please replace old options in your cinder.conf with the new one." +msgstr "" +"Removing the Dell EqualLogic driver's deprecated configuration options. " +"Please replace old options in your cinder.conf with the new one." + +msgid "" +"Rename Huawei18000ISCSIDriver and Huawei18000FCDriver to HuaweiISCSIDriver " +"and HuaweiFCDriver." +msgstr "" +"Rename Huawei18000ISCSIDriver and Huawei18000FCDriver to HuaweiISCSIDriver " +"and HuaweiFCDriver." + +msgid "Replaced with - ``chap_password``" +msgstr "Replaced with - ``chap_password``" + +msgid "Replaced with - ``chap_username``" +msgstr "Replaced with - ``chap_username``" + +msgid "Replaced with - ``ssh_conn_timeout``" +msgstr "Replaced with - ``ssh_conn_timeout``" + +msgid "Replaced with - ``use_chap_auth``" +msgstr "Replaced with - ``use_chap_auth``" + +msgid "Report pools in volume stats for Block Device Driver." +msgstr "Report pools in volume stats for Block Device Driver." 
+ +msgid "" +"Resolve issue with cross AZ migrations and retypes where the destination " +"volume kept the source volume's AZ, so we ended up with a volume where the " +"AZ does not match the backend. (bug 1747949)" +msgstr "" +"Resolve issue with cross AZ migrations and retypes where the destination " +"volume kept the source volume's AZ, so we ended up with a volume where the " +"AZ does not match the backend. (bug 1747949)" + +msgid "Retype support added to CloudByte iSCSI driver." +msgstr "Retype support added to CloudByte iSCSI driver." + +msgid "Rocky Series Release Notes" +msgstr "Rocky Series Release Notes" + +msgid "" +"ScaleIO volumes need to be sized in increments of 8G. Handling added to " +"volume extend operations to ensure the new size is rounded up to the nearest " +"size when needed." +msgstr "" +"ScaleIO volumes need to be sized in increments of 8G. Handling added to " +"volume extend operations to ensure the new size is rounded up to the nearest " +"size when needed." + +msgid "Security Issues" +msgstr "Security Issues" + +msgid "Separate create and update rules for volume metadata." +msgstr "Separate create and update rules for volume metadata." + +msgid "Show CG Snapshot checks both tables." +msgstr "Show CG Snapshot checks both tables." + +msgid "Show CG checks both tables." +msgstr "Show CG checks both tables." + +msgid "" +"Some of DISCO driver options were incorrectly read from ``[DEFAULT]`` " +"section in the cinder.conf. Now those are correctly read from " +"``[]`` section. This includes following options:" +msgstr "" +"Some of DISCO driver options were incorrectly read from ``[DEFAULT]`` " +"section in the cinder.conf. Now those are correctly read from " +"``[]`` section. This includes following options:" + +msgid "Some points to keep in mind:" +msgstr "Some points to keep in mind:" + +msgid "" +"Split nested quota support into a separate driver. In order to use nested " +"quotas, change the following config ``quota_driver = cinder.quota." +"NestedDbQuotaDriver`` after running the following admin API \"os-quota-sets/" +"validate_setup_for_nested_quota_use\" command to ensure the existing quota " +"values make sense to nest." +msgstr "" +"Split nested quota support into a separate driver. In order to use nested " +"quotas, change the following config ``quota_driver = cinder.quota." +"NestedDbQuotaDriver`` after running the following admin API \"os-quota-sets/" +"validate_setup_for_nested_quota_use\" command to ensure the existing quota " +"values make sense to nest." + +msgid "Start using reno to manage release notes." +msgstr "Start using Reno to manage release notes." + +msgid "" +"Starting from Mitaka release Cinder is having a tech preview of rolling " +"upgrades support." +msgstr "" +"Starting from Mitaka release Cinder is having a tech preview of rolling " +"upgrades support." + +msgid "" +"Starting with API microversion 3.47, Cinder now supports the ability to " +"create a volume directly from a backup. For instance, you can use the " +"command: ``cinder create --backup-id `` in cinderclient." +msgstr "" +"Starting with API microversion 3.47, Cinder now supports the ability to " +"create a volume directly from a backup. For instance, you can use the " +"command: ``cinder create --backup-id `` in cinderclient." + +msgid "" +"Storage assisted volume migration from one Pool/SLO/Workload combination to " +"another, on the same array, via retype, for the VMAX driver. Both All Flash " +"and Hybrid VMAX3 arrays are supported. VMAX2 is not supported." 
+msgstr "" +"Storage assisted volume migration from one Pool/SLO/Workload combination to " +"another, on the same array, via retype, for the VMAX driver. Both All Flash " +"and Hybrid VMAX3 arrays are supported. VMAX2 is not supported." + +msgid "" +"Storwize SVC Driver: Fixes `bug 1749687 `__ previously lsvdisk() was called separately for every 'in-" +"use' volume in order to check if the volume exists on the storage. In order " +"to avoid problem of too long driver initialization now lsvdisk() is called " +"once per pool." +msgstr "" +"Storwize SVC Driver: Fixes `bug 1749687 `__ previously lsvdisk() was called separately for every 'in-" +"use' volume in order to check if the volume exists on the storage. In order " +"to avoid problem of too long driver initialisation now lsvdisk() is called " +"once per pool." + +msgid "Support Force backup of in-use cinder volumes for Nimble Storage." +msgstr "Support Force backup of in-use cinder volumes for Nimble Storage." + +msgid "" +"Support backup restore cancelation by changing the backup status to anything " +"other than `restoring` using `cinder backup-reset-state`." +msgstr "" +"Support backup restore cancellation by changing the backup status to " +"anything other than `restoring` using `cinder backup-reset-state`." + +msgid "Support balanced FC port selection for Huawei drivers." +msgstr "Support balanced FC port selection for Huawei drivers." + +msgid "" +"Support cinder_img_volume_type property in glance image metadata to specify " +"volume type." +msgstr "" +"Support cinder_img_volume_type property in glance image metadata to specify " +"volume type." + +msgid "Support for Consistency Groups in the NetApp E-Series Volume Driver." +msgstr "Support for Consistency Groups in the NetApp E-Series Volume Driver." + +msgid "Support for Dot Hill AssuredSAN arrays has been removed." +msgstr "Support for Dot Hill AssuredSAN arrays has been removed." + +msgid "" +"Support for NetApp E-Series has been removed. The NetApp Unified driver can " +"now only be used with NetApp Clustered Data ONTAP." +msgstr "" +"Support for NetApp E-Series has been removed. The NetApp Unified driver can " +"now only be used with NetApp Clustered Data ONTAP." + +msgid "" +"Support for NetApp ONTAP 7 (previously known as \"Data ONTAP operating in " +"7mode\") has been removed. The NetApp Unified driver can now only be used " +"with NetApp Clustered Data ONTAP and NetApp E-Series storage systems. This " +"removal affects all three storage protocols that were supported on for ONTAP " +"7 - iSCSI, NFS and FC. Deployers are advised to consult the `migration " +"support `_ provided " +"to transition from ONTAP 7 to Clustered Data ONTAP operating system." +msgstr "" +"Support for NetApp ONTAP 7 (previously known as \"Data ONTAP operating in " +"7mode\") has been removed. The NetApp Unified driver can now only be used " +"with NetApp Clustered Data ONTAP and NetApp E-Series storage systems. This " +"removal affects all three storage protocols that were supported on for ONTAP " +"7 - iSCSI, NFS and FC. Deployers are advised to consult the `migration " +"support `_ provided " +"to transition from ONTAP 7 to Clustered Data ONTAP operating system." + +msgid "" +"Support for ScaleIO 1.32 is now deprecated and will be removed in a future " +"release." +msgstr "" +"Support for ScaleIO 1.32 is now deprecated and will be removed in a future " +"release." + +msgid "Support for VMAX SRDF/Metro on VMAX cinder driver." +msgstr "Support for VMAX SRDF/Metro on VMAX cinder driver." 
+ +msgid "Support for compression on VMAX All Flash in the VMAX driver." +msgstr "Support for compression on VMAX All Flash in the VMAX driver." + +msgid "" +"Support for configuring Fibre Channel zoning on Brocade switches through " +"Cinder Fibre Channel Zone Manager and Brocade Fibre Channel zone plugin. To " +"zone in a Virtual Fabric, set the configuration option " +"'fc_virtual_fabric_id' for the fabric." +msgstr "" +"Support for configuring Fibre Channel zoning on Brocade switches through " +"Cinder Fibre Channel Zone Manager and Brocade Fibre Channel zone plugin. To " +"zone in a Virtual Fabric, set the configuration option " +"'fc_virtual_fabric_id' for the fabric." + +msgid "" +"Support for creating a consistency group from consistency group in XtremIO." +msgstr "" +"Support for creating a consistency group from consistency group in XtremIO." + +msgid "Support for force backup of in-use Cinder volumes in Nimble driver." +msgstr "Support for force backup of in-use Cinder volumes in Nimble driver." + +msgid "Support for iSCSI in INFINIDAT InfiniBox driver." +msgstr "Support for iSCSI in INFINIDAT InfiniBox driver." + +msgid "Support for iSCSI multipath in Huawei driver." +msgstr "Support for iSCSI multipath in Huawei driver." + +msgid "Support for iSCSI multipathing in EMC VMAX driver." +msgstr "Support for iSCSI multipathing in EMC VMAX driver." + +msgid "Support for manage/ unmanage snapshots on VMAX cinder driver." +msgstr "Support for manage/unmanage snapshots on VMAX cinder driver." + +msgid "" +"Support for retype (storage-assisted migration) of replicated volumes on " +"VMAX cinder driver." +msgstr "" +"Support for retype (storage-assisted migration) of replicated volumes on " +"VMAX Cinder driver." + +msgid "Support for retype and volume migration for HPE Nimble Storage driver." +msgstr "Support for retype and volume migration for HPE Nimble Storage driver." + +msgid "" +"Support for retype volumes with different encryptions including changes from " +"unencrypted types to encrypted types and vice-versa." +msgstr "" +"Support for retype volumes with different encryptions including changes from " +"unencrypted types to encrypted types and vice-versa." + +msgid "" +"Support for reverting a volume to a previous snapshot in VMAX cinder driver." +msgstr "" +"Support for reverting a volume to a previous snapshot in VMAX cinder driver." + +msgid "Support for snapshot backup using the optimal path in Huawei driver." +msgstr "Support for snapshot backup using the optimal path in Huawei driver." + +msgid "" +"Support for snapshots named in the backend as ``snapshot-`` is " +"deprecated. Snapshots are now named in the backend as ``." +"``." +msgstr "" +"Support for snapshots named in the backend as ``snapshot-`` is " +"deprecated. Snapshots are now named in the backend as ``." +"``." + +msgid "" +"Support for use of 'fc_southbound_protocol' configuration setting in the " +"Brocade FC SAN lookup service." +msgstr "" +"Support for use of 'fc_southbound_protocol' configuration setting in the " +"Brocade FC SAN lookup service." + +msgid "Support for volume multi-attach in the INFINIDAT InfiniBox driver." +msgstr "Support for volume multi-attach in the INFINIDAT InfiniBox driver." + +msgid "Support iSCSI configuration in replication in Huawei driver." +msgstr "Support iSCSI configuration in replication in Huawei driver." + +msgid "" +"Support manage/unmanage volume and manage/unmanage snapshot functions for " +"the NEC volume driver." 
+msgstr "" +"Support managed/unmanaged volume and managed/unmanaged snapshot functions " +"for the NEC volume driver." + +msgid "Support to sort snapshots with \"name\"." +msgstr "Support to sort snapshots with \"name\"." + +msgid "" +"Support transfer volume with snapshots by default in new V3 API 'v3/" +"volume_transfers'. After microverison 3.55, if users don't want to transfer " +"snapshots, they could use the new optional argument `no_snapshots=True` in " +"request body of new transfer creation API." +msgstr "" +"Support transfer volume with snapshots by default in new V3 API 'v3/" +"volume_transfers'. After microverison 3.55, if users don't want to transfer " +"snapshots, they could use the new optional argument `no_snapshots=True` in " +"request body of new transfer creation API." + +msgid "Supported ``project_id`` admin filters to limits API." +msgstr "Supported ``project_id`` admin filters to limits API." + +msgid "Tegile" +msgstr "Tegile" + +msgid "The \"backing-up\" status is added to snapshot's status matrix." +msgstr "The \"backing-up\" status is added to snapshot's status matrix." + +msgid "" +"The \"cinder-manage logs\" commands have been removed. Information " +"previously gathered by these commands may be found in cinder service and " +"syslog logs." +msgstr "" +"The \"cinder-manage logs\" commands have been removed. Information " +"previously gathered by these commands may be found in cinder service and " +"syslog logs." + +msgid "" +"The 'backup_service_inithost_offload' configuration option now defaults to " +"'True' instead of 'False'." +msgstr "" +"The 'backup_service_inithost_offload' configuration option now defaults to " +"'True' instead of 'False'." + +msgid "" +"The 'smbfs_allocation_info_file_path' SMBFS driver config option is now " +"deprecated as we're no longer using a JSON file to store volume allocation " +"data. This file had a considerable chance of getting corrupted." +msgstr "" +"The 'smbfs_allocation_info_file_path' SMBFS driver config option is now " +"deprecated as we're no longer using a JSON file to store volume allocation " +"data. This file had a considerable chance of getting corrupted." + +msgid "" +"The 7-Mode Data ONTAP configuration of the NetApp Unified driver is " +"deprecated as of the Ocata release and will be removed in the Queens " +"release. Other configurations of the NetApp Unified driver, including " +"Clustered Data ONTAP and E-series, are unaffected." +msgstr "" +"The 7-Mode Data ONTAP configuration of the NetApp Unified driver is " +"deprecated as of the Ocata release and will be removed in the Queens " +"release. Other configurations of the NetApp Unified driver, including " +"Clustered Data ONTAP and E-series, are unaffected." + +msgid "" +"The Blockbridge driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it." +msgstr "" +"The Blockbridge driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it." + +msgid "" +"The Blockbridge driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it. If its support status does not change it will be " +"removed in the next release." +msgstr "" +"The Blockbridge driver has been marked as unsupported and is now deprecated. 
" +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it. If its support status does not change it will be " +"removed in the next release." + +msgid "" +"The Castellan library used for encryption has deprecated the ``api_class`` " +"config option. Configuration files using this should now be updated to use " +"the ``backend`` option instead." +msgstr "" +"The Castellan library used for encryption has deprecated the ``api_class`` " +"config option. Configuration files using this should now be updated to use " +"the ``backend`` option instead." + +msgid "" +"The Cinder API v1 was deprecated in the Juno release and defaulted to be " +"disabled in the Ocata release. It is now removed completely. If upgrading " +"from a previous version, it is recommended you edit your `/etc/cinder/api-" +"paste.ini` file to remove all references to v1." +msgstr "" +"The Cinder API v1 was deprecated in the Juno release and defaulted to be " +"disabled in the Ocata release. It is now removed completely. If upgrading " +"from a previous version, it is recommended you edit your `/etc/cinder/api-" +"paste.ini` file to remove all references to v1." + +msgid "" +"The Cinder Linux SMBFS driver is now deprecated and will be removed during " +"the following release. Deployers are encouraged to use the Windows SMBFS " +"driver instead." +msgstr "" +"The Cinder Linux SMBFS driver is now deprecated and will be removed during " +"the following release. Deployers are encouraged to use the Windows SMBFS " +"driver instead." + +msgid "" +"The Cinder Volume Backup service can now be run on Windows. It supports " +"backing up volumes exposed by SMBFS/iSCSI Windows Cinder Volume backends, as " +"well as any other Cinder backend that's accessible on Windows (e.g. SANs " +"exposing volumes via iSCSI/FC)." +msgstr "" +"The Cinder Volume Backup service can now be run on Windows. It supports " +"backing up volumes exposed by SMBFS/iSCSI Windows Cinder Volume backends, as " +"well as any other Cinder backend that's accessible on Windows (e.g. SANs " +"exposing volumes via iSCSI/FC)." + +msgid "" +"The Cinder database can now only be ugpraded from changes since the Newton " +"release. In order to upgrade from a version prior to that, you must now " +"upgrade to at least Newton first, then to Queens or later." +msgstr "" +"The Cinder database can now only be upgraded from changes since the Newton " +"release. In order to upgrade from a version prior to that, you must now " +"upgrade to at least Newton first, then to Queens or later." + +msgid "" +"The Cinder database can now only be upgraded from changes since the Kilo " +"release. In order to upgrade from a version prior to that, you must now " +"upgrade to at least Kilo first, then to Newton or later." +msgstr "" +"The Cinder database can now only be upgraded from changes since the Kilo " +"release. In order to upgrade from a version prior to that, you must now " +"upgrade to at least Kilo first, then to Newton or later." + +msgid "" +"The Cinder database can now only be upgraded from changes since the Liberty " +"release. In order to upgrade from a version prior to that, you must now " +"upgrade to at least Liberty first, then to Ocata or later." +msgstr "" +"The Cinder database can now only be upgraded from changes since the Liberty " +"release. In order to upgrade from a version prior to that, you must now " +"upgrade to at least Liberty first, then to Ocata or later." 
+ +msgid "" +"The Cinder database can now only be upgraded from changes since the Mitaka " +"release. In order to upgrade from a version prior to that, you must now " +"upgrade to at least Mitaka first, then to Pike or later." +msgstr "" +"The Cinder database can now only be upgraded from changes since the Mitaka " +"release. In order to upgrade from a version prior to that, you must now " +"upgrade to at least Mitaka first, then to Pike or later." + +msgid "" +"The Cinder v2 API has now been marked as deprecated. All new client code " +"should use the v3 API. API v3 adds support for microversioned API calls. If " +"no microversion is requested, the base 3.0 version for the v3 API is " +"identical to v2." +msgstr "" +"The Cinder v2 API has now been marked as deprecated. All new client code " +"should use the v3 API. API v3 adds support for microversioned API calls. If " +"no microversion is requested, the base 3.0 version for the v3 API is " +"identical to v2." + +msgid "" +"The Cisco Fibre Channel Zone Manager driver has been marked as unsupported " +"and is now deprecated. ``enable_unsupported_driver`` will need to be set to " +"``True`` in the driver's section in cinder.conf to continue to use it." +msgstr "" +"The Cisco Fibre Channel Zone Manager driver has been marked as unsupported " +"and is now deprecated. ``enable_unsupported_driver`` will need to be set to " +"``True`` in the driver's section in cinder.conf to continue to use it." + +msgid "" +"The Cisco Firbre Channel Zone Manager driver has been marked as unsupported " +"and is now deprecated. ``enable_unsupported_driver`` will need to be set to " +"``True`` in the driver's section in cinder.conf to continue to use it. If " +"its support status does not change, they will be removed in the Queens " +"development cycle." +msgstr "" +"The Cisco Fibre Channel Zone Manager driver has been marked as unsupported " +"and is now deprecated. ``enable_unsupported_driver`` will need to be set to " +"``True`` in the driver's section in cinder.conf to continue to use it. If " +"its support status does not change, they will be removed in the Queens " +"development cycle." + +msgid "" +"The CloudByte driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it." +msgstr "" +"The CloudByte driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it." + +msgid "" +"The CloudByte driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it. If its support status does not change it will be " +"removed in the next release." +msgstr "" +"The CloudByte driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it. If its support status does not change it will be " +"removed in the next release." + +msgid "" +"The Coho driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." +msgstr "" +"The Coho driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." 
+ +msgid "" +"The Coho driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, they will be removed in the Queens development cycle." +msgstr "" +"The Coho driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, they will be removed in the Queens development cycle." + +msgid "" +"The Consistency Group APIs have now been marked as deprecated and will be " +"removed in a future release. Generic Volume Group APIs should be used " +"instead." +msgstr "" +"The Consistency Group APIs have now been marked as deprecated and will be " +"removed in a future release. Generic Volume Group APIs should be used " +"instead." + +msgid "" +"The DataCore drivers have been marked as unsupported and are now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." +msgstr "" +"The DataCore drivers have been marked as unsupported and are now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." + +msgid "" +"The DataCore drivers have been marked as unsupported and are now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, they will be removed in the Stein development cycle." +msgstr "" +"The DataCore drivers have been marked as unsupported and are now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, they will be removed in the Stein development cycle." + +msgid "" +"The Dell EMC CoprHD drivers have been marked as unsupported and are now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it." +msgstr "" +"The Dell EMC CoprHD drivers have been marked as unsupported and are now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it." + +msgid "" +"The Dell EMC CoprHD drivers have been marked as unsupported and are now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it. If its support " +"status does not change, they will be removed in the Stein development cycle." +msgstr "" +"The Dell EMC CoprHD drivers have been marked as unsupported and are now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it. If its support " +"status does not change, they will be removed in the Stein development cycle." + +msgid "" +"The Dell EMC PS Series volume driver which supports Dell PS Series " +"(EqualLogic) Storage is moving to maintenance mode in S Release and will be " +"removed in T Release." +msgstr "" +"The Dell EMC PS Series volume driver which supports Dell PS Series " +"(EqualLogic) Storage is moving to maintenance mode in S Release and will be " +"removed in T Release." 
+ +msgid "" +"The Dell EMC SC configuration option ``excluded_domain_ip`` has been " +"deprecated and will be removed in a future release. Deployments should now " +"migrate to the option ``excluded_domain_ips`` for equivalent functionality." +msgstr "" +"The Dell EMC SC configuration option ``excluded_domain_ip`` has been " +"deprecated and will be removed in a future release. Deployments should now " +"migrate to the option ``excluded_domain_ips`` for equivalent functionality." + +msgid "" +"The Disco driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." +msgstr "" +"The Disco driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." + +msgid "" +"The Disco driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, it will be removed in the Stein development cycle." +msgstr "" +"The Disco driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, it will be removed in the Stein development cycle." + +msgid "" +"The DotHill drivers has been marked as unsupported and are now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it. If its support status does not change it will be " +"removed in the next release." +msgstr "" +"The DotHill drivers has been marked as unsupported and are now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it. If its support status does not change it will be " +"removed in the next release." + +msgid "" +"The DotHill drivers have been marked as unsupported and are now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it." +msgstr "" +"The DotHill drivers have been marked as unsupported and are now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it." + +msgid "" +"The EqualLogic driver is moved to the dell_emc directory and has been " +"rebranded to its current Dell EMC PS Series name. The volume_driver entry in " +"cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.ps." +"PSSeriesISCSIDriver``." +msgstr "" +"The EqualLogic driver is moved to the dell_emc directory and has been " +"rebranded to its current Dell EMC PS Series name. The volume_driver entry in " +"cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.ps." +"PSSeriesISCSIDriver``." + +msgid "" +"The Falconstor drivers have been marked as unsupported and are now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it." +msgstr "" +"The Falconstor drivers have been marked as unsupported and are now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it." 
+ +msgid "" +"The Falconstor drivers have been marked as unsupported and are now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it. If its support " +"status does not change, they will be removed in the Queens development cycle." +msgstr "" +"The Falconstor drivers have been marked as unsupported and are now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it. If its support " +"status does not change, they will be removed in the Queens development cycle." + +msgid "" +"The Glance v1 API has been deprecated and will soon be removed. Cinder " +"support for using the v1 API was deprecated in the Pike release and is now " +"no longer available. The ``glance_api_version`` configuration option to " +"support version selection has now been removed." +msgstr "" +"The Glance v1 API has been deprecated and will soon be removed. Cinder " +"support for using the v1 API was deprecated in the Pike release and is now " +"no longer available. The ``glance_api_version`` configuration option to " +"support version selection has now been removed." + +msgid "" +"The GlusterFS volume driver, which was deprecated in the Newton release, has " +"been removed." +msgstr "" +"The GlusterFS volume driver, which was deprecated in the Newton release, has " +"been removed." + +msgid "" +"The HBSD (Hitachi Block Storage Driver) volume drivers which supports " +"Hitachi Storages HUS100 and VSP family are deprecated. Support for HUS110 " +"family will be no longer provided. Support on VSP will be provided as " +"hitachi.vsp_* drivers." +msgstr "" +"The HBSD (Hitachi Block Storage Driver) volume drivers which supports " +"Hitachi Storages HUS100 and VSP family are deprecated. Support for HUS110 " +"family will be no longer provided. Support on VSP will be provided as " +"hitachi.vsp_* drivers." + +msgid "" +"The HGST Flash Storage Suite Driver was marked unsupported in the Rocky " +"release because their 3rd Party CI system was not meeting Cinder's " +"requirements. The system has not started reporting so the driver is now " +"removed as of the Stein release." +msgstr "" +"The HGST Flash Storage Suite Driver was marked unsupported in the Rocky " +"release because their 3rd Party CI system was not meeting Cinder's " +"requirements. The system has not started reporting so the driver is now " +"removed as of the Stein release." + +msgid "" +"The HGST Flash Suite storage driver has been removed after completion of its " +"deprecation period without a reliable 3rd Party CI system being supported. " +"Customers using the HGST Flash Suite driver should not upgrade Cinder " +"without first migrating all volumes from their HGST backend to a supported " +"storage backend. Failure to migrate volumes will result in no longer being " +"able to access volumes backed by the HGST storage backend." +msgstr "" +"The HGST Flash Suite storage driver has been removed after completion of its " +"deprecation period without a reliable 3rd Party CI system being supported. " +"Customers using the HGST Flash Suite driver should not upgrade Cinder " +"without first migrating all volumes from their HGST backend to a supported " +"storage backend. Failure to migrate volumes will result in no longer being " +"able to access volumes backed by the HGST storage backend." + +msgid "" +"The HGST driver has been marked as unsupported and is now deprecated. 
" +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." +msgstr "" +"The HGST driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." + +msgid "" +"The HGST driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, it will be removed in the Stein development cycle." +msgstr "" +"The HGST driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, it will be removed in the Stein development cycle." + +msgid "" +"The HPE XP driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it." +msgstr "" +"The HPE XP driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it." + +msgid "" +"The HPE XP driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it. If its support status does not change it will be " +"removed in the next release." +msgstr "" +"The HPE XP driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it. If its support status does not change it will be " +"removed in the next release." + +msgid "" +"The Hitachi Block Storage Driver (HBSD) and VSP driver have been marked as " +"unsupported and are now deprecated. enable_unsupported_driver will need to " +"be set to True in cinder.conf to continue to use them." +msgstr "" +"The Hitachi Block Storage Driver (HBSD) and VSP driver have been marked as " +"unsupported and are now deprecated. enable_unsupported_driver will need to " +"be set to True in cinder.conf to continue to use them." + +msgid "" +"The Hitachi HNAS, HBSD, and VSP volume drivers were marked as deprecated in " +"the Pike release and have now been removed. Hitachi storage drivers are now " +"only available directly from Hitachi." +msgstr "" +"The Hitachi HNAS, HBSD, and VSP volume drivers were marked as deprecated in " +"the Pike release and have now been removed. Hitachi storage drivers are now " +"only available directly from Hitachi." + +msgid "" +"The Hitachi NAS NFS driver has been marked as unsupported and is now " +"deprecated. enable_unsupported_driver will need to be set to True in cinder." +"conf to continue to use it." +msgstr "" +"The Hitachi NAS NFS driver has been marked as unsupported and is now " +"deprecated. enable_unsupported_driver will need to be set to True in cinder." +"conf to continue to use it." + +msgid "" +"The Hitachi NAS Platform iSCSI driver was marked as not supported in the " +"Ocata realease and has now been removed." +msgstr "" +"The Hitachi NAS Platform iSCSI driver was marked as not supported in the " +"Ocata release and has now been removed." + +msgid "" +"The Hitachi NAS iSCSI driver has been marked as unsupported and is now " +"deprecated. 
``enable_unsupported_drivers`` will need to be set to ``True`` " +"in cinder.conf to continue to use it." +msgstr "" +"The Hitachi NAS iSCSI driver has been marked as unsupported and is now " +"deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " +"in cinder.conf to continue to use it." + +msgid "" +"The Hitachi NAS iSCSI driver has been marked as unsupported and is now " +"deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " +"in cinder.conf to continue to use it. The driver will be removed in the next " +"release." +msgstr "" +"The Hitachi NAS iSCSI driver has been marked as unsupported and is now " +"deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " +"in cinder.conf to continue to use it. The driver will be removed in the next " +"release." + +msgid "" +"The IBM FlashSystem configuration options ``flashsystem_multipath_enabled`` " +"was deprecated in the Mitaka release. It had no effect, so it can be safely " +"removed and does not have a new equivalent config option." +msgstr "" +"The IBM FlashSystem configuration options ``flashsystem_multipath_enabled`` " +"was deprecated in the Mitaka release. It had no effect, so it can be safely " +"removed and does not have a new equivalent config option." + +msgid "" +"The IBM_Storage driver has been open sourced. This means that there is no " +"more need to download the package from the IBM site. The only requirement " +"remaining is to install pyxcli, which is available through pypi::" +msgstr "" +"The IBM_Storage driver has been open sourced. This means that there is no " +"more need to download the package from the IBM site. The only requirement " +"remaining is to install pyxcli, which is available through pypi::" + +msgid "" +"The ISERTgtAdm target was deprecated in the Kilo release. It has now been " +"removed. You should now just use LVMVolumeDriver and specify iscsi_helper " +"for the target driver you wish to use. In order to enable iser, please set " +"iscsi_protocol=iser with lioadm or tgtadm target helpers." +msgstr "" +"The ISERTgtAdm target was deprecated in the Kilo release. It has now been " +"removed. You should now just use LVMVolumeDriver and specify iscsi_helper " +"for the target driver you wish to use. In order to enable iser, please set " +"iscsi_protocol=iser with lioadm or tgtadm target helpers." + +msgid "" +"The ITRI DISCO storage driver has been removed after completion of its " +"deprecation period without a reliable 3rd Party CI system being supported. " +"Customers using the ITRI DISCO driver should not upgrade Cinder without " +"first migrating all volumes from their DISCO backend to a supported storage " +"backend. Failure to migrate volumes will result in no longer being able to " +"access volumes back by the ITRI DISCO storage backend." +msgstr "" +"The ITRI DISCO storage driver has been removed after completion of its " +"deprecation period without a reliable 3rd Party CI system being supported. " +"Customers using the ITRI DISCO driver should not upgrade Cinder without " +"first migrating all volumes from their DISCO backend to a supported storage " +"backend. Failure to migrate volumes will result in no longer being able to " +"access volumes back by the ITRI DISCO storage backend." + +msgid "" +"The ITRI DISCO storage driver was marked unsupported in Rocky due to 3rd " +"Party CI not meeting Cinder's requirements. As a result the driver is " +"removed starting from the Stein release." 
+msgstr "" +"The ITRI DISCO storage driver was marked unsupported in Rocky due to 3rd " +"Party CI not meeting Cinder's requirements. As a result the driver is " +"removed starting from the Stein release." + +msgid "" +"The Infortrend drivers have been marked as unsupported and are now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use them." +msgstr "" +"The Infortrend drivers have been marked as unsupported and are now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use them." + +msgid "" +"The Infortrend drivers have been marked as unsupported and are now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use them. If their " +"support status does not change, they will be removed in the Queens " +"development cycle." +msgstr "" +"The Infortrend drivers have been marked as unsupported and are now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use them. If their " +"support status does not change, they will be removed in the Queens " +"development cycle." + +msgid "" +"The LINBIT DRBDManage volume driver is moving to maintenance mode in Stein " +"Release and will be removed in T Release." +msgstr "" +"The LINBIT DRBDManage volume driver is moving to maintenance mode in Stein " +"Release and will be removed in T Release." + +msgid "" +"The LVM driver specific `lvm_max_over_subscription_ratio` setting had been " +"deprecated and is now removed. Over subscription should now be managed using " +"the generic `max_over_subscription_ratio` setting." +msgstr "" +"The LVM driver specific `lvm_max_over_subscription_ratio` setting had been " +"deprecated and is now removed. Over subscription should now be managed using " +"the generic `max_over_subscription_ratio` setting." + +msgid "" +"The NetApp E-Series drivers are deprecated as of the Rocky release and will " +"be removed in the Stein release. Other configurations of the NetApp driver, " +"i.e Clustered Data ONTAP and Solidfire, are unaffected." +msgstr "" +"The NetApp E-Series drivers are deprecated as of the Rocky release and will " +"be removed in the Stein release. Other configurations of the NetApp driver, " +"i.e Clustered Data ONTAP and Solidfire, are unaffected." + +msgid "" +"The NetApp E-series driver has been fixed to correctly report the " +"\"provisioned_capacity_gb\". Now it sums the capacity of all the volumes in " +"the configured backend to get the correct value. This bug fix affects all " +"the protocols supported by the driver (FC and iSCSI)." +msgstr "" +"The NetApp E-series driver has been fixed to correctly report the " +"\"provisioned_capacity_gb\". Now it sums the capacity of all the volumes in " +"the configured backend to get the correct value. This bug fix affects all " +"the protocols supported by the driver (FC and iSCSI)." + +msgid "" +"The NetApp ONTAP driver supports a new configuration option " +"``netapp_api_trace_pattern`` to enable filtering backend API interactions to " +"log. This option must be specified in the backend section when desired and " +"it accepts a valid python regular expression." +msgstr "" +"The NetApp ONTAP driver supports a new configuration option " +"``netapp_api_trace_pattern`` to enable filtering backend API interactions to " +"log. 
This option must be specified in the backend section when desired and " +"it accepts a valid python regular expression." + +msgid "" +"The NetApp cDOT driver now sets the ``replication_status`` attribute " +"appropriately on volumes created within replicated backends when using host " +"level replication." +msgstr "" +"The NetApp cDOT driver now sets the ``replication_status`` attribute " +"appropriately on volumes created within replicated backends when using host " +"level replication." + +msgid "" +"The NetApp cDOT driver operating with NFS protocol has been fixed to manage " +"volumes correctly when ``nas_secure_file_operations`` option has been set to " +"False." +msgstr "" +"The NetApp cDOT driver operating with NFS protocol has been fixed to manage " +"volumes correctly when ``nas_secure_file_operations`` option has been set to " +"False." + +msgid "" +"The NetApp cDOT drivers report to the scheduler, for each FlexVol pool, the " +"fraction of the shared block limit that has been consumed by dedupe and " +"cloning operations. This value, netapp_dedupe_used_percent, may be used in " +"the filter & goodness functions for better placement of new Cinder volumes." +msgstr "" +"The NetApp cDOT drivers report to the scheduler, for each FlexVol pool, the " +"fraction of the shared block limit that has been consumed by dedupe and " +"cloning operations. This value, netapp_dedupe_used_percent, may be used in " +"the filter & goodness functions for better placement of new Cinder volumes." + +msgid "" +"The Nexenta Edge driver has been marked as unsupported and is now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it." +msgstr "" +"The Nexenta Edge driver has been marked as unsupported and is now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it." + +msgid "" +"The Nexenta Edge driver has been marked as unsupported and is now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it. If its support " +"status does not change, it will be removed in the 'T' development cycle." +msgstr "" +"The Nexenta Edge driver has been marked as unsupported and is now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it. If its support " +"status does not change, it will be removed in the 'T' development cycle." + +msgid "" +"The Nexenta Edge drivers has been marked as unsupported and are now " +"deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " +"in cinder.conf to continue to use it. If its support status does not change " +"it will be removed in the next release." +msgstr "" +"The Nexenta Edge drivers has been marked as unsupported and are now " +"deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " +"in cinder.conf to continue to use it. If its support status does not change " +"it will be removed in the next release." + +msgid "" +"The Nexenta Edge drivers have been marked as unsupported and are now " +"deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " +"in cinder.conf to continue to use it." +msgstr "" +"The Nexenta Edge drivers have been marked as unsupported and are now " +"deprecated. 
``enable_unsupported_drivers`` will need to be set to ``True`` " +"in cinder.conf to continue to use it." + +msgid "" +"The Nimble backend driver has been updated to use REST for array " +"communication." +msgstr "" +"The Nimble backend driver has been updated to use REST for array " +"communication." + +msgid "" +"The ONTAP drivers (\"7mode\" and \"cmode\") have been fixed to not report " +"consumed space as \"provisioned_capacity_gb\". They instead rely on the " +"cinder scheduler's calculation of \"provisioned_capacity_gb\". This fixes " +"the oversubscription miscalculations with the ONTAP drivers. This bugfix " +"affects all three protocols supported by these drivers (iSCSI/FC/NFS)." +msgstr "" +"The ONTAP drivers (\"7mode\" and \"cmode\") have been fixed to not report " +"consumed space as \"provisioned_capacity_gb\". They instead rely on the " +"cinder scheduler's calculation of \"provisioned_capacity_gb\". This fixes " +"the over-subscription miscalculations with the ONTAP drivers. This bugfix " +"affects all three protocols supported by these drivers (iSCSI/FC/NFS)." + +msgid "" +"The QNAP driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it." +msgstr "" +"The QNAP driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it." + +msgid "" +"The QNAP driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it. If its support status does not change it will be " +"removed in the next release." +msgstr "" +"The QNAP driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use it. If its support status does not change it will be " +"removed in the next release." + +msgid "" +"The Quobyte Cinder driver now supports identifying Quobyte mounts via the " +"mounts fstype field." +msgstr "" +"The Quobyte Cinder driver now supports identifying Quobyte mounts via the " +"mounts fstype field." + +msgid "" +"The RBD driver no longer uses the \"volume_tmp_dir\" option to set where " +"temporary files for image conversion are stored. Set \"image_conversion_dir" +"\" to configure this in Ocata." +msgstr "" +"The RBD driver no longer uses the \"volume_tmp_dir\" option to set where " +"temporary files for image conversion are stored. Set \"image_conversion_dir" +"\" to configure this in Ocata." + +msgid "" +"The Reduxio driver has been marked unsupported and is now deprecated. " +"``use_unsupported_driver`` will need to be set to ``True`` in the driver's " +"section in cinder.conf to use it." +msgstr "" +"The Reduxio driver has been marked unsupported and is now deprecated. " +"``use_unsupported_driver`` will need to be set to ``True`` in the driver's " +"section in cinder.conf to use it." + +msgid "" +"The Reduxio driver has been marked unsupported and is now deprecated. " +"``use_unsupported_driver`` will need to be set to ``True`` in the driver's " +"section in cinder.conf to use it. If its support status does not change, the " +"driver will be removed in the Queens development cycle." +msgstr "" +"The Reduxio driver has been marked unsupported and is now deprecated. 
" +"``use_unsupported_driver`` will need to be set to ``True`` in the driver's " +"section in cinder.conf to use it. If its support status does not change, the " +"driver will be removed in the Queens development cycle." + +msgid "" +"The SMBFS driver now exposes share information to the scheduler via pools. " +"The pool names are configurable, defaulting to the share names." +msgstr "" +"The SMBFS driver now exposes share information to the scheduler via pools. " +"The pool names are configurable, defaulting to the share names." + +msgid "" +"The SMBFS driver now supports the 'snapshot attach' feature. Special care " +"must be taken when attaching snapshots though, as writing to a snapshot will " +"corrupt the differencing image chain." +msgstr "" +"The SMBFS driver now supports the 'snapshot attach' feature. Special care " +"must be taken when attaching snapshots though, as writing to a snapshot will " +"corrupt the differencing image chain." + +msgid "" +"The SMBFS driver now supports the volume manage/unmanage feature. Images " +"residing on preconfigured shares may be listed and managed by Cinder." +msgstr "" +"The SMBFS driver now supports the volume manage/unmanage feature. Images " +"residing on preconfigured shares may be listed and managed by Cinder." + +msgid "" +"The SMBFS volume driver can now be configured to use fixed vhd/x images " +"through the 'nas_volume_prov_type' config option." +msgstr "" +"The SMBFS volume driver can now be configured to use fixed VHD/X images " +"through the 'nas_volume_prov_type' config option." + +msgid "" +"The SMBFS volume driver now supports reverting volumes to the latest " +"snapshot." +msgstr "" +"The SMBFS volume driver now supports reverting volumes to the latest " +"snapshot." + +msgid "" +"The ScaleIO Driver has deprecated several options specified in ``cinder." +"conf``: * ``sio_protection_domain_id`` * ``sio_protection_domain_name``, * " +"``sio_storage_pool_id`` * ``sio_storage_pool_name``. Users of the ScaleIO " +"Driver should now utilize the ``sio_storage_pools`` options to provide a " +"list of protection_domain:storage_pool pairs." +msgstr "" +"The ScaleIO Driver has deprecated several options specified in ``cinder." +"conf``: * ``sio_protection_domain_id`` * ``sio_protection_domain_name``, * " +"``sio_storage_pool_id`` * ``sio_storage_pool_name``. Users of the ScaleIO " +"Driver should now utilise the ``sio_storage_pools`` options to provide a " +"list of protection_domain:storage_pool pairs." + +msgid "" +"The ScaleIO Driver has deprecated the ability to specify the protection " +"domain, as ``sio:pd_name``, and storage pool, as ``sio:sp_name``, extra " +"specs in volume types. The supported way to specify a specific protection " +"domain and storage pool in a volume type is to define a ``pool_name`` extra " +"spec and set the value to the appropriate ``protection_domain_name:" +"storage_pool_name``." +msgstr "" +"The ScaleIO Driver has deprecated the ability to specify the protection " +"domain, as ``sio:pd_name``, and storage pool, as ``sio:sp_name``, extra " +"specs in volume types. The supported way to specify a specific protection " +"domain and storage pool in a volume type is to define a ``pool_name`` extra " +"spec and set the value to the appropriate ``protection_domain_name:" +"storage_pool_name``." + +msgid "" +"The ScaleIO driver is moved to the dell_emc directory. volume_driver entry " +"in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc." +"scaleio.driver.ScaleIODriver``." 
+msgstr "" +"The ScaleIO driver is moved to the dell_emc directory. volume_driver entry " +"in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc." +"scaleio.driver.ScaleIODriver``." + +msgid "" +"The Scality backend volume driver was marked as not supported in the " +"previous release and has now been removed." +msgstr "" +"The Scality backend volume driver was marked as not supported in the " +"previous release and has now been removed." + +msgid "" +"The Scality driver has been marked as unsupported and is now deprecated. " +"enable_unsupported_drivers will need to be set to True in cinder.conf to " +"continue to use it." +msgstr "" +"The Scality driver has been marked as unsupported and is now deprecated. " +"enable_unsupported_drivers will need to be set to True in cinder.conf to " +"continue to use it." + +msgid "" +"The Scality driver has been marked as unsupported and is now deprecated. " +"enable_unsupported_drivers will need to be set to True in cinder.conf to " +"continue to use it. If its support status does not change it will be removed " +"in the next release." +msgstr "" +"The Scality driver has been marked as unsupported and is now deprecated. " +"enable_unsupported_drivers will need to be set to True in cinder.conf to " +"continue to use it. If its support status does not change it will be removed " +"in the next release." + +msgid "" +"The SolidFire driver will recognize 4 new QoS spec keys to allow an " +"administrator to specify QoS settings which are scaled by the size of the " +"volume. 'ScaledIOPS' is a flag which will tell the driver to look for " +"'scaleMin', 'scaleMax' and 'scaleBurst' which provide the scaling factor " +"from the minimum values specified by the previous QoS keys ('minIOPS', " +"'maxIOPS', 'burstIOPS'). The administrator must take care to assure that no " +"matter what the final calculated QoS values follow minIOPS <= maxIOPS <= " +"burstIOPS. A exception will be thrown if not. The QoS settings are also " +"checked against the cluster min and max allowed and truncated at the min or " +"max if they exceed." +msgstr "" +"The SolidFire driver will recognize 4 new QoS spec keys to allow an " +"administrator to specify QoS settings which are scaled by the size of the " +"volume. 'ScaledIOPS' is a flag which will tell the driver to look for " +"'scaleMin', 'scaleMax' and 'scaleBurst' which provide the scaling factor " +"from the minimum values specified by the previous QoS keys ('minIOPS', " +"'maxIOPS', 'burstIOPS'). The administrator must take care to assure that no " +"matter what the final calculated QoS values follow minIOPS <= maxIOPS <= " +"burstIOPS. A exception will be thrown if not. The QoS settings are also " +"checked against the cluster min and max allowed and truncated at the min or " +"max if they exceed." + +msgid "" +"The Solidfire cinder driver has been fixed to ensure delete happens on the " +"correct volume." +msgstr "" +"The Solidfire Cinder driver has been fixed to ensure delete happens on the " +"correct volume." + +msgid "The StorPool backend driver was added." +msgstr "The StorPool backend driver was added." + +msgid "The Swift and Posix backup drivers are known to be working on Windows." +msgstr "The Swift and Posix backup drivers are known to be working on Windows." + +msgid "" +"The Synology driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in ``cinder.conf`` to continue to use it." 
+msgstr "" +"The Synology driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in ``cinder.conf`` to continue to use it." + +msgid "" +"The Synology driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in ``cinder.conf`` to continue to use it. If its support " +"status does not change, the driver will be removed in the Queens development " +"cycle." +msgstr "" +"The Synology driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in ``cinder.conf`` to continue to use it. If its support " +"status does not change, the driver will be removed in the Queens development " +"cycle." + +msgid "" +"The Tegile driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." +msgstr "" +"The Tegile driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." + +msgid "" +"The Tegile driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, they will be removed in the Queens development cycle." +msgstr "" +"The Tegile driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, they will be removed in the Queens development cycle." + +msgid "" +"The Tintri driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." +msgstr "" +"The Tintri driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." + +msgid "" +"The Tintri driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, it will be removed in the 'T' development cycle." +msgstr "" +"The Tintri driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, it will be removed in the 'T' development cycle." + +msgid "" +"The VMAX driver is moved to the dell_emc directory. volume_driver entry in " +"cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.vmax." +"iscsi.VMAXISCSIDriver`` or ``cinder.volume.drivers.dell_emc.vmax.fc." +"VMAXFCDriver``." +msgstr "" +"The VMAX driver is moved to the dell_emc directory. volume_driver entry in " +"cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.vmax." +"iscsi.VMAXISCSIDriver`` or ``cinder.volume.drivers.dell_emc.vmax.fc." +"VMAXFCDriver``." 
+ +msgid "The VMware VMDK driver for ESX server has been removed." +msgstr "The VMware VMDK driver for ESX server has been removed." + +msgid "The VMware VMDK driver now enforces minimum vCenter version of 5.1." +msgstr "The VMware VMDK driver now enforces minimum vCenter version of 5.1." + +msgid "The VMware VMDK driver now enforces minimum vCenter version of 5.5." +msgstr "The VMware VMDK driver now enforces minimum vCenter version of 5.5." + +msgid "" +"The VMware VMDK driver supports a new config option 'vmware_host_port' to " +"specify the port number to connect to vCenter server." +msgstr "" +"The VMware VMDK driver supports a new config option 'vmware_host_port' to " +"specify the port number to connect to vCenter server." + +msgid "" +"The Veritas HyperScale driver has been marked as unsupported and is now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it." +msgstr "" +"The Veritas HyperScale driver has been marked as unsupported and is now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it." + +msgid "" +"The Veritas HyperScale driver has been marked as unsupported and is now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it. If its support " +"status does not change, it will be removed in the 'T' development cycle." +msgstr "" +"The Veritas HyperScale driver has been marked as unsupported and is now " +"deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " +"the driver's section in cinder.conf to continue to use it. If its support " +"status does not change, it will be removed in the 'T' development cycle." + +msgid "" +"The Violin drivers have been marked as unsupported and are now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use them." +msgstr "" +"The Violin drivers have been marked as unsupported and are now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use them." + +msgid "" +"The Violin drivers have been marked as unsupported and are now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use them. If its support status does not change it will " +"be removed in the next release." +msgstr "" +"The Violin drivers have been marked as unsupported and are now deprecated. " +"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." +"conf to continue to use them. If its support status does not change it will " +"be removed in the next release." + +msgid "" +"The Windows iSCSI driver has been renamed. The updated driver location is " +"``cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver``." +msgstr "" +"The Windows iSCSI driver has been renamed. The updated driver location is " +"``cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver``." + +msgid "" +"The Windows iSCSI driver now honors the configured iSCSI addresses, ensuring " +"that only those addresses will be used for iSCSI traffic." +msgstr "" +"The Windows iSCSI driver now honours the configured iSCSI addresses, " +"ensuring that only those addresses will be used for iSCSI traffic." + +msgid "" +"The Windows iSCSI driver now returns multiple portals when available and " +"multipath is requested." 
+msgstr "" +"The Windows iSCSI driver now returns multiple portals when available and " +"multipath is requested." + +msgid "" +"The X-IO driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." +msgstr "" +"The X-IO driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." + +msgid "" +"The X-IO driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, they will be removed in the Queens development cycle." +msgstr "" +"The X-IO driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, they will be removed in the Queens development cycle." + +msgid "" +"The XML API has been marked deprecated and will be removed in a future " +"release." +msgstr "" +"The XML API has been marked deprecated and will be removed in a future " +"release." + +msgid "" +"The XML API has been removed in Newton release. Cinder supports only JSON " +"API request/response format now." +msgstr "" +"The XML API has been removed in Newton release. Cinder supports only JSON " +"API request/response format now." + +msgid "" +"The XML configuration file used by the HNAS drivers is now deprecated and " +"will no longer be used in the future. Please use cinder.conf for all driver " +"configuration." +msgstr "" +"The XML configuration file used by the HNAS drivers is now deprecated and " +"will no longer be used in the future. Please use cinder.conf for all driver " +"configuration." + +msgid "" +"The XtremIO driver has been fixed to correctly report the \"free_capacity_gb" +"\" size." +msgstr "" +"The XtremIO driver has been fixed to correctly report the \"free_capacity_gb" +"\" size." + +msgid "" +"The XtremIO driver is moved to the dell_emc directory. volume_driver entry " +"in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc." +"xtremio.XtremIOISCSIDriver`` or ``cinder.volume.drivers.dell_emc.xtremio." +"XtremIOFCDriver``." +msgstr "" +"The XtremIO driver is moved to the dell_emc directory. volume_driver entry " +"in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc." +"xtremio.XtremIOISCSIDriver`` or ``cinder.volume.drivers.dell_emc.xtremio." +"XtremIOFCDriver``." + +msgid "" +"The ZTE driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." +msgstr "" +"The ZTE driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it." + +msgid "" +"The ZTE driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, they will be removed in the Queens development cycle." 
+msgstr "" +"The ZTE driver has been marked as unsupported and is now deprecated. " +"``enable_unsupported_driver`` will need to be set to ``True`` in the " +"driver's section in cinder.conf to continue to use it. If its support status " +"does not change, they will be removed in the Queens development cycle." + +msgid "" +"The ``WindowsDriver`` was renamed in the Queens release to " +"``WindowsISCSIDriver`` to avoid confusion with the SMB driver. The backwards " +"compatibility for this has now been removed, so any cinder.conf settings " +"still using ``cinder.volume.drivers.windows.windows.WindowsDriver`` must now " +"be updated to use ``cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver``." +msgstr "" +"The ``WindowsDriver`` was renamed in the Queens release to " +"``WindowsISCSIDriver`` to avoid confusion with the SMB driver. The backwards " +"compatibility for this has now been removed, so any cinder.conf settings " +"still using ``cinder.volume.drivers.windows.windows.WindowsDriver`` must now " +"be updated to use ``cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver``." + +msgid "" +"The ``cinder-manage db online_data_migrations`` command now returns exit " +"status 2 in the case where some migrations failed (raised exceptions) and no " +"others were completed successfully from the last batch attempted. This " +"should be considered a fatal condition that requires intervention. Exit " +"status 1 will be returned in the case where the ``--max-count`` option was " +"used and some migrations failed but others succeeded (updated at least one " +"row), because more work may remain for the non-failing migrations, and their " +"completion may be a dependency for the failing ones. The command should be " +"reiterated while it returns exit status 1, and considered completed " +"successfully only when it returns exit status 0." +msgstr "" +"The ``cinder-manage db online_data_migrations`` command now returns exit " +"status 2 in the case where some migrations failed (raised exceptions) and no " +"others were completed successfully from the last batch attempted. This " +"should be considered a fatal condition that requires intervention. Exit " +"status 1 will be returned in the case where the ``--max-count`` option was " +"used and some migrations failed but others succeeded (updated at least one " +"row), because more work may remain for the non-failing migrations, and their " +"completion may be a dependency for the failing ones. The command should be " +"reiterated while it returns exit status 1, and considered completed " +"successfully only when it returns exit status 0." + +msgid "" +"The ``force`` boolean parameter has been added to the volume delete API. It " +"may be used in combination with ``cascade``. This also means that volume " +"force delete is available in the base volume API rather than only in the " +"``volume_admin_actions`` extension." +msgstr "" +"The ``force`` boolean parameter has been added to the volume delete API. It " +"may be used in combination with ``cascade``. This also means that volume " +"force delete is available in the base volume API rather than only in the " +"``volume_admin_actions`` extension." + +msgid "" +"The ``service`` filter for service list API was deprecated 3 years ago in " +"2013 July (Havana). Removed this filter and please use \"binary\" instead." +msgstr "" +"The ``service`` filter for service list API was deprecated 3 years ago in " +"2013 July (Havana). Removed this filter and please use \"binary\" instead." 
+ +msgid "" +"The `lvm_max_overprovision_ratio` config option has been deprecated. It will " +"be removed in a future release. Configurations should move to using the " +"common `max_overprovision_ratio` config option." +msgstr "" +"The `lvm_max_overprovision_ratio` config option has been deprecated. It will " +"be removed in a future release. Configurations should move to using the " +"common `max_overprovision_ratio` config option." + +msgid "" +"The `osapi_volume_base_URL` config option was deprecated in Pike and has now " +"been removed. The `public_endpoint` config option should be used instead." +msgstr "" +"The `osapi_volume_base_URL` config option was deprecated in Pike and has now " +"been removed. The `public_endpoint` config option should be used instead." + +msgid "" +"The ability to specify a backup driver by module name was deprecated in the " +"Queens release and the ability has now been removed. Any configuration in " +"cinder.conf still using the module path should be updated to include the " +"full class name. For example, ``cinder.backup.drivers.swift`` should be " +"updated to ``cinder.backup.drivers.swift.SwiftBackupDriver``." +msgstr "" +"The ability to specify a backup driver by module name was deprecated in the " +"Queens release and the ability has now been removed. Any configuration in " +"cinder.conf still using the module path should be updated to include the " +"full class name. For example, ``cinder.backup.drivers.swift`` should be " +"updated to ``cinder.backup.drivers.swift.SwiftBackupDriver``." + +msgid "" +"The block_driver is deprecated as of the Ocata release and will be removed " +"in the Queens release of Cinder. Instead the LVM driver with the LIO iSCSI " +"target should be used. For those that desire higher performance, they " +"should use LVM striping." +msgstr "" +"The block_driver is deprecated as of the Ocata release and will be removed " +"in the Queens release of Cinder. Instead the LVM driver with the LIO iSCSI " +"target should be used. For those that desire higher performance they should " +"use LVM striping." + +msgid "" +"The cinder-manage online_data_migrations command now prints a tabular " +"summary of completed and remaining records. The goal here is to get all your " +"numbers to zero. The previous execution return code behavior is retained for " +"scripting." +msgstr "" +"The cinder-manage online_data_migrations command now prints a tabular " +"summary of completed and remaining records. The goal here is to get all your " +"numbers to zero. The previous execution return code behaviour is retained " +"for scripting." + +msgid "" +"The config options ``iscsi_ip_address``, ``iscsi_port``, ``target_helper``, " +"``iscsi_target_prefix`` and ``iscsi_protocol`` were deprecated in the Queens " +"release and have now been removed. Deployments should now used the more " +"general ``target_ip_address``, ``target_port``, ``target_helper``, " +"``target_prefix`` and ``target_protocol`` options." +msgstr "" +"The config options ``iscsi_ip_address``, ``iscsi_port``, ``target_helper``, " +"``iscsi_target_prefix`` and ``iscsi_protocol`` were deprecated in the Queens " +"release and have now been removed. Deployments should now used the more " +"general ``target_ip_address``, ``target_port``, ``target_helper``, " +"``target_prefix`` and ``target_protocol`` options." + +msgid "" +"The config options ``scheduler_topic``, ``volume_topic`` and " +"``backup_topic`` have been removed without a deprecation period as these had " +"never worked correctly." 
+msgstr "" +"The config options ``scheduler_topic``, ``volume_topic`` and " +"``backup_topic`` have been removed without a deprecation period as these had " +"never worked correctly." + +msgid "The consistency group API now returns volume type IDs." +msgstr "The consistency group API now returns volume type IDs." + +msgid "" +"The coordination system used by Cinder has been simplified to leverage tooz " +"builtin heartbeat feature. Therefore, the configuration options " +"`coordination.heartbeat`, `coordination.initial_reconnect_backoff` and " +"`coordination.max_reconnect_backoff` have been removed." +msgstr "" +"The coordination system used by Cinder has been simplified to leverage Tooz " +"built-in heartbeat feature. Therefore, the configuration options " +"`coordination.heartbeat`, `coordination.initial_reconnect_backoff` and " +"`coordination.max_reconnect_backoff` have been removed." + +msgid "" +"The create volume api will now return 400 error instead of 404/500 if user " +"passes non-uuid values to consistencygroup_id, source_volid and " +"source_replica parameters in the request body." +msgstr "" +"The create volume API will now return 400 error instead of 404/500 if user " +"passes non-UUID values to consistencygroup_id, source_volid and " +"source_replica parameters in the request body." + +msgid "" +"The default interval for polling vCenter tasks in the VMware VMDK driver is " +"changed to 2s." +msgstr "" +"The default interval for polling vCenter tasks in the VMware VMDK driver is " +"changed to 2s." + +msgid "" +"The default key manager interface in Cinder was deprecated and the Castellan " +"key manager interface library is now used instead. For more information " +"about Castellan, please see http://docs.openstack.org/developer/castellan/ ." +msgstr "" +"The default key manager interface in Cinder was deprecated and the Castellan " +"key manager interface library is now used instead. For more information " +"about Castellan, please see http://docs.openstack.org/developer/castellan/ ." + +msgid "" +"The default value for pure_replica_interval_default used by Pure Storage " +"volume drivers has changed from 900 to 3600 seconds." +msgstr "" +"The default value for pure_replica_interval_default used by Pure Storage " +"volume drivers has changed from 900 to 3600 seconds." + +msgid "" +"The default value has been removed for the LVM specific " +"`lvm_max_over_subscription_ratio` setting. This changes the behavior so that " +"LVM backends now adhere to the common `max_over_subscription_ratio` setting. " +"The LVM specific config option may still be used, but it is now deprecated " +"and will be removed in a future release." +msgstr "" +"The default value has been removed for the LVM specific " +"`lvm_max_over_subscription_ratio` setting. This changes the behaviour so " +"that LVM backends now adhere to the common `max_over_subscription_ratio` " +"setting. The LVM specific config option may still be used, but it is now " +"deprecated and will be removed in a future release." + +msgid "The deprecated HP CLIQ proxy driver has now been removed." +msgstr "The deprecated HP CLIQ proxy driver has now been removed." + +msgid "The endpoints will now correctly raise a 403 Forbidden instead." +msgstr "The endpoints will now correctly raise a 403 Forbidden instead." + +msgid "" +"The following commands are no longer required to be listed in your rootwrap " +"configuration: cgcreate; and cgset." 
+msgstr "" +"The following commands are no longer required to be listed in your rootwrap " +"configuration: cgcreate; and cgset." + +msgid "" +"The following volume drivers were deprecated in the Pike release and have " +"now been removed:" +msgstr "" +"The following volume drivers were deprecated in the Pike release and have " +"now been removed:" + +msgid "The fss_pool option is deprecated. Use fss_pools instead." +msgstr "The fss_pool option is deprecated. Use fss_pools instead." + +msgid "" +"The hosts api extension is now deprecated and will be removed in a future " +"version." +msgstr "" +"The hosts API extension is now deprecated and will be removed in a future " +"version." + +msgid "" +"The multiattach capability has been enabled and verified as working with the " +"ScaleIO driver. It is the user's responsibility to add some type of " +"exclusion (at the file system or network file system layer) to prevent " +"multiple writers from corrupting data on the volume." +msgstr "" +"The multiattach capability has been enabled and verified as working with the " +"ScaleIO driver. It is the user's responsibility to add some type of " +"exclusion (at the file system or network file system layer) to prevent " +"multiple writers from corrupting data on the volume." + +msgid "" +"The old HNAS drivers configuration paths have been marked for deprecation." +msgstr "" +"The old HNAS drivers configuration paths have been marked for deprecation." + +msgid "" +"The old deprecated ``hp3par*`` options have been removed. Use the " +"``hpe3par*`` instead of them." +msgstr "" +"The old deprecated ``hp3par*`` options have been removed. Use the " +"``hpe3par*`` instead of them." + +msgid "" +"The old deprecated ``keymgr`` options have been removed. Configuration " +"options using the ``[keymgr]`` group will not be applied anymore. Use the " +"``[key_manager]`` group from Castellan instead. The Castellan ``backend`` " +"options should also be used instead of ``api_class``, as most of the options " +"that lived in Cinder have migrated to Castellan." +msgstr "" +"The old deprecated ``keymgr`` options have been removed. Configuration " +"options using the ``[keymgr]`` group will not be applied any more. Use the " +"``[key_manager]`` group from Castellan instead. The Castellan ``backend`` " +"options should also be used instead of ``api_class``, as most of the options " +"that lived in Cinder have migrated to Castellan." + +msgid "" +"The old deprecated ``nas_ip`` option has been removed. Use the ``nas_host`` " +"instead of it." +msgstr "" +"The old deprecated ``nas_ip`` option has been removed. Use the ``nas_host`` " +"instead of it." + +msgid "" +"The old deprecated ``netapp_eseries_host_type`` option has been removed. Use " +"the ``netapp_host_type`` instead." +msgstr "" +"The old deprecated ``netapp_eseries_host_type`` option has been removed. Use " +"the ``netapp_host_type`` instead." + +msgid "" +"The old deprecated ``pybasedir`` option has been removed. Use the " +"``state_path`` instead." +msgstr "" +"The old deprecated ``pybasedir`` option has been removed. Use the " +"``state_path`` instead." + +msgid "" +"The os_privileged_xxx and nova_xxx in the [default] section are deprecated " +"in favor of the settings in the [nova] section." +msgstr "" +"The os_privileged_xxx and nova_xxx in the [default] section are deprecated " +"in favour of the settings in the [nova] section." + +msgid "" +"The policy file to be used may be specified in the ``/etc/cinder/cinder." 
+"conf`` file in the ``[oslo_policy]`` section as the value of the " +"``policy_file`` configuration option. That way there's no question what file " +"is being used." +msgstr "" +"The policy file to be used may be specified in the ``/etc/cinder/cinder." +"conf`` file in the ``[oslo_policy]`` section as the value of the " +"``policy_file`` configuration option. That way there's no question what file " +"is being used." + +msgid "" +"The qemu-img tool now has resource limits applied which prevent it from " +"using more than 1GB of address space or more than 2 seconds of CPU time. " +"This provides protection against denial of service attacks from maliciously " +"crafted or corrupted disk images." +msgstr "" +"The qemu-img tool now has resource limits applied which prevent it from " +"using more than 1GB of address space or more than 2 seconds of CPU time. " +"This provides protection against denial of service attacks from maliciously " +"crafted or corrupted disk images." + +msgid "" +"The reserve volume API was incorrectly enforcing \"volume:retype\" policy " +"action. It has been corrected to \"volume_extension:volume_actions:reserve\"." +msgstr "" +"The reserve volume API was incorrectly enforcing \"volume:retype\" policy " +"action. It has been corrected to \"volume_extension:volume_actions:reserve\"." + +msgid "" +"The sample file is YAML (because unlike JSON, YAML allows comments). If you " +"prefer, you may use a JSON policy file." +msgstr "" +"The sample file is YAML (because unlike JSON, YAML allows comments). If you " +"prefer, you may use a JSON policy file." + +msgid "" +"The support for ``cinder.keymgr.barbican.BarbicanKeyManager`` and the " +"``[keymgr]`` config section has now been removed. All configs should now be " +"switched to use ``castellan.key_manager.barbican_key_manager." +"BarbicanKeyManager`` and the ``[key_manager]`` config section." +msgstr "" +"The support for ``cinder.keymgr.barbican.BarbicanKeyManager`` and the " +"``[keymgr]`` config section has now been removed. All configs should now be " +"switched to use ``castellan.key_manager.barbican_key_manager." +"BarbicanKeyManager`` and the ``[key_manager]`` config section." + +msgid "The updated_at timestamp is now returned in listing detail." +msgstr "The updated_at timestamp is now returned in listing detail." + +msgid "" +"The use of xml files for vmax backend configuration is now deprecated and " +"will be removed during the following release. Deployers are encouraged to " +"use the cinder.conf for configuring connections to the vmax." +msgstr "" +"The use of XML files for VMAX backend configuration is now deprecated and " +"will be removed during the following release. Deployers are encouraged to " +"use the cinder.conf for configuring connections to the VMAX." + +msgid "" +"The v1 API was deprecated in the Juno release and is now defaulted to " +"disabled. In order to still use the v1 API, you must now set " +"``enable_v1_api`` to ``True`` in your cinder.conf file." +msgstr "" +"The v1 API was deprecated in the Juno release and is now defaulted to " +"disabled. In order to still use the v1 API, you must now set " +"``enable_v1_api`` to ``True`` in your cinder.conf file." 
+ +msgid "" +"The v2 API extensions os-volume-manage and os-snapshot-manage have been " +"mapped to the v3 resources manageable_volumes and manageable_snapshots" +msgstr "" +"The v2 API extensions os-volume-manage and os-snapshot-manage have been " +"mapped to the v3 resources manageable_volumes and manageable_snapshots" + +msgid "" +"The volume_clear option to use `shred` was deprecated in the Newton release " +"and has now been removed. Since deprecation, this option has performed the " +"same action as the `zero` option. Config settings for `shred` should be " +"updated to be set to `zero` for continued operation." +msgstr "" +"The volume_clear option to use `shred` was deprecated in the Newton release " +"and has now been removed. Since deprecation, this option has performed the " +"same action as the `zero` option. Configuration settings for `shred` should " +"be updated to be set to `zero` for continued operation." + +msgid "" +"The volumes created by VMware VMDK driver will be displayed as \"managed by " +"OpenStack Cinder\" in vCenter server." +msgstr "" +"The volumes created by VMware VMDK driver will be displayed as \"managed by " +"OpenStack Cinder\" in vCenter server." + +msgid "" +"The xiv_ds8k driver now supports IBM XIV, Spectrum Accelerate, FlashSystem " +"A9000, FlashSystem A9000R and DS8000 storage systems, and was renamed to IBM " +"Storage Driver for OpenStack. The changes include text changes, file names, " +"names of cinder.conf flags, and names of the proxy classes." +msgstr "" +"The xiv_ds8k driver now supports IBM XIV, Spectrum Accelerate, FlashSystem " +"A9000, FlashSystem A9000R and DS8000 storage systems, and was renamed to IBM " +"Storage Driver for OpenStack. The changes include text changes, file names, " +"names of cinder.conf flags, and names of the proxy classes." + +msgid "" +"There is a new policy option ``volume:force_delete`` which controls access " +"to the ability to specify force delete via the volume delete API. This is " +"separate from the pre-existing ``volume-admin-actions:force_delete`` policy " +"check." +msgstr "" +"There is a new policy option ``volume:force_delete`` which controls access " +"to the ability to specify force delete via the volume delete API. This is " +"separate from the pre-existing ``volume-admin-actions:force_delete`` policy " +"check." + +msgid "" +"This PowerMax driver now puts the unmanaged \"orphan\" volume in a storage " +"group called OS-Unmanaged. It is not possible to query a volume's associated " +"snapvx snapshots using the PowerMax management software, unless it belongs " +"to a storage group." +msgstr "" +"This PowerMax driver now puts the unmanaged \"orphan\" volume in a storage " +"group called OS-Unmanaged. It is not possible to query a volume's associated " +"snapvx snapshots using the PowerMax management software, unless it belongs " +"to a storage group." + +msgid "" +"This is made an optional configuration because it only applies to very " +"specific environments. If we were to make this global that would require a " +"rootwrap/privsep update that could break compatibility when trying to do " +"rolling upgrades of the volume service." +msgstr "" +"This is made an optional configuration because it only applies to very " +"specific environments. If we were to make this global that would require a " +"rootwrap/privsep update that could break compatibility when trying to do " +"rolling upgrades of the volume service." 
+ +msgid "" +"This will generate a file named ``policy.yaml`` in the ``etc/cinder`` " +"directory of your checked-out Cinder repository." +msgstr "" +"This will generate a file named ``policy.yaml`` in the ``etc/cinder`` " +"directory of your checked-out Cinder repository." + +msgid "" +"To accommodate these environments, and to maintain backward compatibility in " +"Newton we add a ``lvm_suppress_fd_warnings`` bool config to the LVM driver. " +"Setting this to True will append the LVM env vars to include the variable " +"``LVM_SUPPRESS_FD_WARNINGS=1``." +msgstr "" +"To accommodate these environments, and to maintain backward compatibility in " +"Newton we add a ``lvm_suppress_fd_warnings`` bool config to the LVM driver. " +"Setting this to True will append the LVM environment variables to include " +"the variable ``LVM_SUPPRESS_FD_WARNINGS=1``." + +msgid "" +"To address backwards compatibility, the new rules added to the volume_type." +"py policy file, default to the existing rule, ``volume_extension:" +"volume_type_encryption``, if it is set to a non-default value." +msgstr "" +"To address backwards compatibility, the new rules added to the volume_type." +"py policy file, default to the existing rule, ``volume_extension:" +"volume_type_encryption``, if it is set to a non-default value." + +msgid "" +"To find out what policies are available and what their default values are, " +"you can generate a sample policy file. To do this, you must have a local " +"copy of the Cinder source code repository. From the top level directory, run " +"the command::" +msgstr "" +"To find out what policies are available and what their default values are, " +"you can generate a sample policy file. To do this, you must have a local " +"copy of the Cinder source code repository. From the top level directory, run " +"the command::" + +msgid "" +"To get rid of long running DB data migrations that must be run offline, " +"Cinder will now be able to execute them online, on a live cloud. Before " +"upgrading from Ocata to Pike, operator needs to perform all the Newton data " +"migrations. To achieve that he needs to perform ``cinder-manage db " +"online_data_migrations`` until there are no records to be updated. To limit " +"DB performance impact migrations can be performed in chunks limited by ``--" +"max_number`` option. If your intent is to upgrade Cinder in a non-live " +"manner, you can use ``--ignore_state`` option safely. Please note that " +"finishing all the Newton data migrations will be enforced by the first " +"schema migration in Pike, so you won't be able to upgrade to Pike without " +"that." +msgstr "" +"To get rid of long running DB data migrations that must be run offline, " +"Cinder will now be able to execute them online, on a live cloud. Before " +"upgrading from Ocata to Pike, operator needs to perform all the Newton data " +"migrations. To achieve that he needs to perform ``cinder-manage db " +"online_data_migrations`` until there are no records to be updated. To limit " +"DB performance impact migrations can be performed in chunks limited by ``--" +"max_number`` option. If your intent is to upgrade Cinder in a non-live " +"manner, you can use ``--ignore_state`` option safely. Please note that " +"finishing all the Newton data migrations will be enforced by the first " +"schema migration in Pike, so you won't be able to upgrade to Pike without " +"that." 
+ +msgid "" +"Two new policies \"volume_extension:type_get\" and \"volume_extension:" +"type_get_all\" have been added to control type show and type list APIs." +msgstr "" +"Two new policies \"volume_extension:type_get\" and \"volume_extension:" +"type_get_all\" have been added to control type show and type list APIs." + +msgid "Update backend state in scheduler when extending volume." +msgstr "Update backend state in scheduler when extending volume." + +msgid "" +"Updated the parameter storwize_preferred_host_site from StrOpt to DictOpt in " +"cinder back-end configuration, and removed it from volume type configuration." +msgstr "" +"Updated the parameter storwize_preferred_host_site from StrOpt to DictOpt in " +"cinder back-end configuration, and removed it from volume type configuration." + +msgid "" +"Updated the parameter storwzie_preferred_host_site from StrOpt to DictOpt in " +"cinder back-end configuration, and removed it from volume type configuration." +msgstr "" +"Updated the parameter storwzie_preferred_host_site from StrOpt to DictOpt in " +"Cinder back-end configuration, and removed it from volume type configuration." + +msgid "" +"Updating the Datera Elastic DataFabric Storage Driver to version 2.1. This " +"adds ACL support, Multipath support and basic IP pool support." +msgstr "" +"Updating the Datera Elastic DataFabric Storage Driver to version 2.1. This " +"adds ACL support, Multipath support and basic IP pool support." + +msgid "Upgrade Notes" +msgstr "Upgrade Notes" + +msgid "" +"Users of the Datera Cinder driver are now required to use Datera DataFabric " +"version 1.0+. Versions before 1.0 will not be able to utilize this new " +"driver since they still function on v1 of the Datera DataFabric API" +msgstr "" +"Users of the Datera Cinder driver are now required to use Datera DataFabric " +"version 1.0+. Versions before 1.0 will not be able to utilise this new " +"driver since they still function on v1 of the Datera DataFabric API" + +msgid "" +"Users of the IBM Storage Driver, previously known as the IBM XIV/DS8K " +"driver, upgrading from Mitaka or previous releases, need to reconfigure the " +"relevant cinder.conf entries. In most cases the change is just removal of " +"the xiv-ds8k field prefix, but for details use the driver documentation." +msgstr "" +"Users of the IBM Storage Driver, previously known as the IBM XIV/DS8K " +"driver, upgrading from Mitaka or previous releases, need to reconfigure the " +"relevant cinder.conf entries. In most cases the change is just removal of " +"the xiv-ds8k field prefix, but for details use the driver documentation." + +msgid "" +"Users of the ibmnas driver should switch to using the IBM GPFS driver to " +"enable Cinder access to IBM NAS resources. For details configuring the IBM " +"GPFS driver, see the GPFS config reference. - http://docs.openstack.org/" +"liberty/config-reference/content/GPFS-driver.html" +msgstr "" +"Users of the ibmnas driver should switch to using the IBM GPFS driver to " +"enable Cinder access to IBM NAS resources. For details configuring the IBM " +"GPFS driver, see the GPFS config reference. - http://docs.openstack.org/" +"liberty/config-reference/content/GPFS-driver.html" + +msgid "VMAX driver - Removed deprecated option ``cinder_dell_emc_config_file``" +msgstr "" +"VMAX driver - Removed deprecated option ``cinder_dell_emc_config_file``" + +msgid "" +"VMAX driver - configuration tag san_rest_port will be replaced by " +"san_api_port in the next release." 
+msgstr "" +"VMAX driver - configuration tag san_rest_port will be replaced by " +"san_api_port in the next release." + +msgid "VMAX driver - fixes SSL certificate verification error." +msgstr "VMAX driver - fixes SSL certificate verification error." + +msgid "" +"VMAX driver support for new configuration option - vmax_snapvx_unlink_limit " +"for specifying the maximum number of unlinks which will be performed before " +"a clone operation. Default value is 3" +msgstr "" +"VMAX driver support for new configuration option - vmax_snapvx_unlink_limit " +"for specifying the maximum number of unlinks which will be performed before " +"a clone operation. Default value is 3" + +msgid "" +"VMAX driver version 3.0, replacing SMI-S with Unisphere REST. This driver " +"supports VMAX3 hybrid and All Flash arrays." +msgstr "" +"VMAX driver version 3.0, replacing SMI-S with Unisphere REST. This driver " +"supports VMAX3 hybrid and All Flash arrays." + +msgid "" +"VMware VMDK driver and FCD driver now support NFS 4.1 datastores in vCenter " +"server." +msgstr "" +"VMware VMDK driver and FCD driver now support NFS 4.1 datastores in vCenter " +"server." + +msgid "" +"VMware VMDK driver and FCD driver now support a config option " +"``vmware_datastore_regex`` to specify the regular expression pattern to " +"match the name of datastores where backend volumes are created." +msgstr "" +"VMware VMDK driver and FCD driver now support a config option " +"``vmware_datastore_regex`` to specify the regular expression pattern to " +"match the name of datastores where backend volumes are created." + +msgid "VMware VMDK driver deprecated the support for vCenter version 5.1" +msgstr "VMware VMDK driver deprecated the support for vCenter version 5.1" + +msgid "" +"VMware VMDK driver now supports a config option ``vmware_lazy_create`` to " +"disable the default behavior of lazy creation of raw volumes in the backend." +msgstr "" +"VMware VMDK driver now supports a config option ``vmware_lazy_create`` to " +"disable the default behaviour of lazy creation of raw volumes in the backend." + +msgid "" +"VMware VMDK driver now supports changing adpater type using retype. To " +"change the adapter type, set ``vmware:adapter_type`` in the new volume type." +msgstr "" +"VMware VMDK driver now supports changing adapter type using retype. To " +"change the adapter type, set ``vmware:adapter_type`` in the new volume type." + +msgid "" +"VMware VMDK driver now supports vSphere template as a volume snapshot format " +"in vCenter server. The snapshot format in vCenter server can be specified " +"using driver config option ``vmware_snapshot_format``." +msgstr "" +"VMware VMDK driver now supports vSphere template as a volume snapshot format " +"in vCenter server. The snapshot format in vCenter server can be specified " +"using driver config option ``vmware_snapshot_format``." + +msgid "" +"VMware VMDK driver now supports volume type extra-spec option ``vmware:" +"adapter_type`` to specify the adapter type of volumes in vCenter server." +msgstr "" +"VMware VMDK driver now supports volume type extra-spec option ``vmware:" +"adapter_type`` to specify the adapter type of volumes in vCenter server." + +msgid "" +"VMware VMDK driver will use vSphere template as the default snapshot format " +"in vCenter server." +msgstr "" +"VMware VMDK driver will use vSphere template as the default snapshot format " +"in vCenter server." + +msgid "" +"VNX cinder driver now supports async migration during volume cloning. 
By "
+"default, the cloned volume will be available after the migration starts in "
+"the VNX instead of waiting for the completion of migration. This greatly "
+"accelerates the cloning process. If user wants to disable this, he could add "
+"``--metadata async_migrate=False`` when creating volume from source volume/"
+"snapshot."
+msgstr ""
+"VNX Cinder driver now supports async migration during volume cloning. By "
+"default, the cloned volume will be available after the migration starts in "
+"the VNX instead of waiting for the completion of migration. This greatly "
+"accelerates the cloning process. If users want to disable this, they can "
+"add ``--metadata async_migrate=False`` when creating a volume from source "
+"volume/snapshot."
+
+msgid "Violin"
+msgstr "Violin"
+
+msgid "Violin Memory 6000 array series drivers are removed."
+msgstr "Violin Memory 6000 array series drivers are removed."
+
+msgid ""
+"Volume \"force delete\" was introduced with the 3.23 API microversion, "
+"however the check for in the service was incorrectly looking for "
+"microversion 3.2. That check has now been fixed. It is possible that an API "
+"call using a microversion below 3.23 would previously work for this call, "
+"which will now fail. This closes `bug #1783028 `_."
+msgstr ""
+"Volume \"force delete\" was introduced with the 3.23 API microversion, "
+"however the check in the service was incorrectly looking for microversion "
+"3.2. That check has now been fixed. It is possible that an API call using a "
+"microversion below 3.23 would previously work for this call, which will now "
+"fail. This closes `bug #1783028 `_."
+
+msgid "Volume Manage/Unmanage support for Datera Volume Drivers"
+msgstr "Volume Manage/Unmanage support for Datera Volume Drivers"
+
+msgid "Volume Snapshots:"
+msgstr "Volume Snapshots:"
+
+msgid ""
+"Volume group updates of any kind had previously required the group to be in "
+"``Available`` status. Updates to the group name or description will now work "
+"regardless of the volume group status."
+msgstr ""
+"Volume group updates of any kind had previously required the group to be in "
+"``Available`` status. Updates to the group name or description will now work "
+"regardless of the volume group status."
+
+msgid ""
+"Volume manage/unmanage support for IBM FlashSystem FC and iSCSI drivers."
+msgstr ""
+"Volume manage/unmanage support for IBM FlashSystem FC and iSCSI drivers."
+
+msgid "Volume manage/unmanage support for Oracle ZFSSA iSCSI and NFS drivers."
+msgstr "Volume manage/unmanage support for Oracle ZFSSA iSCSI and NFS drivers."
+
+msgid ""
+"Volume type can be filtered within extra spec: /types?extra_specs={\"key\":"
+"\"value\"} since microversion \"3.52\"."
+msgstr ""
+"Volume type can be filtered within extra spec: /types?extra_specs={\"key\":"
+"\"value\"} since microversion \"3.52\"."
+
+msgid "Volume_actions:"
+msgstr "Volume_actions:"
+
+msgid ""
+"Volumes created on NetApp cDOT and 7mode storage systems now report "
+"'multiattach' capability. They have always supported such a capability, but "
+"not reported it to Cinder."
+msgstr ""
+"Volumes created on NetApp cDOT and 7mode storage systems now report "
+"'multiattach' capability. They have always supported such a capability, but "
+"not reported it to Cinder."
+
+msgid ""
+"VzStorage volume driver now supports choosing desired volume format by "
+"setting vendor property 'vz:volume_format' in volume type metadata. Allowed "
+"values are 'ploop', 'qcow2' and 'raw'."
+msgstr "" +"VzStorage volume driver now supports choosing desired volume format by " +"setting vendor property 'vz:volume_format' in volume type metadata. Allowed " +"values are 'ploop', 'qcow2' and 'raw'." + +msgid "" +"We no longer leave orphaned chunks on the backup backend or leave a " +"temporary volume/snapshot when aborting a backup." +msgstr "" +"We no longer leave orphaned chunks on the backup backend or leave a " +"temporary volume/snapshot when aborting a backup." + +msgid "" +"We replaced the config option in the disco volume driver " +"\"disco_choice_client\" with \"disco_client_protocol\". We add \"san_api_port" +"\" as new config option in san driver for accessing the SAN API using this " +"port." +msgstr "" +"We replaced the config option in the disco volume driver " +"\"disco_choice_client\" with \"disco_client_protocol\". We add \"san_api_port" +"\" as new config option in SAN driver for accessing the SAN API using this " +"port." + +msgid "" +"When Barbican is the encryption key_manager backend, any encryption keys " +"associated with the legacy ConfKeyManager will be automatically migrated to " +"Barbican. All database references to the ConfKeyManager's all-zeros key ID " +"will be updated with a Barbican key ID. The encryption keys do not change. " +"Only the encryption key ID changes." +msgstr "" +"When Barbican is the encryption key_manager backend, any encryption keys " +"associated with the legacy ConfKeyManager will be automatically migrated to " +"Barbican. All database references to the ConfKeyManager's all-zeros key ID " +"will be updated with a Barbican key ID. The encryption keys do not change. " +"Only the encryption key ID changes." + +msgid "" +"When backing up a volume from a snapshot, the volume status would be set to " +"\"backing-up\", preventing operations on the volume until the backup is " +"complete. This status is now set on the snapshot instead, making the volume " +"available for other operations." +msgstr "" +"When backing up a volume from a snapshot, the volume status would be set to " +"\"backing-up\", preventing operations on the volume until the backup is " +"complete. This status is now set on the snapshot instead, making the volume " +"available for other operations." + +msgid "" +"When encryption keys based on the ConfKeyManager's fixed_key are migrated to " +"Barbican, ConfKeyManager keys stored in the Backup table are included in the " +"migration process. Fixes `bug 1757235 `__." +msgstr "" +"When encryption keys based on the ConfKeyManager's fixed_key are migrated to " +"Barbican, ConfKeyManager keys stored in the Backup table are included in the " +"migration process. Fixes `bug 1757235 `__." + +msgid "" +"When managing volume types an OpenStack provider is now given more control " +"to grant access to for different storage type operations. The provider can " +"now customize access to type create, delete, update, list, and show using " +"new entries in the cinder policy file." +msgstr "" +"When managing volume types an OpenStack provider is now given more control " +"to grant access to for different storage type operations. The provider can " +"now customise access to type create, delete, update, list, and show using " +"new entries in the Cinder policy file." + +msgid "" +"When performing a *live* upgrade from Liberty it may happen that retype " +"calls will reserve additional quota. 
As by default quota reservations are " +"invalidated after 24 hours (config option ``reservation_expire=86400``), we " +"recommend either decreasing that time or watching for unused quota " +"reservations manually during the upgrade process." +msgstr "" +"When performing a *live* upgrade from Liberty it may happen that retype " +"calls will reserve additional quota. As by default quota reservations are " +"invalidated after 24 hours (config option ``reservation_expire=86400``), we " +"recommend either decreasing that time or watching for unused quota " +"reservations manually during the upgrade process." + +msgid "" +"When restoring the backup of an encrypted volume, the destination volume is " +"assigned a clone of the backup's encryption key ID. This ensures every " +"restored backup has a unique encryption key ID, even when multiple volumes " +"have been restored from the same backup." +msgstr "" +"When restoring the backup of an encrypted volume, the destination volume is " +"assigned a clone of the backup's encryption key ID. This ensures every " +"restored backup has a unique encryption key ID, even when multiple volumes " +"have been restored from the same backup." + +msgid "" +"When running Nova Compute and Cinder Volume or Backup services on the same " +"host they must use a shared lock directory to avoid rare race conditions " +"that can cause volume operation failures (primarily attach/detach of " +"volumes). This is done by setting the \"lock_path\" to the same directory in " +"the \"oslo_concurrency\" section of nova.conf and cinder.conf. This issue " +"affects all previous releases utilizing os-brick and shared operations on " +"hosts between Nova Compute and Cinder data services." +msgstr "" +"When running Nova Compute and Cinder Volume or Backup services on the same " +"host they must use a shared lock directory to avoid rare race conditions " +"that can cause volume operation failures (primarily attach/detach of " +"volumes). This is done by setting the \"lock_path\" to the same directory in " +"the \"oslo_concurrency\" section of nova.conf and cinder.conf. This issue " +"affects all previous releases utilising os-brick and shared operations on " +"hosts between Nova Compute and Cinder data services." + +msgid "" +"When running PostgreSQL it is required to upgrade and restart all the cinder-" +"api services along with DB migration 62." +msgstr "" +"When running PostgreSQL it is required to upgrade and restart all the cinder-" +"api services along with DB migration 62." + +msgid "" +"When using the RBD pool exclusively for Cinder we can now set " +"`rbd_exclusive_cinder_pool` to `true` and Cinder will use DB information to " +"calculate provisioned size instead of querying all volumes in the backend, " +"which will reduce the load on the Ceph cluster and the volume service." +msgstr "" +"When using the RBD pool exclusively for Cinder we can now set " +"`rbd_exclusive_cinder_pool` to `true` and Cinder will use DB information to " +"calculate provisioned size instead of querying all volumes in the backend, " +"which will reduce the load on the Ceph cluster and the volume service." + +msgid "" +"While configuring NetApp cDOT back ends, new configuration options " +"('replication_device' and 'netapp_replication_aggregate_map') must be added " +"in order to use the host-level failover feature." 
+msgstr "" +"While configuring NetApp cDOT back ends, new configuration options " +"('replication_device' and 'netapp_replication_aggregate_map') must be added " +"in order to use the host-level failover feature." + +msgid "" +"With removal of the CoprHD Volume Driver any volumes being used by Cinder " +"within a CoprHD backend should be migrated to a supported storage backend " +"before upgrade." +msgstr "" +"With removal of the CoprHD Volume Driver any volumes being used by Cinder " +"within a CoprHD backend should be migrated to a supported storage backend " +"before upgrade." + +msgid "" +"With the Dell SC Cinder Driver if a volume is retyped to a new storage " +"profile all volumes created via snapshots from this volume will also change " +"to the new storage profile." +msgstr "" +"With the Dell SC Cinder Driver if a volume is retyped to a new storage " +"profile all volumes created via snapshots from this volume will also change " +"to the new storage profile." + +msgid "" +"With the Dell SC Cinder Driver retype failed to return a tuple if it had to " +"return an update to the volume state." +msgstr "" +"With the Dell SC Cinder Driver retype failed to return a tuple if it had to " +"return an update to the volume state." + +msgid "" +"With the Dell SC Cinder Driver retyping from one replication type to another " +"type (ex. regular replication to live volume replication) is not supported." +msgstr "" +"With the Dell SC Cinder Driver retyping from one replication type to another " +"type (ex. regular replication to live volume replication) is not supported." + +msgid "" +"With the Dell SC Cinder Driver retyping to or from a replicated type should " +"now work." +msgstr "" +"With the Dell SC Cinder Driver retyping to or from a replicated type should " +"now work." + +msgid "X-IO" +msgstr "X-IO" + +msgid "ZTE" +msgstr "ZTE" + +msgid "" +"[`Community Goal `_] Support has been added for developers to write pre-upgrade " +"checks. Operators can run these checks using ``cinder-status upgrade " +"check``. This allows operators to be more confident when upgrading their " +"deployments by having a tool that automates programmable checks against the " +"deployment configuration or dataset." +msgstr "" +"[`Community Goal `_] Support has been added for developers to write pre-upgrade " +"checks. Operators can run these checks using ``cinder-status upgrade " +"check``. This allows operators to be more confident when upgrading their " +"deployments by having a tool that automates programmable checks against the " +"deployment configuration or dataset." + +msgid "" +"[`bug 1772421 `_] " +"INFINIDAT fixed a bug in volume extension feature where volumes were not " +"extended to target size but added the given target size." +msgstr "" +"[`bug 1772421 `_] " +"INFINIDAT fixed a bug in volume extension feature where volumes were not " +"extended to target size but added the given target size." 
+
+msgid ""
+"``\"admin_or_storage_type_admin\": \"is_admin:True or role:storage_type_admin"
+"\",``"
+msgstr ""
+"``\"admin_or_storage_type_admin\": \"is_admin:True or role:storage_type_admin"
+"\",``"
+
+msgid ""
+"``\"volume_extension:types_manage\": \"rule:admin_or_storage_type_admin\", "
+"\"volume_extension:volume_type_access:addProjectAccess\": \"rule:"
+"admin_or_storage_type_admin\", \"volume_extension:volume_type_access:"
+"removeProjectAccess\": \"rule:admin_or_storage_type_admin\",``"
+msgstr ""
+"``\"volume_extension:types_manage\": \"rule:admin_or_storage_type_admin\", "
+"\"volume_extension:volume_type_access:addProjectAccess\": \"rule:"
+"admin_or_storage_type_admin\", \"volume_extension:volume_type_access:"
+"removeProjectAccess\": \"rule:admin_or_storage_type_admin\",``"
+
+msgid ""
+"``RESKEY:availability_zones`` now is a reserved spec key for AZ volume type, "
+"and administrator can create AZ volume type that includes AZ restrictions by "
+"adding a list of Az's to the extra specs similar to: ``RESKEY:"
+"availability_zones: az1,az2``."
+msgstr ""
+"``RESKEY:availability_zones`` now is a reserved spec key for AZ volume type, "
+"and administrator can create AZ volume type that includes AZ restrictions by "
+"adding a list of AZs to the extra specs similar to: ``RESKEY:"
+"availability_zones: az1,az2``."
+
+msgid "``choice_client``"
+msgstr "``choice_client``"
+
+msgid "``choice_client`` to ``disco_choice_client``"
+msgstr "``choice_client`` to ``disco_choice_client``"
+
+msgid ""
+"``cinder.keymgr.conf_key_mgr.ConfKeyManager`` still remains, but the "
+"``fixed_key`` configuration options should be moved to the ``[key_manager]`` "
+"section"
+msgstr ""
+"``cinder.keymgr.conf_key_mgr.ConfKeyManager`` still remains, but the "
+"``fixed_key`` configuration options should be moved to the ``[key_manager]`` "
+"section"
+
+msgid "``clone_check_timeout`` to ``disco_clone_check_timeout``"
+msgstr "``clone_check_timeout`` to ``disco_clone_check_timeout``"
+
+msgid "``disco_client_port``"
+msgstr "``disco_client_port``"
+
+msgid "``disco_client``"
+msgstr "``disco_client``"
+
+msgid "``disco_src_api_port``"
+msgstr "``disco_src_api_port``"
+
+msgid ""
+"``iscsi_ip_address``, ``iscsi_port``, ``target_helper``, "
+"``iscsi_target_prefix`` and ``iscsi_protocol`` config options are deprecated "
+"in flavor of ``target_ip_address``, ``target_port``, ``target_helper``, "
+"``target_prefix`` and ``target_protocol`` accordingly. Old config options "
+"will be removed in S release."
+msgstr ""
+"``iscsi_ip_address``, ``iscsi_port``, ``target_helper``, "
+"``iscsi_target_prefix`` and ``iscsi_protocol`` config options are deprecated "
+"in favour of ``target_ip_address``, ``target_port``, ``target_helper``, "
+"``target_prefix`` and ``target_protocol`` accordingly. Old config options "
+"will be removed in S release."
+ +msgid "``os-set_image_metadata``" +msgstr "``os-set_image_metadata``" + +msgid "``os-unset_image_metadata``" +msgstr "``os-unset_image_metadata``" + +msgid "``rest_ip``" +msgstr "``rest_ip``" + +msgid "``rest_ip`` to ``disco_rest_ip``" +msgstr "``rest_ip`` to ``disco_rest_ip``" + +msgid "``restore_check_timeout`` to ``disco_restore_check_timeout``" +msgstr "``restore_check_timeout`` to ``disco_restore_check_timeout``" + +msgid "``retry_interval``" +msgstr "``retry_interval``" + +msgid "``retry_interval`` to ``disco_retry_interval``" +msgstr "``retry_interval`` to ``disco_retry_interval``" + +msgid "``snapshot_check_timeout`` to ``disco_snapshot_check_timeout``" +msgstr "``snapshot_check_timeout`` to ``disco_snapshot_check_timeout``" + +msgid "``volume_extension:volume_type_encryption:create``" +msgstr "``volume_extension:volume_type_encryption:create``" + +msgid "``volume_extension:volume_type_encryption:delete``" +msgstr "``volume_extension:volume_type_encryption:delete``" + +msgid "``volume_extension:volume_type_encryption:get``" +msgstr "``volume_extension:volume_type_encryption:get``" + +msgid "``volume_extension:volume_type_encryption:update``" +msgstr "``volume_extension:volume_type_encryption:update``" + +msgid "``volume_name_prefix`` to ``disco_volume_name_prefix``" +msgstr "``volume_name_prefix`` to ``disco_volume_name_prefix``" + +msgid "" +"a [nova] section is added to configure the connection to the compute " +"service, which is needed to the InstanceLocalityFilter, for example." +msgstr "" +"a [nova] section is added to configure the connection to the compute " +"service, which is needed to the InstanceLocalityFilter, for example." + +msgid "" +"cinder-backup service is now decoupled from cinder-volume, which allows more " +"flexible scaling." +msgstr "" +"cinder-backup service is now decoupled from cinder-volume, which allows more " +"flexible scaling." + +msgid "" +"cinder.api.middleware.sizelimit was deprecated in kilo and compatability " +"shim added to call into oslo_middleware. Using oslo_middleware.sizelimit " +"directly will allow us to remove the compatability shim in a future release." +msgstr "" +"cinder.api.middleware.sizelimit was deprecated in Kilo and a compatibility " +"shim added to call into oslo_middleware. Using oslo_middleware.sizelimit " +"directly will allow us to remove the compatibility shim in a future release." + +msgid "create a snapshot: \"POST /v3/{project_id}/snapshots\"" +msgstr "create a snapshot: \"POST /v3/{project_id}/snapshots\"" + +msgid "create_group" +msgstr "create_group" + +msgid "create_snapshot" +msgstr "create_snapshot" + +msgid "create_volume" +msgstr "create_volume" + +msgid "" +"datera_api_token -- this has been replaced by san_login and san_password" +msgstr "" +"datera_api_token -- this has been replaced by san_login and san_password" + +msgid "default_cgsnapshot_type is reserved for migrating CGs." +msgstr "default_cgsnapshot_type is reserved for migrating CGs." + +msgid "delete group: \"POST /v3/{project_id}/groups/{group_id}/action\"" +msgstr "delete group: \"POST /v3/{project_id}/groups/{group_id}/action\"" + +msgid "" +"dell_server_os option added to the Dell SC driver. This option allows the " +"selection of the server type used when creating a server on the Dell DSM " +"during initialize connection. This is only used if the server does not " +"exist. Valid values are from the Dell DSM create server list." +msgstr "" +"dell_server_os option added to the Dell SC driver. 
This option allows the " +"selection of the server type used when creating a server on the Dell DSM " +"during initialise connection. This is only used if the server does not " +"exist. Valid values are from the Dell DSM create server list." + +msgid "extend_volume" +msgstr "extend_volume" + +msgid "" +"failover replication: \"POST /v3/{project_id}/groups/{group_id}/action\"" +msgstr "" +"failover replication: \"POST /v3/{project_id}/groups/{group_id}/action\"" + +msgid "manage_existing" +msgstr "manage_existing" + +msgid "manage_existing_snapshot" +msgstr "manage_existing_snapshot" + +msgid "migrate_volume" +msgstr "migrate_volume" + +msgid "nova-compute version - needs to be the latest for Pike." +msgstr "nova-compute version - needs to be the latest for Pike." + +msgid "" +"only iscsi and fibre channel volume types are supported on the nova side " +"currently." +msgstr "" +"only iSCSI and fibre channel volume types are supported on the Nova side " +"currently." + +msgid "only the libvirt compute driver supports this currently." +msgstr "only the libvirt compute driver supports this currently." + +msgid "retype_volume" +msgstr "retype_volume" + +msgid "set bootable: \"POST /v3/{project_id}/volumes/{volume_id}/action\"" +msgstr "set bootable: \"POST /v3/{project_id}/volumes/{volume_id}/action\"" + +msgid "" +"upload-to-image using Image API v2 now correctly handles custom image " +"properties." +msgstr "" +"upload-to-image using Image API v2 now correctly handles custom image " +"properties." + +msgid "" +"use oslo_middleware.sizelimit rather than cinder.api.middleware.sizelimit " +"compatibility shim" +msgstr "" +"use oslo_middleware.sizelimit rather than cinder.api.middleware.sizelimit " +"compatibility shim" + +msgid "" +"volume readonly update: \"POST /v3/{project_id}/volumes/{volume_id}/action\"" +msgstr "" +"volume readonly update: \"POST /v3/{project_id}/volumes/{volume_id}/action\""