User Tools

Site Tools


sles

Differences

This shows you the differences between two versions of the page.

Link to this comparison view

Both sides previous revisionPrevious revision
Next revision
Previous revision
sles [2010/10/06 14:16] michaelsles [2022/11/04 17:23] (current) mduersch
Line 1: Line 1:
 ====== SLES ====== ====== SLES ======
 +
 +==== Add Salt Minion to SUSE Manager ====
 +   # cleanup if necessary
 +   rpm -qa | grep salt
 +   rpm -e salt-2016.11.10-43.54.1.x86_64 salt-minion-2016.11.10-43.54.1.x86_64
 +   rm -r /etc/salt
 +   rm /etc/zypp/services.d/*
 +   rm /etc/zypp/repos.d/*
 +
 +   # connect minion. 
 +   zypper ar http://my.sum.url.de/pub/repositories/sle/12/5/bootstrap sles12-sp5
 +   zypper in salt-minion
 +   sed -i 's/\#master: salt/master: my.sum.url.de/' /etc/salt/minion
 +   rm /etc/machine-id
 +   systemd-machine-id-setup
 +   rm /var/lib/dbus/machine-id
 +   dbus-uuidgen --ensure
 +   systemctl enable salt-minion.service
 +   hostname > /etc/salt/minion_id
 +   echo "deployment: asvpn" >> /etc/salt/grains
 +   rcsalt-minion start
 +   *WAIT*
 +   zypper lr
 +
 +==== iSCSI ====
 +
 +  195  2013-06-30 11:32:28 iscsiadm -m node iqn.2013-06.home.itbetrieb:tsm --login
 +  198  2013-06-30 11:49:53 iscsiadm -m node --logout
 +
 +==== kernel update kills modules ====
 +
 +  root@bacula:/etc/iscsi#  iscsiadm -m discovery -t sendtargets -p 192.168.178.21:3260
 +  iscsiadm: read error (-1/104), daemon died?
 +  iscsiadm: Could not scan /sys/class/iscsi_transport.
 +  libkmod: ERROR ../libkmod/libkmod-module.c:838 kmod_module_insert_module: could not find module by        name='iscsi_tcp'
 +  iscsiadm: Could not insert module tcp. Kmod error -2
 +  iscsiadm: iSCSI driver tcp is not loaded. Load the module then retry the command.
 +  iscsiadm: Could not perform SendTargets discovery: iSCSI driver not found. Please make sure it is loaded, and   retry the operation
 +  
 +  missing module is included in:
 +  
 +  apt-get install linux-modules-extra
 +
 +  
 +  iscsiadm -m node iqn.2014-01.home.itbetrieb:tsm.disk1 --portal "192.168.56.50:3260"
 +==== iSCSI target ====
 +
 +bind specific interface.
 +- not possible through /etc/ietf.conf.
 +- change /etc/init.d/iscsitarget line :
 +        startproc -p $PIDFILE $DAEMON 
 +add parameter address
 +        startproc -p $PIDFILE $DAEMON --address 192.168.56.100
 +
 +
 +connect to iscsi target:
 +tsm2:/etc/init.d # iscsiadm -m node -n  iqn.2013-06.home.itbetrieb:tsm --login
 +
 +
 +List all iscsi Volumes that are available on the Target
 +
 +  infra:/dev # cat /proc/net/iet/volume
 +  tid:1 name:iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221
 +        lun:0 state:0 iotype:fileio iomode:wt blocks:20963328 blocksize:512 path:/dev/vg01/lvol1
 +        lun:1 state:0 iotype:fileio iomode:wt blocks:9830400 blocksize:512 path:/dev/vg02/lvol1
 +        lun:2 state:0 iotype:fileio iomode:wt blocks:9830400 blocksize:512 path:/dev/vg02/lvol2
 +        lun:3 state:0 iotype:fileio iomode:wt blocks:9830400 blocksize:512 path:/dev/vg02/lvol3
 +        lun:4 state:0 iotype:fileio iomode:wt blocks:33415168 blocksize:512 path:/dev/vg02/lvol4
 +
 +
 +List all Volumes/disks that are available at the initiator after iscsi --login
 +
 +  tsmsrv1:/dev # lsscsi
 +  [0:0:0:0]    disk    ATA      VBOX HARDDISK    1.0   /dev/sda
 +  [2:0:0:0]    cd/dvd  VBOX     CD-ROM           1.0   /dev/sr0
 +  [7:0:0:0]    disk    IET      VIRTUAL-DISK         /dev/sdb
 +  [7:0:0:1]    disk    IET      VIRTUAL-DISK         /dev/sdc
 +  [7:0:0:2]    disk    IET      VIRTUAL-DISK         /dev/sdd
 +  [7:0:0:3]    disk    IET      VIRTUAL-DISK         /dev/sde
 +  [7:0:0:4]    disk    IET      VIRTUAL-DISK         /dev/sdf
 +
 +
 +
 +==== SLEHA - corosync pacemaker ====
 +
 +
 +pvcreate /dev/sdb
 +pvcreate /dev/sdc
 +pvcreate /dev/sdd
 +pvcreate /dev/sde
 +pvcreate /dev/sdf
 + 
 +lrwxrwxrwx 1 root root  9 Feb  5 12:48 ip-192.168.56.100:3260-iscsi-iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221-lun-0 -> ../../sdb
 +lrwxrwxrwx 1 root root  9 Feb  5 12:48 ip-192.168.56.100:3260-iscsi-iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221-lun-1 -> ../../sdc
 +lrwxrwxrwx 1 root root  9 Feb  5 12:48 ip-192.168.56.100:3260-iscsi-iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221-lun-2 -> ../../sdd
 +lrwxrwxrwx 1 root root  9 Feb  5 12:48 ip-192.168.56.100:3260-iscsi-iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221-lun-3 -> ../../sde
 +lrwxrwxrwx 1 root root  9 Feb  5 12:48 ip-192.168.56.100:3260-iscsi-iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221-lun-4 -> ../../sdf
 +
 +sdb disk:
 +iscsiadm -m node -T iqn.2014-02.home.itbetrieb:71bda8c2-ac75-48a9-a30b-7f958d3bcf8f --login
 +
 +
 +
 +login to ONLY ONE target:
 +iscsiadm -m node -T iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221 --login 
 +
 +
 +infra:/etc # cat /proc/net/iet/session
 +tid:1 name:iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221
 +        sid:562950141968896 initiator:iqn.2014-02.home.itbetrieb:01:57575330903e
 +                cid:0 ip:192.168.56.101 state:active hd:none dd:none
 +
 +
 +unavailable vgs after setting sbd to wrong device
 +
 +infra:/dev # vgchange -a y vg01
 +  1 logical volume(s) in volume group "vg01" now active
 +infra:/dev # vgchange -a y vg02
 +  4 logical volume(s) in volume group "vg02" now active
 +infra:/dev # vgchange -a y vg04
 +  Volume group "vg04" not found
 +infra:/dev # vgchange -a y vg03
 +  1 logical volume(s) in volume group "vg03" now active
 +
 +
 +LATEST ERROR
 +  
 + Retrieving SSH keys from 192.168.56.101
 +  Configuring csync2
 +  Enabling csync2 service
 +  Enabling xinetd service
 +  Merging known_hosts
 +  Probing for new partitions......ERROR: Failed to probe new partitions
 +tsmsrv2:/dev/disk/by-path # crm status
 +Could not establish cib_ro connection: Connection refused (111)
 +ERROR: crm_mon exited with code 107 and said: Connection to cluster failed: Transport endpoint is not connected
 +
 +
 +infra:/var/log # cat /proc/net/iet/volume
 +tid:2 name:iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221
 +        lun:0 state:0 iotype:fileio iomode:wt blocks:20963328 blocksize:512 path:/dev/vg01/lvol1
 +        lun:1 state:0 iotype:fileio iomode:wt blocks:9830400 blocksize:512 path:/dev/vg02/lvol1
 +        lun:2 state:0 iotype:fileio iomode:wt blocks:9830400 blocksize:512 path:/dev/vg02/lvol2
 +        lun:3 state:0 iotype:fileio iomode:wt blocks:9830400 blocksize:512 path:/dev/vg02/lvol3
 +        lun:4 state:0 iotype:fileio iomode:wt blocks:33415168 blocksize:512 path:/dev/vg02/lvol4
 +tid:1 name:iqn.2014-02.home.itbetrieb:71bda8c2-ac75-48a9-a30b-7f958d3bcf8f
 +        lun:0 state:0 iotype:fileio iomode:wt blocks:2088960 blocksize:512 path:/dev/vg03/lvol1
 +
 +wenn keine luns da: iscsitarget restart
 +
 +
 +delete modify a resource
 +   crm_resource -D --resource vtsm1-target -t primitive
 +   # crm resource param <resource> set <name> <value>
 +   # crm resource param <resource> delete <name>
 +   # crm resource param <resource> show <name>
 +
 +failed Meldungen loswerden.
 +  # crm resource cleanup <resource>
 +
 +
 +# clustermode für vg aktivieren:
 +    vgchange -c y
 +
 +node tsmsrv1
 +node tsmsrv2
 +
 +  crm configure primitive vtsm1IP ocf:heartbeat:IPaddr params ip="192.168.56.111" op monitor interval="10s"
 +
 +  # mirroring (not wanted in this case)
 +  # crm configure primitive iscsi-drbd ocf:linbit:drbd params drbd_resource="iscsi" op monitor interval="30s" op start interval="0" timeout="240s" op stop interval="0" timeout="100s"
 +
 +
 +  # crm configure primitive vtsm1target ocf:heartbeat:iSCSITarget params iqn="iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221" tid="2" op monitor interval="30s"
 +  # crm configure primitive vtsm1lun0 ocf:heartbeat:iSCSILogicalUnit params target_iqn="iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221" lun="0" path="/dev/disk/by-path/ip-192.168.56.100:3260-iscsi-iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221-lun-0"
 +
 +  # crm configure primitive vtsm1lun1 ocf:heartbeat:iSCSILogicalUnit params target_iqn="iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221" lun="1" path="/dev/drbd1"
 +  # crm configure primitive vtsm1lun2 ocf:heartbeat:iSCSILogicalUnit params target_iqn="iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221" lun="2" path="/dev/drbd1"
 +  # crm configure primitive vtsm1lun3 ocf:heartbeat:iSCSILogicalUnit params target_iqn="iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221" lun="3" path="/dev/drbd1"
 +  # crm configure primitive vtsm1lun4 ocf:heartbeat:iSCSILogicalUnit params target_iqn="iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221" lun="1" path="/dev/drbd1"
 +
 +
 +
 +  crm configure primitive vtsm1IP ocf:heartbeat:IPaddr params ip="192.168.56.111" op monitor interval="10s" 
 +  crm configure primitive vtsm1LVM ocf:heartbeat:LVM params volgrpname="vg01" exclusive="yes" op monitor interval="60s" timeout="60s"
 +  crm configure primitive vtsm1db ocf:heartbeat:Filesystem params device="/dev/vg01/lvol1" directory="/vtsm1/db" fstype="xfs" op monitor interval="20" timeout="40"
 +  crm configure primitive vtsm1log ocf:heartbeat:Filesystem params device="/dev/vg01/lvol2" directory="/vtsm1/log" fstype="xfs" op monitor interval="20" timeout="40"
 +  crm configure primitive vtsm1archlog ocf:heartbeat:Filesystem params device="/dev/vg01/lvol3" directory="/vtsm1/archlog:" fstype="xfs" op monitor interval="20" timeout="40"
 +  crm configure primitive vtsm1disk ocf:heartbeat:Filesystem params device="/dev/vg01/lvol4" directory="/vtsm1/disk" fstype="xfs" op monitor interval="20" timeout="40"
 +  crm configure primitive vtsm1server ocf:heartbeat:Filesystem params device="/dev/vg01/lvol5" directory="/vtsm1/server" fstype="xfs" op monitor interval="20" timeout="40"
 +  crm configure group vtsm1 vtsm1IP vtsm1LVM vtsm1db vtsm1log vtsm1archlog vtsm1disk vtsm1server meta target-role="Started" is-managed="true"
 +  crm configure location cli-standby-vtsm1 vtsm1 rule $id="cli-standby-rule-vtsm1" -inf: #uname eq tsmsrv1
 +
 +  crm configure primitive vtsm1run lsb:tsmserver_vtsm1  
 +
 +
 +
 +  crm_resource -D --resource vtsm1 -t group
 +  crm_resource -D --resource vtsm1LVM -t primitive
 +  crm_resource -D --resource vtsm1db -t primitive
 +  crm_resource -D --resource vtsm1log -t primitive
 +  crm_resource -D --resource vtsm1archlog -t primitive
 +  crm_resource -D --resource vtsm1disk -t primitive  
 +  crm_resource -D --resource vtsm1server -t primitive
 +  crm_resource -D --resource vtsm1IP -t primitive
 + 
 +   primitive tsmtlmLVM ocf:heartbeat:LVM \
 +        params volgrpname="vg02" exclusive="yes" \
 +        op monitor interval="60s" timeout="60s"
 +
 +   primitive vtsm1db ocf:heartbeat:Filesystem \
 +        params device="/dev/vg01/lvol1" directory="/vtsm1/db" fstype="xfs" \
 +        op monitor interval="20" timeout="40"
 +   primitive tsmtlmrun lsb:tsmserver_tsmtlm
 +   primitive vtsm1db ocf:heartbeat:Filesystem \
 +        params device="/dev/vg01/lvol2" directory="/vtsm1/log" fstype="xfs" \
 +        op monitor interval="20" timeout="40"
 +   primitive vtsm1db ocf:heartbeat:Filesystem \
 +        params device="/dev/vg01/lvol3" directory="/vtsm1/archlog" fstype="xfs" \
 +        op monitor interval="20" timeout="40"
 +   primitive vtsm1db ocf:heartbeat:Filesystem \
 +        params device="/dev/vg01/lvol4" directory="/vtsm1/disk" fstype="xfs" \
 +        op monitor interval="20" timeout="40"
 +   primitive vtsm1db ocf:heartbeat:Filesystem \
 +        params device="/dev/vg01/lvol5" directory="/vtsm1/server" fstype="xfs" \
 +        op monitor interval="20" timeout="40"
 +
 +   primitive tsmtrun lsb:tsmserver_tsmt
 +   group tsmt tsmtIP tsmtLVM tsmtMount1 tsmtMount2 tsmtMount3 tsmtMount4 tsmtMount5 tsmtrun \
 +        meta target-role="Started" is-managed="true"
 +   group tsmtlm tsmtlmIP tsmtlmLVM tsmtlmMount1 tsmtlmMount2 tsmtlmMount3 tsmtlmMount4 tsmtlmrun \
 +        meta target-role="Started"
 +   location cli-standby-tsmt tsmt \
 +        rule $id="cli-standby-rule-tsmt" -inf: #uname eq tsmlintst1
 +   location cli-standby-tsmtlm tsmtlm \
 +        rule $id="cli-standby-rule-tsmtlm" -inf: #uname eq tsmlintst2
 +   property $id="cib-bootstrap-options" \
 +        dc-version="1.1.6-b988976485d15cb702c9307df55512d323831a5e" \
 +        cluster-infrastructure="openais" \
 +        expected-quorum-votes="2" \
 +        stonith-enabled="false" \
 +        no-quorum-policy="ignore" \
 +
 +
 +  crm configure primitive vtsm1ip ocf:IPaddr2 params ip=192.168.56.111 cidr_netmask=32 nic=eth2 op monitor interval=30s
 +
 +move vg between clusternodes:
 +   http://www.tldp.org/HOWTO/LVM-HOWTO/recipemovevgtonewsys.html
 +
 +   vgchange 
 +   vgexport
 +   vgimport
 +   vgchange
 +
 +
 +create sbd device for pacemaker
 +  tsmsrv1:~ # sbd -d /dev/sdb create
 +  Initializing device /dev/sdb
 +  Creating version 2.1 header on device 3 (uuid: 5f960a58-8429-4397-ad4b-b2b879aa600c)
 +  Initializing 255 slots on device 3
 +  Device /dev/sdb is initialized.
 +  tsmsrv1:~ #
 +  tsmsrv1:~ #
 +  tsmsrv1:~ # /etc/init.d/
 +  tsmsrv1:~ # /etc/init.d/openais start
 +  Starting OpenAIS/Corosync daemon (corosync): Starting SBD - starting... OK
 +
 +  tsmsrv1:/etc/lvm # lsblk
 +  NAME   MAJ:MIN RM   SIZE RO MOUNTPOINT
 +  sda      8:0    0    20G  0
 +  ├─sda1   8:   0     2G  0 [SWAP]
 +  └─sda2   8:   0    18G  0 /
 +  sr0     11:   1  1024M  0
 +  sdb      8:16    1020M  0
 +  sdc      8:32      10G  0
 +
 +
 +crm configure primitive clvm ocf:lvm2:clvmd params daemon_timeout="30"
 +crm configure primitive vg1 ocf:heartbeat:LVM params volgrpname="cluster-vg" exclusive="yes" op monitor interval="60" timeout="60"
  
 ==== Lin_tape / udev ==== ==== Lin_tape / udev ====
 +
 +barcode length @lin_tape
 +
 +options lin_tape ibm3592_barcode=8
 +options lin_tape lto_barcode=6
 +
 +Stop and start the driver by running these commands.
 +   lin_taped stop
 +   rmmod lin_tape
 +   depmod
 +   modprobe lin_tape (reinstall back)
 +   lin_taped (restart the daemon)
  
 If there is a tape device attached to the Linux system with world-wide port name If there is a tape device attached to the Linux system with world-wide port name
Line 190: Line 481:
 4 new device(s) found. 4 new device(s) found.
 0 device(s) removed. 0 device(s) removed.
 +
 +  echo "- - -" > /sys/class/scsi_host/host13/scan
 +  echo "- - -" > /sys/class/scsi_host/host14/scan
 +
 +  echo 1 >/sys/class/fc_host/host13/issue_lip
 +  echo 1 >/sys/class/fc_host/host14/issue_lip
 +  
 +  
 +===== systool =====
 +
 +   systool -c fc_host
 +   systool -c fc_remote_ports -v | grep Blocked | wc -l
 +   systool -c fc_remote_ports -v | grep "Not Present" | wc -l
 +
 +
 +
  
 ==== Configuring Path Failover Policies ==== ==== Configuring Path Failover Policies ====
Line 215: Line 522:
   -rw-r--r-- 1 root root 4096 Aug 27 08:59 tgtid_bind_type   -rw-r--r-- 1 root root 4096 Aug 27 08:59 tgtid_bind_type
   --w------- 1 root root 4096 Aug 27 08:59 uevent   --w------- 1 root root 4096 Aug 27 08:59 uevent
 +
 +
 +systool -av -c fc_host
 +
 +
 +=== Install Server ALWAYS without SAN connected ===
 +
 +Otherwise the local HDD might be recognized after the SAN storage, resulting in the local disk NOT being /dev/sda
 +How to correct this:
 +
 +in /etc/sysconfig/kernel
 +  INITRD_MODULES="...."
 +
 +make sure that the HBA module is loaded after ahci
 +
 +
  
sles.1286374604.txt.gz · Last modified: 2021/04/24 07:38 (external edit)