SLES

Add Salt Minion to SUSE Manager

 # cleanup if necessary
 rpm -qa | grep salt
 rpm -e salt-2016.11.10-43.54.1.x86_64 salt-minion-2016.11.10-43.54.1.x86_64
 rm -r /etc/salt
 rm /etc/zypp/services.d/*
 rm /etc/zypp/repos.d/*
 # connect minion. 
 zypper ar http://my.sum.url.de/pub/repositories/sle/12/5/bootstrap sles12-sp5
 zypper in salt-minion
 sed -i 's/\#master: salt/master: my.sum.url.de/' /etc/salt/minion
 rm /etc/machine-id
 systemd-machine-id-setup
 rm /var/lib/dbus/machine-id
 dbus-uuidgen --ensure
 systemctl enable salt-minion.service
 hostname > /etc/salt/minion_id
 echo "deployment: asvpn" >> /etc/salt/grains
 rcsalt-minion start
 # wait until the minion has registered and its key has been accepted on the SUSE Manager server
 zypper lr
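
On the SUSE Manager side, the new minion key usually has to be accepted before any channels show up in zypper lr. A minimal sketch using the standard salt-key tool, run on the SUSE Manager server (the minion-id placeholder is an assumption):

 salt-key -l unaccepted          # list pending minion keys
 salt-key -a <minion-id>         # accept the key of the new minion
 salt '<minion-id>' test.ping    # verify the minion responds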

iSCSI

 iscsiadm -m node iqn.2013-06.home.itbetrieb:tsm --login
 iscsiadm -m node --logout

iscsiadm -m node iqn.2014-01.home.itbetrieb:tsm.disk1 --portal "192.168.56.50:3260"
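
If the target IQNs are not yet known to the initiator, they can be discovered first. A sketch using standard open-iscsi sendtargets discovery against the portal used above:

 iscsiadm -m discovery -t sendtargets -p 192.168.56.50:3260
 iscsiadm -m node                # list the node records created by discovery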

iSCSI target

Bind to a specific interface. This is not possible through /etc/ietd.conf; instead, change this line in /etc/init.d/iscsitarget:

      startproc -p $PIDFILE $DAEMON 

and add the --address parameter:

      startproc -p $PIDFILE $DAEMON --address 192.168.56.100
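
To check that the target really listens only on that address, something like this should work (netstat is assumed to be installed):

 netstat -tlnp | grep 3260       # expect 192.168.56.100:3260, not 0.0.0.0:3260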

Connect to the iSCSI target:

 tsm2:/etc/init.d # iscsiadm -m node -T iqn.2013-06.home.itbetrieb:tsm --login

List all iSCSI volumes that are available on the target:

infra:/dev # cat /proc/net/iet/volume
tid:1 name:iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221
      lun:0 state:0 iotype:fileio iomode:wt blocks:20963328 blocksize:512 path:/dev/vg01/lvol1
      lun:1 state:0 iotype:fileio iomode:wt blocks:9830400 blocksize:512 path:/dev/vg02/lvol1
      lun:2 state:0 iotype:fileio iomode:wt blocks:9830400 blocksize:512 path:/dev/vg02/lvol2
      lun:3 state:0 iotype:fileio iomode:wt blocks:9830400 blocksize:512 path:/dev/vg02/lvol3
      lun:4 state:0 iotype:fileio iomode:wt blocks:33415168 blocksize:512 path:/dev/vg02/lvol4

List all volumes/disks that are available at the initiator after an iSCSI login:

tsmsrv1:/dev # lsscsi
[0:0:0:0]    disk    ATA      VBOX HARDDISK    1.0   /dev/sda
[2:0:0:0]    cd/dvd  VBOX     CD-ROM           1.0   /dev/sr0
[7:0:0:0]    disk    IET      VIRTUAL-DISK     0     /dev/sdb
[7:0:0:1]    disk    IET      VIRTUAL-DISK     0     /dev/sdc
[7:0:0:2]    disk    IET      VIRTUAL-DISK     0     /dev/sdd
[7:0:0:3]    disk    IET      VIRTUAL-DISK     0     /dev/sde
[7:0:0:4]    disk    IET      VIRTUAL-DISK     0     /dev/sdf

SLEHA - corosync pacemaker

 pvcreate /dev/sdb
 pvcreate /dev/sdc
 pvcreate /dev/sdd
 pvcreate /dev/sde
 pvcreate /dev/sdf

 lrwxrwxrwx 1 root root 9 Feb  5 12:48 ip-192.168.56.100:3260-iscsi-iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221-lun-0 -> ../../sdb
 lrwxrwxrwx 1 root root 9 Feb  5 12:48 ip-192.168.56.100:3260-iscsi-iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221-lun-1 -> ../../sdc
 lrwxrwxrwx 1 root root 9 Feb  5 12:48 ip-192.168.56.100:3260-iscsi-iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221-lun-2 -> ../../sdd
 lrwxrwxrwx 1 root root 9 Feb  5 12:48 ip-192.168.56.100:3260-iscsi-iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221-lun-3 -> ../../sde
 lrwxrwxrwx 1 root root 9 Feb  5 12:48 ip-192.168.56.100:3260-iscsi-iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221-lun-4 -> ../../sdf

sdb disk:

 iscsiadm -m node -T iqn.2014-02.home.itbetrieb:71bda8c2-ac75-48a9-a30b-7f958d3bcf8f --login

Log in to ONLY ONE target:

 iscsiadm -m node -T iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221 --login
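
To make the initiator log in to that one target automatically at boot, the node record can be switched to automatic startup. A sketch with open-iscsi:

 iscsiadm -m node -T iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221 \
   --op update -n node.startup -v automatic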

 infra:/etc # cat /proc/net/iet/session
 tid:1 name:iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221
       sid:562950141968896 initiator:iqn.2014-02.home.itbetrieb:01:57575330903e
               cid:0 ip:192.168.56.101 state:active hd:none dd:none

VGs unavailable after pointing SBD at the wrong device:

infra:/dev # vgchange -a y vg01

1 logical volume(s) in volume group "vg01" now active

infra:/dev # vgchange -a y vg02

4 logical volume(s) in volume group "vg02" now active

infra:/dev # vgchange -a y vg04

Volume group "vg04" not found

infra:/dev # vgchange -a y vg03

1 logical volume(s) in volume group "vg03" now active
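
If a VG is still missing after fixing the SBD device, rescanning for PVs and VGs usually brings it back. A sketch:

 pvscan                          # rescan all block devices for PV labels
 vgscan                          # rebuild the list of volume groups
 vgs                             # vg04 should reappear if its PVs are visible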

LATEST ERROR

Retrieving SSH keys from 192.168.56.101

Configuring csync2
Enabling csync2 service
Enabling xinetd service
Merging known_hosts
Probing for new partitions......ERROR: Failed to probe new partitions

 tsmsrv2:/dev/disk/by-path # crm status
 Could not establish cib_ro connection: Connection refused (111)
 ERROR: crm_mon exited with code 107 and said: Connection to cluster failed: Transport endpoint is not connected

 infra:/var/log # cat /proc/net/iet/volume
 tid:2 name:iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221

      lun:0 state:0 iotype:fileio iomode:wt blocks:20963328 blocksize:512 path:/dev/vg01/lvol1
      lun:1 state:0 iotype:fileio iomode:wt blocks:9830400 blocksize:512 path:/dev/vg02/lvol1
      lun:2 state:0 iotype:fileio iomode:wt blocks:9830400 blocksize:512 path:/dev/vg02/lvol2
      lun:3 state:0 iotype:fileio iomode:wt blocks:9830400 blocksize:512 path:/dev/vg02/lvol3
      lun:4 state:0 iotype:fileio iomode:wt blocks:33415168 blocksize:512 path:/dev/vg02/lvol4

tid:1 name:iqn.2014-02.home.itbetrieb:71bda8c2-ac75-48a9-a30b-7f958d3bcf8f

      lun:0 state:0 iotype:fileio iomode:wt blocks:2088960 blocksize:512 path:/dev/vg03/lvol1

If no LUNs show up: restart iscsitarget.

Delete or modify a resource

 crm_resource -D --resource vtsm1-target -t primitive
 # crm resource param <resource> set <name> <value>
 # crm resource param <resource> delete <name>
 # crm resource param <resource> show <name>

Get rid of failed messages:

# crm resource cleanup <resource>

# enable cluster mode for a VG:

  vgchange -c y
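
For example, assuming vg01 is the VG in question (the sixth character of the vg_attr field is 'c' when the clustered flag is set):

  vgchange -c y vg01
  vgs -o vg_name,vg_attr        # 6th attr character 'c' = clustered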

node tsmsrv1
node tsmsrv2

# mirroring (not wanted in this case)
# crm configure primitive iscsi-drbd ocf:linbit:drbd params drbd_resource="iscsi" op monitor interval="30s" op start interval="0" timeout="240s" op stop interval="0" timeout="100s"
# crm configure primitive vtsm1target ocf:heartbeat:iSCSITarget params iqn="iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221" tid="2" op monitor interval="30s"
# crm configure primitive vtsm1lun0 ocf:heartbeat:iSCSILogicalUnit params target_iqn="iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221" lun="0" path="/dev/disk/by-path/ip-192.168.56.100:3260-iscsi-iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221-lun-0"
# crm configure primitive vtsm1lun1 ocf:heartbeat:iSCSILogicalUnit params target_iqn="iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221" lun="1" path="/dev/drbd1"
# crm configure primitive vtsm1lun2 ocf:heartbeat:iSCSILogicalUnit params target_iqn="iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221" lun="2" path="/dev/drbd1"
# crm configure primitive vtsm1lun3 ocf:heartbeat:iSCSILogicalUnit params target_iqn="iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221" lun="3" path="/dev/drbd1"
# crm configure primitive vtsm1lun4 ocf:heartbeat:iSCSILogicalUnit params target_iqn="iqn.2014-02.home.itbetrieb:72f59e27-9c61-44ce-9936-74c788cdb221" lun="4" path="/dev/drbd1"
crm configure primitive vtsm1IP ocf:heartbeat:IPaddr params ip="192.168.56.111" op monitor interval="10s" 
crm configure primitive vtsm1LVM ocf:heartbeat:LVM params volgrpname="vg01" exclusive="yes" op monitor interval="60s" timeout="60s"
crm configure primitive vtsm1db ocf:heartbeat:Filesystem params device="/dev/vg01/lvol1" directory="/vtsm1/db" fstype="xfs" op monitor interval="20" timeout="40"
crm configure primitive vtsm1log ocf:heartbeat:Filesystem params device="/dev/vg01/lvol2" directory="/vtsm1/log" fstype="xfs" op monitor interval="20" timeout="40"
crm configure primitive vtsm1archlog ocf:heartbeat:Filesystem params device="/dev/vg01/lvol3" directory="/vtsm1/archlog" fstype="xfs" op monitor interval="20" timeout="40"
crm configure primitive vtsm1disk ocf:heartbeat:Filesystem params device="/dev/vg01/lvol4" directory="/vtsm1/disk" fstype="xfs" op monitor interval="20" timeout="40"
crm configure primitive vtsm1server ocf:heartbeat:Filesystem params device="/dev/vg01/lvol5" directory="/vtsm1/server" fstype="xfs" op monitor interval="20" timeout="40"
crm configure group vtsm1 vtsm1IP vtsm1LVM vtsm1db vtsm1log vtsm1archlog vtsm1disk vtsm1server meta target-role="Started" is-managed="true"
crm configure location cli-standby-vtsm1 vtsm1 rule $id="cli-standby-rule-vtsm1" -inf: #uname eq tsmsrv1
crm configure primitive vtsm1run lsb:tsmserver_vtsm1  
crm_resource -D --resource vtsm1 -t group
crm_resource -D --resource vtsm1LVM -t primitive
crm_resource -D --resource vtsm1db -t primitive
crm_resource -D --resource vtsm1log -t primitive
crm_resource -D --resource vtsm1archlog -t primitive
crm_resource -D --resource vtsm1disk -t primitive  
crm_resource -D --resource vtsm1server -t primitive
crm_resource -D --resource vtsm1IP -t primitive

 primitive tsmtlmLVM ocf:heartbeat:LVM \
      params volgrpname="vg02" exclusive="yes" \
      op monitor interval="60s" timeout="60s"
 primitive vtsm1db ocf:heartbeat:Filesystem \
      params device="/dev/vg01/lvol1" directory="/vtsm1/db" fstype="xfs" \
      op monitor interval="20" timeout="40"
 primitive tsmtlmrun lsb:tsmserver_tsmtlm
 primitive vtsm1log ocf:heartbeat:Filesystem \
      params device="/dev/vg01/lvol2" directory="/vtsm1/log" fstype="xfs" \
      op monitor interval="20" timeout="40"
 primitive vtsm1archlog ocf:heartbeat:Filesystem \
      params device="/dev/vg01/lvol3" directory="/vtsm1/archlog" fstype="xfs" \
      op monitor interval="20" timeout="40"
 primitive vtsm1disk ocf:heartbeat:Filesystem \
      params device="/dev/vg01/lvol4" directory="/vtsm1/disk" fstype="xfs" \
      op monitor interval="20" timeout="40"
 primitive vtsm1server ocf:heartbeat:Filesystem \
      params device="/dev/vg01/lvol5" directory="/vtsm1/server" fstype="xfs" \
      op monitor interval="20" timeout="40"
 primitive tsmtrun lsb:tsmserver_tsmt
 group tsmt tsmtIP tsmtLVM tsmtMount1 tsmtMount2 tsmtMount3 tsmtMount4 tsmtMount5 tsmtrun \
      meta target-role="Started" is-managed="true"
 group tsmtlm tsmtlmIP tsmtlmLVM tsmtlmMount1 tsmtlmMount2 tsmtlmMount3 tsmtlmMount4 tsmtlmrun \
      meta target-role="Started"
 location cli-standby-tsmt tsmt \
      rule $id="cli-standby-rule-tsmt" -inf: #uname eq tsmlintst1
 location cli-standby-tsmtlm tsmtlm \
      rule $id="cli-standby-rule-tsmtlm" -inf: #uname eq tsmlintst2
 property $id="cib-bootstrap-options" \
      dc-version="1.1.6-b988976485d15cb702c9307df55512d323831a5e" \
      cluster-infrastructure="openais" \
      expected-quorum-votes="2" \
      stonith-enabled="false" \
      no-quorum-policy="ignore"

 crm configure primitive vtsm1ip ocf:heartbeat:IPaddr2 params ip=192.168.56.111 cidr_netmask=32 nic=eth2 op monitor interval=30s

Move a VG between cluster nodes (a worked sequence follows the list):

 http://www.tldp.org/HOWTO/LVM-HOWTO/recipemovevgtonewsys.html
 vgchange 
 vgexport
 vgimport
 vgchange
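
A sketch of the full sequence, assuming vg01 is the VG being moved and its LUNs are reachable from both nodes (the mountpoint is an example):

 # on the old node
 umount /vtsm1/db                # unmount all filesystems in the VG first
 vgchange -a n vg01              # deactivate the VG
 vgexport vg01                   # mark it as exported
 # on the new node
 vgimport vg01                   # import the VG
 vgchange -a y vg01              # activate it again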

Create an SBD device for Pacemaker

tsmsrv1:~ # sbd -d /dev/sdb create
Initializing device /dev/sdb
Creating version 2.1 header on device 3 (uuid: 5f960a58-8429-4397-ad4b-b2b879aa600c)
Initializing 255 slots on device 3
Device /dev/sdb is initialized.
tsmsrv1:~ # /etc/init.d/openais start
Starting OpenAIS/Corosync daemon (corosync): Starting SBD - starting... OK
tsmsrv1:/etc/lvm # lsblk
NAME   MAJ:MIN RM   SIZE RO MOUNTPOINT
sda      8:0    0    20G  0
├─sda1   8:1    0     2G  0 [SWAP]
└─sda2   8:2    0    18G  0 /
sr0     11:0    1  1024M  0
sdb      8:16   0  1020M  0
sdc      8:32   0    10G  0
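
To verify the SBD header and slots, and to hook the device into the cluster stack at boot, something like the following should work (SBD_DEVICE in /etc/sysconfig/sbd is the usual SLES HA mechanism; exact variable names may differ between releases):

 sbd -d /dev/sdb dump            # print the metadata header
 sbd -d /dev/sdb list            # show the messaging slots
 # /etc/sysconfig/sbd
 SBD_DEVICE="/dev/sdb"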

 crm configure primitive clvm ocf:lvm2:clvmd params daemon_timeout="30"
 crm configure primitive vg1 ocf:heartbeat:LVM params volgrpname="cluster-vg" exclusive="yes" op monitor interval="60" timeout="60"

Lin_tape / udev

Barcode length for lin_tape

 options lin_tape ibm3592_barcode=8
 options lin_tape lto_barcode=6

Stop and start the driver by running these commands.

 lin_taped stop                # stop the daemon
 rmmod lin_tape                # unload the driver
 depmod
 modprobe lin_tape             # reload the driver
 lin_taped                     # restart the daemon
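
To confirm that the driver came back and the device nodes were recreated:

 lsmod | grep lin_tape           # module loaded again?
 ls -l /dev/IBMtape*             # device nodes recreated?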

If there is a tape device attached to the Linux system with world-wide port name “0x123456789ABCDEF0” with a current device name of /dev/IBMtape0, a user could run udevadm info to obtain information on exported attributes for this device. This could be done as follows:

udevadm info --attribute-walk --name /dev/IBMtape0

The output of this command should include something similar to the following:

 ATTRS{serial_num}=="123456789"
 ATTRS{ww_node_name}=="0x123456789ABCDEF1"
 ATTRS{ww_port_name}=="0x123456789ABCDEF0"

Note: Variations exist between kernels. For example, if you are using udevinfo, you should enter the previous command as

udevinfo -a -p `udevinfo -q path -n /dev/IBMtape0`

Also, on some kernels an attribute ATTRS{xxx} will be replaced by SYSFS{xxx}. Furthermore, some kernels use a '=' (single equal sign) to indicate an attribute match as well as an assignment, whereas other kernels use a '==' (double equal sign) for a match and '=' for assignment. Place the attribute from the attribute list into your rules file exactly as it appears in the attribute list, as described below. We can use the ww_port_name in a .rules file that assigns a symbolic link to a device with the listed world-wide port name. The file is typically placed in /etc/udev/rules.d, but this location may be changed by the udev_rules directive in the /etc/udev/rules.conf file. In this example, we create a file called /etc/udev/rules.d/98-lin_tape.rules and write a single line to the file:

 KERNEL=="IBMtape*", ATTRS{ww_port_name}=="0x123456789ABCDEF0", SYMLINK="lin_tape/by-id/lin_tape480110"

Assuming that the udev service is running and configured correctly, the user can install or reinstall lin_tape using modprobe, and the symbolic link should be created in the /dev/lin_tape/by-id folder. One line should be added to the 98-lin_tape.rules file for each symbolic link desired.
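
After editing the rules file, udev can be told to pick it up without reinstalling the module. A sketch (on older udev versions the command names differ):

 udevadm control --reload-rules
 udevadm trigger
 ls -l /dev/lin_tape/by-id/      # the symbolic link should appear here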

DD

Creating a large file for backup and restore purposes

dd if=/dev/zero of=100GB.FILE bs=10k count=10000000          # compresses optimally 8-)
dd if=/dev/urandom of=100GB.RANDOM.FILE bs=10k count=10000000

dd if=/dev/zero of=/dev/sdd bs=1G count=1 oflag=direct
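
To watch the progress of a long-running dd: GNU dd understands status=progress (newer coreutils only); on older systems, sending SIGUSR1 makes dd print its statistics:

 dd if=/dev/zero of=100GB.FILE bs=10k count=10000000 status=progress
 # or, with an older dd:
 kill -USR1 $(pidof dd)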

LVM2

Prepare one or more hard disks for LVM

pvcreate /dev/sda /dev/mapper/myname 

Creating a VolumeGroup

vgcreate -s 4M vgname /dev/sda /dev/mapper/myname

Configuring a Logical Volume in the Group

lvcreate -l 100%FREE -n lvol01 vgname

Mount

mount -t xfs device mountpoint
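
To make the mount persistent across reboots, an /etc/fstab entry along these lines would be typical (device and mountpoint are examples):

 # /etc/fstab
 /dev/vgname/lvol01   /mnt/data   xfs   defaults   0 0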

Multipathing

dry run, verbosity level 2:

multipath -v2 -d

list configured devices

multipath -ll

/etc/multipath.conf - set user-friendly names for devices

## Use user friendly names, instead of using WWIDs as names.
defaults {
       user_friendly_names yes
#       multipath_tool  "/sbin/multipath -v0"
#       udev_dir        /dev
#      polling_interval 10
#       default_selector        "round-robin 0"
#       default_path_grouping_policy    failover
#       default_getuid_callout  "/sbin/scsi_id -g -u -s /block/%n" 
#        default_prio_callout    "/bin/true"
#       default_features        "0"
#       rr_min_io               100
       failback                immediate
}
devices {
  device {
      vendor "DGC "
      product "LUNZ            "
      path_grouping_policy group_by_priority
      getuid_callout "/sbin/scsi_id -g -u -s /block/%n"
      prio_callout    "/sbin/mpath_prio_emc /dev/%n"
      path_checker    emc_clariion
      path_selector   "round-robin 0"
      features        "0"
      #features        "1 queue_if_no_path"
      hardware_handler "1 emc"
  }
}
# the individual LUNs with descriptive names
multipaths {
      multipath {
              wwid    360060160113127004648e477123145678
              alias   meineLUN1
      }
} 
# list all devices here that should not be used via multipathing
devnode_blacklist {
      wwid 3600508e0000000003ce93cc6b29deb04
      devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st|sda)[0-9]*"
      devnode "^hd[a-z][0-9]*"
}
# make sure the CLARiiON is not blacklisted
blacklist_exceptions {
      device {
              vendor  "DGC "
              product "LUNZ            "
      }
}
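
After changing /etc/multipath.conf, the running multipathd has to re-read it. A sketch (command names vary slightly between releases):

 multipathd -k"reconfigure"      # tell the running daemon to re-read the config
 multipath -ll                   # check that aliases and policies took effect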
Commands to verify the settings are correct:

Make sure the devices are detected correctly.

Rescan SCSI Bus

 myserver:/ # rescan_scsi_bus
 ...
 Scanning for device 9 0 1 0 ...
 OLD: Host: scsi9 Channel: 00 Id: 01 Lun: 00
      Vendor: DGC      Model: LUNZ             Rev: 0429
      Type:   Direct-Access                    ANSI SCSI revision: 04
 Scanning for device 9 0 1 1 ...
 OLD: Host: scsi9 Channel: 00 Id: 01 Lun: 01
      Vendor: DGC      Model: RAID 5           Rev: 0429
      Type:   Direct-Access                    ANSI SCSI revision: 04
 Scanning for device 9 0 1 2 ...
 NEW: Host: scsi9 Channel: 00 Id: 01 Lun: 02
      Vendor: DGC      Model: RAID 5           Rev: 0429
      Type:   Direct-Access                    ANSI SCSI revision: 04
 4 new device(s) found.
 0 device(s) removed.

echo "- - -" > /sys/class/scsi_host/host13/scan
echo "- - -" > /sys/class/scsi_host/host14/scan
echo 1 >/sys/class/fc_host/host13/issue_lip
echo 1 >/sys/class/fc_host/host14/issue_lip

systool

 systool -c fc_host
 systool -c fc_remote_ports -v | grep Blocked | wc -l
 systool -c fc_remote_ports -v | grep "Not Present" | wc -l

Configuring Path Failover Policies

FC-Adapter Details

tuxbox:/sys/class/fc_host/host10 # ll
total 0
-r--r--r-- 1 root root 4096 Aug 27 08:59 active_fc4s
lrwxrwxrwx 1 root root    0 Aug 27 08:59 device -> ../../../devices/pci0000:00/0000:00:0b.0/0000:08:00.0/host1
-r--r--r-- 1 root root 4096 Aug 27 08:59 fabric_name
--w------- 1 root root 4096 Aug 27 08:59 issue_lip
-r--r--r-- 1 root root 4096 Aug 27 08:59 maxframe_size
-r--r--r-- 1 root root 4096 Aug 27 08:59 node_name
-r--r--r-- 1 root root 4096 Aug 27 08:59 port_id
-r--r--r-- 1 root root 4096 Aug 27 08:59 port_name
-r--r--r-- 1 root root 4096 Aug 27 08:59 port_state
-r--r--r-- 1 root root 4096 Aug 27 08:59 port_type
-r--r--r-- 1 root root 4096 Aug 27 08:59 speed
drwxr-xr-x 2 root root    0 Aug 27 09:00 statistics
-r--r--r-- 1 root root 4096 Aug 27 08:59 supported_classes
-r--r--r-- 1 root root 4096 Aug 27 08:59 supported_fc4s
-r--r--r-- 1 root root 4096 Aug 27 08:59 supported_speeds
-rw-r--r-- 1 root root 4096 Aug 27 08:59 tgtid_bind_type
--w------- 1 root root 4096 Aug 27 08:59 uevent

systool -av -c fc_host

Always install the server WITHOUT the SAN connected

Otherwise the local HDD might be recognized after the SAN storage, resulting in the local disk NOT being /dev/sda. How to correct this:

in /etc/sysconfig/kernel

INITRD_MODULES="...."

make sure that the HBA module is listed after ahci
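
A sketch of the fix, assuming a QLogic HBA (qla2xxx is only an example module name); the initrd must be rebuilt afterwards:

 # /etc/sysconfig/kernel - ahci first, HBA module after it
 INITRD_MODULES="ahci qla2xxx"

 mkinitrd                        # rebuild the initrd so the new order takes effect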
