<%# Sort the analyzer tabs by the third element of each tab key
    (key is destructured below as [label, tabname, visibility]).
    NOTE(review): the HTML around these ERB tags appears to have been
    stripped; the dangling ng-init attribute and trailing ">" on the first
    line presumably belong to a surrounding element -- confirm against the
    original template. %>
<% inventoryAnalyzerTabs = inventoryAnalyzerTabs.sort_by do |key, val| key[2] end %> <%= refreshString %> <%= testTitleString %> <% if opts[:timerange] %> <% end %> ng-init="timerangeOn='1'; timerangeShowing=<%= JSON.dump(inventoryAnalyzer.dumpTimerange).gsub('"', "'") %>;" <% end %> >
<% if opts[:timerange] %>
<%# Emit each tab's content in sorted order; i tracks the tab index so the
    first tab can be special-cased when the Tasks tab is skipped. %>
<% end %> <% i = 0 inventoryAnalyzerTabs.each do |key, content| label, tabname, visibility = key %>
<%= content %> <% if i == 0 && skipTasksTab %> <% end %>
<% i += 1 end %>

What am I looking at

This view shows how various physical and in-memory objects are distributed across the vSAN cluster.

Components are the slices/pieces of vSAN objects stored on HDDs. They include IO components and witnesses. Balancing components across the cluster is an important side effect of balancing for performance and space. In addition, v1 of vSAN has a limit of 3000 components per host.

DOM owners are in-memory state inside vSAN that the user can't control. The information is shown here for the benefit of VMware Support.

<%# Build the markup for the per-host distribution graphs, one entry per
    stat type, accumulated into the `graphs` string and emitted below.
    NOTE(review): the per-type markup inside the loop appears to have been
    stripped by extraction -- only the embedded newline survives. The string
    literal opened at the end of the first line deliberately spans the line
    break. %>
<% graphs = "" prefix = "distribution" types = ['lsom-components', 'lsom-iocomponents', 'dom-owners', 'dom-clients', 'dom-colocated'] types.each do |i| graphs += "
\n" end graphs += "" %> Distributions across hosts: Full graphs
<%= graphs %>
<%# Render the per-host section only when the Tasks tab is not skipped.
    Fix: removed the redundant "|| false", which was a no-op
    (x || false is equivalent to x for the boolean test). The "if !" form
    is kept for consistency with the other skipTasksTab checks in this
    template. %>
<% if !skipTasksTab %>
{{host}}
<% end %>
{{host}}
Full graphs
<%# NFS / VSCSI / aggregate per-layer stats sections, gated behind the
    $rvc_observer_enable_nfstab global flag. %>
<% if $rvc_observer_enable_nfstab %>
<%# Per-mount NFS graphs: resolve each mount's (group, file) pair to a
    sanitized div name. Fix: removed a leftover `pp [group, file, divname]`
    debug print that wrote to the generating process's stdout for every
    NFS mount. %>
<% keyInfos = inventoryAnalyzer.nfsStats.keyInfos hostnames = keyInfos.values.map{|x| x[:hostname]}.uniq %> <% keyInfos.values.each do |mount| %> <% mounts = [mount] mounts.each do |mnt| group, file = inventoryAnalyzer.nfsStats.invFiles[mnt.values] divname = "#{group}-#{file}".gsub(".", "-") %> <% end %> <% end %>
<%= mount[:hostname] %>
<%= mount[:mnt] %>
<%# Per-host VSCSI graphs, sorted by hostname. %>
<% keyInfos = inventoryAnalyzer.vscsiHostStats.keyInfos keyInfos = keyInfos.values.sort_by{|x| x[:hostname]} %> <% keyInfos.each do |key| %> <% group, file = inventoryAnalyzer.vscsiHostStats.invFiles[key.values] divname = "#{group}-#{file}".gsub(".", "-") %> <% end %>
<%= key[:hostname] %>
Full graphs
<%# Aggregate summary graphs, one per storage layer:
    [display label, stats group, summary file]. %>
<% keyInfos = inventoryAnalyzer.vscsiHostStats.keyInfos keyInfos = keyInfos.values.sort_by{|x| x[:hostname]} %> <% [ ['NFS', 'nfs', 'nfssum'], ['VSCSI', 'misc', 'vscsisum'], ['vSAN Client', 'dom', 'domclientsum'], ['vSAN Owner', 'dom', 'domownersum'], ['vSAN Disks', 'dom', 'domcompmgrsum'], ['vSAN Owner (VM home)', 'dom', 'domvmhomesum'], ['vSAN Owner (Disks)', 'dom', 'domvmdiskssum'], ['LSOM Disks', 'lsom', 'lsomsum'], ['Phys Disk (cache)', 'lsom', 'physdiskcachesum'], ['Phys Disk (capacity)', 'lsom', 'physdiskcapacitysum'], ['SSDs', 'lsom', 'ssdsum'], ].each do |label, group, file| %> <% divname = "#{group}-#{file}".gsub(".", "-") %> <% end %>
<%= label %>
Full graphs
<% end %>
<%# LSOM congestion graphs: group the stats by host (sorted by hostname),
    then iterate each host's devices, deriving a sanitized div name from
    the device's (group, file) pair.
    NOTE(review): the per-device markup that used divname appears to have
    been stripped from inside the inner loop. %>
<% keyInfos = inventoryAnalyzer.lsomCongestion.keyInfos hostnames = keyInfos.values.map{|x| x[:hostname]}.uniq %> <% keyInfos.values.group_by{|x| x[:hostname]}.sort_by do |h, info| h end.each do |hname, devices| %> <% devices.each do |dev| group, file = inventoryAnalyzer.lsomCongestion.invFiles[dev.values] divname = "#{group}-#{file}".gsub(".", "-") %> <% end %> <% end %>
<%= hname %>
<%= dev[:dev] %>
Full graphs
<%# CLOM fitness section; only rendered when the Tasks tab is enabled. %>
<% if !skipTasksTab %>

What am I looking at

This view shows the fitness data that CLOM uses to select disks for components.

The information is shown here for the benefit of VMware Support.

<%# One fitness graph per host, in sorted order, keyed by a sanitized
    "fitness-<hostname>" div name (dots replaced for DOM-id safety). %>
<% inventoryAnalyzer.fitnessStats.keys.sort.each do |hostname| params = inventoryAnalyzer.fitnessStats[hostname].keys %> <% divname = "fitness-#{hostname}".gsub(".", "-") %> <% end %>
<%= hostname %>
Full graphs
<% end %>

vSAN observer

vSAN observer is an experimental feature. It can be used to understand vSAN performance characteristics and as such is a tool intended for customers who desire deeper insight into vSAN as well as by VMware Support to analyze performance issues encountered in the field.

<%# System information summary: VC host and build, then per-ESX-host
    hardware details and, when available, the vSAN disk inventory.
    Rendered only when VC info and host properties were collected. %>
<% if vcInfo && (! vcInfo.empty?) && hosts_props %>

System information:
  • VC host: <%= vcInfo['hostname'] %>
  • VC build: <%= vcInfo['about']['fullName'] %>
  • <% hosts_props.each do |host, hostProps| %>
  • ESX host: <%= hostProps['name'] %>
    • Build: <%= hostProps['summary.config.product']['fullName'] %>
    • CPU: <%= hostProps['summary.hardware']['cpuModel'].gsub(/\ +/, " ") %> ( Packages: <%= hostProps['summary.hardware']['numCpuPkgs'] %>, Cores: <%= hostProps['summary.hardware']['numCpuCores'] %> )
    • Memory: <%= hostProps['summary.hardware']['memorySize'] / 1000**3 %> GB
    • Server vendor/model: <%= hostProps['summary.hardware']['vendor'] %> <%= hostProps['summary.hardware']['model'] %>
    • <% if hostProps['disks'] %>
    • vSAN Disks:
        <% hostProps['disks'].each do |disk| %>
      • <%= disk['ssd'] ? 'SSD' : 'HDD' %>: <%= disk['displayName'] %>
          <% if disk['vendor'] && disk['model'] %>
        • <%= disk['vendor'].strip %> <%= disk['model'].strip %>
        • <%= ((disk['size'] || 0) / 1000**3) %> GB
        • <% end %>
      • <% end %>
    • <% end %>
  • <% end %>

<% end %>

What am I looking at

Disclaimer: This view is primarily meant for VMware Support. Users may or may not be able to spot problems in the shown values.

This view shows information about vsanSparse. Information is presented at a per-host level and also per open vsanSparse disk. If a disk is migrated between two hosts, then it will have an entry under each host. Each line shows information about that disk and the performance of the cache layer.

Search:

What am I looking at

Disclaimer: This view is primarily meant for VMware Support. Users may or may not be able to spot problems in the shown values.

Shows CMMDS stats.

<%# One CMMDS stats graph per host, in sorted order, keyed by a sanitized
    "cmmds-<hostname>" div name (dots replaced for DOM-id safety). %>
<% inventoryAnalyzer.cmmdsStats.keys.sort.each do |hostname| %> <% divname = "cmmds-#{hostname}".gsub(".", "-") %> <% end %>
<%= hostname %>
Full graphs

What am I looking at

This view allows a VM centric drill down, all the way down to individual vSAN "components" stored on vSAN disk groups.

Every VM has a "VM home" or "namespace directory", i.e. the directory in which its .vmx, .log and other small files are stored. This view allows the user to drill down on those VM Home directories, as well as on any virtual disk the VM may have. While Virtual Disks are perceived as a single IO device by VMs (see "VSCSI stats"), in case of linked clones or VMs with snapshots a single virtual disk is actually backed by multiple vSAN objects that can be inspected separately. Any object other than the top most one is opened in read-only mode, so should only see read IO. All writes go to the top-most vSAN object.

Search:   

Total number of VMs: {{vmvallist.length}}
Matching number of VMs: {{(vmvallist | filter:{name:query}).length }}

  • {{vm.name}}

    • {{path}}
      DOM Object UUID: {{uuid}}

      DOM owner:



<%# Tasks tab header and summary table (closed by the matching end further
    down the template). Shows run metadata, uptime, and a per-task-type
    table sorted by descending failure rate. %>
<% if !skipTasksTab %>
<% if opts[:runName] != "" %> Test Run: <%= opts[:runName] %>
<% end %> <% if opts[:build] != "" %> ESX Build: <%= opts[:build] %> <% end %>

<%# Format the collector uptime as H:MM:SS. %>
<% uptime = tasksAnalyzer.uptime %> Uptime: <%= "%d:%02d:%02d" % [uptime / 3600, (uptime / 60) % 60, uptime % 60] %>
Updated <%= Time.now %>
<%= tasksAnalyzer.writeUptimeToFile %> <%= graphUpdateMsg %>
<%# The output tag below emits table.generate, the final expression inside
    it. NOTE(review): taskStats[k][:success] is assigned to taskTime and
    later has .close called on it, which suggests it is a file-like handle
    rather than a plain duration -- confirm against TasksAnalyzer. %>
<%= table = TableEmitter.new("Tasks", "Total", "Success", "Failure", "Time(s)", "Details") do |table| keys = tasksAnalyzer.taskTotal.keys keys.sort_by do |k| -1.0 * tasksAnalyzer.taskFailure[k] / tasksAnalyzer.taskTotal[k] end.each do |k| taskTime = "(no data)*" taskDetails = "" if tasksAnalyzer.taskStats[k].is_a?(Hash) taskTime = tasksAnalyzer.taskStats[k][:success] taskDetails = "H, " + "S" tasksAnalyzer.taskStats[k][:success].close tasksAnalyzer.taskStats[k][:failure].close end if tasksAnalyzer.taskFailure[k] > 0 taskDetails = taskDetails + ", E" end row = [ k, tasksAnalyzer.taskTotal[k], tasksAnalyzer.taskSuccess[k], tasksAnalyzer.taskFailure[k], taskTime, taskDetails ] table.row row end end table.generate %>
All exceptions:
    <%# Render the exception histogram, most frequent exception first,
        as "[count]<TAB>exception". Fix: dropped the redundant identity
        collect (it mapped each |k,v| to [k,v]); Hash#sort_by already
        yields [key, count] pairs, so sorting the histogram directly is
        equivalent. %>
    <% tasksAnalyzer.exceptionHisto.sort_by do |_key, count|
         -count
       end.each do |key, value|
    %>
      [<%= value %>]<%= "\t" %><%=key%>
    <% end %>
    
<%# Per-task-type detail sections (markup inside the loop appears stripped),
    followed by the Angular-bound task listing with its filter toggles.
    The final end closes the skipTasksTab conditional opened above. %>
<% tasksAnalyzer.taskStats.each_key do |k| %> <% end %>
Show tasks
Show only failures
{{task.op}} {{task.result}} {{task.id}} {{task.startTime * 1000 | date:'short'}} {{task.endTime * 1000 | date:'short'}}
<% end %>
{{profilingTimes}}